Hugo Guerrero
2018-06-08 b2812d5cd28561c8371d525d78dcc4d042f7c549
Merge branch 'development' of github.com:hguerrero/ansible_agnostic_deployer into development
102 files added
36 files modified
9569 lines changed
.gitignore 1
ansible/cloud_providers/ec2_destroy_env.yml 40
ansible/cloud_providers/ec2_infrastructure_deployment.yml 24
ansible/configs/README.adoc 13
ansible/configs/RHCLS-ScalableInfra-demo/software.yml 19
ansible/configs/ans-network-lab/software.yml 19
ansible/configs/ans-tower-lab/software.yml 19
ansible/configs/ansible-provisioner/software.yml 19
ansible/configs/ansible-tower-terraform/software.yml 19
ansible/configs/archive/ocp-implementation-lab/How.To.Create.Env.Type.adoc 190
ansible/configs/archive/ocp-implementation-lab/README.adoc 85
ansible/configs/archive/ocp-implementation-lab/destroy_env.yml 56
ansible/configs/archive/ocp-implementation-lab/env_vars.yml 210
ansible/configs/archive/ocp-implementation-lab/files/cloud_providers/ec2_cloud_template.j2 726
ansible/configs/archive/ocp-implementation-lab/files/ec2_internal_dns.json.j2 72
ansible/configs/archive/ocp-implementation-lab/files/hosts_template.j2 202
ansible/configs/archive/ocp-implementation-lab/files/htpasswd.openshift 102
ansible/configs/archive/ocp-implementation-lab/files/labs_hosts_template.j2 57
ansible/configs/archive/ocp-implementation-lab/files/pvs.j2 17
ansible/configs/archive/ocp-implementation-lab/files/repos_template.j2 36
ansible/configs/archive/ocp-implementation-lab/files/userpvs.j2 20
ansible/configs/archive/ocp-implementation-lab/post_infra.yml 32
ansible/configs/archive/ocp-implementation-lab/post_software.yml 41
ansible/configs/archive/ocp-implementation-lab/pre_infra.yml 13
ansible/configs/archive/ocp-implementation-lab/pre_software.yml 89
ansible/configs/archive/ocp-implementation-lab/software.yml 19
ansible/configs/auth-playground-lab/software.yml 19
ansible/configs/bu-workshop/software.yml 19
ansible/configs/generic-example/destroy_env.yml 33
ansible/configs/generic-example/post_infra.yml 4
ansible/configs/generic-example/post_software.yml 9
ansible/configs/generic-example/pre_infra.yml 5
ansible/configs/generic-example/pre_software.yml 2
ansible/configs/generic-example/software.yml 19
ansible/configs/ocp-demo-lab/software.yml 19
ansible/configs/ocp-ha-disconnected-lab/How.To.Create.Env.Type.adoc 190
ansible/configs/ocp-ha-disconnected-lab/README.adoc 99
ansible/configs/ocp-ha-disconnected-lab/aws_test.yml 11
ansible/configs/ocp-ha-disconnected-lab/destroy_env.yml 31
ansible/configs/ocp-ha-disconnected-lab/ec2_cloud_template.yml 750
ansible/configs/ocp-ha-disconnected-lab/env_vars.yml 377
ansible/configs/ocp-ha-disconnected-lab/files/cloud_providers/ec2.py_cloud_template.j2 844
ansible/configs/ocp-ha-disconnected-lab/files/cloud_providers/ec2_cloud_template.j2 339
ansible/configs/ocp-ha-disconnected-lab/files/ec2_internal_dns.json.j2 84
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.j2 301
ansible/configs/ocp-ha-disconnected-lab/files/htpasswd.openshift 103
ansible/configs/ocp-ha-disconnected-lab/files/labs_hosts_template.j2 55
ansible/configs/ocp-ha-disconnected-lab/files/pvs.j2 21
ansible/configs/ocp-ha-disconnected-lab/files/pvs_rwx.j2 17
ansible/configs/ocp-ha-disconnected-lab/files/repos_template.j2 37
ansible/configs/ocp-ha-disconnected-lab/files/userpvs.j2 20
ansible/configs/ocp-ha-disconnected-lab/post_infra.yml 32
ansible/configs/ocp-ha-disconnected-lab/post_ocp_nfs_config.yml 58
ansible/configs/ocp-ha-disconnected-lab/post_software.yml 126
ansible/configs/ocp-ha-disconnected-lab/pre_infra.yml 13
ansible/configs/ocp-ha-disconnected-lab/pre_software.yml 75
ansible/configs/ocp-ha-disconnected-lab/software.yml 19
ansible/configs/ocp-ha-lab/env_vars.yml 2
ansible/configs/ocp-ha-lab/files/hosts_template.j2 2
ansible/configs/ocp-ha-lab/files/htpasswd.openshift 1
ansible/configs/ocp-ha-lab/software.yml 19
ansible/configs/ocp-implementation-lab/software.yml 19
ansible/configs/ocp-workshop/README.adoc 4
ansible/configs/ocp-workshop/env_vars.yml 2
ansible/configs/ocp-workshop/files/cloud_providers/ec2_cloud_template.j2 34
ansible/configs/ocp-workshop/files/hosts_template.j2 17
ansible/configs/ocp-workshop/post_software.yml 21
ansible/configs/ocp-workshop/pre_software.yml 9
ansible/configs/ocp-workshop/software.yml 19
ansible/configs/openshift-demos/README.adoc 12
ansible/configs/openshift-demos/env_vars.yml 61
ansible/configs/openshift-demos/idle.yml 23
ansible/configs/openshift-demos/imagebuild-pipeline.yml 39
ansible/configs/openshift-demos/msa-cicd-eap-full.yml 188
ansible/configs/openshift-demos/msa-cicd-eap-min.yml 178
ansible/configs/openshift-demos/msa-full.yml 44
ansible/configs/openshift-demos/msa-min.yml 47
ansible/configs/openshift-demos/requirements-prod.yml 30
ansible/configs/openshift-demos/requirements.yml 24
ansible/configs/openshift-demos/undeploy.yml 20
ansible/configs/openshift-demos/unidle.yml 146
ansible/configs/openshift-demos/verify_tasks.yml 37
ansible/configs/ravello-bastion-setup/software.yml 19
ansible/configs/three-tier-app/software.yml 19
ansible/main.yml 9
ansible/roles/bastion-opentlc-ipa/tasks/main.yml 42
ansible/roles/bastion/tasks/main.yml 10
ansible/roles/lets-encrypt/README.md 42
ansible/roles/lets-encrypt/files/defaults/main.yml 2
ansible/roles/lets-encrypt/tasks/main.yml 45
ansible/roles/ocp-workload-bxms-ba/defaults/main.yml 26
ansible/roles/ocp-workload-bxms-ba/readme.adoc 131
ansible/roles/ocp-workload-bxms-ba/tasks/main.yml 20
ansible/roles/ocp-workload-bxms-ba/tasks/post_workload.yml 5
ansible/roles/ocp-workload-bxms-ba/tasks/pre_workload.yml 32
ansible/roles/ocp-workload-bxms-ba/tasks/remove_workload.yml 23
ansible/roles/ocp-workload-bxms-ba/tasks/wait_for_build.yml 23
ansible/roles/ocp-workload-bxms-ba/tasks/wait_for_deploy.yml 20
ansible/roles/ocp-workload-bxms-ba/tasks/workload.yml 100
ansible/roles/ocp-workload-bxms-dm/defaults/main.yml 26
ansible/roles/ocp-workload-bxms-dm/readme.adoc 63
ansible/roles/ocp-workload-bxms-dm/tasks/main.yml 20
ansible/roles/ocp-workload-bxms-dm/tasks/post_workload.yml 5
ansible/roles/ocp-workload-bxms-dm/tasks/pre_workload.yml 32
ansible/roles/ocp-workload-bxms-dm/tasks/remove_workload.yml 23
ansible/roles/ocp-workload-bxms-dm/tasks/wait_for_build.yml 23
ansible/roles/ocp-workload-bxms-dm/tasks/wait_for_deploy.yml 20
ansible/roles/ocp-workload-bxms-dm/tasks/workload.yml 65
ansible/roles/ocp-workload-developer-environment/defaults/main.yml 3
ansible/roles/ocp-workload-developer-environment/readme.adoc 113
ansible/roles/ocp-workload-developer-environment/tasks/post_workload.yml 1
ansible/roles/ocp-workload-developer-environment/tasks/pre_workload.yml 9
ansible/roles/ocp-workload-developer-environment/tasks/remove_workload.yml 24
ansible/roles/ocp-workload-developer-environment/tasks/workload.yml 1
ansible/roles/ocp-workload-fuse-enmasse/defaults/main.yml 1
ansible/roles/ocp-workload-fuse-enmasse/enmasse-provision.yml 18
ansible/roles/ocp-workload-fuse-enmasse/readme.adoc 128
ansible/roles/ocp-workload-fuse-enmasse/tasks/main.yml 9
ansible/roles/ocp-workload-fuse-enmasse/tasks/pre_workload.yml 30
ansible/roles/ocp-workload-fuse-enmasse/tasks/remove_workload.yml 10
ansible/roles/ocp-workload-fuse-enmasse/tasks/workload.yml 189
ansible/roles/ocp-workload-fuse-ignite/defaults/main.yml 1
ansible/roles/ocp-workload-fuse-ignite/tasks/main.yml 8
ansible/roles/ocp-workload-parksmap-demo/defaults/main.yml 19
ansible/roles/ocp-workload-parksmap-demo/description.html 18
ansible/roles/ocp-workload-parksmap-demo/files/workshop-mlbparks.yaml 338
ansible/roles/ocp-workload-parksmap-demo/files/workshop-nationalparks.yaml 335
ansible/roles/ocp-workload-parksmap-demo/files/workshop-parksmap.yaml 127
ansible/roles/ocp-workload-parksmap-demo/files/workshopper-template.yaml 165
ansible/roles/ocp-workload-parksmap-demo/readme.adoc 133
ansible/roles/ocp-workload-parksmap-demo/tasks/main.yml 20
ansible/roles/ocp-workload-parksmap-demo/tasks/post_workload.yml 9
ansible/roles/ocp-workload-parksmap-demo/tasks/pre_workload.yml 40
ansible/roles/ocp-workload-parksmap-demo/tasks/remove_workload.yml 25
ansible/roles/ocp-workload-parksmap-demo/tasks/workload.yml 66
ansible/roles/opentlc-integration/tasks/main.yml 8
ansible/software_playbooks/openshift.yml 5
scripts/README.adoc 2
.gitignore
@@ -5,3 +5,4 @@
ansible/judd.sh
*.swp
scripts/*.rc
wk*.rc
ansible/cloud_providers/ec2_destroy_env.yml
New file
@@ -0,0 +1,40 @@
---
- name: Delete Infrastructure
  hosts: localhost
  connection: local
  gather_facts: False
  become: no
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - name: Destroy cloudformation template
      cloudformation:
        stack_name: "{{project_tag}}"
        state: "absent"
        region: "{{aws_region}}"
        disable_rollback: false
        tags:
          Stack: "project {{env_type}}-{{ guid }}"
      tags:
        - destroying
        - destroy_cf_deployment
        - destroy_cloud_deployment
      register: cloudformation_result
      until: cloudformation_result|succeeded
      retries: 5
      delay: 60
      ignore_errors: yes
      when: cloud_provider == 'ec2'
    - name: report Cloudformation error
      fail:
        msg: "FAIL {{ project_tag }} Destroy Cloudformation"
      when:
        - not cloudformation_result|succeeded
        - cloud_provider == 'ec2'
      tags:
        - destroying
        - destroy_cf_deployment
        - destroy_cloud_deployment
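This new shared playbook centralizes CloudFormation teardown at the cloud-provider level. As a minimal sketch (not part of this commit), a config-level `destroy_env.yml` could delegate to it instead of embedding its own `cloudformation` task, assuming the usual `ANSIBLE_REPO_PATH`, `cloud_provider`, `guid`, and `env_type` extra vars are passed on the command line:
[source,yaml]
----
---
# Hypothetical config-level destroy_env.yml that reuses the shared
# cloud-provider teardown playbook; all variables come from -e extra vars.
- import_playbook: "{{ ANSIBLE_REPO_PATH }}/cloud_providers/{{ cloud_provider }}_destroy_env.yml"
----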
ansible/cloud_providers/ec2_infrastructure_deployment.yml
@@ -114,10 +114,30 @@
    copy:
      dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.s3user.credentials"
      content: |
        * S3 Bucket for registry: {{s3user}}
        ** S3User access key: {{s3user_access_key}}
        ** S3User secret key: {{s3user_secret_access_key}}
    when: s3user_access_key is defined
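  # Capture the Route53 user created by the CloudFormation stack; its keys are
  # written to the workdir so the Let's Encrypt role can perform DNS validation.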
  - name: get Route53User credentials from stack outputs
    set_fact:
      route53user: "{{ cloudformation_out.stack_outputs.Route53User }}"
      route53user_access_key: "{{ cloudformation_out.stack_outputs.Route53UserAccessKey }}"
      route53user_secret_access_key: "{{ cloudformation_out.stack_outputs.Route53UserSecretAccessKey }}"
    when:
      - cloudformation_out.stack_outputs.Route53UserAccessKey is defined
      - cloudformation_out.stack_outputs.Route53UserSecretAccessKey is defined
    tags:
      - provision_cf_template
  - name: write down Route53User credentials
    copy:
      dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.route53user.credentials"
      content: |
        * Route53 User for Let's Encrypt: {{ route53user }}
        ** Route53User access key: {{ route53user_access_key }}
        ** Route53User secret key: {{ route53user_secret_access_key }}
    when: route53user_access_key is defined
  - name: Gather EC2 facts
    ec2_remote_facts:
@@ -256,3 +276,5 @@
    - name: wait for windows host to be available
      wait_for_connection:
        timeout: 900
        connect_timeout: 60
        delay: 120
ansible/configs/README.adoc
New file
@@ -0,0 +1,13 @@
= Configs
This directory contains playbooks to provision, deploy, and configure software. Each config should contain these files:
* `pre_infra.yml`
* `post_infra.yml`
* `pre_software.yml`
* `software.yml`
* `post_software.yml`
* `env_vars.yml`
* `destroy_env.yml`
For more details see link:../main.yml[main.yml] and link:generic-example[generic-example].
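As a minimal sketch (following the same debug-only pattern used by the boilerplate `software.yml` files added in this commit), a stage playbook such as `pre_infra.yml` can start out as:
[source,yaml]
----
---
# Minimal stage playbook skeleton; a real config replaces the debug task
# with its own provisioning or configuration logic.
- name: Step 000 pre infrastructure
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tasks:
    - debug:
        msg: "Pre-Infrastructure tasks started"
----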
ansible/configs/RHCLS-ScalableInfra-demo/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/ans-network-lab/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/ans-tower-lab/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/ansible-provisioner/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/ansible-tower-terraform/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/archive/ocp-implementation-lab/How.To.Create.Env.Type.adoc
New file
@@ -0,0 +1,190 @@
= How to create an Environment Type
== Create a base for your new environment type
* Duplicate the "generic-example" environment type directory, or use another environment type directory that is closer to your end goal.
== Edit your cloud provider "blueprint" or "template"
NOTE: At this point this is "aws" based; other providers will be added over time.
* Edit the link:./files/cloud_providers/ec2_cloud_template.j2[./files/cloud_providers/ec2_cloud_template.j2]
* Add Security Groups if you require any.
* Add LaunchConfigs and AutoScale Groups
----
"HostLC": {
  "Type": "AWS::AutoScaling::LaunchConfiguration",
  "Properties": {
    "AssociatePublicIpAddress": true,
    "ImageId": {
      "Fn::FindInMap": [
        "RegionMapping",
        {
          "Ref": "AWS::Region"
        },
        "AMI"
      ]
    },
    "InstanceType": "{{host_instance_type}}",
    "KeyName": "{{key_name}}",
    "SecurityGroups": [
      {
        "Ref": "HostSG"
      }
    ],
    "BlockDeviceMappings": [
      {
        "DeviceName": "/dev/xvda",
        "Ebs": {
          "VolumeSize": 30
        }
      },
      {
        "DeviceName": "/dev/xvdb",
        "Ebs": {
          "VolumeSize": 100
        }
      }
    ]
  }
},
"HostAsg": {
  "Type": "AWS::AutoScaling::AutoScalingGroup",
  "Properties": {
    "DesiredCapacity": {{host_instance_count}},
    "LaunchConfigurationName": {
      "Ref": "HostLC"
    },
    "MaxSize": 100,
    "MinSize": 1,
    "Tags": [
      {
        "Key": "Name",
        "Value": "host",
        "PropagateAtLaunch": true
      },
      {
        "Key": "AnsibleGroup",
        "Value": "hosts",
        "PropagateAtLaunch": true
      },
      {
        "Key": "Project",
        "Value": "{{project_tag}}",
        "PropagateAtLaunch": true
      },
      {
        "Key": "{{ project_tag }}",
        "Value": "host",
        "PropagateAtLaunch": true
      }
    ],
    "VPCZoneIdentifier": [
      {
        "Ref": "PublicSubnet"
      }
    ]
  }
},
----
** Pay attention to the Tags created for the different AS groups
----
{
  "Key": "Project",
  "Value": "{{project_tag}}",
  "PropagateAtLaunch": true
},
{
  "Key": "{{ project_tag }}",
  "Value": "host",
  "PropagateAtLaunch": true
}
----
* Add DNS Entries you need for your environment:
----
"MasterDNS": {
  "Type": "AWS::Route53::RecordSetGroup",
  "DependsOn": "Master",
  "Properties": {
    "HostedZoneId": "{{HostedZoneId}}",
    "RecordSets": [
      {
        "Name": "{{master_public_dns}}",
        "Type": "A",
        "TTL": "10",
        "ResourceRecords": [
          {
            "Fn::GetAtt": [
              "Master",
              "PublicIp"
            ]
          }
        ]
      }
    ]
  }
},
----
* Add S3 or other resources you require:
----
"RegistryS3": {
  "Type": "AWS::S3::Bucket",
  "Properties": {
    "BucketName": "{{ env_type }}-{{ guid }}",
    "Tags": [
      {
        "Key": "Name",
        "Value": "s3-{{ env_type }}-{{ guid }}"
      },
      {
        "Key": "Project",
        "Value": "{{project_tag}}"
      }
    ]
  }
}
----
* Add any "outputs" you need from the cloud provider:
----
"RegistryS3Output": {
  "Description": "The ID of the S3 Bucket",
  "Value": {
    "Ref": "RegistryS3"
  }
},
----
== Internal DNS file
* Edit the internal dns template: link:./files/ec2_internal_dns.json.j2[./files/ec2_internal_dns.json.j2]
** You can create nicely indexed internal hostnames by adding a for loop to the file for each host group:
----
{% for host in groups['support'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "support{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
----
ansible/configs/archive/ocp-implementation-lab/README.adoc
New file
@@ -0,0 +1,85 @@
= OPENTLC OCP-IMPLEMENTATION-LAB Env_Type config
This Env_Type config defines the variables used to deploy the environment: for
example, EC2 instance names, and secret variables such as private/public key
pair information and passwords.
Eventually, all sensitive information will be encrypted via Ansible Vault;
instructions for doing this will be included in a later release.
== Set up your "Secret" variables
* You need to provide some credentials for deployments to work
* Create a file called "env_secret_vars.yml" and put it in the
 ./ansible/configs/CONFIGNAME/ directory.
** At this point this file has to be created even if no vars from it are used.
* You can choose to provide these values as extra vars (-e "var=value") in the
 command line if you prefer not to keep sensitive information in a file.
* In the future we will use ansible vault for this.
.Example contents of "Secret" Vars file
----
# ## Logon credentials for Red Hat Network
# ## Required if using the subscription component
# ## of this playbook.
rhel_subscription_user: ''
rhel_subscription_pass: ''
#
# ## LDAP Bind Password
bindPassword: ''
#
# ## Desired openshift admin name and password
admin_user: ""
admin_user_password: ""
#
# ## AWS Credentials. This is required.
aws_access_key_id: ""
aws_secret_access_key: ""
----
== Review the Env_Type variable file
* This file link:./env_vars.yml[./env_vars.yml] contains all the variables you
 need to define to control the deployment of your environment.
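A few of the values commonly overridden there, shown with this config's defaults:
[source,yaml]
----
# Instance sizing defaults from env_vars.yml; override with -e or edit in place.
master_instance_count: 1
master_instance_type: "t2.large"
infranode_instance_count: 1
node_instance_count: 2
node_instance_type: "t2.large"
support_instance_count: 1
----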
== Running Ansible Playbook
. You can run the playbook with the following arguments to override the default variable values:
[source,bash]
----
# Set your environment variables (this is optional, but makes life easier)
REGION=us-east-1
KEYNAME=ocpkey
GUID=testimp35
ENVTYPE="ocp-implementation-lab"
CLOUDPROVIDER=ec2
HOSTZONEID='Z3IHLWJZOU9SRT'
REPO_PATH='https://admin.example.com/repos/ocp/3.5/'
BASESUFFIX='.example.opentlc.com'
REPO_VERSION=3.5
NODE_COUNT=2
IPAPASS=ipapass
## For an HA environment that is deployed without installing OpenShift
time ansible-playbook ./main.yml \
    -e "osrelease=3.5.5.5" -e "repo_version=${REPO_VERSION}" -e "docker_version=1.12.6" \
    -e "guid=${GUID}" -e "env_type=${ENVTYPE}" -e "cloud_provider=${CLOUDPROVIDER}" \
    -e "aws_region=${REGION}"  -e "HostedZoneId=${HOSTZONEID}" -e "key_name=${KEYNAME}" \
    -e "subdomain_base_suffix=${BASESUFFIX}"  -e "install_idm=htpasswd" \
    -e "node_instance_count=${NODE_COUNT}" -e "infranode_instance_count=1" -e "master_instance_count=1" \
    -e "software_to_deploy=none"  -e "own_repo_path=${REPO_PATH}" -e "ipa_host_password=${IPAPASS}" \
    -e "tower_run=false"
. To delete an environment:
----
#To Destroy an Env
ansible-playbook ./configs/${ENVTYPE}/destroy_env.yml \
 -e "guid=${GUID}" -e "env_type=${ENVTYPE}"  -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}"  \
 -e "HostedZoneId=${HOSTZONEID}"  -e "key_name=${KEYNAME}"  -e "subdomain_base_suffix=${BASESUFFIX}"
----
ansible/configs/archive/ocp-implementation-lab/destroy_env.yml
New file
@@ -0,0 +1,56 @@
- name: Starting environment deployment
  hosts: localhost
  connection: local
  gather_facts: False
  become: no
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tasks:
    # - name: get internal dns zone id if not provided
    #   environment:
    #     AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    #     AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
    #     AWS_DEFAULT_REGION: "{{aws_region}}"
    #   shell: "aws route53 list-hosted-zones-by-name --region={{aws_region}} --dns-name={{guid}}.internal. --output text --query='HostedZones[*].Id' | awk -F'/' '{print $3}'"
    #   register: internal_zone_id_register
    # - debug:
    #     var: internal_zone_id_register
    # - name: Store internal route53 ID
    #   set_fact:
    #     internal_zone_id: "{{ internal_zone_id_register.stdout }}"
    #   when: 'internal_zone_id_register is defined'
    # - name: delete internal dns names
    #   environment:
    #     AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    #     AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
    #     AWS_DEFAULT_REGION: "{{aws_region}}"
    #   shell: "aws route53 change-resource-record-sets --hosted-zone-id {{internal_zone_id}}  --change-batch file://{{ ANSIBLE_REPO_PATH }}/workdir/internal_dns-{{ env_type }}-{{ guid }}_DELETE.json --region={{aws_region}}"
    #   ignore_errors: true
    #   tags:
    #     - internal_dns_delete
    #   when: internal_zone_id is defined
    - name: Destroy cloudformation template
      cloudformation:
        stack_name: "{{project_tag}}"
        state: "absent"
        region: "{{aws_region}}"
        disable_rollback: false
        tags:
          Stack: "project {{env_type}}-{{ guid }}"
      tags: [ destroying, destroy_cf_deployment ]
      register: cloudformation_result
      until: cloudformation_result|succeeded
      retries: 5
      delay: 60
      ignore_errors: yes
    - name: report Cloudformation error
      fail:
        msg: "FAIL {{ project_tag }} Destroy Cloudformation"
      when: not cloudformation_result|succeeded
      tags: [ destroying, destroy_cf_deployment ]
## we need to add something to delete the env specific key.
ansible/configs/archive/ocp-implementation-lab/env_vars.yml
New file
@@ -0,0 +1,210 @@
## TODO: What variables can we strip out of here to build complex variables?
## i.e. what can we add into group_vars as opposed to config_vars?
## Example: We don't really need "subdomain_base_short". If we want to use this,
## should just toss in group_vars/all.
### Also, we should probably just create a variable reference in the README.md
### For now, just tagging comments in line with configuration file.
deploy_local_ssh_config_location: "{{ ANSIBLE_REPO_PATH }}/workdir"
###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
###### OR PASS as "-e" args to ansible-playbook command
# #
# # env_groups:
# #   limit: "tag_Project_opentlc_shared_{{guid}}"
# #   bastions: "tag_AnsibleGroup_bastions"
# #   masters: "tag_AnsibleGroup_masters"
# #   nodes: "tag_AnsibleGroup_nodes"
# #   infranodes: "tag_AnsibleGroup_infranodes"
# #   nfs: "tag_AnsibleGroup_nfs"
#
# # This doesn't work
# all: "tag_Project_opentlc_shared_{{guid}}"
#
# # but maybe this is silly enough to work
# #all: "tag_Project_opentlc_shared_{{guid}}:&tag_Project_opentlc_shared_{{guid}}"
#rhn_pool_id_string: OpenShift Container Platform
# bastions: "{{env_groups['limit']}}:&{{env_groups['bastions']}}"
# masters: "{{env_groups['limit']}}:&{{env_groups['masters']}}"
# nodes: "{{env_groups['limit']}}:&{{env_groups['nodes']}}"
# infranodes: "{{env_groups['limit']}}:&{{env_groups['infranodes']}}"
# nfs: "{{env_groups['limit']}}:&{{env_groups['nfs']}}"
#
# ocp_pvs:
#   - es-storage
#   - nexus
#   - nexus2
#   - nexus3
config_nfs_uservols: "true"
user_vols: 200
user_vols_size: 4Gi
# master_api_port: 443
# osrelease: 3.4.1.10
# openshift_master_overwrite_named_certificates: true
# deploy_openshift: true
# deploy_openshift_post: true
deploy_env_post: true
# install_metrics: true
# install_logging: true
# multi_tenant_setting: "os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'"
# master_lb_dns: "master.{{subdomain_base}}"
# cloudapps_suffix: 'cloudapps.{{subdomain_base}}'
# openshift_master_ldap_ca_file: 'openshift_master_ldap_ca_file=/root/ca.crt'
################################################################################
#### GENERIC EXAMPLE
################################################################################
install_common: true
install_nfs: true
install_bastion: false
env_authorized_key: "{{guid}}key"
set_env_authorized_key: true
software_to_deploy: "none"
################################################################################
#### OCP IMPLEMENTATION LAB
################################################################################
repo_version: '3.4'
cloudapps_dns: '*.apps.{{subdomain_base}}.'
master_public_dns: "master.{{subdomain_base}}."
################################################################################
#### Common host variables
################################################################################
update_packages: false
common_packages:
  # - python
  # - unzip
  # - bash-completion
  - tmux
  # - bind-utils
  # - wget
  # - git
  # - vim-enhanced
  # - ansible
rhel_repos:
  - rhel-7-server-rpms
  - rhel-7-server-extras-rpms
  - rhel-7-server-ose-{{repo_version}}-rpms
use_own_repos: true
use_subscription_manager: false
#rhn_pool_id_string: "Red Hat Enterprise Linux Server"
#rhn_pool_id_string: OpenShift Container Platform
################################################################################
#### nfs host settings
################################################################################
nfs_vg: nfsvg
nfs_pvs: /dev/xvdb
nfs_export_path: /srv/nfs
nfs_shares:
  - logging
  - metrics
  - jenkins
  - nexus
  - justanother
################################################################################
#### CLOUD PROVIDER: AWS SPECIFIC VARIABLES
################################################################################
#### Route 53 Zone ID (AWS)
HostedZoneId: ''
key_name: ''
aws_region: us-east-1
admin_user: ''
admin_user_password: ''
#### Connection Settings
ansible_ssh_user: ec2-user
remote_user: ec2-user
#### Networking (AWS)
guid: defaultguid
subdomain_base_short: "{{ guid }}"
subdomain_base_suffix: ".example.opentlc.com"
subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"
tower_run: false
#### Environment Sizing
#bastion_instance_type: "t2.micro"
bastion_instance_type: "t2.small"
support_instance_type: "m4.large"
support_instance_count: 1
node_instance_type: "t2.large"
node_instance_count: 2
infranode_instance_type: "t2.large"
infranode_instance_count: 1
master_instance_type: "t2.large"
master_instance_count: 1
loadbalancer_instance_count: 0
#loadbalancer_instance_type: "t2.micro"
loadbalancer_instance_type: "t2.small"
ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
#### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYMENT
#### You can, but you usually wouldn't need to.
#### CLOUDFORMATIONS vars
project_tag: "{{ env_type }}-{{ guid }}"
#
# docker_version: "1.12.6"
# docker_device: /dev/xvdb
create_internal_dns_entries: true
zone_internal_dns: "{{guid}}.internal."
chomped_zone_internal_dns: "{{guid}}.internal"
zone_public_dns: "{{subdomain_base}}."
bastion_public_dns: "bastion.{{subdomain_base}}."
bastion_public_dns_chomped: "bastion.{{subdomain_base}}"
vpcid_cidr_block: "192.168.0.0/16"
vpcid_name_tag: "{{subdomain_base}}"
az_1_name: "{{ aws_region }}a"
az_2_name: "{{ aws_region }}b"
subnet_private_1_cidr_block: "192.168.2.0/24"
subnet_private_1_az: "{{ az_2_name }}"
subnet_private_1_name_tag: "{{subdomain_base}}-private"
subnet_private_2_cidr_block: "192.168.1.0/24"
subnet_private_2_az: "{{ az_1_name }}"
subnet_private_2_name_tag: "{{subdomain_base}}-private"
subnet_public_1_cidr_block: "192.168.10.0/24"
subnet_public_1_az: "{{ az_1_name }}"
subnet_public_1_name_tag: "{{subdomain_base}}-public"
subnet_public_2_cidr_block: "192.168.20.0/24"
subnet_public_2_az: "{{ az_2_name }}"
subnet_public_2_name_tag: "{{subdomain_base}}-public"
dopt_domain_name: "{{ aws_region }}.compute.internal"
rtb_public_name_tag: "{{subdomain_base}}-public"
rtb_private_name_tag: "{{subdomain_base}}-private"
cf_template_description: "{{ env_type }}-{{ guid }} template"
ansible/configs/archive/ocp-implementation-lab/files/cloud_providers/ec2_cloud_template.j2
New file
@@ -0,0 +1,726 @@
{
  "AWSTemplateFormatVersion": "2010-09-09",
  "Parameters": { },
  "Mappings": {
    "RegionMapping": {
      "us-east-1": {
        "AMI": "ami-c998b6b2"
      },
      "us-east-2": {
        "AMI": "ami-cfdafaaa"
      },
      "us-west-1": {
        "AMI": "ami-66eec506"
      },
      "us-west-2": {
        "AMI": "ami-9fa343e7"
      },
      "eu-west-1": {
        "AMI": "ami-bb9a6bc2"
      },
      "eu-central-1": {
        "AMI": "ami-d74be5b8"
      },
      "ap-northeast-1": {
        "AMI": "ami-30ef0556"
      },
      "ap-northeast-2": {
        "AMI": "ami-0f5a8361"
      },
      "ap-southeast-1": {
        "AMI": "ami-10bb2373"
      },
      "ap-southeast-2": {
        "AMI": "ami-ccecf5af"
      },
      "sa-east-1": {
        "AMI": "ami-a789ffcb"
      },
      "ap-south-1": {
        "AMI": "ami-cdbdd7a2"
      }
    },
    "DNSMapping": {
      "us-east-1": {
        "domain": "us-east-1.compute.internal"
      },
      "us-west-1": {
        "domain": "us-west-1.compute.internal"
      },
      "us-west-2": {
        "domain": "us-west-2.compute.internal"
      },
      "eu-west-1": {
        "domain": "eu-west-1.compute.internal"
      },
      "eu-central-1": {
        "domain": "eu-central-1.compute.internal"
      },
      "ap-northeast-1": {
        "domain": "ap-northeast-1.compute.internal"
      },
      "ap-northeast-2": {
        "domain": "ap-northeast-2.compute.internal"
      },
      "ap-southeast-1": {
        "domain": "ap-southeast-1.compute.internal"
      },
      "ap-southeast-2": {
        "domain": "ap-southeast-2.compute.internal"
      },
      "sa-east-1": {
        "domain": "sa-east-1.compute.internal"
      },
      "ap-south-1": {
        "domain": "ap-south-1.compute.internal"
      }
    }
  },
  "Resources": {
    "Vpc": {
      "Type": "AWS::EC2::VPC",
      "Properties": {
        "CidrBlock": "192.199.0.0/16",
        "EnableDnsSupport": "true",
        "EnableDnsHostnames": "true",
        "Tags": [
          {
            "Key": "Name",
            "Value": "VPCID_NAME_TAG"
          },
          {
            "Key": "Hostlication",
            "Value": {
              "Ref": "AWS::StackId"
            }
          }
        ]
      }
    },
    "VpcInternetGateway": {
      "Type": "AWS::EC2::InternetGateway",
      "Properties": {}
    },
    "VpcGA": {
      "Type": "AWS::EC2::VPCGatewayAttachment",
      "Properties": {
        "InternetGatewayId": {
          "Ref": "VpcInternetGateway"
        },
        "VpcId": {
          "Ref": "Vpc"
        }
      }
    },
    "VpcRouteTable": {
      "Type": "AWS::EC2::RouteTable",
      "Properties": {
        "VpcId": {
          "Ref": "Vpc"
        }
      }
    },
    "VPCRouteInternetGateway": {
      "DependsOn" : "VpcGA",
  "Type": "AWS::EC2::Route",
      "Properties": {
        "GatewayId": {
          "Ref": "VpcInternetGateway"
        },
        "DestinationCidrBlock": "0.0.0.0/0",
        "RouteTableId": {
          "Ref": "VpcRouteTable"
        }
      }
    },
    "PublicSubnet": {
      "Type": "AWS::EC2::Subnet",
      "DependsOn": [
        "Vpc"
      ],
      "Properties": {
        "CidrBlock": "192.199.0.0/24",
        "Tags": [
          {
            "Key": "Name",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "Hostlication",
            "Value": {
              "Ref": "AWS::StackId"
            }
          }
        ],
        "MapPublicIpOnLaunch": "true",
        "VpcId": {
          "Ref": "Vpc"
        }
      }
    },
    "PublicSubnetRTA": {
      "Type": "AWS::EC2::SubnetRouteTableAssociation",
      "Properties": {
        "RouteTableId": {
          "Ref": "VpcRouteTable"
        },
        "SubnetId": {
          "Ref": "PublicSubnet"
        }
      }
    },
    "HostSG": {
      "Type": "AWS::EC2::SecurityGroup",
      "Properties": {
        "GroupDescription": "Host",
        "VpcId": {
          "Ref": "Vpc"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "host_sg"
          }
        ]
      }
    },
    "HostUDPPorts": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        },
        "IpProtocol": "udp",
        "FromPort": "0",
        "ToPort": "65535",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "HostTCPPorts": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        },
        "IpProtocol": "tcp",
        "FromPort": "0",
        "ToPort": "65535",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "zoneinternalidns": {
      "Type": "AWS::Route53::HostedZone",
      "Properties": {
        "Name": "{{ zone_internal_dns }}",
        "VPCs" :  [{
      "VPCId": { "Ref" : "Vpc" },
      "VPCRegion": { "Ref": "AWS::Region" } } ],
        "HostedZoneConfig": {
          "Comment": "Created By ansible agnostic deployer"
        }
      }
    },
    "BastionDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "Properties": {
        "HostedZoneId": "{{HostedZoneId}}",
        "RecordSets": [
          {
            "Name": "{{bastion_public_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "Bastion",
                  "PublicIp"
                ]
              }
            ]
          }
        ]
      }
    },
    "MasterDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "Properties": {
        "HostedZoneId": "{{HostedZoneId}}",
        "RecordSets": [
          {
            "Name": "{{master_public_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "master1",
                  "PublicIp"
                ]
              }
            ]
          }
        ]
      }
    },
    "CloudDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "DependsOn": "Bastion",
      "Properties": {
        "HostedZoneId": "{{HostedZoneId}}",
        "RecordSets": [
          {
            "Name": "{{cloudapps_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "Bastion",
                  "PublicIp"
                ]
              }
            ]
          }
        ]
      }
    },
    "Bastion": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {
          "Fn::FindInMap": [
            "RegionMapping",
            {
              "Ref": "AWS::Region"
            },
            "AMI"
          ]
        },
        "InstanceType": "{{bastion_instance_type}}",
        "KeyName": "{{key_name}}",
        "SecurityGroupIds": [
          {
            "Fn::GetAtt": [
              "HostSG",
              "GroupId"
            ]
          }
        ],
        "SubnetId": {
          "Ref": "PublicSubnet"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "bastion"
          },
          {
            "Key": "AnsibleGroup",
            "Value": "bastions"
          },
          {
            "Key": "Project",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "{{ project_tag }}",
            "Value": "bastion"
          }
        ]
      }
  },
  "BastionInternalDNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
      "RecordSets": [
        {
          "Name": "bastion.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "Bastion",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
  {% for c in range(1,(master_instance_count|int)+1) %}
    "master{{c}}": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {
          "Fn::FindInMap": [
            "RegionMapping",
            {
              "Ref": "AWS::Region"
            },
            "AMI"
          ]
        },
        "InstanceType": "{{master_instance_type}}",
        "KeyName": "{{key_name}}",
        "SecurityGroupIds": [
          {
            "Fn::GetAtt": [
              "HostSG",
              "GroupId"
            ]
          }
        ],
        "SubnetId": {
          "Ref": "PublicSubnet"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "master"
          },
          {
            "Key": "AnsibleGroup",
            "Value": "masters"
          },
          {
            "Key": "Project",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "{{ project_tag }}",
            "Value": "master"
          }
        ],
        "BlockDeviceMappings": [
          {
            "DeviceName": "/dev/xvda",
            "Ebs": {
              "VolumeSize": 30
            }
          },
          {
            "DeviceName": "/dev/xvdb",
            "Ebs": {
              "VolumeType": "gp2",
              "VolumeSize": 20
            }
          }
        ]
      }
  },
  "master{{loop.index}}DNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
      "RecordSets": [
        {
          "Name": "master{{loop.index}}.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "master{{loop.index}}",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
  {% endfor %}
  {% for c in range(1,(node_instance_count|int)+1) %}
  "node{{loop.index}}": {
    "Type": "AWS::EC2::Instance",
    "Properties": {
      "ImageId": {
        "Fn::FindInMap": [
          "RegionMapping",
          {
            "Ref": "AWS::Region"
          },
          "AMI"
        ]
      },
      "InstanceType": "{{node_instance_type}}",
      "KeyName": "{{key_name}}",
      "SecurityGroupIds": [
        {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        }
      ],
      "SubnetId": {
        "Ref": "PublicSubnet"
      },
      "Tags": [
        {
          "Key": "Name",
          "Value": "node"
        },
        {
          "Key": "AnsibleGroup",
          "Value": "nodes"
        },
        {
          "Key": "Project",
          "Value": "{{project_tag}}"
        },
        {
          "Key": "{{ project_tag }}",
          "Value": "node"
        }
      ],
      "BlockDeviceMappings": [
        {
          "DeviceName": "/dev/xvda",
          "Ebs": {
            "VolumeSize": 30
          }
        },
        {
          "DeviceName": "/dev/xvdb",
          "Ebs": {
            "VolumeType": "gp2",
            "VolumeSize": 100
          }
        }
      ]
    }
  },
  "node{{loop.index}}DNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
      "RecordSets": [
        {
          "Name": "node{{loop.index}}.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "node{{loop.index}}",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
  {% endfor %}
  {% for c in range(1,(infranode_instance_count|int)+1) %}
  "infranode{{loop.index}}": {
    "Type": "AWS::EC2::Instance",
    "Properties": {
      "ImageId": {
        "Fn::FindInMap": [
          "RegionMapping",
          {
            "Ref": "AWS::Region"
          },
          "AMI"
        ]
      },
      "InstanceType": "{{infranode_instance_type}}",
      "KeyName": "{{key_name}}",
      "SecurityGroupIds": [
        {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        }
      ],
      "SubnetId": {
        "Ref": "PublicSubnet"
      },
      "Tags": [
        {
          "Key": "Name",
          "Value": "infranode"
        },
        {
          "Key": "AnsibleGroup",
          "Value": "infranodes"
        },
        {
          "Key": "Project",
          "Value": "{{project_tag}}"
        },
        {
          "Key": "{{ project_tag }}",
          "Value": "infranode"
        }
      ],
      "BlockDeviceMappings": [
        {
          "DeviceName": "/dev/xvda",
          "Ebs": {
            "VolumeSize": 30
          }
        },
        {
          "DeviceName": "/dev/xvdb",
          "Ebs": {
            "VolumeType": "gp2",
            "VolumeSize": 50
          }
        }
      ]
    }
  },
  "infranode{{loop.index}}DNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
      "RecordSets": [
        {
          "Name": "infranode{{loop.index}}.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "infranode{{loop.index}}",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
  {% endfor %}
  {% for c in range(1,(support_instance_count|int)+1) %}
  "support{{loop.index}}": {
    "Type": "AWS::EC2::Instance",
    "Properties": {
      "ImageId": {
        "Fn::FindInMap": [
          "RegionMapping",
          {
            "Ref": "AWS::Region"
          },
          "AMI"
        ]
      },
      "InstanceType": "{{support_instance_type}}",
      "KeyName": "{{key_name}}",
      "SecurityGroupIds": [
        {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        }
      ],
      "SubnetId": {
        "Ref": "PublicSubnet"
      },
      "Tags": [
        {
          "Key": "Name",
          "Value": "support"
        },
        {
          "Key": "AnsibleGroup",
          "Value": "support"
        },
        {
          "Key": "Project",
          "Value": "{{project_tag}}"
        },
        {
          "Key": "{{ project_tag }}",
          "Value": "support"
        }
      ],
      "BlockDeviceMappings": [
        {
          "DeviceName": "/dev/xvda",
          "Ebs": {
            "VolumeSize": 30
          }
        },
        {
          "DeviceName": "/dev/xvdb",
          "Ebs": {
            "VolumeType": "gp2",
            "VolumeSize": 50
          }
        }
      ]
    }
  },
  "support{{loop.index}}DNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
      "RecordSets": [
        {
          "Name": "support{{loop.index}}.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "support{{loop.index}}",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
{% endfor %}
},
  "Outputs": {
    "Route53internalzoneOutput": {
      "Description": "The ID of the internal route 53 zone",
      "Value": {
        "Ref": "zoneinternalidns"
      }
  }
}
}
ansible/configs/archive/ocp-implementation-lab/files/ec2_internal_dns.json.j2
New file
@@ -0,0 +1,72 @@
{
  "Comment": "Create internal dns zone entries",
  "Changes": [
{% for host in groups['masters'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "master{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['infranodes'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "infranode{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['nodes'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "node{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['support'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "nfs{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['bastions'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "bastion.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    }
{% endfor %}
  ]
}
ansible/configs/archive/ocp-implementation-lab/files/hosts_template.j2
New file
@@ -0,0 +1,202 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
openshift_metrics_image_version=v{{ repo_version }}
#openshift_image_tag=v{{ repo_version }}
openshift_release={{ osrelease }}
#docker_version="{{docker_version}}"
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# default project node selector
osm_default_node_selector='env=users'
###########################################################################
### OpenShift Optional Vars
###########################################################################
# Enable cockpit
osm_use_cockpit=true
osm_cockpit_plugins=['cockpit-kubernetes']
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname={{master_lb_dns}}
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" %}
openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com', 'bindPassword': '{{bindPassword}}', 'ca': 'ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid'}]
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "allow_all"  %}
openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
{% endif %}
{% if install_idm == "htpasswd"  %}
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
openshift_hosted_metrics_deploy={{install_metrics}}
openshift_hosted_metrics_storage_kind=nfs
openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
openshift_hosted_metrics_storage_host=support1.{{guid}}.internal
openshift_hosted_metrics_storage_nfs_directory=/srv/nfs
openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_metrics_storage_volume_name=metrics
openshift_hosted_metrics_storage_volume_size=10Gi
# Enable cluster logging
openshift_hosted_logging_deploy={{install_logging}}
openshift_hosted_logging_storage_kind=nfs
openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
openshift_hosted_logging_storage_nfs_directory=/srv/nfs
openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_logging_storage_volume_name=logging
openshift_hosted_logging_storage_volume_size=10Gi
openshift_hosted_logging_hostname=kibana.{{cloudapps_suffix}}
openshift_hosted_logging_elasticsearch_cluster_size=1
openshift_hosted_logging_deployer_version=v{{repo_version}}
# The following variable is wrong and must stay commented out
#openshift_hosted_logging_image_version=v{{repo_version}}
###########################################################################
### OpenShift Project Management Vars
###########################################################################
# Configure additional projects
openshift_additional_projects={'my-infra-project-test': {'default_node_selector': 'env=infra'}}
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_selector='env=infra'
openshift_hosted_router_replicas=1
openshift_hosted_registry_selector='env=infra'
openshift_hosted_registry_replicas=1
# Registry AWS S3
# S3 bucket must already exist.
openshift_hosted_registry_storage_kind=object
openshift_hosted_registry_storage_provider=s3
openshift_hosted_registry_storage_s3_accesskey={{ aws_access_key_id }}
openshift_hosted_registry_storage_s3_secretkey={{ aws_secret_access_key }}
openshift_hosted_registry_storage_s3_bucket={{ env_type }}-{{ guid }}
openshift_hosted_registry_storage_s3_region={{ aws_region }}
openshift_hosted_registry_storage_s3_chunksize=26214400
openshift_hosted_registry_storage_s3_rootdirectory=/registry
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=false
[OSEv3:children]
lb
masters
etcd
nodes
nfs
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[masters]
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}
{% endfor %}
[etcd]
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}} openshift_hostname=master{{loop.index}}.{{chomped_zone_internal_dns}}   ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes'] %}
infranode{{loop.index}}.{{chomped_zone_internal_dns}} openshift_hostname=infranode{{loop.index}}.{{chomped_zone_internal_dns}} openshift_ip={{hostvars[host]['ec2_private_ip_address']}} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes'] %}
node{{loop.index}}.{{chomped_zone_internal_dns}} openshift_hostname=node{{loop.index}}.{{chomped_zone_internal_dns}} openshift_ip={{hostvars[host]['ec2_private_ip_address']}} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users'}"
{% endfor %}
[nfs]
{% for host in groups['support'] %}
support{{loop.index}}.{{chomped_zone_internal_dns}} openshift_hostname=support{{loop.index}}.{{chomped_zone_internal_dns}} openshift_ip={{hostvars[host]['ec2_private_ip_address']}} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem
{% endfor %}
ansible/configs/archive/ocp-implementation-lab/files/htpasswd.openshift
New file
@@ -0,0 +1,102 @@
andrew:$apr1$dZPb2ECf$ercevOFO5znrynUfUj4tb/
karla:$apr1$FQx2mX4c$eJc21GuVZWNg1ULF8I2G31
user1:$apr1$FmrTsuSa$yducoDpvYq0KEV0ErmwpA1
user2:$apr1$JCcW2XQM$8takcyaYYrPT5I8M46TA01
user3:$apr1$zPC/rXKY$2PGF7dRsGwC3i8YJ59aOk0
user4:$apr1$e9/zT6dh$J18M.9zyn3DazrYreGV.B/
user5:$apr1$Nu/XJFVP$DgybymePret.Prch9MyxP/
user6:$apr1$VEbpwL9M$c1oFwS.emkt8fyR24zOzd0
user7:$apr1$wZxsnY/A$PK0O7iofGJJsvOZ3ctoNo.
user8:$apr1$5YBAWpGg$YO4ACHZL.c31NbQZH9LlE.
user9:$apr1$CIxB1enN$Aghb7.S4U3SXPRt55hTWI.
user10:$apr1$dWTDSR23$UGGJtkVC1ERmAOikomI9K0
user11:$apr1$j4fPyRZg$nNJk1nt1vAf54HAB/g/8g/
user12:$apr1$dd6kysUI$ueu/9.gbL0LkjpCbSjFNI.
user13:$apr1$DeRaAbVq$ZI3HtBzQxWYHifjIuPJSM1
user14:$apr1$dUuWDYgk$co6NQ4Dbcp3pQjVO5dR7Q.
user15:$apr1$4QmhSys7$wC.fKmKRqLNqoYqQ1dixJ/
user16:$apr1$RHcOPHg7$p9LgYP6zE4nMDlA8ongVc/
user17:$apr1$pji2xxHN$vvUHj/fbQRgLR.WBMblQH/
user18:$apr1$Lm79l0Qr$KgZSAuPcrTo4.GIWTBLGa/
user19:$apr1$KGxvneIX$.GJo7JB.N/c1FLW7vlblx/
user20:$apr1$WfYdosg5$cU1BsAzkIhTzKBx8Rvd3o1
user21:$apr1$cKRCbWLl$WCVjYUxD22GS5RRv1npwR1
user22:$apr1$QhpgOkFU$Y6Nn7NEPbJk3D9ehFb4i50
user23:$apr1$dVgQOh7j$L3JZlN8ZmdEwebXqD66Yl0
user24:$apr1$z/U5MAQB$GvKG3i8ATXWHhoxN9e0HS/
user25:$apr1$gFHGMQUV$w11pZbcBqVKOylr9TZ1EW.
user26:$apr1$5YG0dnOG$GzbnTQMBe0Dqc3f3pwvPL1
user27:$apr1$Kt6VoxNS$nq1Kzd53DUL8h8gfu4fEq/
user28:$apr1$aLAQHJ4d$qTRmUpw2eF9whEwDyIixG0
user29:$apr1$3HH4pgpa$Uh84gx3UP8vyPRfAIMPRl1
user30:$apr1$bbEEX3EF$ozw4jPcYHwVO7.MRzXtu0.
user31:$apr1$hD0kfz7i$SjNdGZbvto5EifBma5iA5.
user32:$apr1$fRMBUYu8$T5BQ8kI3pMgqXaRH7l8p..
user33:$apr1$es9ruteO$jZsV5/H8GIzw.vCfPs5310
user34:$apr1$OQ1I/gHn$.WA01EeXhDLE1K3vWD1wu.
user35:$apr1$KseEJXTS$kE/QO1XT0mZ44Iyw/ofnj/
user36:$apr1$PglCzG.g$44QsoAyMhanH5A40P5jhY1
user37:$apr1$2d5ggTIZ$xYsfdRBLOlEsnWRFVS9Yl0
user38:$apr1$x/cdV95V$mKFZmSkoBjeEu.HZshO0n.
user39:$apr1$VC6.WQOS$fAOAR1mx/i7Pnt2oGsDmu/
user40:$apr1$n36Hr3zC$lEVq4B7UWmdcnl01lUyR..
user41:$apr1$/q6tJtXi$9mCB1YCqdhEE6VVVVkVKc/
user42:$apr1$fTMTWEzw$X4MsyNlWketRjQgqonwxn.
user43:$apr1$.VwoJu38$D4v4NKL1KPuRZdNeprBXS/
user44:$apr1$e0s48GLK$JMQ849MeckVX0wG2vE2s10
user45:$apr1$a9ucQ1sC$HEMij.WGEa1xIQ01HpyKh1
user46:$apr1$uwOs/4nv$TB2r3pOPJ2K0A./CimVUT1
user47:$apr1$jfTmW1k5$Fd2ebTUtFFl3CLZWfFmRR.
user48:$apr1$4/apB/zd$IxoWJ5pTRNGgbxx3Ayl/i0
user49:$apr1$nu75PZ0r$bPCMgDmlOAj.YbeFPHJHE.
user50:$apr1$c/R3wJ/g$GJ03siVj5tkNxrg4OaxhJ0
user51:$apr1$EdEX6Pyt$IdPQHmhZi8FEbJjREVbe1/
user52:$apr1$ZMfyTjjX$RFOrnKsSr5xXA7IXn7TkC/
user53:$apr1$GY.rOkJM$uMCqJmmorP5I1v.YHHz1Z/
user54:$apr1$1vuZq/U0$Aq0Kz3wk0YPleDz/rTCdK0
user55:$apr1$KjULqmcD$XrhyYt2nWuiaQkbciDIcN/
user56:$apr1$gTPaNeq0$sqWJDPZ5//ZDjLf0dSbUh1
user57:$apr1$6PaKhdlY$dX2FkVJ0xV.4MAQeDUgRT0
user58:$apr1$.8MSdEpY$MPIbUO2WnC0wsno8zUOjC.
user59:$apr1$TWpKuAvt$CFeTQxxSgeU3dFkL4qpXb.
user60:$apr1$fEYUgRVU$LO2qwXfpxwI9fDXPfQgQB0
user61:$apr1$HHUBEn4G$.cAnwbh.ogNEzQSug3nqo/
user62:$apr1$Agt4GmKT$4k3Ev3FSJiNsbht3vUbxQ/
user63:$apr1$FsUKA7Hw$nkSgqSIFeqCY1mOyGje3O1
user64:$apr1$vBlkQoG4$8L2mTo8gdr8wC68G2y2G91
user65:$apr1$McEnEqn4$dZvjACdGp0HALVHBtHEu80
user66:$apr1$zamuhlOG$Xch5pbO1ki2Dad1dzjS4j.
user67:$apr1$qC1rll4s$cN4DzsWnyFBTNi3Cdi6161
user68:$apr1$txKPCx1k$WtrlrlP.UF.Rlzbnv6igE/
user69:$apr1$EO2A25Sj$DO/1lCNJJXff4GOsTZmHL/
user70:$apr1$pJu569Az$nHtF2ZkUrNXw9WN0Obb/T1
user71:$apr1$YKpEtZka$c59Fmov1cssRdrO5VqBKz1
user72:$apr1$CNkwam0s$b.QcPWytnhlOsaajMQx630
user73:$apr1$m5kE07o0$7TC3K.I16YTaRyN8EZq7E/
user74:$apr1$/5p0Qoyy$hjQ30Q8Ghb4zNrjjt2yLk/
user75:$apr1$ZF3yRTqJ$TgLBllrvTQuuiIjSb53xR0
user76:$apr1$711LL2Ai$59rBNmFprwZXtyFVBtRul0
user77:$apr1$N4uJhPSq$A.rVfAsRXCQqxOenDHjqX1
user78:$apr1$PHSpv5ty$WC8GlQpclQqH30eWPu.6e.
user79:$apr1$c/yk9dQ9$dvhh.P4F5zGnysBvwps4m/
user80:$apr1$oTmftf8R$FYzQD77hYfh9Wq3SvwYU7/
user81:$apr1$3YvQ/JPg$sDXhV8xpHNxQzFSvMMxAD1
user82:$apr1$quKB2P2.$iq.ZzDa3/xoaoY3.F1Un90
user83:$apr1$IVq8346H$lPQJZ7Thr/gJ2EmzDsktH0
user84:$apr1$xfehskAD$NRMQJttylejHtNKQqBj.k.
user85:$apr1$/LYLXNbH$/COZBzkaU0pPOXR38ZFVX/
user86:$apr1$a/xD3Jfw$rZXN4ykj0W6qadlh447n//
user87:$apr1$v01l1ljr$tGDKwdhKC05HEbntSxV5M0
user88:$apr1$9RYtWl12$ck19ozvS.SWeAAaDZqE940
user89:$apr1$EvSs2TA2$fRDg0hVOCf2jbhwXifzbs.
user90:$apr1$9ffAneiG$CAz5JWeIPGnamOQlVRGIk.
user91:$apr1$Z3XW5Yy4$Kibx7GmgdpC6CAM0IxhtC0
user92:$apr1$6CfIrBqr$5nGNCGA5QOPq/h8hlOE4f.
user93:$apr1$iJ4AQyfu$fkXSVib.OzPCSBQlLhwwS.
user94:$apr1$jiPqi0uI$XyYDQt0kcawqFLX12VW3n/
user95:$apr1$ULEkhfG2$/WHcoR9KJxAS3uw470Vkk.
user96:$apr1$56tQXa91$l0yaZgZHbDidgw95IP7yQ1
user97:$apr1$SoGwK9hP$YbceEfwmsM3QCdNGAaE1b.
user98:$apr1$MVU1/8dh$UKzkRk1CQP00SvnoPIm1..
user99:$apr1$v8vKZdHH$NC5xud.olhtdydHU9hav6.
user100:$apr1$glFN48wz$dR9w94PGiQL8qZXcXGd0L0
ansible/configs/archive/ocp-implementation-lab/files/labs_hosts_template.j2
New file
@@ -0,0 +1,57 @@
[OCPlabs:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
[OCPlabs:children]
lb
masters
etcd
nodes
nfs
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[masters]
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% endfor %}
[etcd]
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% endfor %}
## These are infranodes
{% for host in groups['infranodes'] %}
infranode{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% endfor %}
## These are regular nodes
{% for host in groups['nodes'] %}
node{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% endfor %}
[nfs]
{% for host in groups['support'] %}
support{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% endfor %}
ansible/configs/archive/ocp-implementation-lab/files/pvs.j2
New file
@@ -0,0 +1,17 @@
---
{% for pv in pv_list %}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ pv }}
spec:
  capacity:
    storage: {{pv_size}}
  accessModes:
  - ReadWriteOnce
  nfs:
    path: {{ nfs_export_path }}/{{pv}}
    server: nfs1.{{guid}}.internal
  persistentVolumeReclaimPolicy: {{persistentVolumeReclaimPolicy}}
---
{% endfor %}
ansible/configs/archive/ocp-implementation-lab/files/repos_template.j2
New file
@@ -0,0 +1,36 @@
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7
baseurl={{own_repo_path}}/rhel-7-server-rpms
enabled=1
gpgcheck=0
[rhel-7-server-rh-common-rpms]
name=Red Hat Enterprise Linux 7 Common
baseurl={{own_repo_path}}/rhel-7-server-rh-common-rpms
enabled=1
gpgcheck=0
[rhel-7-server-extras-rpms]
name=Red Hat Enterprise Linux 7 Extras
baseurl={{own_repo_path}}/rhel-7-server-extras-rpms
enabled=1
gpgcheck=0
[rhel-7-server-optional-rpms]
name=Red Hat Enterprise Linux 7 Optional
baseurl={{own_repo_path}}/rhel-7-server-optional-rpms
enabled=1
gpgcheck=0
[rhel-7-server-ose-{{repo_version}}-rpms]
name=Red Hat Enterprise Linux 7 OSE {{repo_version}}
baseurl={{own_repo_path}}/rhel-7-server-ose-{{repo_version}}-rpms
enabled=1
gpgcheck=0
## Required since OCP 3.5
[rhel-7-fast-datapath-rpms]
name=Red Hat Enterprise Linux Fast Datapath (RHEL 7 Server) (RPMs)
baseurl={{own_repo_path}}/rhel-7-fast-datapath-rpms
enabled=1
gpgcheck=0
ansible/configs/archive/ocp-implementation-lab/files/userpvs.j2
New file
@@ -0,0 +1,20 @@
---
{%  for pv in range(1,user_vols) %}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: vol{{ pv }}
spec:
  capacity:
    storage: {{ pv_size }}
  accessModes:
  - ReadWriteOnce
{% if  pv % 2 == 0 %}
  - ReadWriteMany
{% endif %}
  nfs:
    path: {{ nfs_export_path }}/user-vols/vol{{pv}}
    server: nfs1.{{guid}}.internal
  persistentVolumeReclaimPolicy: {{ persistentVolumeReclaimPolicy }}
---
{% endfor %}
ansible/configs/archive/ocp-implementation-lab/post_infra.yml
New file
@@ -0,0 +1,32 @@
- name: Step 002 Post Infrastructure
  hosts: localhost
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tags:
    - step002
    - post_infrastructure
  tasks:
    - debug:
        msg: "Step 002 Post Infrastructure - There are no post_infrastructure tasks defined"
      when: not (tower_run | default(false))
    - name: Job Template to launch a Job Template with update on launch inventory set
      uri:
        url: "https://{{ ansible_tower_ip }}/api/v1/job_templates/{{ job_template_id }}/launch/"
        method: POST
        user: "{{tower_admin}}"
        password: "{{tower_admin_password}}"
        body:
          extra_vars:
            guid: "{{guid}}"
            ipa_host_password: "{{ipa_host_password}}"
        body_format: json
        validate_certs: False
        HEADER_Content-Type: "application/json"
        status_code: 200, 201
      when: "{{ tower_run | default(false) }}"
      tags:
        - tower_workaround
ansible/configs/archive/ocp-implementation-lab/post_software.yml
New file
@@ -0,0 +1,41 @@
#vim: set ft=ansible:
---
- name: Step 005 - Post Software deployment
  hosts: localhost
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tags:
    - step005
  tasks:
    - name: Overwrite ansible hosts file with lab hosts file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/labs_hosts_template.j2"
        dest: "{{ ANSIBLE_REPO_PATH }}/workdir/labs_hosts-{{ env_type }}-{{ guid }}"
- name: Step lab post software deployment
  hosts: bastions
  gather_facts: False
  become: yes
  tags:
    - opentlc_bastion_tasks
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Ensures /etc/ansible dir exists
      file: path=/etc/ansible state=directory
    - name: Copy over ansible hosts file
      copy:
        backup: yes
        src: "{{ ANSIBLE_REPO_PATH }}/workdir/labs_hosts-{{ env_type }}-{{ guid }}"
        dest: /etc/ansible/hosts
      tags:
        - overwrite_hosts_with_lab_hosts
    - name: install ipa client packages
      yum:
        name: "ipa-client"
        state: present
    - name: Register bastion with IPA
      shell: "/usr/sbin/ipa-client-install --domain=OPENTLC.COM -w {{ipa_host_password}} -N -U --mkhomedir --no-dns-sshfp --hostname={{bastion_public_dns_chomped}}"
ansible/configs/archive/ocp-implementation-lab/pre_infra.yml
New file
@@ -0,0 +1,13 @@
- name: Step 000 Pre Infrastructure
  hosts: localhost
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tags:
    - step000
    - pre_infrastructure
  tasks:
    - debug:
        msg: "Step 000 Pre Infrastructure - There are no pre_infrastructure tasks defined"
ansible/configs/archive/ocp-implementation-lab/pre_software.yml
New file
@@ -0,0 +1,89 @@
- name: Step 003 - Create env key
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - step003
    - generate_env_keys
  tasks:
    - name: Generate SSH keys
      shell: ssh-keygen -b 2048 -t rsa -f "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" -q -N ""
      args:
        creates: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}"
      when: set_env_authorized_key
# Cloudformation template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }}
- name: Configure all hosts with Repositories, Common Files and Set environment key
  hosts:
    - all:!windows
  become: true
  gather_facts: False
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - step004
    - common_tasks
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories", when: 'repo_method is defined' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/common", when: 'install_common' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key", when: 'set_env_authorized_key' }
- name: Configuring Bastion Hosts
  hosts: bastions
  become: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  roles:
    -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion", when: 'install_bastion' }
  tags:
    - step004
    - bastion_tasks
- name: Pre-software verification and ipa client
  hosts: bastions
  gather_facts: False
  become: yes
  tags:
    - opentlc_bastion_tasks
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: install ipa client packages
      yum:
        name: "ipa-client"
        state: present
      when: "install_ipa_client"
    - name: Register bastion with IPA
      shell: "/usr/sbin/ipa-client-install --domain=OPENTLC.COM -w {{ipa_host_password}} -N -U --mkhomedir --no-dns-sshfp --hostname={{bastion_public_dns_chomped}}"
      when: "install_ipa_client"
    - name: Add opentlc-access ipa group to sudoers.d
      lineinfile:
        path: /etc/sudoers.d/opentlc-sudoers
        state: present
        create: yes
        line: '%opentlc-access ALL=(ALL)       NOPASSWD: ALL'
        validate: '/usr/sbin/visudo -cf %s'
- name: PreSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - flight_check
  tasks:
    - debug:
        msg: "Pre-Software checks completed successfully"
ansible/configs/archive/ocp-implementation-lab/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/auth-playground-lab/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/bu-workshop/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/generic-example/destroy_env.yml
@@ -1,30 +1,3 @@
- name: Delete Infrastructure
  hosts: localhost
  connection: local
  gather_facts: False
  become: no
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tasks:
    - name: Destroy cloudformation template
      cloudformation:
        stack_name: "{{project_tag}}"
        state: "absent"
        region: "{{aws_region}}"
        disable_rollback: false
        tags:
          Stack: "project {{env_type}}-{{ guid }}"
      tags: [ destroying, destroy_cf_deployment ]
      register: cloudformation_result
      until: cloudformation_result|succeeded
      retries: 5
      delay: 60
      ignore_errors: yes
    - name: report Cloudformation error
      fail:
        msg: "FAIL {{ project_tag }} Destroy Cloudformation"
      when: not cloudformation_result|succeeded
      tags: [ destroying, destroy_cf_deployment ]
---
- name: Import default CloudFormation (aws) destroy playbook
  import_playbook: "{{ANSIBLE_REPO_PATH}}/cloud_providers/{{cloud_provider}}_destroy_env.yml"
ansible/configs/generic-example/post_infra.yml
@@ -3,8 +3,8 @@
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - step002
    - post_infrastructure
ansible/configs/generic-example/post_software.yml
@@ -1,9 +1,11 @@
---
- name: Step 00xxxxx post software
  hosts: support
  hosts: localhost
  gather_facts: False
  become: yes
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - debug:
        msg: "Post-Software tasks Started"
@@ -13,9 +15,6 @@
  connection: local
  gather_facts: false
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - post_flight_check
  tasks:
ansible/configs/generic-example/pre_infra.yml
@@ -1,10 +1,11 @@
---
- name: Step 000 Pre Infrastructure
  hosts: localhost
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - step001
    - pre_infrastructure
ansible/configs/generic-example/pre_software.yml
@@ -1,4 +1,4 @@
---
- name: Step 003 - Create env key
  hosts: localhost
  connection: local
ansible/configs/generic-example/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/ocp-demo-lab/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/ocp-ha-disconnected-lab/How.To.Create.Env.Type.adoc
New file
@@ -0,0 +1,190 @@
= How to create an Environment Type
== Create a base for your new environment type
* Duplicate the "generic-example" environment type directory or use another
 environment type directory that is closer to your end goal.
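For example, a minimal sketch of duplicating the directory (the target name my-new-config is only a placeholder):
----
cd ansible/configs
cp -r generic-example my-new-config
----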
== Edit your cloud provider "blueprint" or "template"
NOTE: At this point this is "aws" based; other cloud providers will be added over time.
* Edit the link:./files/cloud_providers/ec2_cloud_template.j2[./files/cloud_providers/ec2_cloud_template.j2]
* Add Security Groups if you require any.
* Add LaunchConfigs and AutoScale Groups
----
"HostLC": {
  "Type": "AWS::AutoScaling::LaunchConfiguration",
  "Properties": {
    "AssociatePublicIpAddress": true,
    "ImageId": {
      "Fn::FindInMap": [
        "RegionMapping",
        {
          "Ref": "AWS::Region"
        },
        "AMI"
      ]
    },
    "InstanceType": "{{host_instance_type}}",
    "KeyName": "{{key_name}}",
    "SecurityGroups": [
      {
        "Ref": "HostSG"
      }
    ],
    "BlockDeviceMappings": [
      {
        "DeviceName": "/dev/xvda",
        "Ebs": {
          "VolumeSize": 30
        }
      },
      {
        "DeviceName": "/dev/xvdb",
        "Ebs": {
          "VolumeSize": 100
        }
      }
    ]
  }
},
"HostAsg": {
  "Type": "AWS::AutoScaling::AutoScalingGroup",
  "Properties": {
    "DesiredCapacity": {{host_instance_count}},
    "LaunchConfigurationName": {
      "Ref": "HostLC"
    },
    "MaxSize": 100,
    "MinSize": 1,
    "Tags": [
      {
        "Key": "Name",
        "Value": "host",
        "PropagateAtLaunch": true
      },
      {
        "Key": "AnsibleGroup",
        "Value": "hosts",
        "PropagateAtLaunch": true
      },
      {
        "Key": "Project",
        "Value": "{{project_tag}}",
        "PropagateAtLaunch": true
      },
      {
        "Key": "{{ project_tag }}",
        "Value": "host",
        "PropagateAtLaunch": true
      }
    ],
    "VPCZoneIdentifier": [
      {
        "Ref": "PublicSubnet"
      }
    ]
  }
},
----
** Pay attention to the Tags created for the different AS groups
----
{
  "Key": "Project",
  "Value": "{{project_tag}}",
  "PropagateAtLaunch": true
},
{
  "Key": "{{ project_tag }}",
  "Value": "host",
  "PropagateAtLaunch": true
}
----
* Add DNS Entries you need for your environment:
----
"MasterDNS": {
  "Type": "AWS::Route53::RecordSetGroup",
  "DependsOn": "Master",
  "Properties": {
    "HostedZoneId": "{{HostedZoneId}}",
    "RecordSets": [
      {
        "Name": "{{master_public_dns}}",
        "Type": "A",
        "TTL": "10",
        "ResourceRecords": [
          {
            "Fn::GetAtt": [
              "Master",
              "PublicIp"
            ]
          }
        ]
      }
    ]
  }
},
----
* Add S3 or other resources you require:
----
"RegistryS3": {
  "Type": "AWS::S3::Bucket",
  "Properties": {
    "BucketName": "{{ env_type }}-{{ guid }}",
    "Tags": [
      {
        "Key": "Name",
        "Value": "s3-{{ env_type }}-{{ guid }}"
      },
      {
        "Key": "Project",
        "Value": "{{project_tag}}"
      }
    ]
  }
}
},
----
* Add any "outputs" you need from the cloud provider:
----
"RegistryS3Output": {
  "Description": "The ID of the S3 Bucket",
  "Value": {
    "Ref": "RegistryS3"
  }},
----
== Internal DNS file
* Edit the internal dns template: link:./files/ec2_internal_dns.json.j2[./files/ec2_internal_dns.json.j2]
** You can create nicely indexed internal hostnames by adding a for loop in the file for each host group
----
{% for host in groups['support'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "support{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
----
ansible/configs/ocp-ha-disconnected-lab/README.adoc
New file
@@ -0,0 +1,99 @@
= OPENTLC OCP-HA-LAB Env_Type config
This config includes items such as EC2 instance names, secret variables such as
private/public key pair information, passwords, etc.
Eventually, all sensitive information will be encrypted via Ansible Vault.
Instructions for doing this will be included in a later release.
== Set up your "Secret" variables
* You need to provide some credentials for deployments to work
* Create a file called "env_secret_vars.yml" and put it in the
 ./ansible/configs/CONFIGNAME/ directory.
** At this point this file *has to be created* even if no vars from it are used.
* You can choose to provide these values as extra vars (-e "var=value") in the
 command line if you prefer not to keep sensitive information in a file.
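For example, a minimal sketch of passing them as extra vars instead (the shell variables are the ones set in the run example below; the REPLACEME values are placeholders):
----
ansible-playbook ${DEPLOYER_REPO_PATH}/main.yml \
    -e "guid=${GUID}" -e "env_type=${ENVTYPE}" -e "cloud_provider=${CLOUDPROVIDER}" \
    -e "aws_access_key_id=REPLACEME" -e "aws_secret_access_key=REPLACEME" \
    -e "bindPassword=REPLACEME" \
    -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}"
----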
.Example contents of "Secret" Vars file
----
# ## Logon credentials for Red Hat Network
# ## Required if using the subscription component
# ## of this playbook.
rhel_subscription_user: ''
rhel_subscription_pass: ''
#
# ## LDAP Bind Password
bindPassword: ''
#
# ## Desired openshift admin name and password
admin_user: ""
admin_user_password: ""
#
# ## AWS Credentials. This is required.
aws_access_key_id: ""
aws_secret_access_key: ""
#If using repo_method: satellite, you must set these values as well.
# satellite_url: https://satellite.example.com
# satellite_org: Sat_org_name
# satellite_activationkey: "rhel7basic"
----
== Review the Env_Type variable file
* This file link:./env_vars.yml[./env_vars.yml] contains all the variables you
 need to define to control the deployment of your environment.
== Running Ansible Playbook
. You can run the playbook with the following arguments to overwrite the default variable values:
[source,bash]
----
# Set the your environment variables (this is optional, but makes life easy)
REGION=ap-southeast-1
KEYNAME=ocpkey
GUID=testnewec21
ENVTYPE="ocp-ha-lab"
CLOUDPROVIDER=ec2
HOSTZONEID='Z3IHLWJZOU9SRT'
REPO_PATH='https://admin.example.com/repos/ocp/3.6/'
BASESUFFIX='.example.opentlc.com'
IPAPASS=aaaaaa
REPO_VERSION=3.6
NODE_COUNT=2
DEPLOYER_REPO_PATH=`pwd`
LOG_FILE=$(pwd)/${ENVTYPE}-${GUID}.log
## For a HA environment that is not installed with OpenShift
  ansible-playbook ${DEPLOYER_REPO_PATH}/main.yml  \
      -e "guid=${GUID}" -e "env_type=${ENVTYPE}" -e "key_name=${KEYNAME}" \
      -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}" -e "HostedZoneId=${HOSTZONEID}" \
      -e "subdomain_base_suffix=${BASESUFFIX}" \
      -e "bastion_instance_type=t2.medium" -e "master_instance_type=t2.large" \
      -e "infranode_instance_type=t2.large" -e "node_instance_type=t2.large" \
      -e "support_instance_type=t2.medium" -e "node_instance_count=${NODE_COUNT}" \
      -e "ipa_host_password=${IPAPASS}" -e "install_idm=htpasswd"  \
      -e "email=name@example.com" \
      -e "repo_method=file" -e "own_repo_path=${REPO_PATH}" -e "repo_version=${REPO_VERSION}" \
      -e "software_to_deploy=openshift" -e "osrelease=3.6.173.0.21" -e "docker_version=1.12.6" \
      -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
      --skip-tags=installing_openshift,get_openshift_credentials 1>> $LOG_FILE 2>> $LOG_FILE
----
. To Delete an environment
----
#To Destroy an Env
ansible-playbook  \
    ${DEPLOYER_REPO_PATH}/configs/${ENVTYPE}/destroy_env.yml \
    -e "guid=${GUID}" -e "env_type=${ENVTYPE}" \
    -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}"  -e "HostedZoneId=${HOSTZONEID}" \
    -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
    -e "key_name=${KEYNAME}"  -e "subdomain_base_suffix=${BASESUFFIX}"
----
ansible/configs/ocp-ha-disconnected-lab/aws_test.yml
New file
@@ -0,0 +1,11 @@
- hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
   - ./env_vars.yml
  tasks:
  - name: AWS Generate Cloudformation Template
    template:
      src: ./files/cloud_providers/ec2_cloud_template.j2
      dest: ./ec2_cloud_template.yml
ansible/configs/ocp-ha-disconnected-lab/destroy_env.yml
New file
@@ -0,0 +1,31 @@
- name: Delete Infrastructure
  hosts: localhost
  connection: local
  gather_facts: False
  become: no
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tasks:
    - name: Destroy cloudformation template
      cloudformation:
        stack_name: "{{project_tag}}"
        state: "absent"
        region: "{{aws_region}}"
        disable_rollback: false
        tags:
          Stack: "project {{env_type}}-{{ guid }}"
      tags: [ destroying, destroy_cf_deployment ]
      register: cloudformation_result
      until: cloudformation_result|succeeded
      retries: 5
      delay: 60
      ignore_errors: yes
    - name: report Cloudformation error
      fail:
        msg: "FAIL {{ project_tag }} Destroy Cloudformation"
      when: not cloudformation_result|succeeded
      tags: [ destroying, destroy_cf_deployment ]
## we need to add something to delete the env specific key.
ansible/configs/ocp-ha-disconnected-lab/ec2_cloud_template.yml
New file
@@ -0,0 +1,750 @@
---
AWSTemplateFormatVersion: "2010-09-09"
Mappings:
  RegionMapping:
    us-east-1:
      RHELAMI: ami-c998b6b2
    us-east-2:
      RHELAMI: ami-cfdafaaa
    us-west-1:
      RHELAMI: ami-66eec506d
    us-west-2:
      RHELAMI: ami-9fa343e7
    eu-west-1:
      RHELAMI: ami-bb9a6bc2
    eu-central-1:
      RHELAMI: ami-d74be5b8
    ap-northeast-1:
      RHELAMI: ami-30ef0556
    ap-northeast-2:
      RHELAMI: ami-0f5a8361
    ap-southeast-1:
      RHELAMI: ami-10bb2373
    ap-southeast-2:
      RHELAMI: ami-ccecf5af
    sa-east-1:
      RHELAMI: ami-a789ffcb
    ap-south-1:
      RHELAMI: ami-cdbdd7a2
  DNSMapping:
    us-east-1:
      domain: "us-east-1.compute.internal"
    us-west-1:
      domain: "us-west-1.compute.internal"
    us-west-2:
      domain: "us-west-2.compute.internal"
    eu-west-1:
      domain: "eu-west-1.compute.internal"
    eu-central-1:
      domain: "eu-central-1.compute.internal"
    ap-northeast-1:
      domain: "ap-northeast-1.compute.internal"
    ap-northeast-2:
      domain: "ap-northeast-2.compute.internal"
    ap-southeast-1:
      domain: "ap-southeast-1.compute.internal"
    ap-southeast-2:
      domain: "ap-southeast-2.compute.internal"
    sa-east-1:
      domain: "sa-east-1.compute.internal"
    ap-south-1:
      domain: "ap-south-1.compute.internal"
Resources:
  Vpc:
    Type: "AWS::EC2::VPC"
    Properties:
      CidrBlock: "192.168.0.0/16"
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
        - Key: Name
          Value: "prakhar-test.example.opentlc.com"
        - Key: Hostlication
          Value:
            Ref: "AWS::StackId"
  VpcInternetGateway:
    Type: "AWS::EC2::InternetGateway"
  VpcGA:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId:
        Ref: VpcInternetGateway
      VpcId:
        Ref: Vpc
  VpcRouteTable:
    Type: "AWS::EC2::RouteTable"
    Properties:
      VpcId:
        Ref: Vpc
  VPCRouteInternetGateway:
    DependsOn: VpcGA
    Type: "AWS::EC2::Route"
    Properties:
      GatewayId:
        Ref: VpcInternetGateway
      DestinationCidrBlock: "0.0.0.0/0"
      RouteTableId:
        Ref: VpcRouteTable
  PublicSubnet:
    Type: "AWS::EC2::Subnet"
    DependsOn:
      - Vpc
    Properties:
      CidrBlock: "192.168.2.0/24"
      Tags:
        - Key: Name
          Value: "ocp-ha-lab-prakhar-test"
      MapPublicIpOnLaunch: true
      VpcId:
        Ref: Vpc
  PrivateSubnet:
    Type: "AWS::EC2::Subnet"
    DependsOn:
      - Vpc
    Properties:
      CidrBlock: "192.168.1.0/24"
      Tags:
        - Key: Name
          Value: "ocp-ha-lab-prakhar-test"
      MapPublicIpOnLaunch: true
      VpcId:
        Ref: Vpc
  PublicSubnetRTA:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
    Properties:
      RouteTableId:
        Ref: VpcRouteTable
      SubnetId:
        Ref: PublicSubnet
  MasterSG:
    Type: "AWS::EC2::SecurityGroup"
    Properties:
      GroupDescription: Host
      VpcId:
        Ref: Vpc
      Tags:
        - Key: Name
          Value: "MasterSG"
  NodeSG:
    Type: "AWS::EC2::SecurityGroup"
    Properties:
      GroupDescription: Host
      VpcId:
        Ref: Vpc
      Tags:
        - Key: Name
          Value: "NodeSG"
  MasterSGDNSTCP:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - "MasterSG"
          - GroupId
      IpProtocol: "tcp"
      FromPort: "53"
      ToPort: "53"
      CidrIp: "0.0.0.0/0"
  MasterSGDNSUDP:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - "MasterSG"
          - GroupId
      IpProtocol: "udp"
      FromPort: "53"
      ToPort: "53"
      CidrIp: "0.0.0.0/0"
  MasterSGSDN:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - "MasterSG"
          - GroupId
      IpProtocol: "udp"
      FromPort: "4789"
      ToPort: "4789"
      CidrIp: "0.0.0.0/0"
  MasterSGHTTPSAccess:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - "MasterSG"
          - GroupId
      IpProtocol: "tcp"
      FromPort: "443"
      ToPort: "443"
      CidrIp: "0.0.0.0/0"
  NodeSGKubelet:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - "NodeSG"
          - GroupId
      IpProtocol: "tcp"
      FromPort: "10250"
      ToPort: "10250"
      CidrIp: "0.0.0.0/0"
  NodeSGSDN:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - "NodeSG"
          - GroupId
      IpProtocol: "udp"
      FromPort: "4789"
      ToPort: "4789"
      CidrIp: "0.0.0.0/0"
#   PublicSG:
#     Type: "AWS::EC2::SecurityGroup"
#     Properties:
#       GroupDescription: Host
#       VpcId:
#         Ref: Vpc
#       Tags:
#         - Key: Name
#           Value: host_sg
#
#   HostUDPPorts:
#     Type: "AWS::EC2::SecurityGroupIngress"
#     Properties:
#       GroupId:
#         Fn::GetAtt:
#           - PublicSG
#           - GroupId
#       IpProtocol: udp
#       FromPort: 0
#       ToPort: 65535
#       CidrIp: "0.0.0.0/0"
#
#   HostTCPPorts:
#     Type: "AWS::EC2::SecurityGroupIngress"
#     Properties:
#       GroupId:
#         Fn::GetAtt:
#           - PublicSG
#           - GroupId
#       IpProtocol: tcp
#       FromPort: 0
#       ToPort: 65535
#       CidrIp: "0.0.0.0/0"
#
#   zoneinternalidns:
#     Type: "AWS::Route53::HostedZone"
#     Properties:
#       Name: "prakhar-test.internal."
#       VPCs:
#         - VPCId:
#             Ref: Vpc
#           VPCRegion:
#             Ref: "AWS::Region"
#       HostedZoneConfig:
#         Comment: "Created By ansible agnostic deployer"
#
#   CloudDNS:
#     Type: AWS::Route53::RecordSetGroup
#     DependsOn:
# #       - "infranode1EIP"
# #     Properties:
#       HostedZoneId: "Z3IHLWJZOU9SRT"
#       RecordSets:
#         - Name: "*.apps.prakhar-test.example.opentlc.com."
#           Type: A
#           TTL: 900
#           ResourceRecords:
# #             - Fn::GetAtt:
#                 - infranode1
#                 - PublicIp
# #
# # #
# #   bastion1:
#     Type: "AWS::EC2::Instance"
#     Properties:
#       ImageId:
#         Fn::FindInMap:
#         - RegionMapping
#         - Ref: AWS::Region
#         - RHELAMI
#       InstanceType: "t2.small"
#       KeyName: "ocpkey"
# #       SecurityGroupIds:
#         - "Fn::GetAtt":
#           - PublicSG
#           - GroupId
#       SubnetId:
#         Ref: PublicSubnet
#       Tags:
# #         - Key: Name
#           Value: bastion
#         - Key: internaldns
#           Value: bastion.prakhar-test.internal
# #         - Key: "owner"
#           Value: "unknownuser"
#         - Key: "Project"
#           Value: "ocp-ha-lab-prakhar-test"
#         - Key: "ocp-ha-lab-prakhar-test"
#           Value: "bastion"
# #         - Key: AnsibleGroup
#           Value: bastions
# #         - Key: ostype
#           Value: linux
# #       BlockDeviceMappings:
#         - DeviceName: "/dev/sda1"
#           Ebs:
#             VolumeSize: 20
# #
#   bastion1InternalDNS:
#     Type: "AWS::Route53::RecordSetGroup"
#     Properties:
#       HostedZoneId:
#         Ref: zoneinternalidns
#       RecordSets:
# #       - Name: "bastion.prakhar-test.internal."
# #         Type: A
#         TTL: 10
#         ResourceRecords:
#           - "Fn::GetAtt":
#             - bastion1
#             - PrivateIp
#
# #   bastion1EIP:
#     Type: "AWS::EC2::EIP"
#     DependsOn:
#     - VpcGA
#     Properties:
#       InstanceId:
#         Ref: bastion1
#
#   bastion1PublicDNS:
#     Type: "AWS::Route53::RecordSetGroup"
#     DependsOn:
#       - bastion1EIP
#     Properties:
#       HostedZoneId: Z3IHLWJZOU9SRT
#       RecordSets:
# #           - Name: "bastion.prakhar-test.example.opentlc.com."
# #             Type: A
#             TTL: 10
#             ResourceRecords:
#             - "Fn::GetAtt":
#               - bastion1
#               - PublicIp
# # # # #   loadbalancerDNSLoadBalancer:
#     Type: "AWS::Route53::RecordSetGroup"
#     DependsOn:
# #       - loadbalancer1EIP
# #     Properties:
#       HostedZoneId: Z3IHLWJZOU9SRT
#       RecordSets:
#       - Name: "loadbalancer.prakhar-test.example.opentlc.com."
#         Type: A
#         TTL: 900
#         ResourceRecords:
# #           - "Fn::GetAtt":
#             - loadbalancer1
#             - PublicIp
# # #
# #   loadbalancer1:
#     Type: "AWS::EC2::Instance"
#     Properties:
#       ImageId:
#         Fn::FindInMap:
#         - RegionMapping
#         - Ref: AWS::Region
#         - RHELAMI
#       InstanceType: "t2.small"
#       KeyName: "ocpkey"
# #       SecurityGroupIds:
#         - "Fn::GetAtt":
#           - PublicSG
#           - GroupId
#       SubnetId:
#         Ref: PublicSubnet
#       Tags:
# #         - Key: Name
#           Value: loadbalancer1
#         - Key: internaldns
#           Value: loadbalancer1.prakhar-test.internal
# #         - Key: "owner"
#           Value: "unknownuser"
#         - Key: "Project"
#           Value: "ocp-ha-lab-prakhar-test"
#         - Key: "ocp-ha-lab-prakhar-test"
#           Value: "loadbalancer"
# #         - Key: AnsibleGroup
#           Value: loadbalancers
# #         - Key: ostype
#           Value: linux
# #       BlockDeviceMappings:
#         - DeviceName: "/dev/sda1"
#           Ebs:
#             VolumeSize: 20
# #
#   loadbalancer1InternalDNS:
#     Type: "AWS::Route53::RecordSetGroup"
#     Properties:
#       HostedZoneId:
#         Ref: zoneinternalidns
#       RecordSets:
# #       - Name: "loadbalancer1.prakhar-test.internal."
# #         Type: A
#         TTL: 10
#         ResourceRecords:
#           - "Fn::GetAtt":
#             - loadbalancer1
#             - PrivateIp
#
# #   loadbalancer1EIP:
#     Type: "AWS::EC2::EIP"
#     DependsOn:
#     - VpcGA
#     Properties:
#       InstanceId:
#         Ref: loadbalancer1
#
#   loadbalancer1PublicDNS:
#     Type: "AWS::Route53::RecordSetGroup"
#     DependsOn:
#       - loadbalancer1EIP
#     Properties:
#       HostedZoneId: Z3IHLWJZOU9SRT
#       RecordSets:
# #           - Name: "loadbalancer1.prakhar-test.example.opentlc.com."
# #             Type: A
#             TTL: 10
#             ResourceRecords:
#             - "Fn::GetAtt":
#               - loadbalancer1
#               - PublicIp
# # # # #
# #   master1:
#     Type: "AWS::EC2::Instance"
#     Properties:
#       ImageId:
#         Fn::FindInMap:
#         - RegionMapping
#         - Ref: AWS::Region
#         - RHELAMI
#       InstanceType: "t2.large"
#       KeyName: "ocpkey"
# #       SecurityGroupIds:
#         - "Fn::GetAtt":
#           - PublicSG
#           - GroupId
#       SubnetId:
#         Ref: PublicSubnet
#       Tags:
# #         - Key: Name
#           Value: master1
#         - Key: internaldns
#           Value: master1.prakhar-test.internal
# #         - Key: "owner"
#           Value: "unknownuser"
#         - Key: "Project"
#           Value: "ocp-ha-lab-prakhar-test"
#         - Key: "ocp-ha-lab-prakhar-test"
#           Value: "master"
# #         - Key: AnsibleGroup
#           Value: masters
# #         - Key: ostype
#           Value: linux
# #       BlockDeviceMappings:
#         - DeviceName: "/dev/sda1"
#           Ebs:
#             VolumeSize: 50
# #         - DeviceName: "/dev/xvdb"
#           Ebs:
#             VolumeType: "gp2"
#             VolumeSize: "20"
# #
#   master1InternalDNS:
#     Type: "AWS::Route53::RecordSetGroup"
#     Properties:
#       HostedZoneId:
#         Ref: zoneinternalidns
#       RecordSets:
# #       - Name: "master1.prakhar-test.internal."
# #         Type: A
#         TTL: 10
#         ResourceRecords:
#           - "Fn::GetAtt":
#             - master1
#             - PrivateIp
#
# # # # #
# #   node1:
#     Type: "AWS::EC2::Instance"
#     Properties:
#       ImageId:
#         Fn::FindInMap:
#         - RegionMapping
#         - Ref: AWS::Region
#         - RHELAMI
#       InstanceType: "t2.large"
#       KeyName: "ocpkey"
# #       SecurityGroupIds:
#         - "Fn::GetAtt":
#           - PublicSG
#           - GroupId
#       SubnetId:
#         Ref: PublicSubnet
#       Tags:
# #         - Key: Name
#           Value: node1
#         - Key: internaldns
#           Value: node1.prakhar-test.internal
# #         - Key: "owner"
#           Value: "unknownuser"
#         - Key: "Project"
#           Value: "ocp-ha-lab-prakhar-test"
#         - Key: "ocp-ha-lab-prakhar-test"
#           Value: "node"
# #         - Key: AnsibleGroup
#           Value: nodes
# #         - Key: ostype
#           Value: linux
# #       BlockDeviceMappings:
#         - DeviceName: "/dev/sda1"
#           Ebs:
#             VolumeSize: 50
# #         - DeviceName: "/dev/xvdb"
#           Ebs:
#             VolumeType: "gp2"
#             VolumeSize: "100"
# #
#   node1InternalDNS:
#     Type: "AWS::Route53::RecordSetGroup"
#     Properties:
#       HostedZoneId:
#         Ref: zoneinternalidns
#       RecordSets:
# #       - Name: "node1.prakhar-test.internal."
# #         Type: A
#         TTL: 10
#         ResourceRecords:
#           - "Fn::GetAtt":
#             - node1
#             - PrivateIp
#
# # #   node2:
#     Type: "AWS::EC2::Instance"
#     Properties:
#       ImageId:
#         Fn::FindInMap:
#         - RegionMapping
#         - Ref: AWS::Region
#         - RHELAMI
#       InstanceType: "t2.large"
#       KeyName: "ocpkey"
# #       SecurityGroupIds:
#         - "Fn::GetAtt":
#           - PublicSG
#           - GroupId
#       SubnetId:
#         Ref: PublicSubnet
#       Tags:
# #         - Key: Name
#           Value: node2
#         - Key: internaldns
#           Value: node2.prakhar-test.internal
# #         - Key: "owner"
#           Value: "unknownuser"
#         - Key: "Project"
#           Value: "ocp-ha-lab-prakhar-test"
#         - Key: "ocp-ha-lab-prakhar-test"
#           Value: "node"
# #         - Key: AnsibleGroup
#           Value: nodes
# #         - Key: ostype
#           Value: linux
# #       BlockDeviceMappings:
#         - DeviceName: "/dev/sda1"
#           Ebs:
#             VolumeSize: 50
# #         - DeviceName: "/dev/xvdb"
#           Ebs:
#             VolumeType: "gp2"
#             VolumeSize: "100"
# #
#   node2InternalDNS:
#     Type: "AWS::Route53::RecordSetGroup"
#     Properties:
#       HostedZoneId:
#         Ref: zoneinternalidns
#       RecordSets:
# #       - Name: "node2.prakhar-test.internal."
# #         Type: A
#         TTL: 10
#         ResourceRecords:
#           - "Fn::GetAtt":
#             - node2
#             - PrivateIp
#
# # # # #   infranodeDNSLoadBalancer:
#     Type: "AWS::Route53::RecordSetGroup"
#     DependsOn:
# #       - infranode1EIP
# #     Properties:
#       HostedZoneId: Z3IHLWJZOU9SRT
#       RecordSets:
#       - Name: "infranode.prakhar-test.example.opentlc.com."
#         Type: A
#         TTL: 900
#         ResourceRecords:
# #           - "Fn::GetAtt":
#             - infranode1
#             - PublicIp
# # #
# #   infranode1:
#     Type: "AWS::EC2::Instance"
#     Properties:
#       ImageId:
#         Fn::FindInMap:
#         - RegionMapping
#         - Ref: AWS::Region
#         - RHELAMI
#       InstanceType: "t2.large"
#       KeyName: "ocpkey"
# #       SecurityGroupIds:
#         - "Fn::GetAtt":
#           - PublicSG
#           - GroupId
#       SubnetId:
#         Ref: PublicSubnet
#       Tags:
# #         - Key: Name
#           Value: infranode1
#         - Key: internaldns
#           Value: infranode1.prakhar-test.internal
# #         - Key: "owner"
#           Value: "unknownuser"
#         - Key: "Project"
#           Value: "ocp-ha-lab-prakhar-test"
#         - Key: "ocp-ha-lab-prakhar-test"
#           Value: "infranode"
# #         - Key: AnsibleGroup
#           Value: infranodes
# #         - Key: ostype
#           Value: linux
# #       BlockDeviceMappings:
#         - DeviceName: "/dev/sda1"
#           Ebs:
#             VolumeSize: 50
# #         - DeviceName: "/dev/xvdb"
#           Ebs:
#             VolumeType: "gp2"
#             VolumeSize: "50"
# #
#   infranode1InternalDNS:
#     Type: "AWS::Route53::RecordSetGroup"
#     Properties:
#       HostedZoneId:
#         Ref: zoneinternalidns
#       RecordSets:
# #       - Name: "infranode1.prakhar-test.internal."
# #         Type: A
#         TTL: 10
#         ResourceRecords:
#           - "Fn::GetAtt":
#             - infranode1
#             - PrivateIp
#
# #   infranode1EIP:
#     Type: "AWS::EC2::EIP"
#     DependsOn:
#     - VpcGA
#     Properties:
#       InstanceId:
#         Ref: infranode1
#
#   infranode1PublicDNS:
#     Type: "AWS::Route53::RecordSetGroup"
#     DependsOn:
#       - infranode1EIP
#     Properties:
#       HostedZoneId: Z3IHLWJZOU9SRT
#       RecordSets:
# #           - Name: "infranode1.prakhar-test.example.opentlc.com."
# #             Type: A
#             TTL: 10
#             ResourceRecords:
#             - "Fn::GetAtt":
#               - infranode1
#               - PublicIp
# # # # #
# #   support1:
#     Type: "AWS::EC2::Instance"
#     Properties:
#       ImageId:
#         Fn::FindInMap:
#         - RegionMapping
#         - Ref: AWS::Region
#         - RHELAMI
#       InstanceType: "t2.medium"
#       KeyName: "ocpkey"
# #       SecurityGroupIds:
#         - "Fn::GetAtt":
#           - PublicSG
#           - GroupId
#       SubnetId:
#         Ref: PublicSubnet
#       Tags:
# #         - Key: Name
#           Value: support1
#         - Key: internaldns
#           Value: support1.prakhar-test.internal
# #         - Key: "owner"
#           Value: "unknownuser"
#         - Key: "Project"
#           Value: "ocp-ha-lab-prakhar-test"
#         - Key: "ocp-ha-lab-prakhar-test"
#           Value: "support"
# #         - Key: AnsibleGroup
#           Value: support
# #         - Key: ostype
#           Value: linux
# #       BlockDeviceMappings:
#         - DeviceName: "/dev/sda1"
#           Ebs:
#             VolumeSize: 20
# #         - DeviceName: "/dev/xvdb"
#           Ebs:
#             VolumeType: "gp2"
#             VolumeSize: "50"
# #
#   support1InternalDNS:
#     Type: "AWS::Route53::RecordSetGroup"
#     Properties:
#       HostedZoneId:
#         Ref: zoneinternalidns
#       RecordSets:
# #       - Name: "support1.prakhar-test.internal."
# #         Type: A
#         TTL: 10
#         ResourceRecords:
#           - "Fn::GetAtt":
#             - support1
#             - PrivateIp
#
# # # #
# Outputs:
#   Route53internalzoneOutput:
#     Description: The ID of the internal route 53 zone
#     Value:
#       Ref: zoneinternalidns
ansible/configs/ocp-ha-disconnected-lab/env_vars.yml
New file
@@ -0,0 +1,377 @@
## TODO: What variables can we strip out of here to build complex variables?
## i.e. what can we add into group_vars as opposed to config_vars?
## Example: We don't really need "subdomain_base_short". If we want to use this,
## should just toss in group_vars/all.
### Also, we should probably just create a variable reference in the README.md
### For now, just tagging comments in line with configuration file.
###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
###### OR PASS as "-e" args to ansible-playbook command
# #
# # env_groups:
# #   limit: "tag_Project_opentlc_shared_{{guid}}"
# #   bastions: "tag_AnsibleGroup_bastions"
# #   masters: "tag_AnsibleGroup_masters"
# #   nodes: "tag_AnsibleGroup_nodes"
# #   infranodes: "tag_AnsibleGroup_infranodes"
# #   nfs: "tag_AnsibleGroup_nfs"
#
# # This doesn't work
# all: "tag_Project_opentlc_shared_{{guid}}"
#
# # but maybe this is silly enough to work
# #all: "tag_Project_opentlc_shared_{{guid}}:&tag_Project_opentlc_shared_{{guid}}"
#rhn_pool_id_string: OpenShift Container Platform
# bastions: "{{env_groups['limit']}}:&{{env_groups['bastions']}}"
# masters: "{{env_groups['limit']}}:&{{env_groups['masters']}}"
# nodes: "{{env_groups['limit']}}:&{{env_groups['nodes']}}"
# infranodes: "{{env_groups['limit']}}:&{{env_groups['infranodes']}}"
# nfs: "{{env_groups['limit']}}:&{{env_groups['nfs']}}"
install_ipa_client: false
repo_method: file
ocp_pvs:
  # - es-storage
  # - nexus
  # - nexus2
  # - nexus3
config_nfs_uservols: "true"
user_vols: 200
user_vols_size: 4Gi
master_api_port: 443
osrelease: 3.7.9
openshift_master_overwrite_named_certificates: true
deploy_openshift: true
deploy_openshift_post: true
deploy_env_post: true
install_metrics: true
install_logging: true
ovs_plugin: "subnet" # This can also be set to: "multitenant"
multi_tenant_setting: "os_sdn_network_plugin_name='redhat/openshift-ovs-{{ovs_plugin}}'"
master_lb_dns: "loadbalancer1.{{subdomain_base}}"
cloudapps_suffix: 'apps.{{subdomain_base}}'
openshift_master_ldap_ca_file: 'openshift_master_ldap_ca_file=/root/ca.crt'
# htpasswd / ldap / allow_all
install_idm: htpasswd
 ## If you are not part of GPTE you don't need this.
opentlc_integration: true
################################################################################
#### GENERIC EXAMPLE
################################################################################
install_common: true
install_nfs: true
install_bastion: false
env_authorized_key: "{{guid}}key"
set_env_authorized_key: true
software_to_deploy: "openshift"
################################################################################
#### OCP IMPLEMENTATION LAB
################################################################################
repo_version: '3.7'
cloudapps_dns: '*.apps.{{subdomain_base}}.'
master_public_dns: "loadbalancer.{{subdomain_base}}."
################################################################################
#### Common host variables
################################################################################
update_packages: false
common_packages:
  - python
  - unzip
  - bash-completion
  - tmux
  - bind-utils
  - wget
  - git
  - vim-enhanced
  - ansible
rhel_repos:
  - rhel-7-server-rpms
  - rhel-7-server-extras-rpms
  - rhel-7-server-ose-{{repo_version}}-rpms
use_subscription_manager: false
use_own_repos: true
#rhn_pool_id_string: "Red Hat Enterprise Linux Server"
rhn_pool_id_string: OpenShift Container Platform
################################################################################
#### nfs host settings
################################################################################
nfs_vg: nfsvg
nfs_pvs: /dev/xvdb
nfs_export_path: /srv/nfs
nfs_size: 50
nfs_shares:
  # - jenkins
  # - nexus
################################################################################
#### CLOUD PROVIDER: AWS SPECIFIC VARIABLES
################################################################################
#### Route 53 Zone ID (AWS)
HostedZoneId: ''
key_name: ''
aws_region: us-east-1
admin_user: ''
admin_user_password: ''
#### Connection Settings
ansible_ssh_user: ec2-user
remote_user: ec2-user
#### Networking (AWS)
guid: defaultguid
subdomain_base_short: "{{ guid }}"
subdomain_base_suffix: ".example.opentlc.com"
subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"
tower_run: false
#### Environment Sizing
#bastion_instance_type: "t2.micro"
bastion_instance_type: "t2.small"
support_instance_type: "t2.medium"
support_instance_count: 1
node_instance_type: "t2.large"
node_instance_count: 2
infranode_instance_type: "t2.large"
infranode_instance_count: 1
master_instance_type: "t2.large"
master_instance_count: 1
loadbalancer_instance_count: 1
loadbalancer_instance_type: "t2.small"
# scaleup
new_node_instance_count: 0
ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
#### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYMENT
#### You can, but you usually wouldn't need to.
#### CLOUDFORMATIONS vars
project_tag: "{{ env_type }}-{{ guid }}"
docker_version: "1.12.6"
docker_device: /dev/xvdb
create_internal_dns_entries: true
zone_internal_dns: "{{guid}}.internal."
chomped_zone_internal_dns: "{{guid}}.internal"
zone_public_dns: "{{subdomain_base}}."
bastion_public_dns: "bastion.{{subdomain_base}}."
bastion_public_dns_chomped: "bastion.{{subdomain_base}}"
vpcid_cidr_block: "192.168.0.0/16"
vpcid_name_tag: "{{subdomain_base}}"
# az_1_name: "{{ aws_region }}a"
# az_2_name: "{{ aws_region }}b"
#
# subnet_private_1_cidr_block: "192.168.2.0/24"
# subnet_private_1_az: "{{ az_2_name }}"
# subnet_private_1_name_tag: "{{subdomain_base}}-private"
#
# subnet_private_2_cidr_block: "192.168.1.0/24"
# subnet_private_2_az: "{{ az_1_name }}"
# subnet_private_2_name_tag: "{{subdomain_base}}-private"
#
# subnet_public_1_cidr_block: "192.168.10.0/24"
# subnet_public_1_az: "{{ az_1_name }}"
# subnet_public_1_name_tag: "{{subdomain_base}}-public"
#
# subnet_public_2_cidr_block: "192.168.20.0/24"
# subnet_public_2_az: "{{ az_2_name }}"
# subnet_public_2_name_tag: "{{subdomain_base}}-public"
# dopt_domain_name: "{{ aws_region }}.compute.internal"
#
# rtb_public_name_tag: "{{subdomain_base}}-public"
# rtb_private_name_tag: "{{subdomain_base}}-private"
#
# cf_template_description: "{{ env_type }}-{{ guid }} template"
rootfs_size_node: 50
rootfs_size_infranode: 50
rootfs_size_master: 50
rootfs_size_bastion: 20
rootfs_size_support: 20
rootfs_size_loadbalancer: 20
security_groups:
  - name: MasterSG
    rules:
      - name: DNS TCP
        description: "TCP Ports for DNS"
        from_port: 53
        to_port: 53
        protocol: tcp
        cidr: "0.0.0.0/0"
      - name: DNS UDP
        description: "UDP Ports for DNS"
        protocol: udp
        from_port: 53
        to_port: 53
        cidr: "0.0.0.0/0"
      - name: SDN
        description: "SDN Communication"
        from_port: 4789
        to_port: 4789
        protocol: udp
        cidr: "0.0.0.0/0"
      - name: HTTPS Access
        description: "HTTPS Access"
        protocol: tcp
        from_port: 443
        to_port: 443
        cidr: "0.0.0.0/0"
  - name: NodeSG
    rules:
      - name: Kubelet
        description: "Kubelet for OC"
        from_port: 10250
        to_port: 10250
        protocol: tcp
        cidr: "0.0.0.0/0"
      - name: SDN
        description: "SDN Communication"
        from_port: 4789
        to_port: 4789
        cidr: "0.0.0.0/0"
        protocol: udp
subnets:
  - name: PublicSubnet
    cidr: "192.168.2.0/24"
    routing_table: true
  - name: PrivateSubnet
    cidr: "192.168.1.0/24"
    routing_table: false # this could be wrong [sborenst]
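#### Instances
# Each entry below describes one instance group consumed by the cloud template:
# "count" sets how many instances are created, "flavor" maps the cloud provider
# to an instance type, "tags" become EC2 tags (AnsibleGroup drives inventory
# grouping), "volumes" adds extra EBS block devices, and "rootfs_size" sizes /.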
instances:
  - name: "bastion"
    count: 1
    unique: true
    public_dns: true
    dns_loadbalancer: true
    flavor:
      "ec2": "{{bastion_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "bastions"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_bastion }}"
    security_group: "PublicSG"
    subnet: "PublicSubnet"
  - name: "loadbalancer"
    count: "{{loadbalancer_instance_count}}"
    public_dns: true
    dns_loadbalancer: true
    flavor:
      "ec2": "{{loadbalancer_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "loadbalancers"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_loadbalancer }}"
    security_group: "PublicSG"
    subnet: "PublicSubnet"
  - name: "master"
    count: "{{master_instance_count}}"
    public_dns: false
    dns_loadbalancer: false
    flavor:
      "ec2": "{{master_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "masters"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_master }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: 20
        volume_type: gp2
    security_group: "PublicSG"
    subnet: "PublicSubnet"
  - name: "node"
    count: "{{node_instance_count}}"
    public_dns: false
    dns_loadbalancer: false
    flavor:
      "ec2": "{{node_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "nodes"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_node }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: 100
        volume_type: gp2
    security_group: "PublicSG"
    subnet: "PublicSubnet"
  - name: "infranode"
    count: "{{infranode_instance_count}}"
    public_dns: true
    dns_loadbalancer: true
    flavor:
      "ec2": "{{infranode_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "infranodes"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_infranode }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: 50
        volume_type: gp2
    security_group: "PublicSG"
    subnet: "PublicSubnet"
  - name: "support"
    count: "{{support_instance_count}}"
    public_dns: false
    dns_loadbalancer: false
    flavor:
      "ec2": "{{support_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "support"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_support }}"
    volumes:
      - device_name: "{{nfs_pvs}}"
        volume_size: "{{nfs_size}}"
        volume_type: gp2
    security_group: "PublicSG"
    subnet: "PublicSubnet"
ansible/configs/ocp-ha-disconnected-lab/files/cloud_providers/ec2.py_cloud_template.j2
New file
@@ -0,0 +1,844 @@
{
  "AWSTemplateFormatVersion": "2010-09-09",
  "Parameters": { },
  "Mappings": {
    "RegionMapping": {
      "us-east-1": {
        "AMI": "ami-c998b6b2"
      },
      "us-east-2": {
        "AMI": "ami-0932686c"
      },
      "us-west-1": {
        "AMI": "ami-2cade64c"
      },
      "us-west-2": {
        "AMI": "ami-6f68cf0f"
      },
      "eu-west-1": {
        "AMI": "ami-02ace471"
      },
      "eu-central-1": {
        "AMI": "ami-e4c63e8b"
      },
      "ap-northeast-1": {
        "AMI": "ami-5de0433c"
      },
      "ap-northeast-2": {
        "AMI": "ami-44db152a"
      },
      "ap-southeast-1": {
        "AMI": "ami-2c95344f"
      },
      "ap-southeast-2": {
        "AMI": "ami-39ac915a"
      },
      "sa-east-1": {
        "AMI": "ami-7de77b11"
      },
      "ap-south-1": {
        "AMI": "ami-cdbdd7a2"
      }
    },
    "DNSMapping": {
      "us-east-1": {
        "domain": "us-east-1.compute.internal"
      },
      "us-west-1": {
        "domain": "us-west-1.compute.internal"
      },
      "us-west-2": {
        "domain": "us-west-2.compute.internal"
      },
      "eu-west-1": {
        "domain": "eu-west-1.compute.internal"
      },
      "eu-central-1": {
        "domain": "eu-central-1.compute.internal"
      },
      "ap-northeast-1": {
        "domain": "ap-northeast-1.compute.internal"
      },
      "ap-northeast-2": {
        "domain": "ap-northeast-2.compute.internal"
      },
      "ap-southeast-1": {
        "domain": "ap-southeast-1.compute.internal"
      },
      "ap-southeast-2": {
        "domain": "ap-southeast-2.compute.internal"
      },
      "sa-east-1": {
        "domain": "sa-east-1.compute.internal"
      },
      "ap-south-1": {
        "domain": "ap-south-1.compute.internal"
      }
    }
  },
  "Resources": {
    "Vpc": {
      "Type": "AWS::EC2::VPC",
      "Properties": {
        "CidrBlock": "192.199.0.0/16",
        "EnableDnsSupport": "true",
        "EnableDnsHostnames": "true",
        "Tags": [
          {
            "Key": "Name",
            "Value": "{{vpcid_name_tag}}"
          },
          {
            "Key": "Hostlication",
            "Value": {
              "Ref": "AWS::StackId"
            }
          }
        ]
      }
    },
    "VpcInternetGateway": {
      "Type": "AWS::EC2::InternetGateway",
      "Properties": {}
    },
    "VpcGA": {
      "Type": "AWS::EC2::VPCGatewayAttachment",
      "Properties": {
        "InternetGatewayId": {
          "Ref": "VpcInternetGateway"
        },
        "VpcId": {
          "Ref": "Vpc"
        }
      }
    },
    "VpcRouteTable": {
      "Type": "AWS::EC2::RouteTable",
      "Properties": {
        "VpcId": {
          "Ref": "Vpc"
        }
      }
    },
    "VPCRouteInternetGateway": {
        "DependsOn" : "VpcGA",
        "Type": "AWS::EC2::Route",
        "Properties": {
            "GatewayId": {
                "Ref": "VpcInternetGateway"
            },
            "DestinationCidrBlock": "0.0.0.0/0",
            "RouteTableId": {
                "Ref": "VpcRouteTable"
            }
        }
    },
    "PublicSubnet": {
      "Type": "AWS::EC2::Subnet",
      "DependsOn": [
        "Vpc"
      ],
      "Properties": {
        "CidrBlock": "192.199.0.0/24",
        "Tags": [
          {
            "Key": "Name",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "Hostlication",
            "Value": {
              "Ref": "AWS::StackId"
            }
          }
        ],
        "MapPublicIpOnLaunch": "true",
        "VpcId": {
          "Ref": "Vpc"
        }
      }
    },
    "PublicSubnetRTA": {
      "Type": "AWS::EC2::SubnetRouteTableAssociation",
      "Properties": {
        "RouteTableId": {
          "Ref": "VpcRouteTable"
        },
        "SubnetId": {
          "Ref": "PublicSubnet"
        }
      }
    },
    "HostSG": {
      "Type": "AWS::EC2::SecurityGroup",
      "Properties": {
        "GroupDescription": "Host",
        "VpcId": {
          "Ref": "Vpc"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "host_sg"
          }
        ]
      }
    },
    "HostUDPPorts": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        },
        "IpProtocol": "udp",
        "FromPort": "0",
        "ToPort": "65535",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "HostTCPPorts": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        },
        "IpProtocol": "tcp",
        "FromPort": "0",
        "ToPort": "65535",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "zoneinternalidns": {
      "Type": "AWS::Route53::HostedZone",
      "Properties": {
        "Name": "{{ zone_internal_dns }}",
        "VPCs" :  [{
      "VPCId": { "Ref" : "Vpc" },
      "VPCRegion": { "Ref": "AWS::Region" } } ],
        "HostedZoneConfig": {
          "Comment": "Created By ansible agnostic deployer"
        }
      }
    },
    "BastionDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "DependsOn": [ "BastionEIP" ],
      "Properties": {
        "HostedZoneId": "{{HostedZoneId}}",
        "RecordSets": [
          {
            "Name": "{{bastion_public_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "Bastion",
                  "PublicIp"
                ]
              }
            ]
          }
        ]
      }
    },
    "LoadBalancerDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "DependsOn": [ "LoadBalancerEIP" ],
      "Properties": {
        "HostedZoneId": "{{HostedZoneId}}",
        "RecordSets": [
          {
            "Name": "loadbalancer.{{subdomain_base}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "LoadBalancer",
                  "PublicIp"
                ]
              }
            ]
          }
        ]
      }
    },
    "CloudDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "DependsOn": "infranode{{infranode_instance_count}}EIP",
      "Properties": {
        "HostedZoneId": "{{HostedZoneId}}",
        "RecordSets": [
        {
          "Name" : "{{cloudapps_dns}}",
          "Type" : "A",
          "TTL" : "900",
          "ResourceRecords" : [
{% for c in range(1,(infranode_instance_count|int)+1) %}
{ "Fn::GetAtt": [ "infranode{{loop.index}}", "PublicIp" ] }{% if loop.index < infranode_instance_count   %},{% endif %}
{% endfor %}
          ]}]
    }},
    "Bastion": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {
          "Fn::FindInMap": [
            "RegionMapping",
            {
              "Ref": "AWS::Region"
            },
            "AMI"
          ]
        },
        "InstanceType": "{{bastion_instance_type}}",
        "KeyName": "{{key_name}}",
        "SecurityGroupIds": [
          {
            "Fn::GetAtt": [
              "HostSG",
              "GroupId"
            ]
          }
        ],
        "SubnetId": {
          "Ref": "PublicSubnet"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "bastion"
          },
          {
            "Key": "AnsibleGroup",
            "Value": "bastions"
          },
          {
            "Key": "Project",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "{{ project_tag }}",
            "Value": "bastion"
          },
          {
            "Key": "owner",
            "Value": "{{ email | default('unknown')}}"
          }
        ],
        "BlockDeviceMappings": [
          {
            "DeviceName": "/dev/sda1",
            "Ebs": {
              "VolumeSize": {{ rootfs_size_bastion }}
            }
          }
        ]
      }
    },
    "BastionEIP" : {
        "Type" : "AWS::EC2::EIP",
        "DependsOn": ["VpcGA"],
        "Properties" : {
            "InstanceId" : {
                "Ref" : "Bastion"
            }
        }
    },
    "BastionInternalDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "Properties": {
      "HostedZoneId" : { "Ref" : "zoneinternalidns" },
        "RecordSets": [
          {
            "Name": "bastion.{{zone_internal_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "Bastion",
                  "PrivateIp"
                ]
              }
            ]
          }
        ]
      }
    },
    "LoadBalancer": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {
          "Fn::FindInMap": [
            "RegionMapping",
            {
              "Ref": "AWS::Region"
            },
            "AMI"
          ]
        },
        "InstanceType": "{{loadbalancer_instance_type}}",
        "KeyName": "{{key_name}}",
        "SecurityGroupIds": [
          {
            "Fn::GetAtt": [
              "HostSG",
              "GroupId"
            ]
          }
        ],
        "SubnetId": {
          "Ref": "PublicSubnet"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "loadbalancer"
          },
          {
            "Key": "AnsibleGroup",
            "Value": "loadbalancers"
          },
          {
            "Key": "Project",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "{{ project_tag }}",
            "Value": "loadbalancer"
          },
          {
            "Key": "owner",
            "Value": "{{ email | default('unknown')}}"
          }
        ]
      }
    },
    "LoadBalancerEIP" : {
        "Type" : "AWS::EC2::EIP",
        "DependsOn": [ "VpcGA" ],
        "Properties" : {
            "InstanceId" : { "Ref" : "LoadBalancer" }
        }
    },
    "LoadBalancerInternalDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "Properties": {
      "HostedZoneId" : { "Ref" : "zoneinternalidns" },
        "RecordSets": [
          {
            "Name": "loadbalancer1.{{zone_internal_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "LoadBalancer",
                  "PrivateIp"
                ]
              }
            ]
          }
        ]
      }
    },
{% for c in range(1,(master_instance_count|int)+1) %}
      "master{{c}}": {
        "Type": "AWS::EC2::Instance",
        "Properties": {
          "ImageId": {
            "Fn::FindInMap": [
              "RegionMapping",
              {
                "Ref": "AWS::Region"
              },
              "AMI"
            ]
          },
          "InstanceType": "{{master_instance_type}}",
          "KeyName": "{{key_name}}",
          "SecurityGroupIds": [
            {
              "Fn::GetAtt": [
                "HostSG",
                "GroupId"
              ]
            }
          ],
          "SubnetId": {
            "Ref": "PublicSubnet"
          },
          "Tags": [
            {
              "Key": "Name",
              "Value": "master{{loop.index}}"
            },
            {
              "Key": "AnsibleGroup",
              "Value": "masters"
            },
            {
              "Key": "Project",
              "Value": "{{project_tag}}"
            },
            {
              "Key": "{{ project_tag }}",
              "Value": "master"
            },
            {
              "Key": "owner",
              "Value": "{{ email | default('unknown')}}"
            }
          ],
          "BlockDeviceMappings": [
            {
              "DeviceName": "/dev/sda1",
              "Ebs": {
                "VolumeSize": {{ rootfs_size_master }}
              }
            },
            {
              "DeviceName": "{{ docker_device }}",
              "Ebs": {
                "VolumeType": "gp2",
                "VolumeSize": 20
              }
            }
          ]
        }
    },
    "master{{loop.index}}DNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "Properties": {
      "HostedZoneId" : { "Ref" : "zoneinternalidns" },
        "RecordSets": [
          {
            "Name": "master{{loop.index}}.{{zone_internal_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "master{{loop.index}}",
                  "PrivateIp"
                ]
              }
            ]
          }
        ]
      }
    },
{% endfor %}
    {% for c in range(1,(node_instance_count|int)+1) %}
    "node{{loop.index}}": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {
          "Fn::FindInMap": [
            "RegionMapping",
            {
              "Ref": "AWS::Region"
            },
            "AMI"
          ]
        },
        "InstanceType": "{{node_instance_type}}",
        "KeyName": "{{key_name}}",
        "SecurityGroupIds": [
          {
            "Fn::GetAtt": [
              "HostSG",
              "GroupId"
            ]
          }
        ],
        "SubnetId": {
          "Ref": "PublicSubnet"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "node{{loop.index}}"
          },
          {
            "Key": "AnsibleGroup",
            "Value": "nodes"
          },
          {
            "Key": "Project",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "{{ project_tag }}",
            "Value": "node"
          },
          {
            "Key": "owner",
            "Value": "{{ email | default('unknown')}}"
          }
        ],
        "BlockDeviceMappings": [
          {
            "DeviceName": "/dev/sda1",
            "Ebs": {
              "VolumeSize": {{ rootfs_size_node }}
            }
          },
          {
            "DeviceName": "{{ docker_device }}",
            "Ebs": {
              "VolumeType": "gp2",
              "VolumeSize": 100
            }
          }
        ]
      }
    },
    "node{{loop.index}}DNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "Properties": {
      "HostedZoneId" : { "Ref" : "zoneinternalidns" },
        "RecordSets": [
          {
            "Name": "node{{loop.index}}.{{zone_internal_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "node{{loop.index}}",
                  "PrivateIp"
                ]
              }
            ]
          }
        ]
      }
    },
    {% endfor %}
    {% for c in range(1,(infranode_instance_count|int)+1) %}
    "infranode{{loop.index}}": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {
          "Fn::FindInMap": [
            "RegionMapping",
            {
              "Ref": "AWS::Region"
            },
            "AMI"
          ]
        },
        "InstanceType": "{{infranode_instance_type}}",
        "KeyName": "{{key_name}}",
        "SecurityGroupIds": [
          {
            "Fn::GetAtt": [
              "HostSG",
              "GroupId"
            ]
          }
        ],
        "SubnetId": {
          "Ref": "PublicSubnet"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "infranode{{loop.index}}"
          },
          {
            "Key": "AnsibleGroup",
            "Value": "infranodes"
          },
          {
            "Key": "Project",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "{{ project_tag }}",
            "Value": "infranode"
          },
          {
            "Key": "owner",
            "Value": "{{ email | default('unknown')}}"
          }
        ],
        "BlockDeviceMappings": [
          {
            "DeviceName": "/dev/sda1",
            "Ebs": {
              "VolumeSize": {{ rootfs_size_infranode }}
            }
          },
          {
            "DeviceName": "{{ docker_device }}",
            "Ebs": {
              "VolumeType": "gp2",
              "VolumeSize": 50
            }
          }
        ]
      }
    },
    "infranode{{loop.index}}EIP" : {
        "Type" : "AWS::EC2::EIP",
        "DependsOn": [ "VpcGA" ],
        "Properties" : {
            "InstanceId" : { "Ref" : "infranode{{loop.index}}" }
        }
    },
    "infranode{{loop.index}}DNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "Properties": {
      "HostedZoneId" : { "Ref" : "zoneinternalidns" },
        "RecordSets": [
          {
            "Name": "infranode{{loop.index}}.{{zone_internal_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "infranode{{loop.index}}",
                  "PrivateIp"
                ]
              }
            ]
          }
        ]
      }
    },
    {% endfor %}
    {% for c in range(1,(support_instance_count|int)+1) %}
    "support{{loop.index}}": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {
          "Fn::FindInMap": [
            "RegionMapping",
            {
              "Ref": "AWS::Region"
            },
            "AMI"
          ]
        },
        "InstanceType": "{{support_instance_type}}",
        "KeyName": "{{key_name}}",
        "SecurityGroupIds": [
          {
            "Fn::GetAtt": [
              "HostSG",
              "GroupId"
            ]
          }
        ],
        "SubnetId": {
          "Ref": "PublicSubnet"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "support{{loop.index}}"
          },
          {
            "Key": "AnsibleGroup",
            "Value": "support"
          },
          {
            "Key": "Project",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "{{ project_tag }}",
            "Value": "support"
          },
          {
            "Key": "owner",
            "Value": "{{ email | default('unknown')}}"
          }
        ],
        "BlockDeviceMappings": [
          {
            "DeviceName": "/dev/sda1",
            "Ebs": {
              "VolumeSize": {{ rootfs_size_support }}
            }
          },
          {
            "DeviceName": "{{ nfs_pvs }}",
            "Ebs": {
              "VolumeType": "gp2",
              "VolumeSize": {{ nfs_size }}
            }
          }
        ]
      }
    },
    "support{{loop.index}}DNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "Properties": {
      "HostedZoneId" : { "Ref" : "zoneinternalidns" },
        "RecordSets": [
          {
            "Name": "support{{loop.index}}.{{zone_internal_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "support{{loop.index}}",
                  "PrivateIp"
                ]
              }
            ]
          }
        ]
      }
    }{% if not loop.last %},{% endif %}
{% endfor %}
},
  "Outputs": {
    "Route53internalzoneOutput": {
      "Description": "The ID of the internal route 53 zone",
      "Value": {
        "Ref": "zoneinternalidns"
      }
  }
}}
ansible/configs/ocp-ha-disconnected-lab/files/cloud_providers/ec2_cloud_template.j2
New file
@@ -0,0 +1,339 @@
---
AWSTemplateFormatVersion: "2010-09-09"
Mappings:
  RegionMapping:
    us-east-1:
      RHELAMI: ami-c998b6b2
    us-east-2:
      RHELAMI: ami-cfdafaaa
    us-west-1:
      RHELAMI: ami-66eec506d
    us-west-2:
      RHELAMI: ami-9fa343e7
    eu-west-1:
      RHELAMI: ami-bb9a6bc2
    eu-central-1:
      RHELAMI: ami-d74be5b8
    ap-northeast-1:
      RHELAMI: ami-30ef0556
    ap-northeast-2:
      RHELAMI: ami-0f5a8361
    ap-southeast-1:
      RHELAMI: ami-10bb2373
    ap-southeast-2:
      RHELAMI: ami-ccecf5af
    sa-east-1:
      RHELAMI: ami-a789ffcb
    ap-south-1:
      RHELAMI: ami-cdbdd7a2
  DNSMapping:
    us-east-1:
      domain: "us-east-1.compute.internal"
    us-west-1:
      domain: "us-west-1.compute.internal"
    us-west-2:
      domain: "us-west-2.compute.internal"
    eu-west-1:
      domain: "eu-west-1.compute.internal"
    eu-central-1:
      domain: "eu-central-1.compute.internal"
    ap-northeast-1:
      domain: "ap-northeast-1.compute.internal"
    ap-northeast-2:
      domain: "ap-northeast-2.compute.internal"
    ap-southeast-1:
      domain: "ap-southeast-1.compute.internal"
    ap-southeast-2:
      domain: "ap-southeast-2.compute.internal"
    sa-east-1:
      domain: "sa-east-1.compute.internal"
    ap-south-1:
      domain: "ap-south-1.compute.internal"
Resources:
  Vpc:
    Type: "AWS::EC2::VPC"
    Properties:
      CidrBlock: "{{vpcid_cidr_block}}"
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
        - Key: Name
          Value: "{{vpcid_name_tag}}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
  VpcInternetGateway:
    Type: "AWS::EC2::InternetGateway"
  VpcGA:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId:
        Ref: VpcInternetGateway
      VpcId:
        Ref: Vpc
  VpcRouteTable:
    Type: "AWS::EC2::RouteTable"
    Properties:
      VpcId:
        Ref: Vpc
  VPCRouteInternetGateway:
    DependsOn: VpcGA
    Type: "AWS::EC2::Route"
    Properties:
      GatewayId:
        Ref: VpcInternetGateway
      DestinationCidrBlock: "0.0.0.0/0"
      RouteTableId:
        Ref: VpcRouteTable
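{# One AWS::EC2::Subnet is rendered per entry in the "subnets" list from env_vars #}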
{% for subnet in subnets %}
  {{subnet['name']}}:
    Type: "AWS::EC2::Subnet"
    DependsOn:
      - Vpc
    Properties:
      CidrBlock: "{{subnet['cidr']}}"
      Tags:
        - Key: Name
          Value: "{{project_tag}}"
      MapPublicIpOnLaunch: true
      VpcId:
        Ref: Vpc
{% endfor %}
  PublicSubnetRTA:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
    Properties:
      RouteTableId:
        Ref: VpcRouteTable
      SubnetId:
        Ref: PublicSubnet
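{# Each entry in "security_groups" becomes a security group, followed by one ingress resource per rule #}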
{% for security_group in security_groups %}
  {{security_group['name']}}:
    Type: "AWS::EC2::SecurityGroup"
    Properties:
      GroupDescription: Host
      VpcId:
        Ref: Vpc
      Tags:
        - Key: Name
          Value: "{{security_group['name']}}"
{% endfor %}
{% for security_group in security_groups %}
{% for rule in security_group['rules'] %}
  {{security_group['name']}}{{rule['name'] | replace(' ', '')}}:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - "{{security_group['name']}}"
          - GroupId
      IpProtocol: "{{rule['protocol']}}"
      FromPort: "{{rule['from_port']}}"
      ToPort: "{{rule['to_port']}}"
      CidrIp: "{{rule['cidr']}}"
{% endfor %}
{% endfor %}
#   PublicSG:
#     Type: "AWS::EC2::SecurityGroup"
#     Properties:
#       GroupDescription: Host
#       VpcId:
#         Ref: Vpc
#       Tags:
#         - Key: Name
#           Value: host_sg
#
#   HostUDPPorts:
#     Type: "AWS::EC2::SecurityGroupIngress"
#     Properties:
#       GroupId:
#         Fn::GetAtt:
#           - PublicSG
#           - GroupId
#       IpProtocol: udp
#       FromPort: 0
#       ToPort: 65535
#       CidrIp: "0.0.0.0/0"
#
#   HostTCPPorts:
#     Type: "AWS::EC2::SecurityGroupIngress"
#     Properties:
#       GroupId:
#         Fn::GetAtt:
#           - PublicSG
#           - GroupId
#       IpProtocol: tcp
#       FromPort: 0
#       ToPort: 65535
#       CidrIp: "0.0.0.0/0"
#
#   zoneinternalidns:
#     Type: "AWS::Route53::HostedZone"
#     Properties:
#       Name: "{{ zone_internal_dns }}"
#       VPCs:
#         - VPCId:
#             Ref: Vpc
#           VPCRegion:
#             Ref: "AWS::Region"
#       HostedZoneConfig:
#         Comment: "Created By ansible agnostic deployer"
#
#   CloudDNS:
#     Type: AWS::Route53::RecordSetGroup
#     DependsOn:
# {% for c in range(1,(infranode_instance_count|int)+1) %}
#       - "infranode{{loop.index}}EIP"
# {% endfor %}
#     Properties:
#       HostedZoneId: "{{HostedZoneId}}"
#       RecordSets:
#         - Name: "{{cloudapps_dns}}"
#           Type: A
#           TTL: 900
#           ResourceRecords:
# {% for c in range(1,(infranode_instance_count|int)+1) %}
#             - Fn::GetAtt:
#                 - infranode{{loop.index}}
#                 - PublicIp
# {% endfor %}
#
# {% for instance in instances %}
# {% if instance['dns_loadbalancer']|d(false)|bool and not instance['unique']|d(false)|bool %}
#   {{instance['name']}}DNSLoadBalancer:
#     Type: "AWS::Route53::RecordSetGroup"
#     DependsOn:
# {% for c in range(1, (instance['count']|int)+1) %}
#       - {{instance['name']}}{{c}}EIP
# {% endfor %}
#     Properties:
#       HostedZoneId: {{HostedZoneId}}
#       RecordSets:
#       - Name: "{{instance['name']}}.{{subdomain_base}}."
#         Type: A
#         TTL: 900
#         ResourceRecords:
# {% for c in range(1,(instance['count'] |int)+1) %}
#           - "Fn::GetAtt":
#             - {{instance['name']}}{{c}}
#             - PublicIp
# {% endfor %}
# {% endif %}
#
# {% for c in range(1,(instance['count'] |int)+1) %}
#   {{instance['name']}}{{loop.index}}:
#     Type: "AWS::EC2::Instance"
#     Properties:
#       ImageId:
#         Fn::FindInMap:
#         - RegionMapping
#         - Ref: AWS::Region
#         - {{ instance['image_id'] | default('RHELAMI') }}
#       InstanceType: "{{instance['flavor'][cloud_provider]}}"
#       KeyName: "{{instance['key_name'] | default(key_name)}}"
# {% if instance['UserData'] is defined %}
#       {{instance['UserData']}}
# {% endif %}
#       SecurityGroupIds:
#         - "Fn::GetAtt":
#           - {{instance['security_group']}}
#           - GroupId
#       SubnetId:
#         Ref: {{instance['subnet']}}
#       Tags:
# {% if instance['unique'] | d(false) | bool %}
#         - Key: Name
#           Value: {{instance['name']}}
#         - Key: internaldns
#           Value: {{instance['name']}}.{{chomped_zone_internal_dns}}
# {% else %}
#         - Key: Name
#           Value: {{instance['name']}}{{loop.index}}
#         - Key: internaldns
#           Value: {{instance['name']}}{{loop.index}}.{{chomped_zone_internal_dns}}
# {% endif %}
#         - Key: "owner"
#           Value: "{{ email | default('unknownuser') }}"
#         - Key: "Project"
#           Value: "{{project_tag}}"
#         - Key: "{{project_tag}}"
#           Value: "{{ instance['name'] }}"
# {% for tag in instance['tags'] %}
#         - Key: {{tag['key']}}
#           Value: {{tag['value']}}
# {% endfor %}
#       BlockDeviceMappings:
#         - DeviceName: "/dev/sda1"
#           Ebs:
#             VolumeSize: {{ instance['rootfs_size'] | default('50') }}
# {% for vol in instance['volumes']|default([]) %}
#         - DeviceName: "{{ vol['device_name'] }}"
#           Ebs:
#             VolumeType: "{{ vol['volume_type'] | d('gp2') }}"
#             VolumeSize: "{{ vol['volume_size'] | d('20') }}"
# {% endfor %}
#
#   {{instance['name']}}{{loop.index}}InternalDNS:
#     Type: "AWS::Route53::RecordSetGroup"
#     Properties:
#       HostedZoneId:
#         Ref: zoneinternalidns
#       RecordSets:
# {% if instance['unique'] | d(false) | bool %}
#       - Name: "{{instance['name']}}.{{zone_internal_dns}}"
# {% else %}
#       - Name: "{{instance['name']}}{{loop.index}}.{{zone_internal_dns}}"
# {% endif %}
#         Type: A
#         TTL: 10
#         ResourceRecords:
#           - "Fn::GetAtt":
#             - {{instance['name']}}{{loop.index}}
#             - PrivateIp
#
# {% if instance['public_dns'] %}
#   {{instance['name']}}{{loop.index}}EIP:
#     Type: "AWS::EC2::EIP"
#     DependsOn:
#     - VpcGA
#     Properties:
#       InstanceId:
#         Ref: {{instance['name']}}{{loop.index}}
#
#   {{instance['name']}}{{loop.index}}PublicDNS:
#     Type: "AWS::Route53::RecordSetGroup"
#     DependsOn:
#       - {{instance['name']}}{{loop.index}}EIP
#     Properties:
#       HostedZoneId: {{HostedZoneId}}
#       RecordSets:
# {% if instance['unique'] | d(false) | bool %}
#           - Name: "{{instance['name']}}.{{subdomain_base}}."
# {% else %}
#           - Name: "{{instance['name']}}{{loop.index}}.{{subdomain_base}}."
# {% endif %}
#             Type: A
#             TTL: 10
#             ResourceRecords:
#             - "Fn::GetAtt":
#               - {{instance['name']}}{{loop.index}}
#               - PublicIp
# {% endif %}
# {% endfor %}
# {% endfor %}
#
# Outputs:
#   Route53internalzoneOutput:
#     Description: The ID of the internal route 53 zone
#     Value:
#       Ref: zoneinternalidns
ansible/configs/ocp-ha-disconnected-lab/files/ec2_internal_dns.json.j2
New file
@@ -0,0 +1,84 @@
{
  "Comment": "Create internal dns zone entries",
  "Changes": [
{% for host in groups['masters'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "master{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['loadbalancers'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "loadbalancer{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['infranodes'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "infranode{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['nodes'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "node{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['support'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "nfs{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['bastions'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "bastion.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    }
{% endfor %}
  ]
}
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.j2
New file
@@ -0,0 +1,301 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# default project node selector
osm_default_node_selector='env=app'
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
{% if osrelease | version_compare('3.7', '<') %}
# Anything before 3.7
openshift_metrics_image_version=v{{ repo_version }}
#openshift_image_tag=v{{ repo_version }}
#openshift_release={{ osrelease }}
#docker_version="{{docker_version}}"
{% endif %}
###########################################################################
### OpenShift Optional Vars
###########################################################################
# Enable cockpit
osm_use_cockpit=true
osm_cockpit_plugins=['cockpit-kubernetes']
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
{% if osrelease | version_compare('3.7', '>=') %}
# This should be turned on once all dependent scripts use firewalld rather than iptables
# os_firewall_use_firewalld=True
{% endif %}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" %}
openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com', 'bindPassword': '{{bindPassword}}', 'ca': 'ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid'}]
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "allow_all"  %}
openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
{% endif %}
{% if install_idm == "htpasswd"  %}
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
{% if osrelease | version_compare('3.7', '>=') %}
openshift_metrics_install_metrics={{install_metrics}}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
#openshift_master_metrics_public_url=https://hawkular-metrics.{{cloudapps_suffix}}/hawkular/metrics
## Add Prometheus Metrics:
openshift_hosted_prometheus_deploy=true
openshift_prometheus_node_selector={"env":"infra"}
openshift_prometheus_namespace=openshift-metrics
# Prometheus
openshift_prometheus_storage_kind=nfs
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_nfs_directory=/srv/nfs
openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_storage_volume_name=prometheus
openshift_prometheus_storage_volume_size=10Gi
openshift_prometheus_storage_labels={'storage': 'prometheus'}
openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
openshift_prometheus_alertmanager_storage_kind=nfs
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_kind=nfs
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
openshift_prometheus_alertbuffer_storage_type='pvc'
{% else %}
openshift_hosted_metrics_deploy={{install_metrics}}
openshift_hosted_metrics_storage_kind=nfs
openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
openshift_hosted_metrics_storage_host=support1.{{guid}}.internal
openshift_hosted_metrics_storage_nfs_directory=/srv/nfs
openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_metrics_storage_volume_name=metrics
openshift_hosted_metrics_storage_volume_size=10Gi
openshift_hosted_metrics_public_url=https://hawkular-metrics.{{cloudapps_suffix}}/hawkular/metrics
{% endif %}
openshift_metrics_cassandra_nodeselector={"env":"infra"}
openshift_metrics_hawkular_nodeselector={"env":"infra"}
openshift_metrics_heapster_nodeselector={"env":"infra"}
# Enable cluster logging
{% if osrelease | version_compare('3.7', '>=') %}
openshift_logging_install_logging={{install_logging}}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
# openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}}
openshift_logging_es_cluster_size=1
{% else %}
openshift_hosted_logging_deploy={{install_logging}}
openshift_master_logging_public_url=https://kibana.{{cloudapps_suffix}}
openshift_hosted_logging_storage_kind=nfs
openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
openshift_hosted_logging_storage_nfs_directory=/srv/nfs
openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_logging_storage_volume_name=logging
openshift_hosted_logging_storage_volume_size=10Gi
openshift_hosted_logging_hostname=kibana.{{cloudapps_suffix}}
openshift_hosted_logging_elasticsearch_cluster_size=1
openshift_hosted_logging_deployer_version=v{{repo_version}}
# The image version variables below are wrong; keep them commented out:
#openshift_hosted_logging_image_version=v{{repo_version}}
#openshift_logging_image_version=v{{repo_version}}
{% endif %}
openshift_logging_es_nodeselector={"env":"infra"}
openshift_logging_kibana_nodeselector={"env":"infra"}
openshift_logging_curator_nodeselector={"env":"infra"}
###########################################################################
### OpenShift Project Management Vars
###########################################################################
# Configure additional projects
openshift_additional_projects={'openshift-template-service-broker': {'default_node_selector': ''}}
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_selector='env=infra'
openshift_hosted_router_replicas={{infranode_instance_count}}
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_selector='env=infra'
openshift_hosted_registry_replicas=1
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
{% if osrelease | version_compare('3.7', '>=') %}
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=false
{% endif %}
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
{% if new_node_instance_count|int > 0 %}
new_nodes
{% endif %}
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[masters]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[etcd]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'app', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
{% if new_node_instance_count|int > 0 %}
# scaleup performed, leave an empty group, see:
# https://docs.openshift.com/container-platform/3.5/install_config/adding_hosts_to_existing_cluster.html
[new_nodes]
{% endif %}
[nfs]
{% for host in groups['support'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}
{% endfor %}
ansible/configs/ocp-ha-disconnected-lab/files/htpasswd.openshift
New file
@@ -0,0 +1,103 @@
andrew:$apr1$dZPb2ECf$ercevOFO5znrynUfUj4tb/
karla:$apr1$FQx2mX4c$eJc21GuVZWNg1ULF8I2G31
user1:$apr1$FmrTsuSa$yducoDpvYq0KEV0ErmwpA1
user2:$apr1$JCcW2XQM$8takcyaYYrPT5I8M46TA01
user3:$apr1$zPC/rXKY$2PGF7dRsGwC3i8YJ59aOk0
user4:$apr1$e9/zT6dh$J18M.9zyn3DazrYreGV.B/
user5:$apr1$Nu/XJFVP$DgybymePret.Prch9MyxP/
user6:$apr1$VEbpwL9M$c1oFwS.emkt8fyR24zOzd0
user7:$apr1$wZxsnY/A$PK0O7iofGJJsvOZ3ctoNo.
user8:$apr1$5YBAWpGg$YO4ACHZL.c31NbQZH9LlE.
user9:$apr1$CIxB1enN$Aghb7.S4U3SXPRt55hTWI.
user10:$apr1$dWTDSR23$UGGJtkVC1ERmAOikomI9K0
user11:$apr1$j4fPyRZg$nNJk1nt1vAf54HAB/g/8g/
user12:$apr1$dd6kysUI$ueu/9.gbL0LkjpCbSjFNI.
user13:$apr1$DeRaAbVq$ZI3HtBzQxWYHifjIuPJSM1
user14:$apr1$dUuWDYgk$co6NQ4Dbcp3pQjVO5dR7Q.
user15:$apr1$4QmhSys7$wC.fKmKRqLNqoYqQ1dixJ/
user16:$apr1$RHcOPHg7$p9LgYP6zE4nMDlA8ongVc/
user17:$apr1$pji2xxHN$vvUHj/fbQRgLR.WBMblQH/
user18:$apr1$Lm79l0Qr$KgZSAuPcrTo4.GIWTBLGa/
user19:$apr1$KGxvneIX$.GJo7JB.N/c1FLW7vlblx/
user20:$apr1$WfYdosg5$cU1BsAzkIhTzKBx8Rvd3o1
user21:$apr1$cKRCbWLl$WCVjYUxD22GS5RRv1npwR1
user22:$apr1$QhpgOkFU$Y6Nn7NEPbJk3D9ehFb4i50
user23:$apr1$dVgQOh7j$L3JZlN8ZmdEwebXqD66Yl0
user24:$apr1$z/U5MAQB$GvKG3i8ATXWHhoxN9e0HS/
user25:$apr1$gFHGMQUV$w11pZbcBqVKOylr9TZ1EW.
user26:$apr1$5YG0dnOG$GzbnTQMBe0Dqc3f3pwvPL1
user27:$apr1$Kt6VoxNS$nq1Kzd53DUL8h8gfu4fEq/
user28:$apr1$aLAQHJ4d$qTRmUpw2eF9whEwDyIixG0
user29:$apr1$3HH4pgpa$Uh84gx3UP8vyPRfAIMPRl1
user30:$apr1$bbEEX3EF$ozw4jPcYHwVO7.MRzXtu0.
user31:$apr1$hD0kfz7i$SjNdGZbvto5EifBma5iA5.
user32:$apr1$fRMBUYu8$T5BQ8kI3pMgqXaRH7l8p..
user33:$apr1$es9ruteO$jZsV5/H8GIzw.vCfPs5310
user34:$apr1$OQ1I/gHn$.WA01EeXhDLE1K3vWD1wu.
user35:$apr1$KseEJXTS$kE/QO1XT0mZ44Iyw/ofnj/
user36:$apr1$PglCzG.g$44QsoAyMhanH5A40P5jhY1
user37:$apr1$2d5ggTIZ$xYsfdRBLOlEsnWRFVS9Yl0
user38:$apr1$x/cdV95V$mKFZmSkoBjeEu.HZshO0n.
user39:$apr1$VC6.WQOS$fAOAR1mx/i7Pnt2oGsDmu/
user40:$apr1$n36Hr3zC$lEVq4B7UWmdcnl01lUyR..
user41:$apr1$/q6tJtXi$9mCB1YCqdhEE6VVVVkVKc/
user42:$apr1$fTMTWEzw$X4MsyNlWketRjQgqonwxn.
user43:$apr1$.VwoJu38$D4v4NKL1KPuRZdNeprBXS/
user44:$apr1$e0s48GLK$JMQ849MeckVX0wG2vE2s10
user45:$apr1$a9ucQ1sC$HEMij.WGEa1xIQ01HpyKh1
user46:$apr1$uwOs/4nv$TB2r3pOPJ2K0A./CimVUT1
user47:$apr1$jfTmW1k5$Fd2ebTUtFFl3CLZWfFmRR.
user48:$apr1$4/apB/zd$IxoWJ5pTRNGgbxx3Ayl/i0
user49:$apr1$nu75PZ0r$bPCMgDmlOAj.YbeFPHJHE.
user50:$apr1$c/R3wJ/g$GJ03siVj5tkNxrg4OaxhJ0
user51:$apr1$EdEX6Pyt$IdPQHmhZi8FEbJjREVbe1/
user52:$apr1$ZMfyTjjX$RFOrnKsSr5xXA7IXn7TkC/
user53:$apr1$GY.rOkJM$uMCqJmmorP5I1v.YHHz1Z/
user54:$apr1$1vuZq/U0$Aq0Kz3wk0YPleDz/rTCdK0
user55:$apr1$KjULqmcD$XrhyYt2nWuiaQkbciDIcN/
user56:$apr1$gTPaNeq0$sqWJDPZ5//ZDjLf0dSbUh1
user57:$apr1$6PaKhdlY$dX2FkVJ0xV.4MAQeDUgRT0
user58:$apr1$.8MSdEpY$MPIbUO2WnC0wsno8zUOjC.
user59:$apr1$TWpKuAvt$CFeTQxxSgeU3dFkL4qpXb.
user60:$apr1$fEYUgRVU$LO2qwXfpxwI9fDXPfQgQB0
user61:$apr1$HHUBEn4G$.cAnwbh.ogNEzQSug3nqo/
user62:$apr1$Agt4GmKT$4k3Ev3FSJiNsbht3vUbxQ/
user63:$apr1$FsUKA7Hw$nkSgqSIFeqCY1mOyGje3O1
user64:$apr1$vBlkQoG4$8L2mTo8gdr8wC68G2y2G91
user65:$apr1$McEnEqn4$dZvjACdGp0HALVHBtHEu80
user66:$apr1$zamuhlOG$Xch5pbO1ki2Dad1dzjS4j.
user67:$apr1$qC1rll4s$cN4DzsWnyFBTNi3Cdi6161
user68:$apr1$txKPCx1k$WtrlrlP.UF.Rlzbnv6igE/
user69:$apr1$EO2A25Sj$DO/1lCNJJXff4GOsTZmHL/
user70:$apr1$pJu569Az$nHtF2ZkUrNXw9WN0Obb/T1
user71:$apr1$YKpEtZka$c59Fmov1cssRdrO5VqBKz1
user72:$apr1$CNkwam0s$b.QcPWytnhlOsaajMQx630
user73:$apr1$m5kE07o0$7TC3K.I16YTaRyN8EZq7E/
user74:$apr1$/5p0Qoyy$hjQ30Q8Ghb4zNrjjt2yLk/
user75:$apr1$ZF3yRTqJ$TgLBllrvTQuuiIjSb53xR0
user76:$apr1$711LL2Ai$59rBNmFprwZXtyFVBtRul0
user77:$apr1$N4uJhPSq$A.rVfAsRXCQqxOenDHjqX1
user78:$apr1$PHSpv5ty$WC8GlQpclQqH30eWPu.6e.
user79:$apr1$c/yk9dQ9$dvhh.P4F5zGnysBvwps4m/
user80:$apr1$oTmftf8R$FYzQD77hYfh9Wq3SvwYU7/
user81:$apr1$3YvQ/JPg$sDXhV8xpHNxQzFSvMMxAD1
user82:$apr1$quKB2P2.$iq.ZzDa3/xoaoY3.F1Un90
user83:$apr1$IVq8346H$lPQJZ7Thr/gJ2EmzDsktH0
user84:$apr1$xfehskAD$NRMQJttylejHtNKQqBj.k.
user85:$apr1$/LYLXNbH$/COZBzkaU0pPOXR38ZFVX/
user86:$apr1$a/xD3Jfw$rZXN4ykj0W6qadlh447n//
user87:$apr1$v01l1ljr$tGDKwdhKC05HEbntSxV5M0
user88:$apr1$9RYtWl12$ck19ozvS.SWeAAaDZqE940
user89:$apr1$EvSs2TA2$fRDg0hVOCf2jbhwXifzbs.
user90:$apr1$9ffAneiG$CAz5JWeIPGnamOQlVRGIk.
user91:$apr1$Z3XW5Yy4$Kibx7GmgdpC6CAM0IxhtC0
user92:$apr1$6CfIrBqr$5nGNCGA5QOPq/h8hlOE4f.
user93:$apr1$iJ4AQyfu$fkXSVib.OzPCSBQlLhwwS.
user94:$apr1$jiPqi0uI$XyYDQt0kcawqFLX12VW3n/
user95:$apr1$ULEkhfG2$/WHcoR9KJxAS3uw470Vkk.
user96:$apr1$56tQXa91$l0yaZgZHbDidgw95IP7yQ1
user97:$apr1$SoGwK9hP$YbceEfwmsM3QCdNGAaE1b.
user98:$apr1$MVU1/8dh$UKzkRk1CQP00SvnoPIm1..
user99:$apr1$v8vKZdHH$NC5xud.olhtdydHU9hav6.
user100:$apr1$glFN48wz$dR9w94PGiQL8qZXcXGd0L0
marina:$apr1$ayR8gA9a$4bmozGlmBX6XQY1AbJfQk1
ansible/configs/ocp-ha-disconnected-lab/files/labs_hosts_template.j2
New file
@@ -0,0 +1,55 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
# disable memory check, as we are not a production environment
openshift_disable_check="memory_availability"
[OSEv3:children]
lb
masters
etcd
nodes
nfs
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[masters]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[etcd]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'app', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
[nfs]
{% for host in groups['support'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}
{% endfor %}
ansible/configs/ocp-ha-disconnected-lab/files/pvs.j2
New file
@@ -0,0 +1,21 @@
---
{% for pv in pv_list %}
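{# One PersistentVolume per entry in pv_list; access modes alternate between ReadWriteOnce and ReadWriteMany #}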
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ pv }}
spec:
  capacity:
    storage: {{pv_size}}
  accessModes:
{% if loop.index % 2 == 0 %}
    - ReadWriteOnce
{% else %}
    - ReadWriteMany
{% endif %}
  nfs:
    path: {{ nfs_export_path }}/{{pv}}
    server: support1.{{guid}}.internal
  persistentVolumeReclaimPolicy: {{persistentVolumeReclaimPolicy}}
---
{% endfor %}
ansible/configs/ocp-ha-disconnected-lab/files/pvs_rwx.j2
New file
@@ -0,0 +1,17 @@
---
{% for pv in pv_list %}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ pv }}rwx
spec:
  capacity:
    storage: {{pv_size}}
  accessModes:
  - ReadWriteMany
  nfs:
    path: {{ nfs_export_path }}/{{pv}}
    server: support1.{{guid}}.internal
  persistentVolumeReclaimPolicy: {{persistentVolumeReclaimPolicy}}
---
{% endfor %}
ansible/configs/ocp-ha-disconnected-lab/files/repos_template.j2
New file
@@ -0,0 +1,37 @@
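# Yum repository definitions for the disconnected lab. The baseurl entries point
# at the mirror referenced by own_repo_path (used when use_own_repos is true).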
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7
baseurl={{own_repo_path}}/rhel-7-server-rpms
enabled=1
gpgcheck=0
[rhel-7-server-rh-common-rpms]
name=Red Hat Enterprise Linux 7 Common
baseurl={{own_repo_path}}/rhel-7-server-rh-common-rpms
enabled=1
gpgcheck=0
[rhel-7-server-extras-rpms]
name=Red Hat Enterprise Linux 7 Extras
baseurl={{own_repo_path}}/rhel-7-server-extras-rpms
enabled=1
gpgcheck=0
[rhel-7-server-optional-rpms]
name=Red Hat Enterprise Linux 7 Optional
baseurl={{own_repo_path}}/rhel-7-server-optional-rpms
enabled=1
gpgcheck=0
[rhel-7-server-ose-{{repo_version}}-rpms]
name=Red Hat Enterprise Linux 7 OSE {{repo_version}}
baseurl={{own_repo_path}}/rhel-7-server-ose-{{repo_version}}-rpms
enabled=1
gpgcheck=0
[rhel-7-fast-datapath-rpms]
name=Red Hat Enterprise Linux 7 Fast Datapath
baseurl={{own_repo_path}}/rhel-7-fast-datapath-rpms
enabled=1
gpgcheck=0
ansible/configs/ocp-ha-disconnected-lab/files/userpvs.j2
New file
@@ -0,0 +1,20 @@
---
{%  for pv in range(1,user_vols) %}
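{# One user PV per index; every second volume additionally allows ReadWriteMany #}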
apiVersion: v1
kind: PersistentVolume
metadata:
  name: vol{{ pv }}
spec:
  capacity:
    storage: {{ pv_size }}
  accessModes:
  - ReadWriteOnce
{% if  pv % 2 == 0 %}
  - ReadWriteMany
{% endif %}
  nfs:
    path: {{ nfs_export_path }}/user-vols/vol{{pv}}
    server: support1.{{guid}}.internal
  persistentVolumeReclaimPolicy: {{ persistentVolumeReclaimPolicy }}
---
{% endfor %}
ansible/configs/ocp-ha-disconnected-lab/post_infra.yml
New file
@@ -0,0 +1,32 @@
- name: Step 002 Post Infrastructure
  hosts: localhost
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tags:
    - step002
    - post_infrastructure
  tasks:
    - debug:
        msg: "Step 001 Post Infrastructure - There are no post_infrastructure tasks defined"
      when: "not {{ tower_run | default(false) }}"
    - name: Job Template to launch a Job Template with update on launch inventory set
      uri:
        url: "https://{{ ansible_tower_ip }}/api/v1/job_templates/{{ job_template_id }}/launch/"
        method: POST
        user: "{{tower_admin}}"
        password: "{{tower_admin_password}}"
        body:
          extra_vars:
            guid: "{{guid}}"
            ipa_host_password: "{{ipa_host_password}}"
        body_format: json
        validate_certs: False
        HEADER_Content-Type: "application/json"
        status_code: 200, 201
      when: "{{ tower_run | default(false) }}"
      tags:
        - tower_workaround
ansible/configs/ocp-ha-disconnected-lab/post_ocp_nfs_config.yml
New file
@@ -0,0 +1,58 @@
- name: Step 00xxxxx post software
  hosts: support
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Create user vols
      shell: "mkdir -p /srv/nfs/user-vols/vol{1..{{user_vols}}}"
    - name: chmod the user vols
      shell: "chmod -R 777 /srv/nfs/user-vols"
- name: Step 00xxxxx post software
  hosts: bastions
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: get nfs Hostname
      set_fact:
        nfs_host: "{{ groups['support']|sort|first }}"
    - set_fact:
        pv_size: '10Gi'
        pv_list: "{{ ocp_pvs }}"
        persistentVolumeReclaimPolicy: Retain
    - name: Generate PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/pvs.j2"
        dest: "/root/pvs-{{ env_type }}-{{ guid }}.yml"
      tags: [ gen_pv_file ]
      when: pv_list.0 is defined
    - set_fact:
        pv_size: "{{user_vols_size}}"
        persistentVolumeReclaimPolicy: Recycle
      notify: restart nfs services
      run_once: True
    - name: Generate user vol PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/userpvs.j2"
        dest: "/root/userpvs-{{ env_type }}-{{ guid }}.yml"
      tags:
        - gen_user_vol_pv
    - shell: 'oc create -f /root/pvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/pvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
      when: pv_list.0 is defined
    - shell: 'oc create -f /root/userpvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/userpvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
ansible/configs/ocp-ha-disconnected-lab/post_software.yml
New file
@@ -0,0 +1,126 @@
#vim: set ft=ansible:
---
- name: Step 005 - Post Software deployment
  hosts: localhost
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tags:
    - step005
  tasks:
    - name: Generate /etc/ansible/hosts file with lab hosts template
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/labs_hosts_template.j2"
        dest: "{{ ANSIBLE_REPO_PATH }}/workdir/labs_hosts-{{ env_type }}-{{ guid }}"
- name: Configure NFS host for user-vols if required
  hosts: support
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Create user vols
      shell: "mkdir -p /srv/nfs/user-vols/vol{1..{{user_vols}}}"
      tags:
        - openshift_nfs_config
    - name: chmod the user vols
      shell: "chmod -R 777 /srv/nfs/user-vols"
      tags:
        - openshift_nfs_config
- name: Step lab post software deployment
  hosts: bastions
  gather_facts: False
  become: yes
  tags:
    - opentlc_bastion_tasks
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Move complete inventory file to preserve directory.
      shell: mv /etc/ansible/hosts /var/preserve/
      tags: preserve_complete_ansible_inventory
    - name: Copy over ansible hosts file, lab version
      copy:
        backup: no
        src: "{{ ANSIBLE_REPO_PATH }}/workdir/labs_hosts-{{ env_type }}-{{ guid }}"
        dest: /etc/ansible/hosts
      tags:
        - overwrite_hosts_with_lab_hosts
    # sssd bug, fixed by restart
    - name: restart sssd
      service:
        name: sssd
        state: restarted
      when: install_ipa_client
    ## Create PVs for uservols if required
    - name: get nfs Hostname
      set_fact:
        nfs_host: "{{ groups['support']|sort|first }}"
      tags:
        - openshift_nfs_config
    - set_fact:
        pv_size: '10Gi'
        pv_list: "{{ ocp_pvs }}"
        persistentVolumeReclaimPolicy: Retain
      tags:
        - openshift_nfs_config
    - name: Generate PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/pvs.j2"
        dest: "/root/pvs-{{ env_type }}-{{ guid }}.yml"
      tags:
        - gen_pv_file
        - openshift_nfs_config
      when: pv_list.0 is defined
    - set_fact:
        pv_size: "{{user_vols_size}}"
        persistentVolumeReclaimPolicy: Recycle
      tags:
        - openshift_nfs_config
      notify: restart nfs services
      run_once: True
    - name: Generate user vol PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/userpvs.j2"
        dest: "/root/userpvs-{{ env_type }}-{{ guid }}.yml"
      tags:
        - gen_user_vol_pv
        - openshift_nfs_config
    - shell: 'oc create -f /root/pvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/pvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
        - openshift_nfs_config
      when: pv_list.0 is defined
    - shell: 'oc create -f /root/userpvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/userpvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
        - openshift_nfs_config
# - name: include post nfs config
#   include: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/post_ocp_nfs_config.yml"
#   tags:
#     - openshift_nfs_config
- name: PostSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Post-Software checks completed successfully"
ansible/configs/ocp-ha-disconnected-lab/pre_infra.yml
New file
@@ -0,0 +1,13 @@
- name: Step 000 Pre Infrastructure
  hosts: localhost
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tags:
    - step000
    - pre_infrastructure
  tasks:
    - debug:
        msg: "Step 000 Pre Infrastructure - There are no pre_infrastructure tasks defined"
ansible/configs/ocp-ha-disconnected-lab/pre_software.yml
New file
@@ -0,0 +1,75 @@
- name: Step 003 - Create env key
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - step003
    - generate_env_keys
  tasks:
    - name: Generate SSH keys
      shell: ssh-keygen -b 2048 -t rsa -f "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" -q -N ""
      args:
        creates: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}"
      when: set_env_authorized_key
    - name: fix permission
      file:
        path: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}"
        mode: 0400
      when: set_env_authorized_key
    - name: Generate SSH pub key
      shell: ssh-keygen -y -f "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" > "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}.pub"
      args:
        creates: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}.pub"
      when: set_env_authorized_key
# Cloudformation template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }}
- name: Configure all hosts with Repositories, Common Files and Set environment key
  hosts: all
  become: true
  gather_facts: False
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - step004
    - common_tasks
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories", when: 'repo_method is defined' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/common", when: 'install_common' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key", when: 'set_env_authorized_key' }
- name: Configuring Bastion Hosts
  hosts: bastions
  become: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  roles:
    -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion", when: 'install_bastion' }
    -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa", when: 'install_ipa_client' }
  tags:
    - step004
    - bastion_tasks
- name: PreSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - flight_check
  tasks:
    - debug:
        msg: "Pre-Software checks completed successfully"
ansible/configs/ocp-ha-disconnected-lab/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/ocp-ha-lab/env_vars.yml
@@ -47,7 +47,7 @@
install_logging: true
ovs_plugin: "subnet" # This can also be set to: "multitenant"
multi_tenant_setting: "os_sdn_network_plugin_name='redhat/openshift-ovs-{{ovs_plugin}}'"
master_lb_dns: "loadbalancer.{{subdomain_base}}"
master_lb_dns: "loadbalancer1.{{subdomain_base}}"
cloudapps_suffix: 'apps.{{subdomain_base}}'
openshift_master_ldap_ca_file: 'openshift_master_ldap_ca_file=/root/ca.crt'
# htpasswd / ldap / allow_all
ansible/configs/ocp-ha-lab/files/hosts_template.j2
@@ -49,7 +49,7 @@
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname={{master_lb_dns}}
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
ansible/configs/ocp-ha-lab/files/htpasswd.openshift
@@ -100,3 +100,4 @@
user98:$apr1$MVU1/8dh$UKzkRk1CQP00SvnoPIm1..
user99:$apr1$v8vKZdHH$NC5xud.olhtdydHU9hav6.
user100:$apr1$glFN48wz$dR9w94PGiQL8qZXcXGd0L0
marina:$apr1$ayR8gA9a$4bmozGlmBX6XQY1AbJfQk1
ansible/configs/ocp-ha-lab/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/ocp-implementation-lab/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/ocp-workshop/README.adoc
@@ -166,6 +166,10 @@
    --skip-tags=remove_self_provisioners,opentlc-integration
----
=== IPA registration
You can provide either `ipa_host_password` or the `ipa_kerberos_user`/`ipa_kerberos_password` pair to register the host with the IPA server. See link:../../roles/bastion-opentlc-ipa[roles/bastion-opentlc-ipa].
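For example, a minimal `env_secret_vars.yml` snippet could look like the following (placeholder values shown; supply only one of the two options):
----
# Option 1: enroll with a host password
ipa_host_password: changeme
# Option 2: enroll with a Kerberos principal instead
#ipa_kerberos_user: admin
#ipa_kerberos_password: changeme
----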
=== CNS/Glusterfs
If you set this variable, 3 support nodes will be deployed and used for glusterfs:
ansible/configs/ocp-workshop/env_vars.yml
@@ -45,8 +45,8 @@
ocp_report: false
install_ipa_client: false
remove_self_provisioners: false
install_lets_encrypt_certificates: true
# you can also use: allow_all, htpasswd, ldap
install_idm: ldap
idm_ca_url: http://ipa.opentlc.com/ipa/config/ca.crt
install_metrics: true
ansible/configs/ocp-workshop/files/cloud_providers/ec2_cloud_template.j2
@@ -328,6 +328,26 @@
{% endfor %}
{% endfor %}
  Route53User:
    Type: AWS::IAM::User
    Properties:
      Policies:
        - PolicyName: Route53Access
          PolicyDocument:
            Statement:
              - Effect: Allow
                Action: route53domains:*
                Resource: "*"
              - Effect: Allow
                Action: route53:*
                Resource: "*"
  Route53UserAccessKey:
      Type: AWS::IAM::AccessKey
      Properties:
        UserName:
          Ref: Route53User
  RegistryS3:
    Type: "AWS::S3::Bucket"
    Properties:
@@ -407,3 +427,17 @@
        - S3UserAccessKey
        - SecretAccessKey
    Description: IAM User for RegistryS3
  Route53User:
    Value:
      Ref: Route53User
    Description: IAM User for Route53 (Let's Encrypt)
  Route53UserAccessKey:
    Value:
      Ref: Route53UserAccessKey
    Description: IAM User for Route53 (Let's Encrypt)
  Route53UserSecretAccessKey:
    Value:
      Fn::GetAtt:
        - Route53UserAccessKey
        - SecretAccessKey
    Description: IAM User for Route53 (Let's Encrypt)
ansible/configs/ocp-workshop/files/hosts_template.j2
@@ -66,6 +66,10 @@
openshift_master_default_subdomain={{cloudapps_suffix}}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
{% if install_lets_encrypt_certificates %}
openshift_master_named_certificates=[{"certfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer", "keyfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key", "cafile": "/root/fakeleintermediatex1.pem"}]
{% endif %}
openshift_set_hostname=True
###########################################################################
@@ -82,8 +86,6 @@
# This should be turned on once all dependent scripts use firewalld rather than iptables
# os_firewall_use_firewalld=True
{% endif %}
{% if osrelease | version_compare('3.7', '>=') %}
###########################################################################
@@ -135,8 +137,6 @@
openshift_metrics_cassanda_pvc_storage_class_name=''
openshift_hosted_metrics_storage_volume_name=metrics
{% endif %}
#openshift_master_metrics_public_url=https://hawkular-metrics.{{cloudapps_suffix}}/hawkular/metrics
## Add Prometheus Metrics:
openshift_hosted_prometheus_deploy=true
@@ -222,7 +222,6 @@
openshift_hosted_logging_storage_volume_name=logging
{% endif %}
# openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}}
openshift_logging_es_cluster_size=1
{% else %}
@@ -241,9 +240,6 @@
openshift_hosted_logging_hostname=kibana.{{cloudapps_suffix}}
openshift_hosted_logging_elasticsearch_cluster_size=1
openshift_hosted_logging_deployer_version=v{{repo_version}}
# This one is wrong (down arrow)
#openshift_hosted_logging_image_version=v{{repo_version}}
#openshift_logging_image_version=v{{repo_version}}
{% endif %}
openshift_logging_es_nodeselector={"env":"infra"}
@@ -263,7 +259,10 @@
openshift_hosted_router_selector='env=infra'
openshift_hosted_router_replicas={{infranode_instance_count}}
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
{% if install_lets_encrypt_certificates %}
openshift_hosted_router_certificate={"certfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer", "keyfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key", "cafile": "/root/fakeleintermediatex1.pem"}
{% endif %}
openshift_hosted_registry_selector='env=infra'
openshift_hosted_registry_replicas=1
ansible/configs/ocp-workshop/post_software.yml
@@ -104,6 +104,9 @@
    - env-specific
    - env-specific_infra
  tasks:
    - name: Command to enable the wildcard routes in the OCP cluster for 3scale
      shell: "oc set env dc/router ROUTER_ALLOW_WILDCARD_ROUTES=true -n default"
    - name: Give administrative user cluster-admin privileges
      command: "oc adm policy add-cluster-role-to-user cluster-admin {{ admin_user }}"
@@ -127,17 +130,11 @@
    - name: Add capabilities within anyuid which is not really ideal
      command: "oc patch scc/anyuid --patch '{\"requiredDropCapabilities\":[\"MKNOD\",\"SYS_CHROOT\"]}'"
    - name: Check for Node Selector (env=app) on project openshift-template-service-broker
      shell: oc describe project openshift-template-service-broker|grep node-selector|grep app
      register: node_selector_present
      ignore_errors: true
      when: osrelease | version_compare('3.7', '>=')
    - name: Remove Node Selector (env=app) from project openshift-template-service-broker if it is set
    - name: Set Node Selector to empty for project openshift-template-service-broker
      shell: oc annotate namespace openshift-template-service-broker openshift.io/node-selector="" --overwrite
      ignore_errors: true
      when:
      - osrelease | version_compare('3.7', '>=')
      - node_selector_present.rc == 0
- name: Remove all users from self-provisioners group
  hosts: masters[0]
@@ -147,6 +144,10 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags: [ env-specific, remove_self_provisioners ]
  tasks:
  - name: Set clusterRoleBinding auto-update to false
    command: oc annotate -n default --overwrite clusterrolebinding.rbac self-provisioners rbac.authorization.kubernetes.io/autoupdate=false
    when: remove_self_provisioners
  - name: Remove system:authenticated from self-provisioner role
    shell: "oadm policy remove-cluster-role-from-group self-provisioner system:authenticated system:authenticated:oauth"
    ignore_errors: true
@@ -262,11 +263,11 @@
  tasks:
    - name: Pull ose-recycler Image
      command: docker pull registry.access.redhat.com/openshift3/ose-recycler:latest
      when: osrelease | version_compare('3.7.9', 'eq')
      when: osrelease | version_compare('3.7.9', '>=')
    - name: Tag ose-recycler Image
      command: "docker tag registry.access.redhat.com/openshift3/ose-recycler:latest registry.access.redhat.com/openshift3/ose-recycler:v{{ osrelease }}"
      when: osrelease | version_compare('3.7.9', 'eq')
      when: osrelease | version_compare('3.7.9', '>=')
# Set up Prometheus/Node Exporter/Alertmanager/Grafana
# on the OpenShift Cluster
ansible/configs/ocp-workshop/pre_software.yml
@@ -47,6 +47,15 @@
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/common", when: 'install_common' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key", when: 'set_env_authorized_key' }
- name: Request Let's Encrypt Wildcard Certificates
  hosts: bastions[0]
  become: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  roles:
    -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/lets-encrypt", when: 'install_lets_encrypt_certificates' }
- name: Configuring Bastion Hosts
  hosts: bastions
  become: true
ansible/configs/ocp-workshop/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/openshift-demos/README.adoc
New file
@@ -0,0 +1,12 @@
This is imported from https://github.com/siamaksade/openshift-demos-ansible
== Requirements
You can import the roles needed for this config by running the following:
----
cd ansible_agnostic_deployer/ansible
ansible-galaxy install -r requirements.yml -p roles/
ansible-galaxy install -r requirements-prod.yml -p roles/  #for production
----
Please refer to https://github.com/siamaksade/openshift-demos-ansible for documentation.
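As a rough sketch (the working directory and extra vars below are assumptions based on how other configs in this repository are driven, not a documented entry point), one of the demo playbooks could be invoked like this:
----
cd ansible_agnostic_deployer/ansible
ansible-playbook configs/openshift-demos/msa-min.yml \
  -e ANSIBLE_REPO_PATH=$(pwd) \
  -e env_type=openshift-demos
----
The playbooks target `hosts: all`, so an inventory entry for a host with `oc` access to the cluster is also required.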
ansible/configs/openshift-demos/env_vars.yml
New file
@@ -0,0 +1,61 @@
---
env_type: openshift-demos
# oc_kube_config:
# oc_token:
# openshift_master:
project_suffix: demo
project_cicd: cicd-{{ project_suffix }}
project_cicd_name: "CI/CD"
project_cicd_desc: "CI/CD Components (Jenkins, Gogs, etc)"
project_prod: coolstore-prod-{{ project_suffix }}
project_prod_name: "CoolStore PROD"
project_prod_desc: "CoolStore PROD Environment"
project_stage: coolstore-test-{{ project_suffix }}
project_stage_name: "CoolStore TEST"
project_stage_desc: "CoolStore Test Environment"
project_test: inventory-test-{{ project_suffix }}
project_test_name: "Inventory DEV"
project_test_desc: "Inventory DEV Environment"
project_dev: developer-{{ project_suffix }}
project_dev_name: "Developer Project"
project_dev_desc: "Personal Developer Project"
project_default: coolstore-{{ project_suffix }}
project_default_name: "CoolStore MSA"
project_default_desc: "CoolStore MSA"
github_account: jbossdemocentral
github_ref: master
ephemeral: false
maven_mirror_url:
deploy_guides: true
prebuilt_images_project_name: coolstore-image-builds
set_hostname_suffix: true
gogs_image_version: 0.11.29
gogs_admin_user: team
gogs_admin_password: team
gogs_user: developer
gogs_password: developer
nexus_volume_capacity: 10Gi
nexus_max_memory: 2Gi
nexus_image_version: 3.6.1
# project_admin:
projects_join_with: "{{ project_cicd }}"
workshopper_content_url_prefix: https://raw.githubusercontent.com/osevg/workshopper-content/master
ansible/configs/openshift-demos/idle.yml
New file
@@ -0,0 +1,23 @@
---
- name: Idle Coolstore Microservices Demo
  hosts: all
  gather_facts: false
  run_once: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - include_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_commons_facts"
      vars:
        set_hostname_suffix: false
    - name: idling projects
      shell: "{{ openshift_cli }} idle -n {{ item }} --all"
      ignore_errors: true
      with_items:
        - "{{ project_default }}"
        - "{{ project_cicd }}"
        - "{{ project_prod }}"
        - "{{ project_stage }}"
        - "{{ project_test }}"
        - "{{ project_dev }}"
ansible/configs/openshift-demos/imagebuild-pipeline.yml
New file
@@ -0,0 +1,39 @@
---
- name: Create CoolStore Image Build Pipeline to Pre-Build Images
  hosts: all
  gather_facts: false
  run_once: true
  vars:
    project_name: coolstore-image-builds
    github_account: jbossdemocentral
    github_ref: ocp-3.7
    maven_mirror_url: https://mirror.openshift.com/nexus/content/groups/public/
    pipeline_template: https://raw.githubusercontent.com/{{ github_account }}/coolstore-microservice/{{ github_ref }}/openshift/templates/imagebuild-pipeline-template.yaml
  tasks:
  - include_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_commons_facts"
    vars:
      set_hostname_suffix: false
  - name: check if image builder project exists
    shell: "{{ openshift_cli }} get project {{ project_name }}"
    register: result
    ignore_errors: true
    changed_when: false
  - name: create image builder project {{ project_name }}
    shell: "{{ openshift_cli }} new-project {{ project_name }} --description='DO NOT REMOVE THIS PROJECT. NEEDED FOR COOLSTORE DEMO'"
    ignore_errors: true
    when: result|failed
  - import_role:
      name: openshift_jenkins
    vars:
      project_name: "{{ project_name }}"
      jenkins_image_tag: "v3.7"
      jenkins_image_force_import: "true"
      jenkins_max_cpu: 2
  - name: create coolstore image build pipeline
    shell: "{{ openshift_cli }} process -f {{ pipeline_template }} --param=GITHUB_ACCOUNT={{ github_account }} --param=GITHUB_REF={{ github_ref }} --param=MAVEN_MIRROR_URL={{ maven_mirror_url }} -n {{ project_name }} | {{ openshift_cli }} create -f - -n {{ project_name }}"
    ignore_errors: true
ansible/configs/openshift-demos/msa-cicd-eap-full.yml
New file
@@ -0,0 +1,188 @@
---
- name: Deploy Coolstore Microservices Demo - CI/CD with JBoss EAP
  hosts: all
  gather_facts: false
  run_once: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - include_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_commons_facts"
      tags:
        - cicd
        - guides
        - demo
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_gogs"
      vars:
        project_name: "{{ project_cicd }}"
        project_display_name: "{{ project_cicd_name }}"
        project_desc: "{{ project_cicd_desc }}"
        project_annotations: "demo=demo-modern-arch-{{ project_suffix }}"
        gogs_route: "gogs-{{ project_cicd }}.{{ apps_hostname_suffix }}"
      tags: cicd
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_sonatype_nexus"
      vars:
        project_name: "{{ project_cicd }}"
      when: >
        maven_mirror_url is not defined or
        maven_mirror_url is none or
        maven_mirror_url|trim() == ""
      tags: cicd
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_jenkins"
      vars:
        project_name: "{{ project_cicd }}"
        jenkins_image_tag: "v3.7"
        jenkins_image_force_import: "true"
      tags: cicd
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_workshopper"
      vars:
        project_name: "{{ project_cicd }}"
        workshopper_content_url_prefix: https://raw.githubusercontent.com/siamaksade/coolstore-demo-guides/openshift-3.7
        workshopper_workshop_urls: "{{ workshopper_content_url_prefix }}/demo-cicd-eap-full.yml"
        workshopper_env_vars:
          PROJECT_SUFFIX: "{{ project_suffix }}"
          GOGS_URL: "http://gogs-{{ project_cicd }}.{{ apps_hostname_suffix }}"
          GOGS_DEV_REPO_URL_PREFIX: "http://gogs-{{ project_cicd }}.{{ apps_hostname_suffix }}/{{ gogs_user }}/coolstore-microservice"
          JENKINS_URL: "http://jenkins-{{ project_prod }}.{{ apps_hostname_suffix }}"
          COOLSTORE_WEB_PROD_URL: "http://web-ui-{{ project_prod }}.{{ apps_hostname_suffix }}"
          HYSTRIX_PROD_URL: "http://hystrix-dashboard-{{ project_prod }}.{{ apps_hostname_suffix }}"
          GOGS_DEV_USER: "{{ gogs_user }}"
          GOGS_DEV_PASSWORD: "{{ gogs_password }}"
          GOGS_REVIEWER_USER: "{{ gogs_admin_user }}"
          GOGS_REVIEWER_PASSWORD: "{{ gogs_admin_password }}"
      when: deploy_guides
      tags: guides
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_coolstore"
      vars:
        project_annotations: "demo=demo-msa-cicd-eap-{{ project_suffix }}"
        hostname_suffix: "{{ apps_hostname_suffix }}"
        gogs_hostname: "gogs-{{ project_cicd }}.{{ apps_hostname_suffix }}"
        prune_deployments_selector_prod: ""
        prune_deployments_selector_stage: "comp-required!=true,app!=inventory"
      tags: demo
    # verify database deployments in cicd project
    - name: wait for database deployments in project {{ project_cicd }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_cicd }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_cicd }} selector="comp-type=database"
      tags: verify
    # verify database deployments in prod project
    - name: wait for database deployments in project {{ project_prod }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_prod }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_prod }} selector="comp-type=database"
    # verify database deployments in test project
    - name: wait for database deployments in project {{ project_test }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_test }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_test }} selector="comp-type=database"
    # verify database deployments in dev project
    - name: wait for database deployments in project {{ project_dev }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_dev }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_dev }} selector="comp-type=database"
      tags: verify
    # verify other deployments in cicd project
    - name: wait for other deployments in project {{ project_cicd }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_cicd }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_cicd }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in prod project
    - name: wait for other deployments in project {{ project_prod }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_prod }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_prod }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in stage project
    - name: wait for other deployments in project {{ project_stage }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_stage }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_stage }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in test project
    - name: wait for other deployments in project {{ project_test }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_test }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_test }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in dev project
    - name: wait for other deployments in project {{ project_dev }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_dev }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_dev }} selector="comp-type!=database"
      tags: verify
ansible/configs/openshift-demos/msa-cicd-eap-min.yml
New file
@@ -0,0 +1,178 @@
---
- name: Deploy Coolstore Microservices Demo - CI/CD with JBoss EAP
  hosts: all
  gather_facts: false
  run_once: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - include_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_commons_facts"
      tags:
        - cicd
        - guides
        - demo
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_gogs"
      vars:
        project_name: "{{ project_cicd }}"
        project_display_name: "{{ project_cicd_name }}"
        project_desc: "{{ project_cicd_desc }}"
        project_annotations: "demo=demo-modern-arch-{{ project_suffix }}"
        gogs_route: "gogs-{{ project_cicd }}.{{ apps_hostname_suffix }}"
      tags: cicd
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_sonatype_nexus"
      vars:
        project_name: "{{ project_cicd }}"
      when: >
        maven_mirror_url is not defined or
        maven_mirror_url is none or
        maven_mirror_url|trim() == ""
      tags: cicd
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_jenkins"
      vars:
        project_name: "{{ project_cicd }}"
        jenkins_image_tag: "v3.7"
        jenkins_image_force_import: "true"
      tags: cicd
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_workshopper"
      vars:
        project_name: "{{ project_cicd }}"
        workshopper_content_url_prefix: https://raw.githubusercontent.com/siamaksade/coolstore-demo-guides/openshift-3.7
        workshopper_workshop_urls: "{{ workshopper_content_url_prefix }}/demo-cicd-eap-min.yml"
        workshopper_env_vars:
          PROJECT_SUFFIX: "{{ project_suffix }}"
          GOGS_URL: "http://gogs-{{ project_cicd }}.{{ apps_hostname_suffix }}"
          GOGS_DEV_REPO_URL_PREFIX: "http://gogs-{{ project_cicd }}.{{ apps_hostname_suffix }}/{{ gogs_user }}/coolstore-microservice"
          JENKINS_URL: "http://jenkins-{{ project_prod }}.{{ apps_hostname_suffix }}"
          COOLSTORE_WEB_PROD_URL: "http://web-ui-{{ project_prod }}.{{ apps_hostname_suffix }}"
          HYSTRIX_PROD_URL: "http://hystrix-dashboard-{{ project_prod }}.{{ apps_hostname_suffix }}"
          GOGS_DEV_USER: "{{ gogs_user }}"
          GOGS_DEV_PASSWORD: "{{ gogs_password }}"
          GOGS_REVIEWER_USER: "{{ gogs_admin_user }}"
          GOGS_REVIEWER_PASSWORD: "{{ gogs_admin_password }}"
      when: deploy_guides
      tags: guides
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_coolstore"
      vars:
        project_annotations: "demo=demo-msa-cicd-eap-{{ project_suffix }}"
        hostname_suffix: "{{ apps_hostname_suffix }}"
        gogs_hostname: "gogs-{{ project_cicd }}.{{ apps_hostname_suffix }}"
        prune_deployments_selector: "comp-required!=true,app!=inventory"
        prune_builds_selector: "comp-required!=true,app!=inventory"
        disable_stage_project: true
      tags: demo
    # verify database deployments in cicd project
    - name: wait for database deployments in project {{ project_cicd }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_cicd }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_cicd }} selector="comp-type=database"
      tags: verify
    # verify database deployments in prod project
    - name: wait for database deployments in project {{ project_prod }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_prod }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_prod }} selector="comp-type=database"
    # verify database deployments in test project
    - name: wait for database deployments in project {{ project_test }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_test }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_test }} selector="comp-type=database"
    # verify database deployments in dev project
    - name: wait for database deployments in project {{ project_dev }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_dev }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_dev }} selector="comp-type=database"
      tags: verify
    # verify other deployments in cicd project
    - name: wait for other deployments in project {{ project_cicd }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_cicd }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_cicd }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in prod project
    - name: wait for other deployments in project {{ project_prod }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_prod }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_prod }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in test project
    - name: wait for other deployments in project {{ project_test }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_test }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_test }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in dev project
    - name: wait for other deployments in project {{ project_dev }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_dev }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_dev }} selector="comp-type!=database"
      tags: verify
ansible/configs/openshift-demos/msa-full.yml
New file
@@ -0,0 +1,44 @@
---
- name: Deploy Coolstore Microservices Demo
  hosts: all
  gather_facts: false
  run_once: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - include_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_commons_facts"
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_coolstore"
      vars:
        project_annotations: "demo=demo-msa-{{ project_suffix }}"
        hostname_suffix: "{{ apps_hostname_suffix }}"
        enable_cicd: false
    # verify database deployments
    - name: wait for database deployments to complete (succeed or fail)
      shell: "oc get pods -n {{ project_default }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_default }} selector="comp-type=database"
      tags: verify
    # verify other deployments
    - name: wait for other deployments to complete (succeed or fail)
      shell: "oc get pods -n {{ project_default }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_default }} selector="comp-type!=database"
      tags: verify
ansible/configs/openshift-demos/msa-min.yml
New file
@@ -0,0 +1,47 @@
---
- name: Deploy Coolstore Microservices Demo
  hosts: all
  gather_facts: false
  run_once: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - include_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_commons_facts"
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_coolstore"
      vars:
        project_annotations: "demo=demo-msa-{{ project_suffix }}"
        hostname_suffix: "{{ apps_hostname_suffix }}"
        enable_cicd: false
        prune_builds_selector: "comp-required!=true"
        prune_deployments_selector: "comp-required!=true"
    # verify database deployments
    - name: wait for database deployments to complete (succeed or fail)
      shell: "oc get pods -n {{ project_default }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_default }} selector="comp-type=database"
      tags: verify
    # verify other deployments
    - name: wait for other deployments to complete (succeed or fail)
      shell: "oc get pods -n {{ project_default }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_default }} selector="comp-type!=database"
      tags: verify
ansible/configs/openshift-demos/requirements-prod.yml
New file
@@ -0,0 +1,30 @@
---
- src: siamaksade.openshift_common_facts
  # src: https://github.com/siamaksade/ansible-openshift-common-facts.git
  name: openshift_commons_facts
  version: ocp-3.7
- src: siamaksade.openshift_sonatype_nexus
  # src: https://github.com/siamaksade/ansible-openshift-nexus.git
  name: openshift_sonatype_nexus
  version: ocp-3.7
- src: siamaksade.openshift_gogs
  # src: https://github.com/siamaksade/ansible-openshift-gogs.git
  name: openshift_gogs
  version: ocp-3.7
- src: siamaksade.openshift_jenkins
  # src: https://github.com/siamaksade/ansible-openshift-jenkins.git
  name: openshift_jenkins
  version: ocp-3.7
- src: siamaksade.openshift_workshopper
  # src: https://github.com/siamaksade/ansible-openshift-workshopper.git
  name: openshift_workshopper
  version: ocp-3.7
- src: siamaksade.openshift_coolstore
  # src: https://github.com/siamaksade/ansible-openshift-coolstore.git
  name: openshift_coolstore
  version: ocp-3.7
ansible/configs/openshift-demos/requirements.yml
New file
@@ -0,0 +1,24 @@
---
- src: siamaksade.openshift_common_facts
  # src: https://github.com/siamaksade/ansible-openshift-common-facts.git
  name: openshift_commons_facts
- src: siamaksade.openshift_sonatype_nexus
  # src: https://github.com/siamaksade/ansible-openshift-nexus.git
  name: openshift_sonatype_nexus
- src: siamaksade.openshift_gogs
  # src: https://github.com/siamaksade/ansible-openshift-gogs.git
  name: openshift_gogs
- src: siamaksade.openshift_jenkins
  # src: https://github.com/siamaksade/ansible-openshift-jenkins.git
  name: openshift_jenkins
- src: siamaksade.openshift_workshopper
  # src: https://github.com/siamaksade/ansible-openshift-workshopper.git
  name: openshift_workshopper
- src: siamaksade.openshift_coolstore
  # src: https://github.com/siamaksade/ansible-openshift-coolstore.git
  name: openshift_coolstore
ansible/configs/openshift-demos/undeploy.yml
New file
@@ -0,0 +1,20 @@
---
- name: Undeploy Coolstore Microservices Demo
  hosts: all
  gather_facts: false
  run_once: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
  - include_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_commons_facts"
    vars:
      set_hostname_suffix: false
  - name: delete ci/cd projects
    shell: "{{ openshift_cli }} delete project {{ project_cicd }} {{ project_prod }} {{ project_stage }} {{ project_test }} {{ project_dev }}"
    ignore_errors: true
  - name: delete default project
    shell: "{{ openshift_cli }} delete project {{ project_default }}"
    ignore_errors: true
ansible/configs/openshift-demos/unidle.yml
New file
@@ -0,0 +1,146 @@
---
- name: Unidle Coolstore Microservices Demo
  hosts: all
  gather_facts: false
  run_once: true
  tasks:
  - include_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift_commons_facts"
  # default project
  - name: check if {{ project_default }} exists
    shell: "{{ openshift_cli }} get project {{ project_default }}"
    register: result
    ignore_errors: true
    changed_when: false
  - name: list deployment configs in {{ project_default }}
    shell: "{{ openshift_cli }} get dc -o json -n {{ project_default }}"
    register: deploymentconfigs_list
    when: result|success
    changed_when: false
  - name: unidle all deployments in project {{ project_default }}
    shell: "{{ openshift_cli }} scale --replicas={{ item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] }} dc {{ item.metadata.name }} -n {{ project_default }}"
    with_items: "{{ deploymentconfigs_list.stdout|from_json|json_query('items') }}"
    when: >
      result|success and
      item.metadata.annotations is defined and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] is defined and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] is not none and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale']|trim() != ""
  # cicd project
  - name: check if {{ project_cicd }} exists
    shell: "{{ openshift_cli }} get project {{ project_cicd }}"
    register: result
    ignore_errors: true
    changed_when: false
  - name: list deployment configs in {{ project_cicd }}
    shell: "{{ openshift_cli }} get dc -o json -n {{ project_cicd }}"
    register: deploymentconfigs_list
    when: result|success
    changed_when: false
  - name: unidle all deployments in project {{ project_cicd }}
    shell: "{{ openshift_cli }} scale --replicas={{ item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] }} dc {{ item.metadata.name }} -n {{ project_cicd }}"
    with_items: "{{ deploymentconfigs_list.stdout|from_json|json_query('items') }}"
    when: >
      result|success and
      item.metadata.annotations is defined and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] is defined and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] is not none and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale']|trim() != ""
  # prod project
  - name: check if {{ project_prod }} exists
    shell: "{{ openshift_cli }} get project {{ project_prod }}"
    register: result
    ignore_errors: true
    changed_when: false
  - name: list deployment configs in {{ project_prod }}
    shell: "{{ openshift_cli }} get dc -o json -n {{ project_prod }}"
    register: deploymentconfigs_list
    when: result|success
    changed_when: false
  - name: unidle all deployments in project {{ project_prod }}
    shell: "{{ openshift_cli }} scale --replicas={{ item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] }} dc {{ item.metadata.name }} -n {{ project_prod }}"
    with_items: "{{ deploymentconfigs_list.stdout|from_json|json_query('items') }}"
    when: >
      result|success and
      item.metadata.annotations is defined and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] is defined and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] is not none and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale']|trim() != ""
  # stage project
  - name: check if {{ project_stage }} exists
    shell: "{{ openshift_cli }} get project {{ project_stage }}"
    register: result
    ignore_errors: true
    changed_when: false
  - name: list deployment configs in {{ project_stage }}
    shell: "{{ openshift_cli }} get dc -o json -n {{ project_stage }}"
    register: deploymentconfigs_list
    when: result|success
    changed_when: false
  - name: unidle all deployments in project {{ project_stage }}
    shell: "{{ openshift_cli }} scale --replicas={{ item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] }} dc {{ item.metadata.name }} -n {{ project_stage }}"
    with_items: "{{ deploymentconfigs_list.stdout|from_json|json_query('items') }}"
    when: >
      result|success and
      item.metadata.annotations is defined and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] is defined and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] is not none and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale']|trim() != ""
  # test project
  - name: check if {{ project_test }} exists
    shell: "{{ openshift_cli }} get project {{ project_test }}"
    register: result
    ignore_errors: true
    changed_when: false
  - name: list deployment configs in {{ project_test }}
    shell: "{{ openshift_cli }} get dc -o json -n {{ project_test }}"
    register: deploymentconfigs_list
    when: result|success
    changed_when: false
  - name: unidle all deployments in project {{ project_test }}
    shell: "{{ openshift_cli }} scale --replicas={{ item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] }} dc {{ item.metadata.name }} -n {{ project_test }}"
    with_items: "{{ deploymentconfigs_list.stdout|from_json|json_query('items') }}"
    when: >
      result|success and
      item.metadata.annotations is defined and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] is defined and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] is not none and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale']|trim() != ""
  # dev project
  - name: check if {{ project_dev }} exists
    shell: "{{ openshift_cli }} get project {{ project_dev }}"
    register: result
    ignore_errors: true
    changed_when: false
  - name: list deployment configs in {{ project_dev }}
    shell: "{{ openshift_cli }} get dc -o json -n {{ project_dev }}"
    register: deploymentconfigs_list
    when: result|success
    changed_when: false
  - name: unidle all deployments in project {{ project_dev }}
    shell: "{{ openshift_cli }} scale --replicas={{ item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] }} dc {{ item.metadata.name }} -n {{ project_dev }}"
    with_items: "{{ deploymentconfigs_list.stdout|from_json|json_query('items') }}"
    when: >
      result|success and
      item.metadata.annotations is defined and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] is defined and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale'] is not none and
      item.metadata.annotations['idling.alpha.openshift.io/previous-scale']|trim() != ""
ansible/configs/openshift-demos/verify_tasks.yml
New file
@@ -0,0 +1,37 @@
---
- name: check if project {{ project_name }} exists
  shell: "{{ openshift_cli }} get project {{ project_name }}"
  register: result
  ignore_errors: true
  changed_when: false
- name: get deploymentconfig names in {{ project_name }}
  shell: "{{ openshift_cli }} get dc -l {{ selector }} -o jsonpath='{.items[*].metadata.name}' -n {{ project_name }}"
  register: deploymentconfigs_list
  when: result|succeeded
  changed_when: false
- name: get deployment config replica counts in {{ project_name }}
  shell: "{{ openshift_cli }} get dc {{ deploymentconfigs_list.stdout }} -o json -n {{ project_name }}"
  register: deploymentconfigs_list_json
  when: result|succeeded
  changed_when: false
- name: redeploy if deployment has failed
  shell: |
    {{ openshift_cli }} rollout cancel dc/{{ dc.metadata.name }} -n {{ project_name }} | true
    sleep 30
    {{ openshift_cli }} rollout latest dc/{{ dc.metadata.name }} -n {{ project_name }}
    {{ openshift_cli }} rollout status dc/{{ dc.metadata.name }} -n {{ project_name }}
  when:
    - result|succeeded
    - deploymentconfigs_list_json|succeeded
    - dc.metadata is defined
    - dc.status is defined
    - dc.spec is defined
    - dc.status.availableReplicas != dc.spec.replicas
  loop_control:
    loop_var: dc
    label: "{{ dc.metadata.name if dc.metadata is defined else dc }}"
  with_items: "{{ deploymentconfigs_list_json.stdout|default('[]')|from_json|json_query('items') }}"
  ignore_errors: true
ansible/configs/ravello-bastion-setup/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/three-tier-app/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/main.yml
@@ -45,14 +45,17 @@
    - step003
    - pre_software_tasks
##################################################################################
##################################################################################
############ Step 004 Software Deploy Tasks
##################################################################################
##################################################################################
- name: Software
  include: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/software.yml"
  tags:
    - step004
    - deploy_software
- include: "{{ ANSIBLE_REPO_PATH }}/software_playbooks/{{ software_to_deploy }}.yml"
  tags:
@@ -69,4 +72,4 @@
  tags:
    - step005
    - post_software
    - post_software_tasks
    - post_software_tasks
ansible/roles/bastion-opentlc-ipa/tasks/main.yml
@@ -10,24 +10,15 @@
    state: present
- name: Register bastion with IPA
  shell: "/usr/sbin/ipa-client-install --domain=OPENTLC.COM -w {{ipa_host_password}} -N -U --mkhomedir --no-dns-sshfp --hostname={{bastion_public_dns_chomped}}"
  shell: "/usr/sbin/ipa-client-install --domain=OPENTLC.COM -w '{{ipa_host_password}}' -N -U --mkhomedir --no-dns-sshfp --hostname={{bastion_public_dns_chomped}}"
  when: ipa_host_password is defined
- name: Add opentlc-access ipa group to sudoers.d
  lineinfile:
    path: /etc/sudoers.d/opentlc-sudoers
    state: present
    create: yes
    line: '%opentlc-access ALL=(ALL)       NOPASSWD: ALL'
    validate: '/usr/sbin/visudo -cf %s'
  register: result
  retries: 5
  until: result | succeeded
  ignore_errors: yes
- name: report error
  fail:
    msg: Unable to update sudoers.d/opentlc-sudoers
  when: not result|succeeded
- name: Register bastion with IPA
  shell: "/usr/sbin/ipa-client-install --domain=OPENTLC.COM -p {{ipa_kerberos_user}} -w '{{ipa_kerberos_password}}' -N -U --mkhomedir --no-dns-sshfp --hostname={{bastion_public_dns_chomped}}"
  when:
    - ipa_host_password is not defined
    - ipa_kerberos_user is defined
    - ipa_kerberos_password is defined
- name: copy over ipa_optimize.sh script
  copy:
@@ -37,3 +28,20 @@
    group: root
    mode: 0700
  notify: Run ipa_optimize.sh
- name: Add opentlc-access ipa group to sudoers.d
  lineinfile:
    path: /etc/sudoers.d/opentlc-sudoers
    state: present
    create: yes
    line: '%opentlc-access ALL=(ALL)       NOPASSWD: ALL'
    validate: '/usr/sbin/visudo -cf %s'
  register: result
  retries: 20
  until: result | succeeded
  ignore_errors: yes
- name: report error
  fail:
    msg: Unable to update sudoers.d/opentlc-sudoers
  when: not result|succeeded
ansible/roles/bastion/tasks/main.yml
@@ -56,3 +56,13 @@
    mode: 0400
  tags:
    - copy_sshconfig_file
- name: Install python2-winrm and python-requests
  ignore_errors: yes
  become: true
  yum:
    name: "{{ item }}"
  with_items:
    - python2-winrm
    - python-requests
  when: install_win_ssh|bool
ansible/roles/lets-encrypt/README.md
New file
@@ -0,0 +1,42 @@
Role Name
=========
A brief description of the role goes here.
Requirements
------------
Requests Let's Encrypt wildcard certificates for the cluster (via the Let's Encrypt staging environment). Make sure to import the staging intermediate
certificate ([letsencrypt.org/certs/fakeleintermediatex1.pem](https://letsencrypt.org/certs/fakeleintermediatex1.pem)) into your browser so that all newly created certificates are accepted.
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
    - hosts: servers
      roles:
         - { role: username.rolename, x: 42 }
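For this role specifically, a minimal sketch might look like the following, assuming the play runs on the bastion/master where the certificates are requested, that `route53user_access_key`/`route53user_secret_access_key` are set as facts on `localhost` (as `tasks/main.yml` expects), and that the host name and domain values below are placeholders:
    - hosts: bastion
      become: true
      vars:
        master_lb_dns: master.example.opentlc.com       # placeholder value
        cloudapps_suffix: apps.example.opentlc.com      # placeholder value
      roles:
        - lets-encrypt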
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).
ansible/roles/lets-encrypt/files/defaults/main.yml
New file
@@ -0,0 +1,2 @@
---
# defaults file for bastion
ansible/roles/lets-encrypt/tasks/main.yml
New file
@@ -0,0 +1,45 @@
---
## Request Let's Encrypt Wildcard Certificates for the Cluster
## Make sure to import the intermediate
## certificate (https://letsencrypt.org/certs/fakeleintermediatex1.pem)
## into your browser to accept all newly created certificates
  - name: Get Temporary CA Certificate
    get_url:
      url: https://letsencrypt.org/certs/fakeleintermediatex1.pem
      dest: /root/fakeleintermediatex1.pem
  - name: Remove Let's Encrypt directory if it's there
    file:
      path: /root/acme.sh
      state: absent
  - name: Remove Let's Encrypt cache if it's there
    file:
      path: /root/.acme.sh
      state: absent
  - name: Clone Let's Encrypt Repo
    git:
      repo: https://github.com/Neilpang/acme.sh.git
      clone: yes
      dest: /root/acme.sh
#      version: 2
  - name: Add AWS Access Key to Let's Encrypt configuration
    lineinfile:
      path: /root/acme.sh/dnsapi/dns_aws.sh
      line: AWS_ACCESS_KEY_ID="{{ hostvars['localhost'].route53user_access_key }}"
      state: present
      insertbefore: '^#AWS_ACCESS_KEY_ID'
  - name: Add AWS Secret Access Key to Let's Encrypt configuration
    lineinfile:
      path: /root/acme.sh/dnsapi/dns_aws.sh
      line: AWS_SECRET_ACCESS_KEY="{{ hostvars['localhost'].route53user_secret_access_key }}"
      state: present
      insertbefore: '^#AWS_SECRET_ACCESS_KEY'
  - name: Request Wildcard Certificates from Let's Encrypt
    shell: "/root/acme.sh/acme.sh --server https://acme-staging-v02.api.letsencrypt.org/directory --test --issue -d {{ master_lb_dns }} -d *.{{ cloudapps_suffix }} --dns dns_aws"
    args:
      chdir: /root/acme.sh
ansible/roles/ocp-workload-bxms-ba/defaults/main.yml
New file
@@ -0,0 +1,26 @@
---
become_override: false
ocp_username: jbride-redhat.com
ocp_user_needs_quota: True
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
quota_requests_cpu: 5
quota_limits_cpu: 10
quota_requests_memory: '6Gi'
quota_limits_memory: '20Gi'
quota_configmaps: 10
quota_pods: 20
quota_persistentvolumeclaims: 20
quota_services: 30
quota_secrets: 30
quota_requests_storage: 50Gi
build_status_retries: 20
build_status_delay: 15
deploy_status_retries: 15
deploy_status_delay: 15
ansible/roles/ocp-workload-bxms-ba/readme.adoc
New file
@@ -0,0 +1,131 @@
= ocp-workload-developer-environment - Sample Config
== Role overview
* This is a simple role that does the following (the task files are wired together by `tasks/main.yml`, as sketched after this list):
** Playbook: link:./tasks/pre_workload.yml[pre_workload.yml] - Sets up an
 environment for the workload deployment
*** Adds a user to a list of groups defined in the
 link:./defaults/main.yml[defaults file].
*** Sets a cluster resource quota for the user based on the variables in the
 link:./defaults/main.yml[defaults file] .
*** Debug task will print out: `pre_workload Tasks Complete`
** Playbook: link:./tasks/workload.yml[workload.yml] - Used to deploy the actual
 workload, i.e., 3scale, Mobile or some Demo
*** This role doesn't do anything here
*** Debug task will print out: `workload Tasks Complete`
** Playbook: link:./tasks/post_workload.yml[post_workload.yml] - Used to
 configure the workload after deployment
*** This role doesn't do anything here
*** Debug task will print out: `post_workload Tasks Complete`
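The role's link:./tasks/main.yml[tasks/main.yml] dispatches to these task files based on the `ACTION` variable, essentially as follows:
[source,yaml]
----
- include: ./pre_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- include: ./workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- include: ./post_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- include: ./remove_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
----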
== Set up your Ansible inventory file
* You will need to create an Ansible inventory file to define your connection
 method to your host (Master/Bastion with OC command)
* You can also use the command line to define the hosts directly if your `ssh`
 configuration is set to connect to the host correctly
* You can also simply target localhost from the command line if your cluster is
 already authenticated and configured in your `oc` configuration
[source, ini]
.example inventory file
----
[gptehosts:vars]
ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem
ansible_ssh_user=ec2-user
[gptehosts:children]
openshift
[openshift]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
bastion.cluster3.openshift.opentlc.com ansible_ssh_host=ec2-11-111-111-11.us-west-2.compute.amazonaws.com
bastion.cluster4.openshift.opentlc.com
[dev]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
[prod]
bastion.cluster3.openshift.opentlc.com
bastion.cluster4.openshift.opentlc.com
----
== Review the defaults variable file
* This file link:./defaults/main.yml[./defaults/main.yml] contains all the variables you
 need to define to control the deployment of your workload.
* You can modify any of these default values by adding
`-e"variable_name=variable_value"` to the command line
=== Deploy Workload on OpenShift Cluster from an existing playbook:
[source,yaml]
----
- name: Deploy a workload role on a master host
  hosts: all
  become: true
  gather_facts: False
  tags:
    - step007
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/{{ocp_workload}}", when: 'ocp_workload is defined' }
----
NOTE: You might want to change `hosts: all` to fit your requirements
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
GUID=jb45
HOST_GUID=dev37
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-bxms-ba"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=true" \
                    -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
                    -e"ACTION=create"
----
=== To Delete an environment
----
GUID=jb45
HOST_GUID=dev37
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-bxms-ba"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ACTION=remove"
----
ansible/roles/ocp-workload-bxms-ba/tasks/main.yml
New file
@@ -0,0 +1,20 @@
---
- name: Running Pre Workload Tasks
  include: ./pre_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  include: ./workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  include: ./post_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  include: ./remove_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp-workload-bxms-ba/tasks/post_workload.yml
New file
@@ -0,0 +1,5 @@
---
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully"
ansible/roles/ocp-workload-bxms-ba/tasks/pre_workload.yml
New file
@@ -0,0 +1,32 @@
---
# - name: Add user to developer group (allowed to create projects)
#   shell: "oadm groups add-users {{item}} {{ocp_username}}"
#   register: groupadd_register
#   with_items: "{{ocp_user_groups}}"
#   when: ocp_username is defined and ocp_user_groups is defined
#
# - name: test that command worked
#   debug:
#     var: groupadd_register
#     verbosity: 2
- name: Create user Quota - clusterresourcequota
  shell: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
        --hard requests.memory="{{quota_requests_memory}}" \
        --hard limits.memory="{{quota_limits_memory}}" \
        --hard configmaps="{{quota_configmaps}}" \
        --hard pods="{{quota_pods}}" \
        --hard persistentvolumeclaims="{{quota_persistentvolumeclaims}}"  \
        --hard services="{{quota_services}}" \
        --hard secrets="{{quota_secrets}}" \
        --hard requests.storage="{{quota_requests_storage}}"
  ignore_errors: true
- name: pre_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully"
ansible/roles/ocp-workload-bxms-ba/tasks/remove_workload.yml
New file
@@ -0,0 +1,23 @@
---
- name: post_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: define ocp_project
  set_fact:
    ocp_project: "bxms-ba-{{guid}}"
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
- name: Remove any lingering tmp files
  shell: "rm -rf /tmp/{{guid}}"
- name: Remove user Project
  shell: "oc delete project {{ocp_project}}"
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully - Removed"
ansible/roles/ocp-workload-bxms-ba/tasks/wait_for_build.yml
New file
@@ -0,0 +1,23 @@
---
# Purpose:
#   This script queries OCP for builds that exist but are not yet ready.
#   So long as there are unready builds, this script continues to loop
#
# Manual Test to determine list of unready builds :
#  1) install jp :  https://github.com/jmespath/jp
#  2) oc get builds -o json | jp "items[?  (status.phase != 'Complete') ].metadata.annotations.\"openshift.io/build-config.name\""
#
#  Documentation pertaining to JMESPath (json_query) syntax:
#    - http://jmespath.org/tutorial.html
#    - https://stackoverflow.com/questions/41261680/ansible-json-query-path-to-select-item-by-content
#
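# The 'until' expression below keeps retrying while any build listed in build_to_wait
# still has a status.phase other than 'Complete'.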
- name: "Wait for following builds to become ready: {{build_to_wait}}"
  command: 'oc get build -o json -n "{{ ocp_project }}"'
  register: build_state
  changed_when: false
  retries: "{{ build_status_retries }}"
  delay: "{{ build_status_delay }}"
  vars:
    query: "items[?  (status.phase != 'Complete') ].metadata.annotations.\"openshift.io/build-config.name\""
  until: "build_state.stdout |from_json |json_query(query) |intersect(build_to_wait) |length == 0"
ansible/roles/ocp-workload-bxms-ba/tasks/wait_for_deploy.yml
New file
@@ -0,0 +1,20 @@
---
# Purpose:
#   This script queries OCP for replication controllers that exist but are not yet ready.
#   So long as there are unready replication controllers, this script continues to loop
#
# Manual Test to determine list of unready replication controllers :
#  1) install jp :  https://github.com/jmespath/jp
#  2) oc get rc -o json | jp 'items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.annotations."openshift.io/deployment-config.name"'
#
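# The 'until' expression below keeps retrying while any deployment config listed in pod_to_wait
# still has an empty or zero readyReplicas count on its replication controller.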
- name: "Wait for following deployments to become ready: {{pod_to_wait}}"
  command: 'oc get rc -o json -n "{{ ocp_project }}"'
  register: rc_state
  changed_when: false
  retries: "{{ deploy_status_retries }}"
  delay: "{{ deploy_status_delay }}"
  until: 'rc_state.stdout |from_json |json_query(''items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.annotations."openshift.io/deployment-config.name"'') |intersect(pod_to_wait) |length == 0'
#  Documentation pertaining to JMESPath (json_query) syntax:
#    - http://jmespath.org/tutorial.html
ansible/roles/ocp-workload-bxms-ba/tasks/workload.yml
New file
@@ -0,0 +1,100 @@
---
- name: define ocp_project
  set_fact:
    ocp_project: "bxms-ba-{{guid}}"
- name: "Create project for workload {{ocp_project}}"
  shell: "oc new-project {{ocp_project}}"
- name: Give ocp_username access to ocp_project
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_project}}"
- name: Make sure we go back to default project
  shell: "oc project default"
- name: Initialize the project template
  shell: "oc create -f https://raw.githubusercontent.com/gpe-mw-training/bxms_decision_mgmt_foundations_lab/master/resources/ng-mortgage-bpmsuite70-full-mysql.yaml -n {{ocp_project}}"
- name: Prep local filesystem for temp files
  file:
    path: "/tmp/{{guid}}"
    state: directory
- name: Create a keystore
  shell: |
      keytool -genkey \
      -keyalg RSA \
      -alias selfsigned \
      -keystore /tmp/{{guid}}/keystore.jks \
      -validity 360 \
      -keysize 2048 \
      -alias jboss \
      -storepass rhtgpte \
      -keypass rhtgpte \
      -dname CN={{ocp_username}},OU=gpte,O=redhat.com,L=Raleigh,S=NC,C=US
- name: Configure a service account that is loaded with the previously created secret
  shell: oc create serviceaccount bpmsuite-service-account -n {{ocp_project}}
- shell: oc policy add-role-to-user view system:serviceaccount:{{ocp_project}}:bpmsuite-service-account
- shell: oc secrets new bpmsuite-app-secret /tmp/{{guid}}/keystore.jks -n {{ocp_project}}
- shell: oc secrets link bpmsuite-service-account bpmsuite-app-secret -n {{ocp_project}}
- name: Pull down JBoss CLI commands that enable the undertow component of the execution server with CORS
  get_url:
    url: https://raw.githubusercontent.com/gpe-mw-training/bxms_decision_mgmt_foundations_lab/master/resources/undertow-cors.cli
    dest: "/tmp/{{guid}}/undertow-cors.cli"
- name: Create a configmap of JBoss CLI commands that enable the undertow component of the execution server with CORS.
  shell: "oc create configmap undertow-cors --from-file=/tmp/{{guid}}/undertow-cors.cli -n {{ocp_project}}"
- name: Initialize OCP resources from the project template
  shell: |
      oc new-app --name=ba-demo -n {{ocp_project}} --template=ng-mortgage-bpmsuite70-full-mysql \
      -p IMAGE_STREAM_NAMESPACE=openshift \
      -p HTTPS_PASSWORD=rhtgpte \
      -p KIE_ADMIN_PWD=admin \
      -p APPLICATION_NAME=gpte > /tmp/{{guid}}/gpte-ba-demo.txt
- include: ./wait_for_build.yml
  static: no
  vars:
    build_to_wait:
      - gpte-custom-execserv
      - gpte-ng-mortgages
- name: resume mysql RDBMS
  shell: oc rollout resume dc/gpte-mysql -n {{ocp_project}}
- include: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - gpte-mysql
- name: resume gpte-execserv
  shell: oc rollout resume dc/gpte-execserv -n {{ocp_project}}
- include: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - gpte-execserv
- name: resume gpte-buscentr
  shell: oc rollout resume dc/gpte-buscentr -n {{ocp_project}}
- include: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - gpte-buscentr
- name: resume gpte-ng-mortgages
  shell: oc rollout resume dc/gpte-ng-mortgages -n {{ocp_project}}
- include: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - gpte-ng-mortgages
- name: workload Tasks Complete
  debug:
    msg: workload Tasks Complete
ansible/roles/ocp-workload-bxms-dm/defaults/main.yml
New file
@@ -0,0 +1,26 @@
---
become_override: false
ocp_username: jbride-redhat.com
ocp_user_needs_quota: True
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
quota_requests_cpu: 5
quota_limits_cpu: 10
quota_requests_memory: '6Gi'
quota_limits_memory: '20Gi'
quota_configmaps: 10
quota_pods: 20
quota_persistentvolumeclaims: 20
quota_services: 30
quota_secrets: 30
quota_requests_storage: 50Gi
build_status_retries: 20
build_status_delay: 20
deploy_status_retries: 15
deploy_status_delay: 20
ansible/roles/ocp-workload-bxms-dm/readme.adoc
New file
@@ -0,0 +1,63 @@
= ocp-workload-bxms-dm
=== Deploy Workload on OpenShift Cluster from an existing playbook:
[source,yaml]
----
- name: Deploy a workload role on a master host
  hosts: all
  become: true
  gather_facts: False
  tags:
    - step007
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/{{ocp_workload}}", when: 'ocp_workload is defined' }
----
NOTE: You might want to change `hosts: all` to fit your requirements
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
GUID=jb45
HOST_GUID=dev37
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-bxms-dm"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=true" \
                    -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
                    -e"ACTION=create"
----
=== To Delete an environment
----
GUID=jb45
HOST_GUID=dev37
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-bxms-dm"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ACTION=remove"
----
ansible/roles/ocp-workload-bxms-dm/tasks/main.yml
New file
@@ -0,0 +1,20 @@
---
- name: Running Pre Workload Tasks
  include: ./pre_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  include: ./workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  include: ./post_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  include: ./remove_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp-workload-bxms-dm/tasks/post_workload.yml
New file
@@ -0,0 +1,5 @@
---
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully"
ansible/roles/ocp-workload-bxms-dm/tasks/pre_workload.yml
New file
@@ -0,0 +1,32 @@
---
# - name: Add user to developer group (allowed to create projects)
#   shell: "oadm groups add-users {{item}} {{ocp_username}}"
#   register: groupadd_register
#   with_items: "{{ocp_user_groups}}"
#   when: ocp_username is defined and ocp_user_groups is defined
#
# - name: test that command worked
#   debug:
#     var: groupadd_register
#     verbosity: 2
- name: Create user Quota - clusterresourcequota
  shell: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
        --hard requests.memory="{{quota_requests_memory}}" \
        --hard limits.memory="{{quota_limits_memory}}" \
        --hard configmaps="{{quota_configmaps}}" \
        --hard pods="{{quota_pods}}" \
        --hard persistentvolumeclaims="{{quota_persistentvolumeclaims}}"  \
        --hard services="{{quota_services}}" \
        --hard secrets="{{quota_secrets}}" \
        --hard requests.storage="{{quota_requests_storage}}"
  ignore_errors: true
- name: pre_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully"
ansible/roles/ocp-workload-bxms-dm/tasks/remove_workload.yml
New file
@@ -0,0 +1,23 @@
---
- name: post_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: define ocp_project
  set_fact:
    ocp_project: "bxms-dm-{{guid}}"
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
- name: Remove any lingering tmp files
  shell: "rm -rf /tmp/{{guid}}"
- name: Remove user Project
  shell: "oc delete project {{ocp_project}}"
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully - Removed"
ansible/roles/ocp-workload-bxms-dm/tasks/wait_for_build.yml
New file
@@ -0,0 +1,23 @@
---
# Purpose:
#   This script queries OCP for builds that exist but are not yet ready.
#   So long as there are unready builds, this script continues to loop
#
# Manual Test to determine list of unready builds :
#  1) install jp :  https://github.com/jmespath/jp
#  2) oc get builds -o json | jp "items[?  (status.phase != 'Complete') ].metadata.annotations.\"openshift.io/build-config.name\""
#
#  Documentation pertaining to JMESPath (json_query) syntax:
#    - http://jmespath.org/tutorial.html
#    - https://stackoverflow.com/questions/41261680/ansible-json-query-path-to-select-item-by-content
#
- name: "Wait for following builds to become ready: {{build_to_wait}}"
  command: 'oc get build -o json -n "{{ ocp_project }}"'
  register: build_state
  changed_when: false
  retries: "{{ build_status_retries }}"
  delay: "{{ build_status_delay }}"
  vars:
    query: "items[?  (status.phase != 'Complete') ].metadata.annotations.\"openshift.io/build-config.name\""
  until: "build_state.stdout |from_json |json_query(query) |intersect(build_to_wait) |length == 0"
ansible/roles/ocp-workload-bxms-dm/tasks/wait_for_deploy.yml
New file
@@ -0,0 +1,20 @@
---
# Purpose:
#   This script queries OCP for replication controllers that exist but are not yet ready.
#   So long as there are unready replication controllers, this script continues to loop
#
# Manual Test to determine list of unready replication controllers :
#  1) install jp :  https://github.com/jmespath/jp
#  2) oc get rc -o json | jp 'items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.annotations."openshift.io/deployment-config.name"'
#
- name: "Wait for following deployments to become ready: {{pod_to_wait}}"
  command: 'oc get rc -o json -n "{{ ocp_project }}"'
  register: rc_state
  changed_when: false
  retries: "{{ deploy_status_retries }}"
  delay: "{{ deploy_status_delay }}"
  until: 'rc_state.stdout |from_json |json_query(''items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.annotations."openshift.io/deployment-config.name"'') |intersect(pod_to_wait) |length == 0'
#  Documentation pertaining to JMESPath (json_query) syntax:
#    - http://jmespath.org/tutorial.html
ansible/roles/ocp-workload-bxms-dm/tasks/workload.yml
New file
@@ -0,0 +1,65 @@
---
- name: define ocp_project
  set_fact:
    ocp_project: "bxms-dm-{{guid}}"
- name: "Create project for workload {{ocp_project}}"
  shell: "oc new-project {{ocp_project}}"
- name: Give ocp_username access to ocp_project
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_project}}"
- name: Make sure we go back to default project
  shell: "oc project default"
- name: Initialize the project template
  shell: "oc create -f https://raw.githubusercontent.com/gpe-mw-training/bxms_decision_mgmt_foundations_lab/master/resources/rhdm7-full-ng.yaml -n {{ocp_project}}"
- name: Prep local filesystem for temp files
  file:
    path: "/tmp/{{guid}}"
    state: directory
- name: Initialize OCP resources from the project template using CLUSTER {{ ocp_apps_domain }}
  shell: |
      oc new-app --name=dm-demo -n {{ocp_project}} --template=rhdm7-full-ng \
      -p RHT_IMAGE_STREAM_NAMESPACE=openshift \
      -p KIE_ADMIN_PWD=test1234! \
      -p MAVEN_REPO_PASSWORD=test1234! \
      -p CLUSTER={{ocp_apps_domain}} \
      -p APPLICATION_NAME=gpte > /tmp/{{guid}}/gpte-dm-demo.txt
- include: ./wait_for_build.yml
  static: no
  vars:
    build_to_wait:
      - gpte-custom-kieserver
      - gpte-ng-dmf
- name: resume gpte-rhdmcentr
  shell: oc rollout resume dc/gpte-rhdmcentr -n {{ocp_project}}
- include: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - gpte-rhdmcentr
- name: resume gpte-kieserver
  shell: oc rollout resume dc/gpte-kieserver -n {{ocp_project}}
- include: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - gpte-kieserver
- name: resume gpte-ng-dmf
  shell: oc rollout resume dc/gpte-ng-dmf -n {{ocp_project}}
- include: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - gpte-ng-dmf
- name: workload Tasks Complete
  debug:
    msg: workload Tasks Complete
ansible/roles/ocp-workload-developer-environment/defaults/main.yml
@@ -1,5 +1,5 @@
---
become_override: true
become_override: false
ocp_username: shachar-redhat.com
ocp_user_needs_quota: True
@@ -18,3 +18,4 @@
quota_services: 30
quota_secrets: 30
quota_requests_storage: 50Gi
silent: false
ansible/roles/ocp-workload-developer-environment/readme.adoc
@@ -21,45 +21,6 @@
*** This role doesn't do anything here
*** Debug task will print out: `post_workload Tasks Complete`
== Set up your Ansible inventory file
* You will need to create an Ansible inventory file to define your connection
 method to your host (Master/Bastion with OC command)
* You can also use the command line to define the hosts directly if your `ssh`
 configuration is set to connect to the host correctly
* You can also use the command line to use localhost or if your cluster is
 already authenticated and configured in your `oc` configuration
[source, ini]
.example inventory file
----
[gptehosts:vars]
ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem
ansible_ssh_user=ec2-user
[gptehosts:children]
openshift
[openshift]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
bastion.cluster3.openshift.opentlc.com ansible_ssh_host=ec2-11-111-111-11.us-west-2.compute.amazonaws.com
bastion.cluster4.openshift.opentlc.com
[dev]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
[prod]
bastion.cluster3.openshift.opentlc.com
bastion.cluster4.openshift.opentlc.com
----
== Review the defaults variable file
* This file link:./defaults/main.yml[./defaults/main.yml] contains all the variables you
@@ -67,23 +28,6 @@
* You can modify any of these default values by adding
`-e"variable_name=variable_value"` to the command line
=== Deploy Workload on OpenShift Cluster from an existing playbook:
[source,yaml]
----
- name: Deploy a workload role on a master host
  hosts: all
  become: true
  gather_facts: False
  tags:
    - step007
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/{{ocp_workload}}", when: 'ocp_workload is defined' }
----
NOTE: You might want to change `hosts: all` to fit your requirements
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
@@ -122,3 +66,60 @@
                    -e"guid=${GUID}" \
                    -e"ACTION=remove"
----
== Other related information:
=== Deploy Workload on OpenShift Cluster from an existing playbook:
[source,yaml]
----
- name: Deploy a workload role on a master host
  hosts: all
  become: true
  gather_facts: False
  tags:
    - step007
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/{{ocp_workload}}", when: 'ocp_workload is defined' }
----
NOTE: You might want to change `hosts: all` to fit your requirements
=== Set up your Ansible inventory file
* You can create an Ansible inventory file to define your connection
 method to your host (Master/Bastion with OC command)
* You can also use the command line to define the hosts directly if your `ssh`
 configuration is set to connect to the host correctly
* You can also simply target localhost from the command line if your cluster is
 already authenticated and configured in your `oc` configuration
[source, ini]
.example inventory file
----
[gptehosts:vars]
ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem
ansible_ssh_user=ec2-user
[gptehosts:children]
openshift
[openshift]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
bastion.cluster3.openshift.opentlc.com ansible_ssh_host=ec2-11-111-111-11.us-west-2.compute.amazonaws.com
bastion.cluster4.openshift.opentlc.com
[dev]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
[prod]
bastion.cluster3.openshift.opentlc.com
bastion.cluster4.openshift.opentlc.com
----
ansible/roles/ocp-workload-developer-environment/tasks/post_workload.yml
@@ -3,3 +3,4 @@
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully"
  when: not silent|bool
ansible/roles/ocp-workload-developer-environment/tasks/pre_workload.yml
@@ -1,10 +1,11 @@
---
- name: Add user to developer group (allowed to create projects)
  shell: "oadm groups add-users {{item}} {{ocp_username}}"
  command: "oadm groups add-users {{item}} {{ocp_username}}"
  register: groupadd_register
  with_items: "{{ocp_user_groups}}"
  when: ocp_username is defined and ocp_user_groups is defined
  when:
    - ocp_username is defined
    - ocp_user_groups | default([]) | length > 0
- name: test that command worked
  debug:
@@ -12,7 +13,7 @@
    verbosity: 2
- name: Create user Quota - clusterresourcequota
  shell: |
  command: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
ansible/roles/ocp-workload-developer-environment/tasks/remove_workload.yml
@@ -4,21 +4,23 @@
    msg: "Pre-Software checks completed successfully - Removed"
- name: Remove user from developer group - (remove ability to create projects)
  shell: "oadm groups remove-users {{item}} {{ocp_username}}"
  command: "oadm groups remove-users {{item}} {{ocp_username}}"
  register: groupadd_register
  with_items: "{{ocp_user_groups}}"
  when: ocp_username is defined and ocp_user_groups is defined
  when:
    - ocp_username is defined
    - ocp_user_groups | default([]) | length > 0
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  command: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}
  command: oc delete clusterresourcequota clusterquota-{{ocp_username}}
  ignore_errors: true
- name: Remove user Projects - oc get projects
  shell: "oc get projects -o json"
  command: "oc get projects -o json"
  register: all_projects
- name: Remove user Projects - Convert output to json
@@ -29,14 +31,18 @@
  debug:
    msg: "found user project: {{item.metadata.name}}"
    verbosity: 1
  when: item.metadata.annotations['openshift.io/requester'] is defined and item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
  with_items: "{{projects['items']}}"
- name: Remove user Projects - "oc delete project {{item.metadata.name}}"
  shell: "oc delete project {{item.metadata.name}}"
  command: "oc delete project {{item.metadata.name}}"
  when:
    - (item.metadata.annotations['openshift.io/requester'] is defined and item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}")
    - (item.status.phase is defined and item.status.phase != "Terminating")
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
    - item.status.phase is defined
    - item.status.phase != "Terminating"
  with_items: "{{projects['items']}}"
- name: post_workload Tasks Complete
ansible/roles/ocp-workload-developer-environment/tasks/workload.yml
@@ -1,6 +1,5 @@
---
- name: workload Tasks Complete
  debug:
    msg: workload Tasks Complete
ansible/roles/ocp-workload-fuse-enmasse/defaults/main.yml
@@ -1,4 +1,5 @@
---
become_override: true
ocp_username: shachar-redhat.com
ocp_user_needs_quota: True
ansible/roles/ocp-workload-fuse-enmasse/enmasse-provision.yml
New file
@@ -0,0 +1,18 @@
# Plagiarized from configs/ocp-workloads/ocp-workload.yml
- name: Prep enmasse
  hosts: all
  become: false
  gather_facts: False
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - step007
  roles:
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/{{ocp_workload}}"
# invoke out-of-the-box playbook from enmasse project
- name: Deploy enmasse
  import_playbook: "{{enmasse_path}}/ansible/playbooks/openshift/multitenant.yml"
  when: ACTION == "create" or ACTION == "provision"
ansible/roles/ocp-workload-fuse-enmasse/readme.adoc
@@ -1,127 +1,51 @@
= ocp-workload-developer-environment - Sample Config
= ocp-workload-fuse-enmasse
== Role overview
* This is a simple role that does the following:
** Playbook: link:./tasks/pre_workload.yml[pre_workload.yml] - Sets up an
 environment for the workload deployment
*** Adds a user to a list of groups defined in the
 link:./defaults/main.yml[defaults file].
*** Sets a cluster resource quota for the user based on the variables in the
 link:./defaults/main.yml[defaults file] .
*** Debug task will print out: `pre_workload Tasks Complete`
** Playbook: link:./tasks/workload.yml[workload.yml] - Used to deploy the actual
 workload, i.e., 3scale, Mobile or some Demo
*** This role doesn't do anything here
*** Debug task will print out: `workload Tasks Complete`
** Playbook: link:./tasks/post_workload.yml[post_workload.yml] - Used to
 configure the workload after deployment
*** This role doesn't do anything here
*** Debug task will print out: `post_workload Tasks Complete`
== Review the defaults variable file
* This file link:./defaults/main.yml[./defaults/main.yml] contains all the variables you
 need to define to control the deployment of your workload.
* You can modify any of these default values by adding
`-e"variable_name=variable_value"` to the command line
=== Deploy Workload on OpenShift Cluster from an existing playbook:
[source,yaml]
=== Deploy a Workload with the `ocp-workload` playbook
----
- name: Deploy a workload role on a master host
  hosts: all
  become: true
  gather_facts: False
  tags:
    - step007
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/{{ocp_workload}}", when: 'ocp_workload is defined' }
----
NOTE: You might want to change `hosts: all` to fit your requirements
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
TARGET_HOST="bastion.dev37.openshift.opentlc.com"
OCP_USERNAME="njoshi-redhat.com"
GUID=jb45
HOST_GUID=dev37
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-fuse-enmasse"
GUID=4444
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/forge" \
                 -e"ansible_ssh_user=ec2-user" \
# The following command specifies a custom playbook that imports the out-of-the-box enmasse playbook
ansible-playbook -i ${TARGET_HOST}, ./roles/ocp-workload-fuse-enmasse/enmasse-provision.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=true" \
                    -e"ocp_apps_domain=apps.dev37.openshift.opentlc.com" \
                    -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
                    -e"enmasse_tag=0.17.0" \
                    -e"enmasse_path=/tmp/enmasse-{{enmasse_tag}}" \
                    -e"namespace=amq-enmasse-${GUID}" \
                    -e"ACTION=create"
----
=== To Delete an environment
----
TARGET_HOST="bastion.dev37.openshift.opentlc.com"
OCP_USERNAME="shacharb-redhat.com"
WORKLOAD="ocp-workload-fuse-ignite"
GUID=3005
GUID=jb45
HOST_GUID=dev37
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-fuse-enmasse"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem" \
                    -e"ansible_ssh_user=ec2-user" \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"namespace="amq-enmasse-${GUID} \
                    -e"ACTION=remove"
----
== Set up your Ansible inventory file
* You can create an Ansible inventory file to define your connection
 method to your host (Master/Bastion with OC command)
* You can also use the command line to define the hosts directly if your `ssh`
 configuration is set to connect to the host correctly
* You can also simply target localhost from the command line if your cluster is
 already authenticated and configured in your `oc` configuration
[source, ini]
.example inventory file
----
[gptehosts:vars]
ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem
ansible_ssh_user=ec2-user
[gptehosts:children]
openshift
[openshift]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
bastion.cluster3.openshift.opentlc.com ansible_ssh_host=ec2-11-111-111-11.us-west-2.compute.amazonaws.com
bastion.cluster4.openshift.opentlc.com
[dev]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
[prod]
bastion.cluster3.openshift.opentlc.com
bastion.cluster4.openshift.opentlc.com
----
ansible/roles/ocp-workload-fuse-enmasse/tasks/main.yml
@@ -1,21 +1,22 @@
---
- name: Running Pre Workload Tasks
  include: ./pre_workload.yml
  become: true
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  include: ./workload.yml
  become: true
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  include: ./post_workload.yml
  become: true
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  include: ./remove_workload.yml
  become: true
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp-workload-fuse-enmasse/tasks/pre_workload.yml
@@ -11,21 +11,21 @@
#     var: groupadd_register
#     verbosity: 2
- name: Create user Quota - clusterresourcequota
  shell: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
        --hard requests.memory="{{quota_requests_memory}}" \
        --hard limits.memory="{{quota_limits_memory}}" \
        --hard configmaps="{{quota_configmaps}}" \
        --hard pods="{{quota_pods}}" \
        --hard persistentvolumeclaims="{{quota_persistentvolumeclaims}}"  \
        --hard services="{{quota_services}}" \
        --hard secrets="{{quota_secrets}}" \
        --hard requests.storage="{{quota_requests_storage}}"
  ignore_errors: true
#- name: Create user Quota - clusterresourcequota
#  shell: |
#        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
#        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
#        --hard requests.cpu="{{quota_requests_cpu}}" \
#        --hard limits.cpu="{{quota_limits_cpu}}"  \
#        --hard requests.memory="{{quota_requests_memory}}" \
#        --hard limits.memory="{{quota_limits_memory}}" \
#        --hard configmaps="{{quota_configmaps}}" \
#        --hard pods="{{quota_pods}}" \
#        --hard persistentvolumeclaims="{{quota_persistentvolumeclaims}}"  \
#        --hard services="{{quota_services}}" \
#        --hard secrets="{{quota_secrets}}" \
#        --hard requests.storage="{{quota_requests_storage}}"
#  ignore_errors: true
- name: pre_workload Tasks Complete
  debug:
ansible/roles/ocp-workload-fuse-enmasse/tasks/remove_workload.yml
@@ -3,20 +3,12 @@
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: define ocp_project
  set_fact:
    ocp_project: "enmasse-{{guid}}"
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
# - name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}"
#   shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}
#   ignore_errors: true
- name: Remove user Project
  shell: "oc delete project {{ocp_project}}"
  shell: "oc delete project {{namespace}}"
  ignore_errors: true
- name: post_workload Tasks Complete
ansible/roles/ocp-workload-fuse-enmasse/tasks/workload.yml
@@ -1,167 +1,52 @@
---
- name: define ocp_project
  set_fact:
    ocp_project: "enmasse-{{guid}}"
- name: Download enmasse software deployer
  shell: "wget https://github.com/EnMasseProject/enmasse/releases/download/0.15.3/enmasse-0.15.3.tgz -O /tmp/enmasse-0.15.3.tgz"
# The enmasse project now includes ansible playbooks.
# We'll download the enmasse source code to the local filesystem so that it is available to the Ansible control host invoking this task
- stat: path=/tmp/enmasse-{{enmasse_tag}}.tgz
  register: enmasse_zip
  delegate_to: 127.0.0.1
- name: Untar enmasse software deployer
  shell: "tar zxvf /tmp/enmasse-0.15.3.tgz -C /tmp"
- name: Download enmasse project
  get_url:
    url: https://github.com/EnMasseProject/enmasse/releases/download/{{enmasse_tag}}/enmasse-{{enmasse_tag}}.tgz
    dest: /tmp/enmasse-{{enmasse_tag}}.tgz
    mode: 0755
  when: enmasse_zip.stat.exists == False
  delegate_to: 127.0.0.1
- name: Comment oc login commands in script to avoid interactive password request
- name: unzip enmasse project
  command: tar -zxvf /tmp/enmasse-{{enmasse_tag}}.tgz -C /tmp
  when: enmasse_zip.stat.exists == False
  delegate_to: 127.0.0.1
- name: modify hosts in enmasse playbook
  replace:
    dest: /tmp/enmasse-0.15.3/deploy-openshift.sh
    regexp: 'runcmd .oc login'
    replace: '#runcmd .oc login'
    path: "{{enmasse_path}}/ansible/playbooks/openshift/multitenant.yml"
    regexp: '^- hosts: localhost'
    replace: '- hosts: all'
  delegate_to: 127.0.0.1
- name: Run enmasse software deployer
  shell: "/tmp/enmasse-0.15.3/deploy-openshift.sh -m https://master.dev37.openshift.opentlc.com:443 -n {{ocp_project}} -y"
- name: delete enmasse software deployer
  shell: "rm -rf /tmp/enmasse-0.15.3/ /tmp/enmasse-0.15.3.tgz"
- name: annotate the project as requested by user
  shell: "oc annotate namespace {{ocp_project}} openshift.io/requester={{ocp_username}} --overwrite"
# Project and user administration
- name: "Create project for workload {{namespace}}"
  shell: "oc new-project {{namespace}}"
- name: Give ocp_username access to ocp_project
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_project}}"
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{namespace}}"
- name: Make sure we go back to default project
  shell: "oc project default"
- name: annotate the project as requested by user
  shell: "oc annotate namespace {{namespace}} openshift.io/requester={{ocp_username}} --overwrite"
- name: Give ocp_username access to namespace
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{namespace}}"
- name: workload Tasks Complete
  debug:
    msg: workload Tasks Complete
## This was our attempt to manually build this deployment based on the deploy-openshift.sh, it failed
## Everything was created, but for some reason the RS were not created.
#
# - name: Create project for workload
#   shell: "oc new-project {{ocp_project}}"
#   ignore_errors: true
# - name: Make sure we go back do default project
#   shell: "oc project default"
#
# - name: Create service account for address controller
#   shell: "oc create sa enmasse-admin -n {{ocp_project}}"
#
# - name: Add permissions for viewing OpenShift resources to default user
#   shell: "oc policy add-role-to-user view system:serviceaccount:{{ocp_project}}:default -n {{ocp_project}}"
#
# - name: Add permissions for editing OpenShift resources to admin SA
#   shell: "oc policy add-role-to-user edit system:serviceaccount:{{ocp_project}}:enmasse-admin -n {{ocp_project}}"
#
# ########## create_self_signed_cert "oc" "address-controller.${NAMESPACE}.svc.cluster.local" "address-controller-cert"
#
# - name: Create self-signed certificate for address-controller.{{ocp_project}}.svc.cluster.local
#   shell: "openssl req -new -x509 -batch -nodes -days 11000 -out /tmp/{{guid}}-cert.crt -keyout /tmp/{{guid}}-key.orig -subj \"/O=io.enmasse/CN=address-controller.{{ocp_project}}.svc.cluster.local\""
#
# - name: "Convert from PKCS#1 to PKCS#8"
#   shell: "openssl pkcs8 -topk8 -inform pem -in /tmp/{{guid}}-key.orig -outform pem -nocrypt -out /tmp/{{guid}}-key"
#
# - name: save tlskey as fact
#   shell: "base64 -w 0 /tmp/{{guid}}-key"
#   register: tlskey
# - name: save tlscert as fact
#   shell: "base64 -w 0 /tmp/{{guid}}-cert.crt"
#   register: tlscrt
#
# - name: create tls secret file
#   copy:
#     dest: "/tmp/{{guid}}-tls-secret"
#     content: |
#       {
#       "apiVersion": "v1",
#       "kind": "Secret",
#       "metadata": {
#           "name": "address-controller-cert"
#       },
#       "type": "kubernetes.io/tls",
#       "data": {
#           "tls.key": "{{tlskey.stdout}}",
#           "tls.crt": "{{tlscrt.stdout}}"
#       }
#       }
#
# - name: create tls secret object
#   shell: "oc create -f /tmp/{{guid}}-tls-secret -n {{ocp_project}}"
#
# ####### create_self_signed_cert "oc" "none-authservice.${NAMESPACE}.svc.cluster.local" "none-authservice-cert"
# - name: Create self-signed certificate for none-authservice.{{ocp_project}}.svc.cluster.local
#   shell: "openssl req -new -x509 -batch -nodes -days 11000 -out /tmp/{{guid}}auth-cert.crt -keyout /tmp/{{guid}}auth-key.orig -subj \"/O=io.enmasse/CN=none-authservice.{{ocp_project}}.svc.cluster.local\""
#
# - name: "Convert from PKCS#1 to PKCS#8"
#   shell: "openssl pkcs8 -topk8 -inform pem -in /tmp/{{guid}}auth-key.orig -outform pem -nocrypt -out /tmp/{{guid}}auth-key"
#
# - name: save tlskey as fact
#   shell: "base64 -w 0 /tmp/{{guid}}auth-key"
#   register: tlskeyauth
# - name: save tlscert as fact
#   shell: "base64 -w 0 /tmp/{{guid}}auth-cert.crt"
#   register: tlscrtauth
#
# - name: create tls secret file
#   copy:
#     dest: "/tmp/{{guid}}auth-tls-secret"
#     content: |
#       {
#       "apiVersion": "v1",
#       "kind": "Secret",
#       "metadata": {
#           "name": "none-authservice-cert"
#       },
#       "type": "kubernetes.io/tls",
#       "data": {
#           "tls.key": "{{tlskeyauth.stdout}}",
#           "tls.crt": "{{tlscrtauth.stdout}}"
#       }
#       }
#
# - name: create tls secret object
#   shell: "oc create -f /tmp/{{guid}}auth-tls-secret -n {{ocp_project}}"
#
#
# - name: Copy non autheservice file
#   template:
#     src: "noauth.json.j2"
#     dest: /tmp/{{guid}}-noauth-template.json
# - name: Create none authservice
#   shell: "oc create -f /tmp/{{guid}}-noauth-template.json -n {{ocp_project}} "
#
# - name: Copy enmasse template
#   template:
#     src: "enmasse.yaml.j2"
#     dest: /tmp/{{guid}}-enmasse-template.yaml
#
#
#
# - name: create payload fact
#   set_fact:
#     payload: "{ kind: AddressSpace , apiVersion: 'enmasse.io/v1', metadata: { name: default, namespace: {{ocp_project}} }, spec: { type: standard } }"
#
#
# - name: Create configmap
#   copy:
#     dest: "/tmp/{{guid}}-configmap.json"
#     content: |
#       {
#       "apiVersion": "v1",
#       "kind": "ConfigMap",
#       "metadata": {
#         "name": "address-space-default",
#         "labels": {
#             "type": "address-space"
#         }
#       },
#       "data": {
#       "config.json": "{{payload}}"
#       }
#       }
#
# - name: Create enmasse template
#   shell: "oc create -f /tmp/{{guid}}-configmap.json -n {{ocp_project}} "
#
# - name: Create enmasse template
#   shell: "oc process /tmp/{{guid}}-enmasse-template.yaml | oc create -n {{ocp_project}} -f -"
#
ansible/roles/ocp-workload-fuse-ignite/defaults/main.yml
@@ -1,4 +1,5 @@
---
become_override: false
ocp_username: shachar-redhat.com
ocp_user_needs_quota: True
ansible/roles/ocp-workload-fuse-ignite/tasks/main.yml
@@ -1,20 +1,20 @@
---
- name: Running Pre Workload Tasks
  include: ./pre_workload.yml
  become: true
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  include: ./workload.yml
  become: true
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  include: ./post_workload.yml
  become: true
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  include: ./remove_workload.yml
  become: true
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp-workload-parksmap-demo/defaults/main.yml
New file
@@ -0,0 +1,19 @@
---
ocp_username: jmorales-redhat.com
ocp_user_needs_quota: True
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
quota_requests_cpu: 5
quota_limits_cpu: 10
quota_requests_memory: '6Gi'
quota_limits_memory: '20Gi'
quota_configmaps: 4
quota_pods: 20
quota_persistentvolumeclaims: 5
quota_services: 15
quota_secrets: 30
quota_requests_storage: 5Gi
ansible/roles/ocp-workload-parksmap-demo/description.html
New file
@@ -0,0 +1,18 @@
<h1>Parksmap</h1>
<p><i>Parksmap demo</i> showing the app used in the RoadShow FY 17 labs. It's a microservices application using Spring Boot and JBoss EAP.
This demo walks the audience through the process and concepts of developing an application on OpenShift.</p>
<p>Products and Projects:</p>
<ul>
  <li>OpenShift Container Platform</li>
  <li>OpenShift Application Runtimes <br/>(Spring Boot)</li>
  <li>JBoss EAP</li>
</ul>
<p>
  This demo will create two projects: one with the completed demo and the guides to learn how to deliver it, and an empty project to exercise/show the demo in real time.</p>
<b>Provisioning Time:</b> ~5 min <br>
<br>
<p><b>Need support?</b><br>Contact <a href="mailto:rhpds-admins@redhat.com">rhpds-admins@redhat.com</a></p>
ansible/roles/ocp-workload-parksmap-demo/files/workshop-mlbparks.yaml
New file
@@ -0,0 +1,338 @@
kind: List
apiVersion: v1
metadata: {}
items:
- kind: ImageStream
  apiVersion: v1
  metadata:
    name: mlbparks
    labels:
      app: workshop
      component: mlbparks
      role: frontend
  spec:
    lookupPolicy:
      local: false
    tags:
    - name: latest
      importPolicy: {}
      referencePolicy:
        type: Source
- kind: BuildConfig
  apiVersion: v1
  metadata:
    name: mlbparks
    labels:
      app: workshop
      build: mlbparks
      component: mlbparks
  spec:
    failedBuildsHistoryLimit: 5
    output:
      to:
        kind: ImageStreamTag
        name: mlbparks:latest
    postCommit: {}
    resources: {}
    runPolicy: Serial
    source:
      git:
        ref: 1.0.0
        uri: https://github.com/openshift-roadshow/mlbparks-py
      type: Git
    strategy:
      sourceStrategy:
        from:
          kind: ImageStreamTag
          name: python:3.5
          namespace: openshift
        incremental: false
      type: Source
    successfulBuildsHistoryLimit: 5
    triggers:
    - github:
        secret: ri2F3RF6
      type: GitHub
    - generic:
        secret: L4fu58wx
      type: Generic
    - type: ConfigChange
    - imageChange: {}
      type: ImageChange
- kind: DeploymentConfig
  apiVersion: v1
  metadata:
    name: mlbparks
    labels:
      app: workshop
      component: mlbparks
      role: frontend
  spec:
    replicas: 1
    revisionHistoryLimit: 10
    selector:
      app: workshop
      component: mlbparks
      deploymentconfig: mlbparks
      role: frontend
    strategy:
      activeDeadlineSeconds: 21600
      resources: {}
      rollingParams:
        intervalSeconds: 1
        maxSurge: 25%
        maxUnavailable: 25%
        post:
          execNewPod:
            command:
            - curl
            - -s
            - http://mlbparks:8080/ws/data/load
            containerName: mlbparks
          failurePolicy: Ignore
        timeoutSeconds: 600
        updatePeriodSeconds: 1
      type: Rolling
    template:
      metadata:
        labels:
          app: workshop
          component: mlbparks
          deploymentconfig: mlbparks
          role: frontend
      spec:
        containers:
        - env:
          - name: KUBERNETES_NAMESPACE
            valueFrom:
              fieldRef:
                apiVersion: v1
                fieldPath: metadata.namespace
          - name: DB_HOST
            value: mongodb-mlbparks
          - name: DB_USERNAME
            valueFrom:
              configMapKeyRef:
                key: db.user
                name: mlbparks
          - name: DB_PASSWORD
            valueFrom:
              configMapKeyRef:
                key: db.password
                name: mlbparks
          - name: DB_NAME
            valueFrom:
              configMapKeyRef:
                key: db.name
                name: mlbparks
          image: mlbparks
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /ws/healthz/
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 120
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          name: mlbparks
          ports:
          - containerPort: 8080
            protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /ws/healthz/
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 20
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        schedulerName: default-scheduler
        securityContext: {}
        terminationGracePeriodSeconds: 30
    test: false
    triggers:
    - type: ConfigChange
    - imageChangeParams:
        automatic: true
        containerNames:
        - mlbparks
        from:
          kind: ImageStreamTag
          name: mlbparks:latest
      type: ImageChange
- apiVersion: v1
  kind: DeploymentConfig
  metadata:
    labels:
      app: workshop
      component: mlbparks
      role: frontend
    name: mongodb-mlbparks
  spec:
    replicas: 1
    revisionHistoryLimit: 10
    selector:
      app: workshop
      component: mlbparks
      role: frontend
      deploymentconfig: mongodb-mlbparks
    strategy:
      activeDeadlineSeconds: 21600
      recreateParams:
        timeoutSeconds: 600
      resources: {}
      type: Recreate
    template:
      metadata:
        labels:
          app: workshop
          component: mlbparks
          role: frontend
          deploymentconfig: mongodb-mlbparks
      spec:
        containers:
        - env:
          - name: KUBERNETES_NAMESPACE
            valueFrom:
              fieldRef:
                apiVersion: v1
                fieldPath: metadata.namespace
          - name: MONGODB_USER
            valueFrom:
              configMapKeyRef:
                key: db.user
                name: mlbparks
          - name: MONGODB_PASSWORD
            valueFrom:
              configMapKeyRef:
                key: db.password
                name: mlbparks
          - name: MONGODB_DATABASE
            valueFrom:
              configMapKeyRef:
                key: db.name
                name: mlbparks
          - name: MONGODB_ADMIN_PASSWORD
            value: axPDHoCG
          image: centos/mongodb-32-centos7@sha256:bf746d7851d334a3a6afb0842e50081abd9c1d2c2c28123bc5c55c0e2fd2b3bb
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 3
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            tcpSocket:
              port: 27017
            timeoutSeconds: 1
          name: mongodb-mlbparks
          ports:
          - containerPort: 27017
            protocol: TCP
          readinessProbe:
            exec:
              command:
              - /bin/sh
              - -i
              - -c
              - mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD
                --eval="quit()"
            failureThreshold: 3
            initialDelaySeconds: 3
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          resources:
            limits:
              memory: 512Mi
          securityContext:
            capabilities: {}
            privileged: false
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
          - mountPath: /var/lib/mongodb/data
            name: mongodb-data
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        schedulerName: default-scheduler
        securityContext: {}
        terminationGracePeriodSeconds: 30
        volumes:
        - emptyDir: {}
          name: mongodb-data
    test: false
    triggers:
    - imageChangeParams:
        automatic: true
        containerNames:
        - mongodb-mlbparks
        from:
          kind: ImageStreamTag
          name: mongodb:3.2
          namespace: openshift
      type: ImageChange
    - type: ConfigChange
- apiVersion: v1
  kind: Service
  metadata:
    labels:
      app: workshop
      component: mlbparks
      role: frontend
      type: parksmap-backend
    name: mlbparks
  spec:
    ports:
    - name: 8080-tcp
      port: 8080
      protocol: TCP
      targetPort: 8080
    selector:
      deploymentconfig: mlbparks
    sessionAffinity: None
    type: ClusterIP
- apiVersion: v1
  kind: Service
  metadata:
    labels:
      app: workshop
      component: mlbparks
      role: database
    name: mongodb-mlbparks
  spec:
    ports:
    - name: mongo
      port: 27017
      protocol: TCP
      targetPort: 27017
    selector:
      deploymentconfig: mongodb-mlbparks
    sessionAffinity: None
    type: ClusterIP
- kind: ConfigMap
  apiVersion: v1
  metadata:
    name: mlbparks
    labels:
      app: workshop
      component: mlbparks
      role: config
  data:
    application.properties: spring.data.mongodb.uri=mongodb://uservIw:YWSRuDpR@mongodb-mlbparks:27017/mongodb
    db.name: mongodb
    db.password: YWSRuDpR
    db.properties: |-
      db.user=uservIw
      db.password=YWSRuDpR
      db.name=mongodb
    db.user: uservIw
ansible/roles/ocp-workload-parksmap-demo/files/workshop-nationalparks.yaml
New file
@@ -0,0 +1,335 @@
kind: List
apiVersion: v1
metadata: {}
items:
- apiVersion: v1
  kind: ImageStream
  metadata:
    labels:
      app: workshop
      component: nationalparks
      role: frontend
    name: nationalparks
  spec:
    lookupPolicy:
      local: false
    tags:
    - name: latest
      importPolicy: {}
      referencePolicy:
        type: ""
- kind: BuildConfig
  apiVersion: v1
  metadata:
    labels:
      app: workshop
      component: nationalparks
      role: frontend
    name: nationalparks
  spec:
    output:
      to:
        kind: ImageStreamTag
        name: nationalparks:latest
    postCommit: {}
    resources: {}
    runPolicy: Serial
    source:
      git:
        ref: master
        uri: https://github.com/openshift-roadshow/nationalparks-py
      type: Git
    strategy:
      sourceStrategy:
        from:
          kind: ImageStreamTag
          name: python:3.5
          namespace: openshift
      type: Source
    triggers:
    - generic:
        secret: 3ce175ca0406c24d
      type: Generic
    - github:
        secret: "0739955342648413"
      type: GitHub
    - imageChange: {}
      type: ImageChange
    - type: ConfigChange
- kind: DeploymentConfig
  apiVersion: v1
  metadata:
    name: nationalparks
    labels:
      app: workshop
      component: nationalparks
      role: frontend
  spec:
    replicas: 1
    selector:
      deploymentconfig: nationalparks
    strategy:
      activeDeadlineSeconds: 21600
      resources: {}
      rollingParams:
        intervalSeconds: 1
        maxSurge: 25%
        maxUnavailable: 25%
        post:
          execNewPod:
            command:
            - curl
            - -s
            - http://nationalparks:8080/ws/data/load
            containerName: nationalparks
          failurePolicy: Ignore
        timeoutSeconds: 600
        updatePeriodSeconds: 1
      type: Rolling
    template:
      metadata:
        labels:
          app: workshop
          component: nationalparks
          deploymentconfig: nationalparks
          role: frontend
      spec:
        containers:
        - env:
          - name: DB_USERNAME
            valueFrom:
              secretKeyRef:
                key: database-user
                name: mongodb-nationalparks
          - name: DB_PASSWORD
            valueFrom:
              secretKeyRef:
                key: database-password
                name: mongodb-nationalparks
          - name: DB_NAME
            valueFrom:
              secretKeyRef:
                key: database-name
                name: mongodb-nationalparks
          - name: DB_HOST
            value: mongodb-nationalparks
          image: nationalparks
          imagePullPolicy: Always
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /ws/healthz/
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 20
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          name: nationalparks
          ports:
          - containerPort: 8080
            protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /ws/healthz/
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        schedulerName: default-scheduler
        securityContext: {}
        terminationGracePeriodSeconds: 30
    test: false
    triggers:
    - imageChangeParams:
        automatic: true
        containerNames:
        - nationalparks
        from:
          kind: ImageStreamTag
          name: nationalparks:latest
      type: ImageChange
    - type: ConfigChange
- kind: Service
  apiVersion: v1
  metadata:
    name: nationalparks
    labels:
      app: workshop
      component: nationalparks
      role: frontend
      type: parksmap-backend
  spec:
    ports:
    - name: 8080-tcp
      port: 8080
      protocol: TCP
      targetPort: 8080
    selector:
      deploymentconfig: nationalparks
    sessionAffinity: None
    type: ClusterIP
- kind: DeploymentConfig
  apiVersion: v1
  metadata:
    name: mongodb-nationalparks
    labels:
      app: workshop
      component: nationalparks
      role: database
  spec:
    replicas: 1
    selector:
      deploymentconfig: mongodb-nationalparks
      app: workshop
      component: nationalparks
      role: database
    strategy:
      activeDeadlineSeconds: 21600
      recreateParams:
        timeoutSeconds: 600
      resources: {}
      type: Recreate
    template:
      metadata:
        labels:
          deploymentconfig: mongodb-nationalparks
          app: workshop
          component: nationalparks
          role: database
      spec:
        containers:
        - env:
          - name: MONGODB_USER
            valueFrom:
              secretKeyRef:
                key: database-user
                name: mongodb-nationalparks
          - name: MONGODB_PASSWORD
            valueFrom:
              secretKeyRef:
                key: database-password
                name: mongodb-nationalparks
          - name: MONGODB_ADMIN_PASSWORD
            valueFrom:
              secretKeyRef:
                key: database-admin-password
                name: mongodb-nationalparks
          - name: MONGODB_DATABASE
            valueFrom:
              secretKeyRef:
                key: database-name
                name: mongodb-nationalparks
          image: mongodb
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 3
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            tcpSocket:
              port: 27017
            timeoutSeconds: 1
          name: mongodb
          ports:
          - containerPort: 27017
            protocol: TCP
          readinessProbe:
            exec:
              command:
              - /bin/sh
              - -i
              - -c
              - mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD
                --eval="quit()"
            failureThreshold: 3
            initialDelaySeconds: 3
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          resources:
            limits:
              memory: 512Mi
          securityContext:
            capabilities: {}
            privileged: false
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
          - mountPath: /var/lib/mongodb/data
            name: mongodb-data
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        schedulerName: default-scheduler
        securityContext: {}
        terminationGracePeriodSeconds: 30
        volumes:
        - name: mongodb-data
          persistentVolumeClaim:
            claimName: mongodb-nationalparks
    test: false
    triggers:
    - imageChangeParams:
        automatic: true
        containerNames:
        - mongodb
        from:
          kind: ImageStreamTag
          name: mongodb:3.2
          namespace: openshift
      type: ImageChange
    - type: ConfigChange
- kind: Service
  apiVersion: v1
  metadata:
    labels:
      app: workshop
      component: nationalparks
      role: database
    name: mongodb-nationalparks
  spec:
    ports:
    - name: mongo
      port: 27017
      protocol: TCP
      targetPort: 27017
    selector:
      deploymentconfig: mongodb-nationalparks
    sessionAffinity: None
    type: ClusterIP
- kind: Secret
  apiVersion: v1
  metadata:
    name: mongodb-nationalparks
    labels:
      app: workshop
      component: nationalparks
      role: database
  type: Opaque
  data:
    database-admin-password: bW9uZ29kYg==
    database-name: bW9uZ29kYg==
    database-password: bW9uZ29kYg==
    database-user: bW9uZ29kYg==
- kind: PersistentVolumeClaim
  apiVersion: v1
  metadata:
    name: mongodb-nationalparks
    labels:
      app: workshop
      component: nationalparks
      role: database
  spec:
    accessModes:
    - ReadWriteOnce
    resources:
      requests:
        storage: 1Gi
ansible/roles/ocp-workload-parksmap-demo/files/workshop-parksmap.yaml
New file
@@ -0,0 +1,127 @@
kind: List
apiVersion: v1
metadata: {}
items:
- kind: RoleBinding
  apiVersion: v1
  groupNames: null
  metadata:
    name: view
  roleRef:
    name: view
  subjects:
  - kind: ServiceAccount
    name: default
- kind: ImageStream
  apiVersion: v1
  metadata:
    name: parksmap
    labels:
      app: workshop
      component: parksmap
      role: ui
  spec:
    lookupPolicy:
      local: false
    tags:
    - name: 1.2.0
      from:
        kind: DockerImage
        name: docker.io/openshiftroadshow/parksmap:1.2.0
      importPolicy: {}
      referencePolicy:
        type: Source
- apiVersion: v1
  kind: DeploymentConfig
  metadata:
    name: parksmap
    labels:
      app: workshop
      component: parksmap
      role: ui
  spec:
    replicas: 1
    selector:
      app: workshop
      component: parksmap
      role: ui
      deploymentconfig: parksmap
    strategy:
      activeDeadlineSeconds: 21600
      resources: {}
      rollingParams:
        intervalSeconds: 1
        maxSurge: 25%
        maxUnavailable: 25%
        timeoutSeconds: 600
        updatePeriodSeconds: 1
      type: Rolling
    template:
      metadata:
        labels:
          app: workshop
          component: parksmap
          role: ui
          deploymentconfig: parksmap
      spec:
        containers:
        - name: parksmap
          image: parksmap
          imagePullPolicy: IfNotPresent
          ports:
          - containerPort: 8080
            protocol: TCP
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        schedulerName: default-scheduler
        securityContext: {}
        terminationGracePeriodSeconds: 30
    test: false
    triggers:
    - type: ConfigChange
    - imageChangeParams:
        automatic: true
        containerNames:
        - parksmap
        from:
          kind: ImageStreamTag
          name: parksmap:1.2.0
      type: ImageChange
- apiVersion: v1
  kind: Service
  metadata:
    name: parksmap
    labels:
      app: workshop
      component: parksmap
      role: ui
  spec:
    ports:
    - name: 8080-tcp
      port: 8080
      protocol: TCP
      targetPort: 8080
    selector:
      deploymentconfig: parksmap
    sessionAffinity: None
    type: ClusterIP
- apiVersion: v1
  kind: Route
  metadata:
    name: parksmap
    labels:
      app: workshop
      component: parksmap
      role: ui
  spec:
    host:
    port:
      targetPort: 8080-tcp
    to:
      kind: Service
      name: parksmap
      weight: 100
    wildcardPolicy: None
ansible/roles/ocp-workload-parksmap-demo/files/workshopper-template.yaml
New file
@@ -0,0 +1,165 @@
kind: Template
apiVersion: v1
metadata:
  name: guide
  annotations:
    description: Template to deploy a workshop guide with workshopper. See any of the workshops for the variables that can be defined in the ConfigMap.
parameters:
- name: CONTENT_URL_PREFIX
  description: Content URL prefix (e.g. https://raw.githubusercontent.com/osevg/workshopper-content/36)
  value:
  displayName: Content URL prefix
  required: true
- name: WORKSHOPS_URLS
  description: Workshop definition url (e.g. https://raw.githubusercontent.com/osevg/workshopper-content/36/_workshops/training.yml)
  value:
  displayName: Workshop Url
  required: true
- name: CONSOLE_ADDRESS
  description: Console url (e.g. master.mycluster.openshiftworkshop.com or console.mycluster.gce.pixy.io:8443)
  value:
  displayName: Console url
  required: true
- name: ROUTER_ADDRESS
  description: Application subdomain (e.g. apps.mycluster.openshiftworkshop.com or apps.mycluster.gce.pixy.io)
  value:
  displayName: Application subdomain
  required: true
objects:
- kind: ImageStream
  apiVersion: v1
  metadata:
    name: workshopper
    labels:
      app: guides
  spec:
    lookupPolicy:
      local: false
    tags:
    - name: latest
      from:
        kind: DockerImage
        name: osevg/workshopper:latest
      importPolicy: {}
      referencePolicy:
        type: Source
- kind: DeploymentConfig
  apiVersion: v1
  metadata:
    name: guides
    labels:
      app: guides
  spec:
    replicas: 1
    selector:
      app: guides
      deploymentconfig: guides
    strategy:
      activeDeadlineSeconds: 21600
      resources: {}
      rollingParams:
        intervalSeconds: 1
        maxSurge: 25%
        maxUnavailable: 25%
        timeoutSeconds: 600
        updatePeriodSeconds: 1
      type: Rolling
    template:
      metadata:
        labels:
          app: guides
          deploymentconfig: guides
      spec:
        containers:
        - image: workshopper
          envFrom:
            - configMapRef:
                name: guides
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 15
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          name: guides
          ports:
          - containerPort: 8080
            protocol: TCP
          readinessProbe:
            failureThreshold: 5
            httpGet:
              path: /
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 15
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        schedulerName: default-scheduler
        securityContext: {}
        terminationGracePeriodSeconds: 30
    test: false
    triggers:
    - type: ConfigChange
    - imageChangeParams:
        automatic: true
        containerNames:
        - guides
        from:
          kind: ImageStreamTag
          name: workshopper:latest
      type: ImageChange
- kind: Service
  apiVersion: v1
  metadata:
    name: guides
    labels:
      app: guides
  spec:
    ports:
    - name: 8080-tcp
      port: 8080
      protocol: TCP
      targetPort: 8080
    selector:
      app: guides
      deploymentconfig: guides
    sessionAffinity: None
    type: ClusterIP
- kind: Route
  apiVersion: v1
  metadata:
    name: guides
    labels:
      app: guides
  spec:
    host:
    port:
      targetPort: 8080-tcp
    to:
      kind: Service
      name: guides
      weight: 100
    wildcardPolicy: None
- kind: ConfigMap
  apiVersion: v1
  metadata:
    name: guides
    labels:
      app: guides
      role: config
  data:
    CONTENT_URL_PREFIX: ${CONTENT_URL_PREFIX}
    WORKSHOPS_URLS: ${WORKSHOPS_URLS}
    CONSOLE_ADDRESS: ${CONSOLE_ADDRESS}
    ROUTER_ADDRESS: ${ROUTER_ADDRESS}
ansible/roles/ocp-workload-parksmap-demo/readme.adoc
New file
@@ -0,0 +1,133 @@
= ocp-workload-parksmap-demo - Sample Config
== Role overview
* This is a simple role that does the following:
** Playbook: link:./tasks/pre_workload.yml[pre_workload.yml] - Sets up an
 environment for the workload deployment
*** Adds a user to a list of groups defined in the
 link:./defaults/main.yml[defaults file].
*** Sets a cluster resource quota for the user based on the variables in the
 link:./defaults/main.yml[defaults file].
*** Debug task will print out: `pre_workload Tasks Complete`
** Playbook: link:./tasks/workload.yml[workload.yml] - Used to deploy the actual
 workload, e.g., 3scale, Mobile, or a demo
*** In this role it creates the completed and empty Parksmap projects, deploys the demo applications, and installs the workshop guide
*** Debug task will print out: `workload Tasks Complete`
** Playbook: link:./tasks/post_workload.yml[post_workload.yml] - Used to
 configure the workload after deployment
*** In this role it removes the temporary files copied to the host during pre_workload
*** Debug task will print out: `post_workload Tasks Complete`
== Review the defaults variable file
* This file link:./defaults/main.yml[./defaults/main.yml] contains all the variables you
 need to define to control the deployment of your workload.
* You can modify any of these default values by adding
`-e"variable_name=variable_value"` to the `ansible-playbook` command line, as in the sketch below.
=== Deploy Workload on OpenShift Cluster from an existing playbook:
[source,yaml]
----
- name: Deploy a workload role on a master host
  hosts: all
  become: true
  gather_facts: False
  tags:
    - step007
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/{{ocp_workload}}", when: 'ocp_workload is defined' }
----
NOTE: You might want to change `hosts: all` to fit your requirements
=== Common configuration to run these playbooks
You should have these environment variables defined/exported in your system in order
to run these playbooks.
----
HOST_GUID=dev37
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jmorales-redhat.com"
SSH_USER="opentlc-mgr"
SSH_PRIVATE_KEY="id_rsa"
GUID=unpoucode
----
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
WORKLOAD="ocp-workload-parksmap-demo"
# TARGET_HOST is passed directly on the command line instead of using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USER}" \
                 -e"ANSIBLE_REPO_PATH=`pwd`" \
                 -e"ocp_username=${OCP_USERNAME}" \
                 -e"ocp_workload=${WORKLOAD}" \
                 -e"guid=${GUID}" \
                 -e"ocp_user_needs_quota=true" \
                 -e"ocp_master=master.${HOST_GUID}.openshift.opentlc.com" \
                 -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
                 -e"ACTION=create"
----
=== To Delete an environment
Export the common configuration variables shown above, then run:
----
WORKLOAD="ocp-workload-parksmap-demo"
# TARGET_HOST is passed directly on the command line instead of using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                    -e"ansible_ssh_user=${SSH_USER}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ACTION=remove"
----
== Set up your Ansible inventory file
* You can create an Ansible inventory file to define your connection
 method to your host (Master/Bastion with OC command)
* You can also use the command line to define the hosts directly if your `ssh`
 configuration is set to connect to the host correctly
* You can also run against `localhost` if your cluster is already authenticated
 and configured in your `oc` configuration (see the sketch after the example inventory below)
[source, ini]
.example inventory file
----
[gptehosts:vars]
ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem
ansible_ssh_user=ec2-user
[gptehosts:children]
openshift
[openshift]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
bastion.cluster3.openshift.opentlc.com ansible_ssh_host=ec2-11-111-111-11.us-west-2.compute.amazonaws.com
bastion.cluster4.openshift.opentlc.com
[dev]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
[prod]
bastion.cluster3.openshift.opentlc.com
bastion.cluster4.openshift.opentlc.com
----
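
If your workstation already has a working `oc` login to the cluster, you can skip the inventory and SSH setup entirely and run the workload playbook against `localhost` with a local connection. This is only a minimal sketch under that assumption; the remaining variables are the same ones exported in the common configuration above.

----
# Minimal sketch (assumes an existing local `oc` login and a local checkout of this repository)
WORKLOAD="ocp-workload-parksmap-demo"
ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ANSIBLE_REPO_PATH=`pwd`" \
                 -e"ocp_username=${OCP_USERNAME}" \
                 -e"ocp_workload=${WORKLOAD}" \
                 -e"guid=${GUID}" \
                 -e"ocp_master=master.${HOST_GUID}.openshift.opentlc.com" \
                 -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
                 -e"ACTION=create"
----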
ansible/roles/ocp-workload-parksmap-demo/tasks/main.yml
New file
@@ -0,0 +1,20 @@
---
- name: Running Pre Workload Tasks
  include: ./pre_workload.yml
  become: false
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  include: ./workload.yml
  become: false
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  include: ./post_workload.yml
  become: false
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  include: ./remove_workload.yml
  become: false
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp-workload-parksmap-demo/tasks/post_workload.yml
New file
@@ -0,0 +1,9 @@
---
- name: Delete the remote files used in this role
  file:
    path: /tmp/{{guid}}
    state: absent
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully"
ansible/roles/ocp-workload-parksmap-demo/tasks/pre_workload.yml
New file
@@ -0,0 +1,40 @@
---
# - name: Add user to developer group (allowed to create projects)
#   shell: "oadm groups add-users {{item}} {{ocp_username}}"
#   register: groupadd_register
#   with_items: "{{ocp_user_groups}}"
#   when: ocp_username is defined and ocp_user_groups is defined
#
# - name: test that command worked
#   debug:
#     var: groupadd_register
#     verbosity: 2
- name: Create user Quota - clusterresourcequota
  shell: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
        --hard requests.memory="{{quota_requests_memory}}" \
        --hard limits.memory="{{quota_limits_memory}}" \
        --hard configmaps="{{quota_configmaps}}" \
        --hard pods="{{quota_pods}}" \
        --hard persistentvolumeclaims="{{quota_persistentvolumeclaims}}"  \
        --hard services="{{quota_services}}" \
        --hard secrets="{{quota_secrets}}" \
        --hard requests.storage="{{quota_requests_storage}}"
  ignore_errors: true
- name: Copy the files used in this role
  synchronize:
    src: "files/"
    dest: "/tmp/{{guid}}/"
    rsync_opts:
      - "--no-motd"
      - "--exclude=.git,*.qcow2"
- name: pre_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully"
ansible/roles/ocp-workload-parksmap-demo/tasks/remove_workload.yml
New file
@@ -0,0 +1,25 @@
---
- name: pre_workload removal Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: define ocp_project
  set_fact:
    ocp_project: "parksmap-{{guid}}"
    ocp_project_completed: "parksmap-{{guid}}-completed"
- name: Remove the empty demo project
  shell: "oc delete project {{ocp_project}}"
  ignore_errors: true
- name: Remove the completed demo project
  shell: "oc delete project {{ocp_project_completed}}"
  ignore_errors: true
- name: Remove user Quota - oc delete clusterresourcequota "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully - Removed"
ansible/roles/ocp-workload-parksmap-demo/tasks/workload.yml
New file
@@ -0,0 +1,66 @@
---
- name: define ocp_project
  set_fact:
    ocp_project: "parksmap-{{guid}}"
    ocp_project_completed: "parksmap-{{guid}}-completed"
    # Templates come from here: https://raw.githubusercontent.com/jorgemoralespou/ose-sample-apps-layouts
- name: Create project for completed parksmap example
  shell: |
         oc new-project {{ocp_project_completed}} \
         --display-name="Parksmap (completed)" \
         --description="Project with the Parksmap demo completed (and guides)"
  ignore_errors: true
- name: Deploy Parksmap
  shell: "oc apply -f /tmp/{{guid}}/workshop-parksmap.yaml -n {{ocp_project_completed}}"
- name: Deploy Nationalparks
  shell: "oc apply -f /tmp/{{guid}}/workshop-nationalparks.yaml -n {{ocp_project_completed}}"
- name: Deploy MLBParks
  shell: "oc apply -f /tmp/{{guid}}/workshop-mlbparks.yaml -n {{ocp_project_completed}}"
- name: Add the template guide
  shell: "oc apply -f /tmp/{{guid}}/workshopper-template.yaml -n {{ocp_project_completed}}"
- name: Deploy the guide to follow
  shell: |
          oc new-app guide \
          -p CONTENT_URL_PREFIX=https://raw.githubusercontent.com/osevg/workshopper-content/master \
          -p WORKSHOPS_URLS=https://raw.githubusercontent.com/jorgemoralespou/workshopper-content/GPE/_workshops/gpe-parksmap-demo.yml \
          -p CONSOLE_ADDRESS={{ocp_master}} \
          -p ROUTER_ADDRESS={{ocp_apps_domain}} \
          -n "{{ocp_project_completed}}"
- name: Print the guide location
  debug:
    msg: "The guide has been installed at http://guides-{{ocp_project_completed}}.{{ocp_apps_domain}}"
- name: Annotate the completed project as requested by user
  shell: "oc annotate namespace {{ocp_project_completed}} openshift.io/requester={{ocp_username}} --overwrite"
- name: Give user access to the completed project
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_project_completed}}"
#
# Now create an empty project for the user to execute the demo themselves
#
- name: Create project for empty parksmap
  shell: |
         oc new-project {{ocp_project}} \
         --display-name="Parksmap" \
         --description="Project to exercise the Parksmap demo"
  ignore_errors: true
- name: Annotate the empty project as requested by user
  shell: "oc annotate namespace {{ocp_project}} openshift.io/requester={{ocp_username}} --overwrite"
- name: Give user access to the empty project
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_project}}"
- name: workload Tasks Complete
  debug:
    msg: workload Tasks Complete
ansible/roles/opentlc-integration/tasks/main.yml
@@ -42,3 +42,11 @@
    group: "{{ item.name }}"
    recurse: yes
  with_items: "{{ mgr_users }}"
- name: Set .ssh permissions
  file:
    path: "{{ item.home }}/.ssh"
    owner: "{{ item.name }}"
    mode: 0700
    recurse: yes
  with_items: "{{ mgr_users }}"
ansible/software_playbooks/openshift.yml
@@ -86,7 +86,10 @@
- name: Configuring Nfs Host
  gather_facts: False
  become: yes
  hosts: "{{ ('tag_' ~ env_type ~ '_' ~ guid ~ '_support') | replace('-', '_') }}"
  order: sorted
  hosts:
    - support
    - "{{ ('tag_' ~ env_type ~ '_' ~ guid ~ '_support') | replace('-', '_') }}"
  vars_files:
    - "../configs/{{ env_type }}/env_vars.yml"
  roles:
scripts/README.adoc
@@ -15,7 +15,7 @@
ENVTYPE_ARGS=(
-e osrelease=3.5.5.31
-e "bastion_instance_type=t2.large"
-e "master_instance_type=c4.xlarge"
-e "master_instance_type=c4.xlarge"
-e "infranode_instance_type=c4.4xlarge"
-e "node_instance_type=c4.4xlarge"
-e "nfs_instance_type=m3.large"