Tok
2018-07-02 5fad29611a72752b77242935e4310c02ec99e8f7
Merge branch 'development' of https://github.com/sborenst/ansible_agnostic_deployer into development
1 file deleted
74 files added
69 files modified
1 file renamed
10125 lines changed across 145 files
ansible/configs/ansible-provisioner/destroy_env.yml 34
ansible/configs/ansible-provisioner/env_vars.yml 12
ansible/configs/ansible-provisioner/files/cloud_providers/ec2_cloud_template.j2 116
ansible/configs/ansible-provisioner/files/repos.conf.j2 24
ansible/configs/ansible-provisioner/post_software.yml 129
ansible/configs/archive/ocp-implementation-lab-2/How.To.Create.Env.Type.adoc
ansible/configs/archive/ocp-implementation-lab-2/README.adoc 85
ansible/configs/archive/ocp-implementation-lab-2/destroy_env.yml 56
ansible/configs/archive/ocp-implementation-lab-2/env_vars.yml 210
ansible/configs/archive/ocp-implementation-lab-2/files/cloud_providers/ec2_cloud_template.j2 726
ansible/configs/archive/ocp-implementation-lab-2/files/ec2_internal_dns.json.j2 72
ansible/configs/archive/ocp-implementation-lab-2/files/hosts_template.j2 203
ansible/configs/archive/ocp-implementation-lab-2/files/htpasswd.openshift 102
ansible/configs/archive/ocp-implementation-lab-2/files/labs_hosts_template.j2 57
ansible/configs/archive/ocp-implementation-lab-2/files/pvs.j2 17
ansible/configs/archive/ocp-implementation-lab-2/files/repos_template.j2 36
ansible/configs/archive/ocp-implementation-lab-2/files/userpvs.j2 20
ansible/configs/archive/ocp-implementation-lab-2/post_infra.yml 32
ansible/configs/archive/ocp-implementation-lab-2/post_software.yml 39
ansible/configs/archive/ocp-implementation-lab-2/pre_infra.yml 13
ansible/configs/archive/ocp-implementation-lab-2/pre_software.yml 62
ansible/configs/archive/ocp-implementation-lab-2/software.yml 19
ansible/configs/archive/ocp-implementation-lab/files/userpvs.j2 2
ansible/configs/archive/opentlc-shared/files/userpvs.j2 2
ansible/configs/bu-workshop/files/cloud_providers/ec2_cloud_template.j2 3
ansible/configs/lightbulb/README.adoc 8
ansible/configs/lightbulb/env_vars.yml 6
ansible/configs/ocp-adv-deploy-hw/README.adoc 115
ansible/configs/ocp-adv-deploy-hw/destroy_env.yml 31
ansible/configs/ocp-adv-deploy-hw/env_vars.yml 283
ansible/configs/ocp-adv-deploy-hw/files/cloud_providers/ec2_cloud_template.j2 361
ansible/configs/ocp-adv-deploy-hw/files/ec2_internal_dns.json.j2 84
ansible/configs/ocp-adv-deploy-hw/files/hosts_template.3.9.14.j2 266
ansible/configs/ocp-adv-deploy-hw/files/hosts_template.3.9.25.j2 226
ansible/configs/ocp-adv-deploy-hw/files/hosts_template.3.9.27.j2 230
ansible/configs/ocp-adv-deploy-hw/files/htpasswd.openshift 103
ansible/configs/ocp-adv-deploy-hw/files/labs_hosts_template.j2 71
ansible/configs/ocp-adv-deploy-hw/files/pvs.j2 17
ansible/configs/ocp-adv-deploy-hw/files/repos_template.j2 47
ansible/configs/ocp-adv-deploy-hw/files/userpvs.j2 20
ansible/configs/ocp-adv-deploy-hw/post_infra.yml 32
ansible/configs/ocp-adv-deploy-hw/post_ocp_nfs_config.yml 58
ansible/configs/ocp-adv-deploy-hw/post_software.yml 124
ansible/configs/ocp-adv-deploy-hw/pre_infra.yml 13
ansible/configs/ocp-adv-deploy-hw/pre_software.yml 74
ansible/configs/ocp-adv-deploy-hw/software.yml 19
ansible/configs/ocp-clientvm/files/repos_template.j2 4
ansible/configs/ocp-demo-lab/files/userpvs.j2 2
ansible/configs/ocp-ha-disconnected-lab/env_vars.yml 2
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.3.9.14.j2 1
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.3.9.25.j2 1
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.3.9.27.j2 1
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.3.9.30.j2 313
ansible/configs/ocp-ha-disconnected-lab/files/userpvs.j2 2
ansible/configs/ocp-ha-lab/env_vars.yml 2
ansible/configs/ocp-ha-lab/files/hosts_template.3.9.27.j2 3
ansible/configs/ocp-ha-lab/files/hosts_template.3.9.30.j2 235
ansible/configs/ocp-ha-lab/files/userpvs.j2 2
ansible/configs/ocp-implementation-lab/README.adoc 60
ansible/configs/ocp-implementation-lab/destroy_env.yml 25
ansible/configs/ocp-implementation-lab/env_vars.yml 262
ansible/configs/ocp-implementation-lab/files/cloud_providers/ec2_cloud_template.j2 1057
ansible/configs/ocp-implementation-lab/files/ec2_internal_dns.json.j2 22
ansible/configs/ocp-implementation-lab/files/hosts_template.3.9.14.j2 266
ansible/configs/ocp-implementation-lab/files/hosts_template.3.9.25.j2 226
ansible/configs/ocp-implementation-lab/files/hosts_template.3.9.27.j2 230
ansible/configs/ocp-implementation-lab/files/hosts_template.3.9.30.j2 236
ansible/configs/ocp-implementation-lab/files/hosts_template.j2 175
ansible/configs/ocp-implementation-lab/files/htpasswd.openshift 1
ansible/configs/ocp-implementation-lab/files/labs_hosts_template.j2 45
ansible/configs/ocp-implementation-lab/files/pvs.j2 4
ansible/configs/ocp-implementation-lab/files/repos_template.j2 6
ansible/configs/ocp-implementation-lab/files/userpvs.j2 4
ansible/configs/ocp-implementation-lab/post_ocp_nfs_config.yml 58
ansible/configs/ocp-implementation-lab/post_software.yml 95
ansible/configs/ocp-implementation-lab/pre_software.yml 18
ansible/configs/ocp-workshop/env_vars.yml 90
ansible/configs/ocp-workshop/files/cloud_providers/azure_cloud_template.j2 2
ansible/configs/ocp-workshop/files/cloud_providers/ec2_cloud_template.j2 4
ansible/configs/ocp-workshop/files/hosts_template.3.9.14.j2 23
ansible/configs/ocp-workshop/files/hosts_template.3.9.25.j2 23
ansible/configs/ocp-workshop/files/hosts_template.3.9.27.j2 28
ansible/configs/ocp-workshop/files/hosts_template.3.9.30.j2 385
ansible/configs/ocp-workshop/files/hosts_template.j2 23
ansible/configs/ocp-workshop/files/lets-encrypt-x3-cross-signed.pem.txt 27
ansible/configs/ocp-workshop/files/userpvs.j2 2
ansible/configs/ocp-workshop/post_software.yml 32
ansible/configs/ocp-workshop/pre_software.yml 17
ansible/configs/ocp-workshop/scaleup.yml 2
ansible/configs/openshift-demos/msa-cicd-eap-full.yml 118
ansible/configs/openshift-demos/msa-cicd-eap-min.yml 106
ansible/configs/openshift-demos/msa-full.yml 28
ansible/configs/openshift-demos/msa-min.yml 28
ansible/configs/openshift-demos/requirements-prod.yml 18
ansible/configs/openshift-demos/requirements.yml 6
ansible/configs/openshift-demos/verify_all.yml 84
ansible/configs/openshift-demos/verify_tasks.yml 173
ansible/roles/bastion-opentlc-ipa/tasks/main.yml 69
ansible/roles/common/tasks/packages.yml 3
ansible/roles/install-lets-encrypt-certs/tasks/main.yml 113
ansible/roles/install-nexus/files/nexus3-persistent-template.yaml 2
ansible/roles/ocp-workload-3scale-demo/tasks/config.yml 2
ansible/roles/ocp-workload-3scale-demo/tasks/workload.yml 12
ansible/roles/ocp-workload-3scale-experienced/defaults/main.yml 29
ansible/roles/ocp-workload-3scale-experienced/readme.adoc 45
ansible/roles/ocp-workload-3scale-experienced/tasks/main.yml 20
ansible/roles/ocp-workload-3scale-experienced/tasks/post_workload.yml 5
ansible/roles/ocp-workload-3scale-experienced/tasks/pre_workload.yml 32
ansible/roles/ocp-workload-3scale-experienced/tasks/remove_workload.yml 50
ansible/roles/ocp-workload-3scale-experienced/tasks/wait_for_build.yml 23
ansible/roles/ocp-workload-3scale-experienced/tasks/wait_for_deploy.yml 20
ansible/roles/ocp-workload-3scale-experienced/tasks/workload.yml 8
ansible/roles/ocp-workload-amq-enmasse/tasks/workload.yml 25
ansible/roles/ocp-workload-appdev-homework/defaults/main.yml 21
ansible/roles/ocp-workload-appdev-homework/readme.adoc 125
ansible/roles/ocp-workload-appdev-homework/tasks/main.yml 20
ansible/roles/ocp-workload-appdev-homework/tasks/post_workload.yml 6
ansible/roles/ocp-workload-appdev-homework/tasks/pre_workload.yml 33
ansible/roles/ocp-workload-appdev-homework/tasks/remove_workload.yml 50
ansible/roles/ocp-workload-appdev-homework/tasks/workload.yml 5
ansible/roles/ocp-workload-appmod-migration/readme.adoc 8
ansible/roles/ocp-workload-appmod-migration/tasks/remove_workload.yml 35
ansible/roles/ocp-workload-bxms-dm/ilt_provision.sh 2
ansible/roles/ocp-workload-bxms-dm/readme.adoc 4
ansible/roles/ocp-workload-bxms-pam/ilt_provision.sh 2
ansible/roles/ocp-workload-bxms-pam/readme.adoc 4
ansible/roles/ocp-workload-fsi-client-onboarding-demo/defaults/main.yml 2
ansible/roles/ocp-workload-fsi-client-onboarding-demo/files/limit-range.yaml 28
ansible/roles/ocp-workload-fsi-client-onboarding-demo/tasks/workload.yml 6
ansible/roles/ocp-workload-fuse-ignite/ilt_provision.sh 2
ansible/roles/ocp-workload-fuse-ignite/readme.adoc 4
ansible/roles/ocp-workload-fuse-on-ocp/defaults/main.yml 56
ansible/roles/ocp-workload-fuse-on-ocp/ilt_provision.sh 96
ansible/roles/ocp-workload-fuse-on-ocp/readme.adoc 45
ansible/roles/ocp-workload-fuse-on-ocp/tasks/main.yml 20
ansible/roles/ocp-workload-fuse-on-ocp/tasks/post_workload.yml 5
ansible/roles/ocp-workload-fuse-on-ocp/tasks/pre_workload.yml 32
ansible/roles/ocp-workload-fuse-on-ocp/tasks/remove_workload.yml 50
ansible/roles/ocp-workload-fuse-on-ocp/tasks/wait_for_build.yml 23
ansible/roles/ocp-workload-fuse-on-ocp/tasks/wait_for_deploy.yml 20
ansible/roles/ocp-workload-fuse-on-ocp/tasks/workload.yml 30
ansible/roles/ocp-workload-parksmap-demo/files/workshop-parksmap.yaml 9
ansible/software_playbooks/openshift.yml 21
scripts/examples/ocp-adv-deploy-hw.rc 14
tests/scenarii/ansible-provisioner.yml 53
ansible/configs/ansible-provisioner/destroy_env.yml
@@ -1,31 +1,3 @@
- name: Starting environment deployment
  hosts: localhost
  connection: local
  gather_facts: False
  become: no
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tasks:
    - name: Destroy cloudformation template
      cloudformation:
        stack_name: "{{project_tag}}"
        state: "absent"
        region: "{{aws_region}}"
        disable_rollback: false
        tags:
          Stack: "project {{env_type}}-{{ guid }}"
      tags: [ destroying, destroy_cf_deployment ]
      register: cloudformation_result
      until: cloudformation_result|succeeded
      retries: 5
      delay: 60
      ignore_errors: yes
    - name: report Cloudformation error
      fail:
        msg: "FAIL {{ project_tag }} Destroy Cloudformation"
      when: not cloudformation_result|succeeded
      tags: [ destroying, destroy_cf_deployment ]
## we need to add something to delete the env specific key.
---
- name: Import default CloudFormation (aws) destroy playbook
  import_playbook: "{{ANSIBLE_REPO_PATH}}/cloud_providers/{{cloud_provider}}_destroy_env.yml"
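
The inline CloudFormation teardown removed above now lives in a shared per-cloud-provider playbook selected by {{cloud_provider}}. A minimal sketch of the ec2 variant, assuming it mirrors the tasks this commit removes:

# Sketch only: assumed content of cloud_providers/ec2_destroy_env.yml,
# modeled on the inline tasks removed above.
- name: Destroy the environment's CloudFormation stack
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tasks:
    - name: Delete cloudformation stack
      cloudformation:
        stack_name: "{{ project_tag }}"
        state: absent
        region: "{{ aws_region }}"
      register: cloudformation_result
      until: cloudformation_result | succeeded
      retries: 5
      delay: 60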
ansible/configs/ansible-provisioner/env_vars.yml
@@ -20,11 +20,11 @@
install_opentlc_integration: true
################################################################################
#### OCP IMPLEMENTATION LAB
################################################################################
provisioner_public_dns: "admin.{{subdomain_base}}."
zabbix_auto_registration_keyword: linux host
httpd_ssl_cert: /etc/pki/tls/certs/admin.pem
httpd_ssl_key: /etc/pki/tls/private/admin.key
################################################################################
#### Common host variables
@@ -60,7 +60,7 @@
#use_subscriptions: true
#use_own_repos: false
#rhn_pool_id_string: "Red Hat Enterprise Linux Server"
#rhn_pool_id_string: OpenShift Container Platform
rhn_pool_id_string: OpenShift Container Platform
################################################################################
#### CLOUD PROVIDER: AWS SPECIFIC VARIABLES
@@ -88,7 +88,7 @@
#### Environment Sizing
provisioner_instance_type: "c4.2xlarge"
provisioner_instance_type: "t2.2xlarge"
ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
ansible/configs/ansible-provisioner/files/cloud_providers/ec2_cloud_template.j2
@@ -184,7 +184,7 @@
        ]
      }
    },
    "HostUDPPorts": {
    "HostUDPPortsMosh": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
@@ -194,12 +194,12 @@
          ]
        },
        "IpProtocol": "udp",
        "FromPort": "0",
        "ToPort": "65535",
        "FromPort": "60000",
        "ToPort": "61000",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "HostTCPPorts": {
    "HostTCPPortsSSH": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
@@ -209,8 +209,53 @@
          ]
        },
        "IpProtocol": "tcp",
        "FromPort": "0",
        "ToPort": "65535",
        "FromPort": "22",
        "ToPort": "22",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "HostTCPPortsZabbix": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        },
        "IpProtocol": "tcp",
        "FromPort": "10050",
        "ToPort": "10050",
        "CidrIp": "23.246.247.58/32"
      }
    },
    "HostTCPPortsHTTP": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        },
        "IpProtocol": "tcp",
        "FromPort": "80",
        "ToPort": "80",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "HostTCPPortsHTTPS": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        },
        "IpProtocol": "tcp",
        "FromPort": "443",
        "ToPort": "443",
        "CidrIp": "0.0.0.0/0"
      }
    },
@@ -234,7 +279,7 @@
          {
            "Name": "{{provisioner_public_dns}}",
            "Type": "A",
            "TTL": "10",
            "TTL": "300",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
@@ -246,6 +291,15 @@
          }
        ]
      }
    },
    "provisionerEIP": {
        "Type": "AWS::EC2::EIP",
        "DependsOn": "VpcGA",
        "Properties": {
            "InstanceId": {
                "Ref": "Provisioner"
            }
        }
    },
    "Provisioner": {
      "Type": "AWS::EC2::Instance",
@@ -273,35 +327,43 @@
          "Ref": "PublicSubnet"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "admin"
          },
          {
            "Key": "AnsibleGroup",
            "Value": "provisioners"
          },
          {
            "Key": "Project",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "{{ project_tag }}",
            "Value": "provisioner"
          }
            {
                "Key": "Name",
                "Value": "admin"
            },
            {
                "Key": "AnsibleGroup",
                "Value": "provisioners,bastions"
            },
            {
                "Key": "Project",
                "Value": "{{project_tag}}"
            },
            {
                "Key": "{{ project_tag }}",
                "Value": "provisioner"
            },
            {
                "Key": "internaldns",
                "Value": "{{provisioner_public_dns}}"
            }
        ],
        "BlockDeviceMappings": [
          {
            "DeviceName": "/dev/xvda",
            "DeviceName": "/dev/sda1",
            "Ebs": {
              "VolumeSize": 30
              "VolumeSize": 100
            }
          },
          {
            "DeviceName": "/dev/xvdb",
            "Ebs": {
              "VolumeType": "gp2",
              "VolumeSize": 300
                "VolumeType": "st1",
                "VolumeSize": 500
{%- if srv_snapshot_id is defined %}
,
                "SnapshotId": "{{ srv_snapshot_id }}"
{% endif %}
            }
          }
        ]
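
Besides narrowing the security group rules (mosh UDP 60000-61000, SSH 22, Zabbix 10050 from a single address, HTTP/HTTPS) and pinning an Elastic IP, the template now makes the 500 GB st1 data volume restorable from an EBS snapshot via the {%- if srv_snapshot_id is defined %} guard. A sketch of opting in; the snapshot ID below is a placeholder:

# Sketch only: extra vars enabling the snapshot path (ID is illustrative).
srv_snapshot_id: snap-0123456789abcdef0

When the variable is set, post_software.yml (further down) skips LVM creation and only reactivates the volume group.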
ansible/configs/ansible-provisioner/files/repos.conf.j2
New file
@@ -0,0 +1,24 @@
<VirtualHost *:80>
  ServerName {{ provisioner_public_dns }}
  Alias /repos /srv/repos
<Directory "/srv/repos">
    AllowOverride None
    # Allow open access:
    Require all granted
    Options Indexes FollowSymLinks
</Directory>
</VirtualHost>
<VirtualHost *:443>
  ServerName {{ provisioner_public_dns }}
  Alias /repos /srv/repos
  SSLEngine on
  SSLCertificateFile {{ httpd_ssl_cert }}
  SSLCertificateKeyFile {{ httpd_ssl_key }}
  <Directory "/srv/repos">
    AllowOverride None
    # Allow open access:
    Require all granted
    Options Indexes FollowSymLinks
  </Directory>
</VirtualHost>
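
This vhost publishes /srv/repos at /repos over both plain HTTP and TLS, which is the URL that own_repo_path points at in the README run examples (e.g. https://admin.example.com/repos/ocp/3.5/). A sketch of a client-side task consuming it; the repo id and file name are illustrative:

# Sketch only: lab hosts point yum at the provisioner-hosted repos.
- name: Configure yum to use the provisioner-hosted repos
  copy:
    dest: /etc/yum.repos.d/open.repo
    content: |
      [rhel-7-server-rpms]
      name=Red Hat Enterprise Linux 7 (provisioner mirror)
      baseurl={{ own_repo_path }}/rhel-7-server-rpms
      enabled=1
      gpgcheck=0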
ansible/configs/ansible-provisioner/post_software.yml
@@ -91,6 +91,22 @@
      with_items: "{{ mgr_users }}"
      no_log: yes
    - name: Insert awscli credentials (list version)
      blockinfile:
        dest: "{{ item.home }}/.aws/credentials"
        owner: "{{ item.name }}"
        group: "{{ item.name }}"
        content: |
          [{{item.name}}]
          aws_access_key_id = {{ item.aws_access_key_id }}
          aws_secret_access_key = {{ item.aws_secret_access_key }}
      when:
        - item.aws_credentials is defined
        - item.aws_credentials.aws_access_key_id is defined
        - item.aws_credentials.aws_secret_access_key is defined
      with_items: "{{ mgr_users }}"
      no_log: yes
    - name: Copy boto.cfg file to /etc
      copy:
        src: "{{ ANSIBLE_REPO_PATH }}/inventory/boto.cfg"
@@ -105,40 +121,103 @@
      yum:
        name: lvm2
    - lvg:
        vg: "{{ storage_vg }}"
        pvs: "{{ storage_pvs }}"
      ignore_errors: true
    # if we created the volume from a snapshot, do not run lvm commands
    - when: srv_snapshot_id is not defined
      block:
      - lvg:
          vg: "{{ storage_vg }}"
          pvs: "{{ storage_pvs }}"
        ignore_errors: true
    - lvol:
        vg: "{{ storage_vg }}"
        lv: storagelv
        size: 100%FREE
      ignore_errors: true
      - lvol:
          vg: "{{ storage_vg }}"
          lv: storagelv
          size: 100%FREE
        ignore_errors: true
    - filesystem:
        fstype: ext4
        dev: "/dev/{{ storage_vg }}/storagelv"
      ignore_errors: true
      - filesystem:
          fstype: ext4
          dev: "/dev/{{ storage_vg }}/storagelv"
        ignore_errors: true
    - file:
        path: "{{ storage_mount_path }}"
        state: directory
        mode: 0777
      - file:
          path: "{{ storage_mount_path }}"
          state: directory
          mode: 0777
    - name: Activate LVM Volume Group
      command: vgchange -ay
      when: srv_snapshot_id is defined
    - selinux:
        policy: targeted
        state: permissive
    - lineinfile:
        dest: /etc/fstab
        insertafter: EOF
        line: "/dev/{{ storage_vg }}/storagelv {{ storage_mount_path}} ext4 defaults 0 0"
        state: present
      ignore_errors: true
    - file:
        path: "{{ storage_mount_path }}"
        state: directory
    - shell: "mkdir -p {{ storage_mount_path }} &&  mount {{ storage_mount_path }}"
      ignore_errors: true
    - name: Mount and create fstab
      mount:
        path: "{{ storage_mount_path }}"
        src: "/dev/{{ storage_vg }}/storagelv"
        fstype: ext4
        state: mounted
    - name: Try to install mosh (we love it)
      become: true
      yum:
        name: http://dl.fedoraproject.org/pub/epel/7/x86_64/Packages/m/mosh-1.3.0-1.el7.x86_64.rpm
      ignore_errors: yes
    - copy:
        dest: /etc/yum.repos.d/azure-cli.repo
        content: |
          [azure-cli]
          name=Azure CLI
          baseurl=https://packages.microsoft.com/yumrepos/azure-cli
          enabled=1
          gpgcheck=1
          gpgkey=https://packages.microsoft.com/keys/microsoft.asc
    - name: Install azure-cli
      yum:
        name: azure-cli
    - name: install python azure module
      pip:
        name: ansible[azure]
    - name: install httpd
      yum:
        name: "{{ item }}"
      with_items:
        - httpd
        - mod_ssl
      tags: httpd
    - name: Generate a Self Signed OpenSSL certificate
      command: >-
        openssl req -x509 -nodes -days 3650
        -newkey rsa:2048 -keyout {{ httpd_ssl_key }}
        -out {{ httpd_ssl_cert }}
        -subj '/CN={{provisioner_public_dns}}/O=Red Hat/C=US'
      args:
        creates: /etc/pki/tls/certs/admin.pem
      tags: httpd
    - name: add httpd repos.conf
      template:
        src: files/repos.conf.j2
        dest: /etc/httpd/conf.d/repos.conf
      tags: httpd
    - name: Enable and start httpd
      service:
        name: httpd
        enabled: yes
        state: started
      tags: httpd
- name: Zabbix
  hosts: provisioners
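
Distilled, the storage handling above now branches on srv_snapshot_id: fresh volumes get the lvg/lvol/filesystem treatment, snapshot-restored ones only need the volume group reactivated, and both paths converge on the mount module, which also writes the fstab entry (replacing the earlier lineinfile plus "mkdir && mount" shell step). A condensed restatement of that flow, using the same modules:

# Condensed sketch of the storage flow introduced above.
- when: srv_snapshot_id is not defined
  block:
    - lvg:
        vg: "{{ storage_vg }}"
        pvs: "{{ storage_pvs }}"
    - lvol:
        vg: "{{ storage_vg }}"
        lv: storagelv
        size: 100%FREE
    - filesystem:
        fstype: ext4
        dev: "/dev/{{ storage_vg }}/storagelv"

- name: Activate LVM Volume Group restored from snapshot
  command: vgchange -ay
  when: srv_snapshot_id is defined

- name: Mount and create fstab entry
  mount:
    path: "{{ storage_mount_path }}"
    src: "/dev/{{ storage_vg }}/storagelv"
    fstype: ext4
    state: mounted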
ansible/configs/archive/ocp-implementation-lab-2/How.To.Create.Env.Type.adoc
ansible/configs/archive/ocp-implementation-lab-2/README.adoc
New file
@@ -0,0 +1,85 @@
= OPENTLC OCP-IMPLEMENTATION-LAB Env_Type config
This config defines, for example, EC2 instance names and secret
variables such as private/public key pair information and passwords.
Eventually, all sensitive information will be encrypted via Ansible Vault;
instructions for doing this will be included in a later release.
== Set up your "Secret" variables
* You need to provide some credentials for deployments to work
* Create a file called "env_secret_vars.yml" and put it in the
 ./ansible/configs/CONFIGNAME/ directory.
** At this point this file has to be created even if no vars from it are used.
* You can choose to provide these values as extra vars (-e "var=value") on the
 command line if you prefer not to keep sensitive information in a file.
* In the future we will use Ansible Vault for this.
.Example contents of "Secret" Vars file
----
# ## Logon credentials for Red Hat Network
# ## Required if using the subscription component
# ## of this playbook.
rhel_subscription_user: ''
rhel_subscription_pass: ''
#
# ## LDAP Bind Password
bindPassword: ''
#
# ## Desired openshift admin name and password
admin_user: ""
admin_user_password: ""
#
# ## AWS Credentials. This is required.
aws_access_key_id: ""
aws_secret_access_key: ""
----
== Review the Env_Type variable file
* This file link:./env_vars.yml[./env_vars.yml] contains all the variables you
 need to define to control the deployment of your environment.
== Running Ansible Playbook
. You can run the playbook with the following arguments to override the default variable values:
[source,bash]
----
# Set your environment variables (optional, but makes life easier)
REGION=us-east-1
KEYNAME=ocpkey
GUID=testimp35
ENVTYPE="ocp-implementation-lab"
CLOUDPROVIDER=ec2
HOSTZONEID='Z3IHLWJZOU9SRT'
REPO_PATH='https://admin.example.com/repos/ocp/3.5/'
BASESUFFIX='.example.opentlc.com'
REPO_VERSION=3.5
NODE_COUNT=2
IPAPASS=ipapass
## For a HA environment that is not installed with OpenShift
time ansible-playbook ./main.yml \
    -e "osrelease=3.5.5.5" -e "repo_version=${REPO_VERSION}" -e "docker_version=1.12.6" \
    -e "guid=${GUID}" -e "env_type=${ENVTYPE}" -e "cloud_provider=${CLOUDPROVIDER}" \
    -e "aws_region=${REGION}"  -e "HostedZoneId=${HOSTZONEID}" -e "key_name=${KEYNAME}" \
    -e "subdomain_base_suffix=${BASESUFFIX}"  -e "install_idm=htpasswd" \
    -e "node_instance_count=${NODE_COUNT}" -e "infranode_instance_count=1" -e "master_instance_count=1" \
    -e "software_to_deploy=none"  -e "own_repo_path=${REPO_PATH}" -e "ipa_host_password=${IPAPASS}" \
    -e "tower_run=false"
. To delete an environment
----
#To Destroy an Env
ansible-playbook ./configs/${ENVTYPE}/destroy_env.yml \
 -e "guid=${GUID}" -e "env_type=${ENVTYPE}"  -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}"  \
 -e "HostedZoneId=${HOSTZONEID}"  -e "key_name=${KEYNAME}"  -e "subdomain_base_suffix=${BASESUFFIX}"
----
ansible/configs/archive/ocp-implementation-lab-2/destroy_env.yml
New file
@@ -0,0 +1,56 @@
- name: Starting environment deployment
  hosts: localhost
  connection: local
  gather_facts: False
  become: no
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tasks:
    # - name: get internal dns zone id if not provided
    #   environment:
    #     AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    #     AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
    #     AWS_DEFAULT_REGION: "{{aws_region}}"
    #   shell: "aws route53 list-hosted-zones-by-name --region={{aws_region}} --dns-name={{guid}}.internal. --output text --query='HostedZones[*].Id' | awk -F'/' '{print $3}'"
    #   register: internal_zone_id_register
    # - debug:
    #     var: internal_zone_id_register
    # - name: Store internal route53 ID
    #   set_fact:
    #     internal_zone_id: "{{ internal_zone_id_register.stdout }}"
    #   when: 'internal_zone_id_register is defined'
    # - name: delete internal dns names
    #   environment:
    #     AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    #     AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
    #     AWS_DEFAULT_REGION: "{{aws_region}}"
    #   shell: "aws route53 change-resource-record-sets --hosted-zone-id {{internal_zone_id}}  --change-batch file://{{ ANSIBLE_REPO_PATH }}/workdir/internal_dns-{{ env_type }}-{{ guid }}_DELETE.json --region={{aws_region}}"
    #   ignore_errors: true
    #   tags:
    #     - internal_dns_delete
    #   when: internal_zone_id is defined
    - name: Destroy cloudformation template
      cloudformation:
        stack_name: "{{project_tag}}"
        state: "absent"
        region: "{{aws_region}}"
        disable_rollback: false
        tags:
          Stack: "project {{env_type}}-{{ guid }}"
      tags: [ destroying, destroy_cf_deployment ]
      register: cloudformation_result
      until: cloudformation_result|succeeded
      retries: 5
      delay: 60
      ignore_errors: yes
    - name: report Cloudformation error
      fail:
        msg: "FAIL {{ project_tag }} Destroy Cloudformation"
      when: not cloudformation_result|succeeded
      tags: [ destroying, destroy_cf_deployment ]
## we need to add something to delete the env specific key.
ansible/configs/archive/ocp-implementation-lab-2/env_vars.yml
New file
@@ -0,0 +1,210 @@
## TODO: What variables can we strip out of here to build complex variables?
## i.e. what can we add into group_vars as opposed to config_vars?
## Example: We don't really need "subdomain_base_short". If we want to use this,
## should just toss in group_vars/all.
### Also, we should probably just create a variable reference in the README.md
### For now, just tagging comments in line with configuration file.
deploy_local_ssh_config_location: "{{ ANSIBLE_REPO_PATH }}/workdir"
###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
###### OR PASS as "-e" args to ansible-playbook command
# #
# # env_groups:
# #   limit: "tag_Project_opentlc_shared_{{guid}}"
# #   bastions: "tag_AnsibleGroup_bastions"
# #   masters: "tag_AnsibleGroup_masters"
# #   nodes: "tag_AnsibleGroup_nodes"
# #   infranodes: "tag_AnsibleGroup_infranodes"
# #   nfs: "tag_AnsibleGroup_nfs"
#
# # This doesn't work
# all: "tag_Project_opentlc_shared_{{guid}}"
#
# # but maybe this is silly enough to work
# #all: "tag_Project_opentlc_shared_{{guid}}:&tag_Project_opentlc_shared_{{guid}}"
#rhn_pool_id_string: OpenShift Container Platform
# bastions: "{{env_groups['limit']}}:&{{env_groups['bastions']}}"
# masters: "{{env_groups['limit']}}:&{{env_groups['masters']}}"
# nodes: "{{env_groups['limit']}}:&{{env_groups['nodes']}}"
# infranodes: "{{env_groups['limit']}}:&{{env_groups['infranodes']}}"
# nfs: "{{env_groups['limit']}}:&{{env_groups['nfs']}}"
#
# ocp_pvs:
#   - es-storage
#   - nexus
#   - nexus2
#   - nexus3
config_nfs_uservols: "true"
user_vols: 200
user_vols_size: 4Gi
# master_api_port: 443
# osrelease: 3.4.1.10
# openshift_master_overwrite_named_certificates: true
# deploy_openshift: true
# deploy_openshift_post: true
deploy_env_post: true
# install_metrics: true
# install_logging: true
# multi_tenant_setting: "os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'"
# master_lb_dns: "master.{{subdomain_base}}"
# cloudapps_suffix: 'cloudapps.{{subdomain_base}}'
# openshift_master_ldap_ca_file: 'openshift_master_ldap_ca_file=/root/ca.crt'
################################################################################
#### GENERIC EXAMPLE
################################################################################
install_common: true
install_nfs: true
install_bastion: false
env_authorized_key: "{{guid}}key"
set_env_authorized_key: true
software_to_deploy: "none"
################################################################################
#### OCP IMPLEMENTATION LAB
################################################################################
repo_version: '3.4'
cloudapps_dns: '*.apps.{{subdomain_base}}.'
master_public_dns: "master.{{subdomain_base}}."
################################################################################
#### Common host variables
################################################################################
update_packages: false
common_packages:
  # - python
  # - unzip
  # - bash-completion
  - tmux
  # - bind-utils
  # - wget
  # - git
  # - vim-enhanced
  # - ansible
rhel_repos:
  - rhel-7-server-rpms
  - rhel-7-server-extras-rpms
  - rhel-7-server-ose-{{repo_version}}-rpms
use_own_repos: true
use_subscription_manager: false
#rhn_pool_id_string: "Red Hat Enterprise Linux Server"
#rhn_pool_id_string: OpenShift Container Platform
################################################################################
#### nfs host settings
################################################################################
nfs_vg: nfsvg
nfs_pvs: /dev/xvdb
nfs_export_path: /srv/nfs
nfs_shares:
  - logging
  - metrics
  - jenkins
  - nexus
  - justanother
################################################################################
#### CLOUD PROVIDER: AWS SPECIFIC VARIABLES
################################################################################
#### Route 53 Zone ID (AWS)
HostedZoneId: ''
key_name: ''
aws_region: us-east-1
admin_user: ''
admin_user_password: ''
#### Connection Settings
ansible_ssh_user: ec2-user
remote_user: ec2-user
#### Networking (AWS)
guid: defaultguid
subdomain_base_short: "{{ guid }}"
subdomain_base_suffix: ".example.opentlc.com"
subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"
tower_run: false
#### Environment Sizing
#bastion_instance_type: "t2.micro"
bastion_instance_type: "t2.small"
support_instance_type: "m4.large"
support_instance_count: 1
node_instance_type: "t2.large"
node_instance_count: 2
infranode_instance_type: "t2.large"
infranode_instance_count: 1
master_instance_type: "t2.large"
master_instance_count: 1
loadbalancer_instance_count: 0
#loadbalancer_instance_type: "t2.micro"
loadbalancer_instance_type: "t2.small"
ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
#### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYMENT
#### You can, but you usually wouldn't need to.
#### CLOUDFORMATIONS vars
project_tag: "{{ env_type }}-{{ guid }}"
#
# docker_version: "1.12.6"
# docker_device: /dev/xvdb
create_internal_dns_entries: true
zone_internal_dns: "{{guid}}.internal."
chomped_zone_internal_dns: "{{guid}}.internal"
zone_public_dns: "{{subdomain_base}}."
bastion_public_dns: "bastion.{{subdomain_base}}."
bastion_public_dns_chomped: "bastion.{{subdomain_base}}"
vpcid_cidr_block: "192.168.0.0/16"
vpcid_name_tag: "{{subdomain_base}}"
az_1_name: "{{ aws_region }}a"
az_2_name: "{{ aws_region }}b"
subnet_private_1_cidr_block: "192.168.2.0/24"
subnet_private_1_az: "{{ az_2_name }}"
subnet_private_1_name_tag: "{{subdomain_base}}-private"
subnet_private_2_cidr_block: "192.168.1.0/24"
subnet_private_2_az: "{{ az_1_name }}"
subnet_private_2_name_tag: "{{subdomain_base}}-private"
subnet_public_1_cidr_block: "192.168.10.0/24"
subnet_public_1_az: "{{ az_1_name }}"
subnet_public_1_name_tag: "{{subdomain_base}}-public"
subnet_public_2_cidr_block: "192.168.20.0/24"
subnet_public_2_az: "{{ az_2_name }}"
subnet_public_2_name_tag: "{{subdomain_base}}-public"
dopt_domain_name: "{{ aws_region }}.compute.internal"
rtb_public_name_tag: "{{subdomain_base}}-public"
rtb_private_name_tag: "{{subdomain_base}}-private"
cf_template_description: "{{ env_type }}-{{ guid }} template"
ansible/configs/archive/ocp-implementation-lab-2/files/cloud_providers/ec2_cloud_template.j2
New file
@@ -0,0 +1,726 @@
{
  "AWSTemplateFormatVersion": "2010-09-09",
  "Parameters": { },
  "Mappings": {
    "RegionMapping": {
      "us-east-1": {
        "AMI": "ami-c998b6b2"
      },
      "us-east-2": {
        "AMI": "ami-cfdafaaa"
      },
      "us-west-1": {
        "AMI": "ami-66eec506"
      },
      "us-west-2": {
        "AMI": "ami-9fa343e7"
      },
      "eu-west-1": {
        "AMI": "ami-bb9a6bc2"
      },
      "eu-central-1": {
        "AMI": "ami-d74be5b8"
      },
      "ap-northeast-1": {
        "AMI": "ami-30ef0556"
      },
      "ap-northeast-2": {
        "AMI": "ami-0f5a8361"
      },
      "ap-southeast-1": {
        "AMI": "ami-10bb2373"
      },
      "ap-southeast-2": {
        "AMI": "ami-ccecf5af"
      },
      "sa-east-1": {
        "AMI": "ami-a789ffcb"
      },
      "ap-south-1": {
        "AMI": "ami-cdbdd7a2"
      }
    },
    "DNSMapping": {
      "us-east-1": {
        "domain": "us-east-1.compute.internal"
      },
      "us-west-1": {
        "domain": "us-west-1.compute.internal"
      },
      "us-west-2": {
        "domain": "us-west-2.compute.internal"
      },
      "eu-west-1": {
        "domain": "eu-west-1.compute.internal"
      },
      "eu-central-1": {
        "domain": "eu-central-1.compute.internal"
      },
      "ap-northeast-1": {
        "domain": "ap-northeast-1.compute.internal"
      },
      "ap-northeast-2": {
        "domain": "ap-northeast-2.compute.internal"
      },
      "ap-southeast-1": {
        "domain": "ap-southeast-1.compute.internal"
      },
      "ap-southeast-2": {
        "domain": "ap-southeast-2.compute.internal"
      },
      "sa-east-1": {
        "domain": "sa-east-1.compute.internal"
      },
      "ap-south-1": {
        "domain": "ap-south-1.compute.internal"
      }
    }
  },
  "Resources": {
    "Vpc": {
      "Type": "AWS::EC2::VPC",
      "Properties": {
        "CidrBlock": "192.199.0.0/16",
        "EnableDnsSupport": "true",
        "EnableDnsHostnames": "true",
        "Tags": [
          {
            "Key": "Name",
            "Value": "VPCID_NAME_TAG"
          },
          {
            "Key": "Hostlication",
            "Value": {
              "Ref": "AWS::StackId"
            }
          }
        ]
      }
    },
    "VpcInternetGateway": {
      "Type": "AWS::EC2::InternetGateway",
      "Properties": {}
    },
    "VpcGA": {
      "Type": "AWS::EC2::VPCGatewayAttachment",
      "Properties": {
        "InternetGatewayId": {
          "Ref": "VpcInternetGateway"
        },
        "VpcId": {
          "Ref": "Vpc"
        }
      }
    },
    "VpcRouteTable": {
      "Type": "AWS::EC2::RouteTable",
      "Properties": {
        "VpcId": {
          "Ref": "Vpc"
        }
      }
    },
    "VPCRouteInternetGateway": {
      "DependsOn" : "VpcGA",
  "Type": "AWS::EC2::Route",
      "Properties": {
        "GatewayId": {
          "Ref": "VpcInternetGateway"
        },
        "DestinationCidrBlock": "0.0.0.0/0",
        "RouteTableId": {
          "Ref": "VpcRouteTable"
        }
      }
    },
    "PublicSubnet": {
      "Type": "AWS::EC2::Subnet",
      "DependsOn": [
        "Vpc"
      ],
      "Properties": {
        "CidrBlock": "192.199.0.0/24",
        "Tags": [
          {
            "Key": "Name",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "Hostlication",
            "Value": {
              "Ref": "AWS::StackId"
            }
          }
        ],
        "MapPublicIpOnLaunch": "true",
        "VpcId": {
          "Ref": "Vpc"
        }
      }
    },
    "PublicSubnetRTA": {
      "Type": "AWS::EC2::SubnetRouteTableAssociation",
      "Properties": {
        "RouteTableId": {
          "Ref": "VpcRouteTable"
        },
        "SubnetId": {
          "Ref": "PublicSubnet"
        }
      }
    },
    "HostSG": {
      "Type": "AWS::EC2::SecurityGroup",
      "Properties": {
        "GroupDescription": "Host",
        "VpcId": {
          "Ref": "Vpc"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "host_sg"
          }
        ]
      }
    },
    "HostUDPPorts": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        },
        "IpProtocol": "udp",
        "FromPort": "0",
        "ToPort": "65535",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "HostTCPPorts": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        },
        "IpProtocol": "tcp",
        "FromPort": "0",
        "ToPort": "65535",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "zoneinternalidns": {
      "Type": "AWS::Route53::HostedZone",
      "Properties": {
        "Name": "{{ zone_internal_dns }}",
        "VPCs" :  [{
      "VPCId": { "Ref" : "Vpc" },
      "VPCRegion": { "Ref": "AWS::Region" } } ],
        "HostedZoneConfig": {
          "Comment": "Created By ansible agnostic deployer"
        }
      }
    },
    "BastionDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "Properties": {
        "HostedZoneId": "{{HostedZoneId}}",
        "RecordSets": [
          {
            "Name": "{{bastion_public_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "Bastion",
                  "PublicIp"
                ]
              }
            ]
          }
        ]
      }
    },
    "MasterDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "Properties": {
        "HostedZoneId": "{{HostedZoneId}}",
        "RecordSets": [
          {
            "Name": "{{master_public_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "master1",
                  "PublicIp"
                ]
              }
            ]
          }
        ]
      }
    },
    "CloudDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "DependsOn": "Bastion",
      "Properties": {
        "HostedZoneId": "{{HostedZoneId}}",
        "RecordSets": [
          {
            "Name": "{{cloudapps_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "Bastion",
                  "PublicIp"
                ]
              }
            ]
          }
        ]
      }
    },
    "Bastion": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {
          "Fn::FindInMap": [
            "RegionMapping",
            {
              "Ref": "AWS::Region"
            },
            "AMI"
          ]
        },
        "InstanceType": "{{bastion_instance_type}}",
        "KeyName": "{{key_name}}",
        "SecurityGroupIds": [
          {
            "Fn::GetAtt": [
              "HostSG",
              "GroupId"
            ]
          }
        ],
        "SubnetId": {
          "Ref": "PublicSubnet"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "bastion"
          },
          {
            "Key": "AnsibleGroup",
            "Value": "bastions"
          },
          {
            "Key": "Project",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "{{ project_tag }}",
            "Value": "bastion"
          }
        ]
      }
  },
  "BastionInternalDNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
      "RecordSets": [
        {
          "Name": "bastion.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "Bastion",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
  {% for c in range(1,(master_instance_count|int)+1) %}
    "master{{c}}": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {
          "Fn::FindInMap": [
            "RegionMapping",
            {
              "Ref": "AWS::Region"
            },
            "AMI"
          ]
        },
        "InstanceType": "{{master_instance_type}}",
        "KeyName": "{{key_name}}",
        "SecurityGroupIds": [
          {
            "Fn::GetAtt": [
              "HostSG",
              "GroupId"
            ]
          }
        ],
        "SubnetId": {
          "Ref": "PublicSubnet"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "master"
          },
          {
            "Key": "AnsibleGroup",
            "Value": "masters"
          },
          {
            "Key": "Project",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "{{ project_tag }}",
            "Value": "master"
          }
        ],
        "BlockDeviceMappings": [
          {
            "DeviceName": "/dev/xvda",
            "Ebs": {
              "VolumeSize": 30
            }
          },
          {
            "DeviceName": "/dev/xvdb",
            "Ebs": {
              "VolumeType": "gp2",
              "VolumeSize": 20
            }
          }
        ]
      }
  },
  "master{{loop.index}}DNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
      "RecordSets": [
        {
          "Name": "master{{loop.index}}.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "master{{loop.index}}",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
  {% endfor %}
  {% for c in range(1,(node_instance_count|int)+1) %}
  "node{{loop.index}}": {
    "Type": "AWS::EC2::Instance",
    "Properties": {
      "ImageId": {
        "Fn::FindInMap": [
          "RegionMapping",
          {
            "Ref": "AWS::Region"
          },
          "AMI"
        ]
      },
      "InstanceType": "{{node_instance_type}}",
      "KeyName": "{{key_name}}",
      "SecurityGroupIds": [
        {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        }
      ],
      "SubnetId": {
        "Ref": "PublicSubnet"
      },
      "Tags": [
        {
          "Key": "Name",
          "Value": "node"
        },
        {
          "Key": "AnsibleGroup",
          "Value": "nodes"
        },
        {
          "Key": "Project",
          "Value": "{{project_tag}}"
        },
        {
          "Key": "{{ project_tag }}",
          "Value": "node"
        }
      ],
      "BlockDeviceMappings": [
        {
          "DeviceName": "/dev/xvda",
          "Ebs": {
            "VolumeSize": 30
          }
        },
        {
          "DeviceName": "/dev/xvdb",
          "Ebs": {
            "VolumeType": "gp2",
            "VolumeSize": 100
          }
        }
      ]
    }
  },
  "node{{loop.index}}DNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
      "RecordSets": [
        {
          "Name": "node{{loop.index}}.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "node{{loop.index}}",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
  {% endfor %}
  {% for c in range(1,(infranode_instance_count|int)+1) %}
  "infranode{{loop.index}}": {
    "Type": "AWS::EC2::Instance",
    "Properties": {
      "ImageId": {
        "Fn::FindInMap": [
          "RegionMapping",
          {
            "Ref": "AWS::Region"
          },
          "AMI"
        ]
      },
      "InstanceType": "{{infranode_instance_type}}",
      "KeyName": "{{key_name}}",
      "SecurityGroupIds": [
        {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        }
      ],
      "SubnetId": {
        "Ref": "PublicSubnet"
      },
      "Tags": [
        {
          "Key": "Name",
          "Value": "infranode"
        },
        {
          "Key": "AnsibleGroup",
          "Value": "infranodes"
        },
        {
          "Key": "Project",
          "Value": "{{project_tag}}"
        },
        {
          "Key": "{{ project_tag }}",
          "Value": "infranode"
        }
      ],
      "BlockDeviceMappings": [
        {
          "DeviceName": "/dev/xvda",
          "Ebs": {
            "VolumeSize": 30
          }
        },
        {
          "DeviceName": "/dev/xvdb",
          "Ebs": {
            "VolumeType": "gp2",
            "VolumeSize": 50
          }
        }
      ]
    }
  },
  "infranode{{loop.index}}DNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
      "RecordSets": [
        {
          "Name": "infranode{{loop.index}}.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "infranode{{loop.index}}",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
  {% endfor %}
  {% for c in range(1,(support_instance_count|int)+1) %}
  "support{{loop.index}}": {
    "Type": "AWS::EC2::Instance",
    "Properties": {
      "ImageId": {
        "Fn::FindInMap": [
          "RegionMapping",
          {
            "Ref": "AWS::Region"
          },
          "AMI"
        ]
      },
      "InstanceType": "{{support_instance_type}}",
      "KeyName": "{{key_name}}",
      "SecurityGroupIds": [
        {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        }
      ],
      "SubnetId": {
        "Ref": "PublicSubnet"
      },
      "Tags": [
        {
          "Key": "Name",
          "Value": "support"
        },
        {
          "Key": "AnsibleGroup",
          "Value": "support"
        },
        {
          "Key": "Project",
          "Value": "{{project_tag}}"
        },
        {
          "Key": "{{ project_tag }}",
          "Value": "support"
        }
      ],
      "BlockDeviceMappings": [
        {
          "DeviceName": "/dev/xvda",
          "Ebs": {
            "VolumeSize": 30
          }
        },
        {
          "DeviceName": "/dev/xvdb",
          "Ebs": {
            "VolumeType": "gp2",
            "VolumeSize": 50
          }
        }
      ]
    }
  },
  "support{{loop.index}}DNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
      "RecordSets": [
        {
          "Name": "support{{loop.index}}.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "support{{loop.index}}",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
{% endfor %}
},
  "Outputs": {
    "Route53internalzoneOutput": {
      "Description": "The ID of the internal route 53 zone",
      "Value": {
        "Ref": "zoneinternalidns"
      }
  }
}
}
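
Templates like this one are rendered by the deployer and launched as a CloudFormation stack named after the project tag, matching the destroy task earlier in this config. A sketch of the launch step; the rendered-template path is a placeholder for wherever the deployer writes it:

# Sketch only: launching a rendered template of this kind; the workdir
# path is illustrative, the stack name/tags mirror destroy_env.yml above.
- name: Launch CloudFormation template
  cloudformation:
    stack_name: "{{ project_tag }}"
    state: present
    region: "{{ aws_region }}"
    disable_rollback: false
    template: "{{ ANSIBLE_REPO_PATH }}/workdir/ec2_cloud_template.rendered.json"
    tags:
      Stack: "project {{ env_type }}-{{ guid }}"
  register: cloudformation_out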
ansible/configs/archive/ocp-implementation-lab-2/files/ec2_internal_dns.json.j2
New file
@@ -0,0 +1,72 @@
{
  "Comment": "Create internal dns zone entries",
  "Changes": [
{% for host in groups['masters'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "master{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['infranodes'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "infranode{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['nodes'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "node{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['support'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "nfs{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['bastions'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "bastion.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    }
{% endfor %}
  ]
}
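
The change batch rendered by this template is applied with the aws CLI, as the commented-out tasks in this config's destroy_env.yml show for the DELETE direction. A sketch of the CREATE direction, reusing that naming convention:

# Sketch only: render the batch with DNS_action=CREATE and apply it,
# mirroring the commented-out delete flow in destroy_env.yml above.
- name: Render internal DNS change batch
  template:
    src: files/ec2_internal_dns.json.j2
    dest: "{{ ANSIBLE_REPO_PATH }}/workdir/internal_dns-{{ env_type }}-{{ guid }}_CREATE.json"
  vars:
    DNS_action: CREATE

- name: Apply internal DNS change batch
  command: >-
    aws route53 change-resource-record-sets
    --hosted-zone-id {{ internal_zone_id }}
    --change-batch file://{{ ANSIBLE_REPO_PATH }}/workdir/internal_dns-{{ env_type }}-{{ guid }}_CREATE.json
    --region={{ aws_region }}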
ansible/configs/archive/ocp-implementation-lab-2/files/hosts_template.j2
New file
@@ -0,0 +1,203 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
openshift_metrics_image_version=v{{ repo_version }}
#openshift_image_tag=v{{ repo_version }}
openshift_release={{ osrelease }}
#docker_version="{{docker_version}}"
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# default project node selector
osm_default_node_selector='env=users'
###########################################################################
### OpenShift Optional Vars
###########################################################################
# Enable cockpit
osm_use_cockpit=true
osm_cockpit_plugins=['cockpit-kubernetes']
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname={{master_lb_dns}}
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" %}
openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com', 'bindPassword': '{{bindPassword}}', 'ca': 'ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid'}]
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "allow_all"  %}
openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
{% endif %}
{% if install_idm == "htpasswd"  %}
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
openshift_hosted_metrics_deploy={{install_metrics}}
openshift_hosted_metrics_storage_kind=nfs
openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
openshift_hosted_metrics_storage_host=support1.{{guid}}.internal
openshift_hosted_metrics_storage_nfs_directory=/srv/nfs
openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_metrics_storage_volume_name=metrics
openshift_hosted_metrics_storage_volume_size=10Gi
# Enable cluster logging
openshift_hosted_logging_deploy={{install_logging}}
openshift_hosted_logging_storage_kind=nfs
openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
openshift_hosted_logging_storage_nfs_directory=/srv/nfs
openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_logging_storage_volume_name=logging
openshift_hosted_logging_storage_volume_size=10Gi
openshift_hosted_logging_hostname=kibana.{{cloudapps_suffix}}
openshift_hosted_logging_elasticsearch_cluster_size=1
openshift_hosted_logging_deployer_version=v{{repo_version}}
# Note: the commented variable below is the wrong name for this release; keep it disabled
#openshift_hosted_logging_image_version=v{{repo_version}}
###########################################################################
### OpenShift Project Management Vars
###########################################################################
# Configure additional projects
openshift_additional_projects={'my-infra-project-test': {'default_node_selector': 'env=infra'}}
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_selector='env=infra'
openshift_hosted_router_replicas=1
openshift_hosted_registry_selector='env=infra'
openshift_hosted_registry_replicas=1
# Registry AWS S3
# S3 bucket must already exist.
openshift_hosted_registry_storage_kind=object
openshift_hosted_registry_storage_provider=s3
openshift_hosted_registry_storage_s3_accesskey={{ aws_access_key_id }}
openshift_hosted_registry_storage_s3_secretkey={{ aws_secret_access_key }}
openshift_hosted_registry_storage_s3_bucket={{ env_type }}-{{ guid }}
openshift_hosted_registry_storage_s3_region={{ aws_region }}
openshift_hosted_registry_storage_s3_chunksize=26214400
openshift_hosted_registry_storage_s3_rootdirectory=/registry
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
template_service_broker_selector={"env":"infra"}
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=false
[OSEv3:children]
lb
masters
etcd
nodes
nfs
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[masters]
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}
{% endfor %}
[etcd]
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}} openshift_hostname=master{{loop.index}}.{{chomped_zone_internal_dns}}   ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes'] %}
infranode{{loop.index}}.{{chomped_zone_internal_dns}} openshift_hostname=infranode{{loop.index}}.{{chomped_zone_internal_dns}} openshift_ip={{hostvars[host]['ec2_private_ip_address']}} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes'] %}
node{{loop.index}}.{{chomped_zone_internal_dns}} openshift_hostname=node{{loop.index}}.{{chomped_zone_internal_dns}} openshift_ip={{hostvars[host]['ec2_private_ip_address']}} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users'}"
{% endfor %}
[nfs]
{% for host in groups['support'] %}
support{{loop.index}}.{{chomped_zone_internal_dns}} openshift_hostname=support{{loop.index}}.{{chomped_zone_internal_dns}} openshift_ip={{hostvars[host]['ec2_private_ip_address']}} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem
{% endfor %}
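An inventory rendered from this template is handed to the openshift-ansible installer on the bastion; a sketch, assuming the standard playbook path from the openshift-ansible RPM:

    ansible-playbook -i /etc/ansible/hosts \
        /usr/share/ansible/openshift-ansible/playbooks/byo/config.yml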
ansible/configs/archive/ocp-implementation-lab-2/files/htpasswd.openshift
New file
@@ -0,0 +1,102 @@
andrew:$apr1$dZPb2ECf$ercevOFO5znrynUfUj4tb/
karla:$apr1$FQx2mX4c$eJc21GuVZWNg1ULF8I2G31
user1:$apr1$FmrTsuSa$yducoDpvYq0KEV0ErmwpA1
user2:$apr1$JCcW2XQM$8takcyaYYrPT5I8M46TA01
user3:$apr1$zPC/rXKY$2PGF7dRsGwC3i8YJ59aOk0
user4:$apr1$e9/zT6dh$J18M.9zyn3DazrYreGV.B/
user5:$apr1$Nu/XJFVP$DgybymePret.Prch9MyxP/
user6:$apr1$VEbpwL9M$c1oFwS.emkt8fyR24zOzd0
user7:$apr1$wZxsnY/A$PK0O7iofGJJsvOZ3ctoNo.
user8:$apr1$5YBAWpGg$YO4ACHZL.c31NbQZH9LlE.
user9:$apr1$CIxB1enN$Aghb7.S4U3SXPRt55hTWI.
user10:$apr1$dWTDSR23$UGGJtkVC1ERmAOikomI9K0
user11:$apr1$j4fPyRZg$nNJk1nt1vAf54HAB/g/8g/
user12:$apr1$dd6kysUI$ueu/9.gbL0LkjpCbSjFNI.
user13:$apr1$DeRaAbVq$ZI3HtBzQxWYHifjIuPJSM1
user14:$apr1$dUuWDYgk$co6NQ4Dbcp3pQjVO5dR7Q.
user15:$apr1$4QmhSys7$wC.fKmKRqLNqoYqQ1dixJ/
user16:$apr1$RHcOPHg7$p9LgYP6zE4nMDlA8ongVc/
user17:$apr1$pji2xxHN$vvUHj/fbQRgLR.WBMblQH/
user18:$apr1$Lm79l0Qr$KgZSAuPcrTo4.GIWTBLGa/
user19:$apr1$KGxvneIX$.GJo7JB.N/c1FLW7vlblx/
user20:$apr1$WfYdosg5$cU1BsAzkIhTzKBx8Rvd3o1
user21:$apr1$cKRCbWLl$WCVjYUxD22GS5RRv1npwR1
user22:$apr1$QhpgOkFU$Y6Nn7NEPbJk3D9ehFb4i50
user23:$apr1$dVgQOh7j$L3JZlN8ZmdEwebXqD66Yl0
user24:$apr1$z/U5MAQB$GvKG3i8ATXWHhoxN9e0HS/
user25:$apr1$gFHGMQUV$w11pZbcBqVKOylr9TZ1EW.
user26:$apr1$5YG0dnOG$GzbnTQMBe0Dqc3f3pwvPL1
user27:$apr1$Kt6VoxNS$nq1Kzd53DUL8h8gfu4fEq/
user28:$apr1$aLAQHJ4d$qTRmUpw2eF9whEwDyIixG0
user29:$apr1$3HH4pgpa$Uh84gx3UP8vyPRfAIMPRl1
user30:$apr1$bbEEX3EF$ozw4jPcYHwVO7.MRzXtu0.
user31:$apr1$hD0kfz7i$SjNdGZbvto5EifBma5iA5.
user32:$apr1$fRMBUYu8$T5BQ8kI3pMgqXaRH7l8p..
user33:$apr1$es9ruteO$jZsV5/H8GIzw.vCfPs5310
user34:$apr1$OQ1I/gHn$.WA01EeXhDLE1K3vWD1wu.
user35:$apr1$KseEJXTS$kE/QO1XT0mZ44Iyw/ofnj/
user36:$apr1$PglCzG.g$44QsoAyMhanH5A40P5jhY1
user37:$apr1$2d5ggTIZ$xYsfdRBLOlEsnWRFVS9Yl0
user38:$apr1$x/cdV95V$mKFZmSkoBjeEu.HZshO0n.
user39:$apr1$VC6.WQOS$fAOAR1mx/i7Pnt2oGsDmu/
user40:$apr1$n36Hr3zC$lEVq4B7UWmdcnl01lUyR..
user41:$apr1$/q6tJtXi$9mCB1YCqdhEE6VVVVkVKc/
user42:$apr1$fTMTWEzw$X4MsyNlWketRjQgqonwxn.
user43:$apr1$.VwoJu38$D4v4NKL1KPuRZdNeprBXS/
user44:$apr1$e0s48GLK$JMQ849MeckVX0wG2vE2s10
user45:$apr1$a9ucQ1sC$HEMij.WGEa1xIQ01HpyKh1
user46:$apr1$uwOs/4nv$TB2r3pOPJ2K0A./CimVUT1
user47:$apr1$jfTmW1k5$Fd2ebTUtFFl3CLZWfFmRR.
user48:$apr1$4/apB/zd$IxoWJ5pTRNGgbxx3Ayl/i0
user49:$apr1$nu75PZ0r$bPCMgDmlOAj.YbeFPHJHE.
user50:$apr1$c/R3wJ/g$GJ03siVj5tkNxrg4OaxhJ0
user51:$apr1$EdEX6Pyt$IdPQHmhZi8FEbJjREVbe1/
user52:$apr1$ZMfyTjjX$RFOrnKsSr5xXA7IXn7TkC/
user53:$apr1$GY.rOkJM$uMCqJmmorP5I1v.YHHz1Z/
user54:$apr1$1vuZq/U0$Aq0Kz3wk0YPleDz/rTCdK0
user55:$apr1$KjULqmcD$XrhyYt2nWuiaQkbciDIcN/
user56:$apr1$gTPaNeq0$sqWJDPZ5//ZDjLf0dSbUh1
user57:$apr1$6PaKhdlY$dX2FkVJ0xV.4MAQeDUgRT0
user58:$apr1$.8MSdEpY$MPIbUO2WnC0wsno8zUOjC.
user59:$apr1$TWpKuAvt$CFeTQxxSgeU3dFkL4qpXb.
user60:$apr1$fEYUgRVU$LO2qwXfpxwI9fDXPfQgQB0
user61:$apr1$HHUBEn4G$.cAnwbh.ogNEzQSug3nqo/
user62:$apr1$Agt4GmKT$4k3Ev3FSJiNsbht3vUbxQ/
user63:$apr1$FsUKA7Hw$nkSgqSIFeqCY1mOyGje3O1
user64:$apr1$vBlkQoG4$8L2mTo8gdr8wC68G2y2G91
user65:$apr1$McEnEqn4$dZvjACdGp0HALVHBtHEu80
user66:$apr1$zamuhlOG$Xch5pbO1ki2Dad1dzjS4j.
user67:$apr1$qC1rll4s$cN4DzsWnyFBTNi3Cdi6161
user68:$apr1$txKPCx1k$WtrlrlP.UF.Rlzbnv6igE/
user69:$apr1$EO2A25Sj$DO/1lCNJJXff4GOsTZmHL/
user70:$apr1$pJu569Az$nHtF2ZkUrNXw9WN0Obb/T1
user71:$apr1$YKpEtZka$c59Fmov1cssRdrO5VqBKz1
user72:$apr1$CNkwam0s$b.QcPWytnhlOsaajMQx630
user73:$apr1$m5kE07o0$7TC3K.I16YTaRyN8EZq7E/
user74:$apr1$/5p0Qoyy$hjQ30Q8Ghb4zNrjjt2yLk/
user75:$apr1$ZF3yRTqJ$TgLBllrvTQuuiIjSb53xR0
user76:$apr1$711LL2Ai$59rBNmFprwZXtyFVBtRul0
user77:$apr1$N4uJhPSq$A.rVfAsRXCQqxOenDHjqX1
user78:$apr1$PHSpv5ty$WC8GlQpclQqH30eWPu.6e.
user79:$apr1$c/yk9dQ9$dvhh.P4F5zGnysBvwps4m/
user80:$apr1$oTmftf8R$FYzQD77hYfh9Wq3SvwYU7/
user81:$apr1$3YvQ/JPg$sDXhV8xpHNxQzFSvMMxAD1
user82:$apr1$quKB2P2.$iq.ZzDa3/xoaoY3.F1Un90
user83:$apr1$IVq8346H$lPQJZ7Thr/gJ2EmzDsktH0
user84:$apr1$xfehskAD$NRMQJttylejHtNKQqBj.k.
user85:$apr1$/LYLXNbH$/COZBzkaU0pPOXR38ZFVX/
user86:$apr1$a/xD3Jfw$rZXN4ykj0W6qadlh447n//
user87:$apr1$v01l1ljr$tGDKwdhKC05HEbntSxV5M0
user88:$apr1$9RYtWl12$ck19ozvS.SWeAAaDZqE940
user89:$apr1$EvSs2TA2$fRDg0hVOCf2jbhwXifzbs.
user90:$apr1$9ffAneiG$CAz5JWeIPGnamOQlVRGIk.
user91:$apr1$Z3XW5Yy4$Kibx7GmgdpC6CAM0IxhtC0
user92:$apr1$6CfIrBqr$5nGNCGA5QOPq/h8hlOE4f.
user93:$apr1$iJ4AQyfu$fkXSVib.OzPCSBQlLhwwS.
user94:$apr1$jiPqi0uI$XyYDQt0kcawqFLX12VW3n/
user95:$apr1$ULEkhfG2$/WHcoR9KJxAS3uw470Vkk.
user96:$apr1$56tQXa91$l0yaZgZHbDidgw95IP7yQ1
user97:$apr1$SoGwK9hP$YbceEfwmsM3QCdNGAaE1b.
user98:$apr1$MVU1/8dh$UKzkRk1CQP00SvnoPIm1..
user99:$apr1$v8vKZdHH$NC5xud.olhtdydHU9hav6.
user100:$apr1$glFN48wz$dR9w94PGiQL8qZXcXGd0L0
ansible/configs/archive/ocp-implementation-lab-2/files/labs_hosts_template.j2
New file
@@ -0,0 +1,57 @@
[OCPlabs:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
[OCPlabs:children]
lb
masters
etcd
nodes
nfs
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[masters]
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% endfor %}
[etcd]
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% endfor %}
## These are infranodes
{% for host in groups['infranodes'] %}
infranode{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% endfor %}
## These are regular nodes
{% for host in groups['nodes'] %}
node{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% endfor %}
[nfs]
{% for host in groups['support'] %}
support{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% endfor %}
ansible/configs/archive/ocp-implementation-lab-2/files/pvs.j2
New file
@@ -0,0 +1,17 @@
---
{% for pv in pv_list %}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ pv }}
spec:
  capacity:
    storage: {{pv_size}}
  accessModes:
  - ReadWriteOnce
  nfs:
    path: {{ nfs_export_path }}/{{pv}}
    server: nfs1.{{guid}}.internal
  persistentVolumeReclaimPolicy: {{persistentVolumeReclaimPolicy}}
---
{% endfor %}
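Each loop iteration emits one PersistentVolume document, with `---` separating them, so the rendered file can be applied in a single pass; a sketch, assuming the deployer has copied the rendered output to /root/pvs.yml on a master:

    oc create -f /root/pvs.yml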
ansible/configs/archive/ocp-implementation-lab-2/files/repos_template.j2
New file
@@ -0,0 +1,36 @@
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7
baseurl={{own_repo_path}}/rhel-7-server-rpms
enabled=1
gpgcheck=0
[rhel-7-server-rh-common-rpms]
name=Red Hat Enterprise Linux 7 Common
baseurl={{own_repo_path}}/rhel-7-server-rh-common-rpms
enabled=1
gpgcheck=0
[rhel-7-server-extras-rpms]
name=Red Hat Enterprise Linux 7 Extras
baseurl={{own_repo_path}}/rhel-7-server-extras-rpms
enabled=1
gpgcheck=0
[rhel-7-server-optional-rpms]
name=Red Hat Enterprise Linux 7 Optional
baseurl={{own_repo_path}}/rhel-7-server-optional-rpms
enabled=1
gpgcheck=0
[rhel-7-server-ose-{{repo_version}}-rpms]
name=Red Hat Enterprise Linux 7 OSE {{repo_version}}
baseurl={{own_repo_path}}/rhel-7-server-ose-{{repo_version}}-rpms
enabled=1
gpgcheck=0
## Required since OCP 3.5
[rhel-7-fast-datapath-rpms]
name=Red Hat Enterprise Linux Fast Datapath (RHEL 7 Server) (RPMs)
baseurl={{own_repo_path}}/rhel-7-fast-datapath-rpms
enabled=1
gpgcheck=0
ansible/configs/archive/ocp-implementation-lab-2/files/userpvs.j2
New file
@@ -0,0 +1,20 @@
---
{%  for pv in range(1,user_vols|int) %}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: vol{{ pv }}
spec:
  capacity:
    storage: {{ pv_size }}
  accessModes:
  - ReadWriteOnce
{% if  pv % 2 == 0 %}
  - ReadWriteMany
{% endif %}
  nfs:
    path: {{ nfs_export_path }}/user-vols/vol{{pv}}
    server: nfs1.{{guid}}.internal
  persistentVolumeReclaimPolicy: {{ persistentVolumeReclaimPolicy }}
---
{% endfor %}
ansible/configs/archive/ocp-implementation-lab-2/post_infra.yml
New file
@@ -0,0 +1,32 @@
- name: Step 002 Post Infrastructure
  hosts: localhost
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tags:
    - step002
    - post_infrastructure
  tasks:
    - debug:
        msg: "Step 001 Post Infrastructure - There are no post_infrastructure tasks defined"
      when: "not {{ tower_run | default(false) }}"
    - name: Job Template to launch a Job Template with update on launch inventory set
      uri:
        url: "https://{{ ansible_tower_ip }}/api/v1/job_templates/{{ job_template_id }}/launch/"
        method: POST
        user: "{{tower_admin}}"
        password: "{{tower_admin_password}}"
        body:
          extra_vars:
            guid: "{{guid}}"
            ipa_host_password: "{{ipa_host_password}}"
        body_format: json
        validate_certs: False
        HEADER_Content-Type: "application/json"
        status_code: 200, 201
      when: "{{ tower_run | default(false) }}"
      tags:
        - tower_workaround
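For reference, the uri task above amounts to the following curl call (a sketch; the Tower host, job template id, and extra vars shown are placeholders):

    curl -k -u "admin:password" \
         -H "Content-Type: application/json" \
         -X POST \
         -d '{"extra_vars": {"guid": "abcd1", "ipa_host_password": "secret"}}' \
         "https://tower.example.com/api/v1/job_templates/42/launch/"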
ansible/configs/archive/ocp-implementation-lab-2/post_software.yml
New file
@@ -0,0 +1,39 @@
#vim: set ft=ansible:
---
- name: Step 005 - Post Software deployment
  hosts: localhost
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tags:
    - step005
  tasks:
    - name: Overwrite ansible hosts file with lab hosts file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/labs_hosts_template.j2"
        dest: "{{ ANSIBLE_REPO_PATH }}/workdir/labs_hosts-{{ env_type }}-{{ guid }}"
- name: Step lab post software deployment
  hosts: bastions
  gather_facts: False
  become: yes
  tags:
    - opentlc_bastion_tasks
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Ensure /etc/ansible dir exists
      file: path=/etc/ansible state=directory
    - name: Copy over ansible hosts file
      copy:
        backup: yes
        src: "{{ ANSIBLE_REPO_PATH }}/workdir/labs_hosts-{{ env_type }}-{{ guid }}"
        dest: /etc/ansible/hosts
      tags:
        - overwrite_hosts_with_lab_hosts
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa"
      when: install_ipa_client|bool
ansible/configs/archive/ocp-implementation-lab-2/pre_infra.yml
New file
@@ -0,0 +1,13 @@
- name: Step 000 Pre Infrastructure
  hosts: localhost
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tags:
    - step000
    - pre_infrastructure
  tasks:
    - debug:
        msg: "Step 000 Pre Infrastructure - There are no pre_infrastructure tasks defined"
ansible/configs/archive/ocp-implementation-lab-2/pre_software.yml
New file
@@ -0,0 +1,62 @@
---
- name: Step 003 - Create env key
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - step003
    - generate_env_keys
  tasks:
    - name: Generate SSH keys
      shell: ssh-keygen -b 2048 -t rsa -f "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" -q -N ""
      args:
        creates: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}"
      when: set_env_authorized_key
# Cloudformation template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }}
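# (Illustration with hypothetical values: a host tagged Project: ocp-implementation-lab-2-abcd1
# surfaces in the ec2.py dynamic inventory as the group tag_Project_ocp_implementation_lab_2_abcd1,
# which is what tag-based group limit patterns match against.)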
- name: Configure all hosts with Repositories, Common Files and Set environment key
  hosts:
    - all:!windows
  become: true
  gather_facts: False
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - step004
    - common_tasks
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories", when: 'repo_method is defined' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/common", when: 'install_common' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key", when: 'set_env_authorized_key' }
- name: Configuring Bastion Hosts
  hosts: bastions
  become: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  roles:
    -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion", when: 'install_bastion' }
  tags:
    - step004
    - bastion_tasks
- name: PreSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - flight_check
  tasks:
    - debug:
        msg: "Pre-Software checks completed successfully"
ansible/configs/archive/ocp-implementation-lab-2/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/archive/ocp-implementation-lab/files/userpvs.j2
@@ -1,5 +1,5 @@
---
{%  for pv in range(1,user_vols) %}
{%  for pv in range(1,user_vols|int) %}
apiVersion: v1
kind: PersistentVolume
metadata:
ansible/configs/archive/opentlc-shared/files/userpvs.j2
@@ -1,5 +1,5 @@
---
{%  for pv in range(1,user_vols) %}
{%  for pv in range(1,user_vols|int) %}
apiVersion: v1
kind: PersistentVolume
metadata:
ansible/configs/bu-workshop/files/cloud_providers/ec2_cloud_template.j2
@@ -478,7 +478,8 @@
          },
          {
            "Key": "owner",
            "Value": "{{ email | default('unknown')}}"
            "Value": "{{ email | default('unknown')}}",
            "PropagateAtLaunch": true
          }
        ],
        "VPCZoneIdentifier": [
ansible/configs/lightbulb/README.adoc
@@ -1,7 +1,15 @@
= Ansible Lightbulb Config
Ansible Lightbulb is an example of an existing deployer being wrapped by Ansible
Agnostic Deployer. It uses its own deployer to create AWS-specific infrastructure.
https://github.com/ansible/lightbulb
ansible/configs/lightbulb/env_vars.yml
@@ -47,7 +47,13 @@
#use_own_key: true
#env_authorized_key: "{{guid}}key"
#ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
use_own_key: true
env_authorized_key: "{{guid}}key"
ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
set_env_authorized_key: true
ansible/configs/ocp-adv-deploy-hw/README.adoc
New file
@@ -0,0 +1,115 @@
= OPENTLC OCP-HA-LAB Env_Type config
For the Homework environment, we will NOT preserve the complete hosts file. We
will add the following "--skip-tags"
[source,text]
----
preserve_complete_ansible_inventory
openshift_nfs_config
----
CNS nodes are not deployed; they are not yet part of the homework environment.
The deployment relies on details such as EC2 instance names and on secret
variables such as private/public key pair information and passwords.
Eventually, all sensitive information will be encrypted via Ansible Vault;
instructions for doing this will be included in a later release.
== Set up your "Secret" variables
* You need to provide some credentials for deployments to work
* Create a file called "env_secret_vars.yml" and put it in the
 ./ansible/configs/CONFIGNAME/ directory.
** At this point this file *has to be created* even if no vars from it are used.
* You can choose to provide these values as extra vars (-e "var=value") in the
 command line if you prefer not to keep sensitive information in a file.
.Example contents of "Secret" Vars file
----
# ## Logon credentials for Red Hat Network
# ## Required if using the subscription component
# ## of this playbook.
rhel_subscription_user: ''
rhel_subscription_pass: ''
#
# ## LDAP Bind Password
bindPassword: ''
#
# ## Desired openshift admin name and password
admin_user: ""
admin_user_password: ""
#
# ## AWS Credentials. This is required.
aws_access_key_id: ""
aws_secret_access_key: ""
#If using repo_method: satellite, you must set these values as well.
# satellite_url: https://satellite.example.com
# satellite_org: Sat_org_name
# satellite_activationkey: "rhel7basic"
----
== Review the Env_Type variable file
* This file link:./env_vars.yml[./env_vars.yml] contains all the variables you
 need to define to control the deployment of your environment.
== Running Ansible Playbook
. You can run the playbook with the following arguments to overwrite the default variable values:
[source,bash]
----
# Set your environment variables (optional, but makes life easier)
REGION=ap-southeast-1
KEYNAME=ocpkey
GUID=testnewec21
ENVTYPE="ocp-ha-lab"
CLOUDPROVIDER=ec2v2
HOSTZONEID='Z3IHLWJZOU9SRT'
REPO_PATH='https://admin.example.com/repos/ocp/3.6/'
BASESUFFIX='.example.opentlc.com'
IPAPASS=aaaaaa
REPO_VERSION=3.6
NODE_COUNT=2
DEPLOYER_REPO_PATH=`pwd`
LOG_FILE=$(pwd)/${ENVTYPE}-${GUID}.log
## For an HA environment deployed without installing OpenShift
  ansible-playbook ${DEPLOYER_REPO_PATH}/main.yml  \
      -e "guid=${GUID}" -e "env_type=${ENVTYPE}" -e "key_name=${KEYNAME}" \
      -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}" -e "HostedZoneId=${HOSTZONEID}" \
      -e "subdomain_base_suffix=${BASESUFFIX}" \
      -e "bastion_instance_type=t2.medium" -e "master_instance_type=t2.large" \
      -e "infranode_instance_type=t2.large" -e "node_instance_type=t2.large" \
      -e "support_instance_type=t2.medium" -e "node_instance_count=${NODE_COUNT}" \
      -e "ipa_host_password=${IPAPASS}" -e "install_idm=htpasswd"  \
      -e "email=name@example.com" \
      -e "repo_method=file" -e "own_repo_path=${REPO_PATH}" -e "repo_version=${REPO_VERSION}" \
      -e "software_to_deploy=openshift" -e "osrelease=3.6.173.0.21" -e "docker_version=1.12.6" \
      -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
      --skip-tags=installing_openshift,get_openshift_credentials 1>> $LOG_FILE 2>> $LOG_FILE
----
. To delete an environment:
[source,bash]
----
#To Destroy an Env
ansible-playbook  \
    ${DEPLOYER_REPO_PATH}/configs/${ENVTYPE}/destroy_env.yml \
    -e "guid=${GUID}" -e "env_type=${ENVTYPE}" \
    -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}"  -e "HostedZoneId=${HOSTZONEID}" \
    -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
    -e "key_name=${KEYNAME}"  -e "subdomain_base_suffix=${BASESUFFIX}"
----
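. To verify the stack is gone afterwards (a sketch; assumes the AWS CLI is configured for the same account and region, and that the stack name matches project_tag):
[source,bash]
----
# Returns a "Stack ... does not exist" error once deletion has completed
aws cloudformation describe-stacks --stack-name ${ENVTYPE}-${GUID} --region ${REGION}
----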
ansible/configs/ocp-adv-deploy-hw/destroy_env.yml
New file
@@ -0,0 +1,31 @@
- name: Starting environment teardown
  hosts: localhost
  connection: local
  gather_facts: False
  become: no
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tasks:
    - name: Destroy cloudformation template
      cloudformation:
        stack_name: "{{project_tag}}"
        state: "absent"
        region: "{{aws_region}}"
        disable_rollback: false
        tags:
          Stack: "project {{env_type}}-{{ guid }}"
      tags: [ destroying, destroy_cf_deployment ]
      register: cloudformation_result
      until: cloudformation_result|succeeded
      retries: 5
      delay: 60
      ignore_errors: yes
    - name: report Cloudformation error
      fail:
        msg: "FAIL {{ project_tag }} Destroy Cloudformation"
      when: not cloudformation_result|succeeded
      tags: [ destroying, destroy_cf_deployment ]
## TODO: also delete the environment-specific SSH key as part of destroy.
ansible/configs/ocp-adv-deploy-hw/env_vars.yml
New file
@@ -0,0 +1,283 @@
###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
###### OR PASS as "-e" args to ansible-playbook command
# #
# # env_groups:
# #   limit: "tag_Project_opentlc_shared_{{guid}}"
# #   bastions: "tag_AnsibleGroup_bastions"
# #   masters: "tag_AnsibleGroup_masters"
# #   nodes: "tag_AnsibleGroup_nodes"
# #   infranodes: "tag_AnsibleGroup_infranodes"
# #   nfs: "tag_AnsibleGroup_nfs"
install_ipa_client: false
repo_method: file
ocp_pvs: []
  # - es-storage
  # - nexus
  # - nexus2
  # - nexus3
config_nfs_uservols: "true"
user_vols: 200
user_vols_size: 4Gi
master_api_port: 443
osrelease: 3.9.27
openshift_master_overwrite_named_certificates: true
deploy_openshift: false
deploy_openshift_post: false
deploy_env_post: false
install_metrics: true
install_logging: true
ovs_plugin: "subnet" # This can also be set to: "multitenant" or "networkpolicy"
multi_tenant_setting: "os_sdn_network_plugin_name='redhat/openshift-ovs-{{ovs_plugin}}'"
master_lb_dns: "loadbalancer1.{{subdomain_base}}"
cloudapps_suffix: 'apps.{{subdomain_base}}'
openshift_master_ldap_ca_file: 'openshift_master_ldap_ca_file=/root/ca.crt'
 ## If you are not part of GPTE you don't need this.
opentlc_integration: true
################################################################################
#### GENERIC EXAMPLE
################################################################################
install_common: true
install_nfs: true
glusterfs_hosted_device_name: /dev/xvdc
glusterfs_hosted_device_size: 300
glusterfs_app_device_name: /dev/xvdd
glusterfs_app_device_size: 300
install_bastion: false
env_authorized_key: "{{guid}}key"
set_env_authorized_key: true
software_to_deploy: "openshift"
################################################################################
#### OCP IMPLEMENTATION LAB
################################################################################
repo_version: '3.9'
cloudapps_dns: '*.apps.{{subdomain_base}}.'
master_public_dns: "loadbalancer.{{subdomain_base}}."
################################################################################
#### Common host variables
################################################################################
update_packages: false
common_packages:
  - python
  - unzip
  - bash-completion
  - tmux
  - bind-utils
  - wget
  - git
  - vim-enhanced
  - ansible
  - net-tools
  - iptables-services
  - bridge-utils
  - sos
  - psacct
rhel_repos:
  - rhel-7-server-rpms
  - rhel-7-server-extras-rpms
  - rhel-7-server-ose-{{repo_version}}-rpms
  - rhel-7-server-ansible-2.4-rpms
  - rh-gluster-3-client-for-rhel-7-server-rpms
use_subscription_manager: false
use_own_repos: true
#rhn_pool_id_string: "Red Hat Enterprise Linux Server"
rhn_pool_id_string: OpenShift Container Platform
################################################################################
#### nfs host settings
################################################################################
nfs_vg: nfsvg
nfs_pvs: /dev/xvdb
nfs_export_path: /srv/nfs
nfs_size: 50
nfs_shares: []
  # - jenkins
  # - nexus
################################################################################
#### CLOUD PROVIDER: AWS SPECIFIC VARIABLES
################################################################################
#### Route 53 Zone ID (AWS)
HostedZoneId: ''
key_name: ''
aws_region: us-east-1
admin_user: ''
admin_user_password: ''
#### Connection Settings
ansible_ssh_user: ec2-user
remote_user: ec2-user
#### Networking (AWS)
guid: defaultguid
subdomain_base_short: "{{ guid }}"
subdomain_base_suffix: ".example.opentlc.com"
subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"
tower_run: false
#### Environment Sizing
bastion_instance_type: "t2.xlarge"
support_instance_type: "t2.medium"
support_instance_count: 1
node_instance_type: "t2.large"
node_instance_count: 3
infranode_instance_type: "t2.xlarge"
infranode_instance_count: 2
master_instance_type: "t2.large"
master_instance_count: 3
loadbalancer_instance_count: 1
loadbalancer_instance_type: "t2.small"
ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
#### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYMENT
#### You can, but you usually wouldn't need to.
#### CLOUDFORMATIONS vars
project_tag: "{{ env_type }}-{{ guid }}"
docker_version: "{{ '1.12.6' if repo_version | version_compare('3.9', '<')  else '1.13.1' }}"
docker_device: /dev/xvdb
create_internal_dns_entries: true
zone_internal_dns: "{{guid}}.internal."
chomped_zone_internal_dns: "{{guid}}.internal"
zone_public_dns: "{{subdomain_base}}."
bastion_public_dns: "bastion.{{subdomain_base}}."
bastion_public_dns_chomped: "bastion.{{subdomain_base}}"
# vpcid_cidr_block: "192.168.0.0/16"
vpcid_name_tag: "{{subdomain_base}}"
rootfs_size_node: 50
rootfs_size_infranode: 50
rootfs_size_master: 50
rootfs_size_bastion: 20
rootfs_size_support: 20
rootfs_size_loadbalancer: 20
instances:
  - name: "bastion"
    count: 1
    unique: true
    public_dns: true
    dns_loadbalancer: true
    flavor:
      "ec2": "{{bastion_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "bastions"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_bastion }}"
  - name: "loadbalancer"
    count: "{{loadbalancer_instance_count}}"
    public_dns: true
    dns_loadbalancer: true
    flavor:
      "ec2": "{{loadbalancer_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "loadbalancers"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_loadbalancer }}"
  - name: "master"
    count: "{{master_instance_count}}"
    public_dns: false
    dns_loadbalancer: false
    flavor:
      "ec2": "{{master_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "masters"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_master }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: 20
        volume_type: gp2
  - name: "node"
    count: "{{node_instance_count}}"
    public_dns: false
    dns_loadbalancer: false
    flavor:
      "ec2": "{{node_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "nodes"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_node }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: 100
        volume_type: gp2
  - name: "infranode"
    count: "{{infranode_instance_count}}"
    public_dns: true
    dns_loadbalancer: true
    flavor:
      "ec2": "{{infranode_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "infranodes"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_infranode }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: 50
        volume_type: gp2
  - name: "support"
    count: "{{support_instance_count}}"
    public_dns: false
    dns_loadbalancer: false
    flavor:
      "ec2": "{{support_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "support"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_support }}"
    volumes:
      - device_name: "{{nfs_pvs}}"
        volume_size: "{{nfs_size}}"
        volume_type: gp2
      - device_name: "{{docker_device}}"
        volume_size: "50"
        volume_type: gp2
      - device_name: "{{glusterfs_app_device_name}}"
        volume_size: "{{glusterfs_app_device_size}}"
        volume_type: gp2
        purpose: glusterfs
      - device_name: "{{glusterfs_hosted_device_name}}"
        volume_size: "{{glusterfs_hosted_device_size}}"
        volume_type: gp2
        purpose: glusterfs
ansible/configs/ocp-adv-deploy-hw/files/cloud_providers/ec2_cloud_template.j2
New file
@@ -0,0 +1,361 @@
#jinja2: lstrip_blocks: True
---
AWSTemplateFormatVersion: "2010-09-09"
Mappings:
  RegionMapping:
    us-east-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6871a115
      {% else %}
      RHELAMI: ami-c998b6b2
      {% endif %}
    us-east-2:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-03291866
      {% else %}
      RHELAMI: ami-cfdafaaa
      {% endif %}
    us-west-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-18726478
      {% else %}
      RHELAMI: ami-66eec506
      {% endif %}
    us-west-2:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-223f945a
      {% else %}
      RHELAMI: ami-9fa343e7
      {% endif %}
    eu-west-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-7c491f05
      {% else %}
      RHELAMI: ami-bb9a6bc2
      {% endif %}
    eu-central-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-c86c3f23
      {% else %}
      RHELAMI: ami-d74be5b8
      {% endif %}
    ap-northeast-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6b0d5f0d
      {% else %}
      RHELAMI: ami-30ef0556
      {% endif %}
    ap-northeast-2:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-3eee4150
      {% else %}
      RHELAMI: ami-0f5a8361
      {% endif %}
    ap-southeast-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-76144b0a
      {% else %}
      RHELAMI: ami-10bb2373
      {% endif %}
    ap-southeast-2:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-67589505
      {% else %}
      RHELAMI: ami-ccecf5af
      {% endif %}
    ap-south-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-5b673c34
      {% else %}
      RHELAMI: ami-cdbdd7a2
      {% endif %}
    sa-east-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-b0b7e3dc
      {% else %}
      RHELAMI: ami-a789ffcb
      {% endif %}
  DNSMapping:
    us-east-1:
      domain: "us-east-1.compute.internal"
    us-west-1:
      domain: "us-west-1.compute.internal"
    us-west-2:
      domain: "us-west-2.compute.internal"
    eu-west-1:
      domain: "eu-west-1.compute.internal"
    eu-central-1:
      domain: "eu-central-1.compute.internal"
    ap-northeast-1:
      domain: "ap-northeast-1.compute.internal"
    ap-northeast-2:
      domain: "ap-northeast-2.compute.internal"
    ap-southeast-1:
      domain: "ap-southeast-1.compute.internal"
    ap-southeast-2:
      domain: "ap-southeast-2.compute.internal"
    sa-east-1:
      domain: "sa-east-1.compute.internal"
    ap-south-1:
      domain: "ap-south-1.compute.internal"
Resources:
  Vpc:
    Type: "AWS::EC2::VPC"
    Properties:
      CidrBlock: "192.199.0.0/16"
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
        - Key: Name
          Value: "{{vpcid_name_tag}}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
  VpcInternetGateway:
    Type: "AWS::EC2::InternetGateway"
  VpcGA:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId:
        Ref: VpcInternetGateway
      VpcId:
        Ref: Vpc
  VpcRouteTable:
    Type: "AWS::EC2::RouteTable"
    Properties:
      VpcId:
        Ref: Vpc
  VPCRouteInternetGateway:
    DependsOn: VpcGA
    Type: "AWS::EC2::Route"
    Properties:
      GatewayId:
        Ref: VpcInternetGateway
      DestinationCidrBlock: "0.0.0.0/0"
      RouteTableId:
        Ref: VpcRouteTable
  PublicSubnet:
    Type: "AWS::EC2::Subnet"
    DependsOn:
      - Vpc
    Properties:
      CidrBlock: "192.199.0.0/24"
      Tags:
        - Key: Name
          Value: "{{project_tag}}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
      MapPublicIpOnLaunch: true
      VpcId:
        Ref: Vpc
  PublicSubnetRTA:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
    Properties:
      RouteTableId:
        Ref: VpcRouteTable
      SubnetId:
        Ref: PublicSubnet
  HostSG:
    Type: "AWS::EC2::SecurityGroup"
    Properties:
      GroupDescription: Host
      VpcId:
        Ref: Vpc
      Tags:
        - Key: Name
          Value: host_sg
  HostUDPPorts:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - HostSG
          - GroupId
      IpProtocol: udp
      FromPort: 0
      ToPort: 65535
      CidrIp: "0.0.0.0/0"
  HostTCPPorts:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - HostSG
          - GroupId
      IpProtocol: tcp
      FromPort: 0
      ToPort: 65535
      CidrIp: "0.0.0.0/0"
  zoneinternalidns:
    Type: "AWS::Route53::HostedZone"
    Properties:
      Name: "{{ zone_internal_dns }}"
      VPCs:
        - VPCId:
            Ref: Vpc
          VPCRegion:
            Ref: "AWS::Region"
      HostedZoneConfig:
        Comment: "Created By ansible agnostic deployer"
  CloudDNS:
    Type: AWS::Route53::RecordSetGroup
    DependsOn:
{% for c in range(1,(infranode_instance_count|int)+1) %}
      - "infranode{{loop.index}}EIP"
{% endfor %}
    Properties:
      HostedZoneId: "{{HostedZoneId}}"
      RecordSets:
        - Name: "{{cloudapps_dns}}"
          Type: A
          TTL: 900
          ResourceRecords:
{% for c in range(1,(infranode_instance_count|int)+1) %}
            - Fn::GetAtt:
                - infranode{{loop.index}}
                - PublicIp
{% endfor %}
{% for instance in instances %}
{% if instance['dns_loadbalancer']|d(false)|bool and not instance['unique']|d(false)|bool %}
  {{instance['name']}}DNSLoadBalancer:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
{% for c in range(1, (instance['count']|int)+1) %}
      - {{instance['name']}}{{c}}EIP
{% endfor %}
    Properties:
      HostedZoneId: {{HostedZoneId}}
      RecordSets:
      - Name: "{{instance['name']}}.{{subdomain_base}}."
        Type: A
        TTL: 900
        ResourceRecords:
{% for c in range(1,(instance['count'] |int)+1) %}
          - "Fn::GetAtt":
            - {{instance['name']}}{{c}}
            - PublicIp
{% endfor %}
{% endif %}
{% for c in range(1,(instance['count'] |int)+1) %}
  {{instance['name']}}{{loop.index}}:
    Type: "AWS::EC2::Instance"
    Properties:
      ImageId:
        Fn::FindInMap:
        - RegionMapping
        - Ref: AWS::Region
        - {{ instance['image_id'] | default('RHELAMI') }}
      InstanceType: "{{instance['flavor'][cloud_provider]}}"
      KeyName: "{{instance['key_name'] | default(key_name)}}"
{% if instance['UserData'] is defined %}
      {{instance['UserData']}}
{% endif %}
      SecurityGroupIds:
        - "Fn::GetAtt":
          - HostSG
          - GroupId
      SubnetId:
        Ref: PublicSubnet
      Tags:
{% if instance['unique'] | d(false) | bool %}
        - Key: Name
          Value: {{instance['name']}}
        - Key: internaldns
          Value: {{instance['name']}}.{{chomped_zone_internal_dns}}
{% else %}
        - Key: Name
          Value: {{instance['name']}}{{loop.index}}
        - Key: internaldns
          Value: {{instance['name']}}{{loop.index}}.{{chomped_zone_internal_dns}}
{% endif %}
        - Key: "owner"
          Value: "{{ email | default('unknownuser') }}"
        - Key: "Project"
          Value: "{{project_tag}}"
        - Key: "{{project_tag}}"
          Value: "{{ instance['name'] }}"
{% for tag in instance['tags'] %}
        - Key: {{tag['key']}}
          Value: {{tag['value']}}
{% endfor %}
      BlockDeviceMappings:
        - DeviceName: "/dev/sda1"
          Ebs:
            VolumeSize: {{ instance['rootfs_size'] | default('50') }}
{% for vol in instance['volumes']|default([]) %}
        - DeviceName: "{{ vol['device_name'] }}"
          Ebs:
            VolumeType: "{{ vol['volume_type'] | d('gp2') }}"
            VolumeSize: "{{ vol['volume_size'] | d('20') }}"
{% endfor %}
  {{instance['name']}}{{loop.index}}InternalDNS:
    Type: "AWS::Route53::RecordSetGroup"
    Properties:
      HostedZoneId:
        Ref: zoneinternalidns
      RecordSets:
{% if instance['unique'] | d(false) | bool %}
      - Name: "{{instance['name']}}.{{zone_internal_dns}}"
{% else %}
      - Name: "{{instance['name']}}{{loop.index}}.{{zone_internal_dns}}"
{% endif %}
        Type: A
        TTL: 10
        ResourceRecords:
          - "Fn::GetAtt":
            - {{instance['name']}}{{loop.index}}
            - PrivateIp
{% if instance['public_dns'] %}
  {{instance['name']}}{{loop.index}}EIP:
    Type: "AWS::EC2::EIP"
    DependsOn:
    - VpcGA
    Properties:
      InstanceId:
        Ref: {{instance['name']}}{{loop.index}}
  {{instance['name']}}{{loop.index}}PublicDNS:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
      - {{instance['name']}}{{loop.index}}EIP
    Properties:
      HostedZoneId: {{HostedZoneId}}
      RecordSets:
{% if instance['unique'] | d(false) | bool %}
          - Name: "{{instance['name']}}.{{subdomain_base}}."
{% else %}
          - Name: "{{instance['name']}}{{loop.index}}.{{subdomain_base}}."
{% endif %}
            Type: A
            TTL: 10
            ResourceRecords:
            - "Fn::GetAtt":
              - {{instance['name']}}{{loop.index}}
              - PublicIp
{% endif %}
{% endfor %}
{% endfor %}
Outputs:
  Route53internalzoneOutput:
    Description: The ID of the internal route 53 zone
    Value:
      Ref: zoneinternalidns
ansible/configs/ocp-adv-deploy-hw/files/ec2_internal_dns.json.j2
New file
@@ -0,0 +1,84 @@
{
  "Comment": "Create internal dns zone entries",
  "Changes": [
{% for host in groups['masters'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "master{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['loadbalancers'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "loadbalancer{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['infranodes'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "infranode{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['nodes'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "node{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['support'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "nfs{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['bastions'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "bastion.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    }
{% endfor %}
  ]
}
ansible/configs/ocp-adv-deploy-hw/files/hosts_template.3.9.14.j2
New file
@@ -0,0 +1,266 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# default project node selector
osm_default_node_selector='env=app'
openshift_hosted_infra_selector="env=infra"
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
###########################################################################
### OpenShift Optional Vars
###########################################################################
# Enable cockpit
osm_use_cockpit=true
osm_cockpit_plugins=['cockpit-kubernetes']
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" %}
openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com', 'bindPassword': '{{bindPassword}}', 'ca': 'ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid'}]
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "allow_all"  %}
openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
{% endif %}
{% if install_idm == "htpasswd"  %}
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
openshift_metrics_install_metrics={{install_metrics}}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_nodeselector={"env":"infra"}
openshift_metrics_hawkular_nodeselector={"env":"infra"}
openshift_metrics_heapster_nodeselector={"env":"infra"}
## Add Prometheus Metrics:
openshift_hosted_prometheus_deploy=true
openshift_prometheus_node_selector={"env":"infra"}
openshift_prometheus_namespace=openshift-metrics
# Prometheus
openshift_prometheus_storage_kind=nfs
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_nfs_directory=/srv/nfs
openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_storage_volume_name=prometheus
openshift_prometheus_storage_volume_size=10Gi
openshift_prometheus_storage_labels={'storage': 'prometheus'}
openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
openshift_prometheus_alertmanager_storage_kind=nfs
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_kind=nfs
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
openshift_prometheus_alertbuffer_storage_type='pvc'
openshift_prometheus_node_exporter_image_version=v3.9
# Enable cluster logging
openshift_logging_install_logging={{install_logging}}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
# openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}}
openshift_logging_es_cluster_size=1
openshift_logging_es_nodeselector={"env":"infra"}
openshift_logging_kibana_nodeselector={"env":"infra"}
openshift_logging_curator_nodeselector={"env":"infra"}
###########################################################################
### OpenShift Project Management Vars
###########################################################################
# Configure additional projects
openshift_additional_projects={'openshift-template-service-broker': {'default_node_selector': ''}}
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_replicas={{infranode_instance_count}}
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
openshift_hosted_etcd_storage_kind=nfs
openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)"
openshift_hosted_etcd_storage_nfs_directory=/srv/nfs
openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'}
openshift_hosted_etcd_storage_volume_name=etcd-asb
openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce']
openshift_hosted_etcd_storage_volume_size=10G
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
{% if new_node_instance_count|int > 0 %}
new_nodes
{% endif %}
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[masters]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[etcd]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'app', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are cns nodes
{% for host in groups['support'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'glusterfs', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
{% if new_node_instance_count > 0 %}
# scaleup performed, leave an empty group, see:
# https://docs.openshift.com/container-platform/3.5/install_config/adding_hosts_to_existing_cluster.html
[new_nodes]
{% endif %}
[nfs]
{% set nfshost = groups['support']|sort|first %}
{{ hostvars[nfshost].internaldns }} openshift_hostname={{ hostvars[nfshost].internaldns }}
[glusterfs]
{% for host in groups['support'] %}
{{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
ansible/configs/ocp-adv-deploy-hw/files/hosts_template.3.9.25.j2
New file
@@ -0,0 +1,226 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="memory_availability"
# Default node selectors
osm_default_node_selector='env=app'
openshift_hosted_infra_selector="env=infra"
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
###########################################################################
### OpenShift Network Vars
###########################################################################
#osm_cluster_network_cidr=10.1.0.0/16
#openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
# htpasswd Authentication
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
openshift_master_htpasswd_file=/root/htpasswd.openshift
# LDAP Authentication (download ipa-ca.crt first)
# openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}]
# openshift_master_ldap_ca_file=/root/ipa-ca.crt
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_replicas={{infranode_instance_count}}
# openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
openshift_hosted_etcd_storage_kind=nfs
openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)"
openshift_hosted_etcd_storage_nfs_directory=/srv/nfs
openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'}
openshift_hosted_etcd_storage_volume_name=etcd-asb
openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce']
openshift_hosted_etcd_storage_volume_size=10G
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
openshift_metrics_install_metrics={{install_metrics}}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_nodeselector={"env":"infra"}
openshift_metrics_hawkular_nodeselector={"env":"infra"}
openshift_metrics_heapster_nodeselector={"env":"infra"}
# Enable cluster logging
openshift_logging_install_logging={{install_logging}}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
# openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}}
openshift_logging_es_cluster_size=1
openshift_logging_es_nodeselector={"env":"infra"}
openshift_logging_kibana_nodeselector={"env":"infra"}
openshift_logging_curator_nodeselector={"env":"infra"}
###########################################################################
### OpenShift Prometheus Vars
###########################################################################
## Add Prometheus Metrics:
openshift_hosted_prometheus_deploy=true
openshift_prometheus_node_selector={"env":"infra"}
openshift_prometheus_namespace=openshift-metrics
# Prometheus
openshift_prometheus_storage_kind=nfs
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_nfs_directory=/srv/nfs
openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_storage_volume_name=prometheus
openshift_prometheus_storage_volume_size=10Gi
openshift_prometheus_storage_labels={'storage': 'prometheus'}
openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
openshift_prometheus_alertmanager_storage_kind=nfs
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_kind=nfs
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
openshift_prometheus_alertbuffer_storage_type='pvc'
# Necessary because of a bug in the installer on 3.9
openshift_prometheus_node_exporter_image_version=v3.9
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[masters]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[etcd]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env':'master', 'cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'infra', 'cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'app', 'cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are CNS nodes
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'glusterfs', 'cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
[nfs]
{% set nfshost = groups['support']|sort|first %}
{{ hostvars[nfshost].internaldns }} openshift_hostname={{ hostvars[nfshost].internaldns }}
#[glusterfs]
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
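For reference, a sketch of one rendered [nodes] entry from the template above, assuming illustrative values internaldns=node1.abcd.internal, guid=abcd, and placement=us-east-1a:

node1.abcd.internal openshift_hostname=node1.abcd.internal openshift_node_labels="{'env':'app', 'cluster': 'abcd', 'zone': 'us-east-1a'}"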
ansible/configs/ocp-adv-deploy-hw/files/hosts_template.3.9.27.j2
New file
@@ -0,0 +1,230 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="memory_availability"
# Default node selectors
osm_default_node_selector='env=app'
openshift_hosted_infra_selector="env=infra"
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
###########################################################################
### OpenShift Network Vars
###########################################################################
#osm_cluster_network_cidr=10.1.0.0/16
#openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
# htpasswd Authentication
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
openshift_master_htpasswd_file=/root/htpasswd.openshift
# LDAP Authentication (download ipa-ca.crt first)
# openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}]
# openshift_master_ldap_ca_file=/root/ipa-ca.crt
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_replicas={{infranode_instance_count}}
# openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
openshift_hosted_etcd_storage_kind=nfs
openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)"
openshift_hosted_etcd_storage_nfs_directory=/srv/nfs
openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'}
openshift_hosted_etcd_storage_volume_name=etcd-asb
openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce']
openshift_hosted_etcd_storage_volume_size=10G
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
openshift_metrics_install_metrics={{install_metrics}}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_nodeselector={"env":"infra"}
openshift_metrics_hawkular_nodeselector={"env":"infra"}
openshift_metrics_heapster_nodeselector={"env":"infra"}
# Enable cluster logging
openshift_logging_install_logging={{install_logging}}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
# openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}}
openshift_logging_es_cluster_size=1
openshift_logging_es_nodeselector={"env":"infra"}
openshift_logging_kibana_nodeselector={"env":"infra"}
openshift_logging_curator_nodeselector={"env":"infra"}
###########################################################################
### OpenShift Prometheus Vars
###########################################################################
## Add Prometheus Metrics:
openshift_hosted_prometheus_deploy=true
openshift_prometheus_node_selector={"env":"infra"}
openshift_prometheus_namespace=openshift-metrics
# Prometheus
openshift_prometheus_storage_kind=nfs
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_nfs_directory=/srv/nfs
openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_storage_volume_name=prometheus
openshift_prometheus_storage_volume_size=10Gi
openshift_prometheus_storage_labels={'storage': 'prometheus'}
openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
openshift_prometheus_alertmanager_storage_kind=nfs
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_kind=nfs
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
openshift_prometheus_alertbuffer_storage_type='pvc'
# Necessary because of a bug in the installer on 3.9
openshift_prometheus_node_exporter_image_version=v3.9
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
#glusterfs
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[masters]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env':'master', 'cluster': '{{guid}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'infra', 'cluster': '{{guid}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'app', 'cluster': '{{guid}}'}"
{% endfor %}
## These are CNS nodes
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'glusterfs', 'cluster': '{{guid}}'}"
{% endfor %}
[nfs]
{% set nfshost = groups['support']|sort|first %}
{{ hostvars[nfshost].internaldns }} openshift_hostname={{ hostvars[nfshost].internaldns }}
#[glusterfs]
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
ansible/configs/ocp-adv-deploy-hw/files/htpasswd.openshift
New file
@@ -0,0 +1,103 @@
andrew:$apr1$dZPb2ECf$ercevOFO5znrynUfUj4tb/
karla:$apr1$FQx2mX4c$eJc21GuVZWNg1ULF8I2G31
user1:$apr1$FmrTsuSa$yducoDpvYq0KEV0ErmwpA1
user2:$apr1$JCcW2XQM$8takcyaYYrPT5I8M46TA01
user3:$apr1$zPC/rXKY$2PGF7dRsGwC3i8YJ59aOk0
user4:$apr1$e9/zT6dh$J18M.9zyn3DazrYreGV.B/
user5:$apr1$Nu/XJFVP$DgybymePret.Prch9MyxP/
user6:$apr1$VEbpwL9M$c1oFwS.emkt8fyR24zOzd0
user7:$apr1$wZxsnY/A$PK0O7iofGJJsvOZ3ctoNo.
user8:$apr1$5YBAWpGg$YO4ACHZL.c31NbQZH9LlE.
user9:$apr1$CIxB1enN$Aghb7.S4U3SXPRt55hTWI.
user10:$apr1$dWTDSR23$UGGJtkVC1ERmAOikomI9K0
user11:$apr1$j4fPyRZg$nNJk1nt1vAf54HAB/g/8g/
user12:$apr1$dd6kysUI$ueu/9.gbL0LkjpCbSjFNI.
user13:$apr1$DeRaAbVq$ZI3HtBzQxWYHifjIuPJSM1
user14:$apr1$dUuWDYgk$co6NQ4Dbcp3pQjVO5dR7Q.
user15:$apr1$4QmhSys7$wC.fKmKRqLNqoYqQ1dixJ/
user16:$apr1$RHcOPHg7$p9LgYP6zE4nMDlA8ongVc/
user17:$apr1$pji2xxHN$vvUHj/fbQRgLR.WBMblQH/
user18:$apr1$Lm79l0Qr$KgZSAuPcrTo4.GIWTBLGa/
user19:$apr1$KGxvneIX$.GJo7JB.N/c1FLW7vlblx/
user20:$apr1$WfYdosg5$cU1BsAzkIhTzKBx8Rvd3o1
user21:$apr1$cKRCbWLl$WCVjYUxD22GS5RRv1npwR1
user22:$apr1$QhpgOkFU$Y6Nn7NEPbJk3D9ehFb4i50
user23:$apr1$dVgQOh7j$L3JZlN8ZmdEwebXqD66Yl0
user24:$apr1$z/U5MAQB$GvKG3i8ATXWHhoxN9e0HS/
user25:$apr1$gFHGMQUV$w11pZbcBqVKOylr9TZ1EW.
user26:$apr1$5YG0dnOG$GzbnTQMBe0Dqc3f3pwvPL1
user27:$apr1$Kt6VoxNS$nq1Kzd53DUL8h8gfu4fEq/
user28:$apr1$aLAQHJ4d$qTRmUpw2eF9whEwDyIixG0
user29:$apr1$3HH4pgpa$Uh84gx3UP8vyPRfAIMPRl1
user30:$apr1$bbEEX3EF$ozw4jPcYHwVO7.MRzXtu0.
user31:$apr1$hD0kfz7i$SjNdGZbvto5EifBma5iA5.
user32:$apr1$fRMBUYu8$T5BQ8kI3pMgqXaRH7l8p..
user33:$apr1$es9ruteO$jZsV5/H8GIzw.vCfPs5310
user34:$apr1$OQ1I/gHn$.WA01EeXhDLE1K3vWD1wu.
user35:$apr1$KseEJXTS$kE/QO1XT0mZ44Iyw/ofnj/
user36:$apr1$PglCzG.g$44QsoAyMhanH5A40P5jhY1
user37:$apr1$2d5ggTIZ$xYsfdRBLOlEsnWRFVS9Yl0
user38:$apr1$x/cdV95V$mKFZmSkoBjeEu.HZshO0n.
user39:$apr1$VC6.WQOS$fAOAR1mx/i7Pnt2oGsDmu/
user40:$apr1$n36Hr3zC$lEVq4B7UWmdcnl01lUyR..
user41:$apr1$/q6tJtXi$9mCB1YCqdhEE6VVVVkVKc/
user42:$apr1$fTMTWEzw$X4MsyNlWketRjQgqonwxn.
user43:$apr1$.VwoJu38$D4v4NKL1KPuRZdNeprBXS/
user44:$apr1$e0s48GLK$JMQ849MeckVX0wG2vE2s10
user45:$apr1$a9ucQ1sC$HEMij.WGEa1xIQ01HpyKh1
user46:$apr1$uwOs/4nv$TB2r3pOPJ2K0A./CimVUT1
user47:$apr1$jfTmW1k5$Fd2ebTUtFFl3CLZWfFmRR.
user48:$apr1$4/apB/zd$IxoWJ5pTRNGgbxx3Ayl/i0
user49:$apr1$nu75PZ0r$bPCMgDmlOAj.YbeFPHJHE.
user50:$apr1$c/R3wJ/g$GJ03siVj5tkNxrg4OaxhJ0
user51:$apr1$EdEX6Pyt$IdPQHmhZi8FEbJjREVbe1/
user52:$apr1$ZMfyTjjX$RFOrnKsSr5xXA7IXn7TkC/
user53:$apr1$GY.rOkJM$uMCqJmmorP5I1v.YHHz1Z/
user54:$apr1$1vuZq/U0$Aq0Kz3wk0YPleDz/rTCdK0
user55:$apr1$KjULqmcD$XrhyYt2nWuiaQkbciDIcN/
user56:$apr1$gTPaNeq0$sqWJDPZ5//ZDjLf0dSbUh1
user57:$apr1$6PaKhdlY$dX2FkVJ0xV.4MAQeDUgRT0
user58:$apr1$.8MSdEpY$MPIbUO2WnC0wsno8zUOjC.
user59:$apr1$TWpKuAvt$CFeTQxxSgeU3dFkL4qpXb.
user60:$apr1$fEYUgRVU$LO2qwXfpxwI9fDXPfQgQB0
user61:$apr1$HHUBEn4G$.cAnwbh.ogNEzQSug3nqo/
user62:$apr1$Agt4GmKT$4k3Ev3FSJiNsbht3vUbxQ/
user63:$apr1$FsUKA7Hw$nkSgqSIFeqCY1mOyGje3O1
user64:$apr1$vBlkQoG4$8L2mTo8gdr8wC68G2y2G91
user65:$apr1$McEnEqn4$dZvjACdGp0HALVHBtHEu80
user66:$apr1$zamuhlOG$Xch5pbO1ki2Dad1dzjS4j.
user67:$apr1$qC1rll4s$cN4DzsWnyFBTNi3Cdi6161
user68:$apr1$txKPCx1k$WtrlrlP.UF.Rlzbnv6igE/
user69:$apr1$EO2A25Sj$DO/1lCNJJXff4GOsTZmHL/
user70:$apr1$pJu569Az$nHtF2ZkUrNXw9WN0Obb/T1
user71:$apr1$YKpEtZka$c59Fmov1cssRdrO5VqBKz1
user72:$apr1$CNkwam0s$b.QcPWytnhlOsaajMQx630
user73:$apr1$m5kE07o0$7TC3K.I16YTaRyN8EZq7E/
user74:$apr1$/5p0Qoyy$hjQ30Q8Ghb4zNrjjt2yLk/
user75:$apr1$ZF3yRTqJ$TgLBllrvTQuuiIjSb53xR0
user76:$apr1$711LL2Ai$59rBNmFprwZXtyFVBtRul0
user77:$apr1$N4uJhPSq$A.rVfAsRXCQqxOenDHjqX1
user78:$apr1$PHSpv5ty$WC8GlQpclQqH30eWPu.6e.
user79:$apr1$c/yk9dQ9$dvhh.P4F5zGnysBvwps4m/
user80:$apr1$oTmftf8R$FYzQD77hYfh9Wq3SvwYU7/
user81:$apr1$3YvQ/JPg$sDXhV8xpHNxQzFSvMMxAD1
user82:$apr1$quKB2P2.$iq.ZzDa3/xoaoY3.F1Un90
user83:$apr1$IVq8346H$lPQJZ7Thr/gJ2EmzDsktH0
user84:$apr1$xfehskAD$NRMQJttylejHtNKQqBj.k.
user85:$apr1$/LYLXNbH$/COZBzkaU0pPOXR38ZFVX/
user86:$apr1$a/xD3Jfw$rZXN4ykj0W6qadlh447n//
user87:$apr1$v01l1ljr$tGDKwdhKC05HEbntSxV5M0
user88:$apr1$9RYtWl12$ck19ozvS.SWeAAaDZqE940
user89:$apr1$EvSs2TA2$fRDg0hVOCf2jbhwXifzbs.
user90:$apr1$9ffAneiG$CAz5JWeIPGnamOQlVRGIk.
user91:$apr1$Z3XW5Yy4$Kibx7GmgdpC6CAM0IxhtC0
user92:$apr1$6CfIrBqr$5nGNCGA5QOPq/h8hlOE4f.
user93:$apr1$iJ4AQyfu$fkXSVib.OzPCSBQlLhwwS.
user94:$apr1$jiPqi0uI$XyYDQt0kcawqFLX12VW3n/
user95:$apr1$ULEkhfG2$/WHcoR9KJxAS3uw470Vkk.
user96:$apr1$56tQXa91$l0yaZgZHbDidgw95IP7yQ1
user97:$apr1$SoGwK9hP$YbceEfwmsM3QCdNGAaE1b.
user98:$apr1$MVU1/8dh$UKzkRk1CQP00SvnoPIm1..
user99:$apr1$v8vKZdHH$NC5xud.olhtdydHU9hav6.
user100:$apr1$glFN48wz$dR9w94PGiQL8qZXcXGd0L0
marina:$apr1$ayR8gA9a$4bmozGlmBX6XQY1AbJfQk1
ansible/configs/ocp-adv-deploy-hw/files/labs_hosts_template.j2
New file
@@ -0,0 +1,71 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
# disable memory check, as we are not a production environment
openshift_disable_check="memory_availability"
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
#glusterfs
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[masters]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env': 'master', 'cluster': '{{guid}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'infra', 'cluster': '{{guid}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'app', 'cluster': '{{guid}}'}"
{% endfor %}
## These are CNS nodes
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'glusterfs', 'cluster': '{{guid}}'}"
{% endfor %}
[nfs]
{% set nfshost = groups['support']|sort|first %}
{{ hostvars[nfshost].internaldns }} openshift_hostname={{ hostvars[nfshost].internaldns }}
#[glusterfs]
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
ansible/configs/ocp-adv-deploy-hw/files/pvs.j2
New file
@@ -0,0 +1,17 @@
---
{% for pv in pv_list %}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {{ pv }}
spec:
  capacity:
    storage: {{pv_size}}
  accessModes:
  - ReadWriteOnce
  nfs:
    path: {{ nfs_export_path }}/{{pv}}
    server: support1.{{guid}}.internal
  persistentVolumeReclaimPolicy: {{persistentVolumeReclaimPolicy}}
---
{% endfor %}
ansible/configs/ocp-adv-deploy-hw/files/repos_template.j2
New file
@@ -0,0 +1,47 @@
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7
baseurl={{own_repo_path}}/rhel-7-server-rpms
enabled=1
gpgcheck=0
[rhel-7-server-rh-common-rpms]
name=Red Hat Enterprise Linux 7 Common
baseurl={{own_repo_path}}/rhel-7-server-rh-common-rpms
enabled=1
gpgcheck=0
[rhel-7-server-extras-rpms]
name=Red Hat Enterprise Linux 7 Extras
baseurl={{own_repo_path}}/rhel-7-server-extras-rpms
enabled=1
gpgcheck=0
[rhel-7-server-optional-rpms]
name=Red Hat Enterprise Linux 7 Optional
baseurl={{own_repo_path}}/rhel-7-server-optional-rpms
enabled=1
gpgcheck=0
[rhel-7-server-ose-{{repo_version}}-rpms]
name=Red Hat Enterprise Linux 7 OSE {{repo_version}}
baseurl={{own_repo_path}}/rhel-7-server-ose-{{repo_version}}-rpms
enabled=1
gpgcheck=0
[rhel-7-fast-datapath-rpms]
name=Red Hat Enterprise Linux 7 Fast Datapath
baseurl={{own_repo_path}}/rhel-7-fast-datapath-rpms
enabled=1
gpgcheck=0
[rhel-7-server-ansible-2.4-rpms]
name=Red Hat Enterprise Linux 7 Ansible RPMS
baseurl={{own_repo_path}}/rhel-7-server-ansible-2.4-rpms
enabled=1
gpgcheck=0
[rh-gluster-3-client-for-rhel-7-server-rpms]
name=Red Hat Gluster Client RPMS
baseurl={{own_repo_path}}/rh-gluster-3-client-for-rhel-7-server-rpms
enabled=1
gpgcheck=0
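For reference, a sketch of how one stanza of the template above renders, assuming illustrative values own_repo_path=http://admin1.example.com/repos and repo_version=3.9:

[rhel-7-server-ose-3.9-rpms]
name=Red Hat Enterprise Linux 7 OSE 3.9
baseurl=http://admin1.example.com/repos/rhel-7-server-ose-3.9-rpms
enabled=1
gpgcheck=0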
ansible/configs/ocp-adv-deploy-hw/files/userpvs.j2
New file
@@ -0,0 +1,20 @@
---
{%  for pv in range(1,user_vols|int) %}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: vol{{ pv }}
spec:
  capacity:
    storage: {{ pv_size }}
  accessModes:
  - ReadWriteOnce
{% if  pv % 2 == 0 %}
  - ReadWriteMany
{% endif %}
  nfs:
    path: {{ nfs_export_path }}/user-vols/vol{{pv}}
    server: support1.{{guid}}.internal
  persistentVolumeReclaimPolicy: {{ persistentVolumeReclaimPolicy }}
---
{% endfor %}
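For reference, a sketch of the document this template renders for an even-numbered volume (even volumes additionally get ReadWriteMany), assuming illustrative values pv_size=4Gi, nfs_export_path=/srv/nfs, guid=abcd, and persistentVolumeReclaimPolicy=Recycle as set in post_software.yml:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: vol2
spec:
  capacity:
    storage: 4Gi
  accessModes:
  - ReadWriteOnce
  - ReadWriteMany
  nfs:
    path: /srv/nfs/user-vols/vol2
    server: support1.abcd.internal
  persistentVolumeReclaimPolicy: Recycle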
ansible/configs/ocp-adv-deploy-hw/post_infra.yml
New file
@@ -0,0 +1,32 @@
- name: Step 002 Post Infrastructure
  hosts: localhost
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tags:
    - step002
    - post_infrastructure
  tasks:
    - debug:
        msg: "Step 001 Post Infrastructure - There are no post_infrastructure tasks defined"
      when: "not {{ tower_run | default(false) }}"
    - name: Job Template to launch a Job Template with update on launch inventory set
      uri:
        url: "https://{{ ansible_tower_ip }}/api/v1/job_templates/{{ job_template_id }}/launch/"
        method: POST
        user: "{{tower_admin}}"
        password: "{{tower_admin_password}}"
        body:
          extra_vars:
            guid: "{{guid}}"
            ipa_host_password: "{{ipa_host_password}}"
        body_format: json
        validate_certs: False
        HEADER_Content-Type: "application/json"
        status_code: 200, 201
      when: "{{ tower_run | default(false) }}"
      tags:
        - tower_workaround
ansible/configs/ocp-adv-deploy-hw/post_ocp_nfs_config.yml
New file
@@ -0,0 +1,58 @@
- name: Step 00xxxxx post software
  hosts: support
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Create user vols
      shell: "mkdir -p /srv/nfs/user-vols/vol{1..{{user_vols}}}"
    - name: chmod the user vols
      shell: "chmod -R 777 /srv/nfs/user-vols"
- name: Step 00xxxxx post software
  hosts: bastions
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: get nfs Hostname
      set_fact:
        nfs_host: "{{ groups['support']|sort|first }}"
    - set_fact:
        pv_size: '10Gi'
        pv_list: "{{ ocp_pvs }}"
        persistentVolumeReclaimPolicy: Retain
    - name: Generate PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/pvs.j2"
        dest: "/root/pvs-{{ env_type }}-{{ guid }}.yml"
      tags: [ gen_pv_file ]
      when: pv_list.0 is defined
    - set_fact:
        pv_size: "{{user_vols_size}}"
        persistentVolumeReclaimPolicy: Recycle
      notify: restart nfs services
      run_once: True
    - name: Generate user vol PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/userpvs.j2"
        dest: "/root/userpvs-{{ env_type }}-{{ guid }}.yml"
      tags:
        - gen_user_vol_pv
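    # The create-or-replace idiom below keeps re-runs idempotent: `oc create`
    # fails if the objects already exist, and the shell then falls through to
    # `oc replace` to update them.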
    - shell: 'oc create -f /root/pvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/pvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
      when: pv_list.0 is defined
    - shell: 'oc create -f /root/userpvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/userpvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
ansible/configs/ocp-adv-deploy-hw/post_software.yml
New file
@@ -0,0 +1,124 @@
#vim: set ft=ansible:
---
- name: Step 005 - Post Software deployment
  hosts: localhost
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tags:
    - step005
  tasks:
    - name: Generate /etc/ansible/hosts file with lab hosts template
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/labs_hosts_template.j2"
        dest: "{{ ANSIBLE_REPO_PATH }}/workdir/labs_hosts-{{ env_type }}-{{ guid }}"
- name: Configure NFS host for user-vols if required
  hosts: support
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Create user vols
      shell: "mkdir -p /srv/nfs/user-vols/vol{1..{{user_vols}}}"
      tags:
        - openshift_nfs_config
    - name: chmod the user vols
      shell: "chmod -R 777 /srv/nfs/user-vols"
      tags:
        - openshift_nfs_config
- name: Step lab post software deployment
  hosts: bastions
  gather_facts: False
  become: yes
  tags:
    - opentlc_bastion_tasks
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Move complete inventory file to preserve directory.
      shell: mv /etc/ansible/hosts /var/preserve/
      tags: preserve_complete_ansible_inventory
    - name: Copy over ansible hosts file, lab version
      copy:
        backup: no
        src: "{{ ANSIBLE_REPO_PATH }}/workdir/labs_hosts-{{ env_type }}-{{ guid }}"
        dest: /etc/ansible/hosts
      tags:
        - overwrite_hosts_with_lab_hosts
    ## Create PVs for uservols if required
    - name: get nfs Hostname
      set_fact:
        nfs_host: "{{ groups['support']|sort|first }}"
      tags:
        - openshift_nfs_config
    - set_fact:
        pv_size: '10Gi'
        pv_list: "{{ ocp_pvs }}"
        persistentVolumeReclaimPolicy: Retain
      tags:
        - openshift_nfs_config
    - name: Generate PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/pvs.j2"
        dest: "/root/pvs-{{ env_type }}-{{ guid }}.yml"
      when: pv_list.0 is defined
      tags:
        - gen_pv_file
        - openshift_nfs_config
    - set_fact:
        pv_size: "{{user_vols_size}}"
        persistentVolumeReclaimPolicy: Recycle
      tags:
        - openshift_nfs_config
      notify: restart nfs services
      run_once: True
    - name: Generate user vol PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/userpvs.j2"
        dest: "/root/userpvs-{{ env_type }}-{{ guid }}.yml"
      tags:
        - gen_user_vol_pv
        - openshift_nfs_config
    - shell: 'oc create -f /root/pvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/pvs-{{ env_type }}-{{ guid }}.yml'
      when: pv_list.0 is defined
      tags:
        - create_user_pv
        - openshift_nfs_config
    - shell: 'oc create -f /root/userpvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/userpvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
        - openshift_nfs_config
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa"
      when: install_ipa_client|bool
# - name: include post nfs config
#   include: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/post_ocp_nfs_config.yml"
#   tags:
#     - openshift_nfs_config
- name: PostSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Post-Software checks completed successfully"
ansible/configs/ocp-adv-deploy-hw/pre_infra.yml
New file
@@ -0,0 +1,13 @@
- name: Step 000 Pre Infrastructure
  hosts: localhost
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tags:
    - step000
    - pre_infrastructure
  tasks:
    - debug:
        msg: "Step 000 Pre Infrastructure - There are no pre_infrastructure tasks defined"
ansible/configs/ocp-adv-deploy-hw/pre_software.yml
New file
@@ -0,0 +1,74 @@
- name: Step 003 - Create env key
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - step003
    - generate_env_keys
  tasks:
    - name: Generate SSH keys
      shell: ssh-keygen -b 2048 -t rsa -f "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" -q -N ""
      args:
        creates: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}"
      when: set_env_authorized_key
    - name: fix permission
      file:
        path: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}"
        mode: 0400
      when: set_env_authorized_key
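    # ssh-keygen -y re-derives the public half from the private key created above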
    - name: Generate SSH pub key
      shell: ssh-keygen -y -f "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" > "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}.pub"
      args:
        creates: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}.pub"
      when: set_env_authorized_key
# Cloudformation template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }}
- name: Configure all hosts with Repositories, Common Files and Set environment key
  hosts: all
  become: true
  gather_facts: False
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - step004
    - common_tasks
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories", when: 'repo_method is defined' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/common", when: 'install_common' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key", when: 'set_env_authorized_key' }
- name: Configuring Bastion Hosts
  hosts: bastions
  become: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  roles:
    -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion", when: 'install_bastion' }
  tags:
    - step004
    - bastion_tasks
- name: PreSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - flight_check
  tasks:
    - debug:
        msg: "Pre-Software checks completed successfully"
ansible/configs/ocp-adv-deploy-hw/software.yml
New file
@@ -0,0 +1,19 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/ocp-clientvm/files/repos_template.j2
@@ -35,9 +35,11 @@
enabled=1
gpgcheck=0
{% if osrelease | version_compare('3.9', '>=') %}
## Required since OCP 3.9
[rhel-7-server-ansible-2.4-rpms]
name=Red Hat Enterprise Linux Ansible (RPMs)
baseurl={{own_repo_path}}/rhel-7-server-ansible-2.4-rpms
enabled=1
gpgcheck=0
{% endif %}
ansible/configs/ocp-demo-lab/files/userpvs.j2
@@ -1,5 +1,5 @@
---
{%  for pv in range(1,user_vols) %}
{%  for pv in range(1,user_vols|int) %}
apiVersion: v1
kind: PersistentVolume
metadata:
ansible/configs/ocp-ha-disconnected-lab/env_vars.yml
@@ -38,7 +38,7 @@
user_vols: 200
user_vols_size: 4Gi
master_api_port: 443
osrelease: 3.9.17
osrelease: 3.9.30
openshift_master_overwrite_named_certificates: true
deploy_openshift: true
deploy_openshift_post: true
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.3.9.14.j2
@@ -11,7 +11,6 @@
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# default project node selector
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.3.9.25.j2
@@ -11,7 +11,6 @@
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# default project node selector
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.3.9.27.j2
@@ -11,7 +11,6 @@
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# default project node selector
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.3.9.30.j2
New file
@@ -0,0 +1,313 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# default project node selector
osm_default_node_selector='env=app'
openshift_hosted_infra_selector="env=infra"
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
###########################################################################
### OpenShift Optional Vars
###########################################################################
# Enable cockpit
osm_use_cockpit=true
osm_cockpit_plugins=['cockpit-kubernetes']
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
##########################################################################
### Disconnected Install Vars
### Requires a docker registry at isolated1.{{guid}}.internal:5000
###########################################################################
# sets the debug level for all OpenShift components.  Default is 2
#debug_level=8
# used for container-based install, not RPM
system_images_registry=isolated1.{{guid}}.internal:5000
# https://bugzilla.redhat.com/show_bug.cgi?id=1461465  target release 3.9
# The enterprise registry will not be added to the docker registries.
openshift_docker_ent_reg=''
# https://bugzilla.redhat.com/show_bug.cgi?id=1516534 target release 3.10
#  does not update all image names
# 3.9.30 bug: when you want to access registry.access.redhat.com
#  you must indicate the hostname:
# oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version}
oreg_url=isolated1.{{guid}}.internal:5000/openshift3/ose-${component}:${version}
openshift_examples_modify_imagestreams=true
openshift_docker_additional_registries=isolated1.{{guid}}.internal:5000
openshift_docker_insecure_registries=isolated1.{{guid}}.internal:5000
openshift_docker_blocked_registries=registry.access.redhat.com,docker.io
openshift_metrics_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_metrics_image_version=v3.9.14
openshift_logging_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_logging_image_version=v3.9.14
ansible_service_broker_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/ose-
ansible_service_broker_image_tag=v3.9.14
ansible_service_broker_etcd_image_prefix=isolated1.{{guid}}.internal:5000/rhel7/
ansible_service_broker_etcd_image_tag=latest
openshift_service_catalog_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/ose-
openshift_service_catalog_image_version=v3.9.14
openshift_cockpit_deployer_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_cockpit_deployer_version=v3.9.14
template_service_broker_prefix=isolated1.{{guid}}.internal:5000/openshift3/ose-
template_service_broker_version=v3.9.14
openshift_web_console_prefix=isolated1.{{guid}}.internal:5000/openshift3/ose-
openshift_web_console_version=v3.9.14
# PROMETHEUS SETTINGS
openshift_prometheus_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_prometheus_image_version=v3.9.14
openshift_prometheus_alertmanager_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_prometheus_alertmanager_image_version=v3.9.14
openshift_prometheus_alertbuffer_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_prometheus_alertbuffer_image_version=v3.9.14
openshift_prometheus_oauth_proxy_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_prometheus_oauth_proxy_image_version=v3.9.14
openshift_prometheus_node_exporter_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_prometheus_node_exporter_image_version=v3.9.14
##########################################################################
## OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" %}
openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com', 'bindPassword': '{{bindPassword}}', 'ca': 'ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid'}]
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "allow_all"  %}
openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
{% endif %}
{% if install_idm == "htpasswd"  %}
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
openshift_metrics_install_metrics={{install_metrics}}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_nodeselector={"env":"infra"}
openshift_metrics_hawkular_nodeselector={"env":"infra"}
openshift_metrics_heapster_nodeselector={"env":"infra"}
## Add Prometheus Metrics:
openshift_hosted_prometheus_deploy=true
openshift_prometheus_node_selector={"env":"infra"}
openshift_prometheus_namespace=openshift-metrics
# Prometheus
openshift_prometheus_storage_kind=nfs
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_nfs_directory=/srv/nfs
openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_storage_volume_name=prometheus
openshift_prometheus_storage_volume_size=10Gi
openshift_prometheus_storage_labels={'storage': 'prometheus'}
openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
openshift_prometheus_alertmanager_storage_kind=nfs
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_kind=nfs
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
openshift_prometheus_alertbuffer_storage_type='pvc'
# Already set in the disconnected section
# openshift_prometheus_node_exporter_image_version=v3.9
# Enable cluster logging
openshift_logging_install_logging={{install_logging}}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
# openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}}
openshift_logging_es_cluster_size=1
openshift_logging_es_nodeselector={"env":"infra"}
openshift_logging_kibana_nodeselector={"env":"infra"}
openshift_logging_curator_nodeselector={"env":"infra"}
###########################################################################
### OpenShift Project Management Vars
###########################################################################
# Configure additional projects
openshift_additional_projects={'openshift-template-service-broker': {'default_node_selector': ''}}
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_replicas={{infranode_instance_count}}
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
openshift_hosted_etcd_storage_kind=nfs
openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)"
openshift_hosted_etcd_storage_nfs_directory=/srv/nfs
openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'}
openshift_hosted_etcd_storage_volume_name=etcd-asb
openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce']
openshift_hosted_etcd_storage_volume_size=10G
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
{% if new_node_instance_count > 0 %}
new_nodes
{% endif %}
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[masters]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[etcd]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'app', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
{% if new_node_instance_count > 0 %}
# scaleup performed, leave an empty group, see:
# https://docs.openshift.com/container-platform/3.5/install_config/adding_hosts_to_existing_cluster.html
[new_nodes]
{% endif %}
[nfs]
{% for host in groups['support'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}
{% endfor %}
ansible/configs/ocp-ha-disconnected-lab/files/userpvs.j2
@@ -1,5 +1,5 @@
---
{%  for pv in range(1,user_vols) %}
{%  for pv in range(1,user_vols|int) %}
apiVersion: v1
kind: PersistentVolume
metadata:
ansible/configs/ocp-ha-lab/env_vars.yml
@@ -20,7 +20,7 @@
user_vols: 200
user_vols_size: 4Gi
master_api_port: 443
osrelease: 3.9.27
osrelease: 3.9.30
openshift_master_overwrite_named_certificates: true
deploy_openshift: false
deploy_openshift_post: false
ansible/configs/ocp-ha-lab/files/hosts_template.3.9.27.j2
@@ -11,7 +11,6 @@
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="memory_availability"
# Default node selectors
@@ -227,4 +226,4 @@
#[glusterfs]
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
{% endfor %}
ansible/configs/ocp-ha-lab/files/hosts_template.3.9.30.j2
New file
@@ -0,0 +1,235 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
openshift_disable_check="memory_availability"
# Default node selectors
osm_default_node_selector='env=app'
openshift_hosted_infra_selector="env=infra"
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
###########################################################################
### OpenShift Network Vars
###########################################################################
#osm_cluster_network_cidr=10.1.0.0/16
#openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
# htpasswd Authentication
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
openshift_master_htpasswd_file=/root/htpasswd.openshift
# LDAP Authentication (download ipa-ca.crt first)
# openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}]
# openshift_master_ldap_ca_file=/root/ipa-ca.crt
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
# Bug in 3.9.30 - Bug 1583500 - Unqualified image names are completed with "docker.io"
# https://bugzilla.redhat.com/show_bug.cgi?id=1583500
# Workaround:
oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version}
openshift_examples_modify_imagestreams=true
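# Illustrative expansion (editorial): the installer substitutes ${component}
# and ${version} in oreg_url, pulling e.g.
#   registry.access.redhat.com/openshift3/ose-docker-registry:v3.9.30
# rather than letting the unqualified image name default to docker.io.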
openshift_hosted_router_replicas={{infranode_instance_count}}
# openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
openshift_hosted_etcd_storage_kind=nfs
openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)"
openshift_hosted_etcd_storage_nfs_directory=/srv/nfs
openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'}
openshift_hosted_etcd_storage_volume_name=etcd-asb
openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce']
openshift_hosted_etcd_storage_volume_size=10G
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
openshift_metrics_install_metrics={{install_metrics}}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_nodeselector={"env":"infra"}
openshift_metrics_hawkular_nodeselector={"env":"infra"}
openshift_metrics_heapster_nodeselector={"env":"infra"}
# Enable cluster logging
openshift_logging_install_logging={{install_logging}}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
# openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}}
openshift_logging_es_cluster_size=1
openshift_logging_es_nodeselector={"env":"infra"}
openshift_logging_kibana_nodeselector={"env":"infra"}
openshift_logging_curator_nodeselector={"env":"infra"}
###########################################################################
### OpenShift Prometheus Vars
###########################################################################
## Add Prometheus Metrics:
openshift_hosted_prometheus_deploy=true
openshift_prometheus_node_selector={"env":"infra"}
openshift_prometheus_namespace=openshift-metrics
# Prometheus
openshift_prometheus_storage_kind=nfs
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_nfs_directory=/srv/nfs
openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_storage_volume_name=prometheus
openshift_prometheus_storage_volume_size=10Gi
openshift_prometheus_storage_labels={'storage': 'prometheus'}
openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
openshift_prometheus_alertmanager_storage_kind=nfs
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_kind=nfs
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
openshift_prometheus_alertbuffer_storage_type='pvc'
# Necessary because of a bug in the installer on 3.9
openshift_prometheus_node_exporter_image_version=v3.9
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
#glusterfs
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[masters]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env':'master', 'cluster': '{{guid}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'infra', 'cluster': '{{guid}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'app', 'cluster': '{{guid}}'}"
{% endfor %}
## These are CNS nodes
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'glusterfs', 'cluster': '{{guid}}'}"
{% endfor %}
[nfs]
{% set nfshost = groups['support']|sort|first %}
{{ hostvars[nfshost].internaldns }} openshift_hostname={{ hostvars[nfshost].internaldns }}
#[glusterfs]
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
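# Illustrative rendering (editorial): each nodes entry above expands to a
# line such as
#   node1.<guid>.internal openshift_hostname=node1.<guid>.internal openshift_node_labels="{'env':'app', 'cluster': '<guid>'}"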
ansible/configs/ocp-ha-lab/files/userpvs.j2
@@ -1,5 +1,5 @@
---
{%  for pv in range(1,user_vols) %}
{%  for pv in range(1,user_vols|int) %}
apiVersion: v1
kind: PersistentVolume
metadata:
ansible/configs/ocp-implementation-lab/README.adoc
@@ -1,4 +1,4 @@
= OPENTLC OCP-IMPLEMENTATION-LAB Env_Type config
= OPENTLC OCP-HA-LAB Env_Type config
This config includes things such as EC2 instance names, secret
variables such as private/public key pair information, passwords, etc.
@@ -12,10 +12,9 @@
* You need to provide some credentials for deployments to work
* Create a file called "env_secret_vars.yml" and put it in the
 ./ansible/configs/CONFIGNAME/ directory.
** At this point this file has to be created even if no vars from it are used.
** At this point this file *has to be created* even if no vars from it are used.
* You can choose to provide these values as extra vars (-e "var=value") on the
 command line if you prefer not to keep sensitive information in a file.
* In the future we will use ansible vault for this.
.Example contents of "Secret" Vars file
----
@@ -35,6 +34,11 @@
# ## AWS Credentials. This is required.
aws_access_key_id: ""
aws_secret_access_key: ""
#If using repo_method: satellite, you must set these values as well.
# satellite_url: https://satellite.example.com
# satellite_org: Sat_org_name
# satellite_activationkey: "rhel7basic"
----
@@ -50,36 +54,46 @@
----
# Set your environment variables (this is optional, but makes life easy)
REGION=us-east-1
REGION=ap-southeast-1
KEYNAME=ocpkey
GUID=testimp35
ENVTYPE="ocp-implementation-lab"
CLOUDPROVIDER=ec2
GUID=testnewec21
ENVTYPE="ocp-ha-lab"
CLOUDPROVIDER=ec2v2
HOSTZONEID='Z3IHLWJZOU9SRT'
REPO_PATH='https://admin.example.com/repos/ocp/3.5/'
BASESUFFIX='.example.opentlc.com'
REPO_PATH='https://admin.example.com/repos/ocp/3.6/'
REPO_VERSION=3.5
BASESUFFIX='.example.opentlc.com'
IPAPASS=aaaaaa
REPO_VERSION=3.6
NODE_COUNT=2
IPAPASS=ipapass
DEPLOYER_REPO_PATH=`pwd`
LOG_FILE=$(pwd)/${ENVTYPE}-${GUID}.log
## For an HA environment where OpenShift is not installed
time ansible-playbook ./main.yml \
    -e "osrelease=3.5.5.5" -e "repo_version=${REPO_VERSION}" -e "docker_version=1.12.6" \
    -e "guid=${GUID}" -e "env_type=${ENVTYPE}" -e "cloud_provider=${CLOUDPROVIDER}" \
    -e "aws_region=${REGION}"  -e "HostedZoneId=${HOSTZONEID}" -e "key_name=${KEYNAME}" \
    -e "subdomain_base_suffix=${BASESUFFIX}"  -e "install_idm=htpasswd" \
    -e "node_instance_count=${NODE_COUNT}" -e "infranode_instance_count=1" -e "master_instance_count=1" \
    -e "software_to_deploy=none"  -e "own_repo_path=${REPO_PATH}" -e "ipa_host_password=${IPAPASS}" \
    -e "tower_run=false"
  ansible-playbook ${DEPLOYER_REPO_PATH}/main.yml  \
      -e "guid=${GUID}" -e "env_type=${ENVTYPE}" -e "key_name=${KEYNAME}" \
      -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}" -e "HostedZoneId=${HOSTZONEID}" \
      -e "subdomain_base_suffix=${BASESUFFIX}" \
      -e "bastion_instance_type=t2.medium" -e "master_instance_type=t2.large" \
      -e "infranode_instance_type=t2.large" -e "node_instance_type=t2.large" \
      -e "support_instance_type=t2.medium" -e "node_instance_count=${NODE_COUNT}" \
      -e "ipa_host_password=${IPAPASS}" -e "install_idm=htpasswd"  \
      -e "email=name@example.com" \
      -e "repo_method=file" -e "own_repo_path=${REPO_PATH}" -e "repo_version=${REPO_VERSION}" \
      -e "software_to_deploy=openshift" -e "osrelease=3.6.173.0.21" -e "docker_version=1.12.6" \
      -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
      --skip-tags=installing_openshift,get_openshift_credentials 1>> $LOG_FILE 2>> $LOG_FILE
. To delete an environment
----
#To Destroy an Env
ansible-playbook ./configs/${ENVTYPE}/destroy_env.yml \
 -e "guid=${GUID}" -e "env_type=${ENVTYPE}"  -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}"  \
 -e "HostedZoneId=${HOSTZONEID}"  -e "key_name=${KEYNAME}"  -e "subdomain_base_suffix=${BASESUFFIX}"
ansible-playbook  \
    ${DEPLOYER_REPO_PATH}/configs/${ENVTYPE}/destroy_env.yml \
    -e "guid=${GUID}" -e "env_type=${ENVTYPE}" \
    -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}"  -e "HostedZoneId=${HOSTZONEID}" \
    -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
    -e "key_name=${KEYNAME}"  -e "subdomain_base_suffix=${BASESUFFIX}"
----
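. To follow a running deployment (editorial suggestion, using the LOG_FILE
variable set above)
----
tail -f ${LOG_FILE}
----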
ansible/configs/ocp-implementation-lab/destroy_env.yml
@@ -8,31 +8,6 @@
    - "./env_secret_vars.yml"
  tasks:
    # - name: get internal dns zone id if not provided
    #   environment:
    #     AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    #     AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
    #     AWS_DEFAULT_REGION: "{{aws_region}}"
    #   shell: "aws route53 list-hosted-zones-by-name --region={{aws_region}} --dns-name={{guid}}.internal. --output text --query='HostedZones[*].Id' | awk -F'/' '{print $3}'"
    #   register: internal_zone_id_register
    # - debug:
    #     var: internal_zone_id_register
    # - name: Store internal route53 ID
    #   set_fact:
    #     internal_zone_id: "{{ internal_zone_id_register.stdout }}"
    #   when: 'internal_zone_id_register is defined'
    # - name: delete internal dns names
    #   environment:
    #     AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    #     AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
    #     AWS_DEFAULT_REGION: "{{aws_region}}"
    #   shell: "aws route53 change-resource-record-sets --hosted-zone-id {{internal_zone_id}}  --change-batch file://{{ ANSIBLE_REPO_PATH }}/workdir/internal_dns-{{ env_type }}-{{ guid }}_DELETE.json --region={{aws_region}}"
    #   ignore_errors: true
    #   tags:
    #     - internal_dns_delete
    #   when: internal_zone_id is defined
    - name: Destroy cloudformation template
      cloudformation:
        stack_name: "{{project_tag}}"
ansible/configs/ocp-implementation-lab/env_vars.yml
@@ -1,11 +1,3 @@
## TODO: What variables can we strip out of here to build complex variables?
## i.e. what can we add into group_vars as opposed to config_vars?
## Example: We don't really need "subdomain_base_short". If we want to use this,
## we should just toss it into group_vars/all.
### Also, we should probably just create a variable reference in the README.md
### For now, just tagging comments in line with configuration file.
deploy_local_ssh_config_location: "{{ ANSIBLE_REPO_PATH }}/workdir"
###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
###### OR PASS as "-e" args to ansible-playbook command
# #
@@ -16,56 +8,55 @@
# #   nodes: "tag_AnsibleGroup_nodes"
# #   infranodes: "tag_AnsibleGroup_infranodes"
# #   nfs: "tag_AnsibleGroup_nfs"
#
# # This doesn't work
# all: "tag_Project_opentlc_shared_{{guid}}"
#
# # but maybe this is silly enough to work
# #all: "tag_Project_opentlc_shared_{{guid}}:&tag_Project_opentlc_shared_{{guid}}"
#rhn_pool_id_string: OpenShift Container Platform
# bastions: "{{env_groups['limit']}}:&{{env_groups['bastions']}}"
# masters: "{{env_groups['limit']}}:&{{env_groups['masters']}}"
# nodes: "{{env_groups['limit']}}:&{{env_groups['nodes']}}"
# infranodes: "{{env_groups['limit']}}:&{{env_groups['infranodes']}}"
# nfs: "{{env_groups['limit']}}:&{{env_groups['nfs']}}"
#
# ocp_pvs:
#   - es-storage
#   - nexus
#   - nexus2
#   - nexus3
install_ipa_client: false
repo_method: file
ocp_pvs:
  # - es-storage
  # - nexus
  # - nexus2
  # - nexus3
config_nfs_uservols: "true"
user_vols: 200
user_vols_size: 4Gi
# master_api_port: 443
# osrelease: 3.4.1.10
# openshift_master_overwrite_named_certificates: true
# deploy_openshift: true
# deploy_openshift_post: true
deploy_env_post: true
# install_metrics: true
# install_logging: true
# multi_tenant_setting: "os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'"
# master_lb_dns: "master.{{subdomain_base}}"
# cloudapps_suffix: 'cloudapps.{{subdomain_base}}'
# openshift_master_ldap_ca_file: 'openshift_master_ldap_ca_file=/root/ca.crt'
master_api_port: 443
osrelease: 3.7.32
openshift_master_overwrite_named_certificates: true
deploy_openshift: false
deploy_openshift_post: false
deploy_env_post: false
install_metrics: true
install_logging: true
ovs_plugin: "subnet" # This can also be set to: "multitenant" or "networkpolicy"
multi_tenant_setting: "os_sdn_network_plugin_name='redhat/openshift-ovs-{{ovs_plugin}}'"
master_lb_dns: "loadbalancer1.{{subdomain_base}}"
cloudapps_suffix: 'apps.{{subdomain_base}}'
openshift_master_ldap_ca_file: 'openshift_master_ldap_ca_file=/root/ca.crt'
 ## If you are not part of GPTE you don't need this.
opentlc_integration: true
################################################################################
#### GENERIC EXAMPLE
################################################################################
install_common: true
install_nfs: true
glusterfs_hosted_device_name: /dev/xvdc
glusterfs_hosted_device_size: 300
glusterfs_app_device_name: /dev/xvdd
glusterfs_app_device_size: 300
install_bastion: false
env_authorized_key: "{{guid}}key"
set_env_authorized_key: true
software_to_deploy: "none"
software_to_deploy: "openshift"
################################################################################
#### OCP IMPLEMENTATION LAB
################################################################################
repo_version: '3.4'
repo_version: '3.7'
cloudapps_dns: '*.apps.{{subdomain_base}}.'
master_public_dns: "master.{{subdomain_base}}."
master_public_dns: "loadbalancer.{{subdomain_base}}."
################################################################################
#### Common host variables
@@ -73,27 +64,33 @@
update_packages: false
common_packages:
  # - python
  # - unzip
  # - bash-completion
  - python
  - unzip
  - bash-completion
  - tmux
  # - bind-utils
  # - wget
  # - git
  # - vim-enhanced
  # - ansible
  - bind-utils
  - wget
  - git
  - vim-enhanced
  - ansible
  - net-tools
  - iptables-services
  - bridge-utils
  - sos
  - psacct
rhel_repos:
  - rhel-7-server-rpms
  - rhel-7-server-extras-rpms
  - rhel-7-server-ose-{{repo_version}}-rpms
use_own_repos: true
# omit for 3.7 homework
#  - rh-gluster-3-client-for-rhel-7-server-rpms
#  - rhel-7-server-ansible-2.4-rpms
use_subscription_manager: false
use_own_repos: true
#rhn_pool_id_string: "Red Hat Enterprise Linux Server"
#rhn_pool_id_string: OpenShift Container Platform
rhn_pool_id_string: OpenShift Container Platform
################################################################################
#### nfs host settings
@@ -102,13 +99,11 @@
nfs_vg: nfsvg
nfs_pvs: /dev/xvdb
nfs_export_path: /srv/nfs
nfs_size: 50
nfs_shares:
  - logging
  - metrics
  - jenkins
  - nexus
  - justanother
  # - jenkins
  # - nexus
################################################################################
#### CLOUD PROVIDER: AWS SPECIFIC VARIABLES
@@ -135,30 +130,27 @@
#### Environment Sizing
#bastion_instance_type: "t2.micro"
bastion_instance_type: "t2.small"
bastion_instance_type: "t2.xlarge"
support_instance_type: "m4.large"
support_instance_type: "t2.medium"
support_instance_count: 1
node_instance_type: "t2.large"
node_instance_count: 2
node_instance_count: 3
infranode_instance_type: "t2.large"
infranode_instance_count: 1
new_node_instance_type: "t2.large"
new_node_instance_count: 0
infranode_instance_type: "t2.xlarge"
infranode_instance_count: 2
master_instance_type: "t2.large"
master_instance_count: 1
master_instance_count: 3
loadbalancer_instance_count: 0
#loadbalancer_instance_type: "t2.micro"
loadbalancer_instance_count: 1
loadbalancer_instance_type: "t2.small"
ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
#### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYMENT
#### You can, but you usually wouldn't need to.
@@ -166,45 +158,123 @@
#### CLOUDFORMATIONS vars
project_tag: "{{ env_type }}-{{ guid }}"
#
# docker_version: "1.12.6"
# docker_device: /dev/xvdb
docker_version: "{{ '1.12.6' if repo_version | version_compare('3.9', '<')  else '1.13.1' }}"
docker_device: /dev/xvdb
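# Editorial example: with repo_version '3.7' the expression above selects
# docker 1.12.6; with '3.9' (or later) it selects 1.13.1.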
create_internal_dns_entries: true
zone_internal_dns: "{{guid}}.internal."
chomped_zone_internal_dns: "{{guid}}.internal"
zone_public_dns: "{{subdomain_base}}."
bastion_public_dns: "bastion.{{subdomain_base}}."
bastion_public_dns_chomped: "bastion.{{subdomain_base}}"
vpcid_cidr_block: "192.168.0.0/16"
# vpcid_cidr_block: "192.168.0.0/16"
vpcid_name_tag: "{{subdomain_base}}"
az_1_name: "{{ aws_region }}a"
az_2_name: "{{ aws_region }}b"
rootfs_size_node: 50
rootfs_size_infranode: 50
rootfs_size_master: 50
rootfs_size_bastion: 20
rootfs_size_support: 20
rootfs_size_loadbalancer: 20
subnet_private_1_cidr_block: "192.168.2.0/24"
subnet_private_1_az: "{{ az_2_name }}"
subnet_private_1_name_tag: "{{subdomain_base}}-private"
instances:
  - name: "bastion"
    count: 1
    unique: true
    public_dns: true
    dns_loadbalancer: true
    flavor:
      "ec2": "{{bastion_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "bastions"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_bastion }}"
subnet_private_2_cidr_block: "192.168.1.0/24"
subnet_private_2_az: "{{ az_1_name }}"
subnet_private_2_name_tag: "{{subdomain_base}}-private"
  - name: "loadbalancer"
    count: "{{loadbalancer_instance_count}}"
    public_dns: true
    dns_loadbalancer: true
    flavor:
      "ec2": "{{loadbalancer_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "loadbalancers"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_loadbalancer }}"
subnet_public_1_cidr_block: "192.168.10.0/24"
subnet_public_1_az: "{{ az_1_name }}"
subnet_public_1_name_tag: "{{subdomain_base}}-public"
  - name: "master"
    count: "{{master_instance_count}}"
    public_dns: false
    dns_loadbalancer: false
    flavor:
      "ec2": "{{master_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "masters"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_master }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: 20
        volume_type: gp2
subnet_public_2_cidr_block: "192.168.20.0/24"
subnet_public_2_az: "{{ az_2_name }}"
subnet_public_2_name_tag: "{{subdomain_base}}-public"
  - name: "node"
    count: "{{node_instance_count}}"
    public_dns: false
    dns_loadbalancer: false
    flavor:
      "ec2": "{{node_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "nodes"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_node }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: 100
        volume_type: gp2
dopt_domain_name: "{{ aws_region }}.compute.internal"
  - name: "infranode"
    count: "{{infranode_instance_count}}"
    public_dns: true
    dns_loadbalancer: true
    flavor:
      "ec2": "{{infranode_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "infranodes"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_infranode }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: 50
        volume_type: gp2
rtb_public_name_tag: "{{subdomain_base}}-public"
rtb_private_name_tag: "{{subdomain_base}}-private"
cf_template_description: "{{ env_type }}-{{ guid }} template"
  - name: "support"
    count: "{{support_instance_count}}"
    public_dns: false
    dns_loadbalancer: false
    flavor:
      "ec2": "{{support_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "support"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_support }}"
    volumes:
      - device_name: "{{nfs_pvs}}"
        volume_size: "{{nfs_size}}"
        volume_type: gp2
      - device_name: "{{docker_device}}"
        volume_size: "50"
        volume_type: gp2
 # gluster configs removed for homework env
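# Editorial note: each "instances" entry above is consumed by the generic loop
# in files/cloud_providers/ec2_cloud_template.j2 below; a hypothetical minimal
# entry would render one EC2 instance plus its internal DNS record (an EIP and
# public record are only added when public_dns is true):
#   - name: "lab"
#     count: 1
#     public_dns: false
#     dns_loadbalancer: false
#     flavor:
#       "ec2": "t2.large"
#     tags:
#       - key: "AnsibleGroup"
#         value: "nodes"
#       - key: "ostype"
#         value: "linux"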
ansible/configs/ocp-implementation-lab/files/cloud_providers/ec2_cloud_template.j2
@@ -1,726 +1,361 @@
{
  "AWSTemplateFormatVersion": "2010-09-09",
  "Parameters": { },
  "Mappings": {
    "RegionMapping": {
      "us-east-1": {
        "AMI": "ami-c998b6b2"
      },
      "us-east-2": {
        "AMI": "ami-cfdafaaa"
      },
      "us-west-1": {
        "AMI": "ami-66eec506"
      },
      "us-west-2": {
        "AMI": "ami-9fa343e7"
      },
      "eu-west-1": {
        "AMI": "ami-bb9a6bc2"
      },
      "eu-central-1": {
        "AMI": "ami-d74be5b8"
      },
      "ap-northeast-1": {
        "AMI": "ami-30ef0556"
      },
      "ap-northeast-2": {
        "AMI": "ami-0f5a8361"
      },
      "ap-southeast-1": {
        "AMI": "ami-10bb2373"
      },
      "ap-southeast-2": {
        "AMI": "ami-ccecf5af"
      },
      "sa-east-1": {
        "AMI": "ami-a789ffcb"
      },
      "ap-south-1": {
        "AMI": "ami-cdbdd7a2"
      }
    },
    "DNSMapping": {
      "us-east-1": {
        "domain": "us-east-1.compute.internal"
      },
      "us-west-1": {
        "domain": "us-west-1.compute.internal"
      },
      "us-west-2": {
        "domain": "us-west-2.compute.internal"
      },
      "eu-west-1": {
        "domain": "eu-west-1.compute.internal"
      },
      "eu-central-1": {
        "domain": "eu-central-1.compute.internal"
      },
      "ap-northeast-1": {
        "domain": "ap-northeast-1.compute.internal"
      },
      "ap-northeast-2": {
        "domain": "ap-northeast-2.compute.internal"
      },
      "ap-southeast-1": {
        "domain": "ap-southeast-1.compute.internal"
      },
      "ap-southeast-2": {
        "domain": "ap-southeast-2.compute.internal"
      },
      "sa-east-1": {
        "domain": "sa-east-1.compute.internal"
      },
      "ap-south-1": {
        "domain": "ap-south-1.compute.internal"
      }
    }
  },
  "Resources": {
    "Vpc": {
      "Type": "AWS::EC2::VPC",
      "Properties": {
        "CidrBlock": "192.199.0.0/16",
        "EnableDnsSupport": "true",
        "EnableDnsHostnames": "true",
        "Tags": [
          {
            "Key": "Name",
            "Value": "VPCID_NAME_TAG"
          },
          {
            "Key": "Hostlication",
            "Value": {
              "Ref": "AWS::StackId"
            }
          }
        ]
      }
    },
    "VpcInternetGateway": {
      "Type": "AWS::EC2::InternetGateway",
      "Properties": {}
    },
    "VpcGA": {
      "Type": "AWS::EC2::VPCGatewayAttachment",
      "Properties": {
        "InternetGatewayId": {
          "Ref": "VpcInternetGateway"
        },
        "VpcId": {
          "Ref": "Vpc"
        }
      }
    },
    "VpcRouteTable": {
      "Type": "AWS::EC2::RouteTable",
      "Properties": {
        "VpcId": {
          "Ref": "Vpc"
        }
      }
    },
    "VPCRouteInternetGateway": {
      "DependsOn" : "VpcGA",
  "Type": "AWS::EC2::Route",
      "Properties": {
        "GatewayId": {
          "Ref": "VpcInternetGateway"
        },
        "DestinationCidrBlock": "0.0.0.0/0",
        "RouteTableId": {
          "Ref": "VpcRouteTable"
        }
      }
    },
    "PublicSubnet": {
      "Type": "AWS::EC2::Subnet",
      "DependsOn": [
        "Vpc"
      ],
      "Properties": {
        "CidrBlock": "192.199.0.0/24",
        "Tags": [
          {
            "Key": "Name",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "Hostlication",
            "Value": {
              "Ref": "AWS::StackId"
            }
          }
        ],
        "MapPublicIpOnLaunch": "true",
        "VpcId": {
          "Ref": "Vpc"
        }
      }
    },
    "PublicSubnetRTA": {
      "Type": "AWS::EC2::SubnetRouteTableAssociation",
      "Properties": {
        "RouteTableId": {
          "Ref": "VpcRouteTable"
        },
        "SubnetId": {
          "Ref": "PublicSubnet"
        }
      }
    },
    "HostSG": {
      "Type": "AWS::EC2::SecurityGroup",
      "Properties": {
        "GroupDescription": "Host",
        "VpcId": {
          "Ref": "Vpc"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "host_sg"
          }
        ]
      }
    },
    "HostUDPPorts": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        },
        "IpProtocol": "udp",
        "FromPort": "0",
        "ToPort": "65535",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "HostTCPPorts": {
      "Type": "AWS::EC2::SecurityGroupIngress",
      "Properties": {
        "GroupId": {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        },
        "IpProtocol": "tcp",
        "FromPort": "0",
        "ToPort": "65535",
        "CidrIp": "0.0.0.0/0"
      }
    },
    "zoneinternalidns": {
      "Type": "AWS::Route53::HostedZone",
      "Properties": {
        "Name": "{{ zone_internal_dns }}",
        "VPCs" :  [{
      "VPCId": { "Ref" : "Vpc" },
      "VPCRegion": { "Ref": "AWS::Region" } } ],
        "HostedZoneConfig": {
          "Comment": "Created By ansible agnostic deployer"
        }
      }
    },
    "BastionDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "Properties": {
        "HostedZoneId": "{{HostedZoneId}}",
        "RecordSets": [
          {
            "Name": "{{bastion_public_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "Bastion",
                  "PublicIp"
                ]
              }
            ]
          }
        ]
      }
    },
    "MasterDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "Properties": {
        "HostedZoneId": "{{HostedZoneId}}",
        "RecordSets": [
          {
            "Name": "{{master_public_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "master1",
                  "PublicIp"
                ]
              }
            ]
          }
        ]
      }
    },
    "CloudDNS": {
      "Type": "AWS::Route53::RecordSetGroup",
      "DependsOn": "Bastion",
      "Properties": {
        "HostedZoneId": "{{HostedZoneId}}",
        "RecordSets": [
          {
            "Name": "{{cloudapps_dns}}",
            "Type": "A",
            "TTL": "10",
            "ResourceRecords": [
              {
                "Fn::GetAtt": [
                  "Bastion",
                  "PublicIp"
                ]
              }
            ]
          }
        ]
      }
    },
    "Bastion": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {
          "Fn::FindInMap": [
            "RegionMapping",
            {
              "Ref": "AWS::Region"
            },
            "AMI"
          ]
        },
        "InstanceType": "{{bastion_instance_type}}",
        "KeyName": "{{key_name}}",
        "SecurityGroupIds": [
          {
            "Fn::GetAtt": [
              "HostSG",
              "GroupId"
            ]
          }
        ],
        "SubnetId": {
          "Ref": "PublicSubnet"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "bastion"
          },
          {
            "Key": "AnsibleGroup",
            "Value": "bastions"
          },
          {
            "Key": "Project",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "{{ project_tag }}",
            "Value": "bastion"
          }
        ]
      }
#jinja2: lstrip_blocks: True
---
AWSTemplateFormatVersion: "2010-09-09"
Mappings:
  RegionMapping:
    us-east-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6871a115
      {% else %}
      RHELAMI: ami-c998b6b2
      {% endif %}
    us-east-2:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-03291866
      {% else %}
      RHELAMI: ami-cfdafaaa
      {% endif %}
    us-west-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-18726478
      {% else %}
      RHELAMI: ami-66eec506
      {% endif %}
    us-west-2:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-223f945a
      {% else %}
      RHELAMI: ami-9fa343e7
      {% endif %}
    eu-west-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-7c491f05
      {% else %}
      RHELAMI: ami-bb9a6bc2
      {% endif %}
    eu-central-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-c86c3f23
      {% else %}
      RHELAMI: ami-d74be5b8
      {% endif %}
    ap-northeast-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6b0d5f0d
      {% else %}
      RHELAMI: ami-30ef0556
      {% endif %}
    ap-northeast-2:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-3eee4150
      {% else %}
      RHELAMI: ami-0f5a8361
      {% endif %}
    ap-southeast-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-76144b0a
      {% else %}
      RHELAMI: ami-10bb2373
      {% endif %}
    ap-southeast-2:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-67589505
      {% else %}
      RHELAMI: ami-ccecf5af
      {% endif %}
    ap-south-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-5b673c34
      {% else %}
      RHELAMI: ami-cdbdd7a2
      {% endif %}
    sa-east-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-b0b7e3dc
      {% else %}
      RHELAMI: ami-a789ffcb
      {% endif %}
  DNSMapping:
    us-east-1:
      domain: "us-east-1.compute.internal"
    us-west-1:
      domain: "us-west-1.compute.internal"
    us-west-2:
      domain: "us-west-2.compute.internal"
    eu-west-1:
      domain: "eu-west-1.compute.internal"
    eu-central-1:
      domain: "eu-central-1.compute.internal"
    ap-northeast-1:
      domain: "ap-northeast-1.compute.internal"
    ap-northeast-2:
      domain: "ap-northeast-2.compute.internal"
    ap-southeast-1:
      domain: "ap-southeast-1.compute.internal"
    ap-southeast-2:
      domain: "ap-southeast-2.compute.internal"
    sa-east-1:
      domain: "sa-east-1.compute.internal"
    ap-south-1:
      domain: "ap-south-1.compute.internal"
  },
  "BastionInternalDNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
Resources:
  Vpc:
    Type: "AWS::EC2::VPC"
    Properties:
      CidrBlock: "192.199.0.0/16"
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
        - Key: Name
          Value: "{{vpcid_name_tag}}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
      "RecordSets": [
        {
          "Name": "bastion.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "Bastion",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
  {% for c in range(1,(master_instance_count|int)+1) %}
  VpcInternetGateway:
    Type: "AWS::EC2::InternetGateway"
    "master{{c}}": {
      "Type": "AWS::EC2::Instance",
      "Properties": {
        "ImageId": {
          "Fn::FindInMap": [
            "RegionMapping",
            {
              "Ref": "AWS::Region"
            },
            "AMI"
          ]
        },
        "InstanceType": "{{master_instance_type}}",
        "KeyName": "{{key_name}}",
        "SecurityGroupIds": [
          {
            "Fn::GetAtt": [
              "HostSG",
              "GroupId"
            ]
          }
        ],
  VpcGA:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId:
        Ref: VpcInternetGateway
      VpcId:
        Ref: Vpc
        "SubnetId": {
          "Ref": "PublicSubnet"
        },
        "Tags": [
          {
            "Key": "Name",
            "Value": "master"
          },
          {
            "Key": "AnsibleGroup",
            "Value": "masters"
          },
          {
            "Key": "Project",
            "Value": "{{project_tag}}"
          },
          {
            "Key": "{{ project_tag }}",
            "Value": "master"
          }
        ],
        "BlockDeviceMappings": [
          {
            "DeviceName": "/dev/xvda",
            "Ebs": {
              "VolumeSize": 30
            }
          },
          {
            "DeviceName": "/dev/xvdb",
            "Ebs": {
              "VolumeType": "gp2",
              "VolumeSize": 20
            }
          }
        ]
      }
  },
  "master{{loop.index}}DNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
  VpcRouteTable:
    Type: "AWS::EC2::RouteTable"
    Properties:
      VpcId:
        Ref: Vpc
      "RecordSets": [
        {
          "Name": "master{{loop.index}}.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "master{{loop.index}}",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
  {% endfor %}
  VPCRouteInternetGateway:
    DependsOn: VpcGA
    Type: "AWS::EC2::Route"
    Properties:
      GatewayId:
        Ref: VpcInternetGateway
      DestinationCidrBlock: "0.0.0.0/0"
      RouteTableId:
        Ref: VpcRouteTable
  PublicSubnet:
    Type: "AWS::EC2::Subnet"
    DependsOn:
      - Vpc
    Properties:
      CidrBlock: "192.199.0.0/24"
      Tags:
        - Key: Name
          Value: "{{project_tag}}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
      MapPublicIpOnLaunch: true
      VpcId:
        Ref: Vpc
  {% for c in range(1,(node_instance_count|int)+1) %}
  PublicSubnetRTA:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
    Properties:
      RouteTableId:
        Ref: VpcRouteTable
      SubnetId:
        Ref: PublicSubnet
  "node{{loop.index}}": {
    "Type": "AWS::EC2::Instance",
    "Properties": {
      "ImageId": {
        "Fn::FindInMap": [
          "RegionMapping",
          {
            "Ref": "AWS::Region"
          },
          "AMI"
        ]
      },
      "InstanceType": "{{node_instance_type}}",
      "KeyName": "{{key_name}}",
      "SecurityGroupIds": [
        {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        }
      ],
      "SubnetId": {
        "Ref": "PublicSubnet"
      },
      "Tags": [
        {
          "Key": "Name",
          "Value": "node"
        },
        {
          "Key": "AnsibleGroup",
          "Value": "nodes"
        },
        {
          "Key": "Project",
          "Value": "{{project_tag}}"
        },
        {
          "Key": "{{ project_tag }}",
          "Value": "node"
        }
      ],
      "BlockDeviceMappings": [
        {
          "DeviceName": "/dev/xvda",
          "Ebs": {
            "VolumeSize": 30
          }
        },
        {
          "DeviceName": "/dev/xvdb",
          "Ebs": {
            "VolumeType": "gp2",
            "VolumeSize": 100
          }
        }
      ]
    }
  HostSG:
    Type: "AWS::EC2::SecurityGroup"
    Properties:
      GroupDescription: Host
      VpcId:
        Ref: Vpc
      Tags:
        - Key: Name
          Value: host_sg
  },
  HostUDPPorts:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - HostSG
          - GroupId
      IpProtocol: udp
      FromPort: 0
      ToPort: 65535
      CidrIp: "0.0.0.0/0"
  "node{{loop.index}}DNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
  HostTCPPorts:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - HostSG
          - GroupId
      IpProtocol: tcp
      FromPort: 0
      ToPort: 65535
      CidrIp: "0.0.0.0/0"
      "RecordSets": [
        {
          "Name": "node{{loop.index}}.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "node{{loop.index}}",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
  {% endfor %}
  zoneinternalidns:
    Type: "AWS::Route53::HostedZone"
    Properties:
      Name: "{{ zone_internal_dns }}"
      VPCs:
        - VPCId:
            Ref: Vpc
          VPCRegion:
            Ref: "AWS::Region"
      HostedZoneConfig:
        Comment: "Created By ansible agnostic deployer"
  {% for c in range(1,(infranode_instance_count|int)+1) %}
  "infranode{{loop.index}}": {
    "Type": "AWS::EC2::Instance",
    "Properties": {
      "ImageId": {
        "Fn::FindInMap": [
          "RegionMapping",
          {
            "Ref": "AWS::Region"
          },
          "AMI"
        ]
      },
      "InstanceType": "{{infranode_instance_type}}",
      "KeyName": "{{key_name}}",
      "SecurityGroupIds": [
        {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        }
      ],
      "SubnetId": {
        "Ref": "PublicSubnet"
      },
      "Tags": [
        {
          "Key": "Name",
          "Value": "infranode"
        },
        {
          "Key": "AnsibleGroup",
          "Value": "infranodes"
        },
        {
          "Key": "Project",
          "Value": "{{project_tag}}"
        },
        {
          "Key": "{{ project_tag }}",
          "Value": "infranode"
        }
      ],
      "BlockDeviceMappings": [
        {
          "DeviceName": "/dev/xvda",
          "Ebs": {
            "VolumeSize": 30
          }
        },
        {
          "DeviceName": "/dev/xvdb",
          "Ebs": {
            "VolumeType": "gp2",
            "VolumeSize": 50
          }
        }
      ]
    }
  },
  "infranode{{loop.index}}DNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
      "RecordSets": [
        {
          "Name": "infranode{{loop.index}}.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "infranode{{loop.index}}",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
  {% endfor %}
  {% for c in range(1,(support_instance_count|int)+1) %}
  "support{{loop.index}}": {
    "Type": "AWS::EC2::Instance",
    "Properties": {
      "ImageId": {
        "Fn::FindInMap": [
          "RegionMapping",
          {
            "Ref": "AWS::Region"
          },
          "AMI"
        ]
      },
      "InstanceType": "{{support_instance_type}}",
      "KeyName": "{{key_name}}",
      "SecurityGroupIds": [
        {
          "Fn::GetAtt": [
            "HostSG",
            "GroupId"
          ]
        }
      ],
      "SubnetId": {
        "Ref": "PublicSubnet"
      },
      "Tags": [
        {
          "Key": "Name",
          "Value": "support"
        },
        {
          "Key": "AnsibleGroup",
          "Value": "support"
        },
        {
          "Key": "Project",
          "Value": "{{project_tag}}"
        },
        {
          "Key": "{{ project_tag }}",
          "Value": "support"
        }
      ],
      "BlockDeviceMappings": [
        {
          "DeviceName": "/dev/xvda",
          "Ebs": {
            "VolumeSize": 30
          }
        },
        {
          "DeviceName": "/dev/xvdb",
          "Ebs": {
            "VolumeType": "gp2",
            "VolumeSize": 50
          }
        }
      ]
    }
  },
  "support{{loop.index}}DNS": {
    "Type": "AWS::Route53::RecordSetGroup",
    "Properties": {
    "HostedZoneId" : { "Ref" : "zoneinternalidns" },
      "RecordSets": [
        {
          "Name": "support{{loop.index}}.{{zone_internal_dns}}",
          "Type": "A",
          "TTL": "10",
          "ResourceRecords": [
            {
              "Fn::GetAtt": [
                "support{{loop.index}}",
                "PrivateIp"
              ]
            }
          ]
        }
      ]
    }
  },
  CloudDNS:
    Type: AWS::Route53::RecordSetGroup
    DependsOn:
{% for c in range(1,(infranode_instance_count|int)+1) %}
      - "infranode{{loop.index}}EIP"
{% endfor %}
    Properties:
      HostedZoneId: "{{HostedZoneId}}"
      RecordSets:
        - Name: "{{cloudapps_dns}}"
          Type: A
          TTL: 900
          ResourceRecords:
{% for c in range(1,(infranode_instance_count|int)+1) %}
            - Fn::GetAtt:
                - infranode{{loop.index}}
                - PublicIp
{% endfor %}
{% for instance in instances %}
{% if instance['dns_loadbalancer']|d(false)|bool and not instance['unique']|d(false)|bool %}
  {{instance['name']}}DNSLoadBalancer:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
{% for c in range(1, (instance['count']|int)+1) %}
      - {{instance['name']}}{{c}}EIP
{% endfor %}
    Properties:
      HostedZoneId: {{HostedZoneId}}
      RecordSets:
      - Name: "{{instance['name']}}.{{subdomain_base}}."
        Type: A
        TTL: 900
        ResourceRecords:
{% for c in range(1,(instance['count'] |int)+1) %}
          - "Fn::GetAtt":
            - {{instance['name']}}{{c}}
            - PublicIp
{% endfor %}
{% endif %}
},
  "Outputs": {
    "Route53internalzoneOutput": {
      "Description": "The ID of the internal route 53 zone",
      "Value": {
        "Ref": "zoneinternalidns"
      }
  }
}
}
{% for c in range(1,(instance['count'] |int)+1) %}
  {{instance['name']}}{{loop.index}}:
    Type: "AWS::EC2::Instance"
    Properties:
      ImageId:
        Fn::FindInMap:
        - RegionMapping
        - Ref: AWS::Region
        - {{ instance['image_id'] | default('RHELAMI') }}
      InstanceType: "{{instance['flavor'][cloud_provider]}}"
      KeyName: "{{instance['key_name'] | default(key_name)}}"
{% if instance['UserData'] is defined %}
      {{instance['UserData']}}
{% endif %}
      SecurityGroupIds:
        - "Fn::GetAtt":
          - HostSG
          - GroupId
      SubnetId:
        Ref: PublicSubnet
      Tags:
{% if instance['unique'] | d(false) | bool %}
        - Key: Name
          Value: {{instance['name']}}
        - Key: internaldns
          Value: {{instance['name']}}.{{chomped_zone_internal_dns}}
{% else %}
        - Key: Name
          Value: {{instance['name']}}{{loop.index}}
        - Key: internaldns
          Value: {{instance['name']}}{{loop.index}}.{{chomped_zone_internal_dns}}
{% endif %}
        - Key: "owner"
          Value: "{{ email | default('unknownuser') }}"
        - Key: "Project"
          Value: "{{project_tag}}"
        - Key: "{{project_tag}}"
          Value: "{{ instance['name'] }}"
{% for tag in instance['tags'] %}
        - Key: {{tag['key']}}
          Value: {{tag['value']}}
{% endfor %}
      BlockDeviceMappings:
        - DeviceName: "/dev/sda1"
          Ebs:
            VolumeSize: {{ instance['rootfs_size'] | default('50') }}
{% for vol in instance['volumes']|default([]) %}
        - DeviceName: "{{ vol['device_name'] }}"
          Ebs:
            VolumeType: "{{ vol['volume_type'] | d('gp2') }}"
            VolumeSize: "{{ vol['volume_size'] | d('20') }}"
{% endfor %}
  {{instance['name']}}{{loop.index}}InternalDNS:
    Type: "AWS::Route53::RecordSetGroup"
    Properties:
      HostedZoneId:
        Ref: zoneinternalidns
      RecordSets:
{% if instance['unique'] | d(false) | bool %}
      - Name: "{{instance['name']}}.{{zone_internal_dns}}"
{% else %}
      - Name: "{{instance['name']}}{{loop.index}}.{{zone_internal_dns}}"
{% endif %}
        Type: A
        TTL: 10
        ResourceRecords:
          - "Fn::GetAtt":
            - {{instance['name']}}{{loop.index}}
            - PrivateIp
{% if instance['public_dns'] %}
  {{instance['name']}}{{loop.index}}EIP:
    Type: "AWS::EC2::EIP"
    DependsOn:
    - VpcGA
    Properties:
      InstanceId:
        Ref: {{instance['name']}}{{loop.index}}
  {{instance['name']}}{{loop.index}}PublicDNS:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
      - {{instance['name']}}{{loop.index}}EIP
    Properties:
      HostedZoneId: {{HostedZoneId}}
      RecordSets:
{% if instance['unique'] | d(false) | bool %}
          - Name: "{{instance['name']}}.{{subdomain_base}}."
{% else %}
          - Name: "{{instance['name']}}{{loop.index}}.{{subdomain_base}}."
{% endif %}
            Type: A
            TTL: 10
            ResourceRecords:
            - "Fn::GetAtt":
              - {{instance['name']}}{{loop.index}}
              - PublicIp
{% endif %}
{% endfor %}
{% endfor %}
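# Illustrative rendering (editorial): the bastion entry from env_vars.yml
# (count 1, unique and public_dns true) makes this loop emit resources named
# bastion1, bastion1InternalDNS, bastion1EIP and bastion1PublicDNS, with the
# public record named bastion.<subdomain_base>.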
Outputs:
  Route53internalzoneOutput:
    Description: The ID of the internal route 53 zone
    Value:
      Ref: zoneinternalidns
ansible/configs/ocp-implementation-lab/files/ec2_internal_dns.json.j2
@@ -5,7 +5,7 @@
{% for host in groups['masters'] %}
{% for host in groups['masters']|sort %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
@@ -18,7 +18,19 @@
{% endfor %}
{% for host in groups['infranodes'] %}
{% for host in groups['loadbalancers']|sort %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "loadbalancer{{loop.index}}.{{zone_internal_dns}}",
        "Type": "A",
        "TTL": 20,
        "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ]
      }
    },
{% endfor %}
{% for host in groups['infranodes']|sort %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
@@ -32,7 +44,7 @@
{% for host in groups['nodes'] %}
{% for host in groups['nodes']|sort %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
@@ -44,7 +56,7 @@
    },
{% endfor %}
{% for host in groups['support'] %}
{% for host in groups['support']|sort %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
@@ -56,7 +68,7 @@
    },
{% endfor %}
{% for host in groups['bastions'] %}
{% for host in groups['bastions']|sort %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
ansible/configs/ocp-implementation-lab/files/hosts_template.3.9.14.j2
New file
@@ -0,0 +1,266 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# default project node selector
osm_default_node_selector='env=app'
openshift_hosted_infra_selector="env=infra"
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
###########################################################################
### OpenShift Optional Vars
###########################################################################
# Enable cockpit
osm_use_cockpit=true
osm_cockpit_plugins=['cockpit-kubernetes']
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" %}
openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com', 'bindPassword': '{{bindPassword}}', 'ca': 'ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid'}]
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "allow_all"  %}
openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
{% endif %}
{% if install_idm == "htpasswd"  %}
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
openshift_master_htpasswd_file=/root/htpasswd.openshift
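# Example (illustrative) of adding an entry to the file above with the htpasswd CLI
# from httpd-tools; user1 and <password> are placeholders:
#   htpasswd -b /root/htpasswd.openshift user1 '<password>'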
{% endif %}
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
openshift_metrics_install_metrics={{install_metrics}}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_nodeselector={"env":"infra"}
openshift_metrics_hawkular_nodeselector={"env":"infra"}
openshift_metrics_heapster_nodeselector={"env":"infra"}
## Add Prometheus Metrics:
openshift_hosted_prometheus_deploy=true
openshift_prometheus_node_selector={"env":"infra"}
openshift_prometheus_namespace=openshift-metrics
# Prometheus
openshift_prometheus_storage_kind=nfs
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_nfs_directory=/srv/nfs
openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_storage_volume_name=prometheus
openshift_prometheus_storage_volume_size=10Gi
openshift_prometheus_storage_labels={'storage': 'prometheus'}
openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
openshift_prometheus_alertmanager_storage_kind=nfs
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_kind=nfs
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
openshift_prometheus_alertbuffer_storage_type='pvc'
openshift_prometheus_node_exporter_image_version=v3.9
# Enable cluster logging
openshift_logging_install_logging={{install_logging}}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
# openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}}
openshift_logging_es_cluster_size=1
openshift_logging_es_nodeselector={"env":"infra"}
openshift_logging_kibana_nodeselector={"env":"infra"}
openshift_logging_curator_nodeselector={"env":"infra"}
###########################################################################
### OpenShift Project Management Vars
###########################################################################
# Configure additional projects
openshift_additional_projects={'openshift-template-service-broker': {'default_node_selector': ''}}
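# Dict format: {'<project-name>': {'default_node_selector': '<selector>'}}; the empty
# selector above lets pods in that project schedule on any node, overriding
# osm_default_node_selector for that project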
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_replicas={{infranode_instance_count}}
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
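# The whitelist is a list of regexes; '.*-apb$' matches registry images whose names
# end in "-apb" (for example, an illustrative "postgresql-apb")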
openshift_hosted_etcd_storage_kind=nfs
openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)"
openshift_hosted_etcd_storage_nfs_directory=/srv/nfs
openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'}
openshift_hosted_etcd_storage_volume_name=etcd-asb
openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce']
openshift_hosted_etcd_storage_volume_size=10G
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
{% if new_node_instance_count > 0 %}
new_nodes
{% endif %}
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[masters]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[etcd]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'app', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are CNS nodes
{% for host in groups['support'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'glusterfs', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
{% if new_node_instance_count > 0 %}
# scaleup performed, leave an empty group, see:
# https://docs.openshift.com/container-platform/3.5/install_config/adding_hosts_to_existing_cluster.html
[new_nodes]
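# (illustrative) a later scaleup run would append entries here in the same format as
# [nodes], for example:
# node4.{{guid}}.internal openshift_node_labels="{'env':'app', 'cluster': '{{guid}}'}"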
{% endif %}
[nfs]
{% set nfshost = groups['support']|sort|first %}
{{ hostvars[nfshost].internaldns }} openshift_hostname={{ hostvars[nfshost].internaldns }}
[glusterfs]
{% for host in groups['support'] %}
{{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
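# glusterfs_devices must name raw, unpartitioned block devices on each CNS node;
# the glusterfs_app_device_name var is assumed to resolve to something like /dev/xvdc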
ansible/configs/ocp-implementation-lab/files/hosts_template.3.9.25.j2
New file
@@ -0,0 +1,226 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="memory_availability"
# Default node selectors
osm_default_node_selector='env=app'
openshift_hosted_infra_selector="env=infra"
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
###########################################################################
### OpenShift Network Vars
###########################################################################
#osm_cluster_network_cidr=10.1.0.0/16
#openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
# htpasswd Authentication
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
openshift_master_htpasswd_file=/root/htpasswd.openshift
# LDAP Authentication (download ipa-ca.crt first)
# openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}]
# openshift_master_ldap_ca_file=/root/ipa-ca.crt
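# (illustrative) FreeIPA publishes its CA certificate at /ipa/config/ca.crt, so a
# fetch for the commented provider above could look like:
#   curl -o /root/ipa-ca.crt http://ipa.shared.example.opentlc.com/ipa/config/ca.crt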
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_replicas={{infranode_instance_count}}
# openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
openshift_hosted_etcd_storage_kind=nfs
openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)"
openshift_hosted_etcd_storage_nfs_directory=/srv/nfs
openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'}
openshift_hosted_etcd_storage_volume_name=etcd-asb
openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce']
openshift_hosted_etcd_storage_volume_size=10G
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
openshift_metrics_install_metrics={{install_metrics}}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_nodeselector={"env":"infra"}
openshift_metrics_hawkular_nodeselector={"env":"infra"}
openshift_metrics_heapster_nodeselector={"env":"infra"}
# Enable cluster logging
openshift_logging_install_logging={{install_logging}}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
# openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}}
openshift_logging_es_cluster_size=1
openshift_logging_es_nodeselector={"env":"infra"}
openshift_logging_kibana_nodeselector={"env":"infra"}
openshift_logging_curator_nodeselector={"env":"infra"}
###########################################################################
### OpenShift Prometheus Vars
###########################################################################
## Add Prometheus Metrics:
openshift_hosted_prometheus_deploy=true
openshift_prometheus_node_selector={"env":"infra"}
openshift_prometheus_namespace=openshift-metrics
# Prometheus
openshift_prometheus_storage_kind=nfs
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_nfs_directory=/srv/nfs
openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_storage_volume_name=prometheus
openshift_prometheus_storage_volume_size=10Gi
openshift_prometheus_storage_labels={'storage': 'prometheus'}
openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
openshift_prometheus_alertmanager_storage_kind=nfs
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_kind=nfs
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
openshift_prometheus_alertbuffer_storage_type='pvc'
# Necessary because of a bug in the installer on 3.9
openshift_prometheus_node_exporter_image_version=v3.9
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[masters]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[etcd]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env':'master', 'cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'infra', 'cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'app', 'cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are CNS nodes
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'glusterfs', 'cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
[nfs]
{% set nfshost = groups['support']|sort|first %}
{{ hostvars[nfshost].internaldns }} openshift_hostname={{ hostvars[nfshost].internaldns }}
#[glusterfs]
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
ansible/configs/ocp-implementation-lab/files/hosts_template.3.9.27.j2
New file
@@ -0,0 +1,230 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="memory_availability"
# Default node selectors
osm_default_node_selector='env=app'
openshift_hosted_infra_selector="env=infra"
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
###########################################################################
### OpenShift Network Vars
###########################################################################
#osm_cluster_network_cidr=10.1.0.0/16
#openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
# htpasswd Authentication
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
openshift_master_htpasswd_file=/root/htpasswd.openshift
# LDAP Authentication (download ipa-ca.crt first)
# openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}]
# openshift_master_ldap_ca_file=/root/ipa-ca.crt
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_replicas={{infranode_instance_count}}
# openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
openshift_hosted_etcd_storage_kind=nfs
openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)"
openshift_hosted_etcd_storage_nfs_directory=/srv/nfs
openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'}
openshift_hosted_etcd_storage_volume_name=etcd-asb
openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce']
openshift_hosted_etcd_storage_volume_size=10G
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
openshift_metrics_install_metrics={{install_metrics}}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_nodeselector={"env":"infra"}
openshift_metrics_hawkular_nodeselector={"env":"infra"}
openshift_metrics_heapster_nodeselector={"env":"infra"}
# Enable cluster logging
openshift_logging_install_logging={{install_logging}}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
# openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}}
openshift_logging_es_cluster_size=1
openshift_logging_es_nodeselector={"env":"infra"}
openshift_logging_kibana_nodeselector={"env":"infra"}
openshift_logging_curator_nodeselector={"env":"infra"}
###########################################################################
### OpenShift Prometheus Vars
###########################################################################
## Add Prometheus Metrics:
openshift_hosted_prometheus_deploy=true
openshift_prometheus_node_selector={"env":"infra"}
openshift_prometheus_namespace=openshift-metrics
# Prometheus
openshift_prometheus_storage_kind=nfs
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_nfs_directory=/srv/nfs
openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_storage_volume_name=prometheus
openshift_prometheus_storage_volume_size=10Gi
openshift_prometheus_storage_labels={'storage': 'prometheus'}
openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
openshift_prometheus_alertmanager_storage_kind=nfs
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_kind=nfs
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
openshift_prometheus_alertbuffer_storage_type='pvc'
# Necessary because of a bug in the installer on 3.9
openshift_prometheus_node_exporter_image_version=v3.9
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
#glusterfs
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[masters]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env':'master', 'cluster': '{{guid}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'infra', 'cluster': '{{guid}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'app', 'cluster': '{{guid}}'}"
{% endfor %}
## These are CNS nodes
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'glusterfs', 'cluster': '{{guid}}'}"
{% endfor %}
[nfs]
{% set nfshost = groups['support']|sort|first %}
{{ hostvars[nfshost].internaldns }} openshift_hostname={{ hostvars[nfshost].internaldns }}
#[glusterfs]
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
ansible/configs/ocp-implementation-lab/files/hosts_template.3.9.30.j2
New file
@@ -0,0 +1,236 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="memory_availability"
# Default node selectors
osm_default_node_selector='env=app'
openshift_hosted_infra_selector="env=infra"
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
###########################################################################
### OpenShift Network Vars
###########################################################################
#osm_cluster_network_cidr=10.1.0.0/16
#openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
# htpasswd Authentication
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
openshift_master_htpasswd_file=/root/htpasswd.openshift
# LDAP Authentication (download ipa-ca.crt first)
# openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}]
# openshift_master_ldap_ca_file=/root/ipa-ca.crt
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
# Bug in 3.9.30 (Bugzilla 1583500): unqualified image names are completed with "docker.io"
# https://bugzilla.redhat.com/show_bug.cgi?id=1583500
# Workaround:
oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version}
openshift_examples_modify_imagestreams=true
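# (illustrative) the installer expands ${component}/${version} itself, so with the
# oreg_url above it pulls images such as:
#   registry.access.redhat.com/openshift3/ose-deployer:v3.9.30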
openshift_hosted_router_replicas={{infranode_instance_count}}
# openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
openshift_hosted_etcd_storage_kind=nfs
openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)"
openshift_hosted_etcd_storage_nfs_directory=/srv/nfs
openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'}
openshift_hosted_etcd_storage_volume_name=etcd-asb
openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce']
openshift_hosted_etcd_storage_volume_size=10G
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
openshift_metrics_install_metrics={{install_metrics}}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_nodeselector={"env":"infra"}
openshift_metrics_hawkular_nodeselector={"env":"infra"}
openshift_metrics_heapster_nodeselector={"env":"infra"}
# Enable cluster logging
openshift_logging_install_logging={{install_logging}}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
# openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}}
openshift_logging_es_cluster_size=1
openshift_logging_es_nodeselector={"env":"infra"}
openshift_logging_kibana_nodeselector={"env":"infra"}
openshift_logging_curator_nodeselector={"env":"infra"}
###########################################################################
### OpenShift Prometheus Vars
###########################################################################
## Add Prometheus Metrics:
openshift_hosted_prometheus_deploy=true
openshift_prometheus_node_selector={"env":"infra"}
openshift_prometheus_namespace=openshift-metrics
# Prometheus
openshift_prometheus_storage_kind=nfs
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_nfs_directory=/srv/nfs
openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_storage_volume_name=prometheus
openshift_prometheus_storage_volume_size=10Gi
openshift_prometheus_storage_labels={'storage': 'prometheus'}
openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
openshift_prometheus_alertmanager_storage_kind=nfs
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_kind=nfs
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
openshift_prometheus_alertbuffer_storage_type='pvc'
# Necessary because of a bug in the installer on 3.9
openshift_prometheus_node_exporter_image_version=v3.9
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
#glusterfs
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[masters]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env':'master', 'cluster': '{{guid}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'infra', 'cluster': '{{guid}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'app', 'cluster': '{{guid}}'}"
{% endfor %}
## These are CNS nodes
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'glusterfs', 'cluster': '{{guid}}'}"
{% endfor %}
[nfs]
{% set nfshost = groups['support']|sort|first %}
{{ hostvars[nfshost].internaldns }} openshift_hostname={{ hostvars[nfshost].internaldns }}
#[glusterfs]
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
ansible/configs/ocp-implementation-lab/files/hosts_template.j2
@@ -7,26 +7,35 @@
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
openshift_metrics_image_version=v{{ repo_version }}
#openshift_image_tag=v{{ repo_version }}
openshift_release={{ osrelease }}
#docker_version="{{docker_version}}"
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# default project node selector
osm_default_node_selector='env=users'
osm_default_node_selector='env=app'
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
{% if osrelease | version_compare('3.7', '<') %}
# Anything before 3.7
openshift_metrics_image_version=v{{ repo_version }}
#openshift_image_tag=v{{ repo_version }}
#openshift_release={{ osrelease }}
#docker_version="{{docker_version}}"
{% endif %}
###########################################################################
### OpenShift Optional Vars
###########################################################################
# Enable cockpit
osm_use_cockpit=true
@@ -40,9 +49,11 @@
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname={{master_lb_dns}}
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
###########################################################################
### OpenShift Network Vars
@@ -51,38 +62,34 @@
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
{% if osrelease | version_compare('3.7', '>=') %}
# This should be turned on once all dependent scripts use firewalld rather than iptables
# os_firewall_use_firewalld=True
{% endif %}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" %}
openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com', 'bindPassword': '{{bindPassword}}', 'ca': 'ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid'}]
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "allow_all"  %}
openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
{% endif %}
{% if install_idm == "htpasswd"  %}
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
###########################################################################
@@ -90,7 +97,57 @@
###########################################################################
# Enable cluster metrics
{% if osrelease | version_compare('3.7', '>=') %}
openshift_metrics_install_metrics={{install_metrics}}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
#openshift_master_metrics_public_url=https://hawkular-metrics.{{cloudapps_suffix}}/hawkular/metrics
## Add Prometheus Metrics:
openshift_hosted_prometheus_deploy=true
openshift_prometheus_node_selector={"env":"infra"}
openshift_prometheus_namespace=openshift-metrics
# Prometheus
openshift_prometheus_storage_kind=nfs
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_nfs_directory=/srv/nfs
openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_storage_volume_name=prometheus
openshift_prometheus_storage_volume_size=10Gi
openshift_prometheus_storage_labels={'storage': 'prometheus'}
openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
openshift_prometheus_alertmanager_storage_kind=nfs
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_kind=nfs
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
openshift_prometheus_alertbuffer_storage_type='pvc'
{% else %}
openshift_hosted_metrics_deploy={{install_metrics}}
openshift_hosted_metrics_storage_kind=nfs
openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
openshift_hosted_metrics_storage_host=support1.{{guid}}.internal
@@ -99,8 +156,34 @@
openshift_hosted_metrics_storage_volume_name=metrics
openshift_hosted_metrics_storage_volume_size=10Gi
openshift_hosted_metrics_public_url=https://hawkular-metrics.{{cloudapps_suffix}}/hawkular/metrics
{% endif %}
openshift_metrics_cassandra_nodeselector={"env":"infra"}
openshift_metrics_hawkular_nodeselector={"env":"infra"}
openshift_metrics_heapster_nodeselector={"env":"infra"}
# Enable cluster logging
{% if osrelease | version_compare('3.7', '>=') %}
openshift_logging_install_logging={{install_logging}}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
# openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}}
openshift_logging_es_cluster_size=1
{% else %}
openshift_hosted_logging_deploy={{install_logging}}
openshift_master_logging_public_url=https://kibana.{{cloudapps_suffix}}
openshift_hosted_logging_storage_kind=nfs
openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
openshift_hosted_logging_storage_nfs_directory=/srv/nfs
@@ -110,18 +193,22 @@
openshift_hosted_logging_hostname=kibana.{{cloudapps_suffix}}
openshift_hosted_logging_elasticsearch_cluster_size=1
openshift_hosted_logging_deployer_version=v{{repo_version}}
# The two commented lines below are wrong; leave them unset
#openshift_hosted_logging_image_version=v{{repo_version}}
#openshift_logging_image_version=v{{repo_version}}
{% endif %}
openshift_logging_es_nodeselector={"env":"infra"}
openshift_logging_kibana_nodeselector={"env":"infra"}
openshift_logging_curator_nodeselector={"env":"infra"}
###########################################################################
### OpenShift Project Management Vars
###########################################################################
# Configure additional projects
openshift_additional_projects={'my-infra-project-test': {'default_node_selector': 'env=infra'}}
openshift_additional_projects={'openshift-template-service-broker': {'default_node_selector': ''}}
###########################################################################
@@ -129,41 +216,47 @@
###########################################################################
openshift_hosted_router_selector='env=infra'
openshift_hosted_router_replicas=1
openshift_hosted_router_replicas={{infranode_instance_count}}
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_selector='env=infra'
openshift_hosted_registry_replicas=1
# Registry AWS S3
# S3 bucket must already exist.
openshift_hosted_registry_storage_kind=object
openshift_hosted_registry_storage_provider=s3
openshift_hosted_registry_storage_s3_accesskey={{ aws_access_key_id }}
openshift_hosted_registry_storage_s3_secretkey={{ aws_secret_access_key }}
openshift_hosted_registry_storage_s3_bucket={{ env_type }}-{{ guid }}
openshift_hosted_registry_storage_s3_region={{ aws_region }}
openshift_hosted_registry_storage_s3_chunksize=26214400
openshift_hosted_registry_storage_s3_rootdirectory=/registry
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
{% if osrelease | version_compare('3.7', '>=') %}
openshift_enable_service_catalog=true
template_service_broker_install=true
template_service_broker_selector={"env":"infra"}
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=false
{% endif %}
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
{% if new_node_instance_count > 0 %}
new_nodes
{% endif %}
[lb]
{% for host in groups['loadbalancers'] %}
@@ -172,32 +265,38 @@
[masters]
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[etcd]
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}} openshift_hostname=master{{loop.index}}.{{chomped_zone_internal_dns}}   ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}'}"
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes'] %}
infranode{{loop.index}}.{{chomped_zone_internal_dns}} openshift_hostname=infranode{{loop.index}}.{{chomped_zone_internal_dns}} openshift_ip={{hostvars[host]['ec2_private_ip_address']}} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra''}"
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes'] %}
node{{loop.index}}.{{chomped_zone_internal_dns}} openshift_hostname=node{{loop.index}}.{{chomped_zone_internal_dns}} openshift_ip={{hostvars[host]['ec2_private_ip_address']}} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users'}"
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'app', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
{% if new_node_instance_count > 0 %}
# scaleup performed, leave an empty group, see:
# https://docs.openshift.com/container-platform/3.5/install_config/adding_hosts_to_existing_cluster.html
[new_nodes]
{% endif %}
[nfs]
{% for host in groups['support'] %}
support{{loop.index}}.{{chomped_zone_internal_dns}} openshift_hostname=support{{loop.index}}.{{chomped_zone_internal_dns}} openshift_ip={{hostvars[host]['ec2_private_ip_address']}} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}
{% endfor %}
ansible/configs/ocp-implementation-lab/files/htpasswd.openshift
@@ -100,3 +100,4 @@
user98:$apr1$MVU1/8dh$UKzkRk1CQP00SvnoPIm1..
user99:$apr1$v8vKZdHH$NC5xud.olhtdydHU9hav6.
user100:$apr1$glFN48wz$dR9w94PGiQL8qZXcXGd0L0
marina:$apr1$ayR8gA9a$4bmozGlmBX6XQY1AbJfQk1
ansible/configs/ocp-implementation-lab/files/labs_hosts_template.j2
@@ -1,4 +1,4 @@
[OCPlabs:vars]
[OSEv3:vars]
###########################################################################
### Ansible Vars
@@ -7,9 +7,17 @@
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
# disable memory check, as we are not a production environment
openshift_disable_check="memory_availability"
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
[OCPlabs:children]
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
@@ -18,40 +26,39 @@
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{{ hostvars[host].internaldns }}
{% endfor %}
[masters]
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters'] %}
master{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env': 'master', 'cluster': '{{guid}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes'] %}
infranode{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'infra', 'cluster': '{{guid}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'app', 'cluster': '{{guid}}'}"
{% endfor %}
{% for host in groups['nodes'] %}
node{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'glusterfs', 'cluster': '{{guid}}'}"
{% endfor %}
[nfs]
{% for host in groups['support'] %}
support{{loop.index}}.{{chomped_zone_internal_dns}}  host_zone={{hostvars[host]['placement']}}
{% endfor %}
{% set nfshost = groups['support']|sort|first %}
{{ hostvars[nfshost].internaldns }} openshift_hostname={{ hostvars[nfshost].internaldns }}
ansible/configs/ocp-implementation-lab/files/pvs.j2
@@ -11,7 +11,7 @@
  - ReadWriteOnce
  nfs:
    path: {{ nfs_export_path }}/{{pv}}
    server: nfs1.{{guid}}.internal
    server: support1.{{guid}}.internal
  persistentVolumeReclaimPolicy: {{persistentVolumeReclaimPolicy}}
---
{% endfor %}
{% endfor %}
ansible/configs/ocp-implementation-lab/files/repos_template.j2
@@ -28,9 +28,11 @@
enabled=1
gpgcheck=0
## Required since OCP 3.5
[rhel-7-fast-datapath-rpms]
name=Red Hat Enterprise Linux Fast Datapath (RHEL 7 Server) (RPMs)
name=Red Hat Enterprise Linux 7 Fast Datapath
baseurl={{own_repo_path}}/rhel-7-fast-datapath-rpms
enabled=1
gpgcheck=0
# ansible omitted for 3.7
# gluster repo omitted for 3.7
ansible/configs/ocp-implementation-lab/files/userpvs.j2
@@ -1,5 +1,5 @@
---
{%  for pv in range(1,user_vols) %}
{%  for pv in range(1,user_vols|int) %}
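{# range() needs an integer (user_vols may arrive as a string via extra-vars), hence |int.
   Note that range() excludes its upper bound, so this renders vol1 .. vol(user_vols - 1). #}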
apiVersion: v1
kind: PersistentVolume
metadata:
@@ -14,7 +14,7 @@
{% endif %}
  nfs:
    path: {{ nfs_export_path }}/user-vols/vol{{pv}}
    server: nfs1.{{guid}}.internal
    server: support1.{{guid}}.internal
  persistentVolumeReclaimPolicy: {{ persistentVolumeReclaimPolicy }}
---
{% endfor %}
ansible/configs/ocp-implementation-lab/post_ocp_nfs_config.yml
New file
@@ -0,0 +1,58 @@
- name: Step 00xxxxx post software
  hosts: support
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Create user vols
      shell: "mkdir -p /srv/nfs/user-vols/vol{1..{{user_vols}}}"
    - name: chmod the user vols
      shell: "chmod -R 777 /srv/nfs/user-vols"
- name: Step 00xxxxx post software
  hosts: bastions
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: get nfs Hostname
      set_fact:
        nfs_host: "{{ groups['support']|sort|first }}"
    - set_fact:
        pv_size: '10Gi'
        pv_list: "{{ ocp_pvs }}"
        persistentVolumeReclaimPolicy: Retain
    - name: Generate PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/pvs.j2"
        dest: "/root/pvs-{{ env_type }}-{{ guid }}.yml"
      tags: [ gen_pv_file ]
      when: pv_list.0 is defined
    - set_fact:
        pv_size: "{{user_vols_size}}"
        persistentVolumeReclaimPolicy: Recycle
      notify: restart nfs services
      run_once: True
    - name: Generate user vol PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/userpvs.j2"
        dest: "/root/userpvs-{{ env_type }}-{{ guid }}.yml"
      tags:
        - gen_user_vol_pv
    - shell: 'oc create -f /root/pvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/pvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
      when: pv_list.0 is defined
    - shell: 'oc create -f /root/userpvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/userpvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
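Note: the `oc create ... || oc replace ...` pairs are a shell-level idempotency idiom: create succeeds on the first run, and on re-runs its non-zero "already exists" exit falls through to replace, which updates the objects in place. On oc clients that support it, `oc apply` collapses the pair into one call; a sketch, not the committed approach:

    - name: Apply user vol PVs (sketch)
      command: 'oc apply -f /root/userpvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv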
ansible/configs/ocp-implementation-lab/post_software.yml
@@ -10,10 +10,26 @@
  tags:
    - step005
  tasks:
    - name: Overwrite ansible hosts file with lab hosts file
    - name: Generate /etc/ansible/hosts file with lab hosts template
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/labs_hosts_template.j2"
        dest: "{{ ANSIBLE_REPO_PATH }}/workdir/labs_hosts-{{ env_type }}-{{ guid }}"
- name: Configure NFS host for user-vols if required
  hosts: support
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Create user vols
      shell: "mkdir -p /srv/nfs/user-vols/vol{1..{{user_vols}}}"
      tags:
        - openshift_nfs_config
    - name: chmod the user vols
      shell: "chmod -R 777 /srv/nfs/user-vols"
      tags:
        - openshift_nfs_config
- name: Step lab post software deployment
  hosts: bastions
@@ -24,16 +40,85 @@
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Ensures /etc/ansible dir exists
      file: path=/etc/ansible state=directory
    - name: Copy over ansible hosts file
    - name: Move complete inventory file to preserve directory.
      shell: mv /etc/ansible/hosts /var/preserve/
      tags: preserve_complete_ansible_inventory
    - name: Copy over ansible hosts file, lab version
      copy:
        backup: yes
        backup: no
        src: "{{ ANSIBLE_REPO_PATH }}/workdir/labs_hosts-{{ env_type }}-{{ guid }}"
        dest: /etc/ansible/hosts
      tags:
        - overwrite_hosts_with_lab_hosts
    ## Create PVs for uservols if required
    - name: get nfs Hostname
      set_fact:
        nfs_host: "{{ groups['support']|sort|first }}"
      tags:
        - openshift_nfs_config
    - set_fact:
        pv_size: '10Gi'
        pv_list: "{{ ocp_pvs }}"
        persistentVolumeReclaimPolicy: Retain
      tags:
        - openshift_nfs_config
    - name: Generate PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/pvs.j2"
        dest: "/root/pvs-{{ env_type }}-{{ guid }}.yml"
      tags: [ gen_pv_file, openshift_nfs_config ]
      when: pv_list.0 is defined
    - set_fact:
        pv_size: "{{user_vols_size}}"
        persistentVolumeReclaimPolicy: Recycle
      tags:
        - openshift_nfs_config
      notify: restart nfs services
      run_once: True
    - name: Generate user vol PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/userpvs.j2"
        dest: "/root/userpvs-{{ env_type }}-{{ guid }}.yml"
      tags:
        - gen_user_vol_pv
        - openshift_nfs_config
    - shell: 'oc create -f /root/pvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/pvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
        - openshift_nfs_config
      when: pv_list.0 is defined
    - shell: 'oc create -f /root/userpvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/userpvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
        - openshift_nfs_config
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa"
      when: install_ipa_client|bool
# - name: include post nfs config
#   include: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/post_ocp_nfs_config.yml"
#   tags:
#     - openshift_nfs_config
- name: PostSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Post-Software checks completed successfully"
ansible/configs/ocp-implementation-lab/pre_software.yml
@@ -1,4 +1,4 @@
---
- name: Step 003 - Create env key
  hosts: localhost
  connection: local
@@ -17,11 +17,22 @@
        creates: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}"
      when: set_env_authorized_key
    - name: fix permission
      file:
        path: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}"
        mode: 0400
      when: set_env_authorized_key
    - name: Generate SSH pub key
      shell: ssh-keygen -y -f "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" > "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}.pub"
      args:
        creates: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}.pub"
      when: set_env_authorized_key
# Cloudformation template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }}
- name: Configure all hosts with Repositories, Common Files and Set environment key
  hosts:
    - all:!windows
  hosts: all
  become: true
  gather_facts: False
  vars_files:
@@ -47,6 +58,7 @@
    - step004
    - bastion_tasks
- name: PreSoftware flight-check
  hosts: localhost
  connection: local
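Note: in the env-key hunk above, `ssh-keygen -y -f <private-key>` prints the public key derived from an existing private key, which is why the play can (re)create the .pub file without regenerating the key pair; the `creates:` arg keeps both steps idempotent. One-off equivalent from a workstation, path hypothetical:

    ssh-keygen -y -f workdir/example_env_key > workdir/example_env_key.pub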
ansible/configs/ocp-workshop/env_vars.yml
@@ -51,11 +51,9 @@
install_metrics: true
install_logging: true
install_aws_broker: false
# Options for install_idm: allow_all, htpasswd, ldap
install_idm: ldap
glusterfs_device_name: /dev/xvdc
glusterfs_device_size: 300
glusterfs_device_size: 500
ocp_report: false
remove_self_provisioners: false
@@ -141,15 +139,96 @@
multi_tenant_setting: "os_sdn_network_plugin_name='redhat/openshift-ovs-{{ovs_plugin}}'"
master_lb_dns: "master.{{subdomain_base}}"
lets_encrypt_openshift_master_named_certificates:
  - certfile: "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer"
    keyfile: "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key"
    cafile: "/root/.acme.sh/{{ master_lb_dns }}/ca.cer"
lets_encrypt_openshift_hosted_router_certificate:
  certfile: "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer"
  keyfile: "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key"
  cafile: "/root/.acme.sh/{{ master_lb_dns }}/ca.cer"
project_request_message: 'To provision Projects you must request access in https://labs.opentlc.com or https://rhpds.redhat.com'
cloudapps_suffix: 'apps.{{subdomain_base}}'
openshift_master_ldap_ca_file: 'openshift_master_ldap_ca_file=/root/ca.crt'
## TODO: This should be registered as a variable. Awk for OS versions (OCP).
## yum info openshift...
osrelease: 3.9.27
osrelease: 3.9.30
openshift_master_overwrite_named_certificates: true
timeout: 60
########## OCP identity providers
# Options for install_idm: allow_all, htpasswd, ldap, ... see the available_identity_providers map below
install_idm: ldap
# if you want to install several identity providers, just pick from the
# available_identity_providers list:
install_idms:
  - "{{ install_idm }}"
# This var is empty by default.
# Every idm in the list 'install_idms' will be added, using the 'available_identity_providers' map
# you can:
#   - directly override the 'identity_providers' list
# or
#   - add an option to 'available_identity_providers' and then
#     reference it in 'install_idm' or the 'install_idms' list
identity_providers: []
openshift_master_ldap_ca_file: 'openshift_master_ldap_ca_file=/root/ca.crt'
available_identity_providers:
  ldap:
    name: OpenTLC IPA
    challenge: true
    login: true
    kind: LDAPPasswordIdentityProvider
    attributes:
      id: ['dn']
      email: ['mail']
      name: ['cn']
      preferredUsername: ['uid']
    bindDN: uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com
    bindPassword: "{{bindPassword|d('NOT_DEFINED')}}"
    ca: ipa-ca.crt
    insecure: false
    url: ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid
  ssodev:
    name: ssodev-iad00
    challenge: false
    login: true
    kind: OpenIDIdentityProvider
    clientID: "{{ opentlc_ssodev_client_id|d('NOT_DEFINED') }}"
    clientSecret: "{{ opentlc_ssodev_client_secret|d('NOT_DEFINED') }}"
    ca: lets-encrypt-x3-cross-signed.pem.txt
    urls:
      authorize: https://ssodev-iad00.opentlc.com:8443/auth/realms/ipatest/protocol/openid-connect/auth
      token: https://ssodev-iad00.opentlc.com:8443/auth/realms/ipatest/protocol/openid-connect/token
      userInfo: https://ssodev-iad00.opentlc.com:8443/auth/realms/ipatest/protocol/openid-connect/userinfo
    claims:
      id:
        - sub
      preferredUsername:
        - preferred_username
      name:
        - name
      email:
        - email
  allow_all:
    name: allow_all
    login: true
    challenge: true
    kind: AllowAllPasswordIdentityProvider
  htpasswd:
    name: htpasswd_auth
    login: true
    challenge: true
    kind: HTPasswdPasswordIdentityProvider
    filename: /etc/origin/master/htpasswd
###### You can, but you usually wouldn't need to.
ansible_ssh_user: ec2-user
@@ -174,6 +253,7 @@
  - kexec-tools
  - sos
  - psacct
  - iotop
rhel_repos:
  - rhel-7-server-rpms
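Note: in the identity-provider rework above, `install_idms` defaults to the single value of `install_idm`, and each listed name is resolved through the `available_identity_providers` map into the final `identity_providers` list that the hosts templates render with `|to_json`. The selection logic itself is not part of this hunk; a minimal sketch of one way to flatten the map, names as defined above:

    - name: Compose identity_providers from install_idms (sketch, not the committed code)
      set_fact:
        identity_providers: "{{ install_idms | map('extract', available_identity_providers) | list }}"
      when: identity_providers | length == 0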
ansible/configs/ocp-workshop/files/cloud_providers/azure_cloud_template.j2
@@ -412,7 +412,7 @@
                        }
                    },
                    {
                        "name" : "default-allow-openshift-router-http\n",
                        "name" : "default-allow-openshift-router-http",
                        "properties" : {
                            "protocol" : "Tcp",
                            "sourcePortRange" : "*",
ansible/configs/ocp-workshop/files/cloud_providers/ec2_cloud_template.j2
@@ -23,9 +23,9 @@
      {% endif %}
    us-west-2:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-223f945a
      RHELAMI: ami-28e07e50
      {% else %}
      RHELAMI: ami-9fa343e7
      RHELAMI: ami-223f945a
      {% endif %}
    eu-west-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
ansible/configs/ocp-workshop/files/hosts_template.3.9.14.j2
@@ -67,7 +67,7 @@
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
{% if install_lets_encrypt_certificates|bool %}
openshift_master_named_certificates=[{"certfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer", "keyfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key", "cafile": "/root/lets-encrypt-x3-cross-signed.pem"}]
openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}}
{% endif %}
openshift_set_hostname=True
@@ -90,9 +90,8 @@
###########################################################################
{% if install_openwhisk|bool %} 
# TODO: add imagePolicy here, it's enabled by default
openshift_master_admission_plugin_config={"openshift.io/ImagePolicy":{"configuration":{"apiVersion":"v1","kind":"ImagePolicyConfig","resolveImages": "AttemptRewrite"}}}
{% else %}
openshift_master_admission_plugin_config={}
{% endif %}
@@ -100,23 +99,15 @@
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" %}
openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com', 'bindPassword': '{{bindPassword}}', 'ca': 'ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid'}]
{% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %}
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "allow_all"  %}
openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
{% endif %}
{% if install_idm == "htpasswd"  %}
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
{% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %}
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
openshift_master_identity_providers='{{identity_providers|to_json}}'
###########################################################################
### OpenShift Metrics and Logging Vars
@@ -228,7 +219,7 @@
openshift_hosted_router_replicas={{infranode_instance_count}}
{% if install_lets_encrypt_certificates|bool %}
openshift_hosted_router_certificate={"certfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer", "keyfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key", "cafile": "/root/lets-encrypt-x3-cross-signed.pem"}
openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}}
{% endif %}
openshift_hosted_registry_replicas=1
ansible/configs/ocp-workshop/files/hosts_template.3.9.25.j2
@@ -67,7 +67,7 @@
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
{% if install_lets_encrypt_certificates|bool %}
openshift_master_named_certificates=[{"certfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer", "keyfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key", "cafile": "/root/lets-encrypt-x3-cross-signed.pem"}]
openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}}
{% endif %}
openshift_set_hostname=True
@@ -90,9 +90,8 @@
###########################################################################
{% if install_openwhisk|bool %} 
#TODO: add imagePolicy as it is in default
openshift_master_admission_plugin_config={"openshift.io/ImagePolicy":{"configuration":{"apiVersion":"v1","kind":"ImagePolicyConfig","resolveImages": "AttemptRewrite"}}}
{% else %}
openshift_master_admission_plugin_config={}
{% endif %}
@@ -100,23 +99,15 @@
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" %}
openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com', 'bindPassword': '{{bindPassword}}', 'ca': 'ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid'}]
{% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %}
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "allow_all"  %}
openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
{% endif %}
{% if install_idm == "htpasswd"  %}
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
{% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %}
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
openshift_master_identity_providers='{{identity_providers|to_json}}'
###########################################################################
### OpenShift Metrics and Logging Vars
@@ -227,7 +218,7 @@
openshift_hosted_router_replicas={{infranode_instance_count}}
{% if install_lets_encrypt_certificates|bool %}
openshift_hosted_router_certificate={"certfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer", "keyfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key", "cafile": "/root/lets-encrypt-x3-cross-signed.pem"}
openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}}
{% endif %}
openshift_hosted_registry_replicas=1
ansible/configs/ocp-workshop/files/hosts_template.3.9.27.j2
@@ -46,12 +46,13 @@
openshift_storage_glusterfs_storageclass_default=false
openshift_storage_glusterfs_storageclass=True
openshift_storageclass_default=false
openshift_storage_glusterfs_timeout=500
# Set up Block Storage
# Set as default storage class during installation to force Logging/metrics to use it
# Right now the ASB's ETCD PVC will also land on Block due to this fact
openshift_storage_glusterfs_block_deploy=True
openshift_storage_glusterfs_block_host_vol_size=10
openshift_storage_glusterfs_block_host_vol_size=100
openshift_storage_glusterfs_block_storageclass=True
openshift_storage_glusterfs_block_storageclass_default=True
openshift_storage_glusterfs_block_host_vol_create=True
@@ -87,7 +88,7 @@
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
{% if install_lets_encrypt_certificates|bool %}
openshift_master_named_certificates=[{"certfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer", "keyfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key", "cafile": "/root/lets-encrypt-x3-cross-signed.pem"}]
openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}}
{% endif %}
openshift_set_hostname=True
@@ -110,9 +111,8 @@
###########################################################################
{% if install_openwhisk|bool %} 
#TODO: add imagePolicy as it is in default
openshift_master_admission_plugin_config={"openshift.io/ImagePolicy":{"configuration":{"apiVersion":"v1","kind":"ImagePolicyConfig","resolveImages": "AttemptRewrite"}}}
{% else %}
openshift_master_admission_plugin_config={}
{% endif %}
@@ -120,23 +120,15 @@
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" %}
openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com', 'bindPassword': '{{bindPassword}}', 'ca': 'ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid'}]
{% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %}
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "allow_all"  %}
openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
{% endif %}
{% if install_idm == "htpasswd"  %}
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
{% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %}
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
openshift_master_identity_providers='{{identity_providers|to_json}}'
###########################################################################
### OpenShift Metrics and Logging Vars
@@ -260,7 +252,7 @@
openshift_hosted_router_replicas={{infranode_instance_count}}
{% if install_lets_encrypt_certificates|bool %}
openshift_hosted_router_certificate={"certfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer", "keyfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key", "cafile": "/root/lets-encrypt-x3-cross-signed.pem"}
openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}}
{% endif %}
openshift_hosted_registry_replicas=1
@@ -387,4 +379,4 @@
{{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{ [ hostvars[host].glusterfs_device_name ] |to_json}}'
{% endif %}
{% endfor %}
{% endif %}
{% endif %}
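Note: across all four hosts templates, the inline certificate literals are replaced by `{{lets_encrypt_openshift_master_named_certificates|to_json}}` and `{{lets_encrypt_openshift_hosted_router_certificate|to_json}}`, so the acme.sh paths now live once in env_vars.yml. Given the vars defined there, the master line renders roughly as follows (master_lb_dns value hypothetical):

openshift_master_named_certificates=[{"certfile": "/root/.acme.sh/master.guid01.example.com/master.guid01.example.com.cer", "keyfile": "/root/.acme.sh/master.guid01.example.com/master.guid01.example.com.key", "cafile": "/root/.acme.sh/master.guid01.example.com/ca.cer"}]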
ansible/configs/ocp-workshop/files/hosts_template.3.9.30.j2
New file
@@ -0,0 +1,385 @@
#
# /etc/ansible/hosts file for OpenShift Container Platform 3.9.30
#
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
{% if container_runtime == "cri-o" %}
openshift_use_crio=True
openshift_crio_enable_docker_gc=True
{% endif %}
# default project node selector
osm_default_node_selector='env=users'
openshift_hosted_infra_selector="env=infra"
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
# Necessary for 3.9.30
oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version}
openshift_examples_modify_imagestreams=true
{% if install_glusterfs|bool %}
###########################################################################
### OpenShift CNS
###########################################################################
openshift_master_dynamic_provisioning_enabled=True
# Set up GlusterFS Storage
openshift_storage_glusterfs_namespace=glusterfs
openshift_storage_glusterfs_name=storage
openshift_storage_glusterfs_wipe=True
openshift_storage_glusterfs_storageclass_default=false
openshift_storage_glusterfs_storageclass=True
openshift_storageclass_default=false
openshift_storage_glusterfs_timeout=500
# Set up Block Storage
# Set as default storage class during installation to force Logging/metrics to use it
# Right now the ASB's ETCD PVC will also land on Block due to this fact
openshift_storage_glusterfs_block_deploy=True
openshift_storage_glusterfs_block_host_vol_size=100
openshift_storage_glusterfs_block_storageclass=True
openshift_storage_glusterfs_block_storageclass_default=True
openshift_storage_glusterfs_block_host_vol_create=True
# Run these commands after installation on one of the masters:
# oc patch storageclass glusterfs-storage -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
# oc patch storageclass glusterfs-block -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'
{% else %}
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
{% endif %}
###########################################################################
### OpenShift Cockpit Vars
###########################################################################
# Enable cockpit
osm_use_cockpit=true
osm_cockpit_plugins=['cockpit-kubernetes']
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname={{master_lb_dns}}
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
{% if install_lets_encrypt_certificates|bool %}
openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}}
{% endif %}
openshift_set_hostname=True
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy'
{{multi_tenant_setting}}
# This should be turned on once all dependent scripts use firewalld rather than iptables
# os_firewall_use_firewalld=True
###########################################################################
### OpenShift admission plugin config
###########################################################################
{% if install_openwhisk|bool %}
#TODO: add imagePolicy as it is in default
openshift_master_admission_plugin_config={"openshift.io/ImagePolicy":{"configuration":{"apiVersion":"v1","kind":"ImagePolicyConfig","resolveImages": "AttemptRewrite"}}}
{% endif %}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %}
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %}
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
openshift_master_identity_providers='{{identity_providers|to_json}}'
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
########################
openshift_metrics_install_metrics={{install_metrics}}
{% if install_nfs|bool and not install_glusterfs|bool %}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_pvc_storage_class_name=''
{% endif %}
{% if install_glusterfs|bool %}
openshift_metrics_cassandra_storage_type=dynamic
# Volume size needs to be equal to or smaller than the GlusterBlock volume size
openshift_metrics_storage_volume_size=10Gi
{% endif %}
openshift_metrics_cassandra_nodeselector={"env":"infra"}
openshift_metrics_hawkular_nodeselector={"env":"infra"}
openshift_metrics_heapster_nodeselector={"env":"infra"}
# Add Prometheus Metrics:
#########################
openshift_hosted_prometheus_deploy=true
openshift_prometheus_node_selector={"env":"infra"}
openshift_prometheus_namespace=openshift-metrics
# Prometheus
{% if install_glusterfs|bool %}
openshift_prometheus_storage_kind=dynamic
openshift_prometheus_storage_volume_size=20Gi
openshift_prometheus_storage_class=glusterfs-storage
{% elif install_nfs|bool %}
openshift_prometheus_storage_kind=nfs
openshift_prometheus_storage_nfs_directory=/srv/nfs
openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_storage_volume_size=10Gi
openshift_prometheus_storage_labels={'storage': 'prometheus'}
{% endif %}
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_volume_name=prometheus
openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
{% if install_glusterfs|bool %}
openshift_prometheus_alertmanager_storage_kind=dynamic
openshift_prometheus_alertmanager_storage_class=glusterfs-storage
{% elif install_nfs|bool %}
openshift_prometheus_alertmanager_storage_kind=nfs
openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
{% endif %}
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
{% if install_glusterfs|bool %}
openshift_prometheus_alertbuffer_storage_kind=dynamic
openshift_prometheus_alertbuffer_storage_class=glusterfs-storage
{% elif install_nfs|bool %}
openshift_prometheus_alertbuffer_storage_kind=nfs
openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
{% endif %}
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
openshift_prometheus_alertbuffer_storage_type='pvc'
# Necessary for 3.9.25
# openshift_prometheus_node_exporter_image_version=v3.9
# Enable cluster logging
########################
openshift_logging_install_logging={{install_logging}}
{% if install_nfs|bool and not install_glusterfs|bool %}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
openshift_logging_es_pvc_storage_class_name=''
{% endif %}
{% if install_glusterfs|bool %}
openshift_logging_es_pvc_dynamic=true
# Volume size needs to be equal to or smaller than the GlusterBlock volume size
openshift_metrics_storage_volume_size=10Gi
{% endif %}
openshift_logging_es_cluster_size=1
openshift_logging_es_nodeselector={"env":"infra"}
openshift_logging_kibana_nodeselector={"env":"infra"}
openshift_logging_curator_nodeselector={"env":"infra"}
###########################################################################
### OpenShift Project Management Vars
###########################################################################
# Configure additional projects
# openshift_additional_projects={'my-infra-project-test': {'default_node_selector': 'env=infra'}}
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_replicas={{infranode_instance_count}}
{% if install_lets_encrypt_certificates|bool %}
openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}}
{% endif %}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
{% if s3user_access_key is defined %}
# Registry AWS S3
# S3 bucket must already exist.
openshift_hosted_registry_storage_kind=object
openshift_hosted_registry_storage_provider=s3
openshift_hosted_registry_storage_s3_accesskey={{ s3user_access_key }}
openshift_hosted_registry_storage_s3_secretkey={{ s3user_secret_access_key }}
openshift_hosted_registry_storage_s3_bucket={{ project_tag }}
openshift_hosted_registry_storage_s3_region={{ aws_region }}
openshift_hosted_registry_storage_s3_chunksize=26214400
openshift_hosted_registry_storage_s3_rootdirectory=/registry
{% endif %}
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
{% if install_glusterfs|bool %}
openshift_hosted_etcd_storage_kind=dynamic
# Next one doesn't work at the moment - it's still block
openshift_hosted_etcd_storage_class=glusterfs-storage
{% elif install_nfs|bool %}
openshift_hosted_etcd_storage_kind=nfs
openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)"
openshift_hosted_etcd_storage_nfs_directory=/srv/nfs
openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'}
{% endif %}
openshift_hosted_etcd_storage_volume_name=etcd-asb
openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce']
openshift_hosted_etcd_storage_volume_size=10G
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
masters
etcd
nodes
{% if install_nfs|bool %}
nfs
{% endif %}
{% if install_glusterfs|bool %}
glusterfs
{% endif %}
{% if groups['newnodes']|d([])|length > 0 %}
new_nodes
{% endif %}
[masters]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'runtime': '{{container_runtime}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'runtime': '{{container_runtime}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']
  if host not in groups['newnodes']|d([])
  and host not in groups['glusterfs']|d([])
  %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'runtime': '{{container_runtime}}'}"
{% endfor %}
{% if groups['glusterfs']|d([])|length > 0 %}
## These are glusterfs nodes
{% for host in groups['glusterfs'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'glusterfs', 'runtime': '{{container_runtime}}'}"
{% endfor %}
{% endif %}
{% if groups['newnodes']|d([])|length > 0 %}
# scaleup performed, leave an empty group, see:
# https://docs.openshift.com/container-platform/3.7/install_config/adding_hosts_to_existing_cluster.html
[new_nodes]
{% for host in groups['newnodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'runtime': '{{container_runtime}}'}"
{% endfor %}
{% endif %}
{% if install_nfs|bool %}
[nfs]
{% for host in [groups['support']|sort|first] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}
{% endfor %}
{% endif %}
{% if install_glusterfs|bool %}
{% set query = "[?name=='support']|[0].volumes[?purpose=='glusterfs'].device_name" %}
[glusterfs]
{% for host in groups['glusterfs'] %}
{% if cloud_provider == 'ec2' %}
{{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{instances|json_query(query)|to_json}}'
{% elif cloud_provider == 'azure' %}
{{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{ [ hostvars[host].glusterfs_device_name ] |to_json}}'
{% endif %}
{% endfor %}
{% endif %}
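Note: the new [glusterfs] section stops hard-coding a single device and instead extracts every glusterfs device from the environment's `instances` definition with a JMESPath query. A sketch of the data shape the query assumes and a way to inspect the result (structure inferred from the query, not shown in this hunk):

    - name: Show glusterfs devices for the support nodes (sketch)
      vars:
        instances:
          - name: support
            volumes:
              - purpose: glusterfs
                device_name: /dev/xvdc
              - purpose: nfs
                device_name: /dev/xvdd
      debug:
        msg: "{{ instances | json_query(\"[?name=='support']|[0].volumes[?purpose=='glusterfs'].device_name\") }}"

This prints ["/dev/xvdc"], which |to_json then embeds as the glusterfs_devices value.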
ansible/configs/ocp-workshop/files/hosts_template.j2
@@ -68,7 +68,7 @@
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
{% if install_lets_encrypt_certificates|bool %}
openshift_master_named_certificates=[{"certfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer", "keyfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key", "cafile": "/root/lets-encrypt-x3-cross-signed.pem"}]
openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}}
{% endif %}
openshift_set_hostname=True
@@ -93,30 +93,23 @@
### OpenShift admission plugin config
###########################################################################
openshift_master_admission_plugin_config={}
#keep default
#openshift_master_admission_plugin_config={}
{% endif %}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" %}
openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com', 'bindPassword': '{{bindPassword}}', 'ca': 'ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid'}]
{% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %}
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "allow_all"  %}
openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
{% endif %}
{% if install_idm == "htpasswd"  %}
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
{% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([])  %}
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
openshift_master_identity_providers='{{identity_providers|to_json}}'
###########################################################################
### OpenShift Metrics and Logging Vars
@@ -262,7 +255,7 @@
openshift_hosted_router_replicas={{infranode_instance_count}}
{% if install_lets_encrypt_certificates|bool %}
openshift_hosted_router_certificate={"certfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer", "keyfile": "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key", "cafile": "/root/lets-encrypt-x3-cross-signed.pem"}
openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}}
{% endif %}
openshift_hosted_registry_selector='env=infra'
ansible/configs/ocp-workshop/files/lets-encrypt-x3-cross-signed.pem.txt
New file
@@ -0,0 +1,27 @@
-----BEGIN CERTIFICATE-----
MIIEkjCCA3qgAwIBAgIQCgFBQgAAAVOFc2oLheynCDANBgkqhkiG9w0BAQsFADA/
MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
DkRTVCBSb290IENBIFgzMB4XDTE2MDMxNzE2NDA0NloXDTIxMDMxNzE2NDA0Nlow
SjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUxldCdzIEVuY3J5cHQxIzAhBgNVBAMT
GkxldCdzIEVuY3J5cHQgQXV0aG9yaXR5IFgzMIIBIjANBgkqhkiG9w0BAQEFAAOC
AQ8AMIIBCgKCAQEAnNMM8FrlLke3cl03g7NoYzDq1zUmGSXhvb418XCSL7e4S0EF
q6meNQhY7LEqxGiHC6PjdeTm86dicbp5gWAf15Gan/PQeGdxyGkOlZHP/uaZ6WA8
SMx+yk13EiSdRxta67nsHjcAHJyse6cF6s5K671B5TaYucv9bTyWaN8jKkKQDIZ0
Z8h/pZq4UmEUEz9l6YKHy9v6Dlb2honzhT+Xhq+w3Brvaw2VFn3EK6BlspkENnWA
a6xK8xuQSXgvopZPKiAlKQTGdMDQMc2PMTiVFrqoM7hD8bEfwzB/onkxEz0tNvjj
/PIzark5McWvxI0NHWQWM6r6hCm21AvA2H3DkwIDAQABo4IBfTCCAXkwEgYDVR0T
AQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwfwYIKwYBBQUHAQEEczBxMDIG
CCsGAQUFBzABhiZodHRwOi8vaXNyZy50cnVzdGlkLm9jc3AuaWRlbnRydXN0LmNv
bTA7BggrBgEFBQcwAoYvaHR0cDovL2FwcHMuaWRlbnRydXN0LmNvbS9yb290cy9k
c3Ryb290Y2F4My5wN2MwHwYDVR0jBBgwFoAUxKexpHsscfrb4UuQdf/EFWCFiRAw
VAYDVR0gBE0wSzAIBgZngQwBAgEwPwYLKwYBBAGC3xMBAQEwMDAuBggrBgEFBQcC
ARYiaHR0cDovL2Nwcy5yb290LXgxLmxldHNlbmNyeXB0Lm9yZzA8BgNVHR8ENTAz
MDGgL6AthitodHRwOi8vY3JsLmlkZW50cnVzdC5jb20vRFNUUk9PVENBWDNDUkwu
Y3JsMB0GA1UdDgQWBBSoSmpjBH3duubRObemRWXv86jsoTANBgkqhkiG9w0BAQsF
AAOCAQEA3TPXEfNjWDjdGBX7CVW+dla5cEilaUcne8IkCJLxWh9KEik3JHRRHGJo
uM2VcGfl96S8TihRzZvoroed6ti6WqEBmtzw3Wodatg+VyOeph4EYpr/1wXKtx8/
wApIvJSwtmVi4MFU5aMqrSDE6ea73Mj2tcMyo5jMd6jmeWUHK8so/joWUoHOUgwu
X4Po1QYz+3dszkDqMp4fklxBwXRsW10KXzPMTZ+sOPAveyxindmjkW8lGy+QsRlG
PfZ+G6Z6h7mjem0Y+iWlkYcV4PIWL1iwBi8saCbGS5jN2p8M+X+Q7UNKEkROb3N6
KOqkqm57TH2H3eDJAkSnh6/DNFu0Qg==
-----END CERTIFICATE-----
ansible/configs/ocp-workshop/files/userpvs.j2
@@ -1,5 +1,5 @@
---
{%  for pv in range(1,user_vols) %}
{%  for pv in range(1,user_vols|int) %}
apiVersion: v1
kind: PersistentVolume
metadata:
ansible/configs/ocp-workshop/post_software.yml
@@ -167,7 +167,7 @@
      shell: oc annotate namespace openshift-template-service-broker openshift.io/node-selector="" --overwrite
      ignore_errors: true
      when:
        - osrelease | version_compare('3.7', '>=')
        - osrelease is version_compare('3.7', '>=')
- name: Remove all users from self-provisioners group
  hosts: masters
@@ -178,23 +178,21 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags: [ env-specific, remove_self_provisioners ]
  tasks:
    - name: Set clusterRoleBinding auto-update to false
      command: oc annotate -n default --overwrite clusterrolebinding.rbac self-provisioners rbac.authorization.kubernetes.io/autoupdate=false
      when: remove_self_provisioners
    - when: remove_self_provisioners|bool
      block:
      - name: Set clusterRoleBinding auto-update to false
        command: oc annotate -n default --overwrite clusterrolebinding.rbac self-provisioners rbac.authorization.kubernetes.io/autoupdate=false
    - name: Remove system:authenticated from self-provisioner role
      shell: "oadm policy remove-cluster-role-from-group self-provisioner system:authenticated system:authenticated:oauth"
      ignore_errors: true
      when: remove_self_provisioners
      - name: Remove system:authenticated from self-provisioner role
        command: "oadm policy remove-cluster-role-from-group self-provisioner system:authenticated system:authenticated:oauth"
        ignore_errors: true
    - name: create our own OPENTLC-PROJECT-PROVISIONERS
      shell: "oadm groups new OPENTLC-PROJECT-PROVISIONERS"
      ignore_errors: true
      when: remove_self_provisioners
      - name: create our own OPENTLC-PROJECT-PROVISIONERS
        command: "oadm groups new OPENTLC-PROJECT-PROVISIONERS"
        ignore_errors: true
    - name: allow OPENTLC-PROJECT-PROVISIONERS members to provision their own projects
      shell: "oadm policy add-cluster-role-to-group self-provisioner OPENTLC-PROJECT-PROVISIONERS"
      when: remove_self_provisioners
      - name: allow OPENTLC-PROJECT-PROVISIONERS members to provision their own projects
        command: "oadm policy add-cluster-role-to-group self-provisioner OPENTLC-PROJECT-PROVISIONERS"
- name: Project Request Template
  hosts: masters
@@ -471,7 +469,9 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    # start supporting this only for OCP >= 3.9
    - when: osrelease is version_compare('3.9', '>=')
    - when:
        - osrelease is version_compare('3.9', '>=')
        - run_ocp_diagnostics|d(true)|bool
      block:
        # this command should return 0 (no error)
        - name: Run oc adm diagnostics
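Note: the `osrelease | version_compare(...)` to `osrelease is version_compare(...)` edits in this file track Ansible 2.5, which deprecated calling tests through filter syntax; the test form is the forward-compatible spelling (the test was later renamed to plain `version`). Sketch:

    - debug:
        msg: "running a 3.9+ cluster"
      when: osrelease is version_compare('3.9', '>=')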
ansible/configs/ocp-workshop/pre_software.yml
@@ -66,6 +66,7 @@
  tasks:
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/install-lets-encrypt-certs"
      tags: lets_encrypt
      when: install_lets_encrypt_certificates|bool
- name: Configuring Bastion Hosts
@@ -94,3 +95,19 @@
  tasks:
    - debug:
        msg: "Pre-Software checks completed successfully"
- name: Copy lets encrypt certificates
  hosts: masters
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - env-specific
    - project_request
  tasks:
    # https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem.txt
    - name: Copy over the letsencrypt certificate
      copy:
        src: ./files/lets-encrypt-x3-cross-signed.pem.txt
        dest: /etc/origin/master/
ansible/configs/ocp-workshop/scaleup.yml
@@ -117,7 +117,6 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - openshift_host_templates
    - openshift_install_idm_cert
  tasks:
    - name: generate ansible hosts file, keep it under workdir
      template:
@@ -197,7 +196,6 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - openshift_host_templates
    - openshift_install_idm_cert
  tasks:
    - debug:
        msg: "WARNING: s3user credentials not set"
ansible/configs/openshift-demos/msa-cicd-eap-full.yml
@@ -73,118 +73,6 @@
        prune_deployments_selector_stage: "comp-required!=true,app!=inventory"
      tags: demo
    # verify database deployments in cicd project
    - name: wait for database deployments in project {{ project_cicd }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_cicd }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_cicd }} selector="comp-type=database"
      tags: verify
    # verify database deployments in prod project
    - name: wait for database deployments in project {{ project_prod }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_prod }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_prod }} selector="comp-type=database"
    # verify database deployments in test project
    - name: wait for database deployments in project {{ project_test }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_test }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_test }} selector="comp-type=database"
    # verify database deployments in dev project
    - name: wait for database deployments in project {{ project_dev }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_dev }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_dev }} selector="comp-type=database"
      tags: verify
    # verify other deployments in cicd project
    - name: wait for other deployments in project {{ project_cicd }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_cicd }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_cicd }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in prod project
    - name: wait for other deployments in project {{ project_prod }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_prod }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_prod }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in stage project
    - name: wait for other deployments in project {{ project_stage }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_stage }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_stage }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in test project
    - name: wait for other deployments in project {{ project_test }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_test }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_test }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in dev project
    - name: wait for other deployments in project {{ project_dev }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_dev }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_dev }} selector="comp-type!=database"
      tags: verify
    # verify deployed successfully
    - include_tasks: verify_all.yml
      tags: verify
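Note: the deleted polling tasks all used `until: deployment_running|failed`, the same filter-as-test pattern being retired elsewhere in this commit; verification is left to the remaining included verify tasks. Where such a wait is still wanted, the test-form equivalent looks like this (a sketch, names as in the removed tasks):

    - name: wait for database deployments to complete (sketch)
      shell: "oc get pods -n {{ project_cicd }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running is failed
      retries: 20
      delay: 30
      ignore_errors: true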
ansible/configs/openshift-demos/msa-cicd-eap-min.yml
@@ -75,106 +75,6 @@
        disable_stage_project: true
      tags: demo
    # verify database deployments in cicd project
    - name: wait for database deployments in project {{ project_cicd }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_cicd }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_cicd }} selector="comp-type=database"
      tags: verify
    # verify database deployments in prod project
    - name: wait for database deployments in project {{ project_prod }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_prod }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_prod }} selector="comp-type=database"
    # verify database deployments in test project
    - name: wait for database deployments in project {{ project_test }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_test }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_test }} selector="comp-type=database"
    # verify database deployments in dev project
    - name: wait for database deployments in project {{ project_dev }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_dev }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_dev }} selector="comp-type=database"
      tags: verify
    # verify other deployments in cicd project
    - name: wait for other deployments in project {{ project_cicd }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_cicd }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_cicd }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in prod project
    - name: wait for other deployments in project {{ project_prod }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_prod }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_prod }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in test project
    - name: wait for other deployments in project {{ project_test }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_test }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_test }} selector="comp-type!=database"
      tags: verify
    # verify other deployments in dev project
    - name: wait for other deployments in project {{ project_dev }} to complete (succeed or fail)
      shell: "oc get pods -n {{ project_dev }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_dev }} selector="comp-type!=database"
      tags: verify
    # verify deployed successfully
    - include_tasks: verify_all.yml
      tags: verify
ansible/configs/openshift-demos/msa-full.yml
@@ -19,28 +19,6 @@
        hostname_suffix: "{{ apps_hostname_suffix }}"
        enable_cicd: false
    # verify database deployments
    - name: wait for database deployments to complete (succeed or fail)
      shell: "oc get pods -n {{ project_default }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_default }} selector="comp-type=database"
      tags: verify
    # verify other deployments
    - name: wait for other deployments to complete (succeed or fail)
      shell: "oc get pods -n {{ project_default }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_default }} selector="comp-type!=database"
      tags: verify
    # verify deployed successfully
    - include_tasks: verify_all.yml
      tags: verify
ansible/configs/openshift-demos/msa-min.yml
@@ -22,28 +22,6 @@
        prune_deployments_selector: "comp-required!=true"
    # verify database deployments
    - name: wait for database deployments to complete (succeed or fail)
      shell: "oc get pods -n {{ project_default }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_default }} selector="comp-type=database"
      tags: verify
    # verify other deployments
    - name: wait for other deployments to complete (succeed or fail)
      shell: "oc get pods -n {{ project_default }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
      register: deployment_running
      until: deployment_running|failed
      retries: 20
      delay: 30
      ignore_errors: true
      tags: verify
    - include_tasks: verify_tasks.yml project_name={{ project_default }} selector="comp-type!=database"
      tags: verify
    # verify deployed successfully
    - include_tasks: verify_all.yml
      tags: verify
ansible/configs/openshift-demos/requirements-prod.yml
@@ -1,30 +1,24 @@
---
- src: siamaksade.openshift_common_facts
  # src: https://github.com/siamaksade/ansible-openshift-common-facts.git
  name: openshift_commons_facts
  version: ocp-3.7
  version: ocp-3.9
- src: siamaksade.openshift_sonatype_nexus
  # src: https://github.com/siamaksade/ansible-openshift-nexus.git
  name: openshift_sonatype_nexus
  version: ocp-3.7
  version: ocp-3.9
- src: siamaksade.openshift_gogs
  # src: https://github.com/siamaksade/ansible-openshift-gogs.git
  name: openshift_gogs
  version: ocp-3.7
  version: ocp-3.9
- src: siamaksade.openshift_jenkins
  # src: https://github.com/siamaksade/ansible-openshift-jenkins.git
  name: openshift_jenkins
  version: ocp-3.7
  version: ocp-3.9
- src: siamaksade.openshift_workshopper
  # src: https://github.com/siamaksade/ansible-openshift-workshopper.git
  name: openshift_workshopper
  version: ocp-3.7
  version: ocp-3.9
- src: siamaksade.openshift_coolstore
  # src: https://github.com/siamaksade/ansible-openshift-coolstore.git
  name: openshift_coolstore
  version: ocp-3.7
  version: ocp-3.9
ansible/configs/openshift-demos/requirements.yml
@@ -1,24 +1,18 @@
---
- src: siamaksade.openshift_common_facts
  # src: https://github.com/siamaksade/ansible-openshift-common-facts.git
  name: openshift_commons_facts
- src: siamaksade.openshift_sonatype_nexus
  # src: https://github.com/siamaksade/ansible-openshift-nexus.git
  name: openshift_sonatype_nexus
- src: siamaksade.openshift_gogs
  # src: https://github.com/siamaksade/ansible-openshift-gogs.git
  name: openshift_gogs
- src: siamaksade.openshift_jenkins
  # src: https://github.com/siamaksade/ansible-openshift-jenkins.git
  name: openshift_jenkins
- src: siamaksade.openshift_workshopper
  # src: https://github.com/siamaksade/ansible-openshift-workshopper.git
  name: openshift_workshopper
- src: siamaksade.openshift_coolstore
  # src: https://github.com/siamaksade/ansible-openshift-coolstore.git
  name: openshift_coolstore
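Both requirements files are standard Ansible Galaxy role manifests (the prod variant additionally pins each role to its ocp-3.9 branch). As a hedged usage sketch, they would typically be installed ahead of a run with something like the following; the roles path is an assumption about the repo layout:
# Sketch: install the pinned role dependencies locally before the play runs.
- name: install openshift-demos role dependencies (sketch)
  command: ansible-galaxy install -r requirements-prod.yml -p ./roles --force
  delegate_to: localhost
  run_once: true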
ansible/configs/openshift-demos/verify_all.yml
New file
@@ -0,0 +1,84 @@
---
# wait for database deployments to finish
- name: wait for database deployments in project {{ project_cicd }} to complete (succeed or fail)
  shell: "oc get pods -n {{ project_cicd }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
  register: deployment_running
  until: deployment_running|failed
  retries: 20
  delay: 30
  ignore_errors: true
- name: wait for database deployments in project {{ project_prod }} to complete (succeed or fail)
  shell: "oc get pods -n {{ project_prod }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
  register: deployment_running
  until: deployment_running|failed
  retries: 20
  delay: 30
  ignore_errors: true
- name: wait for database deployments in project {{ project_test }} to complete (succeed or fail)
  shell: "oc get pods -n {{ project_test }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
  register: deployment_running
  until: deployment_running|failed
  retries: 20
  delay: 30
  ignore_errors: true
- name: wait for database deployments in project {{ project_dev }} to complete (succeed or fail)
  shell: "oc get pods -n {{ project_dev }} -l comp-type=database | grep '\\-deploy\\s' | grep 'Running'"
  register: deployment_running
  until: deployment_running|failed
  retries: 20
  delay: 30
  ignore_errors: true
# verify database deployments
- include_tasks: verify_tasks.yml
  vars:
    selector: comp-type=database
# wait for non-database deployments to finish
- name: wait for other deployments in project {{ project_cicd }} to complete (succeed or fail)
  shell: "oc get pods -n {{ project_cicd }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
  register: deployment_running
  until: deployment_running|failed
  retries: 20
  delay: 30
  ignore_errors: true
- name: wait for other deployments in project {{ project_prod }} to complete (succeed or fail)
  shell: "oc get pods -n {{ project_prod }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
  register: deployment_running
  until: deployment_running|failed
  retries: 20
  delay: 30
  ignore_errors: true
- name: wait for other deployments in project {{ project_stage }} to complete (succeed or fail)
  shell: "oc get pods -n {{ project_stage }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
  register: deployment_running
  until: deployment_running|failed
  retries: 20
  delay: 30
  ignore_errors: true
- name: wait for other deployments in project {{ project_test }} to complete (succeed or fail)
  shell: "oc get pods -n {{ project_test }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
  register: deployment_running
  until: deployment_running|failed
  retries: 20
  delay: 30
  ignore_errors: true
- name: wait for other deployments in project {{ project_dev }} to complete (succeed or fail)
  shell: "oc get pods -n {{ project_dev }} -l comp-type!=database | grep '\\-deploy\\s' | grep 'Running'"
  register: deployment_running
  until: deployment_running|failed
  retries: 20
  delay: 30
  ignore_errors: true
# verify non-database deployments
- import_tasks: verify_tasks.yml
  vars:
    selector: comp-type!=database
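Note that verify_all.yml passes the selector through a `vars:` block, while the playbooks above use the inline `key=value` form on the include line; the two are equivalent, though the inline form was later deprecated. Also, `include_tasks` is resolved dynamically at runtime whereas `import_tasks` is inlined statically at parse time, so mixing them here changes when the file is read, not what it does. A minimal sketch of the two parameter-passing styles (values illustrative):
# Sketch: both forms hand the same variables to verify_tasks.yml.
- include_tasks: verify_tasks.yml project_name=coolstore-dev selector="comp-type=database"
- include_tasks: verify_tasks.yml
  vars:
    project_name: coolstore-dev
    selector: comp-type=database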
ansible/configs/openshift-demos/verify_tasks.yml
@@ -1,29 +1,30 @@
---
- name: check if project {{ project_name }} exists
  shell: "{{ openshift_cli }} get project {{ project_name }}"
# verify cicd
- name: check if project {{ project_cicd }} exists
  shell: "{{ openshift_cli }} get project {{ project_cicd }}"
  register: result
  ignore_errors: true
  changed_when: false
- name: get deploymentconfig names in {{ project_name }}
  shell: "{{ openshift_cli }} get dc -l {{ selector }} -o jsonpath='{.items[*].metadata.name}' -n {{ project_name }}"
- name: get deploymentconfig names in {{ project_cicd }}
  shell: "{{ openshift_cli }} get dc -l {{ selector }} -o jsonpath='{.items[*].metadata.name}' -n {{ project_cicd }}"
  register: deploymentconfigs_list
  when: result|succeeded
  changed_when: false
- name: get deployment config replica counts in {{ project_name }}
  shell: "{{ openshift_cli }} get dc {{ deploymentconfigs_list.stdout }} -o json -n {{ project_name }}"
- name: get deployment config replica counts in {{ project_cicd }}
  shell: "{{ openshift_cli }} get dc {{ deploymentconfigs_list.stdout }} -o json -n {{ project_cicd }}"
  register: deploymentconfigs_list_json
  when: result|succeeded
  changed_when: false
- name: redeploy if deployment has failed
  shell: |
    {{ openshift_cli }} rollout cancel dc/{{ dc.metadata.name }} -n {{ project_name }} | true
    {{ openshift_cli }} rollout cancel dc/{{ dc.metadata.name }} -n {{ project_cicd }} | true
    sleep 30
    {{ openshift_cli }} rollout latest dc/{{ dc.metadata.name }} -n {{ project_name }}
    {{ openshift_cli }} rollout status dc/{{ dc.metadata.name }} -n {{ project_name }}
  when:
    {{ openshift_cli }} rollout latest dc/{{ dc.metadata.name }} -n {{ project_cicd }}
    {{ openshift_cli }} rollout status dc/{{ dc.metadata.name }} -n {{ project_cicd }}
  when:
    - result|succeeded
    - deploymentconfigs_list_json|succeeded
    - dc.metadata is defined
@@ -35,3 +36,155 @@
    label: "{{ dc.metadata.name if dc.metadata is defined else dc }}"
  with_items: "{{ deploymentconfigs_list_json.stdout|default('[]')|from_json|json_query('items') }}"
  ignore_errors: true
# verify prod
- name: check if project {{ project_prod }} exists
  shell: "{{ openshift_cli }} get project {{ project_prod }}"
  register: result
  ignore_errors: true
  changed_when: false
- name: get deploymentconfig names in {{ project_prod }}
  shell: "{{ openshift_cli }} get dc -l {{ selector }} -o jsonpath='{.items[*].metadata.name}' -n {{ project_prod }}"
  register: deploymentconfigs_list
  when: result|succeeded
  changed_when: false
- name: get deployment config replica counts in {{ project_prod }}
  shell: "{{ openshift_cli }} get dc {{ deploymentconfigs_list.stdout }} -o json -n {{ project_prod }}"
  register: deploymentconfigs_list_json
  when: result|succeeded
  changed_when: false
- name: redeploy if deployment has failed
  shell: |
    {{ openshift_cli }} rollout cancel dc/{{ dc.metadata.name }} -n {{ project_prod }} | true
    sleep 30
    {{ openshift_cli }} rollout latest dc/{{ dc.metadata.name }} -n {{ project_prod }}
    {{ openshift_cli }} rollout status dc/{{ dc.metadata.name }} -n {{ project_prod }}
  when:
    - result|succeeded
    - deploymentconfigs_list_json|succeeded
    - dc.metadata is defined
    - dc.status is defined
    - dc.spec is defined
    - dc.status.availableReplicas != dc.spec.replicas
  loop_control:
    loop_var: dc
    label: "{{ dc.metadata.name if dc.metadata is defined else dc }}"
  with_items: "{{ deploymentconfigs_list_json.stdout|default('[]')|from_json|json_query('items') }}"
  ignore_errors: true
# verify stage
- name: check if project {{ project_stage }} exists
  shell: "{{ openshift_cli }} get project {{ project_stage }}"
  register: result
  ignore_errors: true
  changed_when: false
- name: get deploymentconfig names in {{ project_stage }}
  shell: "{{ openshift_cli }} get dc -l {{ selector }} -o jsonpath='{.items[*].metadata.name}' -n {{ project_stage }}"
  register: deploymentconfigs_list
  when: result|succeeded
  changed_when: false
- name: get deployment config replica counts in {{ project_stage }}
  shell: "{{ openshift_cli }} get dc {{ deploymentconfigs_list.stdout }} -o json -n {{ project_stage }}"
  register: deploymentconfigs_list_json
  when: result|succeeded
  changed_when: false
- name: redeploy if deployment has failed
  shell: |
    {{ openshift_cli }} rollout cancel dc/{{ dc.metadata.name }} -n {{ project_stage }} | true
    sleep 30
    {{ openshift_cli }} rollout latest dc/{{ dc.metadata.name }} -n {{ project_stage }}
    {{ openshift_cli }} rollout status dc/{{ dc.metadata.name }} -n {{ project_stage }}
  when:
    - result|succeeded
    - deploymentconfigs_list_json|succeeded
    - dc.metadata is defined
    - dc.status is defined
    - dc.spec is defined
    - dc.status.availableReplicas != dc.spec.replicas
  loop_control:
    loop_var: dc
    label: "{{ dc.metadata.name if dc.metadata is defined else dc }}"
  with_items: "{{ deploymentconfigs_list_json.stdout|default('[]')|from_json|json_query('items') }}"
  ignore_errors: true
# verify test
- name: check if project {{ project_test }} exists
  shell: "{{ openshift_cli }} get project {{ project_test }}"
  register: result
  ignore_errors: true
  changed_when: false
- name: get deploymentconfig names in {{ project_test }}
  shell: "{{ openshift_cli }} get dc -l {{ selector }} -o jsonpath='{.items[*].metadata.name}' -n {{ project_test }}"
  register: deploymentconfigs_list
  when: result|succeeded
  changed_when: false
- name: get deployment config replica counts in {{ project_test }}
  shell: "{{ openshift_cli }} get dc {{ deploymentconfigs_list.stdout }} -o json -n {{ project_test }}"
  register: deploymentconfigs_list_json
  when: result|succeeded
  changed_when: false
- name: redeploy if deployment has failed
  shell: |
    {{ openshift_cli }} rollout cancel dc/{{ dc.metadata.name }} -n {{ project_test }} | true
    sleep 30
    {{ openshift_cli }} rollout latest dc/{{ dc.metadata.name }} -n {{ project_test }}
    {{ openshift_cli }} rollout status dc/{{ dc.metadata.name }} -n {{ project_test }}
  when:
    - result|succeeded
    - deploymentconfigs_list_json|succeeded
    - dc.metadata is defined
    - dc.status is defined
    - dc.spec is defined
    - dc.status.availableReplicas != dc.spec.replicas
  loop_control:
    loop_var: dc
    label: "{{ dc.metadata.name if dc.metadata is defined else dc }}"
  with_items: "{{ deploymentconfigs_list_json.stdout|default('[]')|from_json|json_query('items') }}"
  ignore_errors: true
# verify dev
- name: check if project {{ project_dev }} exists
  shell: "{{ openshift_cli }} get project {{ project_dev }}"
  register: result
  ignore_errors: true
  changed_when: false
- name: get deploymentconfig names in {{ project_dev }}
  shell: "{{ openshift_cli }} get dc -l {{ selector }} -o jsonpath='{.items[*].metadata.name}' -n {{ project_dev }}"
  register: deploymentconfigs_list
  when: result|succeeded
  changed_when: false
- name: get deployment config replica counts in {{ project_dev }}
  shell: "{{ openshift_cli }} get dc {{ deploymentconfigs_list.stdout }} -o json -n {{ project_dev }}"
  register: deploymentconfigs_list_json
  when: result|succeeded
  changed_when: false
- name: redeploy if deployment has failed
  shell: |
    {{ openshift_cli }} rollout cancel dc/{{ dc.metadata.name }} -n {{ project_dev }} | true
    sleep 30
    {{ openshift_cli }} rollout latest dc/{{ dc.metadata.name }} -n {{ project_dev }}
    {{ openshift_cli }} rollout status dc/{{ dc.metadata.name }} -n {{ project_dev }}
  when:
    - result|succeeded
    - deploymentconfigs_list_json|succeeded
    - dc.metadata is defined
    - dc.status is defined
    - dc.spec is defined
    - dc.status.availableReplicas != dc.spec.replicas
  loop_control:
    loop_var: dc
    label: "{{ dc.metadata.name if dc.metadata is defined else dc }}"
  with_items: "{{ deploymentconfigs_list_json.stdout|default('[]')|from_json|json_query('items') }}"
  ignore_errors: true
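The redeploy step relies on two details worth calling out. First, `rollout cancel ... | true` pipes into the `true` command; since a pipeline's exit status is that of its last command, the failure is swallowed, though `|| true` is the conventional spelling of that intent. Second, each loop compares `status.availableReplicas` against `spec.replicas` on the DeploymentConfigs pulled back as JSON. A condensed sketch of the pattern for a single illustrative project (the is-defined guards from the real tasks are omitted for brevity):
# Sketch: "demo-project" and the label are illustrative; missing-key guards omitted.
- name: get deployment configs as json (sketch)
  shell: "oc get dc -l comp-type=database -o json -n demo-project"
  register: dc_json
  changed_when: false
- name: redeploy any dc whose available replicas lag its spec (sketch)
  shell: |
    oc rollout cancel dc/{{ dc.metadata.name }} -n demo-project || true
    sleep 30
    oc rollout latest dc/{{ dc.metadata.name }} -n demo-project
    oc rollout status dc/{{ dc.metadata.name }} -n demo-project
  when: dc.status.availableReplicas != dc.spec.replicas
  loop: "{{ (dc_json.stdout | from_json)['items'] }}"
  loop_control:
    loop_var: dc
    label: "{{ dc.metadata.name }}"
  ignore_errors: true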
ansible/roles/bastion-opentlc-ipa/tasks/main.yml
@@ -9,23 +9,36 @@
    name: "ipa-client"
    state: present
- name: Register bastion with IPA
  shell: >
    /usr/sbin/ipa-client-install --uninstall;
- name: Register bastion with IPA using host password (first try)
  command: >
    /usr/sbin/ipa-client-install --domain=OPENTLC.COM
    -w '{{ipa_host_password}}'
    -N -U --mkhomedir --no-dns-sshfp
    --hostname={{bastion_public_dns_chomped}}
    {{ipa_additional_options|d('')}}
  when: ipa_host_password is defined
  register: ipa_r1
  ignore_errors: yes
- name: Retry to register bastion with IPA using host password
  shell: >
    /usr/sbin/ipa-client-install --uninstall;
    rm -f /var/lib/ipa-client/sysrestore.state;
    /usr/sbin/ipa-client-install --domain=OPENTLC.COM
    -w '{{ipa_host_password}}'
    -N -U --mkhomedir --no-dns-sshfp
    --hostname={{bastion_public_dns_chomped}}
    {{ipa_additional_options|d('')}}
  when:
    - ipa_host_password is defined
    - ipa_r1 is failed
  register: ipa_r
  until:
    - ipa_r is succeeded
  retries: 5
- name: Register bastion with IPA
  shell: >
    /usr/sbin/ipa-client-install --uninstall;
- name: Register bastion with IPA using OpenTLC admin creds (first try)
  command: >
    /usr/sbin/ipa-client-install --domain=OPENTLC.COM
    -p {{ipa_kerberos_user}} -w '{{ipa_kerberos_password}}'
    -N -U --mkhomedir --no-dns-sshfp
@@ -35,11 +48,43 @@
    - ipa_host_password is not defined
    - ipa_kerberos_user is defined
    - ipa_kerberos_password is defined
  register: ipa_r1
  ignore_errors: yes
- name: Retry to register bastion with IPA using OpenTLC admin creds
  shell: >
    /usr/sbin/ipa-client-install --uninstall;
    rm -f /var/lib/ipa-client/sysrestore.state;
    /usr/sbin/ipa-client-install --domain=OPENTLC.COM
    -p {{ipa_kerberos_user}} -w '{{ipa_kerberos_password}}'
    -N -U --mkhomedir --no-dns-sshfp
    --hostname={{bastion_public_dns_chomped}}
    {{ipa_additional_options|d('')}}
  when:
    - ipa_host_password is not defined
    - ipa_kerberos_user is defined
    - ipa_kerberos_password is defined
    - ipa_r1 is failed
  register: ipa_r
  until: ipa_r is succeeded
  retries: 5
- name: copy over ipa_optimize.sh script
- name: Create an archive of the ipa-client-* logs
  archive:
    path:
      - /var/log/ipaclient-install.log
      - /var/log/ipaclient-uninstall.log
    dest: /tmp/ipa-client-logs.tar.gz
  ignore_errors: yes
- name: Fetch the ipa-client-logs archive
  fetch:
    src: /tmp/ipa-client-logs.tar.gz
    dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{project_tag}}_ipa-client-logs.tar.gz"
    flat: true
  ignore_errors: yes
- name: Copy over ipa_optimize.sh script
  copy:
    src: "{{ role_path }}/files/ipa_optimize.sh"
    dest: /opt/ipa_optimize.sh
@@ -57,16 +102,10 @@
    validate: '/usr/sbin/visudo -cf %s'
  register: result
  retries: 20
  until: result | succeeded
  ignore_errors: yes
- name: report error
  fail:
    msg: Unable to update sudoers.d/opentlc-sudoers
  when: not result|succeeded
  until: result is succeeded
# sssd bug, fixed by restart
- name: restart sssd
- name: Restart sssd
  service:
    name: sssd
    state: restarted
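The registration flow here is a manual try/retry pair: the first attempt uses `command` and records its result, and only if it failed does the second task uninstall, remove the leftover sysrestore.state (which would otherwise block a re-install), and try again up to five times. Assuming block/rescue semantics are acceptable, the same shape could be expressed as below; a hedged sketch with the install options abbreviated:
# Sketch only: options shortened; the real tasks pass the full ipa-client-install flags.
- block:
    - name: register bastion with IPA (sketch)
      command: >
        /usr/sbin/ipa-client-install --domain=OPENTLC.COM
        -w '{{ ipa_host_password }}' -N -U --mkhomedir --no-dns-sshfp
  rescue:
    - name: clean up and retry registration (sketch)
      shell: >
        /usr/sbin/ipa-client-install --uninstall;
        rm -f /var/lib/ipa-client/sysrestore.state;
        /usr/sbin/ipa-client-install --domain=OPENTLC.COM
        -w '{{ ipa_host_password }}' -N -U --mkhomedir --no-dns-sshfp
      register: ipa_retry
      until: ipa_retry is succeeded
      retries: 5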
ansible/roles/common/tasks/packages.yml
@@ -18,5 +18,8 @@
    name: "{{ item }}"
    state: present
  with_items: "{{common_packages}}"
  register: yumr
  until: yumr is succeeded
  any_errors_fatal: true
  tags:
    - install_common_packages
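Two small points about this hunk: an `until:` without explicit `retries`/`delay` falls back to Ansible's defaults of 3 retries at 5-second intervals, and passing the whole list to `name:` in one yum transaction is the more modern idiom than `with_items`. A sketch with both made explicit:
# Sketch: same retry idiom, defaults spelled out, single yum transaction.
- name: install common packages (sketch)
  yum:
    name: "{{ common_packages }}"
    state: present
  register: yumr
  until: yumr is succeeded
  retries: 3
  delay: 5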
ansible/roles/install-lets-encrypt-certs/tasks/main.yml
@@ -1,49 +1,78 @@
---
## Request Let's Encrypt Wildcard Certificates for the Cluster
  - name: Test if Let's Encrypt Certificates are already there
    stat:
      path: /root/lets-encrypt-x3-cross-signed.pem
    register: cacert
- name: Test if Let's Encrypt Certificates are already there
  stat:
    path: /root/lets-encrypt-x3-cross-signed.pem
  register: cacert
  - name: Get Let's Encrypt Intermediary CA Certificate
    get_url:
      url: https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem.txt
      dest: /root/lets-encrypt-x3-cross-signed.pem
    when:
      cacert.stat.exists|bool == false
# Get Intermediary CA Certificate. This is also used in the SSO configuration!
- name: Get Let's Encrypt Intermediary CA Certificate
  get_url:
    url: https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem.txt
    dest: /root/lets-encrypt-x3-cross-signed.pem
  when:
    cacert.stat.exists|bool == false
  - name: Clone Let's Encrypt Acme.sh Repository
    git:
      repo: https://github.com/Neilpang/acme.sh.git
      clone: yes
      dest: /root/acme.sh
    when:
      cacert.stat.exists|bool == false
- name: Clone Let's Encrypt Acme.sh Repository
  git:
    repo: https://github.com/Neilpang/acme.sh.git
    clone: yes
    dest: /root/acme.sh
  when:
    cacert.stat.exists|bool == false
  - name: Add AWS Access Key to Let's Encrypt Acme.sh configuration
    lineinfile:
      path: /root/acme.sh/dnsapi/dns_aws.sh
      line: AWS_ACCESS_KEY_ID="{{ hostvars['localhost'].route53user_access_key }}"
      state: present
      insertbefore: '^#AWS_ACCESS_KEY_ID'
    when:
      - cacert.stat.exists|bool == false
      - cloud_provider == 'ec2'
# AWS BEGIN
- name: Add AWS Access Key to Let's Encrypt Acme.sh configuration
  lineinfile:
    path: /root/acme.sh/dnsapi/dns_aws.sh
    line: AWS_ACCESS_KEY_ID="{{ hostvars['localhost'].route53user_access_key }}"
    state: present
    insertbefore: '^#AWS_ACCESS_KEY_ID'
  when:
    - cacert.stat.exists|bool == false
    - cloud_provider == 'ec2'
  - name: Add AWS Secret Access Key to Let's Encrypt Acme.sh configuration
    lineinfile:
      path: /root/acme.sh/dnsapi/dns_aws.sh
      line: AWS_SECRET_ACCESS_KEY="{{ hostvars['localhost'].route53user_secret_access_key }}"
      state: present
      insertbefore: '^#AWS_SECRET_ACCESS_KEY'
    when:
      - cacert.stat.exists|bool == false
      - cloud_provider == 'ec2'
- name: Add AWS Secret Access Key to Let's Encrypt Acme.sh configuration
  lineinfile:
    path: /root/acme.sh/dnsapi/dns_aws.sh
    line: AWS_SECRET_ACCESS_KEY="{{ hostvars['localhost'].route53user_secret_access_key }}"
    state: present
    insertbefore: '^#AWS_SECRET_ACCESS_KEY'
  when:
    - cacert.stat.exists|bool == false
    - cloud_provider == 'ec2'
  - name: Request API and Wildcard Certificates from Let's Encrypt
    shell: "/root/acme.sh/acme.sh --issue -d {{ master_lb_dns }} -d *.{{ cloudapps_suffix }} --dns dns_aws"
    args:
      chdir: /root/acme.sh
    when:
      - cacert.stat.exists|bool == false
      - cloud_provider == 'ec2'
- name: Request API and Wildcard Certificates from Let's Encrypt
  shell: "/root/acme.sh/acme.sh {{ acme_args|d('') }} --issue -d {{ master_lb_dns }} -d *.{{ cloudapps_suffix }} --dns dns_aws"
  args:
    chdir: /root/acme.sh
  when:
    - cacert.stat.exists|bool == false
    - cloud_provider == 'ec2'
# AWS END
# MS Azure BEGIN (Placeholder for now)
# https://github.com/Neilpang/acme.sh/wiki/How-to-use-Azure-DNS
# Placeholder for Azure. Needs to be implemented
# Set the following Variables ...
# export AZUREDNS_SUBSCRIPTIONID="12345678-9abc-def0-1234-567890abcdef"
# export AZUREDNS_TENANTID="11111111-2222-3333-4444-555555555555"
# export AZUREDNS_APPID="3b5033b5-7a66-43a5-b3b9-a36b9e7c25ed"          # appid of the service principal
# export AZUREDNS_CLIENTSECRET="1b0224ef-34d4-5af9-110f-77f527d561bd"   # password from creating the service principal
- name: Request API and Wildcard Certificates from Let's Encrypt
  shell: "/root/acme.sh/acme.sh {{ acme_args|d('') }} --issue -d {{ master_lb_dns }} -d *.{{ cloudapps_suffix }} --dns dns_azure"
  args:
    chdir: /root/acme.sh
  when:
    - cacert.stat.exists|bool == false
    - cloud_provider == 'azure'
# MS Azure END (Placeholder for now)
- name: Install crontab to renew certificates when they expire
  cron:
    name: LETS_ENCRYPT_RENEW
    special_time: hourly
    job: "/root/acme.sh/acme.sh {{ acme_args|d('') }} --cron --home /root/.acme.sh > /dev/null"
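The Azure branch above is still a placeholder: acme.sh's dns_azure hook reads the AZUREDNS_* variables listed in the comments from its environment. A hedged sketch of supplying them from Ansible, where the `azure_*` variables are assumptions rather than existing repo vars:
# Sketch: azure_* variable names are illustrative assumptions.
- name: Request API and Wildcard Certificates from Let's Encrypt (Azure sketch)
  shell: "/root/acme.sh/acme.sh {{ acme_args|d('') }} --issue -d {{ master_lb_dns }} -d *.{{ cloudapps_suffix }} --dns dns_azure"
  args:
    chdir: /root/acme.sh
  environment:
    AZUREDNS_SUBSCRIPTIONID: "{{ azure_subscription_id }}"
    AZUREDNS_TENANTID: "{{ azure_tenant_id }}"
    AZUREDNS_APPID: "{{ azure_app_id }}"
    AZUREDNS_CLIENTSECRET: "{{ azure_client_secret }}"
  when:
    - cacert.stat.exists|bool == false
    - cloud_provider == 'azure'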
ansible/roles/install-nexus/files/nexus3-persistent-template.yaml
@@ -158,7 +158,7 @@
- displayName: Sonatype Nexus version
  name: NEXUS_VERSION
  required: true
  value: 3.6.0
  value: 3.12.1
- description: Volume space available for Sonatype Nexus e.g. 512Mi, 2Gi
  displayName: Volume Space for Nexus
  name: VOLUME_CAPACITY
ansible/roles/ocp-workload-3scale-demo/tasks/config.yml
@@ -22,7 +22,7 @@
- name: Get Insurance realm
  uri:
    url: 'https://raw.githubusercontent.com/hguerrero/3scale-examples/master/security-oidc/templates/insurance-realm.json'
    url: 'https://raw.githubusercontent.com/jbossdemocentral/3scale-security-oidc-demo/master/support/templates/insurance-realm.json'
    return_content: yes
  register: get_realm
ansible/roles/ocp-workload-3scale-demo/tasks/workload.yml
@@ -44,13 +44,13 @@
  shell: "oc project default"
- name: Create service messaging broker
  shell: "oc process -f https://raw.githubusercontent.com/hguerrero/3scale-examples/master/security-oidc/templates/amq63-basic-template.json \
  shell: "oc process -f https://raw.githubusercontent.com/jbossdemocentral/3scale-security-oidc-demo/master/support/templates/amq63-basic-template.json \
  -p MQ_USERNAME=admin -p MQ_PASSWORD=admin | oc create -n {{service_project}} -f -"
- name: Create service backend API implementation
  shell: "oc process -f https://raw.githubusercontent.com/hguerrero/3scale-examples/master/security-oidc/templates/accidentalert-backend-template.json \
  -p APP_NAME=accidentalert-backend -p GIT_REPO=https://github.com/hguerrero/3scale-examples.git -p GIT_REF=master \
  -p CONTEXT_DIR=/security-oidc/projects/myaccidentalert -p ACTIVEMQ_BROKER_USERNAME=admin -p ACTIVEMQ_BROKER_PASSWORD=admin \
  shell: "oc process -f https://raw.githubusercontent.com/jbossdemocentral/3scale-security-oidc-demo/master/support/templates/accidentalert-backend-template.json \
  -p APP_NAME=accidentalert-backend -p GIT_REPO=https://github.com/jbossdemocentral/3scale-security-oidc-demo.git -p GIT_REF=master \
  -p CONTEXT_DIR=/projects/myaccidentalert -p ACTIVEMQ_BROKER_USERNAME=admin -p ACTIVEMQ_BROKER_PASSWORD=admin \
  -p CPU_REQUEST=1 -p MEMORY_REQUEST=512Mi -p MEMORY_LIMIT=1024Mi | oc create -n {{service_project}} -f -"
- name: Create project for ui app
@@ -61,7 +61,7 @@
  shell: "oc project default"
- name: Create UI app
  shell: "oc process -f https://raw.githubusercontent.com/hguerrero/3scale-examples/master/security-oidc/templates/accidentalert-ui-template.json \
  shell: "oc process -f https://raw.githubusercontent.com/jbossdemocentral/3scale-security-oidc-demo/master/support/templates/accidentalert-ui-template.json \
  -p SSO_URL='http://sso-{{sso_project}}.{{ocp_apps_domain}}' -p BACKEND_URL='http://accidentalert-backend-{{service_project}}.{{ocp_apps_domain}}' \
  -p APPLICATION_HOSTNAME='www-accidentalert-{{guid}}.{{ocp_apps_domain}}' | oc -n {{www_project}} create -f -"
@@ -73,7 +73,7 @@
  shell: "oc project default"
- name: Create the 3scale app
  shell: "oc new-app -f https://raw.githubusercontent.com/hguerrero/3scale-examples/master/security-oidc/templates/amp-template.json \
  shell: "oc new-app -f https://raw.githubusercontent.com/jbossdemocentral/3scale-security-oidc-demo/master/support/templates/amp-template.json \
  --param WILDCARD_DOMAIN={{guid}}.{{ocp_apps_domain}} --param ADMIN_PASSWORD=password --param WILDCARD_POLICY=Subdomain -n {{threescale_project}}"
- name: Give ocp_username access to sso_project
ansible/roles/ocp-workload-3scale-experienced/defaults/main.yml
New file
@@ -0,0 +1,29 @@
---
become_override: false
ocp_username: jbride-redhat.com
ocp_user_needs_quota: True
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
quota_requests_cpu: 5
quota_limits_cpu: 10
quota_requests_memory: '7Gi'
quota_limits_memory: '25Gi'
quota_configmaps: 15
quota_pods: 30
quota_persistentvolumeclaims: 20
quota_services: 30
quota_secrets: 30
quota_requests_storage: 50Gi
ocp_apps_domain: apps.{{ocp_domain}}
build_status_retries: 20
build_status_delay: 20
deploy_status_retries: 15
deploy_status_delay: 20
ansible/roles/ocp-workload-3scale-experienced/readme.adoc
New file
@@ -0,0 +1,45 @@
= ocp-workload-3scale-experienced - Sample Config
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-fuse-on-ocp"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
GUID=jb05
OCP_USERNAME="jbride-redhat.com"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=true" \
                    -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
                    -e"ACTION=create"
----
=== To Delete an environment
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-fuse-on-ocp"
GUID=jb05
OCP_USERNAME="jbride-redhat.com"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                    -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ACTION=remove"
----
ansible/roles/ocp-workload-3scale-experienced/tasks/main.yml
New file
@@ -0,0 +1,20 @@
---
- name: Running Pre Workload Tasks
  include: ./pre_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  include: ./workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  include: ./post_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  include: ./remove_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp-workload-3scale-experienced/tasks/post_workload.yml
New file
@@ -0,0 +1,5 @@
---
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully"
ansible/roles/ocp-workload-3scale-experienced/tasks/pre_workload.yml
New file
@@ -0,0 +1,32 @@
---
- name: Add user to developer group (allowed to create projects)
  shell: "oadm groups add-users {{item}} {{ocp_username}}"
  register: groupadd_register
  with_items: "{{ocp_user_groups}}"
  when: ocp_username is defined and ocp_user_groups is defined
# - name: test that command worked
#   debug:
#     var: groupadd_register
#     verbosity: 2
- name: Create user Quota - clusterresourcequota
  shell: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
        --hard requests.memory="{{quota_requests_memory}}" \
        --hard limits.memory="{{quota_limits_memory}}" \
        --hard configmaps="{{quota_configmaps}}" \
        --hard pods="{{quota_pods}}" \
        --hard persistentvolumeclaims="{{quota_persistentvolumeclaims}}"  \
        --hard services="{{quota_services}}" \
        --hard secrets="{{quota_secrets}}" \
        --hard requests.storage="{{quota_requests_storage}}"
  ignore_errors: true
- name: pre_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully"
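The quota task leans on `ignore_errors: true` to survive re-runs, since `oc create` fails when the clusterresourcequota already exists. A hedged, more idempotent sketch that probes first and creates only when absent (the --hard flags are abbreviated to a subset of those above):
# Sketch: probe-then-create; only some of the --hard flags shown.
- name: check whether the cluster quota already exists (sketch)
  command: oc get clusterresourcequota clusterquota-{{ ocp_username }}-{{ guid }}
  register: quota_check
  failed_when: false
  changed_when: false
- name: create the cluster quota when missing (sketch)
  shell: >
    oc create clusterresourcequota clusterquota-{{ ocp_username }}-{{ guid }}
    --project-annotation-selector=openshift.io/requester={{ ocp_username }}
    --hard pods={{ quota_pods }} --hard requests.cpu={{ quota_requests_cpu }}
  when: quota_check.rc != 0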
ansible/roles/ocp-workload-3scale-experienced/tasks/remove_workload.yml
New file
@@ -0,0 +1,50 @@
---
- name: pre_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: Remove user from groups {{ocp_user_groups}}
  shell: oc adm groups remove-users {{item}} {{ocp_username}}
  with_items: "{{ocp_user_groups}}"
  ignore_errors: true
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}
  ignore_errors: true
- name: Remove user Projects - oc get projects
  command: "oc get projects -o json"
  register: all_projects
- name: Remove user Projects - Convert output to json
  set_fact:
    projects: "{{all_projects.stdout | from_json}}"
- name: Remove user Projects - Debug statement
  debug:
    msg: "found user project: {{item.metadata.name}}"
    verbosity: 1
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
  with_items: "{{projects['items']}}"
- name: Remove user Projects - "oc delete project {{item.metadata.name}} "
  command: "oc delete project {{item.metadata.name}}"
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
    - item.status.phase is defined
    - item.status.phase != "Terminating"
  with_items: "{{projects['items']}}"
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully - Removed"
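The project cleanup above loops over every project and filters per item with `when:` clauses on the `openshift.io/requester` annotation and the Terminating phase. A hedged equivalent that does the filtering once with a JMESPath query (the query string is an assumption, not repo code):
# Sketch: JMESPath does the requester/phase filtering up front.
- name: delete all projects requested by the user (sketch)
  command: "oc delete project {{ item }}"
  loop: "{{ all_projects.stdout | from_json | json_query(user_projects_query) }}"
  vars:
    user_projects_query: >-
      items[?metadata.annotations."openshift.io/requester"=='{{ ocp_username }}'
        && status.phase!='Terminating'].metadata.name
  ignore_errors: true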
ansible/roles/ocp-workload-3scale-experienced/tasks/wait_for_build.yml
New file
@@ -0,0 +1,23 @@
---
# Purpose:
#   This script queries OCP for builds that exist but are not yet ready.
#   So long as there are unready builds, this script continues to loop
#
# Manual Test to determine list of unready builds :
#  1) install jp :  https://github.com/jmespath/jp
#  2) oc get builds -o json | jp "items[?  (status.phase != 'Complete') ].metadata.annotations.\"openshift.io/build-config.name\""
#
#  Documentation pertaining to JMESPath syntax:
#    - http://jmespath.org/tutorial.html
#    - https://stackoverflow.com/questions/41261680/ansible-json-query-path-to-select-item-by-content
#
- name: "Wait for following builds to become ready: {{build_to_wait}}"
  command: 'oc get build -o json -n "{{ ocp_project }}"'
  register: build_state
  changed_when: false
  retries: "{{ build_status_retries }}"
  delay: "{{ build_status_delay }}"
  vars:
    query: "items[?  (status.phase != 'Complete') ].metadata.annotations.\"openshift.io/build-config.name\""
  until: "build_state.stdout |from_json |json_query(query) |intersect(build_to_wait) |length == 0"
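wait_for_build.yml expects the caller to supply `ocp_project` and a `build_to_wait` list; the `until:` expression keeps polling while any named build config still has a build whose phase is not Complete. A hedged usage sketch with illustrative values:
# Sketch: project and build-config names are assumptions.
- include_tasks: wait_for_build.yml
  vars:
    ocp_project: "demo-{{ guid }}"
    build_to_wait:
      - accidentalert-backend
      - accidentalert-ui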
ansible/roles/ocp-workload-3scale-experienced/tasks/wait_for_deploy.yml
New file
@@ -0,0 +1,20 @@
---
# Purpose:
#   This script queries OCP for replication controllers that exist but are not yet ready.
#   So long as there are unready replication controllers, this script continues to loop
#
# Manual Test to determine list of unready replication controllers :
#  1) install jp :  https://github.com/jmespath/jp
#  2) oc get rc -o json | jp 'items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.annotations."openshift.io/deployment-config.name"'
#
- name: "Wait for following deployments to become ready: {{pod_to_wait}}"
  command: 'oc get rc -o json -n "{{ ocp_project }}"'
  register: rc_state
  changed_when: false
  retries: "{{ deploy_status_retries }}"
  delay: "{{ deploy_status_delay }}"
  until: 'rc_state.stdout |from_json |json_query(''items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.annotations."openshift.io/deployment-config.name"'') |intersect(pod_to_wait) |length == 0'
#  Documentation pertaining to JMESPath syntax:
#    - http://jmespath.org/tutorial.html
ansible/roles/ocp-workload-3scale-experienced/tasks/workload.yml
New file
@@ -0,0 +1,8 @@
---
- name: Make sure we go back to default project
  shell: "oc project default"
- name: workload Tasks Complete
  debug:
    msg: workload Tasks Complete
ansible/roles/ocp-workload-amq-enmasse/tasks/workload.yml
@@ -5,6 +5,9 @@
- name: "Create project for workload {{namespace}}"
  shell: "oc new-project {{namespace}}"
- name: "Create 2nd project for brokered workload {{namespace}}"
  shell: "oc new-project {{namespace}}-brokered"
- name: Make sure we go back to default project
  shell: "oc project default"
@@ -27,13 +30,12 @@
    depth: 1
    version: "{{enmasse_repo_tag}}"
- name: execute ansible-playbook using shell
- name: execute ansible-playbook using shell to create standard ST deployment
  shell: |
      ansible-playbook -i "enmasse," -c local /tmp/{{namespace}}/enmasse/templates/install/ansible/playbooks/openshift/{{enmasse_template_file}} \
      ansible-playbook -i /tmp/{{namespace}}/enmasse/ansible/inventory/singletenant-standard.example /tmp/{{namespace}}/enmasse/ansible/playbooks/openshift/{{enmasse_template_file}} \
      -e namespace={{namespace}} \
      -e multitenant=true \
      -e multitenant=false \
      -e enable_rbac=false \
      -e enable_user_lookup=true \
      -e api_server=true \
      -e register_api_server=true \
      -e keycloak_admin_password={{keycloak_admin_password}} \
@@ -41,6 +43,21 @@
      > /tmp/{{namespace}}/enmasse_install.log
- name: Make sure we go to brokered project
  shell: "oc project {{namespace}}_brokered"
- name: execute ansible-playbook using shell to create brokered ST deployment
  shell: |
      ansible-playbook -i /tmp/{{namespace}}/enmasse/ansible/inventory/singletenant-brokered.example /tmp/{{namespace}}/enmasse/ansible/playbooks/openshift/{{enmasse_template_file}} \
      -e namespace={{namespace}} \
      -e multitenant=false \
      -e enable_rbac=false \
      -e api_server=true \
      -e register_api_server=true \
      -e keycloak_admin_password={{keycloak_admin_password}} \
      -e authentication_services={{authentication_services}} \
      > /tmp/{{namespace}}/enmasse_install_1.log
# ###############################################
- name: annotate the project as requested by user
ansible/roles/ocp-workload-appdev-homework/defaults/main.yml
New file
@@ -0,0 +1,21 @@
---
become_override: false
ocp_username: shachar-redhat.com
ocp_user_needs_quota: True
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
quota_requests_cpu: 5
quota_limits_cpu: 10
quota_requests_memory: '15Gi'
quota_limits_memory: '25Gi'
quota_configmaps: 10
quota_pods: 25
quota_persistentvolumeclaims: 20
quota_services: 30
quota_secrets: 30
quota_requests_storage: 50Gi
silent: false
ansible/roles/ocp-workload-appdev-homework/readme.adoc
New file
@@ -0,0 +1,125 @@
= ocp-workload-developer-environment - Sample Config
== Role overview
* This is a simple role that does the following:
** Playbook: link:./tasks/pre_workload.yml[pre_workload.yml] - Sets up an
 environment for the workload deployment
*** Adds a user to a list of groups defined in the
 link:./defaults/main.yml[defaults file].
*** Sets a cluster resource quota for the user based on the variables in the
 link:./defaults/main.yml[defaults file].
*** Debug task will print out: `pre_workload Tasks Complete`
** Playbook: link:./tasks/workload.yml[workload.yml] - Used to deploy the actual
 workload, i.e., 3scale, Mobile, or some demo
*** This role doesn't do anything here
*** Debug task will print out: `workload Tasks Complete`
** Playbook: link:./tasks/post_workload.yml[post_workload.yml] - Used to
 configure the workload after deployment
*** This role doesn't do anything here
*** Debug task will print out: `post_workload Tasks Complete`
== Review the defaults variable file
* This file link:./defaults/main.yml[./defaults/main.yml] contains all the variables you
 need to define to control the deployment of your workload.
* You can modify any of these default values by adding
`-e"variable_name=variable_value"` to the command line
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
TARGET_HOST="bastion.dev37.openshift.opentlc.com"
OCP_USERNAME="shacharb-redhat.com"
WORKLOAD="ocp-workload-developer-environment"
GUID=1001
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem" \
                 -e"ansible_ssh_user=ec2-user" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=true" \
                    -e"ACTION=create"
----
=== To Delete an environment
----
TARGET_HOST="bastion.dev37.openshift.opentlc.com"
OCP_USERNAME="ankay-redhat.com"
WORKLOAD="ocp-workload-developer-environment"
GUID=1002
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem" \
                    -e"ansible_ssh_user=ec2-user" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ACTION=remove"
----
== Other related information:
=== Deploy Workload on OpenShift Cluster from an existing playbook:
[source,yaml]
----
- name: Deploy a workload role on a master host
  hosts: all
  become: true
  gather_facts: False
  tags:
    - step007
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/{{ocp_workload}}", when: 'ocp_workload is defined' }
----
NOTE: You might want to change `hosts: all` to fit your requirements
=== Set up your Ansible inventory file
* You can create an Ansible inventory file to define your connection
 method to your host (Master/Bastion with OC command)
* You can also use the command line to define the hosts directly if your `ssh`
 configuration is set to connect to the host correctly
* You can also use the command line to use localhost or if your cluster is
 already authenticated and configured in your `oc` configuration
[source, ini]
.example inventory file
----
[gptehosts:vars]
ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem
ansible_ssh_user=ec2-user
[gptehosts:children]
openshift
[openshift]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
bastion.cluster3.openshift.opentlc.com ansible_ssh_host=ec2-11-111-111-11.us-west-2.compute.amazonaws.com
bastion.cluster4.openshift.opentlc.com
[dev]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
[prod]
bastion.cluster3.openshift.opentlc.com
bastion.cluster4.openshift.opentlc.com
----
ansible/roles/ocp-workload-appdev-homework/tasks/main.yml
New file
@@ -0,0 +1,20 @@
---
- name: Running Pre Workload Tasks
  include: ./pre_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  include: ./workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  include: ./post_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  include: ./remove_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp-workload-appdev-homework/tasks/post_workload.yml
New file
@@ -0,0 +1,6 @@
---
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully"
  when: not silent|bool
ansible/roles/ocp-workload-appdev-homework/tasks/pre_workload.yml
New file
@@ -0,0 +1,33 @@
---
- name: Add user to developer group (allowed to create projects)
  command: "oadm groups add-users {{item}} {{ocp_username}}"
  register: groupadd_register
  with_items: "{{ocp_user_groups}}"
  when:
    - ocp_username is defined
    - ocp_user_groups | default([]) | length > 0
- name: test that command worked
  debug:
    var: groupadd_register
    verbosity: 2
- name: Create user Quota - clusterresourcequota
  command: |
        oc create clusterresourcequota clusterquota-"{{ocp_username}}-{{guid}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
        --hard requests.memory="{{quota_requests_memory}}" \
        --hard limits.memory="{{quota_limits_memory}}" \
        --hard configmaps="{{quota_configmaps}}" \
        --hard pods="{{quota_pods}}" \
        --hard persistentvolumeclaims="{{quota_persistentvolumeclaims}}"  \
        --hard services="{{quota_services}}" \
        --hard secrets="{{quota_secrets}}" \
        --hard requests.storage="{{quota_requests_storage}}"
  ignore_errors: true
- name: pre_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully"
ansible/roles/ocp-workload-appdev-homework/tasks/remove_workload.yml
New file
@@ -0,0 +1,50 @@
---
- name: pre_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: Remove user from developer group - (remove ability to create projects)
  command: "oadm groups remove-users {{item}} {{ocp_username}}"
  register: groupadd_register
  with_items: "{{ocp_user_groups}}"
  when:
    - ocp_username is defined
    - ocp_user_groups | default([]) | length > 0
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  command: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}"
  command: oc delete clusterresourcequota clusterquota-{{ocp_username}}
  ignore_errors: true
- name: Remove user Projects - oc get projects
  command: "oc get projects -o json"
  register: all_projects
- name: Remove user Projects - Convert output to json
  set_fact:
    projects: "{{all_projects.stdout | from_json}}"
- name: Remove user Projects - Debug statement
  debug:
    msg: "found user project: {{item.metadata.name}}"
    verbosity: 1
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
  with_items: "{{projects['items']}}"
- name: Remove user Projects - "oc delete project {{item.metadata.name}}"
  command: "oc delete project {{item.metadata.name}}"
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
    - item.status.phase is defined
    - item.status.phase != "Terminating"
  with_items: "{{projects['items']}}"
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully - Removed"
ansible/roles/ocp-workload-appdev-homework/tasks/workload.yml
New file
@@ -0,0 +1,5 @@
---
- name: workload Tasks Complete
  debug:
    msg: workload Tasks Complete
ansible/roles/ocp-workload-appmod-migration/readme.adoc
@@ -8,8 +8,8 @@
WORKLOAD="ocp-workload-appmod-migration"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
GUID=jb06
OCP_USERNAME="jbride-redhat.com"
GUID=gptetraining01
OCP_USERNAME="gpsetraining1"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
@@ -30,8 +30,8 @@
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-appmod-migration"
GUID=jb06
OCP_USERNAME="jbride-redhat.com"
GUID=gptetraining01
OCP_USERNAME="gpsetraining1"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
ansible/roles/ocp-workload-appmod-migration/tasks/remove_workload.yml
@@ -3,6 +3,12 @@
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: Remove user from groups {{ocp_user_groups}}
  shell: oc adm groups remove-users {{item}} {{ocp_username}}
  with_items: "{{ocp_user_groups}}"
  ignore_errors: true
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
@@ -11,8 +17,33 @@
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}
  ignore_errors: true
- name: Remove Project {{ocp_constraints_project}}
  shell: "oc delete project {{ocp_constraints_project}}"
- name: Remove user Projects - oc get projects
  command: "oc get projects -o json"
  register: all_projects
- name: Remove user Projects - Convert output to json
  set_fact:
    projects: "{{all_projects.stdout | from_json}}"
- name: Remove user Projects - Debug statement
  debug:
    msg: "found user project: {{item.metadata.name}}"
    verbosity: 1
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
  with_items: "{{projects['items']}}"
- name: Remove user Projects - "oc delete project {{item.metadata.name}} "
  command: "oc delete project {{item.metadata.name}}"
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
    - item.status.phase is defined
    - item.status.phase != "Terminating"
  with_items: "{{projects['items']}}"
- name: post_workload Tasks Complete
  debug:
ansible/roles/ocp-workload-bxms-dm/ilt_provision.sh
@@ -35,7 +35,7 @@
    echo -en "\n\t--START_PROJECT_NUM=*     OPTIONAL: specify # of first OCP project to provision (defult = 1))"
    echo -en "\n\t--END_PROJECT_NUM=*       OPTIONAL: specify # of OCP projects to provision (defualt = 1))"
    echo -en "\n\t-h                        this help manual"
    echo -en "\n\n\nExample:                ./roles/$WORKLOAD/ilt_provision.sh --HOST_GUID=dev37 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
    echo -en "\n\n\nExample:                ./roles/$WORKLOAD/ilt_provision.sh --HOST_GUID=dev39 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
}
ansible/roles/ocp-workload-bxms-dm/readme.adoc
@@ -9,7 +9,7 @@
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
GUID=jb45
HOST_GUID=dev37
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-bxms-dm"
@@ -33,7 +33,7 @@
=== To Delete an environment
----
GUID=jb45
HOST_GUID=dev37
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-bxms-dm"
ansible/roles/ocp-workload-bxms-pam/ilt_provision.sh
@@ -35,7 +35,7 @@
    echo -en "\n\t--START_PROJECT_NUM=*     OPTIONAL: specify # of first OCP project to provision (defult = 1))"
    echo -en "\n\t--END_PROJECT_NUM=*       OPTIONAL: specify # of OCP projects to provision (defualt = 1))"
    echo -en "\n\t-h                        this help manual"
    echo -en "\n\n\nExample:                ./roles/$WORKLOAD/ilt_provision.sh --HOST_GUID=dev37 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
    echo -en "\n\n\nExample:                ./roles/$WORKLOAD/ilt_provision.sh --HOST_GUID=dev39 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
}
ansible/roles/ocp-workload-bxms-pam/readme.adoc
@@ -3,7 +3,7 @@
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
GUID=jb45
HOST_GUID=dev37
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-bxms-pam"
@@ -28,7 +28,7 @@
----
GUID=jb45
HOST_GUID=dev37
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-bxms-pam"
ansible/roles/ocp-workload-fsi-client-onboarding-demo/defaults/main.yml
@@ -9,7 +9,7 @@
quota_limits_cpu: 4
quota_requests_memory: '6Gi'
quota_limits_memory: '6Gi'
quota_limits_memory: '10Gi'
quota_configmaps: 4
quota_pods: 6
ansible/roles/ocp-workload-fsi-client-onboarding-demo/files/limit-range.yaml
File was deleted
ansible/roles/ocp-workload-fsi-client-onboarding-demo/tasks/workload.yml
@@ -12,11 +12,11 @@
         --description="Red Hat JBoss BPM Suite & Entando 'Client Onboarding' FSI Demo"
  ignore_errors: true
- name: Set project limit LimitRange
  shell: "oc create -f /tmp/{{guid}}//limit-range.yaml -n {{ocp_project}}"
#- name: Set project limit LimitRange
#  shell: "oc create -f /tmp/{{guid}}//limit-range.yaml -n {{ocp_project}}"
- name: Import ImageStreams
  shell: "oc create -f https://raw.githubusercontent.com/jboss-openshift/application-templates/master/jboss-image-streams.json -n {{ocp_project}}"
  shell: "oc create -f https://raw.githubusercontent.com/jboss-openshift/application-templates/master/processserver/processserver64-image-stream.json -n {{ocp_project}}"
- name: "Import templates"
  shell: "oc create -f https://raw.githubusercontent.com/jboss-openshift/application-templates/master/processserver/processserver64-postgresql-s2i.json -n {{ocp_project}}"
ansible/roles/ocp-workload-fuse-ignite/ilt_provision.sh
@@ -35,7 +35,7 @@
    echo -en "\n\t--START_PROJECT_NUM=*     OPTIONAL: specify # of first OCP project to provision (defult = 1))"
    echo -en "\n\t--END_PROJECT_NUM=*       OPTIONAL: specify # of OCP projects to provision (defualt = 1))"
    echo -en "\n\t-h                        this help manual"
    echo -en "\n\n\nExample:                ./roles/$WORKLOAD/ilt_provision.sh --HOST_GUID=dev37 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
    echo -en "\n\n\nExample:                ./roles/$WORKLOAD/ilt_provision.sh --HOST_GUID=dev39 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
}
ansible/roles/ocp-workload-fuse-ignite/readme.adoc
@@ -3,7 +3,7 @@
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
HOST_GUID=dev37
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-fuse-ignite"
SSH_USERNAME="jbride-redhat.com"
@@ -35,7 +35,7 @@
=== To Delete an environment
----
HOST_GUID=dev37
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-fuse-ignite"
GUID=jb05
ansible/roles/ocp-workload-fuse-on-ocp/defaults/main.yml
New file
@@ -0,0 +1,56 @@
---
become_override: false
ocp_username: jbride-redhat.com
ocp_user_needs_quota: True
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
ocp_project: fuse-on-ocp-{{guid}}
quota_requests_cpu: 5
quota_limits_cpu: 10
quota_requests_memory: '6Gi'
quota_limits_memory: '20Gi'
quota_configmaps: 10
quota_pods: 20
quota_persistentvolumeclaims: 20
quota_services: 30
quota_secrets: 30
quota_requests_storage: 50Gi
ocp_apps_domain: apps.{{ocp_domain}}
build_status_retries: 20
build_status_delay: 20
deploy_status_retries: 15
deploy_status_delay: 20
fuse_version: 2.1.fuse-000081-redhat-4
fuse_artifact_base_url: "https://raw.githubusercontent.com/jboss-fuse/application-templates/application-templates-{{fuse_version}}"
fuse_is_url: "{{fuse_artifact_base_url}}/fis-image-streams.json"
fuse_components:
  - eap-camel-cdi-template.json
  - eap-camel-cxf-jaxrs-template.json
  - eap-camel-cxf-jaxws-template.json
  - eap-camel-jpa-template.json
  - karaf-camel-amq-template.json
  - karaf-camel-log-template.json
  - karaf-camel-rest-sql-template.json
  - karaf-cxf-rest-template.json
  - spring-boot-camel-amq-template.json
  - spring-boot-camel-config-template.json
  - spring-boot-camel-drools-template.json
  - spring-boot-camel-infinispan-template.json
  - spring-boot-camel-teiid-template.json
  - spring-boot-camel-template.json
  - spring-boot-camel-xml-template.json
  - spring-boot-cxf-jaxrs-template.json
  - spring-boot-cxf-jaxws-template.json
ansible/roles/ocp-workload-fuse-on-ocp/ilt_provision.sh
New file
@@ -0,0 +1,96 @@
#!/bin/bash
END_PROJECT_NUM=1
START_PROJECT_NUM=1
WORKLOAD="ocp-workload-fuse-on-ocp"
LOG_FILE=/tmp/$WORKLOAD
for var in $@
do
    case "$var" in
        --HOST_GUID=*) HOST_GUID=`echo $var | cut -f2 -d\=` ;;
        --START_PROJECT_NUM=*) START_PROJECT_NUM=`echo $var | cut -f2 -d\=` ;;
        --END_PROJECT_NUM=*) END_PROJECT_NUM=`echo $var | cut -f2 -d\=` ;;
        -h) HELP=true ;;
        -help) HELP=true ;;
    esac
done
function ensurePreReqs() {
    if [ "x$HOST_GUID" == "x" ]; then
            echo -en "must pass parameter: --HOST_GUID=<ocp host GUID> . \n\n"
            help
            exit 1;
    fi
    LOG_FILE=$LOG_FILE-$HOST_GUID-$START_PROJECT_NUM-$END_PROJECT_NUM.log
    echo -en "starting\n\n" > $LOG_FILE
    echo -en "\n\nProvision log file found at: $LOG_FILE\n";
}
function help() {
    echo -en "\n\nOPTIONS:";
    echo -en "\n\t--HOST_GUID=*             REQUIRED: specify GUID of target OCP environment)"
    echo -en "\n\t--START_PROJECT_NUM=*     OPTIONAL: specify # of first OCP project to provision (defult = 1))"
    echo -en "\n\t--END_PROJECT_NUM=*       OPTIONAL: specify # of OCP projects to provision (defualt = 1))"
    echo -en "\n\t-h                        this help manual"
    echo -en "\n\n\nExample:                ./roles/$WORKLOAD/ilt_provision.sh --HOST_GUID=dev39 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
}
function login() {
    echo -en "\nHOST_GUID=$HOST_GUID\n" >> $LOG_FILE
    oc login https://master.$HOST_GUID.openshift.opentlc.com -u opentlc-mgr -p r3dh4t1!
}
function executeLoop() {
    echo -en "\nexecuteLoop() START_PROJECT_NUM = $START_PROJECT_NUM ;  END_PROJECT_NUM=$END_PROJECT_NUM" >> $LOG_FILE
    for (( c=$START_PROJECT_NUM; c<=$END_PROJECT_NUM; c++ ))
    do
        GUID=$c
        OCP_USERNAME=user$c
        executeAnsible
    done
}
function executeAnsible() {
    TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
    SSH_USERNAME="jbride-redhat.com"
    SSH_PRIVATE_KEY="id_ocp"
    # NOTE:  Ensure you have ssh'd (as $SSH_USERNAME) into the bastion node of your OCP cluster environment at $TARGET_HOST and logged in using the opentlc-mgr account:
    #           oc login https://master.$HOST_GUID.openshift.opentlc.com -u opentlc-mgr
    PROJECT_PREFIX=fuseocp
    GUID=$PROJECT_PREFIX$GUID
    echo -en "\n\nexecuteAnsible():  Provisioning project with GUID = $GUID and OCP_USERNAME = $OCP_USERNAME\n" >> $LOG_FILE
    ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=true" \
                    -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
                    -e"ACTION=create" >> $LOG_FILE
    if [ $? -ne 0 ];
    then
        echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n " >> $LOG_FILE
        echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n "
        exit 1;
    fi
}
ensurePreReqs
login
executeLoop
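For reference, a hypothetical invocation of the script above, provisioning five sequential projects against the dev39 cluster; the project prefix `fuseocp` and the log path follow directly from the script, and the GUID value is illustrative:
----
# provisions projects fuseocp1..fuseocp5 for users user1..user5;
# progress is appended to /tmp/ocp-workload-fuse-on-ocp-dev39-1-5.log
./roles/ocp-workload-fuse-on-ocp/ilt_provision.sh \
    --HOST_GUID=dev39 --START_PROJECT_NUM=1 --END_PROJECT_NUM=5
----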
ansible/roles/ocp-workload-fuse-on-ocp/readme.adoc
New file
@@ -0,0 +1,45 @@
= ocp-workload-fuse-on-ocp - Sample Config
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-fuse-on-ocp"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
GUID=jb05
OCP_USERNAME="jbride-redhat.com"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=true" \
                    -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
                    -e"ACTION=create"
----
=== To Delete an environment
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-fuse-on-ocp"
GUID=jb05
OCP_USERNAME="jbride-redhat.com"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                    -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ACTION=remove"
----
ansible/roles/ocp-workload-fuse-on-ocp/tasks/main.yml
New file
@@ -0,0 +1,20 @@
---
- name: Running Pre Workload Tasks
  include: ./pre_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  include: ./workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  include: ./post_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  include: ./remove_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp-workload-fuse-on-ocp/tasks/post_workload.yml
New file
@@ -0,0 +1,5 @@
---
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully"
ansible/roles/ocp-workload-fuse-on-ocp/tasks/pre_workload.yml
New file
@@ -0,0 +1,32 @@
---
- name: Add user to developer group (allowed to create projects)
  shell: "oadm groups add-users {{item}} {{ocp_username}}"
  register: groupadd_register
  with_items: "{{ocp_user_groups}}"
  when: ocp_username is defined and ocp_user_groups is defined
# - name: test that command worked
#   debug:
#     var: groupadd_register
#     verbosity: 2
- name: Create user Quota - clusterresourcequota
  shell: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
        --hard requests.memory="{{quota_requests_memory}}" \
        --hard limits.memory="{{quota_limits_memory}}" \
        --hard configmaps="{{quota_configmaps}}" \
        --hard pods="{{quota_pods}}" \
        --hard persistentvolumeclaims="{{quota_persistentvolumeclaims}}"  \
        --hard services="{{quota_services}}" \
        --hard secrets="{{quota_secrets}}" \
        --hard requests.storage="{{quota_requests_storage}}"
  ignore_errors: true
- name: pre_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully"
ansible/roles/ocp-workload-fuse-on-ocp/tasks/remove_workload.yml
New file
@@ -0,0 +1,50 @@
---
- name: pre_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: Remove user from groups {{ocp_user_groups}}
  shell: oc adm groups remove-users {{item}} {{ocp_username}}
  with_items: "{{ocp_user_groups}}"
  ignore_errors: true
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}
  ignore_errors: true
- name: Remove user Projects - oc get projects
  command: "oc get projects -o json"
  register: all_projects
- name: Remove user Projects - Convert output to json
  set_fact:
    projects: "{{all_projects.stdout | from_json}}"
- name: Remove user Projects -  Debug statement
  debug:
    msg: "found user project: {{item.metadata.name}}"
    verbosity: 1
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
  with_items: "{{projects['items']}}"
- name: Remove user Projects - "oc delete project {{item.metadata.name}} "
  command: "oc delete project {{item.metadata.name}}"
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
    - item.status.phase is defined
    - item.status.phase != "Terminating"
  with_items: "{{projects['items']}}"
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully - Removed"
ansible/roles/ocp-workload-fuse-on-ocp/tasks/wait_for_build.yml
New file
@@ -0,0 +1,23 @@
---
# Purpose:
#   This script queries OCP for builds that exist but are not yet ready.
#   So long as there are unready builds, this script continues to loop
#
# Manual test to determine the list of unready builds:
#  1) install jp :  https://github.com/jmespath/jp
#  2) oc get builds -o json | jp "items[?  (status.phase != 'Complete') ].metadata.annotations.\"openshift.io/build-config.name\""
#
#  Documentation pertaining to JMESPath syntax:
#    - http://jmespath.org/tutorial.html
#    - https://stackoverflow.com/questions/41261680/ansible-json-query-path-to-select-item-by-content
#
- name: "Wait for following builds to become ready: {{build_to_wait}}"
  command: 'oc get build -o json -n "{{ ocp_project }}"'
  register: build_state
  changed_when: false
  retries: "{{ build_status_retries }}"
  delay: "{{ build_status_delay }}"
  vars:
    query: "items[?  (status.phase != 'Complete') ].metadata.annotations.\"openshift.io/build-config.name\""
  until: "build_state.stdout |from_json |json_query(query) |intersect(build_to_wait) |length == 0"
ansible/roles/ocp-workload-fuse-on-ocp/tasks/wait_for_deploy.yml
New file
@@ -0,0 +1,20 @@
---
# Purpose:
#   This script queries OCP for replication controllers that exist but are not yet ready.
#   So long as there are unready replication controllers, this script continues to loop
#
# Manual test to determine the list of unready replication controllers:
#  1) install jp :  https://github.com/jmespath/jp
#  2) oc get rc -o json | jp 'items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.annotations."openshift.io/deployment-config.name"'
#
- name: "Wait for following deployments to become ready: {{pod_to_wait}}"
  command: 'oc get rc -o json -n "{{ ocp_project }}"'
  register: rc_state
  changed_when: false
  retries: "{{ deploy_status_retries }}"
  delay: "{{ deploy_status_delay }}"
  until: 'rc_state.stdout |from_json |json_query(''items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.annotations."openshift.io/deployment-config.name"'') |intersect(pod_to_wait) |length == 0'
#  Documentation pertaining to JMESPath syntax:
#    - http://jmespath.org/tutorial.html
ansible/roles/ocp-workload-fuse-on-ocp/tasks/workload.yml
New file
@@ -0,0 +1,30 @@
---
- name: Create project for workload; project =  {{ocp_project}}
  shell: "oc new-project {{ocp_project}}"
- name: Make sure we go back to the default project
  shell: "oc project default"
- name: load fuse on ocp imagestreams in openshift namespace
  shell: "oc create -f {{fuse_is_url}} -n openshift"
  ignore_errors: true
- name: load fuse on ocp templates in openshift namespace
  shell: "oc create -f {{fuse_artifact_base_url}}/quickstarts/{{item}} -n openshift"
#  debug:
#    msg: "item = {{item}}"
  with_items: "{{fuse_components}}"
  ignore_errors: true
- name: Annotate the empty project as requested by user
  shell: "oc annotate namespace {{ocp_project}} openshift.io/requester={{ocp_username}} --overwrite"
- name: Give ocp_username access to ocp_project; user = {{ocp_username}}
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_project}}"
- name: workload Tasks Complete
  debug:
    msg: workload Tasks Complete
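After the role runs, a quick spot check that the shared artifacts landed in the `openshift` namespace; a sketch assuming a logged-in `oc` session (the grep patterns are indicative, since the exact imagestream and template names come from the upstream Fuse artifacts):
----
oc get is -n openshift | grep fis           # the Fuse (FIS) imagestreams
oc get templates -n openshift | grep camel  # a sample of the loaded quickstart templates
oc get project fuse-on-ocp-<guid>           # the per-user project
----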
ansible/roles/ocp-workload-parksmap-demo/files/workshop-parksmap.yaml
@@ -74,6 +74,13 @@
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          resources:
            limits:
              cpu: 2000m
              memory: 2Gi
            requests:
              cpu: 50m
              memory: 256Mi
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        schedulerName: default-scheduler
@@ -124,4 +131,4 @@
      kind: Service
      name: parksmap
      weight: 100
    wildcardPolicy: None
ansible/software_playbooks/openshift.yml
@@ -11,6 +11,14 @@
  tags:
    - generate_ansible_hosts_file
  tasks:
    - name: Add identity providers to the list
      set_fact:
        identity_providers: "{{ identity_providers + [available_identity_providers[item]] }}"
      when:
        - available_identity_providers is defined
        - available_identity_providers[item] is defined
      with_items: "{{ install_idms|d([]) }}"
    - name: generate ansible hosts file for 3.7 and earlier
      template:
        src: "../configs/{{ env_type }}/files/hosts_template.j2"
@@ -47,20 +55,23 @@
        src: "../workdir/hosts-{{ env_type }}-{{ guid }}"
        dest: /etc/ansible/hosts
    - debug:
        var: install_idm
    - name: get idm certificate
      get_url:
        url: "{{idm_ca_url}}"
        dest: /root/ca.crt
        mode: 0440
      when: 'install_idm is defined and install_idm == "ldap"'
      when: >
        (install_idm is defined and install_idm == "ldap")
        or 'ldap' in install_idms|d([])
    - name: Copy over htpasswd file
      copy:
        src: "../configs/{{env_type}}/files/htpasswd.openshift"
        dest: /root/htpasswd.openshift
      when: 'install_idm is defined and install_idm == "htpasswd"'
      when: >
        (install_idm is defined and install_idm == "htpasswd")
        or 'htpasswd' in install_idms|d([])
- name: Configuring openshift-provisioner
  hosts:
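The hunk above extends the single-valued `install_idm` switch to a list, `install_idms`, so a cluster can get both an LDAP and an htpasswd identity provider in one pass. A hypothetical invocation exercising the new variable (the CA URL and extra vars are illustrative, not taken from this commit):
----
ansible-playbook ansible/software_playbooks/openshift.yml \
    -e '{"install_idms": ["ldap", "htpasswd"]}' \
    -e "idm_ca_url=http://admin.example.com/ca.crt" \
    -e "env_type=ocp-adv-deploy-hw" -e "guid=testhomework"
----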
scripts/examples/ocp-adv-deploy-hw.rc
New file
@@ -0,0 +1,14 @@
GUID=testhomework
REGION=us-east-1
KEYNAME=ocpkey
ENVTYPE=ocp-adv-deploy-hw
SOFTWARE_TO_DEPLOY=openshift
HOSTZONEID=Z3IHLWJZOU9SRT
INSTALL_IPA_CLIENT=false
ENVTYPE_ARGS=(
-e "subdomain_base_suffix=.example.opentlc.com"
-e "email=judd@redhat.com"
-e "own_repo_path=http://admin.na.shared.opentlc.com/repos/ocp/3.9.27"
--skip-tags=installing_openshift,get_openshift_credentialas,openshift_nfs_config
)
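The `.rc` file is plain bash (note the `ENVTYPE_ARGS` array), so a wrapper can source it and splice the array into an `ansible-playbook` call. A rough sketch under that assumption; the entry-point path and the mapping of each variable to an `-e` flag are inferred from the variable names above, not spelled out in this commit:
----
source scripts/examples/ocp-adv-deploy-hw.rc
ansible-playbook ansible/main.yml \
    -e "guid=${GUID}" -e "env_type=${ENVTYPE}" \
    -e "aws_region=${REGION}" -e "key_name=${KEYNAME}" \
    -e "software_to_deploy=${SOFTWARE_TO_DEPLOY}" \
    -e "HostedZoneId=${HOSTZONEID}" \
    -e "install_ipa_client=${INSTALL_IPA_CLIENT}" \
    "${ENVTYPE_ARGS[@]}"
----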
tests/scenarii/ansible-provisioner.yml
New file
@@ -0,0 +1,53 @@
---
# credentials
aws_access_key_id: foobar
aws_secret_access_key: foobar
aws_access_key_id_openshiftbu: foobar
aws_secret_access_key_openshiftbu: foobar
github_user: foobar
github_token: foobar
ipa_kerberos_user: foobar
ipa_kerberos_password: foobar
rhel_subscription_user: foobar
rhel_subscription_pass: foobar
cloud_provider: ec2
guid: foobartest
provisioner_public_dns: admin.example.com
aws_region: eu-central-1
key_name: mykey
env_type: ansible-provisioner
software_to_deploy: none
HostedZoneId: foobar
subdomain_base_suffix: .example.com
install_zabbix: true
install_ipa_client: true
ipa_additional_options: '--force-join'
remove_self_provisioners: true
ocp_report: false
repo_method: rhn
provisioner_instance_type: t2.2xlarge
mgr_users:
  - name: opentlc-mgr
    home: /home/opentlc-mgr
    aws_credentials:
      - name: default
        aws_access_key_id: "{{ aws_access_key_id }}"
        aws_secret_access_key: "{{ aws_secret_access_key }}"
      - name: openshiftbu
        aws_access_key_id: "{{ aws_access_key_id_openshiftbu }}"
        aws_secret_access_key: "{{ aws_secret_access_key_openshiftbu }}"
    git_repos:
      - repo: "https://github.com/sborenst/ansible_agnostic_deployer"
        dest: "/home/opentlc-mgr/ansible_agnostic_deployer"
        version: master
      - repo: "https://github.com/sborenst/ansible_agnostic_deployer"
        dest: "/home/opentlc-mgr/dev_ansible_agnostic_deployer"
        version: development
    authorized_keys:
      - ssh-rsa foobar