New file

= ocp-workshop standard config

== Set up your "Secret" variables

* You need to provide some credentials for deployments to work.
* Create a file called "env_secret_vars.yml" and put it in the
./ansible/configs/CONFIGNAME/ directory.
** This file *has to exist* at this point, even if no vars from it are used.
* You can choose to provide these values as extra vars (-e "var=value") on the
command line if you prefer not to keep sensitive information in a file, as shown in the example below.
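
For example, assuming the variable names from the "Secret" vars file below, the AWS and admin credentials could be passed directly on the command line instead (the values here are placeholders):

[source,bash]
----
ansible-playbook main.yml \
  -e "aws_access_key_id=MYACCESSKEY" \
  -e "aws_secret_access_key=MYSECRETKEY" \
  -e "admin_user=admin" -e "admin_user_password=CHANGEME"
----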

.Example contents of "Secret" Vars file
----
# ## Logon credentials for Red Hat Network
# ## Required if using the subscription component
# ## of this playbook.
rhel_subscription_user: ''
rhel_subscription_pass: ''
#
# ## LDAP Bind Password
bindPassword: ''
#
# ## Desired openshift admin name and password
admin_user: ""
admin_user_password: ""
#
# ## AWS Credentials. This is required.
aws_access_key_id: ""
aws_secret_access_key: ""
# If using repo_method: satellite, you must set these values as well.
satellite_url: https://satellite.example.com
satellite_org: Sat_org_name
satellite_activationkey: "rhel7basic"
zabbix_auto_registration_pass: "XXXXX"
----

== Review the Env_Type variable file

* The link:./env_vars.yml[./env_vars.yml] file contains all the variables you
need to define to control the deployment of your environment; the example below shows how to override them.
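
For instance, to change the environment sizing you could override variables defined in `env_vars.yml` on the command line (a sketch; combine these with the usual arguments shown in the sections below):

[source,bash]
----
ansible-playbook main.yml -e "node_instance_count=4" -e "master_instance_count=3"
----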

=== Add new users on the bastion

To manage users on the bastion, you can override the `mgr_users` variable. The default is located in `{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/mgr_users.yml` and looks like this:

.Default mgr_users.yml
[source,yaml]
----
mgr_users:
  - name: opentlc-mgr
    home: /home/opentlc-mgr
    authorized_keys:
      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4OojwKH74UWVOY92y87Tb/b56CMJoWbz2gyEYsr3geOc2z/n1pXMwPfiC2KT7rALZFHofc+x6vfUi6px5uTm06jXa78S7UB3MX56U3RUd8XF3svkpDzql1gLRbPIgL1h0C7sWHfr0K2LG479i0nPt/X+tjfsAmT3nWj5PVMqSLFfKrOs6B7dzsqAcQPInYIM+Pqm/pXk+Tjc7cfExur2oMdzx1DnF9mJaj1XTnMsR81h5ciR2ogXUuns0r6+HmsHzdr1I1sDUtd/sEVu3STXUPR8oDbXBsb41O5ek6E9iacBJ327G3/1SWwuLoJsjZM0ize+iq3HpT1NqtOW6YBLR opentlc-mgr@inf00-mwl.opentlc.com
----

You may, for example, want to add another user. To do so, just override the variable in `env_secret_vars.yml`:

.Managing users ("Secret" Vars file or Env Vars file)
[source,yaml]
----
mgr_users:
  - name: opentlc-mgr
    home: /home/opentlc-mgr
    authorized_keys:
      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4OojwKH74UWVOY92y87Tb/b56CMJoWbz2gyEYsr3geOc2z/n1pXMwPfiC2KT7rALZFHofc+x6vfUi6px5uTm06jXa78S7UB3MX56U3RUd8XF3svkpDzql1gLRbPIgL1h0C7sWHfr0K2LG479i0nPt/X+tjfsAmT3nWj5PVMqSLFfKrOs6B7dzsqAcQPInYIM+Pqm/pXk+Tjc7cfExur2oMdzx1DnF9mJaj1XTnMsR81h5ciR2ogXUuns0r6+HmsHzdr1I1sDUtd/sEVu3STXUPR8oDbXBsb41O5ek6E9iacBJ327G3/1SWwuLoJsjZM0ize+iq3HpT1NqtOW6YBLR opentlc-mgr@inf00-mwl.opentlc.com
  - name: fridim
    home: /home/fridim
    authorized_keys:
      - https://github.com/fridim.keys
----

== Running the Ansible Playbook

You can run the playbook with the following arguments to override the default variable values:

[source,bash]
----
REGION=us-east-1
KEYNAME=ocpkey
GUID=testocpworkshop1
ENVTYPE="ocp-workshop"
CLOUDPROVIDER=ec2
HOSTZONEID='Z186MFNM7DX4NF'
REPO_PATH='https://admin.example.com/repos/ocp/3.6/'
BASESUFFIX='.openshift.opentlc.com'
NODE_COUNT=2
REPO_VERSION=3.6
DEPLOYER_REPO_PATH=`pwd`
OSRELEASE=3.6.173.0.21

ansible-playbook main.yml -e "guid=${GUID}" -e "env_type=${ENVTYPE}" \
  -e "osrelease=${OSRELEASE}" -e "repo_version=${REPO_VERSION}" \
  -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}" \
  -e "HostedZoneId=${HOSTZONEID}" -e "key_name=${KEYNAME}" \
  -e "subdomain_base_suffix=${BASESUFFIX}" \
  -e "bastion_instance_type=t2.large" -e "master_instance_type=c4.xlarge" \
  -e "infranode_instance_type=c4.4xlarge" -e "node_instance_type=c4.4xlarge" \
  -e "nfs_instance_type=m3.large" -e "node_instance_count=${NODE_COUNT}" \
  -e "email=name@example.com" \
  -e "install_idm=htpasswd" -e "software_to_deploy=openshift" \
  -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" -e "own_repo_path=${REPO_PATH}" --skip-tags=remove_self_provisioners
----

=== Satellite version
[source,bash]
----
REGION=us-east-1
KEYNAME=ocpkey
GUID=dev-na1
ENVTYPE="ocp-workshop"
CLOUDPROVIDER=ec2
HOSTZONEID='Z186MFNM7DX4NF'
BASESUFFIX='.openshift.opentlc.com'
NODE_COUNT=2
REPO_VERSION=3.5
DEPLOYER_REPO_PATH=`pwd`

LOG_FILE=/tmp/${ENVTYPE}-${GUID}.log
# Note: this snippet is an excerpt from a wrapper script; STACK_NAME and the
# positional arguments ($1, $5) are expected to be provided by that wrapper.
IPAPASS=$5

if [ "$1" = "provision" ] ; then

echo "Provisioning: ${STACK_NAME}" 1>> $LOG_FILE 2>> $LOG_FILE

ansible-playbook ${DEPLOYER_REPO_PATH}/main.yml \
  -e "guid=${GUID}" -e "env_type=${ENVTYPE}" -e "key_name=${KEYNAME}" \
  -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}" -e "HostedZoneId=${HOSTZONEID}" \
  -e "subdomain_base_suffix=${BASESUFFIX}" \
  -e "bastion_instance_type=t2.large" -e "master_instance_type=c4.xlarge" \
  -e "infranode_instance_type=c4.4xlarge" -e "node_instance_type=c4.4xlarge" \
  -e "support_instance_type=c4.xlarge" -e "node_instance_count=${NODE_COUNT}" \
  -e "ipa_host_password=${IPAPASS}" -e "install_idm=ldap" \
  -e "repo_method=satellite" -e "repo_version=${REPO_VERSION}" \
  -e "email=name@example.com" \
  -e "software_to_deploy=openshift" -e "osrelease=3.5.5.15" -e "docker_version=1.12.6" \
  -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" 1>> $LOG_FILE 2>> $LOG_FILE
fi
----

=== Advanced Development Workshop
[source,bash]
----
REGION=us-east-1
KEYNAME=ocpkey
GUID=rdu
ENVTYPE="ocp-workshop"
CLOUDPROVIDER=ec2
HOSTZONEID='Z186MFNM7DX4NF'
REPO_PATH='https://admin.example.com/repos/ocp/3.5/'
DEPLOYER_REPO_PATH=/opt/ansible_agnostic_deployer/ansible
BASESUFFIX='.openshift.opentlc.com'
REPO_VERSION=3.5
NODE_COUNT=2
ansible-playbook ${DEPLOYER_REPO_PATH}/main.yml \
  -e "guid=${GUID}" \
  -e "env_type=${ENVTYPE}" \
  -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}" \
  -e "HostedZoneId=${HOSTZONEID}" -e "key_name=${KEYNAME}" \
  -e "subdomain_base_suffix=${BASESUFFIX}" \
  -e "bastion_instance_type=t2.large" -e "master_instance_type=c4.xlarge" \
  -e "infranode_instance_type=c4.4xlarge" -e "node_instance_type=c4.4xlarge" \
  -e "nfs_instance_type=t2.large" -e "node_instance_count=${NODE_COUNT}" \
  -e "install_idm=htpasswd" -e "software_to_deploy=openshift" \
  -e "email=name@example.com" \
  -e "own_repo_path=${REPO_PATH}" -e "repo_method=rhn" -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
  -e "osrelease=3.5.5.31" -e "repo_version=${REPO_VERSION}" -e "docker_version=1.12.6" \
  --skip-tags=remove_self_provisioners,opentlc-integration
----

=== IPA registration

You can provide either `ipa_host_password` or an `ipa_kerberos_user`/`ipa_kerberos_password` pair to register the hosts to the IPA server. See link:../../roles/bastion-opentlc-ipa[roles/bastion-opentlc-ipa]. The relevant extra vars are shown below.
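
.IPA registration extra vars (appended to the usual ansible-playbook arguments; the Kerberos values are hypothetical placeholders)
[source,bash]
----
# Option 1: register with a host password
-e "ipa_host_password=${IPAPASS}"

# Option 2: register with a Kerberos user allowed to enroll hosts
-e "ipa_kerberos_user=admin" -e "ipa_kerberos_password=S3cret"
----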

=== CNS/Glusterfs

If you set this variable, 3 support nodes will be deployed and used for glusterfs:

----
-e install_glusterfs=true
----

NOTE: This will discard NFS PVs for logging (elasticsearch) and metrics (cassandra). Instead, storage for those pods will be 'EmptyDir'. Proper persistent storage setup is left to the user as a post-install step.

Tested on OCP 3.7. See examples in `scripts/examples`.
=== Scale Up
Use the `scaleup.yml` playbook. Increase `node_instance_count` and `new_node_instance_count` accordingly. For example, if your previous `node_instance_count` was 2:
[source,bash]
----
REGION=us-west-1
KEYNAME=ocpkey
GUID=na1
ENVTYPE="ocp-workshop"
CLOUDPROVIDER=ec2
HOSTZONEID='Z186MFNM7DX4NF'
REPO_PATH='https://admin.example.com/repos/ocp/3.5/'
MINOR_VERSION="3.5.5.15"
INSTALLIPA=false
BASESUFFIX='.openshift.opentlc.com'
REPO_VERSION=3.5
NODE_COUNT=4
NEW_NODE_COUNT=2
DEPLOYER_REPO_PATH=`pwd`
ansible-playbook ./configs/${ENVTYPE}/scaleup.yml \
  -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
  -e "HostedZoneId=${HOSTZONEID}" \
  -e "bastion_instance_type=t2.large" \
  -e "cloud_provider=${CLOUDPROVIDER}" \
  -e "guid=${GUID}" \
  -e "infranode_instance_type=c4.4xlarge" \
  -e "install_idm=htpasswd" \
  -e "install_ipa_client=${INSTALLIPA}" \
  -e "nfs_instance_type=m3.large" \
  -e "osrelease=${MINOR_VERSION}" \
  -e "own_repo_path=${REPO_PATH}" \
  -e "email=name@example.com" \
  -e "repo_method=file" \
  -e "subdomain_base_suffix=${BASESUFFIX}" \
  --skip-tags=remove_self_provisioners,install_zabbix \
  -e "aws_region=${REGION}" \
  -e "docker_version=1.12.6" \
  -e "env_type=${ENVTYPE}" \
  -e "key_name=${KEYNAME}" \
  -e "master_instance_type=c4.xlarge" \
  -e "node_instance_count=${NODE_COUNT}" \
  -e "new_node_instance_count=${NEW_NODE_COUNT}" \
  -e "node_instance_type=c4.4xlarge" \
  -e "repo_version=${REPO_VERSION}"
----

=== To Delete an environment
[source,bash]
----
REGION=us-west-1
KEYNAME=ocp-workshop-openshift
GUID=na1
ENVTYPE="ocp-workshop"
CLOUDPROVIDER=ec2
HOSTZONEID='Z186MFNM7DX4NF'
BASESUFFIX='.openshift.opentlc.com'
# To Destroy an Env
ansible-playbook ./configs/${ENVTYPE}/destroy_env.yml \
  -e "guid=${GUID}" \
  -e "env_type=${ENVTYPE}" \
  -e "cloud_provider=${CLOUDPROVIDER}" \
  -e "aws_region=${REGION}" \
  -e "HostedZoneId=${HOSTZONEID}" \
  -e "key_name=${KEYNAME}" \
  -e "subdomain_base_suffix=${BASESUFFIX}"
----
New file

---
- name: Delete Infrastructure
  hosts: localhost
  connection: local
  gather_facts: False
  become: no
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"

  tasks:
    # - name: get internal dns zone id if not provided
    #   environment:
    #     AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    #     AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
    #     AWS_DEFAULT_REGION: "{{aws_region}}"
    #   shell: "aws route53 list-hosted-zones-by-name --region={{aws_region}} --dns-name={{guid}}.internal. --output text --query='HostedZones[*].Id' | awk -F'/' '{print $3}'"
    #   register: internal_zone_id_register
    # - debug:
    #     var: internal_zone_id_register
    # - name: Store internal route53 ID
    #   set_fact:
    #     internal_zone_id: "{{ internal_zone_id_register.stdout }}"
    #   when: 'internal_zone_id_register is defined'
    # - name: delete internal dns names
    #   environment:
    #     AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    #     AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
    #     AWS_DEFAULT_REGION: "{{aws_region}}"
    #   shell: "aws route53 change-resource-record-sets --hosted-zone-id {{internal_zone_id}} --change-batch file://{{ ANSIBLE_REPO_PATH }}/workdir/internal_dns-{{ env_type }}-{{ guid }}_DELETE.json --region={{aws_region}}"
    #   ignore_errors: true
    #   tags:
    #     - internal_dns_delete
    #   when: internal_zone_id is defined

    - name: Delete S3 bucket
      environment:
        AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
        AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
        AWS_DEFAULT_REGION: "{{aws_region}}"
      s3_bucket:
        name: "{{ env_type }}-{{ guid }}"
        state: absent
        force: yes
        region: "{{ aws_region }}"
      tags:
        - remove_s3
      register: s3_result
      until: s3_result|succeeded
      retries: 5
      delay: 60
      ignore_errors: yes
      when: cloud_provider == 'ec2'

    - name: report s3 error
      fail:
        msg: "FAIL {{ project_tag }} delete s3"
      when:
        - not s3_result|succeeded
        - cloud_provider == 'ec2'

    - name: Destroy cloudformation template
      cloudformation:
        aws_access_key: "{{ aws_access_key_id }}"
        aws_secret_key: "{{ aws_secret_access_key }}"
        stack_name: "{{project_tag}}"
        state: "absent"
        region: "{{aws_region}}"
        disable_rollback: false
        tags:
          Stack: "project {{env_type}}-{{ guid }}"
      tags: [ destroying, destroy_cf_deployment ]
      register: cloudformation_result
      until: cloudformation_result|succeeded
      retries: 5
      delay: 60
      ignore_errors: yes
      when: cloud_provider == 'ec2'

    - name: report Cloudformation error
      fail:
        msg: "FAIL {{ project_tag }} Destroy Cloudformation"
      when:
        - not cloudformation_result|succeeded
        - cloud_provider == 'ec2'
      tags: [ destroying, destroy_cf_deployment ]
## we need to add something to delete the env specific key.

- name: Import default azure destroy playbook
  import_playbook: "{{ANSIBLE_REPO_PATH}}/cloud_providers/azure_destroy_env.yml"
  when: cloud_provider == 'azure'
New file

######################### env-specific

- name: ocp-workshop tests
  hosts: masters[0]
  become: yes
  vars_files:
    - "./{{ env_type }}_vars.yml"
    - "./{{ env_type }}_secret_vars.yml"

  tags: [ env-specific, cf_integration ]
  tasks:

    ## This needs to be completed

    - name: Can authenticate at the master console with admin user
      shell: "oc login master.{{subdomain_base}} -u {{admin_user}} -p {{admin_user_password}} --insecure-skip-tls-verify=true"
      register: health_authenticate_to_master

    - name: Admin user can create projects
      shell: "oc new-project envtests"
      ignore_errors: true

    # - name: Admin user can deploy Jenkins with application
    #   shell: 'oc new-app jenkins-persistent -p JENKINS_PASSWORD=testmctestface -n envtests'
    #   register: health_jenkins_persistent_new_app
    #   ignore_errors: true

    - name: Admin user can deploy an application
      shell: 'oc new-app https://github.com/StefanoPicozzi/cotd.git -n envtests'
      register: health_cotd_new_app
      ignore_errors: true

    - name: Expose deployed application route for testing
      shell: 'oc expose service cotd'
      ignore_errors: true

    - name: wait for application port to open
      wait_for:
        host: "cotd-envtests.{{cloudapps_suffix}}"
        port: 80
        #delay: 60
        timeout: 360

    - name: fetch
      uri:
        url: 'http://cotd-envtests.{{cloudapps_suffix}}/item.php'
        return_content: yes
        #validate_certs: false
      register: health_cotd_curl_output

    - name: Fail if content isn't displayed as expected
      fail:
        msg: "FAIL {{ project_tag }} cotd content check"
      when: "'COTD' not in health_cotd_curl_output.content"

    # - name: Router is configured on each Infranode

    # - name: Different PVs are available for users to consume
    # - name: There are 3 Masters Working
    # - name: There are 3 EtcD Instances working
    # - name: There is Loadbalancer to access the Masters
    # - name: There is a Load Balancer/DNS for both Infranodes
    # - name: There are at least 2 Infranodes
    # - name: Multitenancy is configured and working
    # - name: Node Selector is defined in the "default" Namespace
    # - name: Node Selector is defined in the "openshift-infra" and "logging" Projects
    # - name: Aggregated logging is configured and working
    # - name: Metrics Collection is configured and working
    # - name: Jenkins pod is running with Persistent Volume
    # - name: Deploy openshift-tasks app using Jenkins
    # - name: Create A CICD workflow Using Jenkins OpenShift Plugin
    # - name: HPA configured and working on production deployment of openshift-tasks
    #
    # - name: Multiple Clients created
    # - name: Multiple Registries, Dedicated Registry per Client
    # - name: Dedicated node for ClientC
    # - name: admissionControl plugin sets specific limits per label (client)
    # - name: A new-project template is modified so that it includes a LimitRange
    # - name: A new-user template is used to create a user object with the specific label value.
    # - name: On Boarding new client documentation
    # - name: Deploy ticket master or similar (multi pod) application.
    # - name: Create a Jenkins Workflow using "Jenkins Pipeline" and Jenkins-OpenShift Plugin
    # - name: Create a Nexus Pod
    # - name: Create a SonarQ Pod
    # - name: Deploy using Jenkins in "dev" and make it pass all the unit tests
    # - name: Display Unit Test and Code Coverage results in Jenkins
    # - name: Deploy using Jenkins in "test" Pass an integration test to an AMQ or similar component
    # - name: Display Integration tests results in Jenkins Console
    # - name: Artifacts should be stored and pulled in Nexus/Jenkins
New file

---
## TODO: What variables can we strip out of here to build complex variables?
## i.e. what can we add into group_vars as opposed to config_vars?
## Example: We don't really need "subdomain_base_short". If we want to use this,
## we should just toss it in group_vars/all.
### Also, we should probably just create a variable reference in the README.md
### For now, just tagging comments in line with the configuration file.

### Vars that can be removed:
# use_satellite: true
# use_subscription_manager: false
# use_own_repos: false

###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
###### OR PASS as "-e" args to ansible-playbook command

### Common Host settings
repo_version: "3.9"
repo_method: file # Other options are: file, satellite and rhn

# If using repo_method: satellite, you must set these values as well.
# satellite_url: https://satellite.example.com
# satellite_org: Sat_org_name
# satellite_activationkey: "rhel7basic"

# Do you want to run a full yum update?
update_packages: true

## guid is the deployment unique identifier; it will be appended to all tags,
## files and anything that identifies this environment from another "just like it"
guid: defaultguid

# This var is used to identify the stack (cloudformation, azure resourcegroup, ...)
project_tag: "{{ env_type }}-{{ guid }}"

software_to_deploy: openshift
deploy_openshift: true
deploy_openshift_post: true
deploy_env_post: true

install_bastion: true
install_common: true
install_nfs: true
install_glusterfs: true
install_opentlc_integration: true
install_zabbix: false
install_prometheus: true
install_ipa_client: false
install_lets_encrypt_certificates: false
install_openwhisk: false
install_metrics: true
install_logging: true
install_aws_broker: false

ocp_report: false
remove_self_provisioners: false
idm_ca_url: http://ipa.opentlc.com/ipa/config/ca.crt
zabbix_host: 23.246.247.58

# Options for container_runtime: docker, cri-o
container_runtime: "docker"
docker_version: "{{ '1.12.6' if repo_version | version_compare('3.9', '<') else '1.13.1' }}"
docker_device: /dev/xvdb

### If you want a Key Pair name created and injected into the hosts,
# set `set_env_authorized_key` to true and set the keyname in `env_authorized_key`.
# You can use the key used to create the environment or your own self-generated key.
# If you set "use_own_key" to false, your PRIVATE key will be copied to the bastion. (This is {{key_name}})

use_own_key: true
env_authorized_key: "{{guid}}key"
ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
set_env_authorized_key: true

# Is this running from Red Hat Ansible Tower?
tower_run: false

admin_user: opentlc-mgr
admin_project: "ocp-storage-cns"

### Azure

# Create a dedicated resourceGroup for this deployment
az_destroy_method: resource_group
az_resource_group: "{{ project_tag }}"

# You can operate differently: if you share one resourceGroup for all your deployments,
# you can specify a different resourceGroup and method:
# az_destroy_method: deployment
# az_resource_group: my-shared-resource-group
# az_storage_account_type: Premium_LRS

### AWS EC2 Environment settings

### Route 53 Zone ID (AWS)
# This is the Route53 HostedZoneId where you will create your Public DNS entries
# This only needs to be defined if your CF template uses route53
HostedZoneId: Z1TQFSYFZUAO0D
# The region to be used, if not specified by -e in the command line
aws_region: us-east-1
# The key pair that is used to provision the instances, if not specified by -e in the command line
key_name: "default_key_name"

## Networking (AWS)
subdomain_base_short: "{{ guid }}"
subdomain_base_suffix: ".openshift.opentlc.com"
subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"

## Environment Sizing

bastion_instance_type: "t2.large"
master_instance_type: "m4.4xlarge"
etcd_instance_type: "{{master_instance_type}}"
infranode_instance_type: "m4.4xlarge"
node_instance_type: "m4.4xlarge"
support_instance_type: "c4.xlarge"

node_instance_count: 2
infranode_instance_count: 1
master_instance_count: 1
support_instance_count: "{{ 3 if install_glusterfs|bool else 1 }}"
# scaleup
new_node_instance_count: 0

###### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYMENT

## This might get removed
env_specific_images:
# - "registry.access.redhat.com/jboss-eap-7/eap70-openshift:latest"
# - "registry.access.redhat.com/openshift3/jenkins-2-rhel7:latest"
# - "registry.access.redhat.com/openshift3/jenkins-slave-maven-rhel7:latest"

#### Vars for the OpenShift Ansible hosts file
master_api_port: 443
ovs_plugin: "networkpolicy" # This can also be set to: "multitenant" or "subnet"
multi_tenant_setting: "os_sdn_network_plugin_name='redhat/openshift-ovs-{{ovs_plugin}}'"
master_lb_dns: "master.{{subdomain_base}}"

lets_encrypt_openshift_master_named_certificates:
  - certfile: "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer"
    keyfile: "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key"
    cafile: "/root/.acme.sh/{{ master_lb_dns }}/ca.cer"

lets_encrypt_openshift_hosted_router_certificate:
  certfile: "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.cer"
  keyfile: "/root/.acme.sh/{{ master_lb_dns }}/{{ master_lb_dns }}.key"
  cafile: "/root/.acme.sh/{{ master_lb_dns }}/ca.cer"

project_request_message: 'To provision Projects you must request access in https://labs.opentlc.com or https://rhpds.redhat.com'

cloudapps_suffix: 'apps.{{subdomain_base}}'
## TODO: This should be registered as a variable. Awk for OS versions (OCP).
## yum info openshift...
osrelease: 3.9.31
openshift_master_overwrite_named_certificates: true
timeout: 60

########## OCP identity providers
# Options for install_idm: allow_all, htpasswd, ldap, ... see the available_identity_providers map below
install_idm: ldap

# If you want to install several identity providers, just pick from the
# available_identity_providers list:
install_idms:
  - "{{ install_idm }}"

# This var is empty by default.
# Every idm in the 'install_idms' list will be added, using the 'available_identity_providers' map.
# You can:
# - directly override the 'identity_providers' list
# or
# - add an option to 'available_identity_providers' and then
#   reference it in 'install_idm' or the 'install_idms' list
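#
# For example (a sketch using providers already defined in the
# 'available_identity_providers' map below), you could enable both LDAP and
# htpasswd by overriding install_idms in env_secret_vars.yml:
#
# install_idms:
#   - ldap
#   - htpasswd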
identity_providers: []

openshift_master_ldap_ca_file: 'openshift_master_ldap_ca_file=/root/ca.crt'

available_identity_providers:
  ldap:
    name: OpenTLC IPA
    challenge: true
    login: true
    kind: LDAPPasswordIdentityProvider
    attributes:
      id: ['dn']
      email: ['mail']
      name: ['cn']
      preferredUsername: ['uid']
    bindDN: uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com
    bindPassword: "{{bindPassword|d('NOT_DEFINED')}}"
    ca: ipa-ca.crt
    insecure: false
    url: ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid

  ssodev:
    name: ssodev-iad00
    challenge: false
    login: true
    kind: OpenIDIdentityProvider
    clientID: "{{ opentlc_ssodev_client_id|d('NOT_DEFINED') }}"
    clientSecret: "{{ opentlc_ssodev_client_secret|d('NOT_DEFINED') }}"
    ca: lets-encrypt-x3-cross-signed.pem.txt
    urls:
      authorize: https://ssodev-iad00.opentlc.com:8443/auth/realms/ipatest/protocol/openid-connect/auth
      token: https://ssodev-iad00.opentlc.com:8443/auth/realms/ipatest/protocol/openid-connect/token
      userInfo: https://ssodev-iad00.opentlc.com:8443/auth/realms/ipatest/protocol/openid-connect/userinfo
    claims:
      id:
        - sub
      preferredUsername:
        - preferred_username
      name:
        - name
      email:
        - email

  allow_all:
    name: allow_all
    login: true
    challenge: true
    kind: AllowAllPasswordIdentityProvider

  htpasswd:
    name: htpasswd_auth
    login: true
    challenge: true
    kind: HTPasswdPasswordIdentityProvider
    filename: /etc/origin/master/htpasswd

###### You can change the following, but you usually wouldn't need to.
ansible_ssh_user: ec2-user
remote_user: ec2-user

common_packages:
  - python
  - unzip
  - bash-completion
  - tmux
  - bind-utils
  - wget
  - ansible
  - git
  - vim-enhanced
  - at
  - sysstat
  - strace
  - net-tools
  - iptables-services
  - bridge-utils
  - kexec-tools
  - sos
  - psacct
  - iotop

rhel_repos:
  - rhel-7-server-rpms
  - rhel-7-server-extras-rpms
  - rhel-7-server-ose-{{repo_version}}-rpms
  - rhel-7-fast-datapath-rpms
  - rh-gluster-3-client-for-rhel-7-server-rpms

# use_subscription_manager: false
# use_own_repos: true
#
# rhn_pool_id_string: OpenShift Container Platform

## NFS Server settings
nfs_vg: nfsvg
nfs_pvs: /dev/xvdb
nfs_export_path: /srv/nfs
nfs_size: 200

nfs_shares:
  - user-vols

ocp_pvs:
# - es-storage
# - nexus
# - nexus2
# - nexus3

user_vols: 200
user_vols_size: 10Gi

## gluster settings
glusterfs_device_name: /dev/xvdc
glusterfs_device_size: 500

cache_images:
  - "registry.access.redhat.com/jboss-eap-7/eap70-openshift:latest"
  - "registry.access.redhat.com/openshift3/jenkins-2-rhel7:v{{ repo_version }}"
  - "registry.access.redhat.com/openshift3/jenkins-slave-maven-rhel7:v{{ repo_version }}"

### CLOUDFORMATIONS vars

create_internal_dns_entries: true
zone_internal_dns: "{{guid}}.internal."
chomped_zone_internal_dns: "{{guid}}.internal"
zone_public_dns: "{{subdomain_base}}."
cloudapps_record: '*.apps'
cloudapps_dns: '{{cloudapps_record}}.{{subdomain_base}}.'

master_public_dns: "master.{{subdomain_base}}."
bastion_public_dns: "bastion.{{subdomain_base}}."
certtest_public_dns: "certtest.{{subdomain_base}}."
bastion_public_dns_chomped: "bastion.{{subdomain_base}}"
vpcid_cidr_block: "192.168.0.0/16"
vpcid_name_tag: "{{subdomain_base}}"

az_1_name: "{{ aws_region }}a"
az_2_name: "{{ aws_region }}b"

subnet_private_1_cidr_block: "192.168.2.0/24"
subnet_private_1_az: "{{ az_2_name }}"
subnet_private_1_name_tag: "{{subdomain_base}}-private"

subnet_private_2_cidr_block: "192.168.1.0/24"
subnet_private_2_az: "{{ az_1_name }}"
subnet_private_2_name_tag: "{{subdomain_base}}-private"

subnet_public_1_cidr_block: "192.168.10.0/24"
subnet_public_1_az: "{{ az_1_name }}"
subnet_public_1_name_tag: "{{subdomain_base}}-public"

subnet_public_2_cidr_block: "192.168.20.0/24"
subnet_public_2_az: "{{ az_2_name }}"
subnet_public_2_name_tag: "{{subdomain_base}}-public"

dopt_domain_name: "{{ aws_region }}.compute.internal"

rtb_public_name_tag: "{{subdomain_base}}-public"
rtb_private_name_tag: "{{subdomain_base}}-private"

cf_template_description: "{{ env_type }}-{{ guid }} template"

rootfs_size_node: 50
rootfs_size_infranode: 150
rootfs_size_master: 50
rootfs_size_bastion: 20
rootfs_size_support: 20

instances:
  - name: "bastion"
    count: 1
    unique: true
    public_dns: true
    dns_loadbalancer: true
    flavor:
      ec2: "{{bastion_instance_type}}"
      azure: "{{bastion_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "bastions"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_bastion }}"

  - name: "master"
    count: "{{master_instance_count}}"
    public_dns: true
    dns_loadbalancer: true
    flavor:
      ec2: "{{master_instance_type}}"
      azure: "{{master_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "masters"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_master }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: "{{master_docker_size|default(docker_size)|default('20')}}"
        volume_type: gp2
        purpose: docker
        lun: 0

  - name: "node"
    count: "{{node_instance_count}}"
    public_dns: false
    dns_loadbalancer: false
    flavor:
      ec2: "{{node_instance_type}}"
      azure: "{{node_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "nodes"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_node }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: "{{node_docker_size|d(docker_size)|d('100')}}"
        volume_type: gp2
        purpose: docker
        lun: 0

  - name: "infranode"
    count: "{{infranode_instance_count}}"
    public_dns: true
    dns_loadbalancer: true
    flavor:
      ec2: "{{infranode_instance_type}}"
      azure: "{{infranode_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "infranodes"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_infranode }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: "{{infranode_docker_size|d(docker_size)|d('50')}}"
        volume_type: gp2
        purpose: docker
        lun: 0

  - name: "support"
    count: "{{support_instance_count}}"
    public_dns: false
    dns_loadbalancer: false
    flavor:
      ec2: "{{support_instance_type}}"
      azure: "{{support_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "{{ 'support,glusterfs,nodes' if install_glusterfs|bool else 'support' }}"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_support }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: "{{support_docker_size|d(docker_size)|d('50')}}"
        volume_type: gp2
        purpose: docker
        lun: 0
      - device_name: "{{glusterfs_device_name}}"
        volume_size: "{{glusterfs_device_size}}"
        volume_type: gp2
        purpose: glusterfs
        lun: 1
      - device_name: "{{nfs_pvs}}"
        volume_size: "{{nfs_size}}"
        volume_type: gp2
        purpose: nfs
        lun: 2
New file

{
  "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "parameters" : {
    "guid": {
      "type" : "string",
      "minLength" : 3,
      "metadata" : {
        "description" : "GUID of the environment"
      }
    },
    "DNSZone": {
      "type" : "string",
      "minLength" : 3,
      "metadata" : {
        "description" : "dns zone of the environment, to update or create"
      }
    },
    "adminUsername" : {
      "type" : "string",
      "minLength" : 1,
      "defaultValue" : "azure",
      "metadata" : {
        "description" : "User name for the Virtual Machine."
      }
    },
    "sshKeyData" : {
      "type" : "securestring",
      "metadata" : {
        "description" : "SSH RSA public key file as a string."
      }
    },
    "vmSize" : {
      "type" : "string",
      "defaultValue" : "Basic_A2",
      "allowedValues" : [
        "Basic_A2",
        "Standard_A2",
        "Standard_A3",
        "Standard_A4",
        "Standard_A5",
        "Standard_A6",
        "Standard_A7",
        "Standard_A8",
        "Standard_A9",
        "Standard_A10",
        "Standard_A11",
        "Standard_D2",
        "Standard_D3",
        "Standard_D4",
        "Standard_D11",
        "Standard_D12",
        "Standard_D13",
        "Standard_D14",
        "Standard_D2_v2",
        "Standard_D3_v2",
        "Standard_D4_v2",
        "Standard_D5_v2",
        "Standard_D11_v2",
        "Standard_D12_v2",
        "Standard_D13_v2",
        "Standard_D14_v2",
        "Standard_G1",
        "Standard_G2",
        "Standard_G3",
        "Standard_G4",
        "Standard_G5",
        "Standard_DS2",
        "Standard_DS3",
        "Standard_DS4",
        "Standard_DS11",
        "Standard_DS12",
        "Standard_DS13",
        "Standard_DS14",
        "Standard_DS2_v2",
        "Standard_DS3_v2",
        "Standard_DS4_v2",
        "Standard_DS5_v2",
        "Standard_DS11_v2",
        "Standard_DS12_v2",
        "Standard_DS13_v2",
        "Standard_DS14_v2",
        "Standard_GS1",
        "Standard_GS2",
        "Standard_GS3",
        "Standard_GS4",
        "Standard_GS5"
      ],
      "metadata" : {
        "description" : "The size of each Node Virtual Machine."
      }
    }
  },
  "variables" : {
    "subzone": "[concat('{{guid}}.',parameters('DNSZone'))]",
    "location" : "[resourceGroup().location]",
    "virtualNetworkName" : "{{project_tag}}-virtualnetwork",
    "addressPrefix" : "10.0.0.0/16",
    "vnetId" : "[resourceId('Microsoft.Network/virtualNetworks', variables('virtualNetworkName'))]",
    "rhel" : {
      "publisher" : "RedHat",
      "offer" : "RHEL",
      "sku" : "7-RAW",
      {% if osrelease is version_compare('3.9.25', '>=') %}
      "version" : "7.5.2018050901"
      {% else %}
      "version" : "7.4.2018010506"
      {% endif %}
    },
    "tenantId" : "[subscription().tenantId]",
    "apiVersion" : "2015-06-15",
    "apiVersionCompute" : "2017-12-01",
    "apiVersionNetwork" : "2016-03-30",
    "tmApiVersion" : "2015-11-01",
    "apiVersionStorage" : "2015-06-15",
    "apiVersionLinkTemplate" : "2015-01-01",
    "nicName" : "OneVmNic",
    "publicIPAddressType" : "Dynamic",
    "subnetRef" : "[concat(variables('vnetId'),'/subnets/',variables('virtualNetworkName'))]",
    "sshKeyPath" : "[concat('/home/',parameters('adminUsername'),'/.ssh/authorized_keys')]",
    "sQuote" : "\"",
    "vmStorageAccountContainerName": "vhds",
    "storageAccountType": "{{az_storage_account_type|d('Premium_LRS')}}",
    "vhdStorageType" : "Premium_LRS",
    "storageAccountName": "[concat('vsts8',uniquestring(parameters('guid')))]"
  },
  "resources": [
    {
      "type": "Microsoft.Storage/storageAccounts",
      "name": "[variables('storageAccountName')]",
      "apiVersion": "2017-10-01",
      "location": "[resourceGroup().location]",
      "sku": {
        "name": "[variables('storageAccountType')]"
      },
      "tags": {
        "owner": "{{ email | default('unknownuser') }}",
        "Project": "{{project_tag}}"
      },
      "kind": "Storage",
      "properties": {
      }
    },

    {
      "type": "Microsoft.Network/dnszones/a",
      "name": "[concat(variables('subzone'), '/', '{{cloudapps_record|d('*.apps')}}')]",
      "apiVersion": "2016-04-01",
      "dependsOn": [
        {% for c in range(1,(infranode_instance_count|int)+1) %}
        "[resourceId('Microsoft.Network/publicIPAddresses/', '{{project_tag}}-infranode{{loop.index}}-PublicIP')]",
        {% endfor %}
        "[resourceId('Microsoft.Network/dnsZones/', variables('subzone'))]"
      ],
      "properties": {
        "TTL": 3600,
        "ARecords": [
          {% for c in range(1,(infranode_instance_count|int)+1) %}
          {
            "ipv4Address": "[reference('{{project_tag}}-infranode{{loop.index}}-PublicIP').ipAddress]"
          }{{ "," if not loop.last else "" }}
          {% endfor %}
        ]
      }
    },

    {% for instance in instances %}
    {% if instance['dns_loadbalancer']|d(false)|bool and not instance['unique']|d(false)|bool %}
    {
      "type": "Microsoft.Network/dnszones/a",
      "name": "[concat(variables('subzone'), '/', '{{instance['name']}}')]",
      "apiVersion": "2016-04-01",
      "dependsOn": [
        {% for c in range(1,(instance['count'] |int)+1) %}
        {% if instance['unique']|d(false)|bool %}
        {% set instancename = instance['name'] %}
        {% else %}
        {% set instancename = instance['name'] + (loop.index|string) %}
        {% endif %}
        {% if instance['public_dns']|d(false)|bool %}
        "[resourceId('Microsoft.Network/publicIPAddresses/', '{{project_tag}}-{{instancename}}-PublicIP')]",
        {% endif %}
        {% endfor %}
        "[resourceId('Microsoft.Network/dnsZones/', variables('subzone'))]"
      ],
      "properties": {
        "TTL": 3600,
        "ARecords": [
          {% for c in range(1,(instance['count'] |int)+1) %}
          {% if instance['unique']|d(false)|bool %}
          {% set instancename = instance['name'] %}
          {% else %}
          {% set instancename = instance['name'] + (loop.index|string) %}
          {% endif %}
          {
            "ipv4Address": "[reference('{{project_tag}}-{{instancename}}-PublicIP').ipAddress]"
          }{{ "," if not loop.last else "" }}
          {% endfor %}
        ]
      }
    },
    {% endif %}

    {% for c in range(1,(instance['count'] |int)+1) %}

    {% if instance['unique']|d(false)|bool %}
    {% set instancename = instance['name'] %}
    {% else %}
    {% set instancename = instance['name'] + (loop.index|string) %}
    {% endif %}

    {% if instance['public_dns']|d(false)|bool %}
    {
      "type": "Microsoft.Network/dnszones/a",
      "name": "[concat(variables('subzone'), '/', '{{instancename}}')]",
      "apiVersion": "2016-04-01",
      "dependsOn": [
        "[resourceId('Microsoft.Network/publicIPAddresses/', '{{project_tag}}-{{instancename}}-PublicIP')]",
        "[resourceId('Microsoft.Network/dnsZones/', variables('subzone'))]"
      ],
      "properties": {
        "TTL": 3600,
        "ARecords": [
          {
            "ipv4Address": "[reference('{{project_tag}}-{{instancename}}-PublicIP').ipAddress]"
          }
        ]
      }
    },
    {% endif %}
    {% if instance['public_dns']|d(false)|bool %}
    {
      "apiVersion" : "2017-04-01",
      "type" : "Microsoft.Network/publicIPAddresses",
      "name" : "{{project_tag}}-{{instancename}}-PublicIP",
      "location" : "[resourceGroup().location]",
      "properties" : {
        "publicIPAllocationMethod" : "Static",
        "dnsSettings" : {
          "domainNameLabel" : "{{instancename}}-{{guid}}"
        }
      }
    },
    {% endif %}
    {
      "apiVersion" : "2017-04-01",
      "type" : "Microsoft.Network/networkInterfaces",
      "name" : "{{project_tag}}-{{instancename}}-Interface",
      "location" : "[resourceGroup().location]",
      "dependsOn" : [
        {% if instance['public_dns']|d(false)|bool %}
        "[resourceId('Microsoft.Network/publicIPAddresses/', '{{project_tag}}-{{instancename}}-PublicIP')]",
        {% endif %}
        "[resourceId('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
      ],
      "properties" : {
        "ipConfigurations" : [
          {
            "name" : "ipconfig1",
            "properties" : {
              "privateIPAllocationMethod" : "Dynamic",
              {% if instance['public_dns']|d(false)|bool %}
              "publicIPAddress" : {
                "id" : "[resourceId('Microsoft.Network/publicIPAddresses','{{project_tag}}-{{instancename}}-PublicIP')]"
              },
              {% endif %}
              "subnet" : {
                "id" : "[variables('subnetRef')]"
              }
            }
          }
        ],
        "dnsSettings": {
          "internalDnsNameLabel": "{{instancename}}",
          "internalFqdn": "{{instancename}}.{{chomped_zone_internal_dns}}"
        }
      }
    },
    {
      "apiVersion" : "2017-12-01",
      "type" : "Microsoft.Compute/virtualMachines",
      "name" : "{{project_tag}}-{{instancename}}",
      "location" : "[resourceGroup().location]",
      "dependsOn" : [
        "[resourceId('Microsoft.Storage/storageAccounts',variables('storageAccountName'))]",
        "[resourceId('Microsoft.Network/networkInterfaces/', '{{project_tag}}-{{instancename}}-Interface')]"
      ],
      "tags": {
        "Name": "{{instancename}}",
        "internaldns": "{{instancename}}.{{chomped_zone_internal_dns}}",
        "owner": "{{ email | default('unknownuser') }}",
        "Project": "{{project_tag}}",
        {% for tag in instance['tags'] %}
        "{{tag['key']}}": "{{tag['value']}}",
        {% endfor %}
        "{{project_tag}}": "{{ instance['name'] }}"
      },
      "properties" : {
        "hardwareProfile" : {
          "vmSize" : "{{instance['flavor'][cloud_provider]}}"
        },
        "osProfile" : {
          "computerName": "{{instancename}}.{{chomped_zone_internal_dns}}",
          "adminUsername" : "[parameters('adminUsername')]",
          "linuxConfiguration" : {
            "disablePasswordAuthentication" : "true",
            "ssh" : {
              "publicKeys" : [
                {
                  "path" : "[variables('sshKeyPath')]",
                  "keyData" : "[parameters('sshKeyData')]"
                }
              ]
            }
          }
        },
        "storageProfile" : {
          "imageReference" : "[variables('rhel')]",
          "osDisk" : {
            "caching" : "ReadWrite",
            "name" : "{{project_tag}}-{{instancename}}-osdisk",
            "createOption" : "FromImage",
            "diskSizeGB" : "{{instance['rootfs_size']|d('50')}}",
            "managedDisk": {
              "storageAccountType": "{{az_storage_account_type|d('Premium_LRS')}}"
            }
          },
          "dataDisks" : [
            {% for vol in instance['volumes']|default([]) %}
            {
              "caching" : "None",
              "createOption" : "Empty",
              "lun" : "{{vol.lun}}",
              "name": "{{project_tag}}-{{instancename}}-disk{{loop.index}}-{{vol.purpose}}",
              "diskSizeGB" : "{{vol.volume_size}}",
              "managedDisk": {
                "storageAccountType": "{{az_storage_account_type|d('Premium_LRS')}}"
              }
            }{{ "," if not loop.last else "" }}
            {% endfor %}
          ]
        },
        "networkProfile" : {
          "networkInterfaces" : [
            {
              "id" : "[resourceId('Microsoft.Network/networkInterfaces','{{project_tag}}-{{instancename}}-Interface')]"
            }
          ]
        },
        "diagnosticsProfile" : {
          "bootDiagnostics" : {
            "enabled" : "false",
            "storageUri" : "[concat(reference(concat('Microsoft.Storage/storageAccounts/', variables('storageAccountName')), '2016-01-01').primaryEndpoints.blob)]"
          }
        }
      }
    },
    {% endfor %}
    {% endfor %}
    {
      "name": "[variables('subzone')]",
      "type": "Microsoft.Network/dnsZones",
      "apiVersion": "2017-09-01",
      "location" : "global"
    },
    {
      "apiVersion" : "[variables('apiVersion')]",
      "type" : "Microsoft.Network/virtualNetworks",
      "name" : "[variables('virtualNetworkName')]",
      "location" : "[variables('location')]",
      "tags" : {
        "displayName" : "VirtualNetwork"
      },
      "properties" : {
        "addressSpace" : {
          "addressPrefixes" : [
            "[variables('addressPrefix')]"
          ]
        },
        "subnets" : [
          {
            "name" : "[variables('virtualNetworkName')]",
            "properties" : {
              "addressPrefix" : "[variables('addressPrefix')]"
            }
          }
        ]
      }
    },
    {
      "type" : "Microsoft.Network/networkSecurityGroups",
      "name" : "{{project_tag}}-NetworkSecurityGroup",
      "tags" : {
        "displayName" : "NetworkSecurityGroup"
      },
      "apiVersion" : "[variables('apiVersion')]",
      "location" : "[resourceGroup().location]",
      "properties" : {
        "securityRules" : [
          {
            "name" : "default-allow-openshift-router-https",
            "properties" : {
              "protocol" : "Tcp",
              "sourcePortRange" : "*",
              "destinationPortRange" : "443",
              "sourceAddressPrefix" : "*",
              "destinationAddressPrefix" : "*",
              "access" : "Allow",
              "priority" : 2000,
              "direction" : "Inbound"
            }
          },
          {
            "name" : "default-allow-openshift-router-http",
            "properties" : {
              "protocol" : "Tcp",
              "sourcePortRange" : "*",
              "destinationPortRange" : "80",
              "sourceAddressPrefix" : "*",
              "destinationAddressPrefix" : "*",
              "access" : "Allow",
              "priority" : 2001,
              "direction" : "Inbound"
            }
          },
          {
            "name" : "default-allow-openshift-master",
            "properties" : {
              "protocol" : "Tcp",
              "sourcePortRange" : "*",
              "destinationPortRange" : "8443",
              "sourceAddressPrefix" : "*",
              "destinationAddressPrefix" : "*",
              "access" : "Allow",
              "priority" : 2002,
              "direction" : "Inbound"
            }
          },
          {
            "name" : "default-allow-ssh",
            "properties" : {
              "protocol" : "Tcp",
              "sourcePortRange" : "*",
              "destinationPortRange" : "22",
              "sourceAddressPrefix" : "*",
              "destinationAddressPrefix" : "*",
              "access" : "Allow",
              "priority" : 2003,
              "direction" : "Inbound"
            }
          },
          {
            "name" : "default-allow-mosh",
            "properties" : {
              "protocol" : "Udp",
              "sourcePortRange" : "*",
              "destinationPortRange" : "60000-60003",
              "sourceAddressPrefix" : "*",
              "destinationAddressPrefix" : "*",
              "access" : "Allow",
              "priority" : 2004,
              "direction" : "Inbound"
            }
          }
        ]
      }
    }
  ],
  "outputs" : {
  }
}
New file

#jinja2: lstrip_blocks: True
---
AWSTemplateFormatVersion: "2010-09-09"
Mappings:
  RegionMapping:
    us-east-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6871a115
      {% else %}
      RHELAMI: ami-c998b6b2
      {% endif %}
    us-east-2:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-03291866
      {% else %}
      RHELAMI: ami-cfdafaaa
      {% endif %}
    us-west-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-18726478
      {% else %}
      RHELAMI: ami-66eec506
      {% endif %}
    us-west-2:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-28e07e50
      {% else %}
      RHELAMI: ami-223f945a
      {% endif %}
    eu-west-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-7c491f05
      {% else %}
      RHELAMI: ami-bb9a6bc2
      {% endif %}
    eu-central-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-c86c3f23
      {% else %}
      RHELAMI: ami-d74be5b8
      {% endif %}
    ap-northeast-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6b0d5f0d
      {% else %}
      RHELAMI: ami-30ef0556
      {% endif %}
    ap-northeast-2:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-3eee4150
      {% else %}
      RHELAMI: ami-0f5a8361
      {% endif %}
    ap-southeast-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-76144b0a
      {% else %}
      RHELAMI: ami-10bb2373
      {% endif %}
    ap-southeast-2:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-67589505
      {% else %}
      RHELAMI: ami-ccecf5af
      {% endif %}
    ap-south-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-5b673c34
      {% else %}
      RHELAMI: ami-cdbdd7a2
      {% endif %}
    sa-east-1:
      {% if osrelease | version_compare('3.9.25', '>=') %}
      RHELAMI: ami-b0b7e3dc
      {% else %}
      RHELAMI: ami-a789ffcb
      {% endif %}
  DNSMapping:
    us-east-1:
      domain: "us-east-1.compute.internal"
    us-west-1:
      domain: "us-west-1.compute.internal"
    us-west-2:
      domain: "us-west-2.compute.internal"
    eu-west-1:
      domain: "eu-west-1.compute.internal"
    eu-central-1:
      domain: "eu-central-1.compute.internal"
    ap-northeast-1:
      domain: "ap-northeast-1.compute.internal"
    ap-northeast-2:
      domain: "ap-northeast-2.compute.internal"
    ap-southeast-1:
      domain: "ap-southeast-1.compute.internal"
    ap-southeast-2:
      domain: "ap-southeast-2.compute.internal"
    sa-east-1:
      domain: "sa-east-1.compute.internal"
    ap-south-1:
      domain: "ap-south-1.compute.internal"

Resources:
  Vpc:
    Type: "AWS::EC2::VPC"
    Properties:
      CidrBlock: "192.199.0.0/16"
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
        - Key: Name
          Value: "{{vpcid_name_tag}}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"

  VpcInternetGateway:
    Type: "AWS::EC2::InternetGateway"

  VpcGA:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId:
        Ref: VpcInternetGateway
      VpcId:
        Ref: Vpc

  VpcRouteTable:
    Type: "AWS::EC2::RouteTable"
    Properties:
      VpcId:
        Ref: Vpc

  VPCRouteInternetGateway:
    DependsOn: VpcGA
    Type: "AWS::EC2::Route"
    Properties:
      GatewayId:
        Ref: VpcInternetGateway
      DestinationCidrBlock: "0.0.0.0/0"
      RouteTableId:
        Ref: VpcRouteTable

  PublicSubnet:
    Type: "AWS::EC2::Subnet"
    DependsOn:
      - Vpc
    Properties:
      CidrBlock: "192.199.0.0/24"
      Tags:
        - Key: Name
          Value: "{{project_tag}}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
      MapPublicIpOnLaunch: true
      VpcId:
        Ref: Vpc

  PublicSubnetRTA:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
    Properties:
      RouteTableId:
        Ref: VpcRouteTable
      SubnetId:
        Ref: PublicSubnet

  HostSG:
    Type: "AWS::EC2::SecurityGroup"
    Properties:
      GroupDescription: Host
      VpcId:
        Ref: Vpc
      Tags:
        - Key: Name
          Value: host_sg

  HostUDPPorts:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - HostSG
          - GroupId
      IpProtocol: udp
      FromPort: 0
      ToPort: 65535
      CidrIp: "0.0.0.0/0"

  HostTCPPorts:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - HostSG
          - GroupId
      IpProtocol: tcp
      FromPort: 0
      ToPort: 65535
      CidrIp: "0.0.0.0/0"

  zoneinternalidns:
    Type: "AWS::Route53::HostedZone"
    Properties:
      Name: "{{ zone_internal_dns }}"
      VPCs:
        - VPCId:
            Ref: Vpc
          VPCRegion:
            Ref: "AWS::Region"
      HostedZoneConfig:
        Comment: "Created By ansible agnostic deployer"

  CerttestDNS:
    Type: AWS::Route53::RecordSetGroup
    DependsOn:
      - master1EIP
    Properties:
      HostedZoneId: "{{HostedZoneId}}"
      RecordSets:
        - Name: "{{certtest_public_dns}}"
          Type: A
          TTL: 10
          ResourceRecords:
            - Fn::GetAtt:
                - master1
                - PublicIp

  CloudDNS:
    Type: AWS::Route53::RecordSetGroup
    DependsOn:
    {% for c in range(1,(infranode_instance_count|int)+1) %}
      - "infranode{{loop.index}}EIP"
    {% endfor %}
    Properties:
      HostedZoneId: "{{HostedZoneId}}"
      RecordSets:
        - Name: "{{cloudapps_dns}}"
          Type: A
          TTL: 900
          ResourceRecords:
          {% for c in range(1,(infranode_instance_count|int)+1) %}
            - Fn::GetAtt:
                - infranode{{loop.index}}
                - PublicIp
          {% endfor %}

{% for instance in instances %}
{% if instance['dns_loadbalancer']|d(false)|bool and not instance['unique']|d(false)|bool %}
  {{instance['name']}}DNSLoadBalancer:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
    {% for c in range(1, (instance['count']|int)+1) %}
      - {{instance['name']}}{{c}}EIP
    {% endfor %}
    Properties:
      HostedZoneId: {{HostedZoneId}}
      RecordSets:
        - Name: "{{instance['name']}}.{{subdomain_base}}."
          Type: A
          TTL: 900
          ResourceRecords:
          {% for c in range(1,(instance['count'] |int)+1) %}
            - "Fn::GetAtt":
                - {{instance['name']}}{{c}}
                - PublicIp
          {% endfor %}
{% endif %}

{% for c in range(1,(instance['count'] |int)+1) %}
  {{instance['name']}}{{loop.index}}:
    Type: "AWS::EC2::Instance"
    Properties:
      ImageId:
        Fn::FindInMap:
          - RegionMapping
          - Ref: AWS::Region
          - {{ instance['image_id'] | default('RHELAMI') }}
      InstanceType: "{{instance['flavor'][cloud_provider]}}"
      KeyName: "{{instance['key_name'] | default(key_name)}}"
      {% if instance['UserData'] is defined %}
      {{instance['UserData']}}
      {% endif %}
      SecurityGroupIds:
        - "Fn::GetAtt":
            - HostSG
            - GroupId
      SubnetId:
        Ref: PublicSubnet
      Tags:
        {% if instance['unique'] | d(false) | bool %}
        - Key: Name
          Value: {{instance['name']}}
        - Key: internaldns
          Value: {{instance['name']}}.{{chomped_zone_internal_dns}}
        {% else %}
        - Key: Name
          Value: {{instance['name']}}{{loop.index}}
        - Key: internaldns
          Value: {{instance['name']}}{{loop.index}}.{{chomped_zone_internal_dns}}
        {% endif %}
        {% if instance['name'] == 'node' %}
        {% if c > (node_instance_count|int) - (new_node_instance_count|int) %}
        - Key: newnode
          Value: true
        {% endif %}
        {% endif %}
        - Key: "owner"
          Value: "{{ email | default('unknownuser') }}"
        - Key: "Project"
          Value: "{{project_tag}}"
        - Key: "{{project_tag}}"
          Value: "{{ instance['name'] }}"
        {% for tag in instance['tags'] %}
        - Key: {{tag['key']}}
          Value: {{tag['value']}}
        {% endfor %}
      BlockDeviceMappings:
        - DeviceName: "/dev/sda1"
          Ebs:
            VolumeSize: {{ instance['rootfs_size'] | default('50') }}
        {% for vol in instance['volumes']|default([]) if
          (vol.purpose|d('') == 'glusterfs' and install_glusterfs|bool)
          or (vol.purpose|d('') == 'nfs' and install_nfs|bool)
          or vol.purpose|d('') not in ['glusterfs', 'nfs'] %}
        - DeviceName: "{{ vol['device_name'] }}"
          Ebs:
            VolumeType: "{{ vol['volume_type'] | d('gp2') }}"
            VolumeSize: "{{ vol['volume_size'] | d('20') }}"
        {% endfor %}

  {{instance['name']}}{{loop.index}}InternalDNS:
    Type: "AWS::Route53::RecordSetGroup"
    Properties:
      HostedZoneId:
        Ref: zoneinternalidns
      RecordSets:
      {% if instance['unique'] | d(false) | bool %}
        - Name: "{{instance['name']}}.{{zone_internal_dns}}"
      {% else %}
        - Name: "{{instance['name']}}{{loop.index}}.{{zone_internal_dns}}"
      {% endif %}
          Type: A
          TTL: 10
          ResourceRecords:
            - "Fn::GetAtt":
                - {{instance['name']}}{{loop.index}}
                - PrivateIp

  {% if instance['public_dns'] %}
  {{instance['name']}}{{loop.index}}EIP:
    Type: "AWS::EC2::EIP"
    DependsOn:
      - VpcGA
    Properties:
      InstanceId:
        Ref: {{instance['name']}}{{loop.index}}

  {{instance['name']}}{{loop.index}}PublicDNS:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
      - {{instance['name']}}{{loop.index}}EIP
    Properties:
      HostedZoneId: {{HostedZoneId}}
      RecordSets:
      {% if instance['unique'] | d(false) | bool %}
        - Name: "{{instance['name']}}.{{subdomain_base}}."
      {% else %}
| | | - Name: "{{instance['name']}}{{loop.index}}.{{subdomain_base}}." |
| | | {% endif %} |
| | | Type: A |
| | | TTL: 10 |
| | | ResourceRecords: |
| | | - "Fn::GetAtt": |
| | | - {{instance['name']}}{{loop.index}} |
| | | - PublicIp |
| | | {% endif %} |
| | | {% endfor %} |
| | | {% endfor %} |
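| | | # Each entry in `instances` thus yields its EC2 instance(s), one
| | | # internal Route53 A record per instance, and, when public_dns is
| | | # set, an EIP plus a public A record (or a shared round-robin record
| | | # set when dns_loadbalancer is set).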
| | | |
| | | Route53User: |
| | | Type: AWS::IAM::User |
| | | Properties: |
| | | Policies: |
| | | - PolicyName: Route53Access |
| | | PolicyDocument: |
| | | Statement: |
| | | - Effect: Allow |
| | | Action: route53domains:* |
| | | Resource: "*" |
| | | - Effect: Allow |
| | | Action: route53:* |
| | | Resource: "*" |
| | | |
| | | Route53UserAccessKey: |
| | | DependsOn: Route53User |
| | | Type: AWS::IAM::AccessKey |
| | | Properties: |
| | | UserName: |
| | | Ref: Route53User |
| | | |
| | | RegistryS3: |
| | | Type: "AWS::S3::Bucket" |
| | | Properties: |
| | | BucketName: "{{ env_type }}-{{ guid }}" |
| | | Tags: |
| | | - Key: Name |
| | | Value: "s3-{{ env_type }}-{{ guid }}" |
| | | - Key: Project |
| | | Value: "{{project_tag}}" |
| | | - Key: owner |
| | | Value: "{{ email | default('unknown')}}" |
| | | |
| | | S3User: |
| | | Type: AWS::IAM::User |
| | | DependsOn: |
| | | - RegistryS3 |
| | | Properties: |
| | | Policies: |
| | | - PolicyName: S3Access |
| | | PolicyDocument: |
| | | Statement: |
| | | - Effect: Allow |
| | | Action: s3:ListAllMyBuckets |
| | | Resource: "*" |
| | | - Effect: Allow |
| | | Action: "s3:*" |
| | | Resource: |
| | | Fn::Join: |
| | | - "" |
| | | - - "arn:aws:s3:::" |
| | | - Ref: RegistryS3 |
| | | - "/*" |
| | | |
| | | S3UserAccessKey: |
| | | Type: AWS::IAM::AccessKey |
| | | DependsOn: |
| | | - S3User |
| | | Properties: |
| | | UserName: |
| | | Ref: S3User |
| | | |
| | | BucketPolicy: |
| | | Type: AWS::S3::BucketPolicy |
| | | DependsOn: |
| | | - RegistryS3 |
| | | Properties: |
| | | PolicyDocument: |
| | | Id: Give registry access to user |
| | | Statement: |
| | | - Sid: AllAccess |
| | | Action: |
| | | - "s3:*" |
| | | Effect: Allow |
| | | Resource: |
| | | Fn::Join: |
| | | - "" |
| | | - - "arn:aws:s3:::" |
| | | - Ref: RegistryS3 |
| | | Principal: |
| | | AWS: |
| | | Fn::GetAtt: |
| | | - S3User |
| | | - Arn |
| | | Bucket: |
| | | Ref: RegistryS3 |
| | | |
| | | Outputs: |
| | | Route53internalzoneOutput: |
| | | Description: The ID of the internal route 53 zone |
| | | Value: |
| | | Ref: zoneinternalidns |
| | | S3User: |
| | | Value: |
| | | Ref: S3User |
| | | Description: IAM User for RegistryS3 |
| | | S3UserAccessKey: |
| | | Value: |
| | | Ref: S3UserAccessKey |
| | | Description: Access key ID for the RegistryS3 IAM user
| | | S3UserSecretAccessKey: |
| | | Value: |
| | | Fn::GetAtt: |
| | | - S3UserAccessKey |
| | | - SecretAccessKey |
| | | Description: Secret access key for the RegistryS3 IAM user
| | | Route53User: |
| | | Value: |
| | | Ref: Route53User |
| | | Description: IAM User for Route53 (Let's Encrypt) |
| | | Route53UserAccessKey: |
| | | Value: |
| | | Ref: Route53UserAccessKey |
| | | Description: Access key ID for the Route53 IAM user (Let's Encrypt)
| | | Route53UserSecretAccessKey: |
| | | Value: |
| | | Fn::GetAtt: |
| | | - Route53UserAccessKey |
| | | - SecretAccessKey |
| | | Description: Secret access key for the Route53 IAM user (Let's Encrypt)
New file |
| | |
| | | { |
| | | "Comment": "Create internal dns zone entries", |
| | | "Changes": [ |
| | | {% for host in groups['nodes'] %} |
| | | |
| | | { |
| | | "Action": "{{DNS_action}}", |
| | | "ResourceRecordSet": { |
| | | "Name": "node{{loop.index}}.{{zone_internal_dns}}", |
| | | "Type": "A", |
| | | "TTL": 20, |
| | | "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ] |
| | | } |
| | | }, |
| | | {% endfor %} |
| | | {% for host in groups['infranodes'] %} |
| | | { |
| | | "Action": "{{DNS_action}}", |
| | | "ResourceRecordSet": { |
| | | "Name": "infranode{{loop.index}}.{{zone_internal_dns}}", |
| | | "Type": "A", |
| | | "TTL": 20, |
| | | "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ] |
| | | } |
| | | }, |
| | | {% endfor %} |
| | | {% for host in groups['masters'] %} |
| | | |
| | | { |
| | | "Action": "{{DNS_action}}", |
| | | "ResourceRecordSet": { |
| | | "Name": "master{{loop.index}}.{{zone_internal_dns}}", |
| | | "Type": "A", |
| | | "TTL": 20, |
| | | "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ] |
| | | } |
| | | }, |
| | | {% endfor %} |
| | | {% for host in groups[('tag_' + env_type + '-' + guid + '_nfs') | replace('-', '_') ] %} |
| | | { |
| | | "Action": "{{DNS_action}}", |
| | | "ResourceRecordSet": { |
| | | "Name": "nfs{{loop.index}}.{{zone_internal_dns}}", |
| | | "Type": "A", |
| | | "TTL": 20, |
| | | "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ] |
| | | } |
| | | }, |
| | | {% endfor %} |
| | | {% for host in groups[('tag_' + env_type + '-' + guid + '_bastion') | replace('-', '_') ] %} |
| | | { |
| | | "Action": "{{DNS_action}}", |
| | | "ResourceRecordSet": { |
| | | "Name": "bastion{{loop.index}}.{{zone_internal_dns}}", |
| | | "Type": "A", |
| | | "TTL": 20, |
| | | "ResourceRecords": [ { "Value": "{{hostvars[host]['ec2_private_ip_address']}}" } ] |
| | | } |
| | | } |
| | | {% endfor %} |
| | | |
| | | ] |
| | | } |
New file |
| | |
| | | { |
| | | "AWSTemplateFormatVersion": "2010-09-09", |
| | | "Parameters": { }, |
| | | "Mappings": { |
| | | "RegionMapping": { |
| | | "us-east-1": { |
| | | "AMI": "ami-2051294a" |
| | | }, |
| | | "us-east-2": { |
| | | "AMI": "Not Available" |
| | | }, |
| | | "us-west-1": { |
| | | "AMI": "ami-d1315fb1" |
| | | }, |
| | | "us-west-2": { |
| | | "AMI": "ami-775e4f16" |
| | | }, |
| | | "eu-west-1": { |
| | | "AMI": "ami-8b8c57f8" |
| | | }, |
| | | "eu-central-1": { |
| | | "AMI": "ami-875042eb" |
| | | }, |
| | | "ap-northeast-1": { |
| | | "AMI": "ami-0dd8f963" |
| | | }, |
| | | "ap-northeast-2": { |
| | | "AMI": "ami-44db152a" |
| | | }, |
| | | "ap-southeast-1": { |
| | | "AMI": "ami-3f03c55c" |
| | | }, |
| | | "ap-southeast-2": { |
| | | "AMI": "ami-e0c19f83" |
| | | }, |
| | | "sa-east-1": { |
| | | "AMI": "ami-27b3094b" |
| | | } |
| | | }, |
| | | "DNSMapping": { |
| | | "us-east-1": { |
| | | "domain": "us-east-1.compute.internal" |
| | | }, |
| | | "us-west-1": { |
| | | "domain": "us-west-1.compute.internal" |
| | | }, |
| | | "us-west-2": { |
| | | "domain": "us-west-2.compute.internal" |
| | | }, |
| | | "eu-west-1": { |
| | | "domain": "eu-west-1.compute.internal" |
| | | }, |
| | | "eu-central-1": { |
| | | "domain": "eu-central-1.compute.internal" |
| | | }, |
| | | "ap-northeast-1": { |
| | | "domain": "ap-northeast-1.compute.internal" |
| | | }, |
| | | "ap-northeast-2": { |
| | | "domain": "ap-northeast-2.compute.internal" |
| | | }, |
| | | "ap-southeast-1": { |
| | | "domain": "ap-southeast-1.compute.internal" |
| | | }, |
| | | "ap-southeast-2": { |
| | | "domain": "ap-southeast-2.compute.internal" |
| | | }, |
| | | "sa-east-1": { |
| | | "domain": "sa-east-1.compute.internal" |
| | | } |
| | | } |
| | | }, |
| | | "Resources": { |
| | | "Vpc": { |
| | | "Type": "AWS::EC2::VPC", |
| | | "Properties": { |
| | | "CidrBlock": "192.199.0.0/16", |
| | | "EnableDnsSupport": "true", |
| | | "EnableDnsHostnames": "true", |
| | | "Tags": [ |
| | | { |
| | | "Key": "Name", |
| | | "Value": "VPCID_NAME_TAG" |
| | | }, |
| | | { |
| | | "Key": "Application", |
| | | "Value": { |
| | | "Ref": "AWS::StackId" |
| | | } |
| | | } |
| | | ] |
| | | } |
| | | }, |
| | | "VpcInternetGateway": { |
| | | "Type": "AWS::EC2::InternetGateway", |
| | | "Properties": {} |
| | | }, |
| | | "VpcGA": { |
| | | "Type": "AWS::EC2::VPCGatewayAttachment", |
| | | "Properties": { |
| | | "InternetGatewayId": { |
| | | "Ref": "VpcInternetGateway" |
| | | }, |
| | | "VpcId": { |
| | | "Ref": "Vpc" |
| | | } |
| | | } |
| | | }, |
| | | "VpcRouteTable": { |
| | | "Type": "AWS::EC2::RouteTable", |
| | | "Properties": { |
| | | "VpcId": { |
| | | "Ref": "Vpc" |
| | | } |
| | | } |
| | | }, |
| | | "VPCRouteInternetGateway": { |
| | | "DependsOn" : "VpcGA", |
| | | "Type": "AWS::EC2::Route", |
| | | "Properties": { |
| | | "GatewayId": { |
| | | "Ref": "VpcInternetGateway" |
| | | }, |
| | | "DestinationCidrBlock": "0.0.0.0/0", |
| | | "RouteTableId": { |
| | | "Ref": "VpcRouteTable" |
| | | } |
| | | } |
| | | }, |
| | | "PublicSubnet": { |
| | | "Type": "AWS::EC2::Subnet", |
| | | "DependsOn": [ |
| | | "Vpc" |
| | | ], |
| | | "Properties": { |
| | | "CidrBlock": "192.199.0.0/24", |
| | | "Tags": [ |
| | | { |
| | | "Key": "Name", |
| | | "Value": "{{project_tag}}" |
| | | }, |
| | | { |
| | | "Key": "Application", |
| | | "Value": { |
| | | "Ref": "AWS::StackId" |
| | | } |
| | | } |
| | | ], |
| | | "MapPublicIpOnLaunch": "true", |
| | | "VpcId": { |
| | | "Ref": "Vpc" |
| | | } |
| | | } |
| | | }, |
| | | "PublicSubnetRTA": { |
| | | "Type": "AWS::EC2::SubnetRouteTableAssociation", |
| | | "Properties": { |
| | | "RouteTableId": { |
| | | "Ref": "VpcRouteTable" |
| | | }, |
| | | "SubnetId": { |
| | | "Ref": "PublicSubnet" |
| | | } |
| | | } |
| | | }, |
| | | "NodeSG": { |
| | | "Type": "AWS::EC2::SecurityGroup", |
| | | "Properties": { |
| | | "GroupDescription": "Node", |
| | | "VpcId": { |
| | | "Ref": "Vpc" |
| | | }, |
| | | "Tags": [ |
| | | { |
| | | "Key": "Name", |
| | | "Value": "ose_node_sg" |
| | | } |
| | | ] |
| | | } |
| | | }, |
| | | "ZabbixSG": { |
| | | "Type": "AWS::EC2::SecurityGroup", |
| | | "Properties": { |
| | | "GroupDescription": "Node", |
| | | "VpcId": { |
| | | "Ref": "Vpc" |
| | | }, |
| | | "Tags": [ |
| | | { |
| | | "Key": "Name", |
| | | "Value": "zabbix_sg" |
| | | } |
| | | ] |
| | | } |
| | | }, |
| | | "MasterSG": { |
| | | "Type": "AWS::EC2::SecurityGroup", |
| | | "Properties": { |
| | | "GroupDescription": "Master", |
| | | "VpcId": { |
| | | "Ref": "Vpc" |
| | | }, |
| | | "Tags": [ |
| | | { |
| | | "Key": "Name", |
| | | "Value": "ose_master_sg" |
| | | } |
| | | ] |
| | | } |
| | | }, |
| | | "MasterTCPPorts": { |
| | | "Type": "AWS::EC2::SecurityGroupIngress", |
| | | "Properties": { |
| | | "GroupId": { |
| | | "Fn::GetAtt": [ |
| | | "MasterSG", |
| | | "GroupId" |
| | | ] |
| | | }, |
| | | "IpProtocol": "tcp", |
| | | "FromPort": "0", |
| | | "ToPort": "65535", |
| | | "CidrIp": "0.0.0.0/0" |
| | | } |
| | | }, |
| | | "MasterUDPPorts": { |
| | | "Type": "AWS::EC2::SecurityGroupIngress", |
| | | "Properties": { |
| | | "GroupId": { |
| | | "Fn::GetAtt": [ |
| | | "MasterSG", |
| | | "GroupId" |
| | | ] |
| | | }, |
| | | "IpProtocol": "udp", |
| | | "FromPort": "0", |
| | | "ToPort": "65535", |
| | | "CidrIp": "0.0.0.0/0" |
| | | } |
| | | }, |
| | | "NodeUDPPorts": { |
| | | "Type": "AWS::EC2::SecurityGroupIngress", |
| | | "Properties": { |
| | | "GroupId": { |
| | | "Fn::GetAtt": [ |
| | | "NodeSG", |
| | | "GroupId" |
| | | ] |
| | | }, |
| | | "IpProtocol": "udp", |
| | | "FromPort": "0", |
| | | "ToPort": "65535", |
| | | "CidrIp": "0.0.0.0/0" |
| | | } |
| | | }, |
| | | "NodeTCPPorts": { |
| | | "Type": "AWS::EC2::SecurityGroupIngress", |
| | | "Properties": { |
| | | "GroupId": { |
| | | "Fn::GetAtt": [ |
| | | "NodeSG", |
| | | "GroupId" |
| | | ] |
| | | }, |
| | | "IpProtocol": "tcp", |
| | | "FromPort": "0", |
| | | "ToPort": "65535", |
| | | "CidrIp": "0.0.0.0/0" |
| | | } |
| | | }, |
| | | "Master": { |
| | | "Type": "AWS::EC2::Instance", |
| | | "Properties": { |
| | | "ImageId": { |
| | | "Fn::FindInMap": [ |
| | | "RegionMapping", |
| | | { |
| | | "Ref": "AWS::Region" |
| | | }, |
| | | "AMI" |
| | | ] |
| | | }, |
| | | "InstanceType": "{{master_instance_type}}", |
| | | "KeyName": "{{key_name}}", |
| | | "SecurityGroupIds": [ |
| | | { |
| | | "Fn::GetAtt": [ |
| | | "MasterSG", |
| | | "GroupId" |
| | | ] |
| | | } |
| | | ], |
| | | "SubnetId": { |
| | | "Ref": "PublicSubnet" |
| | | }, |
| | | "Tags": [ |
| | | { |
| | | "Key": "Name", |
| | | "Value": "master" |
| | | }, |
| | | { |
| | | "Key": "AnsibleGroup", |
| | | "Value": "masters" |
| | | }, |
| | | { |
| | | "Key": "Project", |
| | | "Value": "{{project_tag}}" |
| | | } |
| | | ], |
| | | "BlockDeviceMappings": [ |
| | | { |
| | | "DeviceName": "/dev/xvdb", |
| | | "Ebs": { |
| | | "VolumeSize": 20 |
| | | } |
| | | } |
| | | ] |
| | | } |
| | | }, |
| | | "zoneinternalidns": { |
| | | "Type": "AWS::Route53::HostedZone", |
| | | "Properties": { |
| | | "Name": "{{ zone_internal_dns }}", |
| | | "VPCs" : [{ |
| | | "VPCId": { "Ref" : "Vpc" }, |
| | | "VPCRegion": { "Ref": "AWS::Region" } } ], |
| | | "HostedZoneConfig": { |
| | | "Comment": "Created By Cloudformation" |
| | | } |
| | | } |
| | | }, |
| | | "MasterDNS": { |
| | | "Type": "AWS::Route53::RecordSetGroup", |
| | | "DependsOn": "Master", |
| | | "Properties": { |
| | | "HostedZoneId": "{{HostedZoneId}}", |
| | | "RecordSets": [ |
| | | { |
| | | "Name": "{{master_public_dns}}", |
| | | "Type": "A", |
| | | "TTL": "10", |
| | | "ResourceRecords": [ |
| | | { |
| | | "Fn::GetAtt": [ |
| | | "Master", |
| | | "PublicIp" |
| | | ] |
| | | } |
| | | ] |
| | | } |
| | | ] |
| | | } |
| | | }, |
| | | "BastionDNS": { |
| | | "Type": "AWS::Route53::RecordSetGroup", |
| | | "DependsOn": "Master", |
| | | "Properties": { |
| | | "HostedZoneId": "{{HostedZoneId}}", |
| | | "RecordSets": [ |
| | | { |
| | | "Name": "{{bastion_public_dns}}", |
| | | "Type": "A", |
| | | "TTL": "10", |
| | | "ResourceRecords": [ |
| | | { |
| | | "Fn::GetAtt": [ |
| | | "Bastion", |
| | | "PublicIp" |
| | | ] |
| | | } |
| | | ] |
| | | } |
| | | ] |
| | | } |
| | | }, |
| | | "InfraNode": { |
| | | "Type": "AWS::EC2::Instance", |
| | | "Properties": { |
| | | "ImageId": { |
| | | "Fn::FindInMap": [ |
| | | "RegionMapping", |
| | | { |
| | | "Ref": "AWS::Region" |
| | | }, |
| | | "AMI" |
| | | ] |
| | | }, |
| | | "InstanceType": "{{infranode_instance_type}}", |
| | | "KeyName": "{{key_name}}", |
| | | "SecurityGroupIds": [ |
| | | { |
| | | "Fn::GetAtt": [ |
| | | "NodeSG", |
| | | "GroupId" |
| | | ] |
| | | } |
| | | ], |
| | | "SubnetId": { |
| | | "Ref": "PublicSubnet" |
| | | }, |
| | | "Tags": [ |
| | | { |
| | | "Key": "Name", |
| | | "Value": "infranode" |
| | | }, |
| | | { |
| | | "Key": "AnsibleGroup", |
| | | "Value": "infranodes" |
| | | }, |
| | | { |
| | | "Key": "Project", |
| | | "Value": "{{project_tag}}" |
| | | } |
| | | ], |
| | | "BlockDeviceMappings": [ |
| | | { |
| | | "DeviceName": "/dev/xvdb", |
| | | "Ebs": { |
| | | "VolumeSize": 60 |
| | | } |
| | | } |
| | | ] |
| | | } |
| | | }, |
| | | "CloudDNS": { |
| | | "Type": "AWS::Route53::RecordSetGroup", |
| | | "DependsOn": "InfraNode", |
| | | "Properties": { |
| | | "HostedZoneId": "{{HostedZoneId}}", |
| | | "RecordSets": [ |
| | | { |
| | | "Name": "{{cloudapps_dns}}", |
| | | "Type": "A", |
| | | "TTL": "10", |
| | | "ResourceRecords": [ |
| | | { |
| | | "Fn::GetAtt": [ |
| | | "InfraNode", |
| | | "PublicIp" |
| | | ] |
| | | } |
| | | ] |
| | | } |
| | | ] |
| | | } |
| | | }, |
| | | "AppLC": { |
| | | "Type": "AWS::AutoScaling::LaunchConfiguration", |
| | | "Properties": { |
| | | "AssociatePublicIpAddress": true, |
| | | "ImageId": { |
| | | "Fn::FindInMap": [ |
| | | "RegionMapping", |
| | | { |
| | | "Ref": "AWS::Region" |
| | | }, |
| | | "AMI" |
| | | ] |
| | | }, |
| | | "InstanceType": "{{node_instance_type}}", |
| | | "KeyName": "{{key_name}}", |
| | | "SecurityGroups": [ |
| | | { |
| | | "Ref": "NodeSG" |
| | | } |
| | | ], |
| | | "BlockDeviceMappings": [ |
| | | { |
| | | "DeviceName": "/dev/xvdb", |
| | | "Ebs": { |
| | | "VolumeSize": 60 |
| | | } |
| | | } |
| | | ] |
| | | } |
| | | }, |
| | | "AppAsg": { |
| | | "Type": "AWS::AutoScaling::AutoScalingGroup", |
| | | "Properties": { |
| | | "DesiredCapacity": {{node_instance_count}}, |
| | | "LaunchConfigurationName": { |
| | | "Ref": "AppLC" |
| | | }, |
| | | "MaxSize": 10, |
| | | "MinSize": 1, |
| | | "Tags": [ |
| | | { |
| | | "Key": "Name", |
| | | "Value": "node", |
| | | "PropagateAtLaunch": true |
| | | }, |
| | | { |
| | | "Key": "AnsibleGroup", |
| | | "Value": "nodes", |
| | | "PropagateAtLaunch": true |
| | | }, |
| | | { |
| | | "Key": "Project", |
| | | "Value": "{{project_tag}}", |
| | | "PropagateAtLaunch": true |
| | | } |
| | | ], |
| | | "VPCZoneIdentifier": [ |
| | | { |
| | | "Ref": "PublicSubnet" |
| | | } |
| | | ] |
| | | } |
| | | }, |
| | | "NFSNode": { |
| | | "Type": "AWS::EC2::Instance", |
| | | "Properties": { |
| | | "ImageId": { |
| | | "Fn::FindInMap": [ |
| | | "RegionMapping", |
| | | { |
| | | "Ref": "AWS::Region" |
| | | }, |
| | | "AMI" |
| | | ] |
| | | }, |
| | | "InstanceType": "{{nfs_instance_type}}", |
| | | "KeyName": "{{key_name}}", |
| | | "SecurityGroupIds": [ |
| | | { |
| | | "Fn::GetAtt": [ |
| | | "NodeSG", |
| | | "GroupId" |
| | | ] |
| | | } |
| | | ], |
| | | "SubnetId": { |
| | | "Ref": "PublicSubnet" |
| | | }, |
| | | "Tags": [ |
| | | { |
| | | "Key": "Name", |
| | | "Value": "nfs" |
| | | }, |
| | | { |
| | | "Key": "AnsibleGroup", |
| | | "Value": "nfs" |
| | | }, |
| | | { |
| | | "Key": "Project", |
| | | "Value": "{{project_tag}}" |
| | | } |
| | | ], |
| | | "BlockDeviceMappings": [ |
| | | { |
| | | "DeviceName": "/dev/xvdb", |
| | | "Ebs": { |
| | | "VolumeSize": 200, |
| | | "VolumeType": "gp2" |
| | | } |
| | | } |
| | | ] |
| | | } |
| | | }, |
| | | "Bastion": { |
| | | "Type": "AWS::EC2::Instance", |
| | | "Properties": { |
| | | "ImageId": { |
| | | "Fn::FindInMap": [ |
| | | "RegionMapping", |
| | | { |
| | | "Ref": "AWS::Region" |
| | | }, |
| | | "AMI" |
| | | ] |
| | | }, |
| | | "InstanceType": "{{bastion_instance_type}}", |
| | | "KeyName": "{{key_name}}", |
| | | "SecurityGroupIds": [ |
| | | { |
| | | "Fn::GetAtt": [ |
| | | "NodeSG", |
| | | "GroupId" |
| | | ] |
| | | } |
| | | ], |
| | | "SubnetId": { |
| | | "Ref": "PublicSubnet" |
| | | }, |
| | | "Tags": [ |
| | | { |
| | | "Key": "Name", |
| | | "Value": "bastion" |
| | | }, |
| | | { |
| | | "Key": "AnsibleGroup", |
| | | "Value": "bastions" |
| | | }, |
| | | { |
| | | "Key": "Project", |
| | | "Value": "{{project_tag}}" |
| | | } |
| | | ] |
| | | } |
| | | }, |
| | | "RegistryS3": { |
| | | "Type": "AWS::S3::Bucket", |
| | | "Properties": { |
| | | "BucketName": "{{ env_type }}-{{ guid }}", |
| | | "Tags": [ |
| | | { |
| | | "Key": "Name", |
| | | "Value": "s3-{{ env_type }}-{{ guid }}" |
| | | }, |
| | | { |
| | | "Key": "Project", |
| | | "Value": "{{project_tag}}" |
| | | } |
| | | ] |
| | | } |
| | | } |
| | | }, |
| | | "Outputs": { |
| | | "RegistryS3Output": { |
| | | "Description": "The ID of the S3 Bucket", |
| | | "Value": { |
| | | "Ref": "RegistryS3" |
| | | }}, |
| | | "Route53internalzoneOutput": { |
| | | "Description": "The ID of the internal route 53 zone", |
| | | "Value": { |
| | | "Ref": "zoneinternalidns" |
| | | } |
| | | } |
| | | } |
| | | } |
New file |
| | |
| | | [OSEv3:vars] |
| | | |
| | | # |
| | | # /etc/ansible/hosts file for OpenShift Container Platform 3.9.14 |
| | | # |
| | | ########################################################################### |
| | | ### Ansible Vars |
| | | ########################################################################### |
| | | timeout=60 |
| | | ansible_become=yes |
| | | ansible_ssh_user={{ansible_ssh_user}} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Basic Vars |
| | | ########################################################################### |
| | | deployment_type=openshift-enterprise |
| | | containerized=false |
| | | openshift_disable_check="disk_availability,memory_availability,docker_image_availability" |
| | | |
| | | {% if container_runtime == "cri-o" %} |
| | | openshift_use_crio=True |
| | | openshift_crio_enable_docker_gc=True |
| | | {% endif %} |
| | | |
| | | # default project node selector |
| | | osm_default_node_selector='env=users' |
| | | openshift_hosted_infra_selector="env=infra" |
| | | |
| | | # Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. |
| | | openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']} |
| | | |
| | | # Configure logrotate scripts |
| | | # See: https://github.com/nickhammond/ansible-logrotate |
| | | logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | ########################################################################### |
| | | ### Glusterfs |
| | | ########################################################################### |
| | | openshift_storage_glusterfs_namespace=glusterfs |
| | | openshift_storage_glusterfs_name=storage |
| | | openshift_storage_glusterfs_storageclass_default=true |
| | | #openshift_storage_glusterfs_wipe=True |
| | | openshift_master_dynamic_provisioning_enabled=True |
| | | #dynamic_volumes_check=False |
| | | {% endif %} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Cockpit Vars |
| | | ########################################################################### |
| | | |
| | | # Enable cockpit |
| | | osm_use_cockpit=true |
| | | osm_cockpit_plugins=['cockpit-kubernetes'] |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Master Vars |
| | | ########################################################################### |
| | | |
| | | openshift_master_api_port={{master_api_port}} |
| | | openshift_master_console_port={{master_api_port}} |
| | | |
| | | openshift_master_cluster_method=native |
| | | openshift_master_cluster_hostname={{master_lb_dns}} |
| | | openshift_master_cluster_public_hostname={{master_lb_dns}} |
| | | openshift_master_default_subdomain={{cloudapps_suffix}} |
| | | openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}} |
| | | |
| | | {% if install_lets_encrypt_certificates|bool %} |
| | | openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}} |
| | | {% endif %} |
| | | |
| | | openshift_set_hostname=True |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Network Vars |
| | | ########################################################################### |
| | | |
| | | osm_cluster_network_cidr=10.1.0.0/16 |
| | | openshift_portal_net=172.30.0.0/16 |
| | | |
| | | #os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy' |
| | | {{multi_tenant_setting}} |
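| | | # The line above is rendered from multi_tenant_setting, which is
| | | # expected to expand to an os_sdn_network_plugin_name=... line (for
| | | # example the multitenant plugin); the actual value comes from the
| | | # env vars.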
| | | |
| | | # This should be turned on once all dependent scripts use firewalld rather than iptables |
| | | # os_firewall_use_firewalld=True |
| | | |
| | | ########################################################################### |
| | | ### OpenShift admission plugin config |
| | | ########################################################################### |
| | | |
| | | {% if install_openwhisk|bool %} |
| | | # TODO: add imagePolicy here, it's enabled by default |
| | | openshift_master_admission_plugin_config={"openshift.io/ImagePolicy":{"configuration":{"apiVersion":"v1","kind":"ImagePolicyConfig","resolveImages": "AttemptRewrite"}}} |
| | | {% endif %} |
| | | |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Authentication Vars |
| | | ########################################################################### |
| | | |
| | | {% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %} |
| | | {{openshift_master_ldap_ca_file}} |
| | | {% endif %} |
| | | |
| | | {% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %} |
| | | openshift_master_htpasswd_file=/root/htpasswd.openshift |
| | | {% endif %} |
| | | |
| | | openshift_master_identity_providers='{{identity_providers|to_json}}' |
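| | | # For illustration only (the real value is identity_providers from
| | | # the env vars), an htpasswd-based setup typically renders to
| | | # something like:
| | | # openshift_master_identity_providers='[{"name": "htpasswd_auth", "login": "true", "challenge": "true", "kind": "HTPasswdPasswordIdentityProvider", "filename": "/root/htpasswd.openshift"}]'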
| | | |
| | | ########################################################################### |
| | | ### OpenShift Metrics and Logging Vars |
| | | ########################################################################### |
| | | |
| | | # Enable cluster metrics |
| | | ######################## |
| | | openshift_metrics_install_metrics={{install_metrics}} |
| | | |
| | | {% if install_nfs|bool and not install_glusterfs|bool %} |
| | | openshift_metrics_storage_kind=nfs |
| | | openshift_metrics_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_metrics_storage_nfs_directory=/srv/nfs |
| | | openshift_metrics_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_metrics_storage_volume_name=metrics |
| | | openshift_metrics_storage_volume_size=10Gi |
| | | openshift_metrics_storage_labels={'storage': 'metrics'} |
| | | openshift_metrics_cassanda_pvc_storage_class_name='' |
| | | {% endif %} |
| | | |
| | | openshift_metrics_cassandra_nodeselector={"env":"infra"} |
| | | openshift_metrics_hawkular_nodeselector={"env":"infra"} |
| | | openshift_metrics_heapster_nodeselector={"env":"infra"} |
| | | |
| | | # Add Prometheus Metrics: |
| | | ######################### |
| | | openshift_hosted_prometheus_deploy=true |
| | | openshift_prometheus_node_selector={"env":"infra"} |
| | | openshift_prometheus_namespace=openshift-metrics |
| | | |
| | | # Prometheus |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_storage_kind=dynamic |
| | | openshift_prometheus_storage_volume_size=20Gi |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_storage_kind=nfs |
| | | openshift_prometheus_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_storage_volume_size=10Gi |
| | | openshift_prometheus_storage_labels={'storage': 'prometheus'} |
| | | {% endif %} |
| | | openshift_prometheus_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_storage_volume_name=prometheus |
| | | openshift_prometheus_storage_type='pvc' |
| | | |
| | | # For prometheus-alertmanager |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_alertmanager_storage_kind=dynamic |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_alertmanager_storage_kind=nfs |
| | | openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} |
| | | {% endif %} |
| | | openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_alertmanager_storage_volume_size=10Gi |
| | | openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager |
| | | openshift_prometheus_alertmanager_storage_type='pvc' |
| | | |
| | | # For prometheus-alertbuffer |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_alertbuffer_storage_kind=dynamic |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_alertbuffer_storage_kind=nfs |
| | | openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} |
| | | {% endif %} |
| | | openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer |
| | | openshift_prometheus_alertbuffer_storage_volume_size=10Gi |
| | | openshift_prometheus_alertbuffer_storage_type='pvc' |
| | | |
| | | # Necessary for 3.9.14 |
| | | openshift_prometheus_node_exporter_image_version=v3.9 |
| | | |
| | | |
| | | # Enable cluster logging |
| | | ######################## |
| | | openshift_logging_install_logging={{install_logging}} |
| | | |
| | | {% if install_nfs|bool and not install_glusterfs|bool %} |
| | | openshift_logging_storage_kind=nfs |
| | | openshift_logging_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_logging_storage_nfs_directory=/srv/nfs |
| | | openshift_logging_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_logging_storage_volume_name=logging |
| | | openshift_logging_storage_volume_size=10Gi |
| | | openshift_logging_storage_labels={'storage': 'logging'} |
| | | openshift_logging_es_pvc_storage_class_name='' |
| | | {% endif %} |
| | | |
| | | openshift_logging_es_cluster_size=1 |
| | | openshift_logging_es_nodeselector={"env":"infra"} |
| | | openshift_logging_kibana_nodeselector={"env":"infra"} |
| | | openshift_logging_curator_nodeselector={"env":"infra"} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Project Management Vars |
| | | ########################################################################### |
| | | |
| | | # Configure additional projects |
| | | # openshift_additional_projects={'my-infra-project-test': {'default_node_selector': 'env=infra'}} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Router and Registry Vars |
| | | ########################################################################### |
| | | |
| | | openshift_hosted_router_replicas={{infranode_instance_count}} |
| | | |
| | | {% if install_lets_encrypt_certificates|bool %} |
| | | openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}} |
| | | {% endif %} |
| | | |
| | | openshift_hosted_registry_replicas=1 |
| | | openshift_hosted_registry_pullthrough=true |
| | | openshift_hosted_registry_acceptschema2=true |
| | | openshift_hosted_registry_enforcequota=true |
| | | |
| | | {% if s3user_access_key is defined %} |
| | | # Registry AWS S3 |
| | | # S3 bucket must already exist. |
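| | | # These keys are expected to come from the S3UserAccessKey /
| | | # S3UserSecretAccessKey outputs of the CloudFormation template above.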
| | | openshift_hosted_registry_storage_kind=object |
| | | openshift_hosted_registry_storage_provider=s3 |
| | | openshift_hosted_registry_storage_s3_accesskey={{ s3user_access_key }} |
| | | openshift_hosted_registry_storage_s3_secretkey={{ s3user_secret_access_key }} |
| | | openshift_hosted_registry_storage_s3_bucket={{ project_tag }} |
| | | openshift_hosted_registry_storage_s3_region={{ aws_region }} |
| | | openshift_hosted_registry_storage_s3_chunksize=26214400 |
| | | openshift_hosted_registry_storage_s3_rootdirectory=/registry |
| | | {% endif %} |
| | | |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Service Catalog Vars |
| | | ########################################################################### |
| | | |
| | | openshift_enable_service_catalog=true |
| | | |
| | | template_service_broker_install=true |
| | | openshift_template_service_broker_namespaces=['openshift'] |
| | | |
| | | ansible_service_broker_install=true |
| | | ansible_service_broker_local_registry_whitelist=['.*-apb$'] |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | openshift_hosted_etcd_storage_kind=dynamic |
| | | {% elif install_nfs|bool %} |
| | | openshift_hosted_etcd_storage_kind=nfs |
| | | openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)" |
| | | openshift_hosted_etcd_storage_nfs_directory=/srv/nfs |
| | | openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'} |
| | | {% endif %} |
| | | openshift_hosted_etcd_storage_volume_name=etcd-asb |
| | | openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_hosted_etcd_storage_volume_size=10G |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Hosts |
| | | ########################################################################### |
| | | [OSEv3:children] |
| | | masters |
| | | etcd |
| | | nodes |
| | | {% if install_nfs|bool %} |
| | | nfs |
| | | {% endif %} |
| | | {% if install_glusterfs|bool %} |
| | | glusterfs |
| | | {% endif %} |
| | | {% if groups['newnodes']|d([])|length > 0 %} |
| | | new_nodes |
| | | {% endif %} |
| | | |
| | | [masters] |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}} |
| | | {% endfor %} |
| | | |
| | | [etcd] |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}} |
| | | {% endfor %} |
| | | |
| | | [nodes] |
| | | ## These are the masters |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | ## These are infranodes |
| | | {% for host in groups['infranodes'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'zone': '{{hostvars[host]['placement']}}', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | ## These are regular nodes |
| | | {% for host in groups['nodes'] |
| | | if host not in groups['newnodes']|d([]) |
| | | and host not in groups['glusterfs']|d([]) |
| | | %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'zone': '{{hostvars[host]['placement']}}', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | {% if groups['glusterfs']|d([])|length > 0 %} |
| | | ## These are glusterfs nodes |
| | | {% for host in groups['glusterfs'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'glusterfs', 'zone': '{{hostvars[host]['placement']}}', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if groups['newnodes']|d([])|length > 0 %} |
| | | # A scaleup was performed; the new hosts go into [new_nodes], see:
| | | # https://docs.openshift.com/container-platform/3.7/install_config/adding_hosts_to_existing_cluster.html |
| | | [new_nodes] |
| | | {% for host in groups['newnodes'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'zone': '{{hostvars[host]['placement']}}', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if install_nfs|bool %} |
| | | [nfs] |
| | | {% for host in [groups['support']|sort|first] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | {% set query = "[?name=='support']|[0].volumes[?purpose=='glusterfs'].device_name" %} |
| | | [glusterfs] |
| | | {% for host in groups['glusterfs'] %} |
| | | {% if cloud_provider == 'ec2' %} |
| | | {{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{instances|json_query(query)|to_json}}' |
| | | {% elif cloud_provider == 'azure' %} |
| | | {{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{ [ hostvars[host].glusterfs_device_name ] |to_json}}' |
| | | {% endif %} |
| | | {% endfor %} |
| | | {% endif %} |
New file |
| | |
| | | [OSEv3:vars] |
| | | |
| | | # |
| | | # /etc/ansible/hosts file for OpenShift Container Platform 3.9.14 |
| | | # |
| | | ########################################################################### |
| | | ### Ansible Vars |
| | | ########################################################################### |
| | | timeout=60 |
| | | ansible_become=yes |
| | | ansible_ssh_user={{ansible_ssh_user}} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Basic Vars |
| | | ########################################################################### |
| | | deployment_type=openshift-enterprise |
| | | containerized=false |
| | | openshift_disable_check="disk_availability,memory_availability,docker_image_availability" |
| | | |
| | | {% if container_runtime == "cri-o" %} |
| | | openshift_use_crio=True |
| | | openshift_crio_enable_docker_gc=True |
| | | {% endif %} |
| | | |
| | | # default project node selector |
| | | osm_default_node_selector='env=users' |
| | | openshift_hosted_infra_selector="env=infra" |
| | | |
| | | # Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. |
| | | openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']} |
| | | |
| | | # Configure logrotate scripts |
| | | # See: https://github.com/nickhammond/ansible-logrotate |
| | | logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | ########################################################################### |
| | | ### Glusterfs |
| | | ########################################################################### |
| | | openshift_storage_glusterfs_namespace=glusterfs |
| | | openshift_storage_glusterfs_name=storage |
| | | openshift_storage_glusterfs_storageclass_default=true |
| | | #openshift_storage_glusterfs_wipe=True |
| | | openshift_master_dynamic_provisioning_enabled=True |
| | | #dynamic_volumes_check=False |
| | | {% endif %} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Cockpit Vars |
| | | ########################################################################### |
| | | |
| | | # Enable cockpit |
| | | osm_use_cockpit=true |
| | | osm_cockpit_plugins=['cockpit-kubernetes'] |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Master Vars |
| | | ########################################################################### |
| | | |
| | | openshift_master_api_port={{master_api_port}} |
| | | openshift_master_console_port={{master_api_port}} |
| | | |
| | | openshift_master_cluster_method=native |
| | | openshift_master_cluster_hostname={{master_lb_dns}} |
| | | openshift_master_cluster_public_hostname={{master_lb_dns}} |
| | | openshift_master_default_subdomain={{cloudapps_suffix}} |
| | | openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}} |
| | | |
| | | {% if install_lets_encrypt_certificates|bool %} |
| | | openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}} |
| | | {% endif %} |
| | | |
| | | openshift_set_hostname=True |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Network Vars |
| | | ########################################################################### |
| | | |
| | | osm_cluster_network_cidr=10.1.0.0/16 |
| | | openshift_portal_net=172.30.0.0/16 |
| | | |
| | | #os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy' |
| | | {{multi_tenant_setting}} |
| | | |
| | | # This should be turned on once all dependent scripts use firewalld rather than iptables |
| | | # os_firewall_use_firewalld=True |
| | | |
| | | ########################################################################### |
| | | ### OpenShift admission plugin config |
| | | ########################################################################### |
| | | |
| | | {% if install_openwhisk|bool %} |
| | | # TODO: add imagePolicy here, it's enabled by default
| | | openshift_master_admission_plugin_config={"openshift.io/ImagePolicy":{"configuration":{"apiVersion":"v1","kind":"ImagePolicyConfig","resolveImages": "AttemptRewrite"}}} |
| | | {% endif %} |
| | | |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Authentication Vars |
| | | ########################################################################### |
| | | |
| | | {% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %} |
| | | {{openshift_master_ldap_ca_file}} |
| | | {% endif %} |
| | | |
| | | {% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %} |
| | | openshift_master_htpasswd_file=/root/htpasswd.openshift |
| | | {% endif %} |
| | | |
| | | openshift_master_identity_providers='{{identity_providers|to_json}}' |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Metrics and Logging Vars |
| | | ########################################################################### |
| | | |
| | | # Enable cluster metrics |
| | | ######################## |
| | | openshift_metrics_install_metrics={{install_metrics}} |
| | | |
| | | {% if install_nfs|bool and not install_glusterfs|bool %} |
| | | openshift_metrics_storage_kind=nfs |
| | | openshift_metrics_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_metrics_storage_nfs_directory=/srv/nfs |
| | | openshift_metrics_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_metrics_storage_volume_name=metrics |
| | | openshift_metrics_storage_volume_size=10Gi |
| | | openshift_metrics_storage_labels={'storage': 'metrics'} |
| | | openshift_metrics_cassanda_pvc_storage_class_name='' |
| | | {% endif %} |
| | | |
| | | openshift_metrics_cassandra_nodeselector={"env":"infra"} |
| | | openshift_metrics_hawkular_nodeselector={"env":"infra"} |
| | | openshift_metrics_heapster_nodeselector={"env":"infra"} |
| | | |
| | | # Add Prometheus Metrics: |
| | | ######################### |
| | | openshift_hosted_prometheus_deploy=true |
| | | openshift_prometheus_node_selector={"env":"infra"} |
| | | openshift_prometheus_namespace=openshift-metrics |
| | | |
| | | # Prometheus |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_storage_kind=dynamic |
| | | openshift_prometheus_storage_volume_size=20Gi |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_storage_kind=nfs |
| | | openshift_prometheus_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_storage_volume_size=10Gi |
| | | openshift_prometheus_storage_labels={'storage': 'prometheus'} |
| | | {% endif %} |
| | | openshift_prometheus_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_storage_volume_name=prometheus |
| | | openshift_prometheus_storage_type='pvc' |
| | | |
| | | # For prometheus-alertmanager |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_alertmanager_storage_kind=dynamic |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_alertmanager_storage_kind=nfs |
| | | openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} |
| | | {% endif %} |
| | | openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_alertmanager_storage_volume_size=10Gi |
| | | openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager |
| | | openshift_prometheus_alertmanager_storage_type='pvc' |
| | | |
| | | # For prometheus-alertbuffer |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_alertbuffer_storage_kind=dynamic |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_alertbuffer_storage_kind=nfs |
| | | openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} |
| | | {% endif %} |
| | | openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer |
| | | openshift_prometheus_alertbuffer_storage_volume_size=10Gi |
| | | openshift_prometheus_alertbuffer_storage_type='pvc' |
| | | |
| | | # Necessary for 3.9.25 |
| | | openshift_prometheus_node_exporter_image_version=v3.9 |
| | | |
| | | # Enable cluster logging |
| | | ######################## |
| | | openshift_logging_install_logging={{install_logging}} |
| | | |
| | | {% if install_nfs|bool and not install_glusterfs|bool %} |
| | | openshift_logging_storage_kind=nfs |
| | | openshift_logging_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_logging_storage_nfs_directory=/srv/nfs |
| | | openshift_logging_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_logging_storage_volume_name=logging |
| | | openshift_logging_storage_volume_size=10Gi |
| | | openshift_logging_storage_labels={'storage': 'logging'} |
| | | openshift_logging_es_pvc_storage_class_name='' |
| | | {% endif %} |
| | | |
| | | openshift_logging_es_cluster_size=1 |
| | | openshift_logging_es_nodeselector={"env":"infra"} |
| | | openshift_logging_kibana_nodeselector={"env":"infra"} |
| | | openshift_logging_curator_nodeselector={"env":"infra"} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Project Management Vars |
| | | ########################################################################### |
| | | |
| | | # Configure additional projects |
| | | # openshift_additional_projects={'my-infra-project-test': {'default_node_selector': 'env=infra'}} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Router and Registry Vars |
| | | ########################################################################### |
| | | |
| | | openshift_hosted_router_replicas={{infranode_instance_count}} |
| | | |
| | | {% if install_lets_encrypt_certificates|bool %} |
| | | openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}} |
| | | {% endif %} |
| | | |
| | | openshift_hosted_registry_replicas=1 |
| | | openshift_hosted_registry_pullthrough=true |
| | | openshift_hosted_registry_acceptschema2=true |
| | | openshift_hosted_registry_enforcequota=true |
| | | |
| | | {% if s3user_access_key is defined %} |
| | | # Registry AWS S3 |
| | | # S3 bucket must already exist. |
| | | openshift_hosted_registry_storage_kind=object |
| | | openshift_hosted_registry_storage_provider=s3 |
| | | openshift_hosted_registry_storage_s3_accesskey={{ s3user_access_key }} |
| | | openshift_hosted_registry_storage_s3_secretkey={{ s3user_secret_access_key }} |
| | | openshift_hosted_registry_storage_s3_bucket={{ project_tag }} |
| | | openshift_hosted_registry_storage_s3_region={{ aws_region }} |
| | | openshift_hosted_registry_storage_s3_chunksize=26214400 |
| | | openshift_hosted_registry_storage_s3_rootdirectory=/registry |
| | | {% endif %} |
| | | |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Service Catalog Vars |
| | | ########################################################################### |
| | | |
| | | openshift_enable_service_catalog=true |
| | | |
| | | template_service_broker_install=true |
| | | openshift_template_service_broker_namespaces=['openshift'] |
| | | |
| | | ansible_service_broker_install=true |
| | | ansible_service_broker_local_registry_whitelist=['.*-apb$'] |
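| | | # The whitelist regex permits local-registry APB images whose names
| | | # end in "-apb".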
| | | |
| | | {% if install_glusterfs|bool %} |
| | | openshift_hosted_etcd_storage_kind=dynamic |
| | | {% elif install_nfs|bool %} |
| | | openshift_hosted_etcd_storage_kind=nfs |
| | | openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)" |
| | | openshift_hosted_etcd_storage_nfs_directory=/srv/nfs |
| | | openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'} |
| | | {% endif %} |
| | | openshift_hosted_etcd_storage_volume_name=etcd-asb |
| | | openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_hosted_etcd_storage_volume_size=10G |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Hosts |
| | | ########################################################################### |
| | | [OSEv3:children] |
| | | masters |
| | | etcd |
| | | nodes |
| | | {% if install_nfs|bool %} |
| | | nfs |
| | | {% endif %} |
| | | {% if install_glusterfs|bool %} |
| | | glusterfs |
| | | {% endif %} |
| | | {% if groups['newnodes']|d([])|length > 0 %} |
| | | new_nodes |
| | | {% endif %} |
| | | |
| | | [masters] |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}} |
| | | {% endfor %} |
| | | |
| | | [etcd] |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}} |
| | | {% endfor %} |
| | | |
| | | [nodes] |
| | | ## These are the masters |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | ## These are infranodes |
| | | {% for host in groups['infranodes'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'zone': '{{hostvars[host]['placement']}}', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | ## These are regular nodes |
| | | {% for host in groups['nodes'] |
| | | if host not in groups['newnodes']|d([]) |
| | | and host not in groups['glusterfs']|d([]) |
| | | %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'zone': '{{hostvars[host]['placement']}}', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | {% if groups['glusterfs']|d([])|length > 0 %} |
| | | ## These are glusterfs nodes |
| | | {% for host in groups['glusterfs'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'glusterfs', 'zone': '{{hostvars[host]['placement']}}', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if groups['newnodes']|d([])|length > 0 %} |
| | | # A scaleup was performed; the new hosts go into [new_nodes], see:
| | | # https://docs.openshift.com/container-platform/3.7/install_config/adding_hosts_to_existing_cluster.html |
| | | [new_nodes] |
| | | {% for host in groups['newnodes'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'zone': '{{hostvars[host]['placement']}}', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if install_nfs|bool %} |
| | | [nfs] |
| | | {% for host in [groups['support']|sort|first] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | {% set query = "[?name=='support']|[0].volumes[?purpose=='glusterfs'].device_name" %} |
| | | [glusterfs] |
| | | {% for host in groups['glusterfs'] %} |
| | | {% if cloud_provider == 'ec2' %} |
| | | {{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{instances|json_query(query)|to_json}}' |
| | | {% elif cloud_provider == 'azure' %} |
| | | {{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{ [ hostvars[host].glusterfs_device_name ] |to_json}}' |
| | | {% endif %} |
| | | {% endfor %} |
| | | {% endif %} |
New file |
| | |
| | | [OSEv3:vars] |
| | | |
| | | # |
| | | # /etc/ansible/hosts file for OpenShift Container Platform 3.9.14 |
| | | # |
| | | ########################################################################### |
| | | ### Ansible Vars |
| | | ########################################################################### |
| | | timeout=60 |
| | | ansible_become=yes |
| | | ansible_ssh_user={{ansible_ssh_user}} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Basic Vars |
| | | ########################################################################### |
| | | deployment_type=openshift-enterprise |
| | | containerized=false |
| | | openshift_disable_check="disk_availability,memory_availability,docker_image_availability" |
| | | |
| | | {% if container_runtime == "cri-o" %} |
| | | openshift_use_crio=True |
| | | openshift_crio_enable_docker_gc=True |
| | | {% endif %} |
| | | |
| | | # default project node selector |
| | | osm_default_node_selector='env=users' |
| | | openshift_hosted_infra_selector="env=infra" |
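| | | ## These selectors match the 'env' node labels applied in the [nodes]
| | | ## section below: user workloads land on env=users nodes, infrastructure
| | | ## components (router, registry) on env=infra nodes.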
| | | |
| | | # Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. |
| | | openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']} |
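| | | ## The effective per-node pod cap is the lower of pods-per-core x CPU cores
| | | ## and max-pods, e.g. a 4-core node is capped at 40 pods with these values.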
| | | |
| | | # Configure logrotate scripts |
| | | # See: https://github.com/nickhammond/ansible-logrotate |
| | | logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] |
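| | | ## For reference, the entry above renders roughly this logrotate stanza
| | | ## (a sketch, assuming the linked ansible-logrotate role):
| | | ##   /var/log/cron /var/log/maillog /var/log/messages /var/log/secure /var/log/spooler {
| | | ##       daily
| | | ##       rotate 7
| | | ##       size 500M
| | | ##       compress
| | | ##       sharedscripts
| | | ##       missingok
| | | ##       postrotate
| | | ##           /bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true
| | | ##       endscript
| | | ##   }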
| | | |
| | | {% if install_glusterfs|bool %} |
| | | ########################################################################### |
| | | ### OpenShift CNS |
| | | ########################################################################### |
| | | openshift_master_dynamic_provisioning_enabled=True |
| | | |
| | | # Set up GlusterFS Storage |
| | | openshift_storage_glusterfs_namespace=glusterfs |
| | | openshift_storage_glusterfs_name=storage |
| | | openshift_storage_glusterfs_wipe=True |
| | | openshift_storage_glusterfs_storageclass_default=false |
| | | openshift_storage_glusterfs_storageclass=True |
| | | openshift_storageclass_default=false |
| | | openshift_storage_glusterfs_timeout=500 |
| | | |
| | | # Set up Block Storage |
| | | # Set as default storage class during installation to force Logging/metrics to use it |
| | | # Right now the ASB's ETCD PVC will also land on Block due to this fact |
| | | openshift_storage_glusterfs_block_deploy=True |
| | | openshift_storage_glusterfs_block_host_vol_size=100 |
| | | openshift_storage_glusterfs_block_storageclass=True |
| | | openshift_storage_glusterfs_block_storageclass_default=True |
| | | openshift_storage_glusterfs_block_host_vol_create=True |
| | | |
| | | # Run these commands after installation on one of the masters: |
| | | # oc patch storageclass glusterfs-storage -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}' |
| | | # oc patch storageclass glusterfs-block -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}' |
| | | |
| | | {% else %} |
| | | # This line is required to enable NFS (an otherwise unsupported configuration)
| | | openshift_enable_unsupported_configurations=True |
| | | {% endif %} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Cockpit Vars |
| | | ########################################################################### |
| | | |
| | | # Enable cockpit |
| | | osm_use_cockpit=true |
| | | osm_cockpit_plugins=['cockpit-kubernetes'] |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Master Vars |
| | | ########################################################################### |
| | | |
| | | openshift_master_api_port={{master_api_port}} |
| | | openshift_master_console_port={{master_api_port}} |
| | | |
| | | openshift_master_cluster_method=native |
| | | openshift_master_cluster_hostname={{master_lb_dns}} |
| | | openshift_master_cluster_public_hostname={{master_lb_dns}} |
| | | openshift_master_default_subdomain={{cloudapps_suffix}} |
| | | openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}} |
| | | |
| | | {% if install_lets_encrypt_certificates|bool %} |
| | | openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}} |
| | | {% endif %} |
| | | |
| | | openshift_set_hostname=True |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Network Vars |
| | | ########################################################################### |
| | | |
| | | osm_cluster_network_cidr=10.1.0.0/16 |
| | | openshift_portal_net=172.30.0.0/16 |
| | | |
| | | #os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy' |
| | | {{multi_tenant_setting}} |
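| | | ## multi_tenant_setting typically expands to the SDN plugin choice, e.g.
| | | ## os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' (or the
| | | ## networkpolicy variant commented above); the value is set in env_vars.yml.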
| | | |
| | | # This should be turned on once all dependent scripts use firewalld rather than iptables |
| | | # os_firewall_use_firewalld=True |
| | | |
| | | ########################################################################### |
| | | ### OpenShift admission plugin config |
| | | ########################################################################### |
| | | |
| | | {% if install_openwhisk|bool %} |
| | | #TODO: add imagePolicy as it is in default |
| | | openshift_master_admission_plugin_config={"openshift.io/ImagePolicy":{"configuration":{"apiVersion":"v1","kind":"ImagePolicyConfig","resolveImages": "AttemptRewrite"}}} |
| | | {% endif %} |
| | | |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Authentication Vars |
| | | ########################################################################### |
| | | |
| | | {% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %} |
| | | {{openshift_master_ldap_ca_file}} |
| | | {% endif %} |
| | | |
| | | {% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %} |
| | | openshift_master_htpasswd_file=/root/htpasswd.openshift |
| | | {% endif %} |
| | | |
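| | | ## For illustration only - a hypothetical identity_providers definition
| | | ## (set in env_vars.yml) that the next line serializes to JSON:
| | | ##   identity_providers:
| | | ##     - name: htpasswd_auth
| | | ##       login: true
| | | ##       challenge: true
| | | ##       kind: HTPasswdPasswordIdentityProvider
| | | ##       filename: /etc/origin/master/htpasswd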
| | | openshift_master_identity_providers='{{identity_providers|to_json}}' |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Metrics and Logging Vars |
| | | ########################################################################### |
| | | |
| | | # Enable cluster metrics |
| | | ######################## |
| | | openshift_metrics_install_metrics={{install_metrics}} |
| | | |
| | | {% if install_nfs|bool and not install_glusterfs|bool %} |
| | | openshift_metrics_storage_kind=nfs |
| | | openshift_metrics_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_metrics_storage_nfs_directory=/srv/nfs |
| | | openshift_metrics_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_metrics_storage_volume_name=metrics |
| | | openshift_metrics_storage_volume_size=10Gi |
| | | openshift_metrics_storage_labels={'storage': 'metrics'} |
| | | openshift_metrics_cassandra_pvc_storage_class_name=''
| | | {% endif %} |
| | | {% if install_glusterfs|bool %} |
| | | openshift_metrics_cassandra_storage_type=dynamic |
| | | # Volume size needs to be equal to or smaller than the GlusterBlock volume size
| | | openshift_metrics_storage_volume_size=10Gi |
| | | {% endif %} |
| | | |
| | | openshift_metrics_cassandra_nodeselector={"env":"infra"} |
| | | openshift_metrics_hawkular_nodeselector={"env":"infra"} |
| | | openshift_metrics_heapster_nodeselector={"env":"infra"} |
| | | |
| | | # Add Prometheus Metrics: |
| | | ######################### |
| | | openshift_hosted_prometheus_deploy=true |
| | | openshift_prometheus_node_selector={"env":"infra"} |
| | | openshift_prometheus_namespace=openshift-metrics |
| | | |
| | | # Prometheus |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_storage_kind=dynamic |
| | | openshift_prometheus_storage_volume_size=20Gi |
| | | openshift_prometheus_storage_class=glusterfs-storage |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_storage_kind=nfs |
| | | openshift_prometheus_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_storage_volume_size=10Gi |
| | | openshift_prometheus_storage_labels={'storage': 'prometheus'} |
| | | {% endif %} |
| | | openshift_prometheus_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_storage_volume_name=prometheus |
| | | openshift_prometheus_storage_type='pvc' |
| | | |
| | | # For prometheus-alertmanager |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_alertmanager_storage_kind=dynamic |
| | | openshift_prometheus_alertmanager_storage_class=glusterfs-storage |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_alertmanager_storage_kind=nfs |
| | | openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} |
| | | {% endif %} |
| | | openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_alertmanager_storage_volume_size=10Gi |
| | | openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager |
| | | openshift_prometheus_alertmanager_storage_type='pvc' |
| | | |
| | | # For prometheus-alertbuffer |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_alertbuffer_storage_kind=dynamic |
| | | openshift_prometheus_alertbuffer_storage_class=glusterfs-storage |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_alertbuffer_storage_kind=nfs |
| | | openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} |
| | | {% endif %} |
| | | openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer |
| | | openshift_prometheus_alertbuffer_storage_volume_size=10Gi |
| | | openshift_prometheus_alertbuffer_storage_type='pvc' |
| | | |
| | | # Necessary for 3.9.25 |
| | | openshift_prometheus_node_exporter_image_version=v3.9 |
| | | |
| | | # Enable cluster logging |
| | | ######################## |
| | | openshift_logging_install_logging={{install_logging}} |
| | | |
| | | {% if install_nfs|bool and not install_glusterfs|bool %} |
| | | openshift_logging_storage_kind=nfs |
| | | openshift_logging_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_logging_storage_nfs_directory=/srv/nfs |
| | | openshift_logging_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_logging_storage_volume_name=logging |
| | | openshift_logging_storage_volume_size=10Gi |
| | | openshift_logging_storage_labels={'storage': 'logging'} |
| | | openshift_logging_es_pvc_storage_class_name='' |
| | | {% endif %} |
| | | {% if install_glusterfs|bool %} |
| | | openshift_logging_es_pvc_dynamic=true |
| | | # Volume size needs to be equal to or smaller than the GlusterBlock volume size
| | | openshift_logging_es_pvc_size=10Gi
| | | {% endif %} |
| | | |
| | | openshift_logging_es_cluster_size=1 |
| | | openshift_logging_es_nodeselector={"env":"infra"} |
| | | openshift_logging_kibana_nodeselector={"env":"infra"} |
| | | openshift_logging_curator_nodeselector={"env":"infra"} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Project Management Vars |
| | | ########################################################################### |
| | | |
| | | # Configure additional projects |
| | | # openshift_additional_projects={'my-infra-project-test': {'default_node_selector': 'env=infra'}} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Router and Registry Vars |
| | | ########################################################################### |
| | | |
| | | openshift_hosted_router_replicas={{infranode_instance_count}} |
| | | |
| | | {% if install_lets_encrypt_certificates|bool %} |
| | | openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}} |
| | | {% endif %} |
| | | |
| | | openshift_hosted_registry_replicas=1 |
| | | openshift_hosted_registry_pullthrough=true |
| | | openshift_hosted_registry_acceptschema2=true |
| | | openshift_hosted_registry_enforcequota=true |
| | | |
| | | {% if s3user_access_key is defined %} |
| | | # Registry AWS S3 |
| | | # S3 bucket must already exist. |
| | | openshift_hosted_registry_storage_kind=object |
| | | openshift_hosted_registry_storage_provider=s3 |
| | | openshift_hosted_registry_storage_s3_accesskey={{ s3user_access_key }} |
| | | openshift_hosted_registry_storage_s3_secretkey={{ s3user_secret_access_key }} |
| | | openshift_hosted_registry_storage_s3_bucket={{ project_tag }} |
| | | openshift_hosted_registry_storage_s3_region={{ aws_region }} |
| | | openshift_hosted_registry_storage_s3_chunksize=26214400 |
| | | openshift_hosted_registry_storage_s3_rootdirectory=/registry |
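| | | ## The bucket can be created beforehand with a pre-task such as this sketch
| | | ## (Ansible s3_bucket module; <placeholders> stand for the vars used above):
| | | ##   - name: Ensure the registry bucket exists
| | | ##     s3_bucket:
| | | ##       name: "<project_tag>"
| | | ##       region: "<aws_region>"
| | | ##       state: present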
| | | {% endif %} |
| | | |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Service Catalog Vars |
| | | ########################################################################### |
| | | |
| | | openshift_enable_service_catalog=true |
| | | |
| | | template_service_broker_install=true |
| | | openshift_template_service_broker_namespaces=['openshift'] |
| | | |
| | | ansible_service_broker_install=true |
| | | ansible_service_broker_local_registry_whitelist=['.*-apb$'] |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | openshift_hosted_etcd_storage_kind=dynamic |
| | | # The following setting doesn't work at the moment - the volume still lands on block storage
| | | openshift_hosted_etcd_storage_class=glusterfs-storage |
| | | {% elif install_nfs|bool %} |
| | | openshift_hosted_etcd_storage_kind=nfs |
| | | openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)" |
| | | openshift_hosted_etcd_storage_nfs_directory=/srv/nfs |
| | | openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'} |
| | | {% endif %} |
| | | openshift_hosted_etcd_storage_volume_name=etcd-asb |
| | | openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_hosted_etcd_storage_volume_size=10G |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Hosts |
| | | ########################################################################### |
| | | [OSEv3:children] |
| | | masters |
| | | etcd |
| | | nodes |
| | | {% if install_nfs|bool %} |
| | | nfs |
| | | {% endif %} |
| | | {% if install_glusterfs|bool %} |
| | | glusterfs |
| | | {% endif %} |
| | | {% if groups['newnodes']|d([])|length > 0 %} |
| | | new_nodes |
| | | {% endif %} |
| | | |
| | | [masters] |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} |
| | | {% endfor %} |
| | | |
| | | [etcd] |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} |
| | | {% endfor %} |
| | | |
| | | [nodes] |
| | | ## These are the masters |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | ## These are infranodes |
| | | {% for host in groups['infranodes'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | ## These are regular nodes |
| | | {% for host in groups['nodes'] |
| | | if host not in groups['newnodes']|d([]) |
| | | and host not in groups['glusterfs']|d([]) |
| | | %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | {% if groups['glusterfs']|d([])|length > 0 %} |
| | | ## These are glusterfs nodes |
| | | {% for host in groups['glusterfs'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'glusterfs', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if groups['newnodes']|d([])|length > 0 %} |
| | | # scaleup performed, leave an empty group, see: |
| | | # https://docs.openshift.com/container-platform/3.7/install_config/adding_hosts_to_existing_cluster.html |
| | | [new_nodes] |
| | | {% for host in groups['newnodes'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if install_nfs|bool %} |
| | | [nfs] |
| | | {% for host in [groups['support']|sort|first] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | {% set query = "[?name=='support']|[0].volumes[?purpose=='glusterfs'].device_name" %} |
| | | [glusterfs] |
| | | {% for host in groups['glusterfs'] %} |
| | | {% if cloud_provider == 'ec2' %} |
| | | {{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{instances|json_query(query)|to_json}}' |
| | | {% elif cloud_provider == 'azure' %} |
| | | {{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{ [ hostvars[host].glusterfs_device_name ] |to_json}}' |
| | | {% endif %} |
| | | {% endfor %} |
| | | {% endif %} |
New file |
| | |
| | | # |
| | | # /etc/ansible/hosts file for OpenShift Container Platform 3.9.30 |
| | | # |
| | | |
| | | [OSEv3:vars] |
| | | |
| | | ########################################################################### |
| | | ### Ansible Vars |
| | | ########################################################################### |
| | | timeout=60 |
| | | ansible_become=yes |
| | | ansible_ssh_user={{ansible_ssh_user}} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Basic Vars |
| | | ########################################################################### |
| | | deployment_type=openshift-enterprise |
| | | containerized=false |
| | | openshift_disable_check="disk_availability,memory_availability,docker_image_availability" |
| | | |
| | | {% if container_runtime == "cri-o" %} |
| | | openshift_use_crio=True |
| | | openshift_crio_enable_docker_gc=True |
| | | {% endif %} |
| | | |
| | | # default project node selector |
| | | osm_default_node_selector='env=users' |
| | | openshift_hosted_infra_selector="env=infra" |
| | | |
| | | # Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. |
| | | openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']} |
| | | |
| | | # Configure logrotate scripts |
| | | # See: https://github.com/nickhammond/ansible-logrotate |
| | | logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] |
| | | |
| | | # Necessary for 3.9.30 |
| | | oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version} |
| | | openshift_examples_modify_imagestreams=true |
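| | | ## ${component} and ${version} are literal placeholders that OpenShift
| | | ## substitutes at image pull time (e.g. ose-deployer:v3.9.30); leave them
| | | ## unexpanded here.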
| | | |
| | | {% if install_glusterfs|bool %} |
| | | ########################################################################### |
| | | ### OpenShift CNS |
| | | ########################################################################### |
| | | openshift_master_dynamic_provisioning_enabled=True |
| | | |
| | | # Set up GlusterFS Storage |
| | | openshift_storage_glusterfs_namespace=glusterfs |
| | | openshift_storage_glusterfs_name=storage |
| | | openshift_storage_glusterfs_wipe=True |
| | | openshift_storage_glusterfs_storageclass_default=false |
| | | openshift_storage_glusterfs_storageclass=True |
| | | openshift_storageclass_default=false |
| | | openshift_storage_glusterfs_timeout=500 |
| | | |
| | | # Set up Block Storage |
| | | # Set as default storage class during installation to force Logging/metrics to use it |
| | | # Right now the ASB's ETCD PVC will also land on Block due to this fact |
| | | openshift_storage_glusterfs_block_deploy=True |
| | | openshift_storage_glusterfs_block_host_vol_size=100 |
| | | openshift_storage_glusterfs_block_storageclass=True |
| | | openshift_storage_glusterfs_block_storageclass_default=True |
| | | openshift_storage_glusterfs_block_host_vol_create=True |
| | | |
| | | # Run these commands after installation on one of the masters: |
| | | # oc patch storageclass glusterfs-storage -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}' |
| | | # oc patch storageclass glusterfs-block -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}' |
| | | |
| | | {% else %} |
| | | # This line is required to enable NFS (an otherwise unsupported configuration)
| | | openshift_enable_unsupported_configurations=True |
| | | {% endif %} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Cockpit Vars |
| | | ########################################################################### |
| | | |
| | | # Enable cockpit |
| | | osm_use_cockpit=true |
| | | osm_cockpit_plugins=['cockpit-kubernetes'] |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Master Vars |
| | | ########################################################################### |
| | | |
| | | openshift_master_api_port={{master_api_port}} |
| | | openshift_master_console_port={{master_api_port}} |
| | | |
| | | openshift_master_cluster_method=native |
| | | openshift_master_cluster_hostname={{master_lb_dns}} |
| | | openshift_master_cluster_public_hostname={{master_lb_dns}} |
| | | openshift_master_default_subdomain={{cloudapps_suffix}} |
| | | openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}} |
| | | |
| | | {% if install_lets_encrypt_certificates|bool %} |
| | | openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}} |
| | | {% endif %} |
| | | |
| | | openshift_set_hostname=True |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Network Vars |
| | | ########################################################################### |
| | | |
| | | osm_cluster_network_cidr=10.1.0.0/16 |
| | | openshift_portal_net=172.30.0.0/16 |
| | | |
| | | #os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy' |
| | | {{multi_tenant_setting}} |
| | | |
| | | # This should be turned on once all dependent scripts use firewalld rather than iptables |
| | | # os_firewall_use_firewalld=True |
| | | |
| | | ########################################################################### |
| | | ### OpenShift admission plugin config |
| | | ########################################################################### |
| | | |
| | | {% if install_openwhisk|bool %} |
| | | #TODO: add imagePolicy as it is in default |
| | | openshift_master_admission_plugin_config={"openshift.io/ImagePolicy":{"configuration":{"apiVersion":"v1","kind":"ImagePolicyConfig","resolveImages": "AttemptRewrite"}}} |
| | | {% endif %} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Authentication Vars |
| | | ########################################################################### |
| | | |
| | | {% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %} |
| | | {{openshift_master_ldap_ca_file}} |
| | | {% endif %} |
| | | |
| | | {% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %} |
| | | openshift_master_htpasswd_file=/root/htpasswd.openshift |
| | | {% endif %} |
| | | |
| | | openshift_master_identity_providers='{{identity_providers|to_json}}' |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Metrics and Logging Vars |
| | | ########################################################################### |
| | | |
| | | # Enable cluster metrics |
| | | ######################## |
| | | openshift_metrics_install_metrics={{install_metrics}} |
| | | |
| | | {% if install_nfs|bool and not install_glusterfs|bool %} |
| | | openshift_metrics_storage_kind=nfs |
| | | openshift_metrics_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_metrics_storage_nfs_directory=/srv/nfs |
| | | openshift_metrics_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_metrics_storage_volume_name=metrics |
| | | openshift_metrics_storage_volume_size=10Gi |
| | | openshift_metrics_storage_labels={'storage': 'metrics'} |
| | | openshift_metrics_cassandra_pvc_storage_class_name=''
| | | {% endif %} |
| | | {% if install_glusterfs|bool %} |
| | | openshift_metrics_cassandra_storage_type=dynamic |
| | | # Volume size needs to be equal to or smaller than the GlusterBlock volume size
| | | openshift_metrics_storage_volume_size=10Gi |
| | | {% endif %} |
| | | |
| | | openshift_metrics_cassandra_nodeselector={"env":"infra"} |
| | | openshift_metrics_hawkular_nodeselector={"env":"infra"} |
| | | openshift_metrics_heapster_nodeselector={"env":"infra"} |
| | | |
| | | # Add Prometheus Metrics: |
| | | ######################### |
| | | openshift_hosted_prometheus_deploy=true |
| | | openshift_prometheus_node_selector={"env":"infra"} |
| | | openshift_prometheus_namespace=openshift-metrics |
| | | |
| | | # Prometheus |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_storage_kind=dynamic |
| | | openshift_prometheus_storage_volume_size=20Gi |
| | | openshift_prometheus_storage_class=glusterfs-storage |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_storage_kind=nfs |
| | | openshift_prometheus_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_storage_volume_size=10Gi |
| | | openshift_prometheus_storage_labels={'storage': 'prometheus'} |
| | | {% endif %} |
| | | openshift_prometheus_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_storage_volume_name=prometheus |
| | | openshift_prometheus_storage_type='pvc' |
| | | |
| | | # For prometheus-alertmanager |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_alertmanager_storage_kind=dynamic |
| | | openshift_prometheus_alertmanager_storage_class=glusterfs-storage |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_alertmanager_storage_kind=nfs |
| | | openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} |
| | | {% endif %} |
| | | openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_alertmanager_storage_volume_size=10Gi |
| | | openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager |
| | | openshift_prometheus_alertmanager_storage_type='pvc' |
| | | |
| | | # For prometheus-alertbuffer |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_alertbuffer_storage_kind=dynamic |
| | | openshift_prometheus_alertbuffer_storage_class=glusterfs-storage |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_alertbuffer_storage_kind=nfs |
| | | openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} |
| | | {% endif %} |
| | | openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer |
| | | openshift_prometheus_alertbuffer_storage_volume_size=10Gi |
| | | openshift_prometheus_alertbuffer_storage_type='pvc' |
| | | |
| | | # Necessary for 3.9.25 |
| | | # openshift_prometheus_node_exporter_image_version=v3.9 |
| | | |
| | | # Enable cluster logging |
| | | ######################## |
| | | openshift_logging_install_logging={{install_logging}} |
| | | |
| | | {% if install_nfs|bool and not install_glusterfs|bool %} |
| | | openshift_logging_storage_kind=nfs |
| | | openshift_logging_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_logging_storage_nfs_directory=/srv/nfs |
| | | openshift_logging_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_logging_storage_volume_name=logging |
| | | openshift_logging_storage_volume_size=10Gi |
| | | openshift_logging_storage_labels={'storage': 'logging'} |
| | | openshift_logging_es_pvc_storage_class_name='' |
| | | {% endif %} |
| | | {% if install_glusterfs|bool %} |
| | | openshift_logging_es_pvc_dynamic=true |
| | | # Volume size needs to be equal to or smaller than the GlusterBlock volume size
| | | openshift_logging_es_pvc_size=10Gi
| | | {% endif %} |
| | | |
| | | openshift_logging_es_cluster_size=1 |
| | | openshift_logging_es_nodeselector={"env":"infra"} |
| | | openshift_logging_kibana_nodeselector={"env":"infra"} |
| | | openshift_logging_curator_nodeselector={"env":"infra"} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Project Management Vars |
| | | ########################################################################### |
| | | |
| | | # Configure additional projects |
| | | # openshift_additional_projects={'my-infra-project-test': {'default_node_selector': 'env=infra'}} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Router and Registry Vars |
| | | ########################################################################### |
| | | |
| | | openshift_hosted_router_replicas={{infranode_instance_count}} |
| | | |
| | | {% if install_lets_encrypt_certificates|bool %} |
| | | openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}} |
| | | {% endif %} |
| | | |
| | | openshift_hosted_registry_replicas=1 |
| | | openshift_hosted_registry_pullthrough=true |
| | | openshift_hosted_registry_acceptschema2=true |
| | | openshift_hosted_registry_enforcequota=true |
| | | |
| | | {% if s3user_access_key is defined %} |
| | | # Registry AWS S3 |
| | | # S3 bucket must already exist. |
| | | openshift_hosted_registry_storage_kind=object |
| | | openshift_hosted_registry_storage_provider=s3 |
| | | openshift_hosted_registry_storage_s3_accesskey={{ s3user_access_key }} |
| | | openshift_hosted_registry_storage_s3_secretkey={{ s3user_secret_access_key }} |
| | | openshift_hosted_registry_storage_s3_bucket={{ project_tag }} |
| | | openshift_hosted_registry_storage_s3_region={{ aws_region }} |
| | | openshift_hosted_registry_storage_s3_chunksize=26214400 |
| | | openshift_hosted_registry_storage_s3_rootdirectory=/registry |
| | | {% endif %} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Service Catalog Vars |
| | | ########################################################################### |
| | | |
| | | openshift_enable_service_catalog=true |
| | | |
| | | template_service_broker_install=true |
| | | openshift_template_service_broker_namespaces=['openshift'] |
| | | |
| | | ansible_service_broker_install=true |
| | | ansible_service_broker_local_registry_whitelist=['.*-apb$'] |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | openshift_hosted_etcd_storage_kind=dynamic |
| | | # The following setting doesn't work at the moment - the volume still lands on block storage
| | | openshift_hosted_etcd_storage_class=glusterfs-storage |
| | | {% elif install_nfs|bool %} |
| | | openshift_hosted_etcd_storage_kind=nfs |
| | | openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)" |
| | | openshift_hosted_etcd_storage_nfs_directory=/srv/nfs |
| | | openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'} |
| | | {% endif %} |
| | | openshift_hosted_etcd_storage_volume_name=etcd-asb |
| | | openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_hosted_etcd_storage_volume_size=10G |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Hosts |
| | | ########################################################################### |
| | | [OSEv3:children] |
| | | masters |
| | | etcd |
| | | nodes |
| | | {% if install_nfs|bool %} |
| | | nfs |
| | | {% endif %} |
| | | {% if install_glusterfs|bool %} |
| | | glusterfs |
| | | {% endif %} |
| | | {% if groups['newnodes']|d([])|length > 0 %} |
| | | new_nodes |
| | | {% endif %} |
| | | |
| | | [masters] |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} |
| | | {% endfor %} |
| | | |
| | | [etcd] |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} |
| | | {% endfor %} |
| | | |
| | | [nodes] |
| | | ## These are the masters |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | ## These are infranodes |
| | | {% for host in groups['infranodes'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | ## These are regular nodes |
| | | {% for host in groups['nodes'] |
| | | if host not in groups['newnodes']|d([]) |
| | | and host not in groups['glusterfs']|d([]) |
| | | %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | {% if groups['glusterfs']|d([])|length > 0 %} |
| | | ## These are glusterfs nodes |
| | | {% for host in groups['glusterfs'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'glusterfs', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if groups['newnodes']|d([])|length > 0 %} |
| | | # scaleup performed, leave an empty group, see: |
| | | # https://docs.openshift.com/container-platform/3.7/install_config/adding_hosts_to_existing_cluster.html |
| | | [new_nodes] |
| | | {% for host in groups['newnodes'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if install_nfs|bool %} |
| | | [nfs] |
| | | {% for host in [groups['support']|sort|first] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | {% set query = "[?name=='support']|[0].volumes[?purpose=='glusterfs'].device_name" %} |
| | | [glusterfs] |
| | | {% for host in groups['glusterfs'] %} |
| | | {% if cloud_provider == 'ec2' %} |
| | | {{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{instances|json_query(query)|to_json}}' |
| | | {% elif cloud_provider == 'azure' %} |
| | | {{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{ [ hostvars[host].glusterfs_device_name ] |to_json}}' |
| | | {% endif %} |
| | | {% endfor %} |
| | | {% endif %} |
New file |
| | |
| | | # |
| | | # /etc/ansible/hosts file for OpenShift Container Platform 3.9.30 |
| | | # |
| | | |
| | | [OSEv3:vars] |
| | | |
| | | ########################################################################### |
| | | ### Ansible Vars |
| | | ########################################################################### |
| | | timeout=60 |
| | | ansible_become=yes |
| | | ansible_ssh_user={{ansible_ssh_user}} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Basic Vars |
| | | ########################################################################### |
| | | deployment_type=openshift-enterprise |
| | | containerized=false |
| | | openshift_disable_check="disk_availability,memory_availability,docker_image_availability" |
| | | |
| | | {% if container_runtime == "cri-o" %} |
| | | openshift_use_crio=True |
| | | openshift_crio_enable_docker_gc=True |
| | | {% endif %} |
| | | |
| | | # default project node selector |
| | | osm_default_node_selector='env=users' |
| | | openshift_hosted_infra_selector="env=infra" |
| | | |
| | | # Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. |
| | | openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']} |
| | | |
| | | # Configure logrotate scripts |
| | | # See: https://github.com/nickhammond/ansible-logrotate |
| | | logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] |
| | | |
| | | # Necessary for 3.9.30 |
| | | oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version} |
| | | openshift_examples_modify_imagestreams=true |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | ########################################################################### |
| | | ### OpenShift CNS |
| | | ########################################################################### |
| | | openshift_master_dynamic_provisioning_enabled=True |
| | | |
| | | # Set up GlusterFS Storage |
| | | openshift_storage_glusterfs_namespace=glusterfs |
| | | openshift_storage_glusterfs_name=storage |
| | | openshift_storage_glusterfs_wipe=True |
| | | openshift_storage_glusterfs_storageclass_default=false |
| | | openshift_storage_glusterfs_storageclass=True |
| | | openshift_storageclass_default=false |
| | | openshift_storage_glusterfs_timeout=500 |
| | | |
| | | # Set up Block Storage |
| | | # Set as default storage class during installation to force Logging/metrics to use it |
| | | # Right now the ASB's ETCD PVC will also land on Block due to this fact |
| | | openshift_storage_glusterfs_block_deploy=True |
| | | openshift_storage_glusterfs_block_host_vol_size=100 |
| | | openshift_storage_glusterfs_block_storageclass=True |
| | | openshift_storage_glusterfs_block_storageclass_default=True |
| | | openshift_storage_glusterfs_block_host_vol_create=True |
| | | |
| | | # Run these commands after installation on one of the masters: |
| | | # oc patch storageclass glusterfs-storage -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}' |
| | | # oc patch storageclass glusterfs-block -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}' |
| | | |
| | | {% else %} |
| | | # This line is required to enable NFS (an otherwise unsupported configuration)
| | | openshift_enable_unsupported_configurations=True |
| | | {% endif %} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Cockpit Vars |
| | | ########################################################################### |
| | | |
| | | # Enable cockpit |
| | | osm_use_cockpit=true |
| | | osm_cockpit_plugins=['cockpit-kubernetes'] |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Master Vars |
| | | ########################################################################### |
| | | |
| | | openshift_master_api_port={{master_api_port}} |
| | | openshift_master_console_port={{master_api_port}} |
| | | |
| | | openshift_master_cluster_method=native |
| | | openshift_master_cluster_hostname={{master_lb_dns}} |
| | | openshift_master_cluster_public_hostname={{master_lb_dns}} |
| | | openshift_master_default_subdomain={{cloudapps_suffix}} |
| | | openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}} |
| | | |
| | | {% if install_lets_encrypt_certificates|bool %} |
| | | openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}} |
| | | {% endif %} |
| | | |
| | | openshift_set_hostname=True |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Network Vars |
| | | ########################################################################### |
| | | |
| | | osm_cluster_network_cidr=10.1.0.0/16 |
| | | openshift_portal_net=172.30.0.0/16 |
| | | |
| | | #os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy' |
| | | {{multi_tenant_setting}} |
| | | |
| | | # This should be turned on once all dependent scripts use firewalld rather than iptables |
| | | # os_firewall_use_firewalld=True |
| | | |
| | | ########################################################################### |
| | | ### OpenShift admission plugin config |
| | | ########################################################################### |
| | | |
| | | {% if install_openwhisk|bool %} |
| | | #TODO: add imagePolicy as it is in default |
| | | openshift_master_admission_plugin_config={"openshift.io/ImagePolicy":{"configuration":{"apiVersion":"v1","kind":"ImagePolicyConfig","resolveImages": "AttemptRewrite"}}} |
| | | {% endif %} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Authentication Vars |
| | | ########################################################################### |
| | | |
| | | {% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %} |
| | | {{openshift_master_ldap_ca_file}} |
| | | {% endif %} |
| | | |
| | | {% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %} |
| | | openshift_master_htpasswd_file=/root/htpasswd.openshift |
| | | {% endif %} |
| | | |
| | | openshift_master_identity_providers='{{identity_providers|to_json}}' |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Metrics and Logging Vars |
| | | ########################################################################### |
| | | |
| | | # Enable cluster metrics |
| | | ######################## |
| | | openshift_metrics_install_metrics={{install_metrics}} |
| | | |
| | | {% if install_nfs|bool and not install_glusterfs|bool %} |
| | | openshift_metrics_storage_kind=nfs |
| | | openshift_metrics_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_metrics_storage_nfs_directory=/srv/nfs |
| | | openshift_metrics_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_metrics_storage_volume_name=metrics |
| | | openshift_metrics_storage_volume_size=10Gi |
| | | openshift_metrics_storage_labels={'storage': 'metrics'} |
| | | openshift_metrics_cassandra_pvc_storage_class_name=''
| | | {% endif %} |
| | | {% if install_glusterfs|bool %} |
| | | openshift_metrics_cassandra_storage_type=dynamic |
| | | # Volume size needs to be equal to or smaller than the GlusterBlock volume size
| | | openshift_metrics_storage_volume_size=10Gi |
| | | {% endif %} |
| | | |
| | | openshift_metrics_cassandra_nodeselector={"env":"infra"} |
| | | openshift_metrics_hawkular_nodeselector={"env":"infra"} |
| | | openshift_metrics_heapster_nodeselector={"env":"infra"} |
| | | |
| | | # Add Prometheus Metrics: |
| | | ######################### |
| | | openshift_hosted_prometheus_deploy=true |
| | | openshift_prometheus_node_selector={"env":"infra"} |
| | | openshift_prometheus_namespace=openshift-metrics |
| | | |
| | | # Prometheus |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_storage_kind=dynamic |
| | | openshift_prometheus_storage_volume_size=20Gi |
| | | openshift_prometheus_storage_class=glusterfs-storage |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_storage_kind=nfs |
| | | openshift_prometheus_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_storage_volume_size=10Gi |
| | | openshift_prometheus_storage_labels={'storage': 'prometheus'} |
| | | {% endif %} |
| | | openshift_prometheus_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_storage_volume_name=prometheus |
| | | openshift_prometheus_storage_type='pvc' |
| | | |
| | | # For prometheus-alertmanager |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_alertmanager_storage_kind=dynamic |
| | | openshift_prometheus_alertmanager_storage_class=glusterfs-storage |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_alertmanager_storage_kind=nfs |
| | | openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} |
| | | {% endif %} |
| | | openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_alertmanager_storage_volume_size=10Gi |
| | | openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager |
| | | openshift_prometheus_alertmanager_storage_type='pvc' |
| | | |
| | | # For prometheus-alertbuffer |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_alertbuffer_storage_kind=dynamic |
| | | openshift_prometheus_alertbuffer_storage_class=glusterfs-storage |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_alertbuffer_storage_kind=nfs |
| | | openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} |
| | | {% endif %} |
| | | openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer |
| | | openshift_prometheus_alertbuffer_storage_volume_size=10Gi |
| | | openshift_prometheus_alertbuffer_storage_type='pvc' |
| | | |
| | | # Necessary for 3.9.25 |
| | | # openshift_prometheus_node_exporter_image_version=v3.9 |
| | | |
| | | # Enable cluster logging |
| | | ######################## |
| | | openshift_logging_install_logging={{install_logging}} |
| | | |
| | | {% if install_nfs|bool and not install_glusterfs|bool %} |
| | | openshift_logging_storage_kind=nfs |
| | | openshift_logging_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_logging_storage_nfs_directory=/srv/nfs |
| | | openshift_logging_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_logging_storage_volume_name=logging |
| | | openshift_logging_storage_volume_size=10Gi |
| | | openshift_logging_storage_labels={'storage': 'logging'} |
| | | openshift_logging_es_pvc_storage_class_name='' |
| | | {% endif %} |
| | | {% if install_glusterfs|bool %} |
| | | openshift_logging_es_pvc_dynamic=true |
| | | # Volume size needs to be equal to or smaller than the GlusterBlock volume size
| | | openshift_logging_es_pvc_size=10Gi
| | | {% endif %} |
| | | |
| | | openshift_logging_es_cluster_size=1 |
| | | openshift_logging_es_nodeselector={"env":"infra"} |
| | | openshift_logging_kibana_nodeselector={"env":"infra"} |
| | | openshift_logging_curator_nodeselector={"env":"infra"} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Project Management Vars |
| | | ########################################################################### |
| | | |
| | | # Configure additional projects |
| | | # openshift_additional_projects={'my-infra-project-test': {'default_node_selector': 'env=infra'}} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Router and Registry Vars |
| | | ########################################################################### |
| | | |
| | | openshift_hosted_router_replicas={{infranode_instance_count}} |
| | | |
| | | {% if install_lets_encrypt_certificates|bool %} |
| | | openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}} |
| | | {% endif %} |
| | | |
| | | openshift_hosted_registry_replicas=1 |
| | | openshift_hosted_registry_pullthrough=true |
| | | openshift_hosted_registry_acceptschema2=true |
| | | openshift_hosted_registry_enforcequota=true |
| | | |
| | | {% if s3user_access_key is defined %} |
| | | # Registry AWS S3 |
| | | # S3 bucket must already exist. |
| | | openshift_hosted_registry_storage_kind=object |
| | | openshift_hosted_registry_storage_provider=s3 |
| | | openshift_hosted_registry_storage_s3_accesskey={{ s3user_access_key }} |
| | | openshift_hosted_registry_storage_s3_secretkey={{ s3user_secret_access_key }} |
| | | openshift_hosted_registry_storage_s3_bucket={{ project_tag }} |
| | | openshift_hosted_registry_storage_s3_region={{ aws_region }} |
| | | openshift_hosted_registry_storage_s3_chunksize=26214400 |
| | | openshift_hosted_registry_storage_s3_rootdirectory=/registry |
| | | {% endif %} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Service Catalog Vars |
| | | ########################################################################### |
| | | |
| | | openshift_enable_service_catalog=true |
| | | |
| | | template_service_broker_install=true |
| | | openshift_template_service_broker_namespaces=['openshift'] |
| | | |
| | | ansible_service_broker_install=true |
| | | ansible_service_broker_local_registry_whitelist=['.*-apb$'] |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | openshift_hosted_etcd_storage_kind=dynamic |
| | | # The following setting doesn't work at the moment - the volume still lands on block storage
| | | openshift_hosted_etcd_storage_class=glusterfs-storage |
| | | {% elif install_nfs|bool %} |
| | | openshift_hosted_etcd_storage_kind=nfs |
| | | openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)" |
| | | openshift_hosted_etcd_storage_nfs_directory=/srv/nfs |
| | | openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'} |
| | | {% endif %} |
| | | openshift_hosted_etcd_storage_volume_name=etcd-asb |
| | | openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_hosted_etcd_storage_volume_size=10G |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Hosts |
| | | ########################################################################### |
| | | [OSEv3:children] |
| | | masters |
| | | etcd |
| | | nodes |
| | | {% if install_nfs|bool %} |
| | | nfs |
| | | {% endif %} |
| | | {% if install_glusterfs|bool %} |
| | | glusterfs |
| | | {% endif %} |
| | | {% if groups['newnodes']|d([])|length > 0 %} |
| | | new_nodes |
| | | {% endif %} |
| | | |
| | | [masters] |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} |
| | | {% endfor %} |
| | | |
| | | [etcd] |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} |
| | | {% endfor %} |
| | | |
| | | [nodes] |
| | | ## These are the masters |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | ## These are infranodes |
| | | {% for host in groups['infranodes'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | ## These are regular nodes |
| | | {% for host in groups['nodes'] |
| | | if host not in groups['newnodes']|d([]) |
| | | and host not in groups['glusterfs']|d([]) |
| | | %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | |
| | | {% if groups['glusterfs']|d([])|length > 0 %} |
| | | ## These are glusterfs nodes |
| | | {% for host in groups['glusterfs'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'glusterfs', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if groups['newnodes']|d([])|length > 0 %} |
| | | # scaleup performed, leave an empty group, see: |
| | | # https://docs.openshift.com/container-platform/3.7/install_config/adding_hosts_to_existing_cluster.html |
| | | [new_nodes] |
| | | {% for host in groups['newnodes'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'runtime': '{{container_runtime}}'}" |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if install_nfs|bool %} |
| | | [nfs] |
| | | {% for host in [groups['support']|sort|first] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | {% set query = "[?name=='support']|[0].volumes[?purpose=='glusterfs'].device_name" %} |
| | | [glusterfs] |
| | | {% for host in groups['glusterfs'] %} |
| | | {% if cloud_provider == 'ec2' %} |
| | | {{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{instances|json_query(query)|to_json}}' |
| | | {% elif cloud_provider == 'azure' %} |
| | | {{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{ [ hostvars[host].glusterfs_device_name ] |to_json}}' |
| | | {% endif %} |
| | | {% endfor %} |
| | | {% endif %} |
New file |
| | |
| | | [OSEv3:vars] |
| | | |
| | | ########################################################################### |
| | | ### Ansible Vars |
| | | ###########################################################################
| | | timeout=60
| | | ansible_become=yes |
| | | ansible_ssh_user={{ansible_ssh_user}} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Basic Vars |
| | | ########################################################################### |
| | | deployment_type=openshift-enterprise |
| | | containerized=false |
| | | openshift_disable_check="disk_availability,memory_availability,docker_image_availability" |
| | | |
| | | # default project node selector |
| | | osm_default_node_selector='env=users' |
| | | openshift_hosted_infra_selector={"env":"infra"} |
| | | |
| | | # Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. |
| | | openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']} |
| | | |
| | | # Configure logrotate scripts |
| | | # See: https://github.com/nickhammond/ansible-logrotate |
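| | | # Each list entry becomes /etc/logrotate.d/<name>: "path" holds the
| | | # newline-separated log files and "scripts" supplies the postrotate stanza.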
| | | logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] |
| | | |
| | | {% if osrelease | version_compare('3.7', '<') %} |
| | | # Anything before 3.7 |
| | | openshift_metrics_image_version=v{{ repo_version }} |
| | | #openshift_image_tag=v{{ repo_version }} |
| | | #openshift_release={{ osrelease }} |
| | | #docker_version="{{docker_version}}" |
| | | {% endif %} |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | ########################################################################### |
| | | ### Glusterfs |
| | | ########################################################################### |
| | | openshift_storage_glusterfs_namespace=glusterfs |
| | | openshift_storage_glusterfs_name=storage |
| | | openshift_storage_glusterfs_storageclass_default=true |
| | | #openshift_storage_glusterfs_wipe=True |
| | | openshift_master_dynamic_provisioning_enabled=True |
| | | #dynamic_volumes_check=False |
| | | openshift_storage_glusterfs_timeout=600 |
| | | {% endif %} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Optional Vars |
| | | ########################################################################### |
| | | |
| | | # Enable cockpit |
| | | osm_use_cockpit=true |
| | | osm_cockpit_plugins=['cockpit-kubernetes'] |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Master Vars |
| | | ########################################################################### |
| | | |
| | | openshift_master_api_port={{master_api_port}} |
| | | openshift_master_console_port={{master_api_port}} |
| | | |
| | | openshift_master_cluster_method=native |
| | | openshift_master_cluster_hostname={{master_lb_dns}} |
| | | openshift_master_cluster_public_hostname={{master_lb_dns}} |
| | | openshift_master_default_subdomain={{cloudapps_suffix}} |
| | | openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}} |
| | | |
| | | {% if install_lets_encrypt_certificates|bool %} |
| | | openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}} |
| | | {% endif %} |
| | | |
| | | openshift_set_hostname=True |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Network Vars |
| | | ########################################################################### |
| | | |
| | | osm_cluster_network_cidr=10.1.0.0/16 |
| | | openshift_portal_net=172.30.0.0/16 |
| | | |
| | | #os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' |
| | | {{multi_tenant_setting}} |
| | | |
| | | {% if osrelease | version_compare('3.7', '>=') %} |
| | | # This should be turned on once all dependent scripts use firewalld rather than iptables |
| | | # os_firewall_use_firewalld=True |
| | | {% endif %} |
| | | |
| | | {% if osrelease | version_compare('3.7', '>=') %} |
| | | ########################################################################### |
| | | ### OpenShift admission plugin config |
| | | ########################################################################### |
| | | |
| | | #keep default |
| | | #openshift_master_admission_plugin_config={} |
| | | {% endif %} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Authentication Vars |
| | | ########################################################################### |
| | | |
| | | {% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %} |
| | | {{openshift_master_ldap_ca_file}} |
| | | {% endif %} |
| | | |
| | | {% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %} |
| | | openshift_master_htpasswd_file=/root/htpasswd.openshift |
| | | {% endif %} |
| | | |
| | | openshift_master_identity_providers='{{identity_providers|to_json}}' |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Metrics and Logging Vars |
| | | ########################################################################### |
| | | |
| | | # Enable cluster metrics |
| | | {% if osrelease | version_compare('3.7', '>=') %} |
| | | |
| | | openshift_metrics_install_metrics={{install_metrics}} |
| | | openshift_hosted_metrics_deploy={{install_metrics}} |
| | | |
| | | {% if install_nfs|bool and not install_glusterfs|bool %} |
| | | openshift_hosted_metrics_storage_kind=nfs |
| | | openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_hosted_metrics_storage_nfs_directory=/srv/nfs |
| | | openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_hosted_metrics_storage_volume_size=10Gi |
| | | openshift_hosted_metrics_storage_labels={'storage': 'metrics'} |
| | | openshift_metrics_cassandra_pvc_storage_class_name=''
| | | openshift_hosted_metrics_storage_volume_name=metrics |
| | | {% endif %} |
| | | |
| | | ## Add Prometheus Metrics: |
| | | openshift_hosted_prometheus_deploy=true |
| | | openshift_prometheus_node_selector={"env":"infra"} |
| | | openshift_prometheus_namespace=openshift-metrics |
| | | |
| | | # Prometheus |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_storage_kind=dynamic |
| | | openshift_prometheus_storage_volume_size=20Gi |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_storage_kind=nfs |
| | | openshift_prometheus_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_storage_volume_size=10Gi |
| | | openshift_prometheus_storage_labels={'storage': 'prometheus'} |
| | | {% endif %} |
| | | openshift_prometheus_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_storage_volume_name=prometheus |
| | | openshift_prometheus_storage_type='pvc' |
| | | # For prometheus-alertmanager |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_alertmanager_storage_kind=dynamic |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_alertmanager_storage_kind=nfs |
| | | openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} |
| | | {% endif %} |
| | | openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_alertmanager_storage_volume_size=10Gi |
| | | openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager |
| | | openshift_prometheus_alertmanager_storage_type='pvc' |
| | | # For prometheus-alertbuffer |
| | | {% if install_glusterfs|bool %} |
| | | openshift_prometheus_alertbuffer_storage_kind=dynamic |
| | | {% elif install_nfs|bool %} |
| | | openshift_prometheus_alertbuffer_storage_kind=nfs |
| | | openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs |
| | | openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} |
| | | {% endif %} |
| | | openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer |
| | | openshift_prometheus_alertbuffer_storage_volume_size=10Gi |
| | | openshift_prometheus_alertbuffer_storage_type='pvc' |
| | | |
| | | {% else %} |
| | | |
| | | openshift_hosted_metrics_deploy={{install_metrics}} |
| | | |
| | | {% if install_nfs|bool %} |
| | | openshift_hosted_metrics_storage_kind=nfs |
| | | openshift_hosted_metrics_storage_host=support1.{{guid}}.internal |
| | | openshift_hosted_metrics_storage_nfs_directory=/srv/nfs |
| | | openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)' |
| | | {% endif %} |
| | | openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_hosted_metrics_storage_volume_name=metrics |
| | | openshift_hosted_metrics_storage_volume_size=10Gi |
| | | |
| | | openshift_hosted_metrics_public_url=https://hawkular-metrics.{{cloudapps_suffix}}/hawkular/metrics |
| | | |
| | | {% endif %} |
| | | openshift_metrics_cassandra_nodeselector={"env":"infra"} |
| | | openshift_metrics_hawkular_nodeselector={"env":"infra"} |
| | | openshift_metrics_heapster_nodeselector={"env":"infra"} |
| | | |
| | | # Enable cluster logging |
| | | {% if osrelease | version_compare('3.7', '>=') %} |
| | | |
| | | openshift_logging_install_logging={{install_logging}} |
| | | openshift_hosted_logging_deploy={{install_logging}} |
| | | |
| | | {% if install_nfs|bool and not install_glusterfs|bool %} |
| | | openshift_hosted_logging_storage_kind=nfs |
| | | openshift_hosted_logging_storage_nfs_directory=/srv/nfs |
| | | openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)' |
| | | openshift_hosted_logging_storage_volume_size=10Gi |
| | | openshift_hosted_logging_storage_labels={'storage': 'logging'} |
| | | openshift_logging_es_pvc_storage_class_name='' |
| | | openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_hosted_logging_storage_volume_name=logging |
| | | {% endif %} |
| | | |
| | | openshift_logging_es_cluster_size=1 |
| | | |
| | | {% else %} |
| | | |
| | | openshift_hosted_logging_deploy={{install_logging}} |
| | | openshift_master_logging_public_url=https://kibana.{{cloudapps_suffix}} |
| | | |
| | | {% if install_nfs|bool %} |
| | | openshift_hosted_logging_storage_kind=nfs |
| | | openshift_hosted_logging_storage_nfs_directory=/srv/nfs |
| | | openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)' |
| | | {% endif %} |
| | | openshift_hosted_logging_storage_access_modes=['ReadWriteOnce'] |
| | | openshift_hosted_logging_storage_volume_name=logging |
| | | openshift_hosted_logging_storage_volume_size=10Gi |
| | | openshift_hosted_logging_hostname=kibana.{{cloudapps_suffix}} |
| | | openshift_hosted_logging_elasticsearch_cluster_size=1 |
| | | openshift_hosted_logging_deployer_version=v{{repo_version}} |
| | | {% endif %} |
| | | |
| | | openshift_logging_es_nodeselector={"env":"infra"} |
| | | openshift_logging_kibana_nodeselector={"env":"infra"} |
| | | openshift_logging_curator_nodeselector={"env":"infra"} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Project Management Vars |
| | | ########################################################################### |
| | | |
| | | # Configure additional projects |
| | | # openshift_additional_projects={'my-infra-project-test': {'default_node_selector': 'env=infra'}} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Router and Registry Vars |
| | | ########################################################################### |
| | | |
| | | openshift_hosted_router_selector='env=infra' |
| | | openshift_hosted_router_replicas={{infranode_instance_count}} |
| | | |
| | | {% if install_lets_encrypt_certificates|bool %} |
| | | openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}} |
| | | {% endif %} |
| | | |
| | | openshift_hosted_registry_selector='env=infra' |
| | | openshift_hosted_registry_replicas=1 |
| | | openshift_hosted_registry_pullthrough=true |
| | | openshift_hosted_registry_acceptschema2=true |
| | | openshift_hosted_registry_enforcequota=true |
| | | |
| | | {% if s3user_access_key is defined %} |
| | | # Registry AWS S3 |
| | | # S3 bucket must already exist. |
| | | openshift_hosted_registry_storage_kind=object |
| | | openshift_hosted_registry_storage_provider=s3 |
| | | openshift_hosted_registry_storage_s3_accesskey={{ s3user_access_key }} |
| | | openshift_hosted_registry_storage_s3_secretkey={{ s3user_secret_access_key }} |
| | | openshift_hosted_registry_storage_s3_bucket={{ project_tag }} |
| | | openshift_hosted_registry_storage_s3_region={{ aws_region }} |
| | | openshift_hosted_registry_storage_s3_chunksize=26214400 |
| | | openshift_hosted_registry_storage_s3_rootdirectory=/registry |
| | | {% endif %} |
| | | |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Service Catalog Vars |
| | | ########################################################################### |
| | | |
| | | {% if osrelease | version_compare('3.7', '>=') %} |
| | | openshift_enable_service_catalog=true |
| | | template_service_broker_install=true |
| | | template_service_broker_selector={"env":"infra"} |
| | | |
| | | # Workaround: the Ansible Service Broker install does not currently work, so keep it disabled
| | | ansible_service_broker_install=false |
| | | openshift_template_service_broker_namespaces=['openshift'] |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | openshift_hosted_etcd_storage_kind=dynamic |
| | | {% elif install_nfs|bool %} |
| | | openshift_hosted_etcd_storage_kind=nfs |
| | | openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)" |
| | | openshift_hosted_etcd_storage_nfs_directory=/srv/nfs |
| | | openshift_hosted_etcd_storage_labels={'storage': 'etcd'} |
| | | {% endif %} |
| | | openshift_hosted_etcd_storage_volume_name=etcd-vol2 |
| | | openshift_hosted_etcd_storage_access_modes=["ReadWriteOnce"] |
| | | openshift_hosted_etcd_storage_volume_size=5G |
| | | {% endif %} |
| | | |
| | | ########################################################################### |
| | | ### OpenShift Hosts |
| | | ########################################################################### |
| | | [OSEv3:children] |
| | | masters |
| | | etcd |
| | | nodes |
| | | {% if install_nfs|bool %} |
| | | nfs |
| | | {% endif %} |
| | | {% if install_glusterfs|bool %} |
| | | glusterfs |
| | | {% endif %} |
| | | {% if groups['newnodes']|d([])|length > 0 %} |
| | | new_nodes |
| | | {% endif %} |
| | | |
| | | [masters] |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}} |
| | | {% endfor %} |
| | | |
| | | [etcd] |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}} |
| | | {% endfor %} |
| | | |
| | | [nodes] |
| | | ## These are the masters |
| | | {% for host in groups['masters'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}" |
| | | {% endfor %} |
| | | |
| | | ## These are infranodes |
| | | {% for host in groups['infranodes'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'zone': '{{hostvars[host]['placement']}}'}" |
| | | {% endfor %} |
| | | |
| | | ## These are regular nodes |
| | | {% for host in groups['nodes'] |
| | | if host not in groups['newnodes']|d([]) |
| | | and host not in groups['glusterfs']|d([]) |
| | | %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'zone': '{{hostvars[host]['placement']}}'}" |
| | | {% endfor %} |
| | | |
| | | {% if groups['glusterfs']|d([])|length > 0 %} |
| | | ## These are glusterfs nodes |
| | | {% for host in groups['glusterfs'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'glusterfs', 'zone': '{{hostvars[host]['placement']}}'}" |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if groups['newnodes']|d([])|length > 0 %} |
| | | # scale-up requested: put the newly added hosts in a separate [new_nodes] group, see:
| | | # https://docs.openshift.com/container-platform/3.7/install_config/adding_hosts_to_existing_cluster.html |
| | | [new_nodes] |
| | | {% for host in groups['newnodes'] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'users', 'zone': '{{hostvars[host]['placement']}}'}" |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if install_nfs|bool %} |
| | | [nfs] |
| | | {% for host in [groups['support']|sort|first] %} |
| | | {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} |
| | | {% endfor %} |
| | | {% endif %} |
| | | |
| | | {% if install_glusterfs|bool %} |
| | | {% set query = "[?name=='support']|[0].volumes[?purpose=='glusterfs'].device_name" %} |
| | | [glusterfs] |
| | | {% for host in groups['glusterfs'] %} |
| | | {% if cloud_provider == 'ec2' %} |
| | | {{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{instances|json_query(query)|to_json}}' |
| | | {% elif cloud_provider == 'azure' %} |
| | | {{ hostvars[host].internaldns }} glusterfs_ip={{hostvars[host].private_ip_address}} glusterfs_devices='{{ [ hostvars[host].glusterfs_device_name ] |to_json}}' |
| | | {% endif %} |
| | | {% endfor %} |
| | | {% endif %} |
New file |
| | |
| | | andrew:$apr1$dZPb2ECf$ercevOFO5znrynUfUj4tb/ |
| | | karla:$apr1$FQx2mX4c$eJc21GuVZWNg1ULF8I2G31 |
| | | user1:$apr1$FmrTsuSa$yducoDpvYq0KEV0ErmwpA1 |
| | | user2:$apr1$JCcW2XQM$8takcyaYYrPT5I8M46TA01 |
| | | user3:$apr1$zPC/rXKY$2PGF7dRsGwC3i8YJ59aOk0 |
| | | user4:$apr1$e9/zT6dh$J18M.9zyn3DazrYreGV.B/ |
| | | user5:$apr1$Nu/XJFVP$DgybymePret.Prch9MyxP/ |
| | | user6:$apr1$VEbpwL9M$c1oFwS.emkt8fyR24zOzd0 |
| | | user7:$apr1$wZxsnY/A$PK0O7iofGJJsvOZ3ctoNo. |
| | | user8:$apr1$5YBAWpGg$YO4ACHZL.c31NbQZH9LlE. |
| | | user9:$apr1$CIxB1enN$Aghb7.S4U3SXPRt55hTWI. |
| | | user10:$apr1$dWTDSR23$UGGJtkVC1ERmAOikomI9K0 |
| | | user11:$apr1$j4fPyRZg$nNJk1nt1vAf54HAB/g/8g/ |
| | | user12:$apr1$dd6kysUI$ueu/9.gbL0LkjpCbSjFNI. |
| | | user13:$apr1$DeRaAbVq$ZI3HtBzQxWYHifjIuPJSM1 |
| | | user14:$apr1$dUuWDYgk$co6NQ4Dbcp3pQjVO5dR7Q. |
| | | user15:$apr1$4QmhSys7$wC.fKmKRqLNqoYqQ1dixJ/ |
| | | user16:$apr1$RHcOPHg7$p9LgYP6zE4nMDlA8ongVc/ |
| | | user17:$apr1$pji2xxHN$vvUHj/fbQRgLR.WBMblQH/ |
| | | user18:$apr1$Lm79l0Qr$KgZSAuPcrTo4.GIWTBLGa/ |
| | | user19:$apr1$KGxvneIX$.GJo7JB.N/c1FLW7vlblx/ |
| | | user20:$apr1$WfYdosg5$cU1BsAzkIhTzKBx8Rvd3o1 |
| | | user21:$apr1$cKRCbWLl$WCVjYUxD22GS5RRv1npwR1 |
| | | user22:$apr1$QhpgOkFU$Y6Nn7NEPbJk3D9ehFb4i50 |
| | | user23:$apr1$dVgQOh7j$L3JZlN8ZmdEwebXqD66Yl0 |
| | | user24:$apr1$z/U5MAQB$GvKG3i8ATXWHhoxN9e0HS/ |
| | | user25:$apr1$gFHGMQUV$w11pZbcBqVKOylr9TZ1EW. |
| | | user26:$apr1$5YG0dnOG$GzbnTQMBe0Dqc3f3pwvPL1 |
| | | user27:$apr1$Kt6VoxNS$nq1Kzd53DUL8h8gfu4fEq/ |
| | | user28:$apr1$aLAQHJ4d$qTRmUpw2eF9whEwDyIixG0 |
| | | user29:$apr1$3HH4pgpa$Uh84gx3UP8vyPRfAIMPRl1 |
| | | user30:$apr1$bbEEX3EF$ozw4jPcYHwVO7.MRzXtu0. |
| | | user31:$apr1$hD0kfz7i$SjNdGZbvto5EifBma5iA5. |
| | | user32:$apr1$fRMBUYu8$T5BQ8kI3pMgqXaRH7l8p.. |
| | | user33:$apr1$es9ruteO$jZsV5/H8GIzw.vCfPs5310 |
| | | user34:$apr1$OQ1I/gHn$.WA01EeXhDLE1K3vWD1wu. |
| | | user35:$apr1$KseEJXTS$kE/QO1XT0mZ44Iyw/ofnj/ |
| | | user36:$apr1$PglCzG.g$44QsoAyMhanH5A40P5jhY1 |
| | | user37:$apr1$2d5ggTIZ$xYsfdRBLOlEsnWRFVS9Yl0 |
| | | user38:$apr1$x/cdV95V$mKFZmSkoBjeEu.HZshO0n. |
| | | user39:$apr1$VC6.WQOS$fAOAR1mx/i7Pnt2oGsDmu/ |
| | | user40:$apr1$n36Hr3zC$lEVq4B7UWmdcnl01lUyR.. |
| | | user41:$apr1$/q6tJtXi$9mCB1YCqdhEE6VVVVkVKc/ |
| | | user42:$apr1$fTMTWEzw$X4MsyNlWketRjQgqonwxn. |
| | | user43:$apr1$.VwoJu38$D4v4NKL1KPuRZdNeprBXS/ |
| | | user44:$apr1$e0s48GLK$JMQ849MeckVX0wG2vE2s10 |
| | | user45:$apr1$a9ucQ1sC$HEMij.WGEa1xIQ01HpyKh1 |
| | | user46:$apr1$uwOs/4nv$TB2r3pOPJ2K0A./CimVUT1 |
| | | user47:$apr1$jfTmW1k5$Fd2ebTUtFFl3CLZWfFmRR. |
| | | user48:$apr1$4/apB/zd$IxoWJ5pTRNGgbxx3Ayl/i0 |
| | | user49:$apr1$nu75PZ0r$bPCMgDmlOAj.YbeFPHJHE. |
| | | user50:$apr1$c/R3wJ/g$GJ03siVj5tkNxrg4OaxhJ0 |
| | | user51:$apr1$EdEX6Pyt$IdPQHmhZi8FEbJjREVbe1/ |
| | | user52:$apr1$ZMfyTjjX$RFOrnKsSr5xXA7IXn7TkC/ |
| | | user53:$apr1$GY.rOkJM$uMCqJmmorP5I1v.YHHz1Z/ |
| | | user54:$apr1$1vuZq/U0$Aq0Kz3wk0YPleDz/rTCdK0 |
| | | user55:$apr1$KjULqmcD$XrhyYt2nWuiaQkbciDIcN/ |
| | | user56:$apr1$gTPaNeq0$sqWJDPZ5//ZDjLf0dSbUh1 |
| | | user57:$apr1$6PaKhdlY$dX2FkVJ0xV.4MAQeDUgRT0 |
| | | user58:$apr1$.8MSdEpY$MPIbUO2WnC0wsno8zUOjC. |
| | | user59:$apr1$TWpKuAvt$CFeTQxxSgeU3dFkL4qpXb. |
| | | user60:$apr1$fEYUgRVU$LO2qwXfpxwI9fDXPfQgQB0 |
| | | user61:$apr1$HHUBEn4G$.cAnwbh.ogNEzQSug3nqo/ |
| | | user62:$apr1$Agt4GmKT$4k3Ev3FSJiNsbht3vUbxQ/ |
| | | user63:$apr1$FsUKA7Hw$nkSgqSIFeqCY1mOyGje3O1 |
| | | user64:$apr1$vBlkQoG4$8L2mTo8gdr8wC68G2y2G91 |
| | | user65:$apr1$McEnEqn4$dZvjACdGp0HALVHBtHEu80 |
| | | user66:$apr1$zamuhlOG$Xch5pbO1ki2Dad1dzjS4j. |
| | | user67:$apr1$qC1rll4s$cN4DzsWnyFBTNi3Cdi6161 |
| | | user68:$apr1$txKPCx1k$WtrlrlP.UF.Rlzbnv6igE/ |
| | | user69:$apr1$EO2A25Sj$DO/1lCNJJXff4GOsTZmHL/ |
| | | user70:$apr1$pJu569Az$nHtF2ZkUrNXw9WN0Obb/T1 |
| | | user71:$apr1$YKpEtZka$c59Fmov1cssRdrO5VqBKz1 |
| | | user72:$apr1$CNkwam0s$b.QcPWytnhlOsaajMQx630 |
| | | user73:$apr1$m5kE07o0$7TC3K.I16YTaRyN8EZq7E/ |
| | | user74:$apr1$/5p0Qoyy$hjQ30Q8Ghb4zNrjjt2yLk/ |
| | | user75:$apr1$ZF3yRTqJ$TgLBllrvTQuuiIjSb53xR0 |
| | | user76:$apr1$711LL2Ai$59rBNmFprwZXtyFVBtRul0 |
| | | user77:$apr1$N4uJhPSq$A.rVfAsRXCQqxOenDHjqX1 |
| | | user78:$apr1$PHSpv5ty$WC8GlQpclQqH30eWPu.6e. |
| | | user79:$apr1$c/yk9dQ9$dvhh.P4F5zGnysBvwps4m/ |
| | | user80:$apr1$oTmftf8R$FYzQD77hYfh9Wq3SvwYU7/ |
| | | user81:$apr1$3YvQ/JPg$sDXhV8xpHNxQzFSvMMxAD1 |
| | | user82:$apr1$quKB2P2.$iq.ZzDa3/xoaoY3.F1Un90 |
| | | user83:$apr1$IVq8346H$lPQJZ7Thr/gJ2EmzDsktH0 |
| | | user84:$apr1$xfehskAD$NRMQJttylejHtNKQqBj.k. |
| | | user85:$apr1$/LYLXNbH$/COZBzkaU0pPOXR38ZFVX/ |
| | | user86:$apr1$a/xD3Jfw$rZXN4ykj0W6qadlh447n// |
| | | user87:$apr1$v01l1ljr$tGDKwdhKC05HEbntSxV5M0 |
| | | user88:$apr1$9RYtWl12$ck19ozvS.SWeAAaDZqE940 |
| | | user89:$apr1$EvSs2TA2$fRDg0hVOCf2jbhwXifzbs. |
| | | user90:$apr1$9ffAneiG$CAz5JWeIPGnamOQlVRGIk. |
| | | user91:$apr1$Z3XW5Yy4$Kibx7GmgdpC6CAM0IxhtC0 |
| | | user92:$apr1$6CfIrBqr$5nGNCGA5QOPq/h8hlOE4f. |
| | | user93:$apr1$iJ4AQyfu$fkXSVib.OzPCSBQlLhwwS. |
| | | user94:$apr1$jiPqi0uI$XyYDQt0kcawqFLX12VW3n/ |
| | | user95:$apr1$ULEkhfG2$/WHcoR9KJxAS3uw470Vkk. |
| | | user96:$apr1$56tQXa91$l0yaZgZHbDidgw95IP7yQ1 |
| | | user97:$apr1$SoGwK9hP$YbceEfwmsM3QCdNGAaE1b. |
| | | user98:$apr1$MVU1/8dh$UKzkRk1CQP00SvnoPIm1.. |
| | | user99:$apr1$v8vKZdHH$NC5xud.olhtdydHU9hav6. |
| | | user100:$apr1$glFN48wz$dR9w94PGiQL8qZXcXGd0L0 |
| | | opentlc-mgr:$apr1$glFN48wz$dR9w94PGiQL8qZXcXGd0L0 |
New file |
| | |
| | | -----BEGIN CERTIFICATE----- |
| | | MIIEkjCCA3qgAwIBAgIQCgFBQgAAAVOFc2oLheynCDANBgkqhkiG9w0BAQsFADA/ |
| | | MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT |
| | | DkRTVCBSb290IENBIFgzMB4XDTE2MDMxNzE2NDA0NloXDTIxMDMxNzE2NDA0Nlow |
| | | SjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUxldCdzIEVuY3J5cHQxIzAhBgNVBAMT |
| | | GkxldCdzIEVuY3J5cHQgQXV0aG9yaXR5IFgzMIIBIjANBgkqhkiG9w0BAQEFAAOC |
| | | AQ8AMIIBCgKCAQEAnNMM8FrlLke3cl03g7NoYzDq1zUmGSXhvb418XCSL7e4S0EF |
| | | q6meNQhY7LEqxGiHC6PjdeTm86dicbp5gWAf15Gan/PQeGdxyGkOlZHP/uaZ6WA8 |
| | | SMx+yk13EiSdRxta67nsHjcAHJyse6cF6s5K671B5TaYucv9bTyWaN8jKkKQDIZ0 |
| | | Z8h/pZq4UmEUEz9l6YKHy9v6Dlb2honzhT+Xhq+w3Brvaw2VFn3EK6BlspkENnWA |
| | | a6xK8xuQSXgvopZPKiAlKQTGdMDQMc2PMTiVFrqoM7hD8bEfwzB/onkxEz0tNvjj |
| | | /PIzark5McWvxI0NHWQWM6r6hCm21AvA2H3DkwIDAQABo4IBfTCCAXkwEgYDVR0T |
| | | AQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwfwYIKwYBBQUHAQEEczBxMDIG |
| | | CCsGAQUFBzABhiZodHRwOi8vaXNyZy50cnVzdGlkLm9jc3AuaWRlbnRydXN0LmNv |
| | | bTA7BggrBgEFBQcwAoYvaHR0cDovL2FwcHMuaWRlbnRydXN0LmNvbS9yb290cy9k |
| | | c3Ryb290Y2F4My5wN2MwHwYDVR0jBBgwFoAUxKexpHsscfrb4UuQdf/EFWCFiRAw |
| | | VAYDVR0gBE0wSzAIBgZngQwBAgEwPwYLKwYBBAGC3xMBAQEwMDAuBggrBgEFBQcC |
| | | ARYiaHR0cDovL2Nwcy5yb290LXgxLmxldHNlbmNyeXB0Lm9yZzA8BgNVHR8ENTAz |
| | | MDGgL6AthitodHRwOi8vY3JsLmlkZW50cnVzdC5jb20vRFNUUk9PVENBWDNDUkwu |
| | | Y3JsMB0GA1UdDgQWBBSoSmpjBH3duubRObemRWXv86jsoTANBgkqhkiG9w0BAQsF |
| | | AAOCAQEA3TPXEfNjWDjdGBX7CVW+dla5cEilaUcne8IkCJLxWh9KEik3JHRRHGJo |
| | | uM2VcGfl96S8TihRzZvoroed6ti6WqEBmtzw3Wodatg+VyOeph4EYpr/1wXKtx8/ |
| | | wApIvJSwtmVi4MFU5aMqrSDE6ea73Mj2tcMyo5jMd6jmeWUHK8so/joWUoHOUgwu |
| | | X4Po1QYz+3dszkDqMp4fklxBwXRsW10KXzPMTZ+sOPAveyxindmjkW8lGy+QsRlG |
| | | PfZ+G6Z6h7mjem0Y+iWlkYcV4PIWL1iwBi8saCbGS5jN2p8M+X+Q7UNKEkROb3N6 |
| | | KOqkqm57TH2H3eDJAkSnh6/DNFu0Qg== |
| | | -----END CERTIFICATE----- |
New file |
| | |
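| | | # Daily rotation (7 compressed rotations) for cron/maillog/messages;
| | | # secure/spooler share a single HUP of syslogd after rotation.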
| | | /var/log/cron |
| | | /var/log/maillog |
| | | /var/log/messages |
| | | { |
| | | daily |
| | | rotate 7 |
| | | compress |
| | | } |
| | | /var/log/secure |
| | | /var/log/spooler |
| | | { |
| | | missingok |
| | | sharedscripts |
| | | postrotate |
| | | /bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true |
| | | endscript |
| | | } |
New file |
| | |
| | | = OPENTLC Shared Labs Environment "{{ guid }}" |
| | | |
| | | == Overview |
| | | |
| | | * The OpenShift Shared Environment provides "developer"-level access to
| | | participants enrolled in GPTE OPEN courses.
| | | |
| | | == Deployment information |
| | | |
| | | [width="100%",cols="1,1,3,10",options="header"] |
| | | |========================================================= |
| | | |Software |Version |Source |Notes |
| | | | ansible_agnostic_deployer |
| | | | commit {{ ansible_agnostic_deployer_head.stdout }} |
| | | | https://github.com/sborenst/ansible_agnostic_deployer |
| | | | date of deployment: {{ ansible_date_time.date }} |
| | | |========================================================= |
| | | |
| | | == Installed Software |
| | | |
| | | .Installed Software versions |
| | | [width="100%",cols="1,1,3,10",options="header"] |
| | | |========================================================= |
| | | |Software |Version |Source |Notes |
| | | |
| | | | OpenShift |
| | | | {{ osrelease }} |
| | | | {% if repo_method == 'file' -%} |
| | | {{ own_repo_path }} |
| | | {% elif repo_method == 'satellite' %} |
| | | Satellite |
| | | {% else %} |
| | | Unknown |
| | | {% endif %} |
| | | | OpenShift Container Platform is installed and configured. |
| | | |
| | | | Nexus |
| | | | v2 |
| | | | Container-based deployment
| | | | A Nexus pod is configured and loaded with X repositories |
| | | |
| | | | Zabbix |
| | | | 3.1 |
| | | | admin-based repos
| | | | Zabbix Client is installed, configured and enabled |
| | | |========================================================= |
| | | |
| | | == Architecture |
| | | |
| | | * AWS region: {{ aws_region }} |
| | | |
| | | .Environment Size |
| | | [width="100%",options="header",cols=",,,,a"] |
| | | |========================================================= |
| | | |Host Name | Instance Size | CPU | RAM | Storage |
| | | {% for host in groups[env_all_hosts] -%} |
| | | | {{ hostvars[host].ansible_hostname }} |
| | | | {{ hostvars[host].ec2_instance_type | default(hostvars[host].instance_type) | default('unknown') }} |
| | | | {{ hostvars[host].ansible_processor_cores }} x {{ hostvars[host].ansible_processor[1] }} |
| | | | {{ '%0.2f'| format(hostvars[host].ansible_memtotal_mb|int / 1024) }} GB RAM |
| | | | {% for dev in hostvars[host].ansible_devices %} |
| | | - {{ dev }}: {{ hostvars[host].ansible_devices[dev].size }} |
| | | {% endfor %} |
| | | |
| | | {% endfor %} |
| | | |========================================================= |
| | | |
| | | .Host Networking |
| | | [width="100%",options="header"] |
| | | |========================================================= |
| | | |Internal Name |Public Name | EIP | Access Key |
| | | |
| | | {% for host in groups[env_all_hosts] -%} |
| | | | {{ hostvars[host].internaldns }} |
| | | | {{ hostvars[host].public_dns_name }} |
| | | | {{ hostvars[host].public_ip_address }} |
| | | | {{ hostvars[host].key_name }} |
| | | {% endfor %} |
| | | |========================================================= |
| | | |
| | | .Access Points |
| | | [width="100%",options="header"] |
| | | |========================================================= |
| | | |Access Point | URI | Method | Notes |
| | | | Bastion Shell |
| | | | `ssh ec2-user@bastion.{{ subdomain_base }}` |
| | | | SSH |
| | | | Admin Only |
| | | |
| | | | OpenShift Web Console |
| | | | https://master.{{ subdomain_base }}:{{master_api_port}} |
| | | | Browser |
| | | | Using OPENTLC Credentials |
| | | |
| | | | Cockpit Console |
| | | | https://master.{{ subdomain_base }}:9090 |
| | | | Browser |
| | | | Admin Only |
| | | |
| | | |Nexus Repository |
| | | | http://nexus-opentlc-shared.{{ cloudapps_suffix }} |
| | | | Browser |
| | | | Admin Only |
| | | |========================================================= |
New file |
| | |
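| | | # Replaces the default project-request template: every new project gets the
| | | # LimitRange below plus the standard image-puller/builder/deployer bindings
| | | # and an admin rolebinding for the requesting user.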
| | | apiVersion: v1 |
| | | kind: Template |
| | | metadata: |
| | | creationTimestamp: null |
| | | name: project-request |
| | | objects: |
| | | - apiVersion: "v1" |
| | | kind: "LimitRange" |
| | | metadata: |
| | | name: "${PROJECT_NAME}-core-resource-limits" |
| | | spec: |
| | | limits: |
| | | - type: "Container" |
| | | max: |
| | | memory: 6Gi |
| | | min: |
| | | memory: 10Mi |
| | | default: |
| | | cpu: 500m |
| | | memory: 1.5Gi |
| | | defaultRequest: |
| | | cpu: 50m |
| | | memory: 256Mi |
| | | - type: "Pod" |
| | | max: |
| | | memory: 12Gi |
| | | min: |
| | | memory: 6Mi |
| | | |
| | | |
| | | - apiVersion: v1 |
| | | kind: Project |
| | | metadata: |
| | | annotations: |
| | | openshift.io/description: ${PROJECT_DESCRIPTION} |
| | | openshift.io/display-name: ${PROJECT_DISPLAYNAME} |
| | | openshift.io/requester: ${PROJECT_REQUESTING_USER} |
| | | creationTimestamp: null |
| | | name: ${PROJECT_NAME} |
| | | spec: {} |
| | | status: {} |
| | | - apiVersion: v1 |
| | | groupNames: |
| | | - system:serviceaccounts:${PROJECT_NAME} |
| | | kind: RoleBinding |
| | | metadata: |
| | | creationTimestamp: null |
| | | name: system:image-pullers |
| | | namespace: ${PROJECT_NAME} |
| | | roleRef: |
| | | name: system:image-puller |
| | | subjects: |
| | | - kind: SystemGroup |
| | | name: system:serviceaccounts:${PROJECT_NAME} |
| | | userNames: null |
| | | - apiVersion: v1 |
| | | groupNames: null |
| | | kind: RoleBinding |
| | | metadata: |
| | | creationTimestamp: null |
| | | name: system:image-builders |
| | | namespace: ${PROJECT_NAME} |
| | | roleRef: |
| | | name: system:image-builder |
| | | subjects: |
| | | - kind: ServiceAccount |
| | | name: builder |
| | | userNames: |
| | | - system:serviceaccount:${PROJECT_NAME}:builder |
| | | - apiVersion: v1 |
| | | groupNames: null |
| | | kind: RoleBinding |
| | | metadata: |
| | | creationTimestamp: null |
| | | name: system:deployers |
| | | namespace: ${PROJECT_NAME} |
| | | roleRef: |
| | | name: system:deployer |
| | | subjects: |
| | | - kind: ServiceAccount |
| | | name: deployer |
| | | userNames: |
| | | - system:serviceaccount:${PROJECT_NAME}:deployer |
| | | - apiVersion: v1 |
| | | groupNames: null |
| | | kind: RoleBinding |
| | | metadata: |
| | | creationTimestamp: null |
| | | name: admin |
| | | namespace: ${PROJECT_NAME} |
| | | roleRef: |
| | | name: admin |
| | | subjects: |
| | | - kind: User |
| | | name: ${PROJECT_ADMIN_USER} |
| | | userNames: |
| | | - ${PROJECT_ADMIN_USER} |
| | | parameters: |
| | | - name: PROJECT_NAME |
| | | - name: PROJECT_DISPLAYNAME |
| | | - name: PROJECT_DESCRIPTION |
| | | - name: PROJECT_ADMIN_USER |
| | | - name: PROJECT_REQUESTING_USER |
New file |
| | |
| | | --- |
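| | | {# Renders one PersistentVolume document per name in pv_list, each backed #}
| | | {# by an NFS export on support1 and separated by a YAML document marker.  #}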
| | | {% for pv in pv_list %} |
| | | apiVersion: v1 |
| | | kind: PersistentVolume |
| | | metadata: |
| | | name: {{ pv }} |
| | | spec: |
| | | capacity: |
| | | storage: {{pv_size}} |
| | | accessModes: |
| | | - ReadWriteOnce |
| | | nfs: |
| | | path: {{ nfs_export_path }}/{{pv}} |
| | | server: support1.{{chomped_zone_internal_dns}} |
| | | persistentVolumeReclaimPolicy: {{persistentVolumeReclaimPolicy}} |
| | | --- |
| | | {% endfor %} |
New file |
| | |
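| | | # Repo file used when repo_method is "file": every channel points at the
| | | # shared mirror given by own_repo_path, with GPG checking disabled for the
| | | # mirrored packages.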
| | | [rhel-7-server-rpms] |
| | | name=Red Hat Enterprise Linux 7 |
| | | baseurl={{own_repo_path}}/rhel-7-server-rpms |
| | | enabled=1 |
| | | gpgcheck=0 |
| | | |
| | | [rhel-7-server-rh-common-rpms] |
| | | name=Red Hat Enterprise Linux 7 Common |
| | | baseurl={{own_repo_path}}/rhel-7-server-rh-common-rpms |
| | | enabled=1 |
| | | gpgcheck=0 |
| | | |
| | | [rhel-7-server-extras-rpms] |
| | | name=Red Hat Enterprise Linux 7 Extras |
| | | baseurl={{own_repo_path}}/rhel-7-server-extras-rpms |
| | | enabled=1 |
| | | gpgcheck=0 |
| | | |
| | | [rhel-7-server-optional-rpms] |
| | | name=Red Hat Enterprise Linux 7 Optional |
| | | baseurl={{own_repo_path}}/rhel-7-server-optional-rpms |
| | | enabled=1 |
| | | gpgcheck=0 |
| | | |
| | | [rhel-7-server-ose-{{repo_version}}-rpms] |
| | | name=Red Hat Enterprise Linux 7 OSE {{repo_version}} |
| | | baseurl={{own_repo_path}}/rhel-7-server-ose-{{repo_version}}-rpms |
| | | enabled=1 |
| | | gpgcheck=0 |
| | | |
| | | ## Required since OCP 3.5 |
| | | [rhel-7-fast-datapath-rpms] |
| | | name=Red Hat Enterprise Linux Fast Datapath (RHEL 7 Server) (RPMs) |
| | | baseurl={{own_repo_path}}/rhel-7-fast-datapath-rpms |
| | | enabled=1 |
| | | gpgcheck=0 |
| | | |
| | | {% if osrelease | version_compare('3.9', '>=') %} |
| | | ## Required since OCP 3.9 |
| | | [rhel-7-server-ansible-2.4-rpms] |
| | | name=Red Hat Enterprise Linux Ansible (RPMs) |
| | | baseurl={{own_repo_path}}/rhel-7-server-ansible-2.4-rpms |
| | | enabled=1 |
| | | gpgcheck=0 |
| | | {% endif %} |
New file |
| | |
| | | --- |
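| | | {# range(1, user_vols) is end-exclusive: this renders vol1 .. vol(user_vols - 1). #}
| | | {# Even-numbered volumes additionally allow ReadWriteMany access.                #}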
| | | {% for pv in range(1,user_vols|int) %} |
| | | apiVersion: v1 |
| | | kind: PersistentVolume |
| | | metadata: |
| | | name: vol{{ pv }} |
| | | spec: |
| | | capacity: |
| | | storage: {{ pv_size }} |
| | | accessModes: |
| | | - ReadWriteOnce |
| | | {% if pv % 2 == 0 %} |
| | | - ReadWriteMany |
| | | {% endif %} |
| | | nfs: |
| | | path: {{ nfs_export_path }}/user-vols/vol{{pv}} |
| | | server: support1.{{chomped_zone_internal_dns}} |
| | | persistentVolumeReclaimPolicy: {{ persistentVolumeReclaimPolicy }} |
| | | --- |
| | | {% endfor %} |
New file |
| | |
| | | --- |
| | | mgr_users: |
| | | - name: opentlc-mgr |
| | | home: /home/opentlc-mgr |
| | | authorized_keys: |
| | | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4OojwKH74UWVOY92y87Tb/b56CMJoWbz2gyEYsr3geOc2z/n1pXMwPfiC2KT7rALZFHofc+x6vfUi6px5uTm06jXa78S7UB3MX56U3RUd8XF3svkpDzql1gLRbPIgL1h0C7sWHfr0K2LG479i0nPt/X+tjfsAmT3nWj5PVMqSLFfKrOs6B7dzsqAcQPInYIM+Pqm/pXk+Tjc7cfExur2oMdzx1DnF9mJaj1XTnMsR81h5ciR2ogXUuns0r6+HmsHzdr1I1sDUtd/sEVu3STXUPR8oDbXBsb41O5ek6E9iacBJ327G3/1SWwuLoJsjZM0ize+iq3HpT1NqtOW6YBLR opentlc-mgr@inf00-mwl.opentlc.com |
New file |
| | |
| | | - name: Step 002 Post Infrastructure |
| | | hosts: localhost |
| | | connection: local |
| | | become: false |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tags: |
| | | - step002 |
| | | - post_infrastructure |
| | | tasks: |
| | | |
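| | |   # Launch a pre-configured Tower job template over the REST API, forwarding
| | |   # the environment guid and the IPA host password as extra_vars. Only runs
| | |   # when tower_run is set to 'true'.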
| | | - name: Job Template to launch a Job Template with update on launch inventory set |
| | | uri: |
| | | url: "https://{{ ansible_tower_ip }}/api/v1/job_templates/{{ job_template_id }}/launch/" |
| | | method: POST |
| | | user: "{{tower_admin}}" |
| | | password: "{{tower_admin_password}}" |
| | | body: |
| | | extra_vars: |
| | | guid: "{{guid}}" |
| | | ipa_host_password: "{{ipa_host_password}}" |
| | | |
| | | body_format: json |
| | | validate_certs: False |
| | | HEADER_Content-Type: "application/json" |
| | | status_code: 200, 201 |
| | | when: tower_run == 'true' |
| | | |
| | | - name: get S3User credentials from stack outputs |
| | | set_fact: |
| | | s3user: "{{ cloudformation_out.stack_outputs.S3User }}" |
| | | s3user_access_key: "{{ cloudformation_out.stack_outputs.S3UserAccessKey }}" |
| | | s3user_secret_access_key: "{{ cloudformation_out.stack_outputs.S3UserSecretAccessKey }}" |
| | | when: |
| | | - cloudformation_out is defined |
| | | - cloudformation_out.stack_outputs.S3UserAccessKey is defined |
| | | - cloudformation_out.stack_outputs.S3UserSecretAccessKey is defined |
| | | tags: |
| | | - provision_cf_template |
| | | |
| | | - name: write down s3user credentials |
| | | copy: |
| | | dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.s3user.credentials" |
| | | content: | |
| | | * S3 Bucket for registry: {{s3user}} |
| | | ** S3User access key: {{s3user_access_key}} |
| | | ** S3User secret key: {{s3user_secret_access_key}} |
| | | when: s3user_access_key is defined |
| | | |
| | | - name: get Route53User credentials from stack outputs |
| | | set_fact: |
| | | route53user: "{{ cloudformation_out.stack_outputs.Route53User }}" |
| | | route53user_access_key: "{{ cloudformation_out.stack_outputs.Route53UserAccessKey }}" |
| | | route53user_secret_access_key: "{{ cloudformation_out.stack_outputs.Route53UserSecretAccessKey }}" |
| | | when: |
| | | - cloudformation_out is defined |
| | | - cloudformation_out.stack_outputs.Route53UserAccessKey is defined |
| | | - cloudformation_out.stack_outputs.Route53UserSecretAccessKey is defined |
| | | tags: |
| | | - provision_cf_template |
| | | |
| | | - name: write down Route53User credentials |
| | | copy: |
| | | dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.route53user.credentials" |
| | | content: | |
| | | * Route53 User for Let's Encrypt: {{ route53user }} |
| | | ** Route53User access key: {{ route53user_access_key }} |
| | | ** Route53User secret key: {{ route53user_secret_access_key }} |
| | | when: route53user_access_key is defined |
| | | |
| | | |
| | | - name: Detect and map data disks (support) for Azure |
| | | hosts: support |
| | | become: true |
| | | gather_facts: false |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tasks: |
| | | - name: test if docker_device file is present (previous run) |
| | | stat: |
| | | path: /var/preserve/docker_device |
| | | register: rfile |
| | | |
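| | |   # Azure attaches data disks by LUN, not by a stable device name. The shell
| | |   # pipeline lists the unpartitioned /dev/sd* disks that parted reports as
| | |   # "unknown" and prints line (LUN + 1), so LUN 0 maps to the first such disk.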
| | | - name: Get docker device |
| | | changed_when: false |
| | | vars: |
| | | query: "[?name == 'support']|[].volumes[?purpose=='docker'].lun|[0][0]" |
| | | shell: > |
| | | parted -m /dev/sda print all 2>/dev/null |
| | | | grep unknown |
| | | | grep /dev/sd |
| | | | cut -d':' -f1 |
| | | | sed -n '{{ (instances|json_query(query)|int) + 1}}p' |
| | | register: result |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - instances is defined |
| | | # docker_device will be present on support nodes only when glusterfs is installed |
| | | - install_glusterfs | bool |
| | | - not rfile.stat.exists |
| | | |
| | | - name: set fact for docker_device |
| | | set_fact: |
| | | docker_device: "{{ result.stdout }}" |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - install_glusterfs | bool |
| | | - not rfile.stat.exists |
| | | |
| | | - name: Write down docker_device for idempotency |
| | | copy: |
| | | dest: /var/preserve/docker_device |
| | | content: "{{ docker_device }}" |
| | | force: no |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - install_glusterfs | bool |
| | | - not rfile.stat.exists |
| | | |
| | | # idempotency |
| | | |
| | | - name: get device_name from file (from previous run) |
| | | slurp: |
| | | src: /var/preserve/docker_device |
| | | register: slurp_result |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - install_glusterfs | bool |
| | | - rfile.stat.exists |
| | | |
| | | - name: set fact for docker_device (from previous run) |
| | | set_fact: |
| | | docker_device: "{{ slurp_result.content|b64decode }}" |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - install_glusterfs | bool |
| | | - rfile.stat.exists |
| | | |
| | | # glusterfs device |
| | | |
| | | - name: test if glusterfs_device_name file is present (previous run) |
| | | stat: |
| | | path: /var/preserve/glusterfs_device_name |
| | | register: rfile |
| | | |
| | | - name: Get glusterfs device |
| | | changed_when: false |
| | | vars: |
| | | query: "[?name == 'support']|[].volumes[?purpose=='glusterfs'].lun|[0][0]" |
| | | shell: > |
| | | parted -m /dev/sda print all 2>/dev/null |
| | | | grep unknown |
| | | | grep /dev/sd |
| | | | cut -d':' -f1 |
| | | | sed -n '{{ (instances|json_query(query)|int) + 1}}p' |
| | | register: result |
| | | when: |
| | | - install_glusterfs | bool |
| | | - cloud_provider == 'azure' |
| | | - instances is defined |
| | | - not rfile.stat.exists |
| | | |
| | | - name: set fact for glusterfs_device_name |
| | | set_fact: |
| | | glusterfs_device_name: "{{ result.stdout }}" |
| | | when: |
| | | - install_glusterfs | bool |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - not rfile.stat.exists |
| | | |
| | | - name: Write down glusterfs_device_name for idempotency |
| | | copy: |
| | | dest: /var/preserve/glusterfs_device_name |
| | | content: "{{ glusterfs_device_name }}" |
| | | force: no |
| | | when: |
| | | - install_glusterfs | bool |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - not rfile.stat.exists |
| | | |
| | | # idempotency |
| | | |
| | | - name: get device_name from file (from previous run) |
| | | slurp: |
| | | src: /var/preserve/glusterfs_device_name |
| | | register: slurp_result |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - install_glusterfs | bool |
| | | - rfile.stat.exists |
| | | |
| | | - name: set fact for glusterfs_device_name (from previous run) |
| | | set_fact: |
| | | glusterfs_device_name: "{{ slurp_result.content|b64decode }}" |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - install_glusterfs | bool |
| | | - rfile.stat.exists |
| | | |
| | | # nfs_pvs |
| | | |
| | | - name: test if nfs_pvs file is present (previous run) |
| | | stat: |
| | | path: /var/preserve/nfs_pvs |
| | | register: rfile |
| | | |
| | | - name: Get NFS device |
| | | changed_when: false |
| | | vars: |
| | | query: "[?name == 'support']|[].volumes[?purpose=='nfs'].lun|[0][0]" |
| | | shell: > |
| | | parted -m /dev/sda print all 2>/dev/null |
| | | | grep unknown |
| | | | grep /dev/sd |
| | | | cut -d':' -f1 |
| | | | sed -n '{{ (instances|json_query(query)|int) + 1}}p' |
| | | register: result |
| | | when: |
| | | - install_nfs | bool |
| | | - cloud_provider == 'azure' |
| | | - instances is defined |
| | | - not rfile.stat.exists |
| | | |
| | | - name: set fact for nfs_pvs |
| | | set_fact: |
| | | nfs_pvs: "{{ result.stdout }}" |
| | | when: |
| | | - install_nfs | bool |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - not rfile.stat.exists |
| | | |
| | | - name: Write down nfs_pvs for idempotency |
| | | copy: |
| | | dest: /var/preserve/nfs_pvs |
| | | content: "{{ nfs_pvs }}" |
| | | force: no |
| | | when: |
| | | - install_nfs | bool |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - not rfile.stat.exists |
| | | |
| | | # idempotency |
| | | |
| | | - name: get nfs_pvs from file (from previous run) |
| | | slurp: |
| | | src: /var/preserve/nfs_pvs |
| | | register: slurp_result |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - install_nfs | bool |
| | | - rfile.stat.exists |
| | | |
| | | - name: set fact for nfs_pvs (from previous run) |
| | | set_fact: |
| | | nfs_pvs: "{{ slurp_result.content|b64decode }}" |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - install_nfs | bool |
| | | - rfile.stat.exists |
| | | |
| | | - name: Detect and map data disks (nodes) for Azure |
| | | hosts: nodes |
| | | become: true |
| | | gather_facts: false |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tasks: |
| | | - name: test if docker_device file is present (previous run) |
| | | stat: |
| | | path: /var/preserve/docker_device |
| | | register: rfile |
| | | |
| | | - name: Get docker device |
| | | changed_when: false |
| | | vars: |
| | | query: "[?name == 'node']|[].volumes[?purpose=='docker'].lun|[0][0]" |
| | | shell: > |
| | | parted -m /dev/sda print all 2>/dev/null |
| | | | grep unknown |
| | | | grep /dev/sd |
| | | | cut -d':' -f1 |
| | | | sed -n '{{ (instances|json_query(query)|int) + 1}}p' |
| | | register: result |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - instances is defined |
| | | - not rfile.stat.exists |
| | | |
| | | - name: set fact for docker_device |
| | | set_fact: |
| | | docker_device: "{{ result.stdout }}" |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - not rfile.stat.exists |
| | | |
| | | - name: Write down docker_device for idempotency |
| | | copy: |
| | | dest: /var/preserve/docker_device |
| | | content: "{{ docker_device }}" |
| | | force: no |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - not rfile.stat.exists |
| | | |
| | | # idempotency |
| | | |
| | | - name: get device_name from file (from previous run) |
| | | slurp: |
| | | src: /var/preserve/docker_device |
| | | register: slurp_result |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - rfile.stat.exists |
| | | |
| | | - name: set fact for docker_device (from previous run) |
| | | set_fact: |
| | | docker_device: "{{ slurp_result.content|b64decode }}" |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - rfile.stat.exists |
| | | |
| | | - name: Detect and map data disks (infranodes) for Azure |
| | | hosts: infranodes |
| | | become: true |
| | | gather_facts: false |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tasks: |
| | | - name: test if docker_device file is present (previous run) |
| | | stat: |
| | | path: /var/preserve/docker_device |
| | | register: rfile |
| | | |
| | | - name: Get docker device |
| | | changed_when: false |
| | | vars: |
| | | query: "[?name == 'infranode']|[].volumes[?purpose=='docker'].lun|[0][0]" |
| | | shell: > |
| | | parted -m /dev/sda print all 2>/dev/null |
| | | | grep unknown |
| | | | grep /dev/sd |
| | | | cut -d':' -f1 |
| | | | sed -n '{{ (instances|json_query(query)|int) + 1}}p' |
| | | register: result |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - instances is defined |
| | | - not rfile.stat.exists |
| | | |
| | | - name: set fact for docker_device |
| | | set_fact: |
| | | docker_device: "{{ result.stdout }}" |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - not rfile.stat.exists |
| | | |
| | | - name: Write down docker_device for idempotency |
| | | copy: |
| | | dest: /var/preserve/docker_device |
| | | content: "{{ docker_device }}" |
| | | force: no |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - not rfile.stat.exists |
| | | |
| | | # idempotency |
| | | |
| | | - name: get device_name from file (from previous run) |
| | | slurp: |
| | | src: /var/preserve/docker_device |
| | | register: slurp_result |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - rfile.stat.exists |
| | | |
| | | - name: set fact for docker_device (from previous run) |
| | | set_fact: |
| | | docker_device: "{{ slurp_result.content|b64decode }}" |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - rfile.stat.exists |
| | | |
| | | - name: Map data disks (masters) for Azure |
| | | hosts: masters |
| | | become: true |
| | | gather_facts: false |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tasks: |
| | | - name: test if docker_device file is present (previous run) |
| | | stat: |
| | | path: /var/preserve/docker_device |
| | | register: rfile |
| | | |
| | | - name: Get docker device |
| | | changed_when: false |
| | | vars: |
| | | query: "[?name == 'master']|[].volumes[?purpose=='docker'].lun|[0][0]" |
| | | shell: > |
| | | parted -m /dev/sda print all 2>/dev/null |
| | | | grep unknown |
| | | | grep /dev/sd |
| | | | cut -d':' -f1 |
| | | | sed -n '{{ (instances|json_query(query)|int) + 1}}p' |
| | | register: result |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - instances is defined |
| | | - not rfile.stat.exists |
| | | |
| | | - name: set fact for docker_device |
| | | set_fact: |
| | | docker_device: "{{ result.stdout }}" |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - not rfile.stat.exists |
| | | |
| | | - name: Write down docker_device for idempotency |
| | | copy: |
| | | dest: /var/preserve/docker_device |
| | | content: "{{ docker_device }}" |
| | | force: no |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - not rfile.stat.exists |
| | | |
| | | # idempotency |
| | | |
| | | - name: get device_name from file (from previous run) |
| | | slurp: |
| | | src: /var/preserve/docker_device |
| | | register: slurp_result |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - rfile.stat.exists |
| | | |
| | | - name: set fact for docker_device (from previous run) |
| | | set_fact: |
| | | docker_device: "{{ slurp_result.content|b64decode }}" |
| | | when: |
| | | - cloud_provider == 'azure' |
| | | - result | succeeded |
| | | - rfile.stat.exists |
New file |
| | |
| | | --- |
| | | - name: Step 00xxxxx post software |
| | | hosts: support |
| | | gather_facts: False |
| | | become: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | tasks: |
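| | |   # vol{1..N} relies on bash brace expansion (/bin/sh on RHEL), creating
| | |   # /srv/nfs/user-vols/vol1 .. vol{{ user_vols }}; mode 777 lets pods running
| | |   # under arbitrary UIDs write to their volume.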
| | | - name: Create user vols |
| | | shell: "mkdir -p /srv/nfs/user-vols/vol{1..{{user_vols}}}" |
| | | - name: chmod the user vols |
| | | shell: "chmod -R 777 /srv/nfs/user-vols" |
| | | |
| | | - name: Step 00xxxxx post software |
| | | hosts: bastions |
| | | run_once: true |
| | | gather_facts: False |
| | | become: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | roles: |
| | | - role: "{{ ANSIBLE_REPO_PATH }}/roles/openshift-ansible-broker" |
| | | tasks: |
| | | - name: get nfs Hostname |
| | | set_fact: |
| | | nfs_host: "{{ groups['support']|sort|first }}" |
| | | |
| | | - set_fact: |
| | | pv_size: '10Gi' |
| | | pv_list: "{{ ocp_pvs }}" |
| | | persistentVolumeReclaimPolicy: Retain |
| | | |
| | | - name: Generate PV file |
| | | template: |
| | | src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/pvs.j2" |
| | | dest: "/root/pvs-{{ env_type }}-{{ guid }}.yml" |
| | | tags: [ gen_pv_file ] |
| | | when: pv_list.0 is defined |
| | | |
| | | - set_fact:
| | |     pv_size: "{{user_vols_size}}"
| | |     persistentVolumeReclaimPolicy: Recycle
| | |   run_once: True
| | | |
| | | - name: Generate user vol PV file |
| | | template: |
| | | src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/userpvs.j2" |
| | | dest: "/root/userpvs-{{ env_type }}-{{ guid }}.yml" |
| | | tags: |
| | | - gen_user_vol_pv |
| | | |
| | | - name: Create (or replace) the environment PVs
| | |   shell: 'oc create -f /root/pvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/pvs-{{ env_type }}-{{ guid }}.yml'
| | | tags: |
| | | - create_user_pv |
| | | when: pv_list.0 is defined |
| | | |
| | | - name: Create (or replace) the user-vol PVs
| | |   shell: 'oc create -f /root/userpvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/userpvs-{{ env_type }}-{{ guid }}.yml'
| | | tags: |
| | | - create_user_pv |
| | | |
| | | - name: For CNS change default storage class to glusterfs-storage |
| | | hosts: masters |
| | | run_once: true |
| | | become: yes |
| | | gather_facts: False |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | tags: |
| | | - env-specific |
| | | - env-specific_infra |
| | | - storage-class |
| | | tasks: |
| | | - when: |
| | | - osrelease is version_compare('3.9.27', '>=') |
| | | - install_glusterfs|bool |
| | | block: |
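| | |     # oc patch reports 'not patched' (rc 0) when the annotation is already in
| | |     # the desired state; only a non-zero rc without that message counts as a
| | |     # failure, and only an explicit 'patched' message counts as a change.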
| | | - name: Set glusterfs-storage class to default |
| | | command: > |
| | | oc patch storageclass glusterfs-storage |
| | | -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}' |
| | | register: changesc_r |
| | | failed_when: |
| | | - changesc_r.stdout.find('storageclass "glusterfs-storage" not patched') == -1 |
| | | - changesc_r.rc != 0 |
| | | changed_when: changesc_r.stdout.find('storageclass "glusterfs-storage" patched') != -1 |
| | | |
| | | - name: Remove default from glusterfs-storage-block class |
| | | register: changesc_r |
| | | changed_when: changesc_r.stdout.find('storageclass "glusterfs-storage-block" patched') != -1 |
| | | failed_when: |
| | | - changesc_r.stdout.find('storageclass "glusterfs-storage-block" not patched') == -1 |
| | | - changesc_r.rc != 0 |
| | | command: > |
| | | oc patch storageclass glusterfs-storage-block |
| | | -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}' |
| | | |
| | | - name: Configure Bastion for CF integration |
| | | hosts: bastions |
| | | become: yes |
| | | gather_facts: False |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/mgr_users.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tags: |
| | | - env-specific |
| | | - cf_integration |
| | | - opentlc_integration |
| | | roles: |
| | | - role: "{{ ANSIBLE_REPO_PATH }}/roles/opentlc-integration" |
| | | when: install_opentlc_integration |
| | | no_log: yes |
| | | tasks: |
| | | - name: Copy /root/.kube to ~opentlc-mgr/ |
| | | command: "cp -rf /root/.kube /home/opentlc-mgr/" |
| | | when: install_opentlc_integration == true |
| | | |
| | | - name: set permission for .kube |
| | | when: install_opentlc_integration == true |
| | | file: |
| | | path: /home/opentlc-mgr/.kube |
| | | owner: opentlc-mgr |
| | | group: opentlc-mgr |
| | | recurse: yes |
| | | |
| | | - name: env-specific infrastructure |
| | | hosts: masters |
| | | run_once: true |
| | | become: yes |
| | | gather_facts: False |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | tags: |
| | | - env-specific |
| | | - env-specific_infra |
| | | tasks: |
| | | - name: Command to enable the wildcard routes in the OCP cluster for 3scale |
| | | shell: "oc set env dc/router ROUTER_ALLOW_WILDCARD_ROUTES=true -n default" |
| | | |
| | | - name: Give administrative user cluster-admin privileges |
| | | command: "oc adm policy add-cluster-role-to-user cluster-admin {{ admin_user }}" |
| | | |
| | | - name: Check for admin_project project |
| | | command: "oc get project {{admin_project}}" |
| | | register: result |
| | | changed_when: false |
| | | ignore_errors: true |
| | | |
| | | - name: Create admin_project project |
| | | command: "oc adm new-project {{admin_project}} --admin {{admin_user}} --node-selector='env=infra'" |
| | | when: result | failed |
| | | |
| | | - name: Make admin_project project network global |
| | | command: "oc adm pod-network make-projects-global {{admin_project}}" |
| | | when: 'ovs_plugin == "multitenant"' |
| | | |
| | | - name: Set admin_project SCC for anyuid |
| | | command: "oc adm policy add-scc-to-group anyuid system:serviceaccounts:{{admin_project}}" |
| | | |
| | | - name: Add capabilities within anyuid (not really ideal)
| | | command: "oc patch scc/anyuid --patch '{\"requiredDropCapabilities\":[\"MKNOD\",\"SYS_CHROOT\"]}'" |
| | | ignore_errors: true |
| | | |
| | | - name: Set Node Selector to empty for project openshift-template-service-broker |
| | | shell: oc annotate namespace openshift-template-service-broker openshift.io/node-selector="" --overwrite |
| | | ignore_errors: true |
| | | when: |
| | | - osrelease is version_compare('3.7', '>=') |
| | | |
| | | - name: Remove all users from self-provisioners group |
| | | hosts: masters |
| | | run_once: true |
| | | become: yes |
| | | gather_facts: False |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | tags: [ env-specific, remove_self_provisioners ] |
| | | tasks: |
| | | - when: remove_self_provisioners|bool |
| | | block: |
| | | - name: Set clusterRoleBinding auto-update to false |
| | | command: oc annotate -n default --overwrite clusterrolebinding.rbac self-provisioners rbac.authorization.kubernetes.io/autoupdate=false |
| | | |
| | | - name: Remove system:authenticated from self-provisioner role
| | | command: "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated system:authenticated:oauth"
| | | ignore_errors: true
| | | 
| | | - name: Create our own OPENTLC-PROJECT-PROVISIONERS group
| | | command: "oc adm groups new OPENTLC-PROJECT-PROVISIONERS"
| | | ignore_errors: true
| | | 
| | | - name: Allow OPENTLC-PROJECT-PROVISIONERS members to provision their own projects
| | | command: "oc adm policy add-cluster-role-to-group self-provisioner OPENTLC-PROJECT-PROVISIONERS"
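| | | # Once the group exists, self-provisioning can be granted per user, e.g.
| | | # (illustrative user name):
| | | #   oc adm groups add-users OPENTLC-PROJECT-PROVISIONERS alice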
| | | |
| | | - name: Project Request Template |
| | | hosts: masters |
| | | gather_facts: False |
| | | become: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | tags: |
| | | - env-specific |
| | | - project_request |
| | | tasks: |
| | | - name: Copy project request template to master |
| | | copy: |
| | | src: ./files/project-template.yml |
| | | dest: /root/project-template.yml |
| | | |
| | | - name: Check for project request template |
| | | command: "oc get template project-request -n default" |
| | | register: request_template |
| | | ignore_errors: true |
| | | |
| | | - name: Create project request template in default project |
| | | shell: "oc create -f /root/project-template.yml -n default || oc replace -f /root/project-template.yml -n default" |
| | | when: request_template is failed
| | | |
| | | - name: Update master config file to use project request template |
| | | lineinfile: |
| | | regexp: " projectRequestTemplate" |
| | | dest: "/etc/origin/master/master-config.yaml" |
| | | line: ' projectRequestTemplate: "default/project-request"' |
| | | state: present |
| | | register: master_config |
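| | | # Note: lineinfile only rewrites an existing projectRequestTemplate line; it
| | | # assumes master-config.yaml already carries one under projectConfig, i.e.:
| | | #   projectConfig:
| | | #     projectRequestTemplate: "default/project-request"
| | | # If no such line existed, lineinfile would append it at the end of the
| | | # file, which is not a valid location for this key.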
| | | |
| | | - name: Add Project request message |
| | | replace: |
| | | dest: '/etc/origin/master/master-config.yaml' |
| | | regexp: 'projectRequestMessage.*' |
| | | replace: "projectRequestMessage: '{{project_request_message}}'" |
| | | backup: yes |
| | | |
| | | - name: Restart master service |
| | | service: |
| | | name: atomic-openshift-master-api |
| | | state: restarted |
| | | when: |
| | | - master_config.changed |
| | | - osrelease | version_compare('3.7', '>=') |
| | | |
| | | - name: Restart master service |
| | | service: |
| | | name: atomic-openshift-master |
| | | state: restarted |
| | | when: |
| | | - master_config.changed |
| | | - osrelease | version_compare('3.7', '<') |
| | | |
| | | - name: node admin configs |
| | | hosts: nodes |
| | | gather_facts: False |
| | | become: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | tags: |
| | | - env-specific |
| | | - env_specific_images |
| | | tasks: |
| | | - name: 'Pull image' |
| | | command: "docker pull {{ item }}" |
| | | with_items: '{{ env_specific_images }}' |
| | | when: env_specific_images.0 is defined |
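| | | # env_specific_images is expected to be a (possibly empty) list of image
| | | # pull specs defined in env_vars.yml, for example (illustrative values):
| | | #   env_specific_images:
| | | #     - registry.access.redhat.com/openshift3/jenkins-2-rhel7:v3.9
| | | #     - registry.access.redhat.com/openshift3/ose-recycler:latest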
| | | |
| | | - name: Import jenkins images for OCP 3.7 and newer |
| | | hosts: masters |
| | | run_once: true |
| | | become: yes |
| | | gather_facts: False |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | tags: |
| | | - env-specific |
| | | - env_specific_images |
| | | tasks: |
| | | - name: tag jenkins |
| | | command: oc tag --source=docker registry.access.redhat.com/openshift3/jenkins-2-rhel7:v{{ repo_version }} openshift/jenkins:v{{ repo_version }} -n openshift |
| | | when: osrelease | version_compare('3.7', '>=') |
| | | ignore_errors: true |
| | | |
| | | - name: tag jenkins |
| | | command: oc tag openshift/jenkins:v{{ repo_version }} openshift/jenkins:latest -n openshift |
| | | register: octag_result |
| | | when: osrelease | version_compare('3.7', '>=') |
| | | retries: 5 |
| | | delay: 2 |
| | | until: octag_result is succeeded
| | | ignore_errors: true |
| | | |
| | | - name: Fix NFS PV Recycling for OCP 3.7 and newer |
| | | gather_facts: False |
| | | become: yes |
| | | hosts: |
| | | - nodes |
| | | - infranodes |
| | | - masters |
| | | tasks: |
| | | - name: Pull ose-recycler Image |
| | | command: docker pull registry.access.redhat.com/openshift3/ose-recycler:latest |
| | | register: pullr |
| | | retries: 5 |
| | | delay: 10 |
| | | until: pullr is succeeded
| | | when: osrelease | version_compare('3.7', '>=') |
| | | |
| | | - name: Tag ose-recycler Image |
| | | command: > |
| | | docker tag registry.access.redhat.com/openshift3/ose-recycler:latest |
| | | registry.access.redhat.com/openshift3/ose-recycler:v{{ osrelease }} |
| | | when: osrelease | version_compare('3.7', '>=') |
| | | |
| | | - name: Fix CRI-O Garbage Collection DaemonSet for OCP 3.9 and newer |
| | | gather_facts: False |
| | | become: yes |
| | | hosts: masters |
| | | run_once: true |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | tasks: |
| | | - name: Patch dockergc DaemonSet |
| | | shell: "oc patch daemonset dockergc --patch='\"spec\": { \"template\": { \"spec\": { \"containers\": [ { \"command\": [ \"/usr/bin/oc\" ], \"name\": \"dockergc\" } ] } } }' -n default" |
| | | ignore_errors: true |
| | | when: |
| | | - osrelease | version_compare('3.9.0', '>=') |
| | | - osrelease | version_compare('3.9.25', '<=') |
| | | - container_runtime == "cri-o" |
| | | - name: Redeploy dockergc DaemonSet pods |
| | | shell: "oc delete pod $(oc get pods -n default|grep dockergc|awk '{print $1}') -n default"
| | | when: |
| | | - osrelease | version_compare('3.9.0', '>=') |
| | | - osrelease | version_compare('3.9.25', '<=') |
| | | - container_runtime == "cri-o" |
| | | |
| | | # Install OpenWhisk |
| | | - name: Install OpenWhisk |
| | | hosts: masters |
| | | run_once: true |
| | | gather_facts: False |
| | | become: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | tags: |
| | | - env-specific |
| | | - install_openwhisk |
| | | tasks: |
| | | - import_role: |
| | | name: "{{ ANSIBLE_REPO_PATH }}/roles/install-openwhisk" |
| | | when: |
| | | - install_openwhisk|bool |
| | | |
| | | # Set up Prometheus/Node Exporter/Alertmanager/Grafana |
| | | # on the OpenShift Cluster |
| | | - name: Install Prometheus and Grafana |
| | | gather_facts: False |
| | | become: yes |
| | | hosts: |
| | | - nodes |
| | | - infranodes |
| | | - masters |
| | | - bastions |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tags: |
| | | - install_prometheus |
| | | tasks: |
| | | - import_role: |
| | | name: "{{ ANSIBLE_REPO_PATH }}/roles/install-prometheus" |
| | | when: install_prometheus|bool |
| | | |
| | | - name: Install Nexus |
| | | hosts: masters |
| | | run_once: true |
| | | gather_facts: False |
| | | become: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | roles: |
| | | - { role: "{{ ANSIBLE_REPO_PATH }}/roles/install-nexus", desired_project: "{{admin_project}}", nexus_version: "3" } |
| | | tags: |
| | | - env-specific |
| | | - install_nexus |
| | | |
| | | - name: Install AWS Broker |
| | | hosts: masters |
| | | run_once: true |
| | | gather_facts: False |
| | | become: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | tags: |
| | | - env-specific |
| | | - install_aws_broker |
| | | tasks: |
| | | - import_role: |
| | | name: "{{ ANSIBLE_REPO_PATH }}/roles/install-aws-broker" |
| | | when: |
| | | - install_aws_broker|bool |
| | | |
| | | - name: Zabbix for masters |
| | | hosts: masters |
| | | gather_facts: true |
| | | become: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | vars: |
| | | zabbix_auto_registration_keyword: OCP Master |
| | | roles: |
| | | - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client" |
| | | when: install_zabbix |
| | | - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client-openshift-master" |
| | | when: install_zabbix |
| | | - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client-openshift-node" |
| | | when: install_zabbix |
| | | tags: |
| | | - env-specific |
| | | - install_zabbix |
| | | |
| | | - name: Zabbix for nodes |
| | | hosts: |
| | | - nodes |
| | | - infranodes |
| | | gather_facts: true |
| | | become: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | vars: |
| | | zabbix_auto_registration_keyword: OCP Node |
| | | zabbix_token: "{{ hostvars[groups['masters'][0]].zabbix_token }}" |
| | | hawkular_route: "{{ hostvars[groups['masters'][0]].hawkular_route }}" |
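| | | # zabbix_token and hawkular_route are read from facts on the first master,
| | | # presumably set during the "Zabbix for masters" play above; that play must
| | | # therefore run before this one.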
| | | roles: |
| | | - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client" |
| | | when: install_zabbix |
| | | - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client-openshift-node" |
| | | when: install_zabbix |
| | | tags: |
| | | - env-specific |
| | | - install_zabbix |
| | | |
| | | - name: Zabbix for all other hosts (bastion, support, ...) |
| | | hosts: |
| | | - bastions |
| | | - support |
| | | gather_facts: true |
| | | become: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | vars: |
| | | zabbix_auto_registration_keyword: OCP Host |
| | | roles: |
| | | - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client" |
| | | when: install_zabbix |
| | | tags: |
| | | - env-specific |
| | | - install_zabbix |
| | | |
| | | - name: Run diagnostics from master |
| | | hosts: masters |
| | | become: yes |
| | | gather_facts: False |
| | | run_once: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tasks: |
| | | # start supporting this only for OCP >= 3.9 |
| | | - when: |
| | | - osrelease is version_compare('3.9', '>=') |
| | | - run_ocp_diagnostics | d(true) | bool
| | | block: |
| | | # this command should return 0 (no error) |
| | | - name: Run oc adm diagnostics |
| | | shell: oc adm diagnostics > /tmp/diagnostics.log |
| | | register: r_diag |
| | | retries: 2 |
| | | until: r_diag is succeeded |
| | | ignore_errors: true |
| | | |
| | | - name: Ensure /tmp/openshift exists
| | | file: |
| | | path: /tmp/openshift |
| | | state: directory |
| | | |
| | | # oc adm diagnostics logs everything in /tmp/openshift |
| | | - name: Create an archive of diagnostics output logs |
| | | archive: |
| | | path: |
| | | - /tmp/openshift |
| | | - /tmp/diagnostics.log |
| | | dest: /tmp/diagnostics.tar.gz |
| | | |
| | | - name: Fetch the diagnostic archive and logs |
| | | fetch: |
| | | src: /tmp/diagnostics.tar.gz |
| | | dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{project_tag}}_diagnostics.tar.gz" |
| | | flat: true |
| | | |
| | | - name: Report diagnostics failure |
| | | fail: |
| | | msg: "FAIL {{ project_tag }} Diagnostics" |
| | | when: r_diag is failed |
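| | | # The fetched archive can then be inspected on the control host, e.g.:
| | | #   tar -tzf workdir/<project_tag>_diagnostics.tar.gz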
| | | |
| | | - name: Configure IPA on bastion |
| | | hosts: bastions |
| | | become: yes |
| | | gather_facts: False |
| | | run_once: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tasks: |
| | | - import_role: |
| | | name: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa" |
| | | when: install_ipa_client|bool |
| | | |
| | | - name: PostSoftware flight-check |
| | | hosts: localhost |
| | | connection: local |
| | | gather_facts: false |
| | | become: false |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tags: |
| | | - post_flight_check |
| | | tasks: |
| | | - debug: |
| | | msg: "Post-Software checks completed successfully" |
| | | |
| | | - name: Gather facts |
| | | hosts: |
| | | - all |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | gather_facts: true |
| | | tags: |
| | | - ocp_report |
| | | |
| | | - name: Generate reports |
| | | hosts: localhost |
| | | connection: local |
| | | become: false |
| | | |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tags: |
| | | - ocp_report |
| | | vars: |
| | | env_all_hosts: all |
| | | tasks: |
| | | - name: get repo version used to deploy |
| | | command: git rev-parse HEAD |
| | | args: |
| | | chdir: "{{ ANSIBLE_REPO_PATH }}" |
| | | register: ansible_agnostic_deployer_head |
| | | |
| | | - name: Gather ec2 facts |
| | | ec2_remote_facts: |
| | | aws_access_key: "{{ aws_access_key_id }}" |
| | | aws_secret_key: "{{ aws_secret_access_key }}" |
| | | region: "{{ aws_region }}" |
| | | when: |
| | | - ocp_report |
| | | - cloud_provider == 'ec2' |
| | | - name: Generate report |
| | | template: |
| | | src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/ocp_report.adoc.j2" |
| | | dest: "{{ ANSIBLE_REPO_PATH }}/workdir/ocp_report_{{ env_type }}-{{ guid }}.adoc" |
| | | when: |
| | | - ocp_report |
| | | - cloud_provider == 'ec2' |
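| | | # To actually generate the report, enable both the tag and the variable on
| | | # the command line, e.g. (sketch):
| | | #   ansible-playbook ... --tags ocp_report -e ocp_report=true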
New file |
| | |
| | | - name: Step 000 Pre Infrastructure |
| | | hosts: localhost |
| | | connection: local |
| | | become: false |
| | | vars_files: |
| | | - "./env_vars.yml" |
| | | - "./env_secret_vars.yml" |
| | | tags: |
| | | - step000
| | | - pre_infrastructure |
| | | tasks: |
| | | - debug: |
| | | msg: "Step 000 Pre Infrastructure - Dummy action" |
New file |
| | |
| | | --- |
| | | - name: Step 003 - Create env key |
| | | hosts: localhost |
| | | connection: local |
| | | gather_facts: false |
| | | become: false |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tags: |
| | | - step003 |
| | | - generate_env_keys |
| | | tasks: |
| | | - name: Generate SSH keys |
| | | shell: ssh-keygen -b 2048 -t rsa -f "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" -q -N "" |
| | | args: |
| | | creates: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" |
| | | when: set_env_authorized_key |
| | | |
| | | - name: fix private key permissions
| | | file: |
| | | path: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" |
| | | mode: 0400 |
| | | when: set_env_authorized_key |
| | | |
| | | - name: Generate SSH pub key |
| | | shell: ssh-keygen -y -f "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" > "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}.pub" |
| | | args: |
| | | creates: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}.pub" |
| | | when: set_env_authorized_key |
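| | | # After these tasks the workdir holds the key pair, e.g. (illustrative key
| | | # name): workdir/mykey (private, mode 0400) and workdir/mykey.pub (public).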
| | | |
| | | # Cloudformation template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }} |
| | | |
| | | - name: Configure all hosts with Repositories, Common Files and Set environment key |
| | | hosts: |
| | | - all:!windows |
| | | become: true |
| | | gather_facts: False |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tags: |
| | | - step004 |
| | | - common_tasks |
| | | pre_tasks: |
| | | - name: add rhel-7-server-ansible-2.4-rpms repo for OCP 3.9 and newer
| | | set_fact: |
| | | rhel_repos: "{{ rhel_repos + ['rhel-7-server-ansible-2.4-rpms'] }}" |
| | | when: osrelease | version_compare('3.9', '>=') |
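| | | # rhel_repos itself comes from env_vars.yml; an illustrative value:
| | | #   rhel_repos:
| | | #     - rhel-7-server-rpms
| | | #     - rhel-7-server-extras-rpms
| | | #     - rhel-7-server-ose-{{ repo_version }}-rpms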
| | | roles: |
| | | - role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories" |
| | | when: repo_method is defined |
| | | - role: "{{ ANSIBLE_REPO_PATH }}/roles/common" |
| | | when: install_common|bool |
| | | - role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key" |
| | | when: set_env_authorized_key|bool |
| | | |
| | | - name: Install Let's Encrypt Wildcard Certificates |
| | | hosts: bastions |
| | | run_once: true |
| | | become: true |
| | | gather_facts: false |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tasks: |
| | | - import_role: |
| | | name: "{{ ANSIBLE_REPO_PATH }}/roles/install-lets-encrypt-certs" |
| | | tags: lets_encrypt |
| | | when: install_lets_encrypt_certificates|bool |
| | | |
| | | - name: Configuring Bastion Hosts |
| | | hosts: bastions |
| | | become: true |
| | | gather_facts: false |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | roles: |
| | | - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion", when: 'install_bastion' } |
| | | tags: |
| | | - step004 |
| | | - bastion_tasks |
| | | |
| | | - name: PreSoftware flight-check |
| | | hosts: localhost |
| | | connection: local |
| | | gather_facts: false |
| | | become: false |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tags: |
| | | - flight_check |
| | | tasks: |
| | | - debug: |
| | | msg: "Pre-Software checks completed successfully" |
| | | |
| | | - name: Copy Let's Encrypt certificates
| | | hosts: masters |
| | | gather_facts: False |
| | | become: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | tags: |
| | | - env-specific |
| | | - lets_encrypt
| | | tasks: |
| | | # https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem.txt |
| | | - name: Copy over the letsencrypt certificate |
| | | copy: |
| | | src: ./files/lets-encrypt-x3-cross-signed.pem.txt |
| | | dest: /etc/origin/master/ |
New file |
| | |
| | | --- |
| | | # Run the Infra steps (step000, step001, step002)
| | | |
| | | ################################################################################ |
| | | ################################################################################ |
| | | ############ Step 000 Pre Infrastructure Deploy Tasks |
| | | ################################################################################ |
| | | ################################################################################ |
| | | |
| | | - include: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/pre_infra.yml" |
| | | tags: |
| | | - step000 |
| | | - pre_infra_tasks |
| | | |
| | | ################################################################################# |
| | | ################################################################################# |
| | | ############# Step 001 Deploy Infrastructure |
| | | ################################################################################# |
| | | ################################################################################# |
| | | |
| | | - include: "{{ ANSIBLE_REPO_PATH }}/cloud_providers/{{ cloud_provider }}_infrastructure_deployment.yml" |
| | | tags: |
| | | - step001 |
| | | - deploy_infrastructure |
| | | |
| | | # Before continuing this playbook, assert that there is at least one new node |
| | | - hosts: localhost |
| | | connection: local |
| | | gather_facts: false |
| | | become: false |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | tasks: |
| | | - name: Assert new_node_instance_count and node_instance_count are set up properly.
| | | tags: assert_new_node |
| | | assert: |
| | | that: |
| | | - (groups['nodes'] | length) == (node_instance_count|int) |
| | | - groups['newnodes'] | length > 0 |
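| | | # Example (hypothetical values): with node_instance_count=3, the 'nodes'
| | | # group must contain exactly the 3 pre-existing nodes and 'newnodes' at
| | | # least one freshly tagged host; otherwise the scaleup aborts here.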
| | | |
| | | - name: Ensure one bastion is present |
| | | assert: |
| | | that: groups['bastions'] | length > 0 |
| | | |
| | | ################################################################################# |
| | | ################################################################################# |
| | | ############# Step 002 Post Infrastructure Deploy Tasks |
| | | ################################################################################# |
| | | ################################################################################# |
| | | |
| | | - include: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/post_infra.yml" |
| | | tags: |
| | | - step002 |
| | | - post_infra_tasks |
| | | |
| | | ################################################################################# |
| | | ################################################################################# |
| | | ############## Step 003 Pre Software Deploy Tasks |
| | | ################################################################################# |
| | | ################################################################################# |
| | | # Run pre-software steps on the new nodes, or on all hosts if we cannot target just the new nodes (pre_software playbook)
| | | |
| | | |
| | | - name: Configure all hosts with Repositories, Common Files and Set environment key |
| | | hosts: |
| | | - "newnodes" |
| | | become: true |
| | | gather_facts: False |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tags: |
| | | - step004 |
| | | - common_tasks |
| | | roles: |
| | | - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories", when: 'repo_method is defined' } |
| | | - { role: "{{ ANSIBLE_REPO_PATH }}/roles/common", when: 'install_common' } |
| | | - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key", when: 'set_env_authorized_key' } |
| | | |
| | | |
| | | - name: PreSoftware flight-check |
| | | hosts: localhost |
| | | connection: local |
| | | gather_facts: false |
| | | become: false |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tags: |
| | | - flight_check |
| | | tasks: |
| | | - debug: |
| | | msg: "Pre-Software checks completed successfully" |
| | | |
| | | # Run openshift-node role on the new hosts |
| | | |
| | | - name: Configuring openshift-nodes |
| | | gather_facts: False |
| | | become: yes |
| | | |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | hosts: |
| | | - "newnodes" |
| | | roles: |
| | | - "{{ ANSIBLE_REPO_PATH }}/roles/openshift-node" |
| | | tags: |
| | | - openshift_node_tasks |
| | | |
| | | - name: Step 00xxxxx bastion preparation for OpenShift deployment |
| | | hosts: bastions |
| | | become: true |
| | | gather_facts: False |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tags: |
| | | - openshift_host_templates |
| | | tasks: |
| | | - name: generate ansible hosts file, keep it under workdir |
| | | template: |
| | | src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/hosts_template.j2" |
| | | dest: "{{ ANSIBLE_REPO_PATH }}/workdir/scaleup_hosts-{{ env_type }}-{{ guid }}" |
| | | delegate_to: localhost |
| | | become: false |
| | | tags: |
| | | - generate_ansible_hosts_file |
| | | - name: Copy over ansible hosts file |
| | | copy: |
| | | src: "{{ ANSIBLE_REPO_PATH }}/workdir/scaleup_hosts-{{ env_type }}-{{ guid }}" |
| | | dest: /etc/ansible/scaleup_hosts |
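| | | # The same scaleup can also be run by hand from the bastion (sketch):
| | | #   ansible-playbook -i /etc/ansible/scaleup_hosts \
| | | #     /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-node/scaleup.yml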
| | | |
| | | ## Run Scaleup playbook |
| | | |
| | | - name: Run OpenShift Scaleup playbook on first bastion |
| | | gather_facts: False |
| | | become: yes |
| | | hosts: bastions[0] |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | tags: |
| | | - installing_openshift |
| | | ## Change to scale up, and change inventory file |
| | | tasks: |
| | | - name: run scaleup playbook |
| | | shell: "ansible-playbook -i /etc/ansible/scaleup_hosts /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-node/scaleup.yml" |
| | | register: openshift_install_log |
| | | tags: |
| | | - openshift_installer |
| | | - name: show output of the scaleup playbook |
| | | debug: |
| | | var: openshift_install_log |
| | | verbosity: 2 |
| | | |
| | | - name: Tag freshly created nodes as regular nodes
| | | hosts: localhost |
| | | connection: local |
| | | become: False |
| | | gather_facts: False |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tags: |
| | | - generate_ansible_hosts_file |
| | | tasks: |
| | | # untag nodes, remove newnode tag |
| | | - name: Ensure the 'newnode' tag is set to 'false' on each new instance
| | | ec2_tag: |
| | | region: "{{ aws_region }}" |
| | | resource: "{{ hostvars[item].instance_id }}" |
| | | tags: |
| | | newnode: false |
| | | with_items: "{{ groups['newnodes'] }}" |
| | | when: cloud_provider == 'ec2' |
| | | |
| | | - name: Remove hosts from group newnodes |
| | | gather_facts: False |
| | | become: yes |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | hosts: newnodes |
| | | tasks: |
| | | - group: |
| | | name: newnodes |
| | | state: absent |
| | | |
| | | ## Generate new /etc/ansible/hosts file |
| | | - name: Update /etc/ansible/hosts file on bastion
| | | hosts: bastions |
| | | become: true |
| | | gather_facts: False |
| | | vars_files: |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" |
| | | - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" |
| | | tags: |
| | | - openshift_host_templates |
| | | tasks: |
| | | - debug: |
| | | msg: "WARNING: s3user credentials not set" |
| | | when: s3user_access_key is not defined |
| | | |
| | | - name: generate ansible hosts file, keep it under workdir |
| | | template: |
| | | src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/hosts_template.j2" |
| | | dest: "{{ ANSIBLE_REPO_PATH }}/workdir/hosts-{{ env_type }}-{{ guid }}" |
| | | delegate_to: localhost |
| | | become: false |
| | | tags: |
| | | - generate_ansible_hosts_file |
| | | |
| | | - name: Copy over ansible hosts file |
| | | copy: |
| | | src: "{{ ANSIBLE_REPO_PATH }}/workdir/hosts-{{ env_type }}-{{ guid }}"
| | | dest: /etc/ansible/hosts |
| | | backup: yes |
New file |
| | |
| | | --- |
| | | - name: Step 00xxxxx software |
| | | hosts: localhost |
| | | gather_facts: False |
| | | become: false |
| | | tasks: |
| | | - debug: |
| | | msg: "Software tasks Started" |
| | | |
| | | - name: Software flight-check |
| | | hosts: localhost |
| | | connection: local |
| | | gather_facts: false |
| | | become: false |
| | | tags: |
| | | - post_flight_check |
| | | tasks: |
| | | - debug: |
| | | msg: "Software checks completed successfully" |