sborenst
2018-08-27 22da266c8351cacc2e75b83bde714c445c1f3631
added ocp-multi-cloud-example
7 files modified
1294 ■■■■ changed files
ansible/configs/ocp-multi-cloud-example/README.adoc 139 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp-multi-cloud-example/env_vars.yml 197 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp-multi-cloud-example/files/cloud_providers/ec2_cloud_template.j2 9 ●●●● patch | view | raw | blame | history
ansible/configs/ocp-multi-cloud-example/post_infra.yml 878 ●●●● patch | view | raw | blame | history
ansible/configs/ocp-multi-cloud-example/post_software.yml 9 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp-multi-cloud-example/pre_software.yml 58 ●●●●● patch | view | raw | blame | history
ansible/roles/set-repositories/tasks/satellite-repos.yml 4 ●●●● patch | view | raw | blame | history
ansible/configs/ocp-multi-cloud-example/README.adoc
@@ -76,81 +76,18 @@
You can run the playbook with the following arguments to overwrite the default variable values:
[source,bash]
----
REGION=us-east-1
KEYNAME=ocpkey
GUID=testocpworkshop1
ENVTYPE="ocp-workshop"
CLOUDPROVIDER=ec2
HOSTZONEID='Z186MFNM7DX4NF'
REPO_PATH='https://admin.example.com/repos/ocp/3.6/'
BASESUFFIX='.openshift.opentlc.com'
NODE_COUNT=2
REPO_VERSION=3.6
DEPLOYER_REPO_PATH=`pwd`
OSRELEASE=3.6.173.0.21
ansible-playbook main.yml  -e "guid=${GUID}" -e "env_type=${ENVTYPE}" \
-e "osrelease=${OSRELEASE}" -e "repo_version=${REPO_VERSION}" \
  -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}" \
  -e "HostedZoneId=${HOSTZONEID}" -e "key_name=${KEYNAME}" \
  -e "subdomain_base_suffix=${BASESUFFIX}" \
       -e "bastion_instance_type=t2.large" -e "master_instance_type=c4.xlarge" \
       -e "infranode_instance_type=c4.4xlarge" -e "node_instance_type=c4.4xlarge" \
       -e "nfs_instance_type=m3.large" -e "node_instance_count=5" \
       -e "email=name@example.com" \
  -e "install_idm=htpasswd" -e "software_to_deploy=openshift" \
  -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" -e "own_repo_path=${REPO_PATH}" --skip-tags=remove_self_provisioners
----
=== Satellite version
----
REGION=us-east-1
KEYNAME=ocpkey
GUID=dev-na1
ENVTYPE="ocp-workshop"
CLOUDPROVIDER=ec2
HOSTZONEID='Z186MFNM7DX4NF'
BASESUFFIX='.openshift.opentlc.com'
NODE_COUNT=2
REPO_VERSION=3.5
DEPLOYER_REPO_PATH=`pwd`
LOG_FILE=/tmp/${ENVTYPE}-${GUID}.log
IPAPASS=$5
if [ "$1" = "provision" ] ; then
echo "Provisioning: ${STACK_NAME}"  1>> $LOG_FILE 2>> $LOG_FILE
ansible-playbook ${DEPLOYER_REPO_PATH}/main.yml  \
    -e "guid=${GUID}" -e "env_type=${ENVTYPE}" -e "key_name=${KEYNAME}" \
    -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}" -e "HostedZoneId=${HOSTZONEID}" \
    -e "subdomain_base_suffix=${BASESUFFIX}" \
    -e "bastion_instance_type=t2.large" -e "master_instance_type=c4.xlarge" \
    -e "infranode_instance_type=c4.4xlarge" -e "node_instance_type=c4.4xlarge" \
    -e "support_instance_type=c4.xlarge" -e "node_instance_count=${NODE_COUNT}" \
    -e "ipa_host_password=${IPAPASS}" -e "install_idm=ldap"  \
    -e "repo_method=satellite" -e "repo_version=${REPO_VERSION}" \
    -e "email=name@example.com" \
    -e "software_to_deploy=openshift" -e "osrelease=3.5.5.15" -e "docker_version=1.12.6" \
    -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" 1>> $LOG_FILE 2>> $LOG_FILE
----
=== Advanced Development Workshop
[source,bash]
----
REGION=ap-southeast-2
KEYNAME=ocpkey
GUID=sborenstest1
GUID=sborenstest7
ENVTYPE="ocp-multi-cloud-example"
CLOUDPROVIDER=ec2
HOSTZONEID='Z3IHLWJZOU9SRT'
#REPO_PATH='https://admin.example.com/repos/ocp/3.5/'
REPO_PATH='http://admin.example.com/repos/ocp/3.10.14'
NODE_COUNT=2
DEPLOYER_REPO_PATH=`pwd`
BASESUFFIX='.example.opentlc.com'
REPO_VERSION=3.9
REPO_VERSION=3.10
ansible-playbook ${DEPLOYER_REPO_PATH}/main.yml \
  -e "guid=${GUID}" \
@@ -162,74 +99,14 @@
  -e "infranode_instance_type=c4.4xlarge" -e "node_instance_type=c4.4xlarge" \
  -e "nfs_instance_type=t2.large" -e "node_instance_count=${NODE_COUNT}" \
  -e "install_idm=htpasswd" -e "software_to_deploy=openshift" \
  -e "email=name@example.com" \
  -e "email=sborenst@redhat.com" \
  -e "own_repo_path=${REPO_PATH}" -e"repo_method=file" -e"ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
  -e "osrelease=${REPO_VERSION}" -e "repo_version=${REPO_VERSION}" -e "docker_version=1.12.6" \
  -e "osrelease=${REPO_VERSION}" -e "repo_version=${REPO_VERSION}" -e "docker_version=1.13.1" \
  -e install_lets_encrypt_certificates=false -e user_vols=100\
    --skip-tags=remove_self_provisioners,opentlc-integration,install_zabbix
   --tags=step000,step001,step002,step003
----
=== IPA registration
You can either provide `ipa_host_password` or a couple `ipa_kerberos_user`/`ipa_kerberos_password` to register the host to the ipa server. See link:../../roles/bastion-opentlc-ipa[roles/bastion-opentlc-ipa].
=== CNS/Glusterfs
If you set this variable, 3 support nodes will be deployed and used for glusterfs:
----
-e install_glusterfs=true
----
NOTE: This will discard NFS PVs for logging (elasticsearch) and metrics (cassandra). Instead storage for those pods will be 'EmptyDir'. Proper persistent storage setup is left to user as a post-install step.
Tested on OCP 3.7. See examples in `scripts/examples`
=== Scale Up
Use the `scaleup.yml` playbook. Increase `node_instance_count` and `new_node_instance_count` accordingly. For example, if your previous `node_instance_count` was 2:
[source,bash]
----
REGION=us-west-1
KEYNAME=ocpkey
GUID=na1
ENVTYPE="ocp-workshop"
CLOUDPROVIDER=ec2
HOSTZONEID='Z186MFNM7DX4NF'
REPO_PATH='https://admin.example.com/repos/ocp/3.5/'
MINOR_VERSION="3.5.5.15"
INSTALLIPA=false
BASESUFFIX='.openshift.opentlc.com'
REPO_VERSION=3.5
NODE_COUNT=4
NEW_NODE_COUNT=2
ansible-playbook ./configs/${ENVTYPE}/scaleup.yml \
                         -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
                         -e "HostedZoneId=${HOSTZONEID}" \
                         -e "bastion_instance_type=t2.large" \
                         -e "cloud_provider=${CLOUDPROVIDER}" \
                         -e "guid=${GUID}" \
                         -e "infranode_instance_type=c4.4xlarge" \
                         -e "install_idm=htpasswd" \
                         -e "install_ipa_client=${INSTALLIPA}" \
                         -e "nfs_instance_type=m3.large" \
                         -e "osrelease=${MINOR_VERSION}" \
                         -e "own_repo_path=${REPO_PATH}" \
                         -e "email=name@example.com" \
                         -e "repo_method=file" \
                         -e "subdomain_base_suffix=${BASESUFFIX}" \
                         --skip-tags=remove_self_provisioners,install_zabbix \
                         -e "aws_region=${REGION}" \
                         -e "docker_version=1.12.6" \
                         -e "env_type=${ENVTYPE}" \
                         -e "key_name=${KEYNAME}" \
                         -e "master_instance_type=c4.xlarge" \
                         -e "node_instance_count=${NODE_COUNT}" \
                         -e "new_node_instance_count=${NEW_NODE_COUNT}" \
                         -e "node_instance_type=c4.4xlarge" \
                         -e "repo_version=${REPO_VERSION}"
----
=== To Delete an environment
== To Delete an environment
----
REGION=us-west-1
KEYNAME=ocp-workshop-openshift
ansible/configs/ocp-multi-cloud-example/env_vars.yml
@@ -18,6 +18,202 @@
repo_version: "3.9"
repo_method: file # Other Options are: file, satellite and rhn
cached_packages:
  - cockpit-docker-169-1.el7.x86_64
  - cockpit-system-154-3.el7.noarch
  - cockpit-bridge-154-3.el7.x86_64
  - trousers-0.3.14-2.el7.x86_64
  - nettle-2.7.1-8.el7.x86_64
  - libproxy-0.4.11-11.el7.x86_64
  - libmodman-2.0.1-8.el7.x86_64
  - json-glib-1.2.6-1.el7.x86_64
  - gsettings-desktop-schemas-3.24.1-2.el7_5.x86_64
  - gnutls-3.3.26-9.el7.x86_64
  - glib-networking-2.50.0-1.el7.x86_64
  - cockpit-ws-154-3.el7.x86_64
  - python-firewall-0.4.4.4-14.el7.noarch
  - ipset-libs-6.29-1.el7.x86_64
  - ipset-6.29-1.el7.x86_64
  - firewalld-filesystem-0.4.4.4-14.el7.noarch
  - firewalld-0.4.4.4-14.el7.noarch
  - python-slip-dbus-0.4.0-4.el7.noarch
  - python-slip-0.4.0-4.el7.noarch
  - ebtables-2.0.10-16.el7.x86_64
  - ansible-2.4.6.0-1.el7ae.noarch
  - python-paramiko-2.1.1-4.el7.noarch
  - sshpass-1.06-2.el7.x86_64
  - python-pycparser-2.14-1.el7.noarch
  - python-ply-3.4-11.el7.noarch
  - python-passlib-1.6.5-2.el7.noarch
  - python-idna-2.4-1.el7.noarch
  - python-httplib2-0.9.2-1.el7.noarch
  - python-enum34-1.0.4-1.el7.noarch
  - python-cffi-1.6.0-5.el7.x86_64
  - python2-pyasn1-0.1.9-7.el7.noarch
  - python2-jmespath-0.9.0-4.el7ae.noarch
  - python2-cryptography-1.7.2-2.el7.x86_64
  - device-mapper-multipath-libs-0.4.9-119.el7.x86_64
  - device-mapper-multipath-0.4.9-119.el7.x86_64
  - iscsi-initiator-utils-iscsiuio-6.2.0.874-7.el7.x86_64
  - iscsi-initiator-utils-6.2.0.874-7.el7.x86_64
  - time-1.7-45.el7.x86_64
  - redhat-lsb-core-4.1-27.el7.x86_64
  - mailx-12.5-19.el7.x86_64
  - cryptsetup-1.7.4-4.el7.x86_64
  - ceph-common-0.94.5-2.el7.x86_64
  - spax-1.5.2-13.el7.x86_64
  - redhat-lsb-submod-security-4.1-27.el7.x86_64
  - python-rbd-0.94.5-2.el7.x86_64
  - python-rados-0.94.5-2.el7.x86_64
  - patch-2.7.1-10.el7_5.x86_64
  - m4-1.4.16-10.el7.x86_64
  - librbd1-0.94.5-2.el7.x86_64
  - librados2-0.94.5-2.el7.x86_64
  - hdparm-9.43-5.el7.x86_64
  - ed-1.9-4.el7.x86_64
  - cups-client-1.6.3-35.el7.x86_64
  - boost-thread-1.53.0-27.el7.x86_64
  - boost-system-1.53.0-27.el7.x86_64
  - bc-1.06.95-13.el7.x86_64
  - at-3.1.13-23.el7.x86_64
  - psmisc-22.20-15.el7.x86_64
  - glusterfs-libs-3.8.4-53.el7.x86_64
  - glusterfs-fuse-3.8.4-53.el7.x86_64
  - glusterfs-client-xlators-3.8.4-53.el7.x86_64
  - glusterfs-3.8.4-53.el7.x86_64
  - attr-2.4.46-13.el7.x86_64
  - atomic-openshift-node-3.10.14-1.git.0.ba8ae6d.el7.x86_64
  - libnetfilter_cttimeout-1.0.0-6.el7.x86_64
  - libnetfilter_cthelper-1.0.0-9.el7.x86_64
  - conntrack-tools-1.4.4-3.el7_3.x86_64
  - atomic-openshift-hyperkube-3.10.14-1.git.0.ba8ae6d.el7.x86_64
  - nfs-utils-1.3.0-0.54.el7.x86_64
  - libnetfilter_queue-1.0.2-2.el7_2.x86_64
  - quota-nls-4.01-17.el7.noarch
  - quota-4.01-17.el7.x86_64
  - atomic-openshift-3.10.14-1.git.0.ba8ae6d.el7.x86_64
  - atomic-openshift-clients-3.10.14-1.git.0.ba8ae6d.el7.x86_64
  - libverto-libevent-0.2.5-4.el7.x86_64
  - libini_config-1.3.1-29.el7.x86_64
  - gssproxy-0.7.0-17.el7.x86_64
  - tcp_wrappers-7.6-77.el7.x86_64
  - socat-1.7.3.2-2.el7.x86_64
  - samba-client-libs-4.7.1-6.el7.x86_64
  - libpath_utils-0.2.1-29.el7.x86_64
  - libnfsidmap-0.25-19.el7.x86_64
  - cifs-utils-6.2-10.el7.x86_64
  - samba-common-libs-4.7.1-6.el7.x86_64
  - rpcbind-0.2.0-44.el7.x86_64
  - libwbclient-4.7.1-6.el7.x86_64
  - libtirpc-0.2.4-0.10.el7.x86_64
  - libldb-1.2.2-1.el7.x86_64
  - libbasicobjects-0.1.1-29.el7.x86_64
  - cups-libs-1.6.3-35.el7.x86_64
  - samba-common-4.7.1-6.el7.noarch
  - libtevent-0.9.33-2.el7.x86_64
  - libtdb-1.3.15-1.el7.x86_64
  - libtalloc-2.1.10-1.el7.x86_64
  - libref_array-0.1.5-29.el7.x86_64
  - libcollection-0.7.0-29.el7.x86_64
  - keyutils-1.5.8-3.el7.x86_64
  - avahi-libs-0.6.31-19.el7.x86_64
  - dnsmasq-2.76-5.el7.x86_64
  - atomic-openshift-excluder-3.10.14-1.git.0.ba8ae6d.el7.noarch
  # - atomic-openshift-docker-excluder-3.10.14-1.git.0.ba8ae6d.el7.noarch
  # - python-docker-2.4.2-1.3.el7.noarch
  - python-websocket-client-0.32.0-116.el7.noarch
  # - python-docker-pycreds-1.10.6-4.el7.noarch
  # - docker-1.13.1-68.gitdded712.el7.x86_64
  - skopeo-containers-0.1.31-1.dev.gitae64ff7.el7.x86_64
  # - docker-rhel-push-plugin-1.13.1-68.gitdded712.el7.x86_64
  # - docker-common-1.13.1-68.gitdded712.el7.x86_64
  # - docker-client-1.13.1-68.gitdded712.el7.x86_64
  - container-storage-setup-0.10.0-1.gitdf0dcd5.el7.noarch
  - oci-register-machine-0-6.git2b44233.el7.x86_64
  - lvm2-2.02.177-4.el7.x86_64
  - libaio-0.3.109-13.el7.x86_64
  - device-mapper-persistent-data-0.7.3-3.el7.x86_64
  - python-pytoml-0.1.14-1.git7dea353.el7.noarch
  - lvm2-libs-2.02.177-4.el7.x86_64
  - container-selinux-2.66-1.el7.noarch
  - atomic-registries-1.22.1-22.git5a342e3.el7.x86_64
  - yajl-2.0.4-4.el7.x86_64
  - oci-umount-2.3.3-3.gite3c9055.el7.x86_64
  - oci-systemd-hook-0.1.16-1.git05bd9a0.el7.x86_64
  - device-mapper-event-libs-1.02.146-4.el7.x86_64
  - device-mapper-event-1.02.146-4.el7.x86_64
  - yum-plugin-versionlock-1.1.31-46.el7_5.noarch
  # - tcpdump-4.9.2-3.el7.x86_64
  # - sysstat-10.1.5-13.el7.x86_64
  # - strace-4.12-6.el7.x86_64
  # - iptables-services-1.4.21-24.1.el7_5.x86_64
  # - httpd-tools-2.4.6-80.el7_5.1.x86_64
  # - bridge-utils-1.5-9.el7.x86_64
  # - tmux-1.8-4.el7.x86_64
  # - ntp-4.2.6p5-28.el7.x86_64
  # - git-1.8.3.1-14.el7_5.x86_64
  # - bind-utils-9.9.4-61.el7.x86_64
  # - perl-TermReadKey-2.30-20.el7.x86_64
  # - perl-Git-1.8.3.1-14.el7_5.noarch
  # - ntpdate-4.2.6p5-28.el7.x86_64
  # - lm_sensors-libs-3.4.0-4.20160601gitf9185e5.el7.x86_64
  # - libpcap-1.5.3-11.el7.x86_64
  # - libgnome-keyring-3.12.0-1.el7.x86_64
  # - libevent-2.0.21-4.el7.x86_64
  # - iptables-1.4.21-24.1.el7_5.x86_64
  # - bind-libs-9.9.4-61.el7.x86_64
  # - autogen-libopts-5.18-5.el7.x86_64
  # - perl-Error-0.17020-2.el7.noarch
  # - apr-util-1.5.2-6.el7.x86_64
  # - apr-1.4.8-3.el7_4.1.x86_64
  # - wget-1.14-15.el7_4.1.x86_64
  # - vim-enhanced-7.4.160-4.el7.x86_64
  # - vim-common-7.4.160-4.el7.x86_64
  # - gpm-libs-1.20.7-5.el7.x86_64
  # - bash-completion-2.1-6.el7.noarch
  # - vim-filesystem-7.4.160-4.el7.x86_64
  # - perl-5.16.3-292.el7.x86_64
  # - perl-threads-shared-1.43-6.el7.x86_64
  # - perl-threads-1.87-4.el7.x86_64
  # - perl-Pod-Simple-3.28-4.el7.noarch
  # - perl-Getopt-Long-2.40-3.el7.noarch
  # - perl-Filter-1.49-3.el7.x86_64
  # - perl-File-Temp-0.23.01-3.el7.noarch
  # - perl-File-Path-2.09-2.el7.noarch
  # - perl-Time-Local-1.2300-2.el7.noarch
  # - perl-Time-HiRes-1.9725-3.el7.x86_64
  # - perl-Text-ParseWords-3.29-4.el7.noarch
  # - perl-Storable-2.45-3.el7.x86_64
  # - perl-Socket-2.010-4.el7.x86_64
  # - perl-Scalar-List-Utils-1.27-248.el7.x86_64
  # - perl-Pod-Usage-1.63-3.el7.noarch
  # - perl-Pod-Perldoc-3.20-4.el7.noarch
  # - perl-podlators-2.5.1-3.el7.noarch
  # - perl-Pod-Escapes-1.04-292.el7.noarch
  # - perl-PathTools-3.40-5.el7.x86_64
  # - perl-parent-0.225-244.el7.noarch
  # - perl-macros-5.16.3-292.el7.x86_64
  # - perl-libs-5.16.3-292.el7.x86_64
  # - perl-HTTP-Tiny-0.033-3.el7.noarch
  # - perl-Exporter-5.68-3.el7.noarch
  # - perl-Encode-2.51-7.el7.x86_64
  # - perl-constant-1.27-2.el7.noarch
  # - perl-Carp-1.26-244.el7.noarch
#  - python-qpid-proton-0.16.0-12.el7sat.x86_64
  # - python-gofer-proton-2.7.8-1.el7.noarch
  # - katello-host-tools-fact-plugin-3.1.0-2.el7sat.noarch
  # - katello-host-tools-3.1.0-2.el7sat.noarch
  # - katello-agent-3.1.0-2.el7sat.noarch
  # - qpid-proton-c-0.16.0-12.el7sat.x86_64
  # - python-pulp-rpm-common-2.13.4.8-1.el7sat.noarch
  # - python-pulp-common-2.13.4.10-1.el7sat.noarch
  # - python-pulp-agent-lib-2.13.4.10-1.el7sat.noarch
  # - python-isodate-0.5.0-5.pulp.el7sat.noarch
  # - python-gofer-2.7.8-1.el7.noarch
  # # - pulp-rpm-handlers-2.13.4.8-1.el7sat.noarch
  # - gofer-2.7.8-1.el7.noarch
#If using repo_method: satellite, you must set these values as well.
# satellite_url: https://satellite.example.com
# satellite_org: Sat_org_name
@@ -260,6 +456,7 @@
  - rhel-7-server-extras-rpms
  - rhel-7-server-ose-{{repo_version}}-rpms
  - rhel-7-fast-datapath-rpms
  - rhel-7-server-ansible-2.4-rpms
# use_subscription_manager: false
# use_own_repos: true
ansible/configs/ocp-multi-cloud-example/files/cloud_providers/ec2_cloud_template.j2
@@ -377,6 +377,8 @@
{% endfor %}
{% endfor %}
  Route53User:
    Type: AWS::IAM::User
    Properties:
@@ -397,7 +399,7 @@
      Properties:
        UserName:
          Ref: Route53User
{% if s3bucket | d(false) | bool %}
  RegistryS3:
    Type: "AWS::S3::Bucket"
    Properties:
@@ -463,12 +465,15 @@
                  - Arn
      Bucket:
        Ref: RegistryS3
{% endif %}
Outputs:
  Route53internalzoneOutput:
    Description: The ID of the internal route 53 zone
    Value:
      Ref: zoneinternalidns
{% if s3bucket | d(false) | bool %}
  S3User:
    Value:
      Ref: S3User
@@ -483,6 +488,8 @@
        - S3UserAccessKey
        - SecretAccessKey
    Description: IAM User for RegistryS3
{% endif %}
  Route53User:
    Value:
      Ref: Route53User
ansible/configs/ocp-multi-cloud-example/post_infra.yml
@@ -9,7 +9,9 @@
    - step002
    - post_infrastructure
  tasks:
    - name: Dummy tasks
      debug:
        msg: "post infra dummy task"
    # - name: Job Template to launch a Job Template with update on launch inventory set
    #   uri:
    #     url: "https://{{ ansible_tower_ip }}/api/v1/job_templates/{{ job_template_id }}/launch/"
@@ -26,440 +28,440 @@
    #     HEADER_Content-Type: "application/json"
    #     status_code: 200, 201
    #   when: tower_run == 'true'
    - name: get S3User credentials from stack outputs
      set_fact:
        s3user: "{{ cloudformation_out_final.stack_outputs.S3User }}"
        s3user_access_key: "{{ cloudformation_out_final.stack_outputs.S3UserAccessKey }}"
        s3user_secret_access_key: "{{ cloudformation_out_final.stack_outputs.S3UserSecretAccessKey }}"
      when:
        - cloudformation_out_final is defined
        - cloudformation_out_final.stack_outputs.S3UserAccessKey is defined
        - cloudformation_out_final.stack_outputs.S3UserSecretAccessKey is defined
      tags:
        - provision_cf_template
    - name: write down s3user credentials
      copy:
        dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.s3user.credentials"
        content: |
          * S3 Bucket for registry: {{s3user}}
          ** S3User access key: {{s3user_access_key}}
          ** S3User secret key: {{s3user_secret_access_key}}
      when: s3user_access_key is defined
    - name: get Route53User credentials from stack outputs
      set_fact:
        route53user: "{{ cloudformation_out_final.stack_outputs.Route53User }}"
        route53user_access_key: "{{ cloudformation_out_final.stack_outputs.Route53UserAccessKey }}"
        route53user_secret_access_key: "{{ cloudformation_out_final.stack_outputs.Route53UserSecretAccessKey }}"
      when:
        - cloudformation_out_final is defined
        - cloudformation_out_final.stack_outputs.Route53UserAccessKey is defined
        - cloudformation_out_final.stack_outputs.Route53UserSecretAccessKey is defined
      tags:
        - provision_cf_template
    - name: write down Route53User credentials
      copy:
        dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.route53user.credentials"
        content: |
          * Route53 User for Let's Encrypt: {{ route53user }}
          ** Route53User access key: {{ route53user_access_key }}
          ** Route53User secret key: {{ route53user_secret_access_key }}
      when: route53user_access_key is defined
- name: Detect and map data disks (support) for Azure
  hosts: support
  become: true
  gather_facts: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - name: test if docker_device file is present (previous run)
      stat:
        path: /var/preserve/docker_device
      register: rfile
    - name: Get docker device
      changed_when: false
      vars:
        query: "[?name == 'support']|[].volumes[?purpose=='docker'].lun|[0][0]"
      shell: >
        parted -m /dev/sda print all 2>/dev/null
        | grep unknown
        | grep /dev/sd
        | cut -d':' -f1
        | sed -n '{{ (instances|json_query(query)|int) + 1}}p'
      register: result
      when:
        - cloud_provider == 'azure'
        - instances is defined
        # docker_device will be present on support nodes only when glusterfs is installed
        - install_glusterfs | bool
        - not rfile.stat.exists
    - name: set fact for docker_device
      set_fact:
        docker_device: "{{ result.stdout }}"
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - install_glusterfs | bool
        - not rfile.stat.exists
    - name: Write down docker_device for idempotency
      copy:
        dest: /var/preserve/docker_device
        content: "{{ docker_device }}"
        force: no
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - install_glusterfs | bool
        - not rfile.stat.exists
    # idempotency
    - name: get device_name from file (from previous run)
      slurp:
        src: /var/preserve/docker_device
      register: slurp_result
      when:
        - cloud_provider == 'azure'
        - install_glusterfs | bool
        - rfile.stat.exists
    - name: set fact for docker_device (from previous run)
      set_fact:
        docker_device: "{{ slurp_result.content|b64decode }}"
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - install_glusterfs | bool
        - rfile.stat.exists
    # glusterfs device
    - name: test if glusterfs_device_name file is present (previous run)
      stat:
        path: /var/preserve/glusterfs_device_name
      register: rfile
    - name: Get glusterfs device
      changed_when: false
      vars:
        query: "[?name == 'support']|[].volumes[?purpose=='glusterfs'].lun|[0][0]"
      shell: >
        parted -m /dev/sda print all 2>/dev/null
        | grep unknown
        | grep /dev/sd
        | cut -d':' -f1
        | sed -n '{{ (instances|json_query(query)|int) + 1}}p'
      register: result
      when:
        - install_glusterfs | bool
        - cloud_provider == 'azure'
        - instances is defined
        - not rfile.stat.exists
    - name: set fact for glusterfs_device_name
      set_fact:
        glusterfs_device_name: "{{ result.stdout }}"
      when:
        - install_glusterfs | bool
        - cloud_provider == 'azure'
        - result | succeeded
        - not rfile.stat.exists
    - name: Write down glusterfs_device_name for idempotency
      copy:
        dest: /var/preserve/glusterfs_device_name
        content: "{{ glusterfs_device_name }}"
        force: no
      when:
        - install_glusterfs | bool
        - cloud_provider == 'azure'
        - result | succeeded
        - not rfile.stat.exists
    # idempotency
    - name: get device_name from file (from previous run)
      slurp:
        src: /var/preserve/glusterfs_device_name
      register: slurp_result
      when:
        - cloud_provider == 'azure'
        - install_glusterfs | bool
        - rfile.stat.exists
    - name: set fact for glusterfs_device_name (from previous run)
      set_fact:
        glusterfs_device_name: "{{ slurp_result.content|b64decode }}"
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - install_glusterfs | bool
        - rfile.stat.exists
    # nfs_pvs
    - name: test if nfs_pvs file is present (previous run)
      stat:
        path: /var/preserve/nfs_pvs
      register: rfile
    - name: Get NFS device
      changed_when: false
      vars:
        query: "[?name == 'support']|[].volumes[?purpose=='nfs'].lun|[0][0]"
      shell: >
        parted -m /dev/sda print all 2>/dev/null
        | grep unknown
        | grep /dev/sd
        | cut -d':' -f1
        | sed -n '{{ (instances|json_query(query)|int) + 1}}p'
      register: result
      when:
        - install_nfs | bool
        - cloud_provider == 'azure'
        - instances is defined
        - not rfile.stat.exists
    - name: set fact for nfs_pvs
      set_fact:
        nfs_pvs: "{{ result.stdout }}"
      when:
        - install_nfs | bool
        - cloud_provider == 'azure'
        - result | succeeded
        - not rfile.stat.exists
    - name: Write down nfs_pvs for idempotency
      copy:
        dest: /var/preserve/nfs_pvs
        content: "{{ nfs_pvs }}"
        force: no
      when:
        - install_nfs | bool
        - cloud_provider == 'azure'
        - result | succeeded
        - not rfile.stat.exists
    # idempotency
    - name: get nfs_pvs from file (from previous run)
      slurp:
        src: /var/preserve/nfs_pvs
      register: slurp_result
      when:
        - cloud_provider == 'azure'
        - install_nfs | bool
        - rfile.stat.exists
    - name: set fact for nfs_pvs (from previous run)
      set_fact:
        nfs_pvs: "{{ slurp_result.content|b64decode }}"
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - install_nfs | bool
        - rfile.stat.exists
- name: Detect and map data disks (nodes) for Azure
  hosts: nodes
  become: true
  gather_facts: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - name: test if docker_device file is present (previous run)
      stat:
        path: /var/preserve/docker_device
      register: rfile
    - name: Get docker device
      changed_when: false
      vars:
        query: "[?name == 'node']|[].volumes[?purpose=='docker'].lun|[0][0]"
      shell: >
        parted -m /dev/sda print all 2>/dev/null
        | grep unknown
        | grep /dev/sd
        | cut -d':' -f1
        | sed -n '{{ (instances|json_query(query)|int) + 1}}p'
      register: result
      when:
        - cloud_provider == 'azure'
        - instances is defined
        - not rfile.stat.exists
    - name: set fact for docker_device
      set_fact:
        docker_device: "{{ result.stdout }}"
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - not rfile.stat.exists
    - name: Write down docker_device for idempotency
      copy:
        dest: /var/preserve/docker_device
        content: "{{ docker_device }}"
        force: no
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - not rfile.stat.exists
    # idempotency
    - name: get device_name from file (from previous run)
      slurp:
        src: /var/preserve/docker_device
      register: slurp_result
      when:
        - cloud_provider == 'azure'
        - rfile.stat.exists
    - name: set fact for docker_device (from previous run)
      set_fact:
        docker_device: "{{ slurp_result.content|b64decode }}"
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - rfile.stat.exists
- name: Detect and map data disks (infranodes) for Azure
  hosts: infranodes
  become: true
  gather_facts: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - name: test if docker_device file is present (previous run)
      stat:
        path: /var/preserve/docker_device
      register: rfile
    - name: Get docker device
      changed_when: false
      vars:
        query: "[?name == 'infranode']|[].volumes[?purpose=='docker'].lun|[0][0]"
      shell: >
        parted -m /dev/sda print all 2>/dev/null
        | grep unknown
        | grep /dev/sd
        | cut -d':' -f1
        | sed -n '{{ (instances|json_query(query)|int) + 1}}p'
      register: result
      when:
        - cloud_provider == 'azure'
        - instances is defined
        - not rfile.stat.exists
    - name: set fact for docker_device
      set_fact:
        docker_device: "{{ result.stdout }}"
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - not rfile.stat.exists
    - name: Write down docker_device for idempotency
      copy:
        dest: /var/preserve/docker_device
        content: "{{ docker_device }}"
        force: no
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - not rfile.stat.exists
    # idempotency
    - name: get device_name from file (from previous run)
      slurp:
        src: /var/preserve/docker_device
      register: slurp_result
      when:
        - cloud_provider == 'azure'
        - rfile.stat.exists
    - name: set fact for docker_device (from previous run)
      set_fact:
        docker_device: "{{ slurp_result.content|b64decode }}"
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - rfile.stat.exists
- name: Map data disks (masters) for Azure
  hosts: masters
  become: true
  gather_facts: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - name: test if docker_device file is present (previous run)
      stat:
        path: /var/preserve/docker_device
      register: rfile
    - name: Get docker device
      changed_when: false
      vars:
        query: "[?name == 'master']|[].volumes[?purpose=='docker'].lun|[0][0]"
      shell: >
        parted -m /dev/sda print all 2>/dev/null
        | grep unknown
        | grep /dev/sd
        | cut -d':' -f1
        | sed -n '{{ (instances|json_query(query)|int) + 1}}p'
      register: result
      when:
        - cloud_provider == 'azure'
        - instances is defined
        - not rfile.stat.exists
    - name: set fact for docker_device
      set_fact:
        docker_device: "{{ result.stdout }}"
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - not rfile.stat.exists
    - name: Write down docker_device for idempotency
      copy:
        dest: /var/preserve/docker_device
        content: "{{ docker_device }}"
        force: no
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - not rfile.stat.exists
    # idempotency
    - name: get device_name from file (from previous run)
      slurp:
        src: /var/preserve/docker_device
      register: slurp_result
      when:
        - cloud_provider == 'azure'
        - rfile.stat.exists
    - name: set fact for docker_device (from previous run)
      set_fact:
        docker_device: "{{ slurp_result.content|b64decode }}"
      when:
        - cloud_provider == 'azure'
        - result | succeeded
        - rfile.stat.exists
#
#     - name: get S3User credentials from stack outputs
#       set_fact:
#         s3user: "{{ cloudformation_out_final.stack_outputs.S3User }}"
#         s3user_access_key: "{{ cloudformation_out_final.stack_outputs.S3UserAccessKey }}"
#         s3user_secret_access_key: "{{ cloudformation_out_final.stack_outputs.S3UserSecretAccessKey }}"
#       when:
#         - cloudformation_out_final is defined
#         - cloudformation_out_final.stack_outputs.S3UserAccessKey is defined
#         - cloudformation_out_final.stack_outputs.S3UserSecretAccessKey is defined
#       tags:
#         - provision_cf_template
#
#     - name: write down s3user credentials
#       copy:
#         dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.s3user.credentials"
#         content: |
#           * S3 Bucket for registry: {{s3user}}
#           ** S3User access key: {{s3user_access_key}}
#           ** S3User secret key: {{s3user_secret_access_key}}
#       when: s3user_access_key is defined
#
#     - name: get Route53User credentials from stack outputs
#       set_fact:
#         route53user: "{{ cloudformation_out_final.stack_outputs.Route53User }}"
#         route53user_access_key: "{{ cloudformation_out_final.stack_outputs.Route53UserAccessKey }}"
#         route53user_secret_access_key: "{{ cloudformation_out_final.stack_outputs.Route53UserSecretAccessKey }}"
#       when:
#         - cloudformation_out_final is defined
#         - cloudformation_out_final.stack_outputs.Route53UserAccessKey is defined
#         - cloudformation_out_final.stack_outputs.Route53UserSecretAccessKey is defined
#       tags:
#         - provision_cf_template
#
#     - name: write down Route53User credentials
#       copy:
#         dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.route53user.credentials"
#         content: |
#           * Route53 User for Let's Encrypt: {{ route53user }}
#           ** Route53User access key: {{ route53user_access_key }}
#           ** Route53User secret key: {{ route53user_secret_access_key }}
#       when: route53user_access_key is defined
#
#
# - name: Detect and map data disks (support) for Azure
#   hosts: support
#   become: true
#   gather_facts: false
#   vars_files:
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
#   tasks:
#     - name: test if docker_device file is present (previous run)
#       stat:
#         path: /var/preserve/docker_device
#       register: rfile
#
#     - name: Get docker device
#       changed_when: false
#       vars:
#         query: "[?name == 'support']|[].volumes[?purpose=='docker'].lun|[0][0]"
#       shell: >
#         parted -m /dev/sda print all 2>/dev/null
#         | grep unknown
#         | grep /dev/sd
#         | cut -d':' -f1
#         | sed -n '{{ (instances|json_query(query)|int) + 1}}p'
#       register: result
#       when:
#         - cloud_provider == 'azure'
#         - instances is defined
#         # docker_device will be present on support nodes only when glusterfs is installed
#         - install_glusterfs | bool
#         - not rfile.stat.exists
#
#     - name: set fact for docker_device
#       set_fact:
#         docker_device: "{{ result.stdout }}"
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - install_glusterfs | bool
#         - not rfile.stat.exists
#
#     - name: Write down docker_device for idempotency
#       copy:
#         dest: /var/preserve/docker_device
#         content: "{{ docker_device }}"
#         force: no
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - install_glusterfs | bool
#         - not rfile.stat.exists
#
#     # idempotency
#
#     - name: get device_name from file (from previous run)
#       slurp:
#         src: /var/preserve/docker_device
#       register: slurp_result
#       when:
#         - cloud_provider == 'azure'
#         - install_glusterfs | bool
#         - rfile.stat.exists
#
#     - name: set fact for docker_device (from previous run)
#       set_fact:
#         docker_device: "{{ slurp_result.content|b64decode }}"
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - install_glusterfs | bool
#         - rfile.stat.exists
#
#     # glusterfs device
#
#     - name: test if glusterfs_device_name file is present (previous run)
#       stat:
#         path: /var/preserve/glusterfs_device_name
#       register: rfile
#
#     - name: Get glusterfs device
#       changed_when: false
#       vars:
#         query: "[?name == 'support']|[].volumes[?purpose=='glusterfs'].lun|[0][0]"
#       shell: >
#         parted -m /dev/sda print all 2>/dev/null
#         | grep unknown
#         | grep /dev/sd
#         | cut -d':' -f1
#         | sed -n '{{ (instances|json_query(query)|int) + 1}}p'
#       register: result
#       when:
#         - install_glusterfs | bool
#         - cloud_provider == 'azure'
#         - instances is defined
#         - not rfile.stat.exists
#
#     - name: set fact for glusterfs_device_name
#       set_fact:
#         glusterfs_device_name: "{{ result.stdout }}"
#       when:
#         - install_glusterfs | bool
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - not rfile.stat.exists
#
#     - name: Write down glusterfs_device_name for idempotency
#       copy:
#         dest: /var/preserve/glusterfs_device_name
#         content: "{{ glusterfs_device_name }}"
#         force: no
#       when:
#         - install_glusterfs | bool
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - not rfile.stat.exists
#
#     # idempotency
#
#     - name: get device_name from file (from previous run)
#       slurp:
#         src: /var/preserve/glusterfs_device_name
#       register: slurp_result
#       when:
#         - cloud_provider == 'azure'
#         - install_glusterfs | bool
#         - rfile.stat.exists
#
#     - name: set fact for glusterfs_device_name (from previous run)
#       set_fact:
#         glusterfs_device_name: "{{ slurp_result.content|b64decode }}"
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - install_glusterfs | bool
#         - rfile.stat.exists
#
#     # nfs_pvs
#
#     - name: test if nfs_pvs file is present (previous run)
#       stat:
#         path: /var/preserve/nfs_pvs
#       register: rfile
#
#     - name: Get NFS device
#       changed_when: false
#       vars:
#         query: "[?name == 'support']|[].volumes[?purpose=='nfs'].lun|[0][0]"
#       shell: >
#         parted -m /dev/sda print all 2>/dev/null
#         | grep unknown
#         | grep /dev/sd
#         | cut -d':' -f1
#         | sed -n '{{ (instances|json_query(query)|int) + 1}}p'
#       register: result
#       when:
#         - install_nfs | bool
#         - cloud_provider == 'azure'
#         - instances is defined
#         - not rfile.stat.exists
#
#     - name: set fact for nfs_pvs
#       set_fact:
#         nfs_pvs: "{{ result.stdout }}"
#       when:
#         - install_nfs | bool
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - not rfile.stat.exists
#
#     - name: Write down nfs_pvs for idempotency
#       copy:
#         dest: /var/preserve/nfs_pvs
#         content: "{{ nfs_pvs }}"
#         force: no
#       when:
#         - install_nfs | bool
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - not rfile.stat.exists
#
#     # idempotency
#
#     - name: get nfs_pvs from file (from previous run)
#       slurp:
#         src: /var/preserve/nfs_pvs
#       register: slurp_result
#       when:
#         - cloud_provider == 'azure'
#         - install_nfs | bool
#         - rfile.stat.exists
#
#     - name: set fact for nfs_pvs (from previous run)
#       set_fact:
#         nfs_pvs: "{{ slurp_result.content|b64decode }}"
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - install_nfs | bool
#         - rfile.stat.exists
#
# - name: Detect and map data disks (nodes) for Azure
#   hosts: nodes
#   become: true
#   gather_facts: false
#   vars_files:
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
#   tasks:
#     - name: test if docker_device file is present (previous run)
#       stat:
#         path: /var/preserve/docker_device
#       register: rfile
#
#     - name: Get docker device
#       changed_when: false
#       vars:
#         query: "[?name == 'node']|[].volumes[?purpose=='docker'].lun|[0][0]"
#       shell: >
#         parted -m /dev/sda print all 2>/dev/null
#         | grep unknown
#         | grep /dev/sd
#         | cut -d':' -f1
#         | sed -n '{{ (instances|json_query(query)|int) + 1}}p'
#       register: result
#       when:
#         - cloud_provider == 'azure'
#         - instances is defined
#         - not rfile.stat.exists
#
#     - name: set fact for docker_device
#       set_fact:
#         docker_device: "{{ result.stdout }}"
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - not rfile.stat.exists
#
#     - name: Write down docker_device for idempotency
#       copy:
#         dest: /var/preserve/docker_device
#         content: "{{ docker_device }}"
#         force: no
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - not rfile.stat.exists
#
#     # idempotency
#
#     - name: get device_name from file (from previous run)
#       slurp:
#         src: /var/preserve/docker_device
#       register: slurp_result
#       when:
#         - cloud_provider == 'azure'
#         - rfile.stat.exists
#
#     - name: set fact for docker_device (from previous run)
#       set_fact:
#         docker_device: "{{ slurp_result.content|b64decode }}"
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - rfile.stat.exists
#
# - name: Detect and map data disks (infranodes) for Azure
#   hosts: infranodes
#   become: true
#   gather_facts: false
#   vars_files:
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
#   tasks:
#     - name: test if docker_device file is present (previous run)
#       stat:
#         path: /var/preserve/docker_device
#       register: rfile
#
#     - name: Get docker device
#       changed_when: false
#       vars:
#         query: "[?name == 'infranode']|[].volumes[?purpose=='docker'].lun|[0][0]"
#       shell: >
#         parted -m /dev/sda print all 2>/dev/null
#         | grep unknown
#         | grep /dev/sd
#         | cut -d':' -f1
#         | sed -n '{{ (instances|json_query(query)|int) + 1}}p'
#       register: result
#       when:
#         - cloud_provider == 'azure'
#         - instances is defined
#         - not rfile.stat.exists
#
#     - name: set fact for docker_device
#       set_fact:
#         docker_device: "{{ result.stdout }}"
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - not rfile.stat.exists
#
#     - name: Write down docker_device for idempotency
#       copy:
#         dest: /var/preserve/docker_device
#         content: "{{ docker_device }}"
#         force: no
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - not rfile.stat.exists
#
#     # idempotency
#
#     - name: get device_name from file (from previous run)
#       slurp:
#         src: /var/preserve/docker_device
#       register: slurp_result
#       when:
#         - cloud_provider == 'azure'
#         - rfile.stat.exists
#
#     - name: set fact for docker_device (from previous run)
#       set_fact:
#         docker_device: "{{ slurp_result.content|b64decode }}"
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - rfile.stat.exists
#
# - name: Map data disks (masters) for Azure
#   hosts: masters
#   become: true
#   gather_facts: false
#   vars_files:
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
#   tasks:
#     - name: test if docker_device file is present (previous run)
#       stat:
#         path: /var/preserve/docker_device
#       register: rfile
#
#     - name: Get docker device
#       changed_when: false
#       vars:
#         query: "[?name == 'master']|[].volumes[?purpose=='docker'].lun|[0][0]"
#       shell: >
#         parted -m /dev/sda print all 2>/dev/null
#         | grep unknown
#         | grep /dev/sd
#         | cut -d':' -f1
#         | sed -n '{{ (instances|json_query(query)|int) + 1}}p'
#       register: result
#       when:
#         - cloud_provider == 'azure'
#         - instances is defined
#         - not rfile.stat.exists
#
#     - name: set fact for docker_device
#       set_fact:
#         docker_device: "{{ result.stdout }}"
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - not rfile.stat.exists
#
#     - name: Write down docker_device for idempotency
#       copy:
#         dest: /var/preserve/docker_device
#         content: "{{ docker_device }}"
#         force: no
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - not rfile.stat.exists
#
#     # idempotency
#
#     - name: get device_name from file (from previous run)
#       slurp:
#         src: /var/preserve/docker_device
#       register: slurp_result
#       when:
#         - cloud_provider == 'azure'
#         - rfile.stat.exists
#
#     - name: set fact for docker_device (from previous run)
#       set_fact:
#         docker_device: "{{ slurp_result.content|b64decode }}"
#       when:
#         - cloud_provider == 'azure'
#         - result | succeeded
#         - rfile.stat.exists
ansible/configs/ocp-multi-cloud-example/post_software.yml
@@ -1 +1,10 @@
---
- name: Step 00xxxxx post software
  hosts: support
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    # Create one NFS export directory per user volume (vol1 .. vol{user_vols}).
    # The file module is idempotent and reports changed-status correctly,
    # unlike the previous 'shell: mkdir -p .../vol{1..N}' form, which also
    # silently depended on /bin/sh supporting bash brace expansion.
    - name: Create user vols
      file:
        path: "/srv/nfs/user-vols/vol{{ item }}"
        state: directory
      with_sequence: "start=1 end={{ user_vols }}"
ansible/configs/ocp-multi-cloud-example/pre_software.yml
@@ -28,11 +28,6 @@
    - step004
    - step004.2
    - common_tasks
  pre_tasks:
    - name: add rhel-7-server-ansible-2.4-rpms repo for OCP 3.9
      set_fact:
        rhel_repos: "{{ rhel_repos + ['rhel-7-server-ansible-2.4-rpms'] }}"
      when: osrelease | version_compare('3.9', '>=')
  roles:
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories"
      when: repo_method is defined
@@ -54,6 +49,28 @@
    - step004
    - bastion_tasks
- name: Cache rpms for openshift install - temporary for rhte
  hosts:
    - nodes
    - infranodes
    - masters
  # NOTE(review): run_once restricts this to a single host out of all three
  # groups — confirm that is intended for rpm caching (kept to preserve
  # existing behavior).
  run_once: true
  become: true
  gather_facts: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    # Pre-install the OpenShift node packages so the installer later finds
    # them locally. Passing the list directly to the yum module resolves
    # everything in a single transaction, instead of one transaction per
    # package as with_items did.
    - name: install openshift_node packages
      yum:
        name: "{{ cached_packages }}"
        state: present
      tags:
        - testing_rhte_caching

    # Docker installs can leave an unfinished yum transaction behind;
    # complete it so subsequent package operations do not fail.
    - name: docker yum-complete-transaction workaround
      shell: "yum-complete-transaction -y"
- name: PreSoftware flight-check
  hosts: localhost
  connection: local
@@ -67,34 +84,3 @@
  tasks:
    - debug:
        msg: "Pre-Software checks completed successfully"
### Optional section: the following plays run only when "install_lets_encrypt_certificates" is true.
# Request and install Let's Encrypt wildcard certificates. Runs on a single
# bastion (run_once) since only one host needs to drive the request.
- name: Install Let's Encrypt Wildcard Certificates
  hosts: bastions
  run_once: true
  become: true
  gather_facts: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    # NOTE(review): import_role is static; templating the role name with
    # ANSIBLE_REPO_PATH may not resolve on all Ansible versions — confirm,
    # or switch to the dynamic include_role if this fails to parse.
    # The when: guard on an import applies to every task the role imports.
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/install-lets-encrypt-certs"
      tags: lets_encrypt
      when: install_lets_encrypt_certificates|bool
- name: Copy lets encrypt certificates
  hosts: masters
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - env-specific
    - project_request
  tasks:
    # https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem.txt
    # Ship the Let's Encrypt intermediate CA to every master so the wildcard
    # certificate chain validates.
    - name: Copy over the letsencrypt certificate
      copy:
        src: ./files/lets-encrypt-x3-cross-signed.pem.txt
        dest: /etc/origin/master/
      # This play belongs to the optional Let's Encrypt section; previously
      # the copy ran unconditionally even when the feature was disabled.
      when: install_lets_encrypt_certificates | default(false) | bool
ansible/roles/set-repositories/tasks/satellite-repos.yml
@@ -24,10 +24,10 @@
  command: >
    subscription-manager register
    --org="{{satellite_org}}"
    --activationkey="{{ satellite_activationkey }}"
    --activationkey="{{ satellite_activationkey }}" --force
- name: Enable Repositories
  command: subscription-manager repos --enable=rhel-7-server-satellite-tools-6.2-rpms
  command: subscription-manager repos --enable=rhel-7-server-satellite-tools-6.3-rpms
- name: Install Katello Agent
  yum: