2018-09-02 a321298b35d907e8bee5227ed3d71fe9bac65a3b
Merge branch 'development' of https://github.com/sborenst/ansible_agnostic_deployer into development
2 files deleted
59 files added
50 files modified
2 files renamed
113 changed files, 4766 lines changed
ansible/configs/ansible-cicd-lab/env_vars.yml 8
ansible/configs/ansible-cicd-lab/post_software.yml 5
ansible/configs/ansible-cicd-lab/tower_setup.yml 1
ansible/configs/ocp-ha-lab/env_vars.yml 46
ansible/configs/ocp-ha-lab/files/hosts_template.3.10.14.j2 149
ansible/configs/ocp-ha-lab/files/labs_hosts_template.3.10.14.j2 90
ansible/configs/ocp-ha-lab/files/labs_hosts_template.3.9.30.j2 71
ansible/configs/ocp-ha-lab/post_infra.yml 4
ansible/configs/ocp-ha-lab/post_software.yml 24
ansible/configs/ocp-ha-lab/pre_software.yml 22
ansible/configs/ocp-multi-cloud-example/env_vars.yml 5
ansible/configs/ocp-multi-cloud-example/files/cloud_providers/ec2_cloud_template.j2 105
ansible/configs/ocp-multi-cloud-example/packer.adoc 29
ansible/configs/ocp-multi-cloud-example/packer.json 30
ansible/configs/ocp-multi-cloud-example/pre_software.yml 4
ansible/configs/ocp-workshop/files/hosts_template.3.10.34.j2 395
ansible/configs/ocp-workshop/post_software.yml 130
ansible/configs/rhte-oc-cluster-vms/README.adoc 150
ansible/configs/rhte-oc-cluster-vms/destroy_env.yml 15
ansible/configs/rhte-oc-cluster-vms/env_vars.yml 156
ansible/configs/rhte-oc-cluster-vms/files/cloud_providers/ec2_cloud_template.j2 477
ansible/configs/rhte-oc-cluster-vms/files/hosts_template.j2 5
ansible/configs/rhte-oc-cluster-vms/files/oc-cluster.service.j2 20
ansible/configs/rhte-oc-cluster-vms/files/registries.conf 25
ansible/configs/rhte-oc-cluster-vms/files/repos_template.j2 45
ansible/configs/rhte-oc-cluster-vms/files/start_oc.sh.j2 2
ansible/configs/rhte-oc-cluster-vms/mgr_users.yml 6
ansible/configs/rhte-oc-cluster-vms/post_infra.yml 27
ansible/configs/rhte-oc-cluster-vms/post_software.yml 109
ansible/configs/rhte-oc-cluster-vms/pre_infra.yml 13
ansible/configs/rhte-oc-cluster-vms/pre_software.yml 75
ansible/configs/rhte-oc-cluster-vms/software.yml 34
ansible/configs/rhte-ocp-workshop/destroy_env.yml 49
ansible/configs/rhte-ocp-workshop/env_vars.yml 30
ansible/configs/rhte-ocp-workshop/files/cloud_providers/ec2_cloud_template.j2 166
ansible/configs/rhte-ocp-workshop/files/hosts_template.3.10.34.j2 395
ansible/configs/rhte-ocp-workshop/post_infra.yml 52
ansible/configs/rhte-ocp-workshop/post_software.yml 3
ansible/roles/bastion-student-user/defaults/main.yml 2
ansible/roles/bastion-student-user/tasks/main.yml 6
ansible/roles/geerlingguy.git/.gitignore 2
ansible/roles/geerlingguy.git/.travis.yml 59
ansible/roles/geerlingguy.git/LICENSE 20
ansible/roles/geerlingguy.git/README.md 55
ansible/roles/geerlingguy.git/defaults/main.yml 23
ansible/roles/geerlingguy.git/meta/.galaxy_install_info 1
ansible/roles/geerlingguy.git/meta/main.yml 29
ansible/roles/geerlingguy.git/tasks/install-from-source.yml 70
ansible/roles/geerlingguy.git/tasks/main.yml 23
ansible/roles/geerlingguy.git/tests/README.md 11
ansible/roles/geerlingguy.git/tests/test-source.yml 16
ansible/roles/geerlingguy.git/tests/test.yml 15
ansible/roles/geerlingguy.git/vars/Debian.yml 10
ansible/roles/geerlingguy.git/vars/Fedora.yml 13
ansible/roles/geerlingguy.git/vars/RedHat.yml 12
ansible/roles/geerlingguy.git/vars/main.yml 3
ansible/roles/geerlingguy.gogs/.gitignore 2
ansible/roles/geerlingguy.gogs/.travis.yml 27
ansible/roles/geerlingguy.gogs/LICENSE 20
ansible/roles/geerlingguy.gogs/README.md 59
ansible/roles/geerlingguy.gogs/defaults/main.yml 9
ansible/roles/geerlingguy.gogs/handlers/main.yml 3
ansible/roles/geerlingguy.gogs/meta/.galaxy_install_info 1
ansible/roles/geerlingguy.gogs/meta/main.yml 24
ansible/roles/geerlingguy.gogs/tasks/gogs-mysql.yml 20
ansible/roles/geerlingguy.gogs/tasks/init-setup.yml 24
ansible/roles/geerlingguy.gogs/tasks/main.yml 48
ansible/roles/geerlingguy.gogs/templates/gogs.unit.j2 26
ansible/roles/geerlingguy.gogs/tests/README.md 11
ansible/roles/geerlingguy.gogs/tests/requirements.yml 2
ansible/roles/geerlingguy.gogs/tests/test.yml 14
ansible/roles/geerlingguy.gogs/vars/Debian.yml 2
ansible/roles/geerlingguy.gogs/vars/RedHat.yml 2
ansible/roles/host-gogs-server/tasks/main.yml 9
ansible/roles/host-gogs-server/templates/gogs_config.j2 1
ansible/roles/infra-common-ssh-config-generate/tasks/main.yml 4
ansible/roles/infra-ec2-create-inventory/tasks/main.yml 1
ansible/roles/infra-ec2-template-create/tasks/main.yml 38
ansible/roles/infra-ec2-template-generate/tasks/main.yml 52
ansible/roles/infra-ec2-template-generate/templates/region_mapping.j2 73
ansible/roles/ocp-workload-3scale-multitenant/defaults/main.yml 9
ansible/roles/ocp-workload-3scale-multitenant/readme.adoc 17
ansible/roles/ocp-workload-3scale-multitenant/tasks/pre_workload.yml 2
ansible/roles/ocp-workload-3scale-multitenant/tasks/remove_workload.yml 25
ansible/roles/ocp-workload-3scale-multitenant/tasks/workload.yml 81
ansible/roles/ocp-workload-3scale-multitenant/templates/manage_tenants.sh 51
ansible/roles/ocp-workload-bxms-pam/defaults/main.yml 7
ansible/roles/ocp-workload-bxms-pam/readme.adoc 6
ansible/roles/ocp-workload-bxms-pam/tasks/workload.yml 52
ansible/roles/ocp-workload-bxms-pam/templates/rhpam70-image-streams.yaml 123
ansible/roles/ocp-workload-fuse-ignite/tasks/workload.yml 11
ansible/roles/ocp-workload-istio-community/defaults/main.yml 2
ansible/roles/ocp-workload-istio-community/readme.adoc 10
ansible/roles/ocp-workload-istio-community/tasks/workload.yml 47
ansible/roles/ocp-workload-rhte-mw-api-mesh/defaults/main.yml 6
ansible/roles/ocp-workload-rhte-mw-api-mesh/ilt_provision.sh 100
ansible/roles/ocp-workload-rhte-mw-api-mesh/readme.adoc 4
ansible/roles/ocp-workload-rhte-mw-api-mesh/tasks/pre_workload.yml 4
ansible/roles/ocp-workload-rhte-mw-api-mesh/tasks/remove_workload.yml 4
ansible/roles/ocp-workload-rhte-mw-api-mesh/templates/coolstore-catalog-mongodb-persistent.yaml 6
ansible/roles/ocp-workload-rhte-mw-msa-mesh/defaults/main.yml 5
ansible/roles/ocp-workload-rhte-mw-msa-mesh/tasks/pre_workload.yml 4
ansible/roles/ocp-workload-rhte-mw-msa-mesh/tasks/remove_workload.yml 4
ansible/roles/ocp-workload-rhte-mw-msa-mesh/templates/coolstore-catalog-mongodb-persistent.yaml 6
ansible/roles/ocp-workload-rhte-mw-msa-orchestration/readme.adoc 33
ansible/roles/ocp-workload-rhte-mw-msa-orchestration/tasks/workload.yml 10
ansible/roles/ocp-workload-rhte-mw-op-intel/defaults/main.yml 40
ansible/roles/ocp-workload-rhte-mw-op-intel/readme.adoc 28
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/main.yml 9
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/remove_workload.yml 4
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/spark_workload.yml 178
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/strimzi_workload.yml 60
ansible/workdir/.gitignore 4
ansible/configs/ansible-cicd-lab/env_vars.yml
@@ -277,4 +277,10 @@
tower_job_template_name: Acme
tower_credential_name: Acme
tower_job_template_playbook: hackathons/rhte-2018-emea/ansible-role-httpd/tests/test.yml
tower_credential_username: ec2-user
### Gogs Variables
ansible_service_mgr: systemd
gogs_admin_username: cicduser1
gogs_admin_password: r3dh4t!
ansible/configs/ansible-cicd-lab/post_software.yml
@@ -12,7 +12,7 @@
  hosts:
    - cicd*
  become: true
  gather_facts: False
  gather_facts: True
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
@@ -23,11 +23,12 @@
      setup:
        filter: 'ansible_[od][si]*'
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/geerlingguy.gogs" }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/host-gogs-server" }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/host-jenkins-server" }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/molecule" }
- hosts: bastion
- hosts: bastions
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
ansible/configs/ansible-cicd-lab/tower_setup.yml
@@ -162,7 +162,6 @@
    src: "/root/.ssh/{{ guid }}key.pem"
  register: credential_ssh_key
  no_log: True
  delegate_to: "{{ bastion_host }}"
  when: response.json.count == 0
- name: Create Credential
ansible/configs/ocp-ha-lab/env_vars.yml
@@ -26,6 +26,7 @@
deploy_openshift_post: false
deploy_env_post: false
install_metrics: true
install_prometheus: true
install_logging: true
ovs_plugin: "subnet" # This can also be set to: "multitenant" or "networkpolicy"
multi_tenant_setting: "os_sdn_network_plugin_name='redhat/openshift-ovs-{{ovs_plugin}}'"
@@ -33,28 +34,32 @@
cloudapps_suffix: 'apps.{{subdomain_base}}'
openshift_master_ldap_ca_file: 'openshift_master_ldap_ca_file=/root/ca.crt'
 ## If you are not part of GPTE you don't need this.
# Options for container_runtime: docker, cri-o
container_runtime: "docker"
docker_version: "{{ '1.12.6' if repo_version | version_compare('3.9', '<')  else '1.13.1' }}"
docker_device: /dev/xvdb
## If you are not part of GPTE you don't need this.
opentlc_integration: true
################################################################################
#### GENERIC EXAMPLE
####
################################################################################
install_common: true
install_nfs: true
install_glusterfs: false
glusterfs_hosted_device_name: /dev/xvdc
glusterfs_hosted_device_size: 300
glusterfs_app_device_name: /dev/xvdd
glusterfs_app_device_size: 300
install_bastion: false
env_authorized_key: "{{guid}}key"
set_env_authorized_key: true
software_to_deploy: "openshift"
################################################################################
#### OCP IMPLEMENATATION LAB
################################################################################
repo_version: '3.10'
repo_version: '3.10.14'
cloudapps_dns: '*.apps.{{subdomain_base}}.'
master_public_dns: "loadbalancer.{{subdomain_base}}."
@@ -70,21 +75,26 @@
  - tmux
  - bind-utils
  - wget
  - nano
  - ansible
  - git
  - vim-enhanced
  - ansible
  - at
  - sysstat
  - strace
  - net-tools
  - iptables-services
  - bridge-utils
  - kexec-tools
  - sos
  - psacct
  - iotop
rhel_repos:
  - rhel-7-server-rpms
  - rhel-7-server-extras-rpms
  - rhel-7-server-ose-{{repo_version}}-rpms
  - rhel-7-server-ansible-2.4-rpms
  - rh-gluster-3-client-for-rhel-7-server-rpms
use_subscription_manager: false
use_own_repos: true
@@ -131,20 +141,20 @@
bastion_instance_type: "t2.xlarge"
support_instance_type: "t2.medium"
support_instance_count: 3
node_instance_type: "t2.large"
node_instance_count: 3
infranode_instance_type: "t2.xlarge"
infranode_instance_count: 2
loadbalancer_instance_count: 1
loadbalancer_instance_type: "t2.small"
master_instance_type: "t2.large"
master_instance_count: 3
loadbalancer_instance_count: 1
loadbalancer_instance_type: "t2.small"
infranode_instance_type: "t2.xlarge"
infranode_instance_count: 2
node_instance_type: "t2.large"
node_instance_count: 3
support_instance_type: "t2.medium"
support_instance_count: 3
ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
ansible/configs/ocp-ha-lab/files/hosts_template.3.10.14.j2
@@ -1,5 +1,5 @@
#
# /etc/ansible/hosts file for OpenShift Container Platform 3.10.14
# ansible inventory for OpenShift Container Platform  3.10.14
#
[OSEv3:vars]
@@ -29,6 +29,7 @@
openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
{% endif %}
# Node Groups
openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true','runtime={{container_runtime}}']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true','runtime={{container_runtime}}']}, {'name': 'node-config-glusterfs', 'labels': ['runtime={{container_runtime}}']}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true','runtime={{container_runtime}}'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. -> These  need to go into the above
# openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
@@ -37,34 +38,6 @@
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
{% if install_glusterfs|bool %}
###########################################################################
### OpenShift Container Storage
###########################################################################
openshift_master_dynamic_provisioning_enabled=True
# CNS storage cluster
# From https://github.com/red-hat-storage/openshift-cic
openshift_storage_glusterfs_namespace=openshift-storage
openshift_storage_glusterfs_storageclass=true
openshift_storage_glusterfs_storageclass_default=true
openshift_storage_glusterfs_block_deploy=true
openshift_storage_glusterfs_block_host_vol_create=true
openshift_storage_glusterfs_block_host_vol_size=200
openshift_storage_glusterfs_block_storageclass=true
openshift_storage_glusterfs_block_storageclass_default=false
# Container image to use for glusterfs pods
openshift_storage_glusterfs_image="registry.access.redhat.com/rhgs3/rhgs-server-rhel7:v3.9"
# Container image to use for glusterblock-provisioner pod
openshift_storage_glusterfs_block_image="registry.access.redhat.com/rhgs3/rhgs-gluster-block-prov-rhel7:v3.9"
# Container image to use for heketi pods
openshift_storage_glusterfs_heketi_image="registry.access.redhat.com/rhgs3/rhgs-volmanager-rhel7:v3.9"
{% endif %}
{% if install_nfs|bool %}
# Set this line to enable NFS
@@ -87,16 +60,11 @@
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname={{master_lb_dns}}
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
{% if install_lets_encrypt_certificates|bool %}
openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}}
{% endif %}
openshift_set_hostname=True
###########################################################################
### OpenShift Network Vars
@@ -112,23 +80,13 @@
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %}
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %}
# htpasswd Authentication
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
openshift_master_identity_providers={{identity_providers|to_json}}
{% if admission_plugin_config is defined %}
###########################################################################
### OpenShift admission plugin config
###########################################################################
openshift_master_admission_plugin_config={{admission_plugin_config|to_json}}
{% endif %}
# LDAP Authentication (download ipa-ca.crt first)
# openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}]
# openshift_master_ldap_ca_file=/root/ipa-ca.crt
###########################################################################
### OpenShift Metrics and Logging Vars
@@ -139,7 +97,6 @@
########################
openshift_metrics_install_metrics={{install_metrics}}
{% if install_nfs|bool and not install_glusterfs|bool %}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
@@ -148,13 +105,6 @@
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassanda_pvc_storage_class_name=''
{% endif %}
{% if install_glusterfs|bool %}
openshift_metrics_storage_kind=dynamic
openshift_metrics_storage_volume_size=20Gi
openshift_metrics_cassandra_pvc_storage_class_name='glusterfs-storage-block'
{% endif %}
openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra": "true"}
@@ -237,6 +187,7 @@
# Enable cluster logging
########################
openshift_logging_install_logging={{install_logging}}
{% if install_nfs|bool and not install_glusterfs|bool %}
@@ -269,26 +220,25 @@
openshift_hosted_router_replicas={{infranode_instance_count}}
{% if install_lets_encrypt_certificates|bool %}
openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}}
{% endif %}
# openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
{% if s3user_access_key is defined %}
# Registry AWS S3
# S3 bucket must already exist.
openshift_hosted_registry_storage_kind=object
openshift_hosted_registry_storage_provider=s3
openshift_hosted_registry_storage_s3_accesskey={{ s3user_access_key }}
openshift_hosted_registry_storage_s3_secretkey={{ s3user_secret_access_key }}
openshift_hosted_registry_storage_s3_bucket={{ project_tag }}
openshift_hosted_registry_storage_s3_region={{ aws_region_final|d(aws_region) }}
openshift_hosted_registry_storage_s3_chunksize=26214400
openshift_hosted_registry_storage_s3_rootdirectory=/registry
openshift_hosted_registry_replicas=1
{% if install_nfs|bool %}
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
{% endif %}
###########################################################################
@@ -307,6 +257,7 @@
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
@@ -314,11 +265,13 @@
nfs
{% endif %}
{% if install_glusterfs|bool %}
glusterfs
#glusterfs
{% endif %}
{% if groups['newnodes']|d([])|length > 0 %}
new_nodes
{% endif %}
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[masters]
{% for host in groups['masters']|sort %}
@@ -342,28 +295,14 @@
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort
  if host not in groups['newnodes']|d([])
  and host not in groups['glusterfs']|d([])
  %}
{% for host in groups['nodes']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute'
{% endfor %}
{% if groups['glusterfs']|d([])|length > 0 %}
## These are glusterfs nodes
{% for host in groups['glusterfs']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute'
## These are OCS nodes
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute'
{% endfor %}
{% endif %}
{% if groups['newnodes']|d([])|length > 0 %}
# scaleup performed, leave an empty group, see:
# https://docs.openshift.com/container-platform/3.7/install_config/adding_hosts_to_existing_cluster.html
[new_nodes]
{% for host in groups['newnodes']|sort %}
{{ hostvars[host].internaldns }} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_group_name='node-config-compute'
{% endfor %}
{% endif %}
{% if install_nfs|bool %}
[nfs]
@@ -372,21 +311,7 @@
{% endfor %}
{% endif %}
{% if install_glusterfs|bool %}
{% set query = "[?name=='support']|[0].volumes[?purpose=='glusterfs'].device_name" %}
[glusterfs]
{% for host in groups['glusterfs']|sort %}
{% if  loop.index % 3 == 1 %}
{%   set glusterfs_zone = 1 %}
{% elif  loop.index % 3 == 2 %}
{%   set glusterfs_zone = 2 %}
{% elif  loop.index % 3 == 0 %}
{%   set glusterfs_zone = 3 %}
{% endif %}
{% if cloud_provider == 'ec2' %}
{{ hostvars[host].internaldns }} glusterfs_zone={{ glusterfs_zone }} glusterfs_devices='{{instances|json_query(query)|to_json}}'
{% elif cloud_provider == 'azure' %}
{{ hostvars[host].internaldns }} glusterfs_zone={{ glusterfs_zone }} glusterfs_devices='{{ [ hostvars[host].glusterfs_device_name ] |to_json}}'
{% endif %}
#[glusterfs]
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
{% endif %}
ansible/configs/ocp-ha-lab/files/labs_hosts_template.3.10.14.j2
New file
@@ -0,0 +1,90 @@
#
# LAB inventory
#  ansible inventory for OpenShift Container Platform  3.10.14
#
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_user={{ansible_ssh_user}}
ansible_become=yes
###########################################################################
### OpenShift Basic Vars
###########################################################################
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true','runtime={{container_runtime}}']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true','runtime={{container_runtime}}']}, {'name': 'node-config-glusterfs', 'labels': ['runtime={{container_runtime}}']}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true','runtime={{container_runtime}}'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. -> These  need to go into the above
# openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
{% if install_nfs|bool %}
nfs
{% endif %}
{% if install_glusterfs|bool %}
#glusterfs
{% endif %}
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[masters]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-master'
{% endfor %}
## These are infranodes
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-infra'
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute'
{% endfor %}
## These are OCS nodes
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute'
{% endfor %}
{% if install_nfs|bool %}
[nfs]
{% for host in [groups['support']|sort|first] %}
{{ hostvars[host].internaldns }}
{% endfor %}
{% endif %}
#[glusterfs]
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
ansible/configs/ocp-ha-lab/files/labs_hosts_template.3.9.30.j2
New file
@@ -0,0 +1,71 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
# disable memory check, as we are not a production environment
openshift_disable_check="memory_availability"
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
#glusterfs
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[masters]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env': 'master', 'cluster': '{{guid}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'infra', 'cluster': '{{guid}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'app', 'cluster': '{{guid}}'}"
{% endfor %}
## These are CNS nodes
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'env':'glusterfs', 'cluster': '{{guid}}'}"
{% endfor %}
[nfs]
{% set nfshost = groups['support']|sort|first %}
{{ hostvars[nfshost].internaldns }} openshift_hostname={{ hostvars[nfshost].internaldns }}
#[glusterfs]
{% for host in groups['support']|sort %}
# {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
ansible/configs/ocp-ha-lab/post_infra.yml
@@ -3,8 +3,8 @@
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - step002
    - post_infrastructure
ansible/configs/ocp-ha-lab/post_software.yml
@@ -1,4 +1,4 @@
#vim: set ft=ansible:
# vim: set ft=ansible:
---
- name: Step 005 - Post Software deployment
  hosts: localhost
@@ -10,10 +10,20 @@
  tags:
    - step005
  tasks:
    - name: Generate /etc/ansible/hosts file with lab hosts template
    - name: Generate /etc/ansible/hosts file with lab inv template
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/labs_hosts_template.j2"
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/labs_hosts_template.{{ osrelease }}.j2"
        dest: "{{ ANSIBLE_REPO_PATH }}/workdir/labs_hosts-{{ env_type }}-{{ guid }}"
- name: Run openshift host provision on the bastion
  gather_facts: False
  become: yes
  hosts:
    - bastions
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  roles:
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/host-ocp-provisioner"
- name: Configure NFS host for user-vols if required
  hosts: support
@@ -40,8 +50,10 @@
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Move complete inventory file to preserve directory.
      shell: mv /etc/ansible/hosts /var/preserve/
    - name: Copy complete inventory file to bastion:/var/preserve/hosts
      copy:
        src: "{{ ANSIBLE_REPO_PATH }}/workdir/hosts-{{ env_type }}-{{ guid }}"
        dest: /var/preserve/hosts
      tags: preserve_complete_ansible_inventory
    - name: Copy over ansible hosts file, lab version
@@ -52,7 +64,7 @@
      tags:
        - overwrite_hosts_with_lab_hosts
    ## Create PVs for uservols if required
    ## Create PV objects for uservols if required
    - name: get nfs Hostname
      set_fact:
        nfs_host: "{{ groups['support']|sort|first }}"
ansible/configs/ocp-ha-lab/pre_software.yml
@@ -1,4 +1,5 @@
# vim: set ft=ansible:
# ---
- name: Step 003 - Create env key
  hosts: localhost
  connection: local
@@ -32,7 +33,8 @@
# Cloudformation template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }}
- name: Configure all hosts with Repositories, Common Files and Set environment key
  hosts: all
  hosts:
    - all:!windows
  become: true
  gather_facts: False
  vars_files:
@@ -41,14 +43,23 @@
  tags:
    - step004
    - common_tasks
  pre_tasks:
    - name: add rhel-7-server-ansible-2.4-rpms repo for OCP 3.9+
      set_fact:
        rhel_repos: "{{ rhel_repos + ['rhel-7-server-ansible-2.4-rpms'] }}"
      when: osrelease is version_compare('3.9', '>=')
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories", when: 'repo_method is defined' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/common", when: 'install_common' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key", when: 'set_env_authorized_key' }
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories"
      when: repo_method is defined
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/common"
      when: install_common|bool
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key"
      when: set_env_authorized_key|bool
- name: Configuring Bastion Hosts
  hosts: bastions
  become: true
  gather_facts: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
@@ -57,7 +68,6 @@
  tags:
    - step004
    - bastion_tasks
- name: PreSoftware flight-check
  hosts: localhost
ansible/configs/ocp-multi-cloud-example/env_vars.yml
@@ -15,7 +15,7 @@
###### OR PASS as "-e" args to ansible-playbook command
### Common Host settings
repo_version: "3.9"
repo_version: "3.10"
repo_method: file # Other Options are: file, satellite and rhn
cached_packages:
@@ -226,6 +226,7 @@
## files and anything that identifies this environment from another "just like it"
guid: defaultguid
env_type: ocp-multi-cloud-example
# This var is used to identify stack (cloudformation, azure resourcegroup, ...)
project_tag: "{{ env_type }}-{{ guid }}"
@@ -350,7 +351,7 @@
cloudapps_suffix: 'apps.{{subdomain_base}}'
## TODO: This should be registered as a variable. Awk for os verions (OCP).
## yum info openshift...
osrelease: 3.9.30
osrelease: 3.10.14
openshift_master_overwrite_named_certificates: true
timeout: 60
ansible/configs/ocp-multi-cloud-example/files/cloud_providers/ec2_cloud_template.j2
@@ -2,102 +2,7 @@
---
AWSTemplateFormatVersion: "2010-09-09"
Mappings:
  RegionMapping:
    us-east-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6871a115
      {% else %}
      RHELAMI: ami-c998b6b2
      {% endif %}
    us-east-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-03291866
      {% else %}
      RHELAMI: ami-cfdafaaa
      {% endif %}
    us-west-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-18726478
      {% else %}
      RHELAMI: ami-66eec506
      {% endif %}
    us-west-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-28e07e50
      {% else %}
      RHELAMI: ami-223f945a
      {% endif %}
    eu-west-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-7c491f05
      {% else %}
      RHELAMI: ami-bb9a6bc2
      {% endif %}
    eu-central-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-c86c3f23
      {% else %}
      RHELAMI: ami-d74be5b8
      {% endif %}
    ap-northeast-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6b0d5f0d
      {% else %}
      RHELAMI: ami-30ef0556
      {% endif %}
    ap-northeast-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-3eee4150
      {% else %}
      RHELAMI: ami-0f5a8361
      {% endif %}
    ap-southeast-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-76144b0a
      {% else %}
      RHELAMI: ami-10bb2373
      {% endif %}
    ap-southeast-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-67589505
      {% else %}
      RHELAMI: ami-ccecf5af
      {% endif %}
    ap-south-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-5b673c34
      {% else %}
      RHELAMI: ami-cdbdd7a2
      {% endif %}
    sa-east-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-b0b7e3dc
      {% else %}
      RHELAMI: ami-a789ffcb
      {% endif %}
  DNSMapping:
    us-east-1:
      domain: "us-east-1.compute.internal"
    us-west-1:
      domain: "us-west-1.compute.internal"
    us-west-2:
      domain: "us-west-2.compute.internal"
    eu-west-1:
      domain: "eu-west-1.compute.internal"
    eu-central-1:
      domain: "eu-central-1.compute.internal"
    ap-northeast-1:
      domain: "ap-northeast-1.compute.internal"
    ap-northeast-2:
      domain: "ap-northeast-2.compute.internal"
    ap-southeast-1:
      domain: "ap-southeast-1.compute.internal"
    ap-southeast-2:
      domain: "ap-southeast-2.compute.internal"
    sa-east-1:
      domain: "sa-east-1.compute.internal"
    ap-south-1:
      domain: "ap-south-1.compute.internal"
{% include 'templates/region_mapping.j2' %}
Resources:
  Vpc:
@@ -270,11 +175,19 @@
  {{instance['name']}}{{loop.index}}:
    Type: "AWS::EC2::Instance"
    Properties:
{% if (instance_default_image is defined
      and env_type in instance_default_image
      and osrelease in instance_default_image[env_type]
      and aws_region in instance_default_image[env_type][osrelease])
%}
      ImageId: {{ instance_default_image[env_type][osrelease][aws_region] }}
{% else %}
      ImageId:
        Fn::FindInMap:
        - RegionMapping
        - Ref: AWS::Region
        - {{ instance['image_id'] | default('RHELAMI') }}
{% endif %}
      InstanceType: "{{instance['flavor'][cloud_provider]}}"
      KeyName: "{{instance['key_name'] | default(key_name)}}"
{% if instance['UserData'] is defined %}
ansible/configs/ocp-multi-cloud-example/packer.adoc
New file
@@ -0,0 +1,29 @@
= Create custom image for this env
You will find a packer file `packer.json`.
To create the image for a specific version:
----
packer build -var-file=~/secrets/rhte.json  -var "ANSIBLE_REPO_PATH=$(pwd)/../../" -var own_repo_path=http://... packer.json
----
Where `rhte.json` contains:
----
{
        "aws_access_key_id": "...",
        "aws_secret_access_key": "..."
}
----
This will create an image that you can define in the `instance_default_image` dictionary, for example:
----
instance_default_image:
  ocp-multi-cloud-example:
    3.10.14:
      us-east-1: ami-xxxx
----
ansible/configs/ocp-multi-cloud-example/packer.json
New file
@@ -0,0 +1,30 @@
{
    "builders": [
        {
            "type": "amazon-ebs",
            "access_key": "{{user `aws_access_key_id`}}",
            "secret_key": "{{user `aws_secret_access_key`}}",
            "region": "us-east-1",
            "source_ami": "ami-6871a115",
            "ami_regions": "us-east-1",
            "instance_type": "t2.large",
            "ssh_username": "ec2-user",
            "ami_name": "packer updated RHEL 7.5 ocp-multi-cloud-example {{timestamp}}"
        }
    ],
    "provisioners": [
        {
            "type": "ansible",
            "playbook_file": "./pre_software.yml",
            "user": "ec2-user",
            "extra_arguments": [
                "--extra-vars", "ANSIBLE_REPO_PATH={{user `ANSIBLE_REPO_PATH`}}",
                "--extra-vars", "own_repo_path={{user `own_repo_path`}}",
                "--extra-vars", "env_type=ocp-multi-cloud-example",
                "--tags", "packer"
            ],
            "ansible_env_vars": ["ANSIBLE_HOST_KEY_CHECKING=False"]
        }
    ]
}
ansible/configs/ocp-multi-cloud-example/pre_software.yml
@@ -28,6 +28,7 @@
    - step004
    - step004.2
    - common_tasks
    - packer
  roles:
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories"
      when: repo_method is defined
@@ -57,11 +58,14 @@
    - nodes
    - infranodes
    - masters
    - default
  become: true
  gather_facts: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - packer
  tasks:
    - name: install openshift_node packages
      yum:
ansible/configs/ocp-workshop/files/hosts_template.3.10.34.j2
New file
@@ -0,0 +1,395 @@
#
# /etc/ansible/hosts file for OpenShift Container Platform 3.10.34
#
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_user={{ansible_ssh_user}}
ansible_become=yes
###########################################################################
### OpenShift Basic Vars
###########################################################################
openshift_deployment_type=openshift-enterprise
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version}
openshift_examples_modify_imagestreams=true
{% if container_runtime == "cri-o" %}
openshift_use_crio=True
openshift_crio_enable_docker_gc=True
openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
{% endif %}
openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true','runtime={{container_runtime}}']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true','runtime={{container_runtime}}']}, {'name': 'node-config-glusterfs', 'labels': ['runtime={{container_runtime}}']}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true','runtime={{container_runtime}}'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. -> These  need to go into the above
# openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
{% if install_glusterfs|bool %}
###########################################################################
### OpenShift Container Storage
###########################################################################
openshift_master_dynamic_provisioning_enabled=True
# CNS storage cluster
# From https://github.com/red-hat-storage/openshift-cic
openshift_storage_glusterfs_namespace=openshift-storage
openshift_storage_glusterfs_storageclass=true
openshift_storage_glusterfs_storageclass_default=true
openshift_storage_glusterfs_block_deploy=true
openshift_storage_glusterfs_block_host_vol_create=true
openshift_storage_glusterfs_block_host_vol_size=200
openshift_storage_glusterfs_block_storageclass=true
openshift_storage_glusterfs_block_storageclass_default=false
# Container image to use for glusterfs pods
openshift_storage_glusterfs_image="registry.access.redhat.com/rhgs3/rhgs-server-rhel7:v3.9"
# Container image to use for glusterblock-provisioner pod
openshift_storage_glusterfs_block_image="registry.access.redhat.com/rhgs3/rhgs-gluster-block-prov-rhel7:v3.9"
# Container image to use for heketi pods
openshift_storage_glusterfs_heketi_image="registry.access.redhat.com/rhgs3/rhgs-volmanager-rhel7:v3.9"
{% endif %}
{% if install_nfs|bool %}
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
{% endif %}
###########################################################################
### OpenShift Cockpit Vars
###########################################################################
# Enable cockpit
osm_use_cockpit=true
osm_cockpit_plugins=['cockpit-kubernetes']
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname={{master_lb_dns}}
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
{% if install_lets_encrypt_certificates|bool %}
openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}}
{% endif %}
openshift_set_hostname=True
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
# os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %}
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %}
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
openshift_master_identity_providers={{identity_providers|to_json}}
{% if admission_plugin_config is defined %}
###########################################################################
### OpenShift admission plugin config
###########################################################################
openshift_master_admission_plugin_config={{admission_plugin_config|to_json}}
{% endif %}
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
########################
# Enable cluster metrics
########################
openshift_metrics_install_metrics={{install_metrics}}
{% if install_nfs|bool and not install_glusterfs|bool %}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassanda_pvc_storage_class_name=''
{% endif %}
{% if install_glusterfs|bool %}
openshift_metrics_storage_kind=dynamic
openshift_metrics_storage_volume_size=20Gi
openshift_metrics_cassandra_pvc_storage_class_name='glusterfs-storage-block'
{% endif %}
openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/infra": "true"}
# Store Metrics for 2 days
openshift_metrics_duration=2
{% if install_prometheus|bool %}
#########################
# Add Prometheus Metrics:
#########################
openshift_hosted_prometheus_deploy=true
openshift_prometheus_namespace=openshift-metrics
openshift_prometheus_node_selector={"node-role.kubernetes.io/infra":"true"}
# Prometheus
{% if install_glusterfs|bool %}
openshift_prometheus_storage_type='pvc'
openshift_prometheus_storage_kind=dynamic
openshift_prometheus_storage_class='glusterfs-storage-block'
openshift_prometheus_storage_volume_size=20Gi
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_volume_name=prometheus
{% elif install_nfs|bool %}
openshift_prometheus_storage_type='emptydir'
{% endif %}
# For prometheus-alertmanager
{% if install_glusterfs|bool %}
openshift_prometheus_alertmanager_storage_type='pvc'
openshift_prometheus_alertmanager_storage_kind=dynamic
openshift_prometheus_alertmanager_storage_class='glusterfs-storage-block'
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
{% elif install_nfs|bool %}
openshift_prometheus_alertmanager_storage_type='emptydir'
{% endif %}
# For prometheus-alertbuffer
{% if install_glusterfs|bool %}
openshift_prometheus_alertbuffer_storage_type='pvc'
openshift_prometheus_alertbuffer_storage_kind=dynamic
openshift_prometheus_alertbuffer_storage_class='glusterfs-storage-block'
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
{% elif install_nfs|bool %}
openshift_prometheus_alertbuffer_storage_type='emptydir'
{% endif %}
# Suggested Quotas and limits for Prometheus components:
openshift_prometheus_memory_requests=2Gi
openshift_prometheus_cpu_requests=750m
openshift_prometheus_memory_limit=2Gi
openshift_prometheus_cpu_limit=750m
openshift_prometheus_alertmanager_memory_requests=300Mi
openshift_prometheus_alertmanager_cpu_requests=200m
openshift_prometheus_alertmanager_memory_limit=300Mi
openshift_prometheus_alertmanager_cpu_limit=200m
openshift_prometheus_alertbuffer_memory_requests=300Mi
openshift_prometheus_alertbuffer_cpu_requests=200m
openshift_prometheus_alertbuffer_memory_limit=300Mi
openshift_prometheus_alertbuffer_cpu_limit=200m
# The following file will need to be copied over to the bastion before deployment
# There is an example in ocp-workshop/files
# openshift_prometheus_additional_rules_file=/root/prometheus_alerts_rules.yml
# Grafana
openshift_grafana_node_selector={"node-role.kubernetes.io/infra":"true"}
openshift_grafana_storage_type=pvc
openshift_grafana_pvc_size=2Gi
openshift_grafana_node_exporter=true
openshift_grafana_prometheus_namespace="openshift-metrics"
openshift_grafana_prometheus_serviceaccount="prometheus"
openshift_grafana_prometheus_route="prometheus"
{% if install_glusterfs|bool %}
openshift_grafana_sc_name=glusterfs-storage
{% endif %}
{% endif %}
# Enable cluster logging
########################
openshift_logging_install_logging={{install_logging}}
{% if install_nfs|bool and not install_glusterfs|bool %}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
openshift_logging_es_pvc_storage_class_name=''
{% endif %}
{% if install_glusterfs|bool %}
openshift_logging_es_pvc_dynamic=true
openshift_logging_es_pvc_size=20Gi
openshift_logging_es_cluster_size=1
openshift_logging_es_pvc_storage_class_name='glusterfs-storage-block'
{% endif %}
openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_es_cluster_size=1
openshift_logging_curator_default_days=3
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_replicas={{infranode_instance_count}}
{% if install_lets_encrypt_certificates|bool %}
openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}}
{% endif %}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
{% if s3user_access_key is defined %}
# Registry AWS S3
# S3 bucket must already exist.
openshift_hosted_registry_storage_kind=object
openshift_hosted_registry_storage_provider=s3
openshift_hosted_registry_storage_s3_accesskey={{ s3user_access_key }}
openshift_hosted_registry_storage_s3_secretkey={{ s3user_secret_access_key }}
openshift_hosted_registry_storage_s3_bucket={{ project_tag }}
openshift_hosted_registry_storage_s3_region={{ aws_region_final|d(aws_region) }}
openshift_hosted_registry_storage_s3_chunksize=26214400
openshift_hosted_registry_storage_s3_rootdirectory=/registry
{% endif %}
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
# openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
masters
etcd
nodes
{% if install_nfs|bool %}
nfs
{% endif %}
{% if install_glusterfs|bool %}
glusterfs
{% endif %}
{% if groups['newnodes']|d([])|length > 0 %}
new_nodes
{% endif %}
[masters]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-master'
{% endfor %}
## These are infranodes
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-infra'
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort
  if host not in groups['newnodes']|d([])
  and host not in groups['glusterfs']|d([])
  %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute'
{% endfor %}
{% if groups['glusterfs']|d([])|length > 0 %}
## These are glusterfs nodes
{% for host in groups['glusterfs']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute'
{% endfor %}
{% endif %}
{% if groups['newnodes']|d([])|length > 0 %}
# scaleup performed, leave an empty group, see:
# https://docs.openshift.com/container-platform/3.7/install_config/adding_hosts_to_existing_cluster.html
[new_nodes]
{% for host in groups['newnodes']|sort %}
{{ hostvars[host].internaldns }} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_group_name='node-config-compute'
{% endfor %}
{% endif %}
{% if install_nfs|bool %}
[nfs]
{% for host in [groups['support']|sort|first] %}
{{ hostvars[host].internaldns }}
{% endfor %}
{% endif %}
{% if install_glusterfs|bool %}
{% set query = "[?name=='support']|[0].volumes[?purpose=='glusterfs'].device_name" %}
[glusterfs]
{% for host in groups['glusterfs']|sort %}
{% if  loop.index % 3 == 1 %}
{%   set glusterfs_zone = 1 %}
{% elif  loop.index % 3 == 2 %}
{%   set glusterfs_zone = 2 %}
{% elif  loop.index % 3 == 0 %}
{%   set glusterfs_zone = 3 %}
{% endif %}
{% if cloud_provider == 'ec2' %}
{{ hostvars[host].internaldns }} glusterfs_zone={{ glusterfs_zone }} glusterfs_devices='{{instances|json_query(query)|to_json}}'
{% elif cloud_provider == 'azure' %}
{{ hostvars[host].internaldns }} glusterfs_zone={{ glusterfs_zone }} glusterfs_devices='{{ [ hostvars[host].glusterfs_device_name ] |to_json}}'
{% endif %}
{% endfor %}
{% endif %}
ansible/configs/ocp-workshop/post_software.yml
@@ -380,6 +380,27 @@
    - name: Redeploy dockergc DaemonSet pods
      shell: "oc delete pod $(oc get pods -n default|grep dockergc|awk -c '{print $1}') -n default"
# - name: Fix Error pods when using CRI-O (3.10+)
#   gather_facts: False
#   become: yes
#   hosts: masters
#   run_once: true
#   vars_files:
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
#   tasks:
#   - name: Fix cri-o containers in error state
#     when:
#     - osrelease is version_compare('3.10.0', '>=')
#     - container_runtime == "cri-o"
#     block:
#     - name: Find Elasticsearch Deployment Config
#       shell: "oc get dc -o=custom-columns=NAME:.metadata.name -n openshift-logging|grep data"
#       register: logging_dc
#     - name: Redeploy elasticsearch pods
#       shell: "oc rollout latest {{ logging_dc.stdout }} -n openshift-logging"
#     - name: Redeploy asb
#       shell: "oc rollout latest asb -n openshift-ansible-broker"
# Install OpenWhisk
- name: Install OpenWhisk
  hosts: masters
@@ -462,45 +483,6 @@
    - name: Add admin permissions to admin_user for Grafana project
      shell: "oc policy add-role-to-user admin {{admin_user}} -n openshift-grafana"
      when: grafana_exists is failed
# Update Firewall Rules for Node Exporter to work (3.10 and onwards).
- name: Node Exporter and Grafana Configuration (3.10+)
  gather_facts: False
  become: yes
  hosts:
  - nodes
  - infranodes
  - masters
  vars_files:
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
  - install_prometheus
  tasks:
  - when:
    - install_prometheus|d(False)|bool
    - osrelease is version_compare("3.10", ">=")
    block:
    # Node Exporters on all nodes listen on port 9100.
    # Open Firewall Port 9100 for future sessions by adding
    # the rule to the iptables file.
    - name: Open Firewall port 9100 for future sessions
      lineinfile:
        dest: /etc/sysconfig/iptables
        insertafter: '-A FORWARD -j REJECT --reject-with icmp-host-prohibited'
        line: '-A OS_FIREWALL_ALLOW -p tcp -m state --state NEW -m tcp --dport 9100 -j ACCEPT'
        state: present
    # Open Firewall Port 9100 for the current session by adding
    # the rule to the running iptables configuration. We don't
    # need to restart the iptables service, which ensures all
    # existing OpenShift rules stay in place.
    - name: Open Firewall Port 9100 for current session
      iptables:
        action: insert
        protocol: tcp
        destination_port: 9100
        state: present
        chain: OS_FIREWALL_ALLOW
        jump: ACCEPT
- name: Customize Service Catalog UI for workshops
  hosts: masters
@@ -586,6 +568,41 @@
    when: install_maistra|d(False)|bool
# WK Added for RHTE
# Install Infrastructure workloads first
# - name: Install ocp-infra workloads
#   hosts: masters
#   gather_facts: false
#   run_once: true
#   become: yes
#   vars_files:
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
#   tasks:
#   - name: Install ocp-infra workloads
#     when:
#     - infra_workloads|d("")|length > 0
#     block:
#     - name: Check if admin_user is set
#       fail:
#         msg: admin_user must be set for ocp-infra workloads
#       when:
#       - not admin_user is defined or admin_user|length == 0
#     - name: Install ocp-infra-workloads
#       when:
#       - infra_workloads|d("")|length >0
#       block:
#       - name: Deploy ocp-infra workloads
#         include_role:
#           name: "{{ ANSIBLE_REPO_PATH }}/roles/{{ workload_loop_var }}"
#         vars:
#           admin_user: "{{ admin_user }}"
#           ocp_username: "{{ admin_user }}"
#           ACTION: "provision"
#         loop: "{{ infra_workloads.split(',')|list }}"
#         loop_control:
#           loop_var: workload_loop_var
# Install User Workloads second
# - name: Install ocp-workload workloads for multiple Users
#   hosts: masters
#   gather_facts: false
@@ -624,39 +641,6 @@
#       loop: "{{ users | product(student_workloads.split(','))|list }}"
#       loop_control:
#         loop_var: workload_loop_var
# - name: Install ocp-infra workloads
#   hosts: masters
#   gather_facts: false
#   run_once: true
#   become: yes
#   vars_files:
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
#   tasks:
#   - name: Install ocp-infra workloads
#     when:
#     - infra_workloads|d("")|length > 0
#     block:
#     - name: Check if admin_user is set
#       fail:
#         msg: admin_user must be set for ocp-infra workloads
#       when:
#       - not admin_user is defined or admin_user|length == 0
#     - name: Install ocp-infra-workloads
#       when:
#       - infra_workloads|d("")|length >0
#       block:
#       - name: Deploy ocp-infra workloads
#         include_role:
#           name: "{{ ANSIBLE_REPO_PATH }}/roles/{{ workload_loop_var[1] }}"
#         vars:
#           admin_user: "{{ admin_user }}"
#           ocp_username: "user{{ workload_loop_var[0] }}"
#           ACTION: "provision"
#         loop: "{{ infra_workloads.split(',')|list }}"
#         loop_control:
#           loop_var: workload_loop_var
# WK Added for RHTE End
- name: Zabbix for masters
@@ -717,7 +701,7 @@
  vars:
    zabbix_auto_registration_keyword: OCP Host
  tasks:
    - when: install_zabbix_bool
    - when: install_zabbix|bool
      include_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client"
  tags:
ansible/configs/rhte-oc-cluster-vms/README.adoc
New file
@@ -0,0 +1,150 @@
= OCP Client VM (ocp-clientvm) Standard Config
== Set up your "Secret" variables
* You need to provide some credentials for deployments to work
* Create a file called "env_secret_vars.yml" and put it in the ./ansible/configs/CONFIGNAME/ directory.
** At this point, this file *must exist*, even if no vars from it are used.
* You can choose to provide these values as extra vars (-e "var=value") in the command line if you prefer not to keep sensitive information in a file.
.Example contents of "Secret" Vars file
----
# ## Logon credentials for Red Hat Network
# ## Required if using the subscription component
# ## of this playbook.
rhel_subscription_user: ''
rhel_subscription_pass: ''
#
# ## AWS Credentials. This is required.
aws_access_key_id: ""
aws_secret_access_key: ""
#
# If using repo_method: satellite, you must set these values as well.
satellite_url: https://satellite.example.com
satellite_org: Sat_org_name
satellite_activationkey: "rhel7basic"
----
== Review the Env_Type variable file
* This file link:./env_vars.yml[./env_vars.yml] contains all the variables you need to define to control the deployment of your environment.
=== IPA registration
You can either provide `ipa_host_password` or a pair of `ipa_kerberos_user`/`ipa_kerberos_password` credentials to register the host with the IPA server. See link:../../roles/bastion-opentlc-ipa[roles/bastion-opentlc-ipa].
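For example, one of the following (placeholder values, not real credentials) could be added to your secret vars file:
----
# Either a host password:
ipa_host_password: 'changeme'
# Or a Kerberos user/password pair:
# ipa_kerberos_user: 'admin'
# ipa_kerberos_password: 'changeme'
----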
== Running Ansible Playbook
You can run the playbook with the following arguments to overwrite the default variable values:
[source,bash]
----
GUID=testclientvm1
REGION=us-east-1
KEYNAME=ocpkey
ENVTYPE="ocp-clientvm"
CLOUDPROVIDER=ec2
HOSTZONEID='Z186MFNM7DX4NF'
REPO_PATH='https://admin.example.com/repos/ocp/3.9.14/'
BASESUFFIX='.example.opentlc.com'
REPO_VERSION=3.9
DEPLOYER_REPO_PATH=`pwd`
OSRELEASE=3.9.14
ansible-playbook main.yml \
  -e "guid=${GUID}" \
  -e "env_type=${ENVTYPE}" \
  -e "osrelease=${OSRELEASE}" \
  -e "repo_version=${REPO_VERSION}" \
  -e "docker_version=1.13.1" \
  -e "cloud_provider=${CLOUDPROVIDER}"
  -e "aws_region=${REGION}" \
  -e "HostedZoneId=${HOSTZONEID}" \
  -e "key_name=${KEYNAME}" \
  -e "subdomain_base_suffix=${BASESUFFIX}" \
  -e "clientvm_instance_type=t2.large" \
  -e "email=name@example.com" \
  -e "software_to_deploy=none" \
  -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
  -e "own_repo_path=${REPO_PATH}"
----
=== Satellite version
----
GUID=testclientvm1
REGION=us-east-1
KEYNAME=ocpkey
ENVTYPE="ocp-clientvm"
CLOUDPROVIDER=ec2
HOSTZONEID='Z186MFNM7DX4NF'
BASESUFFIX='.example.opentlc.com'
REPO_VERSION=3.9
DEPLOYER_REPO_PATH=`pwd`
LOG_FILE=/tmp/${ENVTYPE}-${GUID}.log
IPAPASS=$5
if [ "$1" = "provision" ] ; then
echo "Provisioning: ${STACK_NAME}"  1>> $LOG_FILE 2>> $LOG_FILE
ansible-playbook ${DEPLOYER_REPO_PATH}/main.yml  \
  -e "guid=${GUID}" \
  -e "env_type=${ENVTYPE}" \
  -e "key_name=${KEYNAME}" \
  -e "cloud_provider=${CLOUDPROVIDER}" \
  -e "aws_region=${REGION}" \
  -e "HostedZoneId=${HOSTZONEID}" \
  -e "subdomain_base_suffix=${BASESUFFIX}" \
  -e "clientvm_instance_type=t2.large" \
  -e "ipa_host_password=${IPAPASS}"
  -e "repo_method=satellite" \
  -e "repo_version=${REPO_VERSION}" \
  -e "email=name@example.com" \
  -e "software_to_deploy=none" \
  -e "osrelease=3.9.14" \
  -e "docker_version=1.13.1" \
  -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" 1>> $LOG_FILE 2>> $LOG_FILE
----
=== To Delete an environment
----
REGION=us-east-1
KEYNAME=ocpkey
GUID=testclientvm1
ENVTYPE="ocp-clientvm"
CLOUDPROVIDER=ec2
HOSTZONEID='Z186MFNM7DX4NF'
BASESUFFIX='.example.opentlc.com'
ansible-playbook ./configs/${ENVTYPE}/destroy_env.yml \
 -e "guid=${GUID}" \
 -e "env_type=${ENVTYPE}"  \
 -e "cloud_provider=${CLOUDPROVIDER}" \
 -e "aws_region=${REGION}" \
 -e "HostedZoneId=${HOSTZONEID}" \
 -e "key_name=${KEYNAME}" \
 -e "subdomain_base_suffix=${BASESUFFIX}"
----
== Example RC file
Use an RC file like this one to create a ClientVM with the wrapper.sh script:
----
GUID=myclient
REGION=us-east-1
KEYNAME=ocpkey
ENVTYPE=ocp-clientvm
SOFTWARE_TO_DEPLOY=none
HOSTZONEID='Z3IHLWJZOU9SRT'
ENVTYPE_ARGS=(
-e repo_version=3.9
-e osrelease=3.9.14
-e own_repo_path=http://admin.example.com/repos/ocp/3.9.14
-e docker_version=1.13.1
-e "clientvm_instance_type=t2.large"
-e "subdomain_base_suffix=.example.opentlc.com"
)
----
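Assuming wrapper.sh sources the RC file for its variables and takes the action as its first argument (as in the Satellite snippet above, where `$1` is compared against "provision"), a hypothetical invocation might look like this:
----
# Hypothetical usage; adapt to your local wrapper.sh interface
./wrapper.sh provision
----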
ansible/configs/rhte-oc-cluster-vms/destroy_env.yml
New file
@@ -0,0 +1,15 @@
---
- name: Delete Infrastructure
  hosts: localhost
  connection: local
  gather_facts: False
  become: no
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  tasks:
    - debug:
        msg: Deleting Infrastructure
- import_playbook: "../../cloud_providers/ec2_destroy_env.yml"
ansible/configs/rhte-oc-cluster-vms/env_vars.yml
New file
@@ -0,0 +1,156 @@
###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
###### OR PASS as "-e" args to ansible-playbook command
### Common Host settings
repo_method: file # Options are: file, satellite and rhn
# Do you want to run a full yum update
update_packages: true
# If using repo_method: satellite, you must set these values as well.
# satellite_url: https://satellite.example.com
# satellite_org: Sat_org_name
# satellite_activationkey: "rhel7basic"
## guid is the deployment unique identifier, it will be appended to all tags,
## files and anything that identifies this environment from another "just like it"
guid: defaultguid
# num_users is 1 by default. If it is set to more than 1, then instead of creating
# clientvm.guid.baseurl the config creates clientvm{1..num_users}.guid.baseurl
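# For example, num_users: 3 creates clientvm1.guid.baseurl, clientvm2.guid.baseurl and clientvm3.guid.baseurl.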
num_users: 1
install_bastion: true
install_common: true
install_opentlc_integration: true
install_ipa_client: false
# Install a user with id 'student'. If install_student_user=true, then a global variable
# student_password must be provided with the password to set for the student user.
install_student_user: false
docker_device: /dev/xvdb
docker_version: "{{ '1.12.6' if repo_version | version_compare('3.9', '<')  else '1.13.1' }}"
### If you want a Key Pair name created and injected into the hosts,
# set `set_env_authorized_key` to true and set the keyname in `env_authorized_key`
# you can use the key used to create the environment or use your own self generated key
# if you set "use_own_key" to false your PRIVATE key will be copied to the bastion. (This is {{key_name}})
use_own_key: true
env_authorized_key: "{{guid}}key"
ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
set_env_authorized_key: true
# Is this running from Red Hat Ansible Tower
tower_run: false
### AWS EC2 Environment settings
### Route 53 Zone ID (AWS)
# This is the Route53 HostedZoneId where you will create your Public DNS entries
# This only needs to be defined if your CF template uses route53
HostedZoneId: Z3IHLWJZOU9SRT
# The region to be used, if not specified by -e in the command line
aws_region: us-east-1
# The key pair that is used to connect to the instances
key_name: "default_key_name"
## Networking (AWS)
subdomain_base_short: "{{ guid }}"
subdomain_base_suffix: ".example.opentlc.com"
subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"
## Environment Sizing
clientvm_instance_type: "t2.large"
###### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYMENT
## This might get removed
env_specific_images:
#   - "registry.access.redhat.com/jboss-eap-7/eap70-openshift:latest"
#   - "registry.access.redhat.com/openshift3/jenkins-2-rhel7:latest"
#   - "registry.access.redhat.com/openshift3/jenkins-slave-maven-rhel7:latest"
#### Vars for the OpenShift Ansible hosts file
## TODO: This should be registered as a variable. Awk for OS versions (OCP).
## yum info openshift...
osrelease: 3.9.40
###### You can, but you usually wouldn't need to.
ansible_ssh_user: ec2-user
remote_user: ec2-user
common_packages:
- git
- ansible
- docker
rhel_repos:
  - rhel-7-server-rpms
  - rhel-7-server-extras-rpms
  - rhel-7-server-ose-{{repo_version}}-rpms
  - rhel-7-fast-datapath-rpms
  - rhel-7-server-ansible-2.4-rpms
# use_subscription_manager: false
# use_own_repos: true
#
# rhn_pool_id_string: OpenShift Container Platform
### CLOUDFORMATIONS vars
project_tag: "{{ env_type }}-{{ guid }}"
create_internal_dns_entries: false
zone_internal_dns: "{{guid}}.internal."
chomped_zone_internal_dns: "{{guid}}.internal"
zone_public_dns: "{{subdomain_base}}."
bastion_public_dns: "bastion.{{subdomain_base}}."
bastion_public_dns_chomped: "bastion.{{subdomain_base}}"
vpcid_name_tag: "{{subdomain_base}}"
az_1_name: "{{ aws_region }}a"
az_2_name: "{{ aws_region }}b"
subnet_private_1_cidr_block: "192.168.2.0/24"
subnet_private_1_az: "{{ az_2_name }}"
subnet_private_1_name_tag: "{{subdomain_base}}-private"
subnet_private_2_cidr_block: "192.168.1.0/24"
subnet_private_2_az: "{{ az_1_name }}"
subnet_private_2_name_tag: "{{subdomain_base}}-private"
subnet_public_1_cidr_block: "192.168.10.0/24"
subnet_public_1_az: "{{ az_1_name }}"
subnet_public_1_name_tag: "{{subdomain_base}}-public"
subnet_public_2_cidr_block: "192.168.20.0/24"
subnet_public_2_az: "{{ az_2_name }}"
subnet_public_2_name_tag: "{{subdomain_base}}-public"
dopt_domain_name: "{{ aws_region }}.compute.internal"
rtb_public_name_tag: "{{subdomain_base}}-public"
rtb_private_name_tag: "{{subdomain_base}}-private"
cf_template_description: "{{ env_type }}-{{ guid }} template "
cloudformation_retries: 2
ocp_report: false
rootfs_size_clientvm: 200
instances:
  - name: "clientvm"
    count: "{{num_users}}"
    public_dns: true
    flavor:
      "ec2": "{{clientvm_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "bastions"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_clientvm }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: 100
        volume_type: gp2
ansible/configs/rhte-oc-cluster-vms/files/cloud_providers/ec2_cloud_template.j2
New file
@@ -0,0 +1,477 @@
#jinja2: lstrip_blocks: True
---
AWSTemplateFormatVersion: "2010-09-09"
Mappings:
  RegionMapping:
    us-east-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6871a115
      {% else %}
      RHELAMI: ami-c998b6b2
      {% endif %}
    us-east-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-03291866
      {% else %}
      RHELAMI: ami-cfdafaaa
      {% endif %}
    us-west-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-18726478
      {% else %}
      RHELAMI: ami-66eec506
      {% endif %}
    us-west-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-223f945a
      {% else %}
      RHELAMI: ami-9fa343e7
      {% endif %}
    eu-west-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-7c491f05
      {% else %}
      RHELAMI: ami-bb9a6bc2
      {% endif %}
    eu-central-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-c86c3f23
      {% else %}
      RHELAMI: ami-d74be5b8
      {% endif %}
    ap-northeast-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6b0d5f0d
      {% else %}
      RHELAMI: ami-30ef0556
      {% endif %}
    ap-northeast-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-3eee4150
      {% else %}
      RHELAMI: ami-0f5a8361
      {% endif %}
    ap-southeast-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-76144b0a
      {% else %}
      RHELAMI: ami-10bb2373
      {% endif %}
    ap-southeast-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-67589505
      {% else %}
      RHELAMI: ami-ccecf5af
      {% endif %}
    ap-south-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-5b673c34
      {% else %}
      RHELAMI: ami-cdbdd7a2
      {% endif %}
    sa-east-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-b0b7e3dc
      {% else %}
      RHELAMI: ami-a789ffcb
      {% endif %}
  DNSMapping:
    us-east-1:
      domain: "us-east-1.compute.internal"
    us-west-1:
      domain: "us-west-1.compute.internal"
    us-west-2:
      domain: "us-west-2.compute.internal"
    eu-west-1:
      domain: "eu-west-1.compute.internal"
    eu-central-1:
      domain: "eu-central-1.compute.internal"
    ap-northeast-1:
      domain: "ap-northeast-1.compute.internal"
    ap-northeast-2:
      domain: "ap-northeast-2.compute.internal"
    ap-southeast-1:
      domain: "ap-southeast-1.compute.internal"
    ap-southeast-2:
      domain: "ap-southeast-2.compute.internal"
    sa-east-1:
      domain: "sa-east-1.compute.internal"
    ap-south-1:
      domain: "ap-south-1.compute.internal"
Resources:
  Vpc:
    Type: "AWS::EC2::VPC"
    Properties:
      CidrBlock: "192.199.0.0/16"
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
        - Key: Name
          Value: "{{vpcid_name_tag}}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
  VpcInternetGateway:
    Type: "AWS::EC2::InternetGateway"
  VpcGA:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId:
        Ref: VpcInternetGateway
      VpcId:
        Ref: Vpc
  VpcRouteTable:
    Type: "AWS::EC2::RouteTable"
    Properties:
      VpcId:
        Ref: Vpc
  VPCRouteInternetGateway:
    DependsOn: VpcGA
    Type: "AWS::EC2::Route"
    Properties:
      GatewayId:
        Ref: VpcInternetGateway
      DestinationCidrBlock: "0.0.0.0/0"
      RouteTableId:
        Ref: VpcRouteTable
  PublicSubnet:
    Type: "AWS::EC2::Subnet"
    DependsOn:
      - Vpc
    Properties:
{% if aws_availability_zone is defined %}
      AvailabilityZone: {{ aws_availability_zone }}
{% endif %}
      CidrBlock: "192.199.0.0/24"
      Tags:
        - Key: Name
          Value: "{{project_tag}}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
      MapPublicIpOnLaunch: true
      VpcId:
        Ref: Vpc
  PublicSubnetRTA:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
    Properties:
      RouteTableId:
        Ref: VpcRouteTable
      SubnetId:
        Ref: PublicSubnet
  HostSG:
    Type: "AWS::EC2::SecurityGroup"
    Properties:
      GroupDescription: Host
      VpcId:
        Ref: Vpc
      Tags:
        - Key: Name
          Value: host_sg
  HostUDPPorts:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - HostSG
          - GroupId
      IpProtocol: udp
      FromPort: 60000
      ToPort: 60003
      CidrIp: "0.0.0.0/0"
  HostTCPPortsSSH:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - HostSG
          - GroupId
      IpProtocol: tcp
      FromPort: 22
      ToPort: 22
      CidrIp: "0.0.0.0/0"
  HostTCPPortsHTTP:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - HostSG
          - GroupId
      IpProtocol: tcp
      FromPort: 80
      ToPort: 80
      CidrIp: "0.0.0.0/0"
  HostTCPPortsHTTPS:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - HostSG
          - GroupId
      IpProtocol: tcp
      FromPort: 443
      ToPort: 443
      CidrIp: "0.0.0.0/0"
  HostTCPPorts8443:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - HostSG
          - GroupId
      IpProtocol: tcp
      FromPort: 8443
      ToPort: 8443
      CidrIp: "0.0.0.0/0"
  zoneinternalidns:
    Type: "AWS::Route53::HostedZone"
    Properties:
      Name: "{{ zone_internal_dns }}"
      VPCs:
        - VPCId:
            Ref: Vpc
          VPCRegion:
            Ref: "AWS::Region"
      HostedZoneConfig:
        Comment: "Created By ansible agnostic deployer"
{% if num_users|d(1)|int > 1 %}
{% for c in range(1,num_users|int+1) %}
  CloudDNS{{loop.index}}:
    Type: AWS::Route53::RecordSetGroup
    DependsOn:
      - "clientvm{{loop.index}}EIP"
    Properties:
      HostedZoneId: "{{HostedZoneId}}"
      RecordSets:
        - Name: "*.apps.clientvm{{loop.index}}.{{subdomain_base}}"
          Type: A
          TTL: 900
          ResourceRecords:
            - Fn::GetAtt:
                - clientvm{{loop.index}}
                - PublicIp
  clientvm{{loop.index}}:
    Type: "AWS::EC2::Instance"
    Properties:
      ImageId:
        Fn::FindInMap:
        - RegionMapping
        - Ref: AWS::Region
        - 'RHELAMI'
      InstanceType: "{{instances[0]['flavor'][cloud_provider]}}"
      KeyName: "{{instances[0]['key_name'] | default(key_name)}}"
      SecurityGroupIds:
        - "Fn::GetAtt":
          - HostSG
          - GroupId
      SubnetId:
        Ref: PublicSubnet
      Tags:
        - Key: Name
          Value: clientvm{{loop.index}}
        - Key: internaldns
          Value: clientvm{{loop.index}}.{{chomped_zone_internal_dns}}
        - Key: "owner"
          Value: "{{ email | default('unknownuser') }}"
        - Key: "Project"
          Value: "{{project_tag}}"
        - Key: "{{project_tag}}"
          Value: "{{ instances[0]['name'] }}"
{% for tag in instances[0]['tags'] %}
        - Key: {{tag['key']}}
          Value: {{tag['value']}}
{% endfor %}
      BlockDeviceMappings:
        - DeviceName: "/dev/sda1"
          Ebs:
            VolumeSize: {{ instances[0]['rootfs_size'] | default('50') }}
{% for vol in instances[0]['volumes']|default([]) %}
        - DeviceName: "{{ vol['device_name'] }}"
          Ebs:
            VolumeType: "{{ vol['volume_type'] | d('gp2') }}"
            VolumeSize: "{{ vol['volume_size'] | d('20') }}"
{% endfor %}
  clientvm{{loop.index}}InternalDNS:
    Type: "AWS::Route53::RecordSetGroup"
    Properties:
      HostedZoneId:
        Ref: zoneinternalidns
      RecordSets:
      - Name: "clientvm{{loop.index}}.{{zone_internal_dns}}"
        Type: A
        TTL: 10
        ResourceRecords:
          - "Fn::GetAtt":
            - clientvm{{loop.index}}
            - PrivateIp
      - Name: "bastion{{loop.index}}.{{zone_internal_dns}}"
        Type: A
        TTL: 10
        ResourceRecords:
          - "Fn::GetAtt":
            - clientvm{{loop.index}}
            - PrivateIp
  clientvm{{loop.index}}EIP:
    Type: "AWS::EC2::EIP"
    DependsOn:
    - VpcGA
    Properties:
      InstanceId:
        Ref: clientvm{{loop.index}}
  clientvm{{loop.index}}PublicDNS:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
      - clientvm{{loop.index}}EIP
    Properties:
      HostedZoneId: {{HostedZoneId}}
      RecordSets:
          - Name: "clientvm{{loop.index}}.{{subdomain_base}}."
            Type: A
            TTL: 10
            ResourceRecords:
            - "Fn::GetAtt":
              - clientvm{{loop.index}}
              - PublicIp
          - Name: "bastion{{loop.index}}.{{subdomain_base}}."
            Type: A
            TTL: 10
            ResourceRecords:
            - "Fn::GetAtt":
              - clientvm{{loop.index}}
              - PublicIp
{% endfor %}
{% else %}
  CloudDNS:
    Type: AWS::Route53::RecordSetGroup
    DependsOn:
      - "clientvmEIP"
    Properties:
      HostedZoneId: "{{HostedZoneId}}"
      RecordSets:
        - Name: "*.apps.clientvm.{{subdomain_base}}"
          Type: A
          TTL: 900
          ResourceRecords:
            - Fn::GetAtt:
                - clientvm
                - PublicIp
  clientvm:
    Type: "AWS::EC2::Instance"
    Properties:
      ImageId:
        Fn::FindInMap:
        - RegionMapping
        - Ref: AWS::Region
        - 'RHELAMI'
      InstanceType: "{{instances[0]['flavor'][cloud_provider]}}"
      KeyName: "{{instances[0]['key_name'] | default(key_name)}}"
      SecurityGroupIds:
        - "Fn::GetAtt":
          - HostSG
          - GroupId
      SubnetId:
        Ref: PublicSubnet
      Tags:
        - Key: Name
          Value: clientvm
        - Key: internaldns
          Value: clientvm.{{chomped_zone_internal_dns}}
        - Key: "owner"
          Value: "{{ email | default('unknownuser') }}"
        - Key: "Project"
          Value: "{{project_tag}}"
        - Key: "{{project_tag}}"
          Value: "{{ instances[0]['name'] }}"
{% for tag in instances[0]['tags'] %}
        - Key: {{tag['key']}}
          Value: {{tag['value']}}
{% endfor %}
      BlockDeviceMappings:
        - DeviceName: "/dev/sda1"
          Ebs:
            VolumeSize: {{ instances[0]['rootfs_size'] | default('50') }}
{% for vol in instances[0]['volumes']|default([]) %}
        - DeviceName: "{{ vol['device_name'] }}"
          Ebs:
            VolumeType: "{{ vol['volume_type'] | d('gp2') }}"
            VolumeSize: "{{ vol['volume_size'] | d('20') }}"
{% endfor %}
  clientvmInternalDNS:
    Type: "AWS::Route53::RecordSetGroup"
    Properties:
      HostedZoneId:
        Ref: zoneinternalidns
      RecordSets:
      - Name: "clientvm.{{zone_internal_dns}}"
        Type: A
        TTL: 10
        ResourceRecords:
          - "Fn::GetAtt":
            - clientvm
            - PrivateIp
      - Name: "bastion.{{zone_internal_dns}}"
        Type: A
        TTL: 10
        ResourceRecords:
          - "Fn::GetAtt":
            - clientvm
            - PrivateIp
  clientvmEIP:
    Type: "AWS::EC2::EIP"
    DependsOn:
    - VpcGA
    Properties:
      InstanceId:
        Ref: clientvm
  clientvmPublicDNS:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
      - clientvmEIP
    Properties:
      HostedZoneId: {{HostedZoneId}}
      RecordSets:
          - Name: "clientvm.{{subdomain_base}}."
            Type: A
            TTL: 10
            ResourceRecords:
            - "Fn::GetAtt":
              - clientvm
              - PublicIp
          - Name: "bastion.{{subdomain_base}}."
            Type: A
            TTL: 10
            ResourceRecords:
            - "Fn::GetAtt":
              - clientvm
              - PublicIp
{% endif %}
Outputs:
  Route53internalzoneOutput:
    Description: The ID of the internal route 53 zone
    Value:
      Ref: zoneinternalidns
ansible/configs/rhte-oc-cluster-vms/files/hosts_template.j2
New file
@@ -0,0 +1,5 @@
###########################################################################
### ClientVM Hosts
### Dummy file
###########################################################################
ansible/configs/rhte-oc-cluster-vms/files/oc-cluster.service.j2
New file
@@ -0,0 +1,20 @@
[Unit]
Description=OpenShift oc cluster up Service
After=docker.service
Requires=docker.service
[Service]
ExecStart=/bin/oc cluster up --base-dir={{ ocp_root }} --public-hostname={{ hostname }} --routing-suffix=apps.{{ hostname }} --loglevel=1
ExecStop=/bin/oc cluster down
WorkingDirectory={{ ocp_root }}
Restart=no
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier=occlusterup
User=root
Type=oneshot
RemainAfterExit=yes
TimeoutSec=300
[Install]
WantedBy=multi-user.target
ansible/configs/rhte-oc-cluster-vms/files/registries.conf
New file
@@ -0,0 +1,25 @@
# This is a system-wide configuration file used to
# keep track of registries for various container backends.
# It adheres to TOML format and does not support recursive
# lists of registries.
# The default location for this configuration file is /etc/containers/registries.conf.
# The only valid categories are: 'registries.search', 'registries.insecure',
# and 'registries.block'.
[registries.search]
registries = ['registry.access.redhat.com']
# If you need to access insecure registries, add the registry's fully-qualified name.
# An insecure registry is one that does not have a valid SSL certificate or only does HTTP.
[registries.insecure]
registries = ['172.30.0.0/16']
# If you need to block pull access from a registry, uncomment the section below
# and add the registry's fully-qualified name.
#
# Docker only
[registries.block]
registries = []
ansible/configs/rhte-oc-cluster-vms/files/repos_template.j2
New file
@@ -0,0 +1,45 @@
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7
baseurl={{own_repo_path}}/rhel-7-server-rpms
enabled=1
gpgcheck=0
[rhel-7-server-rh-common-rpms]
name=Red Hat Enterprise Linux 7 Common
baseurl={{own_repo_path}}/rhel-7-server-rh-common-rpms
enabled=1
gpgcheck=0
[rhel-7-server-extras-rpms]
name=Red Hat Enterprise Linux 7 Extras
baseurl={{own_repo_path}}/rhel-7-server-extras-rpms
enabled=1
gpgcheck=0
[rhel-7-server-optional-rpms]
name=Red Hat Enterprise Linux 7 Optional
baseurl={{own_repo_path}}/rhel-7-server-optional-rpms
enabled=1
gpgcheck=0
[rhel-7-server-ose-{{repo_version}}-rpms]
name=Red Hat Enterprise Linux 7 OSE {{repo_version}}
baseurl={{own_repo_path}}/rhel-7-server-ose-{{repo_version}}-rpms
enabled=1
gpgcheck=0
## Required since OCP 3.5
[rhel-7-fast-datapath-rpms]
name=Red Hat Enterprise Linux Fast Datapath (RHEL 7 Server) (RPMs)
baseurl={{own_repo_path}}/rhel-7-fast-datapath-rpms
enabled=1
gpgcheck=0
{% if osrelease is version_compare('3.9', '>=') %}
## Required since OCP 3.9
[rhel-7-server-ansible-2.4-rpms]
name=Red Hat Enterprise Linux Ansible (RPMs)
baseurl={{own_repo_path}}/rhel-7-server-ansible-2.4-rpms
enabled=1
gpgcheck=0
{% endif %}
ansible/configs/rhte-oc-cluster-vms/files/start_oc.sh.j2
New file
@@ -0,0 +1,2 @@
#!/bin/bash
oc cluster up --public-hostname={{ hostname }} --routing-suffix=apps.{{ hostname }}
ansible/configs/rhte-oc-cluster-vms/mgr_users.yml
New file
@@ -0,0 +1,6 @@
---
mgr_users:
- name: opentlc-mgr
  home: /home/opentlc-mgr
  authorized_keys:
  - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4OojwKH74UWVOY92y87Tb/b56CMJoWbz2gyEYsr3geOc2z/n1pXMwPfiC2KT7rALZFHofc+x6vfUi6px5uTm06jXa78S7UB3MX56U3RUd8XF3svkpDzql1gLRbPIgL1h0C7sWHfr0K2LG479i0nPt/X+tjfsAmT3nWj5PVMqSLFfKrOs6B7dzsqAcQPInYIM+Pqm/pXk+Tjc7cfExur2oMdzx1DnF9mJaj1XTnMsR81h5ciR2ogXUuns0r6+HmsHzdr1I1sDUtd/sEVu3STXUPR8oDbXBsb41O5ek6E9iacBJ327G3/1SWwuLoJsjZM0ize+iq3HpT1NqtOW6YBLR opentlc-mgr@inf00-mwl.opentlc.com
ansible/configs/rhte-oc-cluster-vms/post_infra.yml
New file
@@ -0,0 +1,27 @@
- name: Step 002 Post Infrastructure
  hosts: localhost
  connection: local
  become: false
  vars_files:
  - "./env_vars.yml"
  - "./env_secret_vars.yml"
  tags:
  - step002
  - post_infrastructure
  tasks:
  - name: Job Template to launch a Job Template with update on launch inventory set
    uri:
      url: "https://{{ ansible_tower_ip }}/api/v1/job_templates/{{ job_template_id }}/launch/"
      method: POST
      user: "{{tower_admin}}"
      password: "{{tower_admin_password}}"
      body:
        extra_vars:
          guid: "{{guid}}"
          ipa_host_password: "{{ipa_host_password}}"
      body_format: json
      validate_certs: False
      HEADER_Content-Type: "application/json"
      status_code: 200, 201
    when: tower_run | bool
ansible/configs/rhte-oc-cluster-vms/post_software.yml
New file
@@ -0,0 +1,109 @@
---
- name: Step 00xxxxx post software
  hosts: bastions
  become: yes
  gather_facts: False
  vars_files:
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
  - debug:
    msg: "Post-Software Steps starting"
  - name: Set hostname fact
    set_fact:
      hostname: "{{ ansible_hostname}}.{{subdomain_base}}"
  - name: Set hostname
    hostname:
      name: "{{ hostname }}"
  - name: Make hostname permanent
    command: "echo {{ hostname }} > /etc/hostname"
  - name: Ensure software is installed
    yum:
      name: "{{ item }}"
      state: latest
    with_items:
    - git
    - vim
    - ansible
    - docker
  - name: Copy docker registries.conf file
    copy:
      src: ./files/registries.conf
      dest: /etc/containers/registries.conf
      mode: 0644
      owner: root
      group: root
  - name: Restart docker
    systemd:
      name: docker
      state: restarted
  # - name: Upload oc-cluster up script
  #   template:
  #     src: ./files/start_oc.sh.j2
  #     dest: /usr/bin/start_oc.sh
  #     owner: root
  #     group: root
  #     mode: 0755
  - name: Install AAD
    git:
      repo: https://github.com/sborenst/ansible_agnostic_deployer
      clone: yes
      dest: "/root/ansible_agnostic_deployer"
      version: "development"
  - name: Set OpenShift root config directory
    set_fact:
      ocp_root: "/var/lib/openshift"
  - name: Ensure OpenShift config directory is there
    file:
      path: "{{ ocp_root }}"
      state: directory
      owner: root
      group: root
      mode: 0775
  - name: Create oc_cluster system service
    template:
      src: ./files/oc-cluster.service.j2
      dest: "/usr/lib/systemd/system/oc-cluster.service"
  - name: Enable and start oc-cluster system service
    systemd:
      name: oc-cluster
      enabled: yes
      state: started
  - name: Wait for oc-cluster to be up and running
    wait_for:
      host: "{{ hostname }}"
      port: 8443
      sleep: 2
  - name: Check if cluster is already set up
    stat:
      path: /root/.setupcomplete
    register: occluster_setup
  - name: Setup and configure oc cluster
    when: not occluster_setup.stat.exists
    block:
    - name: Login as system:admin
      shell: "oc login -u system:admin"
    - name: Deploy workloads into cluster
      shell: 'ansible-playbook -i localhost, -c local /root/ansible_agnostic_deployer/ansible/configs/ocp-workloads/ocp-workload.yml -e"ANSIBLE_REPO_PATH=/root/ansible_agnostic_deployer/ansible" -e"ocp_username=developer" -e"ocp_workload={{ item }}" -e"guid=a1001" -e"ocp_user_needs_quota=false" -e"ocp_domain=https://{{ hostname }}:8443"  -e"ACTION=create"'
      args:
        chdir: /root/ansible_agnostic_deployer
      with_items: "{{ student_workloads.split(',')|list }}"
    - name: Touch setup file
      command: touch /root/.setupcomplete
- name: PostSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
  - post_flight_check
  tasks:
  - debug:
      msg: "Post-Software checks completed successfully"
ansible/configs/rhte-oc-cluster-vms/pre_infra.yml
New file
@@ -0,0 +1,13 @@
- name: Step 000 Pre Infrastructure
  hosts: localhost
  connection: local
  become: false
  vars_files:
  - "./env_vars.yml"
  - "./env_secret_vars.yml"
  tags:
  - step001
  - pre_infrastructure
  tasks:
  - debug:
      msg: "Step 000 Pre Infrastructure - Dummy action"
ansible/configs/rhte-oc-cluster-vms/pre_software.yml
New file
@@ -0,0 +1,75 @@
---
- name: Step 003 - Create env key
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
  - step003
  - generate_env_keys
  tasks:
  - name: Generate SSH keys
    shell: ssh-keygen -b 2048 -t rsa -f "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" -q -N ""
    args:
      creates: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}"
    when: set_env_authorized_key
  - name: fix permission
    file:
      path: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}"
      mode: 0400
    when: set_env_authorized_key
  - name: Generate SSH pub key
    shell: ssh-keygen -y -f "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" > "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}.pub"
    args:
      creates: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}.pub"
    when: set_env_authorized_key
# Cloudformation template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }}
- name: Configure all hosts with Repositories, Common Files and Set environment key
  hosts:
  - all:!windows
  become: true
  gather_facts: False
  vars_files:
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
  - step004
  - common_tasks
  roles:
  - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories",       when: 'repo_method is defined' }
  - { role: "{{ ANSIBLE_REPO_PATH }}/roles/common",                 when: 'install_common' }
  - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key", when: 'set_env_authorized_key' }
- name: Configuring Bastion Hosts
  hosts: bastions
  become: true
  vars_files:
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  roles:
  - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion",              when: 'install_bastion' }
  - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa",  when: 'install_ipa_client' }
  - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-student-user", when: 'install_student_user' }
  tags:
  - step004
  - bastion_tasks
- name: PreSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  vars_files:
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
  - flight_check
  tasks:
  - debug:
      msg: "Pre-Software checks completed successfully"
ansible/configs/rhte-oc-cluster-vms/software.yml
New file
@@ -0,0 +1,34 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - debug:
        msg: "Software tasks started"
- name: Set up ClientVM
  hosts: bastions
  gather_facts: false
  become: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
  - name: Set up Client VM
    include_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/ocp-client-vm"
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/rhte-ocp-workshop/destroy_env.yml
@@ -7,8 +7,55 @@
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
  environment:
    AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
    AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}"
  tasks:
    - name: Fetch autoscaling name
      slurp:
        src: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.AutoScalingGroupClientVM"
      register: asg_r
      ignore_errors: yes
    - name: Delete AutoScalingGroupClientVM
      command: >-
          aws autoscaling delete-auto-scaling-group --force-delete --auto-scaling-group-name {{asg_r.content|b64decode}}
      ignore_errors: yes
    - name: Get DNS record for each clientVM
      route53:
        state: get
        zone: "{{subdomain_base_suffix[1:]}}."
        record: "clientvm{{idx}}.{{subdomain_base}}."
        type: A
      with_sequence: start=0 end={{num_users|default(100)}}
      loop_control:
        index_var: idx
      ignore_errors: yes
      register: rec
    - name: Delete DNS record for each clientVM
      route53:
        zone: "{{item.set.zone}}"
        record: "{{item.set.record}}"
        type: "{{item.set.type}}"
        value: "{{item.set.value}}"
        ttl: "{{item.set.ttl}}"
        state: absent
      when: >-
        'set' in item and 'record' in item.set
      with_items: "{{rec.results}}"
      ignore_errors: yes
    - name: Delete instance
      ec2_instance:
        state: absent
        filters:
          instance-state-name: running
          "tag:Project": "{{project_tag}}"
      when: project_tag is defined
    - name: Delete S3 bucket
      environment:
        AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
ansible/configs/rhte-ocp-workshop/env_vars.yml
@@ -440,21 +440,21 @@
        volume_type: gp2
        purpose: docker
        lun: 0
  - name: "clientvm"
    count: "{{num_users}}"
    public_dns: true
    flavor:
      "ec2": "{{clientvm_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "clientvms"
      - key: "ostype"
        value: "linux"
    rootfs_size: "{{ rootfs_size_clientvm }}"
    volumes:
      - device_name: "{{docker_device}}"
        volume_size: 100
        volume_type: gp2
  # - name: "clientvm"
  #   count: "{{num_users}}"
  #   public_dns: true
  #   flavor:
  #     "ec2": "{{clientvm_instance_type}}"
  #   tags:
  #     - key: "AnsibleGroup"
  #       value: "clientvms"
  #     - key: "ostype"
  #       value: "linux"
  #   rootfs_size: "{{ rootfs_size_clientvm }}"
  #   volumes:
  #     - device_name: "{{docker_device}}"
  #       volume_size: 100
  #       volume_type: gp2
  - name: "support"
    count: "{{support_instance_count}}"
ansible/configs/rhte-ocp-workshop/files/cloud_providers/ec2_cloud_template.j2
@@ -2,102 +2,7 @@
---
AWSTemplateFormatVersion: "2010-09-09"
Mappings:
  RegionMapping:
    us-east-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6871a115
      {% else %}
      RHELAMI: ami-c998b6b2
      {% endif %}
    us-east-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-03291866
      {% else %}
      RHELAMI: ami-cfdafaaa
      {% endif %}
    us-west-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-18726478
      {% else %}
      RHELAMI: ami-66eec506
      {% endif %}
    us-west-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-28e07e50
      {% else %}
      RHELAMI: ami-223f945a
      {% endif %}
    eu-west-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-7c491f05
      {% else %}
      RHELAMI: ami-bb9a6bc2
      {% endif %}
    eu-central-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-c86c3f23
      {% else %}
      RHELAMI: ami-d74be5b8
      {% endif %}
    ap-northeast-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6b0d5f0d
      {% else %}
      RHELAMI: ami-30ef0556
      {% endif %}
    ap-northeast-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-3eee4150
      {% else %}
      RHELAMI: ami-0f5a8361
      {% endif %}
    ap-southeast-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-76144b0a
      {% else %}
      RHELAMI: ami-10bb2373
      {% endif %}
    ap-southeast-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-67589505
      {% else %}
      RHELAMI: ami-ccecf5af
      {% endif %}
    ap-south-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-5b673c34
      {% else %}
      RHELAMI: ami-cdbdd7a2
      {% endif %}
    sa-east-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-b0b7e3dc
      {% else %}
      RHELAMI: ami-a789ffcb
      {% endif %}
  DNSMapping:
    us-east-1:
      domain: "us-east-1.compute.internal"
    us-west-1:
      domain: "us-west-1.compute.internal"
    us-west-2:
      domain: "us-west-2.compute.internal"
    eu-west-1:
      domain: "eu-west-1.compute.internal"
    eu-central-1:
      domain: "eu-central-1.compute.internal"
    ap-northeast-1:
      domain: "ap-northeast-1.compute.internal"
    ap-northeast-2:
      domain: "ap-northeast-2.compute.internal"
    ap-southeast-1:
      domain: "ap-southeast-1.compute.internal"
    ap-southeast-2:
      domain: "ap-southeast-2.compute.internal"
    sa-east-1:
      domain: "sa-east-1.compute.internal"
    ap-south-1:
      domain: "ap-south-1.compute.internal"
{% include 'templates/region_mapping.j2' %}
Resources:
  Vpc:
@@ -158,6 +63,7 @@
      MapPublicIpOnLaunch: true
      VpcId:
        Ref: Vpc
  PublicSubnetRTA:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
@@ -246,6 +152,71 @@
                - infranode{{loop.index}}
                - PublicIp
{% endfor %}
  clientVMLaunchConfiguration:
    Type: AWS::AutoScaling::LaunchConfiguration
    DependsOn: HostSG
    Properties:
      AssociatePublicIpAddress: True
      BlockDeviceMappings:
        - DeviceName: "/dev/sda1"
          Ebs:
            VolumeSize: {{ rootfs_size_clientvm }}
        - DeviceName: "{{docker_device}}"
          Ebs:
            VolumeSize: 100
            VolumeType: gp2
      ImageId:
        Fn::FindInMap:
        - RegionMapping
        - Ref: AWS::Region
        - RHELAMI
      InstanceType: {{clientvm_instance_type}}
      KeyName: {{key_name}}
      SecurityGroups:
        - "Fn::GetAtt":
          - HostSG
          - GroupId
  clientVMScalingGroup:
    Type: AWS::AutoScaling::AutoScalingGroup
    DependsOn: VpcGA
    Properties:
      VPCZoneIdentifier:
        - Ref: PublicSubnet
      #AvailabilityZones:
      #  Fn::GetAZs: ""
      #   - Fn::GetAtt:
      #       - PublicSubnetClientVM
      #       - AvailabilityZone
      #   - Fn::GetAtt:
      #       - PublicSubnetClientVM2
      #       - AvailabilityZone
      LaunchConfigurationName:
        Ref: clientVMLaunchConfiguration
      MinSize: {{num_users}}
      MaxSize: 200
      DesiredCapacity: {{num_users}}
      Tags:
        - Key: isolated
          Value: True
          PropagateAtLaunch: True
        - Key: "owner"
          Value: "{{ email | default('unknownuser') }}"
          PropagateAtLaunch: True
        - Key: "Project"
          Value: "{{project_tag}}"
          PropagateAtLaunch: True
        - Key: "{{project_tag}}"
          Value: "clientvm"
          PropagateAtLaunch: True
        - Key: "AnsibleGroup"
          Value: "clientvms"
          PropagateAtLaunch: True
        - Key: "ostype"
          Value: "linux"
          PropagateAtLaunch: True
{% for instance in instances %}
{% if instance['dns_loadbalancer']|d(false)|bool and not instance['unique']|d(false)|bool %}
@@ -468,6 +439,9 @@
        Ref: RegistryS3
Outputs:
  AutoScalingGroupClientVM:
    Value:
      Ref: clientVMScalingGroup
  Route53internalzoneOutput:
    Description: The ID of the internal route 53 zone
    Value:
ansible/configs/rhte-ocp-workshop/files/hosts_template.3.10.34.j2
New file
@@ -0,0 +1,395 @@
#
# /etc/ansible/hosts file for OpenShift Container Platform 3.10.34
#
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_user={{ansible_ssh_user}}
ansible_become=yes
###########################################################################
### OpenShift Basic Vars
###########################################################################
openshift_deployment_type=openshift-enterprise
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version}
openshift_examples_modify_imagestreams=true
{% if container_runtime == "cri-o" %}
openshift_use_crio=True
openshift_crio_enable_docker_gc=True
openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
{% endif %}
openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true','runtime={{container_runtime}}']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true','runtime={{container_runtime}}']}, {'name': 'node-config-glusterfs', 'labels': ['runtime={{container_runtime}}']}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true','runtime={{container_runtime}}'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. These settings now need to go into the openshift_node_groups edits above.
# openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
{% if install_glusterfs|bool %}
###########################################################################
### OpenShift Container Storage
###########################################################################
openshift_master_dynamic_provisioning_enabled=True
# CNS storage cluster
# From https://github.com/red-hat-storage/openshift-cic
openshift_storage_glusterfs_namespace=openshift-storage
openshift_storage_glusterfs_storageclass=true
openshift_storage_glusterfs_storageclass_default=true
openshift_storage_glusterfs_block_deploy=true
openshift_storage_glusterfs_block_host_vol_create=true
openshift_storage_glusterfs_block_host_vol_size=200
openshift_storage_glusterfs_block_storageclass=true
openshift_storage_glusterfs_block_storageclass_default=false
# Container image to use for glusterfs pods
openshift_storage_glusterfs_image="registry.access.redhat.com/rhgs3/rhgs-server-rhel7:v3.9"
# Container image to use for glusterblock-provisioner pod
openshift_storage_glusterfs_block_image="registry.access.redhat.com/rhgs3/rhgs-gluster-block-prov-rhel7:v3.9"
# Container image to use for heketi pods
openshift_storage_glusterfs_heketi_image="registry.access.redhat.com/rhgs3/rhgs-volmanager-rhel7:v3.9"
{% endif %}
{% if install_nfs|bool %}
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
{% endif %}
###########################################################################
### OpenShift Cockpit Vars
###########################################################################
# Enable cockpit
osm_use_cockpit=true
osm_cockpit_plugins=['cockpit-kubernetes']
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname={{master_lb_dns}}
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
{% if install_lets_encrypt_certificates|bool %}
openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}}
{% endif %}
openshift_set_hostname=True
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
# os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %}
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %}
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
openshift_master_identity_providers={{identity_providers|to_json}}
{% if admission_plugin_config is defined %}
###########################################################################
### OpenShift admission plugin config
###########################################################################
openshift_master_admission_plugin_config={{admission_plugin_config|to_json}}
{% endif %}
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
########################
# Enable cluster metrics
########################
openshift_metrics_install_metrics={{install_metrics}}
{% if install_nfs|bool and not install_glusterfs|bool %}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_pvc_storage_class_name=''
{% endif %}
{% if install_glusterfs|bool %}
openshift_metrics_storage_kind=dynamic
openshift_metrics_storage_volume_size=20Gi
openshift_metrics_cassandra_pvc_storage_class_name='glusterfs-storage-block'
{% endif %}
openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/infra": "true"}
# Store Metrics for 2 days
openshift_metrics_duration=2
{% if install_prometheus|bool %}
#########################
# Add Prometheus Metrics:
#########################
openshift_hosted_prometheus_deploy=true
openshift_prometheus_namespace=openshift-metrics
openshift_prometheus_node_selector={"node-role.kubernetes.io/infra":"true"}
# Prometheus
{% if install_glusterfs|bool %}
openshift_prometheus_storage_type='pvc'
openshift_prometheus_storage_kind=dynamic
openshift_prometheus_storage_class='glusterfs-storage-block'
openshift_prometheus_storage_volume_size=20Gi
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_volume_name=prometheus
{% elif install_nfs|bool %}
openshift_prometheus_storage_type='emptydir'
{% endif %}
# For prometheus-alertmanager
{% if install_glusterfs|bool %}
openshift_prometheus_alertmanager_storage_type='pvc'
openshift_prometheus_alertmanager_storage_kind=dynamic
openshift_prometheus_alertmanager_storage_class='glusterfs-storage-block'
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
{% elif install_nfs|bool %}
openshift_prometheus_alertmanager_storage_type='emptydir'
{% endif %}
# For prometheus-alertbuffer
{% if install_glusterfs|bool %}
openshift_prometheus_alertbuffer_storage_type='pvc'
openshift_prometheus_alertbuffer_storage_kind=dynamic
openshift_prometheus_alertbuffer_storage_class='glusterfs-storage-block'
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
{% elif install_nfs|bool %}
openshift_prometheus_alertbuffer_storage_type='emptydir'
{% endif %}
# Suggested Quotas and limits for Prometheus components:
openshift_prometheus_memory_requests=2Gi
openshift_prometheus_cpu_requests=750m
openshift_prometheus_memory_limit=2Gi
openshift_prometheus_cpu_limit=750m
openshift_prometheus_alertmanager_memory_requests=300Mi
openshift_prometheus_alertmanager_cpu_requests=200m
openshift_prometheus_alertmanager_memory_limit=300Mi
openshift_prometheus_alertmanager_cpu_limit=200m
openshift_prometheus_alertbuffer_memory_requests=300Mi
openshift_prometheus_alertbuffer_cpu_requests=200m
openshift_prometheus_alertbuffer_memory_limit=300Mi
openshift_prometheus_alertbuffer_cpu_limit=200m
# The following file will need to be copied over to the bastion before deployment
# There is an example in ocp-workshop/files
# openshift_prometheus_additional_rules_file=/root/prometheus_alerts_rules.yml
# Grafana
openshift_grafana_node_selector={"node-role.kubernetes.io/infra":"true"}
openshift_grafana_storage_type=pvc
openshift_grafana_pvc_size=2Gi
openshift_grafana_node_exporter=true
openshift_grafana_prometheus_namespace="openshift-metrics"
openshift_grafana_prometheus_serviceaccount="prometheus"
openshift_grafana_prometheus_route="prometheus"
{% if install_glusterfs|bool %}
openshift_grafana_sc_name=glusterfs-storage
{% endif %}
{% endif %}
########################
# Enable cluster logging
########################
openshift_logging_install_logging={{install_logging}}
{% if install_nfs|bool and not install_glusterfs|bool %}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
openshift_logging_es_pvc_storage_class_name=''
{% endif %}
{% if install_glusterfs|bool %}
openshift_logging_es_pvc_dynamic=true
openshift_logging_es_pvc_size=20Gi
openshift_logging_es_cluster_size=1
openshift_logging_es_pvc_storage_class_name='glusterfs-storage-block'
{% endif %}
openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_es_cluster_size=1
openshift_logging_curator_default_days=3
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_replicas={{infranode_instance_count}}
{% if install_lets_encrypt_certificates|bool %}
openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}}
{% endif %}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
{% if s3user_access_key is defined %}
# Registry AWS S3
# S3 bucket must already exist.
openshift_hosted_registry_storage_kind=object
openshift_hosted_registry_storage_provider=s3
openshift_hosted_registry_storage_s3_accesskey={{ s3user_access_key }}
openshift_hosted_registry_storage_s3_secretkey={{ s3user_secret_access_key }}
openshift_hosted_registry_storage_s3_bucket={{ project_tag }}
openshift_hosted_registry_storage_s3_region={{ aws_region_final|d(aws_region) }}
openshift_hosted_registry_storage_s3_chunksize=26214400
openshift_hosted_registry_storage_s3_rootdirectory=/registry
{% endif %}
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
# openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
masters
etcd
nodes
{% if install_nfs|bool %}
nfs
{% endif %}
{% if install_glusterfs|bool %}
glusterfs
{% endif %}
{% if groups['newnodes']|d([])|length > 0 %}
new_nodes
{% endif %}
[masters]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-master'
{% endfor %}
## These are infranodes
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-infra'
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort
  if host not in groups['newnodes']|d([])
  and host not in groups['glusterfs']|d([])
  %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute'
{% endfor %}
{% if groups['glusterfs']|d([])|length > 0 %}
## These are glusterfs nodes
{% for host in groups['glusterfs']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute'
{% endfor %}
{% endif %}
{% if groups['newnodes']|d([])|length > 0 %}
# scaleup performed, leave an empty group, see:
# https://docs.openshift.com/container-platform/3.7/install_config/adding_hosts_to_existing_cluster.html
[new_nodes]
{% for host in groups['newnodes']|sort %}
{{ hostvars[host].internaldns }} ansible_ssh_user={{remote_user}} ansible_ssh_private_key_file=~/.ssh/{{key_name}}.pem openshift_node_group_name='node-config-compute'
{% endfor %}
{% endif %}
{% if install_nfs|bool %}
[nfs]
{% for host in [groups['support']|sort|first] %}
{{ hostvars[host].internaldns }}
{% endfor %}
{% endif %}
{% if install_glusterfs|bool %}
{% set query = "[?name=='support']|[0].volumes[?purpose=='glusterfs'].device_name" %}
[glusterfs]
{% for host in groups['glusterfs']|sort %}
{% if  loop.index % 3 == 1 %}
{%   set glusterfs_zone = 1 %}
{% elif  loop.index % 3 == 2 %}
{%   set glusterfs_zone = 2 %}
{% elif  loop.index % 3 == 0 %}
{%   set glusterfs_zone = 3 %}
{% endif %}
{% if cloud_provider == 'ec2' %}
{{ hostvars[host].internaldns }} glusterfs_zone={{ glusterfs_zone }} glusterfs_devices='{{instances|json_query(query)|to_json}}'
{% elif cloud_provider == 'azure' %}
{{ hostvars[host].internaldns }} glusterfs_zone={{ glusterfs_zone }} glusterfs_devices='{{ [ hostvars[host].glusterfs_device_name ] |to_json}}'
{% endif %}
{% endfor %}
{% endif %}
ansible/configs/rhte-ocp-workshop/post_infra.yml
@@ -9,6 +9,58 @@
    - step002
    - post_infrastructure
  tasks:
    - environment:
        AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
        AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
        AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}"
      block:
      - name: Deactivate autoscaling
        command: >-
          aws autoscaling suspend-processes --auto-scaling-group-name
          {{cloudformation_out_final.stack_outputs.AutoScalingGroupClientVM}}
      - name: Write down autoscaling name
        copy:
          dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.AutoScalingGroupClientVM"
          content: "{{cloudformation_out_final.stack_outputs.AutoScalingGroupClientVM}}"
      - name: Allocate and associate an EIP to the clientVMs
        ec2_eip:
          device_id: "{{hostvars[item].instance_id}}"
        with_items: "{{groups['clientvms']}}"
        register: eips
      # reimport roles to update inventory, and regenerate ssh config, since
      # we're using public ip to connect to clientVMs
      - name: Run infra-ec2-create-inventory Role
        import_role:
          name: "{{ ANSIBLE_REPO_PATH }}/roles/infra-ec2-create-inventory"
      - name: Run Common SSH Config Generator Role
        import_role:
          name: "{{ANSIBLE_REPO_PATH}}/roles/infra-common-ssh-config-generate"
      - name: Create DNS record for each clientVM
        route53:
          hosted_zone_id: "{{HostedZoneId}}"
          zone: "{{subdomain_base}}"
          record: "clientvm{{idx}}.{{subdomain_base}}"
          state: present
          type: A
          ttl: 90
          value: "{{hostvars[item].public_ip_address}}"
        with_items: "{{groups['clientvms']}}"
        loop_control:
          index_var: idx
      - name: Rename instance
        ec2_tag:
          state: present
          resource: "{{hostvars[item].instance_id}}"
          tags:
            Name: "clientvm{{idx}}"
        with_items: "{{groups['clientvms']}}"
        loop_control:
          index_var: idx
    - name: Job Template to launch a Job Template with update on launch inventory set
      uri:
ansible/configs/rhte-ocp-workshop/post_software.yml
@@ -479,6 +479,7 @@
  - when:
    - install_prometheus|d(False)|bool
    - osrelease is version_compare("3.10", ">=")
    - osrelease is version_compare("3.10.34", "<")
    block:
    # Node Exporters on all Nodes listen on port 9100.
    # Open Firewall Port 9100 for future sessions by adding
@@ -719,7 +720,7 @@
  vars:
    zabbix_auto_registration_keyword: OCP Host
  tasks:
    - when: install_zabbix_bool
    - when: install_zabbix|bool
      include_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client"
  tags:
ansible/roles/bastion-student-user/defaults/main.yml
New file
@@ -0,0 +1,2 @@
---
student_name: lab-user
ansible/roles/bastion-student-user/tasks/main.yml
@@ -16,9 +16,9 @@
  when:
    - student_password is not defined
- name: Create user lab-user
- name: Create user
  user:
    name: lab-user
    name: "{{ student_name }}"
    password: "{{ student_password|password_hash('sha512') }}"
    comment: GTPE Student
    group: users
@@ -44,7 +44,7 @@
  lineinfile:
    path: '/etc/sudoers'
    state: present
    line: 'student         ALL=(ALL)       NOPASSWD: ALL'
    line: "{{ student_name }}         ALL=(ALL)       NOPASSWD: ALL"
    insertafter: '^ec2-user'
- name: Restart sshd
ansible/roles/geerlingguy.git/.gitignore
New file
@@ -0,0 +1,2 @@
*.retry
tests/test.sh
ansible/roles/geerlingguy.git/.travis.yml
New file
@@ -0,0 +1,59 @@
---
services: docker
env:
  # Test source install on latest supported OSes.
  - distro: centos7
    playbook: test-source.yml
    GIT_VERSION: 2.16.2
  - distro: fedora27
    playbook: test-source.yml
    GIT_VERSION: 2.16.2
  - distro: ubuntu1804
    playbook: test-source.yml
    GIT_VERSION: 2.16.2
  # Test package install on all supported OSes.
  - distro: centos7
    playbook: test.yml
    GIT_VERSION: 1.8.3.1
  - distro: centos6
    playbook: test.yml
    GIT_VERSION: 1.7.1
  - distro: fedora27
    playbook: test.yml
    GIT_VERSION: 2.14.3
  - distro: ubuntu1804
    playbook: test.yml
    GIT_VERSION: 2.15.1
  - distro: ubuntu1604
    playbook: test.yml
    GIT_VERSION: 2.7.4
  - distro: ubuntu1404
    playbook: test.yml
    GIT_VERSION: 1.9.1
  - distro: debian8
    playbook: test.yml
    GIT_VERSION: 2.1.4
script:
  # Configure test script so we can run extra tests after playbook is run.
  - export container_id=$(date +%s)
  - export cleanup=false
  # Download test shim.
  - wget -O ${PWD}/tests/test.sh https://gist.githubusercontent.com/geerlingguy/73ef1e5ee45d8694570f334be385e181/raw/
  - chmod +x ${PWD}/tests/test.sh
  # Run tests.
  - ${PWD}/tests/test.sh
  # Ensure Git is installed and at the right version.
  - 'docker exec --tty ${container_id} env TERM=xterm which git'
  - 'docker exec --tty ${container_id} env TERM=xterm test -x /usr/bin/git'
  - 'docker exec --tty ${container_id} env TERM=xterm git --version'
  - 'docker exec --tty ${container_id} env TERM=xterm /usr/bin/git --version | grep -qF "$GIT_VERSION"'
notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
ansible/roles/geerlingguy.git/LICENSE
New file
@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2017 Jeff Geerling
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
ansible/roles/geerlingguy.git/README.md
New file
@@ -0,0 +1,55 @@
# Ansible Role: Git
[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-git.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-git)
Installs Git, a distributed version control system, on any RHEL/CentOS or Debian/Ubuntu Linux system.
## Requirements
None.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
    workspace: /root
The directory where certain files will be downloaded and adjusted prior to Git installation, if needed.
    git_enablerepo: ""
This variable, as well as `git_packages`, is used to install Git via a particular `yum` repo if `git_install_from_source` is false (CentOS only). Set it to any additional repository you have installed that provides a newer or different Git version.
    git_packages:
      - git
      - git-svn
The specific Git packages that will be installed. By default, `git-svn` is included; override this variable in your playbook's variables to drop `git-svn` if desired.
    git_install_from_source: false
    git_install_path: "/usr"
    git_version: "2.16.2"
Whether to install Git from source; if set to `true`, `git_version` is required and will be used to install a particular version of git (see all available versions here: https://www.kernel.org/pub/software/scm/git/), and `git_install_path` defines where git should be installed.
    git_install_from_source_force_update: false
If Git is already installed at an older version, force a new source build. Only applies if `git_install_from_source` is `true`.
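For example, a play that ties these variables together to build Git from source might look like the following (a minimal sketch modeled on the role's own test playbook; the `build_servers` hosts pattern is only illustrative):

    - hosts: build_servers
      vars:
        git_install_from_source: true
        git_install_from_source_force_update: true
        git_version: "2.16.2"
      roles:
        - geerlingguy.git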
## Dependencies
None.
## Example Playbook
    - hosts: servers
      roles:
        - { role: geerlingguy.git }
## License
MIT / BSD
## Author Information
This role was created in 2014 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
ansible/roles/geerlingguy.git/defaults/main.yml
New file
@@ -0,0 +1,23 @@
---
workspace: /root
# If git_install_from_source is set to false, these two variables define whether
# to use an additional repo for the package installation, and which git packages
# will be installed.
git_enablerepo: ""
git_packages:
  - git
  - git-svn
# If set to TRUE, git will be installed from source, using the version set with
# the 'git_version' variable instead of using a package.
git_install_from_source: false
git_install_path: "/usr"
git_version: "2.16.2"
# If git is already installed at an older version, force a new source build.
# Only applies if git_install_from_source is `true`.
git_install_from_source_force_update: false
# Leave this at its default.
git_reinstall_from_source: false
ansible/roles/geerlingguy.git/meta/.galaxy_install_info
New file
@@ -0,0 +1 @@
{install_date: 'Thu Aug 30 16:27:09 2018', version: 2.0.2}
ansible/roles/geerlingguy.git/meta/main.yml
New file
@@ -0,0 +1,29 @@
---
dependencies: []
galaxy_info:
  author: geerlingguy
  description: Git version control software
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: 2.4
  platforms:
  - name: EL
    versions:
    - all
  - name: Fedora
    versions:
    - all
  - name: Debian
    versions:
    - all
  - name: Ubuntu
    versions:
    - all
  galaxy_tags:
    - development
    - system
    - git
    - vcs
    - source
    - code
ansible/roles/geerlingguy.git/tasks/install-from-source.yml
New file
@@ -0,0 +1,70 @@
---
- name: Include OS-specific variables (RedHat).
  include_vars: "{{ ansible_os_family }}.yml"
  when:
    - ansible_os_family == "RedHat"
    - ansible_distribution != "Fedora"
- name: Include OS-specific variables (Fedora).
  include_vars: "{{ ansible_distribution }}.yml"
  when: ansible_distribution == "Fedora"
- name: Include OS-specific variables (Debian).
  include_vars: "{{ ansible_os_family }}.yml"
  when: ansible_os_family == "Debian"
- name: Define git_install_from_source_dependencies.
  set_fact:
    git_install_from_source_dependencies: "{{ __git_install_from_source_dependencies | list }}"
  when: git_install_from_source_dependencies is not defined
- name: Ensure git's dependencies are installed (RedHat).
  package: "name={{ item }} state=present"
  with_items: "{{ git_install_from_source_dependencies }}"
  when: ansible_os_family == 'RedHat'
- name: Ensure git's dependencies are installed (Debian).
  apt: "name={{ item }} state=present"
  with_items: "{{ git_install_from_source_dependencies }}"
  when: ansible_os_family == 'Debian'
- name: Get installed version.
  command: >
    git --version
    warn=no
  changed_when: false
  failed_when: false
  check_mode: no
  register: git_installed_version
- name: Force git install if the version numbers do not match.
  set_fact:
    git_reinstall_from_source: true
  when:
    - git_install_from_source_force_update
    - (git_installed_version.rc == 0) and (git_installed_version.stdout | regex_replace("^.*?([0-9\.]+)$", "\\1") | version_compare(git_version, operator="!="))
- name: Download git.
  get_url:
    url: "https://www.kernel.org/pub/software/scm/git/git-{{ git_version }}.tar.gz"
    dest: "{{ workspace }}/git-{{ git_version }}.tar.gz"
  when: (git_installed_version.rc != 0) or git_reinstall_from_source
- name: Expand git archive.
  unarchive:
    src: "{{ workspace }}/git-{{ git_version }}.tar.gz"
    dest: "{{ workspace }}"
    creates: "{{ workspace }}/git-{{ git_version }}/README"
    copy: no
  when: (git_installed_version.rc != 0) or git_reinstall_from_source
- name: Build git.
  command: >
    make prefix={{ git_install_path }} {{ item }}
    chdir={{ workspace }}/git-{{ git_version }}
  with_items:
    - all
    - install
  when: (git_installed_version.rc != 0) or git_reinstall_from_source
  become: yes
ansible/roles/geerlingguy.git/tasks/main.yml
New file
@@ -0,0 +1,23 @@
---
- name: Ensure git is installed (RedHat).
  package:
    name: "{{ item }}"
    state: present
    enablerepo: "{{ git_enablerepo }}"
  with_items: "{{ git_packages }}"
  when: (git_install_from_source == false) and (ansible_os_family == 'RedHat')
- name: Update apt cache (Debian).
  apt: update_cache=yes cache_valid_time=86400
  when: ansible_os_family == 'Debian'
- name: Ensure git is installed (Debian).
  apt:
    name: "{{ item }}"
    state: present
  with_items: "{{ git_packages }}"
  when: (git_install_from_source == false) and (ansible_os_family == 'Debian')
# Install git from source when git_install_from_source is true.
- import_tasks: install-from-source.yml
  when: git_install_from_source == true
ansible/roles/geerlingguy.git/tests/README.md
New file
@@ -0,0 +1,11 @@
# Ansible Role tests
To run the test playbook(s) in this directory:
  1. Install and start Docker.
  1. Download the test shim (see .travis.yml file for the URL) into `tests/test.sh`:
    - `wget -O tests/test.sh https://gist.githubusercontent.com/geerlingguy/73ef1e5ee45d8694570f334be385e181/raw/`
  1. Make the test shim executable: `chmod +x tests/test.sh`.
  1. Run (from the role root directory) `distro=[distro] playbook=[playbook] ./tests/test.sh`
If you don't want the container to be automatically deleted after the test playbook is run, add the following environment variables: `cleanup=false container_id=$(date +%s)`
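For example, to run the source-install test against CentOS 7 (one of the combinations exercised in .travis.yml):

    distro=centos7 playbook=test-source.yml ./tests/test.sh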
ansible/roles/geerlingguy.git/tests/test-source.yml
New file
@@ -0,0 +1,16 @@
---
- hosts: all
  vars:
    git_install_from_source: true
    git_install_from_source_force_update: true
    git_version: "2.16.2"
  pre_tasks:
    - name: Update apt cache.
      apt: update_cache=yes cache_valid_time=600
      when: ansible_os_family == 'Debian'
      changed_when: false
  roles:
    - role_under_test
ansible/roles/geerlingguy.git/tests/test.yml
New file
@@ -0,0 +1,15 @@
---
- hosts: all
  vars:
    git_install_from_source: false
    git_install_path: /usr/local
  pre_tasks:
    - name: Update apt cache.
      apt: update_cache=yes cache_valid_time=600
      when: ansible_os_family == 'Debian'
      changed_when: false
  roles:
    - role_under_test
ansible/roles/geerlingguy.git/vars/Debian.yml
New file
@@ -0,0 +1,10 @@
---
git_install_from_source_dependencies:
  - libcurl4-gnutls-dev
  - libexpat1-dev
  - gettext
  - libssl-dev
  - zlib1g-dev
  - build-essential
  - gcc
ansible/roles/geerlingguy.git/vars/Fedora.yml
New file
@@ -0,0 +1,13 @@
---
git_install_from_source_dependencies:
  - gettext-devel
  - expat-devel
  - curl-devel
  - zlib-devel
  - perl-devel
  - openssl-devel
  - subversion-perl
  - make
  - gcc
  - tar
ansible/roles/geerlingguy.git/vars/RedHat.yml
New file
@@ -0,0 +1,12 @@
---
git_install_from_source_dependencies:
  - gettext-devel
  - expat-devel
  - curl-devel
  - zlib-devel
  - perl-devel
  - openssl-devel
  - subversion-perl
  - make
  - gcc
ansible/roles/geerlingguy.git/vars/main.yml
New file
@@ -0,0 +1,3 @@
---
# This space intentionally left blank.
ansible/roles/geerlingguy.gogs/.gitignore
New file
@@ -0,0 +1,2 @@
*.retry
tests/test.sh
ansible/roles/geerlingguy.gogs/.travis.yml
New file
@@ -0,0 +1,27 @@
---
services: docker
env:
  - distro: centos7
  - distro: ubuntu1604
  - distro: ubuntu1404
  - distro: ubuntu1204
  - distro: debian8
script:
  # Configure test script so we can run extra tests after playbook is run.
  - export container_id=$(date +%s)
  - export cleanup=false
  # Download test shim.
  - wget -O ${PWD}/tests/test.sh https://gist.githubusercontent.com/geerlingguy/73ef1e5ee45d8694570f334be385e181/raw/
  - chmod +x ${PWD}/tests/test.sh
  # Run tests.
  - ${PWD}/tests/test.sh
  # Check if we get an installation page.
  - 'docker exec --tty ${container_id} env TERM=xterm curl http://localhost:3000/install'
notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
ansible/roles/geerlingguy.gogs/LICENSE
New file
@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2017 Jeff Geerling
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
ansible/roles/geerlingguy.gogs/README.md
New file
@@ -0,0 +1,59 @@
# Ansible Role: Gogs
[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-gogs.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-gogs)
Installs [Gogs](https://github.com/gogits/gogs), a Go-based front-end to Git, on RedHat or Debian-based Linux systems.
After the playbook is finished, visit the Gogs server (on port 3000 by default); you will be redirected to the /install page, where you can configure an administrator account and other default options.
## Requirements
Requires Git (via `geerlingguy.git`), and at least the Gogs HTTP port (3000 by default) open on your system's firewall. Install MySQL (e.g. via `geerlingguy.mysql`) prior to installing Gogs if you would like to use MySQL instead of the built-in SQLite support.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
    gogs_user: git
    gogs_user_home: /home/git
The user and home under which Gogs will run and be installed.
    gogs_binary_url: https://github.com/gogits/gogs/releases/download/v0.3.1/linux_amd64.zip
Download URL for the Gogs binary.
    gogs_http_port: "3000"
HTTP port over which Gogs will be accessed.
    gogs_use_mysql: false
    gogs_db_name: gogs
    gogs_db_username: gogs
    gogs_db_password: root
MySQL database support. Set `gogs_use_mysql` to `true` to configure MySQL for gogs, using the database name, username, and password defined by the respective variables.
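As a minimal sketch, enabling MySQL support could look like this in your playbook's variables (the password is a placeholder; substitute your own):

    gogs_use_mysql: true
    gogs_db_name: gogs
    gogs_db_username: gogs
    gogs_db_password: changeme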
## Dependencies
  - geerlingguy.git
## Example Playbook
    - hosts: servers
      vars_files:
        - vars/main.yml
      roles:
        - geerlingguy.gogs
*Inside `vars/main.yml`*:
    gogs_http_port: "8080"
## License
MIT / BSD
## Author Information
This role was created in 2014 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
ansible/roles/geerlingguy.gogs/defaults/main.yml
New file
@@ -0,0 +1,9 @@
---
gogs_user: git
gogs_user_home: /home/git
gogs_binary_url: https://github.com/gogits/gogs/releases/download/v0.9.97/linux_amd64.tar.gz
gogs_http_port: "3000"
gogs_use_mysql: false
gogs_db_name: gogs
gogs_db_username: gogs
gogs_db_password: root
ansible/roles/geerlingguy.gogs/handlers/main.yml
New file
@@ -0,0 +1,3 @@
---
- name: restart gogs
  service: name=gogs state=restarted
ansible/roles/geerlingguy.gogs/meta/.galaxy_install_info
New file
@@ -0,0 +1 @@
{install_date: 'Thu Aug 30 16:27:17 2018', version: 1.4.2}
ansible/roles/geerlingguy.gogs/meta/main.yml
New file
@@ -0,0 +1,24 @@
---
dependencies:
  - geerlingguy.git
galaxy_info:
  author: geerlingguy
  description: "Gogs: Go Git Service"
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: 1.8
  platforms:
  - name: EL
    versions:
    - 6
    - 7
  - name: Ubuntu
    versions:
    - all
  - name: Debian
    versions:
    - all
  galaxy_tags:
    - development
    - web
ansible/roles/geerlingguy.gogs/tasks/gogs-mysql.yml
New file
@@ -0,0 +1,20 @@
---
- name: Create Gogs MySQL user.
  mysql_user:
    name: "{{ gogs_db_username }}"
    host: "{{ item }}"
    priv: "{{ gogs_db_name }}.*:ALL"
    password: "{{ gogs_db_password }}"
  with_items:
    - 127.0.0.1
    - ::1
    - localhost
  when: gogs_use_mysql
  notify: restart gogs
- name: Create Gogs MySQL database.
  mysql_db:
    db: "{{ gogs_db_name }}"
    state: present
  when: gogs_use_mysql
  notify: restart gogs
ansible/roles/geerlingguy.gogs/tasks/init-setup.yml
New file
@@ -0,0 +1,24 @@
---
- name: Make Gogs init script executable.
  file:
    path: "{{ gogs_user_home }}/{{ gogs_init_script_path }}"
    mode: 0755
- name: Symlink Gogs binary and startup scripts.
  file:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    state: link
  with_items:
    - { src: "{{ gogs_user_home }}/gogs/gogs", dest: "/usr/local/bin/gogs" }
    - { src: "{{ gogs_user_home }}/{{ gogs_init_script_path }}" , dest: "/etc/init.d/gogs" }
  notify: restart gogs
- name: Copy Gogs systemd unit file into place (for systemd systems).
  template:
    src: gogs.unit.j2
    dest: /etc/systemd/system/gogs.service
    owner: root
    group: root
    mode: 0755
  when: "ansible_service_mgr == 'systemd'"
ansible/roles/geerlingguy.gogs/tasks/main.yml
New file
@@ -0,0 +1,48 @@
---
# Include variables and define needed variables.
- name: Include OS-specific variables.
  include_vars: "{{ ansible_os_family }}.yml"
- name: Ensure unzip is installed.
  package: name=unzip state=present
- name: Create user for Gogs.
  user:
    name: "{{ gogs_user }}"
    comment: Gogs
    home: "{{ gogs_user_home }}"
- name: Check if Gogs is already installed.
  stat: path=/usr/local/bin/gogs
  register: gogs_bin
- name: Download Gogs.
  get_url:
    url: "{{ gogs_binary_url }}"
    dest: "{{ gogs_user_home }}/gogs.zip"
    owner: "{{ gogs_user }}"
    group: "{{ gogs_user }}"
  when: gogs_bin.stat.islnk is not defined
- name: Expand Gogs.
  unarchive:
    src: "{{ gogs_user_home }}/gogs.zip"
    dest: "{{ gogs_user_home }}"
    group: "{{ gogs_user }}"
    owner: "{{ gogs_user }}"
    copy: no
  when: gogs_bin.stat.islnk is not defined
- include: init-setup.yml
- include: gogs-mysql.yml
- name: Create Gogs log folder.
  file:
    path: "{{ gogs_user_home }}/gogs/log"
    state: directory
    owner: "{{ gogs_user }}"
    group: "{{ gogs_user }}"
    mode: 0755
- name: Ensure Gogs is running.
  service: name=gogs state=started enabled=yes
ansible/roles/geerlingguy.gogs/templates/gogs.unit.j2
New file
@@ -0,0 +1,26 @@
[Unit]
Description=Gogs (Go Git Service)
After=syslog.target
After=network.target
#After=mysqld.service
#After=postgresql.service
#After=memcached.service
#After=redis.service
[Service]
# Modify these two values and uncomment them if you have
# repos with lots of files and get an HTTP error 500 because
# of that
###
#LimitMEMLOCK=infinity
#LimitNOFILE=65535
Type=simple
User={{ gogs_user }}
Group={{ gogs_user }}
WorkingDirectory={{ gogs_user_home }}/gogs
ExecStart={{ gogs_user_home }}/gogs/gogs web
Restart=always
Environment=USER={{ gogs_user }} HOME={{ gogs_user_home }}
[Install]
WantedBy=multi-user.target
ansible/roles/geerlingguy.gogs/tests/README.md
New file
@@ -0,0 +1,11 @@
# Ansible Role tests
To run the test playbook(s) in this directory:
  1. Install and start Docker.
  1. Download the test shim (see .travis.yml file for the URL) into `tests/test.sh`:
    - `wget -O tests/test.sh https://gist.githubusercontent.com/geerlingguy/73ef1e5ee45d8694570f334be385e181/raw/`
  1. Make the test shim executable: `chmod +x tests/test.sh`.
  1. Run (from the role root directory) `distro=[distro] playbook=[playbook] ./tests/test.sh`
If you don't want the container to be automatically deleted after the test playbook is run, add the following environment variables: `cleanup=false container_id=$(date +%s)`
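For instance, to run the test playbook from this directory against CentOS 7:

    distro=centos7 playbook=test.yml ./tests/test.sh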
ansible/roles/geerlingguy.gogs/tests/requirements.yml
New file
@@ -0,0 +1,2 @@
---
- src: geerlingguy.git
ansible/roles/geerlingguy.gogs/tests/test.yml
New file
@@ -0,0 +1,14 @@
- hosts: all
  pre_tasks:
    - name: Update apt cache.
      apt: update_cache=yes cache_valid_time=600
      when: ansible_os_family == 'Debian'
      changed_when: false
    - name: Ensure build dependencies are installed.
      package: name=curl state=present
  roles:
    - geerlingguy.git
    - role_under_test
ansible/roles/geerlingguy.gogs/vars/Debian.yml
New file
@@ -0,0 +1,2 @@
---
gogs_init_script_path: gogs/scripts/init/debian/gogs
ansible/roles/geerlingguy.gogs/vars/RedHat.yml
New file
@@ -0,0 +1,2 @@
---
gogs_init_script_path: gogs/scripts/init/centos/gogs
ansible/roles/host-gogs-server/tasks/main.yml
@@ -1,4 +1,7 @@
---
- name: Start host-jenkins-server installer
  debug:
    msg: "Do the needful to deploy host-jenkins-server"
- name: Run gogs Config with admin user creation
  uri:
    url: http://{{ ansible_hostname }}:3000/install
    method: POST
    body: "{{ lookup('template', 'templates/gogs_config.j2') }}"
    status_code: 302
ansible/roles/host-gogs-server/templates/gogs_config.j2
New file
@@ -0,0 +1 @@
db_type=SQLite3&db_path=data/gogs.db&app_name=Gogs&repo_root_path=/home/git/gogs-repositories&run_user=git&domain=example.opentlc.com&ssh_port=22&http_port=3000&app_url=http://localhost:3000/&log_root_path=/home/git/gogs/log&smtp_host=&smtp_from=&smtp_user=&smtp_passwd=&disable_gravatar=true&enable_captcha=false&register_confirm=false&admin_name={{ gogs_admin_username}}&admin_passwd={{ gogs_admin_password }}&admin_confirm_passwd={{ gogs_admin_password }}&admin_email=admin@example.com
ansible/roles/infra-common-ssh-config-generate/tasks/main.yml
@@ -56,7 +56,11 @@
    marker: "##### {mark} ADDED Node Proxy Config  {{ item }} {{ env_type }}-{{ guid }} ######"
    block: |
        Host {{ item }} {{  hostvars[item].public_ip_address | default('') }} {{ hostvars[item].shortname |d('')}}
        {% if hostvars[item].isolated %}
          Hostname {{ hostvars[item].public_ip_address }}
        {% else %}
          Hostname {{ hostvars[item].private_ip_address }}
        {% endif %}
          User {{ remote_user }}
          IdentityFile {{ ssh_key | default(ansible_ssh_private_key_file) | default(default_key_name)}}
          ProxyCommand ssh -F {{ ansible_ssh_config }} {{ bastion_hostname }} -W %h:%p
ansible/roles/infra-ec2-create-inventory/tasks/main.yml
@@ -50,6 +50,7 @@
    key_name: "{{item['key_name']}}"
    state: "{{item['state']}}"
    internaldns: "{{item.tags.internaldns | default(item.private_dns_name)}}"
    isolated: "{{item.tags.isolated | default(false)}}"
    instance_id: "{{ item.id }}"
    region: "{{item['region']}}"
    public_dns_name: "{{item['public_dns_name']}}"
ansible/roles/infra-ec2-template-create/tasks/main.yml
@@ -21,7 +21,7 @@
        - cloudformation_out is defined
        - cloudformation_out is failed
    - name: Launch CloudFormation template
    - name: Launch CloudFormation template (local)
      # environment:
      #   AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
      #   AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
@@ -42,9 +42,43 @@
      register: cloudformation_out
      until: >-
        cloudformation_out is succeeded
        or cloudformation_out.output in ["Stack CREATE complete", "Stack is already up-to-date."]
        or (
          'output' in cloudformation_out
          and cloudformation_out.output in ["Stack CREATE complete", "Stack is already up-to-date."]
        )
      retries: "{{ cloudformation_retries | default(3) }}"
      delay: "{{ cloudformation_retry_delay | default(30) }}"
      when: stat_template.stat.size <= 51200
      ignore_errors: yes
    - name: Launch CloudFormation template (from S3)
      # environment:
      #   AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
      #   AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
      #   AWS_DEFAULT_REGION: "{{aws_region_loop|d(aws_region)}}"
      cloudformation:
        aws_access_key: "{{ aws_access_key_id }}"
        aws_secret_key: "{{ aws_secret_access_key }}"
        stack_name: "{{ project_tag }}"
        state: "present"
        region: "{{ aws_region_loop | d(aws_region) | d(region) | d('us-east-1')}}"
        # Rollback is unreliable; it can make this task hang forever.
        disable_rollback: true
        template_url: "https://s3.amazonaws.com/redhat-gpe-cloudformation-templates/{{env_type}}.{{guid}}.{{cloud_provider}}_cloud_template"
        tags: "{{ cf_tags | combine(cloud_tags_final)}}"
      tags:
        - aws_infrastructure_deployment
        - provision_cf_template
      register: cloudformation_out
      until: >-
        cloudformation_out is succeeded
        or (
          'output' in cloudformation_out
          and cloudformation_out.output in ["Stack CREATE complete", "Stack is already up-to-date."]
        )
      retries: "{{ cloudformation_retries | default(3) }}"
      delay: "{{ cloudformation_retry_delay | default(30) }}"
      when: stat_template.stat.size > 51200
      ignore_errors: yes
    - name: debug cloudformation
ansible/roles/infra-ec2-template-generate/tasks/main.yml
@@ -24,9 +24,40 @@
    - gen_cf_template
    - minify_template
######################### Copy CF Template to S3 if too big
- name: Stat CloudFormation template
  stat:
    path: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.{{cloud_provider}}_cloud_template"
  register: stat_template
  tags:
    - aws_infrastructure_deployment
    - gen_cf_template
- when:
    stat_template.stat.size > 51200
  tags:
    - aws_infrastructure_deployment
    - gen_cf_template
  environment:
    AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
    AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}"
  block:
    - name: Create bucket
      s3_bucket:
        name: redhat-gpe-cloudformation-templates
    - name: Copy Template to S3
      aws_s3:
        bucket: redhat-gpe-cloudformation-templates
        object: "{{ env_type }}.{{ guid }}.{{cloud_provider}}_cloud_template"
        src: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.{{cloud_provider}}_cloud_template"
        mode: put
######################### Validate CF Template
- name: validate cloudformation template
- name: validate cloudformation template (local)
  environment:
    AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
@@ -42,5 +73,24 @@
  tags:
    - aws_infrastructure_deployment
    - validate_cf_template
  when: stat_template.stat.size <= 51200
- name: validate cloudformation template (S3)
  environment:
    AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
    AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}"
  command: >-
    aws cloudformation validate-template
    --region {{ aws_region_final | d(aws_region) | default(region) | default('us-east-1')}}
    --template-url https://s3.amazonaws.com/redhat-gpe-cloudformation-templates/{{env_type}}.{{guid}}.{{cloud_provider}}_cloud_template
  changed_when: false
  register: cloudformation_validation
  until: cloudformation_validation is succeeded
  delay: 20
  tags:
    - aws_infrastructure_deployment
    - validate_cf_template
  when: stat_template.stat.size > 51200
######################### Launch CF Template
ansible/roles/infra-ec2-template-generate/templates/region_mapping.j2
New file
@@ -0,0 +1,73 @@
  RegionMapping:
    us-east-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6871a115
      {% else %}
      RHELAMI: ami-c998b6b2
      {% endif %}
    us-east-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-03291866
      {% else %}
      RHELAMI: ami-cfdafaaa
      {% endif %}
    us-west-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-18726478
      {% else %}
      RHELAMI: ami-66eec506
      {% endif %}
    us-west-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-28e07e50
      {% else %}
      RHELAMI: ami-223f945a
      {% endif %}
    eu-west-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-7c491f05
      {% else %}
      RHELAMI: ami-bb9a6bc2
      {% endif %}
    eu-central-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-c86c3f23
      {% else %}
      RHELAMI: ami-d74be5b8
      {% endif %}
    ap-northeast-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-6b0d5f0d
      {% else %}
      RHELAMI: ami-30ef0556
      {% endif %}
    ap-northeast-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-3eee4150
      {% else %}
      RHELAMI: ami-0f5a8361
      {% endif %}
    ap-southeast-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-76144b0a
      {% else %}
      RHELAMI: ami-10bb2373
      {% endif %}
    ap-southeast-2:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-67589505
      {% else %}
      RHELAMI: ami-ccecf5af
      {% endif %}
    ap-south-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-5b673c34
      {% else %}
      RHELAMI: ami-cdbdd7a2
      {% endif %}
    sa-east-1:
      {% if osrelease is version_compare('3.9.25', '>=') %}
      RHELAMI: ami-b0b7e3dc
      {% else %}
      RHELAMI: ami-a789ffcb
      {% endif %}
ansible/roles/ocp-workload-3scale-multitenant/defaults/main.yml
@@ -4,15 +4,16 @@
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
# By default, this 3scale multi-tenant environment will be owned by user102
ocp_username: user102
# By default, this 3scale multi-tenant environment will be owned by admin1
ocp_username: admin1
ocp_user_needs_quota: true
quota_requests_cpu: 10
quota_limits_cpu: 20
quota_requests_memory: '10Gi'
quota_limits_memory: '25Gi'
quota_limits_memory: '23Gi'
quota_configmaps: 15
quota_pods: 30
@@ -45,6 +46,8 @@
# new tenants
tenant_output_dir: /tmp/3scale_tenants
tenant_provisioning_log_file: tenant_provisioning.log
tenant_provisioning_results_file: user_info_file.txt
start_tenant: 1
end_tenant: 100
tenantAdminPasswd: r3dh4t1!
ansible/roles/ocp-workload-3scale-multitenant/readme.adoc
@@ -7,9 +7,9 @@
. This workload requires installation of xmlstarlet on the target host executing this Ansible playbook
. This workload only needs to be executed once per OCP cluster
. This workload provisions a single centralized 3scale multi-tenant app in a single OCP namespace called: 3scale-mt-$GUID
. The OCP namespace for 3scale multi-tenant app will be owned by the following user:
.. user102
.. user102 will be assigned a clusterquota so as to manage limits and requests assigned to 3scale
. The OCP namespace for 3scale multi-tenant app will be owned by the following non-existent user:
.. admin1
.. admin1 will be assigned a clusterquota so as to manage limits and requests assigned to 3scale
=== Tenants
@@ -23,19 +23,20 @@
== Execution using localhost oc client
-----
WORKLOAD="ocp-workload-3scale-multitenant"
# valid values:  "all" , "3scale" or "tenants"
TAGS=all
# valid values:  "tenants"
TAGS=tenants
# Tenant related variables
START_TENANT=1
END_TENANT=1
CREATE_GWS_WITH_EACH_TENANT=true
WORKLOAD="ocp-workload-3scale-multitenant"
REGION=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$REGION.openshift.opentlc.com
GUID=adm0
CREATE_GWS_WITH_EACH_TENANT=true
ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
ansible/roles/ocp-workload-3scale-multitenant/tasks/pre_workload.yml
@@ -13,7 +13,7 @@
- name: Create user Quota - clusterresourcequota
  shell: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        oc create clusterquota clusterquota-"{{ocp_username}}-{{ocp_project}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
ansible/roles/ocp-workload-3scale-multitenant/tasks/remove_workload.yml
@@ -3,18 +3,31 @@
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{ocp_project}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{ocp_project}}
  ignore_errors: true
- name: Remove user Project
  shell: "oc delete project {{ocp_project}}"
#####       TENANT Management       #######
- name: Copy tenant provisioning script to known path
  template:
    src: templates/manage_tenants.sh
    dest: /tmp/manage_tenants.sh
    mode: 0755
  tags: all,tenants
  vars:
    create_tenants: "false"
- name: Create tenants
  shell: "/tmp/manage_tenants.sh"
  tags: all,tenants
##############################################
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully - Removed"
ansible/roles/ocp-workload-3scale-multitenant/tasks/workload.yml
@@ -6,18 +6,33 @@
# Use this ansible play for this course only.
# Afterwards, leverage (and contribute to) the original ansible playbook created by nmasse-itix.
- name: "Create project for workload {{ocp_project}}"
- name: check if user is cluster admin
  shell: "oc get project default"
  register: default_project_result
  ignore_errors: true
  changed_when: false
- fail:
    msg: "User does not have cluster-admin rights to install Istio"
  when: default_project_result is failed
- name: check if API Manager deployed
  shell: "oc get project {{ocp_project}}"
  register: api_project_result
  ignore_errors: true
  changed_when: false
- name: "Create project {{ocp_project}}"
  shell: "oc new-project {{ocp_project}} --display-name={{ocp_project}}"
  tags: all,3scale
  when: api_project_result is failed
- name: "Label namespace"
  command: "oc label namespace {{ocp_project}} AAD='{{guid}}'"
  tags: all,3scale
  when: api_project_result is failed
- name: Make sure we go back do default project
  shell: "oc project default"
  tags: all,3scale
  when: api_project_result is failed
################      Multitenent  AMP        #####################
@@ -26,7 +41,7 @@
    url: "{{ threescale_template }}"
    dest: "{{modified_template_path}}"
    force: yes
  tags: all,3scale
  when: api_project_result is failed
- name: Process the OpenShift Template and create the OpenShift objects for the 3scale API Management Platform
@@ -40,12 +55,12 @@
                 -p "WILDCARD_DOMAIN={{ ocp_apps_domain }}" \
                 -n "{{ ocp_project }}" \
                 > {{new_app_output}}
  tags: all,3scale
  when: api_project_result is failed
- name: output message
  debug:
    msg: new-app output available at {{new_app_output}}
  tags: all,3scale
  when: api_project_result is failed
# #### Storage Tier
@@ -57,7 +72,7 @@
    - system-mysql
    - system-redis
    - zync-database
  tags: all,3scale
  when: api_project_result is failed
- include: wait_for_deploy.yml
  static: no
  vars:
@@ -67,7 +82,7 @@
      - system-mysql
      - system-redis
      - zync-database
  tags: all,3scale
  when: api_project_result is failed
# #### Backend Listeners
@@ -76,27 +91,27 @@
  with_items:
    - backend-listener
    - backend-worker
  tags: all,3scale
  when: api_project_result is failed
- include: wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - backend-listener
      - backend-worker
  tags: all,3scale
  when: api_project_result is failed
# #### System Tier
- name: "Resume {{with_items}}"
  command: oc rollout resume dc/"{{ item }}" -n "{{ ocp_project }}"
  with_items:
    - system-app
  tags: all,3scale
  when: api_project_result is failed
- include: wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - system-app
  tags: all,3scale
  when: api_project_result is failed
# #### Utility Tier
- name: "Resume {{with_items}}"
@@ -106,7 +121,7 @@
    - system-sidekiq
    - system-sphinx
    - backend-cron
  tags: all,3scale
  when: api_project_result is failed
- include: wait_for_deploy.yml
  static: no
  vars:
@@ -115,7 +130,7 @@
      - system-sidekiq
      - system-sphinx
      - backend-cron
  tags: all,3scale
  when: api_project_result is failed
# #### Gateway Tier
@@ -125,7 +140,7 @@
    - apicast-staging
    - apicast-production
    - apicast-wildcard-router
  tags: all,3scale
  when: api_project_result is failed
- include: wait_for_deploy.yml
  static: no
  vars:
@@ -133,42 +148,54 @@
      - apicast-staging
      - apicast-production
      - apicast-wildcard-router
  tags: all,3scale
  when: api_project_result is failed
# #### Zync / RHSSO Sync Tier
- name: "Resume {{with_items}}"
  command: oc rollout resume dc/"{{ item }}" -n "{{ ocp_project }}"
  with_items:
    - zync
  tags: all,3scale
  when: api_project_result is failed
- include: wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - zync
  tags: all,3scale
  when: api_project_result is failed
###################################################################
################        Tenant Rollout        #####################
- name: Tenant provisioning starting
  debug:
    msg:
      - "tenant_output_dir:  {{tenant_output_dir}}"
      - "tenant_provisioning_log_file = {{tenant_output_dir}}/{{tenant_provisioning_log_file}}"
      - "tenant_provisioning_results_file = {{tenant_output_dir}}/{{tenant_provisioning_results_file}}"
      - "start and end tenants = {{start_tenant}}  {{end_tenant}}"
      - "create API Gateways for each tenant = {{create_gws_with_each_tenant}}"
  tags: tenants
- name: Copy tenant provisioning script to known path
  template:
    src: templates/create_tenants.sh
    dest: /tmp/create_tenants.sh
    src: templates/manage_tenants.sh
    dest: /tmp/manage_tenants.sh
    mode: 0755
  tags: all,tenants
  tags: tenants
  vars:
    create_tenants: "true"
- name: Create tenants
  shell: "/tmp/create_tenants.sh"
  tags: all,tenants
  shell: "/tmp/manage_tenants.sh"
  tags: tenants
- name: Creation of tenants complete
  debug:
    msg: Creation of tenants complete.  output files available at {{tenant_output_dir}}
  tags: all,tenants
  tags: tenants
###################################################################
- name: workload Tasks Complete
  debug:
    msg: workload Tasks Complete
  tags: all
ansible/roles/ocp-workload-3scale-multitenant/templates/manage_tenants.sh
File was renamed from ansible/roles/ocp-workload-3scale-multitenant/templates/create_tenants.sh
@@ -11,16 +11,24 @@
master_access_token={{master_access_token}}
tenantAdminPasswd={{tenantAdminPasswd}}
create_tenant_url=https://{{ocp_project}}-master-admin.{{ocp_apps_domain}}/master/api/providers.xml
delete_tenant_url=https://{{ocp_project}}-master-admin.{{ocp_apps_domain}}/master/api/providers.xml
output_dir={{tenant_output_dir}}
user_info_file=$output_dir/user_info_file.txt
log_file=$output_dir/tenant_provisioning.log
user_info_file=$output_dir/{{tenant_provisioning_results_file}}
log_file=$output_dir/{{tenant_provisioning_log_file}}
createGWs={{create_gws_with_each_tenant}}
create_tenants={{create_tenants}}
function prep() {
    mkdir -p $output_dir
}
function createAndActivateTenants() {
    echo -en "\n\nCreating tenants $startTenant through $endTenant  \n" > $log_file
    echo -en "GUID\tOCP user id\tOCP user passwd\t3scale admin URL\tAPI admin Id\tAPI admin passwd\tAPI admin access token\n\t\t\t\t\t" > $user_info_file
    
    curl -o $output_dir/3scale-apicast.yml https://raw.githubusercontent.com/gpe-mw-training/3scale_onpremise_implementation_labs/master/resources/rhte/3scale-apicast.yml
    for i in $(seq ${startTenant} ${endTenant}) ; do
        orgName=user$i-3scale-mt;
@@ -105,9 +113,10 @@
            THREESCALE_PORTAL_ENDPOINT=https://$tenant_access_token@$orgName-admin.{{ocp_apps_domain}}
            # 9) Create staging gateway
            oc new-app \
               -f $HOME/lab/3scale-apicast.yml \
               -f $output_dir/3scale-apicast.yml \
               --param THREESCALE_PORTAL_ENDPOINT=$THREESCALE_PORTAL_ENDPOINT \
               --param APP_NAME=stage-apicast \
               --param ROUTE_NAME=$orgName-mt-stage-generic \
@@ -122,7 +131,7 @@
            # 10) Create production gateway
            oc new-app \
               -f $HOME/lab/3scale-apicast.yml \
               -f $output_dir/3scale-apicast.yml \
               --param THREESCALE_PORTAL_ENDPOINT=$THREESCALE_PORTAL_ENDPOINT \
               --param APP_NAME=prod-apicast \
               --param ROUTE_NAME=$orgName-mt-prod-generic \
@@ -147,5 +156,35 @@
}
mkdir -p $output_dir
createAndActivateTenants
function deleteTenants() {
    echo -en "\n\nDeleting tenants $startTenant through $endTenant  \n" > $log_file
    for i in $(seq ${startTenant} ${endTenant}) ; do
        orgName=user$i-3scale-mt;
        tenantAdminId=user$i;
        #1) delete tenant project
        oc delete project $tenantAdminId-gw >> $log_file
        #2) delete routes
        oc delete route $orgName-provider -n {{ocp_project}} >> $log_file
        oc delete route $orgName-developer -n {{ocp_project}} >> $log_file
        #3) delete tenant in 3scale API Manager
        curl -k  \
            -X DELETE \
            -d access_token=$master_access_token \
            -d org_name=$orgName \
            $delete_tenant_url >> $log_file
    done
}
prep
if [ "x$create_tenants" == "xtrue"  ]; then
    createAndActivateTenants
else
    deleteTenants
fi
ansible/roles/ocp-workload-bxms-pam/defaults/main.yml
@@ -30,13 +30,14 @@
MAVEN_REPO_URL: http://nexus3.default.svc.cluster.local:8081/repository/maven-public/
POSTGRESQL_IMAGE_STREAM_TAG: 9.5
pam_tag: 1.2-3
pam_tag: 7.0.2.GA
app_name: rht
pam_secrets_yml: https://raw.githubusercontent.com/jboss-container-images/rhpam-7-openshift-image/7.0.2.GA/example-app-secret-template.yaml
pam_imagestreams_yml: https://raw.githubusercontent.com/jboss-container-images/rhpam-7-openshift-image/{{pam_tag}}/rhpam70-image-streams.yaml
pam_secrets_yml: https://raw.githubusercontent.com/jboss-container-images/rhpam-7-openshift-image/{{pam_tag}}/example-app-secret-template.yaml
pam_secrets_template_name: example-app-secret
bcentral_app_secret: businesscentral-app-secret
kserver_app_secret: kserver-app-secret
pam_template_yml: https://raw.githubusercontent.com/jboss-container-images/rhpam-7-openshift-image/7.0.2.GA/templates/rhpam70-authoring.yaml
pam_template_yml: https://raw.githubusercontent.com/jboss-container-images/rhpam-7-openshift-image/{{pam_tag}}/templates/rhpam70-authoring.yaml
pam_template_name: rhpam70-authoring
ansible/roles/ocp-workload-bxms-pam/readme.adoc
@@ -19,13 +19,15 @@
                    -e"ocp_user_needs_quota=true" \
                    -e"guid=$GUID" \
                    -e"ocp_domain=$OCP_DOMAIN" \
                    -e"ACTION=create"
                    -e"ACTION=create" \
                    -e"ocp_username=dtorresf-redhat.com"
ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=$GUID" \
                    -e"ACTION=remove"
                    -e"ACTION=remove" \
                    -e"ocp_username=dtorresf-redhat.com"
-----
ansible/roles/ocp-workload-bxms-pam/tasks/workload.yml
@@ -12,21 +12,30 @@
- name: Make sure we go back to default project
  shell: "oc project default"
- name: Initialize the project templates, {{pam_template_yml}}
  shell: "oc create -f {{pam_template_yml}} -n {{ocp_project}}"
- shell: "oc create -f {{pam_secrets_yml}} -n {{ocp_project}}"
- name: Prep local filesystem for temp files
  file:
    path: "/tmp/{{guid}}"
    state: directory
- name: Copy pam imagestream using the following tag,  {{pam_tag}}
  template:
    src: templates/rhpam70-image-streams.yaml
    dest: /tmp/{{guid}}/rhpam70-image-streams.yaml
- name: Prepare a local copy of the template for modifications
  shell: "oc process -f {{pam_template_yml}} \
          -p IMAGE_STREAM_NAMESPACE={{ocp_project}} \
          -p KIE_ADMIN_PWD={{kie_admin_passwd}} \
          -p APPLICATION_NAME={{app_name}} \
          -p BUSINESS_CENTRAL_HTTPS_SECRET={{bcentral_app_secret}} \
          -p KIE_SERVER_HTTPS_SECRET={{kserver_app_secret}} > /tmp/{{guid}}/{{pam_template_name}}.json"
- name: Modify template with pause build configs
  replace:
    path: '/tmp/{{guid}}/{{pam_template_name}}.json'
    regexp: '("replicas": 1,\n)'
    replace: '\1\t\t\t\t\t"paused": true,\n'
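The replace task above injects a "paused": true field after every "replicas": 1, line in the processed template JSON, so the DeploymentConfigs are created paused and can be modified before their first rollout. A rough shell equivalent for inspecting the effect by hand, assuming GNU sed and a $GUID variable matching the role's temp directory:
----
# Illustrative only: the same substitution the Ansible replace module performs
sed -i 's/"replicas": 1,/&\n\t\t\t\t\t"paused": true,/' /tmp/$GUID/rhpam70-authoring.json
grep -n '"paused": true' /tmp/$GUID/rhpam70-authoring.json
----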
- name: Create template for secrets {{pam_secrets_yml}}
  shell: "oc create -f {{pam_secrets_yml}} -n {{ocp_project}}"
- name: Load pam imagestream
  shell: "oc create -f https://raw.githubusercontent.com/jboss-container-images/rhpam-7-openshift-image/7.0.2.GA/rhpam70-image-streams.yaml -n {{ocp_project}}"
  shell: "oc create -f {{pam_imagestreams_yml}} -n {{ocp_project}}"
- name: Load pam secret, {{kserver_app_secret}}
  shell: |
@@ -40,29 +49,11 @@
      -n {{ocp_project}}
- name: Initialize OCP resources from the project template using app_name = {{app_name}}
  shell: |
      oc new-app --name=pam-lab -n {{ocp_project}} --template={{pam_template_name}} \
      -p IMAGE_STREAM_NAMESPACE={{ocp_project}} \
      -p KIE_ADMIN_PWD={{kie_admin_passwd}} \
      -p APPLICATION_NAME={{app_name}} \
      -p BUSINESS_CENTRAL_HTTPS_SECRET={{bcentral_app_secret}} \
      -p KIE_SERVER_HTTPS_SECRET={{kserver_app_secret}} \
      -n {{ocp_project}} > /tmp/{{guid}}/gpte-pam-lab.txt
# Do not cancel, just pause, it will be modified next.
- name: pause {{app_name}}-rhpamcentr
  shell: oc rollout pause dc/{{app_name}}-rhpamcentr -n {{ocp_project}}
- name: modify business central deployment for additional java options
  shell: |
    oc env dc/{{app_name}}-rhpamcentr -n {{ocp_project}} \
    JAVA_OPTS_APPEND='-Xms512m -Xmx1536m -XX:MetaspaceSize=128M -XX:MaxMetaspaceSize=1536m -Xrs'
- name: cancel {{app_name}}-kieserver initial rollout
  shell: oc rollout cancel dc/{{app_name}}-kieserver -n {{ocp_project}}
  shell: "oc create -f /tmp/{{guid}}/{{pam_template_name}}.json -n {{ocp_project}}"
- name: resume {{app_name}}-rhpamcentr
  shell: oc rollout resume dc/{{app_name}}-rhpamcentr -n {{ocp_project}}
- include_tasks: ./wait_for_deploy.yml
  static: no
  vars:
@@ -70,7 +61,8 @@
      - "{{app_name}}-rhpamcentr"
- name: resume {{app_name}}-kieserver
  shell: oc deploy --latest dc/{{app_name}}-kieserver -n {{ocp_project}}
  shell: oc rollout resume dc/{{app_name}}-kieserver -n {{ocp_project}}
- include_tasks: ./wait_for_deploy.yml
  static: no
  vars:
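The net effect of the reworked tasks is a pause-modify-resume flow: Business Central is created paused, receives its JVM options, and only then rolls out, which avoids throwing away a first deployment. The same flow in plain oc commands, assuming app_name=rht and a $PROJECT variable (a sketch, not an excerpt from the role):
----
oc rollout pause dc/rht-rhpamcentr -n $PROJECT
oc env dc/rht-rhpamcentr -n $PROJECT \
    JAVA_OPTS_APPEND='-Xms512m -Xmx1536m -XX:MetaspaceSize=128M -XX:MaxMetaspaceSize=1536m -Xrs'
oc rollout resume dc/rht-rhpamcentr -n $PROJECT
oc rollout resume dc/rht-kieserver -n $PROJECT
----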
ansible/roles/ocp-workload-bxms-pam/templates/rhpam70-image-streams.yaml
File was deleted
ansible/roles/ocp-workload-fuse-ignite/tasks/workload.yml
@@ -1,5 +1,16 @@
---
- name: check if user is cluster admin
  shell: "oc get project default"
  register: default_project_result
  ignore_errors: true
  changed_when: false
- fail:
    msg: "User does not have cluster-admin rights to install Istio"
  when: default_project_result is failed
- name: Create project for workload; project =  {{ocp_project}}
  shell: "oc new-project {{ocp_project}} --display-name={{ocp_project}}"
ansible/roles/ocp-workload-istio-community/defaults/main.yml
@@ -10,7 +10,7 @@
quota_limits_cpu: 10
quota_requests_memory: '6Gi'
quota_limits_memory: '20Gi'
quota_limits_memory: '8Gi'
quota_configmaps: 15
quota_pods: 20
ansible/roles/ocp-workload-istio-community/readme.adoc
@@ -4,8 +4,8 @@
. Using a version of the oc utility that corresponds to your target OCP cluster, ensure oc is already authenticated as the cluster-admin user: opentlc-mgr
. This workload provisions a single Istio environment in an OCP cluster.
. The OCP namespace for istio will be owned by admin199
.. user100 will be assigned a clusterquota so as to manage limits and requests assigned to Istio
. The OCP namespace for istio will be owned by a non-existent user: admin1
.. admin1 will be assigned a clusterquota to manage the limits and requests assigned to Istio
. This workload only needs to be executed once per OCP cluster
@@ -13,16 +13,18 @@
-----
WORKLOAD="ocp-workload-istio-community"
GUID=199
GUID=admin1
OCP_USERNAME="admin$GUID"
HOST_GUID=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com
NEEDS_QUOTA=true
ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=false" \
                    -e"ocp_user_needs_quota=${NEEDS_QUOTA}" \
                    -e"ocp_domain=$OCP_DOMAIN" \
                    -e"ACTION=create"
ansible/roles/ocp-workload-istio-community/tasks/workload.yml
@@ -1,4 +1,15 @@
---
- name: check if user is cluster admin
  shell: "oc get project default"
  register: default_project_result
  ignore_errors: true
  changed_when: false
- fail:
    msg: "User does not have cluster-admin rights to install Istio"
  when: default_project_result is failed
- name: define user ocp_project
  set_fact:
    ocp_project: "istio-system"
@@ -15,7 +26,7 @@
  register: oc_projects
  changed_when: false
- name: "Add anyuid scc to all authenticated: {{oc_projects.stdout_lines}}"
- name: "Add anyuid scc to all authenticated in project {{oc_project}}"
  shell: "oc adm policy add-scc-to-group anyuid system:authenticated -n {{ocp_project}}"
  when: '"projects/" ~ "istio-system" not in oc_projects.stdout_lines'
@@ -25,39 +36,17 @@
    dest: /tmp/istio-demo-1.0.0.yaml
  when: '"projects/" ~ "istio-system" not in oc_projects.stdout_lines'
- name: set ocp_project to istio-system to support wait for deploy
  set_fact:
    ocp_project: istio-system
- name: Provision Istio Control Plane (which creates istio-system namespace)
  shell:  "oc create -f /tmp/istio-demo-1.0.0.yaml"
  when: '"projects/" ~ "istio-system" not in oc_projects.stdout_lines'
# TO-DO:  investigate why this isn't working
- name: wait for istio deployments
  include_tasks: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - istio-citadel
      - istio-egressgateway
      - istio-galley
      - istio-ingressgateway
      - istio-pilot
      - istio-policy
      - istio-sidecar-injector
      - istio-statsd-prom-bridge
      - istio-telemetry
      - istio-tracing
- name: wait for Istio monitoring UIs
  include_tasks: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - grafana
      - prometheus
      - servicegraph
- name: wait for istio sidecar-injector to initialize
  shell: "oc get deployment istio-sidecar-injector -o jsonpath='{.status.availableReplicas}' -n istio-system"
  register: sidecar_injector_replicas
  until: sidecar_injector_replicas.stdout == "1"
  retries: "30"
  delay: "30"
- name: Expose routes
  shell: |
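The retry task above polls the deployment's availableReplicas field until the sidecar injector reports one ready replica, giving up after 30 attempts spaced 30 seconds apart. A shell rendering of the same wait loop (illustrative):
----
# Wait up to 15 minutes for the sidecar injector to become available
for i in $(seq 1 30); do
    replicas=$(oc get deployment istio-sidecar-injector -n istio-system \
        -o jsonpath='{.status.availableReplicas}')
    [ "$replicas" == "1" ] && break
    sleep 30
done
----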
ansible/roles/ocp-workload-rhte-mw-api-mesh/defaults/main.yml
@@ -1,7 +1,6 @@
---
become_override: false
ocp_username: jbride-redhat.com
ocp_user_needs_quota: True
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
@@ -10,7 +9,7 @@
quota_limits_cpu: 10
quota_requests_memory: '6Gi'
quota_limits_memory: '20Gi'
quota_limits_memory: '15Gi'
quota_configmaps: 20
quota_pods: 20
@@ -31,3 +30,6 @@
docker_registry_url: docker-registry.default.svc:5000
lab_name: rhte-mw-api-mesh
ocp_user_needs_quota: True
cluster_quota_name: "clusterquota-{{lab_name}}-{{ocp_username}}"
ansible/roles/ocp-workload-rhte-mw-api-mesh/ilt_provision.sh
New file
@@ -0,0 +1,100 @@
#!/bin/bash
START_PROJECT_NUM=1
END_PROJECT_NUM=1
WORKLOAD="ocp-workload-rhte-mw-api-mesh"
LOG_FILE=/tmp/$WORKLOAD
HOST_GUID=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com
PATH_TO_AAD_ROOT=$TRAINING/gpte/ansible_agnostic_deployer/ansible
for var in $@
do
    case "$var" in
        --HOST_GUID=*) HOST_GUID=`echo $var | cut -f2 -d\=` ;;
        --START_PROJECT_NUM=*) START_PROJECT_NUM=`echo $var | cut -f2 -d\=` ;;
        --END_PROJECT_NUM=*) END_PROJECT_NUM=`echo $var | cut -f2 -d\=` ;;
        --PATH_TO_AAD_ROOT=*) PATH_TO_AAD_ROOT=`echo $var | cut -f2 -d\=` ;;
        -h) HELP=true ;;
        -help) HELP=true ;;
        --help) HELP=true ;;
    esac
done
function ensurePreReqs() {
    if [ "x$HOST_GUID" == "x" ]; then
            echo -en "must pass parameter: --HOST_GUID=<ocp host GUID> . \n\n"
            help
            exit 1;
    fi
    LOG_FILE=$LOG_FILE-$HOST_GUID-$START_PROJECT_NUM-$END_PROJECT_NUM.log
    echo -en "starting\n\n" > $LOG_FILE
    echo -en "\n\nProvision log file found at: $LOG_FILE\n";
}
function help() {
    echo -en "\n\nOPTIONS:";
    echo -en "\n\t--START_PROJECT_NUM=*     OPTIONAL: specify # of first OCP project to provision (defult = 1))"
    echo -en "\n\t--END_PROJECT_NUM=*       OPTIONAL: specify # of OCP projects to provision (defualt = 1))"
    echo -en "\n\t--PATH_TO_AAD_ROOT=*       OPTIONAL: (defualt = $PATH_TO_AAD_ROOT))"
    echo -en "\n\t-h                        this help manual"
    echo -en "\n\n\nExample:                ./roles/$WORKLOAD/ilt_provision.sh --HOST_GUID=dev39 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
}
function login() {
    echo -en "\nHOST_GUID=$HOST_GUID\n" >> $LOG_FILE
    oc login https://master.$HOST_GUID.openshift.opentlc.com -u opentlc-mgr -p r3dh4t1!
}
function executeLoop() {
    echo -en "\nexecuteLoop() START_PROJECT_NUM = $START_PROJECT_NUM ;  END_PROJECT_NUM=$END_PROJECT_NUM" >> $LOG_FILE
    for (( c=$START_PROJECT_NUM; c<=$END_PROJECT_NUM; c++ ))
    do
        GUID=$c
        OCP_USERNAME=user$c
        executeAnsibleViaLocalhost
    done
}
function executeAnsibleViaLocalhost() {
    GUID=$PROJECT_PREFIX$GUID
    echo -en "\n\nexecuteAnsibleViaLocalhost():  Provisioning project with GUID = $GUID and OCP_USERNAME = $OCP_USERNAME\n" >> $LOG_FILE
    ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=true" \
                    -e"ocp_domain=$OCP_DOMAIN" \
                    -e"ACTION=create" >> $LOG_FILE
    if [ $? -ne 0 ];
    then
        echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n " >> $LOG_FILE
        echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n "
        exit 1;
    fi
}
if [ "x$HELP" == "xtrue" ]; then
    help
    exit 0
fi
cd $PATH_TO_AAD_ROOT
ensurePreReqs
login
executeLoop
ansible/roles/ocp-workload-rhte-mw-api-mesh/readme.adoc
@@ -11,8 +11,8 @@
WORKLOAD="ocp-workload-rhte-mw-api-mesh"
GUID=1
OCP_USERNAME="user$GUID"
HOST_GUID=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com
REGION=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$REGION.openshift.opentlc.com
ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
ansible/roles/ocp-workload-rhte-mw-api-mesh/tasks/pre_workload.yml
@@ -11,9 +11,9 @@
#     var: groupadd_register
#     verbosity: 2
- name: Create user Quota - clusterresourcequota
- name: Create user Quota - "{{cluster_quota_name}}"
  shell: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        oc create clusterquota "{{cluster_quota_name}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
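The clusterresourcequota created here matches projects by their openshift.io/requester annotation, so every project the user requests is counted against one shared quota instead of per-project quotas. A concrete example with the role's naming scheme filled in (user1 and the quota values are hypothetical):
----
oc create clusterquota clusterquota-rhte-mw-api-mesh-user1 \
    --project-annotation-selector=openshift.io/requester=user1 \
    --hard requests.cpu=5 \
    --hard limits.cpu=10
# Inspect which projects the quota currently selects
oc describe clusterresourcequota clusterquota-rhte-mw-api-mesh-user1
----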
ansible/roles/ocp-workload-rhte-mw-api-mesh/tasks/remove_workload.yml
@@ -7,8 +7,8 @@
  set_fact:
    ocp_project: "{{lab_name}}-{{guid}}"
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
- name: Remove user Quota - oc delete clusterresourcequota  "{{cluster_quota_name}}"
  shell: oc delete clusterresourcequota {{cluster_quota_name}}
  ignore_errors: true
- name: Remove any lingering tmp files
ansible/roles/ocp-workload-rhte-mw-api-mesh/templates/coolstore-catalog-mongodb-persistent.yaml
@@ -155,7 +155,7 @@
            timeoutSeconds: 1
          resources:
            limits:
              cpu: 500m
              cpu: 250m
              memory: 1Gi
            requests:
              cpu: 100m
@@ -258,10 +258,10 @@
            timeoutSeconds: 1
          resources:
            limits:
              cpu: 500m
              cpu: 250m
              memory: 500Mi
            requests:
              cpu: 500m
              cpu: 125m
              memory: 500Mi
          securityContext:
            privileged: false
ansible/roles/ocp-workload-rhte-mw-msa-mesh/defaults/main.yml
@@ -1,7 +1,6 @@
---
become_override: false
ocp_username: jbride-redhat.com
ocp_user_needs_quota: False
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
@@ -28,5 +27,9 @@
deploy_status_retries: 15
deploy_status_delay: 20
lab_name: rhte-mw-msa-mesh
lab_1_name: coolstore-catalog
lab_2_name: coolstore-gateway
ocp_user_needs_quota: True
cluster_quota_name: "clusterquota-{{lab_name}}-{{ocp_username}}"
ansible/roles/ocp-workload-rhte-mw-msa-mesh/tasks/pre_workload.yml
@@ -11,9 +11,9 @@
#     var: groupadd_register
#     verbosity: 2
- name: Create user Quota - clusterresourcequota
- name: Create user Quota - {{cluster_quota_name}}
  shell: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        oc create clusterquota "{{cluster_quota_name}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
ansible/roles/ocp-workload-rhte-mw-msa-mesh/tasks/remove_workload.yml
@@ -7,8 +7,8 @@
  set_fact:
    ocp_project: "{{ocp_username}}-{{lab_1_name}}"
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
- name: Remove user Quota - oc delete clusterresourcequota  "{{cluster_quota_name}}"
  shell: oc delete clusterresourcequota {{cluster_quota_name}}
  ignore_errors: true
- name: Remove any lingering tmp files
ansible/roles/ocp-workload-rhte-mw-msa-mesh/templates/coolstore-catalog-mongodb-persistent.yaml
@@ -155,7 +155,7 @@
            timeoutSeconds: 1
          resources:
            limits:
              cpu: 500m
              cpu: 250m
              memory: 1Gi
            requests:
              cpu: 100m
@@ -258,10 +258,10 @@
            timeoutSeconds: 1
          resources:
            limits:
              cpu: 500m
              cpu: 250m
              memory: 500Mi
            requests:
              cpu: 500m
              cpu: 100m
              memory: 500Mi
          securityContext:
            privileged: false
ansible/roles/ocp-workload-rhte-mw-msa-orchestration/readme.adoc
@@ -3,20 +3,18 @@
Corresponds to the link:https://drive.google.com/open?id=1AjAty4tY5HmrOXiZ6p5f3wEd_XQEJhp-zKHaq-aFnhE[Microservices Orchestration] Tech Exchange Lab.
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
== Deploy a Workload using local oc utility
----
GUID=jb45
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-rhte-mw-msa-orchestration"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
GUID=1
OCP_USERNAME="user$GUID"
REGION=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$REGION.openshift.opentlc.com
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
@@ -25,22 +23,9 @@
                    -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
                    -e"ACTION=create"
----
=== To Delete an environment
----
GUID=jb45
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-rhte-mw-msa-orchestration"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
ansible/roles/ocp-workload-rhte-mw-msa-orchestration/tasks/workload.yml
@@ -16,12 +16,14 @@
# #######      lab specific tasks   ############## #
# Components:
#   1) AMQ 7 broker
#   2) PAM kie-server w/ postgresql (possibly with business central)
#   1) enmasse ( configured via configmaps )
#   2) embedded spring-boot based kie-server w/ postgresql
#   3) backend business services
#   4) centralized rh-sso
#   5) centralized nexus (proxy repo and lab artifact repo (need to segregate between students ) )
#   4) rh-sso
#   5) nexus
####################################################
ansible/roles/ocp-workload-rhte-mw-op-intel/defaults/main.yml
@@ -6,17 +6,17 @@
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
quota_requests_cpu: 5
quota_limits_cpu: 10
quota_requests_cpu: 10
quota_limits_cpu: 30
quota_requests_memory: '6Gi'
quota_limits_memory: '20Gi'
quota_requests_memory: '10Gi'
quota_limits_memory: '60Gi'
quota_configmaps: 10
quota_pods: 20
quota_configmaps: 20
quota_pods: 40
quota_persistentvolumeclaims: 20
quota_services: 30
quota_secrets: 30
quota_services: 50
quota_secrets: 100
quota_requests_storage: 50Gi
ocp_domain: "{{subdomain_base}}"
@@ -28,12 +28,19 @@
deploy_status_retries: 15
deploy_status_delay: 20
# Corresponds to project names
lab_name: rhte-mw-op-intel
lab_1_name: lab1-kafka-project
lab_2_name: lab2-kafka-project
lab_3_name: lab3-spark-simple-project
lab_4_name: lab4-spark-medium-project
lab_5_name: lab5-spark-complex-project
lab_1_name: "{{ocp_username}}-lab1-kafka-project"
lab_2_name: "{{ocp_username}}-lab2-kafka-project"
lab_3_name: "{{ocp_username}}-lab3-spark-simple-project"
lab_4_name: "{{ocp_username}}-lab4-spark-medium-zepnb-project"
lab_5_name: "{{ocp_username}}-lab5-spark-medium-jpynb-project"
lab_6_name: "{{ocp_username}}-lab6-spark-complex-project"
# Corresponds to app names
spark_app_1: oshinko-webui
spark_app_2: spark-drools
spark_app_3: apache-zeppelin
##########          Templates for RHTE Lab 6 Op Intel       #################
@@ -52,9 +59,11 @@
kafkaconnect_crd_yaml: "{{clusteroperator_yaml}}/04-Crd-kafkaconnect.yaml"
kafkaconnects2i_crd_yaml: "{{clusteroperator_yaml}}/04-Crd-kafkaconnects2i.yaml"
kafkatopic_crd_yaml: "{{clusteroperator_yaml}}/04-Crd-kafkatopic.yaml"
kafkauser_crd_yaml: "{{clusteroperator_yaml}}/04-Crd-kafkauser.yaml"
clusteroperator_deployment_yaml: "{{clusteroperator_yaml}}/05-Deployment-strimzi-cluster-operator.yaml"
kafkapersistent_yaml: "{{strimzi_url}}/kafka/kafka-persistent.yaml"
kafkatopic_yaml: "{{strimzi_url}}/topic/kafka-topic.yaml"
kafkatopic2_yaml: "{{strimzi_url}}/topic/kafka-topic2.yaml"
kafkauser_yaml: "{{strimzi_url}}/user/kafka-user.yaml"
helloword_yaml: "{{strimzi_url}}/hello-world/deployment.yaml"
@@ -64,12 +73,11 @@
jupyter_yaml: "https://raw.githubusercontent.com/gpe-mw-training/operational_intelligence/master/templates/resources-jupyter.yaml"
strimzi_yaml: "https://raw.githubusercontent.com/gpe-mw-training/operational_intelligence/master/templates/openshift-strimzi-kafka-template.yaml"
nbconf_yaml: "https://raw.githubusercontent.com/honghuac/rhte2018/master/spark/nbconf.yaml"
kafkatopic2_yaml: "https://raw.githubusercontent.com/gpe-mw-training/operational_intelligence/master/templates/kafka-topic-configmap.yaml"
kafkatopic3_yaml: "https://raw.githubusercontent.com/gpe-mw-training/operational_intelligence/master/templates/kafka-topic-configmap.yaml"
spark_dc: "oshinko-java-spark-build-dc"
# Corresponds to Git repos
spark_git: "https://github.com/gpe-mw-training/operational_intelligence/tree/master/spark-drools"
zeppelin_git: "https://github.com/rimolive/zeppelin-notebooks.git"
#################################################################
ansible/roles/ocp-workload-rhte-mw-op-intel/readme.adoc
@@ -7,6 +7,34 @@
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
GUID=jb45
HOST_GUID=dev39
TARGET_HOST="master.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-rhte-mw-op-intel"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
  -e"ANSIBLE_REPO_PATH=`pwd`" \
  -e"ocp_workload=${WORKLOAD}" \
  -e"ocp_username=${OCP_USERNAME}" \
  -e"guid=${GUID}" \
  -e"ocp_user_needs_quota=true" \
  -e"ocp_domain=${OCP_DOMAIN}" \
  -e"ACTION=create"
----
=== To Delete an environment
----
GUID=jb45
HOST_GUID=dev39
TARGET_HOST="master.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-rhte-mw-op-intel"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/main.yml
@@ -4,8 +4,13 @@
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  include: ./workload.yml
- name: Running Strimzi Workload Tasks
  include: ./strimzi_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Spark Workload Tasks
  include: ./spark_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/remove_workload.yml
@@ -34,6 +34,10 @@
  shell: "oc delete project {{lab_5_name}}"
  ignore_errors: true
- name: Remove Project {{lab_6_name}}
  shell: "oc delete project {{lab_6_name}}"
  ignore_errors: true
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully - Removed"
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/spark_workload.yml
New file
@@ -0,0 +1,178 @@
---
- name: define ocp_project
  set_fact:
    ocp_project: "{{lab_name}}-{{guid}}"
# #######  Spark Installation  ############## #
# Components:
#   1) Apache Spark
####################################################
- name: Lab 6 Op Intel Installation workload Tasks Start
  debug:
    msg: Lab 6 Op Intel Installation workload Tasks Start
- name: Spark Installation Tasks Begin
  debug:
    msg: Spark Installation Tasks Begin
- name: Simple Spark Lab Installation Tasks Begin
  debug:
    msg: Simple Spark Lab Installation Tasks Begin
- name: Create project for workload {{lab_3_name}}
  shell: "oc new-project {{lab_3_name}}"
  ignore_errors: true
- name: Label namespace for project {{lab_3_name}}
  command: "oc label namespace {{lab_3_name}} AAD='{{guid}}'"
  ignore_errors: true
- name: Make sure we go back to default project
  shell: "oc project default"
- name: Give user {{guid}} access to ocp_project {{lab_3_name}}
  shell: "oc policy add-role-to-user admin {{guid}} -n {{lab_3_name}}"
  ignore_errors: true
- name: delete temp dir if it exists
  file:
      path: /tmp/{{ocp_project}}
      state: absent
- file:
      path: /tmp/{{ocp_project}}
      state: directory
- name: Create spark cluster environment in project {{lab_3_name}}
  shell: "oc create -f {{ spark_yaml }} -n {{lab_3_name}}"
  ignore_errors: true
- name: Create oshinko web app in project {{lab_3_name}}
  shell: "oc new-app {{ spark_app_1 }} -n {{lab_3_name}}"
  ignore_errors: true
- name: Create spark cluster app in project {{lab_3_name}}
  shell: "oc new-app --template {{ spark_dc }}
    -p APPLICATION_NAME={{ spark_app_2 }}
    -p APP_MAIN_CLASS=com.redhat.gpte.App
    -p GIT_URI={{ spark_git }}
    -p APP_FILE=spark-drools.jar
    -p OSHINKO_DEL_CLUSTER=false
    -n {{lab_3_name}}"
  ignore_errors: true
- name: Simple Spark Lab Installation Tasks Complete
  debug:
    msg: Simple Spark Lab Installation Tasks Complete
- name: Medium Spark Lab Installation Tasks Begin
  debug:
    msg: Medium Spark Lab Installation Tasks Begin
- name: Create project for workload {{lab_4_name}}
  shell: "oc new-project {{lab_4_name}}"
  ignore_errors: true
- name: Label namespace for project {{lab_4_name}}
  command: "oc label namespace {{lab_4_name}} AAD='{{guid}}'"
  ignore_errors: true
- name: Give user {{guid}} access to ocp_project {{lab_4_name}}
  shell: "oc policy add-role-to-user admin {{guid}} -n {{lab_4_name}}"
  ignore_errors: true
- name: Create ZEPPELIN NOTEBOOK environment in project {{lab_4_name}}
  shell: "oc create -f {{ zeppelin_yaml }} -n {{lab_4_name}}"
  ignore_errors: true
- name: Create ZEPPELIN NOTEBOOK app in project {{lab_4_name}}
  shell: "oc new-app --template=$namespace/apache-zeppelin-openshift
  -p APPLICATION_NAME={{ spark_app_3 }}
  -p GIT_URI={{ zeppelin_git }}
  -p ZEPPELIN_INTERPRETERS=md
  -n {{lab_4_name}}"
  ignore_errors: true
- name: Create project for workload {{lab_5_name}}
  shell: "oc new-project {{lab_5_name}}"
  ignore_errors: true
- name: Label namespace for project {{lab_5_name}}
  command: "oc label namespace {{lab_5_name}} AAD='{{guid}}'"
  ignore_errors: true
- name: Give user {{guid}} access to ocp_project {{lab_5_name}}
  shell: "oc policy add-role-to-user admin {{guid}} -n {{lab_5_name}}"
  ignore_errors: true
- name: Make sure we go back to default project
  shell: "oc project default"
- name: Create JUPYTER NOTEBOOK environment in project {{lab_5_name}}
  shell: "oc create -f {{ jupyter_yaml }} -n {{lab_5_name}}"
  ignore_errors: true
- name: Create JUPYTER NOTEBOOK app in project {{lab_5_name}}
  shell: "oc new-app --template radanalytics-jupyter-notebook
  -p JUPYTER_NOTEBOOK_PASSWORD=developer
  -n {{lab_5_name}}"
  ignore_errors: true
- name: Create JUPYTER NOTEBOOK configuration in project {{lab_5_name}}
  shell: "oc create -f {{ nbconf_yaml }} -n {{lab_5_name}}"
  ignore_errors: true
- name: Medium Spark Lab Installation Tasks Complete
  debug:
    msg: Medium Spark Lab Installation Tasks Complete
- name: Complex Spark Lab Installation Tasks Begin
  debug:
    msg: Complex Spark Lab Installation Tasks Begin
- name: Create project for workload {{lab_6_name}}
  shell: "oc new-project {{lab_6_name}}"
  ignore_errors: true
- name: Label namespace for project {{lab_6_name}}
  command: "oc label namespace {{lab_6_name}} AAD='{{guid}}'"
  ignore_errors: true
- name: Make sure we go back to default project
  shell: "oc project default"
- name: Give user {{guid}} access to ocp_project {{lab_6_name}}
  shell: "oc policy add-role-to-user admin {{guid}} -n {{lab_6_name}}"
  ignore_errors: true
- name: Create ZEPPELIN NOTEBOOK environment in project {{lab_6_name}}
  shell: "oc create -f {{ zeppelin_yaml }} -n {{lab_6_name}}"
  ignore_errors: true
- name: Create ZEPPELIN NOTEBOOK app in project {{lab_6_name}}
  shell: "oc new-app --template=$namespace/apache-zeppelin-openshift
  -p APPLICATION_NAME={{ spark_app_3 }}
  -p GIT_URI={{ zeppelin_git }}
  -p ZEPPELIN_INTERPRETERS=md
  -n {{lab_6_name}}"
  ignore_errors: true
- name: Create spark cluster environment in project {{lab_6_name}}
  shell: "oc create -f {{ spark_yaml }} -n {{lab_6_name}}"
  ignore_errors: true
- name: Create oshinko web app in project {{lab_6_name}}
  shell: "oc new-app {{ spark_app_1 }} -n {{lab_6_name}}"
  ignore_errors: true
- name: Complex Spark Lab Installation Tasks Complete
  debug:
    msg: Complex Spark Lab Installation Tasks Complete
- name: Spark Installation Tasks Complete
  debug:
    msg: Spark Installation Tasks Complete
- name: Lab 6 Op Intel Installation workload Tasks Complete
  debug:
    msg: Lab 6 Op Intel Installation workload Tasks Complete
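Because every task in this file sets ignore_errors, a failed template import or new-app call will not stop the play, so a post-run verification pass over the lab projects is a useful complement. A sketch assuming ocp_username=user1, with project names following the lab_*_name defaults above:
----
# Check that each spark lab project ended up with deployments and pods
for p in lab3-spark-simple-project lab4-spark-medium-zepnb-project \
         lab5-spark-medium-jpynb-project lab6-spark-complex-project; do
    echo "== user1-$p =="
    oc get dc,pods -n "user1-$p"
done
----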
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/strimzi_workload.yml
File was renamed from ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/workload.yml
@@ -8,25 +8,28 @@
#   1) Kafka
####################################################
- name: Strimzi Installation Tasks Start
- name: Lab 6 Op Intel Installation workload Tasks Start
  debug:
    msg: Strimzi Installation Tasks Start
    msg: Lab 6 Op Intel Installation workload Tasks Start
- name: define user user2_kafka_project
  set_fact:
    user2_kafka_project: "{{ocp_username}}-{{lab_1_name}}"
- name: Strimzi Installation Tasks Begin
  debug:
    msg: Strimzi Installation Tasks Begin
- name: "Create project for workload {{lab_1_name}}"
- name: Create project for workload {{lab_1_name}}
  shell: "oc new-project {{lab_1_name}}"
  ignore_errors: true
- name: "Label namespace"
- name: Label namespace
  command: "oc label namespace {{lab_1_name}} AAD='{{guid}}'"
  ignore_errors: true
- name: Make sure we go back to default project
  shell: "oc project default"
- name: Add user as admin to the project
  shell: "oc adm policy add-role-to-user admin {{ocp_username}} -n {{lab_1_name}}"
- name: Give user {{guid}} access to ocp_project {{lab_1_name}}
  shell: "oc policy add-role-to-user admin {{guid}} -n {{lab_1_name}}"
  ignore_errors: true
- name: delete temp dir if it exists
  file:
@@ -38,54 +41,69 @@
- name: Create SA for strimzi cluster operator
  shell: "oc apply -f {{ serviceaccount_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Create cluster role for strimzi cluster operator
  shell: "oc apply -f {{ clusteroperator_role_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Create cluster role binding for strimzi cluster operator
  shell: "oc apply -f {{ clusteroperator_rolebinding_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Create cluster role for Kafka broker
  shell: "oc apply -f {{ kafkabroker_role_yaml }} -n {{lab_1_name}}"
- name: Create cluster role binding for strimzi-cluster-operator-topic-operator-delegation
  shell: ""
- name: Create cluster role for strimzi-topic operator
  shell: "oc apply -f {{ topicoperator_SOMETHING_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Create cluster role binding for strimzi-cluster-operator-topic-operator-delegation
  shell: "oc apply -f {{ topicoperator_rolebinding_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Add CRD for Kafka
  shell: "oc apply -f {{ kafka_crd_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Add CRD for Kafka connect
  shell: "oc apply -f {{ kafkaconnect_crd_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Add CRD for Kafka connect s2i
  shell: "oc apply -f {{ kafkaconnects2i_crd_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Add CRD for Kafka topic
  shell: "oc apply -f {{ kafkatopic_crd_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Add CRD for Kafka user
  shell: "oc apply -f {{ kafkauser_crd_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Deploy Kafka
  shell: "oc apply -f examples/install/cluster-operator/05-Deployment-strimzi-cluster-operator.yaml -n {{lab_1_name}}"
  shell: "oc apply -f {{clusteroperator_deployment_yaml}} -n {{lab_1_name}}"
  ignore_errors: true
- name: Apply Kafka Persistent template
  shell: "oc apply -f {{ kafkapersistent_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Create Kafka topics
- name: Create first Kafka topic
  shell: "oc apply -f {{ kafkatopic_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Create second Kafka topic
  shell: "oc apply -f {{ kafkatopic2_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
#- name: Create third Kafka topic
#  shell: "oc apply -f {{ kafkatopic3_yaml }} -n {{lab_1_name}}"
- name: Create Kafka users
  shell: "oc apply -f {{ kafkauser_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Annotate the project with openshift.io/requester so the user's clusterquota selects it
  shell: "oc annotate namespace {{lab_1_name}} openshift.io/requester={{ocp_username}} --overwrite"
- name: Create Kafka connect deployment
  shell: "oc apply -f examples/kafka-connect/kafka-connect.yaml -n {{lab_1_name}}"
- name: Strimzi Installation Tasks Complete
  debug:
ansible/workdir/.gitignore
File was deleted