From 50312a2d16531c9ef444e8880952582cc8347c69 Mon Sep 17 00:00:00 2001
From: Nandan Joshi <njoshi@redhat.com>
Date: Tue, 03 Mar 2020 01:09:55 +0100
Subject: [PATCH] initial role for amq messaging foundations eLT (#1219)

---
 ansible/configs/amq-messaging-foundations/files/repos_template.j2                     |   26 
 ansible/configs/amq-messaging-foundations/post_software.yml                           |   19 
 ansible/roles/amq-client-vm/files/bashrc                                              |   24 
 ansible/roles/amq-client-vm/files/bash_profile                                        |   11 
 ansible/roles/amq-client-vm/tasks/main.yml~                                           |  300 +++++++++
 ansible/configs/amq-messaging-foundations/software.yml                                |   28 
 ansible/configs/amq-messaging-foundations/default_vars_ec2.yml                        |   91 ++
 ansible/configs/amq-messaging-foundations/sample_vars                                 |   14 
 ansible/roles/amq-client-vm/tasks/main.yml                                            |  205 ++++++
 ansible/configs/amq-messaging-foundations/default_vars.yml                            |  113 +++
 ansible/configs/amq-messaging-foundations/requirements.yml                            |    6 
 ansible/configs/amq-messaging-foundations/default_vars.yml~                           |  117 +++
 ansible/configs/amq-messaging-foundations/files/cloud_providers/ec2_cloud_template.j2 |  443 +++++++++++++
 ansible/configs/amq-messaging-foundations/README.adoc                                 |  119 +++
 ansible/configs/amq-messaging-foundations/files/hosts_template.j2                     |    5 
 ansible/roles/amq-client-vm/README.md                                                 |   56 +
 ansible/configs/amq-messaging-foundations/software.yml~                               |   28 
 ansible/configs/amq-messaging-foundations/pre_infra.yml                               |   12 
 ansible/configs/amq-messaging-foundations/pre_software.yml                            |   89 ++
 ansible/roles/amq-client-vm/defaults/main.yml                                         |    9 
 ansible/configs/amq-messaging-foundations/requirements-ruby.yml                       |    6 
 ansible/configs/amq-messaging-foundations/destroy_env.yml                             |    2 
 ansible/configs/amq-messaging-foundations/default_vars_osp.yml                        |  129 ++++
 ansible/configs/amq-messaging-foundations/post_infra.yml                              |   24 
 24 files changed, 1,876 insertions(+), 0 deletions(-)

diff --git a/ansible/configs/amq-messaging-foundations/README.adoc b/ansible/configs/amq-messaging-foundations/README.adoc
new file mode 100644
index 0000000..f0a2950
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/README.adoc
@@ -0,0 +1,119 @@
+= AMQ Messaging Foundations Client VM (amq-messaging-foundations)
+
+== Running Ansible Playbook
+
+* You can run the playbook with the following arguments to override the default variable values:
++
+[source,bash]
+----
+# Generic Vars
+
+GUID=sborenstest5
+REGION=ap-southeast-2
+KEYNAME=ocpkey
+ENVTYPE="ocp-clientvm"
+CLOUDPROVIDER=ec2
+HOSTZONEID='Z3IHLWJZOU9SRT'
+BASESUFFIX='.example.opentlc.com'
+
+
+# OCP Vars
+
+REPO_VERSION=3.11
+OSRELEASE=3.11.16
+
+ansible-playbook main.yml \
+  -e "guid=${GUID}" \
+  -e "env_type=${ENVTYPE}" \
+  -e "osrelease=${OSRELEASE}" \
+  -e "repo_version=${REPO_VERSION}" \
+  -e "docker_version=1.13.1" \
+  -e "cloud_provider=${CLOUDPROVIDER}" \
+  -e "aws_region=${REGION}" \
+  -e "HostedZoneId=${HOSTZONEID}" \
+  -e "key_name=${KEYNAME}" \
+  -e "subdomain_base_suffix=${BASESUFFIX}" \
+  -e "clientvm_instance_type=t2.large" \
+  -e "requirements_path=ansible/configs/ocp-clientvm/requirements_ruby.yml" \
+  -e "email=name@example.com"  \
+  -e "output_dir=/tmp/output" \
+  -e "install_ruby=true"    -e "install_3scale=true" -vvvv
+----
+
+=== Satellite version
+----
+GUID=testclientvm1
+REGION=us-east-1
+KEYNAME=ocpkey
+ENVTYPE="ocp-clientvm"
+CLOUDPROVIDER=ec2
+HOSTZONEID='Z186MFNM7DX4NF'
+BASESUFFIX='.example.opentlc.com'
+REPO_VERSION=3.9
+DEPLOYER_REPO_PATH=`pwd`
+
+LOG_FILE=/tmp/${ENVTYPE}-${GUID}.log
+IPAPASS=$5
+
+if [ "$1" = "provision" ] ; then
+
+echo "Provisioning: ${STACK_NAME}"  1>> $LOG_FILE 2>> $LOG_FILE
+
+ansible-playbook ${DEPLOYER_REPO_PATH}/main.yml  \
+  -e "guid=${GUID}" \
+  -e "env_type=${ENVTYPE}" \
+  -e "key_name=${KEYNAME}" \
+  -e "cloud_provider=${CLOUDPROVIDER}" \
+  -e "aws_region=${REGION}" \
+  -e "HostedZoneId=${HOSTZONEID}" \
+  -e "subdomain_base_suffix=${BASESUFFIX}" \
+  -e "clientvm_instance_type=t2.large" \
+  -e "ipa_host_password=${IPAPASS}"
+  -e "repo_method=satellite" \
+  -e "repo_version=${REPO_VERSION}" \
+  -e "email=name@example.com" \
+  -e "software_to_deploy=none" \
+  -e "osrelease=3.9.14" \
+  -e "docker_version=1.13.1" \
+  -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" 1>> $LOG_FILE 2>> $LOG_FILE
+fi
+----
+
+=== To Delete an environment
+----
+REGION=us-east-1
+KEYNAME=ocpkey
+GUID=testclientvm1
+ENVTYPE="ocp-clientvm"
+CLOUDPROVIDER=ec2
+HOSTZONEID='Z186MFNM7DX4NF'
+
+ansible-playbook ./configs/${ENVTYPE}/destroy_env.yml \
+ -e "guid=${GUID}" \
+ -e "env_type=${ENVTYPE}"  \
+ -e "cloud_provider=${CLOUDPROVIDER}" \
+ -e "aws_region=${REGION}" \
+ -e "HostedZoneId=${HOSTZONEID}" \
+ -e "key_name=${KEYNAME}" \
+ -e "subdomain_base_suffix=${BASESUFFIX}"
+----
+
+
+== Example RC file
+
+Use an RC file like this one to create a ClientVM with the wrapper.sh script:
+
+----
+GUID=myclient
+REGION=us-east-1
+KEYNAME=ocpkey
+ENVTYPE=amq-messaging-foundations
+HOSTZONEID='Z3IHLWJZOU9SRT'
+ENVTYPE_ARGS=(
+-e repo_version=3.9
+-e osrelease=3.9.14
+-e own_repo_path=http://admin.example.com/repos/ocp/3.9.14
+-e docker_version=1.13.1
+-e "clientvm_instance_type=t2.large"
+-e "subdomain_base_suffix=.example.opentlc.com"
+)
+----
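
For reference, a minimal run of this amq-messaging-foundations config on EC2 could look like the following sketch. Every value (GUID, key name, hosted zone, domain) is a placeholder taken from the sample_vars file added in this change and must be replaced with your own account details.

[source,bash]
----
# Minimal sketch of a deploy of this config on EC2; all values are placeholders.
GUID=test01
ansible-playbook main.yml \
  -e "env_type=amq-messaging-foundations" \
  -e "guid=${GUID}" \
  -e "cloud_provider=ec2" \
  -e "aws_region=us-east-1" \
  -e "key_name=ocpkey" \
  -e "HostedZoneId=Z3IHLWJZOU9SRT" \
  -e "subdomain_base_suffix=.example.opentlc.com" \
  -e "software_to_deploy=none" \
  -e "email=name@example.com" \
  -e "output_dir=/tmp/output"
----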
diff --git a/ansible/configs/amq-messaging-foundations/default_vars.yml b/ansible/configs/amq-messaging-foundations/default_vars.yml
new file mode 100644
index 0000000..b7ee136
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/default_vars.yml
@@ -0,0 +1,113 @@
+###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
+###### OR PASS as "-e" args to ansible-playbook command
+
+## guid is the deployment unique identifier, it will be appended to all tags,
+## files and anything that identifies this environment from another "just like it"
+guid: defaultguid
+
+# Project Tag for all generated resources
+project_tag: "{{ env_type }}-{{ guid }}"
+
+# osrelease determines if the OCP3 or OCP4 ClientVM is installed.
+# osrelease >=4.0 ==> OCP 4, osrelease < 4.0 ==> OCP 3
+# osrelease also determines which RHEL Repos to use
+# Specific tool versions can be set to override the default
+osrelease: '4.3.0'
+repo_version: '4.3'
+
+# Whether Ruby and the 3scale Toolbox gem are installed.
+install_ruby: false
+install_3scale: false
+
+
+# Software Versions:
+# Specified in ocp-client-vm role defaults. Can be overridden with specific
+# versions if necessary
+ocp_clientvm_oc_version: "{{ osrelease }}"
+# ocp_clientvm_oc_version: '4.3.0'
+# ocp_clientvm_oc_version: '3.11.154'
+
+# OpenShift 4 specific software (only installed when Client Version >= 4.0)
+# ocp_clientvm_odo_version: 'v1.1.0'
+# ocp_clientvm_helm_version: 'v3.0.0'
+# ocp_clientvm_tkn_version: '0.6.0'
+
+# Supplemental Software
+# Specified in ocp-client-vm role defaults. Can be overridden.
+# ocp_clientvm_maven_version: '3.6.3'
+# ocp_clientvm_istioctl_version: '1.1.17'
+
+# Docker version and settings
+docker_version: '1.13.1'
+docker_size: '200'
+
+# num_users is 1 by default. If it is set to more than 1, then instead of creating
+# clientvm.<guid>.<baseurl> it will create clientvm{1..num_users}.<guid>.<baseurl>
+num_users: 1
+
+install_bastion: true
+install_common: true
+install_opentlc_integration: true
+install_ipa_client: false
+
+# Install a user with the id 'student'. If install_student_user is true, then a global variable
+# student_password must be provided with the password to set for the student user.
+install_student_user: false
+
+# FTL is used for grading and solving. It will pull in the external ftl-injector role.
+# This might be enabled when we have solvers to run or graders for ILT
+# Define the FTL Injector Tag
+ftl_injector_tag: "v0.17.0"
+ftl_use_python3: true
+
+### If you want a Key Pair name created and injected into the hosts,
+# set `set_env_authorized_key` to true and set the keyname in `env_authorized_key`
+# you can use the key used to create the environment or use your own self generated key
+# if you set "use_own_key" to false your PRIVATE key will be copied to the bastion. (This is {{key_name}})
+
+use_own_key: true
+env_authorized_key: "{{guid}}key"
+key_name: "default_key_name"
+ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
+set_env_authorized_key: true
+
+# Is this running from Red Hat Ansible Tower
+tower_run: false
+
+### Common Host settings
+repo_method: file # Options are: file, satellite and rhn
+
+# Do you want to run a full yum update
+update_packages: true
+
+#If using repo_method: satellite, you must set these values as well.
+# satellite_url: satellite.example.com
+# satellite_org: Sat_org_name
+# satellite_activationkey: "rhel7basic"
+
+# Packages to install
+common_packages:
+- unzip
+- bash-completion
+- tmux
+- bind-utils
+- wget
+- nano
+- ansible
+- git
+- vim-enhanced
+- httpd-tools
+- openldap-clients
+- tree
+
+# Which RHEL Repos to enable.
+rhel_repos:
+- rhel-7-server-rpms
+- rhel-7-server-extras-rpms
+- rhel-7-server-ansible-2.8-rpms
+
+cloud_tags:
+  env_type: "{{ env_type }}"
+  guid: "{{ guid }}"
+  course_name: "{{ course_name | d('unknown') }}"
+  platform: "{{ platform | d('unknown') }}"
diff --git a/ansible/configs/amq-messaging-foundations/default_vars.yml~ b/ansible/configs/amq-messaging-foundations/default_vars.yml~
new file mode 100644
index 0000000..45e5985
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/default_vars.yml~
@@ -0,0 +1,117 @@
+###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
+###### OR PASS as "-e" args to ansible-playbook command
+
+## guid is the deployment unique identifier, it will be appended to all tags,
+## files and anything that identifies this environment from another "just like it"
+guid: defaultguid
+
+# Project Tag for all generated resources
+project_tag: "{{ env_type }}-{{ guid }}"
+
+# osrelease determines if the OCP3 or OCP4 ClientVM is installed.
+# osrelease >=4.0 ==> OCP 4, osrelease < 4.0 ==> OCP 3
+# osrelease also determines which RHEL Repos to use
+# Specific tool versions can be set to override the default
+osrelease: '4.3.0'
+repo_version: '4.3'
+
+# Whether Ruby and the 3scale Toolbox gem are installed.
+install_ruby: false
+install_3scale: false
+
+
+# Software Versions:
+# Specified in ocp-client-vm role defaults. Can be overridden with specific
+# versions if necessary
+ocp_clientvm_oc_version: "{{ osrelease }}"
+# ocp_clientvm_oc_version: '4.3.0'
+# ocp_clientvm_oc_version: '3.11.154'
+
+# OpenShift 4 specific software (only installed when Client Version >= 4.0)
+# ocp_clientvm_odo_version: 'v1.1.0'
+# ocp_clientvm_helm_version: 'v3.0.0'
+# ocp_clientvm_tkn_version: '0.6.0'
+
+# Supplemental Software
+# Specified in ocp-client-vm role defaults. Can be overridden.
+# ocp_clientvm_maven_version: '3.6.3'
+# ocp_clientvm_istioctl_version: '1.1.17'
+
+# Docker version and settings
+docker_version: '1.13.1'
+docker_size: '200'
+
+# The next flag is 1 by default. If it is set to more than 1 then instead of creating
+# clientvm.guid.baseurl it will create clientvm{1..num_users}.guid.baseurl
+num_users: 1
+
+install_bastion: true
+install_common: true
+install_opentlc_integration: true
+install_ipa_client: false
+
+# Install a user id 'student'. If install_student_user=true then a global variable
+# student_password=password needs to be provided with the password to set for the user student
+install_student_user: false
+
+# FTL is used for grading and solving. It will pull in the external ftl-injector role.
+# This might be enabled when we have solvers to run or graders for ILT
+# Define the FTL Injector Tag
+ftl_injector_tag: "v0.17.0"
+ftl_use_python3: true
+
+### If you want a Key Pair name created and injected into the hosts,
+# set `set_env_authorized_key` to true and set the keyname in `env_authorized_key`
+# you can use the key used to create the environment or use your own self generated key
+# if you set "use_own_key" to false your PRIVATE key will be copied to the bastion. (This is {{key_name}})
+
+use_own_key: true
+env_authorized_key: "{{guid}}key"
+key_name: "default_key_name"
+ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
+set_env_authorized_key: true
+
+# Is this running from Red Hat Ansible Tower
+tower_run: false
+
+### Common Host settings
+repo_method: file # Other Options are: file, satellite and rhn
+
+# Do you want to run a full yum update
+update_packages: true
+
+#If using repo_method: satellite, you must set these values as well.
+# satellite_url: satellite.example.com
+# satellite_org: Sat_org_name
+# satellite_activationkey: "rhel7basic"
+
+# Packages to install
+common_packages:
+- unzip
+- bash-completion
+- tmux
+- bind-utils
+- wget
+- nano
+- ansible
+- git
+- vim-enhanced
+- httpd-tools
+- openldap-clients
+- podman
+- tree
+- qpid-dispatch-router 
+- qpid-dispatch-tools 
+- qpid-dispatch-console
+
+# Which RHEL Repos to enable.
+rhel_repos:
+- rhel-7-server-rpms
+- rhel-7-server-extras-rpms
+- rhel-7-server-ansible-2.8-rpms
+
+cloud_tags:
+  env_type: "{{ env_type }}"
+  guid: "{{ guid }}"
+  course_name: "{{ course_name | d('unknown') }}"
+  platform: "{{ platform | d('unknown') }}"
diff --git a/ansible/configs/amq-messaging-foundations/default_vars_ec2.yml b/ansible/configs/amq-messaging-foundations/default_vars_ec2.yml
new file mode 100644
index 0000000..c82de94
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/default_vars_ec2.yml
@@ -0,0 +1,91 @@
+### AWS EC2 Environment settings
+
+### Route 53 Zone ID (AWS)
+# This is the Route53 HostedZoneId where you will create your Public DNS entries
+# This only needs to be defined if your CF template uses route53
+HostedZoneId: Z3IHLWJZOU9SRT
+
+# The region to be used, if not specified by -e in the command line
+aws_region: us-east-1
+
+# The name of the AWS SSH key pair used to access the instances
+key_name: "default_key_name"
+
+## Networking (AWS)
+subdomain_base_short: "{{ guid }}"
+subdomain_base_suffix: ".example.opentlc.com"
+subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"
+
+## Environment Sizing
+
+clientvm_instance_type: "t2.large"
+clientvm_instance_image: RHELAMI
+
+###### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYMENT
+
+docker_device: /dev/xvdb
+
+###### You can, but you usually wouldn't need to.
+ansible_user: ec2-user
+remote_user: ec2-user
+
+### CLOUDFORMATIONS vars
+
+zone_internal_dns: "{{guid}}.internal."
+chomped_zone_internal_dns: "{{guid}}.internal"
+
+bastion_public_dns: "bastion.{{subdomain_base}}."
+bastion_public_dns_chomped: "bastion.{{subdomain_base}}"
+vpcid_name_tag: "{{subdomain_base}}"
+
+az_1_name: "{{ aws_region }}a"
+az_2_name: "{{ aws_region }}b"
+
+subnet_private_1_cidr_block: "192.168.2.0/24"
+subnet_private_1_az: "{{ az_2_name }}"
+subnet_private_1_name_tag: "{{subdomain_base}}-private"
+
+subnet_private_2_cidr_block: "192.168.1.0/24"
+subnet_private_2_az: "{{ az_1_name }}"
+subnet_private_2_name_tag: "{{subdomain_base}}-private"
+
+subnet_public_1_cidr_block: "192.168.10.0/24"
+subnet_public_1_az: "{{ az_1_name }}"
+subnet_public_1_name_tag: "{{subdomain_base}}-public"
+
+subnet_public_2_cidr_block: "192.168.20.0/24"
+subnet_public_2_az: "{{ az_2_name }}"
+subnet_public_2_name_tag: "{{subdomain_base}}-public"
+
+dopt_domain_name: "{{ aws_region }}.compute.internal"
+
+rtb_public_name_tag: "{{subdomain_base}}-public"
+rtb_private_name_tag: "{{subdomain_base}}-private"
+
+cf_template_description: "{{ env_type }}-{{ guid }} template "
+
+cloudformation_retries: 2
+ocp_report: false
+
+rootfs_size_clientvm: 200
+
+instances:
+- name: "clientvm"
+  count: "{{ num_users }}"
+  public_dns: true
+  floating_ip: true
+  image_id: "{{ clientvm_instance_image }}"
+  flavor:
+    ec2: "{{ clientvm_instance_type }}"
+  tags:
+  - key: "AnsibleGroup"
+    value: "bastions"
+  - key: "ostype"
+    value: "linux"
+  rootfs_size: "{{ rootfs_size_clientvm }}"
+  volumes:
+  - device_name: "{{docker_device}}"
+    volume_size: "{{ docker_size }}"
+    volume_type: gp2
+  security_groups:
+  - BastionSG
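
As an illustration (values are examples only), the EC2 sizing variables defined above can be overridden on the command line to request a larger client VM:

[source,bash]
----
# Example only: bump instance size, root filesystem and Docker volume for EC2.
ansible-playbook main.yml \
  -e "env_type=amq-messaging-foundations" -e "guid=test01" -e "cloud_provider=ec2" \
  -e "clientvm_instance_type=t2.xlarge" \
  -e "rootfs_size_clientvm=300" \
  -e "docker_size=400"
----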
diff --git a/ansible/configs/amq-messaging-foundations/default_vars_osp.yml b/ansible/configs/amq-messaging-foundations/default_vars_osp.yml
new file mode 100644
index 0000000..06d7a24
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/default_vars_osp.yml
@@ -0,0 +1,129 @@
+# The type of cloud provider this will be deployed to
+cloud_provider: osp
+
+# Authentication credentials for OpenStack, used to create the resources.
+# These should be included with your secrets, but are listed here for reference
+# osp_auth_url:
+# osp_auth_username:
+# osp_auth_password:
+# osp_auth_cloud:
+# osp_auth_project_domain: #usually set to "default"
+# osp_auth_user_domain: #usually set to "default"
+
+# This is an account that must exist in OpenStack.
+# It is used to create projects, access, and Heat templates
+admin_user: opentlc-mgr
+
+# This is the user that Ansible will use to connect to the nodes it is
+# configuring from the admin/control host
+ansible_user: cloud-user
+remote_user: cloud-user
+
+# The domain that you want to add DNS entries to
+osp_cluster_dns_zone: blue.osp.opentlc.com
+
+# The dynamic DNS server you will add entries to.
+# NOTE: This is only applicable when {{ use_dynamic_dns}} is true
+osp_cluster_dns_server: ddns01.opentlc.com
+
+# Whether to wait for an ack from the DNS servers before continuing
+wait_for_dns: true
+
+# Authentication for DDNS. Must be set in secrets
+# ddns_key_name:
+# ddns_secret_name:
+
+# Set this to true if you want FIPs provisioned for an OpenShift on OpenStack install
+# This will provision an API and Ingress FIP
+openshift_fip_provision: True
+
+# This requires DDNS or other DNS solution configured
+# If enabled, it will add DNS entries for the API and Ingress FIPs
+openshift_fip_dns: True
+
+# The external network in OpenStack where the floating IPs (FIPs) come from
+provider_network: external
+
+# Instance type & image
+clientvm_instance_type: "2c2g30d"
+clientvm_instance_image: rhel-server-7.7-update-2
+rootfs_size_clientvm: 200
+
+# Docker Root Device
+docker_device: /dev/vdb
+
+# See cloud_providers/osp_default_vars.yml
+# See roles/infra-osp-project-create/defaults/main.yml
+
+# Set this to true if you need to create a new project in OpenStack
+# This should almost always be set to true for OpenShift installations
+# If it is set to false, the {{ osp_project_name }} must already exist and
+# should be able to run whatever you are deploying
+osp_project_create: true
+
+# The name of the project that will be created in OpenStack for the user
+osp_project_name: "{{ guid }}-project"
+
+# A list of the private networks and subnets to create in the project
+# You can create as many as you want, but at least one is required.
+# Use the name of the networks where appropriate in the instance list
+networks:
+- name: ocp
+  shared: "false"
+  subnet_cidr: 192.168.47.0/24
+  gateway_ip: 192.168.47.1
+  allocation_start: 192.168.47.10
+  allocation_end: 192.168.47.254
+  dns_nameservers: []
+  create_router: true
+
+# If osp_project_create is set to yes, define those:
+# Quotas to set for new project that is created
+quota_num_instances: 1
+quota_num_cores: 4
+quota_memory: 4096 # in MB
+quota_num_volumes: 3
+quota_volumes_gigs: 500
+quota_fip: 1
+quota_sg: 5
+
+# Instances to be provisioned in new project
+# Provide these as a list.
+# Each instance type can have any number of replicas deployed with the same
+# configuration.
+# Metadata in OpenStack is equivalent to tags in AWS
+# These instances will be created with Cinder persistent volumes
+instances:
+- name: clientvm
+  count: 1
+  unique: yes
+  alt_name: bastion
+  image_id: "{{ clientvm_instance_image }}"
+  floating_ip: yes
+  flavor:
+    osp: "{{ clientvm_instance_type }}"
+  metadata:
+  - AnsibleGroup: "bastions,clientvms"
+  - function: clientvm
+  - user: "{{ student_name }}"
+  - project: "{{ project_tag }}"
+  - ostype: linux
+  - Purpose: "{{ purpose }}"
+  rootfs_size: "{{ rootfs_size_clientvm }}"
+  network: ocp
+  security_groups:
+  - clientvm_sg
+
+# Security groups and associated rules. These will be provisioned as separate
+# groups and rules when the Heat template is generated.
+security_groups:
+- name: clientvm_sg
+  description: Client VM security group allows basic icmp and SSH ingress and egress to *
+  rules:
+  - protocol: icmp
+    direction: ingress
+  - protocol: tcp
+    direction: ingress
+    port_range_min: 22
+    port_range_max: 22
+    remote_ip_prefix: 0.0.0.0/0
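
A hedged sketch of an OpenStack run using the variables above. The osp_auth_* credentials are expected to come from a separate secrets file (the filename below is hypothetical), and student_name and purpose are placeholders required by the instance metadata defined in this file.

[source,bash]
----
# Illustrative OSP invocation; osp_auth_* values live in your own secrets file.
ansible-playbook main.yml \
  -e @${HOME}/secrets-osp.yml \
  -e "env_type=amq-messaging-foundations" \
  -e "guid=test01" \
  -e "cloud_provider=osp" \
  -e "student_name=lab-user" \
  -e "purpose=development" \
  -e "output_dir=/tmp/output"
----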
diff --git a/ansible/configs/amq-messaging-foundations/destroy_env.yml b/ansible/configs/amq-messaging-foundations/destroy_env.yml
new file mode 100644
index 0000000..9945328
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/destroy_env.yml
@@ -0,0 +1,2 @@
+---
+- import_playbook: "../../cloud_providers/{{ cloud_provider }}_destroy_env.yml"
diff --git a/ansible/configs/amq-messaging-foundations/files/cloud_providers/ec2_cloud_template.j2 b/ansible/configs/amq-messaging-foundations/files/cloud_providers/ec2_cloud_template.j2
new file mode 100644
index 0000000..9cb6738
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/files/cloud_providers/ec2_cloud_template.j2
@@ -0,0 +1,443 @@
+#jinja2: lstrip_blocks: True
+---
+AWSTemplateFormatVersion: "2010-09-09"
+Mappings:
+  RegionMapping:
+    us-east-1:
+      {% if osrelease is version_compare('3.9.25', '>=') %}
+      RHELAMI: ami-6871a115
+      {% else %}
+      RHELAMI: ami-c998b6b2
+      {% endif %}
+    us-east-2:
+      {% if osrelease is version_compare('3.9.25', '>=') %}
+      RHELAMI: ami-03291866
+      {% else %}
+      RHELAMI: ami-cfdafaaa
+      {% endif %}
+    us-west-1:
+      {% if osrelease is version_compare('3.9.25', '>=') %}
+      RHELAMI: ami-18726478
+      {% else %}
+      RHELAMI: ami-66eec506
+      {% endif %}
+    us-west-2:
+      {% if osrelease is version_compare('3.9.25', '>=') %}
+      RHELAMI: ami-223f945a
+      {% else %}
+      RHELAMI: ami-9fa343e7
+      {% endif %}
+    eu-west-1:
+      {% if osrelease is version_compare('3.9.25', '>=') %}
+      RHELAMI: ami-7c491f05
+      {% else %}
+      RHELAMI: ami-bb9a6bc2
+      {% endif %}
+    eu-central-1:
+      {% if osrelease is version_compare('3.9.25', '>=') %}
+      RHELAMI: ami-c86c3f23
+      {% else %}
+      RHELAMI: ami-d74be5b8
+      {% endif %}
+    ap-northeast-1:
+      {% if osrelease is version_compare('3.9.25', '>=') %}
+      RHELAMI: ami-6b0d5f0d
+      {% else %}
+      RHELAMI: ami-30ef0556
+      {% endif %}
+    ap-northeast-2:
+      {% if osrelease is version_compare('3.9.25', '>=') %}
+      RHELAMI: ami-3eee4150
+      {% else %}
+      RHELAMI: ami-0f5a8361
+      {% endif %}
+    ap-southeast-1:
+      {% if osrelease is version_compare('3.9.25', '>=') %}
+      RHELAMI: ami-76144b0a
+      {% else %}
+      RHELAMI: ami-10bb2373
+      {% endif %}
+    ap-southeast-2:
+      {% if osrelease is version_compare('3.9.25', '>=') %}
+      RHELAMI: ami-67589505
+      {% else %}
+      RHELAMI: ami-ccecf5af
+      {% endif %}
+    ap-south-1:
+      {% if osrelease is version_compare('3.9.25', '>=') %}
+      RHELAMI: ami-5b673c34
+      {% else %}
+      RHELAMI: ami-cdbdd7a2
+      {% endif %}
+    sa-east-1:
+      {% if osrelease is version_compare('3.9.25', '>=') %}
+      RHELAMI: ami-b0b7e3dc
+      {% else %}
+      RHELAMI: ami-a789ffcb
+      {% endif %}
+  DNSMapping:
+    us-east-1:
+      domain: "us-east-1.compute.internal"
+    us-west-1:
+      domain: "us-west-1.compute.internal"
+    us-west-2:
+      domain: "us-west-2.compute.internal"
+    eu-west-1:
+      domain: "eu-west-1.compute.internal"
+    eu-central-1:
+      domain: "eu-central-1.compute.internal"
+    ap-northeast-1:
+      domain: "ap-northeast-1.compute.internal"
+    ap-northeast-2:
+      domain: "ap-northeast-2.compute.internal"
+    ap-southeast-1:
+      domain: "ap-southeast-1.compute.internal"
+    ap-southeast-2:
+      domain: "ap-southeast-2.compute.internal"
+    sa-east-1:
+      domain: "sa-east-1.compute.internal"
+    ap-south-1:
+      domain: "ap-south-1.compute.internal"
+
+Resources:
+  Vpc:
+    Type: "AWS::EC2::VPC"
+    Properties:
+      CidrBlock: "192.168.0.0/16"
+      EnableDnsSupport: true
+      EnableDnsHostnames: true
+      Tags:
+        - Key: Name
+          Value: "{{vpcid_name_tag}}"
+        - Key: Application
+          Value:
+            Ref: "AWS::StackId"
+
+  VpcInternetGateway:
+    Type: "AWS::EC2::InternetGateway"
+
+  VpcGA:
+    Type: "AWS::EC2::VPCGatewayAttachment"
+    Properties:
+      InternetGatewayId:
+        Ref: VpcInternetGateway
+      VpcId:
+        Ref: Vpc
+
+  VpcRouteTable:
+    Type: "AWS::EC2::RouteTable"
+    Properties:
+      VpcId:
+        Ref: Vpc
+
+  VPCRouteInternetGateway:
+    DependsOn: VpcGA
+    Type: "AWS::EC2::Route"
+    Properties:
+      GatewayId:
+        Ref: VpcInternetGateway
+      DestinationCidrBlock: "0.0.0.0/0"
+      RouteTableId:
+        Ref: VpcRouteTable
+
+  PublicSubnet:
+    Type: "AWS::EC2::Subnet"
+    DependsOn:
+      - Vpc
+    Properties:
+      CidrBlock: "192.168.0.0/24"
+      Tags:
+        - Key: Name
+          Value: "{{project_tag}}"
+        - Key: Application
+          Value:
+            Ref: "AWS::StackId"
+      MapPublicIpOnLaunch: true
+      VpcId:
+        Ref: Vpc
+
+  PublicSubnetRTA:
+    Type: "AWS::EC2::SubnetRouteTableAssociation"
+    Properties:
+      RouteTableId:
+        Ref: VpcRouteTable
+      SubnetId:
+        Ref: PublicSubnet
+
+  HostSG:
+    Type: "AWS::EC2::SecurityGroup"
+    Properties:
+      GroupDescription: Host
+      VpcId:
+        Ref: Vpc
+      Tags:
+        - Key: Name
+          Value: host_sg
+
+  HostUDPPorts:
+    Type: "AWS::EC2::SecurityGroupIngress"
+    Properties:
+      GroupId:
+        Fn::GetAtt:
+          - HostSG
+          - GroupId
+      IpProtocol: udp
+      FromPort: 60000
+      ToPort: 60003
+      CidrIp: "0.0.0.0/0"
+
+  HostTCPPortsSSH:
+    Type: "AWS::EC2::SecurityGroupIngress"
+    Properties:
+      GroupId:
+        Fn::GetAtt:
+          - HostSG
+          - GroupId
+      IpProtocol: tcp
+      FromPort: 22
+      ToPort: 22
+      CidrIp: "0.0.0.0/0"
+  HostTCPPortsHTTP:
+    Type: "AWS::EC2::SecurityGroupIngress"
+    Properties:
+      GroupId:
+        Fn::GetAtt:
+          - HostSG
+          - GroupId
+      IpProtocol: tcp
+      FromPort: 80
+      ToPort: 80
+      CidrIp: "0.0.0.0/0"
+  HostTCPPortsHTTPS:
+    Type: "AWS::EC2::SecurityGroupIngress"
+    Properties:
+      GroupId:
+        Fn::GetAtt:
+          - HostSG
+          - GroupId
+      IpProtocol: tcp
+      FromPort: 443
+      ToPort: 443
+      CidrIp: "0.0.0.0/0"
+
+  zoneinternalidns:
+    Type: "AWS::Route53::HostedZone"
+    Properties:
+      Name: "{{ zone_internal_dns }}"
+      VPCs:
+        - VPCId:
+            Ref: Vpc
+          VPCRegion:
+            Ref: "AWS::Region"
+      HostedZoneConfig:
+        Comment: "Created By ansible agnostic deployer"
+
+{% if num_users|d(1)|int > 1 %}
+{% for c in range(1,num_users|int+1) %}
+
+  clientvm{{loop.index}}:
+    Type: "AWS::EC2::Instance"
+    Properties:
+{% if custom_image is defined %}
+      ImageId: {{ custom_image.image_id }}
+{% else %}
+      ImageId:
+        Fn::FindInMap:
+        - RegionMapping
+        - Ref: AWS::Region
+{% if 'image_id' in instances[0] %}
+        - {{ instances[0].image_id }}
+{% else %}
+        - 'RHELAMI'
+{% endif %}
+{% endif %}
+      InstanceType: "{{instances[0]['flavor'][cloud_provider]}}"
+      KeyName: "{{instances[0]['key_name'] | default(key_name)}}"
+      SecurityGroupIds:
+        - "Fn::GetAtt":
+          - HostSG
+          - GroupId
+      SubnetId:
+        Ref: PublicSubnet
+      Tags:
+        - Key: Name
+          Value: clientvm{{loop.index}}
+        - Key: internaldns
+          Value: clientvm{{loop.index}}.{{chomped_zone_internal_dns}}
+        - Key: "owner"
+          Value: "{{ email | default('unknownuser') }}"
+        - Key: "Project"
+          Value: "{{project_tag}}"
+        - Key: "{{project_tag}}"
+          Value: "{{ instances[0]['name'] }}"
+{% for tag in instances[0]['tags'] %}
+        - Key: {{tag['key']}}
+          Value: {{tag['value']}}
+{% endfor %}
+      BlockDeviceMappings:
+        - DeviceName: "/dev/sda1"
+          Ebs:
+            VolumeSize: {{ instances[0]['rootfs_size'] | default('50') }}
+{% for vol in instances[0]['volumes']|default([]) %}
+        - DeviceName: "{{ vol['device_name'] }}"
+          Ebs:
+            VolumeType: "{{ vol['volume_type'] | d('gp2') }}"
+            VolumeSize: "{{ vol['volume_size'] | d('20') }}"
+{% endfor %}
+
+  clientvm{{loop.index}}InternalDNS:
+    Type: "AWS::Route53::RecordSetGroup"
+    Properties:
+      HostedZoneId:
+        Ref: zoneinternalidns
+      RecordSets:
+      - Name: "clientvm{{loop.index}}.{{zone_internal_dns}}"
+        Type: A
+        TTL: 10
+        ResourceRecords:
+          - "Fn::GetAtt":
+            - clientvm{{loop.index}}
+            - PrivateIp
+      - Name: "bastion{{loop.index}}.{{zone_internal_dns}}"
+        Type: A
+        TTL: 10
+        ResourceRecords:
+          - "Fn::GetAtt":
+            - clientvm{{loop.index}}
+            - PrivateIp
+
+  clientvm{{loop.index}}EIP:
+    Type: "AWS::EC2::EIP"
+    DependsOn:
+    - VpcGA
+    Properties:
+      InstanceId:
+        Ref: clientvm{{loop.index}}
+
+  clientvm{{loop.index}}PublicDNS:
+    Type: "AWS::Route53::RecordSetGroup"
+    DependsOn:
+      - clientvm{{loop.index}}EIP
+    Properties:
+      HostedZoneId: {{HostedZoneId}}
+      RecordSets:
+          - Name: "clientvm{{loop.index}}.{{subdomain_base}}."
+            Type: A
+            TTL: 10
+            ResourceRecords:
+            - "Fn::GetAtt":
+              - clientvm{{loop.index}}
+              - PublicIp
+          - Name: "bastion{{loop.index}}.{{subdomain_base}}."
+            Type: A
+            TTL: 10
+            ResourceRecords:
+            - "Fn::GetAtt":
+              - clientvm{{loop.index}}
+              - PublicIp
+{% endfor %}
+
+{% else %}
+  clientvm:
+    Type: "AWS::EC2::Instance"
+    Properties:
+{% if custom_image is defined %}
+      ImageId: {{ custom_image.image_id }}
+{% else %}
+      ImageId:
+        Fn::FindInMap:
+        - RegionMapping
+        - Ref: AWS::Region
+        - 'RHELAMI'
+{% endif %}
+      InstanceType: "{{instances[0]['flavor'][cloud_provider]}}"
+      KeyName: "{{instances[0]['key_name'] | default(key_name)}}"
+      SecurityGroupIds:
+        - "Fn::GetAtt":
+          - HostSG
+          - GroupId
+      SubnetId:
+        Ref: PublicSubnet
+      Tags:
+        - Key: Name
+          Value: clientvm
+        - Key: internaldns
+          Value: clientvm.{{chomped_zone_internal_dns}}
+        - Key: "owner"
+          Value: "{{ email | default('unknownuser') }}"
+        - Key: "Project"
+          Value: "{{project_tag}}"
+        - Key: "{{project_tag}}"
+          Value: "{{ instances[0]['name'] }}"
+{% for tag in instances[0]['tags'] %}
+        - Key: {{tag['key']}}
+          Value: {{tag['value']}}
+{% endfor %}
+      BlockDeviceMappings:
+        - DeviceName: "/dev/sda1"
+          Ebs:
+            VolumeSize: {{ instances[0]['rootfs_size'] | default('50') }}
+{% for vol in instances[0]['volumes']|default([]) %}
+        - DeviceName: "{{ vol['device_name'] }}"
+          Ebs:
+            VolumeType: "{{ vol['volume_type'] | d('gp2') }}"
+            VolumeSize: "{{ vol['volume_size'] | d('20') }}"
+{% endfor %}
+
+  clientvmInternalDNS:
+    Type: "AWS::Route53::RecordSetGroup"
+    Properties:
+      HostedZoneId:
+        Ref: zoneinternalidns
+      RecordSets:
+      - Name: "clientvm.{{zone_internal_dns}}"
+        Type: A
+        TTL: 10
+        ResourceRecords:
+          - "Fn::GetAtt":
+            - clientvm
+            - PrivateIp
+      - Name: "bastion.{{zone_internal_dns}}"
+        Type: A
+        TTL: 10
+        ResourceRecords:
+          - "Fn::GetAtt":
+            - clientvm
+            - PrivateIp
+
+  clientvmEIP:
+    Type: "AWS::EC2::EIP"
+    DependsOn:
+    - VpcGA
+    Properties:
+      InstanceId:
+        Ref: clientvm
+
+  clientvmPublicDNS:
+    Type: "AWS::Route53::RecordSetGroup"
+    DependsOn:
+      - clientvmEIP
+    Properties:
+      HostedZoneId: {{HostedZoneId}}
+      RecordSets:
+          - Name: "clientvm.{{subdomain_base}}."
+            Type: A
+            TTL: 10
+            ResourceRecords:
+            - "Fn::GetAtt":
+              - clientvm
+              - PublicIp
+          - Name: "bastion.{{subdomain_base}}."
+            Type: A
+            TTL: 10
+            ResourceRecords:
+            - "Fn::GetAtt":
+              - clientvm
+              - PublicIp
+{% endif %}
+
+Outputs:
+  Route53internalzoneOutput:
+    Description: The ID of the internal route 53 zone
+    Value:
+      Ref: zoneinternalidns
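
The template above creates public Route 53 records of the form clientvm[N].<guid><subdomain_base_suffix> plus matching bastion records, so a quick post-provision check could be the sketch below (the domain shown assumes the default subdomain_base_suffix).

[source,bash]
----
# Sketch: verify the public DNS records created by the CloudFormation template.
GUID=test01
BASE="${GUID}.example.opentlc.com"
dig +short "clientvm.${BASE}"      # single-user deployments (num_users == 1)
dig +short "clientvm1.${BASE}"     # multi-user deployments (num_users > 1)
dig +short "bastion.${BASE}"
----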
diff --git a/ansible/configs/amq-messaging-foundations/files/hosts_template.j2 b/ansible/configs/amq-messaging-foundations/files/hosts_template.j2
new file mode 100644
index 0000000..da6ec89
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/files/hosts_template.j2
@@ -0,0 +1,5 @@
+
+###########################################################################
+### ClientVM Hosts
+### Dummy file
+###########################################################################
diff --git a/ansible/configs/amq-messaging-foundations/files/repos_template.j2 b/ansible/configs/amq-messaging-foundations/files/repos_template.j2
new file mode 100644
index 0000000..4fe0111
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/files/repos_template.j2
@@ -0,0 +1,26 @@
+# RHEL Repos
+
+{% if rhel_repos is defined %}
+{% for repo in rhel_repos %}
+[{{ repo }}]
+name={{ repo }}
+baseurl={{own_repo_path}}/{{ repo }}
+enabled=1
+gpgcheck=0
+
+{% endfor %}
+{% else %}
+[rhel-7-server-rpms]
+name=Red Hat Enterprise Linux 7
+baseurl={{own_repo_path}}/rhel-7-server-rpms
+enabled=1
+gpgcheck=0
+{% endif %}
+
+[epel]
+name=Extra Packages for Enterprise Linux 7 - $basearch
+baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch
+mirrorlist=http://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
+failovermethod=priority
+enabled=1
+gpgcheck=0
\ No newline at end of file
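
Once the set-repositories role referenced in pre_software.yml has written the rendered repo file to the client VM, a hedged sanity check (the exact destination filename is determined by that role) might be:

[source,bash]
----
# Sketch: confirm the rendered RHEL and EPEL repos are active on the client VM.
sudo yum repolist enabled
grep -r "rhel-7-server" /etc/yum.repos.d/
----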
diff --git a/ansible/configs/amq-messaging-foundations/post_infra.yml b/ansible/configs/amq-messaging-foundations/post_infra.yml
new file mode 100644
index 0000000..6da080f
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/post_infra.yml
@@ -0,0 +1,24 @@
+- name: Step 002 Post Infrastructure
+  hosts: localhost
+  connection: local
+  become: false
+  tags:
+  - step002
+  - post_infrastructure
+  tasks:
+  - name: Launch a Job Template with update-on-launch inventory set
+    when: tower_run == 'true'
+    uri:
+      url: "https://{{ ansible_tower_ip }}/api/v1/job_templates/{{ job_template_id }}/launch/"
+      method: POST
+      user: "{{tower_admin}}"
+      password: "{{tower_admin_password}}"
+      body:
+        extra_vars:
+          guid: "{{guid}}"
+          ipa_host_password: "{{ipa_host_password}}"
+
+      body_format: json
+      validate_certs: False
+      HEADER_Content-Type: "application/json"
+      status_code: 200, 201
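
The uri task above posts extra_vars to the Tower job template launch endpoint. For reference, a hand-rolled equivalent with curl could look like the sketch below; all variables (Tower host, template ID, credentials) are placeholders.

[source,bash]
----
# Sketch: the same Tower job launch as the uri task, expressed with curl.
TOWER_IP=tower.example.com
JOB_TEMPLATE_ID=42
curl -k -u "${TOWER_ADMIN}:${TOWER_ADMIN_PASSWORD}" \
  -H "Content-Type: application/json" \
  -X POST \
  -d "{\"extra_vars\": {\"guid\": \"${GUID}\", \"ipa_host_password\": \"${IPA_HOST_PASSWORD}\"}}" \
  "https://${TOWER_IP}/api/v1/job_templates/${JOB_TEMPLATE_ID}/launch/"
----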
diff --git a/ansible/configs/amq-messaging-foundations/post_software.yml b/ansible/configs/amq-messaging-foundations/post_software.yml
new file mode 100644
index 0000000..9426123
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/post_software.yml
@@ -0,0 +1,19 @@
+---
+- name: Step 00xxxxx post software
+  hosts: bastions
+  become: yes
+  gather_facts: False
+  tasks:
+  - debug:
+      msg: "Post-Software Steps starting"
+
+- name: PostSoftware flight-check
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+  - post_flight_check
+  tasks:
+  - debug:
+      msg: "Post-Software checks completed successfully"
diff --git a/ansible/configs/amq-messaging-foundations/pre_infra.yml b/ansible/configs/amq-messaging-foundations/pre_infra.yml
new file mode 100644
index 0000000..5539e0d
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/pre_infra.yml
@@ -0,0 +1,12 @@
+---
+- name: Step 000 Pre Infrastructure
+  hosts: localhost
+  connection: local
+  become: false
+  gather_facts: false
+  tags:
+  - step001
+  - pre_infrastructure
+  tasks:
+  - debug:
+      msg: "Step 000 Pre Infrastructure - Dummy action"
diff --git a/ansible/configs/amq-messaging-foundations/pre_software.yml b/ansible/configs/amq-messaging-foundations/pre_software.yml
new file mode 100644
index 0000000..289a996
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/pre_software.yml
@@ -0,0 +1,89 @@
+---
+- name: Step 003 - Create env key
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+  - step003
+  - generate_env_keys
+  tasks:
+  - name: Generate SSH keys
+    shell: ssh-keygen -b 2048 -t rsa -f "{{output_dir}}/{{env_authorized_key}}" -q -N ""
+    args:
+      creates: "{{output_dir}}/{{env_authorized_key}}"
+    when: set_env_authorized_key | bool
+
+  - name: fix permission
+    file:
+      path: "{{output_dir}}/{{env_authorized_key}}"
+      mode: 0400
+    when: set_env_authorized_key | bool
+
+  - name: Generate SSH pub key
+    shell: ssh-keygen -y -f "{{output_dir}}/{{env_authorized_key}}" > "{{output_dir}}/{{env_authorized_key}}.pub"
+    args:
+      creates: "{{output_dir}}/{{env_authorized_key}}.pub"
+    when: set_env_authorized_key | bool
+
+# Cloudformation template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }}
+- name: Configure all hosts with Repositories, Common Files and Set environment key
+  hosts:
+  - all:!windows
+  become: true
+  gather_facts: False
+  tags:
+  - step004
+  - common_tasks
+  roles:
+  - { role: "set-repositories",       when: 'repo_method is defined' }
+  - { role: "common",                 when: 'install_common | bool' }
+  - { role: "set_env_authorized_key", when: 'set_env_authorized_key | bool' }
+
+- name: Configuring Bastion Hosts
+  hosts: bastions
+  become: true
+  tags:
+  - step004
+  - bastion_tasks
+  roles:
+  - { role: "bastion-lite",         when: 'install_bastion | bool' }
+  - { role: "bastion-student-user", when: 'install_student_user | bool' }
+  - { role: "bastion-opentlc-ipa",  when: 'install_ipa_client | bool' }
+
+- name: Configuring Ruby on Bastion Hosts
+  hosts: bastions
+  become: true
+  gather_facts: False
+  tasks:
+  - when: (install_ruby | bool) or (install_3scale | bool)
+    include_role:
+      name: ruby
+    vars:
+      rvm1_rubies: ['ruby-2.6.3']
+      rvm1_install_flags: '--auto-dotfiles'  # Remove --user-install from defaults
+      rvm1_install_path: /usr/local/rvm         # Set to system location
+      rvm1_user: root                            # Need root account to access system location
+  tags:
+  - step004
+  - ruby_tasks
+
+- name: Configuring 3scale toolbox 
+  hosts: bastions
+  become: true
+  tags:
+  - step004
+  - 3scale_toolbox_tasks
+  roles:
+  - { role: "bastion-3scale",         when: 'install_3scale | bool' }
+
+- name: PreSoftware flight-check
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+  - flight_check
+  tasks:
+  - debug:
+      msg: "Pre-Software checks completed successfully"
diff --git a/ansible/configs/amq-messaging-foundations/requirements-ruby.yml b/ansible/configs/amq-messaging-foundations/requirements-ruby.yml
new file mode 100644
index 0000000..9763b05
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/requirements-ruby.yml
@@ -0,0 +1,6 @@
+---
+# External role to set up RVM and Ruby
+
+- src: rvm.ruby
+  name: ruby
+  version: v2.1.2
diff --git a/ansible/configs/amq-messaging-foundations/requirements.yml b/ansible/configs/amq-messaging-foundations/requirements.yml
new file mode 100644
index 0000000..c95cee4
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/requirements.yml
@@ -0,0 +1,6 @@
+---
+# External role to set up grader host virtualenv and FTL grading infra
+
+- src: https://github.com/redhat-gpte-devopsautomation/ftl-injector
+  name: ftl-injector
+  version: v0.17.0
diff --git a/ansible/configs/amq-messaging-foundations/sample_vars b/ansible/configs/amq-messaging-foundations/sample_vars
new file mode 100644
index 0000000..51668c9
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/sample_vars
@@ -0,0 +1,14 @@
+env_type: amq-messaging-foundations     # Name of config to deploy
+output_dir: /opt/workdir                # Writable working scratch directory
+email: name@example.com                 # User info for notifications
+
+guid: test01                            # Unique string used in FQDN
+subdomain_base_suffix: .example.opentlc.com      # Your domain used in FQDN
+
+# Cloud-specific settings - example given here for AWS
+
+cloud_provider: ec2                     # Which AgnosticD Cloud Provider to use
+aws_region: us-west-1                   # AWS Region to deploy in
+HostedZoneId: Z3IHLWJZOU9SRT            # You will need to change this
+key_name: ocpkey                        # Keyname must exist in AWS
+software_to_deploy: none
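
Since this file is plain YAML, it can be passed straight to ansible-playbook with `-e @` (a minimal sketch; adjust the path if you copy the file elsewhere):

[source,bash]
----
# Sketch: deploy using the sample vars file as-is.
ansible-playbook main.yml -e @configs/amq-messaging-foundations/sample_vars
----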
diff --git a/ansible/configs/amq-messaging-foundations/software.yml b/ansible/configs/amq-messaging-foundations/software.yml
new file mode 100644
index 0000000..5922bcd
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/software.yml
@@ -0,0 +1,28 @@
+---
+- name: Step 00xxxxx software
+  hosts: localhost
+  gather_facts: False
+  become: false
+  tasks:
+    - debug:
+        msg: "Software tasks started"
+
+- name: Set up ClientVM
+  hosts: bastions
+  gather_facts: false
+  become: true
+  tasks:
+  - name: Set up Client VM for AMQ messaging foundations
+    include_role:
+      name: "amq-client-vm"
+
+- name: Software flight-check
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - post_flight_check
+  tasks:
+    - debug:
+        msg: "Software checks completed successfully"
diff --git a/ansible/configs/amq-messaging-foundations/software.yml~ b/ansible/configs/amq-messaging-foundations/software.yml~
new file mode 100644
index 0000000..7002496
--- /dev/null
+++ b/ansible/configs/amq-messaging-foundations/software.yml~
@@ -0,0 +1,28 @@
+---
+- name: Step 00xxxxx software
+  hosts: localhost
+  gather_facts: False
+  become: false
+  tasks:
+    - debug:
+        msg: "Software tasks started"
+
+- name: Set up ClientVM
+  hosts: bastions
+  gather_facts: false
+  become: true
+  tasks:
+  - name: Set up Client VM for OCP 3
+    include_role:
+      name: "ocp-client-vm"
+
+- name: Software flight-check
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  become: false
+  tags:
+    - post_flight_check
+  tasks:
+    - debug:
+        msg: "Software checks completed successfully"
diff --git a/ansible/roles/amq-client-vm/README.md b/ansible/roles/amq-client-vm/README.md
new file mode 100644
index 0000000..bc6defd
--- /dev/null
+++ b/ansible/roles/amq-client-vm/README.md
@@ -0,0 +1,56 @@
+amq-client-vm
+=============
+
+This role prepares the client VM for the AMQ Messaging Foundations course: it installs the qpid-dispatch client tooling, OpenJDK 8 and 11, Docker, the OpenShift CLI, Maven, jq and a set of bash customizations for the lab users.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+    - hosts: servers
+      roles:
+         - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
+
+
+----
+HOST_GUID=dev39
+TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
+SSH_USERNAME="xxxx"
+SSH_PRIVATE_KEY="xxxx"
+
+WORKLOAD="ocp-client-vm"
+
+# a TARGET_HOST is specified in the command line, without using an inventory file
+ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
+                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
+                 -e"ansible_user=${SSH_USERNAME}" \
+                    -e"ocp_workload=${WORKLOAD}" \
+                    -e"ACTION=create"
+
+----
diff --git a/ansible/roles/amq-client-vm/defaults/main.yml b/ansible/roles/amq-client-vm/defaults/main.yml
new file mode 100644
index 0000000..5a71caf
--- /dev/null
+++ b/ansible/roles/amq-client-vm/defaults/main.yml
@@ -0,0 +1,9 @@
+# Software Version defaults
+
+ocp_clientvm_oc_version: '4.3.1'
+ocp_clientvm_odo_version: 'v1.1.0'
+ocp_clientvm_helm_version: 'v3.0.0'
+ocp_clientvm_tkn_version: '0.7.1'
+ocp_clientvm_kn_version: 'v0.12.0'
+ocp_clientvm_istioctl_version: '1.1.17'
+ocp_clientvm_maven_version: '3.6.3'
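
These defaults can be overridden at deploy time. As an illustrative example, pinning an OCP 3 client version changes which CLI tarball the role downloads, since the role's tasks branch on version_compare('4.0', ...):

[source,bash]
----
# Sketch: pin the OpenShift CLI version installed by the amq-client-vm role.
ansible-playbook main.yml \
  -e "env_type=amq-messaging-foundations" -e "guid=test01" -e "cloud_provider=ec2" \
  -e "ocp_clientvm_oc_version=3.11.154"   # versions below 4.0 select the OCP 3 client tarball
----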
diff --git a/ansible/roles/amq-client-vm/files/bash_profile b/ansible/roles/amq-client-vm/files/bash_profile
new file mode 100644
index 0000000..365ba4b
--- /dev/null
+++ b/ansible/roles/amq-client-vm/files/bash_profile
@@ -0,0 +1,11 @@
+# .bash_profile
+
+# Get the aliases and functions
+if [ -f ~/.bashrc ]; then
+        . ~/.bashrc
+fi
+
+# User specific environment and startup programs
+
+export PATH=$PATH:$HOME/bin:/usr/local/bin:/usr/local/maven/bin
+export GUID=`hostname | awk -F. '{print $2}'`
diff --git a/ansible/roles/amq-client-vm/files/bashrc b/ansible/roles/amq-client-vm/files/bashrc
new file mode 100644
index 0000000..0725131
--- /dev/null
+++ b/ansible/roles/amq-client-vm/files/bashrc
@@ -0,0 +1,24 @@
+# .bashrc
+
+# User specific aliases and functions
+
+alias rm='rm -i'
+alias cp='cp -i'
+alias mv='mv -i'
+alias ls='ls -F'
+alias ll='ls -lF'
+alias la='ls -aF'
+alias ge="oc get events --sort-by='{.lastTimestamp}'"
+
+# Source global definitions
+if [ -f /etc/bashrc ]; then
+  . /etc/bashrc
+fi
+
+# Set up Bash Git Prompt
+if [ -f "$HOME/.bash-git-prompt/gitprompt.sh" ]; then
+  GIT_PROMPT_THEME=Evermeet
+  GIT_PROMPT_SHOW_CHANGED_FILES_COUNT=1
+  GIT_PROMPT_ONLY_IN_REPO=0
+  source "$HOME/.bash-git-prompt/gitprompt.sh"
+fi
diff --git a/ansible/roles/amq-client-vm/tasks/main.yml b/ansible/roles/amq-client-vm/tasks/main.yml
new file mode 100644
index 0000000..40696b5
--- /dev/null
+++ b/ansible/roles/amq-client-vm/tasks/main.yml
@@ -0,0 +1,205 @@
+---
+- name: Install AMQ Messaging Foundations client VM packages
+  yum:
+    state: present
+    name:
+    - java-1.8.0-openjdk-devel
+    - java-11-openjdk-devel
+    - docker
+    - python-websockify
+    - qpid-dispatch-router
+    - qpid-dispatch-tools
+    - qpid-dispatch-console
+  tags:
+  - install_amq_client_vm_packages
+
+- name: Create docker group
+  become: yes
+  group:
+    name: docker
+    state: present
+
+- name: Create users group
+  become: yes
+  group:
+    name: users
+    state: present
+
+- when:
+  - student_name is defined
+  - student_name != ''
+  name: Add user to docker and users groups
+  become: yes
+  user:
+    name: "{{ student_name }}"
+    append: yes
+    groups: docker,users
+
+- name: Enable and Start Docker
+  service:
+    name: docker
+    state: started
+    enabled: yes
+
+- name: Get the OpenShift CLI for OCP 3
+  become: yes
+  when: ocp_clientvm_oc_version is version_compare('4.0', '<')
+  unarchive:
+    src: "https://mirror.openshift.com/pub/openshift-v3/clients/{{ ocp_clientvm_oc_version }}/linux/oc.tar.gz"
+    remote_src: yes
+    dest: /usr/local/sbin
+    mode: 0775
+    owner: root
+    group: root
+
+- name: Get Command Line Tools for OpenShift 4
+  when: ocp_clientvm_oc_version is version_compare('4.0', '>=')
+  block:
+  - name: Get the OpenShift CLI for OCP 4
+    become: yes
+    unarchive:
+      src: "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/{{ ocp_clientvm_oc_version }}/openshift-client-linux-{{ ocp_clientvm_oc_version }}.tar.gz"
+      remote_src: yes
+      dest: /usr/local/sbin
+      mode: 0775
+      owner: root
+      group: root
+
+- name: Create OpenShift Bash completion file
+  become: yes
+  shell: /usr/local/sbin/oc completion bash >/etc/bash_completion.d/openshift
+
+
+- name: Create /usr/local/maven directory
+  file:
+    path: /usr/local/maven
+    state: directory
+    owner: root
+    group: root
+    mode: 0775
+
+- name: Download and unarchive Maven Distribution
+  unarchive:
+    src: "https://gpte-public.s3.amazonaws.com/apache-maven-{{ ocp_clientvm_maven_version }}-bin.tar.gz"
+    remote_src: yes
+    dest: /usr/local/maven
+    owner: root
+    group: root
+    extra_opts:
+      - --strip=1
+  register: r_geturl
+  retries: 5
+  delay: 20
+  until: r_geturl is succeeded
+  tags:
+  - install_openshift_client_vm_packages
+
+- name: Download jq-linux64
+  get_url:
+    url: https://gpte-public.s3.amazonaws.com/jq-linux64
+    dest: /usr/local/sbin/jq
+    mode: 0775
+  ignore_errors: true
+  tags:
+  - install_openshift_client_vm_packages
+
+- name: Install bash-git-prompt
+  git:
+    repo: https://github.com/magicmonty/bash-git-prompt.git
+    dest: "{{ item }}/.bash-git-prompt"
+    clone: yes
+  loop:
+  - "/root"
+  - "/home/{{remote_user}}"
+  - "/etc/skel"
+  tags:
+  - install_bash_customization
+
+- name: Change ownership of bash-git-prompt
+  file:
+    path: "{{ item.directory }}/.bash-git-prompt"
+    owner: "{{ item.user }}"
+    group: "{{ item.group }}"
+    recurse: yes
+  loop:
+  - { directory: "/root",                 user: "root",            group: "root" }
+  - { directory: "/home/{{remote_user}}", user: "{{remote_user}}", group: "{{remote_user}}" }
+  - { directory: "/etc/skel",             user: "root",            group: "root" }
+  tags:
+  - install_bash_customization
+
+- name: Install .bashrc
+  copy:
+    src: ../files/bashrc
+    dest: "{{ item.directory }}/.bashrc"
+    mode: 0644
+    owner: "{{ item.user }}"
+    group: "{{ item.group }}"
+  loop:
+  - { directory: "/root",                 user: "root",            group: "root" }
+  - { directory: "/home/{{remote_user}}", user: "{{remote_user}}", group: "{{remote_user}}" }
+  - { directory: "/etc/skel",             user: "root",            group: "root" }
+  tags:
+  - install_bash_customization
+
+- name: Install .bash_profile
+  copy:
+    src: ../files/bash_profile
+    dest: "{{ item.directory }}/.bash_profile"
+    mode: 0644
+    owner: "{{ item.user }}"
+    group: "{{ item.group }}"
+  loop:
+  - { directory: "/root",                 user: "root",            group: "root" }
+  - { directory: "/home/{{remote_user}}", user: "{{remote_user}}", group: "{{remote_user}}" }
+  - { directory: "/etc/skel",             user: "root",            group: "root" }
+  tags:
+  - install_bash_customization
+
+- name: Install everything for student user
+  when: install_student_user|d(False)|bool
+  block:
+  - name: Install bash-git-prompt for lab-user
+    git:
+      repo: https://github.com/magicmonty/bash-git-prompt.git
+      dest: "{{ item }}/.bash-git-prompt"
+      clone: yes
+    with_items:
+    - "/home/lab-user"
+    tags:
+    - install_bash_customization
+
+  - name: Change ownership of bash-git-prompt for lab-user
+    file:
+      path: "{{ item.directory }}/.bash-git-prompt"
+      owner: "{{ item.user }}"
+      group: "{{ item.group }}"
+      recurse: yes
+    loop:
+    - { directory: "/home/lab-user",  user: "lab-user",  group: "users" }
+    tags:
+    - install_bash_customization
+
+  - name: Install .bashrc for lab-user
+    copy:
+      src: ../files/bashrc
+      dest: "{{ item.directory }}/.bashrc"
+      mode: 0644
+      owner: "{{ item.user }}"
+      group: "{{ item.group }}"
+    loop:
+    - { directory: "/home/lab-user",  user: "lab-user",  group: "users" }
+    tags:
+    - install_bash_customization
+
+  - name: Install .bash_profile for lab-user
+    copy:
+      src: ../files/bash_profile
+      dest: "{{ item.directory }}/.bash_profile"
+      mode: 0644
+      owner: "{{ item.user }}"
+      group: "{{ item.group }}"
+    loop:
+    - { directory: "/home/lab-user",  user: "lab-user",  group: "users" }
+    tags:
+    - install_bash_customization
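
A hypothetical smoke test after this role has run; each command below exercises something the tasks above install (qpid-dispatch packages, OpenJDK, Docker, the oc CLI under /usr/local/sbin and Maven under /usr/local/maven).

[source,bash]
----
# Sketch: quick verification of what the amq-client-vm role installed.
qdrouterd --version               # from qpid-dispatch-router
qdstat --help | head -n 1         # from qpid-dispatch-tools
java -version                     # OpenJDK 8/11
sudo systemctl is-active docker   # Docker is enabled and started by the role
/usr/local/sbin/oc version
/usr/local/maven/bin/mvn -version
----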
diff --git a/ansible/roles/amq-client-vm/tasks/main.yml~ b/ansible/roles/amq-client-vm/tasks/main.yml~
new file mode 100644
index 0000000..486c092
--- /dev/null
+++ b/ansible/roles/amq-client-vm/tasks/main.yml~
@@ -0,0 +1,300 @@
+---
+- name: Install Openshift Client VM packages
+  yum:
+    state: present
+    name:
+    - java-1.8.0-openjdk-devel
+    - java-11-openjdk-devel
+    - docker
+    - podman
+    - skopeo
+    - buildah
+  tags:
+  - install_openshift_client_vm_packages
+
+- name: Create docker group
+  become: yes
+  group:
+    name: docker
+    state: present
+
+- name: Create users group
+  become: yes
+  group:
+    name: users
+    state: present
+
+- when:
+  - student_name is defined
+  - student_name != ''
+  name: Add user to docker and users groups
+  become: yes
+  user:
+    name: "{{ student_name }}"
+    append: yes
+    groups: docker,users
+
+- name: Enable and Start Docker
+  service:
+    name: docker
+    state: started
+    enabled: yes
+
+- name: Get the OpenShift CLI for OCP 3
+  become: yes
+  when: ocp_clientvm_oc_version is version_compare('4.0', '<')
+  unarchive:
+    src: "https://mirror.openshift.com/pub/openshift-v3/clients/{{ ocp_clientvm_oc_version }}/linux/oc.tar.gz"
+    remote_src: yes
+    dest: /usr/local/sbin
+    mode: 0775
+    owner: root
+    group: root
+
+- name: Get Command Line Tools for OpenShift 4
+  when: ocp_clientvm_oc_version is version_compare('4.0', '>=')
+  block:
+  - name: Get the OpenShift CLI for OCP 4
+    become: yes
+    unarchive:
+      src: "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/{{ ocp_clientvm_oc_version }}/openshift-client-linux-{{ ocp_clientvm_oc_version }}.tar.gz"
+      remote_src: yes
+      dest: /usr/local/sbin
+      mode: 0775
+      owner: root
+      group: root
+
+  - name: Download OpenShift Do (odo)
+    get_url:
+      url: "https://mirror.openshift.com/pub/openshift-v4/clients/odo/{{ ocp_clientvm_odo_version }}/odo-linux-amd64"
+      dest: /usr/local/sbin/odo
+      owner: root
+      group: root
+      mode: 0775
+    ignore_errors: true
+    tags:
+    - install_openshift_client_vm_packages
+
+  - name: Download OpenShift Helm 3
+    get_url:
+      url: "https://mirror.openshift.com/pub/openshift-v4/clients/helm/{{ ocp_clientvm_helm_version }}/helm-linux-amd64"
+      dest: /usr/local/sbin/helm
+      owner: root
+      group: root
+      mode: 0775
+    ignore_errors: true
+    tags:
+    - install_openshift_client_vm_packages
+
+  - name: Download Tekton CLI (tkn)
+    unarchive:
+      src: "https://github.com/tektoncd/cli/releases/download/v{{ ocp_clientvm_tkn_version }}/tkn_{{ ocp_clientvm_tkn_version }}_Linux_x86_64.tar.gz"
+      remote_src: yes
+      dest: /usr/local/sbin
+      mode: 0775
+      owner: root
+      group: root
+    ignore_errors: true
+    tags:
+    - install_openshift_client_vm_packages
+
+  - name: Download KNative CLI (kn)
+    get_url:
+      url: "https://github.com/knative/client/releases/download/{{ ocp_clientvm_kn_version }}/kn-linux-amd64"
+      dest: /usr/local/sbin/kn
+      owner: root
+      group: root
+      mode: 0775
+    ignore_errors: true
+    tags:
+    - install_openshift_client_vm_packages
+
+- name: Create OpenShift Bash completion file
+  become: yes
+  shell: /usr/local/sbin/oc completion bash >/etc/bash_completion.d/openshift
+
+- name: Install S2I Executable
+  unarchive:
+    src: https://github.com/openshift/source-to-image/releases/download/v1.2.0/source-to-image-v1.2.0-2a579ecd-linux-amd64.tar.gz
+    remote_src: yes
+    dest: /usr/local/sbin
+    owner: root
+    group: root
+    mode: 0755
+    extra_opts:
+      - --strip=1
+  ignore_errors: true
+
+- name: Get community istioctl utility
+  get_url:
+    url: "https://github.com/istio/istio/releases/download/{{ ocp_clientvm_istioctl_version }}/istio-{{ ocp_clientvm_istioctl_version }}-linux.tar.gz"
+    dest: "/tmp/istio-{{ ocp_clientvm_istioctl_version }}-linux.tar.gz"
+  register: r_geturl
+  retries: 5
+  delay: 20
+  until: r_geturl is succeeded
+  tags:
+  - install_openshift_client_vm_packages
+- name: Unarchive file
+  unarchive:
+    remote_src: yes
+    src: "/tmp/istio-{{ ocp_clientvm_istioctl_version }}-linux.tar.gz"
+    dest: /tmp/
+  tags:
+  - install_openshift_client_vm_packages
+- name: Move istioctl to /usr/local/sbin
+  copy:
+    remote_src: yes
+    src: "/tmp/istio-{{ ocp_clientvm_istioctl_version }}/bin/istioctl"
+    dest: /usr/local/sbin/istioctl
+    group: root
+    owner: root
+    mode: 0755
+  tags:
+  - install_openshift_client_vm_packages
+- name: Cleanup Temp Directory
+  file:
+    dest: "/tmp/istio-{{ ocp_clientvm_istioctl_version }}"
+    state: absent
+  tags:
+  - install_openshift_client_vm_packages
+- name: Cleanup downloaded file
+  file:
+    dest: "/tmp/istio-{{ ocp_clientvm_istioctl_version }}-linux.tar.gz"
+    state: absent
+  tags:
+  - install_openshift_client_vm_packages
+
+- name: Create /usr/local/maven directory
+  file:
+    path: /usr/local/maven
+    state: directory
+    owner: root
+    group: root
+    mode: 0775
+
+- name: Download and unarchive Maven Distribution
+  unarchive:
+    src: "https://gpte-public.s3.amazonaws.com/apache-maven-{{ ocp_clientvm_maven_version }}-bin.tar.gz"
+    remote_src: yes
+    dest: /usr/local/maven
+    owner: root
+    group: root
+    extra_opts:
+      - --strip=1
+  register: r_geturl
+  retries: 5
+  delay: 20
+  until: r_geturl is succeeded
+  tags:
+  - install_openshift_client_vm_packages
+
+- name: Download jq-linux64
+  get_url:
+    url: https://gpte-public.s3.amazonaws.com/jq-linux64
+    dest: /usr/local/sbin/jq
+    mode: 0775
+  ignore_errors: true
+  tags:
+  - install_openshift_client_vm_packages
+
+- name: Install bash-git-prompt
+  git:
+    repo: https://github.com/magicmonty/bash-git-prompt.git
+    dest: "{{ item }}/.bash-git-prompt"
+    clone: yes
+  loop:
+  - "/root"
+  - "/home/{{remote_user}}"
+  - "/etc/skel"
+  tags:
+  - install_bash_customization
+
+- name: Change ownership of bash-git-prompt
+  file:
+    path: "{{ item.directory }}/.bash-git-prompt"
+    owner: "{{ item.user }}"
+    group: "{{ item.group }}"
+    recurse: yes
+  loop:
+  - { directory: "/root",                 user: "root",            group: "root" }
+  - { directory: "/home/{{remote_user}}", user: "{{remote_user}}", group: "{{remote_user}}" }
+  - { directory: "/etc/skel",             user: "root",            group: "root" }
+  tags:
+  - install_bash_customization
+
+- name: Install .bashrc
+  copy:
+    src: ../files/bashrc
+    dest: "{{ item.directory }}/.bashrc"
+    mode: 0644
+    owner: "{{ item.user }}"
+    group: "{{ item.group }}"
+  loop:
+  - { directory: "/root",                 user: "root",            group: "root" }
+  - { directory: "/home/{{remote_user}}", user: "{{remote_user}}", group: "{{remote_user}}" }
+  - { directory: "/etc/skel",             user: "root",            group: "root" }
+  tags:
+  - install_bash_customization
+
+- name: Install .bash_profile
+  copy:
+    src: ../files/bash_profile
+    dest: "{{ item.directory }}/.bash_profile"
+    mode: 0644
+    owner: "{{ item.user }}"
+    group: "{{ item.group }}"
+  loop:
+  - { directory: "/root",                 user: "root",            group: "root" }
+  - { directory: "/home/{{remote_user}}", user: "{{remote_user}}", group: "{{remote_user}}" }
+  - { directory: "/etc/skel",             user: "root",            group: "root" }
+  tags:
+  - install_bash_customization
+
+- name: Install everything for student user
+  when: install_student_user|d(False)|bool
+  block:
+  - name: Install bash-git-prompt for lab-user
+    git:
+      repo: https://github.com/magicmonty/bash-git-prompt.git
+      dest: "{{ item }}/.bash-git-prompt"
+      clone: yes
+    with_items:
+    - "/home/lab-user"
+    tags:
+    - install_bash_customization
+
+  - name: Change ownership of bash-git-prompt for lab-user
+    file:
+      path: "{{ item.directory }}/.bash-git-prompt"
+      owner: "{{ item.user }}"
+      group: "{{ item.group }}"
+      recurse: yes
+    loop:
+    - { directory: "/home/lab-user",  user: "lab-user",  group: "users" }
+    tags:
+    - install_bash_customization
+
+  - name: Install .bashrc for lab-user
+    copy:
+      src: ../files/bashrc
+      dest: "{{ item.directory }}/.bashrc"
+      mode: 0644
+      owner: "{{ item.user }}"
+      group: "{{ item.group }}"
+    loop:
+    - { directory: "/home/lab-user",  user: "lab-user",  group: "users" }
+    tags:
+    - install_bash_customization
+
+  - name: Install .bash_profile for lab-user
+    copy:
+      src: ../files/bash_profile
+      dest: "{{ item.directory }}/.bash_profile"
+      mode: 0644
+      owner: "{{ item.user }}"
+      group: "{{ item.group }}"
+    loop:
+    - { directory: "/home/lab-user",  user: "lab-user",  group: "users" }
+    tags:
+    - install_bash_customization

--
Gitblit v1.9.3