Tok
2018-11-09 04259f14a042c84a1164402d9ac2009c9db6adf5
Merge branch 'development' of https://github.com/sborenst/ansible_agnostic_deployer into development
3 files deleted
29 files added
45 files modified
4351 lines changed
ansible/cloud_providers/ec2_detect_region_tasks.yml 29
ansible/cloud_providers/ec2_infrastructure_deployment.yml 14
ansible/configs/ans-tower-lab/files/cloud_providers/ec2_cloud_template.j2 2
ansible/configs/ansible-cicd-lab/files/cloud_providers/ec2_cloud_template.j2 2
ansible/configs/archive/auth-playground-lab/files/cloud_providers/ec2_cloud_template.j2 2
ansible/configs/just-some-nodes-example/env_vars.yml 21
ansible/configs/just-some-nodes-example/files/cloud_providers/ec2_cloud_template.j2 287
ansible/configs/just-some-nodes-example/files/cloud_providers/ec2_cloud_template_json.j2 792
ansible/configs/just-some-nodes-example/files/hosts_template.j2 33
ansible/configs/just-some-nodes-example/pre_software.yml 11
ansible/configs/ocp-clientvm/pre_software.yml 2
ansible/configs/ocp-gpu-single-node/env_vars.yml 15
ansible/configs/ocp-gpu-single-node/files/cloud_providers/ec2_cloud_template.j2 2
ansible/configs/ocp-gpu-single-node/files/hosts_template.3.11.16.j2 230
ansible/configs/ocp-gpu-single-node/files/repos_template.j2 4
ansible/configs/ocp-gpu-single-node/post_software.yml 9
ansible/configs/ocp-ha-disconnected-lab/env_vars.yml 11
ansible/configs/ocp-ha-disconnected-lab/files/cloud_providers/ec2_cloud_template.j2 74
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.3.11.16.j2 406
ansible/configs/ocp-ha-disconnected-lab/files/labs_hosts_template.3.11.16.j2 404
ansible/configs/ocp-ha-disconnected-lab/files/prometheus_alerts_rules.yml 68
ansible/configs/ocp-ha-disconnected-lab/files/repos_tempalte_3.11.j2 53
ansible/configs/ocp-ha-disconnected-lab/files/repos_template.j2 23
ansible/configs/ocp-ha-disconnected-lab/post_infra.yml 4
ansible/configs/ocp-ha-disconnected-lab/post_software.yml 46
ansible/configs/ocp-ha-disconnected-lab/pre_software.yml 2
ansible/configs/ocp-ha-lab/files/hosts_template.3.11.16.j2 6
ansible/configs/ocp-ha-lab/files/labs_hosts_template.3.11.16.j2 6
ansible/configs/ocp-workshop/env_vars.yml 1
ansible/configs/ocp-workshop/files/htpasswd.openshift 2
ansible/configs/ocp-workshop/ocp_workloads.yml 2
ansible/configs/simple-multi-cloud-example/files/cloud_providers/ec2_cloud_template.j2 2
ansible/configs/three-tier-app/files/cloud_providers/ec2_cloud_template.j2 2
ansible/configs/three-tier-tower/files/cloud_providers/ec2_cloud_template.j2 2
ansible/destroy.yml 23
ansible/requirements.txt 1
ansible/roles/bastion/tasks/main.yml 6
ansible/roles/common/tasks/packages.yml 3
ansible/roles/host-ocp-nfs/tasks/packages.yml 3
ansible/roles/host-ocp-node/tasks/packages.yml 3
ansible/roles/host-ocp-provisioner/tasks/main.yml 9
ansible/roles/infra-common-ssh-config-generate/defaults/main.yml 3
ansible/roles/infra-common-ssh-config-generate/tasks/main.yml 1
ansible/roles/infra-ec2-template-destroy/tasks/main.yml 3
ansible/roles/infra-ec2-template-generate/README.adoc 42
ansible/roles/infra-ec2-template-generate/defaults/main.yml 183
ansible/roles/infra-ec2-template-generate/tasks/locate_template.yml 15
ansible/roles/infra-ec2-template-generate/tasks/main.yml 18
ansible/roles/infra-ec2-template-generate/templates/cloud_template.j2 273
ansible/roles/infra-ec2-template-generate/templates/region_mapping.j2 73
ansible/roles/ocp-workload-fuse-ignite/defaults/main.yml 6
ansible/roles/ocp-workload-fuse-ignite/readme.adoc 5
ansible/roles/ocp-workload-fuse-ignite/tasks/workload.yml 3
ansible/roles/ocp-workload-iot-demo/defaults/main.yml 2
ansible/roles/ocp-workload-iot-demo/tasks/main.yml 8
ansible/roles/ocp-workload-iot-demo/tasks/pre_workload.yml 9
ansible/roles/ocp-workload-iot-demo/tasks/workload.yml 23
ansible/roles/ocp-workload-vertx-reactica/defaults/main.yml 24
ansible/roles/ocp-workload-vertx-reactica/readme.adoc 74
ansible/roles/ocp-workload-vertx-reactica/tasks/create-dc-svc-and-route.yml 18
ansible/roles/ocp-workload-vertx-reactica/tasks/main.yml 20
ansible/roles/ocp-workload-vertx-reactica/tasks/post_workload.yml 5
ansible/roles/ocp-workload-vertx-reactica/tasks/pre_workload.yml 32
ansible/roles/ocp-workload-vertx-reactica/tasks/remove_workload.yml 29
ansible/roles/ocp-workload-vertx-reactica/tasks/workload.yml 79
ansible/roles/ocp-workload-vertx-reactica/templates/application.yaml 11
ansible/roles/ocp-workload-vertx-reactica/templates/billboard-dc.json 141
ansible/roles/ocp-workload-vertx-reactica/templates/billboard-svc.json 34
ansible/roles/ocp-workload-vertx-reactica/templates/current-line-updater-dc.json 91
ansible/roles/ocp-workload-vertx-reactica/templates/current-line-updater-svc.json 35
ansible/roles/ocp-workload-vertx-reactica/templates/event-generator-dc.json 141
ansible/roles/ocp-workload-vertx-reactica/templates/event-generator-svc.json 35
ansible/roles/ocp-workload-vertx-reactica/templates/event-store-dc.json 81
ansible/roles/ocp-workload-vertx-reactica/templates/event-store-svc.json 31
ansible/roles/ocp-workload-vertx-reactica/templates/queue-length-calculator-dc.json 117
ansible/roles/ocp-workload-vertx-reactica/templates/queue-length-calculator-svc.json 35
scripts/find_ami.sh 34
ansible/cloud_providers/ec2_detect_region_tasks.yml
New file
@@ -0,0 +1,29 @@
---
# if fallback_regions is defined, detect the region
- when: fallback_regions is defined
  block:
    - name: fallback_regions is defined, detect region for AWS
      environment:
        AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
        AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
      command: >-
        aws cloudformation describe-stacks
        --stack-name {{project_tag}} --region {{item}}
      register: cloudformation_detect
      with_items: "{{ [aws_region] + fallback_regions|d([]) }}"
      changed_when: false
      failed_when: false
    - name: Set aws_region_final
      set_fact:
        aws_region_final: "{{item._ansible_item_label}}"
      with_items: "{{cloudformation_detect.results}}"
      loop_control:
        label: "{{item._ansible_item_label|d('unknown')}}"
      when: item.rc == 0
# else just set as the provided aws_region
- name: Set aws_region_final as provided with aws_region
  when: fallback_regions is not defined
  set_fact:
    aws_region_final: "{{aws_region}}"
ansible/cloud_providers/ec2_infrastructure_deployment.yml
@@ -12,15 +12,6 @@
    - step001.1
    - deploy_infrastructure
  tasks:
    # for SSH first access to ec2 instances we always use the key defined in the CloudFormation
    # template by the name {{key_name}}
    # This variable is used when generating ssh config.
    # - name: Get ssh pub key
    #   tags:
    #     - must
    #   set_fact:
    #     ssh_key: "~/.ssh/{{key_name}}.pem"
    - name: Run infra-ec2-template-generate Role
      import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/infra-ec2-template-generate"
@@ -62,6 +53,11 @@
    - create_inventory
    - create_ssh_config
  tasks:
    # Sometimes the infra step is skipped, for example when scaling up a cluster.
    # when step001.1 is skipped, aws_region_final is not defined.
    - when: aws_region_final is not defined
      include_tasks: ec2_detect_region_tasks.yml
    - name: Run infra-ec2-create-inventory Role
      import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/infra-ec2-create-inventory"
ansible/configs/ans-tower-lab/files/cloud_providers/ec2_cloud_template.j2
@@ -3,7 +3,7 @@
  RegionMapping:
    us-east-1:
      RHELAMI: ami-c998b6b2
      WIN2012R2AMI: ami-93118ee9
      WIN2012R2AMI: ami-0dcdd073eeabb0101
    us-east-2:
      RHELAMI: ami-cfdafaaa
      WIN2012R2AMI: ami-72745d17
ansible/configs/ansible-cicd-lab/files/cloud_providers/ec2_cloud_template.j2
@@ -3,7 +3,7 @@
  RegionMapping:
    us-east-1:
      RHELAMI: ami-c998b6b2
      WIN2012R2AMI: ami-93118ee9
      WIN2012R2AMI: ami-0dcdd073eeabb0101
    us-east-2:
      RHELAMI: ami-cfdafaaa
      WIN2012R2AMI: ami-72745d17
ansible/configs/archive/auth-playground-lab/files/cloud_providers/ec2_cloud_template.j2
@@ -3,7 +3,7 @@
  RegionMapping:
    us-east-1:
      RHELAMI: ami-c998b6b2
      WIN2012R2AMI: ami-93118ee9
      WIN2012R2AMI: ami-0dcdd073eeabb0101
    us-east-2:
      RHELAMI: ami-cfdafaaa
      WIN2012R2AMI: ami-72745d17
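
The three RegionMapping hunks above swap in a newer Windows Server AMI for us-east-1. For context, such mapping entries are consumed elsewhere in each template through Fn::FindInMap; a minimal sketch of that lookup (the resource name is hypothetical):

  WinInstance:
    Type: "AWS::EC2::Instance"
    Properties:
      # Resolve the AMI for whichever region the stack is created in
      ImageId:
        Fn::FindInMap:
          - RegionMapping
          - Ref: "AWS::Region"
          - WIN2012R2AMI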
ansible/configs/just-some-nodes-example/env_vars.yml
@@ -9,14 +9,17 @@
  ec2: "t2.medium"
  azure: Standard_A2_V2
bastion_instance_image: RHEL75
node_instance_type:
  ec2: "t2.medium"
  azure: Standard_A2_V2
node_instance_image: RHEL75
# How many do you want for each instance type
node_instance_count: 1
# Environment Instances
instances:
@@ -25,7 +28,10 @@
    unique: true
    public_dns: true
    dns_loadbalancer: false
    flavor: "{{bastion_instance_type}}"
    image: "{{ bastion_instance_image }}"
    flavor:
      ec2: "t2.medium"
      azure: Standard_A2_V2
    tags:
      - key: "AnsibleGroup"
        value: "bastions"
@@ -33,13 +39,20 @@
        value: "linux"
      - key: "instance_filter"
        value: "{{ env_type }}-{{ email }}"
    rootfs_size: 20
    volumes:
      - name: '/dev/sda1'
        size: 20
    security_groups:
      - "BastionSG"
  - name: "node"
    count: "{{node_instance_count}}"
    public_dns: true
    dns_loadbalancer: false
    flavor: "{{bastion_instance_type}}"
    image: "{{ node_instance_image }}"
    flavor:
      ec2: "t2.medium"
      azure: Standard_A2_V2
    tags:
      - key: "AnsibleGroup"
        value: "nodes"
ansible/configs/just-some-nodes-example/files/cloud_providers/ec2_cloud_template.j2
File was deleted
ansible/configs/just-some-nodes-example/files/cloud_providers/ec2_cloud_template_json.j2
File was deleted
ansible/configs/just-some-nodes-example/files/hosts_template.j2
@@ -1,5 +1,4 @@
[3tierapp:vars]
[all:vars]
###########################################################################
### Ansible Vars
###########################################################################
@@ -8,33 +7,9 @@
ansible_ssh_user={{remote_user}}
ansible_ssh_private_key_file="~/.ssh/{{guid}}key.pem"
ansible_ssh_common_args="-o StrictHostKeyChecking=no"
[3tierapp:children]
frontends
apps
appdbs
support
[frontends]
[nodes]
## These are the frontends
{% for host in groups['frontends'] %}
frontend{{loop.index}}.{{chomped_zone_internal_dns}} ansible_ssh_host=frontend{{loop.index}}.{{subdomain_base}}
{% endfor %}
[apps]
## These are the apps
{% for host in groups['apps']  %}
app{{loop.index}}.{{chomped_zone_internal_dns}} ansible_ssh_host=app{{loop.index}}.{{subdomain_base}}
{% endfor %}
[appdbs]
## These are the appdbs
{% for host in groups['appdbs'] %}
appdb{{loop.index}}.{{chomped_zone_internal_dns}} ansible_ssh_host=appdb{{loop.index}}.{{subdomain_base}}
{% endfor %}
## These are the support
[support]
{% for host in groups['support'] %}
support{{loop.index}}.{{chomped_zone_internal_dns}} ansible_ssh_host=support{{loop.index}}.{{subdomain_base}}
{% for host in groups['nodes']|d([]) %}
node{{loop.index}}.{{chomped_zone_internal_dns}} ansible_ssh_host=node{{loop.index}}.{{subdomain_base}}
{% endfor %}
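
For reference, a sketch of what the simplified [nodes] section renders to with node_instance_count=2 (domain names invented for illustration):

[nodes]
node1.example.internal ansible_ssh_host=node1.mydemo.example.com
node2.example.internal ansible_ssh_host=node2.mydemo.example.com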
ansible/configs/just-some-nodes-example/pre_software.yml
@@ -14,9 +14,14 @@
  tags:
    - common_tasks
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories", when: 'repo_method is defined' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/common", when: 'install_common' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key", when: 'set_env_authorized_key' }
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories"
      when: repo_method is defined
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/common"
      when: install_common | bool
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key"
      when: set_env_authorized_key | bool
- name: Configuring Bastion Hosts
  hosts:
ansible/configs/ocp-clientvm/pre_software.yml
@@ -54,8 +54,8 @@
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  roles:
  - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion",              when: 'install_bastion' }
  - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa",  when: 'install_ipa_client' }
  - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-student-user", when: 'install_student_user' }
  - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa",  when: 'install_ipa_client' }
  tags:
  - step004
  - bastion_tasks
ansible/configs/ocp-gpu-single-node/env_vars.yml
@@ -15,7 +15,7 @@
###### OR PASS as "-e" args to ansible-playbook command
### Common Host settings
repo_version: "3.10"
repo_version: "3.11"
repo_method: file # Other Options are: file, satellite and rhn
#If using repo_method: satellite, you must set these values as well.
@@ -48,8 +48,8 @@
install_ipa_client: false
install_lets_encrypt_certificates: false
install_openwhisk: false
install_metrics: true
install_logging: true
install_metrics: false
install_logging: false
install_aws_broker: false
glusterfs_device_name: /dev/xvdc
@@ -145,8 +145,8 @@
cloudapps_suffix: 'apps.{{subdomain_base}}'
## TODO: This should be registered as a variable. Awk for os versions (OCP).
## yum info openshift...
osrelease: "3.10.14"
installer_release: "3.10.21"
osrelease: "3.11.16"
installer_release: "3.11.16"
openshift_master_overwrite_named_certificates: true
timeout: 60
@@ -270,9 +270,8 @@
#user_vols_size: 10Gi
cache_images:
  - "registry.access.redhat.com/jboss-eap-7/eap70-openshift:latest"
  - "registry.access.redhat.com/openshift3/jenkins-2-rhel7:v{{ repo_version }}"
  - "registry.access.redhat.com/openshift3/jenkins-slave-maven-rhel7:v{{ repo_version }}"
  - "docker.io/caffe2ai/caffe2:latest"
  - "docker.io/mirrorgooglecontainers/cuda-vector-add:v0.1"
### CLOUDFORMATIONS vars
ansible/configs/ocp-gpu-single-node/files/cloud_providers/ec2_cloud_template.j2
@@ -4,7 +4,7 @@
Mappings:
  RegionMapping:
    us-east-1:
      RHELAMI: ami-0d70a070
      RHELAMI: ami-06fd194eff2ab1451
  DNSMapping:
    us-east-1:
      domain: "us-east-1.compute.internal"
ansible/configs/ocp-gpu-single-node/files/hosts_template.3.11.16.j2
New file
@@ -0,0 +1,230 @@
#
# /etc/ansible/hosts file for OpenShift Container Platform 3.11.16
#
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_user={{ansible_ssh_user}}
ansible_become=yes
log_path=/root
###########################################################################
### OpenShift Basic Vars
###########################################################################
openshift_deployment_type=openshift-enterprise
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
openshift_image_tag=v{{ osrelease }}
openshift_pkg_version=-{{ osrelease }}
openshift_release={{ osrelease }}
{% if container_runtime == "cri-o" %}
openshift_use_crio=True
openshift_crio_enable_docker_gc=True
openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
{% endif %}
openshift_node_groups=[{'name': 'node-config-all-in-one', 'labels': ['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true', 'node-role.kubernetes.io/compute=true']}]
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
# Deploy Operator Lifecycle Management Tech Preview
openshift_enable_olm=false
##########################################################################
### OpenShift Registries Locations
###########################################################################
oreg_url=registry.redhat.io/openshift3/ose-${component}:${version}
oreg_auth_user={{ redhat_registry_user }}
oreg_auth_password={{ redhat_registry_password }}
# For Operator Framework Images
openshift_additional_registry_credentials=[{'host':'registry.connect.redhat.com','user':'{{ redhat_registry_user}}','password':'{{ redhat_registry_password }}','test_image':'mongodb/enterprise-operator:0.3.2'}]
openshift_examples_modify_imagestreams=true
#{% if install_glusterfs|bool %}
############################################################################
#### OpenShift Container Storage
############################################################################
#
#openshift_master_dynamic_provisioning_enabled=false
#
## CNS storage cluster
## From https://github.com/red-hat-storage/openshift-cic
#openshift_storage_glusterfs_namespace=openshift-storage
#openshift_storage_glusterfs_storageclass=true
#openshift_storage_glusterfs_storageclass_default=true
#
#openshift_storage_glusterfs_block_deploy=true
#openshift_storage_glusterfs_block_host_vol_create=true
#openshift_storage_glusterfs_block_host_vol_size=200
#openshift_storage_glusterfs_block_storageclass=true
#openshift_storage_glusterfs_block_storageclass_default=false
#
## Container image to use for glusterfs pods
#openshift_storage_glusterfs_image="registry.access.redhat.com/rhgs3/rhgs-server-rhel7:{{ glusterfs_image_tag }}"
#
## Container image to use for glusterblock-provisioner pod
#openshift_storage_glusterfs_block_image="registry.access.redhat.com/rhgs3/rhgs-gluster-block-prov-rhel7:{{ glusterfs_image_tag }}"
#
## Container image to use for heketi pods
#openshift_storage_glusterfs_heketi_image="registry.access.redhat.com/rhgs3/rhgs-volmanager-rhel7:{{ glusterfs_image_tag }}"
#{% endif %}
#{% if install_nfs|bool %}
## Set this line to enable NFS
#openshift_enable_unsupported_configurations=True
#{% endif %}
###########################################################################
### OpenShift Cockpit Vars
###########################################################################
# Enable cockpit
osm_use_cockpit=false
osm_cockpit_plugins=['cockpit-kubernetes']
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname={{master_lb_dns}}
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
{% if install_lets_encrypt_certificates|bool %}
openshift_master_named_certificates={{lets_encrypt_openshift_master_named_certificates|to_json}}
{% endif %}
openshift_set_hostname=True
openshift_clock_enabled=True
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" or 'ldap' in install_idms|d([]) %}
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "htpasswd" or 'htpasswd' in install_idms|d([]) %}
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
openshift_master_identity_providers={{identity_providers|to_json}}
{% if admission_plugin_config is defined %}
###########################################################################
### OpenShift admission plugin config
###########################################################################
openshift_master_admission_plugin_config={{admission_plugin_config|to_json}}
{% endif %}
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
####################
# Prometheus Metrics
####################
openshift_cluster_monitoring_operator_install=false
#################
# Cluster metrics
#################
openshift_metrics_install_metrics={{install_metrics}}
#################
# Cluster logging
#################
openshift_logging_install_logging={{ install_logging }}
openshift_logging_install_eventrouter={{ install_logging }}
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
#openshift_hosted_router_replicas={{infranode_instance_count}}
{% if install_lets_encrypt_certificates|bool %}
openshift_hosted_router_certificate={{lets_encrypt_openshift_hosted_router_certificate|to_json}}
{% endif %}
{% if s3user_access_key is defined %}
# Registry AWS S3
# S3 bucket must already exist.
openshift_hosted_registry_storage_kind=object
openshift_hosted_registry_storage_provider=s3
openshift_hosted_registry_storage_s3_accesskey={{ s3user_access_key }}
openshift_hosted_registry_storage_s3_secretkey={{ s3user_secret_access_key }}
openshift_hosted_registry_storage_s3_bucket={{ project_tag }}
openshift_hosted_registry_storage_s3_region={{ aws_region_final|d(aws_region) }}
openshift_hosted_registry_storage_s3_chunksize=26214400
openshift_hosted_registry_storage_s3_rootdirectory=/registry
{% endif %}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=false
template_service_broker_install=false
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=false
ansible_service_broker_local_registry_whitelist=['.*-apb$']
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
masters
etcd
nodes
[masters]
{% for host in groups['bastions'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['bastions'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['bastions'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_group_name="node-config-all-in-one"
{% endfor %}
ansible/configs/ocp-gpu-single-node/files/repos_template.j2
@@ -37,9 +37,9 @@
{% if osrelease is version_compare('3.9', '>=') %}
## Required since OCP 3.9
[rhel-7-server-ansible-2.4-rpms]
[rhel-7-server-ansible-2.6-rpms]
name=Red Hat Enterprise Linux Ansible (RPMs)
baseurl={{own_repo_path}}/rhel-7-server-ansible-2.4-rpms
baseurl={{own_repo_path}}/rhel-7-server-ansible-2.6-rpms
enabled=1
gpgcheck=0
{% endif %}
ansible/configs/ocp-gpu-single-node/post_software.yml
@@ -10,11 +10,16 @@
    - step005.1
    - gpu_install_config
  tasks:
    - name: pre-pull relevant images
      command: docker pull {{ item }}
      with_items:
        - "{{ cache_images }}"
    - name: clone the psap repository onto the bastion
      git:
        repo: 'https://github.com/thoraxe/openshift-psap.git'
        dest: '/root/openshift-psap'
        version: 'ocp-310-0.6'
        version: 'ocp-311-0.7'
        force: yes
        update: yes
@@ -40,7 +45,7 @@
  tasks:
    # if we do something multi-node this will have to change to look for the total number of gpu nodes
    - name: Check that the daemonset is ready
      command: oc get daemonset -n nvidia-device-plugin nvidia-deviceplugin-daemonset -o jsonpath --template='{.status.numberReady}'
      command: oc get daemonset -n kube-system nvidia-deviceplugin-daemonset -o jsonpath --template='{.status.numberReady}'
      register: daemonset_ready_out
      until: 'daemonset_ready_out.stdout | int >= 1'
      retries: 5
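
The until/retries construct here is the standard Ansible polling pattern for waiting on cluster state to converge. A standalone sketch of the same check, with an assumed delay between attempts (the delay value is not from this commit):

    - name: Check that the daemonset is ready
      command: >-
        oc get daemonset -n kube-system nvidia-deviceplugin-daemonset
        -o jsonpath --template='{.status.numberReady}'
      register: daemonset_ready_out
      changed_when: false
      until: daemonset_ready_out.stdout | int >= 1
      retries: 5
      delay: 30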
ansible/configs/ocp-ha-disconnected-lab/env_vars.yml
@@ -38,14 +38,14 @@
user_vols: 200
user_vols_size: 4Gi
master_api_port: 443
osrelease: 3.9.30
osrelease: 3.11.16
openshift_master_overwrite_named_certificates: true
deploy_openshift: true
deploy_openshift_post: true
deploy_env_post: true
install_metrics: true
install_logging: true
ovs_plugin: "subnet" # This can also be set to: "multitenant"
ovs_plugin: "networkpolicy" # This can also be set to: "multitenant"
multi_tenant_setting: "os_sdn_network_plugin_name='redhat/openshift-ovs-{{ovs_plugin}}'"
master_lb_dns: "loadbalancer1.{{subdomain_base}}"
cloudapps_suffix: 'apps.{{subdomain_base}}'
@@ -70,9 +70,11 @@
#### OCP IMPLEMENTATION LAB
################################################################################
repo_version: '3.9'
repo_version: '3.11'
cloudapps_dns: '*.apps.{{subdomain_base}}.'
master_public_dns: "loadbalancer.{{subdomain_base}}."
install_ansible_version: "{{ '2.4' if repo_version is version_compare('3.11', '<')  else '2.6' }}"
################################################################################
#### Common host variables
@@ -94,6 +96,7 @@
  - rhel-7-server-rpms
  - rhel-7-server-extras-rpms
  - rhel-7-server-ose-{{repo_version}}-rpms
  - rhel-7-server-ansible-{{install_ansible_version}}-rpms
use_subscription_manager: false
use_own_repos: true
@@ -272,7 +275,7 @@
      - name: MoshPublic
        description: "Public Mosh Access for bastions"
        from_port: 60000
        to_port: 60001
        to_port: 61000
        protocol: udp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
ansible/configs/ocp-ha-disconnected-lab/files/cloud_providers/ec2_cloud_template.j2
@@ -75,6 +75,7 @@
      {% else %}
      RHELAMI: ami-a789ffcb
      {% endif %}
  DNSMapping:
    us-east-1:
      domain: "us-east-1.compute.internal"
@@ -103,7 +104,7 @@
  Vpc:
    Type: "AWS::EC2::VPC"
    Properties:
      CidrBlock: "{{vpcid_cidr_block}}"
      CidrBlock: "192.199.0.0/16"
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
@@ -140,20 +141,21 @@
      RouteTableId:
        Ref: VpcRouteTable
{% for subnet in subnets %}
  {{subnet['name']}}:
  PublicSubnet:
    Type: "AWS::EC2::Subnet"
    DependsOn:
      - Vpc
    Properties:
      CidrBlock: "{{subnet['cidr']}}"
      CidrBlock: "192.199.0.0/24"
      Tags:
        - Key: Name
          Value: "{{project_tag}}"
        - Key: Hostlication
          Value:
            Ref: "AWS::StackId"
      MapPublicIpOnLaunch: true
      VpcId:
        Ref: Vpc
{% endfor %}
  PublicSubnetRTA:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
@@ -163,8 +165,7 @@
      SubnetId:
        Ref: PublicSubnet
{% for security_group in security_groups %}
  {{security_group['name']}}:
  HostSG:
    Type: "AWS::EC2::SecurityGroup"
    Properties:
      GroupDescription: Host
@@ -172,34 +173,31 @@
        Ref: Vpc
      Tags:
        - Key: Name
          Value: "{{security_group['name']}}"
{% endfor %}
          Value: host_sg
{% for security_group in security_groups %}
{% for rule in security_group['rules'] %}
  {{security_group['name']}}{{rule['name']}}:
    Type: "AWS::EC2::SecurityGroup{{rule['rule_type']}}"
  HostUDPPorts:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
     GroupId:
       Fn::GetAtt:
         - "{{security_group['name']}}"
         - GroupId
     IpProtocol: {{rule['protocol']}}
     FromPort: {{rule['from_port']}}
     ToPort: {{rule['to_port']}}
{% if rule['cidr'] is defined %}
     CidrIp: "{{rule['cidr']}}"
{% endif  %}
{% if rule['from_group'] is defined %}
     SourceSecurityGroupId:
       Fn::GetAtt:
        - "{{rule['from_group']}}"
        - GroupId
{% endif  %}
{% endfor %}
{% endfor %}
      GroupId:
        Fn::GetAtt:
          - HostSG
          - GroupId
      IpProtocol: udp
      FromPort: 0
      ToPort: 65535
      CidrIp: "0.0.0.0/0"
  HostTCPPorts:
    Type: "AWS::EC2::SecurityGroupIngress"
    Properties:
      GroupId:
        Fn::GetAtt:
          - HostSG
          - GroupId
      IpProtocol: tcp
      FromPort: 0
      ToPort: 65535
      CidrIp: "0.0.0.0/0"
  zoneinternalidns:
    Type: "AWS::Route53::HostedZone"
@@ -216,8 +214,8 @@
  CloudDNS:
    Type: AWS::Route53::RecordSetGroup
    DependsOn:
{% for c in range(1,(loadbalancer_instance_count|int)+1) %}
      - "loadbalancer{{loop.index}}EIP"
{% for c in range(1,(infranode_instance_count|int)+1) %}
      - "infranode{{loop.index}}EIP"
{% endfor %}
    Properties:
      HostedZoneId: "{{HostedZoneId}}"
@@ -226,9 +224,9 @@
          Type: A
          TTL: 900
          ResourceRecords:
{% for c in range(1,(loadbalancer_instance_count|int)+1) %}
{% for c in range(1,(infranode_instance_count|int)+1) %}
            - Fn::GetAtt:
                - loadbalancer{{loop.index}}
                - infranode{{loop.index}}
                - PublicIp
{% endfor %}
@@ -270,10 +268,10 @@
{% endif %}
      SecurityGroupIds:
        - "Fn::GetAtt":
          - {{instance['security_group']}}
          - HostSG
          - GroupId
      SubnetId:
        Ref: {{instance['subnet']}}
        Ref: PublicSubnet
      Tags:
{% if instance['unique'] | d(false) | bool %}
        - Key: Name
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.3.11.16.j2
New file
@@ -0,0 +1,406 @@
#
# ansible inventory for OpenShift Container Platform  3.11.16
# AgnosticD ansible-config: ocp-ha-lab
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_user={{ansible_ssh_user}}
ansible_become=yes
###########################################################################
### OpenShift Basic Vars
###########################################################################
openshift_deployment_type=openshift-enterprise
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# OpenShift Version:
# If you modify the openshift_image_tag or the openshift_pkg_version variables after the cluster is set up, then an upgrade can be triggered, resulting in downtime.
# If openshift_image_tag is set, its value is used for all hosts in system container environments, even those that have another version installed.
# If openshift_pkg_version is set, its value is used for all hosts in RPM-based environments, even those that have another version installed.
# Use this variable to specify a container image tag to install or configure.
openshift_image_tag=v{{ osrelease }}
# Use this variable to specify an RPM version to install or configure.
openshift_pkg_version=-{{ osrelease }}
openshift_release={{ osrelease }}
{% if container_runtime == "cri-o" %}
openshift_use_crio=True
openshift_crio_enable_docker_gc=True
openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
{% endif %}
# Node Groups
openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true','runtime={{container_runtime}}']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true','runtime={{container_runtime}}']}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true','runtime={{container_runtime}}'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. -> These  need to go into the above
# openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
# Deploy Operator Lifecycle Manager Tech Preview
openshift_enable_olm=true
###########################################################################
### OpenShift Registries Locations
###########################################################################
#oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version}
oreg_url=isolated1.{{ guid }}.internal:5000/openshift3/ose-${component}:${version}
#oreg_auth_user={{ redhat_registry_user }}
#oreg_auth_password={{ redhat_registry_password }}
openshift_docker_insecure_registries=isolated1.{{ guid }}.internal:5000
# For Operator Framework Images
openshift_additional_registry_credentials=[{'host':'registry.connect.redhat.com','user':'{{ redhat_registry_user }}','password':'{{ redhat_registry_password }}','test_image':'mongodb/enterprise-operator:0.3.2'}]
openshift_examples_modify_imagestreams=true
{% if install_glusterfs|bool %}
###########################################################################
### OpenShift Container Storage
###########################################################################
openshift_master_dynamic_provisioning_enabled=True
# CNS storage cluster
# From https://github.com/red-hat-storage/openshift-cic
openshift_storage_glusterfs_namespace=openshift-storage
openshift_storage_glusterfs_storageclass=true
openshift_storage_glusterfs_storageclass_default=false
openshift_storage_glusterfs_block_deploy=true
openshift_storage_glusterfs_block_host_vol_create=true
openshift_storage_glusterfs_block_host_vol_size=200
openshift_storage_glusterfs_block_storageclass=true
openshift_storage_glusterfs_block_storageclass_default=true
# Container image to use for glusterfs pods
openshift_storage_glusterfs_image="registry.access.redhat.com/rhgs3/rhgs-server-rhel7:{{ glusterfs_image_tag }}"
# Container image to use for glusterblock-provisioner pod
openshift_storage_glusterfs_block_image="registry.access.redhat.com/rhgs3/rhgs-gluster-block-prov-rhel7:{{ glusterfs_image_tag }}"
# Container image to use for heketi pods
openshift_storage_glusterfs_heketi_image="registry.access.redhat.com/rhgs3/rhgs-volmanager-rhel7:{{ glusterfs_image_tag }}"
# GlusterFS version
#  Knowledgebase
#   https://access.redhat.com/solutions/3617551
#  Bugzilla
#   https://bugzilla.redhat.com/show_bug.cgi?id=1631057
#  Complete OpenShift GlusterFS Configuration README
#   https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_storage_glusterfs
openshift_storage_glusterfs_version=v3.10
openshift_storage_glusterfs_block_version=v3.10
openshift_storage_glusterfs_s3_version=v3.10
openshift_storage_glusterfs_heketi_version=v3.10
# openshift_storage_glusterfs_registry_version=v3.10
# openshift_storage_glusterfs_registry_block_version=v3.10
# openshift_storage_glusterfs_registry_s3_version=v3.10
# openshift_storage_glusterfs_registry_heketi_version=v3.10
{% endif %}
{% if install_nfs|bool %}
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
{% endif %}
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
#Default:  openshift_master_cluster_method=native
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
# Audit log
# openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
# ocp-ha-lab
# AWS Autoscaler
#openshift_master_bootstrap_auto_approve=false
# This variable is a cluster identifier unique to the AWS Availability Zone. Using this avoids potential issues in Amazon Web Services (AWS) with multiple zones or multiple clusters.
#openshift_clusterid
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
# os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
# LDAP AND HTPASSWD Authentication (download ipa-ca.crt first)
#openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'},{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
# Just LDAP
#openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}]
# Just HTPASSWD
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
# LDAP and HTPASSWD dependencies
openshift_master_htpasswd_file=/root/htpasswd.openshift
#openshift_master_ldap_ca_file=/root/ipa-ca.crt
{% if admission_plugin_config is defined %}
###########################################################################
### OpenShift admission plugin config
###########################################################################
openshift_master_admission_plugin_config={{admission_plugin_config|to_json}}
{% endif %}
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
#########################
# Prometheus Metrics
#########################
openshift_hosted_prometheus_deploy=true
openshift_prometheus_namespace=openshift-metrics
openshift_prometheus_node_selector={"node-role.kubernetes.io/infra":"true"}
openshift_cluster_monitoring_operator_install=true
{% if install_glusterfs|bool %}
openshift_cluster_monitoring_operator_prometheus_storage_capacity=20Gi
openshift_cluster_monitoring_operator_alertmanager_storage_capacity=2Gi
openshift_cluster_monitoring_operator_prometheus_storage_enabled=True
openshift_cluster_monitoring_operator_alertmanager_storage_enabled=True
# The next two will be enabled in 3.11.z
# will use default storage class until then
# so set the block storage class as default
# openshift_cluster_monitoring_operator_prometheus_storage_class_name='glusterfs-storage-block'
# openshift_cluster_monitoring_operator_alertmanager_storage_class_name='glusterfs-storage-block'
{% endif %}
########################
# Cluster Metrics
########################
openshift_metrics_install_metrics={{install_metrics}}
{% if install_nfs|bool and not install_glusterfs|bool %}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_pvc_storage_class_name=''
{% endif %}
{% if install_glusterfs|bool %}
openshift_metrics_cassandra_storage_type=dynamic
openshift_metrics_cassandra_pvc_storage_class_name='glusterfs-storage-block'
{% endif %}
openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/infra": "true"}
# Store Metrics for 2 days
openshift_metrics_duration=2
# Suggested Quotas and limits for Prometheus components:
openshift_prometheus_memory_requests=2Gi
openshift_prometheus_cpu_requests=750m
openshift_prometheus_memory_limit=2Gi
openshift_prometheus_cpu_limit=750m
openshift_prometheus_alertmanager_memory_requests=300Mi
openshift_prometheus_alertmanager_cpu_requests=200m
openshift_prometheus_alertmanager_memory_limit=300Mi
openshift_prometheus_alertmanager_cpu_limit=200m
openshift_prometheus_alertbuffer_memory_requests=300Mi
openshift_prometheus_alertbuffer_cpu_requests=200m
openshift_prometheus_alertbuffer_memory_limit=300Mi
openshift_prometheus_alertbuffer_cpu_limit=200m
{# The following file will need to be copied over to the bastion before deployment
# There is an example in ocp-workshop/files
# openshift_prometheus_additional_rules_file=/root/prometheus_alerts_rules.yml #}
# Grafana
openshift_grafana_node_selector={"node-role.kubernetes.io/infra":"true"}
openshift_grafana_storage_type=pvc
openshift_grafana_pvc_size=2Gi
openshift_grafana_node_exporter=true
{% if install_glusterfs|bool %}
openshift_grafana_sc_name=glusterfs-storage
{% endif %}
########################
# Cluster Logging
########################
openshift_logging_install_logging={{install_logging}}
openshift_logging_install_eventrouter={{install_logging}}
{% if install_nfs|bool and not install_glusterfs|bool %}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
openshift_logging_es_pvc_storage_class_name=''
{% endif %}
{% if install_glusterfs|bool %}
openshift_logging_es_pvc_dynamic=true
openshift_logging_es_pvc_size=20Gi
openshift_logging_es_pvc_storage_class_name='glusterfs-storage-block'
{% endif %}
openshift_logging_es_memory_limit=8Gi
openshift_logging_es_cluster_size=1
openshift_logging_curator_default_days=2
openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_eventrouter_nodeselector={"node-role.kubernetes.io/infra": "true"}
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
# default selectors for router and registry services
# openshift_router_selector='node-role.kubernetes.io/infra=true'
# openshift_registry_selector='node-role.kubernetes.io/infra=true'
openshift_hosted_router_replicas={{infranode_instance_count}}
# openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
{% if install_glusterfs|bool %}
openshift_hosted_registry_storage_kind=glusterfs
openshift_hosted_registry_storage_volume_size=10Gi
openshift_hosted_registry_selector="node-role.kubernetes.io/infra=true"
{% endif %}
{% if install_nfs|bool %}
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
{% endif %}
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
# default=true
openshift_enable_service_catalog=true
# default=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
# default=true
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
###########################################################################
### OpenShift Hosts
###########################################################################
# openshift_node_labels DEPRECATED
# openshift_node_problem_detector_install
[OSEv3:children]
lb
masters
etcd
nodes
{% if install_nfs|bool %}
nfs
{% endif %}
{% if install_glusterfs|bool %}
glusterfs
{% endif %}
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[masters]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-master'
{% endfor %}
## These are infranodes
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-infra'
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute'
{% endfor %}
{% if install_glusterfs|bool %}
## These are OCS nodes
{% for host in groups['support']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute'
{% endfor %}
{% endif %}
{% if install_nfs|bool %}
[nfs]
{% for host in [groups['support']|sort|first] %}
{{ hostvars[host].internaldns }}
{% endfor %}
{% endif %}
{% if install_glusterfs|bool %}
[glusterfs]
{% for host in groups['support']|sort %}
{{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
{% endif %}
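
Once rendered to /etc/ansible/hosts on the bastion, an inventory like this is consumed by the stock openshift-ansible 3.11 playbooks. A sketch of the usual invocation (paths are the openshift-ansible RPM defaults, not something this commit configures):

ansible-playbook -i /etc/ansible/hosts /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml
ansible-playbook -i /etc/ansible/hosts /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml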
ansible/configs/ocp-ha-disconnected-lab/files/labs_hosts_template.3.11.16.j2
New file
@@ -0,0 +1,404 @@
#
# ansible inventory for OpenShift Container Platform  3.11.16
# AgnosticD ansible-config: ocp-ha-lab
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_user={{ansible_ssh_user}}
ansible_become=yes
###########################################################################
### OpenShift Basic Vars
###########################################################################
openshift_deployment_type=openshift-enterprise
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# OpenShift Version:
# If you modify the openshift_image_tag or the openshift_pkg_version variables after the cluster is set up, then an upgrade can be triggered, resulting in downtime.
# If openshift_image_tag is set, its value is used for all hosts in system container environments, even those that have another version installed.
# If openshift_pkg_version is set, its value is used for all hosts in RPM-based environments, even those that have another version installed.
# Use this variable to specify a container image tag to install or configure.
openshift_image_tag=
# Use this variable to specify an RPM version to install or configure.
openshift_pkg_version=
openshift_release=
{% if container_runtime == "cri-o" %}
openshift_use_crio=
openshift_crio_enable_docker_gc=
openshift_crio_docker_gc_node_selector=
{% endif %}
# Node Groups
openshift_node_groups=
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. -> These  need to go into the above
# openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
# Deploy Operator Lifecycle Manager Tech Preview
openshift_enable_olm=
###########################################################################
### OpenShift Registries Locations
###########################################################################
#oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version}
oreg_url=
oreg_auth_user=
oreg_auth_password=
# For Operator Framework Images
openshift_additional_registry_credentials=
openshift_examples_modify_imagestreams=
{% if install_glusterfs|bool %}
###########################################################################
### OpenShift Container Storage
###########################################################################
openshift_master_dynamic_provisioning_enabled=
# CNS storage cluster
# From https://github.com/red-hat-storage/openshift-cic
openshift_storage_glusterfs_namespace=
openshift_storage_glusterfs_storageclass=
openshift_storage_glusterfs_storageclass_default=
openshift_storage_glusterfs_block_deploy=
openshift_storage_glusterfs_block_host_vol_create=
openshift_storage_glusterfs_block_host_vol_size=
openshift_storage_glusterfs_block_storageclass=
openshift_storage_glusterfs_block_storageclass_default=
# Container image to use for glusterfs pods
openshift_storage_glusterfs_image=
# Container image to use for glusterblock-provisioner pod
openshift_storage_glusterfs_block_image=
# Container image to use for heketi pods
openshift_storage_glusterfs_heketi_image=
# GlusterFS version
#  Knowledgebase
#   https://access.redhat.com/solutions/3617551
#  Bugzilla
#   https://bugzilla.redhat.com/show_bug.cgi?id=1631057
#  Complete OpenShift GlusterFS Configuration README
#   https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_storage_glusterfs
openshift_storage_glusterfs_version=
openshift_storage_glusterfs_block_version=
openshift_storage_glusterfs_s3_version=
openshift_storage_glusterfs_heketi_version=
# openshift_storage_glusterfs_registry_version=v3.10
# openshift_storage_glusterfs_registry_block_version=v3.10
# openshift_storage_glusterfs_registry_s3_version=v3.10
# openshift_storage_glusterfs_registry_heketi_version=v3.10
{% endif %}
{% if install_nfs|bool %}
# Set this line to enable NFS
openshift_enable_unsupported_configurations=
{% endif %}
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port=
openshift_master_console_port=
#Default:  openshift_master_cluster_method=native
openshift_master_cluster_hostname=
openshift_master_cluster_public_hostname=
openshift_master_default_subdomain=
#openshift_master_ca_certificate=
openshift_master_overwrite_named_certificates=
# Audit log
# openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
# ocp-ha-lab
# AWS Autoscaler
#openshift_master_bootstrap_auto_approve=false
# This variable is a cluster identifier unique to the AWS Availability Zone. Using this avoids potential issues in Amazon Web Services (AWS) with multiple zones or multiple clusters.
#openshift_clusterid
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
# os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy'
{{multi_tenant_setting}}
###########################################################################
### OpenShift Authentication Vars
###########################################################################
# LDAP AND HTPASSWD Authentication (download ipa-ca.crt first)
#openshift_master_identity_providers=
# Just LDAP
#openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}]
# Just HTPASSWD
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
# LDAP and HTPASSWD dependencies
openshift_master_htpasswd_file=/root/htpasswd.openshift
#openshift_master_ldap_ca_file=/root/ipa-ca.crt
{% if admission_plugin_config is defined %}
###########################################################################
### OpenShift admission plugin config
###########################################################################
openshift_master_admission_plugin_config={{admission_plugin_config|to_json}}
{% endif %}
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
#########################
# Prometheus Metrics
#########################
openshift_hosted_prometheus_deploy=
openshift_prometheus_namespace=
openshift_prometheus_node_selector=
openshift_cluster_monitoring_operator_install=
{% if install_glusterfs|bool %}
openshift_cluster_monitoring_operator_prometheus_storage_capacity=20Gi
openshift_cluster_monitoring_operator_alertmanager_storage_capacity=2Gi
openshift_cluster_monitoring_operator_prometheus_storage_enabled=True
openshift_cluster_monitoring_operator_alertmanager_storage_enabled=True
# The next two will be enabled in 3.11.z
# will use default storage class until then
# so set the block storage class as default
# openshift_cluster_monitoring_operator_prometheus_storage_class_name='glusterfs-storage-block'
# openshift_cluster_monitoring_operator_alertmanager_storage_class_name='glusterfs-storage-block'
{% endif %}
########################
# Cluster Metrics
########################
openshift_metrics_install_metrics=
{% if install_nfs|bool and not install_glusterfs|bool %}
openshift_metrics_storage_kind=
openshift_metrics_storage_access_modes=
openshift_metrics_storage_nfs_directory=
openshift_metrics_storage_nfs_options=
openshift_metrics_storage_volume_name=
openshift_metrics_storage_volume_size=
openshift_metrics_storage_labels=
openshift_metrics_cassandra_pvc_storage_class_name=
{% endif %}
{% if install_glusterfs|bool %}
openshift_metrics_cassandra_storage_type=
openshift_metrics_cassandra_pvc_storage_class_name=
{% endif %}
openshift_metrics_hawkular_nodeselector=
openshift_metrics_cassandra_nodeselector=
openshift_metrics_heapster_nodeselector=
# Store Metrics for 2 days
openshift_metrics_duration=2
# Suggested Quotas and limits for Prometheus components:
openshift_prometheus_memory_requests=2Gi
openshift_prometheus_cpu_requests=750m
openshift_prometheus_memory_limit=2Gi
openshift_prometheus_cpu_limit=750m
openshift_prometheus_alertmanager_memory_requests=300Mi
openshift_prometheus_alertmanager_cpu_requests=200m
openshift_prometheus_alertmanager_memory_limit=300Mi
openshift_prometheus_alertmanager_cpu_limit=200m
openshift_prometheus_alertbuffer_memory_requests=300Mi
openshift_prometheus_alertbuffer_cpu_requests=200m
openshift_prometheus_alertbuffer_memory_limit=300Mi
openshift_prometheus_alertbuffer_cpu_limit=200m
{# The following file will need to be copied over to the bastion before deployment
# There is an example in ocp-workshop/files
# openshift_prometheus_additional_rules_file=/root/prometheus_alerts_rules.yml #}
# Grafana
openshift_grafana_node_selector=
openshift_grafana_storage_type=
openshift_grafana_pvc_size=
openshift_grafana_node_exporter=
{% if install_glusterfs|bool %}
openshift_grafana_sc_name=glusterfs-storage
{% endif %}
########################
# Cluster Logging
########################
openshift_logging_install_logging=
openshift_logging_install_eventrouter=
{% if install_nfs|bool and not install_glusterfs|bool %}
openshift_logging_storage_kind=
openshift_logging_storage_access_modes=
openshift_logging_storage_nfs_directory=
openshift_logging_storage_nfs_options=
openshift_logging_storage_volume_name=
openshift_logging_storage_volume_size=
openshift_logging_storage_labels=
openshift_logging_es_pvc_storage_class_name=
{% endif %}
{% if install_glusterfs|bool %}
openshift_logging_es_pvc_dynamic=true
openshift_logging_es_pvc_size=20Gi
openshift_logging_es_pvc_storage_class_name='glusterfs-storage-block'
{% endif %}
openshift_logging_es_memory_limit=8Gi
openshift_logging_es_cluster_size=1
openshift_logging_curator_default_days=2
openshift_logging_kibana_nodeselector=
openshift_logging_curator_nodeselector=
openshift_logging_es_nodeselector=
openshift_logging_eventrouter_nodeselector=
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
# default selectors for router and registry services
# openshift_router_selector='node-role.kubernetes.io/infra=true'
# openshift_registry_selector='node-role.kubernetes.io/infra=true'
openshift_hosted_router_replicas=
# openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
{% if install_glusterfs|bool %}
openshift_hosted_registry_storage_kind=glusterfs
openshift_hosted_registry_storage_volume_size=10Gi
openshift_hosted_registry_selector="node-role.kubernetes.io/infra=true"
{% endif %}
{% if install_nfs|bool %}
openshift_hosted_registry_storage_kind=
openshift_hosted_registry_storage_access_modes=
openshift_hosted_registry_storage_nfs_directory=
openshift_hosted_registry_storage_nfs_options=
openshift_hosted_registry_storage_volume_name=
openshift_hosted_registry_storage_volume_size=
{% endif %}
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
# default=true
openshift_enable_service_catalog=
# default=true
template_service_broker_install=
openshift_template_service_broker_namespaces=
# default=true
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
###########################################################################
### OpenShift Hosts
###########################################################################
# openshift_node_labels DEPRECATED
# openshift_node_problem_detector_install
[OSEv3:children]
lb
masters
etcd
nodes
{% if install_nfs|bool %}
nfs
{% endif %}
{% if install_glusterfs|bool %}
glusterfs
{% endif %}
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }}
{% endfor %}
[masters]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[etcd]
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-master' openshift_node_problem_detector_install=true
{% endfor %}
## These are infranodes
{% for host in groups['infranodes']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-infra' openshift_node_problem_detector_install=true
{% endfor %}
## These are regular nodes
{% for host in groups['nodes']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute' openshift_node_problem_detector_install=true
{% endfor %}
{% if install_glusterfs|bool %}
## These are OCS nodes
{% for host in groups['support']|sort %}
{{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute' openshift_node_problem_detector_install=true
{% endfor %}
{% endif %}
{% if install_nfs|bool %}
[nfs]
{% for host in [groups['support']|sort|first] %}
{{ hostvars[host].internaldns }}
{% endfor %}
{% endif %}
{% if install_glusterfs|bool %}
[glusterfs]
{% for host in groups['support']|sort %}
{{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]'
{% endfor %}
{% endif %}
ansible/configs/ocp-ha-disconnected-lab/files/prometheus_alerts_rules.yml
New file
@@ -0,0 +1,68 @@
groups:
- name: etcd-rules
  interval: 10s # defaults to global interval
  rules:
  - alert: "Node Down"
    expr: up{job="kubernetes-nodes"} == 0
    annotations:
      component: "ContainerNode"
      severity: "HIGH"
      message: "Node {{$labels.instance}} is down"
  - alert: "Lost ETCD"
    expr: up{job="etcd"} == 0
    annotations:
      component: "ETCD"
      severity: "HIGH"
      message: "ETCD {{$labels.instance}} is down"
  - alert: "Time drift"
    expr: sqrt((scalar(avg(node_time{job="kubernetes-nodes-exporter"})) - node_time{job="kubernetes-nodes-exporter"} )^2) > 60
    for: 30s
    annotations:
      component: "NTP"
      severity: "HIGH"
      message: "Node {{$labels.instance}} has time drift bigger than 60 from average time"
- name: scheduler-rules
  interval: 10s # defaults to global interval
  rules:
  - alert: "Scheduler node1"
    expr: ( (sum(kubelet_running_pod_count{instance=~"^node.*"}) / ((count(node_time) - 6))) * 2 ) < (sum(kubelet_running_pod_count{instance=~"^node1.*"}))
    annotations:
      component: "Scheduler"
      severity: "HIGH"
      message: "Node node1.example.com has more pods than average"
  - alert: "Scheduler node2"
    expr: ( (sum(kubelet_running_pod_count{instance=~"^node.*"}) / ((count(node_time) - 6))) *2 ) < (sum(kubelet_running_pod_count{instance=~"^node2.*"}))
    annotations:
      component: "Scheduler"
      severity: "HIGH"
      message: "Node node2.example.com has more pods than average"
  - alert: "Scheduler node3"
    expr: ( (sum(kubelet_running_pod_count{instance=~"^node.*"}) / ((count(node_time) - 6))) *2 ) < (sum(kubelet_running_pod_count{instance=~"^node3.*"}))
    annotations:
      component: "Scheduler"
      severity: "HIGH"
      message: "Node node3.example.com has more pods than average"
  - alert: "Builds Failing"
    expr: sum(openshift_build_total{phase=~"Failed|Error"}) > 10
    annotations:
      component: "OpenShift Builds"
      severity: "HIGH"
      message: "There is a high volume of builds failing"
  - alert: "Registry storage"
    expr: (avg(kubelet_volume_stats_used_bytes{persistentvolumeclaim="registry-claim"}) * 100) / avg(kubelet_volume_stats_capacity_bytes{persistentvolumeclaim="registry-claim"}) > 80
    annotations:
      component: "Registry Storage"
      severity: "MEDIUM"
      message: "Storage limit reached more than 80% "
  - alert: "Registry storage"
    expr: (avg(kubelet_volume_stats_used_bytes{persistentvolumeclaim="registry-claim"}) * 100) / avg(kubelet_volume_stats_capacity_bytes{persistentvolumeclaim="registry-claim"}) > 95
    annotations:
      component: "Registry Storage"
      severity: "HIGH"
      message: "Storage limit reached more than 95% "
  - alert: "DNS Errors"
    expr: changes(node_dnsmasq_sync_error_count_total[2m]) >=1
    annotations:
      component: "dnsmasq"
      severity: "HIGH"
message: "DNS errors detected. Check grafana for more details"
ansible/configs/ocp-ha-disconnected-lab/files/repos_tempalte_3.11.j2
New file
@@ -0,0 +1,53 @@
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7
baseurl={{own_repo_path}}/rhel-7-server-rpms
enabled=1
gpgcheck=0
[rhel-7-server-rh-common-rpms]
name=Red Hat Enterprise Linux 7 Common
baseurl={{own_repo_path}}/rhel-7-server-rh-common-rpms
enabled=1
gpgcheck=0
[rhel-7-server-extras-rpms]
name=Red Hat Enterprise Linux 7 Extras
baseurl={{own_repo_path}}/rhel-7-server-extras-rpms
enabled=1
gpgcheck=0
[rhel-7-server-optional-rpms]
name=Red Hat Enterprise Linux 7 Optional
baseurl={{own_repo_path}}/rhel-7-server-optional-rpms
enabled=1
gpgcheck=0
[rhel-7-server-ose-{{repo_version}}-rpms]
name=Red Hat Enterprise Linux 7 OSE {{repo_version}}
baseurl={{own_repo_path}}/rhel-7-server-ose-{{repo_version}}-rpms
enabled=1
gpgcheck=0
{% if osrelease is version_compare('3.10', '<=') %}
[rhel-7-fast-datapath-rpms]
name=Red Hat Enterprise Linux Fast Datapath (RHEL 7 Server) (RPMs)
baseurl={{own_repo_path}}/rhel-7-fast-datapath-rpms
enabled=1
gpgcheck=0
{% endif %}
{% if osrelease is version_compare('3.10', '>=') %}
## Required since OCP 3.10
[rh-gluster-3-client-for-rhel-7-server-rpms]
name=Red Hat Enterprise Linux GlusterFS Client (RPMs)
baseurl={{own_repo_path}}/rh-gluster-3-client-for-rhel-7-server-rpms
enabled=1
gpgcheck=0
{% endif %}
[rhel-7-server-ansible-{{install_ansible_version}}-rpms]
name=Red Hat Enterprise Linux Ansible (RPMs)
baseurl={{own_repo_path}}/rhel-7-server-ansible-{{install_ansible_version}}-rpms
enabled=1
gpgcheck=0
ansible/configs/ocp-ha-disconnected-lab/files/repos_template.j2
@@ -1,44 +1,43 @@
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}rhel-7-server-rpms
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}/rhel-7-server-rpms
enabled=1
gpgcheck=0
[rhel-7-server-rh-common-rpms]
name=Red Hat Enterprise Linux 7 Common
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}rhel-7-server-rh-common-rpms
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}/rhel-7-server-rh-common-rpms
enabled=1
gpgcheck=0
[rhel-7-server-extras-rpms]
name=Red Hat Enterprise Linux 7 Extras
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}rhel-7-server-extras-rpms
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}/rhel-7-server-extras-rpms
enabled=1
gpgcheck=0
[rhel-7-server-optional-rpms]
name=Red Hat Enterprise Linux 7 Optional
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}rhel-7-server-optional-rpms
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}/rhel-7-server-optional-rpms
enabled=1
gpgcheck=0
[rhel-7-server-ose-{{repo_version}}-rpms]
name=Red Hat Enterprise Linux 7 OSE {{repo_version}}
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}rhel-7-server-ose-{{repo_version}}-rpms
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}/rhel-7-server-ose-{{repo_version}}-rpms
enabled=1
gpgcheck=0
{% if osrelease is version_compare('3.10', '<=') %}
[rhel-7-fast-datapath-rpms]
name=Red Hat Enterprise Linux 7 Fast Datapath
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}rhel-7-fast-datapath-rpms
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}/rhel-7-fast-datapath-rpms
enabled=1
gpgcheck=0
{% endif %}
{% if osrelease is version_compare('3.9', '>=') %}
## Required since OCP 3.9
[rhel-7-server-ansible-2.4-rpms]
[rhel-7-server-ansible-{{ install_ansible_version }}-rpms]
name=Red Hat Enterprise Linux Ansible (RPMs)
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}rhel-7-server-ansible-2.4-rpms
baseurl=http://{{hostvars[groups['isolated'][0]].internaldns}}{{ own_repo_path | urlsplit('path') }}/rhel-7-server-ansible-{{ install_ansible_version }}-rpms
enabled=1
gpgcheck=0
{% endif %}
ansible/configs/ocp-ha-disconnected-lab/post_infra.yml
@@ -3,8 +3,8 @@
  connection: local
  become: false
  vars_files:
    - "./env_vars.yml"
    - "./env_secret_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - step002
    - post_infrastructure
ansible/configs/ocp-ha-disconnected-lab/post_software.yml
@@ -1,4 +1,4 @@
#vim: set ft=ansible:
# vim: set ft=ansible:
---
- name: Step 005 - Post Software deployment
  hosts: localhost
@@ -10,10 +10,20 @@
  tags:
    - step005
  tasks:
    - name: Generate /etc/ansible/hosts file with lab hosts template
    - name: Generate /etc/ansible/hosts file with lab inventory template
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/labs_hosts_template.j2"
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/labs_hosts_template.{{ osrelease }}.j2"
        dest: "{{ ANSIBLE_REPO_PATH }}/workdir/labs_hosts-{{ env_type }}-{{ guid }}"
- name: Run openshift host provision on the bastion
  gather_facts: False
  become: yes
  hosts:
    - bastions
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  roles:
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/host-ocp-provisioner"
- name: Configure NFS host for user-vols if required
  hosts: support
@@ -40,9 +50,16 @@
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Move complete inventory file to preserve directory.
      shell: mv /etc/ansible/hosts /var/preserve/
    - name: Copy complete inventory file to bastion /var/preserve/hosts
      copy:
        src: "{{ ANSIBLE_REPO_PATH }}/workdir/hosts-{{ env_type }}-{{ guid }}"
        dest: /var/preserve/hosts
      tags: preserve_complete_ansible_inventory
    - name: copy prometheus rules file to bastion
      copy:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/prometheus_alerts_rules.yml"
        dest: /root/prometheus_alerts_rules.yml
    - name: Copy over ansible hosts file, lab version
      copy:
@@ -53,11 +70,11 @@
        - overwrite_hosts_with_lab_hosts
    # sssd bug, fixed by restart
    - name: restart sssd
      service:
        name: sssd
        state: restarted
      when: install_ipa_client
    # - name: restart sssd
    #   service:
    #     name: sssd
    #     state: restarted
    #   when: install_ipa_client
    ## Create PVs for uservols if required
    - name: get nfs Hostname
      set_fact:
@@ -74,10 +91,10 @@
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/pvs.j2"
        dest: "/root/pvs-{{ env_type }}-{{ guid }}.yml"
      tags: [ gen_pv_file ]
      when: pv_list.0 is defined
      tags:
        - openshift_nfs_config
        - gen_pv_file
    - set_fact:
        pv_size: "{{user_vols_size}}"
        persistentVolumeReclaimPolicy: Recycle
@@ -95,17 +112,20 @@
        - openshift_nfs_config
    - shell: 'oc create -f /root/pvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/pvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
      when: pv_list.0 is defined
      tags:
        - openshift_nfs_config
        - create_user_pv
    - shell: 'oc create -f /root/userpvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/userpvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
        - openshift_nfs_config
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa"
      when: install_ipa_client|bool
- name: PostSoftware flight-check
  hosts: localhost
  connection: local
ansible/configs/ocp-ha-disconnected-lab/pre_software.yml
@@ -71,7 +71,7 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  roles:
    -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion", when: 'install_bastion' }
    -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa", when: 'install_ipa_client' }
      # -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa", when: 'install_ipa_client' }
  tags:
    - step004
    - bastion_tasks
ansible/configs/ocp-ha-lab/files/hosts_template.3.11.16.j2
@@ -150,18 +150,18 @@
# LDAP AND HTPASSWD Authentication (download ipa-ca.crt first)
openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'},{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
#openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'},{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
# Just LDAP
#openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}]
# Just HTPASSWD
#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
# LDAP and HTPASSWD dependencies
openshift_master_htpasswd_file=/root/htpasswd.openshift
openshift_master_ldap_ca_file=/root/ipa-ca.crt
#openshift_master_ldap_ca_file=/root/ipa-ca.crt
{% if admission_plugin_config is defined %}
###########################################################################
ansible/configs/ocp-ha-lab/files/labs_hosts_template.3.11.16.j2
@@ -150,18 +150,18 @@
# LDAP AND HTPASSWD Authentication (download ipa-ca.crt first)
openshift_master_identity_providers=
#openshift_master_identity_providers=
# Just LDAP
#openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}]
# Just HTPASSWD
#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
# LDAP and HTPASSWD dependencies
openshift_master_htpasswd_file=/root/htpasswd.openshift
openshift_master_ldap_ca_file=/root/ipa-ca.crt
#openshift_master_ldap_ca_file=/root/ipa-ca.crt
{% if admission_plugin_config is defined %}
###########################################################################
ansible/configs/ocp-workshop/env_vars.yml
@@ -288,6 +288,7 @@
  - sos
  - psacct
  - iotop
  - rsync
rhel_repos:
  - rhel-7-server-rpms
ansible/configs/ocp-workshop/files/htpasswd.openshift
@@ -1,6 +1,6 @@
andrew:$apr1$dZPb2ECf$ercevOFO5znrynUfUj4tb/
karla:$apr1$FQx2mX4c$eJc21GuVZWNg1ULF8I2G31
{{admin_user|d('opentlc-mgr')}}:{{admin_password_hash|d('$apr1$glFN48wz$dR9w94PGiQL8qZXcXGd0L0')}}
{% for i in range(0, (user_count|int) + 1) %}
{% for i in range(0, [ (user_count|int), 200 ] | max + 1) %}
user{{i}}:{{user_password_hash|d('$apr1$FmrTsuSa$yducoDpvYq0KEV0ErmwpA1')}}
{% endfor %}
ansible/configs/ocp-workshop/ocp_workloads.yml
@@ -28,6 +28,7 @@
          name: "{{ ANSIBLE_REPO_PATH }}/roles/{{ workload_loop_var }}"
        vars:
          ocp_username: "{{ admin_user }}"
          become_override: yes
          ACTION: "provision"
        loop: "{{ infra_workloads.split(',')|list }}"
        loop_control:
@@ -62,6 +63,7 @@
        name: "{{ ANSIBLE_REPO_PATH }}/roles/{{ workload_loop_var[1] }}"
      vars:
        ocp_username: "user{{ workload_loop_var[0] }}"
        become_override: yes
        ACTION: "provision"
      loop: "{{ users | product(student_workloads.split(','))|list }}"
      loop_control:
ansible/configs/simple-multi-cloud-example/files/cloud_providers/ec2_cloud_template.j2
@@ -3,7 +3,7 @@
  RegionMapping:
    us-east-1:
      RHELAMI: ami-c998b6b2
      WIN2012R2AMI: ami-93118ee9
      WIN2012R2AMI: ami-0dcdd073eeabb0101
    us-east-2:
      RHELAMI: ami-cfdafaaa
      WIN2012R2AMI: ami-72745d17
ansible/configs/three-tier-app/files/cloud_providers/ec2_cloud_template.j2
@@ -3,7 +3,7 @@
  RegionMapping:
    us-east-1:
      RHELAMI: ami-c998b6b2
      WIN2012R2AMI: ami-93118ee9
      WIN2012R2AMI: ami-0dcdd073eeabb0101
    us-east-2:
      RHELAMI: ami-cfdafaaa
      WIN2012R2AMI: ami-72745d17
ansible/configs/three-tier-tower/files/cloud_providers/ec2_cloud_template.j2
@@ -3,7 +3,7 @@
  RegionMapping:
    us-east-1:
      RHELAMI: ami-c998b6b2
      WIN2012R2AMI: ami-93118ee9
      WIN2012R2AMI: ami-0dcdd073eeabb0101
    us-east-2:
      RHELAMI: ami-cfdafaaa
      WIN2012R2AMI: ami-72745d17
ansible/destroy.yml
@@ -14,26 +14,7 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - when:
        - cloud_provider == 'ec2'
        - fallback_regions is defined
      block:
        - name: Detect region for AWS
          environment:
            AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
            AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
          command: >-
            aws cloudformation describe-stacks
            --stack-name {{project_tag}} --region {{item}}
          register: cloudformation_detect
          with_items: "{{ [aws_region] + fallback_regions|d([]) }}"
          ignore_errors: yes
        - set_fact:
            aws_region_final: "{{item._ansible_item_label}}"
          with_items: "{{cloudformation_detect.results}}"
          loop_control:
            label: "{{item._ansible_item_label|d('unknown')}}"
          when: item.failed == false
    - when: cloud_provider == 'ec2'
      include_tasks: cloud_providers/ec2_detect_region_tasks.yml
- import_playbook: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/destroy_env.yml"
ansible/requirements.txt
@@ -2,3 +2,4 @@
boto
boto3
ansible
awscli
ansible/roles/bastion/tasks/main.yml
@@ -48,13 +48,11 @@
  tags:
    - copy_sshconfig_file
- name: Install python2-winrm and python-requests
- name: Install python-requests
  ignore_errors: yes
  become: true
  yum:
    name: "{{ item }}"
  with_items:
    - python2-winrm
    name:
    - python-requests
# - name: Ensure that iptables service is installed
ansible/roles/common/tasks/packages.yml
@@ -3,9 +3,8 @@
######################### Install Basic Packages
- name: install basic packages
  yum:
    name: "{{ item }}"
    name: "{{common_packages}}"
    state: present
  with_items: "{{common_packages}}"
  register: yumr
  until: yumr is succeeded
  retries: 10
ansible/roles/host-ocp-nfs/tasks/packages.yml
@@ -2,9 +2,8 @@
---
- name: install nfs packages
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    name:
    - lvm2
    - bind-utils
    - nfs-utils
ansible/roles/host-ocp-node/tasks/packages.yml
@@ -2,9 +2,8 @@
---
- name: install openshift_node packages
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    name:
      - vim
      - tmux
      - ntp
ansible/roles/host-ocp-provisioner/tasks/main.yml
@@ -69,9 +69,8 @@
- name: Install Host packages for releases before 3.10
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    name:
      - "atomic-openshift-clients-{{ osrelease }}"
      - "atomic-openshift-utils"
      - "atomic-openshift-{{ osrelease }}"
@@ -83,9 +82,8 @@
- name: Install Host packages for releases starting with 3.10
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    name:
      - "atomic-openshift-clients-{{ osrelease }}"
      - "atomic-openshift-{{ osrelease }}"
      - "openshift-ansible-{{ osrelease }}"
@@ -99,9 +97,8 @@
- name: Install Host packages for releases starting with 3.10
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    name:
      - "atomic-openshift-clients-{{ osrelease }}"
      - "atomic-openshift-{{ osrelease }}"
      - "openshift-ansible-3.10.47"
ansible/roles/infra-common-ssh-config-generate/defaults/main.yml
@@ -1,2 +1,5 @@
---
default_key_name: ~/.ssh/{{key_name}}.pem
remote_user_map:
  ec2: ec2-user
  azure: azure
ansible/roles/infra-common-ssh-config-generate/tasks/main.yml
@@ -14,6 +14,7 @@
    # define the communication method to all the hosts in the deployment
    ansible_ssh_config: "{{ ANSIBLE_REPO_PATH }}/workdir/{{ env_type }}_{{ guid }}_ssh_conf"
    ansible_known_host: "{{ ANSIBLE_REPO_PATH }}/workdir/{{ env_type }}_{{ guid }}_ssh_known_hosts"
    remote_user: "{{ remote_user_map[cloud_provider] }}"
- name: Delete dedicated known_host if it exists (new deployment)
  file:
ansible/roles/infra-ec2-template-destroy/tasks/main.yml
@@ -1,5 +1,4 @@
---
- name: Destroy cloudformation template
  cloudformation:
    aws_access_key: "{{ aws_access_key_id }}"
ansible/roles/infra-ec2-template-generate/README.adoc
New file
@@ -0,0 +1,42 @@
= CloudFormation template generation
When creating a config, you can either provide your own template under `configs/{{env_type}}/files/cloud_providers/ec2_cloud_template.j2`, or use the default template.
If you choose to use the default template, you can still customize it to your needs.
Have a look at the link:../../configs/just-some-nodes-example/env_vars.yml[env_vars.yml] file from the link:../../configs/just-some-nodes-example/[just-some-nodes-example] config.
=== Current status and features of the default template
Resources created by the default template:
* Instances
** [x] ElasticIP
** [x] Storage
* DNS
** Mandatory Variables:
*** `subdomain_base`: the AWS top-level Zone to update, for example `.openshift.opentlc.com`
** [ ] TODO: Public DNS Zone
*** [ ] TODO: Allow route53User to access only the delegated zone
** [x] Internal DNS Zone
** [x] Cloud DNS load balancer records
* [x] SecurityGroup
** [x] SecurityGroup rules
* [x] Subnet
* [ ] TODO: S3 Buckets
** [ ] TODO: Create a bucket and a user that has access to it
== Security Groups
The default template comes with two default security groups:
* DefaultSG (allows all connections from the bastion)
* BastionSG (allows SSH and Mosh connections from the internet)
Have a look at link:defaults/main.yml[defaults/main.yml].
You can add more security groups using the `security_groups` variable.
Then you can pick the security group**s** you want for any of the instances defined in the `instances` list.
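For illustration, here is a minimal sketch of what this could look like in an `env_vars.yml`. The group name, ports, and instance entry below are hypothetical examples, not defaults shipped with this role:

----
# Hypothetical extra security group opening HTTP/HTTPS to the world
security_groups:
  - name: WebSG
    rules:
      - name: HTTPPublic
        description: "Public HTTP/HTTPS access"
        from_port: 80
        to_port: 443
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress

# Hypothetical instance entry attaching both the custom and a built-in group
instances:
  - name: frontend
    count: 2
    public_dns: true
    dns_loadbalancer: true
    flavor:
      ec2: "t2.medium"
    tags:
      - key: AnsibleGroup
        value: frontends
    security_groups:
      - WebSG
      - DefaultSG
----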
ansible/roles/infra-ec2-template-generate/defaults/main.yml
New file
@@ -0,0 +1,183 @@
---
# TODO: split into different files. Possible since 2.6 thanks to this commit:
# https://github.com/ansible/ansible/commit/95ce00ff00e2907e89f4106747abaf9d4e4ccd7f
cloudformation_retries: 1
aws_comment: "Created by Ansible Agnostic Deployer"
#################################################################
# VPC
#################################################################
aws_vpc_cidr: 192.199.0.0/16
aws_vpc_name: "{{ subdomain_base }}"
#################################################################
# Subnet
#################################################################
aws_public_subnet_cidr: 192.199.0.0/24
#################################################################
# Security Groups
#################################################################
default_security_groups:
  - name: BastionSG
    rules:
      - name: MoshPublic
        description: "Public Mosh Access for bastions"
        from_port: 60000
        to_port: 61000
        protocol: udp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: SSHPublic
        description: "Public Access for bastions"
        from_port: 22
        to_port: 22
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
  - name: DefaultSG
    rules:
      - name: FromBastionTCP
        description: "Allow everything from Bastion"
        from_port: 0
        to_port: 65535
        protocol: tcp
        from_group: BastionSG
        rule_type: Ingress
      - name: FromBastionUDP
        description: "Allow everything from Bastion"
        from_port: 0
        to_port: 65535
        protocol: udp
        from_group: BastionSG
        rule_type: Ingress
# Environment specific security groups
security_groups: []
#################################################################
# DNS zones
#################################################################
# The top level DNS zone you want to update
aws_dns_zone_root: "{{ subdomain_base_suffix | regex_replace('^\\.', '') }}."
# Private DNS Zone dedicated to the environment
aws_dns_zone_private: "{{ guid }}.internal."
aws_dns_zone_private_chomped: "{{ guid }}.internal"
aws_dns_ttl_public: 900
aws_dns_ttl_private: 3600
#################################################################
# Volumes
#################################################################
# default size for /dev/sda1
aws_default_rootfs_size: 50
# default Volume type
aws_default_volume_type: gp2
#################################################################
# Images
#################################################################
aws_default_image: RHEL75
aws_ami_region_mapping:
  ap-south-1:
    RHEL75GOLD: ami-0c6ec6988a8df3acc # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-952879fa # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-0aa4317636e016115 # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-5c2f7e33 # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-03087b28576b37511 # Windows_Server-2012-R2_RTM-English-Deep-Learning-2018.09.15
  eu-west-3:
    RHEL75GOLD: ami-0a0167e3e2a1d1d9b # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-69d06614 # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-039346fed23fb53ad # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-66d0661b # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-080d3d8def91e4f44 # Windows_Server-2012-R2_RTM-English-Deep-Learning-2018.09.15
  eu-west-2:
    RHEL75GOLD: ami-01f010afd559615b9 # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-55bca731 # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-0ac5fae255ddac6f6 # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-b4b3a8d0 # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-0699aabf510a3f2f8 # Windows_Server-2012-R2_RTM-English-Deep-Learning-2018.09.15
  eu-west-1:
    RHEL75GOLD: ami-0c51cd02617947143 # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-b7b6d3ce # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-092acf20fad7f7795 # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-ccb7d2b5 # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-0370c806916d2a17f # Windows_Server-2012-R2_RTM-English-64Bit-HyperV-2018.09.15
  ap-northeast-2:
    RHEL75GOLD: ami-031161cd3182e012a # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-9fa201f1 # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-0d226f15e3e46903a # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-90a201fe # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-02ee840e33e7c2244 # Windows_Server-2012-R2_RTM-English-Deep-Learning-2018.09.15
  ap-northeast-1:
    RHEL75GOLD: ami-0bf9ecb88f5719e17 # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-ccf695aa # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-0b517025bb2f0ad4a # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-36f09350 # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-08e310c576c077de1 # Windows_Server-2012-R2_RTM-English-Deep-Learning-2018.09.15
  sa-east-1:
    RHEL75GOLD: ami-93b693ff # RHEL-7.5_HVM_GA-JBEAP-7.1.2-20180629-x86_64-1-Access2-GP2
    RHEL74GOLD: ami-dc014db0 # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-01c56172f9db84834 # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-1a064a76 # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-079f7c686ba77c199 # Windows_Server-2012-R2_RTM-English-Deep-Learning-2018.09.15
  ca-central-1:
    RHEL75GOLD: ami-e320ad87 # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-2a00854e # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-fc20ad98 # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-71018415 # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-020be7519c99e8064 # Windows_Server-2012-R2_RTM-English-Deep-Learning-2018.09.15
  ap-southeast-1:
    RHEL75GOLD: ami-0f44e46fa59e902b6 # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-8193eafd # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-09fc728e15fbfb535 # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-8d90e9f1 # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-0906117a55c70d5e7 # Windows_Server-2012-R2_RTM-English-Deep-Learning-2018.09.15
  ap-southeast-2:
    RHEL75GOLD: ami-0066ef2f9c72fad96 # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-dd9668bf # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-0a61d60bde3940420 # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-e1996783 # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-09fb195e1d6625aab # Windows_Server-2012-R2_RTM-English-Deep-Learning-2018.09.15
  eu-central-1:
    RHEL75GOLD: ami-07d3f0705bebac978 # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-b3d841dc # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-05ba90b00a46d83fa # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-8a21bfe5 # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-07b8613a03480d559 # Windows_Server-2012-R2_RTM-English-64Bit-HyperV-2018.09.15
  us-east-1:
    RHEL75GOLD: ami-0456c465f72bd0c95 # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-c5a094bf # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-0394fe9914b475c53 # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-76a3970c # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-003027603b9c132b3 # Windows_Server-2012-R2_RTM-Japanese-64Bit-SQL_2016_SP1_Express-2018.09.15
  us-east-2:
    RHEL75GOLD: ami-04268981d7c33264d # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-9db09af8 # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-0376bbf9be9eac670 # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-cebe94ab # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-02fa46b8e1a36044b # Windows_Server-2012-R2_RTM-English-P3-2018.09.15
  us-west-1:
    RHEL75GOLD: ami-02574210e91c38419 # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-6f030e0f # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-0bdc0ff10fb093057 # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-c8020fa8 # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-0f9c4789993c313f7 # Windows_Server-2012-R2_RTM-English-Deep-Learning-2018.09.15
  us-west-2:
    RHEL75GOLD: ami-0e6bab6682ec471c0 # RHEL-7.5_HVM-20180813-x86_64-0-Access2-GP2
    RHEL74GOLD: ami-c405b8bc # RHEL-7.4_HVM-20180122-x86_64-1-Access2-GP2
    RHEL75: ami-096510cab1b6b2c6d # RHEL-7.5_HVM-20180813-x86_64-0-Hourly2-GP2
    RHEL74: ami-1607ba6e # RHEL-7.4_HVM-20180122-x86_64-1-Hourly2-GP2
    WIN2012R2: ami-0d786d5cc800b2456 # Windows_Server-2012-R2_RTM-English-64Bit-HyperV-2018.09.15
ansible/roles/infra-ec2-template-generate/tasks/locate_template.yml
New file
@@ -0,0 +1,15 @@
---
- name: Check if template exists for the environment
  stat:
    path: "{{ANSIBLE_REPO_PATH}}/configs/{{ env_type }}/files/cloud_providers/{{cloud_provider}}_cloud_template.j2"
  register: stat_local_template
- name: Use CloudFormation template from the environment
  set_fact:
    cloudformation_template_src: "{{ANSIBLE_REPO_PATH}}/configs/{{ env_type }}/files/cloud_providers/{{cloud_provider}}_cloud_template.j2"
  when: stat_local_template.stat.exists
- name: Use the default CloudFormation template
  set_fact:
    cloudformation_template_src: "templates/cloud_template.j2"
  when: not stat_local_template.stat.exists
ansible/roles/infra-ec2-template-generate/tasks/main.yml
@@ -1,7 +1,13 @@
---
- import_tasks: locate_template.yml
- set_fact:
    cloudformation_template: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.{{cloud_provider}}_cloud_template"
- name: AWS Generate CloudFormation Template
  template:
    src: "{{ANSIBLE_REPO_PATH}}/configs/{{ env_type }}/files/cloud_providers/{{cloud_provider}}_cloud_template.j2"
    dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.{{cloud_provider}}_cloud_template"
    src: "{{ cloudformation_template_src }}"
    dest: "{{ cloudformation_template }}"
  tags:
    - aws_infrastructure_deployment
    - gen_cf_template
@@ -9,7 +15,7 @@
######################### Copy CF Template to S3 if too big
- name: Stat CloudFormation template
  stat:
    path: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.{{cloud_provider}}_cloud_template"
    path: "{{ cloudformation_template }}"
  register: stat_template
  tags:
    - aws_infrastructure_deployment
@@ -47,7 +53,7 @@
      aws_s3:
        bucket: "{{bucket_templates}}"
        object: "{{ env_type }}.{{ guid }}.{{cloud_provider}}_cloud_template"
        src: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.{{cloud_provider}}_cloud_template"
        src: "{{ cloudformation_template }}"
        mode: put
######################### Validate CF Template
@@ -60,10 +66,11 @@
  command: >-
    aws cloudformation validate-template
    --region {{ aws_region_final | d(aws_region) | default(region) | default('us-east-1')}}
    --template-body file://../workdir/{{ env_type }}.{{ guid }}.{{cloud_provider}}_cloud_template
    --template-body file://{{ cloudformation_template }}
  changed_when: false
  register: cloudformation_validation
  until: cloudformation_validation is succeeded
  retries: "{{ cloudformation_retries }}"
  delay: 20
  tags:
    - aws_infrastructure_deployment
@@ -82,6 +89,7 @@
  changed_when: false
  register: cloudformation_validation
  until: cloudformation_validation is succeeded
  retries: "{{ cloudformation_retries }}"
  delay: 20
  tags:
    - aws_infrastructure_deployment
ansible/roles/infra-ec2-template-generate/templates/cloud_template.j2
New file
@@ -0,0 +1,273 @@
#jinja2: lstrip_blocks: "True"
---
AWSTemplateFormatVersion: "2010-09-09"
Mappings:
  RegionMapping: {{ aws_ami_region_mapping | to_json }}
Resources:
  Vpc:
    Type: "AWS::EC2::VPC"
    Properties:
      CidrBlock: "{{ aws_vpc_cidr }}"
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
        - Key: Name
          Value: "{{ aws_vpc_name }}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
  VpcInternetGateway:
    Type: "AWS::EC2::InternetGateway"
  VpcRouteTable:
    Type: "AWS::EC2::RouteTable"
    Properties:
      VpcId:
        Ref: Vpc
  VPCRouteInternetGateway:
    DependsOn: VpcGA
    Type: "AWS::EC2::Route"
    Properties:
      GatewayId:
        Ref: VpcInternetGateway
      DestinationCidrBlock: "0.0.0.0/0"
      RouteTableId:
        Ref: VpcRouteTable
  VpcGA:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId:
        Ref: VpcInternetGateway
      VpcId:
        Ref: Vpc
  PublicSubnet:
    Type: "AWS::EC2::Subnet"
    DependsOn:
      - Vpc
    Properties:
    {% if aws_availability_zone is defined %}
      AvailabilityZone: {{ aws_availability_zone }}
    {% endif %}
      CidrBlock: "{{ aws_public_subnet_cidr }}"
      Tags:
        - Key: Name
          Value: "{{project_tag}}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
      MapPublicIpOnLaunch: true
      VpcId:
        Ref: Vpc
  PublicSubnetRTA:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
    Properties:
      RouteTableId:
        Ref: VpcRouteTable
      SubnetId:
        Ref: PublicSubnet
{% for security_group in security_groups|list + default_security_groups|list %}
  {{security_group['name']}}:
    Type: "AWS::EC2::SecurityGroup"
    Properties:
      GroupDescription: Host
      VpcId:
        Ref: Vpc
      Tags:
        - Key: Name
          Value: "{{security_group['name']}}"
{% endfor %}
{% for security_group in default_security_groups|list + security_groups|list %}
{% for rule in security_group.rules %}
  {{security_group['name']}}{{rule['name']}}:
    Type: "AWS::EC2::SecurityGroup{{rule['rule_type']}}"
    Properties:
     GroupId:
       Fn::GetAtt:
         - "{{security_group['name']}}"
         - GroupId
     IpProtocol: {{rule['protocol']}}
     FromPort: {{rule['from_port']}}
     ToPort: {{rule['to_port']}}
  {% if rule['cidr'] is defined %}
     CidrIp: "{{rule['cidr']}}"
  {% endif %}
  {% if rule['from_group'] is defined %}
     SourceSecurityGroupId:
       Fn::GetAtt:
        - "{{rule['from_group']}}"
        - GroupId
  {% endif %}
{% endfor %}
{% endfor %}
  DnsZonePrivate:
    Type: "AWS::Route53::HostedZone"
    Properties:
      Name: "{{ aws_dns_zone_private }}"
      VPCs:
        - VPCId:
            Ref: Vpc
          VPCRegion:
            Ref: "AWS::Region"
      HostedZoneConfig:
        Comment: "{{ aws_comment }}"
{% for instance in instances %}
{% if instance['dns_loadbalancer'] | d(false) | bool
  and not instance['unique'] | d(false) | bool %}
  {{instance['name']}}DnsLoadBalancer:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
    {% for c in range(1, (instance['count']|int)+1) %}
      - {{instance['name']}}{{c}}
      {% if instance['public_dns'] %}
      - {{instance['name']}}{{c}}EIP
      {% endif %}
    {% endfor %}
    Properties:
      HostedZoneName: {{ aws_dns_zone_root }}
      RecordSets:
      - Name: "{{instance['name']}}.{{ guid }}.{{ aws_dns_zone_root }}"
        Type: A
        TTL: {{ aws_dns_ttl_public }}
        ResourceRecords:
{% for c in range(1,(instance['count'] |int)+1) %}
          - "Fn::GetAtt":
            - {{instance['name']}}{{c}}
            - PublicIp
{% endfor %}
{% endif %}
{% for c in range(1,(instance['count'] |int)+1) %}
  {{instance['name']}}{{loop.index}}:
    Type: "AWS::EC2::Instance"
    Properties:
      ImageId:
        Fn::FindInMap:
        - RegionMapping
        - Ref: AWS::Region
        - {{ instance.image | default(aws_default_image) }}
      InstanceType: "{{instance['flavor'][cloud_provider]}}"
      KeyName: "{{instance.key_name | default(key_name)}}"
    {% if instance['UserData'] is defined %}
      {{instance['UserData']}}
    {% endif %}
    {% if instance['security_groups'] is defined %}
      SecurityGroupIds:
      {% for sg in instance.security_groups %}
        - Ref: {{ sg }}
      {% endfor %}
    {% else %}
      SecurityGroupIds:
        - Ref: DefaultSG
    {% endif %}
      SubnetId:
        Ref: PublicSubnet
      Tags:
    {% if instance['unique'] | d(false) | bool %}
        - Key: Name
          Value: {{instance['name']}}
        - Key: internaldns
          Value: {{instance['name']}}.{{aws_dns_zone_private_chomped}}
    {% else %}
        - Key: Name
          Value: {{instance['name']}}{{loop.index}}
        - Key: internaldns
          Value: {{instance['name']}}{{loop.index}}.{{aws_dns_zone_private_chomped}}
    {% endif %}
        - Key: "owner"
          Value: "{{ email | default('unknownuser') }}"
        - Key: "Project"
          Value: "{{project_tag}}"
        - Key: "{{project_tag}}"
          Value: "{{ instance['name'] }}"
    {% for tag in instance['tags'] %}
        - Key: {{tag['key']}}
          Value: {{tag['value']}}
    {% endfor %}
      BlockDeviceMappings:
    {% if '/dev/sda1' not in instance.volumes|d([])|json_query('[].device_name')
      and '/dev/sda1' not in instance.volumes|d([])|json_query('[].name')
%}
        - DeviceName: "/dev/sda1"
          Ebs:
            VolumeSize: "{{ instance['rootfs_size'] | default(aws_default_rootfs_size) }}"
            VolumeType: "{{ aws_default_volume_type }}"
    {% endif %}
    {% for vol in instance.volumes|default([]) if vol.enable|d(true) %}
        - DeviceName: "{{ vol.name | default(vol.device_name) }}"
          Ebs:
          {% if cloud_provider in vol and 'type' in vol[cloud_provider] %}
            VolumeType: "{{ vol[cloud_provider].type }}"
          {% else %}
            VolumeType: "{{ aws_default_volume_type }}"
          {% endif %}
            VolumeSize: "{{ vol.size }}"
    {% endfor %}
  {{instance['name']}}{{loop.index}}InternalDns:
    Type: "AWS::Route53::RecordSetGroup"
    Properties:
      HostedZoneId:
        Ref: DnsZonePrivate
      RecordSets:
    {% if instance['unique'] | d(false) | bool %}
        - Name: "{{instance['name']}}.{{aws_dns_zone_private}}"
    {% else %}
        - Name: "{{instance['name']}}{{loop.index}}.{{aws_dns_zone_private}}"
    {% endif %}
          Type: A
          TTL: {{ aws_dns_ttl_private }}
          ResourceRecords:
            - "Fn::GetAtt":
              - {{instance['name']}}{{loop.index}}
              - PrivateIp
{% if instance['public_dns'] %}
  {{instance['name']}}{{loop.index}}EIP:
    Type: "AWS::EC2::EIP"
    DependsOn:
    - VpcGA
    Properties:
      InstanceId:
        Ref: {{instance['name']}}{{loop.index}}
  {{instance['name']}}{{loop.index}}PublicDns:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
      - {{instance['name']}}{{loop.index}}EIP
    Properties:
      HostedZoneName: "{{ aws_dns_zone_root }}"
      RecordSets:
      {% if instance['unique'] | d(false) | bool %}
        - Name: "{{instance['name']}}.{{subdomain_base}}."
      {% else %}
        - Name: "{{instance['name']}}{{loop.index}}.{{subdomain_base}}."
      {% endif %}
          Type: A
          TTL: {{ aws_dns_ttl_public }}
          ResourceRecords:
          - "Fn::GetAtt":
            - {{instance['name']}}{{loop.index}}
            - PublicIp
{% endif %}
{% endfor %}
{% endfor %}
Outputs:
  Route53internalzoneOutput:
    Description: The ID of the internal route 53 zone
    Value:
      Ref: DnsZonePrivate
ansible/roles/infra-ec2-template-generate/templates/region_mapping.j2
File was deleted
ansible/roles/ocp-workload-fuse-ignite/defaults/main.yml
@@ -22,13 +22,13 @@
quota_secrets: 30
quota_requests_storage: 50Gi
ocp_apps_domain: apps.{{subdomain_base}}
ocp_apps_domain: apps.{{ocp_domain}}
build_status_retries: 20
build_status_delay: 20
deploy_status_retries: 30
deploy_status_delay: 45
deploy_status_retries: 75
deploy_status_delay: 90
POSTGRESQL_MEMORY_LIMIT: 512Mi
PROMETHEUS_MEMORY_LIMIT: 255Mi
ansible/roles/ocp-workload-fuse-ignite/readme.adoc
@@ -9,7 +9,8 @@
WORKLOAD="ocp-workload-fuse-ignite"
SSH_USERNAME="hchin-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
GUID=adm0
GUID=3d5k
OCP_DOMAIN=`oc whoami --show-server | cut -d'.' -f 2,3,4,5 | cut -d':' -f 1`
OCP_USERNAME="developer"
HOST_GUID=na311
POSTGRESQL_MEMORY_LIMIT=512Mi
@@ -24,7 +25,7 @@
                    -e "ocp_username=${OCP_USERNAME}" \
                    -e "ocp_workload=${WORKLOAD}" \
                    -e "guid=${GUID}" \
                    -e "subdomain_base=${HOST_GUID}.openshift.opentlc.com" \
                    -e "ocp_domain=${OCP_DOMAIN}" \
                    -e "ACTION=create"
ansible-playbook -i ${TARGET_HOST}, -c local ./configs/ocp-workloads/ocp-workload.yml \
ansible/roles/ocp-workload-fuse-ignite/tasks/workload.yml
@@ -46,12 +46,13 @@
  shell: |
      oc new-app {{ignite_template_name}} \
      -p ROUTE_HOSTNAME=fuse.{{ocp_project}}.{{ocp_apps_domain}} \
      -p OPENSHIFT_MASTER=https://master.{{subdomain_base}} \
      -p OPENSHIFT_MASTER=https://master.{{ocp_domain}} \
      -p OPENSHIFT_PROJECT={{ocp_project}} \
      -p POSTGRESQL_MEMORY_LIMIT={{POSTGRESQL_MEMORY_LIMIT}} \
      -p PROMETHEUS_MEMORY_LIMIT={{PROMETHEUS_MEMORY_LIMIT}} \
      -p META_MEMORY_LIMIT={{META_MEMORY_LIMIT}} \
      -p SERVER_MEMORY_LIMIT={{SERVER_MEMORY_LIMIT}} \
      -p OPENSHIFT_OAUTH_CLIENT_SECRET=$(oc sa get-token syndesis-oauth-client -n {{ocp_project}}) \
      -p MAX_INTEGRATIONS_PER_USER={{MAX_INTEGRATIONS_PER_USER}} \
      -p IMAGE_STREAM_NAMESPACE={{ocp_project}} \
      -n {{ocp_project}}
ansible/roles/ocp-workload-iot-demo/defaults/main.yml
@@ -1,4 +1,5 @@
---
become_override: false
ocp_username: ccustine-redhat.com
ocp_user_needs_quota: True
@@ -17,6 +18,7 @@
quota_services: 20
quota_secrets: 30
quota_requests_storage: 5Gi
ocp_apps_domain: "{{ cloudapps_suffix | d('unknown') }}"
openssl_self_signed:
  - name: 'apps.iot-dev.openshift.opentlc.com'
ansible/roles/ocp-workload-iot-demo/tasks/main.yml
@@ -1,20 +1,20 @@
---
- name: Running Pre Workload Tasks
  import_tasks: ./pre_workload.yml
  become: false
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  import_tasks: ./workload.yml
  become: false
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  import_tasks: ./post_workload.yml
  become: false
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  import_tasks: ./remove_workload.yml
  become: false
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp-workload-iot-demo/tasks/pre_workload.yml
@@ -28,12 +28,11 @@
  ignore_errors: true
- name: Copy the files used in this role
  synchronize:
    src: "files/"
  copy:
    src: "{{item}}"
    dest: "/tmp/{{guid}}/"
    rsync_opts:
      - "--no-motd"
      - "--exclude=.git,*.qcow2"
  with_fileglob:
    - files/*
- name: pre_workload Tasks Complete
  debug:
ansible/roles/ocp-workload-iot-demo/tasks/workload.yml
@@ -3,6 +3,17 @@
  set_fact:
    ocp_project: "iot-demo-{{guid}}"
- name: define ocp_project (multiple user)
  set_fact:
    ocp_project: "iot-demo-{{guid}}-{{ocp_username}}"
  when:
    - user_count|d(0)|int > 0
    - student_workloads|d("")|length > 0
- name: Set portcheck to a valid result for first use
  shell: echo "false"
  register: portcheck
- name: Check for open MQTT port
  block:
    - name: Wait for port and loop
@@ -14,7 +25,7 @@
        connect_timeout: 2
        timeout: 3
      loop: "{{ range(31883, 31992, 1)|list }}"
      when: (portcheck is undefined) or (portcheck.failed == false)
      when: portcheck.failed == false
      register: portcheck
  rescue:    
    - set_fact:
@@ -22,6 +33,10 @@
    - debug: msg="MQTT Port Assignment is {{ mqtt_port }}"
    # Use to force fail on rescue since we short circuit the failure by handling in rescue
    #- command: /bin/false
- name: Reset portcheck for MQTTS loop
  shell: echo "false"
  register: portcheck
- name: Check for open MQTTS port
  block:
@@ -34,11 +49,11 @@
        connect_timeout: 2
        timeout: 3
      loop: "{{ range(31993, 32102, 1)|list }}"
      when: (portcheck2 is undefined) or (portcheck2.failed == false)
      register: portcheck2
      when: portcheck.failed == false
      register: portcheck
  rescue:    
    - set_fact:
        mqtts_port: "{{ portcheck2.results|selectattr('failed', 'defined')|selectattr('failed')|map(attribute='item')|first}}"
        mqtts_port: "{{ portcheck.results|selectattr('failed', 'defined')|selectattr('failed')|map(attribute='item')|first}}"
    - debug: msg="MQTTS Port Assignment is {{ mqtts_port }}"
    # Use to force fail on rescue since we short circuit the failure by handling in rescue
    #- command: /bin/false
ansible/roles/ocp-workload-vertx-reactica/defaults/main.yml
New file
@@ -0,0 +1,24 @@
---
become_override: false
ocp_user_needs_quota: True
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
quota_requests_cpu: 5
quota_limits_cpu: 10
quota_requests_memory: '8Gi'
quota_limits_memory: '20Gi'
quota_configmaps: 10
quota_pods: 20
quota_persistentvolumeclaims: 20
quota_services: 30
quota_secrets: 30
quota_requests_storage: 50Gi
namespace: "reactica-{{guid}}"
ansible/roles/ocp-workload-vertx-reactica/readme.adoc
New file
@@ -0,0 +1,74 @@
= ocp-workload-Vert.x-Reactica
== Overview
This Ansible role is used to deploy a demo on RHPDS. The instructions below are for developing and testing the playbook. To run the demo, go to https://rhpds.redhat.com and search the catalog for **Reactica Demo**.
=== Purpose
This role creates a reactive demo called Reactica, which showcases an event-based reactive application built with Eclipse Vert.x, Red Hat AMQ and Red Hat DataGrid.
For more details about the demo, see https://github.com/reactica/rhte-demo[https://github.com/reactica/rhte-demo].
=== Deploy a Workload with the `ocp-workload` playbook
To deploy a new version of the demo to an environment, use the following script:
----
#!/bin/sh
HOST_GUID=dev310
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="xxxx-redhat.com"
SSH_USER="opentlc-mgr"
SSH_PRIVATE_KEY="id_rsa"
GUID="XXXX"
WORKLOAD="ocp-workload-vertx-reactica"
ansible-playbook -v -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
              -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
              -e"ansible_ssh_user=${SSH_USER}" \
              -e"ANSIBLE_REPO_PATH=`pwd`" \
              -e"ocp_username=${OCP_USERNAME}" \
              -e"ocp_workload=${WORKLOAD}" \
              -e"guid=${GUID}" \
              -e"ocp_user_needs_quota=true" \
              -e"ocp_master=master.${HOST_GUID}.openshift.opentlc.com" \
              -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
              -e"ACTION=create"
----
IMPORTANT: Replace **HOST_GUID**, **GUID** and **OCP_USERNAME** with your own values, and make sure you have access to the bastion host with your private SSH key. Contact rhpds-admin@redhat.com to request access.
=== Delete the demo
To remove the demo from the shared cluster, run the following script:
----
#!/bin/sh
HOST_GUID=dev310
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="xxxx-redhat.com"
SSH_USER="opentlc-mgr"
SSH_PRIVATE_KEY="id_rsa"
GUID="XXXX"
WORKLOAD="ocp-workload-vertx-reactica"
ansible-playbook -v -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
              -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
              -e"ansible_ssh_user=${SSH_USER}" \
              -e"ANSIBLE_REPO_PATH=`pwd`" \
              -e"ocp_username=${OCP_USERNAME}" \
              -e"ocp_workload=${WORKLOAD}" \
              -e"guid=${GUID}" \
              -e"ocp_user_needs_quota=true" \
              -e"ocp_master=master.${HOST_GUID}.openshift.opentlc.com" \
              -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
              -e"ACTION=remove"
----
IMPORTANT: Replace **HOST_GUID**, **GUID** and **OCP_USERNAME** with your own values, and make sure you have access to the bastion host with your private SSH key. Contact rhpds-admin@redhat.com to request access.
ansible/roles/ocp-workload-vertx-reactica/tasks/create-dc-svc-and-route.yml
New file
@@ -0,0 +1,18 @@
- name: "Copy deployment config for {{service}}"
  template:
    src: "{{service}}-dc.json"
    dest: "/tmp/{{service}}-dc.json"
- name: "Copy service config for {{service}}"
  template:
    src: "{{service}}-svc.json"
    dest: "/tmp/{{service}}-svc.json"
- name: "Deploy deployment config for {{service}}"
  shell: "oc replace --force -f /tmp/{{service}}-dc.json -n {{namespace}}"
- name: "Deploy service config for {{service}}"
  shell: "oc replace --force -f /tmp/{{service}}-svc.json -n {{namespace}}"
- name: "Create route for {{service}} if it doesn't exists"
  shell: "oc get route {{service}} -n {{namespace}} || oc expose svc {{service}} -n {{namespace}}"
ansible/roles/ocp-workload-vertx-reactica/tasks/main.yml
New file
@@ -0,0 +1,20 @@
---
- name: Running Pre Workload Tasks
  import_tasks: ./pre_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  import_tasks: ./workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  import_tasks: ./post_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  import_tasks: ./remove_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp-workload-vertx-reactica/tasks/post_workload.yml
New file
@@ -0,0 +1,5 @@
---
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully"
ansible/roles/ocp-workload-vertx-reactica/tasks/pre_workload.yml
New file
@@ -0,0 +1,32 @@
---
# - name: Add user to developer group (allowed to create projects)
#   shell: "oadm groups add-users {{item}} {{ocp_username}}"
#   register: groupadd_register
#   with_items: "{{ocp_user_groups}}"
#   when: ocp_username is defined and ocp_user_groups is defined
# - name: test that command worked
#   debug:
#     var: groupadd_register
#     verbosity: 2
- name: Create user Quota - clusterresourcequota
  shell: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
        --hard requests.memory="{{quota_requests_memory}}" \
        --hard limits.memory="{{quota_limits_memory}}" \
        --hard configmaps="{{quota_configmaps}}" \
        --hard pods="{{quota_pods}}" \
        --hard persistentvolumeclaims="{{quota_persistentvolumeclaims}}"  \
        --hard services="{{quota_services}}" \
        --hard secrets="{{quota_secrets}}" \
        --hard requests.storage="{{quota_requests_storage}}"
  ignore_errors: true
- name: pre_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully"
ansible/roles/ocp-workload-vertx-reactica/tasks/remove_workload.yml
New file
@@ -0,0 +1,29 @@
---
- name: pre_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
# - name: Remove user from groups {{ocp_user_groups}}
#   shell: oc adm groups remove-users {{item}} {{ocp_username}}
#   with_items: "{{ocp_user_groups}}"
#   ignore_errors: true
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: "oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}"
  ignore_errors: true
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}"
  shell: "oc delete clusterresourcequota clusterquota-{{ocp_username}}"
  ignore_errors: true
- name: Remove user Projects - oc get projects
  shell: "oc get project {{namespace}} && oc delete project {{namespace}} || (echo 'No project with name {{namespace}} exists'; exit 0)"
- name: Make sure we go back to default project
  shell: "oc project default"
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully - Removed"
ansible/roles/ocp-workload-vertx-reactica/tasks/workload.yml
New file
@@ -0,0 +1,79 @@
---
# Project and user administration
- name: "Create project for workload {{namespace}}"
  shell: "oc get project {{namespace}} || oc new-project {{namespace}} --display-name='Reactica'"
- name: "Give user access to the project"
  shell: "oc adm policy add-role-to-user admin {{ocp_username}} -n {{namespace}}"
- name: "Label namespace"
  command: "oc label namespace {{namespace}} AAD='{{guid}}'"
- name: Make sure we go back to default project
  shell: "oc project default"
# ###############       Installing images streams and templates        ###############
- name: Install AMQ ImageStream
  shell: "oc replace --force -f https://raw.githubusercontent.com/jboss-container-images/jboss-amq-7-broker-openshift-image/amq-broker-71/amq-broker-7-image-streams.yaml -n {{namespace}}"
- name: Install DataGrid ImageStream
  shell: "curl -s https://raw.githubusercontent.com/jboss-container-images/jboss-datagrid-7-openshift-image/datagrid72/templates/datagrid72-image-stream.json | sed 's/registry.redhat.io/registry.access.redhat.com/g' | oc replace --force -n {{namespace}} -f -"
  args:
    warn: false
- name: Install AMQ template
  shell: "oc replace --force -f https://github.com/jboss-container-images/jboss-amq-7-broker-openshift-image/raw/amq-broker-71/templates/amq-broker-71-basic.yaml -n {{namespace}}"
- name: Install DataGrid template
  shell: "oc replace --force -f https://raw.githubusercontent.com/jboss-container-images/jboss-datagrid-7-openshift-image/datagrid72/templates/datagrid72-basic.json -n {{namespace}}"
## Deploying AMQ and DataGrid ###
- name: Deploying AMQ
  shell: "oc get dc eventstream-amq -n {{namespace}} || oc new-app --template=amq-broker-71-basic -p APPLICATION_NAME=eventstream -p AMQ_QUEUES=USER_QUEUE,ENTER_EVENT_QUEUE,RIDE_EVENT_QUEUE,QLC_QUEUE,CL_QUEUE -p AMQ_USER=user -p AMQ_PASSWORD=user123 -p AMQ_PROTOCOL=amqp -p IMAGE_STREAM_NAMESPACE={{namespace}} -n {{namespace}}"
- name: Deploying DataGrid
  shell: "oc get dc eventstore-dg -n {{namespace}} || oc new-app --template=datagrid72-basic -p APPLICATION_NAME=eventstore-dg -p CACHE_NAMES=userevents,rideevents,users -p IMAGE_STREAM_NAMESPACE={{namespace}} -n {{namespace}}"
- name: Wait for AMQ to be available
  shell: "oc get pods -n {{namespace}} | grep eventstream-amq | grep -s Running"
  retries: 30
  delay: 10
  register: result
  until: result is succeeded
- name: Wait for DG to be available
  shell: "oc get pods -n {{namespace}} | grep eventstore-dg | grep -s Running"
  retries: 30
  delay: 10
  register: result
  until: result is succeeded
- name: Add the view role to the default service account
  shell: "oc policy add-role-to-user view -z default -n {{namespace}}"
- name: Copy catalog service configmap to known path
  template:
    src: application.yaml
    dest: /tmp/application.yaml
- name: Create the application configuration for the apps
  shell: "oc get configmap reactica-config -n {{namespace}} || oc create configmap reactica-config --from-file=/tmp/application.yaml -n {{namespace}}"
- name: Create services
  include_tasks: create-dc-svc-and-route.yml
  vars:
    service: "{{ item }}"
  with_items:
    - event-store
    - billboard
    - current-line-updater
    - queue-length-calculator
    - event-generator
- name: Make sure we go back to default project
  shell: "oc project default"
ansible/roles/ocp-workload-vertx-reactica/templates/application.yaml
New file
@@ -0,0 +1,11 @@
---
user-simulator:
  period-in-seconds: 5
  jitter-in-seconds: 2
  enabled: false
ride-simulator:
  duration-in-seconds: 30
  users-per-ride: 5
  jitter-in-seconds: 5
  enabled: true
ansible/roles/ocp-workload-vertx-reactica/templates/billboard-dc.json
New file
@@ -0,0 +1,141 @@
{
    "apiVersion": "apps.openshift.io/v1",
    "kind": "DeploymentConfig",
    "metadata": {
        "labels": {
            "app": "billboard",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8",
            "version": "0.1-SNAPSHOT"
        },
        "name": "billboard"
    },
    "spec": {
        "replicas": 1,
        "selector": {
            "app": "billboard",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8"
        },
        "strategy": {
            "activeDeadlineSeconds": 21600,
            "resources": {},
            "rollingParams": {
                "intervalSeconds": 1,
                "maxSurge": "25%",
                "maxUnavailable": "25%",
                "timeoutSeconds": 3600,
                "updatePeriodSeconds": 1
            },
            "type": "Rolling"
        },
        "template": {
            "metadata": {
                "labels": {
                    "app": "billboard",
                    "group": "com.redhat.coderland.reactica",
                    "provider": "fabric8",
                    "version": "0.1-SNAPSHOT"
                }
            },
            "spec": {
                "containers": [
                    {
                        "env": [
                            {
                                "name": "VERTX_CONFIG_PATH",
                                "value": "/deployments/conf/config.yml"
                            },
                            {
                                "name": "KUBERNETES_NAMESPACE",
                                "valueFrom": {
                                    "fieldRef": {
                                        "apiVersion": "v1",
                                        "fieldPath": "metadata.namespace"
                                    }
                                }
                            }
                        ],
                        "image": "quay.io/redhat/reactica-billboard:latest",
                        "imagePullPolicy": "Always",
                        "livenessProbe": {
                            "failureThreshold": 3,
                            "httpGet": {
                                "path": "/health",
                                "port": 8080,
                                "scheme": "HTTP"
                            },
                            "initialDelaySeconds": 180,
                            "periodSeconds": 10,
                            "successThreshold": 1,
                            "timeoutSeconds": 1
                        },
                        "name": "vertx",
                        "ports": [
                            {
                                "containerPort": 8080,
                                "name": "http",
                                "protocol": "TCP"
                            },
                            {
                                "containerPort": 9779,
                                "name": "prometheus",
                                "protocol": "TCP"
                            },
                            {
                                "containerPort": 8778,
                                "name": "jolokia",
                                "protocol": "TCP"
                            }
                        ],
                        "readinessProbe": {
                            "failureThreshold": 3,
                            "httpGet": {
                                "path": "/health",
                                "port": 8080,
                                "scheme": "HTTP"
                            },
                            "initialDelaySeconds": 10,
                            "periodSeconds": 10,
                            "successThreshold": 1,
                            "timeoutSeconds": 1
                        },
                        "resources": {},
                        "securityContext": {
                            "privileged": false
                        },
                        "terminationMessagePath": "/dev/termination-log",
                        "terminationMessagePolicy": "File",
                        "volumeMounts": [
                            {
                                "mountPath": "/deployments/conf",
                                "name": "config"
                            }
                        ]
                    }
                ],
                "dnsPolicy": "ClusterFirst",
                "restartPolicy": "Always",
                "schedulerName": "default-scheduler",
                "securityContext": {},
                "terminationGracePeriodSeconds": 30,
                "volumes": [
                    {
                        "configMap": {
                            "defaultMode": 420,
                            "items": [
                                {
                                    "key": "application.yaml",
                                    "path": "config.yml"
                                }
                            ],
                            "name": "reactica-config",
                            "optional": true
                        },
                        "name": "config"
                    }
                ]
            }
        }
    }
}
ansible/roles/ocp-workload-vertx-reactica/templates/billboard-svc.json
New file
@@ -0,0 +1,34 @@
{
    "apiVersion": "v1",
    "kind": "Service",
    "metadata": {
       "labels": {
            "app": "billboard",
            "expose": "true",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8",
            "version": "0.1-SNAPSHOT"
        },
        "name": "billboard"
    },
    "spec": {
        "ports": [
            {
                "name": "http",
                "port": 8080,
                "protocol": "TCP",
                "targetPort": 8080
            }
        ],
        "selector": {
            "app": "billboard",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8"
        },
        "sessionAffinity": "None",
        "type": "ClusterIP"
    },
    "status": {
        "loadBalancer": {}
    }
}
ansible/roles/ocp-workload-vertx-reactica/templates/current-line-updater-dc.json
New file
@@ -0,0 +1,91 @@
{
    "apiVersion": "apps.openshift.io/v1",
    "kind": "DeploymentConfig",
    "metadata": {
        "labels": {
            "app": "current-line-updater",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8",
            "version": "0.1-SNAPSHOT"
        },
        "name": "current-line-updater"
    },
    "spec": {
        "replicas": 1,
        "selector": {
            "app": "current-line-updater",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8"
        },
        "strategy": {
            "activeDeadlineSeconds": 21600,
            "resources": {},
            "rollingParams": {
                "intervalSeconds": 1,
                "maxSurge": "25%",
                "maxUnavailable": "25%",
                "timeoutSeconds": 3600,
                "updatePeriodSeconds": 1
            },
            "type": "Rolling"
        },
        "template": {
            "metadata": {
                "labels": {
                    "app": "current-line-updater",
                    "group": "com.redhat.coderland.reactica",
                    "provider": "fabric8",
                    "version": "0.1-SNAPSHOT"
                }
            },
            "spec": {
                "containers": [
                    {
                        "env": [
                            {
                                "name": "KUBERNETES_NAMESPACE",
                                "valueFrom": {
                                    "fieldRef": {
                                        "apiVersion": "v1",
                                        "fieldPath": "metadata.namespace"
                                    }
                                }
                            }
                        ],
                        "image": "quay.io/redhat/reactica-current-line-updater:latest",
                        "imagePullPolicy": "Always",
                        "name": "vertx",
                        "ports": [
                            {
                                "containerPort": 8080,
                                "name": "http",
                                "protocol": "TCP"
                            },
                            {
                                "containerPort": 9779,
                                "name": "prometheus",
                                "protocol": "TCP"
                            },
                            {
                                "containerPort": 8778,
                                "name": "jolokia",
                                "protocol": "TCP"
                            }
                        ],
                        "resources": {},
                        "securityContext": {
                            "privileged": false
                        },
                        "terminationMessagePath": "/dev/termination-log",
                        "terminationMessagePolicy": "File"
                    }
                ],
                "dnsPolicy": "ClusterFirst",
                "restartPolicy": "Always",
                "schedulerName": "default-scheduler",
                "securityContext": {},
                "terminationGracePeriodSeconds": 30
            }
        }
    }
}
ansible/roles/ocp-workload-vertx-reactica/templates/current-line-updater-svc.json
New file
@@ -0,0 +1,35 @@
{
    "apiVersion": "v1",
    "kind": "Service",
    "metadata": {
        "labels": {
            "app": "current-line-updater",
            "expose": "true",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8",
            "version": "0.1-SNAPSHOT"
        },
        "name": "current-line-updater"
    },
    "spec": {
        "clusterIP": "172.30.180.45",
        "ports": [
            {
                "name": "http",
                "port": 8080,
                "protocol": "TCP",
                "targetPort": 8080
            }
        ],
        "selector": {
            "app": "current-line-updater",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8"
        },
        "sessionAffinity": "None",
        "type": "ClusterIP"
    },
    "status": {
        "loadBalancer": {}
    }
}
ansible/roles/ocp-workload-vertx-reactica/templates/event-generator-dc.json
New file
@@ -0,0 +1,141 @@
{
    "apiVersion": "apps.openshift.io/v1",
    "kind": "DeploymentConfig",
    "metadata": {
        "labels": {
            "app": "event-generator",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8",
            "version": "0.1-SNAPSHOT"
        },
        "name": "event-generator"
    },
    "spec": {
        "replicas": 1,
        "selector": {
            "app": "event-generator",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8"
        },
        "strategy": {
            "activeDeadlineSeconds": 21600,
            "resources": {},
            "rollingParams": {
                "intervalSeconds": 1,
                "maxSurge": "25%",
                "maxUnavailable": "25%",
                "timeoutSeconds": 3600,
                "updatePeriodSeconds": 1
            },
            "type": "Rolling"
        },
        "template": {
            "metadata": {
                "labels": {
                    "app": "event-generator",
                    "group": "com.redhat.coderland.reactica",
                    "provider": "fabric8",
                    "version": "0.1-SNAPSHOT"
                }
            },
            "spec": {
                "containers": [
                    {
                        "env": [
                            {
                                "name": "VERTX_CONFIG_PATH",
                                "value": "/deployments/conf/config.yml"
                            },
                            {
                                "name": "KUBERNETES_NAMESPACE",
                                "valueFrom": {
                                    "fieldRef": {
                                        "apiVersion": "v1",
                                        "fieldPath": "metadata.namespace"
                                    }
                                }
                            }
                        ],
                        "image": "quay.io/redhat/reactica-event-generator:latest",
                        "imagePullPolicy": "Always",
                        "livenessProbe": {
                            "failureThreshold": 3,
                            "httpGet": {
                                "path": "/health",
                                "port": 8080,
                                "scheme": "HTTP"
                            },
                            "initialDelaySeconds": 180,
                            "periodSeconds": 10,
                            "successThreshold": 1,
                            "timeoutSeconds": 1
                        },
                        "name": "vertx",
                        "ports": [
                            {
                                "containerPort": 8080,
                                "name": "http",
                                "protocol": "TCP"
                            },
                            {
                                "containerPort": 9779,
                                "name": "prometheus",
                                "protocol": "TCP"
                            },
                            {
                                "containerPort": 8778,
                                "name": "jolokia",
                                "protocol": "TCP"
                            }
                        ],
                        "readinessProbe": {
                            "failureThreshold": 3,
                            "httpGet": {
                                "path": "/health",
                                "port": 8080,
                                "scheme": "HTTP"
                            },
                            "initialDelaySeconds": 10,
                            "periodSeconds": 10,
                            "successThreshold": 1,
                            "timeoutSeconds": 1
                        },
                        "resources": {},
                        "securityContext": {
                            "privileged": false
                        },
                        "terminationMessagePath": "/dev/termination-log",
                        "terminationMessagePolicy": "File",
                        "volumeMounts": [
                            {
                                "mountPath": "/deployments/conf",
                                "name": "config"
                            }
                        ]
                    }
                ],
                "dnsPolicy": "ClusterFirst",
                "restartPolicy": "Always",
                "schedulerName": "default-scheduler",
                "securityContext": {},
                "terminationGracePeriodSeconds": 30,
                "volumes": [
                    {
                        "configMap": {
                            "defaultMode": 420,
                            "items": [
                                {
                                    "key": "application.yaml",
                                    "path": "config.yml"
                                }
                            ],
                            "name": "reactica-config",
                            "optional": true
                        },
                        "name": "config"
                    }
                ]
            }
        }
    }
}
ansible/roles/ocp-workload-vertx-reactica/templates/event-generator-svc.json
New file
@@ -0,0 +1,35 @@
{
    "apiVersion": "v1",
    "kind": "Service",
    "metadata": {
        "labels": {
            "app": "event-generator",
            "expose": "true",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8",
            "version": "0.1-SNAPSHOT"
        },
        "name": "event-generator"
    },
    "spec": {
        "clusterIP": "172.30.52.249",
        "ports": [
            {
                "name": "http",
                "port": 8080,
                "protocol": "TCP",
                "targetPort": 8080
            }
        ],
        "selector": {
            "app": "event-generator",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8"
        },
        "sessionAffinity": "None",
        "type": "ClusterIP"
    },
    "status": {
        "loadBalancer": {}
    }
}
ansible/roles/ocp-workload-vertx-reactica/templates/event-store-dc.json
New file
@@ -0,0 +1,81 @@
{
    "apiVersion": "apps.openshift.io/v1",
    "kind": "DeploymentConfig",
    "metadata": {
        "labels": {
            "app": "event-store",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8",
            "version": "0.1-SNAPSHOT"
        },
        "name": "event-store"
    },
    "spec": {
        "replicas": 1,
        "selector": {
            "app": "event-store",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8"
        },
        "strategy": {
            "rollingParams": {
                "intervalSeconds": 1,
                "maxSurge": "25%",
                "maxUnavailable": "25%",
                "timeoutSeconds": 3600,
                "updatePeriodSeconds": 1
            },
            "type": "Rolling"
        },
        "template": {
            "metadata": {
                "labels": {
                    "app": "event-store",
                    "group": "com.redhat.coderland.reactica",
                    "provider": "fabric8",
                    "version": "0.1-SNAPSHOT"
                }
            },
            "spec": {
                "containers": [
                    {
                        "env": [
                            {
                                "name": "KUBERNETES_NAMESPACE",
                                "valueFrom": {
                                    "fieldRef": {
                                        "apiVersion": "v1",
                                        "fieldPath": "metadata.namespace"
                                    }
                                }
                            }
                        ],
                        "image": "quay.io/redhat/reactica-event-store:latest",
                        "imagePullPolicy": "Always",
                        "name": "vertx",
                        "ports": [
                            {
                                "containerPort": 8080,
                                "name": "http",
                                "protocol": "TCP"
                            },
                            {
                                "containerPort": 9779,
                                "name": "prometheus",
                                "protocol": "TCP"
                            },
                            {
                                "containerPort": 8778,
                                "name": "jolokia",
                                "protocol": "TCP"
                            }
                        ]
                    }
                ],
                "dnsPolicy": "ClusterFirst",
                "restartPolicy": "Always",
                "terminationGracePeriodSeconds": 30
            }
        }
    }
}
ansible/roles/ocp-workload-vertx-reactica/templates/event-store-svc.json
New file
@@ -0,0 +1,31 @@
{
    "apiVersion": "v1",
    "kind": "Service",
    "metadata": {
        "labels": {
            "app": "event-store",
            "expose": "true",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8",
            "version": "0.1-SNAPSHOT"
        },
        "name": "event-store"
    },
    "spec": {
        "ports": [
            {
                "name": "http",
                "port": 8080,
                "protocol": "TCP",
                "targetPort": 8080
            }
        ],
        "selector": {
            "app": "event-store",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8"
        },
        "sessionAffinity": "None",
        "type": "ClusterIP"
    }
}
ansible/roles/ocp-workload-vertx-reactica/templates/queue-length-calculator-dc.json
New file
@@ -0,0 +1,117 @@
{
    "apiVersion": "apps.openshift.io/v1",
    "kind": "DeploymentConfig",
    "metadata": {
        "labels": {
            "app": "queue-length-calculator",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8",
            "version": "0.1-SNAPSHOT"
        },
        "name": "queue-length-calculator"
    },
    "spec": {
        "replicas": 1,
        "selector": {
            "app": "queue-length-calculator",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8"
        },
        "strategy": {
            "activeDeadlineSeconds": 21600,
            "resources": {},
            "rollingParams": {
                "intervalSeconds": 1,
                "maxSurge": "25%",
                "maxUnavailable": "25%",
                "timeoutSeconds": 3600,
                "updatePeriodSeconds": 1
            },
            "type": "Rolling"
        },
        "template": {
            "metadata": {
                "labels": {
                    "app": "queue-length-calculator",
                    "group": "com.redhat.coderland.reactica",
                    "provider": "fabric8",
                    "version": "0.1-SNAPSHOT"
                }
            },
            "spec": {
                "containers": [
                    {
                        "env": [
                            {
                                "name": "VERTX_CONFIG_PATH",
                                "value": "/deployments/conf/config.yml"
                            },
                            {
                                "name": "KUBERNETES_NAMESPACE",
                                "valueFrom": {
                                    "fieldRef": {
                                        "apiVersion": "v1",
                                        "fieldPath": "metadata.namespace"
                                    }
                                }
                            }
                        ],
                        "image": "quay.io/redhat/reactica-queue-lenght-calculator:latest",
                        "imagePullPolicy": "Always",
                        "name": "vertx",
                        "ports": [
                            {
                                "containerPort": 8080,
                                "name": "http",
                                "protocol": "TCP"
                            },
                            {
                                "containerPort": 9779,
                                "name": "prometheus",
                                "protocol": "TCP"
                            },
                            {
                                "containerPort": 8778,
                                "name": "jolokia",
                                "protocol": "TCP"
                            }
                        ],
                        "resources": {},
                        "securityContext": {
                            "privileged": false
                        },
                        "terminationMessagePath": "/dev/termination-log",
                        "terminationMessagePolicy": "File",
                        "volumeMounts": [
                            {
                                "mountPath": "/deployments/conf",
                                "name": "config"
                            }
                        ]
                    }
                ],
                "dnsPolicy": "ClusterFirst",
                "restartPolicy": "Always",
                "schedulerName": "default-scheduler",
                "securityContext": {},
                "terminationGracePeriodSeconds": 30,
                "volumes": [
                    {
                        "configMap": {
                            "defaultMode": 420,
                            "items": [
                                {
                                    "key": "application.yaml",
                                    "path": "config.yml"
                                }
                            ],
                            "name": "reactica-config",
                            "optional": true
                        },
                        "name": "config"
                    }
                ]
            }
        }
    }
}
ansible/roles/ocp-workload-vertx-reactica/templates/queue-length-calculator-svc.json
New file
@@ -0,0 +1,35 @@
{
    "apiVersion": "v1",
    "kind": "Service",
    "metadata": {
        "labels": {
            "app": "queue-length-calculator",
            "expose": "true",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8",
            "version": "0.1-SNAPSHOT"
        },
        "name": "queue-length-calculator"
    },
    "spec": {
        "clusterIP": "172.30.74.154",
        "ports": [
            {
                "name": "http",
                "port": 8080,
                "protocol": "TCP",
                "targetPort": 8080
            }
        ],
        "selector": {
            "app": "queue-length-calculator",
            "group": "com.redhat.coderland.reactica",
            "provider": "fabric8"
        },
        "sessionAffinity": "None",
        "type": "ClusterIP"
    },
    "status": {
        "loadBalancer": {}
    }
}
scripts/find_ami.sh
@@ -1,9 +1,37 @@
#!/bin/bash
name=$1
for region in $(aws ec2 describe-regions --query "Regions[].RegionName" --output text)
# Generate yaml containing image information for each region
search_images() {
    owner=$1
    pattern=$2
    ispublic=$3
    aws ec2 describe-images \
        --owners ${owner} \
        --filters "Name=name,Values=${pattern}" "Name=is-public,Values=${ispublic}" \
        --query "reverse(sort_by(Images, &CreationDate))[0].{name: Name, id: ImageId}" \
        --output text \
        --region $region | awk '{print $1 " # " $2}'
}
#for region in us-east-1
for region in $(aws ec2 describe-regions --query "Regions[].RegionName" --output text --region us-east-1)
do
  echo "${region}: $(aws ec2 describe-images --owners amazon 309956199498 --filters Name=name,Values=${name} --query "reverse(sort_by(Images, &CreationDate))[0].ImageId" --output text --region $region)"
    echo "${region}:"
    echo -n "  RHEL75GOLD: "
    search_images 309956199498 'RHEL-7.5*Access*' false
    echo -n "  RHEL74GOLD: "
    search_images 309956199498 'RHEL-7.4*Access*' false
    echo -n "  RHEL75: "
    search_images 309956199498 'RHEL-7.5*' true
    echo -n "  RHEL74: "
    search_images 309956199498 'RHEL-7.4*' true
    echo -n "  WIN2012R2: "
    search_images 801119661308 'Windows_Server-2012-R2*' true
done
# For azure
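For reference, the rewritten loop prints one YAML mapping per region that can be pasted into the cloud-provider image maps; with `--output text`, the `{name, id}` projection emits its keys alphabetically (id first), so each line is the AMI id followed by the matching image name as a trailing comment. A sketch of the assumed output shape, with placeholder ids and names:
us-east-1:
  RHEL75GOLD: ami-0123456789abcdef0 # RHEL-7.5_HVM-placeholder-Access2-GP2
  RHEL74GOLD: ami-0123456789abcdef1 # RHEL-7.4_HVM-placeholder-Access2-GP2
  RHEL75: ami-0123456789abcdef2 # RHEL-7.5_HVM_GA-placeholder-Hourly2-GP2
  RHEL74: ami-0123456789abcdef3 # RHEL-7.4_HVM_GA-placeholder-Hourly2-GP2
  WIN2012R2: ami-0123456789abcdef4 # Windows_Server-2012-R2_RTM-placeholder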