diego-torres
2018-09-13 9c469e66705704edb0bbdd56dbdcf392366a4e8d
Merge branch 'development' of https://github.com/honghuac/ansible_agnostic_deployer into development
2 files deleted
1 file copied
31 files added
42 files modified
1 file renamed
77 changed files, 2728 lines
ansible/configs/ansible-cicd-lab/env_vars.yml 11
ansible/configs/ansible-cicd-lab/post_software.yml 12
ansible/configs/ocp-ha-lab/files/hosts_template.3.10.14.j2 19
ansible/configs/ocp-ha-lab/files/prometheus_alerts_rules.yml 68
ansible/configs/ocp-ha-lab/post_software.yml 7
ansible/configs/quay-enterprise/env_vars.yml 4
ansible/configs/quay-enterprise/software.yml 35
ansible/configs/rhte-oc-cluster-vms/files/oc-cluster.service.j2 4
ansible/configs/rhte-oc-cluster-vms/files/start_oc.sh.j2 2
ansible/configs/rhte-oc-cluster-vms/post_software.yml 53
ansible/configs/rhte-ocp-workshop/files/cloud_providers/ec2_cloud_template.j2 3
ansible/configs/rhte-ocp-workshop/post_infra.yml 27
ansible/roles/config-quay-builder/README.md 43
ansible/roles/config-quay-builder/defaults/main.yml 27
ansible/roles/config-quay-builder/handlers/main.yml 8
ansible/roles/config-quay-builder/tasks/container_credentials.yml 36
ansible/roles/config-quay-builder/tasks/main.yml 55
ansible/roles/config-quay-builder/templates/quay-builder.j2 7
ansible/roles/config-quay-builder/templates/quay-builder.service.j2 16
ansible/roles/config-quay-enterprise/defaults/main.yml 4
ansible/roles/config-quay-enterprise/tasks/configure_systemd.yml 4
ansible/roles/config-quay-enterprise/tasks/main.yml 30
ansible/roles/config-quay-enterprise/templates/config.yaml.j2 2
ansible/roles/config-quay-enterprise/templates/quay.service.j2 2
ansible/roles/host-jenkins-server/defaults/main.yml 32
ansible/roles/host-jenkins-server/tasks/main.yml 12
ansible/roles/host-jenkins-server/tasks/plugins.yml 6
ansible/roles/host-jenkins-server/tasks/self-signed-certificate.yml 58
ansible/roles/host-jenkins-server/tasks/settings.yml 16
ansible/roles/host-jenkins-server/tasks/setup-http.yml 9
ansible/roles/host-jenkins-server/tasks/setup-https.yml 87
ansible/roles/host-jenkins-server/vars/RedHat.yml 4
ansible/roles/ocp-client-vm/README.md 20
ansible/roles/ocp-client-vm/files/bash_profile 2
ansible/roles/ocp-client-vm/tasks/packages.yml 35
ansible/roles/ocp-workload-3scale-lifecycle/defaults/main.yml 33
ansible/roles/ocp-workload-3scale-lifecycle/readme.adoc 32
ansible/roles/ocp-workload-3scale-lifecycle/tasks/main.yml 20
ansible/roles/ocp-workload-3scale-lifecycle/tasks/post_workload.yml 5
ansible/roles/ocp-workload-3scale-lifecycle/tasks/pre_workload.yml 32
ansible/roles/ocp-workload-3scale-lifecycle/tasks/remove_workload.yml 41
ansible/roles/ocp-workload-3scale-lifecycle/tasks/wait_for_build.yml
ansible/roles/ocp-workload-3scale-lifecycle/tasks/wait_for_deploy.yml 25
ansible/roles/ocp-workload-3scale-lifecycle/tasks/workload.yml 57
ansible/roles/ocp-workload-3scale-multitenant/templates/manage_tenants.sh 2
ansible/roles/ocp-workload-iot-demo/defaults/main.yml 20
ansible/roles/ocp-workload-iot-demo/files/dashboard-proxy.yml 459
ansible/roles/ocp-workload-iot-demo/files/dashboard.yml 331
ansible/roles/ocp-workload-iot-demo/tasks/workload.yml 74
ansible/roles/ocp-workload-rh-sso/defaults/main.yml 28
ansible/roles/ocp-workload-rh-sso/readme.adoc 32
ansible/roles/ocp-workload-rh-sso/tasks/main.yml 20
ansible/roles/ocp-workload-rh-sso/tasks/post_workload.yml 5
ansible/roles/ocp-workload-rh-sso/tasks/pre_workload.yml 32
ansible/roles/ocp-workload-rh-sso/tasks/remove_workload.yml 31
ansible/roles/ocp-workload-rh-sso/tasks/wait_for_build.yml
ansible/roles/ocp-workload-rh-sso/tasks/wait_for_deploy.yml 25
ansible/roles/ocp-workload-rh-sso/tasks/workload.yml 39
ansible/roles/ocp-workload-rhte-mw-api-biz/defaults/main.yml 27
ansible/roles/ocp-workload-rhte-mw-api-biz/tasks/workload.yml 194
ansible/roles/ocp-workload-rhte-mw-api-mesh/defaults/main.yml 3
ansible/roles/ocp-workload-rhte-mw-api-mesh/readme.adoc 1
ansible/roles/ocp-workload-rhte-mw-api-mesh/tasks/pre_workload.yml 1
ansible/roles/ocp-workload-rhte-mw-bfield-migration/defaults/main.yml 21
ansible/roles/ocp-workload-rhte-mw-bfield-migration/readme.adoc 27
ansible/roles/ocp-workload-rhte-mw-bfield-migration/tasks/pre_workload.yml 22
ansible/roles/ocp-workload-rhte-mw-bfield-migration/tasks/remove_workload.yml 41
ansible/roles/ocp-workload-rhte-mw-bfield-migration/tasks/wait_for_deploy.yml 20
ansible/roles/ocp-workload-rhte-mw-bfield-migration/tasks/workload.yml 46
ansible/roles/ocp-workload-rhte-mw-bfield-migration/templates/constraints_limitrange.yaml 25
ansible/roles/ocp-workload-rhte-mw-bfield-migration/templates/limitrange.yaml 23
ansible/roles/ocp-workload-rhte-mw-op-intel/defaults/main.yml 4
ansible/roles/ocp-workload-rhte-mw-op-intel/readme.adoc 86
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/main.yml 5
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/pre_workload.yml 2
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/remove_workload.yml 38
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/strimzi_workload.yml 39
ansible/configs/ansible-cicd-lab/env_vars.yml
@@ -262,8 +262,19 @@
  - workflow-aggregator   # Pipeline Plugin
  - parameterized-trigger # Parameterized Trigger Plugin
  - extended-choice-parameter # Extended Choice Parameter
  - ansible-tower         # Ansible Tower Plugin
jenkins_plugin_timeout: 240 # Jenkins tends to run into timeout while installing plug-ins
jenkins_admin_password: r3dh4t1!
jenkins_protocol: "https"
jenkins_selfsigned_certificate: yes
jenkins_port: 8443
jenkins_home: /var/lib/jenkins
jenkins_keystore_path: "/opt/jenkins/jenkins.jks"
jenkins_keystore_password: "r3dh4t1!"
jenkins_url_prefix: ""
jenkins_java_options_env_var: JENKINS_JAVA_OPTIONS
jenkins_java_options: "-Djenkins.install.runSetupWizard=false"
### Tower Variables
ansible/configs/ansible-cicd-lab/post_software.yml
@@ -19,9 +19,17 @@
  tags:
    - install_ci_components
  pre_tasks:
    - name: gather ansible_os_family and ansible_distribution facts for Jenkins
    - name: gather ansible_hostname, ansible_os_family and ansible_distribution facts for Jenkins
      setup:
        filter: 'ansible_[od][si]*'
        filter: 'ansible_[odh][sio]*'
# These variables have to be set at runtime as they are host related.
    - name: Set target_domain variable and keystore variables
      set_fact:
        jenkins_target_domain: "{{ ansible_hostname }}.{{ guid }}{{ subdomain_base_suffix }}"
        keystore_path: "{{ jenkins_keystore_path }}"
        keystore_pass: "{{ jenkins_keystore_password }}"
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/geerlingguy.gogs" }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/host-gogs-server" }
ansible/configs/ocp-ha-lab/files/hosts_template.3.10.14.j2
@@ -73,8 +73,8 @@
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
# os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy'
{{multi_tenant_setting}}
# os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy'
###########################################################################
### OpenShift Authentication Vars
@@ -97,6 +97,7 @@
########################
openshift_metrics_install_metrics={{install_metrics}}
{% if install_nfs|bool %}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
@@ -104,6 +105,7 @@
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
{% endif %}
openshift_metrics_cassandra_pvc_storage_class_name=''
openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra": "true"}
@@ -170,9 +172,9 @@
openshift_prometheus_alertbuffer_cpu_requests=200m
openshift_prometheus_alertbuffer_memory_limit=300Mi
openshift_prometheus_alertbuffer_cpu_limit=200m
# The following file will need to be copied over to the bastion before deployment
{# The following file will need to be copied over to the bastion before deployment
# There is an example in ocp-workshop/files
# openshift_prometheus_additional_rules_file=/root/prometheus_alerts_rules.yml
# openshift_prometheus_additional_rules_file=/root/prometheus_alerts_rules.yml #}
# Grafana
openshift_grafana_node_selector={"node-role.kubernetes.io/infra":"true"}
@@ -203,16 +205,16 @@
{% if install_glusterfs|bool %}
openshift_logging_es_pvc_dynamic=true
openshift_logging_es_pvc_size=20Gi
openshift_logging_es_cluster_size=1
openshift_logging_es_pvc_storage_class_name='glusterfs-storage-block'
{% endif %}
openshift_logging_es_memory_limit=8Gi
openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_es_cluster_size=1
openshift_logging_curator_default_days=3
openshift_logging_curator_default_days=2
###########################################################################
### OpenShift Router and Registry Vars
@@ -222,10 +224,6 @@
# openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
openshift_hosted_registry_replicas=1
@@ -236,10 +234,11 @@
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
{% endif %}
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
{% endif %}
###########################################################################
### OpenShift Service Catalog Vars
ansible/configs/ocp-ha-lab/files/prometheus_alerts_rules.yml
New file
@@ -0,0 +1,68 @@
groups:
- name: etcd-rules
  interval: 10s # defaults to global interval
  rules:
  - alert: "Node Down"
    expr: up{job="kubernetes-nodes"} == 0
    annotations:
      component: "ContainerNode"
      severity: "HIGH"
      message: "Node {{$labels.instance}} is down"
  - alert: "Lost ETCD"
    expr: up{job="etcd"} == 0
    annotations:
      component: "ETCD"
      severity: "HIGH"
      message: "ETCD {{$labels.instance}} is down"
  - alert: "Time drift"
    expr: sqrt((scalar(avg(node_time{job="kubernetes-nodes-exporter"})) - node_time{job="kubernetes-nodes-exporter"} )^2) > 60
    for: 30s
    annotations:
      component: "NTP"
      severity: "HIGH"
      message: "Node {{$labels.instance}} has time drift bigger than 60 from average time"
- name: scheduler-rules
  interval: 10s # defaults to global interval
  rules:
  - alert: "Scheduler node1"
    expr: ( (sum(kubelet_running_pod_count{instance=~"^node.*"}) / ((count(node_time) - 6))) * 2 ) < (sum(kubelet_running_pod_count{instance=~"^node1.*"}))
    annotations:
      component: "Scheduler"
      severity: "HIGH"
      message: "Node node1.example.com has more pods than average"
  - alert: "Scheduler node2"
    expr: ( (sum(kubelet_running_pod_count{instance=~"^node.*"}) / ((count(node_time) - 6))) *2 ) < (sum(kubelet_running_pod_count{instance=~"^node2.*"}))
    annotations:
      component: "Scheduler"
      severity: "HIGH"
      message: "Node node2.example.com has more pods than average"
  - alert: "Scheduler node3"
    expr: ( (sum(kubelet_running_pod_count{instance=~"^node.*"}) / ((count(node_time) - 6))) *2 ) < (sum(kubelet_running_pod_count{instance=~"^node3.*"}))
    annotations:
      component: "Scheduler"
      severity: "HIGH"
      message: "Node node3.example.com has more pods than average"
  - alert: "Builds Failing"
    expr: sum(openshift_build_total{phase=~"Failed|Error"}) > 10
    annotations:
      component: "OpenShift Builds"
      severity: "HIGH"
      message: "There is a high volume of builds failing"
  - alert: "Registry storage"
    expr: (avg(kubelet_volume_stats_used_bytes{persistentvolumeclaim="registry-claim"}) * 100) / avg(kubelet_volume_stats_capacity_bytes{persistentvolumeclaim="registry-claim"}) > 80
    annotations:
      component: "Registry Storage"
      severity: "MEDIUM"
      message: "Storage limit reached more than 80% "
  - alert: "Registry storage"
    expr: (avg(kubelet_volume_stats_used_bytes{persistentvolumeclaim="registry-claim"}) * 100) / avg(kubelet_volume_stats_capacity_bytes{persistentvolumeclaim="registry-claim"}) > 95
    annotations:
      component: "Registry Storage"
      severity: "HIGH"
      message: "Storage limit reached more than 95% "
  - alert: "DNS Errors"
    expr: changes(node_dnsmasq_sync_error_count_total[2m]) >=1
    annotations:
      component: "dnsmasq"
      severity: "HIGH"
message: "DNS errors detected. Check grafana for more details"
ansible/configs/ocp-ha-lab/post_software.yml
@@ -50,12 +50,17 @@
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Copy complete inventory file to bastion:/var/preserve/hosts
    - name: Copy complete inventory file to bastion /var/preserve/hosts
      copy:
        src: "{{ ANSIBLE_REPO_PATH }}/workdir/hosts-{{ env_type }}-{{ guid }}"
        dest: /var/preserve/hosts
      tags: preserve_complete_ansible_inventory
    - name: copy prometheus rules file to bastion
      copy:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/prometheus_alerts_rules.yml"
        dest: /root/prometheus_alerts_rules.yml
    - name: Copy over ansible hosts file, lab version
      copy:
        backup: no
ansible/configs/quay-enterprise/env_vars.yml
@@ -44,8 +44,8 @@
qe_quay_ssl_lets_encrypt_production: False
qe_quay_ssl_lets_encrypt_force_renew: False
qe_quay_ssl_lets_encrypt_renew_automatically: False
qe_quay_ssl_key_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.key"
qe_quay_ssl_cert_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.cert"
#qe_quay_ssl_key_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.key"
#qe_quay_ssl_cert_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.cert"
# qe_quay_superuser_username: quayadmin
# qe_quay_superuser_password: quaypwd
# qe_quay_superuser_email: quayadmin@dummy.com
ansible/configs/quay-enterprise/software.yml
@@ -24,6 +24,19 @@
        - quay_clair_enable|d(False)|bool
        - "'clair' not in groups or groups['clair']|length == 0 or 'clair_database' not in groups or groups['clair_database']|length == 0"
- name: Set cert/key file locations
  hosts:
  - quay_enterprise
  gather_facts: false
  run_once: true
  tasks:
  - name: Set Cert/Key file locations
    set_fact:
      qe_quay_ssl_key_file: ""
      qe_quay_ssl_cert_file: ""
    when:
    - qe_quay_ssl_key_file is undefined or qe_quay_ssl_cert_file is undefined or qe_quay_ssl_key_file=="" or qe_quay_ssl_cert_file==""
- name: Set up Let's Encrypt Certificates
  hosts:
  - quay_enterprise
@@ -32,16 +45,18 @@
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
  - name: Open iptables port 80
    iptables:
      action: insert
      protocol: tcp
      destination_port: "80"
      state: present
      chain: INPUT
      jump: ACCEPT
  - name: Request Certificates
    when:
    - qe_quay_ssl_lets_encrypt_certs|d(False)|bool
    block:
    - name: Open iptables port 80
      iptables:
        action: insert
        protocol: tcp
        destination_port: "80"
        state: present
        chain: INPUT
        jump: ACCEPT
    - name: Request and install Let's Encrypt Certificates
      include_role:
        name: ../../roles/host-lets-encrypt-certs
@@ -54,8 +69,6 @@
      - acme_production: "{{ qe_quay_ssl_lets_encrypt_production|d(False)|bool }}"
      - acme_renew_automatically: "{{ qe_quay_ssl_lets_encrypt_renew_automatically|d(False)|bool }}"
      - acme_force_issue: "{{ qe_quay_ssl_lets_encrypt_force_renew|d(False)|bool }}"
    when:
    - qe_quay_ssl_lets_encrypt_certs|d(False)|bool
- name: Set up Node Software (Docker)
  hosts:
@@ -72,8 +85,10 @@
    - name: Ensure that iptables service is enabled and started
      systemd:
        name: iptables
        daemon_reload: yes
        enabled: yes
        state: started
        no_block: yes
    - name: Set up Node Software (Docker)
      include_role:
        name: ../../roles/host-ocp-node
ansible/configs/rhte-oc-cluster-vms/files/oc-cluster.service.j2
@@ -4,7 +4,11 @@
Requires=docker.service
[Service]
{% if osrelease is version_compare("3.10", ">=") %}
ExecStart=/bin/oc cluster up --base-dir={{ ocp_root }} --public-hostname={{ hostname }} --routing-suffix=apps.{{ hostname }} --loglevel=1
{% else %}
ExecStart=/bin/oc cluster up --host-config-dir={{ ocp_root }}/config --host-data-dir={{ ocp_root }}/data --host-pv-dir={{ ocp_root }}/pv --host-volumes-dir={{ ocp_root }}/volumes --use-existing-config=true --public-hostname={{ hostname }} --routing-suffix=apps.{{ hostname }} --loglevel=1
{% endif %}
ExecStop=/bin/oc cluster down
WorkingDirectory={{ ocp_root }}
Restart=no
ansible/configs/rhte-oc-cluster-vms/files/start_oc.sh.j2
File was deleted
ansible/configs/rhte-oc-cluster-vms/post_software.yml
@@ -37,13 +37,6 @@
    systemd:
      name: docker
      state: restarted
  # - name: Upload oc-cluster up script
  #   template:
  #     src: ./files/start_oc.sh.j2
  #     dest: /usr/bin/start_oc.sh
  #     owner: root
  #     group: root
  #     mode: 0755
  - name: Install AAD
    git:
      repo: https://github.com/sborenst/ansible_agnostic_deployer
@@ -55,13 +48,28 @@
    set_fact:
      ocp_root: "/var/lib/openshift"
  - name: Ensure OpenShift config directory is there
  - name: Ensure OpenShift config directory exists
    file:
      path: "{{ ocp_root }}"
      state: directory
      owner: root
      group: root
      mode: 0775
  - name: Ensure OpenShift config subdirectories exist for OCP 3.9
    file:
      path: "{{ item }}"
      state: directory
      owner: root
      group: root
      mode: 0775
    with_items:
    - "{{ ocp_root }}/config"
    - "{{ ocp_root }}/data"
    - "{{ ocp_root }}/pv"
    - "{{ ocp_root }}/volumes"
    when:
    - osrelease is version_compare("3.10", "<")
  - name: Create oc_cluster system service
    template:
@@ -73,6 +81,35 @@
      name: oc-cluster
      enabled: yes
      state: started
    register: r_systemd
    ignore_errors: yes
  # There is a bug that happens sometimes (~1/100): the service ends up
  # in a failed state. A reboot fixes the issue.
  - name: Restart VM in case of Service Start failure
    when: r_systemd is failed
    block:
      - name: Reboot VM
        command: shutdown -r now
        ignore_errors: yes
      - name: wait for linux host to be available (retry)
        wait_for_connection:
          delay: 60
          timeout: 200
      - ping:
        register: rping
        retries: 3
        delay: 10
        until: rping is succeeded
      - name: Retry to enable and start oc-cluster system service
        systemd:
          name: oc-cluster
          enabled: yes
          state: started
  - name: Wait for oc-cluster to be up and running
    wait_for:
      host: "{{ hostname }}"
ansible/configs/rhte-ocp-workshop/files/cloud_providers/ec2_cloud_template.j2
@@ -198,9 +198,6 @@
      MaxSize: 200
      DesiredCapacity: {{num_users}}
      Tags:
        - Key: isolated
          Value: True
          PropagateAtLaunch: True
        - Key: "owner"
          Value: "{{ email | default('unknownuser') }}"
          PropagateAtLaunch: True
ansible/configs/rhte-ocp-workshop/post_infra.yml
@@ -16,34 +16,31 @@
      when:
        - "'clientvms' in groups"
        - groups['clientvms'] | length > 0
        - cloudformation_out_final.stack_outputs.AutoScalingGroupClientVM is defined
      block:
      - name: test cloudformation_out_final
        debug:
          var: cloudformation_out_final
          verbosity: 2
      - name: Deactivate autoscaling
        command: >-
          aws autoscaling suspend-processes --auto-scaling-group-name
          {{cloudformation_out_final.stack_outputs.AutoScalingGroupClientVM}}
        when:
          - cloudformation_out_final.stack_outputs.AutoScalingGroupClientVM is defined
      - name: Write down autoscaling name
        copy:
          dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.AutoScalingGroupClientVM"
          content: "{{cloudformation_out_final.stack_outputs.AutoScalingGroupClientVM}}"
        when:
          - cloudformation_out_final.stack_outputs.AutoScalingGroupClientVM is defined
      - name: Allocate and associate an EIP to the clientVMs
        ec2_eip:
          device_id: "{{hostvars[item].instance_id}}"
          release_on_disassociation: yes
        with_items: "{{groups['clientvms']}}"
        register: eips
        when:
          - cloudformation_out_final.stack_outputs.AutoScalingGroupClientVM is defined
      # reimport roles to update inventory, and regenerate ssh config, since
      # we're using public ip to connect to clientVMs
      # reimport roles to update inventory, since publicIP changed
      - name: Run infra-ec2-create-inventory Role
        import_role:
          name: "{{ ANSIBLE_REPO_PATH }}/roles/infra-ec2-create-inventory"
@@ -55,12 +52,13 @@
        route53:
          hosted_zone_id: "{{HostedZoneId}}"
          zone: "{{subdomain_base}}"
          record: "clientvm{{idx}}.{{subdomain_base}}"
          record: "clientvm{{idx + 1}}.{{subdomain_base}}"
          state: present
          type: A
          ttl: 90
          value: "{{hostvars[item].public_ip_address}}"
        with_items: "{{groups['clientvms']}}"
        ignore_errors: yes
        loop_control:
          index_var: idx
          pause: 2
@@ -70,7 +68,16 @@
          state: present
          resource: "{{hostvars[item].instance_id}}"
          tags:
            Name: "clientvm{{idx}}"
            Name: "clientvm{{idx + 1}}"
        with_items: "{{groups['clientvms']}}"
        loop_control:
          index_var: idx
      - name: Set hostname
        become: yes
        hostname:
          name: "clientvm{{idx + 1}}"
        delegate_to: "{{item}}"
        with_items: "{{groups['clientvms']}}"
        loop_control:
          index_var: idx
ansible/roles/config-quay-builder/README.md
New file
@@ -0,0 +1,43 @@
# Quay Builder
Ansible Role to help configure [Quay Builder](https://coreos.com/quay-enterprise/docs/latest/build-support.html) on a standalone instance.
## Requirements
A Linux distribution that supports the `systemd` and `package` modules, with Docker installed and configured.
## Role Variables
This role contains a number of variables to customize the deployment of Quay Builder. The following are some of the most important that may need to be configured:
| Name | Description | Default|
|---|---|---|
|quay_builder_image|Quay builder image|`quay.io/coreos/quay-builder:v2.9.3`|
|quay_builder_config_dir|Directory for Quay builder configurations| `/var/lib/quay-builder/config`|
|quay_enterprise_hostname|Hostname of the Quay Enterprise instance| |
|quay_builder_ssl_trust_configure|Configure SSL trust|`False`|
|quay_builder_ssl_trust_src_file|Location of the SSL certificate used for TLS trust when enabled|`/tmp/quay-builder-ssl-trust.crt`|
## Dependencies
* [container-storage-setup](../container-storage-setup)
* [config-docker](../config-docker)
## Example Playbook
```
- name: Install Quay Builder
  hosts: quay_builder
  roles:
    - role: config-quay-builder
```
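As a rough sketch, the role's main knobs can be overridden at play level; the hostname and SSL trust values below are placeholders, not values shipped by this commit:
```
- name: Install Quay Builder
  hosts: quay_builder
  vars:
    # Quay Enterprise instance the builder registers against (placeholder)
    quay_enterprise_hostname: quay.example.com
    quay_builder_image: quay.io/coreos/quay-builder:v2.9.3
    # Only needed when the Quay endpoint uses a certificate that is not already trusted
    quay_builder_ssl_trust_configure: True
    quay_builder_ssl_trust_src_file: /tmp/quay-builder-ssl-trust.crt
  roles:
    - role: config-quay-builder
```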
## License
Apache License 2.0
## Author Information
Red Hat Community of Practice & staff of the Red Hat Open Innovation Labs.
ansible/roles/config-quay-builder/defaults/main.yml
New file
@@ -0,0 +1,27 @@
---
# Base Configurations
quay_builder_name: quay-builder
quay_builder_service: "{{ quay_builder_name }}.service"
#Systemd
systemd_service_dir: /usr/lib/systemd/system
systemd_environmentfile_dir: /etc/sysconfig
# Quay Builder
quay_builder_image: quay.io/coreos/quay-builder:v2.9.3
quay_builder_config_dir: /var/lib/quay-builder/config
quay_builder_ssl_trust_configure: False
quay_builder_ssl_trust_src_file: /tmp/quay-builder-ssl-trust.crt
quay_builder_ssl_trust_host_file: "{{ quay_builder_config_dir }}/ca.crt"
quay_builder_ssl_trust_container_file: /usr/local/share/ca-certificates/rootCA.pem
# Container Credentials
container_credentials_file: /root/.docker/config.json
container_credentials_file_content: {}
quay_registry_server: quay.io
quay_registry_auth:
quay_registry_email:
# Quay
quay_enterprise_hostname: ""
ansible/roles/config-quay-builder/handlers/main.yml
New file
@@ -0,0 +1,8 @@
---
- name: Restart Quay Builder Service
  systemd:
    name: "{{ quay_builder_service }}"
    enabled: yes
    state: restarted
    daemon_reload: yes
ansible/roles/config-quay-builder/tasks/container_credentials.yml
New file
@@ -0,0 +1,36 @@
---
- name: Check if container credential file exists
  stat:
    path: "{{ container_credentials_file }}"
  register: container_credential_stat_result
- block:
  - name: Read content of container credentials file
    slurp:
      path: "{{ container_credentials_file }}"
    register: remote_container_credentials_file
  - name: Set content of container credentials file
    set_fact:
      container_credentials_file_content: "{{ remote_container_credentials_file.content| b64decode | from_json }}"
  when: container_credential_stat_result.stat.exists
- name: Create Quay credentials variable
  set_fact:
    quay_container_credentials:
      auths: "{ '{{quay_registry_server}}':{ 'email': '{{ quay_registry_email }}', 'auth': '{{ quay_registry_auth }}' } }"
- name: Create merged credentials file content
  set_fact:
    container_credentials_file_content: "{{ container_credentials_file_content | combine(quay_container_credentials, recursive=True) }}"
- name: Create directory for container credentials file
  file:
    state: directory
    path: "{{ container_credentials_file | dirname }}"
- name: Update container credentials file
  copy:
    content: "{{container_credentials_file_content | to_nice_json }}"
    dest: "{{ container_credentials_file }}"
ansible/roles/config-quay-builder/tasks/main.yml
New file
@@ -0,0 +1,55 @@
---
- name: Validate Quay Hostname Provided
  fail:
    msg: "Quay Hostname Must Be Provided!"
  when: quay_enterprise_hostname is undefined or quay_enterprise_hostname|trim == ""
- name: Include Container Credentials
  include_tasks: container_credentials.yml
  when: (quay_registry_server | trim != "") and ((quay_registry_auth | trim != "") or (quay_registry_email | trim != ""))
- name: Configure Configuration Directory
  file:
    state: directory
    owner: root
    group: root
    mode: g+rw
    path: "{{ quay_builder_config_dir }}"
- name: Configure Trusted SSL
  block:
    - name: Check if Trusted SSL file exists
      become: false
      stat:
        path: "{{ quay_builder_ssl_trust_src_file  }}"
      register: trusted_ssl_exists
      changed_when: False
      delegate_to: localhost
    - name: Fail if SSL source file does not exist
      fail:
        msg: "Could not locate SSL trust certificate"
      when: trusted_ssl_exists.stat.exists == false
    - name: Copy SSL Certificate
      copy:
        src: "{{ quay_builder_ssl_trust_src_file }}"
        dest: "{{ quay_builder_ssl_trust_host_file }}"
        owner: root
        group: root
        mode: g+rw
      notify: Restart Quay Builder Service
  when: quay_builder_ssl_trust_configure|bool
- name: Configure systemd environment files
  template:
    src: "quay-builder.j2"
    dest: "{{ systemd_environmentfile_dir}}/{{ quay_builder_name }}"
  notify: Restart Quay Builder Service
- name: Configure systemd unit files
  template:
    src: "quay-builder.service.j2"
    dest: "{{ systemd_service_dir}}/{{ quay_builder_service }}"
  notify: Restart Quay Builder Service
ansible/roles/config-quay-builder/templates/quay-builder.j2
New file
@@ -0,0 +1,7 @@
QUAY_BUILDER_IMAGE={{ quay_builder_image }}
QUAY_BUILDER_HOST_CONFIG_DIR={{ quay_builder_config_dir }}
QUAY_ADDRESS={{ quay_enterprise_hostname  }}
{% if quay_builder_ssl_trust_configure %}
QUAY_BUILDER_SSL_TRUST_HOST_FILE={{ quay_builder_ssl_trust_host_file }}
QUAY_BUILDER_SSL_TRUST_CONTAINER_FILE={{ quay_builder_ssl_trust_container_file }}
{% endif %}
ansible/roles/config-quay-builder/templates/quay-builder.service.j2
New file
@@ -0,0 +1,16 @@
[Unit]
Description=Quay Builder Docker Container
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile={{ systemd_environmentfile_dir }}/{{ quay_builder_name }}
TimeoutStartSec=0
Restart=always
ExecStartPre=/usr/bin/docker pull ${QUAY_BUILDER_IMAGE}
ExecStart=/usr/bin/docker run --rm --name %n -e SERVER={% if quay_builder_ssl_trust_configure|bool %}wss{% else %}ws{% endif %}://${QUAY_ADDRESS} {% if quay_builder_ssl_trust_configure|bool %} -v ${QUAY_BUILDER_SSL_TRUST_HOST_FILE}:${QUAY_BUILDER_SSL_TRUST_CONTAINER_FILE}:Z {% endif %} -v /var/run/docker.sock:/var/run/docker.sock --entrypoint=/bin/sh ${QUAY_BUILDER_IMAGE} -c '{% if quay_builder_ssl_trust_configure|bool %}/usr/sbin/update-ca-certificates && {% endif %}quay-builder'
ExecStop=/usr/bin/docker stop %n
ExecReload=/usr/bin/docker restart %n
[Install]
WantedBy=multi-user.target
ansible/roles/config-quay-enterprise/defaults/main.yml
@@ -16,6 +16,7 @@
quay_container_config_dir: /conf/stack
quay_storage_dir: /var/lib/quay/storage
quay_container_storage_dir: /datastorage
quay_storage_selinux_relabel: True
# External Databases
postgresql_db_uri: "postgresql://{{ quay_database_username }}:{{ quay_database_password }}@{{ quay_database_host }}:{{ quay_database_port | default('5432') }}/{{ quay_database_name }}"
@@ -51,6 +52,9 @@
quay_clair_enable: False
quay_clair_endpoint: ""
# Builder
quay_builder_enable: False
# Superuser Configuration
quay_superuser_username: ""
quay_superuser_password: ""
ansible/roles/config-quay-enterprise/tasks/configure_systemd.yml
@@ -2,12 +2,12 @@
- name: Configure systemd environment files
  template:
    src: "{{ quay_name }}.j2"
    src: "quay.j2"
    dest: "{{ systemd_environmentfile_dir}}/{{ quay_name }}"
  notify: "Restart quay service"
- name: Configure systemd unit files
  template:
    src: "{{ quay_service }}.j2"
    src: "quay.service.j2"
    dest: "{{ systemd_service_dir}}/{{ quay_service }}"
  notify: "Restart quay service"
ansible/roles/config-quay-enterprise/tasks/main.yml
@@ -42,24 +42,47 @@
- name: Include systemd configurations
  include_tasks: configure_systemd.yml
- name: Set Fail Safes in case certificate files not defined
  when: quay_ssl_enable|bool
  block:
  - name: Set fail safe for certificate file
    set_fact:
      quay_ssl_cert_file: ""
    when: quay_ssl_cert_file is undefined
  - name: Set fail safe for key file
    set_fact:
      quay_ssl_key_file: ""
    when: quay_ssl_key_file is undefined
- name: Set Fact for Custom SSL Certificates
  set_fact:
    quay_ssl_cert_file_to_use: "{{ quay_ssl_cert_file }}"
    quay_ssl_key_file_to_use: "{{ quay_ssl_key_file }}"
  when: quay_ssl_enable|bool and (quay_ssl_key_file is defined and quay_ssl_key_file|trim != "" and quay_ssl_cert_file is defined and quay_ssl_cert_file|trim != "")
  when: quay_ssl_enable|bool and (quay_ssl_key_file|trim != "" and quay_ssl_cert_file|trim != "")
- name: Debug first when
  debug:
    msg: First when true
  when: quay_ssl_enable|bool and (quay_ssl_key_file|trim != "" and quay_ssl_cert_file|trim != "")
- name: Debug second when
  debug:
    msg: Second when true
  when: quay_ssl_enable|bool and (quay_ssl_key_file|trim == "" or quay_ssl_cert_file|trim == "")
- name: Create Self Signed SSL Certificates
  when: quay_ssl_enable|bool and (quay_ssl_key_file|trim == "" or quay_ssl_cert_file|trim == "")
  block:
  - name: Create Temporary SSL Directory
    command: mktemp -d /tmp/quay-ssl-XXXXXXX
    register: quay_ssl_remote_tmp_dir_mktemp
    delegate_to: "{{ groups['quay_enterprise'][0] }}"
    when: quay_ssl_remote_tmp_dir is undefined and quay_ssl_remote_tmp_dir|trim == ""
    when: quay_ssl_remote_tmp_dir is not defined and quay_ssl_remote_tmp_dir|trim == ""
  - name: Set Fact for Remote Self Signed SSL Directory
    set_fact:
      quay_ssl_remote_tmp_dir: "{{ quay_ssl_remote_tmp_dir if quay_ssl_remote_tmp_dir is defined and quay_ssl_remote_tmp_dir|trim == '' else quay_ssl_remote_tmp_dir_mktemp.stdout }}"
    when: quay_ssl_remote_tmp_dir is undefined and quay_ssl_remote_tmp_dir|trim == ""
    when: quay_ssl_remote_tmp_dir is not defined and quay_ssl_remote_tmp_dir|trim == ""
  - name: Create SSL Certificate
    command: openssl req -nodes -x509 -newkey rsa:4096 -keyout {{ quay_ssl_remote_tmp_dir }}/ssl.key -out {{ quay_ssl_remote_tmp_dir }}/ssl.cert -subj "/C={{ quay_ssl_generate_country }}/ST={{ quay_ssl_generate_state }}/L={{ quay_ssl_generate_city }}/O={{ quay_ssl_generate_organization }}/OU={{ quay_ssl_generate_organizational_unit }}/CN={{ quay_server_hostname }}" -days {{ quay_ssl_generate_days_validity }}
@@ -88,7 +111,6 @@
    set_fact:
      quay_ssl_cert_file_to_use: "{{ quay_ssl_local_tmp_dir }}/ssl.cert"
      quay_ssl_key_file_to_use: "{{ quay_ssl_local_tmp_dir }}/ssl.key"
  when: quay_ssl_enable|bool and (quay_ssl_key_file is not defined or quay_ssl_key_file|trim == "" or quay_ssl_cert_file is not defined or quay_ssl_cert_file|trim == "")
- name: Copy SSL Certificates
  copy:
ansible/roles/config-quay-enterprise/templates/config.yaml.j2
@@ -14,7 +14,7 @@
FEATURE_ANONYMOUS_ACCESS: true
FEATURE_APP_REGISTRY: false
FEATURE_APP_SPECIFIC_TOKENS: true
FEATURE_BUILD_SUPPORT: false
FEATURE_BUILD_SUPPORT: {{ (quay_builder_enable is defined and quay_builder_enable|bool) | ternary('true','false') }}
FEATURE_CHANGE_TAG_EXPIRATION: true
FEATURE_DIRECT_LOGIN: true
FEATURE_MAILING: false
ansible/roles/config-quay-enterprise/templates/quay.service.j2
@@ -8,7 +8,7 @@
TimeoutStartSec=0
Restart=always
ExecStartPre=/usr/bin/docker pull ${QUAY_IMAGE}
ExecStart=/usr/bin/docker run --rm --name %n -p ${QUAY_HOST_HTTP_PORT}:${QUAY_CONTAINER_HTTP_PORT} -p ${QUAY_HOST_HTTPS_PORT}:${QUAY_CONTAINER_HTTPS_PORT} -v ${QUAY_HOST_CONFIG_DIR}:${QUAY_CONTAINER_CONFIG_DIR}:Z -v ${QUAY_HOST_STORAGE_DIR}:${QUAY_CONTAINER_STORAGE_DIR}:Z ${QUAY_IMAGE}
ExecStart=/usr/bin/docker run --rm --name %n -p ${QUAY_HOST_HTTP_PORT}:${QUAY_CONTAINER_HTTP_PORT} -p ${QUAY_HOST_HTTPS_PORT}:${QUAY_CONTAINER_HTTPS_PORT} -v ${QUAY_HOST_CONFIG_DIR}:${QUAY_CONTAINER_CONFIG_DIR}:Z -v ${QUAY_HOST_STORAGE_DIR}:${QUAY_CONTAINER_STORAGE_DIR}{% if quay_storage_selinux_relabel is defined and quay_storage_selinux_relabel|bool %}:Z{% endif %} ${QUAY_IMAGE}
ExecStop=/usr/bin/docker stop %n
ExecReload=/usr/bin/docker restart %n
ansible/roles/host-jenkins-server/defaults/main.yml
@@ -11,10 +11,11 @@
jenkins_connection_retries: 60
jenkins_home: /var/lib/jenkins
jenkins_hostname: localhost
jenkins_http_port: 8080
jenkins_port: 8080
jenkins_jar_location: /opt/jenkins-cli.jar
jenkins_url_prefix: ""
jenkins_java_options: "-Djenkins.install.runSetupWizard=false"
jenkins_protocol: "http"
jenkins_plugins: []
jenkins_plugins_state: present
@@ -36,3 +37,32 @@
    value: "--prefix={{ jenkins_url_prefix }}"
  - option: "{{ jenkins_java_options_env_var }}"
    value: "{{ jenkins_java_options }}"
jenkins_selfsigned_certificate: no
jenkins_target_domain: "example.com"
jenkins_ca_subject:
  country: "US"
  state: "NY"
  location: "NY"
  organization: "Acme"
  organizational_unit: "IT Department"
  common_name: "{{ jenkins_target_domain }}"
jenkins_ca_subject_value: "/C={{ jenkins_ca_subject.country }}/ST={{ jenkins_ca_subject.state }}/L={{ jenkins_ca_subject.location }}/O={{ jenkins_ca_subject.organization }}/OU={{ jenkins_ca_subject.organizational_unit }}/CN={{ jenkins_ca_subject.common_name }}"
jenkins_cert_expiration_days: "1024"
jenkins_ca_key_path: "/tmp/rootCA.key"
jenkins_ca_cert_path: "/tmp/rootCA.crt"
jenkins_key_path: "/tmp/key.key"
jenkins_csr_path: "/tmp/csr.csr"
jenkins_cert_path: "/tmp/{{ jenkins_target_domain }}.crt"
jenkins_pkcs12_path: /tmp/keystore.p12
jenkins_keystore_path: "/tmp/defaultkeystore.jks"
jenkins_keystore_pass: "changeit"
jenkins_keystore_owner: "jenkins"
jenkins_keystore_mode: 0755
jenkins_cert_alias: "{{ jenkins_target_domain }}"
jenkins_https_listen_address: "0.0.0.0"
ansible/roles/host-jenkins-server/tasks/main.yml
@@ -3,6 +3,13 @@
- name: Include OS-Specific variables
  include_vars: "{{ ansible_os_family }}.yml"
- name: Check protocol variable
  fail:
    msg: "jenkins_protocol variable is undefined or has a wrong value. Available options: http, https"
  when:
    - jenkins_protocol != "http"
    - jenkins_protocol != "https"
- name: Define jenkins_repo_url
  set_fact:
    jenkins_repo_url: "{{ __jenkins_repo_url }}"
@@ -33,7 +40,7 @@
  service: name=jenkins state=started enabled=yes
- name: Wait for Jenkins to start up before proceeding.
  shell: "curl -D - --silent --max-time 5 http://{{ jenkins_hostname }}:{{ jenkins_http_port }}{{ jenkins_url_prefix }}/cli/"
  shell: "curl -D - --silent --max-time 5 {{ jenkins_protocol }}://{{ jenkins_hostname }}:{{ jenkins_port }}{{ jenkins_url_prefix }}/cli/ -k"
  register: result
  until: (result.stdout.find("403 Forbidden") != -1) or (result.stdout.find("200 OK") != -1) and (result.stdout.find("Please wait while") == -1)
  retries: "{{ jenkins_connection_retries }}"
@@ -43,8 +50,9 @@
- name: Get the jenkins-cli jarfile from the Jenkins server.
  get_url:
    url: "http://{{ jenkins_hostname }}:{{ jenkins_http_port }}{{ jenkins_url_prefix }}/jnlpJars/jenkins-cli.jar"
    url: "{{ jenkins_protocol }}://{{ jenkins_hostname }}:{{ jenkins_port }}{{ jenkins_url_prefix }}/jnlpJars/jenkins-cli.jar"
    dest: "{{ jenkins_jar_location }}"
    validate_certs: no
  register: jarfile_get
  until: "'OK' in jarfile_get.msg or 'file already exists' in jarfile_get.msg"
  retries: 5
ansible/roles/host-jenkins-server/tasks/plugins.yml
@@ -54,8 +54,9 @@
    state: "{{ jenkins_plugins_state }}"
    timeout: "{{ jenkins_plugin_timeout }}"
    updates_expiration: "{{ jenkins_plugin_updates_expiration }}"
    url: "http://{{ jenkins_hostname }}:{{ jenkins_http_port }}{{ jenkins_url_prefix }}"
    url: "{{ jenkins_protocol }}://{{ jenkins_hostname }}:{{ jenkins_port }}{{ jenkins_url_prefix }}"
    with_dependencies: "{{ jenkins_plugins_install_dependencies }}"
    validate_certs: no
  with_items: "{{ jenkins_plugins }}"
  when: jenkins_admin_password != ""
  notify: restart jenkins
@@ -65,8 +66,9 @@
    name: "{{ item }}"
    url_token: "{{ jenkins_admin_token }}"
    updates_expiration: "{{ jenkins_plugin_updates_expiration }}"
    url: "http://{{ jenkins_hostname }}:{{ jenkins_http_port }}{{ jenkins_url_prefix }}"
    url: "{{ jenkins_protocol }}://{{ jenkins_hostname }}:{{ jenkins_port }}{{ jenkins_url_prefix }}"
    with_dependencies: "{{ jenkins_plugins_install_dependencies }}"
    validate_certs: no
  with_items: "{{ jenkins_plugins }}"
  when: jenkins_admin_token != ""
  notify: restart jenkins
ansible/roles/host-jenkins-server/tasks/self-signed-certificate.yml
New file
@@ -0,0 +1,58 @@
---
# No better way to do this due to the openssl_certificate module being unusable in RHEL 7
# See: https://github.com/ansible/ansible/issues/34054
- name: Generate CA RSA Key
  command: >
    openssl genrsa
    -out {{ jenkins_ca_key_path }}
    2048
  args:
    creates: "{{ jenkins_ca_key_path }}"
- name: Generate CA root certificate
  command: >
    openssl req
    -x509
    -new
    -nodes
    -key {{ jenkins_ca_key_path }}
    -sha256
    -days {{ jenkins_cert_expiration_days }}
    -out {{ jenkins_ca_cert_path }}
    -subj "{{ jenkins_ca_subject_value }}"
  args:
    creates: "{{ jenkins_ca_cert_path }}"
- name: Generate RSA Key
  command: >
    openssl genrsa
    -out {{ jenkins_key_path }}
    2048
  args:
    creates: "{{ jenkins_key_path }}"
- name: Generate CSR
  command: >
    openssl req
    -new
    -subj "{{ jenkins_ca_subject_value }}"
    -key {{ jenkins_key_path }}
    -out {{ jenkins_csr_path }}
  args:
    creates: "{{ jenkins_csr_path }}"
- name: Generate Self-signed Certificate
  command: >
    openssl x509
    -req
    -in {{ jenkins_csr_path }}
    -CA {{ jenkins_ca_cert_path }}
    -CAkey {{ jenkins_ca_key_path }}
    -CAcreateserial
    -out {{ jenkins_cert_path }}
    -sha256
    -days {{ jenkins_cert_expiration_days }}
  args:
    creates: "{{ jenkins_cert_path }}"
ansible/roles/host-jenkins-server/tasks/settings.yml
@@ -21,13 +21,11 @@
  service: name=jenkins state=restarted
  when: jenkins_init_prefix.changed
- name: Set HTTP port in Jenkins config.
  lineinfile:
    backrefs: yes
    dest: "{{ jenkins_init_file }}"
    regexp: '^{{ jenkins_http_port_param }}='
    line: '{{ jenkins_http_port_param }}={{ jenkins_http_port }}'
  register: jenkins_http_config
- include_tasks: setup-http.yml
  when: jenkins_protocol == "http"
- include_tasks: setup-https.yml
  when: jenkins_protocol == "https"
- name: Ensure jenkins_home {{ jenkins_home }} exists
  file:
@@ -53,4 +51,8 @@
  service: name=jenkins state=restarted
  when: (jenkins_users_config is defined and jenkins_users_config.changed) or
        (jenkins_http_config is defined and jenkins_http_config.changed) or
        (jenkins_https_config is defined and jenkins_https_config.changed) or
        (jenkins_keystore_config is defined and jenkins_keystore_config.changed) or
        (jenkins_keystore_pass_config is defined and jenkins_keystore_pass_config.changed) or
        (jenkins_https_listen_address_config is defined and jenkins_https_listen_address_config.changed) or
        (jenkins_home_config is defined and jenkins_home_config.changed)
ansible/roles/host-jenkins-server/tasks/setup-http.yml
New file
@@ -0,0 +1,9 @@
---
- name: Set HTTP port in Jenkins config.
  lineinfile:
    backrefs: yes
    dest: "{{ jenkins_init_file }}"
    regexp: '^{{ jenkins_http_port_param }}='
    line: '{{ jenkins_http_port_param }}={{ jenkins_port }}'
  register: jenkins_http_config
ansible/roles/host-jenkins-server/tasks/setup-https.yml
New file
@@ -0,0 +1,87 @@
---
#Disable HTTP by using -1 as value
- name: Set HTTP port in Jenkins config.
  lineinfile:
    backrefs: yes
    dest: "{{ jenkins_init_file }}"
    regexp: '^{{ jenkins_http_port_param }}='
    line: '{{ jenkins_http_port_param }}=-1'
  register: jenkins_http_config
- include_tasks: self-signed-certificate.yml
  when: jenkins_selfsigned_certificate
- name: Generate PKCS12 Keystore
  command: >
    openssl pkcs12 -export
    -out {{ jenkins_pkcs12_path }}
    -passout pass:{{ jenkins_keystore_pass }}
    -inkey {{ jenkins_key_path }}
    -in {{ jenkins_cert_path }}
    -certfile {{ jenkins_ca_cert_path }}
    -name {{ jenkins_target_domain }}
- name: "Ensure Keystore target directory exists"
  file:
    path: "{{ jenkins_keystore_path | dirname }}"
    owner: "{{ jenkins_keystore_owner }}"
    group: "{{ jenkins_keystore_owner }}"
    state: directory
    mode: "{{ jenkins_keystore_mode }}"
- name: "Remove keystore if it exists already"
  file:
    path: "{{ jenkins_keystore_path }}"
    state: absent
- name: "Create Keystore and insert certificate"
  command: >
    keytool -importkeystore
    -srckeystore {{ jenkins_pkcs12_path }}
    -srcstoretype pkcs12
    -srcstorepass {{ jenkins_keystore_pass }}
    -srcalias {{ jenkins_target_domain }}
    -destkeystore {{ jenkins_keystore_path }}
    -deststoretype jks
    -deststorepass {{ jenkins_keystore_pass }}
    -destalias {{ jenkins_target_domain }}
- name: "Set keystore file permissions"
  file:
    path: "{{ jenkins_keystore_path }}"
    owner: "{{ jenkins_keystore_owner }}"
    group: "{{ jenkins_keystore_owner }}"
    mode: "{{ jenkins_keystore_mode }}"
- name: Set HTTPS port in Jenkins config.
  lineinfile:
    backrefs: yes
    dest: "{{ jenkins_init_file }}"
    regexp: '^{{ jenkins_https_port_param }}='
    line: '{{ jenkins_https_port_param }}={{ jenkins_port }}'
  register: jenkins_https_config
- name: Set HTTPS Keystore in Jenkins config.
  lineinfile:
    backrefs: yes
    dest: "{{ jenkins_init_file }}"
    regexp: '^{{ jenkins_https_keystore_param }}='
    line: '{{ jenkins_https_keystore_param }}={{ jenkins_keystore_path }}'
  register: jenkins_keystore_config
- name: Set HTTPS Keystore Password in Jenkins config.
  lineinfile:
    backrefs: yes
    dest: "{{ jenkins_init_file }}"
    regexp: '^{{ jenkins_https_keystore_password_param }}='
    line: '{{ jenkins_https_keystore_password_param }}={{ jenkins_keystore_pass }}'
  register: jenkins_keystore_pass_config
- name: Set HTTPS listen address in Jenkins config.
  lineinfile:
    backrefs: yes
    dest: "{{ jenkins_init_file }}"
    regexp: '^{{ jenkins_https_listen_address_param }}='
    line: '{{ jenkins_https_listen_address_param }}={{ jenkins_https_listen_address }}'
  register: jenkins_https_listen_address_config
ansible/roles/host-jenkins-server/vars/RedHat.yml
@@ -4,4 +4,8 @@
__jenkins_pkg_url: https://pkg.jenkins.io/redhat
jenkins_init_file: /etc/sysconfig/jenkins
jenkins_http_port_param: JENKINS_PORT
jenkins_https_port_param: JENKINS_HTTPS_PORT
jenkins_https_keystore_param: JENKINS_HTTPS_KEYSTORE
jenkins_https_keystore_password_param: JENKINS_HTTPS_KEYSTORE_PASSWORD
jenkins_https_listen_address_param: JENKINS_HTTPS_LISTEN_ADDRESS
jenkins_java_options_env_var: JENKINS_JAVA_OPTIONS
ansible/roles/ocp-client-vm/README.md
@@ -36,3 +36,23 @@
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
SSH_USERNAME="xxxx"
SSH_PRIVATE_KEY="xxxx"
WORKLOAD="ocp-client-vm"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"ACTION=create"
----
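If the chosen workload implements a remove_workload.yml (several of the ocp-workload-* roles added in this commit do), removal is assumed to be the same invocation with ACTION=remove:
----
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                 -e"ANSIBLE_REPO_PATH=`pwd`" \
                 -e"ocp_workload=${WORKLOAD}" \
                 -e"ACTION=remove"
----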
ansible/roles/ocp-client-vm/files/bash_profile
@@ -7,6 +7,6 @@
# User specific environment and startup programs
PATH=$PATH:$HOME/bin:/usr/local/bin
PATH=$PATH:$HOME/bin:/usr/local/bin:/usr/local/apache-maven-3.5.4/bin
export PATH
ansible/roles/ocp-client-vm/tasks/packages.yml
@@ -1,12 +1,12 @@
#vim: set ft=ansible:
---
- name: Install Openshift Client VM packages
  yum:
    name: "{{ item }}"
    state: present
  with_items:
  - java-1.8.0-openjdk-devel
  - maven
  - docker
  - atomic-openshift-clients
  - skopeo
@@ -16,6 +16,35 @@
  - cri-o
  - cri-tools
  - podman
  tags:
  - install_openshift_client_vm_packages
- name: Get recent version of maven
  get_url:
    url: http://www-eu.apache.org/dist/maven/maven-3/3.5.4/binaries/apache-maven-3.5.4-bin.tar.gz
    dest: /root/apache-maven.tar.gz
  register: r_geturl
  retries: 5
  delay: 20
  until: r_geturl is succeeded
  tags:
  - install_openshift_client_vm_packages
- name: Unarchive file
  unarchive:
    remote_src: yes
    src: /root/apache-maven.tar.gz
    dest: /root/
  tags:
  - install_openshift_client_vm_packages
- name: Move maven to /usr/local
  command: mv /root/apache-maven-3.5.4 /usr/local
  tags:
  - install_openshift_client_vm_packages
- name: Cleanup downloaded file
  file:
    dest: /root/apache-maven.tar.gz
    state: absent
  tags:
  - install_openshift_client_vm_packages
@@ -74,6 +103,10 @@
  get_url:
    url: https://github.com/istio/istio/releases/download/1.0.0/istio-1.0.0-linux.tar.gz
    dest: /root/istio-1.0.0-linux.tar.gz
  register: r_geturl
  retries: 5
  delay: 20
  until: r_geturl is succeeded
  tags:
  - install_openshift_client_vm_packages
ansible/roles/ocp-workload-3scale-lifecycle/defaults/main.yml
New file
@@ -0,0 +1,33 @@
---
ocp_username: hchin-redhat.com
ocp_user_needs_quota: True
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
quota_requests_cpu: 5
quota_limits_cpu: 10
quota_requests_memory: '6Gi'
quota_limits_memory: '8Gi'
quota_configmaps: 15
quota_pods: 20
quota_persistentvolumeclaims: 20
quota_services: 150
quota_secrets: 150
quota_requests_storage: 50Gi
ocp_domain: "{{subdomain_base}}"
ocp_apps_domain: "apps.{{ocp_domain}}"
build_status_retries: 20
build_status_delay: 20
deploy_status_retries: 15
deploy_status_delay: 20
APICURIO_UI_ROUTE: apicurio-studio.$OCP_SUFFIX
APICURIO_API_ROUTE: apicurio-studio-api.$OCP_SUFFIX
APICURIO_WS_ROUTE: apicurio-studio-ws.$OCP_SUFFIX
MICROCKS_ROUTE_HOSTNAME: microcks.$OCP_SUFFIX
ansible/roles/ocp-workload-3scale-lifecycle/readme.adoc
New file
@@ -0,0 +1,32 @@
---
- name: Check if Red Hat SSO was already provisioned
  command: "oc get service sso -n {{shared_ns}}"
  register: sso_already_installed
  ignore_errors: true
  changed_when: false
- name: Install Red Hat SSO in shared namespace {{shared_ns}}
  block:
    - name: define sso template version
      set_fact:
        sso_version: "ose-v1.4.15"
    - name: Make sure to use the shared namespace
      shell: "oc project {{shared_ns}}"
    - name: Create service accounts and secrets for RH SSO
      shell: "oc create -f https://raw.githubusercontent.com/jboss-openshift/application-templates/{{sso_version}}/secrets/sso-app-secret.json -n {{shared_ns}}"
    - name: Add cluster view policy to sa for clustering
      shell: "oc policy add-role-to-user view system:serviceaccount:{{shared_ns}}:sso-service-account -n {{shared_ns}}"
    - name: Import RH SSO template
      shell: "oc create -f https://raw.githubusercontent.com/jboss-openshift/application-templates/{{sso_version}}/sso/sso72-mysql-persistent.json"
    - name: Create RH SSO app
      shell: "oc new-app sso72-mysql-persistent -p HTTPS_NAME=jboss -p HTTPS_PASSWORD=mykeystorepass -p SSO_ADMIN_USERNAME=keyadmin \
      -p SSO_ADMIN_PASSWORD=keypassword -n {{shared_ns}}"
    - name: Wait for Red Hat SSO to be running
      command: "oc get dc/sso -o yaml -n {{shared_ns}}"
      register: result
      until: '"availableReplicas: 1" in result.stdout'
      retries: 5
      delay: 60
      changed_when: false
  when: sso_already_installed is failed
ansible/roles/ocp-workload-3scale-lifecycle/tasks/main.yml
New file
@@ -0,0 +1,20 @@
---
- name: Running Pre Workload Tasks
  include: ./pre_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  include: ./workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  include: ./post_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  include: ./remove_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp-workload-3scale-lifecycle/tasks/post_workload.yml
New file
@@ -0,0 +1,5 @@
---
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully"
ansible/roles/ocp-workload-3scale-lifecycle/tasks/pre_workload.yml
New file
@@ -0,0 +1,32 @@
---
# - name: Add user to developer group (allowed to create projects)
#   shell: "oadm groups add-users {{item}} {{ocp_username}}"
#   register: groupadd_register
#   with_items: "{{ocp_user_groups}}"
#   when: ocp_username is defined and ocp_user_groups is defined
#
# - name: test that command worked
#   debug:
#     var: groupadd_register
#     verbosity: 2
- name: Create user Quota - clusterresourcequota
  shell: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
        --hard requests.memory="{{quota_requests_memory}}" \
        --hard limits.memory="{{quota_limits_memory}}" \
        --hard configmaps="{{quota_configmaps}}" \
        --hard pods="{{quota_pods}}" \
        --hard persistentvolumeclaims="{{quota_persistentvolumeclaims}}"  \
        --hard services="{{quota_services}}" \
        --hard secrets="{{quota_secrets}}" \
        --hard requests.storage="{{quota_requests_storage}}"
  ignore_errors: true
- name: pre_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully"
ansible/roles/ocp-workload-3scale-lifecycle/tasks/remove_workload.yml
New file
@@ -0,0 +1,41 @@
---
- name: post_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
- name: Remove any lingering tmp files
  shell: "rm -rf /tmp/{{guid}}"
- name: Remove user Projects - oc get projects
  command: "oc get projects -o json"
  register: all_projects
- name: Remove user Projects - Convert output to json
  set_fact:
    projects: "{{all_projects.stdout | from_json}}"
- name: Remove user Projects -  Debug statement
  debug:
    msg: "found user project: {{item.metadata.name}}"
    verbosity: 1
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
  with_items: "{{projects['items']}}"
- name: Remove user Projects - "oc delete project {{item.metadata.name}} "
  command: "oc delete project {{item.metadata.name}}"
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
    - item.status.phase is defined
    - item.status.phase != "Terminating"
  with_items: "{{projects['items']}}"
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully - Removed"
ansible/roles/ocp-workload-3scale-lifecycle/tasks/wait_for_build.yml
copy from ansible/roles/ocp-workload-rhte-mw-bfield-migration/tasks/wait_for_build.yml copy to ansible/roles/ocp-workload-3scale-lifecycle/tasks/wait_for_build.yml
ansible/roles/ocp-workload-3scale-lifecycle/tasks/wait_for_deploy.yml
New file
@@ -0,0 +1,25 @@
---
# Purpose:
#   This script queries OCP for replica sets that exist but are not yet ready.
#   So long as there are unready replica sets, this script continues to loop
#
# Manual test to determine the list of unready replica sets:
#  1) install jp:  https://github.com/jmespath/jp
#  2) oc get rs -o json | jp 'items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.labels.deployment'
#  Documentation pertaining to JMESPath syntax:
#    - http://jmespath.org/tutorial.html
# NOTE:  requires a Deployment label of:  deployment: <deploymentname>
- name: "Wait for the following deployments to become ready: {{pod_to_wait}}"
  command: 'oc get rs -o json -n "{{ ocp_project }}"'
  register: rs_state
  changed_when: false
  retries: "{{ deploy_status_retries }}"
  delay: "{{ deploy_status_delay }}"
  until: 'rs_state.stdout |from_json |json_query(''items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.labels."deployment"'') |intersect(pod_to_wait) |length == 0'
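A sketch of how this file is normally consumed from a workload task list; the deployment names are placeholders, and `ocp_project`, `deploy_status_retries`, and `deploy_status_delay` are assumed to come from the role defaults:
- include_tasks: ./wait_for_deploy.yml
  vars:
    pod_to_wait:
      - backend-service          # placeholder names; each deployment must carry the
      - frontend-ui              # label  deployment: <deploymentname>  noted above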
ansible/roles/ocp-workload-3scale-lifecycle/tasks/workload.yml
New file
@@ -0,0 +1,57 @@
---
- name: define OCP Project for Apicurio Studio
  set_fact:
    nodejs_ocp_project: "apicurio-{{lab_name}}-{{guid}}"
# ####### Start of Installation of Apicurio Studio  ############## #
- name: check if Decision Manager is deployed
  shell: "oc get project {{rhdm_ocp_project}}"
  register: rhdm_project_result
  ignore_errors: true
  changed_when: false
- name: "Create project {{rhdm_ocp_project}}"
  shell: "oc new-project {{rhdm_ocp_project}} --display-name={{rhdm_ocp_project}}"
  when: rhdm_project_result is failed
- name: "Label namespace"
  command: "oc label namespace {{rhdm_ocp_project}} AAD='{{guid}}'"
  when: rhdm_project_result is failed
- name: Make sure we go back to default project
  shell: "oc project default"
  when: rhdm_project_result is failed
# ####### End of Installation of Apicurio Studio  ############## #
# Apicurio Studio
# oc create -f https://raw.githubusercontent.com/gpe-mw-training/rhte-api-as-business-labs/master/templates/apicurio-template.yml -n openshift
# Microcks
# oc create -f https://raw.githubusercontent.com/gpe-mw-training/rhte-api-as-business-labs/master/templates/microcks-persistent-no-keycloak-template.yml -n openshift
### Create Lab Infra project
# oc adm new-project lab-infra --admin=opentlc-mgr --description="Lab Infrastructure project for SSO, Microcks & Apicurio Studio."
### Apicurio
# oc new-app --template=apicurio-studio --param=AUTH_ROUTE=http://$HOSTNAME_HTTP/auth --param=UI_ROUTE=$APICURIO_UI_ROUTE --param=API_ROUTE=$APICURIO_API_ROUTE --param=WS_ROUTE=$APICURIO_WS_ROUTE --param=API_JVM_MAX=2000m --param=API_MEM_LIMIT=3000Mi --param=WS_JVM_MAX=2000m --param=WS_MEM_LIMIT=2500Mi --param=UI_JVM_MAX=1800m --param=UI_MEM_LIMIT=2500Mi
### Create Lab Infra project
#oc adm new-project lab-infra --admin=opentlc-mgr --description="Lab Infrastructure project for SSO, Microcks & Apicurio Studio."
### Apicurio
#oc new-app --template=apicurio-studio --param=AUTH_ROUTE=http://$HOSTNAME_HTTP/auth --param=UI_ROUTE=$APICURIO_UI_ROUTE --param=API_ROUTE=$APICURIO_API_ROUTE --param=WS_ROUTE=$APICURIO_WS_ROUTE --param=API_JVM_MAX=2000m --param=API_MEM_LIMIT=3000Mi --param=WS_JVM_MAX=2000m --param=WS_MEM_LIMIT=2500Mi --param=UI_JVM_MAX=1800m --param=UI_MEM_LIMIT=2500Mi
# Microcks
#oc new-app --template=microcks-persistent-no-keycloak --param=APP_ROUTE_HOSTNAME=$MICROCKS_ROUTE_HOSTNAME --param=KEYCLOAK_ROUTE_HOSTNAME=$KEYCLOAK_ROUTE_HOSTNAME
# Apicurio Studio
#oc create -f https://raw.githubusercontent.com/gpe-mw-training/rhte-api-as-business-labs/master/templates/apicurio-template.yml -n openshift
# Microcks
#oc create -f https://raw.githubusercontent.com/gpe-mw-training/rhte-api-as-business-labs/master/templates/microcks-persistent-no-keycloak-template.yml -n openshift
ansible/roles/ocp-workload-3scale-multitenant/templates/manage_tenants.sh
@@ -94,7 +94,7 @@
        fi
        # 7) Create corresponding route on 3scale AMP system-developer service
        oc create route edge $orgName-developer --service=system-developer --hostname=$orgName-3scale.{{ocp_apps_domain}} -n {{ocp_project}}
        oc create route edge $orgName-developer --service=system-developer --hostname=$orgName.{{ocp_apps_domain}} -n {{ocp_project}}
        if [ $? -ne 0 ];then
            echo -en "\n *** ERROR: 6" >> $log_file
            exit 1;
ansible/roles/ocp-workload-iot-demo/defaults/main.yml
@@ -5,22 +5,22 @@
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
quota_requests_cpu: 5
quota_limits_cpu: 10
quota_requests_cpu: 20
quota_limits_cpu: 40
  
quota_requests_memory: '10Gi'
quota_limits_memory: '20Gi'
quota_requests_memory: '20Gi'
quota_limits_memory: '40Gi'
quota_configmaps: 4
quota_pods: 20
quota_persistentvolumeclaims: 5
quota_services: 15
quota_configmaps: 10
quota_pods: 25
quota_persistentvolumeclaims: 15
quota_services: 20
quota_secrets: 30
quota_requests_storage: 5Gi
openssl_self_signed:
  - name: 'apps.iiot-demo.rhiot.org'
    domains: ['*.apps.iiot-demo.rhiot.org', 'apps.iiot-demo.rhiot.org']
  - name: 'apps.iot-dev.openshift.opentlc.com'
    domains: ['*.apps.iot-dev.openshift.opentlc.com', 'apps.iot-dev.openshift.opentlc.com']
    country: 'US'
    state: 'NC'
    city: 'Raleigh'
ansible/roles/ocp-workload-iot-demo/files/dashboard-proxy.yml
New file
@@ -0,0 +1,459 @@
---
apiVersion: v1
kind: Template
metadata:
  annotations:
    description: Red Hat IoT Demo Application Template - TEMP
    iconClass: icon-shadowman
    tags: jboss,iot,kapua,kura,eclipse
  name: iot-cloudera-demo
parameters:
- name: JOLOKIA_PASSWORD
  description: The password used for the Jolokia endpoint authentication
  generate: expression
  from: '[a-zA-Z0-9]{15}'
  required: true
- name: JOLOKIA_USER
  description: The username used for the Jolokia endpoint authentication
  value: jolokia
- description: Maven mirror url. If nexus is deployed locally, use nexus url (e.g. http://nexus.ci:8081/content/groups/public/)
  displayName: Maven mirror url
  name: MAVEN_MIRROR_URL
  required: false
- description: MQ cluster password
  from: '[a-zA-Z0-9]{8}'
  generate: expression
  name: MQ_CLUSTER_PASSWORD
  required: true
- description: JGroups cluster password
  from: '[a-zA-Z0-9]{8}'
  generate: expression
  name: JGROUPS_CLUSTER_PASSWORD
  required: true
- description: Git source URI for application
  name: GIT_URI
  required: true
  value: https://github.com/redhat-iot/cloudera-iot-demo
- description: Git branch/tag reference
  name: GIT_REF
  value: master
- description: MQ Broker username
  name: BROKER_USERNAME
  value: "demo-gw2"
  required: true
- description: MQ Broker password
  name: BROKER_PASSWORD
  value: "RedHat123!@#"
  required: true
- description: OpenShift load-balanced MQ Broker hostname
  name: BROKER_HOSTNAME
  value: "ec-broker-ws"
  required: true
- description: OpenShift load-balanced MQ Broker port
  name: BROKER_PORT
  value: "31883"
  required: true
- description: OpenShift load-balanced MQ Broker websockets port
  name: BROKER_WS_PORT
  value: "80"
  required: true
- description: Name of datastore proxy service
  name: DASHBOARD_PROXY_SERVICE
  value: "dashboard-proxy"
  required: true
- description: API Key for Google Maps
  name: GOOGLE_MAPS_API_KEY
  value: "AIzaSyDpDtvyzzdXDYk5nt6CuOtjxmvBvwGq5D4"
  required: false
- description: Datastore username
  name: DATASTORE_USERNAME
  value: "rhiot"
  required: true
- description: Datastore password
  name: DATASTORE_PASSWORD
  value: "redhatiot1!"
  required: true
- description: Datastore service hostname
  name: DATASTORE_HOST
  value: "datastore-hotrod"
  required: true
- description: Datastore port
  name: DATASTORE_PORT
  value: "11333"
  required: true
- description: Datastore password
  name: DATASTORE_CACHE
  value: "customer,facility,lines,machines,runs,calendar"
  required: true
- description: Datastore password
  name: ADDITIONAL_SENSOR_IDS
  required: false
- description: Datastore password
  name: CONTROL_TOPIC_PREFIX
  value: "Red-Hat/cldr-demo-gw/cloudera-demo"
  required: true
- description: Displayed title of web dashboard
  name: DASHBOARD_WEB_TITLE
  value: "IoT Industry 4.0 Demo"
  required: true
- description: Cloudera Impala service hostname
  name: IMPALA_HOST
  value: "34.211.218.209"
  required: true
- description: Cloudera Impala service port
  name: IMPALA_PORT
  value: "21050"
  required: true
labels:
  demo: cloudera
  app: dashboard
objects:
# UI and JDG
#
# Image Streams
#
- apiVersion: v1
  kind: ImageStream
  metadata:
    name: dashboard-proxy
  spec:
    tags:
      - name: latest
        from:
          kind: DockerImage
          name: 'quay.io/redhat-iot/dashboard-proxy:latest'
#
# Build Configs
#
#- apiVersion: v1
#  kind: BuildConfig
#  metadata:
#    name: dashboard
#    labels:
#      application: dashboard
#  spec:
#    output:
#      to:
#        kind: ImageStreamTag
#        name: dashboard:latest
#    source:
#      contextDir: dashboard
#      git:
#        ref: ${GIT_REF}
#        uri: ${GIT_URI}
#      type: Git
#    strategy:
#      sourceStrategy:
#        from:
#          kind: ImageStreamTag
#          name: nodejs:4
#          namespace: openshift
#      type: Source
#    triggers:
#    - imageChange: {}
#      type: ImageChange
#    - type: ConfigChange
#
#- apiVersion: v1
#  kind: BuildConfig
#  metadata:
#    name: dashboard-proxy
#    labels:
#      application: dashboard-proxy
#  spec:
#    output:
#      to:
#        kind: ImageStreamTag
#        name: dashboard-proxy:latest
#    source:
#      contextDir: dashboard-proxy
#      git:
#        ref: ${GIT_REF}
#        uri: ${GIT_URI}
#      type: Git
#    strategy:
#      sourceStrategy:
#        env:
#        - name: MAVEN_MIRROR_URL
#          value: ${MAVEN_MIRROR_URL}
#        from:
#          kind: ImageStreamTag
#          name: wildfly:10.1
#          namespace: openshift
#      type: Source
#    triggers:
#    - type: ConfigChange
#    - imageChange: {}
#      type: ImageChange
#
# Angular Dashboard deployment
#
#
# Datastore (Infinispan) Deployment Config
#
- apiVersion: v1
  kind: DeploymentConfig
  metadata:
    creationTimestamp: null
    generation: 1
    labels:
      app: datastore
      application: datastore
    name: datastore
  spec:
    replicas: 1
    selector:
      deploymentConfig: datastore
    strategy:
      recreateParams:
        timeoutSeconds: 600
      type: Recreate
    template:
      metadata:
        labels:
          application: datastore
          deploymentConfig: datastore
        name: datastore
      spec:
        containers:
        - env:
          - name: USERNAME
            value: ${DATASTORE_USERNAME}
          - name: PASSWORD
            value: ${DATASTORE_PASSWORD}
          - name: OPENSHIFT_KUBE_PING_LABELS
            value: application=datastore
          - name: OPENSHIFT_KUBE_PING_NAMESPACE
            valueFrom:
              fieldRef:
                apiVersion: v1
                fieldPath: metadata.namespace
          - name: INFINISPAN_CONNECTORS
            value: hotrod,rest
          - name: CACHE_NAMES
            value: ${DATASTORE_CACHE}
          - name: DATAVIRT_CACHE_NAMES
          - name: ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH
          - name: HOTROD_SERVICE_NAME
            value: ${DATASTORE_HOST}
          - name: MEMCACHED_CACHE
            value: default
          - name: REST_SECURITY_DOMAIN
          - name: JGROUPS_CLUSTER_PASSWORD
            value: ${JGROUPS_CLUSTER_PASSWORD}
          image: registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift:1.4
          imagePullPolicy: IfNotPresent
          name: datastore
          ports:
          - containerPort: 8778
            name: jolokia
            protocol: TCP
          - containerPort: 8080
            name: http
            protocol: TCP
          - containerPort: 8888
            name: ping
            protocol: TCP
          - containerPort: 11211
            name: memcached
            protocol: TCP
          - containerPort: 11222
            name: hotrod
            protocol: TCP
          - containerPort: 11333
            name: hotrod3
            protocol: TCP
          terminationMessagePath: /dev/termination-log
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        securityContext: {}
        terminationGracePeriodSeconds: 60
    triggers:
    - imageChangeParams:
        automatic: true
        containerNames:
        - datastore
        from:
          kind: ImageStreamTag
          name: jboss-datagrid65-openshift:1.4
          namespace: openshift
      type: ImageChange
    - type: ConfigChange
  status: {}
#
# Dashboard Proxy Deployment Config
#
- apiVersion: v1
  kind: DeploymentConfig
  metadata:
    generation: 1
    labels:
      app: dashboard-proxy
      application: dashboard-proxy
    name: dashboard-proxy
  spec:
    replicas: 1
    selector:
      deploymentConfig: dashboard-proxy
    strategy:
      recreateParams:
        timeoutSeconds: 600
      resources: {}
      type: Recreate
    template:
      metadata:
        creationTimestamp: null
        labels:
          application: dashboard-proxy
          deploymentConfig: dashboard-proxy
        name: dashboard-proxy
      spec:
        containers:
        - env:
          - name: OPENSHIFT_KUBE_PING_LABELS
            value: application=dashboard-proxy
          - name: OPENSHIFT_KUBE_PING_NAMESPACE
            valueFrom:
              fieldRef:
                apiVersion: v1
                fieldPath: metadata.namespace
          - name: MQ_CLUSTER_PASSWORD
            value: ${MQ_CLUSTER_PASSWORD}
          - name: JGROUPS_CLUSTER_PASSWORD
            value: ${JGROUPS_CLUSTER_PASSWORD}
          - name: AUTO_DEPLOY_EXPLODED
            value: "false"
          - name: DATASTORE_HOST
            value: ${DATASTORE_HOST}
          - name: DATASTORE_PORT
            value: ${DATASTORE_PORT}
          - name: DATASTORE_CACHE
            value: ${DATASTORE_CACHE}
          - name: BROKER_HOSTNAME
            value: ${BROKER_HOSTNAME}
          - name: BROKER_PORT
            value: ${BROKER_PORT}
          - name: BROKER_USERNAME
            value: ${BROKER_USERNAME}
          - name: BROKER_PASSWORD
            value: ${BROKER_PASSWORD}
          - name: ADDITIONAL_SENSOR_IDS
            value: ${ADDITIONAL_SENSOR_IDS}
          - name: IMPALA_HOST
            value: ${IMPALA_HOST}
          - name: IMPALA_PORT
            value: ${IMPALA_PORT}
          image: quay.io/redhat-iot/dashboard-proxy:latest
          imagePullPolicy: IfNotPresent
          name: dashboard-proxy
          ports:
          - containerPort: 8778
            name: jolokia
            protocol: TCP
          - containerPort: 8080
            name: http
            protocol: TCP
          - containerPort: 8888
            name: ping
            protocol: TCP
          resources: {}
          livenessProbe:
            failureThreshold: 15
            httpGet:
              path: /api/utils/health
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 180
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          readinessProbe:
            failureThreshold: 15
            httpGet:
              path: /api/utils/health
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 180
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          terminationMessagePath: /dev/termination-log
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        securityContext: {}
        terminationGracePeriodSeconds: 3
        imagePullSecrets:
        - name: redhat-iot-rhpds-pull-secret
    triggers:
    - imageChangeParams:
        automatic: true
        containerNames:
        - dashboard-proxy
        from:
          kind: ImageStreamTag
          name: dashboard-proxy:latest
      type: ImageChange
    - type: ConfigChange
  status: {}
#
# Services
#
- apiVersion: v1
  kind: Service
  metadata:
    labels:
      app: datastore
    name: ${DATASTORE_HOST}
  spec:
    ports:
    - port: 11333
      protocol: TCP
      targetPort: 11333
    selector:
      deploymentConfig: datastore
    sessionAffinity: None
    type: ClusterIP
- apiVersion: v1
  kind: Service
  metadata:
    labels:
      app: dashboard-proxy
    name: dashboard-proxy
  spec:
    ports:
    - name: 8080-tcp
      port: 8080
      protocol: TCP
      targetPort: 8080
    selector:
      deploymentConfig: dashboard-proxy
    sessionAffinity: None
    type: ClusterIP
#
# Routes
#
- apiVersion: v1
  kind: Route
  metadata:
    labels:
      application: dashboard-proxy
    name: dashboard-proxy
  spec:
    port:
      targetPort: 8080-tcp
    to:
      kind: Service
      name: dashboard-proxy
ansible/roles/ocp-workload-iot-demo/files/dashboard.yml
New file
@@ -0,0 +1,331 @@
---
apiVersion: v1
kind: Template
metadata:
  annotations:
    description: Red Hat IoT Demo Application Template - TEMP
    iconClass: icon-shadowman
    tags: jboss,iot,kapua,kura,eclipse
  name: dashboard
parameters:
- name: JOLOKIA_PASSWORD
  description: The password used for the Jolokia endpoint authentication
  generate: expression
  from: '[a-zA-Z0-9]{15}'
  required: true
- name: JOLOKIA_USER
  description: The username used for the Jolokia endpoint authentication
  value: jolokia
- description: Maven mirror url. If nexus is deployed locally, use nexus url (e.g. http://nexus.ci:8081/content/groups/public/)
  displayName: Maven mirror url
  name: MAVEN_MIRROR_URL
  required: false
- description: MQ cluster password
  from: '[a-zA-Z0-9]{8}'
  generate: expression
  name: MQ_CLUSTER_PASSWORD
  required: true
- description: JGroups cluster password
  from: '[a-zA-Z0-9]{8}'
  generate: expression
  name: JGROUPS_CLUSTER_PASSWORD
  required: true
- description: Git source URI for application
  name: GIT_URI
  required: true
  value: https://github.com/redhat-iot/cloudera-iot-demo
- description: Git branch/tag reference
  name: GIT_REF
  value: master
- description: MQ Broker username
  name: BROKER_USERNAME
  value: "demo-gw2"
  required: true
- description: MQ Broker password
  name: BROKER_PASSWORD
  value: "RedHat123!@#"
  required: true
- description: OpenShift load-balanced MQ Broker hostname
  name: BROKER_HOSTNAME
  value: "ec-broker-ws"
  required: true
- description: OpenShift load-balanced MQ Broker port
  name: BROKER_PORT
  value: "31883"
  required: true
- description: OpenShift load-balanced MQ Broker websockets port
  name: BROKER_WS_PORT
  value: "80"
  required: true
- description: Name of datastore proxy service
  name: DASHBOARD_PROXY_SERVICE
  value: "dashboard-proxy"
  required: true
- description: Name of datastore proxy service
  name: DASHBOARD_PROXY_HOSTNAME
  value: "dashboard-proxy"
  required: true
- description: API Key for Google Maps
  name: GOOGLE_MAPS_API_KEY
  value: "AIzaSyDpDtvyzzdXDYk5nt6CuOtjxmvBvwGq5D4"
  required: false
- description: Datastore username
  name: DATASTORE_USERNAME
  value: "rhiot"
  required: true
- description: Datastore password
  name: DATASTORE_PASSWORD
  value: "redhatiot1!"
  required: true
- description: Datastore service hostname
  name: DATASTORE_HOST
  value: "datastore-hotrod"
  required: true
- description: Datastore port
  name: DATASTORE_PORT
  value: "11333"
  required: true
- description: Datastore password
  name: DATASTORE_CACHE
  value: "customer,facility,lines,machines,runs,calendar"
  required: true
- description: Datastore password
  name: ADDITIONAL_SENSOR_IDS
  required: false
- description: Datastore password
  name: CONTROL_TOPIC_PREFIX
  value: "Red-Hat/cldr-demo-gw/cloudera-demo"
  required: true
- description: Displayed title of web dashboard
  name: DASHBOARD_WEB_TITLE
  value: "IoT Industry 4.0 Demo"
  required: true
- description: Cloudera Impala service hostname
  name: IMPALA_HOST
  value: "34.211.218.209"
  required: true
- description: Cloudera Impala service port
  name: IMPALA_PORT
  value: "21050"
  required: true
labels:
  demo: cloudera
  app: dashboard
objects:
# UI and JDG
#
# Image Streams
#
- apiVersion: v1
  kind: ImageStream
  metadata:
    name: dashboard
  spec:
    tags:
      - name: latest
        from:
          kind: DockerImage
          name: 'quay.io/redhat-iot/dashboard:latest'
#
# Build Configs
#
#- apiVersion: v1
#  kind: BuildConfig
#  metadata:
#    name: dashboard
#    labels:
#      application: dashboard
#  spec:
#    output:
#      to:
#        kind: ImageStreamTag
#        name: dashboard:latest
#    source:
#      contextDir: dashboard
#      git:
#        ref: ${GIT_REF}
#        uri: ${GIT_URI}
#      type: Git
#    strategy:
#      sourceStrategy:
#        from:
#          kind: ImageStreamTag
#          name: nodejs:4
#          namespace: openshift
#      type: Source
#    triggers:
#    - imageChange: {}
#      type: ImageChange
#    - type: ConfigChange
#
#- apiVersion: v1
#  kind: BuildConfig
#  metadata:
#    name: dashboard-proxy
#    labels:
#      application: dashboard-proxy
#  spec:
#    output:
#      to:
#        kind: ImageStreamTag
#        name: dashboard-proxy:latest
#    source:
#      contextDir: dashboard-proxy
#      git:
#        ref: ${GIT_REF}
#        uri: ${GIT_URI}
#      type: Git
#    strategy:
#      sourceStrategy:
#        env:
#        - name: MAVEN_MIRROR_URL
#          value: ${MAVEN_MIRROR_URL}
#        from:
#          kind: ImageStreamTag
#          name: wildfly:10.1
#          namespace: openshift
#      type: Source
#    triggers:
#    - type: ConfigChange
#    - imageChange: {}
#      type: ImageChange
#
# Angular Dashboard deployment
#
- apiVersion: v1
  kind: DeploymentConfig
  metadata:
    labels:
      app: dashboard
    name: dashboard
  spec:
    replicas: 1
    selector:
      deploymentconfig: dashboard
    strategy:
      resources: {}
      rollingParams:
        intervalSeconds: 1
        maxSurge: 25%
        maxUnavailable: 25%
        timeoutSeconds: 600
        updatePeriodSeconds: 1
      type: Rolling
    template:
      metadata:
        creationTimestamp: null
        labels:
          app: dashboard
          deploymentconfig: dashboard
      spec:
        containers:
        - env:
          - name: BROKER_HOSTNAME
            value: ${BROKER_HOSTNAME}
          - name: BROKER_PORT
            value: ${BROKER_PORT}
          - name: BROKER_WS_PORT
            value: ${BROKER_WS_PORT}
          - name: BROKER_USERNAME
            value: ${BROKER_USERNAME}
          - name: BROKER_PASSWORD
            value: ${BROKER_PASSWORD}
          - name: DASHBOARD_PROXY_SERVICE
            value: ${DASHBOARD_PROXY_SERVICE}
          - name: DASHBOARD_PROXY_HOSTNAME
            value: ${DASHBOARD_PROXY_HOSTNAME}
          - name: GOOGLE_MAPS_API_KEY
            value: ${GOOGLE_MAPS_API_KEY}
          - name: DASHBOARD_WEB_TITLE
            value: ${DASHBOARD_WEB_TITLE}
          - name: CONTROL_TOPIC_PREFIX
            value: ${CONTROL_TOPIC_PREFIX}
          image: dashboard
          imagePullPolicy: Always
          name: dashboard
          ports:
          - containerPort: 8080
            protocol: TCP
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 120
            periodSeconds: 5
            successThreshold: 1
            timeoutSeconds: 5
          readinessProbe:
            failureThreshold: 5
            httpGet:
              path: /
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 15
            periodSeconds: 5
            successThreshold: 1
            timeoutSeconds: 1
          terminationMessagePath: /dev/termination-log
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        securityContext: {}
        terminationGracePeriodSeconds: 10
        imagePullSecrets:
        - name: redhat-iot-rhpds-pull-secret
    triggers:
    - imageChangeParams:
        automatic: true
        containerNames:
        - dashboard
        from:
          kind: ImageStreamTag
          name: dashboard:latest
      type: ImageChange
    - type: ConfigChange
#
# Services
#
- apiVersion: v1
  kind: Service
  metadata:
    labels:
      app: dashboard
    name: dashboard
  spec:
    ports:
    - name: 8080-tcp
      port: 8080
      protocol: TCP
      targetPort: 8080
    selector:
      deploymentconfig: dashboard
    sessionAffinity: None
    type: ClusterIP
#
# Routes
#
- apiVersion: v1
  kind: Route
  metadata:
    labels:
      application: dashboard
    name: dashboard
  spec:
    port:
      targetPort: 8080-tcp
    to:
      kind: Service
      name: dashboard
ansible/roles/ocp-workload-iot-demo/tasks/workload.yml
@@ -3,6 +3,46 @@
  set_fact:
    ocp_project: "iot-demo-{{guid}}"
- name: Check for open MQTT port
  block:
    - name: Wait for port and loop
      wait_for:
        host: ec-broker-mqtt.{{ocp_project}}.{{ocp_apps_domain}}
        port: "{{ item }}"
        state: started
        delay: 0
        connect_timeout: 2
        timeout: 3
      loop: "{{ range(31883, 31992, 1)|list }}"
      when: (portcheck is undefined) or (portcheck.failed == false)
      register: portcheck
  rescue:
    - set_fact:
        mqtt_port: "{{ portcheck.results|selectattr('failed', 'defined')|selectattr('failed')|map(attribute='item')|first}}"
    - debug: msg="MQTT Port Assignment is {{ mqtt_port }}"
    # Use to force fail on rescue since we short circuit the failure by handling in rescue
    #- command: /bin/false
- name: Check for open MQTTS port
  block:
    - name: Wait for port and loop
      wait_for:
        host: ec-broker-mqtt.{{ocp_project}}.{{ocp_apps_domain}}
        port: "{{ item }}"
        state: started
        delay: 0
        connect_timeout: 2
        timeout: 3
      loop: "{{ range(31993, 32102, 1)|list }}"
      when: (portcheck2 is undefined) or (portcheck2.failed == false)
      register: portcheck2
  rescue:
    - set_fact:
        mqtts_port: "{{ portcheck2.results|selectattr('failed', 'defined')|selectattr('failed')|map(attribute='item')|first}}"
    - debug: msg="MQTTS Port Assignment is {{ mqtts_port }}"
    # Use to force fail on rescue since we short circuit the failure by handling in rescue
    #- command: /bin/false
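Both blocks lean on the same trick: `wait_for` with `state: started` fails when nothing answers on a port, so the first failing iteration marks a free node port and the rescue section records it. A stripped-down sketch of the pattern, with a hypothetical host and a small port range:
- name: Find the first unused port in a range (a failed probe means the port is free)
  block:
    - name: Probe candidate ports until one does not answer
      wait_for:
        host: example-service.apps.example.com   # hypothetical endpoint
        port: "{{ item }}"
        state: started
        delay: 0
        connect_timeout: 2
        timeout: 3
      loop: "{{ range(30000, 30010) | list }}"
      when: (probe is undefined) or (probe.failed == false)
      register: probe
  rescue:
    - name: Record the first free port found by the failing probe
      set_fact:
        free_port: "{{ probe.results | selectattr('failed', 'defined') | selectattr('failed') | map(attribute='item') | first }}"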
- name: Create project for IoT Demo
  shell: |
         oc new-project {{ocp_project}} \
@@ -18,6 +58,9 @@
- name: Add Quay Image Pull Secret for Eurotech Images
  shell: "oc create -f /tmp/{{guid}}/redhat-iot-rhpds-secret.yml -n {{ocp_project}}"
- name: Add Quay Image Pull Secret for Eurotech Images
  shell: "oc secrets link default redhat-iot-rhpds-pull-secret --for=pull -n {{ocp_project}}"
- name: Add MariaDB Secret
  shell: "oc create secret generic ec-db --from-literal=name=ecdb --from-literal=username=redhat --from-literal=password=RedHat123 -n {{ocp_project}}"
@@ -52,7 +95,7 @@
  shell: "oc new-app -e 'ES_JAVA_OPTS=-Des.cluster.name=kapua-datastore -Des.http.cors.enabled=true -Des.http.cors.allow-origin=* -Xms256m -Xmx256m' elasticsearch:5.4 -n {{ocp_project}}"
- name: Deploy EC Broker
  shell: "oc new-app -f /tmp/{{guid}}/broker.yml -p IMAGE_VERSION=5.0.0 -p NAMESPACE={{ocp_project}} -p EC_SECRET_DB=ec-db -p DISABLE_SSL=true -n {{ocp_project}}"
  shell: "oc new-app -f /tmp/{{guid}}/broker.yml -p 'MQTT_NODE_PORT={{mqtt_port}}' -p 'MQTTS_NODE_PORT={{mqtts_port}}' -p IMAGE_VERSION=5.0.0 -p NAMESPACE={{ocp_project}} -p EC_SECRET_DB=ec-db -p DISABLE_SSL=true -n {{ocp_project}}"
- name: Deploy EC Console
  shell: "oc new-app -f /tmp/{{guid}}/console.yml -p 'IMAGE_VERSION=5.0.0' -p 'NAMESPACE={{ocp_project}}' -p 'EC_SECRET_DB=ec-db' -n {{ocp_project}}"
@@ -60,13 +103,38 @@
- name: Deploy EC API
  shell: "oc new-app -f /tmp/{{guid}}/api.yml -p 'IMAGE_VERSION=5.0.0' -p 'NAMESPACE={{ocp_project}}' -p 'EC_SECRET_DB=ec-db' -n {{ocp_project}}"
- name: Print Dashboard URL
  debug:
    msg: "user.info: Dashboard: http://dashboard-{{ocp_project}}.{{ocp_apps_domain}}"
- name: Print Console URL
  debug:
    msg: "EC Console is running at http://ec-console-{{ocp_project}}.{{ocp_apps_domain}}"
    msg: "user.info: EC Console: http://ec-console-{{ocp_project}}.{{ocp_apps_domain}}"
- name: Print MQTT Broker URL
  debug:
    msg: "EC Broker is running at: mqtt://ec-broker-mqtt.{{ocp_project}}.{{ocp_apps_domain}}:1883"
    msg: "user.info: EC Broker MQTT: mqtt://ec-broker-mqtt.{{ocp_project}}.{{ocp_apps_domain}}:{{mqtt_port}}"
# Not used yet, disable until it is relevant
#- name: Print MQTTS Broker URL
#  debug:
#    msg: "user.info: EC Broker MQTTS: mqtt://ec-broker-mqtts.{{ocp_project}}.{{ocp_apps_domain}}:{{mqtts_port}}"
- name: Deploy Dashboard Proxy and JDG
  shell: "oc new-app -f /tmp/{{guid}}/dashboard-proxy.yml -p 'BROKER_USERNAME=ec-sys' -p 'BROKER_PASSWORD=ec-password'"
- name: Deploy Dashboard App
  shell: "oc new-app -f /tmp/{{guid}}/dashboard.yml -p 'BROKER_HOSTNAME=ec-broker-ws-{{ocp_project}}' -p 'BROKER_PASSWORD=ec-password' -p 'BROKER_USERNAME=ec-sys' -p 'DASHBOARD_PROXY_HOSTNAME=dashboard-proxy-{{ocp_project}}'"
- name: Deploy virtual gateway
  shell: "oc new-app quay.io/redhat-iot/esf:5.2.0-DEMO"
- name: Print ESF Virtual Gateway URL
  debug:
    msg: "user.info: ESF Virtual Gateway: http://esf-{{ocp_project}}.{{ocp_apps_domain}}"
- name: Expose virtual gateway web ui
  shell: "oc expose svc/esf"
- name: Annotate the completed project as requested by user
  shell: "oc annotate namespace {{ocp_project}} openshift.io/requester={{ocp_username}} --overwrite"
ansible/roles/ocp-workload-rh-sso/defaults/main.yml
New file
@@ -0,0 +1,28 @@
---
ocp_username: hchin-redhat.com
ocp_user_needs_quota: True
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
quota_requests_cpu: 5
quota_limits_cpu: 10
quota_requests_memory: '6Gi'
quota_limits_memory: '8Gi'
quota_configmaps: 15
quota_pods: 20
quota_persistentvolumeclaims: 20
quota_services: 150
quota_secrets: 150
quota_requests_storage: 50Gi
ocp_domain: "{{subdomain_base}}"
ocp_apps_domain: "apps.{{ocp_domain}}"
build_status_retries: 20
build_status_delay: 20
deploy_status_retries: 15
deploy_status_delay: 20
ansible/roles/ocp-workload-rh-sso/readme.adoc
New file
@@ -0,0 +1,32 @@
---
- name: Check if Red Hat SSO was already provisioned
  command: "oc get service sso -n {{shared_ns}}"
  register: sso_already_installed
  ignore_errors: true
  changed_when: false
- name: Install Red Hat SSO in shared namespace {{shared_ns}}
  block:
    - name: define sso template version
      set_fact:
        sso_version: "ose-v1.4.15"
    - name: Make sure we use the shared namespace
      shell: "oc project {{shared_ns}}"
    - name: Create service accounts and secrets for RH SSO
      shell: "oc create -f https://raw.githubusercontent.com/jboss-openshift/application-templates/{{sso_version}}/secrets/sso-app-secret.json -n {{shared_ns}}"
    - name: Add cluster view policy to sa for clustering
      shell: "oc policy add-role-to-user view system:serviceaccount:{{shared_ns}}:sso-service-account -n {{shared_ns}}"
    - name: Import RH SSO template
      shell: "oc create -f https://raw.githubusercontent.com/jboss-openshift/application-templates/{{sso_version}}/sso/sso72-mysql-persistent.json"
    - name: Create RH SSO app
      shell: "oc new-app sso72-mysql-persistent -p HTTPS_NAME=jboss -p HTTPS_PASSWORD=mykeystorepass -p SSO_ADMIN_USERNAME=keyadmin \
      -p SSO_ADMIN_PASSWORD=keypassword -n {{shared_ns}}"
    - name: Wait for Red Hat SSO to be running
      command: "oc get dc/sso -o yaml -n {{shared_ns}}"
      register: result
      until: '"availableReplicas: 1" in result.stdout'
      retries: 5
      delay: 60
      changed_when: false
  when: sso_already_installed is failed
ansible/roles/ocp-workload-rh-sso/tasks/main.yml
New file
@@ -0,0 +1,20 @@
---
- name: Running Pre Workload Tasks
  include: ./pre_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  include: ./workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  include: ./post_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  include: ./remove_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp-workload-rh-sso/tasks/post_workload.yml
New file
@@ -0,0 +1,5 @@
---
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully"
ansible/roles/ocp-workload-rh-sso/tasks/pre_workload.yml
New file
@@ -0,0 +1,32 @@
---
# - name: Add user to developer group (allowed to create projects)
#   shell: "oadm groups add-users {{item}} {{ocp_username}}"
#   register: groupadd_register
#   with_items: "{{ocp_user_groups}}"
#   when: ocp_username is defined and ocp_user_groups is defined
#
# - name: test that command worked
#   debug:
#     var: groupadd_register
#     verbosity: 2
- name: Create user Quota - clusterresourcequota
  shell: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
        --hard requests.memory="{{quota_requests_memory}}" \
        --hard limits.memory="{{quota_limits_memory}}" \
        --hard configmaps="{{quota_configmaps}}" \
        --hard pods="{{quota_pods}}" \
        --hard persistentvolumeclaims="{{quota_persistentvolumeclaims}}"  \
        --hard services="{{quota_services}}" \
        --hard secrets="{{quota_secrets}}" \
        --hard requests.storage="{{quota_requests_storage}}"
  ignore_errors: true
- name: pre_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully"
ansible/roles/ocp-workload-rh-sso/tasks/remove_workload.yml
New file
@@ -0,0 +1,31 @@
---
- name: post_workload Tasks Complete
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
- name: Remove any lingering tmp files
  shell: "rm -rf /tmp/{{guid}}"
- name: Remove user Project
  shell: "oc delete project istio-system"
  ignore_errors: true
#- name: clean up residual istio stuff
#  template:
#    src: templates/istio_cleanup.sh
#    dest: /tmp/istio_cleanup.sh
#    mode: 0755
#- shell:  "/tmp/istio_cleanup.sh"
- name: Delete Istio Control Plane
  shell:  "oc delete -f /tmp/istio-demo-1.0.0.yaml"
  ignore_errors: true
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully - Removed"
ansible/roles/ocp-workload-rh-sso/tasks/wait_for_build.yml
ansible/roles/ocp-workload-rh-sso/tasks/wait_for_deploy.yml
New file
@@ -0,0 +1,25 @@
---
# Purpose:
#   This script queries OCP for replica sets that exist but are not yet ready.
#   So long as there are unready replica sets, this script continues to loop
#
# Manual test to determine the list of unready replica sets:
#  1) install jp:  https://github.com/jmespath/jp
#  2) oc get rs -o json | jp 'items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.labels.deployment'
#  Documentation pertaining to JMESPath syntax:
#    - http://jmespath.org/tutorial.html
# NOTE:  requires a Deployment label of:  deployment: <deploymentname>
- name: "Wait for the following deployments to become ready: {{pod_to_wait}}"
  command: 'oc get rs -o json -n "{{ ocp_project }}"'
  register: rs_state
  changed_when: false
  retries: "{{ deploy_status_retries }}"
  delay: "{{ deploy_status_delay }}"
  until: 'rs_state.stdout |from_json |json_query(''items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.labels."deployment"'') |intersect(pod_to_wait) |length == 0'
ansible/roles/ocp-workload-rh-sso/tasks/workload.yml
New file
@@ -0,0 +1,39 @@
---
# Red Hat SSO 7.2
#oc create -f https://raw.githubusercontent.com/gpe-mw-training/rhte-api-as-business-labs/master/templates/sso72-x509-mysql-persistent.json -n openshift
# Red Hat SSO 7.2 Ephemeral
#oc create -f https://raw.githubusercontent.com/pszuster/3ScaleTD/master/templates/sso72-x509-https.json
- name: Check if Red Hat SSO was already provisioned
  command: "oc get service sso -n {{shared_ns}}"
  register: sso_already_installed
  ignore_errors: true
  changed_when: false
- name: Install Red Hat SSO in shared namespace {{shared_ns}}
  block:
    - name: define sso template version
      set_fact:
        sso_version: "ose-v1.4.15"
    - name: Make sure we use the shared namespace
      shell: "oc project {{shared_ns}}"
    - name: Create service accounts and secrets for RH SSO
      shell: "oc create -f https://raw.githubusercontent.com/jboss-openshift/application-templates/{{sso_version}}/secrets/sso-app-secret.json -n {{shared_ns}}"
    - name: Add cluster view policy to sa for clustering
      shell: "oc policy add-role-to-user view system:serviceaccount:{{shared_ns}}:sso-service-account -n {{shared_ns}}"
    - name: Import RH SSO template
      shell: "oc create -f https://raw.githubusercontent.com/jboss-openshift/application-templates/{{sso_version}}/sso/sso72-mysql-persistent.json"
    - name: Create RH SSO app
      shell: "oc new-app sso72-mysql-persistent -p HTTPS_NAME=jboss -p HTTPS_PASSWORD=mykeystorepass -p SSO_ADMIN_USERNAME=keyadmin \
      -p SSO_ADMIN_PASSWORD=keypassword -n {{shared_ns}}"
    - name: Wait for Red Hat SSO to be running
      command: "oc get dc/sso -o yaml -n {{shared_ns}}"
      register: result
      until: '"availableReplicas: 1" in result.stdout'
      retries: 5
      delay: 60
      changed_when: false
  when: sso_already_installed is failed
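The `until` test above greps the raw DeploymentConfig YAML for `availableReplicas: 1`; an alternative sketch that waits on the rollout itself, assuming the installed `oc` client supports `oc rollout status` for DeploymentConfigs:
- name: Wait for the sso rollout to finish (alternative readiness check)
  command: "oc rollout status dc/sso -n {{shared_ns}}"
  changed_when: false
  when: sso_already_installed is failed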
ansible/roles/ocp-workload-rhte-mw-api-biz/defaults/main.yml
@@ -1,6 +1,6 @@
---
become_override: false
ocp_username: jbride-redhat.com
ocp_username: hchin-redhat.com
ocp_user_needs_quota: True
ocp_user_groups:
@@ -28,4 +28,29 @@
deploy_status_retries: 15
deploy_status_delay: 20
rhdm_template_yml: "https://raw.githubusercontent.com/jboss-container-images/rhdm-7-openshift-image/7.0.x/rhdm70-image-streams.yaml"
# userId / password for AMQ used in syndesisio
mq_username: ignite
mq_password: ignite
##########          supported fuse ignite       #################
SYNDESIS_REGISTRY: registry.access.redhat.com
SYNDESIS_REGISTRY_WEB_SUBCONTEXT: fuse7
product_name: fuse-ignite
# Corresponds to GPTE fork of community syndesis master branch
#   This branch implements the following:
#       1)  DCs in a paused state
ignite_template_name: "{{ product_name }}"
syndesisio_url: https://raw.githubusercontent.com/gpe-mw-training/fuse-ignite-install/gpte-1.3
fuse_ignite_is_yaml: "{{syndesisio_url}}/resources/fuse-ignite-image-streams.yml"
syndesisio_sa_yml:  "{{syndesisio_url}}/resources/serviceaccount-as-oauthclient-restricted.yml"
syndesisio_template_yml: "{{syndesisio_url}}/resources/fuse-ignite-ocp.yml"
#################################################################
amq_template_name: amq63-basic
amq_template_yml: "{{syndesisio_url}}/install/support/syndesis-amq.yml"
lab_name: rhte-mw-api-biz
ansible/roles/ocp-workload-rhte-mw-api-biz/tasks/workload.yml
@@ -1,38 +1,200 @@
---
- name: define ocp_project
- name: define OCP Project for Decision Manager
  set_fact:
    ocp_project: "{{lab_name}}-{{guid}}"
    rhdm_ocp_project: "rhdm-{{lab_name}}-{{guid}}"
- name: "Create project for workload {{ocp_project}}"
  shell: "oc new-project {{ocp_project}}"
- name: define OCP Project for RH-SSO
  set_fact:
    rhsso_ocp_project: "rhsso-{{lab_name}}-{{guid}}"
- name: define OCP Project for Fuse Online
  set_fact:
    fuseonline_ocp_project: "fuseonline-{{lab_name}}-{{guid}}"
- name: define OCP Project for NodeJS
  set_fact:
    nodejs_ocp_project: "nodejs-{{lab_name}}-{{guid}}"
# ####### Start of Installation of Decision Manager  ############## #
- name: check if Decision Manager is deployed
  shell: "oc get project {{rhdm_ocp_project}}"
  register: rhdm_project_result
  ignore_errors: true
  changed_when: false
- name: "Create project {{rhdm_ocp_project}}"
  shell: "oc new-project {{rhdm_ocp_project}} --display-name={{rhdm_ocp_project}}"
  when: rhdm_project_result is failed
- name: "Label namespace"
  command: "oc label namespace {{ocp_project}} AAD='{{guid}}'"
  command: "oc label namespace {{rhdm_ocp_project}} AAD='{{guid}}'"
  when: rhdm_project_result is failed
- name: Make sure we go back to default project
- name: Make sure we go back do default project
  shell: "oc project default"
  when: rhdm_project_result is failed
- name: Add the view role to the default service account
  shell: "oc policy add-role-to-user view -z default -n {{rhdm_ocp_project}}"
  when: rhdm_project_result is failed
- name: Annotate the empty project as requested by user
  shell: "oc annotate namespace {{rhdm_ocp_project}} openshift.io/requester={{rhdm_ocp_project}} --overwrite"
  when: rhdm_project_result is failed
# #######      lab specific tasks   ############## #
- name: Give ocp_username access to ocp_project; user = {{ocp_username}}
  shell: "oc policy add-role-to-user admin {{rhdm_ocp_project}} -n {{rhdm_ocp_project}}"
  when: rhdm_project_result is failed
# Components:
#   1) Mongodb (use replica set .... 1 replica is sufficient)
#   2) AMQ Streaming (Kafka with Zookeeper)
#   3) Red Hat's Apache Spark  ( https://radanalytics.io/projects )
#   4) JDG
#   5) Decision Manager (KIE-Server, maybe Decision Central ? )
#   6) other ???
- name: Create Template for {{rhdm_ocp_project}}
  shell: oc create -f {{rhdm_template_yml}} -n openshift
  when: rhdm_project_result is failed
- name: Create project 'rhdm'
  shell: oc adm new-project rhdm --admin=opentlc-mgr --description="Insurance Quote Rules engine decision manager."
  when: rhdm_project_result is failed
- name: Create project 'rhdm'
  shell: oc project rhdm
  when: rhdm_project_result is failed
- name: Create app 'rhdm'
  shell: oc new-app  --name=quoting --template rhdm70-kieserver-basic-s2i  --param=APPLICATION_NAME=$APPLICATION_NAME  --param=KIE_ADMIN_USER=$KIE_ADMIN_USER --param=KIE_ADMIN_PWD=$KIE_ADMIN_PWD --param=KIE_SERVER_USER=$KIE_SERVER_USER --param=KIE_SERVER_PWD=$KIE_SERVER_PWD --param=KIE_SERVER_CONTAINER_DEPLOYMENT=$KIE_SERVER_CONTAINER_DEPLOYMENT --param=SOURCE_REPOSITORY_URL=$SOURCE_REPOSITORY_URL --param=SOURCE_REPOSITORY_REF=$SOURCE_REPOSITORY_REF --param=CONTEXT_DIR=$CONTEXT_DIR
  when: rhdm_project_result is failed
# ####### End of Installation of Decision Manager  ############## #
# NodeJS S2I template
#oc create -f https://raw.githubusercontent.com/gpe-mw-training/rhte-api-as-business-labs/master/templates/nodejs-quoting-app-template.json -n openshift
####################################################
# ###### Start of Installation of Fuse Online ##########
- name: check if Fuse Online is deployed
  shell: "oc get project {{fuseonline_ocp_project}}"
  register: rhdm_project_result
  ignore_errors: true
  changed_when: false
- name: "Create project {{fuseonline_ocp_project}}"
  shell: "oc new-project {{fuseonline_ocp_project}} --display-name={{fuseonline_ocp_project}}"
  when: rhdm_project_result is failed
- name: "Label namespace"
  command: "oc label namespace {{fuseonline_ocp_project}} AAD='{{guid}}'"
  when: rhdm_project_result is failed
- name: Make sure we go back to default project
  shell: "oc project default"
  when: rhdm_project_result is failed
- name: Create serviceaccount-as-oauthclient
  shell: "oc create -f {{syndesisio_sa_yml}} -n {{fuseonline_ocp_project}}"
- name: Create syndesisio template; {{syndesisio_template_yml}}
  shell: "oc create -f {{syndesisio_template_yml}} -n {{fuseonline_ocp_project}}"
- name: delete temp dir if it exists
  file:
      path: /tmp/{{fuseonline_ocp_project}}
      state: absent
- file:
      path: /tmp/{{fuseonline_ocp_project}}
      state: directory
- name: Load fuse-ignite-is
  shell: "oc create -f {{ fuse_ignite_is_yaml }} -n {{fuseonline_ocp_project}}"
- name: Create the Fuse Online app
  shell: |
      oc new-app {{ignite_template_name}} \
      -p ROUTE_HOSTNAME=fuse.{{ocp_project}}.{{ocp_apps_domain}} \
      -p OPENSHIFT_MASTER=https://master.{{ocp_domain}} \
      -p OPENSHIFT_PROJECT={{fuseonline_ocp_project}} \
      -p POSTGRESQL_MEMORY_LIMIT={{POSTGRESQL_MEMORY_LIMIT}} \
      -p PROMETHEUS_MEMORY_LIMIT={{PROMETHEUS_MEMORY_LIMIT}} \
      -p META_MEMORY_LIMIT={{META_MEMORY_LIMIT}} \
      -p SERVER_MEMORY_LIMIT={{SERVER_MEMORY_LIMIT}} \
      -p OPENSHIFT_OAUTH_CLIENT_SECRET=$(oc sa get-token syndesis-oauth-client -n {{fuseonline_ocp_project}}) \
      -p MAX_INTEGRATIONS_PER_USER={{MAX_INTEGRATIONS_PER_USER}} \
      -p IMAGE_STREAM_NAMESPACE={{fuseonline_ocp_project}} \
      -n {{fuseonline_ocp_project}}
- name: resume syndesis oauthproxy and db
  shell: oc rollout resume dc/syndesis-oauthproxy dc/syndesis-db -n {{fuseonline_ocp_project}}
- include_tasks: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - syndesis-oauthproxy
      - syndesis-db
- name: Scale up broker-amq
  shell: |
      oc scale dc/broker-amq --replicas=1 -n {{fuseonline_ocp_project}}
- name: resume broker-amq
  shell: oc rollout resume dc/broker-amq -n {{fuseonline_ocp_project}}
- include_tasks: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - "broker-amq"
- name: resume syndesis-meta
  shell: oc rollout resume dc/syndesis-meta -n {{fuseonline_ocp_project}}
- include_tasks: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - syndesis-meta
- name: resume syndesis-server
  shell: oc rollout resume dc/syndesis-server -n {{fuseonline_ocp_project}}
- include_tasks: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - syndesis-server
- name: resume syndesis-ui
  shell: oc rollout resume dc/syndesis-ui -n {{fuseonline_ocp_project}}
- include_tasks: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - syndesis-ui
- name: resume syndesis-prometheus
  shell: oc rollout resume dc/syndesis-prometheus -n {{fuseonline_ocp_project}}
- include_tasks: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - syndesis-prometheus
- name: resume todo
  shell: oc rollout resume dc/todo -n {{ocp_project}}
- include_tasks: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - todo
- name: Add the view role to the default service account
  shell: "oc policy add-role-to-user view -z default -n {{fuseonline_ocp_project}}"
- name: Annotate the empty project as requested by user
  shell: "oc annotate namespace {{ocp_project}} openshift.io/requester={{ocp_username}} --overwrite"
  shell: "oc annotate namespace {{fuseonline_ocp_project}} openshift.io/requester={{fuseonline_ocp_project}} --overwrite"
- name: Give ocp_username access to ocp_project; user = {{ocp_username}}
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_project}}"
  shell: "oc policy add-role-to-user admin {{fuseonline_ocp_project}} -n {{fuseonline_ocp_project}}"
# ###### End of Installation of Fuse Online ##########
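The resume-then-wait sequence above repeats the same two steps per Syndesis DeploymentConfig (the GPTE image streams ship the DCs paused, per the note in defaults/main.yml). A sketch of the same flow folded into one loop; the helper file name is hypothetical and not part of this commit:
- name: Resume each paused Syndesis DC and wait for it to become ready
  include_tasks: ./resume_and_wait.yml      # hypothetical helper, outlined below
  loop:
    - syndesis-oauthproxy
    - syndesis-db
    - syndesis-meta
    - syndesis-server
    - syndesis-ui
    - syndesis-prometheus
  loop_control:
    loop_var: dc_name
# resume_and_wait.yml (hypothetical):
#   - shell: "oc rollout resume dc/{{ dc_name }} -n {{fuseonline_ocp_project}}"
#   - include_tasks: ./wait_for_deploy.yml
#     vars:
#       pod_to_wait:
#         - "{{ dc_name }}"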
- name: workload Tasks Complete
  debug:
ansible/roles/ocp-workload-rhte-mw-api-mesh/defaults/main.yml
@@ -12,7 +12,7 @@
quota_limits_memory: '15Gi'
quota_configmaps: 20
quota_pods: 20
quota_pods: 30
quota_persistentvolumeclaims: 20
quota_services: 150
quota_secrets: 150
@@ -31,5 +31,4 @@
lab_name: rhte-mw-api-mesh
ocp_user_needs_quota: True
cluster_quota_name: "clusterquota-{{lab_name}}-{{ocp_username}}"
ansible/roles/ocp-workload-rhte-mw-api-mesh/readme.adoc
@@ -18,7 +18,6 @@
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=true" \
                    -e"ocp_domain=$OCP_DOMAIN" \
                    -e"ACTION=create"
ansible/roles/ocp-workload-rhte-mw-api-mesh/tasks/pre_workload.yml
@@ -26,7 +26,6 @@
        --hard secrets="{{quota_secrets}}" \
        --hard requests.storage="{{quota_requests_storage}}"
  ignore_errors: true
  when: ocp_user_needs_quota|d(False)|bool
- name: pre_workload Tasks Complete
  debug:
ansible/roles/ocp-workload-rhte-mw-bfield-migration/defaults/main.yml
@@ -1,10 +1,13 @@
---
become_override: false
ocp_username: user1
ocp_username: opentlc-mgr
ocp_user_needs_quota: True
ocp_user_groups:
  - OPENTLC-PROJECT-PROVISIONERS
ocp_project: rhte-mw-bfield-migration-{{ocp_username}}
ocp_constraints_project: rhte-mw-bfield-migration-constraints-{{ocp_username}}
quota_requests_cpu: 5
quota_limits_cpu: 10
@@ -12,24 +15,14 @@
quota_requests_memory: '4Gi'
quota_limits_memory: '12Gi'
quota_configmaps: 5
quota_configmaps: 15
quota_pods: 15
quota_persistentvolumeclaims: 10
quota_services: 10
quota_secrets: 20
quota_services: 20
quota_secrets: 100
quota_requests_storage: 50Gi
pod_min_mem: 10Mi
pod_max_mem: 6Gi
container_default_mem: 1Gi
container_max_mem: 4Gi
ocp_apps_domain: apps.{{ocp_domain}}
build_status_retries: 20
build_status_delay: 20
deploy_status_retries: 15
deploy_status_delay: 20
lab_name: rhte-mw-bfield-migration
ansible/roles/ocp-workload-rhte-mw-bfield-migration/readme.adoc
@@ -1,11 +1,9 @@
= ocp-workload-rhte-mw-bfield-migration
Corresponds to the link:https://drive.google.com/open?id=100uafQzO4j1viW9hOkiroT1DB32d2WcR0A0jpqbC1hg[Brownfield Migrations] Tech Exchange lab
= Brownfield Mig workload
== Execution using localhost oc client
-----
WORKLOAD="ocp-workload-appmod-migration"
WORKLOAD="ocp-workload-rhte-mw-bfield-migration"
GUID=1
OCP_USERNAME="user$GUID"
HOST_GUID=dev39
@@ -28,15 +26,16 @@
-----
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
GUID=jb45
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-rhte-mw-bfield-migration"
WORKLOAD="ocp-workload-appmod-migration"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
GUID=gptetraining01
OCP_USERNAME="gpsetraining1"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
@@ -47,25 +46,23 @@
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=true" \
                    -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
                    -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
                    -e"ACTION=create"
----
=== To Delete an environment
----
GUID=jb45
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-rhte-mw-bfield-migration"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
WORKLOAD="ocp-workload-appmod-migration"
GUID=gptetraining01
OCP_USERNAME="gpsetraining1"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                    -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
ansible/roles/ocp-workload-rhte-mw-bfield-migration/tasks/pre_workload.yml
@@ -1,11 +1,21 @@
---
# - name: Add user to developer group (allowed to create projects)
#   shell: "oadm groups add-users {{item}} {{ocp_username}}"
#   register: groupadd_register
#   with_items: "{{ocp_user_groups}}"
#   when: ocp_username is defined and ocp_user_groups is defined
#
# 27 August, 2018:  Not all OCP workshops have OPENTLC-PROJECT-PROVISIONERS group by default
- name: create our own OPENTLC-PROJECT-PROVISIONERS
  command: "oc adm groups new OPENTLC-PROJECT-PROVISIONERS"
  ignore_errors: true
- name: allow OPENTLC-PROJECT-PROVISIONERS members to provision their own projects
  command: "oc adm policy add-cluster-role-to-group self-provisioner OPENTLC-PROJECT-PROVISIONERS"
  ignore_errors: true
- name: Add user to developer group (allowed to create projects)
  shell: "oc adm groups add-users {{item}} {{ocp_username}}"
  register: groupadd_register
  with_items: "{{ocp_user_groups}}"
  when: ocp_username is defined and ocp_user_groups is defined
# - name: test that command worked
#   debug:
#     var: groupadd_register
ansible/roles/ocp-workload-rhte-mw-bfield-migration/tasks/remove_workload.yml
@@ -3,19 +3,46 @@
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: define ocp_project
  set_fact:
    ocp_project: "{{lab_name}}-{{guid}}"
- name: Remove user from groups {{ocp_user_groups}}
  shell: oc adm groups remove-users {{item}} {{ocp_username}}
  with_items: "{{ocp_user_groups}}"
  ignore_errors: true
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
- name: Remove any lingering tmp files
  shell: "rm -rf /tmp/{{guid}}"
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}
  ignore_errors: true
- name: Remove user Project
  shell: "oc delete project {{ocp_project}}"
- name: Remove user Projects - oc get projects
  command: "oc get projects -o json"
  register: all_projects
- name: Remove user Projects - Convert output to json
  set_fact:
    projects: "{{all_projects.stdout | from_json}}"
- name: Remove user Projects -  Debug statement
  debug:
    msg: "found user project: {{item.metadata.name}}"
    verbosity: 1
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
  with_items: "{{projects['items']}}"
- name: Remove user Projects - "oc delete project {{item.metadata.name}} "
  command: "oc delete project {{item.metadata.name}}"
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
    - item.status.phase is defined
    - item.status.phase != "Terminating"
  with_items: "{{projects['items']}}"
- name: post_workload Tasks Complete
ansible/roles/ocp-workload-rhte-mw-bfield-migration/tasks/wait_for_deploy.yml
File was deleted
ansible/roles/ocp-workload-rhte-mw-bfield-migration/tasks/workload.yml
@@ -1,36 +1,58 @@
---
- name: define ocp_project
  set_fact:
    ocp_project: "{{lab_name}}-{{guid}}"
# #########         Default project             ##############
#- name: Create project for workload; project =  {{ocp_project}}
#  shell: "oc new-project {{ocp_project}}"
#- name: "Label namespace"
#  command: "oc label namespace {{ocp_project}} AAD='{{guid}}'"
#- name: Make sure we go back do default project
#  shell: "oc project default"
#- name: Delete default limitrange
#  shell: |
#    oc delete limitrange {{ocp_project}}-core-resource-limits -n {{ocp_project}}
#- name: Create a new limitrange
#  template:
#    src: templates/limitrange.yaml
#    dest: /tmp/{{ocp_project}}_limitrange.yaml
#- shell: |
#    oc create -f /tmp/{{ocp_project}}_limitrange.yaml -n {{ocp_project}}
# ###############################################################
# #########             Constraint project          ##############
- name: Create project; project =  {{ocp_project}}
  shell: "oc new-project {{ocp_project}}"
- name: Create project; project =  {{ocp_constraints_project}}
  shell: "oc new-project {{ocp_constraints_project}}"
- name: "Label namespace"
  command: "oc label namespace {{ocp_project}} AAD='{{guid}}'"
  command: "oc label namespace {{ocp_constraints_project}} AAD='{{guid}}'"
- name: Give ocp_username access to {{ocp_constraints_project}};  user = {{ocp_username}}
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_project}}"
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_constraints_project}}"
- name: Delete default limitrange
  shell: |
    oc delete limitrange {{ocp_project}}-core-resource-limits -n {{ocp_project}}
    oc delete limitrange {{ocp_constraints_project}}-core-resource-limits -n {{ocp_constraints_project}}
- name: Create a new limitrange
  template:
    src: templates/constraints_limitrange.yaml
    dest: /tmp/{{ocp_project}}_limitrange.yaml
    dest: /tmp/{{ocp_constraints_project}}_limitrange.yaml
- shell: |
    oc create -f /tmp/{{ocp_project}}_limitrange.yaml -n {{ocp_project}}
    oc create -f /tmp/{{ocp_constraints_project}}_limitrange.yaml -n {{ocp_constraints_project}}
- name: Annotate the empty project as requested by user
  shell: "oc annotate namespace {{ocp_project}} openshift.io/requester={{ocp_username}} --overwrite"
  shell: "oc annotate namespace {{ocp_constraints_project}} openshift.io/requester={{ocp_username}} --overwrite"
- name: Give ocp_username access to ocp_project; user = {{ocp_username}}
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_project}}"
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_constraints_project}}"
# ###############################################################
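The constraint-project hunk above repeats the label, annotate, and add-role-to-user commands for {{ocp_project}} and {{ocp_constraints_project}}. A consolidated form, assuming both projects have already been created, could loop over the two names:
----
- name: Label, annotate and grant admin on both lab projects (sketch)
  shell: |
    oc label namespace {{item}} AAD='{{guid}}' --overwrite
    oc annotate namespace {{item}} openshift.io/requester={{ocp_username}} --overwrite
    oc policy add-role-to-user admin {{ocp_username}} -n {{item}}
  with_items:
    - "{{ocp_project}}"
    - "{{ocp_constraints_project}}"
----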
ansible/roles/ocp-workload-rhte-mw-bfield-migration/templates/constraints_limitrange.yaml
New file
@@ -0,0 +1,25 @@
apiVersion: v1
kind: LimitRange
metadata:
  name: {{ocp_constraints_project}}-core-resource-limits
  namespace: {{ocp_constraints_project}}
spec:
  limits:
  - default:
      cpu: 1
      memory: 100Mi
    defaultRequest:
      cpu: 100m
      memory: 50Mi
    max:
      memory: 350Mi
    min:
      memory: 4Mi
    type: Container
  - max:
      cpu: 5
      memory: 350Mi
    min:
      cpu: 50m
      memory: 6Mi
    type: Pod
ansible/roles/ocp-workload-rhte-mw-bfield-migration/templates/limitrange.yaml
New file
@@ -0,0 +1,23 @@
apiVersion: v1
kind: LimitRange
metadata:
  name: {{ocp_project}}-core-resource-limits
  namespace: {{ocp_project}}
spec:
  limits:
  - default:
      cpu: 500m
      memory: {{container_default_mem}}
    defaultRequest:
      cpu: 50m
      memory: 256Mi
    max:
      memory: {{container_max_mem}}
    min:
      memory: 10Mi
    type: Container
  - max:
      memory: {{pod_max_mem}}
    min:
      memory: {{pod_min_mem}}
    type: Pod
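Both LimitRange templates are rendered to /tmp and applied with `oc create`. A quick check that the limits actually took effect in each project, assuming the project names used above, could be:
----
- name: Show the effective LimitRange for each lab project (sketch)
  command: "oc describe limitrange {{item}}-core-resource-limits -n {{item}}"
  with_items:
    - "{{ocp_project}}"
    - "{{ocp_constraints_project}}"
  changed_when: false
----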
ansible/roles/ocp-workload-rhte-mw-op-intel/defaults/main.yml
@@ -1,6 +1,6 @@
---
become_override: false
ocp_username: hchin-redhat.com
ocp_username: developer-lab1-kafka-project
ocp_user_needs_quota: True
ocp_user_groups:
@@ -45,7 +45,7 @@
##########          Templates for RHTE Lab 6 Op Intel       #################
# Corresponds to Strimzi-kafka-operator templates
strimzi_url: https://raw.githubusercontent.com/honghuac/rhte2018/master/examples
strimzi_url: https://raw.githubusercontent.com/gpe-mw-training/rhte2018/master/examples
clusteroperator_yaml: "{{strimzi_url}}/install/cluster-operator"
serviceaccount_yaml: "{{clusteroperator_yaml}}/01-ServiceAccount-strimzi-cluster-operator.yaml"
ansible/roles/ocp-workload-rhte-mw-op-intel/readme.adoc
@@ -7,99 +7,25 @@
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
GUID=jb45
HOST_GUID=dev39
TARGET_HOST="master.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-rhte-mw-op-intel"
# no inventory file is needed; the playbook runs with the local oc client against localhost
ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
$ ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
  -e"ANSIBLE_REPO_PATH=`pwd`" \
  -e"ocp_workload=${WORKLOAD}" \
  -e"ocp_username=${OCP_USERNAME}" \
  -e"guid=${GUID}" \
  -e"ocp_user_needs_quota=true" \
  -e"ocp_domain=${OCP_DOMAIN}" \
  -e"ACTION=create"
----
=== To Delete an environment
----
GUID=jb45
WORKLOAD="ocp-workload-rhte-mw-op-intel"
OCP_USERNAME="jbride-redhat.com"
HOST_GUID=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com

$ ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
  -e"ANSIBLE_REPO_PATH=`pwd`" \
  -e"ocp_workload=${WORKLOAD}" \
  -e"ocp_username=${OCP_USERNAME}" \
  -e"guid=${GUID}" \
  -e"ACTION=remove"
----
== Execution using remote (bastion node) oc client
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-rhte-mw-op-intel"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
GUID=jb05
OCP_USERNAME="jbride-redhat.com"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
                    -e"ACTION=create"
----
=== To Delete an environment
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-rhte-mw-op-intel"
GUID=jb05
OCP_USERNAME="jbride-redhat.com"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                    -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ACTION=remove"
----
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/main.yml
@@ -9,11 +9,6 @@
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Spark Workload Tasks
  include: ./spark_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  include: ./post_workload.yml
  become: "{{ become_override | bool }}"
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/pre_workload.yml
@@ -13,7 +13,7 @@
- name: Create user Quota - clusterresourcequota
  shell: |
        oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
        oc create clusterquota clusterquota-"{{ocp_username}}-{{lab_name}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}"  \
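The quota name changes here from a `{{guid}}` suffix to a `{{lab_name}}` suffix. A short follow-up task to confirm the new quota exists, assuming the new naming, might look like:
----
- name: Verify the cluster resource quota was created (sketch)
  command: "oc describe clusterresourcequota clusterquota-{{ocp_username}}-{{lab_name}}"
  register: quota_check
  changed_when: false

- debug:
    var: quota_check.stdout_lines
    verbosity: 1
----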
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/remove_workload.yml
@@ -3,41 +3,25 @@
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: define ocp_project
  set_fact:
    ocp_project: "{{lab_name}}-{{guid}}"
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{lab_name}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{lab_name}}
  ignore_errors: true
- name: Remove any lingering tmp files
  shell: "rm -rf /tmp/{{guid}}"
- name: Remove Project {{lab_1_name}}
  shell: "oc delete project {{lab_1_name}}"
  ignore_errors: true
- name: Remove Project {{lab_2_name}}
  shell: "oc delete project {{lab_2_name}}"
  ignore_errors: true
- name: Remove Project {{lab_3_name}}
  shell: "oc delete project {{lab_3_name}}"
  ignore_errors: true
- name: Remove Project {{lab_4_name}}
  shell: "oc delete project {{lab_4_name}}"
  ignore_errors: true
- name: Remove Project {{lab_5_name}}
  shell: "oc delete project {{lab_5_name}}"
  ignore_errors: true
- name: Remove Project {{lab_6_name}}
  shell: "oc delete project {{lab_6_name}}"
- name: Remove Strimzi CRDs
  shell: "oc delete crd {{item}}"
  with_items:
    - kafkaconnects.kafka.strimzi.io
    - kafkaconnects2is.kafka.strimzi.io
    - kafkas.kafka.strimzi.io
    - kafkatopics.kafka.strimzi.io
    - kafkausers.kafka.strimzi.io
  ignore_errors: true
- name: post_workload Tasks Complete
  debug:
    msg: "Post-Software checks completed successfully - Removed"
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/strimzi_workload.yml
@@ -1,8 +1,4 @@
---
- name: define ocp_project
  set_fact:
    ocp_project: "{{lab_name}}-{{guid}}"
# #######  Strimzi Installation  ############## #
# Components:
#   1) Kafka
@@ -18,89 +14,60 @@
- name: Create project for workload {{lab_1_name}}
  shell: "oc new-project {{lab_1_name}}"
  ignore_errors: true
- name: Label namespace
  command: "oc label namespace {{lab_1_name}} AAD='{{guid}}'"
  ignore_errors: true
  command: "oc label namespace {{lab_1_name}} AAD='{{ocp_username}}'"
- name: Make sure we go back to default project
  shell: "oc project default"
- name: Give user {{guid}} access to ocp_project {{lab_1_name}}
  shell: "oc policy add-role-to-user admin {{guid}} -n {{lab_1_name}}"
  ignore_errors: true
- name: delete temp dir if it exists
  file:
      path: /tmp/{{ocp_project}}
      state: absent
- file:
      path: /tmp/{{ocp_project}}
      state: directory
- name: Give user {{ocp_username}} access to ocp_project {{lab_1_name}}
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{lab_1_name}}"
- name: Create SA for strimzi cluster operator
  shell: "oc apply -f {{ serviceaccount_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Create cluster role for strimzi cluster operator
  shell: "oc apply -f {{ clusteroperator_role_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Create cluster role binding for strimzi cluster operator
  shell: "oc apply -f {{ clusteroperator_rolebinding_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Create cluster role for Kafka broker
  shell: "oc apply -f {{ kafkabroker_role_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Create  cluster role binding for strimzi-cluster-operator-topic-operator-delegation
  shell: "oc apply -f {{ topicoperator_rolebinding_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Add CRD for Kafka
  shell: "oc apply -f {{ kafka_crd_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Add CRD for Kafka connect
  shell: "oc apply -f {{ kafkaconnect_crd_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Add CRD for Kafka connect s2i
  shell: "oc apply -f {{ kafkaconnects2i_crd_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Add CRD for Kafka topic
  shell: "oc apply -f {{ kafkatopic_crd_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Add CRD for Kafka user
  shell: "oc apply -f {{ kafkauser_crd_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Deploy Kafka
  shell: "oc apply -f {{clusteroperator_deployment_yaml}} -n {{lab_1_name}}"
  ignore_errors: true
- name: Apply Kafka Persistent template
  shell: "oc apply -f {{ kafkapersistent_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Create first Kafka topic
  shell: "oc apply -f {{ kafkatopic_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Create second Kafka topic
  shell: "oc apply -f {{ kafkatopic2_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
#- name: Create third Kafka topic
#  shell: "oc apply -f {{ kafkatopic3_yaml }} -n {{lab_1_name}}"
- name: Create Kafka users
  shell: "oc apply -f {{ kafkauser_yaml }} -n {{lab_1_name}}"
  ignore_errors: true
- name: Create Kafka connect deployment
  shell: "oc apply -f {{ kafkaconnect_yaml }} -n {{lab_1_name}}"