Tok
2018-08-26 e78f5691833003d0465b1e6ee8fdf31d8fb767e8
Merge branch 'development' of https://github.com/sborenst/ansible_agnostic_deployer into development
4 files deleted
1 file copied
70 files added
37 files modified
14 files renamed
3891 lines changed in 126 files
ansible/configs/ansible-cicd-lab/README.adoc | 3
ansible/configs/ansible-cicd-lab/env_vars.yml | 16
ansible/configs/ansible-cicd-lab/post_software.yml | 6
ansible/configs/ansible-cicd-lab/requirements.yml | 5
ansible/configs/ansible-provisioner/env_vars.yml | 2
ansible/configs/archive/ocp-storage-cns_based_on_ocp-workshop/post_software.yml | 2
ansible/configs/archive/ocp-storage-cns_based_on_ocp-workshop/pre_software.yml | 38
ansible/configs/ocp-demo-lab/post_software.yml | 2
ansible/configs/ocp-gpu-single-node/pre_software.yml | 54
ansible/configs/ocp-multi-cloud-example/pre_software.yml | 40
ansible/configs/ocp-workshop/env_vars.yml | 10
ansible/configs/ocp-workshop/post_software.yml | 953
ansible/configs/ocp-workshop/pre_software.yml | 24
ansible/configs/quay-enterprise/env_vars.yml | 9
ansible/configs/quay-enterprise/software.yml | 138
ansible/roles/config-clair/handlers/main.yml | 5
ansible/roles/config-clair/tasks/firewall.yml | 4
ansible/roles/config-clair/tasks/main.yml | 6
ansible/roles/config-postgresql/handlers/main.yml | 7
ansible/roles/config-postgresql/tasks/firewall.yml | 12
ansible/roles/config-postgresql/tasks/main.yml | 3
ansible/roles/config-quay-enterprise/defaults/main.yml | 2
ansible/roles/config-quay-enterprise/handlers/main.yml | 5
ansible/roles/config-quay-enterprise/tasks/firewall.yml | 8
ansible/roles/config-quay-enterprise/tasks/main.yml | 82
ansible/roles/config-redis/handlers/main.yml | 5
ansible/roles/config-redis/tasks/firewall.yml | 6
ansible/roles/config-redis/tasks/main.yml | 3
ansible/roles/geerlingguy.java/.gitignore | 2
ansible/roles/geerlingguy.java/.travis.yml | 31
ansible/roles/geerlingguy.java/LICENSE | 20
ansible/roles/geerlingguy.java/README.md | 66
ansible/roles/geerlingguy.java/defaults/main.yml | 6
ansible/roles/geerlingguy.java/meta/.galaxy_install_info | 1
ansible/roles/geerlingguy.java/meta/main.yml | 39
ansible/roles/geerlingguy.java/tasks/main.yml | 37
ansible/roles/geerlingguy.java/tasks/setup-Debian.yml | 4
ansible/roles/geerlingguy.java/tasks/setup-FreeBSD.yml | 10
ansible/roles/geerlingguy.java/tasks/setup-RedHat.yml | 4
ansible/roles/geerlingguy.java/templates/java_home.sh.j2 | 1
ansible/roles/geerlingguy.java/tests/README.md | 11
ansible/roles/geerlingguy.java/tests/test.yml | 11
ansible/roles/geerlingguy.java/vars/Debian-8.yml | 7
ansible/roles/geerlingguy.java/vars/Debian-9.yml | 6
ansible/roles/geerlingguy.java/vars/Fedora.yml | 6
ansible/roles/geerlingguy.java/vars/FreeBSD.yml | 7
ansible/roles/geerlingguy.java/vars/RedHat.yml | 7
ansible/roles/geerlingguy.java/vars/Ubuntu-12.yml | 7
ansible/roles/geerlingguy.java/vars/Ubuntu-14.yml | 7
ansible/roles/geerlingguy.java/vars/Ubuntu-16.yml | 7
ansible/roles/geerlingguy.java/vars/Ubuntu-18.yml | 6
ansible/roles/host-jenkins-server/.gitignore | 2
ansible/roles/host-jenkins-server/.travis.yml | 88
ansible/roles/host-jenkins-server/LICENSE | 20
ansible/roles/host-jenkins-server/README.md | 148
ansible/roles/host-jenkins-server/defaults/main.yml | 38
ansible/roles/host-jenkins-server/handlers/main.yml | 12
ansible/roles/host-jenkins-server/meta/.galaxy_install_info | 1
ansible/roles/host-jenkins-server/meta/main.yml | 29
ansible/roles/host-jenkins-server/tasks/main.yml | 62
ansible/roles/host-jenkins-server/tasks/plugins.yml | 72
ansible/roles/host-jenkins-server/tasks/settings.yml | 56
ansible/roles/host-jenkins-server/tasks/setup-Debian.yml | 44
ansible/roles/host-jenkins-server/tasks/setup-RedHat.yml | 44
ansible/roles/host-jenkins-server/templates/basic-security.groovy | 28
ansible/roles/host-jenkins-server/tests/README.md | 11
ansible/roles/host-jenkins-server/tests/java-8.yml | 49
ansible/roles/host-jenkins-server/tests/requirements.yml | 2
ansible/roles/host-jenkins-server/tests/test-http-port.yml | 12
ansible/roles/host-jenkins-server/tests/test-jenkins-version.yml | 15
ansible/roles/host-jenkins-server/tests/test-plugins-with-home.yml | 15
ansible/roles/host-jenkins-server/tests/test-plugins-with-pinning.yml | 14
ansible/roles/host-jenkins-server/tests/test-plugins.yml | 17
ansible/roles/host-jenkins-server/tests/test-prefix.yml | 12
ansible/roles/host-jenkins-server/tests/test.yml | 9
ansible/roles/host-jenkins-server/vars/Debian.yml | 7
ansible/roles/host-jenkins-server/vars/RedHat.yml | 7
ansible/roles/host-lets-encrypt-certs/README.md | 121
ansible/roles/host-lets-encrypt-certs/defaults/main.yml | 42
ansible/roles/host-lets-encrypt-certs/files/deploy_LE_certs.yml
ansible/roles/host-lets-encrypt-certs/tasks/main.yml | 236
ansible/roles/install-aws-broker/tasks/main.yml | 85
ansible/roles/install-lets-encrypt-certs/README.md | 42
ansible/roles/install-lets-encrypt-certs/tasks/main.yml | 83
ansible/roles/install-prometheus/README.md | 38
ansible/roles/ocp-infra-aws-service-broker/README.md
ansible/roles/ocp-infra-aws-service-broker/tasks/main.yml | 70
ansible/roles/ocp-infra-enable-custom-catalog/README.md | 4
ansible/roles/ocp-infra-enable-custom-catalog/files/custom-categories.js | 14
ansible/roles/ocp-infra-enable-custom-catalog/tasks/main.yml | 36
ansible/roles/ocp-infra-maistra/README.md | 36
ansible/roles/ocp-infra-maistra/defaults/main.yml | 16
ansible/roles/ocp-infra-maistra/meta/main.yml | 22
ansible/roles/ocp-infra-maistra/tasks/main.yml | 49
ansible/roles/ocp-infra-maistra/templates/istio-cr-full.yaml.j2 | 26
ansible/roles/ocp-infra-maistra/templates/istio-cr.yaml.j2 | 4
ansible/roles/ocp-infra-maistra/tests/inventory | 1
ansible/roles/ocp-infra-maistra/tests/test.yml | 5
ansible/roles/ocp-infra-nexus/README.md
ansible/roles/ocp-infra-nexus/files/nexus2-persistent-template.yaml
ansible/roles/ocp-infra-nexus/files/nexus3-persistent-template.yaml
ansible/roles/ocp-infra-nexus/tasks/main.yml
ansible/roles/ocp-infra-openwhisk/README.md
ansible/roles/ocp-infra-openwhisk/tasks/main.yml | 10
ansible/roles/ocp-infra-prometheus-pre310/README.md
ansible/roles/ocp-infra-prometheus-pre310/handlers/main.yml
ansible/roles/ocp-infra-prometheus-pre310/tasks/all-nodes.yml
ansible/roles/ocp-infra-prometheus-pre310/tasks/bastion.yml
ansible/roles/ocp-infra-prometheus-pre310/tasks/infranodes.yml
ansible/roles/ocp-infra-prometheus-pre310/tasks/main.yml
ansible/roles/ocp-infra-prometheus-pre310/vars/main.yml
ansible/roles/ocp-workload-3scale-multitenant/tasks/wait_for_deploy.yml | 6
ansible/roles/ocp-workload-3scale-multitenant/templates/create_tenants.sh | 12
ansible/roles/ocp-workload-example/defaults/main.yml | 4
ansible/roles/ocp-workload-example/readme.adoc | 119
ansible/roles/ocp-workload-example/tasks/main.yml | 23
ansible/roles/ocp-workload-example/tasks/post_workload.yml | 9
ansible/roles/ocp-workload-example/tasks/pre_workload.yml | 9
ansible/roles/ocp-workload-example/tasks/remove_workload.yml | 9
ansible/roles/ocp-workload-example/tasks/workload.yml | 13
ansible/roles/ocp-workload-fuse-ignite/readme.adoc | 5
ansible/roles/ocp-workload-rhte-mw-op-intel/defaults/main.yml | 27
ansible/roles/ocp-workload-rhte-mw-op-intel/readme.adoc | 45
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/remove_workload.yml | 6
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/workload.yml | 92
scripts/wrapper.sh | 5
ansible/configs/ansible-cicd-lab/README.adoc
@@ -14,7 +14,8 @@
- bastion - this is the host through which you can easily connect to all the other ones.
- tower1 - the Ansible Tower server
- cicd1 - The CI/CD server (or build and test host) with Jenkins and Gogs pre-installed
- app1 and appdb1 - two "playground hosts
* the Jenkins server is available under http://cicd1.GUID.example.opentlc.com:8080, the user is _admin_ (with the usual password)
- app1 and appdb1 - two "playground" hosts
You may connect to the bastion host using the credentials given to you (by e-mail or GUID grabber) and from there jump to the other servers.
ansible/configs/ansible-cicd-lab/env_vars.yml
@@ -52,7 +52,7 @@
# This variable is no longer needed.
ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
#ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
set_env_authorized_key: true
@@ -251,3 +251,17 @@
cf_template_description: "{{ env_type }}-{{ guid }} Ansible Agnostic Deployer "
### variables necessary for Jenkins deployment
java_packages: java-1.8.0-openjdk # newer Jenkins releases don't work with older Java versions
jenkins_plugins:
  - git                   # Git Plugin
  - multiple-scms         # Multiple SCMs Plugin
  - tap                   # Tap Plugin
  - conditional-buildstep # Conditional BuildStep Plugin
  - workflow-aggregator   # Pipeline Plugin
  - parameterized-trigger # Parameterized Trigger Plugin
  - extended-choice-parameter # Extended Choice Parameter
jenkins_plugin_timeout: 240 # Jenkins tends to run into timeouts while installing plug-ins
jenkins_admin_password: r3dh4t1!
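For readers tuning this setup: the vendored host-jenkins-server role also appears to accept dict entries for version pinning (suggested by its tests/test-plugins-with-pinning.yml in the file list above). A hedged sketch, with an illustrative version number only:

jenkins_plugins:
  - git
  - name: workflow-aggregator
    version: "2.5"   # illustrative pin, not from this commit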
ansible/configs/ansible-cicd-lab/post_software.yml
@@ -10,7 +10,7 @@
- name: Configure all hosts with Repositories, Common Files and Set environment key
  hosts:
    - bastions[0]
    - cicd*
  become: true
  gather_facts: False
  vars_files:
@@ -18,6 +18,10 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - install_ci_components
  pre_tasks:
    - name: gather ansible_os_family and ansible_distribution facts for Jenkins
      setup:
        filter: 'ansible_[od][si]*'
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/host-gogs-server" }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/host-jenkins-server" }
ansible/configs/ansible-cicd-lab/requirements.yml
New file
@@ -0,0 +1,5 @@
# Use with `ansible-galaxy install --force -r requirements.yml -p ../../roles/`
# (only during development, not during installation)
---
- src: geerlingguy.jenkins
  name: host-jenkins-server
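The name: key installs the Galaxy role under the local directory name the playbooks reference. A hedged companion entry for the Java role vendored by this same commit could read:

# sketch only, not part of this commit
- src: geerlingguy.java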
ansible/configs/ansible-provisioner/env_vars.yml
@@ -20,7 +20,7 @@
install_opentlc_integration: true
provisioner_public_dns: "admin.{{subdomain_base}}."
provisioner_public_dns: "admin.{{subdomain_base}}"
zabbix_auto_registration_keyword: linux host
httpd_ssl_cert: /etc/pki/tls/certs/admin.pem
ansible/configs/archive/ocp-storage-cns_based_on_ocp-workshop/post_software.yml
@@ -378,7 +378,7 @@
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/install-nexus", desired_project: "{{admin_project}}", nexus_version: "3" }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/ocp-infra-nexus", desired_project: "{{admin_project}}", nexus_version: "3" }
  tags:
    - env-specific
    - install_nexus
ansible/configs/archive/ocp-storage-cns_based_on_ocp-workshop/pre_software.yml
@@ -55,19 +55,31 @@
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key"
      when: set_env_authorized_key|bool
- name: Install Let's Encrypt Wildcard Certificates
  hosts: bastions
  run_once: true
  become: true
  gather_facts: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/install-lets-encrypt-certs"
      tags: lets_encrypt
      when: install_lets_encrypt_certificates|bool
# - name: Install Let's Encrypt Wildcard Certificates
#   hosts: bastions
#   run_once: true
#   become: true
#   gather_facts: false
#   vars_files:
#   - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
#   tasks:
#   - name: Create Let's Encrypt Certificates
#     include_role:
#       name: "{{ ANSIBLE_REPO_PATH }}/roles/host-lets-encrypt-certs"
#     vars:
#     - acme_domain: "{{ master_lb_dns }}"
#     - acme_wildcard_domain: "*.{{ cloudapps_suffix }}"
#     - acme_aws_access_key: "{{ hostvars['localhost'].route53user_access_key }}"
#     - acme_aws_secret_access_key: "{{ hostvars['localhost'].route53user_secret_access_key }}"
#     - acme_production: "{{ lets_encrypt_production|d(False)|bool}}"
#     - acme_remote_dir: "/root"
#     - acme_cache_cert_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.cert"
#     - acme_cache_key_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.key"
#     - acme_cache_archive_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}_acme.tgz"
#     - acme_renew_automatically: True
#     - acme_force_issue: False
#     when:
#     - install_lets_encrypt_certificates|d(False)|bool
- name: Configuring Bastion Hosts
  hosts: bastions
ansible/configs/ocp-demo-lab/post_software.yml
@@ -129,7 +129,7 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  run_once: true
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/install-nexus", desired_project: "{{admin_project}}", nexus_version: "2" }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/ocp-infra-nexus", desired_project: "{{admin_project}}", nexus_version: "2" }
  tags:
    - env-specific
    - install_nexus
ansible/configs/ocp-gpu-single-node/pre_software.yml
@@ -69,32 +69,28 @@
        msg: "Pre-Software checks completed successfully"
### This section is only used when "install_lets_encrypt_certificates" is true. Not Required.
- name: Install Let's Encrypt Wildcard Certificates
  hosts: bastions
  run_once: true
  become: true
  gather_facts: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/install-lets-encrypt-certs"
      tags: lets_encrypt
      when: install_lets_encrypt_certificates|bool
- name: Copy lets encrypt certificates
  hosts: masters
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - env-specific
    - project_request
  tasks:
    # https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem.txt
    - name: Copy over the letsencrypt certificate
      copy:
        src: ./files/lets-encrypt-x3-cross-signed.pem.txt
        dest: /etc/origin/master/
# - name: Install Let's Encrypt Wildcard Certificates
#   hosts: bastions
#   run_once: true
#   become: true
#   gather_facts: false
#   vars_files:
#   - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
#   tasks:
#   - name: Create Let's Encrypt Certificates
#     include_role:
#       name: "{{ ANSIBLE_REPO_PATH }}/roles/host-lets-encrypt-certs"
#     vars:
#     - acme_domain: "{{ master_lb_dns }}"
#     - acme_wildcard_domain: "*.{{ cloudapps_suffix }}"
#     - acme_aws_access_key: "{{ hostvars['localhost'].route53user_access_key }}"
#     - acme_aws_secret_access_key: "{{ hostvars['localhost'].route53user_secret_access_key }}"
#     - acme_production: "{{ lets_encrypt_production|d(False)|bool}}"
#     - acme_remote_dir: "/root"
#     - acme_cache_cert_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.cert"
#     - acme_cache_key_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.key"
#     - acme_cache_archive_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}_acme.tgz"
#     - acme_renew_automatically: True
#     - acme_force_issue: False
#     when:
#     - install_lets_encrypt_certificates|d(False)|bool
ansible/configs/ocp-multi-cloud-example/pre_software.yml
@@ -75,26 +75,22 @@
  become: true
  gather_facts: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/install-lets-encrypt-certs"
      tags: lets_encrypt
      when: install_lets_encrypt_certificates|bool
- name: Copy lets encrypt certificates
  hosts: masters
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - env-specific
    - project_request
  tasks:
    # https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem.txt
    - name: Copy over the letsencrypt certificate
      copy:
        src: ./files/lets-encrypt-x3-cross-signed.pem.txt
        dest: /etc/origin/master/
  - name: Create Let's Encrypt Certificates
    include_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/host-lets-encrypt-certs"
    vars:
    - acme_domain: "{{ master_lb_dns }}"
    - acme_wildcard_domain: "*.{{ cloudapps_suffix }}"
    - acme_aws_access_key: "{{ hostvars['localhost'].route53user_access_key }}"
    - acme_aws_secret_access_key: "{{ hostvars['localhost'].route53user_secret_access_key }}"
    - acme_production: "{{ lets_encrypt_production|d(False)|bool}}"
    - acme_remote_dir: "/root"
    - acme_cache_cert_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.cert"
    - acme_cache_key_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.key"
    - acme_cache_archive_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}_acme.tgz"
    - acme_renew_automatically: True
    - acme_force_issue: False
    when:
    - install_lets_encrypt_certificates|d(False)|bool
ansible/configs/ocp-workshop/env_vars.yml
@@ -46,13 +46,18 @@
install_zabbix: false
install_prometheus: false
install_ipa_client: false
install_lets_encrypt_certificates: false
install_openwhisk: false
install_metrics: true
install_logging: true
install_aws_broker: false
install_nexus: true
install_openshiftapb: false
install_maistra: false
install_lets_encrypt_certificates: false
# Set the next variable to false to run tests. This prevents hitting the
# rate limiter of Let's Encrypt when requesting lots of certificates
# Set to true for "real" certificates
lets_encrypt_production: true
glusterfs_device_name: /dev/xvdc
glusterfs_device_size: 1500
@@ -83,6 +88,9 @@
admin_user: opentlc-mgr
admin_project: "ocp-workshop"
# UI Customizations
enable_workshops_catalog: false
### Azure 
# Create a dedicated resourceGroup for this deployment
ansible/configs/ocp-workshop/post_software.yml
@@ -4,12 +4,12 @@
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Create user vols
      shell: "mkdir -p /srv/nfs/user-vols/vol{1..{{user_vols}}}"
    - name: chmod the user vols
      shell: "chmod -R 777 /srv/nfs/user-vols"
  - name: Create user vols
    shell: "mkdir -p /srv/nfs/user-vols/vol{1..{{user_vols}}}"
  - name: chmod the user vols
    shell: "chmod -R 777 /srv/nfs/user-vols"
- name: Step 00xxxxx post software
  hosts: bastions
@@ -17,113 +17,115 @@
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - when: install_nfs|bool
      block:
      - name: get nfs Hostname
        set_fact:
          nfs_host: "{{ groups['support']|sort|first }}"
  - when: install_nfs|bool
    block:
    - name: get nfs Hostname
      set_fact:
        nfs_host: "{{ groups['support']|sort|first }}"
      - set_fact:
          pv_size: '10Gi'
          pv_list: "{{ ocp_pvs }}"
          persistentVolumeReclaimPolicy: Retain
    - set_fact:
        pv_size: '10Gi'
        pv_list: "{{ ocp_pvs }}"
        persistentVolumeReclaimPolicy: Retain
      - name: Generate PV file
        template:
          src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/pvs.j2"
          dest: "/root/pvs-{{ env_type }}-{{ guid }}.yml"
        tags: [ gen_pv_file ]
        when: pv_list.0 is defined
    - name: Generate PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/pvs.j2"
        dest: "/root/pvs-{{ env_type }}-{{ guid }}.yml"
      tags: [ gen_pv_file ]
      when: pv_list.0 is defined
      - set_fact:
          pv_size: "{{user_vols_size}}"
          persistentVolumeReclaimPolicy: Recycle
    - set_fact:
        pv_size: "{{user_vols_size}}"
        persistentVolumeReclaimPolicy: Recycle
        notify: restart nfs services
        run_once: True
      notify: restart nfs services
      run_once: True
      - name: Generate user vol PV file
        template:
          src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/userpvs.j2"
          dest: "/root/userpvs-{{ env_type }}-{{ guid }}.yml"
        tags:
          - gen_user_vol_pv
    - name: Generate user vol PV file
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/userpvs.j2"
        dest: "/root/userpvs-{{ env_type }}-{{ guid }}.yml"
      tags:
        - gen_user_vol_pv
      - shell: 'oc create -f /root/pvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/pvs-{{ env_type }}-{{ guid }}.yml'
        tags:
          - create_user_pv
        when: pv_list.0 is defined
    - shell: 'oc create -f /root/pvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/pvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
      when: pv_list.0 is defined
      - shell: 'oc create -f /root/userpvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/userpvs-{{ env_type }}-{{ guid }}.yml'
        tags:
          - create_user_pv
    - shell: 'oc create -f /root/userpvs-{{ env_type }}-{{ guid }}.yml || oc replace -f /root/userpvs-{{ env_type }}-{{ guid }}.yml'
      tags:
        - create_user_pv
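The oc create || oc replace idiom above is a create-or-update workaround; assuming oc apply is acceptable for these PV manifests, an equivalent sketch would be:

- name: Create or update user PVs (sketch, not part of this commit)
  command: "oc apply -f /root/userpvs-{{ env_type }}-{{ guid }}.yml"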
- name: For CNS change default storage class to glusterfs-storage
- name: For CNS change default storage class to glusterfs-storage (3.9.27, 3.9.30)
  hosts: masters
  run_once: true
  become: yes
  gather_facts: False
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - env-specific
    - env-specific_infra
    - storage-class
  - env-specific
  - env-specific_infra
  - storage-class
  tasks:
    - when:
        - osrelease is version_compare('3.9.27', '>=')
        - osrelease is version_compare('3.9.30', '<=')
        - install_glusterfs|bool
      block:
      - name: Set glusterfs-storage class to default
        command: >
          oc patch storageclass glusterfs-storage
          -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
        register: changesc_r
        failed_when:
          - changesc_r.stdout.find('storageclass "glusterfs-storage" not patched') == -1
          - changesc_r.rc != 0
        changed_when: changesc_r.stdout.find('storageclass "glusterfs-storage" patched') != -1
      - name: Remove default from glusterfs-storage-block class
        register: changesc_r
        changed_when: changesc_r.stdout.find('storageclass "glusterfs-storage-block" patched') != -1
        failed_when:
          - changesc_r.stdout.find('storageclass "glusterfs-storage-block" not patched') == -1
          - changesc_r.rc != 0
        command: >
          oc patch storageclass glusterfs-storage-block
          -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'
  - when:
      - osrelease is version_compare('3.9.27', '>=')
      - osrelease is version_compare('3.9.30', '<=')
      - install_glusterfs|bool
    block:
    - name: Set glusterfs-storage class to default
      command: >
        oc patch storageclass glusterfs-storage
        -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
      register: changesc_r
      failed_when:
        - changesc_r.stdout.find('storageclass "glusterfs-storage" not patched') == -1
        - changesc_r.rc != 0
      changed_when: changesc_r.stdout.find('storageclass "glusterfs-storage" patched') != -1
    - name: Remove default from glusterfs-storage-block class
      register: changesc_r
      changed_when: changesc_r.stdout.find('storageclass "glusterfs-storage-block" patched') != -1
      failed_when:
        - changesc_r.stdout.find('storageclass "glusterfs-storage-block" not patched') == -1
        - changesc_r.rc != 0
      command: >
        oc patch storageclass glusterfs-storage-block
        -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'
- name: Configure Bastion for CF integration
  hosts: bastions
  become: yes
  gather_facts: False
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/mgr_users.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/mgr_users.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - env-specific
    - cf_integration
    - opentlc_integration
  roles:
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/opentlc-integration"
      when: install_opentlc_integration|bool
      no_log: yes
  - env-specific
  - cf_integration
  - opentlc_integration
  tasks:
    - name: Copy /root/.kube to ~opentlc-mgr/
      command: "cp -rf /root/.kube /home/opentlc-mgr/"
      when: install_opentlc_integration|bool
  - name: Configure Bastion
    import_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/opentlc-integration"
    vars:
      no_log: yes
    when: install_opentlc_integration|bool
  - name: Copy /root/.kube to ~opentlc-mgr/
    command: "cp -rf /root/.kube /home/opentlc-mgr/"
    when: install_opentlc_integration|bool
    - name: set permission for .kube
      when: install_opentlc_integration|bool
      file:
        path: /home/opentlc-mgr/.kube
        owner: opentlc-mgr
        group: opentlc-mgr
        recurse: yes
  - name: set permission for .kube
    when: install_opentlc_integration|bool
    file:
      path: /home/opentlc-mgr/.kube
      owner: opentlc-mgr
      group: opentlc-mgr
      recurse: yes
- name: env-specific infrastructure
  hosts: masters
@@ -131,52 +133,52 @@
  become: yes
  gather_facts: False
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - env-specific
    - env-specific_infra
  - env-specific
  - env-specific_infra
  tasks:
    - name: Command to enable the wildcard routes in the OCP cluster for 3scale
      shell: "oc set env dc/router ROUTER_ALLOW_WILDCARD_ROUTES=true -n default"
  - name: Command to enable the wildcard routes in the OCP cluster for 3scale
    shell: "oc set env dc/router ROUTER_ALLOW_WILDCARD_ROUTES=true -n default"
    - name: Give administrative user cluster-admin privileges
      command: "oc adm policy add-cluster-role-to-user cluster-admin {{ admin_user }}"
  - name: Give administrative user cluster-admin privileges
    command: "oc adm policy add-cluster-role-to-user cluster-admin {{ admin_user }}"
    - name: Check for admin_project project
      command: "oc get project {{admin_project}}"
      register: result
      changed_when: false
      ignore_errors: true
  - name: Check for admin_project project
    command: "oc get project {{admin_project}}"
    register: result
    changed_when: false
    ignore_errors: true
    - name: Create admin_project project (for OCP before 3.10)
      command: "oc adm new-project {{admin_project}} --admin {{admin_user}} --node-selector='env=infra'"
      when:
      - result | failed
      - osrelease is version_compare("3.10", "<")
  - name: Create admin_project project (for OCP before 3.10)
    command: "oc adm new-project {{admin_project}} --admin {{admin_user}} --node-selector='env=infra'"
    when:
    - result | failed
    - osrelease is version_compare("3.10", "<")
    - name: Create admin_project project (for OCP 3.10+)
      command: "oc adm new-project {{admin_project}} --admin {{admin_user}} --node-selector='node-role.kubernetes.io/infra=true'"
      when:
      - result | failed
      - osrelease is version_compare("3.10", ">=")
  - name: Create admin_project project (for OCP 3.10+)
    command: "oc adm new-project {{admin_project}} --admin {{admin_user}} --node-selector='node-role.kubernetes.io/infra=true'"
    when:
    - result | failed
    - osrelease is version_compare("3.10", ">=")
    - name: Make admin_project project network global
      command: "oc adm pod-network make-projects-global {{admin_project}}"
      when: ovs_plugin == "multitenant"
  - name: Make admin_project project network global
    command: "oc adm pod-network make-projects-global {{admin_project}}"
    when: ovs_plugin == "multitenant"
    - name: Set admin_project SCC for anyuid
      command: "oc adm policy add-scc-to-group anyuid system:serviceaccounts:{{admin_project}}"
  - name: Set admin_project SCC for anyuid
    command: "oc adm policy add-scc-to-group anyuid system:serviceaccounts:{{admin_project}}"
    - name: Add capabilities within anyuid which is not really ideal
      command: "oc patch scc/anyuid --patch '{\"requiredDropCapabilities\":[\"MKNOD\",\"SYS_CHROOT\"]}'"
      ignore_errors: true
  - name: Add capabilities within anyuid which is not really ideal
    command: "oc patch scc/anyuid --patch '{\"requiredDropCapabilities\":[\"MKNOD\",\"SYS_CHROOT\"]}'"
    ignore_errors: true
    - name: Set Node Selector to empty for project openshift-template-service-broker
      shell: oc annotate namespace openshift-template-service-broker openshift.io/node-selector="" --overwrite
      ignore_errors: true
      when:
        - osrelease is version_compare('3.7', '>=')
        - osrelease is version_compare('3.10', '<')
  - name: Set Node Selector to empty for project openshift-template-service-broker
    shell: oc annotate namespace openshift-template-service-broker openshift.io/node-selector="" --overwrite
    ignore_errors: true
    when:
      - osrelease is version_compare('3.7', '>=')
      - osrelease is version_compare('3.10', '<')
- name: Remove all users from self-provisioners group
  hosts: masters
@@ -184,101 +186,101 @@
  become: yes
  gather_facts: False
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags: [ env-specific, remove_self_provisioners ]
  tasks:
    - when: remove_self_provisioners|bool
      block:
      - name: Set clusterRoleBinding auto-update to false
        command: oc annotate -n default --overwrite clusterrolebinding.rbac self-provisioners rbac.authorization.kubernetes.io/autoupdate=false
  - when: remove_self_provisioners|bool
    block:
    - name: Set clusterRoleBinding auto-update to false
      command: oc annotate -n default --overwrite clusterrolebinding.rbac self-provisioners rbac.authorization.kubernetes.io/autoupdate=false
      - name: Remove system:authenticated from self-provisioner role
        command: "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated system:authenticated:oauth"
        ignore_errors: true
    - name: Remove system:authenticated from self-provisioner role
      command: "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated system:authenticated:oauth"
      ignore_errors: true
      - name: create our own OPENTLC-PROJECT-PROVISIONERS
        command: "oc adm groups new OPENTLC-PROJECT-PROVISIONERS"
        ignore_errors: true
    - name: create our own OPENTLC-PROJECT-PROVISIONERS
      command: "oc adm groups new OPENTLC-PROJECT-PROVISIONERS"
      ignore_errors: true
      - name: allow OPENTLC-PROJECT-PROVISIONERS members to provision their own projects
        command: "oc adm policy add-cluster-role-to-group self-provisioner OPENTLC-PROJECT-PROVISIONERS"
    - name: allow OPENTLC-PROJECT-PROVISIONERS members to provision their own projects
      command: "oc adm policy add-cluster-role-to-group self-provisioner OPENTLC-PROJECT-PROVISIONERS"
- name: Project Request Template
  hosts: masters
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - env-specific
    - project_request
  - env-specific
  - project_request
  tasks:
    - name: Copy project request template to master
      copy:
        src: ./files/project-template.yml
        dest: /root/project-template.yml
  - name: Copy project request template to master
    copy:
      src: ./files/project-template.yml
      dest: /root/project-template.yml
    - name: Check for project request template
      command: "oc get template project-request -n default"
      register: request_template
      ignore_errors: true
  - name: Check for project request template
    command: "oc get template project-request -n default"
    register: request_template
    ignore_errors: true
    - name: Create project request template in default project
      shell: "oc create -f /root/project-template.yml -n default || oc replace -f /root/project-template.yml -n default"
      when: request_template | failed
  - name: Create project request template in default project
    shell: "oc create -f /root/project-template.yml -n default || oc replace -f /root/project-template.yml -n default"
    when: request_template | failed
    - name: Update master config file to use project request template
      lineinfile:
        regexp: "  projectRequestTemplate"
        dest: "/etc/origin/master/master-config.yaml"
        line: '  projectRequestTemplate: "default/project-request"'
        state: present
      register: master_config
  - name: Update master config file to use project request template
    lineinfile:
      regexp: "  projectRequestTemplate"
      dest: "/etc/origin/master/master-config.yaml"
      line: '  projectRequestTemplate: "default/project-request"'
      state: present
    register: master_config
    - name: Add Project request message
      replace:
        dest: '/etc/origin/master/master-config.yaml'
        regexp: 'projectRequestMessage.*'
        replace: "projectRequestMessage: '{{project_request_message}}'"
        backup: yes
  - name: Add Project request message
    replace:
      dest: '/etc/origin/master/master-config.yaml'
      regexp: 'projectRequestMessage.*'
      replace: "projectRequestMessage: '{{project_request_message}}'"
      backup: yes
    - name: Restart master service (Pre 3.7)
      service:
        name: atomic-openshift-master
        state: restarted
      when:
        - master_config.changed
        - osrelease is version_compare('3.7', '<')
  - name: Restart master service (Pre 3.7)
    service:
      name: atomic-openshift-master
      state: restarted
    when:
      - master_config.changed
      - osrelease is version_compare('3.7', '<')
    - name: Restart master API service (3.7 - 3.9)
      service:
        name: atomic-openshift-master-api
        state: restarted
      when:
        - master_config.changed
        - osrelease is version_compare('3.7', '>=')
        - osrelease is version_compare('3.10', '<')
  - name: Restart master API service (3.7 - 3.9)
    service:
      name: atomic-openshift-master-api
      state: restarted
    when:
      - master_config.changed
      - osrelease is version_compare('3.7', '>=')
      - osrelease is version_compare('3.10', '<')
    - name: Restart master API Pods (3.10+)
      command: /usr/local/bin/master-restart api
      when:
        - master_config.changed
        - osrelease is version_compare('3.10', '>=')
  - name: Restart master API Pods (3.10+)
    command: /usr/local/bin/master-restart api
    when:
      - master_config.changed
      - osrelease is version_compare('3.10', '>=')
- name: node admin configs
  hosts: nodes
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - env-specific
    - env_specific_images
  - env-specific
  - env_specific_images
  tasks:
    - name: 'Pull Env Specific Images'
      command: "docker pull {{ item }}"
      with_items: '{{ env_specific_images }}'
      when: env_specific_images.0 is defined
  - name: 'Pull Env Specific Images'
    command: "docker pull {{ item }}"
    with_items: '{{ env_specific_images }}'
    when: env_specific_images.0 is defined
- name: Import jenkins images for OCP 3.7 and newer
  hosts: masters
@@ -286,59 +288,62 @@
  become: yes
  gather_facts: False
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - env-specific
    - env_specific_images
  - env-specific
  - env_specific_images
  tasks:
    - when: osrelease is version_compare('3.7', '>=')
      block:
      - name: Remove default Jenkins ImageStream
        command: oc delete is jenkins -n openshift
        ignore_errors: true
  - when: osrelease is version_compare('3.7', '>=')
    block:
    - name: Remove default Jenkins ImageStream
      command: oc delete is jenkins -n openshift
      ignore_errors: true
      - name: Import jenkins from Red Hat Registry
        command: oc tag --source=docker registry.access.redhat.com/openshift3/jenkins-2-rhel7:v{{ repo_version }} openshift/jenkins:v{{ repo_version }} -n openshift
        ignore_errors: true
    - name: Import jenkins from Red Hat Registry
      command: oc tag --source=docker registry.access.redhat.com/openshift3/jenkins-2-rhel7:v{{ repo_version }} openshift/jenkins:v{{ repo_version }} -n openshift
      ignore_errors: true
      - name: Tag Jenkins jenkins:latest
        command: oc tag openshift/jenkins:v{{ repo_version }} openshift/jenkins:latest -n openshift
        register: octag_result
        retries: 5
        delay: 2
        until: octag_result is succeeded
        ignore_errors: true
    - name: Tag Jenkins jenkins:latest
      command: oc tag openshift/jenkins:v{{ repo_version }} openshift/jenkins:latest -n openshift
      register: octag_result
      retries: 5
      delay: 2
      until: octag_result is succeeded
      ignore_errors: true
      - name: Tag Jenkins jenkins:2
        command: oc tag openshift/jenkins:v{{ repo_version }} openshift/jenkins:2 -n openshift
        register: octag_result
        retries: 5
        delay: 2
        until: octag_result is succeeded
        ignore_errors: true
    - name: Tag Jenkins jenkins:2
      command: oc tag openshift/jenkins:v{{ repo_version }} openshift/jenkins:2 -n openshift
      register: octag_result
      retries: 5
      delay: 2
      until: octag_result is succeeded
      ignore_errors: true
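For illustration (not part of this commit), a quick sanity check that the retried oc tag calls produced all three tags might look like:

- name: List the Jenkins ImageStream tags (sketch)
  command: oc get is jenkins -n openshift -o jsonpath='{.status.tags[*].tag}'
  register: jenkins_is_tags
  changed_when: false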
- name: Fix NFS PV Recycling for OCP 3.7 and newer
  gather_facts: False
  become: yes
  hosts:
    - nodes
    - infranodes
    - masters
  - nodes
  - infranodes
  - masters
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - env-specific
    - install_nfs
  - env-specific
  - install_nfs
  tasks:
  - name: Fix NFS PV Recycling
    when:
    - install_nfs|d(True)|bool
    block:
    - name: Pull ose-recycler Image
      command: docker pull registry.access.redhat.com/openshift3/ose-recycler:latest
      register: pullr
      register: pull_result
      retries: 5
      delay: 10
      until: pullr is succeeded
      until: pull_result is succeeded
      when:
      - osrelease is version_compare('3.7', '>=')
      - install_nfs|bool
    - name: Tag ose-recycler Image (for OCP 3.7 - 3.9)
      command: >
@@ -347,7 +352,6 @@
      when:
      - osrelease is version_compare('3.7', '>=')
      - osrelease is version_compare('3.10', '<')
      - install_nfs|bool
    - name: Tag ose-recycler Image (for OCP 3.10+)
      command: >
@@ -355,7 +359,6 @@
        registry.access.redhat.com/openshift3/ose-recycler:v1.10.0
      when:
      - osrelease is version_compare('3.10', '>=')
      - install_nfs|bool
- name: Fix CRI-O Garbage Collection DaemonSet for OCP 3.9 (up to 3.9.25)
  gather_facts: False
@@ -365,19 +368,17 @@
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
  - name: Fix cri-o garbage collection
    when:
    - osrelease is version_compare('3.9.0', '>=')
    - osrelease is version_compare('3.9.25', '<=')
    - container_runtime == "cri-o"
    block:
    - name: Patch dockergc DaemonSet
      shell: "oc patch daemonset dockergc --patch='\"spec\": { \"template\": { \"spec\": { \"containers\": [ { \"command\": [ \"/usr/bin/oc\" ], \"name\": \"dockergc\" } ] } } }' -n default"
      ignore_errors: true
      when:
        - osrelease is version_compare('3.9.0', '>=')
        - osrelease is version_compare('3.9.25', '<=')
        - container_runtime == "cri-o"
    - name: Redeploy dockergc DaemonSet pods
      shell: "oc delete pod $(oc get pods -n default|grep dockergc|awk -c '{print $1}') -n default"
      when:
        - osrelease is version_compare('3.9.0', '>=')
        - osrelease is version_compare('3.9.25', '<=')
        - container_runtime == "cri-o"
# Install OpenWhisk
- name: Install OpenWhisk
@@ -386,15 +387,15 @@
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - env-specific
    - install_openwhisk
  - env-specific
  - install_openwhisk
  tasks:
  - import_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/install-openwhisk"
      name: "{{ ANSIBLE_REPO_PATH }}/roles/ocp-infra-openwhisk"
    when:
      - install_openwhisk|bool
      - install_openwhisk|d(False)|bool
# Set up Prometheus/Node Exporter/Alertmanager/Grafana
# on the OpenShift Cluster
@@ -402,20 +403,19 @@
  gather_facts: False
  become: yes
  hosts:
    - nodes
    - infranodes
    - masters
    - bastions
  - nodes
  - infranodes
  - masters
  - bastions
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - install_prometheus
  tasks:
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/install-prometheus"
        name: "{{ ANSIBLE_REPO_PATH }}/roles/ocp-infra-prometheus-pre310"
      when:
      - install_prometheus|bool
      - install_prometheus|d(False)|bool
      - osrelease is version_compare("3.10", "<")
# Deploy Grafana Manually until the install playbooks can
@@ -424,94 +424,117 @@
  gather_facts: False
  become: yes
  hosts:
    - infranodes
  - infranodes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - install_prometheus
  - install_prometheus
  tasks:
    - name: Ensure Grafana Image is on Infranodes
      shell: "docker pull docker.io/mrsiano/grafana-ocp:latest"
      when:
      - install_prometheus|bool
      - osrelease is version_compare("3.10", ">=")
  - name: Ensure Grafana Image is on Infranodes
    shell: "docker pull docker.io/mrsiano/grafana-ocp:latest"
    when:
    - install_prometheus|d(False)|bool
    - osrelease is version_compare("3.10", ">=")
- name: Install Grafana (3.10+)
  gather_facts: False
  become: yes
  hosts:
    - bastions
  - bastions
  run_once: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - install_prometheus
  - install_prometheus
  tasks:
    - when:
      - install_prometheus|bool
      - osrelease is version_compare("3.10", ">=")
      block:
      - name: Run Grafana Installation Playbook
        shell: "ansible-playbook -i /etc/ansible/hosts /usr/share/ansible/openshift-ansible/playbooks/openshift-grafana/config.yml"
      - name: Add admin permissions to admin_user for Grafana project
        shell: "oc policy add-role-to-user admin {{admin_user}} -n openshift-grafana"
  - when:
    - install_prometheus|bool
    - osrelease is version_compare("3.10", ">=")
    block:
    - name: Check if Grafana is already there
      command: "oc get project openshift-grafana"
      register: grafana_exists
      changed_when: False
      ignore_errors: true
    - name: Run Grafana Installation Playbook
      shell: "ansible-playbook -i /etc/ansible/hosts /usr/share/ansible/openshift-ansible/playbooks/openshift-grafana/config.yml"
      when: grafana_exists is failed
    - name: Add admin permissions to admin_user for Grafana project
      shell: "oc policy add-role-to-user admin {{admin_user}} -n openshift-grafana"
      when: grafana_exists is failed
# Update Firewall Rules for Node Exporter to work
# (3.10 and onwards)
- name: Update Firewall on all nodes for Node Exporter (3.10+)
# Update Firewall Rules for Node Exporter to work (3.10 and onwards).
- name: Node Exporter and Grafana Configuration (3.10+)
  gather_facts: False
  become: yes
  hosts:
    - nodes
    - infranodes
    - masters
  - nodes
  - infranodes
  - masters
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - install_prometheus
  - install_prometheus
  tasks:
    - when:
      - install_prometheus|bool
      - osrelease is version_compare("3.10", ">=")
      block:
      # Node Exporters on all Nodes listen on port 9100.
      # Open Firewall Port 9100 for future sessions by adding
      # the rule to the iptables file.
      - name: Open Firewall port 9100 for future sessions
        lineinfile:
          dest: /etc/sysconfig/iptables
          insertafter: '-A FORWARD -j REJECT --reject-with icmp-host-prohibited'
          line: '-A OS_FIREWALL_ALLOW -p tcp -m state --state NEW -m tcp --dport 9100 -j ACCEPT'
          state: present
      # Open Firewall Port 9100 for current session by adding
      # the rule to the current iptables configuration. We won't
      # need to restart the iptables service - which will ensure
      # all OpenShift rules stay in place.
      - name: Open Firewall Port 9100 for current session
        iptables:
          action: insert
          protocol: tcp
          destination_port: 9100
          state: present
          chain: OS_FIREWALL_ALLOW
          jump: ACCEPT
  - when:
    - install_prometheus|d(False)|bool
    - osrelease is version_compare("3.10", ">=")
    block:
    # Node Exporters on all Nodes listen on port 9100.
    # Open Firewall Port 9100 for future sessions by adding
    # the rule to the iptables file.
    - name: Open Firewall port 9100 for future sessions
      lineinfile:
        dest: /etc/sysconfig/iptables
        insertafter: '-A FORWARD -j REJECT --reject-with icmp-host-prohibited'
        line: '-A OS_FIREWALL_ALLOW -p tcp -m state --state NEW -m tcp --dport 9100 -j ACCEPT'
        state: present
    # Open Firewall Port 9100 for current session by adding
    # the rule to the current iptables configuration. We won't
    # need to restart the iptables service - which will ensure
    # all OpenShift rules stay in place.
    - name: Open Firewall Port 9100 for current session
      iptables:
        action: insert
        protocol: tcp
        destination_port: 9100
        state: present
        chain: OS_FIREWALL_ALLOW
        jump: ACCEPT
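A hedged verification of the runtime rule added above, using iptables -C, which exits non-zero when the rule is absent (sketch, not part of this commit):

- name: Confirm the Node Exporter port rule is active (sketch)
  command: iptables -C OS_FIREWALL_ALLOW -p tcp -m state --state NEW -m tcp --dport 9100 -j ACCEPT
  changed_when: false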
- name: Customize Service Catalog UI for workshops
  hosts: masters
  run_once: true
  gather_facts: False
  become: yes
  vars_files:
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
  - name: Customize Service Catalog UI for workshops
    import_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/ocp-infra-enable-custom-catalog"
    when: enable_workshops_catalog|d(False)|bool
  tags:
  - env-specific
  - custom_ui
- name: Install Nexus
  hosts: masters
  run_once: true
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  roles:
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/install-nexus"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
  - import_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/ocp-infra-nexus"
    vars:
      desired_project: "{{admin_project}}"
      nexus_version: "3"
      when: install_nexus|bool
    when: install_nexus|d(False)|bool
  tags:
    - env-specific
    - install_nexus
  - env-specific
  - install_nexus
- name: Install AWS Broker
  hosts: masters
@@ -519,14 +542,14 @@
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - env-specific
    - install_aws_broker
  - env-specific
  - install_aws_broker
  tasks:
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/install-aws-broker"
      when: install_aws_broker|bool
  - import_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/ocp-infra-aws-service-broker"
    when: install_aws_broker|d(False)|bool
- name: Update Ansible (Automation) Broker to show images from DockerHub
  hosts: masters
@@ -534,120 +557,212 @@
  gather_facts: False
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - env-specific
    - install_openshiftapb
  - env-specific
  - install_openshiftapb
  tasks:
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift-ansible-broker"
      when: install_openshiftapb|bool
  - name: Update ASB
    import_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/openshift-ansible-broker"
    when: install_openshiftapb|d(False)|bool
- name: Install Maistra (Istio)
  hosts: masters
  run_once: true
  gather_facts: False
  become: yes
  vars_files:
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
  - env-specific
  - install_maistra
  tasks:
  - name: Install Maistra
    import_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/ocp-infra-maistra"
    vars:
      openshift_master_public: "{{ master_lb_dns }}"
    when: install_maistra|d(False)|bool
# WK Added for RHTE
# - name: Install ocp-workload workloads for multiple Users
#   hosts: masters
#   gather_facts: false
#   run_once: true
#   become: yes
#   vars_files:
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
#   tasks:
#   - name: Install ocp-workloads
#     when:
#     - num_users|d(0)|int > 0
#     - student_workloads|d("")|length > 0
#     block:
#     - name: Check if authentication mechanism is set to htpasswd
#       fail:
#         msg: Authentication Mechanism must be htpasswd
#       when:
#       - install_idm|d("") != "htpasswd"
#     - name: Check if remove_self_provisioners=true
#       fail:
#         msg: remove_self_provisioners must be set to true
#       when:
#       - not remove_self_provisioners|d(False)|bool
#     - name: Generate list of User IDs
#       set_fact:
#         users: "{{ lookup('sequence', 'start=1 end={{ num_users|int }}', wantlist=true) | map('int') | list }}"
#     - name: Deploy ocp-workloads for each user ID
#       include_role:
#         name: "{{ ANSIBLE_REPO_PATH }}/roles/{{ workload_loop_var[1] }}"
#       vars:
#         ocp_username: "user{{ workload_loop_var[0] }}"
#         ACTION: "provision"
#       loop: "{{ users | product(student_workloads.split(','))|list }}"
#       loop_control:
#         loop_var: workload_loop_var
# - name: Install ocp-infra workloads
#   hosts: masters
#   gather_facts: false
#   run_once: true
#   become: yes
#   vars_files:
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
#     - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
#   tasks:
#   - name: Install ocp-infra workloads
#     when:
#     - infra_workloads|d("")|length > 0
#     block:
#     - name: Check if admin_user is set
#       fail:
#         msg: admin_user must be set for ocp-infra workloads
#       when:
#       - not admin_user is defined or admin_user|length == 0
#     - name: Install ocp-infra-workloads
#       when:
#       - infra_workloads|d("")|length >0
#       block:
#       - name: Deploy ocp-infra workloads
#         include_role:
#           name: "{{ ANSIBLE_REPO_PATH }}/roles/{{ workload_loop_var[1] }}"
#         vars:
#           admin_user: "{{ admin_user }}"
#           ocp_username: "user{{ workload_loop_var[0] }}"
#           ACTION: "provision"
#         loop: "{{ infra_workloads.split(',')|list }}"
#         loop_control:
#           loop_var: workload_loop_var
# WK Added for RHTE End
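The commented-out RHTE plays above rely on Jinja2's `product` filter to pair every user ID with every workload name, so a single `include_role` loop covers the whole user-times-workload matrix. A standalone sketch of what that loop iterates over, using `range` in place of the `sequence` lookup and `debug` in place of the real workload roles (workload names are illustrative):

```yaml
- name: Show the pairs a product() loop produces
  hosts: localhost
  gather_facts: false
  vars:
    num_users: 2
    student_workloads: "ocp-workload-a,ocp-workload-b"
  tasks:
  - debug:
      msg: "user{{ workload_loop_var[0] }} gets {{ workload_loop_var[1] }}"
    loop: "{{ range(1, num_users|int + 1)|list | product(student_workloads.split(','))|list }}"
    loop_control:
      loop_var: workload_loop_var
```

With two users and two workloads this prints four pairs: user1 and user2 each combined with ocp-workload-a and ocp-workload-b.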
- name: Zabbix for masters
  hosts: masters
  gather_facts: true
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  vars:
    zabbix_auto_registration_keyword: OCP Master
  roles:
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client"
      when: install_zabbix|bool
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client-openshift-master"
      when: install_zabbix|bool
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client-openshift-node"
      when: install_zabbix|bool
  - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client"
    when: install_zabbix|bool
  - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client-openshift-master"
    when: install_zabbix|bool
  - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client-openshift-node"
    when: install_zabbix|bool
  tags:
    - env-specific
    - install_zabbix
  - env-specific
  - install_zabbix
- name: Zabbix for nodes
  hosts:
    - nodes
    - infranodes
  - nodes
  - infranodes
  gather_facts: true
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  vars:
    zabbix_auto_registration_keyword: OCP Node
    zabbix_token: "{{ hostvars[groups['masters'][0]].zabbix_token }}"
    hawkular_route: "{{ hostvars[groups['masters'][0]].hawkular_route }}"
  roles:
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client"
      when: install_zabbix|bool
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client-openshift-node"
      when: install_zabbix|bool
  - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client"
    when: install_zabbix|bool
  - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client-openshift-node"
    when: install_zabbix|bool
  tags:
    - env-specific
    - install_zabbix
  - env-specific
  - install_zabbix
- name: Zabbix for all other hosts (bastion, support, ...)
  hosts:
    - bastions
    - support
  - bastions
  - support
  gather_facts: true
  become: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  vars:
    zabbix_auto_registration_keyword: OCP Host
  roles:
    - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client"
      when: install_zabbix|bool
  - role: "{{ ANSIBLE_REPO_PATH }}/roles/zabbix-client"
    when: install_zabbix|bool
  tags:
    - env-specific
    - install_zabbix
  - env-specific
  - install_zabbix
# start supporting this only for OCP >= 3.9
- name: Run diagnostics from master
  hosts: masters
  become: yes
  gather_facts: False
  run_once: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    # start supporting this only for OCP >= 3.9
    - when:
        - osrelease is version_compare('3.9', '>=')
        - run_ocp_diagnostics|d(true)| bool
      block:
        # this command should return 0 (no error)
        - name: Run oc adm diagnostics
          shell: oc adm diagnostics > /tmp/diagnostics.log
          register: r_diag
          retries: 2
          until: r_diag is succeeded
          ignore_errors: true
  - when:
    - osrelease is version_compare('3.9', '>=')
    - run_ocp_diagnostics|d(False)| bool
    block:
    # this command should return 0 (no error)
    - name: Run oc adm diagnostics
      shell: oc adm diagnostics > /tmp/diagnostics.log
      register: r_diag
      retries: 2
      until: r_diag is succeeded
      ignore_errors: true
        - name: Ensure /tmp/openshift exist
          file:
            path: /tmp/openshift
            state: directory
    - name: Ensure /tmp/openshift exist
      file:
        path: /tmp/openshift
        state: directory
        # oc adm diagnostics logs everything in /tmp/openshift
        - name: Create an archive of diagnostics output logs
          archive:
            path:
              - /tmp/openshift
              - /tmp/diagnostics.log
            dest: /tmp/diagnostics.tar.gz
    # oc adm diagnostics logs everything in /tmp/openshift
    - name: Create an archive of diagnostics output logs
      archive:
        path:
          - /tmp/openshift
          - /tmp/diagnostics.log
        dest: /tmp/diagnostics.tar.gz
        - name: Fetch the diagnostic archive and logs
          fetch:
            src: /tmp/diagnostics.tar.gz
            dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{project_tag}}_diagnostics.tar.gz"
            flat: true
    - name: Fetch the diagnostic archive and logs
      fetch:
        src: /tmp/diagnostics.tar.gz
        dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{project_tag}}_diagnostics.tar.gz"
        flat: true
        - name: Report diagnostics failure
          fail:
            msg: "FAIL {{ project_tag }} Diagnostics"
          when: r_diag is failed
    - name: Report diagnostics failure
      fail:
        msg: "FAIL {{ project_tag }} Diagnostics"
      when: r_diag is failed
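Note the structure of the diagnostics play above: the `oc adm diagnostics` call runs with `ignore_errors: true` so the archive and fetch steps still execute and the evidence reaches `workdir/`, and only the final `fail` task marks the run as broken. The same collect-then-fail pattern in isolation (command and paths are placeholders):

```yaml
- name: Run a check, but keep going so its logs can still be collected
  shell: some-diagnostic-command > /tmp/check.log   # placeholder command
  register: r_check
  retries: 2
  until: r_check is succeeded
  ignore_errors: true

- name: Fetch the log whether or not the check passed
  fetch:
    src: /tmp/check.log
    dest: workdir/check.log   # placeholder destination
    flat: true

- name: Fail only after the evidence is safely fetched
  fail:
    msg: "Check failed"
  when: r_check is failed
```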
- name: Configure IPA on bastion
  hosts: bastions
@@ -655,12 +770,12 @@
  gather_facts: False
  run_once: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa"
      when: install_ipa_client|bool
  - import_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa"
    when: install_ipa_client|bool
- name: PostSoftware flight-check
  hosts: localhost
@@ -668,20 +783,18 @@
  gather_facts: false
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tags:
    - post_flight_check
  - post_flight_check
  tasks:
    - debug:
        msg: "Post-Software checks completed successfully"
  - debug:
      msg: "Post-Software checks completed successfully"
- name: Gather facts
  hosts:
    - all
  - all
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  gather_facts: true
  tags:
    - ocp_report
@@ -692,34 +805,34 @@
  become: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tags:
    - ocp_report
  - ocp_report
  vars:
    env_all_hosts: all
  tasks:
    - name: get repo version used to deploy
      command: git rev-parse HEAD
      args:
        chdir: "{{ ANSIBLE_REPO_PATH }}"
      register: ansible_agnostic_deployer_head
  - name: get repo version used to deploy
    command: git rev-parse HEAD
    args:
      chdir: "{{ ANSIBLE_REPO_PATH }}"
    register: ansible_agnostic_deployer_head
    - name: Gather ec2 facts
      ec2_remote_facts:
        aws_access_key: "{{ aws_access_key_id }}"
        aws_secret_key: "{{ aws_secret_access_key }}"
        region: "{{ aws_region_final|d(aws_region) }}"
        filters:
          instance-state-name: running
          "tag:Project": "{{project_tag}}"
      when:
        - ocp_report|bool
        - cloud_provider == 'ec2'
    - name: Generate report
      template:
        src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/ocp_report.adoc.j2"
        dest: "{{ ANSIBLE_REPO_PATH }}/workdir/ocp_report_{{ env_type }}-{{ guid }}.adoc"
      when:
        - ocp_report|bool
        - cloud_provider == 'ec2'
  - name: Gather ec2 facts
    ec2_remote_facts:
      aws_access_key: "{{ aws_access_key_id }}"
      aws_secret_key: "{{ aws_secret_access_key }}"
      region: "{{ aws_region_final|d(aws_region) }}"
      filters:
        instance-state-name: running
        "tag:Project": "{{project_tag}}"
    when:
      - ocp_report|bool
      - cloud_provider == 'ec2'
  - name: Generate report
    template:
      src: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/files/ocp_report.adoc.j2"
      dest: "{{ ANSIBLE_REPO_PATH }}/workdir/ocp_report_{{ env_type }}-{{ guid }}.adoc"
    when:
      - ocp_report|bool
      - cloud_provider == 'ec2'
ansible/configs/ocp-workshop/pre_software.yml
@@ -61,13 +61,25 @@
  become: true
  gather_facts: false
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/install-lets-encrypt-certs"
      tags: lets_encrypt
      when: install_lets_encrypt_certificates|bool
  - name: Create Let's Encrypt Certificates
    include_role:
      name: "{{ ANSIBLE_REPO_PATH }}/roles/host-lets-encrypt-certs"
    vars:
    - acme_domain: "{{ master_lb_dns }}"
    - acme_wildcard_domain: "*.{{ cloudapps_suffix }}"
    - acme_aws_access_key: "{{ hostvars['localhost'].route53user_access_key }}"
    - acme_aws_secret_access_key: "{{ hostvars['localhost'].route53user_secret_access_key }}"
    - acme_production: "{{ lets_encrypt_production|d(False)|bool}}"
    - acme_remote_dir: "/root"
    - acme_cache_cert_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.cert"
    - acme_cache_key_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.key"
    - acme_cache_archive_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}_acme.tgz"
    - acme_renew_automatically: True
    - acme_force_issue: False
    when:
    - install_lets_encrypt_certificates|d(False)|bool
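The replacement play above does more than rename the role: the `acme_cache_*` variables persist the issued certificate, key, and ACME state under `workdir/`, so re-running the config reuses the cached material instead of re-requesting from the rate-limited production endpoint. A trimmed sketch of the caching-related subset (the domain value is illustrative):

```yaml
- name: Reuse cached Let's Encrypt material on subsequent runs
  include_role:
    name: "{{ ANSIBLE_REPO_PATH }}/roles/host-lets-encrypt-certs"
  vars:
  - acme_domain: "master.example.opentlc.com"   # illustrative
  - acme_cache_cert_file: "{{ ANSIBLE_REPO_PATH }}/workdir/{{ guid }}.cert"
  - acme_cache_key_file: "{{ ANSIBLE_REPO_PATH }}/workdir/{{ guid }}.key"
  - acme_cache_archive_file: "{{ ANSIBLE_REPO_PATH }}/workdir/{{ guid }}_acme.tgz"
  - acme_force_issue: False   # set True to discard the cache and re-issue
```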
- name: Configuring Bastion Hosts
  hosts: bastions
ansible/configs/quay-enterprise/env_vars.yml
@@ -41,14 +41,17 @@
qe_quay_ssl_enable: True
qe_quay_ssl_lets_encrypt_certs: False
# qe_quay_ssl_key_file: "files/quay.quay1.example.opentlc.com.key"
# qe_quay_ssl_cert_file: "files/quay.quay1.example.opentlc.com.cer"
qe_quay_ssl_lets_encrypt_production: False
qe_quay_ssl_lets_encrypt_force_renew: False
qe_quay_ssl_lets_encrypt_renew_automatically: False
qe_quay_ssl_key_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.key"
qe_quay_ssl_cert_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.cert"
# qe_quay_superuser_username: quayadmin
# qe_quay_superuser_password: quaypwd
# qe_quay_superuser_email: quayadmin@dummy.com
# Clair
qe_quay_clair_enable: True
qe_quay_clair_enable: False
qe_quay_clair_hostname: clair
qe_quay_clair_instance_type: t2.2xlarge
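With `qe_quay_clair_enable` now defaulting to `False` and the Let's Encrypt paths wired to `workdir/`, turning the full TLS-plus-scanning stack on is a matter of overriding the toggles above. An illustrative set of `env_vars.yml` overrides (not defaults):

```yaml
# Illustrative overrides; Clair additionally needs clair hosts in the inventory
qe_quay_ssl_enable: True
qe_quay_ssl_lets_encrypt_certs: True
qe_quay_ssl_lets_encrypt_production: True   # real certs, subject to rate limits
qe_quay_clair_enable: True
```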
ansible/configs/quay-enterprise/software.yml
@@ -24,6 +24,39 @@
        - quay_clair_enable|d(False)|bool
        - "'clair' not in groups or groups['clair']|length == 0 or 'clair_database' not in groups or groups['clair_database']|length == 0"
- name: Set up Let's Encrypt Certificates
  hosts:
  - quay_enterprise
  gather_facts: false
  become: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
  - name: Open iptables port 80
    iptables:
      action: insert
      protocol: tcp
      destination_port: "80"
      state: present
      chain: INPUT
      jump: ACCEPT
  - name: Request Certificates
    block:
    - name: Request and install Let's Encrypt Certificates
      include_role:
        name: ../../roles/host-lets-encrypt-certs
      vars:
      - acme_domain: "{{ qe_quay_hostname }}.{{ subdomain_base }}"
      - acme_remote_dir: "/root"
      - acme_cache_cert_file: "{{ qe_quay_ssl_cert_file }}"
      - acme_cache_key_file: "{{ qe_quay_ssl_key_file }}"
      - acme_cache_archive_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}_acme.tgz"
      - acme_production: "{{ qe_quay_ssl_lets_encrypt_production|d(False)|bool }}"
      - acme_renew_automatically: "{{ qe_quay_ssl_lets_encrypt_renew_automatically|d(False)|bool }}"
      - acme_force_issue: "{{ qe_quay_ssl_lets_encrypt_force_renew|d(False)|bool }}"
    when:
    - qe_quay_ssl_lets_encrypt_certs|d(False)|bool
- name: Set up Node Software (Docker)
  hosts:
    - all_vms
@@ -31,8 +64,19 @@
  become: true
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  roles:
    - ../../roles/host-ocp-node
  tasks:
    - name: Ensure that iptables service is installed
      yum:
        name: iptables-services
        state: latest
    - name: Ensure that iptables service is enabled and started
      systemd:
        name: iptables
        enabled: yes
        state: started
    - name: Set up Node Software (Docker)
      include_role:
        name: ../../roles/host-ocp-node
- name: Install and Configure Quay Database
  hosts: quay_database
@@ -53,18 +97,15 @@
            postgresql_password: "{{ qe_quay_database_password }}"
            postgresql_admin_user: "{{ qe_quay_database_admin_username }}"
            postgresql_admin_password: "{{ qe_quay_database_admin_password }}"
            postgresql_port: "{{ qe_quay_database_port|d('5432') }}"
            postgresql_host_port: "{{ qe_quay_database_port|d('5432') }}"
            postgresql_database: "{{ qe_quay_database_name }}"
        
        - name: Flush Handlers
          meta: flush_handlers
        - name: Sleep to give PostgreSQL a chance to finish starting up
          pause:
            seconds: 10
        - name: Locate PostgreSQL Container
          command: docker ps --filter=name="{{ qe_quay_database_service_name | default('postgresql-quay') }}" -q
          command: docker ps --filter=name="{{ qe_quay_database_service_name | d('postgresql-quay') }}" -q
          register: postgresql_container
          
        - name: Configure PostgreSQL
@@ -98,40 +139,43 @@
      include_role:
        name: ../../roles/config-quay-enterprise
      vars:
        # Commented out variables need to be set in env_vars.yml
        quay_database_type: "postgresql"
        quay_database_username: "{{ qe_quay_database_username }}"
        quay_database_password: "{{ qe_quay_database_password }}"
        quay_database_name: "{{ qe_quay_database_name }}"
        quay_database_port: "{{ qe_quay_database_port }}|d(5432)"
        quay_database_port: "{{ qe_quay_database_port|d('5432') }}"
        quay_database_host: "{{ qe_quay_database_host }}"
        quay_ssl_cert_file: "{{ qe_quay_ssl_cert_file }}"
        quay_ssl_key_file: "{{ qe_quay_ssl_key_file }}"
        quay_registry_auth: "{{ qe_quay_registry_auth }}"
        quay_superuser_username: "{{ qe_quay_superuser_username }}"
        quay_superuser_password: "{{ qe_quay_superuser_password }}"
        quay_superuser_email: "{{ qe_quay_superuser_email }}"
        redis_host: "{{ qe_quay_hostname }}"
        quay_server_hostname: "{{ qe_quay_hostname }}.{{subdomain_base}}"
        quay_clair_enable: "{{ (groups['clair']| length > 0) | ternary('True','False') }}"
        quay_clair_enable: "{{ qe_quay_clair_enable }}"
        quay_clair_endpoint: "http://{{qe_quay_clair_hostname}}.{{ guid }}.internal:6060"
      tags:
        - install_quay
- name: Install and Configure Clair Database
  hosts: clair_database
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: Install and Configure PostgreSQL for Clair
      block:
        - name: Install PostgreSQL for Clair
          include_role:
            name: ../../roles/config-postgresql
          vars:
            mode: containerized
            postgresql_name: "{{ qe_quay_clair_database_service_name|d('postgresql-clair') }}"
            postgresql_username: "{{ qe_clair_database_username }}"
            postgresql_password: "{{ qe_clair_database_password }}"
            postgresql_host_port: "{{ qe_clair_database_port|d('5433') }}"
            postgresql_admin_user: "{{ qe_clair_database_admin_username }}"
            postgresql_admin_password: "{{ qe_clair_database_admin_password }}"
            postgresql_database: "{{ qe_clair_database_name }}"
          when: groups['clair_database']| length > 0
    - name: Install PostgreSQL for Clair
      include_role:
        name: ../../roles/config-postgresql
      vars:
        mode: containerized
        postgresql_name: "{{ qe_quay_clair_database_service_name|d('postgresql-clair') }}"
        postgresql_username: "{{ qe_clair_database_username }}"
        postgresql_password: "{{ qe_clair_database_password }}"
        postgresql_host_port: "{{ qe_clair_database_port|d('5433') }}"
        postgresql_admin_user: "{{ qe_clair_database_admin_username }}"
        postgresql_admin_password: "{{ qe_clair_database_admin_password }}"
        postgresql_database: "{{ qe_clair_database_name }}"
      when:
        - groups['clair_database']| length > 0
        - qe_quay_clair_enable|d(False)|bool
- name: Install Clair
  hosts: clair
@@ -139,23 +183,31 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - name: Gather facts from machine
      setup:
      with_items:
        - "{{ groups['quay_enterprise'] }}"
    - name: Install Clair
      include_role:
        name: ../../roles/config-clair
      vars:
        database_host: "{{ qe_quay_clair_hostname }}"
        quay_enterprise_address: "{{ (qe_quay_ssl_enable|bool)| ternary('https','http') }}://{{ qe_quay_hostname }}.{{ subdomain_base }}"
        clair_ssl_trust_configure: "{{ qe_quay_ssl_enable|d(False)|bool }}"
        clair_ssl_trust_src_file: "{{ qe_quay_ssl_cert_file }}"
        postgresql_username: "{{ qe_clair_database_username }}"
        postgresql_password: "{{ qe_clair_database_password }}"
        postgresql_port: "{{ qe_clair_database_port | default('5433') }}"
    - name: Set SSL Certificate to generated certificate if no certificate file specified
      set_fact:
        qe_quay_ssl_cert_file: "/tmp/ssl.cert"
      when:
        - quay_clair_enable|d(False)|bool
        - qe_quay_ssl_cert_file is not defined or qe_quay_ssl_cert_file|trim == ""
    - name: Install Clair
      block:
      - name: Gather facts from machine
        setup:
        with_items:
          - "{{ groups['quay_enterprise'] }}"
      - name: Install Clair
        include_role:
          name: ../../roles/config-clair
        vars:
          database_host: "{{ qe_quay_clair_hostname }}"
          quay_enterprise_address: "{{ (qe_quay_ssl_enable|bool)| ternary('https','http') }}://{{ qe_quay_hostname }}.{{ subdomain_base }}"
          clair_ssl_trust_configure: "{{ qe_quay_ssl_enable|d(False)|bool }}"
          clair_ssl_trust_src_file: "{{ qe_quay_ssl_cert_file }}"
          postgresql_username: "{{ qe_clair_database_username }}"
          postgresql_password: "{{ qe_clair_database_password }}"
          postgresql_port: "{{ qe_clair_database_port | d('5433') }}"
      when:
        - groups['clair']| length > 0
        - qe_quay_clair_enable|d(False)|bool
- name: Software flight-check
  hosts: localhost
ansible/roles/config-clair/handlers/main.yml
@@ -10,9 +10,4 @@
- name: restart firewalld
  service:
    name: firewalld
    state: restarted
- name: restart iptables
  service:
    name: iptables
    state: restarted
ansible/roles/config-clair/tasks/firewall.yml
@@ -24,7 +24,7 @@
  notify:
  - restart firewalld
- name: Ensure iptables is correctly configured
- name: Open iptables Clair firewall port for future sessions
  lineinfile:
    insertbefore: "-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT"
    state: present
@@ -38,7 +38,7 @@
  # notify:
  # - restart iptables
- name: Open Clair Firewall Ports for current session
- name: Open iptables Clair firewall port for current session
  iptables:
    action: insert
    protocol: tcp
ansible/roles/config-clair/tasks/main.yml
@@ -52,7 +52,6 @@
    mode: g+rw
  notify: Restart Clair Service
- name: Configure systemd environment files
  template:
    src: "{{ clair_name }}.j2"
@@ -66,4 +65,7 @@
  notify: "Restart Clair Service"
- name: Include firewall tasks
  include_tasks: firewall.yml
  include_tasks: firewall.yml
- name: Flush Handlers (Clair)
  meta: flush_handlers
ansible/roles/config-postgresql/handlers/main.yml
@@ -10,9 +10,4 @@
- name: restart firewalld
  service:
    name: firewalld
    state: restarted
- name: restart iptables
  service:
    name: iptables
    state: restarted
    state: restarted
ansible/roles/config-postgresql/tasks/firewall.yml
@@ -14,29 +14,29 @@
- name: Open port in firewalld
  firewalld:
    port: "{{ postgresql_port }}/tcp"
    port: "{{ postgresql_host_port }}/tcp"
    permanent: true
    state: enabled
  when: firewalld_status.rc == 0
  notify:
  - restart firewalld
- name: Ensure iptables is correctly configured
- name: Open iptables Postgresql firewall port for future sessions
  lineinfile:
    insertbefore: "-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT"
    state: present
    dest: /etc/sysconfig/iptables
    regexp: "^-A INPUT .* --dport {{ postgresql_port }} .* ACCEPT"
    line: "-A INPUT -p tcp -m state --state NEW -m tcp --dport {{ postgresql_port }} -j ACCEPT"
    regexp: "^-A INPUT .* --dport {{ postgresql_host_port }} .* ACCEPT"
    line: "-A INPUT -p tcp -m state --state NEW -m tcp --dport {{ postgresql_host_port }} -j ACCEPT"
  when: iptables_status.rc == 0 and firewalld_status.rc != 0
  # notify:
  # - restart iptables
- name: Open Postgresql Firewall Port for current session
- name: Open iptables Postgresql firewall port for current session
  iptables:
    action: insert
    protocol: tcp
    destination_port: "{{ postgresql_port }}"
    destination_port: "{{ postgresql_host_port }}"
    state: present
    chain: INPUT
    jump: ACCEPT
ansible/roles/config-postgresql/tasks/main.yml
@@ -3,3 +3,6 @@
- name: Install Containerized PostgreSQL
  include_tasks: install_containerized.yml
  when: mode == "containerized"
- name: Flush Handlers (Postgresql)
  meta: flush_handlers
ansible/roles/config-quay-enterprise/defaults/main.yml
@@ -11,7 +11,7 @@
systemd_environmentfile_dir: /etc/sysconfig
# Quay
quay_image: quay.io/coreos/quay:v2.9.2
quay_image: quay.io/coreos/quay:v2.9.3
quay_config_dir: /var/lib/quay/config
quay_container_config_dir: /conf/stack
quay_storage_dir: /var/lib/quay/storage
ansible/roles/config-quay-enterprise/handlers/main.yml
@@ -11,8 +11,3 @@
  service:
    name: firewalld
    state: restarted
- name: restart iptables
  service:
    name: iptables
    state: restarted
ansible/roles/config-quay-enterprise/tasks/firewall.yml
@@ -24,7 +24,7 @@
  notify:
  - restart firewalld
- name: Ensure iptables is correctly configured
- name: Open iptables Quay firewall ports for future sessions
  lineinfile:
    insertbefore: "-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT"
    state: present
@@ -35,10 +35,8 @@
    - "{{ quay_host_http_port }}"
    - "{{ quay_host_https_port }}"
  when: iptables_status.rc == 0 and firewalld_status.rc != 0
  # notify:
  # - restart iptables
- name: Open Quay Firewall Ports for current session
- name: Open iptables Quay firewall ports for current session
  iptables:
    action: insert
    protocol: tcp
ansible/roles/config-quay-enterprise/tasks/main.yml
@@ -42,55 +42,52 @@
- name: Include systemd configurations
  include_tasks: configure_systemd.yml
- name: Set SSL Facts
  set_fact:
    quay_ssl_enable: "{{ quay_ssl_enable }}"
- name: Set Fact for Custom SSL Certificates
  set_fact:
    quay_ssl_cert_file: "{{ quay_ssl_cert_file }}"
    quay_ssl_key_file: "{{ quay_ssl_key_file }}"
    quay_ssl_cert_file_to_use: "{{ quay_ssl_cert_file }}"
    quay_ssl_key_file_to_use: "{{ quay_ssl_key_file }}"
  when: quay_ssl_enable|bool and (quay_ssl_key_file is defined and quay_ssl_key_file|trim != "" and quay_ssl_cert_file is defined and quay_ssl_cert_file|trim != "")
- name: Create SSL Certificates
- name: Create Self Signed SSL Certificates
  block:
    - name: Create Temporary SSL Directory
      command: mktemp -d /tmp/quay-ssl-XXXXXXX
      register: quay_ssl_remote_tmp_dir_mktemp
      delegate_to: "{{ groups['quay_enterprise'][0] }}"
      when: quay_ssl_remote_tmp_dir is undefined and quay_ssl_remote_tmp_dir|trim == ""
  - name: Create Temporary SSL Directory
    command: mktemp -d /tmp/quay-ssl-XXXXXXX
    register: quay_ssl_remote_tmp_dir_mktemp
    delegate_to: "{{ groups['quay_enterprise'][0] }}"
    when: quay_ssl_remote_tmp_dir is undefined and quay_ssl_remote_tmp_dir|trim == ""
    - name: Set Fact for Remote SSL Directory
      set_fact:
        quay_ssl_remote_tmp_dir: "{{ quay_ssl_remote_tmp_dir if quay_ssl_remote_tmp_dir is defined and quay_ssl_remote_tmp_dir|trim == '' else quay_ssl_remote_tmp_dir_mktemp.stdout }}"
      when: quay_ssl_remote_tmp_dir is undefined and quay_ssl_remote_tmp_dir|trim == ""
  - name: Set Fact for Remote Self Signed SSL Directory
    set_fact:
      quay_ssl_remote_tmp_dir: "{{ quay_ssl_remote_tmp_dir if quay_ssl_remote_tmp_dir is defined and quay_ssl_remote_tmp_dir|trim == '' else quay_ssl_remote_tmp_dir_mktemp.stdout }}"
    when: quay_ssl_remote_tmp_dir is undefined and quay_ssl_remote_tmp_dir|trim == ""
    - name: Create SSL Certificate
      command: openssl req -nodes -x509 -newkey rsa:4096 -keyout {{ quay_ssl_remote_tmp_dir }}/ssl.key -out {{ quay_ssl_remote_tmp_dir }}/ssl.cert -subj "/C={{ quay_ssl_generate_country }}/ST={{ quay_ssl_generate_state }}/L={{ quay_ssl_generate_city }}/O={{ quay_ssl_generate_organization }}/OU={{ quay_ssl_generate_organizational_unit }}/CN={{ quay_server_hostname }}" -days {{ quay_ssl_generate_days_validity }}
      delegate_to: "{{ groups['quay_enterprise'][0] }}"
  - name: Create SSL Certificate
    command: openssl req -nodes -x509 -newkey rsa:4096 -keyout {{ quay_ssl_remote_tmp_dir }}/ssl.key -out {{ quay_ssl_remote_tmp_dir }}/ssl.cert -subj "/C={{ quay_ssl_generate_country }}/ST={{ quay_ssl_generate_state }}/L={{ quay_ssl_generate_city }}/O={{ quay_ssl_generate_organization }}/OU={{ quay_ssl_generate_organizational_unit }}/CN={{ quay_server_hostname }}" -days {{ quay_ssl_generate_days_validity }}
    delegate_to: "{{ groups['quay_enterprise'][0] }}"
    - name: Fetch SSL Certificates
      fetch:
        src:  "{{ item.src }}"
        dest: "{{ item.dest }}"
        flat: true
        fail_on_missing: yes
      delegate_to: "{{ groups['quay_enterprise'][0] }}"
      run_once: true
      with_items:
        - { src: "{{ quay_ssl_remote_tmp_dir }}/ssl.key", dest: "{{ quay_ssl_local_tmp_dir }}/ssl.key" }
        - { src: "{{ quay_ssl_remote_tmp_dir }}/ssl.cert", dest: "{{ quay_ssl_local_tmp_dir }}/ssl.cert" }
  - name: Fetch Self Signed SSL Certificates
    fetch:
      src:  "{{ item.src }}"
      dest: "{{ item.dest }}"
      flat: true
      fail_on_missing: yes
    delegate_to: "{{ groups['quay_enterprise'][0] }}"
    run_once: true
    with_items:
      - { src: "{{ quay_ssl_remote_tmp_dir }}/ssl.key", dest: "{{ quay_ssl_local_tmp_dir }}/ssl.key" }
      - { src: "{{ quay_ssl_remote_tmp_dir }}/ssl.cert", dest: "{{ quay_ssl_local_tmp_dir }}/ssl.cert" }
    - name: Delete Remote SSL Certificates
      file:
        state: absent
        path: "{{ quay_ssl_remote_tmp_dir }}"
      delegate_to: "{{ groups['quay_enterprise'][0] }}"
  - name: Delete Remote Self Signed SSL Certificates
    file:
      state: absent
      path: "{{ quay_ssl_remote_tmp_dir }}"
    when: quay_ssl_delete_generated_cert|bool
    delegate_to: "{{ groups['quay_enterprise'][0] }}"
    - name: Set Fact for Custom SSL Certificates
      set_fact:
        quay_ssl_cert_file: "{{ quay_ssl_local_tmp_dir }}/ssl.cert"
        quay_ssl_key_file: "{{ quay_ssl_local_tmp_dir }}/ssl.key"
  - name: Set Fact for Self Signed SSL Certificates
    set_fact:
      quay_ssl_cert_file_to_use: "{{ quay_ssl_local_tmp_dir }}/ssl.cert"
      quay_ssl_key_file_to_use: "{{ quay_ssl_local_tmp_dir }}/ssl.key"
  when: quay_ssl_enable|bool and (quay_ssl_key_file is not defined or quay_ssl_key_file|trim == "" or quay_ssl_cert_file is not defined or quay_ssl_cert_file|trim == "")
- name: Copy SSL Certificates
@@ -102,8 +99,8 @@
    mode: g+rw
  notify: Restart quay service
  with_items:
    - { src: "{{ quay_ssl_key_file }}", dest: "{{ quay_config_dir }}/ssl.key" }
    - { src: "{{ quay_ssl_cert_file }}", dest: "{{ quay_config_dir }}/ssl.cert" }
    - { src: "{{ quay_ssl_cert_file_to_use }}", dest: "{{ quay_config_dir }}/ssl.cert" }
    - { src: "{{ quay_ssl_key_file_to_use }}", dest: "{{ quay_config_dir }}/ssl.key" }
  when: quay_ssl_enable|bool
- name: Check if Quay configuration exists
@@ -131,3 +128,6 @@
- name: Setup Initial User and Configuration
  include_tasks: complete_setup.yml
  when: not quay_config_stat_result.stat.exists and quay_superuser_username is defined and quay_superuser_username|trim != "" and quay_superuser_password is defined and quay_superuser_password|trim != "" and quay_superuser_email is defined and quay_superuser_email|trim != ""
- name: Flush Handlers (Quay)
  meta: flush_handlers
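The net effect of the renames above is that `quay_ssl_cert_file` and `quay_ssl_key_file` stay as pure user input while the `*_to_use` facts carry whatever was actually resolved, either the custom files or the generated self-signed pair. The same selection could be collapsed into a single fact, sketched here under that reading:

```yaml
- name: Resolve which certificate pair Quay should mount
  set_fact:
    quay_ssl_cert_file_to_use: "{{ quay_ssl_cert_file if quay_ssl_cert_file|d('')|trim != '' else quay_ssl_local_tmp_dir ~ '/ssl.cert' }}"
    quay_ssl_key_file_to_use: "{{ quay_ssl_key_file if quay_ssl_key_file|d('')|trim != '' else quay_ssl_local_tmp_dir ~ '/ssl.key' }}"
  when: quay_ssl_enable|bool
```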
ansible/roles/config-redis/handlers/main.yml
@@ -11,8 +11,3 @@
  service:
    name: firewalld
    state: restarted
- name: restart iptables
  service:
    name: iptables
    state: restarted
ansible/roles/config-redis/tasks/firewall.yml
@@ -21,7 +21,7 @@
  notify:
  - restart firewalld
- name: Ensure iptables is correctly configured
- name: Open iptables Redis firewall port for future sessions
  lineinfile:
    insertbefore: "-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT"
    state: present
@@ -32,11 +32,11 @@
  # notify:
  # - restart iptables
- name: Open Redis Firewall Port for current session
- name: Open iptables Redis firewall port for current session
  iptables:
    action: insert
    protocol: tcp
    destination_port: {{ redis_host_port }}
    destination_port: "{{ redis_host_port }}"
    state: present
    chain: INPUT
    jump: ACCEPT
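The redis change above is a YAML quoting fix rather than a behavior change: a scalar beginning with `{{` makes the parser treat the value as a flow mapping, so the unquoted form fails to load at all. The rule in isolation:

```yaml
# Fails to parse: YAML reads the leading '{' as the start of a mapping
# destination_port: {{ redis_host_port }}

# Loads correctly: the Jinja2 expression is an ordinary quoted string
destination_port: "{{ redis_host_port }}"
```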
ansible/roles/config-redis/tasks/main.yml
@@ -3,3 +3,6 @@
- name: Install Containerized Redis
  include_tasks: install_containerized.yml
  when: mode == "containerized"
- name: Flush Handlers (Redis)
  meta: flush_handlers
ansible/roles/geerlingguy.java/.gitignore
New file
@@ -0,0 +1,2 @@
*.retry
tests/test.sh
ansible/roles/geerlingguy.java/.travis.yml
New file
@@ -0,0 +1,31 @@
---
services: docker
env:
  - distro: centos7
  - distro: centos6
  - distro: fedora27
  - distro: ubuntu1804
  - distro: ubuntu1604
  - distro: ubuntu1404
  - distro: ubuntu1204
  - distro: debian9
  - distro: debian8
script:
  # Configure test script so we can run extra tests after playbook is run.
  - export container_id=$(date +%s)
  - export cleanup=false
  # Download test shim.
  - wget -O ${PWD}/tests/test.sh https://gist.githubusercontent.com/geerlingguy/73ef1e5ee45d8694570f334be385e181/raw/
  - chmod +x ${PWD}/tests/test.sh
  # Run tests.
  - ${PWD}/tests/test.sh
  # Ensure Java is installed.
  - 'docker exec --tty ${container_id} env TERM=xterm which java'
notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
ansible/roles/geerlingguy.java/LICENSE
New file
@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2017 Jeff Geerling
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
ansible/roles/geerlingguy.java/README.md
New file
@@ -0,0 +1,66 @@
# Ansible Role: Java
[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-java.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-java)
Installs Java for RedHat/CentOS and Debian/Ubuntu Linux servers.
## Requirements
None.
## Role Variables
Available variables are listed below, along with default values:
    # The defaults provided by this role are specific to each distribution.
    java_packages:
      - java-1.7.0-openjdk
Set the version/development kit of Java to install, along with any other necessary Java packages. Some other options are included in the distribution-specific files in this role's 'defaults' folder.
    java_home: ""
If set, the role will set the global environment variable `JAVA_HOME` to this value.
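For example, a minimal playbook that pins `JAVA_HOME` while keeping the distribution's default packages (the JVM path is illustrative):

```yaml
- hosts: servers
  become: true
  vars:
    java_home: /usr/lib/jvm/java-1.8.0-openjdk   # illustrative path
  roles:
    - geerlingguy.java
```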
## Dependencies
None.
## Example Playbook (using default package, usually OpenJDK 7)
    - hosts: servers
      roles:
        - geerlingguy.java
## Example Playbook (install OpenJDK 8)
For RHEL / CentOS:
    - hosts: server
      roles:
        - role: geerlingguy.java
          when: "ansible_os_family == 'RedHat'"
          java_packages:
            - java-1.8.0-openjdk
For Ubuntu < 16.04:
    - hosts: server
      tasks:
        - name: installing repo for Java 8 in Ubuntu
          apt_repository: repo='ppa:openjdk-r/ppa'
    - hosts: server
      roles:
        - role: geerlingguy.java
          when: "ansible_os_family == 'Debian'"
          java_packages:
            - openjdk-8-jdk
## License
MIT / BSD
## Author Information
This role was created in 2014 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
ansible/roles/geerlingguy.java/defaults/main.yml
New file
@@ -0,0 +1,6 @@
---
# Set java_packages if you would like to use a different version than the
# default (OpenJDK 1.7).
# java_packages: []
java_home: ""
ansible/roles/geerlingguy.java/meta/.galaxy_install_info
New file
@@ -0,0 +1 @@
{install_date: 'Tue Aug 21 09:40:24 2018', version: 1.8.1}
ansible/roles/geerlingguy.java/meta/main.yml
New file
@@ -0,0 +1,39 @@
---
dependencies: []
galaxy_info:
  author: geerlingguy
  description: Java for Linux
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: 2.4
  platforms:
    - name: EL
      versions:
      - 6
      - 7
    - name: Fedora
      versions:
      - all
    - name: Debian
      versions:
      - wheezy
      - jessie
      - stretch
    - name: Ubuntu
      versions:
      - precise
      - trusty
      - xenial
      - bionic
    - name: FreeBSD
      versions:
      - 10.2
  galaxy_tags:
    - development
    - system
    - web
    - java
    - jdk
    - openjdk
    - oracle
ansible/roles/geerlingguy.java/tasks/main.yml
New file
@@ -0,0 +1,37 @@
---
- name: Include OS-specific variables.
  include_vars: "{{ ansible_os_family }}.yml"
  when:
    - ansible_os_family != 'Debian'
    - ansible_distribution != 'Fedora'
- name: Include OS-specific variables for Fedora.
  include_vars: "{{ ansible_distribution }}.yml"
  when: ansible_distribution == 'Fedora'
- name: Include version-specific variables for Debian.
  include_vars: "{{ ansible_distribution|title }}-{{ ansible_distribution_version.split('.')[0] }}.yml"
  when: ansible_os_family == 'Debian'
- name: Define java_packages.
  set_fact:
    java_packages: "{{ __java_packages | list }}"
  when: java_packages is not defined
# Setup/install tasks.
- include_tasks: setup-RedHat.yml
  when: ansible_os_family == 'RedHat'
- include_tasks: setup-Debian.yml
  when: ansible_os_family == 'Debian'
- include_tasks: setup-FreeBSD.yml
  when: ansible_os_family == 'FreeBSD'
# Environment setup.
- name: Set JAVA_HOME if configured.
  template:
    src: java_home.sh.j2
    dest: /etc/profile.d/java_home.sh
    mode: 0644
  when: java_home is defined and java_home != ''
ansible/roles/geerlingguy.java/tasks/setup-Debian.yml
New file
@@ -0,0 +1,4 @@
---
- name: Ensure Java is installed.
  apt: "name={{ item }} state=present"
  with_items: "{{ java_packages }}"
ansible/roles/geerlingguy.java/tasks/setup-FreeBSD.yml
New file
@@ -0,0 +1,10 @@
---
- name: Ensure Java is installed.
  pkgng: "name={{ item }} state=present"
  with_items: "{{ java_packages }}"
- name: ensure proc is mounted
  mount: name=/proc fstype=procfs src=proc opts=rw state=mounted
- name: ensure fdesc is mounted
  mount: name=/dev/fd fstype=fdescfs src=fdesc opts=rw state=mounted
ansible/roles/geerlingguy.java/tasks/setup-RedHat.yml
New file
@@ -0,0 +1,4 @@
---
- name: Ensure Java is installed.
  package: "name={{ item }} state=present"
  with_items: "{{ java_packages }}"
ansible/roles/geerlingguy.java/templates/java_home.sh.j2
New file
@@ -0,0 +1 @@
export JAVA_HOME={{ java_home }}
ansible/roles/geerlingguy.java/tests/README.md
New file
@@ -0,0 +1,11 @@
# Ansible Role tests
To run the test playbook(s) in this directory:
  1. Install and start Docker.
  1. Download the test shim (see .travis.yml file for the URL) into `tests/test.sh`:
    - `wget -O tests/test.sh https://gist.githubusercontent.com/geerlingguy/73ef1e5ee45d8694570f334be385e181/raw/`
  1. Make the test shim executable: `chmod +x tests/test.sh`.
  1. Run (from the role root directory) `distro=[distro] playbook=[playbook] ./tests/test.sh`
If you don't want the container to be automatically deleted after the test playbook is run, add the following environment variables: `cleanup=false container_id=$(date +%s)`
ansible/roles/geerlingguy.java/tests/test.yml
New file
@@ -0,0 +1,11 @@
---
- hosts: all
  pre_tasks:
    - name: Update apt cache.
      apt: update_cache=yes cache_valid_time=600
      when: ansible_os_family == 'Debian'
      changed_when: false
  roles:
    - role_under_test
ansible/roles/geerlingguy.java/vars/Debian-8.yml
New file
@@ -0,0 +1,7 @@
---
# JDK version options include:
#   - java
#   - openjdk-6-jdk
#   - openjdk-7-jdk
__java_packages:
  - openjdk-7-jdk
ansible/roles/geerlingguy.java/vars/Debian-9.yml
New file
@@ -0,0 +1,6 @@
---
# JDK version options include:
#   - java
#   - openjdk-8-jdk
__java_packages:
  - openjdk-8-jdk
ansible/roles/geerlingguy.java/vars/Fedora.yml
New file
@@ -0,0 +1,6 @@
---
# JDK version options include:
#   - java
#   - java-1.8.0-openjdk
__java_packages:
  - java-1.8.0-openjdk
ansible/roles/geerlingguy.java/vars/FreeBSD.yml
New file
@@ -0,0 +1,7 @@
---
# JDK version options for FreeBSD include:
#   - openjdk
#   - openjdk6
#   - openjdk8
__java_packages:
  - openjdk
ansible/roles/geerlingguy.java/vars/RedHat.yml
New file
@@ -0,0 +1,7 @@
---
# JDK version options include:
#   - java
#   - java-1.6.0-openjdk
#   - java-1.7.0-openjdk
__java_packages:
  - java-1.7.0-openjdk
ansible/roles/geerlingguy.java/vars/Ubuntu-12.yml
New file
@@ -0,0 +1,7 @@
---
# JDK version options include:
#   - java
#   - openjdk-6-jdk
#   - openjdk-7-jdk
__java_packages:
  - openjdk-7-jdk
ansible/roles/geerlingguy.java/vars/Ubuntu-14.yml
New file
@@ -0,0 +1,7 @@
---
# JDK version options include:
#   - java
#   - openjdk-6-jdk
#   - openjdk-7-jdk
__java_packages:
  - openjdk-7-jdk
ansible/roles/geerlingguy.java/vars/Ubuntu-16.yml
New file
@@ -0,0 +1,7 @@
---
# JDK version options include:
#   - java
#   - openjdk-8-jdk
#   - openjdk-9-jdk
__java_packages:
  - openjdk-8-jdk
ansible/roles/geerlingguy.java/vars/Ubuntu-18.yml
New file
@@ -0,0 +1,6 @@
---
# JDK version options include:
#   - java
#   - openjdk-11-jdk
__java_packages:
  - openjdk-11-jdk
ansible/roles/host-jenkins-server/.gitignore
New file
@@ -0,0 +1,2 @@
*.retry
tests/test.sh
ansible/roles/host-jenkins-server/.travis.yml
New file
@@ -0,0 +1,88 @@
---
services: docker
env:
  # tests/test.yml
  - distro: centos7
    playbook: test.yml
    prefix: ''
    http_port: 8080
  - distro: fedora27
    playbook: test.yml
    prefix: ''
    http_port: 8080
  - distro: ubuntu1604
    playbook: test.yml
    prefix: ''
    http_port: 8080
  - distro: ubuntu1404
    playbook: test.yml
    prefix: ''
    http_port: 8080
  - distro: debian8
    playbook: test.yml
    prefix: ''
    http_port: 8080
  # tests/test-http-port.yml
  - distro: ubuntu1604
    playbook: test-http-port.yml
    prefix: ''
    http_port: 8081
  # tests/test-prefix.yml
  - distro: ubuntu1604
    playbook: test-prefix.yml
    prefix: jenkins
    http_port: 8080
  # tests/test-jenkins-version.yml
  - distro: centos7
    playbook: test-jenkins-version.yml
    prefix: ''
    http_port: 8080
  - distro: ubuntu1604
    playbook: test-jenkins-version.yml
    prefix: ''
    http_port: 8080
  # tests/test-plugins.yml
  - distro: ubuntu1604
    playbook: test-plugins.yml
    prefix: ''
    http_port: 8080
  # tests/test-plugins-with-home.yml
  - distro: ubuntu1604
    playbook: test-plugins-with-home.yml
    prefix: ''
    http_port: 8080
  # tests/test-plugins-with-pinning.yml
  - distro: ubuntu1604
    playbook: test-plugins-with-pinning.yml
    prefix: ''
    http_port: 8080
script:
  # Configure test script so we can run extra tests after playbook is run.
  - export container_id=$(date +%s)
  - export cleanup=false
  # Download test shim.
  - wget -O ${PWD}/tests/test.sh https://gist.githubusercontent.com/geerlingguy/73ef1e5ee45d8694570f334be385e181/raw/
  - chmod +x ${PWD}/tests/test.sh
  # Run tests.
  - ${PWD}/tests/test.sh
  # Make sure Jenkins is running.
  - 'docker exec --tty ${container_id} env TERM=xterm curl http://localhost:$http_port/$prefix'
after_failure:
  # Check what happened on systemd systems.
  - 'docker exec --tty ${container_id} env TERM=xterm systemctl -l status jenkins.service'
  - 'docker exec --tty ${container_id} env TERM=xterm journalctl -xe --no-pager'
notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
ansible/roles/host-jenkins-server/LICENSE
New file
@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2017 Jeff Geerling
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
ansible/roles/host-jenkins-server/README.md
@@ -1,38 +1,138 @@
Role Name
=========
# Ansible Role: Jenkins CI
A brief description of the role goes here.
[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-jenkins.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-jenkins)
Requirements
------------
Installs Jenkins CI on RHEL/CentOS and Debian/Ubuntu servers.
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
## Requirements
Role Variables
--------------
Requires `curl` to be installed on the server. Also, newer versions of Jenkins require Java 8+ (see the test playbooks inside the `tests/` directory for an example of how to use newer versions of Java for your OS).
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
## Role Variables
Dependencies
------------
Available variables are listed below, along with default values (see `defaults/main.yml`):
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
    jenkins_package_state: present
Example Playbook
----------------
The state of the `jenkins` package install. By default this role installs Jenkins but will not upgrade Jenkins (when using package-based installs). If you want to always update to the latest version, change this to `latest`.
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
    jenkins_hostname: localhost
    - hosts: servers
      roles:
         - { role: username.rolename, x: 42 }
The system hostname; usually `localhost` works fine. This will be used during setup to communicate with the running Jenkins instance via HTTP requests.
License
-------
    jenkins_home: /var/lib/jenkins
BSD
The Jenkins home directory which, amongst others, is being used for storing artifacts, workspaces and plugins. This variable allows you to override the default `/var/lib/jenkins` location.
Author Information
------------------
    jenkins_http_port: 8080
An optional section for the role authors to include contact information, or a website (HTML is not allowed).
The HTTP port for Jenkins' web interface.
    jenkins_admin_username: admin
    jenkins_admin_password: admin
Default admin account credentials which will be created the first time Jenkins is installed.
    jenkins_admin_password_file: ""
A file containing the default admin password; the first time Jenkins is installed it is created as `/var/lib/jenkins/secrets/initialAdminPassword`.
    jenkins_admin_token: ""
A Jenkins API token (generated after installation) for [authenticated scripted clients](https://wiki.jenkins-ci.org/display/JENKINS/Authenticating+scripted+clients). You can use the admin token instead of a username and password for more convenient scripted access to Jenkins (e.g. for plugin management through this role).
    jenkins_admin_token_file: ""
A file (with full path) on the Jenkins server containing the admin token. If this variable is set in addition to the `jenkins_admin_token`, the contents of this file will overwrite the value of `jenkins_admin_token`.
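Token-based access means the plugin tasks never need the admin password on the controller; a sketch of a playbook wired for it (the token value is a placeholder):

```yaml
- hosts: jenkins
  become: true
  vars:
    jenkins_admin_token: "0123456789abcdef0123456789abcdef"   # placeholder
    jenkins_plugins:
      - git
      - workflow-aggregator
  roles:
    - geerlingguy.java
    - geerlingguy.jenkins
```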
    jenkins_jar_location: /opt/jenkins-cli.jar
The location at which the `jenkins-cli.jar` jarfile will be kept. This is used for communicating with Jenkins via the CLI.
    jenkins_plugins: []
Jenkins plugins to be installed automatically during provisioning.
    jenkins_plugins_install_dependencies: yes
Whether plugins installed by this role should also install their plugin dependencies.
    jenkins_plugins_state: present
Use `latest` to ensure all plugins are running the most up-to-date version.
    jenkins_plugin_updates_expiration: 86400
Number of seconds after which a new copy of the update-center.json file is downloaded. Set it to 0 if no cache file should be used.
    jenkins_plugin_timeout: 30
The server connection timeout, in seconds, when installing Jenkins plugins.
    jenkins_version: "1.644"
    jenkins_pkg_url: "http://www.example.com"
(Optional) The Jenkins version can be pinned to any version available on `http://pkg.jenkins-ci.org/debian/` (Debian/Ubuntu) or `http://pkg.jenkins-ci.org/redhat/` (RHEL/CentOS). If the Jenkins version you need is not available in the default package URLs, you can override the URL with your own; set `jenkins_pkg_url` (_Note_: the role depends on the same naming convention that `http://pkg.jenkins-ci.org/` uses).
    jenkins_url_prefix: ""
Used for setting a URL prefix for your Jenkins installation. The option is added as `--prefix={{ jenkins_url_prefix }}` to the Jenkins initialization `java` invocation, so you can access the installation at a path like `http://www.example.com{{ jenkins_url_prefix }}`. Make sure you start the prefix with a `/` (e.g. `/jenkins`).
    jenkins_connection_delay: 5
    jenkins_connection_retries: 60
Amount of time and number of times to wait when connecting to Jenkins after initial startup, to verify that Jenkins is running. Total time to wait = `delay` * `retries`, so by default this role will wait up to 300 seconds before timing out.
    # For RedHat/CentOS (role default):
    jenkins_repo_url: http://pkg.jenkins-ci.org/redhat/jenkins.repo
    jenkins_repo_key_url: http://pkg.jenkins-ci.org/redhat/jenkins-ci.org.key
    # For Debian (role default):
    jenkins_repo_url: deb http://pkg.jenkins-ci.org/debian binary/
    jenkins_repo_key_url: http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key
This role will install the latest version of Jenkins by default (using the official repositories as listed above). You can override these variables (use the correct set for your platform) to install the current LTS version instead:
    # For RedHat/CentOS LTS:
    jenkins_repo_url: http://pkg.jenkins-ci.org/redhat-stable/jenkins.repo
    jenkins_repo_key_url: http://pkg.jenkins-ci.org/redhat-stable/jenkins-ci.org.key
    # For Debian/Ubuntu LTS:
    jenkins_repo_url: deb http://pkg.jenkins-ci.org/debian-stable binary/
    jenkins_repo_key_url: http://pkg.jenkins-ci.org/debian-stable/jenkins-ci.org.key
It is also possible to stop the repo file being added by setting `jenkins_repo_url = ''`. This is useful if, for example, you sign your own packages or run internal package management (e.g. Spacewalk).
    jenkins_java_options: "-Djenkins.install.runSetupWizard=false"
Extra Java options for the Jenkins launch command configured in the init file can be set with the var `jenkins_java_options`. For example, if you want to configure the timezone Jenkins uses, add `-Dorg.apache.commons.jelly.tags.fmt.timeZone=America/New_York`. By default, the option to disable the Jenkins 2.0 setup wizard is added.
    jenkins_init_changes:
      - option: "JENKINS_ARGS"
        value: "--prefix={{ jenkins_url_prefix }}"
      - option: "JENKINS_JAVA_OPTIONS"
        value: "{{ jenkins_java_options }}"
Changes made to the Jenkins init script; the default set of changes set the configured URL prefix and add in configured Java options for Jenkins' startup. You can add other option/value pairs if you need to set other options for the Jenkins init file.
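Putting the last two settings together, the timezone example mentioned above would be passed like this (an illustrative override that keeps the setup-wizard flag the role defaults to):

```yaml
jenkins_java_options: >-
  -Djenkins.install.runSetupWizard=false
  -Dorg.apache.commons.jelly.tags.fmt.timeZone=America/New_York
```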
## Dependencies
  - geerlingguy.java
## Example Playbook
```yaml
- hosts: jenkins
  vars:
    jenkins_hostname: jenkins.example.com
  roles:
    - role: geerlingguy.java
    - role: geerlingguy.jenkins
      become: true
```
## License
MIT (Expat) / BSD
## Author Information
This role was created in 2014 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
ansible/roles/host-jenkins-server/defaults/main.yml
New file
@@ -0,0 +1,38 @@
---
# Optional method of pinning a specific version of Jenkins and/or overriding the
# default Jenkins packaging URL.
# jenkins_version: "1.644"
# jenkins_pkg_url: "https://www.example.com"
# Change this to `latest` to update Jenkins if a newer version is available.
jenkins_package_state: present
jenkins_connection_delay: 5
jenkins_connection_retries: 60
jenkins_home: /var/lib/jenkins
jenkins_hostname: localhost
jenkins_http_port: 8080
jenkins_jar_location: /opt/jenkins-cli.jar
jenkins_url_prefix: ""
jenkins_java_options: "-Djenkins.install.runSetupWizard=false"
jenkins_plugins: []
jenkins_plugins_state: present
jenkins_plugin_updates_expiration: 86400
jenkins_plugin_timeout: 30
jenkins_plugins_install_dependencies: yes
jenkins_admin_username: admin
jenkins_admin_password: admin
jenkins_admin_password_file: ""
jenkins_admin_token: ""
jenkins_admin_token_file: ""
jenkins_process_user: jenkins
jenkins_process_group: "{{ jenkins_process_user }}"
jenkins_init_changes:
  - option: "JENKINS_ARGS"
    value: "--prefix={{ jenkins_url_prefix }}"
  - option: "{{ jenkins_java_options_env_var }}"
    value: "{{ jenkins_java_options }}"
ansible/roles/host-jenkins-server/handlers/main.yml
New file
@@ -0,0 +1,12 @@
---
- name: restart jenkins
  service: name=jenkins state=restarted
- name: configure default users
  template:
    src: basic-security.groovy
    dest: "{{ jenkins_home }}/init.groovy.d/basic-security.groovy"
    owner: "{{ jenkins_process_user }}"
    group: "{{ jenkins_process_group }}"
    mode: 0775
  register: jenkins_users_config
ansible/roles/host-jenkins-server/meta/.galaxy_install_info
New file
@@ -0,0 +1 @@
{install_date: 'Tue Aug 21 09:40:20 2018', version: 3.5.0}
ansible/roles/host-jenkins-server/meta/main.yml
New file
@@ -0,0 +1,29 @@
---
dependencies:
  - geerlingguy.java
galaxy_info:
  author: geerlingguy
  description: Jenkins CI
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: 2.4
  platforms:
  - name: EL
    versions:
    - 6
    - 7
  - name: Fedora
    versions:
    - all
  - name: Debian
    versions:
    - all
  - name: Ubuntu
    versions:
    - all
  galaxy_tags:
    - development
    - packaging
    - jenkins
    - ci
ansible/roles/host-jenkins-server/tasks/main.yml
@@ -1,4 +1,60 @@
---
- name: Start host-gogs-server installer
  debug:
    msg: "Do the needful to deploy host-gogs-server"
# Variable setup.
- name: Include OS-Specific variables
  include_vars: "{{ ansible_os_family }}.yml"
- name: Define jenkins_repo_url
  set_fact:
    jenkins_repo_url: "{{ __jenkins_repo_url }}"
  when: jenkins_repo_url is not defined
- name: Define jenkins_repo_key_url
  set_fact:
    jenkins_repo_key_url: "{{ __jenkins_repo_key_url }}"
  when: jenkins_repo_key_url is not defined
- name: Define jenkins_pkg_url
  set_fact:
    jenkins_pkg_url: "{{ __jenkins_pkg_url }}"
  when: jenkins_pkg_url is not defined
# Setup/install tasks.
- include_tasks: setup-RedHat.yml
  when: ansible_os_family == 'RedHat'
- include_tasks: setup-Debian.yml
  when: ansible_os_family == 'Debian'
# Configure Jenkins init settings.
- include_tasks: settings.yml
# Make sure Jenkins starts, then configure Jenkins.
- name: Ensure Jenkins is started and runs on startup.
  service: name=jenkins state=started enabled=yes
- name: Wait for Jenkins to start up before proceeding.
  shell: "curl -D - --silent --max-time 5 http://{{ jenkins_hostname }}:{{ jenkins_http_port }}{{ jenkins_url_prefix }}/cli/"
  register: result
  until: (result.stdout.find("403 Forbidden") != -1) or (result.stdout.find("200 OK") != -1) and (result.stdout.find("Please wait while") == -1)
  retries: "{{ jenkins_connection_retries }}"
  delay: "{{ jenkins_connection_delay }}"
  changed_when: false
  check_mode: no
- name: Get the jenkins-cli jarfile from the Jenkins server.
  get_url:
    url: "http://{{ jenkins_hostname }}:{{ jenkins_http_port }}{{ jenkins_url_prefix }}/jnlpJars/jenkins-cli.jar"
    dest: "{{ jenkins_jar_location }}"
  register: jarfile_get
  until: "'OK' in jarfile_get.msg or 'file already exists' in jarfile_get.msg"
  retries: 5
  delay: 10
  check_mode: no
- name: Remove Jenkins security init scripts after first startup.
  file:
    path: "{{ jenkins_home }}/init.groovy.d/basic-security.groovy"
    state: absent
# Update Jenkins and install configured plugins.
- include_tasks: plugins.yml
ansible/roles/host-jenkins-server/tasks/plugins.yml
New file
@@ -0,0 +1,72 @@
---
# jenkins_plugin module doesn't support password files.
- name: Get Jenkins admin password from file.
  slurp:
    src: "{{ jenkins_admin_password_file }}"
  register: adminpasswordfile
  no_log: True
  when: jenkins_admin_password_file != ""
- name: Set Jenkins admin password fact.
  set_fact:
    jenkins_admin_password: "{{ (adminpasswordfile['content'] | b64decode | trim) if adminpasswordfile['content'] is defined else jenkins_admin_password }}"
  no_log: True
- name: Get Jenkins admin token from file.
  slurp:
    src: "{{ jenkins_admin_token_file }}"
  register: admintokenfile
  no_log: True
  when: jenkins_admin_token_file != ""
- name: Set Jenkins admin token fact.
  set_fact:
    jenkins_admin_token: "{{ (admintokenfile['content'] | b64decode | trim) if admintokenfile['content'] is defined else jenkins_admin_token }}"
  no_log: True
# Update Jenkins so that plugin updates don't fail.
- name: Create update directory
  file:
    path: "{{ jenkins_home }}/updates"
    state: directory
    owner: jenkins
    group: jenkins
- name: Download current plugin updates from Jenkins update site
  get_url:
    url: http://updates.jenkins-ci.org/update-center.json
    dest: "{{ jenkins_home}}/updates/default.json"
    owner: jenkins
    group: jenkins
    mode: 0440
- name: Remove first and last line from json file (strip the JSONP wrapper)
  shell: sed -i '1d;$d' "{{ jenkins_home }}/updates/default.json"
- name: Install Jenkins plugins using password.
  jenkins_plugin:
    name: "{{ item }}"
    jenkins_home: "{{ jenkins_home }}"
    url_username: "{{ jenkins_admin_username }}"
    url_password: "{{ jenkins_admin_password }}"
    state: "{{ jenkins_plugins_state }}"
    timeout: "{{ jenkins_plugin_timeout }}"
    updates_expiration: "{{ jenkins_plugin_updates_expiration }}"
    url: "http://{{ jenkins_hostname }}:{{ jenkins_http_port }}{{ jenkins_url_prefix }}"
    with_dependencies: "{{ jenkins_plugins_install_dependencies }}"
  with_items: "{{ jenkins_plugins }}"
  when: jenkins_admin_password != ""
  notify: restart jenkins
- name: Install Jenkins plugins using token.
  jenkins_plugin:
    name: "{{ item }}"
    url_token: "{{ jenkins_admin_token }}"
    updates_expiration: "{{ jenkins_plugin_updates_expiration }}"
    url: "http://{{ jenkins_hostname }}:{{ jenkins_http_port }}{{ jenkins_url_prefix }}"
    with_dependencies: "{{ jenkins_plugins_install_dependencies }}"
  with_items: "{{ jenkins_plugins }}"
  when: jenkins_admin_token != ""
  notify: restart jenkins
ansible/roles/host-jenkins-server/tasks/settings.yml
New file
@@ -0,0 +1,56 @@
---
- name: Modify variables in init file
  lineinfile:
    dest: "{{ jenkins_init_file }}"
    insertafter: '^{{ item.option }}='
    regexp: '^{{ item.option }}=\"\${{ item.option }} '
    line: '{{ item.option }}="${{ item.option }} {{ item.value }}"'
    state: present
  with_items: "{{ jenkins_init_changes }}"
  register: jenkins_init_prefix
- name: Set the Jenkins home directory
  lineinfile:
    dest: "{{ jenkins_init_file }}"
    regexp: '^JENKINS_HOME=.*'
    line: 'JENKINS_HOME={{ jenkins_home }}'
  register: jenkins_home_config
- name: Immediately restart Jenkins on init config changes.
  service: name=jenkins state=restarted
  when: jenkins_init_prefix.changed
- name: Set HTTP port in Jenkins config.
  lineinfile:
    backrefs: yes
    dest: "{{ jenkins_init_file }}"
    regexp: '^{{ jenkins_http_port_param }}='
    line: '{{ jenkins_http_port_param }}={{ jenkins_http_port }}'
  register: jenkins_http_config
- name: Ensure jenkins_home {{ jenkins_home }} exists
  file:
    path: "{{ jenkins_home }}"
    state: directory
    owner: jenkins
    group: jenkins
    mode: u+rwx
    follow: true
- name: Create custom init scripts directory.
  file:
    path: "{{ jenkins_home }}/init.groovy.d"
    state: directory
    owner: "{{ jenkins_process_user }}"
    group: "{{ jenkins_process_group }}"
    mode: 0775
- name: Trigger handlers immediately in case Jenkins was installed
  meta: flush_handlers
- name: Immediately restart Jenkins on http or user changes.
  service: name=jenkins state=restarted
  when: (jenkins_users_config is defined and jenkins_users_config.changed) or
        (jenkins_http_config is defined and jenkins_http_config.changed) or
        (jenkins_home_config is defined and jenkins_home_config.changed)
ansible/roles/host-jenkins-server/tasks/setup-Debian.yml
New file
@@ -0,0 +1,44 @@
---
- name: Ensure dependencies are installed.
  apt:
    name:
      - curl
      - apt-transport-https
    state: present
- name: Add Jenkins apt repository key.
  apt_key:
    url: "{{ jenkins_repo_key_url }}"
    state: present
- name: Add Jenkins apt repository.
  apt_repository:
    repo: "{{ jenkins_repo_url }}"
    state: present
    update_cache: yes
  when: jenkins_repo_url != ''
- name: Download specific Jenkins version.
  get_url:
    url: "{{ jenkins_pkg_url }}/jenkins_{{ jenkins_version }}_all.deb"
    dest: "/tmp/jenkins_{{ jenkins_version }}_all.deb"
  when: jenkins_version is defined
- name: Check if we downloaded a specific version of Jenkins.
  stat:
    path: "/tmp/jenkins_{{ jenkins_version }}_all.deb"
  register: specific_version
  when: jenkins_version is defined
- name: Install our specific version of Jenkins.
  apt:
    deb: "/tmp/jenkins_{{ jenkins_version }}_all.deb"
    state: present
  when: jenkins_version is defined and specific_version.stat.exists
  notify: configure default users
- name: Ensure Jenkins is installed.
  apt:
    name: jenkins
    state: "{{ jenkins_package_state }}"
  notify: configure default users
ansible/roles/host-jenkins-server/tasks/setup-RedHat.yml
New file
@@ -0,0 +1,44 @@
---
- name: Ensure dependencies are installed.
  package:
    name:
      - curl
      - libselinux-python
      - initscripts
    state: present
- name: Ensure Jenkins repo is installed.
  get_url:
    url: "{{ jenkins_repo_url }}"
    dest: /etc/yum.repos.d/jenkins.repo
  when: jenkins_repo_url != ''
- name: Add Jenkins repo GPG key.
  rpm_key:
    state: present
    key: "{{ jenkins_repo_key_url }}"
- name: Download specific Jenkins version.
  get_url:
    url: "{{ jenkins_pkg_url }}/jenkins-{{ jenkins_version }}-1.1.noarch.rpm"
    dest: "/tmp/jenkins-{{ jenkins_version }}-1.1.noarch.rpm"
  when: jenkins_version is defined
- name: Check if we downloaded a specific version of Jenkins.
  stat:
    path: "/tmp/jenkins-{{ jenkins_version }}-1.1.noarch.rpm"
  register: specific_version
  when: jenkins_version is defined
- name: Install our specific version of Jenkins.
  package:
    name: "/tmp/jenkins-{{ jenkins_version }}-1.1.noarch.rpm"
    state: present
  when: jenkins_version is defined and specific_version.stat.exists
  notify: configure default users
- name: Ensure Jenkins is installed.
  package:
    name: jenkins
    state: "{{ jenkins_package_state }}"
  notify: configure default users
ansible/roles/host-jenkins-server/templates/basic-security.groovy
New file
@@ -0,0 +1,28 @@
#!groovy
import hudson.security.*
import jenkins.model.*
def instance = Jenkins.getInstance()
def hudsonRealm = new HudsonPrivateSecurityRealm(false)
def users = hudsonRealm.getAllUsers()
users_s = users.collect { it.toString() }
// Create the admin user account, or update its password if it already exists.
if ("{{ jenkins_admin_username }}" in users_s) {
    println "Admin user already exists - updating password"
    def user = hudson.model.User.get('{{ jenkins_admin_username }}');
    def password = hudson.security.HudsonPrivateSecurityRealm.Details.fromPlainPassword('{{ jenkins_admin_password }}')
    user.addProperty(password)
    user.save()
}
else {
    println "--> creating local admin user"
    hudsonRealm.createAccount('{{ jenkins_admin_username }}', '{{ jenkins_admin_password }}')
    instance.setSecurityRealm(hudsonRealm)
    def strategy = new FullControlOnceLoggedInAuthorizationStrategy()
    instance.setAuthorizationStrategy(strategy)
    instance.save()
}
ansible/roles/host-jenkins-server/tests/README.md
New file
@@ -0,0 +1,11 @@
# Ansible Role tests
To run the test playbook(s) in this directory:
  1. Install and start Docker.
  1. Download the test shim (see .travis.yml file for the URL) into `tests/test.sh`:
    - `wget -O tests/test.sh https://gist.githubusercontent.com/geerlingguy/73ef1e5ee45d8694570f334be385e181/raw/`
  1. Make the test shim executable: `chmod +x tests/test.sh`.
  1. Run (from the role root directory) `distro=[distro] playbook=[playbook] ./tests/test.sh`
If you don't want the container to be automatically deleted after the test playbook is run, add the following environment variables: `cleanup=false container_id=$(date +%s)`
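For example, to run the default test playbook against a hypothetical CentOS 7 image and keep the container around afterwards:
`distro=centos7 playbook=test.yml cleanup=false container_id=$(date +%s) ./tests/test.sh`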
ansible/roles/host-jenkins-server/tests/java-8.yml
New file
@@ -0,0 +1,49 @@
---
# Ubuntu.
- name: Add repository for OpenJDK 8 (Ubuntu 14).
  apt_repository: repo='ppa:openjdk-r/ppa'
  when: ansible_distribution == "Ubuntu" and ansible_distribution_version == "14.04"
# Debian.
- name: Enable Backports repository (Debian 8).
  apt_repository:
    repo: 'deb http://ftp.debian.org/debian {{ ansible_distribution_release }}-backports main'
    state: present
    filename: "{{ ansible_distribution_release }}_backports"
  when: ansible_distribution == "Debian"
- name: Update apt cache.
  apt: update_cache=yes cache_valid_time=600
  when: ansible_os_family == 'Debian'
  changed_when: false
# See: http://unix.stackexchange.com/a/342469
- name: Install dependencies.
  apt:
    default_release: "{{ ansible_distribution_release }}-backports"
    name:
      - openjdk-8-jre-headless
      - ca-certificates-java
    state: present
  when: ansible_distribution == "Debian"
# Red Hat.
- name: Set the java_packages variable (RedHat).
  set_fact:
    java_packages:
      - java-1.8.0-openjdk
  when: ansible_os_family == 'RedHat'
# Ubuntu.
- name: Set the java_packages variable (Ubuntu).
  set_fact:
    java_packages:
      - openjdk-8-jdk
  when: ansible_distribution == 'Ubuntu'
# Debian.
- name: Set the java_packages variable (Debian).
  set_fact:
    java_packages:
      - openjdk-8-jdk
  when: ansible_distribution == 'Debian'
ansible/roles/host-jenkins-server/tests/requirements.yml
New file
@@ -0,0 +1,2 @@
---
- src: geerlingguy.java
ansible/roles/host-jenkins-server/tests/test-http-port.yml
New file
@@ -0,0 +1,12 @@
---
- hosts: all
  vars:
    jenkins_http_port: 8081
  pre_tasks:
    - include_tasks: java-8.yml
  roles:
    - geerlingguy.java
    - role_under_test
ansible/roles/host-jenkins-server/tests/test-jenkins-version.yml
New file
@@ -0,0 +1,15 @@
---
- hosts: localhost
  vars:
    jenkins_version: 1.644
  pre_tasks:
    - name: Update apt cache.
      apt: update_cache=yes cache_valid_time=600
      when: ansible_os_family == 'Debian'
      changed_when: false
  roles:
    - geerlingguy.java
    - role_under_test
ansible/roles/host-jenkins-server/tests/test-plugins-with-home.yml
New file
@@ -0,0 +1,15 @@
---
- hosts: all
  vars:
    jenkins_plugins:
      - greenballs
    jenkins_home: /tmp/jenkins
    jenkins_plugin_timeout: 120
  pre_tasks:
    - include_tasks: java-8.yml
  roles:
    - geerlingguy.java
    - role_under_test
ansible/roles/host-jenkins-server/tests/test-plugins-with-pinning.yml
New file
@@ -0,0 +1,14 @@
---
- hosts: all
  vars:
    jenkins_version: 2.60
    jenkins_plugins:
      - ant
  pre_tasks:
    - include_tasks: java-8.yml
  roles:
    - geerlingguy.java
    - role_under_test
ansible/roles/host-jenkins-server/tests/test-plugins.yml
New file
@@ -0,0 +1,17 @@
---
- hosts: all
  vars:
    jenkins_plugins:
      - blueocean
      - ghprb
      - greenballs
      - workflow-aggregator
    jenkins_plugin_timeout: 120
  pre_tasks:
    - include_tasks: java-8.yml
  roles:
    - geerlingguy.java
    - role_under_test
ansible/roles/host-jenkins-server/tests/test-prefix.yml
New file
@@ -0,0 +1,12 @@
---
- hosts: all
  vars:
    jenkins_url_prefix: /jenkins
  pre_tasks:
    - include_tasks: java-8.yml
  roles:
    - geerlingguy.java
    - role_under_test
ansible/roles/host-jenkins-server/tests/test.yml
New file
@@ -0,0 +1,9 @@
---
- hosts: all
  pre_tasks:
    - include_tasks: java-8.yml
  roles:
    - geerlingguy.java
    - role_under_test
ansible/roles/host-jenkins-server/vars/Debian.yml
New file
@@ -0,0 +1,7 @@
---
__jenkins_repo_url: deb https://pkg.jenkins.io/debian binary/
__jenkins_repo_key_url: https://pkg.jenkins.io/debian/jenkins.io.key
__jenkins_pkg_url: https://pkg.jenkins.io/debian/binary
jenkins_init_file: /etc/default/jenkins
jenkins_http_port_param: HTTP_PORT
jenkins_java_options_env_var: JAVA_ARGS
ansible/roles/host-jenkins-server/vars/RedHat.yml
New file
@@ -0,0 +1,7 @@
---
__jenkins_repo_url: https://pkg.jenkins.io/redhat/jenkins.repo
__jenkins_repo_key_url: https://pkg.jenkins.io/redhat/jenkins.io.key
__jenkins_pkg_url: https://pkg.jenkins.io/redhat
jenkins_init_file: /etc/sysconfig/jenkins
jenkins_http_port_param: JENKINS_PORT
jenkins_java_options_env_var: JENKINS_JAVA_OPTIONS
ansible/roles/host-lets-encrypt-certs/README.md
New file
@@ -0,0 +1,121 @@
Role Name
=========
host-lets-encrypt-certs
Requirements
------------
Request Let's Encrypt certificates for a host. Wildcard certificates are supported if AWS credentials are provided.
If only static (non-wildcard) certificates are requested, this role *must* run on the host for which the certificates are being requested, and that host *must* be reachable at the DNS name the certificates are requested for. No web server may already be serving the domain on that host; otherwise the standalone validation request will fail.
If wildcard certificates are involved, the role can run on any (AWS) host, because validation of the domain happens via the AWS access credentials against the Route 53 zone for which the wildcard certificates are requested.
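Concretely, the two validation modes correspond to the two ways the role invokes acme.sh (a sketch; the exact command is assembled in tasks/main.yml, and the domains here are illustrative):

    # static certificate, standalone HTTP validation on the target host
    ./acme.sh --staging --issue -d master.example.opentlc.com --standalone
    # wildcard certificate, DNS validation through Route 53
    ./acme.sh --staging --issue -d master.example.opentlc.com -d '*.apps.example.opentlc.com' --dns dns_aws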
Role Variables
--------------
*acme_domain*: ""
  domain name for which to request a certificate
  _Limitation_: Currently only *one* domain name can be requested.
*acme_wildcard_domain*: ""
  Wildcard domain name for which to request a certificate
*acme_aws_access_key*: ""
  AWS Access Key for Route53 (Only for Wildcard Domains)
*acme_aws_secret_access_key*: ""
  AWS Secret Access Key for Route53  (Only for Wildcard Domains)
*acme_additional_args*: ""
  additional arguments for the Acme script
*acme_remote_dir*: "/root"
  The directory on the remote host in which to install acme.sh
*acme_cache_cert_file*: "/tmp/ssl.cert"
*acme_cache_key_file*: "/tmp/ssl.key"
*acme_cache_ca_file*: "/tmp/ssl_ca.cer"
*acme_cache_archive_file*: "/tmp/acme.tar.gz"
  Local (to the host ansible is running on) cache of certificates
  Prevents re-requesting certificates for later runs of the playbook
  when the domains haven't changed. acme.tar.gz will contain the entire
  .acme.sh directory so that it can be restored for future runs on new machines
  with the same domain names.
*acme_production*: False
  Use the production Let's Encrypt server. Leave set to False for test runs
  to avoid hitting the Let's Encrypt rate limits
*acme_renew_automatically*: False
  Install a cron job to automatically renew Certificates. Checks
  once a day.
*acme_force_issue*: False
  Force the creation of new certificates even if there are
  a) certificates already on the host or
  b) certificates in the local cache
Dependencies
------------
None
Example Playbook
----------------
Three example plays follow: one requesting static certificates, one requesting wildcard certificates, and one requesting both:
- name: Request Let's Encrypt Static Certificates
  hosts: server
  gather_facts: False
  tasks:
  - name: Call Role
    include_role:
      name: ../../roles/host-lets-encrypt-certs
    vars:
    - acme_domain: "master.example.opentlc.com"
    - acme_production: False
    - acme_remote_dir: "/root"
    - acme_cache_cert_file: "/tmp/server.cert"
    - acme_cache_key_file: "/tmp/server.key"
    - acme_cache_ca_file: "/tmp/server_ca.cer"
    - acme_renew_automatically: False
    - acme_force_issue: False
- name: Request Let's Encrypt Wildcard Certificates
  hosts: quay
  gather_facts: False
  tasks:
  - name: Call Role
    include_role:
      name: ../ansible/roles/host-lets-encrypt-certs
    vars:
    - acme_wildcard_domain: "*.apps.example.opentlc.com"
    - acme_aws_access_key: "<AWS ACCESS KEY>"
    - acme_aws_secret_access_key: "<AWS_SECRET_ACCESS_KEY>"
    - acme_production: False
    - acme_remote_dir: "/root"
    - acme_cache_cert_file: "/tmp/server.cert"
    - acme_cache_key_file: "/tmp/server.key"
    - acme_cache_ca_file: "/tmp/server_ca.cer"
    - acme_renew_automatically: False
    - acme_force_issue: False
- name: Request Both Let's Encrypt Static and Wildcard Certificates
  hosts: quay
  gather_facts: False
  tasks:
  - name: Call Role
    include_role:
      name: ../ansible/roles/host-lets-encrypt-certs
    vars:
    - acme_domain: "master.example.opentlc.com"
    - acme_wildcard_domain: "*.apps.example.opentlc.com"
    - acme_aws_access_key: "<AWS ACCESS KEY>"
    - acme_aws_secret_access_key: "<AWS_SECRET_ACCESS_KEY>"
    - acme_production: False
    - acme_remote_dir: "/root"
    - acme_cache_cert_file: "/tmp/server.cert"
    - acme_cache_key_file: "/tmp/server.key"
    - acme_cache_ca_file: "/tmp/server_ca.cer"
    - acme_renew_automatically: False
    - acme_force_issue: False
ansible/roles/host-lets-encrypt-certs/defaults/main.yml
New file
@@ -0,0 +1,42 @@
# The Top Level domain for which to request a certificate
acme_domain: ""
# Wildcard domain for which to request a certificate
# Wildcard domains need valid AWS access credentials to manipulate
# Route 53 entries for automatic validation by Let's Encrypt
acme_wildcard_domain: ""
acme_aws_access_key: ""
acme_aws_secret_access_key: ""
# Additional arguments to be passed to the acme.sh command line
acme_additional_args: ""
# The directory on the remote host in which to install acme.sh
acme_remote_dir: "/root"
# Local (to the host ansible is running on) cache of certificates
# Prevents re-requesting certificates for later runs of the playbook
# when the domains haven't changed
acme_cache_archive_file: "/tmp/acme.tar.gz"
# If acme_cache_archive_file is not provided, both of the following can
# be specified to just use a previously created cert/key
acme_cache_cert_file: "/tmp/ssl.cert"
acme_cache_key_file: "/tmp/ssl.key"
acme_cache_ca_file: "/tmp/ssl_ca.cert"
# Use the production Let's Encrypt server. Leave set to False for test runs
# to avoid hitting the Let's Encrypt rate limits
acme_production: False
# Install a cron job to automatically renew Certificates. Checks
# once a day.
acme_renew_automatically: False
# Force the creation of new certificates even if there are
# a) certificates already on the host or
# b) certificates in the local cache
acme_force_issue: False
# Internal Variable. Don't change
acme_setup_complete: false
ansible/roles/host-lets-encrypt-certs/files/deploy_LE_certs.yml
ansible/roles/host-lets-encrypt-certs/tasks/main.yml
New file
@@ -0,0 +1,236 @@
---
## Request Let's Encrypt Certificates for a host
- name: Verify if AWS Credentials provided if required
  fail:
    msg: AWS Credentials are required when requesting certificates for a wildcard domain
  when:
    - acme_wildcard_domain|length > 0
    - (acme_aws_access_key is not defined or acme_aws_access_key|trim == "" or acme_aws_secret_access_key is not defined or acme_aws_secret_access_key|trim == "")
- name: Set acme_wildcard_certs fact
  set_fact:
    acme_wildcard_certs: "{{ acme_wildcard_domain|length > 0 }}"
- name: Test if Let's Encrypt Certificates are already there
  stat:
    path: "{{ acme_remote_dir }}/lets-encrypt-x3-cross-signed.pem"
  register: cacert
- name: No Certificates on host or acme_force_issue=true -> set up Let's Encrypt Certificates
  when:
    - cacert.stat.exists|bool == false or acme_force_issue|d(False)|bool
  block:
    # Get Intermediary CA Certificate.
    # This is also used in the SSO configuration!
    - name: Get Let's Encrypt Intermediary CA Certificate
      get_url:
        url: https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem.txt
        dest: "{{ acme_remote_dir }}/lets-encrypt-x3-cross-signed.pem"
    - name: Check if cached .acme.sh archive exists
      become: False
      stat:
        path: "{{ acme_cache_archive_file }}"
      delegate_to: localhost
      register: cache_archive_file
    - name: Restore entire .acme.sh archive
      when:
      - cache_archive_file.stat.exists|bool
      - not acme_force_issue|d(False)|bool
      block:
      - name: Upload .acme.sh archive
        unarchive:
          src: "{{ acme_cache_archive_file }}"
          dest: "{{ acme_remote_dir }}"
          owner: root
          group: root
          keep_newer: yes
      - name: Update AWS Key in .acme.sh/account.conf
        lineinfile:
          path: "{{ acme_remote_dir }}/.acme.sh/account.conf"
          line: "SAVED_AWS_ACCESS_KEY_ID='{{ acme_aws_access_key }}'"
          state: present
          regexp: '^SAVED_AWS_ACCESS_KEY_ID='
        when:
          - acme_wildcard_certs|d(False)|bool
      - name: Add AWS Secret Access Key to Let's Encrypt Acme.sh configuration
        lineinfile:
          path: "{{ acme_remote_dir }}/.acme.sh/account.conf"
          line: "SAVED_AWS_SECRET_ACCESS_KEY_ID='{{ acme_aws_secret_access_key }}'"
          state: present
          regexp: '^SAVED_AWS_SECRET_ACCESS_KEY_ID='
        when:
          - acme_wildcard_certs|d(False)|bool
      - name: Set acme_setup_complete=true
        set_fact:
          acme_setup_complete: true
    - name: Setup .acme.sh if cache archive doesn't exist or force=true
      when:
        - not cache_archive_file.stat.exists|bool or acme_force_issue|d(False)|bool
      block:
      - name: Ensure git is installed
        yum:
          name: git
          state: latest
      - name: Remove Acme.sh directories if present
        file:
          name: "{{ item }}"
          state: absent
        with_items:
        - "{{ acme_remote_dir }}/acme.sh"
        - "{{ acme_remote_dir }}/.acme.sh"
      - name: Clone Let's Encrypt Acme.sh Repository
        git:
          repo: https://github.com/Neilpang/acme.sh.git
          clone: yes
          dest: "{{ acme_remote_dir }}/acme.sh"
      - name: Add AWS Access Key to Let's Encrypt Acme.sh configuration
        lineinfile:
          path: "{{ acme_remote_dir }}/acme.sh/dnsapi/dns_aws.sh"
          line: "AWS_ACCESS_KEY_ID={{ acme_aws_access_key }}"
          state: present
          insertbefore: '^#AWS_ACCESS_KEY_ID'
        when:
          - acme_wildcard_certs|d(False)|bool
      - name: Add AWS Secret Access Key to Let's Encrypt Acme.sh configuration
        lineinfile:
          path: "{{ acme_remote_dir }}/acme.sh/dnsapi/dns_aws.sh"
          line: "AWS_SECRET_ACCESS_KEY={{ acme_aws_secret_access_key }}"
          state: present
          insertbefore: '^#AWS_SECRET_ACCESS_KEY'
        when:
          - acme_wildcard_certs|d(False)|bool
    - name: Check for cached certificate and key
      when:
        - not acme_force_issue|d(False)|bool
      block:
      - name: Check if cached Certificate exists
        become: False
        stat:
          path: "{{ acme_cache_cert_file }}"
        delegate_to: localhost
        register: cache_cert_file
      - name: Check if cached Key exists
        become: False
        stat:
          path: "{{ acme_cache_key_file }}"
        delegate_to: localhost
        register: cache_key_file
      - name: Check if cached CA Certificate exists
        become: False
        stat:
          path: "{{ acme_cache_ca_file }}"
        delegate_to: localhost
        register: cache_ca_file
      - name: Check that both key and certificate have been provided
        debug:
          msg: "Both Certificate and Key file need to be provided - proceeding with full setup"
        when:
        - not (cache_cert_file.stat.exists|bool and cache_key_file.stat.exists|bool)
      - name: Copy cached certificates
        when:
        - cache_cert_file.stat.exists|bool
        - cache_key_file.stat.exists|bool
        block:
        - name: Ensure directory for key and certificate exists on host
          file:
            path: "{{ acme_remote_dir }}/.acme.sh/{{ acme_domain }}"
            state: directory
            owner: root
            group: root
        - name: Copy all cached files to remote host
          copy:
            src: "{{ item.src }}"
            dest: "{{ item.dest }}"
          with_items:
          - { src: "{{ acme_cache_cert_file }}", dest: "{{ acme_remote_dir }}/.acme.sh/{{ acme_domain }}/{{ acme_domain }}.cer" }
          - { src: "{{ acme_cache_key_file }}", dest: "{{ acme_remote_dir }}/.acme.sh/{{ acme_domain }}/{{ acme_domain }}.key" }
        - name: Copy cached CA Certificate if it exists
          copy:
            src: "{{ acme_cache_ca_file }}"
            dest: "{{ acme_remote_dir }}/.acme.sh/{{ acme_domain }}/ca.cer"
          when:
          - cache_ca_file.stat.exists|bool
        - name: Set acme_setup_complete=true
          set_fact:
            acme_setup_complete: true
    - name: Request Certificates from Let's Encrypt (force or no cache)
      when:
      - acme_force_issue|d(False)|bool or not acme_setup_complete|bool
      block:
      - name: Print Shell Command
        debug:
          msg: "{{ acme_remote_dir }}/acme.sh/acme.sh {{ (acme_production|bool)|ternary('','--staging') }} {{ acme_additional_args|d('') }} --issue -d {{ acme_domain }} {{ (acme_wildcard_domain|length>0)|ternary('-d','')}} {{ (acme_wildcard_domain|length>0)|ternary(acme_wildcard_domain,'')}} {{ (acme_wildcard_certs|bool)|ternary('--dns dns_aws', '--standalone') }}"
      - name: Request API and Wildcard Certificates from Let's Encrypt
        shell: "{{ acme_remote_dir }}/acme.sh/acme.sh {{ (acme_production|bool)|ternary('','--staging') }} {{ acme_additional_args|d('') }} --issue -d {{ acme_domain }} {{ (acme_wildcard_domain|length>0)|ternary('-d','')}} {{ (acme_wildcard_domain|length>0)|ternary(acme_wildcard_domain,'')}} {{ (acme_wildcard_certs|bool)|ternary('--dns dns_aws', '--standalone') }}"
        args:
          chdir: "{{ acme_remote_dir }}/acme.sh"
      - name: Save certificate to cache
        fetch:
          src: "{{ acme_remote_dir }}/.acme.sh/{{ acme_domain }}/{{ acme_domain }}.cer"
          dest: "{{ acme_cache_cert_file }}"
          flat: true
        when:
          - acme_cache_cert_file is defined and acme_cache_cert_file|trim != ""
      - name: Save key to cache
        fetch:
          src: "{{ acme_remote_dir }}/.acme.sh/{{ acme_domain }}/{{ acme_domain }}.key"
          dest: "{{ acme_cache_key_file }}"
          flat: true
        when:
          - acme_cache_key_file is defined and acme_cache_key_file|trim != ""
      - name: Save CA Certificate to cache
        fetch:
          src: "{{ acme_remote_dir }}/.acme.sh/{{ acme_domain }}/ca.cer"
          dest: "{{ acme_cache_ca_file }}"
          flat: true
        when:
          - acme_cache_ca_file is defined and acme_cache_ca_file|trim != ""
      - name: Save archive of .acme.sh to cache
        when:
          - acme_cache_archive_file is defined
          - acme_cache_archive_file|trim != ""
        block:
        - name: Create archive of .acme.sh archive for cache
          archive:
            path: "{{ acme_remote_dir }}/.acme.sh"
            dest: "{{ acme_remote_dir }}/acme.tgz"
        - name: Save .acme.sh archive to cache
          fetch:
            src: "{{ acme_remote_dir }}/acme.tgz"
            dest: "{{ acme_cache_archive_file }}"
            flat: yes
        - name: Remove archive from server
          file:
            name: "{{ acme_remote_dir }}/acme.tgz"
            state: absent
- name: Install Automatic renewals of Certificates
  block:
  - name: Install crontab to renew certificates when they expire
    cron:
      name: LETS_ENCRYPT_RENEW
      special_time: daily
      job: "/root/acme.sh/acme.sh {{ acme_args|d('') }} --cron --home /root/.acme.sh > /dev/null"
  - name: Install deploy_LE_certs.yml playbook
    copy:
      src: deploy_LE_certs.yml
      dest: /root/deploy_LE_certs.yml
  when:
  - acme_renew_automatically|d(False)|bool
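# For reference, the cron task above produces a crontab entry roughly like:
#   #Ansible: LETS_ENCRYPT_RENEW
#   @daily /root/acme.sh/acme.sh  --cron --home /root/.acme.sh > /dev/null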
ansible/roles/install-aws-broker/tasks/main.yml
File was deleted
ansible/roles/install-lets-encrypt-certs/README.md
File was deleted
ansible/roles/install-lets-encrypt-certs/tasks/main.yml
File was deleted
ansible/roles/install-prometheus/README.md
File was deleted
ansible/roles/ocp-infra-aws-service-broker/README.md
ansible/roles/ocp-infra-aws-service-broker/tasks/main.yml
New file
@@ -0,0 +1,70 @@
---
## AWS Broker Installation
- name: Check if 'aws-service-broker' project exists
  command: "oc get project aws-service-broker"
  register: awsbroker_exists
  changed_when: false
  ignore_errors: true
- block:
  - name: Create temporary installation directory for AWS Service Broker files
    file:
      path: /root/aws_broker_install
      state: directory
  - name: Retrieve AWS Service Broker Installation Template
    get_url:
      url: https://s3.amazonaws.com/awsservicebroker/scripts/deploy-awsservicebroker.template.yaml
      dest: /root/aws_broker_install/deploy-awsservicebroker.template.yaml
      mode: 0644
      owner: root
  - name: Retrieve AWS Service Broker Installation Script
    get_url:
      url: https://s3.amazonaws.com/awsservicebroker/scripts/deploy_aws_broker.sh
      dest: /root/aws_broker_install/deploy_aws_broker.sh
      mode: 0755
      owner: root
  - name: Patch AWS Service Broker Installation Script (remove 'oc login' step)
    lineinfile:
      path: /root/aws_broker_install/deploy_aws_broker.sh
      regexp: 'oc login.'
      state: absent
  - name: Patch AWS Service Broker Installation Script (remove 'oc create-project' step)
    lineinfile:
      path: /root/aws_broker_install/deploy_aws_broker.sh
      regexp: 'oc new-project.'
      state: absent
  # Have to use oc adm because oc new-project doesn't understand --node-selector
  - name: Create AWS Service Broker Project (pre 3.10)
    shell: oc adm new-project aws-service-broker --node-selector='env=infra'
    when: osrelease is version_compare("3.10", "<")
  # Have to use oc adm because oc new-project doesn't understand --node-selector
  - name: Create AWS Service Broker Project (3.10+)
    shell: oc adm new-project aws-service-broker --node-selector='node-role.kubernetes.io/infra=true'
    when: osrelease is version_compare("3.10", ">=")
  # Switch to aws-service-broker project since installer script wants to install things in current project
  - name: Switch to aws-service-broker project
    shell: oc project aws-service-broker
  # Deploy the AWS Broker. This is a fire and forget operation
  - name: Run AWS Broker Installation
    shell: "/root/aws_broker_install/deploy_aws_broker.sh"
    args:
      chdir: /root/aws_broker_install
  # Cleanup (remove directory and switch back to default project)
  - name: AWS Broker Installation file cleanup
    file:
      path: /root/aws_broker_install
      state: absent
  - name: Switch back to 'default' project after running AWS Broker script
    command: "oc project default"
  when: awsbroker_exists is failed
ansible/roles/ocp-infra-enable-custom-catalog/README.md
New file
@@ -0,0 +1,4 @@
Role Name
=========
This role customizes the OpenShift web console Catalog UI by adding custom categories.
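A minimal sketch of applying the role from a playbook (the host group and the repo-relative role path are assumptions):

```
- hosts: masters
  run_once: true
  roles:
    - ocp-infra-enable-custom-catalog
```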
ansible/roles/ocp-infra-enable-custom-catalog/files/custom-categories.js
New file
@@ -0,0 +1,14 @@
window.OPENSHIFT_CONSTANTS.SERVICE_CATALOG_CATEGORIES.unshift({
  id: "workshop",
  label: "Workshops",
  subCategories: [
    {
      id: "apbs",
      label: "Workshop",
      icon: "pficon pficon-process-automation",
      tags: [
        "workshop"
      ]
    },
  ]
});
ansible/roles/ocp-infra-enable-custom-catalog/tasks/main.yml
New file
@@ -0,0 +1,36 @@
---
## Add custom categories to catalog menu
##
  - name: Copy custom categories javascript file to master
    copy:
      src: files/custom-categories.js
      dest: /root/custom-categories.js
  - name: Create the config map with webconsole customizations
    command: "oc create configmap webconsole-customization --from-file=custom-categories.js=/root/custom-categories.js -n openshift-web-console"
  - name: Deploy static file server with customizations
    command: "oc new-app nginx:1.12~https://github.com/siamaksade/web-console-customization.git --name=static -n openshift-web-console"
  - name: Create a route for the static server
    command: "oc create route edge --service=static -n openshift-web-console"
  - name: Mount config map into static server
    command: "oc volume dc/static --add --name=custom --configmap-name=webconsole-customization --mount-path=/opt/app-root/src/custom -n openshift-web-console"
  - name: Get route for static server
    command: "oc get route/static -n openshift-web-console -o jsonpath='{ .spec.host }' "
    register: webconsole_url
  - name: Export webconsole-config yaml file from configmap
    shell: "oc get cm/webconsole-config -n openshift-web-console -o jsonpath='{ .data.webconsole-config\\.yaml }' > /tmp/webconsole-config.yaml"
  - name: Patch webconsole-config.yml
    shell: "sed -i -e 's/scriptURLs: \\[\\]/scriptURLs:/g' -e '/scriptURLs:/a\\    - https:\\/\\/{{ webconsole-url.stdout }}\\/custom\\/custom-categories\\.js' /tmp/webconsole-config.yaml"
  - name: Import webconsole-config yaml file from configmap
    shell: "oc create configmap webconsole-config --from-file=webconsole-config.yaml=/tmp/webconsole-config.yaml -n openshift-web-console --dry-run -o yaml | oc replace -f -"
  - name: Recreate web console pod
    command: "oc delete pod -l app=openshift-web-console -n openshift-web-console"
ansible/roles/ocp-infra-maistra/README.md
New file
@@ -0,0 +1,36 @@
Ansible Role: Maistra and OpenShift Service Mesh (Istio, Kiali and Jaeger)
=========
[![Build Status](https://travis-ci.org/siamaksade/ansible-openshift-maistra.svg?branch=master)](https://travis-ci.org/siamaksade/ansible-openshift-maistra)
Ansible Role for deploying [Maistra](http://maistra.io/) and OpenShift Service Mesh on OpenShift which deploys the
following components:
* Istio
* Jaeger
* Prometheus
* Grafana
* Kiali
Role Variables
------------
|Variable                  | Default Value                       | Required | Description   |
|--------------------------|-------------------------------------|----------|---------------|
|`openshift_master_public` | -                                   | Required | OpenShift master public URL |
|`maistra_version`         | maistra-0.1.0-ocp-3.1.0-istio-1.0.0 | Optional | Maistra version to deploy |
|`kiali_username`          | `admin`                             | Optional | Kiali username |
|`kiali_password`          | `admin`                             | Optional | Kiali password |
|`openshift_cli`           | oc                                  | Optional | OpenShift CLI command and arguments (e.g. auth) |
Example Playbook
------------
```
---
- name: Example Playbook
  hosts: localhost
  tasks:
  - import_role:
      name: siamaksade.openshift_maistra
    vars:
      openshift_master_public: https://master.openshift.mydomain.com
```
ansible/roles/ocp-infra-maistra/defaults/main.yml
New file
@@ -0,0 +1,16 @@
maistra_version: maistra-0.1.0-ocp-3.1.0-istio-1.0.0
openshift_master_public: https://127.0.0.1:8443
openshift_cli: oc
kiali_username: admin
kiali_password: admin
enable_istio_auth: false
product_images: false
maistra_base_url: https://raw.githubusercontent.com/Maistra/openshift-ansible/{{ maistra_version }}/istio
installer_community: "{{ maistra_base_url }}/istio_community_operator_template.yaml"
installer_product: "{{ maistra_base_url }}/istio_product_operator_template.yaml"
installer_cr: "{{ maistra_base_url }}/cr-full.yaml"
ansible/roles/ocp-infra-maistra/meta/main.yml
New file
@@ -0,0 +1,22 @@
---
dependencies: []
allow_duplicates: true
galaxy_info:
  author: Siamak Sadeghianfar
  description: Maistra (Istio, Jaeger and Kiali) for OpenShift
  role_name: openshift_maistra
  company: "Red Hat"
  license: "license (BSD, MIT)"
  min_ansible_version: 2.3
  platforms:
  - name: GenericLinux
    versions:
    - any
  galaxy_tags:
    - openshift
    - maistra
    - istio
    - jaeger
    - kiali
    - servicemesh
ansible/roles/ocp-infra-maistra/tasks/main.yml
New file
@@ -0,0 +1,49 @@
---
- name: check if user is cluster admin
  shell: "{{ openshift_cli }} get project default"
  register: default_project_result
  ignore_errors: true
  changed_when: false
- fail:
    msg: "User does not have cluster-admin rights to install Istio"
  when: default_project_result is failed
- name: check if istio operator deployed
  shell: "{{ openshift_cli }} get project istio-operator"
  register: istio_project_result
  ignore_errors: true
  changed_when: false
- name: create istio operator project
  shell: "{{ openshift_cli }} new-project istio-operator"
  when: istio_project_result is failed
- name: install openshift service mesh (product istio) operator
  shell: "{{ openshift_cli }} new-app -f {{ installer_product }} --param=OPENSHIFT_ISTIO_MASTER_PUBLIC_URL={{ openshift_master_public }} -n istio-operator"
  when:
    - istio_project_result is failed
    - product_images|bool
- name: install maistra (community istio) operator
  shell: "{{ openshift_cli }} new-app -f {{ installer_community }} --param=OPENSHIFT_ISTIO_MASTER_PUBLIC_URL={{ openshift_master_public }} -n istio-operator"
  when:
    - istio_project_result is failed
    - not product_images|bool
- template:
    src: "{{ role_path }}/templates/istio-cr.yaml.j2"
    dest: /tmp/istio-cr.yaml
    force: true
- name: install istio
  shell: "{{ openshift_cli }} create -f /tmp/istio-cr.yaml -n istio-operator"
- name: wait for istio sidecar-injector to initialize
  shell: "{{ openshift_cli }} get deployment istio-sidecar-injector -o jsonpath='{.status.availableReplicas}' -n istio-system"
  register: sidecar_injector_replicas
  until: sidecar_injector_replicas.stdout == "1"
  retries: "30"
  delay: "30"
ansible/roles/ocp-infra-maistra/templates/istio-cr-full.yaml.j2
New file
@@ -0,0 +1,26 @@
apiVersion: "istio.openshift.com/v1alpha1"
kind: "Installation"
metadata:
  name: "istio-installation"
spec:
  deployment_type: openshift
  istio:
    authentication: true
    community: false
    prefix: openshift-istio-tech-preview/
    version: 0.1.0
  jaeger:
    prefix: distributed-tracing-tech-preview/
    version: 1.6.0
    elasticsearch_memory: 1Gi
  launcher:
    openshift:
      user: {{ openshift_username }}
      password: {{ openshift_password }}
    github:
      username: {{ github_username }}
      token: {{ github_token }}
    catalog:
      filter: filter
      branch: branch
      repo: repo
ansible/roles/ocp-infra-maistra/templates/istio-cr.yaml.j2
New file
@@ -0,0 +1,4 @@
apiVersion: "istio.openshift.com/v1alpha1"
kind: "Installation"
metadata:
  name: "istio-installation"
ansible/roles/ocp-infra-maistra/tests/inventory
New file
@@ -0,0 +1 @@
localhost
ansible/roles/ocp-infra-maistra/tests/test.yml
New file
@@ -0,0 +1,5 @@
- hosts: all
  gather_facts: false
  connection: local
  roles:
    - ../../ocp-infra-maistra
ansible/roles/ocp-infra-nexus/README.md
ansible/roles/ocp-infra-nexus/files/nexus2-persistent-template.yaml
ansible/roles/ocp-infra-nexus/files/nexus3-persistent-template.yaml
ansible/roles/ocp-infra-nexus/tasks/main.yml
ansible/roles/ocp-infra-openwhisk/README.md
ansible/roles/ocp-infra-openwhisk/tasks/main.yml
File was renamed from ansible/roles/install-openwhisk/tasks/main.yml
@@ -11,14 +11,14 @@
  - name: Create 'openwhisk' project
    command: "oc new-project openwhisk"
    when:
    - openwhisk_exists | failed
    - openwhisk_exists is failed
    - install_openwhisk
  # Deploy Openwhisk. This is a fire and forget operation (before 3.10)
  - name: Deploy Openwhisk template
    shell: "oc process -f https://raw.githubusercontent.com/apache/incubator-openwhisk-deploy-openshift/master/persistent-template.yml|oc create -f - -n openwhisk"
    when:
    - openwhisk_exists | failed
    - openwhisk_exists is failed
    - install_openwhisk
    - osrelease is version_compare("3.10", "<")
@@ -26,7 +26,7 @@
  - name: Process Openwhisk template (3.10+)
    shell: "oc process -f https://raw.githubusercontent.com/apache/incubator-openwhisk-deploy-openshift/master/persistent-template.yml > /root/openwhisk.json"
    when:
    - openwhisk_exists | failed
    - openwhisk_exists is failed
    - install_openwhisk
    - osrelease is version_compare("3.10", ">=")
  - name: Patch Openwhisk template for correct CronJob API Version (3.10+)
@@ -35,13 +35,13 @@
      regexp: 'batch/v2alpha1'
      replace: 'batch/v1beta1'
    when:
    - openwhisk_exists | failed
    - openwhisk_exists is failed
    - install_openwhisk
    - osrelease is version_compare("3.10", ">=")
  - name: Deploy Patched Openwhisk template (Fire and Forget, 3.10+)
    shell: "oc create -f /root/openwhisk.json -n openwhisk"
    when:
    - openwhisk_exists | failed
    - openwhisk_exists is failed
    - install_openwhisk
    - osrelease is version_compare("3.10", ">=")
ansible/roles/ocp-infra-prometheus-pre310/README.md
copy from ansible/roles/install-nexus/README.md copy to ansible/roles/ocp-infra-prometheus-pre310/README.md
ansible/roles/ocp-infra-prometheus-pre310/handlers/main.yml
ansible/roles/ocp-infra-prometheus-pre310/tasks/all-nodes.yml
ansible/roles/ocp-infra-prometheus-pre310/tasks/bastion.yml
ansible/roles/ocp-infra-prometheus-pre310/tasks/infranodes.yml
ansible/roles/ocp-infra-prometheus-pre310/tasks/main.yml
ansible/roles/ocp-infra-prometheus-pre310/vars/main.yml
ansible/roles/ocp-workload-3scale-multitenant/tasks/wait_for_deploy.yml
@@ -8,6 +8,10 @@
#  1) install jp :  https://github.com/jmespath/jp
#  2) oc get rc -o json | jp 'items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.annotations."openshift.io/deployment-config.name"'
#
#  Documentation pertaining to JMESPath syntax:
#    - http://jmespath.org/tutorial.html
- name: "Wait for following deployments to become ready: {{pod_to_wait}}"
  command: 'oc get rc -o json -n "{{ ocp_project }}"'
  register: rc_state
@@ -16,5 +20,3 @@
  delay: "{{ deploy_status_delay }}"
  until: 'rc_state.stdout |from_json |json_query(''items[?  (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.annotations."openshift.io/deployment-config.name"'') |intersect(pod_to_wait) |length == 0'
#  Documentation pertaining to jq syntax:
#    - http://jmespath.org/tutorial.html
ansible/roles/ocp-workload-3scale-multitenant/templates/create_tenants.sh
@@ -1,6 +1,11 @@
# loops from {{start_tenant}} to {{end_tenant}} to create 3scale tenants.
# Each user is given tenant admin rights to their corresponding tenant.
# TO-DOs :
#   1)  Configure smtp configmap to enable outbound emails from AMP
#   2)  Convert this entire shell script to ansible (rather than just being invoked by Ansible)
startTenant={{start_tenant}}
endTenant={{end_tenant}}
@@ -80,6 +85,13 @@
            exit 1;
        fi
        # 7) Create corresponding route on 3scale AMP system-developer service
        oc create route edge $orgName-developer --service=system-developer --hostname=$orgName-3scale.{{ocp_apps_domain}} -n {{ocp_project}}
        if [ $? -ne 0 ];then
            echo -en "\n *** ERROR: 6" >> $log_file
            exit 1;
        fi
        echo -en "\ncreated tenant with orgName= $orgName. \n\tOutput file at: $output_dir/$output_file  \n\ttenant_access_token = $tenant_access_token \n" >> $log_file
        echo -en "\nuser$i\t{{ocp_user_passwd}}\t$orgName-admin.{{ocp_apps_domain}}\t$tenantAdminId\t$tenantAdminPasswd\t$tenant_access_token" >> $user_info_file
ansible/roles/ocp-workload-example/defaults/main.yml
New file
@@ -0,0 +1,4 @@
---
become_override: False
ocp_username: wkulhane-redhat.com
silent: False
ansible/roles/ocp-workload-example/readme.adoc
New file
@@ -0,0 +1,119 @@
= ocp-workload-example - Example Workload Role
== Role overview
* This is a working no-op role that can be used to develop new ocp-workload roles. It consists of the following playbooks:
** Playbook: link:./tasks/pre_workload.yml[pre_workload.yml] - Sets up an
 environment for the workload deployment
*** Debug task will print out: `pre_workload Tasks completed successfully.`
** Playbook: link:./tasks/workload.yml[workload.yml] - Used to deploy the actual
 workload, e.g., 3scale, Mobile, or some demo
*** This role only prints the current username for which this role is provisioning.
*** Debug task will print out: `workload Tasks completed successfully.`
** Playbook: link:./tasks/post_workload.yml[post_workload.yml] - Used to
 configure the workload after deployment
*** This role doesn't do anything here
*** Debug task will print out: `post_workload Tasks completed successfully.`
** Playbook: link:./tasks/remove_workload.yml[remove_workload.yml] - Used to
 delete the workload
*** This role doesn't do anything here
*** Debug task will print out: `remove_workload Tasks completed successfully.`
== Review the defaults variable file
* This file link:./defaults/main.yml[./defaults/main.yml] contains all the variables you need to define to control the deployment of your workload.
* The variable *ocp_username* is mandatory to assign the workload to the correct OpenShift user.
* A variable *silent=True* can be passed to suppress debug messages.
* You can modify any of these default values by adding `-e "variable_name=variable_value"` to the command line
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
TARGET_HOST="bastion.na39.openshift.opentlc.com"
OCP_USERNAME="shacharb-redhat.com"
WORKLOAD="ocp-workload-example"
GUID=1001
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem" \
    -e"ansible_ssh_user=ec2-user" \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"silent=False" \
    -e"guid=${GUID}" \
    -e"ACTION=create"
----
=== To Delete an environment
----
TARGET_HOST="bastion.na39.openshift.opentlc.com"
OCP_USERNAME="ankay-redhat.com"
WORKLOAD="ocp-workload-example"
GUID=1002
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem" \
    -e"ansible_ssh_user=ec2-user" \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ACTION=remove"
----
== Other related information:
=== Deploy Workload on OpenShift Cluster from an existing playbook:
[source,yaml]
----
- name: Deploy a workload role on a master host
  hosts: all
  become: true
  gather_facts: False
  tags:
    - step007
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/{{ocp_workload}}", when: 'ocp_workload is defined' }
----
NOTE: You might want to change `hosts: all` to fit your requirements
=== Set up your Ansible inventory file
* You can create an Ansible inventory file to define your connection method to your host (Master/Bastion with `oc` command)
* You can also use the command line to define the hosts directly if your `ssh` configuration is set to connect to the host correctly
* You can also use the command line to use localhost or if your cluster is already authenticated and configured in your `oc` configuration
.Example inventory file
[source, ini]
----
[gptehosts:vars]
ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem
ansible_ssh_user=ec2-user
[gptehosts:children]
openshift
[openshift]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
bastion.cluster3.openshift.opentlc.com
bastion.cluster4.openshift.opentlc.com
[dev]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
[prod]
bastion.cluster3.openshift.opentlc.com
bastion.cluster4.openshift.opentlc.com
----
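With an inventory like the one above saved as `inventory` (the filename is an assumption), the same `ocp-workload` playbook can be run against a whole group, e.g. limited to the `dev` hosts:

----
ansible-playbook -i ./inventory -l dev ./configs/ocp-workloads/ocp-workload.yml \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=shacharb-redhat.com" \
    -e"ocp_workload=ocp-workload-example" \
    -e"guid=1001" \
    -e"ACTION=create"
----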
ansible/roles/ocp-workload-example/tasks/main.yml
New file
@@ -0,0 +1,23 @@
---
# Do not modify this file
- name: Running Pre Workload Tasks
  include: ./pre_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  include: ./workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  include: ./post_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  include: ./remove_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp-workload-example/tasks/post_workload.yml
New file
@@ -0,0 +1,9 @@
---
# Implement your Post Workload deployment tasks here
# Leave this as the last task in the playbook.
- name: post_workload tasks complete
  debug:
    msg: "Post-Workload Tasks completed successfully."
  when: not silent|bool
ansible/roles/ocp-workload-example/tasks/pre_workload.yml
New file
@@ -0,0 +1,9 @@
---
# Implement your Pre Workload deployment tasks here
# Leave this as the last task in the playbook.
- name: pre_workload tasks complete
  debug:
    msg: "Pre-Workload tasks completed successfully."
  when: not silent|bool
ansible/roles/ocp-workload-example/tasks/remove_workload.yml
New file
@@ -0,0 +1,9 @@
---
# Implement your Workload removal tasks here
# Leave this as the last task in the playbook.
- name: remove_workload tasks complete
  debug:
    msg: "Remove Workload tasks completed successfully."
  when: not silent|bool
ansible/roles/ocp-workload-example/tasks/workload.yml
New file
@@ -0,0 +1,13 @@
---
# Implement your Workload deployment tasks here
- name: Setting up workload for user
  debug:
    msg: "Setting up workload for user ocp_username = {{ ocp_username }}"
# Leave this as the last task in the playbook.
- name: workload tasks complete
  debug:
    msg: "Workload Tasks completed successfully."
  when: not silent|bool
ansible/roles/ocp-workload-fuse-ignite/readme.adoc
@@ -54,8 +54,8 @@
----
== Execution using localhost oc client
WORKLOAD="ocp-workload-3scale-multitenant"
----
WORKLOAD="ocp-workload-fuse-ignite"
HOST_GUID=dev39
GUID=adm0
POSTGRESQL_MEMORY_LIMIT=512Mi
@@ -80,3 +80,4 @@
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=$GUID" \
                    -e"ACTION=remove"
----
ansible/roles/ocp-workload-rhte-mw-op-intel/defaults/main.yml
@@ -1,6 +1,6 @@
---
become_override: false
ocp_username: jbride-redhat.com
ocp_username: user12
ocp_user_needs_quota: True
ocp_user_groups:
@@ -28,3 +28,28 @@
deploy_status_delay: 20
lab_name: rhte-mw-op-intel
lab_1_name: lab1-kafka-project
lab_2_name: lab2-kafka-project
##########          Templates for RHTE Lab 6 Op Intel       #################
# Corresponds to fork of strimzi-kafka-operator examples
strimzi_url: https://raw.githubusercontent.com/honghuac/rhte2018/master/examples
clusteroperator_yaml: "{{strimzi_url}}/install/cluster-operator"
serviceaccount_yaml: "{{clusteroperator_yaml}}/01-ServiceAccount-strimzi-cluster-operator.yaml"
clusteroperator_role_yaml: "{{clusteroperator_yaml}}/02-ClusterRole-strimzi-cluster-operator-role.yaml"
clusteroperator_rolebinding_yaml: "{{clusteroperator_yaml}}/02-ClusterRoleBinding-strimzi-cluster-operator.yaml"
kafkabroker_role_yaml: "{{clusteroperator_yaml}}/03-ClusterRole-strimzi-kafka-broker.yaml"
kafkabroker_rolebinding_yaml: "{{clusteroperator_yaml}}/03-ClusterRoleBinding-strimzi-cluster-operator-kafka-broker-delegation.yaml"
topicoperator_role_yaml: "{{clusteroperator_yaml}}/04-ClusterRole-strimzi-topic-operator.yaml"
topicoperator_rolebinding_yaml: "{{clusteroperator_yaml}}/04-ClusterRoleBinding-strimzi-cluster-operator-topic-operator-delegation.yaml"
kafka_crd_yaml: "{{clusteroperator_yaml}}/04-Crd-kafka.yaml"
kafkaconnect_crd_yaml: "{{clusteroperator_yaml}}/04-Crd-kafkaconnect.yaml"
kafkaconnects2i_crd_yaml: "{{clusteroperator_yaml}}/04-Crd-kafkaconnects2i.yaml"
kafkatopic_crd_yaml: "{{clusteroperator_yaml}}/04-Crd-kafkatopic.yaml"
clusteroperator_deployment_yaml: "{{clusteroperator_yaml}}/05-Deployment-strimzi-cluster-operator.yaml"
kafkapersistent_yaml: "{{strimzi_url}}/kafka/kafka-persistent.yaml"
kafkatopic_yaml: "{{strimzi_url}}/topic/kafka-topic.yaml"
kafkauser_yaml: "{{strimzi_url}}/user/kafka-user.yaml"
helloworld_yaml: "{{strimzi_url}}/hello-world/deployment.yaml"
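# For reference, these compose into raw GitHub URLs; e.g. serviceaccount_yaml
# expands to:
# https://raw.githubusercontent.com/honghuac/rhte2018/master/examples/install/cluster-operator/01-ServiceAccount-strimzi-cluster-operator.yaml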
#################################################################
ansible/roles/ocp-workload-rhte-mw-op-intel/readme.adoc
@@ -30,3 +30,48 @@
                    -e"ACTION=remove"
----
== Execution using remote (bastion node) oc client
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-rhte-mw-op-intel"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
GUID=jb05
OCP_USERNAME="jbride-redhat.com"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
                    -e"ACTION=create"
----
=== To Delete an environment
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-rhte-mw-op-intel"
GUID=jb05
OCP_USERNAME="jbride-redhat.com"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                    -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ACTION=remove"
----
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/remove_workload.yml
@@ -14,9 +14,11 @@
- name: Remove any lingering tmp files
  shell: "rm -rf /tmp/{{guid}}"
- name: Remove user Project
  shell: "oc delete project {{ocp_project}}"
- name: Remove Project {{lab_1_name}}
  shell: "oc delete project {{lab_1_name}}"
- name: Remove Project {{lab_2_name}}
  shell: "oc delete project {{lab_2_name}}"
- name: post_workload Tasks Complete
  debug:
ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/workload.yml
@@ -3,16 +3,88 @@
  set_fact:
    ocp_project: "{{lab_name}}-{{guid}}"
- name: "Create project for workload {{ocp_project}}"
  shell: "oc new-project {{ocp_project}}"
- name: define user2_kafka_project fact
  set_fact:
    user2_kafka_project: "{{ocp_username}}-{{lab_1_name}}"
- name: define user2_kafka_project_2 fact
  set_fact:
    user2_kafka_project_2: "{{ocp_username}}-{{lab_2_name}}"
- name: "Create project for workload {{lab_1_name}}"
  shell: "oc new-project {{lab_1_name}}"
- name: "Create project for workload {{lab_2_name}}"
  shell: "oc new-project {{lab_2_name}}"
- name: "Label namespace"
  command: "oc label namespace {{ocp_project}} AAD='{{guid}}'"
  command: "oc label namespace {{lab_1_name}} AAD='{{guid}}'"
- name: Make sure we go back to default project
  shell: "oc project default"
- name: delete temp dir if it exists
  file:
      path: /tmp/{{ocp_project}}
      state: absent
- file:
      path: /tmp/{{ocp_project}}
      state: directory
- name: Apply cluster-operator templates
  shell: "oc apply -f {{ item }} -n {{lab_1_name}}"
  with_items:
    - "{{ serviceaccount_yaml }}"
    - "{{ clusteroperator_role_yaml }}"
    - "{{ clusteroperator_rolebinding_yaml }}"
    - "{{ kafkabroker_role_yaml }}"
    - "{{ kafkabroker_rolebinding_yaml }}"
    - "{{ topicoperator_rolebinding_yaml }}"
    - "{{ kafka_crd_yaml }}"
    - "{{ kafkaconnect_crd_yaml }}"
    - "{{ kafkaconnects2i_crd_yaml }}"
    - "{{ kafkatopic_crd_yaml }}"
    - "{{ clusteroperator_deployment_yaml }}"
- name: Apply Kafka Persistent template
  shell: "oc apply -f {{ kafkapersistent_yaml }} -n {{lab_1_name}}"
- name: Apply Kafka Topic template
  shell: "oc apply -f {{ kafkatopic_yaml }} -n {{lab_1_name}}"
- name: Apply Kafka User template
  shell: "oc apply -f {{ kafkauser_yaml }} -n {{lab_1_name}}"
- name: Apply Hello World template
  shell: "oc apply -f {{ helloword_yaml }} -n {{lab_1_name}}"
- name: Annotate the empty project as requested by user
  shell: "oc annotate namespace {{lab_1_name}} openshift.io/requester={{ocp_username}} --overwrite"
- name: Give ocp_username access to ocp_project; user = {{ocp_username}}
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{lab_1_name}}"
- name: workload Tasks Complete
  debug:
    msg: workload Tasks Complete
# #######      lab specific tasks   ############## #
@@ -20,20 +92,8 @@
#   1) Mongodb (use replica set .... 1 replica is sufficient)
#   2) AMQ Streaming (Kafka with Zookeeper)
#   3) Red Hat's Apache Spark  ( https://radanalytics.io/projects )
#   4) JDG
#   5) Decision Manager (KIE-Server, maybe Decision Central?)
#   6) other ???
####################################################
- name: Annotate the empty project as requested by user
  shell: "oc annotate namespace {{ocp_project}} openshift.io/requester={{ocp_username}} --overwrite"
- name: Give ocp_username access to ocp_project; user = {{ocp_username}}
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_project}}"
- name: workload Tasks Complete
  debug:
    msg: workload Tasks Complete
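Because the templates above are installed with `oc apply`, those tasks are idempotent: re-running the workload reconciles the objects in place instead of failing on duplicates. A sketch, where `kafka-topic.yaml` and the namespace are hypothetical stand-ins for the variables used above:

----
# re-applying the same manifest is safe; the second run leaves it unchanged
oc apply -f kafka-topic.yaml -n example-lab-${GUID}
oc apply -f kafka-topic.yaml -n example-lab-${GUID}
----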
scripts/wrapper.sh
@@ -28,6 +28,7 @@
REGION=${REGION:-us-east-1}
KEYNAME=${KEYNAME:-ocpkey}
ENVTYPE=${ENVTYPE:-generic-example}
PROFILE=${PROFILE:-default}
CLOUDPROVIDER=${CLOUDPROVIDER:-ec2}
if [ "$CLOUDPROVIDER" = "ec2" ]; then
    if [ -z "${HOSTZONEID}" ]; then
@@ -104,9 +105,9 @@
        ;;
    stop)
        aws ec2 stop-instances --profile $PROFILE --region $REGION --instance-ids $(aws ec2 describe-instances --filters "Name=tag:aws:cloudformation:stack-name,Values=${STACK_NAME}" --query Reservations[*].Instances[*].InstanceId --profile $PROFILE --region $REGION --output text)
        ;;
    start)
        aws ec2 start-instances --profile $PROFILE --region $REGION --instance-ids $(aws ec2 describe-instances --filters "Name=tag:aws:cloudformation:stack-name,Values=${STACK_NAME}" --query Reservations[*].Instances[*].InstanceId --profile $PROFILE --region $REGION --output text)
        ;;
esac
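The added `--profile` flag makes the stop/start commands honor a named AWS credentials profile instead of always using the default one, so a single control host can manage stacks in several accounts; behavior is unchanged when `PROFILE` is unset. A sketch, where `sandbox` is a hypothetical profile from ~/.aws/credentials and any other variables the wrapper requires are omitted:

----
# stop the stack's instances using the "sandbox" account credentials
PROFILE=sandbox REGION=us-east-1 ./scripts/wrapper.sh stop
----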