jbride
2018-06-06 87ecf8e232e0bab91f9906a05b9efc0fd4f79e7e
Merge branch 'development'
1 file deleted
3 files added
30 files modified
995 ■■■■ changed files
ansible/configs/ans-tower-lab/post_software.yml 9 ●●●●● patch | view | raw | blame | history
ansible/configs/ans-tower-lab/pre_software.yml 1 ●●●● patch | view | raw | blame | history
ansible/configs/generic-example/post_software.yml 13 ●●●●● patch | view | raw | blame | history
ansible/configs/generic-example/pre_software.yml 28 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.3.9.27.j2 311 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp-ha-disconnected-lab/files/labs_hosts_template.j2 3 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp-ha-lab/files/hosts_template.3.9.27.j2 3 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp-ha-lab/files/labs_hosts_template.j2 3 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp-ha-lab/post_software.yml 10 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp-ha-lab/pre_software.yml 1 ●●●● patch | view | raw | blame | history
ansible/configs/ocp-implementation-lab/post_software.yml 10 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp-implementation-lab/pre_software.yml 25 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp-workshop/files/hosts_template.3.9.27.j2 4 ●●●● patch | view | raw | blame | history
ansible/configs/ocp-workshop/post_software.yml 72 ●●●● patch | view | raw | blame | history
ansible/configs/ocp-workshop/pre_software.yml 1 ●●●● patch | view | raw | blame | history
ansible/configs/three-tier-app/post_software.yml 4 ●●●● patch | view | raw | blame | history
ansible/configs/three-tier-app/pre_software.yml 1 ●●●● patch | view | raw | blame | history
ansible/roles/bastion-opentlc-ipa/tasks/main.yml 29 ●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-amq-enmasse/defaults/main.yml 7 ●●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-amq-enmasse/readme.adoc 5 ●●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-amq-enmasse/tasks/workload.yml 17 ●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-appmod-migration/readme.adoc 12 ●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-appmod-migration/tasks/pre_workload.yml 12 ●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-appmod-migration/tasks/remove_workload.yml 36 ●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-appmod-migration/tasks/workload.yml 33 ●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-appmod-migration/templates/constraints_limitrange.yaml 3 ●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-bxms-dm/ilt_provision.sh 101 ●●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-bxms-dm/readme.adoc 6 ●●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-bxms-pam/ilt_provision.sh 17 ●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-fuse-ignite/defaults/main.yml 38 ●●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-fuse-ignite/ilt_provision.sh 17 ●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-fuse-ignite/tasks/workload.yml 45 ●●●●● patch | view | raw | blame | history
ansible/roles/ocp-workload-fuse-ignite/templates/fuse-ignite-is.yaml 104 ●●●●● patch | view | raw | blame | history
tests/scenarii/ans-tower-lab.yml 14 ●●●●● patch | view | raw | blame | history
ansible/configs/ans-tower-lab/post_software.yml
@@ -39,12 +39,9 @@
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    # sssd bug, fixed by restart
    - name: restart sssd
      service:
        name: sssd
        state: restarted
      when: install_ipa_client
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa"
      when: install_ipa_client|bool
- name: Post Tower configs
  hosts: towers[0]
ansible/configs/ans-tower-lab/pre_software.yml
@@ -43,7 +43,6 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion", when: 'install_bastion' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa", when: 'install_ipa_client' }
  tags:
    - step004
    - bastion_tasks
ansible/configs/generic-example/post_software.yml
@@ -10,6 +10,19 @@
    - debug:
        msg: "Post-Software tasks Started"
- name: Configure IPA on bastion
  hosts: bastions
  become: yes
  gather_facts: False
  run_once: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa"
      when: install_ipa_client|bool
- name: PostSoftware flight-check
  hosts: localhost
  connection: local
ansible/configs/generic-example/pre_software.yml
@@ -42,35 +42,11 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  roles:
    -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion", when: 'install_bastion' }
    -  role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion"
       when: install_bastion|bool
  tags:
    - step004
    - bastion_tasks
- name: Pre-software verification and ipa client
  hosts: bastions
  gather_facts: False
  become: yes
  tags:
    - opentlc_bastion_tasks
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: install ipa client packages
      yum:
        name: "ipa-client"
        state: present
      when: "install_ipa_client"
    - name: Register bastion with IPA
      shell: "/usr/sbin/ipa-client-install --domain=OPENTLC.COM -w {{ipa_host_password}} -N -U --mkhomedir --no-dns-sshfp --hostname={{bastion_public_dns_chomped}}"
      when: "install_ipa_client"
    - name: Add opentlc-access ipa group to sudoers.d
      lineinfile:
        path: /etc/sudoers.d/opentlc-sudoers
        state: present
        create: yes
        line: '%opentlc-access ALL=(ALL)       NOPASSWD: ALL'
        validate: '/usr/sbin/visudo -cf %s'
- name: PreSoftware flight-check
  hosts: localhost
ansible/configs/ocp-ha-disconnected-lab/files/hosts_template.3.9.27.j2
New file
@@ -0,0 +1,311 @@
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_become=yes
ansible_ssh_user={{ansible_ssh_user}}
###########################################################################
### OpenShift Basic Vars
###########################################################################
deployment_type=openshift-enterprise
containerized=false
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# default project node selector
osm_default_node_selector='env=app'
openshift_hosted_infra_selector="env=infra"
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
###########################################################################
### OpenShift Optional Vars
###########################################################################
# Enable cockpit
osm_use_cockpit=true
osm_cockpit_plugins=['cockpit-kubernetes']
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port={{master_api_port}}
openshift_master_console_port={{master_api_port}}
openshift_master_cluster_method=native
openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal
openshift_master_cluster_public_hostname={{master_lb_dns}}
openshift_master_default_subdomain={{cloudapps_suffix}}
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
#os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
{{multi_tenant_setting}}
##########################################################################
### Disconnected Install Vars
### Requires a docker registry at isolated1.{{guid}}.internal:5000
###########################################################################
# sets the debug level for all OpenShift components.  Default is 2
#debug_level=8
# used for container-based install, not RPM
system_images_registry=isolated1.{{guid}}.internal:5000
# https://bugzilla.redhat.com/show_bug.cgi?id=1461465  target release 3.9
#the enterprise registry will not be added to the docker registries.
#also enables insecure registries, somehow.
openshift_docker_ent_reg=''
# https://bugzilla.redhat.com/show_bug.cgi?id=1516534 target release 3.10
oreg_url=isolated1.{{guid}}.internal:5000/openshift3/ose-${component}:${version}
openshift_examples_modify_imagestreams=true
openshift_docker_additional_registries=isolated1.{{guid}}.internal:5000
openshift_docker_insecure_registries=isolated1.{{guid}}.internal:5000
openshift_docker_blocked_registries=registry.access.redhat.com,docker.io
openshift_metrics_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_metrics_image_version=v3.9.14
openshift_logging_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_logging_image_version=v3.9.14
ansible_service_broker_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/ose-
ansible_service_broker_image_tag=v3.9.14
ansible_service_broker_etcd_image_prefix=isolated1.{{guid}}.internal:5000/rhel7/
ansible_service_broker_etcd_image_tag=latest
openshift_service_catalog_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/ose-
openshift_service_catalog_image_version=v3.9.14
openshift_cockpit_deployer_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_cockpit_deployer_version=v3.9.14
template_service_broker_prefix=isolated1.{{guid}}.internal:5000/openshift3/ose-
template_service_broker_version=v3.9.14
openshift_web_console_prefix=isolated1.{{guid}}.internal:5000/openshift3/ose-
openshift_web_console_version=v3.9.14
# PROMETHEUS SETTINGS
openshift_prometheus_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_prometheus_image_version=v3.9.14
openshift_prometheus_alertmanager_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_prometheus_alertmanager_image_version=v3.9.14
openshift_prometheus_alertbuffer_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_prometheus_alertbuffer_image_version=v3.9.14
openshift_prometheus_oauth_proxy_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_prometheus_oauth_proxy_image_version=v3.9.14
openshift_prometheus_node_exporter_image_prefix=isolated1.{{guid}}.internal:5000/openshift3/
openshift_prometheus_node_exporter_image_version=v3.9.14
##########################################################################
## OpenShift Authentication Vars
###########################################################################
{% if install_idm == "ldap" %}
openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com', 'bindPassword': '{{bindPassword}}', 'ca': 'ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid'}]
{{openshift_master_ldap_ca_file}}
{% endif %}
{% if install_idm == "allow_all"  %}
openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
{% endif %}
{% if install_idm == "htpasswd"  %}
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
openshift_master_htpasswd_file=/root/htpasswd.openshift
{% endif %}
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
# Enable cluster metrics
openshift_metrics_install_metrics={{install_metrics}}
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_nodeselector={"env":"infra"}
openshift_metrics_hawkular_nodeselector={"env":"infra"}
openshift_metrics_heapster_nodeselector={"env":"infra"}
## Add Prometheus Metrics:
openshift_hosted_prometheus_deploy=true
openshift_prometheus_node_selector={"env":"infra"}
openshift_prometheus_namespace=openshift-metrics
# Prometheus
openshift_prometheus_storage_kind=nfs
openshift_prometheus_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_storage_nfs_directory=/srv/nfs
openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_storage_volume_name=prometheus
openshift_prometheus_storage_volume_size=10Gi
openshift_prometheus_storage_labels={'storage': 'prometheus'}
openshift_prometheus_storage_type='pvc'
# For prometheus-alertmanager
openshift_prometheus_alertmanager_storage_kind=nfs
openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
openshift_prometheus_alertmanager_storage_volume_size=10Gi
openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
openshift_prometheus_alertmanager_storage_type='pvc'
# For prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_kind=nfs
openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs
openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
openshift_prometheus_alertbuffer_storage_volume_size=10Gi
openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
openshift_prometheus_alertbuffer_storage_type='pvc'
# Already set in the disconnected section
# openshift_prometheus_node_exporter_image_version=v3.9
# Enable cluster logging
openshift_logging_install_logging={{install_logging}}
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
# openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}}
openshift_logging_es_cluster_size=1
openshift_logging_es_nodeselector={"env":"infra"}
openshift_logging_kibana_nodeselector={"env":"infra"}
openshift_logging_curator_nodeselector={"env":"infra"}
###########################################################################
### OpenShift Project Management Vars
###########################################################################
# Configure additional projects
openshift_additional_projects={'openshift-template-service-broker': {'default_node_selector': ''}}
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
openshift_hosted_router_replicas={{infranode_instance_count}}
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
openshift_enable_service_catalog=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
openshift_hosted_etcd_storage_kind=nfs
openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)"
openshift_hosted_etcd_storage_nfs_directory=/srv/nfs
openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'}
openshift_hosted_etcd_storage_volume_name=etcd-asb
openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce']
openshift_hosted_etcd_storage_volume_size=10G
###########################################################################
### OpenShift Hosts
###########################################################################
[OSEv3:children]
lb
masters
etcd
nodes
nfs
{% if new_node_instance_count > 0 %}
new_nodes
{% endif %}
[lb]
{% for host in groups['loadbalancers'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host].placement}}
{% endfor %}
[masters]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[etcd]
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} host_zone={{hostvars[host]['placement']}}
{% endfor %}
[nodes]
## These are the masters
{% for host in groups['masters'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'logging':'true','openshift_schedulable':'False','cluster': '{{guid}}', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are infranodes
{% for host in groups['infranodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'infra', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
## These are regular nodes
{% for host in groups['nodes'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}  openshift_node_labels="{'logging':'true','cluster': '{{guid}}', 'env':'app', 'zone': '{{hostvars[host]['placement']}}'}"
{% endfor %}
{% if new_node_instance_count > 0 %}
# scaleup performed, leave an empty group, see:
# https://docs.openshift.com/container-platform/3.5/install_config/adding_hosts_to_existing_cluster.html
[new_nodes]
{% endif %}
[nfs]
{% for host in groups['support'] %}
{{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }}
{% endfor %}
ansible/configs/ocp-ha-disconnected-lab/files/labs_hosts_template.j2
@@ -11,6 +11,9 @@
# disable memory check, as we are not a production environment
openshift_disable_check="memory_availability"
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
[OSEv3:children]
lb
masters
ansible/configs/ocp-ha-lab/files/hosts_template.3.9.27.j2
@@ -32,6 +32,9 @@
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}}
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
###########################################################################
### OpenShift Network Vars
###########################################################################
ansible/configs/ocp-ha-lab/files/labs_hosts_template.j2
@@ -10,6 +10,9 @@
# disable memory check, as we are not a production environment
openshift_disable_check="memory_availability"
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
###########################################################################
### OpenShift Hosts
ansible/configs/ocp-ha-lab/post_software.yml
@@ -52,12 +52,6 @@
      tags:
        - overwrite_hosts_with_lab_hosts
    # sssd bug, fixed by restart
    - name: restart sssd
      service:
        name: sssd
        state: restarted
      when: install_ipa_client
    ## Create PVs for uservols if required
    - name: get nfs Hostname
      set_fact:
@@ -106,6 +100,10 @@
        - create_user_pv
        - openshift_nfs_config
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa"
      when: install_ipa_client|bool
# - name: include post nfs config
#   include: "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/post_ocp_nfs_config.yml"
#   tags:
ansible/configs/ocp-ha-lab/pre_software.yml
@@ -54,7 +54,6 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  roles:
    -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion", when: 'install_bastion' }
    -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa", when: 'install_ipa_client' }
  tags:
    - step004
    - bastion_tasks
ansible/configs/ocp-implementation-lab/post_software.yml
@@ -33,9 +33,7 @@
        dest: /etc/ansible/hosts
      tags:
        - overwrite_hosts_with_lab_hosts
    - name: install ipa client packages
      yum:
        name: "ipa-client"
        state: present
    - name: Register bastion with IPA
      shell: "/usr/sbin/ipa-client-install --domain=OPENTLC.COM -w {{ipa_host_password}} -N -U --mkhomedir --no-dns-sshfp --hostname={{bastion_public_dns_chomped}}"
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa"
      when: install_ipa_client|bool
ansible/configs/ocp-implementation-lab/pre_software.yml
@@ -47,31 +47,6 @@
    - step004
    - bastion_tasks
- name: Pre-software verification and ipa client
  hosts: bastions
  gather_facts: False
  become: yes
  tags:
    - opentlc_bastion_tasks
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - name: install ipa client packages
      yum:
        name: "ipa-client"
        state: present
      when: "install_ipa_client"
    - name: Register bastion with IPA
      shell: "/usr/sbin/ipa-client-install --domain=OPENTLC.COM -w {{ipa_host_password}} -N -U --mkhomedir --no-dns-sshfp --hostname={{bastion_public_dns_chomped}}"
      when: "install_ipa_client"
    - name: Add opentlc-access ipa group to sudoers.d
      lineinfile:
        path: /etc/sudoers.d/opentlc-sudoers
        state: present
        create: yes
        line: '%opentlc-access ALL=(ALL)       NOPASSWD: ALL'
        validate: '/usr/sbin/visudo -cf %s'
- name: PreSoftware flight-check
  hosts: localhost
  connection: local
ansible/configs/ocp-workshop/files/hosts_template.3.9.27.j2
@@ -59,6 +59,10 @@
# Run these commands after installation on one of the masters:
# oc patch storageclass glusterfs-storage -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
# oc patch storageclass glusterfs-block -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'
{% else %}
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
{% endif %}
###########################################################################
ansible/configs/ocp-workshop/post_software.yml
@@ -125,17 +125,6 @@
        group: opentlc-mgr
        recurse: yes
    # sssd bug, fixed by restart
    - name: restart sssd
      service:
        name: sssd
        state: restarted
      when: install_ipa_client
      register: sssd_restart_result
      retries: 6
      delay: 10
      until: sssd_restart_result is succeeded
- name: env-specific infrastructure
  hosts: masters
  run_once: true
@@ -319,7 +308,9 @@
      when: osrelease | version_compare('3.7', '>=')
    - name: Tag ose-recycler Image
      command: "docker tag registry.access.redhat.com/openshift3/ose-recycler:latest registry.access.redhat.com/openshift3/ose-recycler:v{{ osrelease }}"
      command: >
        docker tag registry.access.redhat.com/openshift3/ose-recycler:latest
        registry.access.redhat.com/openshift3/ose-recycler:v{{ osrelease }}
      when: osrelease | version_compare('3.7', '>=')
- name: Fix CRI-O Garbage Collection DaemonSet for OCP 3.9 and newer
@@ -470,6 +461,63 @@
    - env-specific
    - install_zabbix
- name: Run diagnostics from master
  hosts: masters
  become: yes
  gather_facts: False
  run_once: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    # start supporting this only for OCP >= 3.9
    - when: osrelease is version_compare('3.9', '>=')
      block:
        # this command should return 0 (no error)
        - name: Run oc adm diagnostics
          shell: oc adm diagnostics > /tmp/diagnostics.log
          register: r_diag
          retries: 2
          until: r_diag is succeeded
          ignore_errors: true
        - name: Ensure /tmp/openshift exist
          file:
            path: /tmp/openshift
            state: directory
        # oc adm diagnostics logs everything in /tmp/openshift
        - name: Create an archive of diagnostics output logs
          archive:
            path:
              - /tmp/openshift
              - /tmp/diagnostics.log
            dest: /tmp/diagnostics.tar.gz
        - name: Fetch the diagnostic archive and logs
          fetch:
            src: /tmp/diagnostics.tar.gz
            dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{project_tag}}_diagnostics.tar.gz"
            flat: true
        - name: Report diagnostics failure
          fail:
            msg: "FAIL {{ project_tag }} Diagnostics"
          when: r_diag is failed
- name: Configure IPA on bastion
  hosts: bastions
  become: yes
  gather_facts: False
  run_once: yes
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa"
      when: install_ipa_client|bool
- name: PostSoftware flight-check
  hosts: localhost
  connection: local
ansible/configs/ocp-workshop/pre_software.yml
@@ -77,7 +77,6 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion", when: 'install_bastion' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa", when: 'install_ipa_client' }
  tags:
    - step004
    - bastion_tasks
ansible/configs/three-tier-app/post_software.yml
@@ -17,6 +17,10 @@
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
  tasks:
    - import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa"
      when: install_ipa_client|bool
    # sssd bug, fixed by restart
    - name: restart sssd
      service:
ansible/configs/three-tier-app/pre_software.yml
@@ -24,7 +24,6 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  roles:
    -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion", when: 'install_bastion' }
    -  { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa", when: 'install_ipa_client' }
  tags:
    - step004
    - bastion_tasks
ansible/roles/bastion-opentlc-ipa/tasks/main.yml
@@ -10,14 +10,27 @@
    state: present
- name: Register bastion with IPA
  shell: "/usr/sbin/ipa-client-install --domain=OPENTLC.COM -w '{{ipa_host_password}}' -N -U --mkhomedir --no-dns-sshfp --hostname={{bastion_public_dns_chomped}} {{ipa_additional_options|d('')}}"
  shell: >
    /usr/sbin/ipa-client-install --uninstall;
    /usr/sbin/ipa-client-install --domain=OPENTLC.COM
    -w '{{ipa_host_password}}'
    -N -U --mkhomedir --no-dns-sshfp
    --hostname={{bastion_public_dns_chomped}}
    {{ipa_additional_options|d('')}}
  when: ipa_host_password is defined
  register: ipa_r
  until: ipa_r is succeeded
  until:
    - ipa_r is succeeded
  retries: 5
- name: Register bastion with IPA
  shell: "/usr/sbin/ipa-client-install --domain=OPENTLC.COM -p {{ipa_kerberos_user}} -w '{{ipa_kerberos_password}}' -N -U --mkhomedir --no-dns-sshfp --hostname={{bastion_public_dns_chomped}} {{ipa_additional_options|d('')}}"
  shell: >
    /usr/sbin/ipa-client-install --uninstall;
    /usr/sbin/ipa-client-install --domain=OPENTLC.COM
    -p {{ipa_kerberos_user}} -w '{{ipa_kerberos_password}}'
    -N -U --mkhomedir --no-dns-sshfp
    --hostname={{bastion_public_dns_chomped}}
    {{ipa_additional_options|d('')}}
  when:
    - ipa_host_password is not defined
    - ipa_kerberos_user is defined
@@ -51,3 +64,13 @@
  fail:
    msg: Unable to update sudoers.d/opentlc-sudoers
  when: not result|succeeded
# sssd bug, fixed by restart
- name: restart sssd
  service:
    name: sssd
    state: restarted
  register: sssd_restart_result
  retries: 6
  delay: 10
  until: sssd_restart_result is succeeded
ansible/roles/ocp-workload-amq-enmasse/defaults/main.yml
@@ -19,17 +19,16 @@
quota_secrets: 30
quota_requests_storage: 50Gi
enmasse_tag: 0.17.2
enmasse_git_url: "https://raw.githubusercontent.com/EnMasseProject/enmasse/{{enmasse_tag}}/templates/install"
admin_user: developer
keycloak_admin_password: admin
enmasse_repo_url: https://github.com/EnMasseProject/enmasse
enmasse_repo_tag: 0.18.0
enmasse_template_file: install.yml
enmasse_repo_tag: 0.20.0
keycloak_admin_password: admin
# Using 0.18.0, getting weird error with keycloak/H2 setup:
#   Caused by: org.h2.jdbc.JdbcSQLException: The database has been closed [90098-193]
authentication_services:
    - none
ansible/roles/ocp-workload-amq-enmasse/readme.adoc
@@ -26,7 +26,7 @@
== Deploy a Workload with the `ocp-workload` playbook
----
GUID=jb45
HOST_GUID=dev37
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-amq-enmasse"
@@ -43,7 +43,6 @@
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=true" \
                    -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
                    -e"enmasse_repo_tag=0.18.0" \
                    -e"namespace=amq-enmasse-${GUID}" \
                    -e"ACTION=create"
@@ -52,7 +51,7 @@
== To Delete an environment
----
GUID=jb45
HOST_GUID=dev37
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-amq-enmasse"
ansible/roles/ocp-workload-amq-enmasse/tasks/workload.yml
@@ -12,8 +12,11 @@
# ###############       enmasse specific        ###############
- name: Ensure the following directory exists in remote, /tmp/{{namespace}}/enmasse
- name: Ensure the following directory is refreshed in remote, /tmp/{{namespace}}/enmasse
  file:
    path: "/tmp/{{namespace}}/enmasse"
    state: absent
- file:
    path: "/tmp/{{namespace}}/enmasse"
    state: directory
@@ -26,11 +29,17 @@
- name: execute ansible-playbook using shell
  shell: |
      ansible-playbook /tmp/{{namespace}}/enmasse/templates/install/ansible/playbooks/openshift/multitenant.yml \
      ansible-playbook -i "enmasse," -c local /tmp/{{namespace}}/enmasse/templates/install/ansible/playbooks/openshift/{{enmasse_template_file}} \
      -e namespace={{namespace}} \
      -e admin_user={{ocp_username}} \
      -e multitenant=true \
      -e enable_rbac=false \
      -e enable_user_lookup=true \
      -e api_server=true \
      -e register_api_server=true \
      -e keycloak_admin_password={{keycloak_admin_password}} \
      -e authentication_services={{authentication_services}}
      -e authentication_services={{authentication_services}} \
      > /tmp/{{namespace}}/enmasse_install.log
# ###############################################
ansible/roles/ocp-workload-appmod-migration/readme.adoc
@@ -3,13 +3,13 @@
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
HOST_GUID=dev37
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-appmod-migration"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
GUID=jb05
OCP_USERNAME="jbride-redhat.com"
GUID=gptetraining01
OCP_USERNAME="gpsetraining1"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
@@ -27,11 +27,11 @@
=== To Delete an environment
----
HOST_GUID=dev37
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-appmod-migration"
GUID=jb05
OCP_USERNAME="jbride-redhat.com"
GUID=gptetraining01
OCP_USERNAME="gpsetraining1"
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
ansible/roles/ocp-workload-appmod-migration/tasks/pre_workload.yml
@@ -1,11 +1,11 @@
---
# - name: Add user to developer group (allowed to create projects)
#   shell: "oadm groups add-users {{item}} {{ocp_username}}"
#   register: groupadd_register
#   with_items: "{{ocp_user_groups}}"
#   when: ocp_username is defined and ocp_user_groups is defined
#
- name: Add user to developer group (allowed to create projects)
  shell: "oc adm groups add-users {{item}} {{ocp_username}}"
  register: groupadd_register
  with_items: "{{ocp_user_groups}}"
  when: ocp_username is defined and ocp_user_groups is defined
# - name: test that command worked
#   debug:
#     var: groupadd_register
ansible/roles/ocp-workload-appmod-migration/tasks/remove_workload.yml
@@ -3,6 +3,12 @@
  debug:
    msg: "Pre-Software checks completed successfully - Removed"
- name: Remove user from groups {{ocp_user_groups}}
  shell: oc adm groups remove-users {{item}} {{ocp_username}}
  with_items: "{{ocp_user_groups}}"
  ignore_errors: true
- name: Remove user Quota - oc delete clusterresourcequota  "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
  ignore_errors: true
@@ -11,11 +17,33 @@
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}
  ignore_errors: true
- name: Remove user Project
  shell: "oc delete project {{ocp_project}}"
- name: Remove Project {{ocp_constraints_project}}
  shell: "oc delete project {{ocp_constraints_project}}"
- name: Remove user Projects - oc get projects
  command: "oc get projects -o json"
  register: all_projects
- name: Remove user Projects - Convert output to json
  set_fact:
    projects: "{{all_projects.stdout | from_json}}"
- name: Remove user Projects -  Debug statement
  debug:
    msg: "found user project: {{item.metadata.name}}"
    verbosity: 1
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
  with_items: "{{projects['items']}}"
- name: Remove user Projects - "oc delete project {{item.metadata.name}} "
  command: "oc delete project {{item.metadata.name}}"
  when:
    - item.metadata.annotations['openshift.io/requester'] is defined
    - item.metadata.annotations['openshift.io/requester'] == "{{ocp_username}}"
    - item.status.phase is defined
    - item.status.phase != "Terminating"
  with_items: "{{projects['items']}}"
- name: post_workload Tasks Complete
  debug:
ansible/roles/ocp-workload-appmod-migration/tasks/workload.yml
@@ -1,22 +1,22 @@
---
# #########         Default project             ##############
- name: Create project for workload; project =  {{ocp_project}}
  shell: "oc new-project {{ocp_project}}"
#- name: Create project for workload; project =  {{ocp_project}}
#  shell: "oc new-project {{ocp_project}}"
- name: Make sure we go back do default project
  shell: "oc project default"
#- name: Make sure we go back do default project
#  shell: "oc project default"
- name: Delete default limitrange
  shell: |
    oc delete limitrange {{ocp_project}}-core-resource-limits -n {{ocp_project}}
#- name: Delete default limitrange
#  shell: |
#    oc delete limitrange {{ocp_project}}-core-resource-limits -n {{ocp_project}}
- name: Create a new limitrange
  template:
    src: templates/limitrange.yaml
    dest: /tmp/{{ocp_project}}_limitrange.yaml
- shell: |
    oc create -f /tmp/{{ocp_project}}_limitrange.yaml -n {{ocp_project}}
#- name: Create a new limitrange
#  template:
#    src: templates/limitrange.yaml
#    dest: /tmp/{{ocp_project}}_limitrange.yaml
#- shell: |
#    oc create -f /tmp/{{ocp_project}}_limitrange.yaml -n {{ocp_project}}
# ###############################################################
@@ -41,13 +41,14 @@
- shell: |
    oc create -f /tmp/{{ocp_constraints_project}}_limitrange.yaml -n {{ocp_constraints_project}}
# ###############################################################
- name: Annotate the empty project as requested by user
  shell: "oc annotate namespace {{ocp_project}} openshift.io/requester={{ocp_username}} --overwrite"
  shell: "oc annotate namespace {{ocp_constraints_project}} openshift.io/requester={{ocp_username}} --overwrite"
- name: Give ocp_username access to ocp_project; user = {{ocp_username}}
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_project}}"
  shell: "oc policy add-role-to-user admin {{ocp_username}} -n {{ocp_constraints_project}}"
# ###############################################################
- name: workload Tasks Complete
  debug:
ansible/roles/ocp-workload-appmod-migration/templates/constraints_limitrange.yaml
@@ -14,13 +14,12 @@
    max:
      memory: 350Mi
    min:
      memory: 50m
      memory: 4Mi
    type: Container
  - max:
      cpu: 5
      memory: 350Mi
    min:
      cpu: 500m
      cpu: 50m
      memory: 6Mi
    type: Pod
ansible/roles/ocp-workload-bxms-dm/ilt_provision.sh
New file
@@ -0,0 +1,101 @@
#!/bin/bash
END_PROJECT_NUM=1
START_PROJECT_NUM=1
WORKLOAD="ocp-workload-bxms-dm"
LOG_FILE=/tmp/$WORKLOAD
for var in $@
do
    case "$var" in
        --HOST_GUID=*) HOST_GUID=`echo $var | cut -f2 -d\=` ;;
        --START_PROJECT_NUM=*) START_PROJECT_NUM=`echo $var | cut -f2 -d\=` ;;
        --END_PROJECT_NUM=*) END_PROJECT_NUM=`echo $var | cut -f2 -d\=` ;;
        -h) HELP=true ;;
        -help) HELP=true ;;
    esac
done
function ensurePreReqs() {
    if [ "x$HOST_GUID" == "x" ]; then
            echo -en "must pass parameter: --HOST_GUID=<ocp host GUID> . \n\n"
            help
            exit 1;
    fi
    LOG_FILE=$LOG_FILE-$HOST_GUID-$START_PROJECT_NUM-$END_PROJECT_NUM.log
    echo -en "starting\n\n" > $LOG_FILE
    echo -en "\n\nProvision log file found at: $LOG_FILE\n";
}
function help() {
    echo -en "\n\nOPTIONS:";
    echo -en "\n\t--HOST_GUID=*             REQUIRED: specify GUID of target OCP environment)"
    echo -en "\n\t--START_PROJECT_NUM=*     OPTIONAL: specify # of first OCP project to provision (defult = 1))"
    echo -en "\n\t--END_PROJECT_NUM=*       OPTIONAL: specify # of OCP projects to provision (defualt = 1))"
    echo -en "\n\t-h                        this help manual"
    echo -en "\n\n\nExample:                ./roles/$WORKLOAD/ilt_provision.sh --HOST_GUID=dev37 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
}
function login() {
    echo -en "\nHOST_GUID=$HOST_GUID\n" >> $LOG_FILE
    oc login https://master.$HOST_GUID.openshift.opentlc.com -u opentlc-mgr -p r3dh4t1!
}
function initializeOpenshift() {
    oc create -f https://raw.githubusercontent.com/jboss-container-images/rhdm-7-openshift-image/ose-v1.4.8-1/rhdm70-image-streams.yaml -n openshift
}
function executeLoop() {
    echo -en "\nexecuteLoop() START_PROJECT_NUM = $START_PROJECT_NUM ;  END_PROJECT_NUM=$END_PROJECT_NUM" >> $LOG_FILE
    for (( c=$START_PROJECT_NUM; c<=$END_PROJECT_NUM; c++ ))
    do
        GUID=$c
        OCP_USERNAME=user$c
        executeAnsible
    done
}
function executeAnsible() {
    TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
    SSH_USERNAME="jbride-redhat.com"
    SSH_PRIVATE_KEY="id_ocp"
    # NOTE:  Ensure you have ssh'd (as $SSH_USERNMAE) into the bastion node of your OCP cluster environment at $TARGET_HOST and logged in using opentlc-mgr account:
    #           oc login https://master.$HOST_GUID.openshift.opentlc.com -u opentlc-mgr
    GUID=$PROJECT_PREFIX$GUID
    echo -en "\n\nexecuteAnsible():  Provisioning project with GUID = $GUID and OCP_USERNAME = $OCP_USERNAME\n" >> $LOG_FILE
    ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
                 -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
                 -e"ansible_ssh_user=${SSH_USERNAME}" \
                    -e"ANSIBLE_REPO_PATH=`pwd`" \
                    -e"ocp_username=${OCP_USERNAME}" \
                    -e"ocp_workload=${WORKLOAD}" \
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=true" \
                    -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
                    -e"ACTION=create" >> $LOG_FILE
    if [ $? -ne 0 ];
    then
        echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n " >> $LOG_FILE
        echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n "
        exit 1;
    fi
}
ensurePreReqs
login
initializeOpenshift
executeLoop
ansible/roles/ocp-workload-bxms-dm/readme.adoc
@@ -1,5 +1,11 @@
= ocp-workload-bxms-dm
NOTE:  Assumes the following has been executed:
-----
$ oc create -f https://raw.githubusercontent.com/jboss-container-images/rhdm-7-openshift-image/ose-v1.4.8-1/rhdm70-image-streams.yaml -n openshift
-----
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
GUID=jb45
ansible/roles/ocp-workload-bxms-pam/ilt_provision.sh
@@ -2,7 +2,8 @@
END_PROJECT_NUM=1
START_PROJECT_NUM=1
LOG_FILE=/tmp/ilt_provision_
WORKLOAD="ocp-workload-bxms-pam"
LOG_FILE=/tmp/$WORKLOAD
for var in $@
do
@@ -22,7 +23,7 @@
            exit 1;
    fi
    LOG_FILE=$LOG_FILE$START_PROJECT_NUM-$END_PROJECT_NUM.log
    LOG_FILE=$LOG_FILE-$HOST_GUID-$START_PROJECT_NUM-$END_PROJECT_NUM.log
    echo -en "starting\n\n" > $LOG_FILE
    echo -en "\n\nProvision log file found at: $LOG_FILE\n";
@@ -34,7 +35,7 @@
    echo -en "\n\t--START_PROJECT_NUM=*     OPTIONAL: specify # of first OCP project to provision (defult = 1))"
    echo -en "\n\t--END_PROJECT_NUM=*       OPTIONAL: specify # of OCP projects to provision (defualt = 1))"
    echo -en "\n\t-h                        this help manual"
    echo -en "\n\n\nExample:                ./ilt_provision.sh --HOST_GUID=dev37 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
    echo -en "\n\n\nExample:                ./roles/$WORKLOAD/ilt_provision.sh --HOST_GUID=dev37 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
}
@@ -65,7 +66,6 @@
    # NOTE:  Ensure you have ssh'd (as $SSH_USERNMAE) into the bastion node of your OCP cluster environment at $TARGET_HOST and logged in using opentlc-mgr account:
    #           oc login https://master.$HOST_GUID.openshift.opentlc.com -u opentlc-mgr
    WORKLOAD="ocp-workload-bxms-pam"
    GUID=$PROJECT_PREFIX$GUID
@@ -80,7 +80,14 @@
                    -e"guid=${GUID}" \
                    -e"ocp_user_needs_quota=true" \
                    -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
                    -e"ACTION=create"
                    -e"ACTION=create" >> $LOG_FILE
    if [ $? -ne 0 ];
    then
        echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n " >> $LOG_FILE
        echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n "
        exit 1;
    fi
}
ansible/roles/ocp-workload-fuse-ignite/defaults/main.yml
@@ -42,46 +42,42 @@
##########          community syndesis          #################
SYNDESIS_REGISTRY: docker.io
SYNDESIS_REGISTRY_WEB_SUBCONTEXT: syndesis
SYNDESIS_TAG: 1.3.10
product_name: syndesis
#SYNDESIS_REGISTRY: docker.io
#SYNDESIS_REGISTRY_WEB_SUBCONTEXT: syndesis
#SYNDESIS_TAG: 1.3.10
#product_name: syndesis
#ignite_template_name: "{{ product_name }}"
# Corresponds to community syndesis master branch:
#ignite_version: syndesis
#syndesisio_url: https://raw.githubusercontent.com/syndesisio/syndesis/master
# Corresponds to GPTE fork of community syndesis master branch
#   This branch implements the following:
#       1)  DCs in a paused state
ignite_version: syndesis
syndesisio_url: https://raw.githubusercontent.com/gpe-mw-training/syndesis/gpte-{{SYNDESIS_TAG}}
#syndesisio_url: https://raw.githubusercontent.com/gpe-mw-training/syndesis/gpte-{{SYNDESIS_TAG}}
#syndesisio_sa_yml:  "{{syndesisio_url}}/install/support/serviceaccount-as-oauthclient-restricted.yml"
#syndesisio_template_yml: "{{syndesisio_url}}/install/syndesis.yml"
#################################################################
##########          supported fuse ignite       #################
#SYNDESIS_REGISTRY: registry.access.redhat.com
#SYNDESIS_REGISTRY_WEB_SUBCONTEXT: jboss-fuse-7-tech-preview
#SYNDESIS_TAG: 1.3-2
#product_name: fuse_ignite
# Corresponds to the following tag:  fuse-ignite-1.3
#ignite_version: fuse-ignite-1.3
#syndesisio_url: https://raw.githubusercontent.com/syndesisio/syndesis/952ecabaac59b70e87095b0bbe2e52b9c5610391
SYNDESIS_REGISTRY: registry.access.redhat.com
SYNDESIS_REGISTRY_WEB_SUBCONTEXT: fuse7
product_name: fuse-ignite
# Corresponds to GPTE fork of community syndesis master branch
#   This branch implements the following:
#       1)  DCs in a paused state
#ignite_version: syndesis-fuse-ignite-1.3
#syndesisio_url: https://raw.githubusercontent.com/gpe-mw-training/syndesis/gpte-fuse-ignite-1.3
ignite_template_name: "{{ product_name }}"
syndesisio_url: https://raw.githubusercontent.com/gpe-mw-training/fuse-ignite-install/gpte-1.3
fuse_ignite_is_yaml: "{{syndesisio_url}}/resources/fuse-ignite-image-streams.yml"
syndesisio_sa_yml:  "{{syndesisio_url}}/resources/serviceaccount-as-oauthclient-restricted.yml"
syndesisio_template_yml: "{{syndesisio_url}}/resources/fuse-ignite-ocp.yml"
#################################################################
amq_template_name: amq63-basic
syndesisio_template_yml: "{{syndesisio_url}}/install/syndesis.yml"
amq_template_yml: "{{syndesisio_url}}/install/support/syndesis-amq.yml"
syndesisio_sa_yml:  "{{syndesisio_url}}/install/support/serviceaccount-as-oauthclient-restricted.yml"
ansible/roles/ocp-workload-fuse-ignite/ilt_provision.sh
@@ -2,7 +2,8 @@
END_PROJECT_NUM=1
START_PROJECT_NUM=1
LOG_FILE=/tmp/ilt_provision_
WORKLOAD="ocp-workload-fuse-ignite"
LOG_FILE=/tmp/$WORKLOAD
for var in $@
do
@@ -22,7 +23,7 @@
            exit 1;
    fi
    LOG_FILE=$LOG_FILE$START_PROJECT_NUM-$END_PROJECT_NUM.log
    LOG_FILE=$LOG_FILE-$HOST_GUID-$START_PROJECT_NUM-$END_PROJECT_NUM.log
    echo -en "starting\n\n" > $LOG_FILE
    echo -en "\n\nProvision log file found at: $LOG_FILE\n";
@@ -33,7 +34,8 @@
    echo -en "\n\t--HOST_GUID=*             REQUIRED: specify GUID of target OCP environment)"
    echo -en "\n\t--START_PROJECT_NUM=*     OPTIONAL: specify # of first OCP project to provision (defult = 1))"
    echo -en "\n\t--END_PROJECT_NUM=*       OPTIONAL: specify # of OCP projects to provision (defualt = 1))"
    echo -en "\n\t-h                        this help manual\n\n"
    echo -en "\n\t-h                        this help manual"
    echo -en "\n\n\nExample:                ./roles/$WORKLOAD/ilt_provision.sh --HOST_GUID=dev37 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
}
@@ -61,10 +63,9 @@
    SSH_USERNAME="jbride-redhat.com"
    SSH_PRIVATE_KEY="id_ocp"
    # NOTE:  Ensure you has ssh'd (as $SSH_USERNMAE) into the bastion node of your OCP cluster environment at $TARGET_HOST and logged in using opentlc-mgr account:
    # NOTE:  Ensure you have ssh'd (as $SSH_USERNMAE) into the bastion node of your OCP cluster environment at $TARGET_HOST and logged in using opentlc-mgr account:
    #           oc login https://master.$HOST_GUID.openshift.opentlc.com -u opentlc-mgr
    WORKLOAD="ocp-workload-fuse-ignite"
    POSTGRESQL_MEMORY_LIMIT=512Mi
    PROMETHEUS_MEMORY_LIMIT=255Mi
    META_MEMORY_LIMIT=1Gi
@@ -89,6 +90,12 @@
                    -e"META_MEMORY_LIMIT=$META_MEMORY_LIMIT" \
                    -e"SERVER_MEMORY_LIMIT=$SERVER_MEMORY_LIMIT" \
                    -e"ACTION=create" >> $LOG_FILE
    if [ $? -ne 0 ];
    then
        echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n " >> $LOG_FILE
        echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n "
        exit 1;
    fi
}
ensurePreReqs
ansible/roles/ocp-workload-fuse-ignite/tasks/workload.yml
@@ -16,8 +16,6 @@
- name: Create syndesisio template; {{syndesisio_template_yml}}
  shell: "oc create -f {{syndesisio_template_yml}} -n {{ocp_project}}"
- name: Create amq template; {{amq_template_yml}}
  shell: "oc create -f {{amq_template_yml}} -n {{ocp_project}}"
- name: delete temp dir if it exists
  file:
@@ -26,22 +24,22 @@
- file:
      path: /tmp/{{ocp_project}}
      state: directory
#- name: Copy fuse-ignite imagestream file to known path
#  template:
#    src: templates/fuse-ignite-is.yaml
#    dest: /tmp/{{ocp_project}}/fuse-ignite-is.yaml
#- name: Load fuse-ignite-is
#  shell: "oc create -f /tmp/{{ocp_project}}/fuse-ignite-is.yaml -n {{ocp_project}}"
- name: Create the amq app
  shell: |
      oc new-app {{amq_template_name}} \
      -p APPLICATION_NAME={{product_name}} \
      -n {{ocp_project}}
- name: Load fuse-ignite-is
  shell: "oc create -f {{ fuse_ignite_is_yaml }} -n {{ocp_project}}"
#- name: Create amq template; {{amq_template_yml}}
#  shell: "oc create -f {{amq_template_yml}} -n {{ocp_project}}"
#- name: Create the amq app
#  shell: |
#      oc new-app {{amq_template_name}} \
#      -p APPLICATION_NAME={{product_name}} \
#      -n {{ocp_project}}
- name: Create the syndesisio app
  shell: |
      oc new-app {{ignite_version}} \
      oc new-app {{ignite_template_name}} \
      -p ROUTE_HOSTNAME=fuse.{{ocp_project}}.{{ocp_apps_domain}} \
      -p OPENSHIFT_MASTER=https://master.{{ocp_domain}} \
      -p OPENSHIFT_PROJECT={{ocp_project}} \
@@ -51,6 +49,7 @@
      -p SERVER_MEMORY_LIMIT={{SERVER_MEMORY_LIMIT}} \
      -p OPENSHIFT_OAUTH_CLIENT_SECRET=$(oc sa get-token syndesis-oauth-client -n {{ocp_project}}) \
      -p MAX_INTEGRATIONS_PER_USER={{MAX_INTEGRATIONS_PER_USER}} \
      -p IMAGE_STREAM_NAMESPACE={{ocp_project}} \
      -n {{ocp_project}}
- name: resume syndesis oauthproxy and db
@@ -62,16 +61,16 @@
      - syndesis-oauthproxy
      - syndesis-db
- name: Scale up {{product_name}}-amq
- name: Scale up broker-amq
  shell: |
      oc scale dc/{{product_name}}-amq --replicas=1 -n {{ocp_project}}
- name: resume {{product_name}}-amq
  shell: oc rollout resume dc/{{product_name}}-amq -n {{ocp_project}}
      oc scale dc/broker-amq --replicas=1 -n {{ocp_project}}
- name: resume broker-amq
  shell: oc rollout resume dc/broker-amq -n {{ocp_project}}
- include: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - "{{product_name}}-amq"
      - "broker-amq"
- name: resume syndesis-meta
  shell: oc rollout resume dc/syndesis-meta -n {{ocp_project}}
@@ -105,6 +104,14 @@
    pod_to_wait:
      - syndesis-prometheus
- name: resume todo
  shell: oc rollout resume dc/todo -n {{ocp_project}}
- include: ./wait_for_deploy.yml
  static: no
  vars:
    pod_to_wait:
      - todo
# ########################################################
ansible/roles/ocp-workload-fuse-ignite/templates/fuse-ignite-is.yaml
File was deleted
tests/scenarii/ans-tower-lab.yml
New file
@@ -0,0 +1,14 @@
---
guid: testgucore
cloud_provider: ec2
aws_region: eu-central-1
key_name: gucore
env_type: ans-tower-lab
software_to_deploy: openshift
HostedZoneId: YOUR_AWS_ZONEID
subdomain_base_suffix: .openshift.opentlc.com
own_repo_path: http://admin.example.com/repos/ocp/3.9.14
repo_version: 3.7
install_win_ad: true
install_win_ssh: true
software_to_deploy: none