ansible/configs/rhte-ocp-workshop/post_infra.yml
@@ -13,6 +13,9 @@ AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}" AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}" AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}" when: - "'clientvms' in groups" - groups['clientvms'] | length > 0 block: - name: test cloudformation_out_final debug: ansible/configs/rhte-ocp-workshop/post_software.yml
@@ -644,6 +644,8 @@ - name: Check if remove_self_provisioners=true fail: msg: remove_self_provisioners must be set to true tags: - remove_self_provisioners when: - not remove_self_provisioners|d(False)|bool ansible/roles/infra-ec2-template-create/tasks/main.yml
@@ -64,12 +64,12 @@ region: "{{ aws_region_loop | d(aws_region) | d(region) | d('us-east-1')}}" # rollback is unreliable, it can make this task hang forever. disable_rollback: true template_url: "https://s3.amazonaws.com/redhat-gpe-cloudformation-templates/{{env_type}}.{{guid}}.{{cloud_provider}}_cloud_template" template_url: "https://s3.amazonaws.com/{{bucket_templates}}/{{env_type}}.{{guid}}.{{cloud_provider}}_cloud_template" tags: "{{ cf_tags | combine(cloud_tags_final)}}" tags: - aws_infrastructure_deployment - provision_cf_template register: cloudformation_out register: cloudformation_out_s3 until: >- cloudformation_out is succeeded or ( @@ -81,6 +81,14 @@ when: stat_template.stat.size > 51200 ignore_errors: yes # We cannot have the same name for the register because the skipped task is always succeeded. # We write back to cloudformation_out if it used the s3 method: - name: Set fact cloudformation_out set_fact: cloudformation_out: "{{ cloudformation_out_s3 }}" when: - stat_template.stat.size > 51200 - name: debug cloudformation debug: var: cloudformation_out ansible/roles/infra-ec2-template-generate/tasks/main.yml
@@ -45,12 +45,27 @@ AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}" AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}" block: - name: Get user name command: aws iam get-user register: rget_user ignore_errors: yes # Bucket name must be globally unique. Use the userID to define the bucketname. # Otherwise when we use the code in another account it fails because the bucket # already exists. - set_fact: aws_account_user: "{{rget_user.stdout|from_json|json_query('User.UserId')}}" ignore_errors: yes - set_fact: bucket_templates: "cloudformation-templates-{{aws_account_user|default('user')|lower}}" - name: Create bucket s3_bucket: name: redhat-gpe-cloudformation-templates name: "{{bucket_templates}}" - name: Copy Template to S3 aws_s3: bucket: redhat-gpe-cloudformation-templates bucket: "{{bucket_templates}}" object: "{{ env_type }}.{{ guid }}.{{cloud_provider}}_cloud_template" src: "{{ANSIBLE_REPO_PATH}}/workdir/{{ env_type }}.{{ guid }}.{{cloud_provider}}_cloud_template" mode: put @@ -83,7 +98,7 @@ command: >- aws cloudformation validate-template --region {{ aws_region_final | d(aws_region) | default(region) | default('us-east-1')}} --template-url https://s3.amazonaws.com/redhat-gpe-cloudformation-templates/{{env_type}}.{{guid}}.{{cloud_provider}}_cloud_template --template-url https://s3.amazonaws.com/{{bucket_templates}}/{{env_type}}.{{guid}}.{{cloud_provider}}_cloud_template changed_when: false register: cloudformation_validation until: cloudformation_validation is succeeded ansible/roles/ocp-client-vm/tasks/packages.yml
@@ -70,6 +70,43 @@ tags: - install_openshift_client_vm_packages - name: Get community istioctl utility get_url: url: https://github.com/istio/istio/releases/download/1.0.0/istio-1.0.0-linux.tar.gz dest: /root/istio-1.0.0-linux.tar.gz tags: - install_openshift_client_vm_packages - name: Unarchive file unarchive: remote_src: yes src: /root/istio-1.0.0-linux.tar.gz dest: /root/ tags: - install_openshift_client_vm_packages - name: Move istioctl to /usr/local/bin copy: remote_src: yes src: /root/istio-1.0.0/bin/istioctl dest: /usr/local/bin/istioctl group: root owner: root mode: 0755 tags: - install_openshift_client_vm_packages - name: Cleanup Temp Directory file: dest: /root/istio-1.0.0 state: absent tags: - install_openshift_client_vm_packages - name: Cleanup downloaded file file: dest: /root/istio-1.0.0-linux.tar.gz state: absent tags: - install_openshift_client_vm_packages - name: Install bash-git-prompt git: repo: https://github.com/magicmonty/bash-git-prompt.git ansible/roles/ocp-workload-bxms-pam/defaults/main.yml
@@ -41,3 +41,5 @@ pam_template_yml: https://raw.githubusercontent.com/jboss-container-images/rhpam-7-openshift-image/{{pam_tag}}/templates/rhpam70-authoring.yaml pam_template_name: rhpam70-authoring products_data: https://raw.githubusercontent.com/gpe-mw-training/rhpam-order-fulfillment/master/src/main/resources/products.txt ansible/roles/ocp-workload-bxms-pam/tasks/workload.yml
@@ -60,6 +60,19 @@ pod_to_wait: - "{{app_name}}-rhpamcentr" # Modify kieserver to include required data products information - name: Download products data file get_url: url: '{{products_data}}' dest: /tmp/{{guid}}/products.txt - name: Create products configmap shell: oc create configmap products-cm --from-file=/tmp/{{guid}}/products.txt -n {{ocp_project}} - name: Modify kieserver to include products in a volume shell: oc volume dc/{{app_name}}-kieserver -n {{ocp_project}} \ --overwrite --add -t configmap -m /data --name=products-volume --configmap-name=products-cm - name: resume {{app_name}}-kieserver shell: oc rollout resume dc/{{app_name}}-kieserver -n {{ocp_project}} ansible/roles/ocp-workload-rhte-mw-msa-orchestration/defaults/main.yml
@@ -6,18 +6,18 @@ ocp_user_groups: - OPENTLC-PROJECT-PROVISIONERS quota_requests_cpu: 5 quota_limits_cpu: 10 quota_requests_cpu: 16 quota_limits_cpu: 240 quota_requests_memory: '6Gi' quota_limits_memory: '20Gi' quota_requests_memory: '16Gi' quota_limits_memory: '24Gi' quota_configmaps: 10 quota_pods: 20 quota_configmaps: 40 quota_pods: 30 quota_persistentvolumeclaims: 20 quota_services: 30 quota_secrets: 30 quota_requests_storage: 50Gi quota_services: 50 quota_secrets: 100 quota_requests_storage: 80Gi ocp_domain: "{{subdomain_base}}" ocp_apps_domain: "apps.{{ocp_domain}}" ansible/roles/ocp-workload-rhte-mw-msa-orchestration/ilt_provision.sh
New file @@ -0,0 +1,100 @@ #!/bin/bash START_PROJECT_NUM=1 END_PROJECT_NUM=1 WORKLOAD="ocp-workload-rhte-mw-msa-orchestration" LOG_FILE=/tmp/$WORKLOAD HOST_GUID=`oc whoami --show-server | cut -d'.' -f 2` OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com PATH_TO_AAD_ROOT=$TRAINING/gpte/ansible_agnostic_deployer/ansible for var in $@ do case "$var" in --HOST_GUID=*) HOST_GUID=`echo $var | cut -f2 -d\=`; OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com ;; --START_PROJECT_NUM=*) START_PROJECT_NUM=`echo $var | cut -f2 -d\=` ;; --END_PROJECT_NUM=*) END_PROJECT_NUM=`echo $var | cut -f2 -d\=` ;; --PATH_TO_AAD_ROOT=*) PATH_TO_AAD_ROOT=`echo $var | cut -f2 -d\=` ;; -h) HELP=true ;; -help) HELP=true ;; --help) HELP=true ;; esac done function ensurePreReqs() { if [ "x$HOST_GUID" == "x" ]; then echo -en "must pass parameter: --HOST_GUID=<ocp host GUID> . \n\n" help exit 1; fi LOG_FILE=$LOG_FILE-$HOST_GUID-$START_PROJECT_NUM-$END_PROJECT_NUM.log echo -en "starting\n\n" > $LOG_FILE echo -en "\n\nProvision log file found at: $LOG_FILE\n"; } function help() { echo -en "\n\nOPTIONS:"; echo -en "\n\t--START_PROJECT_NUM=* OPTIONAL: specify # of first OCP project to provision (default = 1))" echo -en "\n\t--END_PROJECT_NUM=* OPTIONAL: specify # of OCP projects to provision (default = 1))" echo -en "\n\t--PATH_TO_AAD_ROOT=* OPTIONAL: (default = $PATH_TO_AAD_ROOT))" echo -en "\n\t-h this help manual" echo -en "\n\n\nExample: ./roles/$WORKLOAD/ilt_provision.sh --HOST_GUID=dev39 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n" } function login() { echo -en "\nHOST_GUID=$HOST_GUID\n" >> $LOG_FILE oc login https://master.$HOST_GUID.openshift.opentlc.com -u opentlc-mgr -p r3dh4t1! 
} function executeLoop() { echo -en "\nexecuteLoop() START_PROJECT_NUM = $START_PROJECT_NUM ; END_PROJECT_NUM=$END_PROJECT_NUM" >> $LOG_FILE for (( c=$START_PROJECT_NUM; c<=$END_PROJECT_NUM; c++ )) do GUID=$c OCP_USERNAME=user$c executeAnsibleViaLocalhost done } function executeAnsibleViaLocalhost() { GUID=$PROJECT_PREFIX$GUID echo -en "\n\nexecuteAnsibleViaLocalhost(): Provisioning project with GUID = $GUID and OCP_USERNAME = $OCP_USERNAME\n" >> $LOG_FILE ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \ -e"ANSIBLE_REPO_PATH=`pwd`" \ -e"ocp_username=${OCP_USERNAME}" \ -e"ocp_workload=${WORKLOAD}" \ -e"guid=${GUID}" \ -e"ocp_user_needs_quota=True" \ -e"ocp_domain=$OCP_DOMAIN" \ -e"ACTION=create" >> $LOG_FILE if [ $? -ne 0 ]; then echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n " >> $LOG_FILE echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n " exit 1; fi } if [ "x$HELP" == "xtrue" ]; then help exit 0 fi cd $PATH_TO_AAD_ROOT ensurePreReqs login executeLoop ansible/roles/ocp-workload-rhte-mw-msa-orchestration/tasks/pre_workload.yml
@@ -1,19 +1,12 @@ --- # - name: Add user to developer group (allowed to create projects) # shell: "oadm groups add-users {{item}} {{ocp_username}}" # register: groupadd_register # with_items: "{{ocp_user_groups}}" # when: ocp_username is defined and ocp_user_groups is defined # # - name: test that command worked # debug: # var: groupadd_register # verbosity: 2 - name: define ocp_project set_fact: ocp_project: "{{ocp_username}}-{{lab_name}}" - name: Create user Quota - clusterresourcequota shell: | oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \ oc create clusterquota clusterquota-"{{ocp_project}}" \ --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \ --hard requests.cpu="{{quota_requests_cpu}}" \ --hard limits.cpu="{{quota_limits_cpu}}" \ @@ -26,6 +19,7 @@ --hard secrets="{{quota_secrets}}" \ --hard requests.storage="{{quota_requests_storage}}" ignore_errors: true when: ocp_user_needs_quota|d(False)|bool - name: pre_workload Tasks Complete debug: ansible/roles/ocp-workload-rhte-mw-msa-orchestration/tasks/remove_workload.yml
@@ -5,10 +5,10 @@ - name: define ocp_project set_fact: ocp_project: "{{lab_name}}-{{ocp_username}}" ocp_project: "{{ocp_username}}-{{lab_name}}" - name: Remove user Quota - oc delete clusterresourcequota "clusterquota-{{ocp_username}}-{{guid}}" shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}} - name: Remove user Quota - oc delete clusterresourcequota "clusterquota-{{ocp_project}}" shell: oc delete clusterresourcequota clusterquota-{{ocp_project}} ignore_errors: true - name: Remove any lingering tmp files ansible/roles/ocp-workload-rhte-mw-msa-orchestration/tasks/workload.yml
@@ -1,7 +1,18 @@ --- - name: define ocp_project set_fact: ocp_project: "{{lab_name}}-{{ocp_username}}" ocp_project: "{{ocp_username}}-{{lab_name}}" - name: check if user is cluster admin shell: "oc get project default" register: default_project_result ignore_errors: true changed_when: false - fail: msg: "User does not have cluster-admin rights to install Istio" when: default_project_result is failed - name: "Create project for workload {{ocp_project}}" shell: "oc new-project {{ocp_project}}" @@ -15,15 +26,6 @@ # ####### lab specific tasks ############## # # Components: # 1) enmasse ( configured via configmaps ) # 2) embedded spring-boot based kie-server w/ postgresql # 3) backend business services # 4) rh-sso # 5) nexus #################################################### ansible/roles/ocp-workload-rhte-mw-op-intel/defaults/main.yml
@@ -66,6 +66,7 @@ kafkatopic2_yaml: "{{strimzi_url}}/topic/kafka-topic2.yaml" kafkauser_yaml: "{{strimzi_url}}/user/kafka-user.yaml" helloword_yaml: "{{strimzi_url}}/hello-world/deployment.yaml" kafkaconnect_yaml: "{{strimzi_url}}/kafka-connect/kafka-connect.yaml" # Corresponds to Spark and Uber Data templates spark_yaml: "https://raw.githubusercontent.com/gpe-mw-training/operational_intelligence/master/templates/resources.yaml" ansible/roles/ocp-workload-rhte-mw-op-intel/tasks/strimzi_workload.yml
@@ -103,7 +103,7 @@ ignore_errors: true - name: Create Kafka connect deployment shell: "oc apply -f examples/kafka-connect/kafka-connect.yaml -n {{lab_1_name}}" shell: "oc apply -f {{ kafkaconnect_yaml }} -n {{lab_1_name}}" - name: Strimzi Installation Tasks Complete debug: