---
- import_playbook: ../../setup_runtime.yml

- name: Backup event log of the user
  hosts: localhost
  connection: local
  gather_facts: false
  become: no
  environment:
    AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}"
    AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}"
    AWS_DEFAULT_REGION: "{{ aws_region_final | d(aws_region) }}"
  tasks:
    - name: Get fact for stack
      cloudformation_facts:
        stack_name: "{{ project_tag }}"
      register: stack_facts

    - when: project_tag in stack_facts.ansible_facts.cloudformation
      block:
        - name: Grab and set stack creation_time
          set_fact:
            stack_creation_time: >-
              {{ stack_facts.ansible_facts.cloudformation[project_tag].stack_description.creation_time }}
            stack_status: >-
              {{ stack_facts.ansible_facts.cloudformation[project_tag].stack_description.stack_status }}

        - when: stack_status == "CREATE_COMPLETE"
          block:
            - name: Grab student user
              set_fact:
                student_user: >-
                  {{ stack_facts.ansible_facts.cloudformation[project_tag].stack_outputs.StudentUser }}

            - name: Backup event log for user
              shell: >-
                aws cloudtrail lookup-events
                --lookup-attributes AttributeKey=Username,AttributeValue={{ student_user }}
                --start-time {{ stack_creation_time }}
                > {{ output_dir }}/{{ env_type }}_{{ guid }}_cloudtrail_event_log.json
              when: email is defined or owner is defined

- name: Build inventory
  hosts: localhost
  connection: local
  gather_facts: false
  become: no
  tasks:
    - when: cloud_provider == 'ec2'
      block:
        - name: Run infra-ec2-create-inventory Role
          include_role:
            name: infra-ec2-create-inventory

        - name: Run Common SSH Config Generator Role
          include_role:
            name: infra-common-ssh-config-generate
          when: "'bastions' in groups"

- name: Start clientVM and cluster instances if they are stopped
  hosts: localhost
  connection: local
  gather_facts: false
  become: no
  environment:
    AWS_ACCESS_KEY_ID: "{{ aws_access_key_id }}"
    AWS_SECRET_ACCESS_KEY: "{{ aws_secret_access_key }}"
    AWS_DEFAULT_REGION: "{{ aws_region_final | d(aws_region) }}"
  tasks:
    - when:
        - stack_status is defined
        - stack_status == 'CREATE_COMPLETE'
      block:
        - name: Grab the clientVM instance id
          set_fact:
            clientvm_id: "{{ hostvars[groups.bastions[0]].instance_id }}"
          when:
            - "'bastions' in groups"
            - groups.bastions | length > 0

        - name: Fail if no clientVM is present
          fail:
            msg: "No clientVM present"
          when: >-
            'bastions' not in groups
            or groups.bastions | length == 0

    - when:
        - stack_status is defined
        - stack_status == 'CREATE_COMPLETE'
      block:
        - name: Start clientVM instance
          command: "aws ec2 start-instances --instance-ids '{{ clientvm_id }}'"

        - name: Get cluster instance Ids
          command: >-
            aws ec2 describe-instances
            --filters
            "Name=tag:clusterid,Values=cluster-{{ guid }}"
            "Name=instance-state-name,Values=pending,running,shutting-down,stopping,stopped"
            --query 'Reservations[*].Instances[*].InstanceId'
            --output text
          changed_when: false
          # Sometimes deployment fails before any OCP instance is created, so keep this line commented:
          # failed_when: instanceids.stdout | trim | length == 0
          register: instanceids

        - name: Start cluster instances
          when: instanceids.stdout | trim | length > 0
          command: >-
            aws ec2 start-instances
            --instance-ids {{ instanceids.stdout | trim }}

        - name: Wait for clientVM instance
          command: "aws ec2 wait instance-running --instance-ids '{{ clientvm_id }}'"

        - name: Wait for cluster instances
          when: instanceids.stdout | trim | length > 0
          ignore_errors: true
          command: >-
            aws ec2 wait instance-running
            --filters "Name=tag:clusterid,Values=cluster-{{ guid }}"
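# The next play runs on the bastion (clientVM) and waits for an SSH connection,
# so the instances started above must be up before the destroy can proceed.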
- name: Destroy OCP 4 resources using the installer
  hosts: bastions
  gather_facts: false
  become: no
  run_once: yes
  tasks:
    - when:
        - hostvars.localhost.stack_status is defined
        - hostvars.localhost.stack_status == 'CREATE_COMPLETE'
      block:
        - name: Set facts for remote access
          set_fact:
            ansible_ssh_extra_args: >-
              {{ ansible_ssh_extra_args | d() }}
              -F {{ hostvars.localhost.output_dir }}/{{ env_type }}_{{ guid }}_ssh_conf

        - name: Wait for Linux host to be available
          wait_for_connection:
            timeout: 20

        - name: Pack an archive of everything in case something goes wrong
          become: yes
          archive:
            path: /home
            dest: /tmp/home.tar.gz

        - name: Fetch the archive
          fetch:
            flat: yes
            src: /tmp/home.tar.gz
            dest: "{{ hostvars.localhost.output_dir }}/{{ env_type }}_{{ guid }}_user_home.tar.gz"

        - name: Check if the cluster install directory exists
          stat:
            path: /home/{{ ansible_user }}/cluster-{{ guid }}
          register: statclusterdir

        - when: statclusterdir.stat.exists
          block:
            - name: Destroy terraform resources (openshift-install destroy cluster)
              command: >-
                openshift-install destroy cluster
                --dir=/home/{{ ansible_user }}/cluster-{{ guid }}/
              register: destroyr

            - name: Pack an archive of everything
              archive:
                path: /home/{{ ansible_user }}/cluster-{{ guid }}
                dest: /tmp/cluster-{{ guid }}.tar.gz

            - name: Get archive of environment target dir
              fetch:
                flat: yes
                src: /tmp/cluster-{{ guid }}.tar.gz
                dest: "{{ hostvars.localhost.output_dir }}/{{ env_type }}_{{ guid }}_cluster-{{ guid }}.tar.gz"

        - name: Mark the provisioner stack safe to delete
          set_fact:
            oktodelete: yes

- name: Delete ocp4 provisioner stack
  hosts: localhost
  connection: local
  gather_facts: false
  become: no
  tasks:
    - name: Run infra-ec2-template-destroy
      include_role:
        name: infra-ec2-template-destroy
      when:
        # Before deleting the provisioner, make sure terraform destroy ran successfully.
        - stack_status is defined
        - stack_status == 'CREATE_COMPLETE'
        - "'bastions' in groups"
        - groups.bastions | length > 0
        - hostvars[groups.bastions[0]].oktodelete

    - name: Run infra-ec2-template-destroy
      include_role:
        name: infra-ec2-template-destroy
      when:
        # Delete the stack if stack creation failed.
        - stack_status is defined
        - stack_status != 'CREATE_COMPLETE'
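# A minimal invocation sketch, assuming this playbook is saved as destroy_env.yml
# (the file name and all values below are illustrative; the variable names are
# the ones referenced above, and others such as project_tag may be derived by
# the imported ../../setup_runtime.yml):
#
#   ansible-playbook destroy_env.yml \
#     -e guid=mytest1 \
#     -e env_type=ocp4-workshop \
#     -e cloud_provider=ec2 \
#     -e aws_access_key_id=REPLACE_ME \
#     -e aws_secret_access_key=REPLACE_ME \
#     -e aws_region=us-east-1 \
#     -e output_dir=/tmp/output_dir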