Vince Power
2020-03-13 db6af5c6a60e5f02e0a831aac21e73901f8dbba3
Adding a Google Cloud Provider and the ocp4-cluster as gcp enabled (#1244)

* Adding a Google Cloud Provider and the ocp4-cluster as gcp enabled

* Making gcloud use ENV variables with a unique temp directory

* cleaning up a redundant set_fact and fixing a bug in azure_infra deploy which got exposed during the same test cycle

* making ssh keys autogenerate and add admin keys to hosts in Azure and GCP

* Ensuring a user specified key could still be used on Azure or GCP

* adding osp_ssh_config_setup.yml even though it isn't used
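
Note: the "unique temp directory" for gcloud mentioned above is implemented purely with environment variables (see gcp_infrastructure_deployment.yml below). A minimal sketch of the pattern, assuming output_dir, guid and the gcp_* variables are already set:

- name: Run an isolated gcloud command (sketch of the pattern used below)
  command: gcloud deployment-manager deployments list
  environment:
    # CLOUDSDK_CONFIG points gcloud at a per-deployment directory,
    # so concurrent runs never share credentials or cached state
    CLOUDSDK_CONFIG: "{{ output_dir }}/.gcloud-{{ guid }}"
    CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE: "{{ gcp_credentials_file }}"
    CLOUDSDK_COMPUTE_REGION: "{{ gcp_region }}"
    CLOUDSDK_CORE_PROJECT: "{{ gcp_project_id }}"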
11 files added
9 files modified
658 lines changed
ansible/cloud_providers/azure_infrastructure_deployment.yml | 17
ansible/cloud_providers/gcp_destroy_env.yml | 13
ansible/cloud_providers/gcp_infrastructure_deployment.yml | 194
ansible/cloud_providers/gcp_ssh_config_setup.yml | 3
ansible/cloud_providers/osp_ssh_config_setup.yml | 3
ansible/configs/ocp4-cluster/default_vars.yml | 5
ansible/configs/ocp4-cluster/default_vars_gcp.yml | 41
ansible/configs/ocp4-cluster/destroy_env.yml | 29
ansible/configs/ocp4-cluster/files/cloud_providers/azure_cloud_template.j2 | 5
ansible/configs/ocp4-cluster/files/cloud_providers/gcp_cloud_template.j2 | 83
ansible/configs/ocp4-cluster/post_software.yml | 5
ansible/configs/ocp4-cluster/pre_infra.yml | 7
ansible/configs/ocp4-cluster/pre_software.yml | 22
ansible/roles/host-ocp4-destroy/tasks/main.yml | 8
ansible/roles/host-ocp4-provisioner/tasks/gcp_prereqs.yml | 34
ansible/roles/infra-common-ssh-config-generate/defaults/main.yml | 1
ansible/roles/infra-gcp-create-inventory/tasks/main.yml | 84
ansible/roles/infra-gcp-template-destroy/tasks/main.yml | 83
tools/virtualenvs/azure-ansible-latest.txt | 14
tools/virtualenvs/gcp-ansible-latest.txt | 7
ansible/cloud_providers/azure_infrastructure_deployment.yml
@@ -69,14 +69,27 @@
        ssh_key: "~/.ssh/{{key_name}}.pem"
        ssh_key_data: "{{lookup('file', '~/.ssh/{{key_name}}.pub')}}"
      tags:
        - validate_azure_template
        - set_existing_ssh_key
        - must
        - create_inventory
      when: not set_env_authorized_key | bool
    - name: Get SSH public key
      set_fact:
        ssh_key: "{{output_dir}}/{{env_authorized_key}}"
        ssh_key_data: "{{lookup('file', '{{output_dir}}/{{env_authorized_key}}.pub')}}"
      tags:
        - set_generated_ssh_key
        - must
        - create_inventory
      when: set_env_authorized_key | bool
    - name: Setting windows_password variable
      set_fact:
        windows_password: "{{hostvars['localhost'].generated_windows_password}}"
-      when: windows_password is not defined
+      when:
+        - windows_password is not defined
+        - generated_windows_password is defined
    - name: Build parameter file
      copy:
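
The two mutually exclusive "Get SSH public key" tasks in the hunk above implement the user-specified-vs-generated key choice from the commit message: exactly one fires, depending on set_env_authorized_key. A condensed sketch of the same selection as a single task (same variables, inline Jinja if):

- name: Select SSH key (condensed sketch of the two tasks above)
  set_fact:
    ssh_key: "{{ (output_dir ~ '/' ~ env_authorized_key) if (set_env_authorized_key | bool) else ('~/.ssh/' ~ key_name ~ '.pem') }}"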
ansible/cloud_providers/gcp_destroy_env.yml
New file
@@ -0,0 +1,13 @@
---
- import_playbook: ../include_vars.yml
- name: Delete Infrastructure
  hosts: localhost
  connection: local
  gather_facts: False
  become: no
  tasks:
    - name: Run infra-gcp-template-destroy
      include_role:
        name: infra-gcp-template-destroy
ansible/cloud_providers/gcp_infrastructure_deployment.yml
New file
@@ -0,0 +1,194 @@
---
- name: Step 001 Deploy Infrastructure
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - step001
    - deploy_infrastructure
  tasks:
    - name: Ensure gcloud is installed
      environment:
        PATH: /usr/bin
      command: which gcloud
      register: gcloud_result
      # without this, a missing gcloud fails this task directly and the
      # explicit failure message below is never reached
      ignore_errors: true
    - name: Fail if gcloud not available
      fail:
        msg: you need the Google Cloud SDK installed
      when: gcloud_result is failed
    - name: Get SSH public key
      set_fact:
        ssh_key: "~/.ssh/{{key_name}}.pem"
        ssh_key_data: "{{lookup('file', '~/.ssh/{{key_name}}.pub')}}"
      tags:
        - set_existing_ssh_key
        - must
        - create_inventory
      when: not set_env_authorized_key | bool
    - name: Get SSH public key
      set_fact:
        ssh_key: "{{output_dir}}/{{env_authorized_key}}"
        ssh_key_data: "{{lookup('file', '{{output_dir}}/{{env_authorized_key}}.pub')}}"
      tags:
        - set_generated_ssh_key
        - must
        - create_inventory
      when: set_env_authorized_key | bool
    - name: Set the destination for the template
      set_fact:
        t_dest: "{{output_dir}}/{{ env_type }}.{{ guid }}.{{cloud_provider}}_cloud_template"
      tags:
        - gcp_infrastructure_deployment
        - validate_gcp_template
        - gen_gcp_template
    - name: Generate GCP deployment manager template
      template:
        src: "../configs/{{ env_type }}/files/cloud_providers/{{cloud_provider}}_cloud_template.j2"
        dest: "{{t_dest}}"
      tags:
        - gcp_infrastructure_deployment
        - validate_gcp_template
        - gen_gcp_template
    - name: Starting Google deployment-manager with template
      environment:
        PATH: /usr/bin
        CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE: "{{ gcp_credentials_file }}"
        CLOUDSDK_COMPUTE_REGION: "{{ gcp_region }}"
        CLOUDSDK_CONFIG: "{{ output_dir }}/.gcloud-{{ guid }}"
        CLOUDSDK_CORE_PROJECT: "{{ gcp_project_id }}"
      command: >-
        gcloud deployment-manager deployments
        create "{{ project_tag }}-base"
        --config "{{ t_dest }}"
      register: gcp_deploy
      tags:
        - gcp_infrastructure_deployment
        - gcp_deployment_manager
      until: gcp_deploy is succeeded
      retries: 0
    - debug:
        var: gcp_deploy
        verbosity: 2
      tags:
        - gcp_infrastructure_deployment
    - name: Run infra-gcp-create-inventory Role
      import_role:
        name: infra-gcp-create-inventory
    - name: Fetch DNS zone Info
      gcp_dns_managed_zone_facts:
        dns_name: '{{ cluster_dns_zone + "."}}'
        project: "{{ gcp_project_id }}"
        auth_kind: "{{ gcp_auth_type }}"
        service_account_file: "{{ gcp_credentials_file }}"
      register: gcp_managed_zone
      tags:
        - gcp_infrastructure_deployment
      when:
        - HostedZoneId != "none"
    - name: Add delegation for NS to the main DNSZone
      gcp_dns_resource_record_set:
        project: "{{ gcp_project_id }}"
        auth_kind: "{{ gcp_auth_type }}"
        service_account_file: "{{ gcp_credentials_file }}"
        managed_zone: "{{ gcp_managed_zone.resources[0] }}"
        name: '{{ guid + "." + cluster_dns_zone + "."}}'
        type: NS
        ttl: 600
        target:
          - "{{ gcp_managed_zone.resources[0].nameServers[0] }}"
          - "{{ gcp_managed_zone.resources[0].nameServers[1] }}"
          - "{{ gcp_managed_zone.resources[0].nameServers[2] }}"
          - "{{ gcp_managed_zone.resources[0].nameServers[3] }}"
        state: present
      tags:
        - gcp_infrastructure_deployment
      when:
        - env_type != "ocp4-cluster"
        - HostedZoneId != "none"
    - name: Add bastion entry to the main DNSZone
      gcp_dns_resource_record_set:
        project: "{{ gcp_project_id }}"
        auth_kind: "{{ gcp_auth_type }}"
        service_account_file: "{{ gcp_credentials_file }}"
        managed_zone: "{{ gcp_managed_zone.resources[0] }}"
        name: '{{ "bastion." + guid + "." + cluster_dns_zone + "."}}'
        type: A
        ttl: 300
        target:
          - '{{ hostvars[item].public_ip_address }}'
        state: present
      with_items: "{{ groups['bastions'] }}"
      tags:
        - gcp_infrastructure_deployment
        - gcp_set_bastion_dns
      when:
        - env_type == "ocp4-cluster"
        - groups["bastions"] is defined
# Copy env_vars variables from the config to all hosts
- import_playbook: ../include_vars.yml
# TODO: use common infra role instead of this playbook
- name: Configure local ssh config for bastion proxy use
  import_playbook: "{{cloud_provider}}_ssh_config_setup.yml"
  when: groups["bastions"] is defined and (groups["bastions"]|length>0)
  tags:
    - must
    - create_inventory
- name: wait_for_connection for all non-windows machines and set hostname
  hosts:
    - all:!windows:!network
  gather_facts: false
  become: true
  tags:
    - step001
    - wait_ssh
    - set_hostname
  tasks:
    - name: wait for linux host to be available
      wait_for_connection:
        timeout: 300
      register: rwait
      ignore_errors: true
    - name: restart instance if wait_for_connection failed
      become: false
      command: "gcloud compute instances reset '{{inventory_hostname}}' --zone '{{ gcp_zone }}'"
      delegate_to: localhost
      environment:
        CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE: "{{ gcp_credentials_file }}"
        CLOUDSDK_COMPUTE_REGION: "{{ gcp_region }}"
        CLOUDSDK_CONFIG: "{{ output_dir }}/.gcloud-{{ guid }}"
        CLOUDSDK_CORE_PROJECT: "{{ gcp_project_id }}"
      when: rwait is failed
    - name: wait for linux host to be available (retry)
      wait_for_connection:
      when: rwait is failed
    - ping:
      register: rping
      retries: 3
      delay: 10
      until: rping is succeeded
    - name: Populate /etc/hosts
      lineinfile:
        dest: /etc/hosts
        regexp: ' {{hostvars[item].internaldns}}$'
        line: '{{hostvars[item].private_ip_address}} {{hostvars[item].internaldns}}'
      with_items: "{{ groups['all'] }}"
ansible/cloud_providers/gcp_ssh_config_setup.yml
New file
@@ -0,0 +1,3 @@
---
- name: Configure local ssh config for bastion proxy use
  import_playbook: common_ssh_config_setup.yml
ansible/cloud_providers/osp_ssh_config_setup.yml
New file
@@ -0,0 +1,3 @@
---
- name: Configure local ssh config for bastion proxy use
  import_playbook: common_ssh_config_setup.yml
ansible/configs/ocp4-cluster/default_vars.yml
@@ -203,3 +203,8 @@
# pull secret needs to be defined in secrets
#ocp4_pull_secret: ''
# admin keys
opentlc_admin_pub_keys:
  - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3Avw03Dmh1R2QWQ4CV7JgEsXnHQjNhfppD5aZmh0q/64p6lW+2oNKTT7fVQcrsdmlJwrMd5apkUGrOcq0hHXQMEVZEKUmEjko2BqD5A9/zNX7apObW88bFFfgxc91lOT+e+wfCFsrr3b2SJ3+KL6nTBJV7Lf46i6z86vhiDPjqL7U9kTS+bK9ldU20vpn8h+ZAIaiafVWfjihUjhNpcUY46klixV1YcAkBGCbE+YR6RAAc6vWy0zB3YJnTUl9OFt213ofi1qjuWKVMmOxORxPKB4/JQ+hfAsCMysoVFnFYs10dWxaySK63OgY9uLNyaIwkEaVVIfcViRVm0DZfoNH gucore
  - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCvZvn+GL0wTOsAdh1ikIQoqj2Fw/RA6F14O347rgKdpkgOQpGQk1k2gM8wcla2Y1o0bPIzwlNy1oh5o9uNjZDMeDcEXWuXbu0cRBy4pVRhh8a8zAZfssnqoXHHLyPyHWpdTmgIhr0UIGYrzHrnySAnUcDp3gJuE46UEBtrlyv94cVvZf+EZUTaZ+2KjTRLoNryCn7vKoGHQBooYg1DeHLcLSRWEADUo+bP0y64+X/XTMZOAXbf8kTXocqAgfl/usbYdfLOgwU6zWuj8vxzAKuMEXS1AJSp5aeqRKlbbw40IkTmLoQIgJdb2Zt98BH/xHDe9xxhscUCfWeS37XLp75J backdoor_opentlc_key
ansible/configs/ocp4-cluster/default_vars_gcp.yml
New file
@@ -0,0 +1,41 @@
# The type of cloud provider this will be deployed to
cloud_provider: gcp
# Authentication credentials for the Google Cloud SDK, used to create the resources.
# These should be included with your secrets, but are listed here for reference
#gcp_auth_type: 'serviceaccount'
#gcp_account: 'ocp4-cluster@openshift-test.iam.gserviceaccount.com'
#gcp_credentials_file: '/home/rhpds/openshift-test-abcdef012345.json'
#gcp_project_id: 'openshift-test'
# Setting default region in GCP (gcloud compute regions list)
gcp_region: europe-west4
# Setting a zone
gcp_zone: "{{ gcp_region }}-b"
# This is the user that Ansible will use to connect to the nodes it is
# configuring from the admin/control host
ansible_user: gcpuser
remote_user: gcpuser
# The domain that you want to add DNS entries to
ocp4_base_domain: gcp.testdrive.openshift.com
# Not needed for this config but the cloud provider still wants it
# "none" is the keyword to bypass the logic
HostedZoneId: gcp-testdrive-openshift-com
# Duplicating this in the GCP file to allow a unique default
master_instance_count: 3
# Which image family to use as a base (gcloud compute images list)
gcp_rhel_image_project: rhel-cloud
gcp_rhel_image_family: rhel-7
# Machine Type for control plane (master) nodes
master_instance_type: n2-standard-4
# Machine Type for worker nodes
worker_instance_type: n2-standard-4
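
Since these are ordinary defaults, any of them can be overridden per deployment. A hedged example of an override file (hypothetical file name and values), passed with -e @my_gcp_overrides.yml:

# my_gcp_overrides.yml (hypothetical)
gcp_region: us-east1
gcp_zone: "{{ gcp_region }}-c"
worker_instance_type: n2-standard-8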
ansible/configs/ocp4-cluster/destroy_env.yml
@@ -47,11 +47,23 @@
      ssh_key: "~/.ssh/{{key_name}}.pem"
      ssh_key_data: "{{lookup('file', '~/.ssh/{{key_name}}.pub')}}"
    tags:
      - validate_azure_template
      - set_existing_ssh_key
      - must
      - create_inventory
    when:
-    - cloud_provider != 'osp'
+      - not set_env_authorized_key | bool
+      - cloud_provider != 'osp'
  - name: Get SSH public key
    set_fact:
      ssh_key: "{{output_dir}}/{{env_authorized_key}}"
      ssh_key_data: "{{lookup('file', '{{output_dir}}/{{env_authorized_key}}.pub')}}"
    tags:
      - set_generated_ssh_key
      - must
      - create_inventory
    when:
      - set_env_authorized_key | bool
      - cloud_provider != 'osp'
  - name: Run infra-azure-create-inventory Role
    include_role:
@@ -65,10 +77,9 @@
    when:
    - cloud_provider == 'gcp'
# TODO: use common infra role instead of this playbook
- name: Configure local ssh config for bastion proxy use
-  import_playbook: ../../cloud_providers/azure_ssh_config_setup.yml
+  import_playbook: ../../cloud_providers/{{ cloud_provider }}_ssh_config_setup.yml
  when:
    - cloud_provider != 'osp'
    - groups["bastions"] is defined
@@ -79,12 +90,20 @@
- name: Having the OpenShift installer cleanup what it did
  hosts: bastions
  gather_facts: false
  become: false
  tasks:
  - name: test the bastion host is available if not skip host-ocp4-destroy
    wait_for_connection:
      timeout: 60
    register: bwait
    ignore_errors: true
  - name: Call Role to destroy the OpenShift cluster
    include_role:
      name: host-ocp4-destroy
-    when: cloud_provider !='osp'
+    when:
+      - cloud_provider !='osp'
+      - bwait is successful
- name: Import default azure destroy playbook
  import_playbook: "../../cloud_providers/{{ cloud_provider }}_destroy_env.yml"
ansible/configs/ocp4-cluster/files/cloud_providers/azure_cloud_template.j2
@@ -140,6 +140,11 @@
            "publicKeys": [ {
              "path": "[concat('/home/', parameters('adminUsername'), '/.ssh/authorized_keys')]",
              "keyData": "[parameters('sshKeyData')]"
{% for adminkey in opentlc_admin_pub_keys %}
            }, {
              "path": "[concat('/home/', parameters('adminUsername'), '/.ssh/authorized_keys')]",
              "keyData": "{{ adminkey }}"
{% endfor %}
            } ]
          }
        },
ansible/configs/ocp4-cluster/files/cloud_providers/gcp_cloud_template.j2
New file
@@ -0,0 +1,83 @@
{% set network = project_tag + "-network" %}
{% set subnet0 = project_tag + "-subnet0" %}
resources:
{# NETWORK #}
- name: {{ network }}
  type: compute.v1.network
  properties:
    autoCreateSubnetworks: false
- name: {{ subnet0 }}
  type: compute.v1.subnetwork
  properties:
    ipCidrRange: 10.254.0.0/24
    network: $(ref.{{ network }}.selfLink)
    region: {{ gcp_region }}
{# NETWORK #}
{# FIREWALL #}
- name: {{ project_tag + 'fw' }}
  type: compute.v1.firewall
  properties:
    network: $(ref.{{ network }}.selfLink)
    sourceRanges: [ "0.0.0.0/0" ]
    allowed:
    - IPProtocol: TCP
      ports: [ "0-65535" ]
    - IPProtocol: UDP
      ports: [ "0-65535" ]
    - IPProtocol: ICMP
{# FIREWALL #}
{# INSTANCES #}
- name: {{ project_tag + '-vm' }}
  type: compute.v1.instance
  properties:
    zone: {{ gcp_zone }}
    machineType: zones/{{ gcp_zone }}/machineTypes/n1-standard-2
    metadata:
      items:
        - key: ssh-keys
          value: "{{ remote_user }}:{{ ssh_key_data }} {% for adminkey in opentlc_admin_pub_keys %}\n{{ remote_user }}:{{ adminkey }} {% endfor %}"
    disks:
    - deviceName: boot
      type: PERSISTENT
      boot: true
      autoDelete: true
      initializeParams:
        diskName: {{ project_tag + '-vm-disk' }}
        sourceImage: https://www.googleapis.com/compute/v1/projects/{{ gcp_rhel_image_project }}/global/images/family/{{ gcp_rhel_image_family }}
    networkInterfaces:
    - network: $(ref.{{ network }}.selfLink)
      subnetwork: $(ref.{{ subnet0 }}.selfLink)
      networkIP: 10.254.0.123
      accessConfigs:
      - name: External NAT
        type: ONE_TO_ONE_NAT
    labels:
      project: {{ project_tag }}
      ansiblegroup: bastions
      ostype: linux
    serviceAccounts:
    - email: default
      scopes:
        - 'https://www.googleapis.com/auth/compute'
        - 'https://www.googleapis.com/auth/devstorage.read_only'
        - 'https://www.googleapis.com/auth/logging.write'
        - 'https://www.googleapis.com/auth/monitoring.write'
        - 'https://www.googleapis.com/auth/servicecontrol'
        - 'https://www.googleapis.com/auth/service.management'
        - 'https://www.googleapis.com/auth/trace.append'
        - 'https://www.googleapis.com/auth/userinfo.email'
{# INSTANCES #}
outputs:
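
GCP consumes the ssh-keys metadata item as one user:key entry per line, which is why the Jinja loop above joins the generated key and every admin key with \n. A sketch of the rendered value (truncated, hypothetical key material):

metadata:
  items:
    - key: ssh-keys
      value: "gcpuser:ssh-rsa AAAA...generated \ngcpuser:ssh-rsa AAAA...admin "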
ansible/configs/ocp4-cluster/post_software.yml
@@ -254,6 +254,9 @@
  gather_facts: false
  become: false
  tasks:
  - name: Store bastion hostname as a fact
    set_fact:
      bastion_hostname: "{{groups['bastions'].0 }}"
  - name: print out user.info
    debug:
      msg: "{{ item }}"
@@ -261,7 +264,7 @@
    - "user.info: You can access your bastion via SSH:"
    - "user.info: ssh {{ student_name }}@bastion.{{ guid }}.{{ cluster_dns_zone }}"
    - "user.info: "
    - "user.info: Make sure you use the username '{{ student_name }}' and the password '{{ hostvars['bastion']['student_password'] }}' when prompted."
    - "user.info: Make sure you use the username '{{ student_name }}' and the password '{{ hostvars[bastion_hostname]['student_password'] }}' when prompted."
- name: Step 003.6 Tell CloudForms we are done
  hosts: localhost
ansible/configs/ocp4-cluster/pre_infra.yml
@@ -8,6 +8,12 @@
  tasks:
    - debug:
        msg: "Step 000 Pre Infrastructure"
    - import_role:
        name: infra-local-create-ssh_key
      when: set_env_authorized_key | bool
      tags:
        - generate_env_keys
    - name: Ensure variables are set
      assert:
        that: "{{ item.0 }}"
@@ -24,3 +30,4 @@
      - name: set_fact generated_windows_password (just generated)
        set_fact:
          generated_windows_password: "{{ password_gen_r.stdout }}"
ansible/configs/ocp4-cluster/pre_software.yml
@@ -1,30 +1,14 @@
---
-- name: Step 003 - Create env key
+- name: Step 003 - Pre Software
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
  - step003
-  - generate_env_keys
  tasks:
-  - name: Generate SSH keys
-    shell: ssh-keygen -b 2048 -t rsa -f "{{output_dir}}/{{env_authorized_key}}" -q -N ""
-    args:
-      creates: "{{output_dir}}/{{env_authorized_key}}"
-    when: set_env_authorized_key | bool
-  - name: fix permission
-    file:
-      path: "{{output_dir}}/{{env_authorized_key}}"
-      mode: 0400
-    when: set_env_authorized_key | bool
-  - name: Generate SSH pub key
-    shell: ssh-keygen -y -f "{{output_dir}}/{{env_authorized_key}}" > "{{output_dir}}/{{env_authorized_key}}.pub"
-    args:
-      creates: "{{output_dir}}/{{env_authorized_key}}.pub"
-    when: set_env_authorized_key | bool
+    - debug:
+        msg: "Step 003 - Pre Software"
- name: Configure all hosts with Repositories, Common Files and Set environment key
  hosts:
ansible/roles/host-ocp4-destroy/tasks/main.yml
@@ -3,10 +3,10 @@
    path: /tmp/deployinprogress
    state: touch
-- name: stat if the installer exists
+- name: stat if there is a cluster installed
  stat:
-    path: "/usr/bin/openshift-install"
-  register: installerfile
+    path: "/home/{{ ansible_user }}/{{ cluster_name }}/metadata.json"
+  register: terraformfile
- name: Run openshift-installer destroy cluster
  become: no
@@ -14,7 +14,7 @@
  - run_installer
  command: openshift-install destroy cluster --dir=/home/{{ ansible_user }}/{{ cluster_name }}
  async: "{{ 2 * 60 * 60 }}"
-  when: installerfile.stat.exists
+  when: terraformfile.stat.exists
- name: Delete deployinprogress lock file
  file:
ansible/roles/host-ocp4-provisioner/tasks/gcp_prereqs.yml
New file
@@ -0,0 +1,34 @@
---
- name: Creating Google Cloud SDK repo
  blockinfile:
    path: /etc/yum.repos.d/google-cloud-sdk.repo
    create: yes
    block: |-
      [google-cloud-sdk]
      name=Google Cloud SDK
      baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el7-x86_64
      enabled=1
      gpgcheck=1
      repo_gpgcheck=1
      gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
             https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
- name: Install Google Cloud SDK
  package:
    name: google-cloud-sdk
- name: make the base directory
  file:
    path: "/home/{{ ansible_user }}/.gcp"
    mode: 0700
    owner: "{{ ansible_user }}"
    state: directory
- name: Create key file for Google Cloud SDK
  copy:
    src: "{{ gcp_credentials_file }}"
    dest: "/home/{{ ansible_user }}/.gcp/osServiceAccount.json"
    mode: 0600
    owner: "{{ ansible_user }}"
  become: no
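
This task only stages the credential file; nothing in this file runs gcloud auth on the bastion. If a login were needed later, the staged key could be activated as below (a sketch of an assumed follow-up, not part of this change):

- name: Activate the staged service account (hypothetical follow-up task)
  become: no
  command: >-
    gcloud auth activate-service-account
    --key-file /home/{{ ansible_user }}/.gcp/osServiceAccount.json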
ansible/roles/infra-common-ssh-config-generate/defaults/main.yml
@@ -4,3 +4,4 @@
  ec2: ec2-user
  azure: azure
  osp: cloud-user
  gcp: gcpuser
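
The new gcp entry extends this role's provider-to-login-user map (the dict's name sits above the hunk, so it is not shown). A sketch of how such a map is typically consumed, using remote_users_map as a stand-in name (an assumption, not the role's real variable):

- name: Pick the login user for the current cloud provider (sketch)
  set_fact:
    remote_user: "{{ remote_users_map[cloud_provider] | default('ec2-user') }}"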
ansible/roles/infra-gcp-create-inventory/tasks/main.yml
New file
@@ -0,0 +1,84 @@
# Setting the stack_tag
- set_fact:
    stack_tag: "{{env_type | replace('-', '_')}}_{{guid}}"
  tags:
    - create_inventory
    - must
# Fetching the full VM list once saves us API calls.
- name: Get list of VMs
  gcp_compute_instance_facts:
    auth_kind: "{{ gcp_auth_type }}"
    project: "{{ gcp_project_id }}"
    service_account_file: "{{ gcp_credentials_file }}"
    zone: "{{ gcp_zone }}"
    filters:
      - labels.project eq "{{ project_tag }}"
    scopes: ["https://www.googleapis.com/auth/compute.readonly"]
  register: instance_list
  tags:
    - create_inventory
    - must
- debug:
    var: instance_list.resources
    verbosity: 2
  tags:
    - create_inventory
    - must
- name: Build inventory
  add_host:
    name: "{{item.name}}"
    shortname: "{{item.tags.Name|default(item.name)}}"
    groups:
      - "tag_Project_{{stack_tag}}"
      - "tag_{{stack_tag}}_{{item.labels.ansiblegroup | default('unknowns')}}"
      - "tag_{{stack_tag}}_ostype_{{item.labels.ostype | default('unknown')}}"
      - "{{item.labels.ostype | default('unknowns')}}"
      - "{{ 'newnodes' if (item.labels.newnode|d()|bool) else 'all'}}"
    ansible_user: "{{ remote_user }}"
    remote_user: "{{ remote_user | d('gcp') }}"
    ansible_ssh_private_key_file: "{{ssh_key}}"
    key_name: "{{key_name}}"
    state: "{{item.status|d('unknown')}}"
    internaldns: "{{item.networkInterfaces[0].networkIP | d(item.name)}}"
    instance_id: "{{ item.id | d('unknown')}}"
    region: "{{gcp_region}}"
    zone: "{{gcp_zone}}"
    public_dns_name: "{{item.networkInterfaces[0].accessConfigs[0].natIP}}"
    private_dns_name: "{{item.name}}"
    private_ip_address: "{{item.networkInterfaces[0].networkIP}}"
    public_ip_address: "{{item.networkInterfaces[0].accessConfigs[0].natIP}}"
    placement: "{{item.zone}}"
    image_id: "{{item.disks[0].source|d('unknown')}}"
    ansible_ssh_extra_args: "-o StrictHostKeyChecking=no"
    instance_canonical_name: "{{ item.name }}"
  with_items: "{{ instance_list.resources }}"
  loop_control:
    label: "{{ item.name }}"
  tags:
    - create_inventory
    - must
# AnsibleGroup tag can have several comma-separated values. Ex: activedirectories,windows
- add_host:
    name: "{{item.name}}"
    groups: "{{item.labels['ansiblegroup']}}"
  with_items: "{{instance_list.resources}}"
  loop_control:
    label: "{{ item.name }}"
  tags:
    - create_inventory
    - must
- name: debug hostvars
  debug:
    var: hostvars
    verbosity: 2
- name: debug groups
  debug:
    var: groups
    verbosity: 2
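
Because add_host sorts each VM into groups derived from its labels, later plays can address machines by role; the bastion DNS task in gcp_infrastructure_deployment.yml above iterates groups['bastions'] the same way. A minimal usage sketch:

- name: Show the public IP of every discovered bastion (usage sketch)
  debug:
    msg: "{{ hostvars[item].public_ip_address }}"
  with_items: "{{ groups['bastions'] | default([]) }}"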
ansible/roles/infra-gcp-template-destroy/tasks/main.yml
New file
@@ -0,0 +1,83 @@
---
- name: Find managed_zone
  gcp_dns_managed_zone_facts:
    dns_name: '{{ cluster_dns_zone + "."}}'
    project: "{{ gcp_project_id }}"
    auth_kind: "{{ gcp_auth_type }}"
    service_account_file: "{{ gcp_credentials_file }}"
  register: gcp_managed_zone
  when:
    - HostedZoneId != "none"
  tags:
    - destroying
    - destroy_cloud_deployment
    - destroy_gcp_deployment
- name: Remove delegation for NS from the main DNSZone
  gcp_dns_resource_record_set:
    project: "{{ gcp_project_id }}"
    auth_kind: "{{ gcp_auth_type }}"
    service_account_file: "{{ gcp_credentials_file }}"
    managed_zone: "{{ gcp_managed_zone.resources[0] }}"
    name: '{{ guid + "." + cluster_dns_zone + "."}}'
    type: NS
    state: absent
  when:
    - HostedZoneId != "none"
  tags:
    - destroying
    - destroy_cloud_deployment
    - destroy_gcp_deployment
- name: Remove bastion entry from the main DNSZone
  gcp_dns_resource_record_set:
    project: "{{ gcp_project_id }}"
    auth_kind: "{{ gcp_auth_type }}"
    service_account_file: "{{ gcp_credentials_file }}"
    managed_zone: "{{ gcp_managed_zone.resources[0] }}"
    name: '{{ "bastion." + guid + "." + cluster_dns_zone + "."}}'
    type: A
    state: absent
  when:
    - HostedZoneId != "none"
  tags:
    - destroying
    - destroy_cloud_deployment
    - destroy_gcp_deployment
- name: Destroy method deployment-manager
  block:
    - name: Delete the deployment
      command: "gcloud deployment-manager deployments delete {{ project_tag }}-base -q"
      environment:
        CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE: "{{ gcp_credentials_file }}"
        CLOUDSDK_COMPUTE_REGION: "{{ gcp_region }}"
        CLOUDSDK_CONFIG: "{{ output_dir }}/.gcloud-{{ guid }}"
        CLOUDSDK_CORE_PROJECT: "{{ gcp_project_id }}"
      tags:
        - destroying
        - destroy_cloud_deployment
        - destroy_gcp_deployment
      register: gcp_result
      until: gcp_result is succeeded
      retries: 6
      delay: 10
      ignore_errors: yes
      when:
        - cloud_provider == "gcp"
    - name: report error
      fail:
        msg: "FAIL {{ gcp_project_id }} GCP delete deployment"
      when:
        - gcp_result is failed
        - cloud_provider == "gcp"
      tags:
        - destroying
        - destroy_cloud_deployment
        - destroy_gcp_deployment
tools/virtualenvs/azure-ansible-latest.txt
New file
@@ -0,0 +1,14 @@
ansible[azure]
boto
boto3
click
colorama
cryptography
Jinja2
keepercommander
pbr
packaging
requests
pytest
pycryptodomex
selinux
tools/virtualenvs/gcp-ansible-latest.txt
New file
@@ -0,0 +1,7 @@
ansible
boto
boto3
google-auth
pbr
requests
selinux
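
These requirements files feed the deployer virtualenvs. A hedged sketch of building one with Ansible's pip module (the virtualenv path is an assumption; the requirements path is relative to the repo root):

- name: Build the GCP deployer virtualenv (usage sketch)
  pip:
    requirements: "{{ playbook_dir }}/tools/virtualenvs/gcp-ansible-latest.txt"
    virtualenv: ~/.virtualenvs/gcp-ansible-latest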