Guillaume Coré
2020-01-13 d154d4062c893621e91ea19da50076fe656276e1
Osp migration bp (#982)

* Create osp-migration config

This config can deploy generated HEAT templates

* Update three-tier-app sample-vars

- add red cluster example

* Fix bastion hostname, use the short one

* remove obsolete user.info

* Fix typo
13 files added
4 files modified
621 lines changed
ansible/configs/osp-migration/README.adoc 8
ansible/configs/osp-migration/destroy_env.yml 67
ansible/configs/osp-migration/dns_loop.yml 34
ansible/configs/osp-migration/env_vars.yml 107
ansible/configs/osp-migration/infra.yml 192
ansible/configs/osp-migration/instance_loop.yml 12
ansible/configs/osp-migration/post_infra.yml 30
ansible/configs/osp-migration/post_software.yml 10
ansible/configs/osp-migration/pre_infra.yml 10
ansible/configs/osp-migration/pre_software.yml 11
ansible/configs/osp-migration/requirements.yml 6
ansible/configs/osp-migration/sample_vars.yml 28
ansible/configs/osp-migration/software.yml 5
ansible/configs/three-tier-app/sample_vars_osp.yml 19
ansible/include_vars.yml 2
ansible/main.yml 9
ansible/roles/infra-osp-create-inventory/tasks/main.yml 71
ansible/configs/osp-migration/README.adoc
New file
@@ -0,0 +1,8 @@
= OSP migration config
This config has its own infra implementation (infra.yml) and does not use a common cloud-provider deployment.
It expects the name of a set of generated Heat templates to be passed as the "project" variable.
See link:sample_vars.yml[sample_vars.yml].
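For orientation, the os_stack tasks in infra.yml reference the generated templates through the "project" variable. A minimal sketch of the expected layout and variables follows; the guid value is illustrative, the other values mirror sample_vars.yml:

# Template layout assumed by the os_stack tasks in infra.yml:
#   files/templates/<project>/stack_admin.yaml   # creates the project and its API user
#   files/templates/<project>/stack_user.yaml    # creates in-project resources; outputs openstack_project_infra_key
#
# Minimal extra vars:
env_type: osp-migration
cloud_provider: osp
guid: test01                                       # any short, unique id (illustrative)
project: dev-ansible-tower-implementation-3.3-v6   # name of the generated Heat template set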
ansible/configs/osp-migration/destroy_env.yml
New file
@@ -0,0 +1,67 @@
---
- import_playbook: ../../setup_runtime.yml
- name: Teardown OpenStack project and resources
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  environment:
    OS_AUTH_URL: "{{ osp_auth_url }}"
    OS_USERNAME: "{{ osp_auth_username }}"
    OS_PASSWORD: "{{ osp_auth_password }}"
    OS_PROJECT_NAME: "admin"
    OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
    OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
  tasks:
    - set_fact:
        osp_project_name: >-
          {{ project
          | replace('-bp','')
          | replace('OPTLC', 'OTLC-LAB-' + student_name)
          }}-{{ guid }}
    - name: Check if project exists
      environment:
        OS_AUTH_URL: "{{ osp_auth_url }}"
        OS_USERNAME: "{{ osp_auth_username }}"
        OS_PASSWORD: "{{ osp_auth_password }}"
        OS_PROJECT_NAME: "admin"
        OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
        OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
      os_project_info:
        name: "{{ osp_project_name }}"
      register: project_exists
    - meta: end_play
      when: project_exists.openstack_projects | length == 0
    - name: Gather instance facts
      environment:
        OS_PROJECT_NAME: "{{ osp_project_name }}"
      os_server_facts:
        server: "*"
        filters:
          metadata:
            guid: "{{ guid }}"
      register: r_osp_facts
    - name: Delete objects inside the project
      os_stack:
        name: "create-objects-{{ osp_project_name }}"
        state: absent
    - name: Delete project and unassign permission
      os_stack:
        name: "create-project-{{ osp_project_name }}"
        state: absent
    - name: Iterate over all instances and delete DNS entries
      loop: "{{ r_osp_facts.ansible_facts.openstack_servers }}"
      loop_control:
        loop_var: _instance
      vars:
        _infra_osp_dns_default_ttl: 300
        _dns_state: absent
      include_tasks: instance_loop.yml
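To make the project-name templating above concrete, here is a small worked example with made-up values; only the filter chain comes from the playbook, while the blueprint name, student, and guid are hypothetical:

- name: Show the derived OSP project name (illustrative values only)
  vars:
    project: OPTLC-three-tier-app-bp   # hypothetical blueprint name
    student_name: lab-user
    guid: abcd
  debug:
    msg: >-
      {{ project
      | replace('-bp','')
      | replace('OPTLC', 'OTLC-LAB-' + student_name)
      }}-{{ guid }}
  # -> "OTLC-LAB-lab-user-three-tier-app-abcd"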
ansible/configs/osp-migration/dns_loop.yml
New file
@@ -0,0 +1,34 @@
---
- when: _dns_state == 'present'
  block:
    - debug:
        msg: >-
          The floating IP for {{ _dns }}
          is {{ _instance.public_v4 }}
    - name: DNS entry ({{ _dns_state | default('present') }})
      nsupdate:
        server: "{{ osp_cluster_dns_server }}"
        zone: "{{ osp_cluster_dns_zone }}"
        #zone: rhpds.opentlc.com
        record: "{{ _dns }}"
        type: A
        ttl: "{{ _infra_osp_dns_default_ttl }}"
        value: "{{ _instance.public_v4 }}"
        key_name: "{{ ddns_key_name }}"
        key_secret: "{{ ddns_key_secret }}"
# When state == absent, don't use r_osp_facts (should not be needed)
- when: _dns_state == 'absent'
  block:
    - name: DNS entry ({{ _dns_state | default('present') }})
      nsupdate:
        server: "{{ osp_cluster_dns_server }}"
        zone: "{{ osp_cluster_dns_zone }}"
        #zone: rhpds.opentlc.com
        record: "{{ _dns }}"
        type: A
        ttl: "{{ _infra_osp_dns_default_ttl }}"
        key_name: "{{ ddns_key_name }}"
        key_secret: "{{ ddns_key_secret }}"
        state: absent
ansible/configs/osp-migration/env_vars.yml
New file
@@ -0,0 +1,107 @@
###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
###### OR PASS as "-e" args to ansible-playbook command
# This is an account that must exist in OpenStack.
# It is used to create projects, access, Heat templates
admin_user: opentlc-mgr
# Authentication credentials for OpenStack, used to create resources.
# These should be included with your secrets, but are listed here for reference
# osp_auth_url:
# osp_auth_username:
# osp_auth_password:
# osp_auth_cloud:
# osp_auth_project_domain: #usually set to "default"
# osp_auth_user_domain: #usually set to "default"
# The output_dir holds all of the files generated during the deployment
# This includes generated Heat templates, SSH config, SSH keys
# This must be an absolute path with no variables in it (like $HOME or ~)
output_dir: /tmp/output_dir
# The name of the agnosticd config to deploy
env_type: osp-migration
# The {{ guid }} is used everywhere and it is what differentiates otherwise
# identical environments. Make this unique. Usually it is 4 characters, but
# it can be any reasonable length.
guid: mydefault
# Used to add metadata (tags) to OpenStack objects created
project_tag: "{{ env_type }}-{{ guid }}"
osp_project_name: >-
  {{ project
  | replace('-bp','')
  | replace('OPTLC', 'OTLC-LAB-' + student_name)
  }}-{{ guid }}
# Why is this config being deployed?
# Some valid values: development, ilt, production, event
purpose: development
# The type of cloud provider this will be deployed to.
# Set to osp even though this config has its own infra implementation (infra.yml),
# so the osp cloud-provider default vars are still loaded by include_vars.yml.
cloud_provider: osp
# This should be overwritten based on the user ordering the catalog item
# It will be used by the bastion-student-user role and created on the bastion
student_name: lab-user
# Enable this if you want to create a user on the bastion
# Mutually exclusive with {{ install_ipa_client }}
install_student_user: true
# Enable this if you want to use IPA for user authentication.
# Mutually exclusive with {{ install_student_user }}
install_ipa_client: false
# TODO: What does this really do besides run the role?
set_env_authorized_key: true
env_authorized_key: "{{guid}}key"
key_name: "default_key_name"
# This is the user that Ansible will use to connect to the nodes it is
# configuring from the admin/control host
ansible_user: cloud-user
remote_user: cloud-user
# Run the bastion-lite role
install_bastion: false
# FTL is used for grading and solving. It will pull in the external ftl-injector role.
# This might be enabled when we have solvers to run or graders for ILT
install_ftl: true
# FTL injector will try to install python-pip and we only have python3-pip available
# This var will force the ftl-injector role to adapt accordingly
ftl_use_python3: true
# Packages to install on all of the hosts deployed as part of the agnosticd config
# This invokes the "common" role
install_common: true
# As part of the "common" role, this causes it to do a yum update on the host
update_packages: true
# If you want DNS entries to be created automatically, choose one of these.
# Alternatively, they can both be set to false.
use_dynamic_dns: true
# This is not fully implemented yet
# use_route53: false
# The domain that you want to add DNS entries to
osp_cluster_dns_zone: blue.osp.opentlc.com
# The dynamic DNS server you will add entries to.
# NOTE: This is only applicable when {{ use_dynamic_dns}} is true
osp_cluster_dns_server: ddns01.opentlc.com
# Whether to wait for an ack from the DNS servers before continuing
wait_for_dns: true
# Authentication for DDNS
# ddns_key_name:
# ddns_key_secret:
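As the comments above note, the OpenStack and DDNS credentials are expected to come from a separate secrets file. A sketch of its shape follows; the variable names match the ones referenced in this config, and every value is a placeholder:

# Example secrets file, passed with: -e @secrets.yml   (placeholders only)
osp_auth_url: https://api.example.com:13000/v3
osp_auth_username: admin
osp_auth_password: CHANGEME
osp_auth_project_domain: default
osp_auth_user_domain: default
ddns_key_name: example-key
ddns_key_secret: CHANGEME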
ansible/configs/osp-migration/infra.yml
New file
@@ -0,0 +1,192 @@
---
- hosts: localhost
  gather_facts: false
  vars:
    api_user: "{{ guid }}"
  tasks:
    - set_fact:
        api_pass: "{{ lookup('password', '/dev/null length=20 chars=ascii_letters') }}"
    - name: Check if project exists
      environment:
        OS_AUTH_URL: "{{ osp_auth_url }}"
        OS_USERNAME: "{{ osp_auth_username }}"
        OS_PASSWORD: "{{ osp_auth_password }}"
        OS_PROJECT_NAME: "admin"
        OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
        OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
      os_project_info:
        name: "{{ osp_project_name }}"
      register: project_exists
    - fail:
        msg: Project exists, can't continue
      when: project_exists.openstack_projects
    - name: Create project and assign permission
      register: stack_admin_output
      environment:
        OS_AUTH_URL: "{{ osp_auth_url }}"
        OS_USERNAME: "{{ osp_auth_username }}"
        OS_PASSWORD: "{{ osp_auth_password }}"
        OS_PROJECT_NAME: "admin"
        OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
        OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
      os_stack:
        name: "create-project-{{osp_project_name}}"
        template: "files/templates/{{ project }}/stack_admin.yaml"
        parameters:
          project_name: "{{ osp_project_name }}"
          project_guid: "{{ guid }}"
          project_description: "created:{{ ansible_date_time.epoch }}"
          project_api_user: "{{ guid }}"
          project_api_pass: "{{ api_pass }}"
          blueprint: "{{ project }}"
    # when deleting we need to be able to authenticate using that project
    - name: Grant access to admin account to the new project
      environment:
        OS_AUTH_URL: "{{ osp_auth_url }}"
        OS_USERNAME: "{{ osp_auth_username }}"
        OS_PASSWORD: "{{ osp_auth_password }}"
        OS_PROJECT_NAME: "admin"
        OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
        OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
      os_user_role:
        state: present
        user: "{{ osp_auth_username }}"
        role: "admin"
        project: "{{ osp_project_name }}"
    - name: Create objects inside the project
      register: stack_user_output
      environment:
        OS_AUTH_URL: "{{ osp_auth_url }}"
        OS_USERNAME: "{{ guid }}"
        OS_PASSWORD: "{{ api_pass }}"
        OS_PROJECT_NAME: "{{ osp_project_name }}"
        OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
        OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
      os_stack:
        name: "create-objects-{{osp_project_name}}"
        template: "files/templates/{{ project }}/stack_user.yaml"
        parameters:
          project_name: "{{ osp_project_name }}"
          public_net_id: "{{ external_network }}"
          api_url: "{{ osp_auth_url }}"
          api_user: "{{ guid }}"
          api_pass: "{{ api_pass }}"
          project_guid: "{{ guid }}"
    - name: Save infra_key content
      set_fact:
        infra_private_key_content: "{{ stack_user_output | json_query(query) }}"
      vars:
        query: "stack.outputs[?@.output_key=='openstack_project_infra_key'].output_value|[0]"
    - name: Set infra_ssh_key path
      set_fact:
        infra_ssh_key: "{{ output_dir}}/{{ guid }}_infra_ssh_key.pem"
    - name: Copy infra_key content to output_dir
      copy:
        dest: "{{ infra_ssh_key }}"
        content: "{{ infra_private_key_content }}"
        mode: 0600
    - debug:
        var: stack_user_output
        verbosity: 2
    - name: Gather instance facts
      environment:
        OS_AUTH_URL: "{{ osp_auth_url }}"
        OS_USERNAME: "{{ guid }}"
        OS_PASSWORD: "{{ api_pass }}"
        OS_PROJECT_NAME: "{{ osp_project_name }}"
        OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
        OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
      os_server_facts:
        server: "*"
        filters:
          metadata:
            guid: "{{ guid }}"
      register: r_osp_facts
    - loop: "{{ r_osp_facts.ansible_facts.openstack_servers }}"
      loop_control:
        loop_var: _server
      environment:
        OS_AUTH_URL: "{{ osp_auth_url }}"
        OS_USERNAME: "{{ guid }}"
        OS_PASSWORD: "{{ api_pass }}"
        OS_PROJECT_NAME: "{{ osp_project_name }}"
        OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
        OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
      os_server_metadata:
        name: "{{ _server.name }}"
        meta:
          env_type: "{{ env_type }}"
    - name: debug osp_facts
      debug:
        var: r_osp_facts
    - name: Iterate over all instances and create DNS entries
      loop: "{{ r_osp_facts.ansible_facts.openstack_servers }}"
      loop_control:
        loop_var: _instance
      when: _instance.public_v4 | default('') != ''
      vars:
        _infra_osp_dns_default_ttl: 300
        _dns_state: present
      include_tasks: instance_loop.yml
    # Run the common OSP create-inventory role, but select inventory names
    # from server.metadata.hostname instead of server.name.
    - name: Run infra-osp-create-inventory Role
      include_role:
        name: infra-osp-create-inventory
      vars:
        _name_selector: 'metadata.hostname'
    - name: Run Common SSH Config Generator Role
      import_role:
        name: infra-common-ssh-config-generate
# include global vars again, this time for all hosts now that the inventory is built
- import_playbook: ../../include_vars.yml
  tags:
    - create_inventory
    - must
- name: Step 001.3 Configure Linux Hosts and Wait for Connection
  hosts:
    # For now, only bastion is necessary
    - bastions:!windows:!network
  gather_facts: false
  any_errors_fatal: true
  ignore_errors: false
  tags:
    - step001
    - step001.3
    - wait_ssh
  tasks:
    - name: set facts for remote access
      tags:
        - create_inventory
      set_fact:
        # Set the python interpreter: useful when the distribution running ansible has a different path
        # ex: when running using the alpine image
        #ansible_python_interpreter: env python
        ansible_ssh_common_args: >-
          {{ ansible_ssh_extra_args|d() }}
          -F {{ output_dir }}/{{ env_type }}_{{ guid }}_ssh_conf
          -o ControlPath=/tmp/{{ guid }}-%r-%h-%p
    - name: Run infra-osp-wait_for_linux_hosts Role
      import_role:
        name: infra-osp-wait_for_linux_hosts
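One step in infra.yml above that may benefit from illustration is the JMESPath query in "Save infra_key content", which pulls the generated infrastructure SSH key out of the Heat stack outputs. A minimal, self-contained sketch with made-up data (the data shape is inferred from the query; json_query requires the jmespath Python library):

- name: Illustrate the stack-output query used in "Save infra_key content" (made-up data)
  vars:
    example_stack_output:
      stack:
        outputs:
          - output_key: openstack_project_infra_key
            output_value: "-----BEGIN RSA PRIVATE KEY----- ...example..."
          - output_key: some_other_output
            output_value: ignored
    example_query: "stack.outputs[?@.output_key=='openstack_project_infra_key'].output_value|[0]"
  debug:
    msg: "{{ example_stack_output | json_query(example_query) }}"
  # prints only the private key value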
ansible/configs/osp-migration/instance_loop.yml
New file
@@ -0,0 +1,12 @@
---
# support public DNS:
# host-GUID.zone.com
# host.GUID.zone.com
# host-GUID.GUID.zone.com
- loop:
    - "{{ _instance.metadata.hostname }}"
    - "{{ _instance.metadata.hostname | regex_replace('-' ~ guid ~ '$', '.' ~ guid) }}"
    - "{{ _instance.metadata.hostname | regex_replace('-' ~ guid ~ '$', '-' ~ guid ~ '.' ~ guid) }}"
  loop_control:
    loop_var: _dns
  include_tasks: dns_loop.yml
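A worked example of the three record names generated above, for a hypothetical instance with metadata.hostname "bastion-abcd", guid "abcd", and osp_cluster_dns_zone "red.osp.opentlc.com":

#   loop item 1 -> record "bastion-abcd"       -> bastion-abcd.red.osp.opentlc.com
#   loop item 2 -> record "bastion.abcd"       -> bastion.abcd.red.osp.opentlc.com
#   loop item 3 -> record "bastion-abcd.abcd"  -> bastion-abcd.abcd.red.osp.opentlc.com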
ansible/configs/osp-migration/post_infra.yml
New file
@@ -0,0 +1,30 @@
---
- name: Step 002 Post Infrastructure, Configuring Bastion Hosts
  hosts: bastions
  become: true
  gather_facts: false
  tags:
    - step002
  tasks:
    - name: Ping bastion
      ping:
    - include_role:
        name: bastion
      when: install_bastion | bool
    - include_role:
        name: bastion-student-user
      when: install_student_user | bool
    - name: Print Student SSH access as user.info
      debug:
        msg: "{{ item }}"
      with_items:
        - "user.info: "
        - "user.info: You can access your bastion via SSH:"
        - "user.info: SSH Access: ssh {{ student_name }}@{{ groups.bastions.0 ~ '.' ~ osp_cluster_dns_zone }}"
    - name: Print Student SSH password as user.info
      debug:
        msg: "user.info: SSH password: {{ student_password | d(hostvars[groups.bastions.0].student_password) }}"
      when: print_student_password | default(true) | bool
ansible/configs/osp-migration/post_software.yml
New file
@@ -0,0 +1,10 @@
---
- name: Deploy workload(s) role on bastion of the shared cluster
  hosts: localhost
  connection: local
  gather_facts: false
  tags:
    - step005
  tasks:
    - debug:
        msg: "Post-Software checks completed successfully"
ansible/configs/osp-migration/pre_infra.yml
New file
@@ -0,0 +1,10 @@
- name: Step 000 Pre Infrastructure
  hosts: localhost
  connection: local
  become: false
  tags:
  - step001
  - pre_infrastructure
  tasks:
    - debug:
        msg: "Step 000 Pre Infrastructure - Dummy action"
ansible/configs/osp-migration/pre_software.yml
New file
@@ -0,0 +1,11 @@
---
- name: PreSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
  - flight_check
  tasks:
  - debug:
      msg: "Pre-Software checks completed successfully"
ansible/configs/osp-migration/requirements.yml
New file
@@ -0,0 +1,6 @@
---
# External role to setup grader host virtualenv and FTL grading infra
- src: https://github.com/redhat-gpte-devopsautomation/ftl-injector
  name: ftl-injector
  version: v0.15
ansible/configs/osp-migration/sample_vars.yml
New file
@@ -0,0 +1,28 @@
---
guid: testgucore
env_type: osp-migration
project: dev-ansible-tower-implementation-3.3-v6
# For this config we don't use any *common* agnosticd cloud provider;
# instead, the infrastructure part is handled in infra.yml.
cloud_provider: osp
admin_user: guillaume
student_name: guillaume
external_network: ee9d9e11-9f4b-4b78-8802-3d3e670ca0f0
osp_cluster_dns_zone: red.osp.opentlc.com
osp_cluster_dns_server: ddns01.opentlc.com
heat_retries: 0
repo_method: file
common_install_basic_packages_retries: 0
output_dir: /tmp/output_dir
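For reference, a typical invocation with these sample vars might look like the following. It is sketched as comments because the secrets file name and location are site-specific; only main.yml and the sample vars path come from this commit:

# From the ansible/ directory (illustrative):
#   ansible-playbook main.yml \
#     -e @configs/osp-migration/sample_vars.yml \
#     -e @~/secrets/osp.yml      # supplies osp_auth_*, ddns_key_name, ddns_key_secret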
ansible/configs/osp-migration/software.yml
New file
@@ -0,0 +1,5 @@
---
- name: Step 001 software
  hosts: bastions
  gather_facts: false
  become: true
ansible/configs/three-tier-app/sample_vars_osp.yml
@@ -19,6 +19,9 @@
repo_method: file
own_repo_path: http://admin.example.com/repos/version
ansible_user: cloud-user
remote_user: cloud-user
# Cloud specific settings - example given here for OSP
cloud_provider: osp                     # Which AgnosticD Cloud Provider to use
@@ -38,14 +41,18 @@
frontend_instance_type: 2c2g30d
support_instance_type: 2c2g30d
bastion_instance_image: rhel-guest-7.7u2
app_instance_image: rhel-guest-7.7u2
appdb_instance_image: rhel-guest-7.7u2
frontend_instance_image: rhel-guest-7.7u2
support_instance_image: rhel-guest-7.7u2
#___image: rhel-guest-7.7u2    # blue
___image: rhel-server-7.7-update-2  # red
bastion_instance_image: "{{ ___image }}"
app_instance_image: "{{ ___image }}"
appdb_instance_image: "{{ ___image }}"
frontend_instance_image: "{{ ___image }}"
support_instance_image: "{{ ___image }}"
student_name: gucore
admin_user: opentlc-mgr
#admin_user: opentlc-mgr
admin_user: gucore
update_all_packages: false
ansible/include_vars.yml
@@ -15,8 +15,8 @@
      stat:
        path: "{{ item }}"
      loop:
        - configs/{{ env_type }}/env_vars.yml
        - cloud_providers/{{ cloud_provider }}_default_vars.yml
        - configs/{{ env_type }}/env_vars.yml
        - configs/{{ env_type }}/env_secret_vars.yml
        - "{{ secret_file | d('/secret/file/not/passed') }}"
      register: rstat_varfiles
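The reorder above matters for precedence: assuming the files found here are then included in the order listed, which is the point of the change, a value set in the config's env_vars.yml now overrides the cloud-provider defaults rather than the other way around. A hypothetical illustration (the variable name is made up):

#   cloud_providers/osp_default_vars.yml:   example_flavor: 2c2g30d
#   configs/osp-migration/env_vars.yml:     example_flavor: 4c4g30d   # included later, so this wins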
ansible/main.yml
@@ -30,7 +30,14 @@
#################################################################################
#################################################################################
- import_playbook: "./cloud_providers/{{ cloud_provider }}_infrastructure_deployment.yml"
# Use first found:
# - infra.yml in config directory
# - common cloud_provider
- vars:
    findme:
      - configs/{{ env_type }}/infra.yml
      - cloud_providers/{{ cloud_provider }}_infrastructure_deployment.yml
  import_playbook: "{{ lookup('first_found', findme) }}"
  tags:
    - step001
    - deploy_infrastructure
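To illustrate the resolution order, here is a minimal standalone sketch, intended to be run from the ansible/ directory; the env_type values are the ones used in this commit:

- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    env_type: osp-migration        # ships its own infra.yml, so that path wins
    cloud_provider: osp
    findme:
      - configs/{{ env_type }}/infra.yml
      - cloud_providers/{{ cloud_provider }}_infrastructure_deployment.yml
  tasks:
    - debug:
        msg: "{{ lookup('first_found', findme) }}"
      # -> .../configs/osp-migration/infra.yml
      # a config without its own infra.yml (e.g. three-tier-app) falls back to
      # cloud_providers/osp_infrastructure_deployment.yml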
ansible/roles/infra-osp-create-inventory/tasks/main.yml
@@ -1,5 +1,8 @@
---
- set_fact:
    _name_selector: name
- set_fact:
    stack_tag: "{{env_type | replace('-', '_')}}_{{guid}}"
  tags:
    - create_inventory
@@ -8,87 +11,85 @@
# Find the bastion
- name: Find the bastion in this batch of hosts
  set_fact:
    local_bastion: "{{ host.name }}"
    local_bastion: "{{ server | json_query(_name_selector) | default(server.name) }}"
  when:
    - host.status != 'terminated'
    - '"bastions" in host.metadata.AnsibleGroup'
    - server.status != 'terminated'
    - '"bastions" in server.metadata.AnsibleGroup | default("")'
  loop: "{{ r_osp_facts.ansible_facts.openstack_servers }}"
  loop_control:
    label: "{{ host.name | default(host.name) }}"
    loop_var: host
    label: "{{ server | json_query(_name_selector) | default(server.name) }}"
    loop_var: server
  ignore_errors: yes
  tags:
    - create_inventory
    - must
- when: host.status != 'terminated'
- when: server.status != 'terminated'
  block:
    - name: Add hosts to inventory
      add_host:
        name: "{{ host.name }}"
        name: "{{ server | json_query(_name_selector) | default(server.name) }}"
        original_name: "{{ server.name }}"
        groups:
          #TODO: remove those tag_*
          - "tag_Project_{{stack_tag}}"
          - "tag_{{ stack_tag}} | default('unknowns') }}"
          - "{{ host.metadata.ostype | default('unknowns') }}"
          - "{{ server.metadata.ostype | default('unknowns') }}"
        ansible_user: "{{ ansible_user }}"
        remote_user: "{{ remote_user }}"
        # ansible_ssh_private_key_file: "{{item['key_name']}}"
        # key_name: "{{item['key_name']}}"
        state: "{{ host.status }}"
        instance_id: "{{ host.id }}"
        isolated: "{{ host.metadata.isolated | default(false) }}"
        state: "{{ server.status }}"
        instance_id: "{{ server.id }}"
        isolated: "{{ server.metadata.isolated | default(false) }}"
        # private_dns_name: "{{item['private_dns_name']}}"
        private_ip_address: "{{ host.private_v4 }}"
        public_ip_address: "{{ host.public_v4 | default('') }}"
        image_id: "{{ host.image.id | default('') }}"
        private_ip_address: "{{ server.private_v4 }}"
        public_ip_address: "{{ server.public_v4 | default('') }}"
        image_id: "{{ server.image.id | default('') }}"
        ansible_ssh_extra_args: "-o StrictHostKeyChecking=no"
        bastion: "{{ local_bastion | default('') }}"
      loop: "{{ r_osp_facts.ansible_facts.openstack_servers }}"
      loop_control:
        label: "{{ host.name }}"
        loop_var: host
        label: "{{ server | json_query(_name_selector) | default(server.name) }}"
        loop_var: server
      tags:
        - create_inventory
        - must
    - add_host:
        name: "{{ host.name }}"
        groups: "{{ host.metadata.AnsibleGroup }}"
        name: "{{ server | json_query(_name_selector) | default(server.name) }}"
        groups: "{{ server.metadata.AnsibleGroup }}"
      loop: "{{ r_osp_facts.ansible_facts.openstack_servers }}"
      loop_control:
        label: "{{ host.name }}"
        loop_var: host
        label: "{{ server | json_query(_name_selector) | default(server.name) }}"
        loop_var: server
      when: server.metadata.AnsibleGroup | default('') != ''
      tags:
        - create_inventory
        - must
    - name: Make sure bastion has public DNS name defined
      add_host:
        name: "{{ host.name }}"
        public_dns_name: "{{ host.name }}.{{ guid }}.{{osp_cluster_dns_zone}}"
      loop: "{{ r_osp_facts.ansible_facts.openstack_servers }}"
      loop_control:
        label: "{{ host.name }}"
        loop_var: host
      when:
        - host.name in groups['bastions']
        - host.public_v4 != ''
- name: Make sure bastion has public DNS name defined
  add_host:
    name: "{{ host }}"
    public_dns_name: "{{ host }}.{{ guid }}.{{osp_cluster_dns_zone}}"
  loop: "{{ groups['bastions'] }}"
  loop_control:
    loop_var: host
  when: hostvars[host].public_ip_address != ''
- debug:
    var: hostvars[local_bastion].public_ip_address
- debug:
    msg: "bastion IP is {{ lookup('dig',local_bastion ~ '.' ~ guid ~ '.' ~ osp_cluster_dns_zone) }}"
    msg: >-
      bastion IP is {{ lookup('dig', hostvars[local_bastion].public_dns_name) }}
  ignore_errors: true
- name: Verify that DNS matches bastion host_var
  assert:
    that:
    # Requires dnspython library
    - lookup('dig', bastion_lookup) == hostvars[local_bastion].public_ip_address
  vars:
    bastion_lookup: "{{ local_bastion ~ '.' ~ guid ~ '.' ~ osp_cluster_dns_zone }}"
    - lookup('dig', hostvars[local_bastion].public_dns_name) == hostvars[local_bastion].public_ip_address
- name: debug hostvars
  debug: