Marcos Entenza
2020-02-12 8d2e2d47f750deb1becbf23711ca6f79fa1be874
New config for SAP HANA tech enablement (#1141)

13 files added, 816 lines changed

 ansible/configs/sap-hana/README.adoc | 52
 ansible/configs/sap-hana/default_vars.yml | 319
 ansible/configs/sap-hana/destroy_env.yml | 3
 ansible/configs/sap-hana/files/cloud_providers/osp_cloud_template_master.j2 | 232
 ansible/configs/sap-hana/files/hosts_template.j2 | 15
 ansible/configs/sap-hana/post_infra.yml | 25
 ansible/configs/sap-hana/post_software.yml | 18
 ansible/configs/sap-hana/pre_infra.yml | 30
 ansible/configs/sap-hana/pre_software.yml | 51
 ansible/configs/sap-hana/requirements.yml | 4
 ansible/configs/sap-hana/sample_vars.yml | 19
 ansible/configs/sap-hana/software.yml | 48
 ansible/configs/sap-hana/topology.png | Bin
ansible/configs/sap-hana/README.adoc
New file
@@ -0,0 +1,52 @@
== Overview
*SAP HANA Auto* _config_ covers the Technical Enablement for 'RHEL for SAP Solutions' related training. It shows how to install SAP HANA on a standalone server using both Ansible and Ansible Tower.
It makes an ideal base infrastructure to build on and can easily be extended via its `default_vars.yml` to fewer or more machines and to different operating system images.
image::topology.png[width=100%]
== Supported Cloud Providers
* OSP
== Review the Env_Type variable file
The link:./default_vars.yml[./default_vars.yml] file contains all the variables you need to define to control the deployment of your environment.
This includes the ability to:
* Change the number of machines deployed
* Change the operating system image
* Change the tags carried by any instances
* Change the base packages installed
* Change/set the `ansible_user` and `remote_user`
These can be overridden at `ansible-playbook` runtime via `-e` options, or more compactly by overriding vars in your own var file and invoking with `-e @my_secret_env_vars.yml`, as sketched below.
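A minimal sketch of such an override file, reusing variable names that already appear in link:./default_vars.yml[./default_vars.yml] (the values are illustrative only and should be adapted to your environment):

[source,yaml]
----
# example overrides -- adjust to your own environment
hana_instance_count: 2        # two HANA nodes instead of the default one
update_packages: true         # run a full yum update on every host
common_packages:              # replace the default base package list
  - git
  - vim-enhanced
  - tmux
----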
For further information on customizing images, consult the link:../../../docs/Creating_a_config.adoc[Creating a Config Guide].
== Deploying the SAP HANA Auto Config
You can deploy this config by running the following command from the `ansible`
directory. You will have to provide credentials and adjust settings to your own
environment.
`ansible-playbook -e @configs/sap-hana/sample_vars.yml main.yml`
Or, ideally, use an external, non-shared vars file to pass OSP credentials and other settings:
`ansible-playbook -e @my_secret_env_vars.yml main.yml`
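Such a private vars file typically carries the per-deployment identifiers shown in `sample_vars.yml` plus any subscription or cloud credentials. A minimal sketch (the subscription variable names come from the commented examples in `default_vars.yml`; OSP credential variables depend on your environment and are not shown):

[source,yaml]
----
# my_secret_env_vars.yml -- keep this file outside the repository
env_type: sap-hana               # name of the config to deploy
guid: guid01                     # your globally unique identifier
output_dir: /tmp/output_dir      # writable scratch directory
email: name@example.com          # used in the instance_filter tags

# Subscription credentials (see the commented examples in default_vars.yml)
rhel_subscription_user: "changeme"
rhel_subscription_pass: "changeme"
----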
=== To Delete an environment
----
ansible-playbook -e @configs/sap-hana/sample_vars.yml \
  configs/sap-hana/destroy_env.yml
----
Or use the custom variables file you used for the deployment:
----
ansible-playbook -e @my_secret_env_vars.yml \
  configs/sap-hana/destroy_env.yml
----
ansible/configs/sap-hana/default_vars.yml
New file
@@ -0,0 +1,319 @@
### Vars that can be removed:
# use_satellite: true
# use_subscription_manager: false
# use_own_repos: false
#If using repo_method: satellite, you must set these values as well.
# satellite_url: satellite.example.com
# satellite_org: Sat_org_name
# satellite_activationkey: "rhel7basic"
# RHSM
# repo_method: rhn
# use_subscription_manager: true
# rhel_subscription_user: "xxxxxxxxxxxxx"
# rhel_subscription_pass: "xxxxxxxxxxx"
# rhsm_pool_ids:
#   - "xxxxxxxxxxxxxxxxxxxxxx"
#   - "xxxxxxxxxxxxxxxxxxxxxx"
# rhel_repos:
#    - "rhel-8-for-x86_64-baseos-rpms"
#    - "rhel-8-for-x86_64-appstream-rpms"
#    - "ansible-2-for-rhel-8-x86_64-rpms"
# rhel_repos:
#   - "rhel-7-server-rpms"
#   - "rhel-server-rhscl-7-rpms"
#   - "rhel-7-server-ansible-2.8-rpms"
###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
###### OR PASS as "-e" args to ansible-playbook command
### Common Host settings
# repo_method: file # Other Options are: file, satellite and rhn
install_ipa_client: false
install_student_user: false
# Do you want to run a full yum update
update_packages: false
## guid is the deployment unique identifier, it will be appended to all tags,
## files and anything that identifies this environment from another "just like it"
guid: defaultguid
# This var is used to identify stack (cloudformation, azure resourcegroup, ...)
project_tag: "{{ env_type }}-{{ guid }}"
# This is where the ssh_config file will be created, this file is used to
# define the communication method to all the hosts in the deployment
deploy_local_ssh_config_location: "{{output_dir}}/"
install_bastion: true
install_common: true
## SB Don't set software_to_deploy from here, always use extra vars (-e) or "none" will be used
#software_to_deploy: none
### If you want a Key Pair name created and injected into the hosts,
# set `set_env_authorized_key` to true and set the keyname in `env_authorized_key`
# you can use the key used to create the environment or use your own self generated key
# if you set "use_own_key" to false your PRIVATE key will be copied to the bastion. (This is {{key_name}})
use_own_key: true
env_authorized_key: "{{guid}}key"
#ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
set_env_authorized_key: true
# Is this running from Red Hat Ansible Tower
tower_run: false
## Networking (AWS)
subdomain_base_short: "{{ guid }}"
subdomain_base_suffix: ".example.opentlc.com"
subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"
## Environment Sizing
bastion_instance_type: "2-16"
bastion_instance_count: 1
rootfs_size_bastion: "{{ rootfs_size_bastion }}"
nfs_size_bastion: 100
hana_instance_type: "4-32"
support_instance_type: "{{ support_instance_type }}"
hana_instance_count: 1
rootfs_size_hana: "{{ rootfs_size_hana }}"
pv_size_hana: 200
hana_instance_image: "{{ hana_instance_image }}"
support_instance_image: "{{ support_instance_image }}"
## Variables required when deploying Ansible Tower alongside the HANA and bastion instances
tower_instance_count: 0
tower_instance_image: "{{ tower_instance_image }}"
tower_instance_type: "4-32"
rootfs_size_tower: "{{ rootfs_size_tower }}"
# Instances images to use
#bastion_instance_image: xxxxxxxxxxxxxxxx
#hana_instance_image: xxxxxxxxxxxxx
#NFS Server variables
install_nfs: true
nfs_device: "/dev/vdb"
nfs_vg: "nfs"
nfs_export_path: "/srv/nfs"
nfs_shares:
  - "hana"
nfs_exports_config: "*(insecure,rw,root_squash,no_wdelay,sync)"
#Ansible Tower Variables
# ansible_tower:
#   admin_password: "xxxxxxxxxxxxxxx"
# ansible_tower_download_url: xxxxxxx
# ansible_tower_epel_download_url: xxxxx
security_groups:
  - name: HanaSG
    rules:
      - name: HanaSSHPrivate
        description: "SSH private"
        from_port: 22
        to_port: 22
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
        group: BastionSG
      - name: HanaUDPPortsPrivate
        description: "Only from bastion"
        from_port: 1
        to_port: 65535
        protocol: udp
        group: BastionSG
        rule_type: Ingress
      - name: HanaTCPPPortsPrivate
        description: "Only from bastion"
        from_port: 1
        to_port: 65535
        protocol: tcp
        group: BastionSG
        rule_type: Ingress
  - name: BastionSG
    rules:
      - name: BastionUDPPortsPrivate
        description: "Only from bastion"
        from_port: 1
        to_port: 65535
        protocol: udp
        group: HanaSG
        rule_type: Ingress
      - name: BastionTCPPPortsPrivate
        description: "Only from bastion"
        from_port: 1
        to_port: 65535
        protocol: tcp
        group: HanaSG
        rule_type: Ingress
  - name: TowerSG
    rules:
      - name: TowerHTTP
        description: "Tower HTTP"
        from_port: 80
        to_port: 80
        protocol: tcp
        rule_type: Ingress
      - name: TowerHTTPS
        description: "Tower HTTPS"
        from_port: 443
        to_port: 443
        protocol: tcp
        rule_type: Ingress
      - name: TowerSSHPrivate
        description: "SSH private"
        from_port: 22
        to_port: 22
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
        group: BastionSG
      - name: TowerUDPPortsPrivate
        description: "Only from bastion"
        from_port: 1
        to_port: 65535
        protocol: udp
        group: BastionSG
        rule_type: Ingress
      - name: TowerTCPPPortsPrivate
        description: "Only from bastion"
        from_port: 1
        to_port: 65535
        protocol: tcp
        group: BastionSG
        rule_type: Ingress
instances:
  - name: "bastion"
    count: "{{ bastion_instance_count }}"
    unique: true
    public_dns: true
    dns_loadbalancer: true
    floating_ip: true
    image_id: "{{ bastion_instance_image }}"
    sofware_image_id: "{{ sap_software_image }}"
    flavor:
      osp: "{{ bastion_instance_type }}"
    tags:
      - key: "AnsibleGroup"
        value: "bastions"
      - key: "ostype"
        value: "linux"
      - key: "instance_filter"
        value: "{{ env_type }}-{{ email }}"
    rootfs_size: "50"
    softwarefs_size: "{{ sap_software_size }}"
    volumes:
      - volume_name: "bastion_nfs"
        volume_size: "{{ nfs_size_bastion }}"
    security_groups:
      - BastionSG
  - name: "hana"
    count: "{{ hana_instance_count }}"
    public_dns: false
    dns_loadbalancer: false
    floating_ip: false
    image_id: "{{ hana_instance_image }}"
    flavor:
      osp: "{{ hana_instance_type }}"
    tags:
      - key: "AnsibleGroup"
        value: "hanas"
      - key: "ostype"
        value: "rhel"
      - key: "instance_filter"
        value: "{{ env_type }}-{{ email }}"
    key_name: "{{key_name}}"
    rootfs_size: "100"
    volumes:
      - volume_name: "hana_pv"
        volume_size: "{{ pv_size_hana }}"
    security_groups:
      - HanaSG
  - name: "tower"
    count: "{{ tower_instance_count }}"
    public_dns: true
    dns_loadbalancer: true
    floating_ip: true
    image_id: "{{ tower_instance_image }}"
    flavor:
      osp: "{{ tower_instance_type }}"
    tags:
      - key: "AnsibleGroup"
        value: "towers"
      - key: "ostype"
        value: "rhel"
      - key: "instance_filter"
        value: "{{ env_type }}-{{ email }}"
    key_name: "{{key_name}}"
    rootfs_size: "200"
    security_groups:
      - TowerSG
common_packages:
  - unzip
  - bash-completion
  - tmux
  - bind-utils
  - wget
  - git
  - vim-enhanced
  - at
zone_internal_dns: "{{guid}}.internal."
chomped_zone_internal_dns: "{{guid}}.internal"
bastion_public_dns: "bastion.{{subdomain_base}}."
bastion_public_dns_chomped: "bastion.{{subdomain_base}}"
vpcid_cidr_block: "192.168.0.0/16"
vpcid_name_tag: "{{subdomain_base}}"
#dopt_domain_name: "{{ aws_region }}.compute.internal"
rtb_public_name_tag: "{{subdomain_base}}-public"
rtb_private_name_tag: "{{subdomain_base}}-private"
cf_template_description: "{{ env_type }}-{{ guid }} Ansible Agnostic Deployer "
#### OSP ####
# See cloud_providers/osp_default_vars.yml
# See roles/infra-osp-project-create/defaults/main.yml
# Set this to true if you need to create a new project in OpenStack
# This should almost always be set to true for OpenShift installations
# If it is set to false, the {{ osp_project_name }} must already exist and
# should be able to run whatever you are deploying
#osp_project_create: true
# If osp_project_create is set to yes, define those:
# Quotas to set for new project that is created
#quota_num_instances: 15
#quota_num_cores: 72
#quota_memory: 131072 # in MB
#quota_num_volumes: 25
#quota_volumes_gigs: 500
#quota_loadbalancers: #when Octavia is available
#quota_pool: #when Octavia is available
#quota_networks: 3
#quota_subnets: 3
#quota_routers: 3
quota_fip: 7
#quota_sg: 10
#quota_sg_rules: 100
ansible/configs/sap-hana/destroy_env.yml
New file
@@ -0,0 +1,3 @@
---
- name: Import default destroy playbook
  import_playbook: ../../cloud_providers/{{cloud_provider}}_destroy_env.yml
ansible/configs/sap-hana/files/cloud_providers/osp_cloud_template_master.j2
New file
@@ -0,0 +1,232 @@
#jinja2: lstrip_blocks: "True"
---
heat_template_version: 2018-03-02
description: >-
  Top level HOT for creating new project, network resources and instances.
  This template relies on ResourceGroups and a nested template that is
  called to provision instances, ports, & floating IPs.
resources:
  {{ guid }}-infra_key:
    type: OS::Nova::KeyPair
    properties:
      name: {{ guid }}-infra_key
      save_private_key: true
{% if osp_project_create | bool %}
  {{ guid }}-project_user:
    type: OS::Keystone::User
    properties:
      name: {{ guid }}-user
      password: {{ heat_user_password }}
      domain: Default
  {{ guid }}-project_role_user:
    type: OS::Keystone::UserRoleAssignment
    properties:
      user: {get_resource: {{ guid }}-project_user}
      roles:
        - {project: {{ osp_project_name }}, role: _member_}
        - {project: {{ osp_project_name }}, role: swiftoperator}
    depends_on:
      - {{ guid }}-project_user
{% endif %}
{% for network in networks %}
  {{ network['name'] }}-network:
    type: OS::Neutron::Net
    properties:
      name: "{{ guid }}-{{ network['name'] }}-network"
      shared: {{ network['shared'] }}
  {{ network['name'] }}-subnet:
    type: OS::Neutron::Subnet
    properties:
      name: "{{ guid }}-{{ network['name'] }}-subnet"
      network_id: {get_resource: {{ network['name'] }}-network}
{% if network['dns_nameservers'] is defined %}
      dns_nameservers: [{{ network['dns_nameservers'] | list | join(",") }}]
{% endif %}
      cidr: {{ network['subnet_cidr'] }}
      gateway_ip: {{ network['gateway_ip'] }}
      allocation_pools:
        - start: {{ network['allocation_start'] }}
          end: {{ network['allocation_end'] }}
{% if network['create_router'] %}
  {{ network['name'] }}-router:
    type: OS::Neutron::Router
    properties:
      name: "{{ guid }}-{{ network['name'] }}-router"
      external_gateway_info:
        network: "{{ provider_network }}"
  {{ network['name'] }}-router_private_interface:
    type: OS::Neutron::RouterInterface
    properties:
      router: {get_resource: {{ network['name'] }}-router}
      subnet: {get_resource: {{ network['name'] }}-subnet}
{% endif %}
{% endfor %}
  ###################
  # Security groups #
  ###################
{% for security_group in security_groups | list + default_security_groups | list %}
  {{ security_group['name'] }}:
    type: OS::Neutron::SecurityGroup
    properties:
      name: {{ guid }}-{{ security_group['name'] }}
{% if security_group['description'] is defined %}
      description: "{{ security_group['description'] }}"
{% endif %}
{% for rule in security_group.rules %}
{% if rule['name'] is defined %}
  {{ guid }}-{{ security_group['name'] }}-rule_{{ rule['name'] }}:
{% else %}
  {{ guid }}-{{ security_group['name'] }}-rule_{{ lookup('password', '/dev/null length=5 chars=ascii_letters,digits') }}:
{% endif %}
    type: OS::Neutron::SecurityGroupRule
    properties:
      security_group: {get_resource: {{ security_group['name'] }}}
      direction: {{ rule['direction'] | default(rule.rule_type) | lower }}
      protocol: {{ rule['protocol'] | lower }}
{% if rule['description'] is defined %}
      description: {{ rule['description'] }}
{% endif %}
{% if rule['port_range_min'] is defined or
  rule.from_port is defined %}
      port_range_min: {{ rule['port_range_min'] | default(rule.from_port) }}
{% endif %}
{% if rule['port_range_max'] is defined or
  rule.to_port is defined %}
      port_range_max: {{ rule['port_range_max'] | default(rule.to_port) }}
{% endif %}
{% if rule['remote_ip_prefix'] is defined or
  rule.cidr is defined %}
      remote_ip_prefix: {{ rule['remote_ip_prefix'] | default(rule.cidr) }}
{% endif %}
{% if rule['remote_group'] is defined or
  rule.from_group is defined %}
      remote_group: {get_resource: {{ rule['remote_group'] | default(rule.from_group) }}}
{% endif %}
    depends_on: {{ security_group['name'] }}
{% endfor %}
{% endfor %}
  #############
  # Instances #
  #############
{% for instance in instances %}
  {% for myinstanceindex in range(instance.count|int) %}
    {% set iname = instance.name if instance.count == 1 else [instance.name, loop.index0] | join() %}
  ########### {{ iname }} ###########
  port_{{ iname }}:
    type: OS::Neutron::Port
    properties:
      network: { get_resource: {{ instance['network'] | default('default') }}-network }
      security_groups:
    {% if instance.security_groups is defined %}
      {% for security_group in instance.security_groups %}
        - {get_resource: {{ security_group }}}
      {% endfor %}
    {% endif %}
    depends_on:
      - {{ instance['network'] | default('default') }}-router_private_interface
    {% if instance.floating_ip | default(false) or instance.public_dns | default(false) %}
  fip_{{ iname }}:
    type: OS::Neutron::FloatingIP
    properties:
      floating_network: {{ provider_network }}
    depends_on:
      - {{ instance['network'] | default('default') }}-router_private_interface
  fip_association_{{ iname }}:
    type: OS::Neutron::FloatingIPAssociation
    properties:
      floatingip_id: {get_resource: fip_{{ iname }}}
      port_id: {get_resource: port_{{ iname }}}
    {% endif %}
  server_{{ iname }}:
    type: OS::Nova::Server
    properties:
      name: {{ iname }}
      flavor: {{ instance.flavor.osp }}
      key_name: {get_resource: {{ guid }}-infra_key}
      block_device_mapping_v2:
        - image: {{ instance.image_id | default(instance.image) }}
          delete_on_termination: true
          volume_size: {{ instance['rootfs_size'] | default(osp_default_rootfs_size) }}
          boot_index: 0
        {% if iname == "bastion" %}
        - image: {{ instance.sofware_image_id | default("sap-software") }}
          delete_on_termination: true
          volume_size: {{ instance['softwarefs_size'] }}
          boot_index: -1
        {% endif %}
      user_data: |
        #cloud-config
        ssh_authorized_keys: {{ all_ssh_authorized_keys | to_json }}
      user_data_format: RAW
      networks:
        - port: {get_resource: port_{{ iname }}}
    {% if instance['metadata'] is defined %}
      metadata: {{ instance.metadata | combine(default_metadata) | to_json }}
    {% endif %}
    {% if instance.tags is defined %}
      # Convert EC2 tags
      metadata:
      {% for key, value in default_metadata.items() %}
        '{{ key }}': {{ value | to_json }}
      {% endfor %}
      {% for tag in instance.tags %}
        '{{ tag.key }}': {{ tag.value | to_json }}
      {% endfor %}
    {% endif %}
    depends_on:
      - {{ instance['network'] | default('default') }}-router_private_interface
    {% if 'security_groups' in instance %}
      {% for security_group in instance.security_groups %}
      - {{ security_group }}
      {% endfor %}
    {% endif %}
    {% if instance.volumes is defined %}
  #### Volumes for {{ iname }} ####
      {% for volume in instance.volumes %}
        {% set loopvolume = loop %}
        {% set vname = ["volume", iname, loopvolume.index] | join('_') %}
  {{ vname }}:
    type: OS::Cinder::Volume
    properties:
      size: {{ volume.volume_size | default(volume.size) }}
          {% if volume.volume_name is defined %}
      name: {{ volume.volume_name | default(volume.name) }}
          {% endif %}
  volume_attachment_{{ vname }}:
    type: OS::Cinder::VolumeAttachment
    properties:
      volume_id: {get_resource: {{ vname }}}
      instance_uuid: {get_resource: server_{{ iname }}}
      {% endfor %}
    {% endif %}
  {% endfor %}
{% endfor %}
outputs:
  {{ guid }}-infra_key:
    description: The SSH infra key
    value: {get_attr: [{{ guid }}-infra_key, private_key]}
ansible/configs/sap-hana/files/hosts_template.j2
New file
@@ -0,0 +1,15 @@
[hanas]
{% for host in groups['hanas'] %}
hana
{% endfor %}
[hana:children]
hanas
[hana:vars]
timeout=60
ansible_become=yes
ansible_user={{remote_user}}
ansible_ssh_common_args="-o StrictHostKeyChecking=no"
ansible/configs/sap-hana/post_infra.yml
New file
@@ -0,0 +1,25 @@
- name: Step 002 Post Infrastructure
  hosts: localhost
  connection: local
  become: false
  tags:
    - step002
    - post_infrastructure
  tasks:
    - name: Launch a Job Template with update-on-launch inventory set
      uri:
        url: "https://{{ ansible_tower_ip }}/api/v1/job_templates/{{ job_template_id }}/launch/"
        method: POST
        user: "{{tower_admin}}"
        password: "{{tower_admin_password}}"
        body:
          extra_vars:
            guid: "{{guid}}"
            ipa_host_password: "{{ipa_host_password}}"
        body_format: json
        validate_certs: False
        headers:
          Content-Type: "application/json"
        status_code: 200, 201
      when: tower_run | bool
ansible/configs/sap-hana/post_software.yml
New file
@@ -0,0 +1,18 @@
- name: Step 00xxxxx post software
  hosts: support
  gather_facts: False
  become: yes
  tasks:
    - debug:
        msg: "Post-Software tasks Started"
- name: PostSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Post-Software checks completed successfully"
ansible/configs/sap-hana/pre_infra.yml
New file
@@ -0,0 +1,30 @@
- name: Step 000 Pre Infrastructure
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - step001
    - pre_infrastructure
    - generate_env_keys
  tasks:
    - debug:
        msg: "Step 000 Pre Infrastructure"
    - name: Generate SSH keys
      shell: ssh-keygen -b 2048 -t rsa -f "{{output_dir}}/{{env_authorized_key}}" -q -N ""
      args:
        creates: "{{output_dir}}/{{env_authorized_key}}"
      when: set_env_authorized_key | bool
    - name: Fix private key permissions
      file:
        path: "{{output_dir}}/{{env_authorized_key}}"
        mode: 0400
      when: set_env_authorized_key | bool
    - name: Generate SSH pub key
      shell: ssh-keygen -y -f "{{output_dir}}/{{env_authorized_key}}" > "{{output_dir}}/{{env_authorized_key}}.pub"
      args:
        creates: "{{output_dir}}/{{env_authorized_key}}.pub"
      when: set_env_authorized_key | bool
ansible/configs/sap-hana/pre_software.yml
New file
@@ -0,0 +1,51 @@
# Cloudformation template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }}
- name: Configure all hosts with Repositories, Common Files and Set environment key
  hosts:
    - all:!windows
  become: true
  gather_facts: False
  roles:
    - { role: "set-repositories", when: 'repo_method is defined' }
    - { role: "common", when: 'install_common' }
    - { role: "set_env_authorized_key", when: 'set_env_authorized_key' }
  tags:
    - step004
    - common_tasks
- name: Configuring Bastion Hosts
  hosts: bastions
  become: true
  roles:
    -  { role: "bastion", when: 'install_bastion' }
    - role: bastion-student-user
      when: install_student_user | bool
  tags:
    - step004
    - bastion_tasks
- name: Copy SSH RSA to cloud-user
  hosts: bastions
  become: true
  tasks:
    - copy:
        src: /root/.ssh/{{env_authorized_key}}.pem
        dest: /home/cloud-user/.ssh/id_rsa
        owner: cloud-user
        group: cloud-user
        mode: 0400
        remote_src: yes
  tags:
    - step004
    - bastion_tasks
- name: PreSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - flight_check
  tasks:
    - debug:
        msg: "Pre-Software checks completed successfully"
ansible/configs/sap-hana/requirements.yml
New file
@@ -0,0 +1,4 @@
---
- src: https://github.com/redhat-cop/infra-ansible.git
  name: infra-ansible
  version: v1.0.13
ansible/configs/sap-hana/sample_vars.yml
New file
@@ -0,0 +1,19 @@
# ---
# # sample vars configuration file
# #
# # This file is passed to ansible-playbook to set key vars which need to be set
# # and typically customized for a successful deployment.
# #
# # Usage: ansible-playbook main.yml -e @configs/sap-hana/sample_vars.yml
# #
# # Ideally make and keep a copy OUTSIDE your repo, especially if using Cloud Credentials
# # Credentials can also be set separately, e.g. in ~/secrets.yml, and passed in with
# # a 2nd `-e` argument, i.e. -e @~/secrets.yml
# env_type: sap-hana                    # Name of config to deploy
# output_dir: /tmp/output_dir                # Writable working scratch directory
# email: name@example.com                 # User info for notifications
# guid: guid01                            # Your Global UNIQUE Identifier
ansible/configs/sap-hana/software.yml
New file
@@ -0,0 +1,48 @@
---
- name: Step 00xxxxx software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Install and configure NFS Server
  hosts: bastion
  become: True
  gather_facts: True
  roles:
    # Install and deploy NFS Server
    - { role: "host-ocp-nfs", when: install_nfs }
  tasks:
    - name: Ensure Ansible is installed
      yum:
        name: ansible
        state: latest
- name: Deploy Ansible Tower
  hosts: towers
  gather_facts: False
  become: yes
  tasks:
    # This is a bit hacky but we are facing some issues with Ansible, RHEL8 and python for some
    # modules and this workaround solved this particular issue
    - name: Ensure Python3 package is installed and alternatives for python updated
      shell: >
        yum install -y python3 && alternatives --set python /usr/bin/python3
    - name: Install Ansible Tower
      include_role:
        name: infra-ansible/roles/ansible/tower/config-ansible-tower
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/sap-hana/topology.png