Tok
2019-08-12 822318897ffaa998a2d4225213d59b13a1ed34d1
Cloned config ans-tower-prod to more clearly named ansible-tower
18 files added
1850 ■■■■■ changed files
ansible/configs/ansible-tower/README.adoc 80 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/destroy_env.yml 18 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/env_vars.yml 450 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/files/cloud_providers/ec2_cloud_template.j2 529 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/files/hosts_template.j2 51 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/files/repos_template.j2 38 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/files/tower_cli.j2 5 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/files/tower_template_inventory.j2 54 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/post_infra.yml 24 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/post_software.yml 33 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/pre_infra.yml 21 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/pre_software.yml 61 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/requirements.yml 6 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/sample_vars.yml 80 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/sample_vars_babylon.yml 169 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/software.yml 20 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/tower_workloads.yml 55 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/tower_workloads_workaround.yml 156 ●●●●● patch | view | raw | blame | history
ansible/configs/ansible-tower/README.adoc
New file
@@ -0,0 +1,80 @@
= ans-tower-lab config
Author: Prakhar Srivastava, psrivast@redhat.com and Shachar Borenstein, sha@redhat.com
Owner: Prakhar Srivastava, psrivast@redhat.com
Alternative Contacts: Tony Kay, tok@redhat.com,
== Overview
Currently the ans-tower-config is used to deploy at least 2 different labs for
the *Ansible Bootcamp ILT* aka *Ansible Advanced*. The primary differences being
the "normal" version creates the base infrastructure for students to deploy tower
whereas the "homework version" actually deploys and licenses an Ansible Tower
Cluster and configures it with various objects, e.g. keys, workflows etc, to
give students a start on their homework.
The "homework" version is deployed when:
+
[source,yaml]
----
deploy_tower_homework: true
----
== NOTES
. There are 2 sets of deployer scripts:
**
**
. There are 2 sets of secrets files:
**
**
The "homework" lab requires additional secrets over and above the usual cloud (AWS)
credentials and repo path. These are traditionally stored on the admin host and
can be located via the relevant deployer scripts.
== Review the Env_Type variable file
* This file link:./env_vars.yml[./env_vars.yml] contains all the variables you
 need to define to control, or customize, the deployment of your environment. In
normal usage this should not need to be touched or amended and one-off changes
can be tested by passing vars or var files with `-e` or `-e @my_version_vars.yml`.
== Secrets
As noted above a deploy of the normal lab requires just 3 secrets: the cloud (AWS) credentials and the repo path.
== Running Ansible Playbook
You can run the playbook with the following arguments to overwrite the default variable values:
From the `ansible_agnostic_deployer/ansible` directory run:
[source,bash]
----
ansible-playbook ansible/main.yml  \
      -e @./ansible/configs/ansible-tower/sample_vars.yml \
      -e @../secret.yml \
      -e "guid=sborenstest2" -vvvv
----
=== To Delete an environment
----
REGION=us-east-1
KEYNAME=ocpkey
GUID=test02
ENVTYPE=ansible-tower
CLOUDPROVIDER=ec2
ansible-playbook configs/${ENVTYPE}/destroy_env.yml \
        -e "guid=${GUID}" -e "env_type=${ENVTYPE}" \
        -e "cloud_provider=${CLOUDPROVIDER}" \
        -e "aws_region=${REGION}"  -e "key_name=${KEYNAME}"  \
        -e "subdomain_base_suffix=${BASESUFFIX}" \
        -e @~/secret.yml -vv
----
ansible/configs/ansible-tower/destroy_env.yml
New file
@@ -0,0 +1,18 @@
---
# Tear down the CloudFormation/ARM stack for this config on the selected
# cloud provider. Expects the same -e vars used at deploy time (guid,
# env_type, cloud_provider, aws_region, key_name, ...).
- import_playbook: ../../include_vars.yml

- name: Delete Infrastructure
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tasks:
    # Both supported providers resolve to the same role-name pattern, so a
    # single task with a combined condition replaces the two previously
    # duplicated tasks (they differed only in their `when` value).
    - name: Run infra-{{ cloud_provider }}-template-destroy
      include_role:
        name: "infra-{{ cloud_provider }}-template-destroy"
      when: cloud_provider in ['ec2', 'azure']
ansible/configs/ansible-tower/env_vars.yml
New file
@@ -0,0 +1,450 @@
---
## TODO: What variables can we strip out of here to build complex variables?
## i.e. what can we add into group_vars as opposed to config_vars?
## Example: We don't really need "subdomain_base_short". If we want to use this,
## should just toss in group_vars/all.
### Also, we should probably just create a variable reference in the README.md
### For now, just tagging comments in line with configuration file.
### Vars that can be removed:
# use_satellite: true
# use_subscription_manager: false
# use_own_repos: false
###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
###### OR PASS as "-e" args to ansible-playbook command
### Common Host settings
repo_method: file # Other Options are: file, satellite and rhn
# NOTE(review): placeholder password — always override via -e or a secrets file.
tower_admin_password: 'changeme'
tower_version: "3.4.3-1"
# Do you want to run a full yum update
update_packages: false
#If using repo_method: satellite, you must set these values as well.
# satellite_url: satellite.example.com
# satellite_org: Sat_org_name
# satellite_activationkey: "rhel7basic"
## guid is the deployment unique identifier, it will be appended to all tags,
## files and anything that identifies this environment from another "just like it"
guid: defaultguid
install_bastion: true
install_common: true
install_ipa_client: false
## SB Don't set software_to_deploy from here, always use extra vars (-e) or "none" will be used
software_to_deploy: tower
# Repo path version tracks the Tower version being installed (see repos_template.j2).
repo_version: "{{tower_version}}"
### If you want a Key Pair name created and injected into the hosts,
# set `set_env_authorized_key` to true and set the keyname in `env_authorized_key`
# you can use the key used to create the environment or use your own self generated key
# if you set "use_own_key" to false your PRIVATE key will be copied to the bastion. (This is {{key_name}})
use_own_key: true
env_authorized_key: "{{guid}}key"
ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
set_env_authorized_key: true
### AWS EC2 Environment settings
### Route 53 Zone ID (AWS)
# This is the Route53 HostedZoneId where you will create your Public DNS entries
# This only needs to be defined if your CF template uses route53
HostedZoneId: Z3IHLWJZOU9SRT
# The region to be used, if not specified by -e in the command line
aws_region: ap-southeast-2
# The EC2 key pair name injected into instances (used as KeyName in the CF template)
key_name: "default_key_name"
## Networking (AWS)
subdomain_base_short: "{{ guid }}"
subdomain_base_suffix: ".example.opentlc.com"
subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"
## Environment Sizing
bastion_instance_type: "t2.medium"
tower_instance_count: 3
tower_instance_type: "t2.medium"
worker_instance_count: 2
worker_instance_type: "t2.medium"
support_instance_count: 2
support_instance_type: "t2.medium"
## Networking: a single public subnet with an attached routing table.
subnets:
  - name: PublicSubnet
    cidr: "192.168.1.0/24"
    routing_table: true
## Security groups consumed by files/cloud_providers/ec2_cloud_template.j2.
## A rule with "cidr" opens the port range to that CIDR; a rule with "group"
## allows traffic sourced from another security group in the same stack.
security_groups:
  - name: BastionSG
    rules:
      - name: BasSSHPublic
        description: "SSH public"
        from_port: 22
        to_port: 22
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
  - name: TowerSG
    rules:
      - name: SSHTower
        description: "SSH public"
        from_port: 22
        to_port: 22
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: HTTPTower
        description: "HTTP public"
        from_port: 80
        to_port: 80
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: HTTPSTower
        # fixed: description previously said "HTTP public" (copy-paste of the
        # port-80 rule above)
        description: "HTTPS public"
        from_port: 443
        to_port: 443
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: RabbitMq
        description: "RabbitMq"
        from_port: 5672
        to_port: 5672
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: EPMD
        description: "EPMD"
        from_port: 4369
        to_port: 4369
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: RabbitMqCLi
        description: "RabbitMq Cli"
        from_port: 25672
        to_port: 25672
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: RabbitMqAPi
        description: "RabbitMq Api"
        from_port: 15672
        to_port: 15672
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: RabbitMqCliTools
        description: "RabbitMq CLi tools"
        from_port: 35672
        to_port: 35672
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: BasTowerTcp
        description: "ALL from bastion tcp"
        from_port: 0
        to_port: 65535
        protocol: tcp
        group: BastionSG
        rule_type: Ingress
      - name: BasTowerUdp
        description: "ALL from bastion udp"
        from_port: 0
        to_port: 65535
        protocol: udp
        group: BastionSG
        rule_type: Ingress
      - name: AllInternaltcp
        description: "All other nodes tcp"
        from_port: 0
        to_port: 65535
        protocol: tcp
        group: HostSG
        rule_type: Ingress
      - name: AllInternaludp
        description: "All other nodes udp"
        from_port: 0
        to_port: 65535
        protocol: udp
        group: HostSG
        rule_type: Ingress
      - name: AllTowerNodestcp
        description: "All tower nodes tcp"
        from_port: 0
        to_port: 65535
        protocol: tcp
        group: TowerSG
        rule_type: Ingress
      - name: AllTowerNodesudp
        description: "All tower nodes udp"
        from_port: 0
        to_port: 65535
        protocol: udp
        group: TowerSG
        rule_type: Ingress
  - name: HostSG
    rules:
      - name: HostUDPPorts
        description: "Only from Itself udp"
        from_port: 0
        to_port: 65535
        protocol: udp
        group: HostSG
        rule_type: Ingress
      - name: Postgresql
        # NOTE(review): PostgreSQL is exposed to 0.0.0.0/0 here — confirm this
        # is intentional for the lab rather than restricting it to TowerSG.
        description: "PostgreSql"
        from_port: 5432
        to_port: 5432
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: HostTCPPorts
        description: "Only from Itself tcp"
        from_port: 0
        to_port: 65535
        protocol: tcp
        group: HostSG
        rule_type: Ingress
      - name: TowerUDPPorts
        description: "Only from tower"
        from_port: 0
        to_port: 65535
        protocol: udp
        group: TowerSG
        rule_type: Ingress
      - name: TowerTCPPorts
        description: "Only from tower"
        from_port: 0
        to_port: 65535
        protocol: tcp
        group: TowerSG
        rule_type: Ingress
      - name: BastionUDPPorts
        description: "Only from bastion"
        from_port: 0
        to_port: 65535
        protocol: udp
        group: BastionSG
        rule_type: Ingress
      - name: BastionTCPPorts
        description: "Only from bastion"
        from_port: 0
        to_port: 65535
        protocol: tcp
        group: BastionSG
        rule_type: Ingress
## Instance definitions consumed by the cloud template. Every instance lands
## in the single PublicSubnet defined above; "unique: true" suppresses the
## numeric suffix on names/DNS records.
instances:
  - name: "bastion"
    count: 1
    unique: true
    public_dns: true
    dns_loadbalancer: true
    security_groups:
      - BastionSG
    flavor:
      ec2: "{{bastion_instance_type}}"
      azure: "{{bastion_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "bastions"
      - key: "ostype"
        value: "linux"
    # NOTE(review): root_filesystem_size is not defined anywhere in this file —
    # presumably supplied by common/default vars; verify before relying on it.
    rootfs_size: "{{root_filesystem_size}}"
    subnet: PublicSubnet
  - name: "tower"
    count: "{{tower_instance_count}}"
    security_groups:
      - TowerSG
    public_dns: true
    dns_loadbalancer: true
    flavor:
      "ec2": "{{tower_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "towers"
      - key: "ostype"
        value: "linux"
    subnet: PublicSubnet
  - name: "support"
    count: "{{support_instance_count}}"
    public_dns: true
    security_groups:
      - TowerSG
      - HostSG
    flavor:
      "ec2": "{{support_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "support"
      - key: "ostype"
        value: "rhel"
    key_name: "{{key_name}}"
    subnet: PublicSubnet
######### Worker instances #########
# Isolated/worker nodes. The commented-out entries below show how additional
# per-region worker sets were declared via target_regions (not defined in this
# file — supplied by sample vars when used).
instances_worker:
  - name: "worker"
    count: "{{worker_instance_count}}"
    security_groups:
      - HostSG
      - TowerSG
    public_dns: false
    dns_loadbalancer: false
    flavor:
      "ec2": "{{worker_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "workers"
      - key: "ostype"
        value: "linux"
    subnet: PublicSubnet
#########
  # - name: "worker{{target_regions[1].name}}"
  #   count: "{{worker_instance_count}}"
  #   security_groups:
  #     - HostSG
  #     - TowerSG
  #   public_dns: false
  #   dns_loadbalancer: false
  #   flavor:
  #     "ec2": "{{worker_instance_type}}"
  #   tags:
  #     - key: "AnsibleGroup"
  #       value: "workers"
  #     - key: "ostype"
  #       value: "linux"
  #     - key: Worker_region
  #       value: "{{ target_regions[1].name }}"
  #   subnet: PublicSubnet
  #   #########
  # - name: "worker{{target_regions[2].name}}"
  #   count: "{{worker_instance_count}}"
  #   security_groups:
  #     - HostSG
  #     - TowerSG
  #   public_dns: false
  #   dns_loadbalancer: false
  #   flavor:
  #     "ec2": "{{worker_instance_type}}"
  #   tags:
  #     - key: "AnsibleGroup"
  #       value: "workers"
  #     - key: "ostype"
  #       value: "linux"
  #     - key: Worker_region
  #       value: "{{ target_regions[2].name }}"
  #   subnet: PublicSubnet
    #######*************#############
###### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYMENT
###### You can, but you usually wouldn't need to.
ansible_user: ec2-user
remote_user: ec2-user
# Baseline packages installed on all hosts by the common role.
common_packages:
  - python
  - unzip
  - bash-completion
  - tmux
  - bind-utils
  - wget
  - git
  - vim-enhanced
  - at
  - python-pip
  - gcc
  - ansible
# Repos enabled on RHEL hosts.
# NOTE(review): "epel-release-latest-7" does not match the "[epel]" section id
# in files/repos_template.j2 — verify which identifier the repo role expects.
rhel_repos:
  - rhel-7-server-rpms
  - rhel-7-server-rh-common-rpms
  - rhel-7-server-extras-rpms
  - rhel-7-server-optional-rpms
  - rhel-7-server-rhscl-rpms
  - epel-release-latest-7
project_tag: "{{ env_type }}-{{ guid }}"
zone_internal_dns: "{{guid}}.internal."
chomped_zone_internal_dns: "{{guid}}.internal"
tower_public_dns: "towerlb.{{subdomain_base}}."
#tower_public_dns: "tower.{{subdomain_base}}."
bastion_public_dns: "bastion.{{subdomain_base}}."
bastion_public_dns_chomped: "bastion.{{subdomain_base}}"
# we don't use this anymore <sborenst>
# activedirectory_public_dns: "ad.{{subdomain_base}}."
# activedirectory_public_dns_chomped: "ad.{{subdomain_base}}"
#
# vpcid_cidr_block: "192.168.0.0/16"
# vpcid_name_tag: "{{subdomain_base}}"
#
# az_1_name: "{{ aws_region }}a"
# az_2_name: "{{ aws_region }}b"
#
# subnet_private_1_cidr_block: "192.168.2.0/24"
# subnet_private_1_az: "{{ az_2_name }}"
# subnet_private_1_name_tag: "{{subdomain_base}}-private"
#
# subnet_private_2_cidr_block: "192.168.1.0/24"
# subnet_private_2_az: "{{ az_1_name }}"
# subnet_private_2_name_tag: "{{subdomain_base}}-private"
#
# subnet_public_1_cidr_block: "192.168.10.0/24"
# subnet_public_1_az: "{{ az_1_name }}"
# subnet_public_1_name_tag: "{{subdomain_base}}-public"
#
# subnet_public_2_cidr_block: "192.168.20.0/24"
# subnet_public_2_az: "{{ az_2_name }}"
# subnet_public_2_name_tag: "{{subdomain_base}}-public"
#
# dopt_domain_name: "{{ aws_region }}.compute.internal"
#
# rtb_public_name_tag: "{{subdomain_base}}-public"
# rtb_private_name_tag: "{{subdomain_base}}-private"
#
#
# cf_template_description: "{{ env_type }}-{{ guid }} Ansible Agnostic Deployer"
tower_run: false
# Workload roles applied to the Tower cluster after install (homework variant).
default_workloads:
  - tower-copy-ssh
  - tower-license-injector
  - cleanup-tower-default
  - tower-settings-update
  - tower-pip-packages
  - tower-user-create
  - tower-org-create
  - tower-credential-create
  - tower-project-create
  - tower-inventory-create
  - tower-jobtemplate-create
  - tower-babylon-job-runner
# infra_workloads:
#   - tower-settings-update
#   - tower-pip-packages
#   - tower-user-create
#   - tower-org-create
#   - tower-project-create
#   - tower-inventory-create
#   - tower-jobtemplate-create
ansible/configs/ansible-tower/files/cloud_providers/ec2_cloud_template.j2
New file
@@ -0,0 +1,529 @@
#jinja2: lstrip_blocks: "True"
---
{# CloudFormation template for the ansible-tower config: one VPC with a single
   public subnet, an internet gateway, and a default route to it. #}
AWSTemplateFormatVersion: "2010-09-09"
Mappings:
  RegionMapping: {{ aws_ami_region_mapping | to_json }}
Resources:
  Vpc:
    Type: "AWS::EC2::VPC"
    Properties:
      CidrBlock: "{{ aws_vpc_cidr }}"
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
        - Key: Name
          Value: "{{ aws_vpc_name }}"
        {# NOTE(review): "Hostlication" looks like a typo for "Application";
           left unchanged because the tag key is a live value existing stacks
           may carry. #}
        - Key: Hostlication
          Value:
            Ref: "AWS::StackId"
  VpcInternetGateway:
    Type: "AWS::EC2::InternetGateway"
  VpcRouteTable:
    Type: "AWS::EC2::RouteTable"
    Properties:
      VpcId:
        Ref: Vpc
  VPCRouteInternetGateway:
    DependsOn: VpcGA
    Type: "AWS::EC2::Route"
    Properties:
      GatewayId:
        Ref: VpcInternetGateway
      DestinationCidrBlock: "0.0.0.0/0"
      RouteTableId:
        Ref: VpcRouteTable
  VpcGA:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId:
        Ref: VpcInternetGateway
      VpcId:
        Ref: Vpc
  PublicSubnet:
    Type: "AWS::EC2::Subnet"
    DependsOn:
      - Vpc
    Properties:
    {% if aws_availability_zone is defined %}
      AvailabilityZone: {{ aws_availability_zone }}
    {% endif %}
      CidrBlock: "{{ aws_public_subnet_cidr }}"
      Tags:
        - Key: Name
          Value: "{{project_tag}}"
        - Key: Hostlication
          Value:
            Ref: "AWS::StackId"
      MapPublicIpOnLaunch: true
      VpcId:
        Ref: Vpc
  PublicSubnetRTA:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
    Properties:
      RouteTableId:
        Ref: VpcRouteTable
      SubnetId:
        Ref: PublicSubnet
{# One AWS::EC2::SecurityGroup resource per group (user-defined + defaults). #}
{% for security_group in security_groups|list + default_security_groups|list %}
  {{security_group['name']}}:
    Type: "AWS::EC2::SecurityGroup"
    Properties:
      GroupDescription: Host
      VpcId:
        Ref: Vpc
      Tags:
        - Key: Name
          Value: "{{security_group['name']}}"
{% endfor %}
{# One rule resource per declared rule. A rule carries either a "cidr" source
   or a source security group. Fixed: this template previously only honoured
   the "from_group" key, while this config's env_vars.yml declares the source
   group as "group:" — those rules rendered with neither CidrIp nor
   SourceSecurityGroupId (invalid CloudFormation). Both keys are now accepted,
   with "from_group" taking precedence for backward compatibility. #}
{% for security_group in default_security_groups|list + security_groups|list %}
{% for rule in security_group.rules %}
  {{security_group['name']}}{{rule['name']}}:
    Type: "AWS::EC2::SecurityGroup{{rule['rule_type']}}"
    Properties:
     GroupId:
       Fn::GetAtt:
         - "{{security_group['name']}}"
         - GroupId
     IpProtocol: {{rule['protocol']}}
     FromPort: {{rule['from_port']}}
     ToPort: {{rule['to_port']}}
  {% if rule['cidr'] is defined %}
     CidrIp: "{{rule['cidr']}}"
  {% endif %}
  {% if rule['from_group'] is defined or rule['group'] is defined %}
     SourceSecurityGroupId:
       Fn::GetAtt:
        - "{{rule['from_group'] | default(rule['group'])}}"
        - GroupId
  {% endif %}
{% endfor %}
{% endfor %}
  {# Private hosted zone attached to the VPC for internal DNS records. #}
  DnsZonePrivate:
    Type: "AWS::Route53::HostedZone"
    Properties:
      Name: "{{ aws_dns_zone_private }}"
      VPCs:
        - VPCId:
            Ref: Vpc
          VPCRegion:
            Ref: "AWS::Region"
      HostedZoneConfig:
        Comment: "{{ aws_comment }}"
  {# The public zone and its NS delegation are only created on the primary
     stack; a secondary stack reuses the existing public zone by name. #}
  {% if secondary_stack is not defined %}
  DnsZonePublic:
    Type: "AWS::Route53::HostedZone"
    Properties:
      Name: "{{ aws_dns_zone_public }}"
      HostedZoneConfig:
        Comment: "{{ aws_comment }}"
  DnsPublicDelegation:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
      - DnsZonePublic
    Properties:
    {% if HostedZoneId is defined %}
      HostedZoneId: "{{ HostedZoneId }}"
    {% else %}
      HostedZoneName: "{{ aws_dns_zone_root }}"
    {% endif %}
      RecordSets:
        - Name: "{{ aws_dns_zone_public }}"
          Type: NS
          TTL: {{ aws_dns_ttl_public }}
          ResourceRecords:
            "Fn::GetAtt":
              - DnsZonePublic
              - NameServers
    {% endif %}
{# One EC2 instance (plus optional EIP and public/private DNS records) per
   entry in "instances"; a round-robin A record over all instance IPs is added
   when dns_loadbalancer is set on a non-unique instance. #}
{% for instance in instances %}
{% if instance['dns_loadbalancer'] | d(false) | bool
  and not instance['unique'] | d(false) | bool %}
  {{instance['name']}}DnsLoadBalancer:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
    {% for c in range(1, (instance['count']|int)+1) %}
      - {{instance['name']}}{{c}}
      {% if instance['public_dns'] %}
      - {{instance['name']}}{{c}}EIP
      {% endif %}
    {% endfor %}
    Properties:
      {% if secondary_stack is defined %}
      HostedZoneName: "{{ aws_dns_zone_public }}"
      {% else %}
      HostedZoneId:
        Ref: DnsZonePublic
      {% endif %}
      RecordSets:
      - Name: "{{instance['name']}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
        Type: A
        TTL: {{ aws_dns_ttl_public }}
        ResourceRecords:
{% for c in range(1,(instance['count'] |int)+1) %}
          - "Fn::GetAtt":
            - {{instance['name']}}{{c}}
            - PublicIp
{% endfor %}
{% endif %}
{% for c in range(1,(instance['count'] |int)+1) %}
  {{instance['name']}}{{loop.index}}:
    Type: "AWS::EC2::Instance"
    Properties:
{% if custom_image is defined %}
      ImageId: {{ custom_image.image_id }}
{% else %}
      ImageId:
        Fn::FindInMap:
        - RegionMapping
        - Ref: AWS::Region
        - {{ instance.image | default(aws_default_image) }}
{% endif %}
      InstanceType: "{{instance['flavor'][cloud_provider]}}"
      KeyName: "{{instance.key_name | default(key_name)}}"
    {% if instance['UserData'] is defined %}
      {{instance['UserData']}}
    {% endif %}
    {% if instance['security_groups'] is defined %}
      SecurityGroupIds:
      {% for sg in instance.security_groups %}
        - Ref: {{ sg }}
      {% endfor %}
    {% else %}
      SecurityGroupIds:
        - Ref: DefaultSG
    {% endif %}
      SubnetId:
        Ref: PublicSubnet
      Tags:
    {% if instance['unique'] | d(false) | bool %}
        - Key: Name
          Value: {{instance['name']}}
        - Key: internaldns
          Value: {{instance['name']}}.{{aws_dns_zone_private_chomped}}
    {% else %}
        - Key: Name
          Value: {{instance['name']}}{{loop.index}}
        - Key: internaldns
          Value: {{instance['name']}}{{loop.index}}.{{aws_dns_zone_private_chomped}}
    {% endif %}
        - Key: "owner"
          Value: "{{ email | default('unknownuser') }}"
        - Key: "Project"
          Value: "{{project_tag}}"
        - Key: "{{project_tag}}"
          Value: "{{ instance['name'] }}"
    {% for tag in instance['tags'] %}
        - Key: {{tag['key']}}
          Value: {{tag['value']}}
    {% endfor %}
      BlockDeviceMappings:
{# Default root volume, unless the instance declares /dev/sda1 itself. #}
    {% if '/dev/sda1' not in instance.volumes|d([])|json_query('[].device_name')
      and '/dev/sda1' not in instance.volumes|d([])|json_query('[].name')
%}
        - DeviceName: "/dev/sda1"
          Ebs:
            VolumeSize: "{{ instance['rootfs_size'] | default(aws_default_rootfs_size) }}"
            VolumeType: "{{ aws_default_volume_type }}"
    {% endif %}
    {% for vol in instance.volumes|default([]) if vol.enable|d(true) %}
        - DeviceName: "{{ vol.name | default(vol.device_name) }}"
          Ebs:
          {% if cloud_provider in vol and 'type' in vol.ec2 %}
            VolumeType: "{{ vol[cloud_provider].type }}"
          {% else %}
            VolumeType: "{{ aws_default_volume_type }}"
          {% endif %}
            VolumeSize: "{{ vol.size }}"
    {% endfor %}
  {{instance['name']}}{{loop.index}}InternalDns:
    Type: "AWS::Route53::RecordSetGroup"
    Properties:
      HostedZoneId:
        Ref: DnsZonePrivate
      RecordSets:
    {% if instance['unique'] | d(false) | bool %}
        - Name: "{{instance['name']}}.{{aws_dns_zone_private}}"
    {% else %}
        - Name: "{{instance['name']}}{{loop.index}}.{{aws_dns_zone_private}}"
    {% endif %}
          Type: A
          TTL: {{ aws_dns_ttl_private }}
          ResourceRecords:
            - "Fn::GetAtt":
              - {{instance['name']}}{{loop.index}}
              - PrivateIp
{# NOTE(review): this test relies on every instance defining "public_dns";
   all entries in this config's env_vars.yml do. #}
{% if instance['public_dns'] %}
  {{instance['name']}}{{loop.index}}EIP:
    Type: "AWS::EC2::EIP"
    DependsOn:
    - VpcGA
    Properties:
      InstanceId:
        Ref: {{instance['name']}}{{loop.index}}
  {{instance['name']}}{{loop.index}}PublicDns:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
      - {{instance['name']}}{{loop.index}}EIP
    Properties:
      {% if secondary_stack is defined %}
      HostedZoneName: "{{ aws_dns_zone_public }}"
      {% else %}
      HostedZoneId:
        Ref: DnsZonePublic
      {% endif %}
      RecordSets:
      {% if instance['unique'] | d(false) | bool %}
        - Name: "{{instance['name']}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
      {% else %}
        - Name: "{{instance['name']}}{{loop.index}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
      {% endif %}
          Type: A
          TTL: {{ aws_dns_ttl_public }}
          ResourceRecords:
          - "Fn::GetAtt":
            - {{instance['name']}}{{loop.index}}
            - PublicIp
{% endif %}
{% endfor %}
{% endfor %}
{# Worker (isolated-node) instances, one set per entry in target_regions.
   Resource logical IDs concatenate name+index+region with NO separator:
   CloudFormation logical IDs must be alphanumeric, so dotted names are
   invalid. target_regions is not defined in env_vars.yml, so default it to an
   empty list rather than failing to render when it is not supplied. #}
{% for worker_region in target_regions | d([]) %}
{% for instance in instances_worker %}
{% if instance['dns_loadbalancer'] | d(false) | bool
  and not instance['unique'] | d(false) | bool %}
  {{instance['name']}}{{worker_region['name']}}DnsLoadBalancer:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
    {% for c in range(1, (instance['count']|int)+1) %}
      - {{instance['name']}}{{c}}{{worker_region['name']}}
      {% if instance['public_dns'] %}
      - {{instance['name']}}{{c}}{{worker_region['name']}}EIP
      {% endif %}
    {% endfor %}
    Properties:
      {% if secondary_stack is defined %}
      HostedZoneName: "{{ aws_dns_zone_public }}"
      {% else %}
      HostedZoneId:
        Ref: DnsZonePublic
      {% endif %}
      RecordSets:
      - Name: "{{instance['name']}}{{worker_region['name']}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
        Type: A
        TTL: {{ aws_dns_ttl_public }}
        ResourceRecords:
{% for c in range(1,(instance['count'] |int)+1) %}
          - "Fn::GetAtt":
            {# fixed: previously "...{{c}}.{{worker_region['name']}}" — the dot
               made this reference a non-existent logical ID #}
            - {{instance['name']}}{{c}}{{worker_region['name']}}
            - PublicIp
{% endfor %}
{% endif %}
{% for c in range(1,(instance['count'] |int)+1) %}
  {{instance['name']}}{{loop.index}}{{worker_region['name']}}:
    Type: "AWS::EC2::Instance"
    Properties:
{% if custom_image is defined %}
      ImageId: {{ custom_image.image_id }}
{% else %}
      ImageId:
        Fn::FindInMap:
        - RegionMapping
        - Ref: AWS::Region
        - {{ instance.image | default(aws_default_image) }}
{% endif %}
      InstanceType: "{{instance['flavor'][cloud_provider]}}"
      KeyName: "{{instance.key_name | default(key_name)}}"
    {% if instance['UserData'] is defined %}
      {{instance['UserData']}}
    {% endif %}
    {% if instance['security_groups'] is defined %}
      SecurityGroupIds:
      {% for sg in instance.security_groups %}
        - Ref: {{ sg }}
      {% endfor %}
    {% else %}
      SecurityGroupIds:
        - Ref: DefaultSG
    {% endif %}
      SubnetId:
        Ref: PublicSubnet
      Tags:
    {% if instance['unique'] | d(false) | bool %}
        - Key: Name
          Value: {{instance['name']}}
        - Key: internaldns
          Value: {{instance['name']}}{{worker_region['name']}}.{{aws_dns_zone_private_chomped}}
    {% else %}
        - Key: Name
          Value: {{instance['name']}}{{loop.index}}.{{worker_region['name']}}
        - Key: internaldns
          Value: {{instance['name']}}{{loop.index}}.{{worker_region['name']}}.{{aws_dns_zone_private_chomped}}
    {% endif %}
        - Key: "owner"
          Value: "{{ email | default('unknownuser') }}"
        - Key: "Project"
          Value: "{{project_tag}}"
        - Key: "{{project_tag}}"
          Value: "{{ instance['name'] }}{{loop.index}}.{{worker_region['name']}}"
    {% for tag in instance['tags'] %}
        - Key: {{tag['key']}}
          Value: {{tag['value']}}
    {% endfor %}
      BlockDeviceMappings:
    {% if '/dev/sda1' not in instance.volumes|d([])|json_query('[].device_name')
      and '/dev/sda1' not in instance.volumes|d([])|json_query('[].name')
%}
        - DeviceName: "/dev/sda1"
          Ebs:
            VolumeSize: "{{ instance['rootfs_size'] | default(aws_default_rootfs_size) }}"
            VolumeType: "{{ aws_default_volume_type }}"
    {% endif %}
    {% for vol in instance.volumes|default([]) if vol.enable|d(true) %}
        - DeviceName: "{{ vol.name | default(vol.device_name) }}"
          Ebs:
          {% if cloud_provider in vol and 'type' in vol.ec2 %}
            VolumeType: "{{ vol[cloud_provider].type }}"
          {% else %}
            VolumeType: "{{ aws_default_volume_type }}"
          {% endif %}
            VolumeSize: "{{ vol.size }}"
    {% endfor %}
  {{instance['name']}}{{loop.index}}{{worker_region['name']}}InternalDns:
    Type: "AWS::Route53::RecordSetGroup"
    Properties:
      HostedZoneId:
        Ref: DnsZonePrivate
      RecordSets:
    {% if instance['unique'] | d(false) | bool %}
        - Name: "{{instance['name']}}{{worker_region['name']}}.{{aws_dns_zone_private}}"
    {% else %}
        - Name: "{{instance['name']}}{{loop.index}}.{{worker_region['name']}}.{{aws_dns_zone_private}}"
    {% endif %}
          Type: A
          TTL: {{ aws_dns_ttl_private }}
          ResourceRecords:
            - "Fn::GetAtt":
              - {{instance['name']}}{{loop.index}}{{worker_region['name']}}
              - PrivateIp
{% if instance['public_dns'] %}
  {{instance['name']}}{{loop.index}}{{worker_region['name']}}EIP:
    Type: "AWS::EC2::EIP"
    DependsOn:
    - VpcGA
    Properties:
      InstanceId:
        Ref: {{instance['name']}}{{loop.index}}{{worker_region['name']}}
  {{instance['name']}}{{loop.index}}{{worker_region['name']}}PublicDns:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
      - {{instance['name']}}{{loop.index}}{{worker_region['name']}}EIP
    Properties:
      {% if secondary_stack is defined %}
      HostedZoneName: "{{ aws_dns_zone_public }}"
      {% else %}
      HostedZoneId:
        Ref: DnsZonePublic
      {% endif %}
      RecordSets:
      {% if instance['unique'] | d(false) | bool %}
        - Name: "{{instance['name']}}{{worker_region['name']}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
      {% else %}
        - Name: "{{instance['name']}}{{loop.index}}.{{worker_region['name']}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
      {% endif %}
          Type: A
          TTL: {{ aws_dns_ttl_public }}
          ResourceRecords:
          - "Fn::GetAtt":
            {# fixed: previously "...{{loop.index}}.{{worker_region['name']}}" —
               dotted logical IDs are invalid and match no declared resource #}
            - {{instance['name']}}{{loop.index}}{{worker_region['name']}}
            - PublicIp
{% endif %}
{% endfor %}
{% endfor %}
{% endfor %}
  {# IAM user scoped to the public hosted zone — per the Outputs below it is
     intended for Route53 automation (Let's Encrypt); only created on the
     primary stack. #}
  {% if secondary_stack is not defined %}
  Route53User:
    Type: AWS::IAM::User
    Properties:
      Policies:
        - PolicyName: Route53Access
          PolicyDocument:
            Statement:
              - Effect: Allow
                Action: route53:GetHostedZone
                Resource: arn:aws:route53:::change/*
              - Effect: Allow
                Action: route53:ListHostedZones
                Resource: "*"
              - Effect: Allow
                Action:
                  - route53:ChangeResourceRecordSets
                  - route53:ListResourceRecordSets
                  - route53:GetHostedZone
                Resource:
                  Fn::Join:
                    - ""
                    - - "arn:aws:route53:::hostedzone/"
                      - Ref: DnsZonePublic
              - Effect: Allow
                Action: route53:GetChange
                Resource: arn:aws:route53:::change/*
  Route53UserAccessKey:
      DependsOn: Route53User
      Type: AWS::IAM::AccessKey
      Properties:
        UserName:
          Ref: Route53User
  {% endif %}
Outputs:
  Route53internalzoneOutput:
    Description: The ID of the internal route 53 zone
    Value:
      Ref: DnsZonePrivate
  {% if secondary_stack is not defined %}
  Route53User:
    Value:
      Ref: Route53User
    Description: IAM User for Route53 (Let's Encrypt)
  Route53UserAccessKey:
    Value:
      Ref: Route53UserAccessKey
    Description: IAM User for Route53 (Let's Encrypt)
  Route53UserSecretAccessKey:
    Value:
      Fn::GetAtt:
        - Route53UserAccessKey
        - SecretAccessKey
    Description: IAM User for Route53 (Let's Encrypt)
  {% endif %}
ansible/configs/ansible-tower/files/hosts_template.j2
New file
@@ -0,0 +1,51 @@
{# Tower-installer inventory rendered from the provisioner's live groups    #}
{# (towers / workers / support). Jinja comments do not appear in output.    #}
[tower]
{% for host in groups['towers'] %}
{{ host }}
{% endfor %}
{# NOTE(review): database host is hard-coded to support1; presumably this   #}
{# should be derived from groups['support'][0] — confirm.                   #}
[database]
support1.{{chomped_zone_internal_dns}}
{# One isolated instance group per target region; a worker belongs to a     #}
{# region when ".<region>." appears in its hostname.                        #}
{% if target_regions is defined %}
{%for i_region in target_regions %}
[isolated_group_{{i_region.name}}]
{% for host in groups['workers'] %}
{% if '.' + i_region['name'] + '.' in host %}
{{ host }}
{% endif %}
{% endfor %}
[isolated_group_{{i_region.name}}:vars]
controller=tower
{% endfor %}
{% endif %}
[all:vars]
ansible_become=true
admin_password={{tower_admin_password}}
pg_host='support1.{{chomped_zone_internal_dns}}'
pg_port='5432'
pg_database='awx'
pg_username='awx'
pg_password={{tower_admin_password}}
rabbitmq_port=5672
rabbitmq_vhost=tower
rabbitmq_username=tower
{# regex_replace with no replacement argument deletes every character that  #}
{# is not [a-zA-Z0-9] — RabbitMQ gets a stripped-down admin password.       #}
rabbitmq_password={{ tower_admin_password | regex_replace('[^a-zA-Z0-9]') }}
rabbitmq_cookie=cookiemonster
rabbitmq_use_long_name=true
### For our use, not Tower install use (so you can run ansible command line)
[supports]
{% for host in groups['support'] %}
{{ host }}
{% endfor %}
ansible/configs/ansible-tower/files/repos_template.j2
New file
@@ -0,0 +1,38 @@
# Yum repository definitions pointing at the lab's internal repo mirror,
# plus upstream EPEL. GPG checking is disabled because the mirror re-hosts
# the packages without re-signing.
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-rpms
enabled=1
gpgcheck=0
[rhel-7-server-rh-common-rpms]
name=Red Hat Enterprise Linux 7 Common
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-rh-common-rpms
enabled=1
gpgcheck=0
[rhel-7-server-extras-rpms]
name=Red Hat Enterprise Linux 7 Extras
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-extras-rpms
enabled=1
gpgcheck=0
[rhel-7-server-optional-rpms]
name=Red Hat Enterprise Linux 7 Optional
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-optional-rpms
enabled=1
gpgcheck=0
# Fixed copy-pasted name= (was "... Optional" again). The baseurl path
# rhel-server-rhscl-7-rpms is Red Hat's actual channel name for RHSCL.
[rhel-7-server-rhscl-rpms]
name=Red Hat Enterprise Linux 7 Software Collections (RHSCL)
baseurl={{own_repo_path}}/{{repo_version}}/rhel-server-rhscl-7-rpms
enabled=1
gpgcheck=0
[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch
mirrorlist=http://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=0
#gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
ansible/configs/ansible-tower/files/tower_cli.j2
New file
@@ -0,0 +1,5 @@
{# tower-cli configuration — presumably installed as /etc/tower/tower_cli.cfg #}
{# or ~/.tower_cli.cfg; verify against the task that deploys it.              #}
[general]
host = {{ tower_hostname }}
username = admin
password = {{tower_admin_password}}
verify_ssl = False
ansible/configs/ansible-tower/files/tower_template_inventory.j2
New file
@@ -0,0 +1,54 @@
{# Tower-installer inventory using synthetic towerN/workerN/supportN names  #}
{# plus public_host_name / ssh_host vars, keyed off the live groups.        #}
[tower]
{% for host in groups['towers'] %}
tower{{loop.index}}.{{chomped_zone_internal_dns}} public_host_name=tower{{loop.index}}.{{ guid }}{{subdomain_base_suffix}} ssh_host={{host}}
{% endfor %}
[database]
## This should be replaced by supports[0] name
support1.{{chomped_zone_internal_dns}}
## Add isolated if needed, we should have an "IF" statement, only if worker groups exist and have instances.
{# NOTE(review): this string comparison requires worker to be the STRING    #}
{# 'yes' — an unquoted YAML `worker: yes` becomes boolean true and fails.   #}
{% if worker == 'yes' %}
[isolated_group_{{region}}]
{% for host in groups['workers'] %}
worker{{loop.index}}.{{chomped_zone_internal_dns}} public_host_name=worker{{loop.index}}.{{ guid }}{{subdomain_base_suffix}} ssh_host={{host}}
{% endfor %}
[isolated_group_{{region}}:vars]
controller=tower
{% endif %}
[all:vars]
ansible_become=true
admin_password={{tower_admin_password}}
## This should be replaced by supports[0] name
{# NOTE(review): hosts_template.j2 uses chomped_zone_internal_dns for       #}
{# pg_host but this template hard-codes "{{ '{{guid}}' }}.internal" —       #}
{# confirm both resolve to the same zone.                                   #}
pg_host='support1.{{guid}}.internal'
pg_port='5432'
pg_database='awx'
pg_username='awx'
pg_password={{tower_admin_password}}
rabbitmq_port=5672
rabbitmq_vhost=tower
rabbitmq_username=tower
{# Strips every non-alphanumeric character from the admin password.         #}
rabbitmq_password={{ tower_admin_password | regex_replace('[^a-zA-Z0-9]') }}
rabbitmq_cookie=cookiemonster
rabbitmq_use_long_name=true
### For our use, not Tower install use (so you can run ansible command line)
[supports]
{% for host in groups['support'] %}
support{{loop.index}}.{{chomped_zone_internal_dns}} public_host_name=support{{loop.index}}.{{ guid }}{{subdomain_base_suffix}} ssh_host={{host}}
{% endfor %}
{% if worker == 'yes' %}
[workers]
{% for host in groups['workers'] %}
worker{{loop.index}}.{{chomped_zone_internal_dns}} public_host_name=worker{{loop.index}}.{{ guid }}{{subdomain_base_suffix}} ssh_host={{host}}
{% endfor %}
{% endif %}
ansible/configs/ansible-tower/post_infra.yml
New file
@@ -0,0 +1,24 @@
- name: Step 002 Post Infrastructure
  hosts: localhost
  connection: local
  become: false
  tags:
  - step002
  - post_infrastructure
  tasks:
  # Launches a pre-existing Tower job template; only runs when the deployer
  # passes tower_run == 'true' (string comparison, not boolean).
  - name: Job Template to launch a Job Template with update on launch inventory set
    uri:
      # NOTE(review): /api/v1/ is the legacy Tower endpoint; Tower 3.x
      # serves /api/v2/ — confirm v1 is still available on the target.
      url: "https://{{ ansible_tower_ip }}/api/v1/job_templates/{{ job_template_id }}/launch/"
      method: POST
      user: "{{ tower_admin }}"
      password: "{{ tower_admin_password }}"
      body:
        extra_vars:
          guid: "{{ guid }}"
          ipa_host_password: "{{ ipa_host_password }}"
      body_format: json
      validate_certs: false
      # HEADER_<name> parameters were deprecated in Ansible 2.1 and removed
      # in 2.9; the 'headers' dict is the supported replacement.
      headers:
        Content-Type: "application/json"
      status_code: [200, 201]
    when: tower_run == 'true'
ansible/configs/ansible-tower/post_software.yml
New file
@@ -0,0 +1,33 @@
# Post-software stage: announce start, run bastion tasks, install Tower
# workloads, then report completion.
- name: Step 00xxxxx post software
  hosts: support
  gather_facts: false
  become: true
  tasks:
    - debug:
        msg: "Post-Software tasks Started"

- name: Step lab post software deployment
  hosts: bastions
  gather_facts: false
  become: true
  tags:
    - opentlc_bastion_tasks
  tasks:
    # This play previously declared a bare 'tasks:' (null) with no task
    # list; a no-op placeholder keeps the play valid and self-describing.
    - debug:
        msg: "No bastion-specific post-software tasks defined"

- name: Setup Workloads on Tower
  import_playbook: tower_workloads.yml

- name: PostSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Post-Software checks completed successfully"
ansible/configs/ansible-tower/pre_infra.yml
New file
@@ -0,0 +1,21 @@
# Pre-infrastructure stage. The play names and messages previously said
# "Pre-Software"/"PretSoftware" (copy-pasted from pre_software.yml);
# corrected to reflect this file's actual stage.
- name: Step 000 Pre Infrastructure
  hosts: localhost
  connection: local
  become: false
  tags:
    - step001
    - pre_infrastructure
  tasks:
  - debug:
      msg: "Pre-Infrastructure Steps starting"

- name: Pre-Infrastructure flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
  - pre_flight_check
  tasks:
  - debug:
      msg: "Pre-Infrastructure checks completed successfully"
ansible/configs/ansible-tower/pre_software.yml
New file
@@ -0,0 +1,61 @@
---
# Pre-software stage: generate the per-environment SSH key, configure repos
# and common files on all hosts, then prepare the bastion.
- name: Step 003 - Create env key
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - step003
    - generate_env_keys
  tasks:
    # 'creates:' makes this idempotent — skipped when the key already exists.
    - name: Generate SSH keys
      shell: ssh-keygen -b 2048 -t rsa -f "{{output_dir}}/{{env_authorized_key}}" -q -N ""
      args:
        creates: "{{output_dir}}/{{env_authorized_key}}"
      when: set_env_authorized_key
# Runs on every host except Windows machines (pattern all:!windows).
- name: Configure all hosts with Repositories, Common Files and Set environment key
  hosts:
    - all:!windows
  become: true
  gather_facts: False
  tags:
    - step004
    - common_tasks
  roles:
    - { role: "set-repositories", when: 'repo_method is defined' }
    - { role: "common", when: 'install_common' }
    - { role: "set_env_authorized_key", when: 'set_env_authorized_key' }
- name: Configuring Bastion Hosts
  hosts: bastions
  become: true
  roles:
    - { role: "bastion", when: 'install_bastion' }
    - { role: "bastion-opentlc-ipa",  when: 'install_ipa_client' }
  tags:
    - step004
    - bastion_tasks
# - name: Inject and configure FTL on bastion as grader host
#   hosts: bastions
#   become: true
#   tasks:
#     - name: Setup FTL
#       include_role:
#         name: ftl-injector
#   tags:
#     - step004
#     - ftl-injector
- name: PreSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - flight_check
  tasks:
    - debug:
        msg: "Pre-Software checks completed successfully"
ansible/configs/ansible-tower/requirements.yml
New file
@@ -0,0 +1,6 @@
---
# Galaxy role requirements for this config.
# Install with: ansible-galaxy install -r requirements.yml
# External role to setup grader host virtualenv and FTL grading infra
- src: https://github.com/redhat-gpte-devopsautomation/ftl-injector
  name: ftl-injector
  version: v0.7
ansible/configs/ansible-tower/sample_vars.yml
New file
@@ -0,0 +1,80 @@
---
# Sample variable file for the ansible-tower config.
cloudformation_retries: 0

## Environment size
bastion_instance_type: "t2.medium"
tower_instance_type: "t2.medium"
worker_instance_type: "t2.medium"
support_instance_type: "t2.medium"
root_filesystem_size: 20                # Size of the root filesystem (GB)

# Env config basics
# NOTE(review): the config directory was renamed to ansible-tower; confirm
# whether env_type should now be ansible-tower instead of ans-tower-prod.
env_type: ans-tower-prod                # Name of config to deploy
output_dir: /tmp                        # Writable working scratch directory
email: name@example.com                 # User info for notifications
#guid: hwtest2                          # Unique string used in FQDN

# AWS specific
subdomain_base_suffix: .example.opentlc.com      # Your domain used in FQDN

# Path to yum repos (Will be overwritten by the last -e@ file, such as ../secrets.yml)
own_repo_path: http://admin.example.com/repos/product

# Cloud specfic settings - example given here for AWS
cloud_provider: ec2                     # Which AgnosticD Cloud Provider to use
aws_region: ap-southeast-2              # AWS Region to deploy in
HostedZoneId: Z3IHLWJZOU9SRT            # You will need to change this
key_name: ocpkey                        # Keyname must exist in AWS

# Ansible Tower related vars
tower_version: 3.5.0-1                 # tower version you want to install
region: apac                           # region can not be with special characters in case of isolated node group
software_to_deploy: tower              # Define tower to install tower or none to have only infra ready.
# Quoted deliberately: the inventory templates test `worker == 'yes'` as a
# STRING; an unquoted bare `yes` parses as boolean true and never matches.
worker: "yes"                          # Set "yes" to add isolated node group.
worker_instance_count: 1               # Set 0 to not to provision worker(isolated) nodes.
# tower_license: >                     # Set the tower license in the same format. Do not forget to add "eula_accepted: true".
#   {
#     "eula_accepted": true,
#     "company_name": "Red Hat",
#     "contact_email": "name@redhat.com",
#     "contact_name": "some person"
#     "hostname": "70a415ef832159a36413fa599",
#     "instance_count": 50,
#     "license_date": 16581423619,
#     "license_key":
#     "eea1b84d1e39cfEXAMPLE5739066069e60c6d0aEXAMPLE2c29cc61b2aEXAMPLE",
#     "license_type": "enterprise",
#     "subscription_name": "Ansible Tower by Red Hat (50 Managed Nodes), RHT Internal",
#     "trial": true
#   }
# accounts:                                      # Define users you want to create. Set superuser: yes to make user system wide System Administrator
#   - user: test1
#     password: changeme
#     email: babylon@example.com
#     firstname: test1
#     lastname: one
#     superuser: yes
#   - user: test2
#     password: changeme
#     email: babylon1@example.com
#     firstname: test2
#     lastname: two
#   - user: test3
#   - user: test4
#     lastname: four
# tower_organization:
#   - name: gpte
#   - name: BU
# One isolated instance group is generated per region name. The list
# previously contained "na" twice, which would have rendered duplicate
# [isolated_group_na] sections; the duplicate entry was removed.
target_regions:
  - name: na
  - name: emea
ansible/configs/ansible-tower/sample_vars_babylon.yml
New file
@@ -0,0 +1,169 @@
---
# Sample variable file for a babylon-style deployment of the ansible-tower
# config.
cloudformation_retries: 0

# ## Environment size
# bastion_instance_type: "t2.medium"
# tower_instance_type: "t2.medium"
# worker_instance_type: "t2.medium"
# support_instance_type: "t2.medium"
root_filesystem_size: 20                # Size of the root filesystem (GB)

# Env config basics
# NOTE(review): the config directory was renamed to ansible-tower; confirm
# whether env_type should now be ansible-tower instead of ans-tower-prod.
env_type: ans-tower-prod                # Name of config to deploy
output_dir: /opt/workdir                # Writable working scratch directory
email: name@example.com                 # User info for notifications
#guid: hwtest2                          # Unique string used in FQDN

# AWS specific
subdomain_base_suffix: .example.opentlc.com      # Your domain used in FQDN

# Path to yum repos (Will be overwritten by the last -e@ file, such as ../secrets.yml)
own_repo_path: http://admin.example.com/repos/product

# Cloud specfic settings - example given here for AWS
cloud_provider: ec2                     # Which AgnosticD Cloud Provider to use
aws_region: ap-southeast-2              # AWS Region to deploy in
HostedZoneId: Z3IHLWJZOU9SRT            # You will need to change this
key_name: ocpkey                        # Keyname must exist in AWS

# Ansible Tower related vars
tower_version: 3.5.0-1                 # tower version you want to install
region: apac                           # region can not be with special characters in case of isolated node group
software_to_deploy: tower              # Define tower to install tower or none to have only infra ready.
tower_instance_count: 2
support_instance_count: 2
worker_instance_count: 2               # Set 0 to not to provision worker(isolated) nodes.
# tower_license: >                     # Set the tower license in the same format. Do not forget to add "eula_accepted: true".
#   {
#     "eula_accepted": true,
#     "company_name": "Red Hat",
#     "contact_email": "name@redhat.com",
#     "contact_name": "some person"
#     "hostname": "70a415ef832159a36413fa599",
#     "instance_count": 50,
#     "license_date": 16581423619,
#     "license_key":
#     "eea1b84d1e39cfEXAMPLE5739066069e60c6d0aEXAMPLE2c29cc61b2aEXAMPLE",
#     "license_type": "enterprise",
#     "subscription_name": "Ansible Tower by Red Hat (50 Managed Nodes), RHT Internal",
#     "trial": true
#   }
tower_host_name: "tower1.{{guid}}{{subdomain_base_suffix}}"

# Define users you want to create. Set superuser: yes to make a user a
# system-wide System Administrator.
tower_user_accounts:
  - user: babylon
    password: changeme
    email: babylon@example.com
    firstname: Baby
    lastname: Lon
    superuser: yes
  - user: babylon-viewer
    password: changeme
    email: babylon1@example.com
    firstname: Babylon
    lastname: Viewer
#   - user: test3
#   - user: test4
#     lastname: four

tower_organization:
  - name: gpte
  - name: BU

target_regions:
  - name: emea
  - name: apac

### tower project roles
tower_projects:
  - name: babylon-dev
    description: "babylon dev project"
    organization: "gpte"
    scm_url: "https://github.com/redhat-gpte-devopsautomation/babylon.git"
    #scm_type:
    #scm_credential:
    scm_branch: dev
    scm_update_on_launch: true

tower_inventories:
  - name: empty-inventory-emea
    description: emea
    organization: gpte
    instance_group: emea
  - name: empty-inventory-apac
    description: apac
    organization: gpte
    instance_group: apac
  - name: empty-inventory
    description: "Empty inventory"
    organization: gpte
    # instance_group: ""

# NOTE(review): this file previously defined tower_job_templates,
# tower_setting_params, tower_update_venv and key_local_path TWICE; YAML
# silently keeps only the last occurrence, so the shadowed duplicates were
# removed and the effective (last) values kept.
tower_job_templates:
  - name: job-runner-dev
    description: "babylon job runner dev"
    job_type: run
    #vault_credential:
    project: babylon-dev
    playbook: job-runner.yml
    inventory: empty-inventory
    become: yes

# Tower settings
tower_setting_params:
  AWX_PROOT_BASE_PATH: "/tmp"
  AWX_PROOT_SHOW_PATHS: "'/var/lib/awx/projects/', '/tmp'"

# List of virtual environments which will be created.
# A restart of the tower service is required afterwards:
#   ansible-tower-service restart
# https://docs.ansible.com/ansible-tower/latest/html/userguide/security.html
tower_virtual_environment:
  - /var/lib/awx/venv/ansible
  - /var/lib/awx/venv/test1

# Path of Virtual Env for update
tower_update_venv: /var/lib/awx/venv/ansible

# Pip packages with version which needs to be updated for venv
pip_requirements:
  - boto==2.49.0
  - boto3==1.9.200
  - awscli==1.16.210
  - ansible-tower-cli==3.3.6

key_local_path: "~/.ssh/{{key_name}}.pem"
ansible/configs/ansible-tower/software.yml
New file
@@ -0,0 +1,20 @@
---
# Software stage. NOTE(review): no software tasks run here — presumably the
# Tower install itself is driven elsewhere (e.g. by the cloud-provider /
# host_template machinery); this playbook only reports completion.
#  - name: Step 00xxxxx software
#    hosts: bastions[0]
#    gather_facts: False
#    become: false
#    tasks:
#      - debug:
#          msg: "Software tasks Started - None"
 - name: Software flight-check
   hosts: localhost
   connection: local
   gather_facts: false
   become: false
   tags:
     - post_flight_check
   tasks:
     - debug:
         msg: "Software checks completed successfully"
ansible/configs/ansible-tower/tower_workloads.yml
New file
@@ -0,0 +1,55 @@
---
# Deploys Tower workload roles from the bastion once Tower is installed.
# default_workloads is consumed as a list of role names, while
# infra_workloads is a comma-separated string that gets split —
# NOTE(review): confirm this asymmetry with the callers that set them.
- name: Install workloads
  hosts: bastions
  gather_facts: false
  run_once: true
  become: true
  tasks:
  # Equivalent to the previous one-element loop over
  # query('inventory_hostnames', 'towers') followed by `item | first`.
  - name: Resolve Tower hostname from the first host in the towers group
    set_fact:
      tower_hostname: "{{ query('inventory_hostnames', 'towers') | first }}"

  # The original nested a second, identically-guarded block inside each of
  # the two workload blocks; the redundant level was removed without
  # changing any condition or tag.
  - name: Install tower-default workloads
    when:
    - default_workloads | d("") | length > 0
    tags:
    - default_workloads
    block:
    - name: Deploy tower-default workloads
      include_role:
        name: "{{ workload_loop_var }}"
      vars:
        tower_username: "admin"
      loop: "{{ default_workloads }}"
      loop_control:
        loop_var: workload_loop_var

  - name: Install tower-infra workloads
    when:
    - infra_workloads | d("") | length > 0
    tags:
    - infra_workloads
    block:
    - name: Check if admin_user is set
      fail:
        msg: admin_user must be set for tower-infra workloads
      when:
      - admin_user is not defined or admin_user | length == 0

    - name: Deploy tower-infra workloads
      include_role:
        name: "{{ workload_loop_var }}"
      vars:
        tower_username: admin
        ACTION: "provision"
      loop: "{{ infra_workloads.split(',') | list }}"
      loop_control:
        loop_var: workload_loop_var
ansible/configs/ansible-tower/tower_workloads_workaround.yml
New file
@@ -0,0 +1,156 @@
# Workaround playbook: configures a freshly installed Tower from the bastion
# (license, demo-object cleanup, users, orgs, projects, inventories, job
# templates) via the tower_* modules and the tower-cli command.
- hosts: bastions
  gather_facts: false
  become: yes
  tasks:
  - name: Inject License
    include_role:
      name: tower-license-injector
    when: tower_license is defined
    tags:
      - tower-license-injector
###### delete demo stuff #######
# Deletions are best-effort (ignore_errors) so re-runs after the demo
# objects are already gone do not fail the play.
  - name: Delete Demo Job Template
    tower_job_template:
      name: "Demo Job Template"
      state: absent
      job_type: run
      playbook: "hello_world.yml"
      project: "Demo Project"
      inventory: "Demo Inventory"
      tower_host: "{{ tower_host_name }}"
      tower_username: admin
      tower_password: "{{tower_admin_password}}"
      tower_verify_ssl: false
    ignore_errors: yes
  # tower-cli is used here — presumably because the tower_credential module
  # cannot delete by name alone; confirm.
  - name: Delete Demo Credential
    command: tower-cli credential delete -n "Demo Credential"
    ignore_errors: yes
  - name: Delete Demo Project
    tower_project:
      name: "Demo Project"
      state: absent
      tower_host: "{{ tower_host_name }}"
      tower_username: admin
      tower_password: "{{tower_admin_password}}"
      tower_verify_ssl: false
    ignore_errors: yes
  - name: Delete Demo Inventory
    tower_inventory:
      name: "Demo Inventory"
      organization: Default
      state: absent
      tower_host: "{{ tower_host_name }}"
      tower_username: admin
      tower_password: "{{tower_admin_password}}"
      tower_verify_ssl: false
    ignore_errors: yes
###### Create tower users #####
  # NOTE(review): loops over `tower_accounts`, but sample_vars_babylon.yml
  # defines `tower_user_accounts` — confirm which variable name callers
  # actually supply.
  - name: Add tower user
    tower_user:
      username: "{{ item.user }}"
      password: "{{ item.password | default('change_me') }}"
      email: "{{ item.email | default('rhpds-admins@redhat.com') }}"
      first_name: "{{ item.firstname | default(item.user) }}"
      last_name: "{{ item.lastname | default(item.user) }}"
      superuser: "{{ item.superuser | default('no')}}"
      state: present
      tower_host: "{{ tower_host_name }}"
      tower_username: admin
      tower_password: "{{tower_admin_password}}"
      tower_verify_ssl: false
    loop: "{{ tower_accounts }}"
    when: tower_accounts is defined
    tags:
      - tower-user-create
#### Create Tower Organization ####
  # Note: the loop variable `tower_organization` shares its name with the
  # module being called; it works, but is easy to misread.
  - name: Add tower org
    tower_organization:
      name: "{{ item.name }}"
      state: present
      tower_host: "{{ tower_host_name }}"
      tower_username: admin
      tower_password: "{{tower_admin_password}}"
      tower_verify_ssl: false
    loop: "{{ tower_organization }}"
    when: tower_organization is defined
    tags:
      - tower-org-create
#### Create tower Project #####
  - name: Add tower project
    tower_project:
      name: "{{ item.name }}"
      description: "{{ item.description }}"
      organization:  "{{ item.organization | default('Default')}}"
      scm_url:  "{{ item.scm_url }}"
      scm_type: "{{ item.scm_type | d('git')}}"
      scm_credential: "{{ item.scm_credential | d('')}}"
      scm_branch:  "{{ item.scm_branch | d('master') }}"
      scm_update_on_launch: "{{ item.scm_update_on_launch | d('false') }}"
      state: present
      tower_host: "{{ tower_host_name }}"
      tower_username: admin
      tower_password: "{{tower_admin_password}}"
      tower_verify_ssl: false
    loop: "{{ tower_projects }}"
    when: tower_projects is defined
    tags:
      - tower-project-create
#### Create tower Inventory ####
  - name: Block for Inventory
    when: tower_inventories is defined
    block:
    - name: Add tower inventory
      tower_inventory:
        name: "{{ item.name }}"
        description: "{{ item.description  }}"
        organization: "{{ item.organization | d('gpte') }}"
        state: present
        tower_host: "{{ tower_host_name }}"
        tower_username: admin
        tower_password: "{{tower_admin_password}}"
        tower_verify_ssl: false
      loop: "{{ tower_inventories }}"
      tags:
        - tower-inventory-create
    # Instance-group association is done with tower-cli — presumably because
    # the tower_inventory module has no instance_group parameter; confirm.
    - name: Associate instance group to inventory
      command: >-
        tower-cli inventory
        associate_ig
        --inventory "{{ item.name }}"
        --instance-group "{{ item.instance_group | d('') }}"
      loop: "{{ tower_inventories }}"
      when:
        - item.instance_group is defined
#### Create Tower Job Template ####
  - name: Add tower JobTemplate
    tower_job_template:
      name: "{{ item.name }}"
      description: "{{ item.description  }}"
      job_type: run
      ask_inventory: Yes
      ask_credential: Yes
      vault_credential: "{{ item.vault_credential | d('') }}"
      ask_extra_vars: Yes
      project: "{{ item.project }}"
      playbook: "{{ item.playbook | d('main.yml') }}"
      become_enabled: "{{ item.become | d('no') }}"
      concurrent_jobs_enabled: Yes
      state: present
      tower_host: "{{ tower_host_name }}"
      tower_username: admin
      tower_password: "{{tower_admin_password}}"
      tower_verify_ssl: false
    loop: "{{ tower_job_templates }}"
    when: tower_job_templates is defined
    tags:
      - tower-job-template-create