Mitesh The Mouse
2019-10-23 674e351813b1b7ad2dcd3c8879e972259dda06ab
Satellite config (#776)

* Few changes made for creating PR

* satellite v64 for production with local capsule

* minor change

* satellite local capsule configuration updates

* Update env_vars.yml

* Update env_vars.yml

* host template: Fix possible mismatch between host and loop.index

* Security fixes

* improvements/fixes for satellite roles and satellite-v64-config

* satellite-multi-region

* Role and sample_vars updated

* indent issue fixed in satellite role

* fixed typo in satellite-v64-prod env_vars
33 files changed (18 added, 10 modified, 5 deleted), 2076 lines changed
ansible/configs/satellite-multi-region/README.adoc 35
ansible/configs/satellite-multi-region/deploy_stack.yml 77
ansible/configs/satellite-multi-region/destroy_env.yml 41
ansible/configs/satellite-multi-region/env_vars.yml 215
ansible/configs/satellite-multi-region/files/cloud_providers/capsule.j2 369
ansible/configs/satellite-multi-region/files/cloud_providers/default.j2 369
ansible/configs/satellite-multi-region/files/etc_hosts_template.j2 16
ansible/configs/satellite-multi-region/files/hosts_template.j2 23
ansible/configs/satellite-multi-region/files/repos_template.j2 122
ansible/configs/satellite-multi-region/files/tower_template_inventory.j2 54
ansible/configs/satellite-multi-region/post_infra.yml 79
ansible/configs/satellite-multi-region/post_software.yml 31
ansible/configs/satellite-multi-region/pre_infra.yml 30
ansible/configs/satellite-multi-region/pre_software.yml 46
ansible/configs/satellite-multi-region/sample_vars.yml 93
ansible/configs/satellite-multi-region/software.yml 52
ansible/configs/satellite-multi-region/start.yml 33
ansible/configs/satellite-multi-region/stop.yml 33
ansible/configs/satellite-v64-prod/README.adoc 28
ansible/configs/satellite-v64-prod/env_vars.yml 189
ansible/configs/satellite-v64-prod/files/cloud_providers/ec2_cloud_template.j2 23
ansible/configs/satellite-v64-prod/sample_vars.yml 10
ansible/configs/satellite-v64-prod/software.yml 18
ansible/roles/satellite-capsule-configuration/tasks/main.yml 9
ansible/roles/satellite-manage-activationkey/tasks/main.yml 8
ansible/roles/satellite-manage-activationkey/vars/main.yml 3
ansible/roles/satellite-manage-capsule-certificate/defaults/main.yml 2
ansible/roles/satellite-manage-content-view/tasks/main.yml 10
ansible/roles/satellite-manage-content-view/vars/main.yml 5
ansible/roles/satellite-manage-manifest/tasks/main.yml 2
ansible/roles/satellite-manage-manifest/vars/main.yml 2
ansible/roles/satellite-manage-subscription-and-sync/tasks/main.yml 47
ansible/roles/satellite-manage-subscription-and-sync/vars/main.yml 2
ansible/configs/satellite-multi-region/README.adoc
New file
@@ -0,0 +1,35 @@
= satellite-multi-region
This config deploys Satellite across multiple regions.
. You specify the target regions using the `target_regions` variable, for example:
+
[source,yaml]
----
target_regions:
  - region: us-east-1
    stack: default
    name: na
    vpc_cidr: 10.1.0.0/16
    subnet_cidr: 10.1.0.0/24
  - region: eu-central-1
    stack: worker.j2
    name: emea
    vpc_cidr: 10.1.0.0/16
    subnet_cidr: 10.1.0.0/24
  - region: ap-southeast-1
    stack: worker.j2
    name: apac
    vpc_cidr: 10.1.0.0/16
    subnet_cidr: 10.1.0.0/24
----
+
This deploys the same stack (`default`) in all specified regions. The CloudFormation template is taken from the config directory if one exists there; otherwise the default CloudFormation template is used.
+
If you want to use a specific template in a region, set `stack` to the file name and store the template in `configs/satellite-multi-region/files/cloud_providers/FILENAME`.
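+
For example, link:sample_vars.yml[] uses the default template for the primary region and the capsule template for the second one:
+
[source,yaml]
----
target_regions:
  - region: ap-southeast-2
    stack: default         # default template for the primary region
    name: apac
    vpc_cidr: 10.1.0.0/16
    subnet_cidr: 10.1.0.0/24
  - region: ap-southeast-1
    stack: capsule.j2      # rendered from files/cloud_providers/capsule.j2
    name: emea
    vpc_cidr: 10.2.0.0/16
    subnet_cidr: 10.2.0.0/24
----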
+
. In this config, the first region listed under the `target_regions` variable hosts the primary stack, which includes the Satellite server.
. Each subsequent region in `target_regions` is deployed as a secondary stack; for example, the entry `name: emea` with `stack: capsule.j2` provides the capsule nodes for that region.
Have a look at link:sample_vars.yml[] for example vars. A usage example follows.
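+
A deployment can then be launched from the `ansible` directory, following the same pattern as the satellite-v64-prod README (the values shown are illustrative):
+
[source,bash]
----
ansible-playbook main.yml \
    -e @configs/satellite-multi-region/sample_vars.yml \
    -e @~/secrets.yml \
    -e guid=test01
----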
ansible/configs/satellite-multi-region/deploy_stack.yml
New file
@@ -0,0 +1,77 @@
---
- debug:
    msg: "stack_file is {{stack_file}}"
- when: stack_file == 'default'
  block:
    - name: Run infra-ec2-template-generate Role (target_regions)
      include_role:
        name: infra-ec2-template-generate
      vars:
        cloudformation_template: "{{ output_dir }}/{{ env_type }}.{{ guid }}.{{ cloud_provider }}_cloud_template_{{ aws_region }}"
    - set_fact:
        stack_deployed: false
    - name: Run infra-ec2-template-create Role (target_regions)
      include_role:
        name: infra-ec2-template-create
      vars:
        aws_region_loop: "{{ aws_region }}"
        cloudformation_template: "{{ output_dir }}/{{ env_type }}.{{ guid }}.{{ cloud_provider }}_cloud_template_{{ aws_region }}"
- when: stack_file != 'default'
  block:
    - name: Run infra-ec2-template-generate Role (target_regions)
      include_role:
        name: infra-ec2-template-generate
      vars:
        cloudformation_template_src: "../../configs/{{ env_type }}/files/cloud_providers/{{ stack_file }}"
        cloudformation_template: "{{ output_dir }}/{{ env_type }}.{{ guid }}.{{ cloud_provider }}_cloud_template_{{ aws_region }}"
    - set_fact:
        stack_deployed: false
    - name: Run infra-ec2-template-create Role (target_regions)
      include_role:
        name: infra-ec2-template-create
      vars:
        aws_region_loop: "{{ aws_region }}"
        cloudformation_template: "{{ output_dir }}/{{ env_type }}.{{ guid }}.{{ cloud_provider }}_cloud_template_{{ aws_region }}"
- name: report Cloudformation error (target_regions)
  fail:
    msg: "FAIL {{ project_tag }} Create Cloudformation"
  when: not cloudformation_out is succeeded
  tags:
    - provision_cf_template
- name: Run infra-ec2-create-inventory Role (target_regions)
  include_role:
    name: infra-ec2-create-inventory
##### Task to append capsule node definitions to the env_type ssh_config, removing the dependency on the bastion ###########
- name: Add capsule nodes to workdir ssh config file
  blockinfile:
    dest: "{{output_dir}}/{{ env_type }}_{{ guid }}_ssh_conf"
    marker: "##### {mark} ADDED BASTION PROXY HOST {{ item }} {{ env_type }}-{{ guid }} ######"
    content: |
        Host {{ item }} {{ hostvars[item].shortname |d('')}}
          Hostname  {{ hostvars[item].public_dns_name }}
          IdentityFile {{ ssh_key | default(infra_ssh_key) | default(ansible_ssh_private_key_file) | default(default_key_name)}}
          IdentitiesOnly yes
          User ec2-user
          ControlMaster auto
          ControlPath /tmp/{{ guid }}-%r-%h-%p
          ControlPersist 5m
          StrictHostKeyChecking no
          ConnectTimeout 60
          ConnectionAttempts 10
          UserKnownHostsFile {{ansible_known_host}}
  loop: "{{ groups['capsules'] }}"
  tags:
    - capsule_ssh_config
### End task ####
ansible/configs/satellite-multi-region/destroy_env.yml
New file
@@ -0,0 +1,41 @@
---
- import_playbook: ../../include_vars.yml
- name: Delete Infrastructure
  hosts: localhost
  connection: local
  gather_facts: False
  become: no
  environment:
    AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
  tasks:
    - name: Find all VPC peering connections matching the stack name
      ec2_vpc_peering_facts:
        region: "{{ item.region }}"
        filters:
          "tag:stack": "{{ project_tag }}"
          status-code:
            - pending-acceptance
            - expired
            - provisioning
            - active
            - rejected
      register: vpc_peers
      loop: "{{ target_regions }}"
    - name: Delete all peering connections
      ec2_vpc_peer:
        region: "{{ item.requester_vpc_info.region }}"
        peering_id: "{{ item.vpc_peering_connection_id }}"
        state: absent
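      # json_query('[].result[]') flattens the per-region lookup results
      # into a single list of peering connections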
      loop: "{{ vpc_peers.results |json_query('[].result[]') }}"
    - name: Run infra-ec2-template-destroy
      include_role:
        name: "infra-{{cloud_provider}}-template-destroy"
      vars:
        aws_region: "{{ _region.region }}"
      loop_control:
        loop_var: _region
      loop: "{{ target_regions | reverse | list }}"
ansible/configs/satellite-multi-region/env_vars.yml
New file
@@ -0,0 +1,215 @@
################################################################################
################################################################################
### Environment Structure
################################################################################
################################################################################
## Environment Sizing
# target_regions:
#   - region: ap-southeast-2
#     stack: default
#     name: apac
#     vpc_cidr: 10.1.0.0/16
#     subnet_cidr: 10.1.0.0/24
#   - region: ap-southeast-1
#     stack: capsule.j2
#     name: emea
#     vpc_cidr: 10.2.0.0/16
#     subnet_cidr: 10.2.0.0/24
#   - region: ap-southeast-1
#     stack: default
#     name: apac
#     vpc_cidr: 10.3.0.0/16
#     subnet_cidr: 10.3.0.0/24
default_key_name: ~/.ssh/{{key_name}}.pem
# How many do you want for each instance type
bastion_instance_type: "t2.medium"
bastion_instance_image: RHEL75
satellite_instance_count: 1
satellite_instance_type: "m5.xlarge"
capsule_instance_count: 1
capsule_instance_type: "m5.xlarge"
security_groups:
  - name: BastionSG
    rules:
      - name: BasSSHPublic
        description: "SSH public"
        from_port: 22
        to_port: 22
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
  - name: SatelliteSG
    rules:
      - name: SatHTTPSPorts
        description: "HTTPS Public"
        from_port: 443
        to_port: 443
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: BastionUDPPorts
        description: "Only from bastion"
        from_port: 0
        to_port: 65535
        protocol: udp
        from_group: DefaultSG
        rule_type: Ingress
      - name: BastionTCPPorts
        description: "Only from bastion"
        from_port: 0
        to_port: 65535
        protocol: tcp
        from_group: DefaultSG
        rule_type: Ingress
  - name: CapsuleSG
    rules:
      - name: SatHTTPSPorts
        description: "HTTPS Public"
        from_port: 9090
        to_port: 9090
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: BastionUDPPorts
        description: "Only from bastion"
        from_port: 0
        to_port: 65535
        protocol: udp
        from_group: DefaultSG
        rule_type: Ingress
      - name: BastionTCPPorts
        description: "Only from bastion"
        from_port: 0
        to_port: 65535
        protocol: tcp
        from_group: DefaultSG
        rule_type: Ingress
# Environment Instances
instances:
  - name: "bastion"
    count: 1
    unique: true
    public_dns: true
    dns_loadbalancer: false
    security_groups:
      - BastionSG
      - DefaultSG
    image: "{{ bastion_instance_image }}"
    flavor:
      ec2: "{{bastion_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "bastions"
      - key: "ostype"
        value: "linux"
      - key: "instance_filter"
        value: "{{ env_type }}-{{ email }}"
  - name: "satellite"
    count: "{{satellite_instance_count}}"
    public_dns: true
    security_groups:
      - SatelliteSG
      - DefaultSG
    flavor:
      ec2: "{{satellite_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "satellites"
      - key: "ostype"
        value: "linux"
      - key: "instance_filter"
        value: "{{ env_type }}-{{ email }}"
######### capsule instances #########
capsule_instances:
  - name: "capsule"
    count: "{{capsule_instance_count}}"
    security_groups:
      - CapsuleSG
      - DefaultSG
      - BastionSG
    public_dns: true
    dns_loadbalancer: false
    flavor:
      ec2: "{{capsule_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "capsules"
      - key: "ostype"
        value: "linux"
      - key: "instance_filter"
        value: "{{ env_type }}-{{ email }}"
# DNS settings for environment
subdomain_base_short: "{{ guid }}"
subdomain_base_suffix: ".example.opentlc.com"
subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"
zone_internal_dns: "{{guid}}.internal."
chomped_zone_internal_dns: "{{guid}}.internal"
# Stuff that only GPTE cares about:
install_ipa_client: false
repo_method: file
use_own_repos: true
repo_version: "6.4"
# Do you want to run a full yum update
update_packages: false
common_packages:
  - python
  - unzip
  - bash-completion
  - tmux
  - wget
  - git
  - vim-enhanced
  - at
  - python27-python-pip
  - bind-utils
guid: defaultguid
install_bastion: true
install_common: true
install_satellite: true
configure_satellite: true
install_capsule: true
configure_capsule: true
deploy_local_ssh_config_location: "{{output_dir}}/"
use_own_key: true
env_authorized_key: "{{guid}}key"
set_env_authorized_key: true
HostedZoneId: Z3IHLWJZOU9SRT
project_tag: "{{ env_type }}-{{ guid }}"
ansible/configs/satellite-multi-region/files/cloud_providers/capsule.j2
New file
@@ -0,0 +1,369 @@
#jinja2: lstrip_blocks: "True"
---
AWSTemplateFormatVersion: "2010-09-09"
Mappings:
  RegionMapping: {{ aws_ami_region_mapping | to_json }}
Resources:
  Vpc:
    Type: "AWS::EC2::VPC"
    Properties:
      CidrBlock: "{{ aws_vpc_cidr }}"
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
        - Key: Name
          Value: "{{ aws_vpc_name }}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
  VpcInternetGateway:
    Type: "AWS::EC2::InternetGateway"
  VpcRouteTable:
    Type: "AWS::EC2::RouteTable"
    Properties:
      VpcId:
        Ref: Vpc
  VPCRouteInternetGateway:
    DependsOn: VpcGA
    Type: "AWS::EC2::Route"
    Properties:
      GatewayId:
        Ref: VpcInternetGateway
      DestinationCidrBlock: "0.0.0.0/0"
      RouteTableId:
        Ref: VpcRouteTable
  VpcGA:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId:
        Ref: VpcInternetGateway
      VpcId:
        Ref: Vpc
  PublicSubnet:
    Type: "AWS::EC2::Subnet"
    DependsOn:
      - Vpc
    Properties:
    {% if aws_availability_zone is defined %}
      AvailabilityZone: {{ aws_availability_zone }}
    {% endif %}
      CidrBlock: "{{ aws_public_subnet_cidr }}"
      Tags:
        - Key: Name
          Value: "{{project_tag}}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
      MapPublicIpOnLaunch: true
      VpcId:
        Ref: Vpc
  PublicSubnetRTA:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
    Properties:
      RouteTableId:
        Ref: VpcRouteTable
      SubnetId:
        Ref: PublicSubnet
{% for security_group in security_groups|list + default_security_groups|list %}
  {{security_group['name']}}:
    Type: "AWS::EC2::SecurityGroup"
    Properties:
      GroupDescription: Host
      VpcId:
        Ref: Vpc
      Tags:
        - Key: Name
          Value: "{{security_group['name']}}"
{% endfor %}
{% for security_group in default_security_groups|list + security_groups|list %}
{% for rule in security_group.rules %}
  {{security_group['name']}}{{rule['name']}}:
    Type: "AWS::EC2::SecurityGroup{{rule['rule_type']}}"
    Properties:
     GroupId:
       Fn::GetAtt:
         - "{{security_group['name']}}"
         - GroupId
     IpProtocol: {{rule['protocol']}}
     FromPort: {{rule['from_port']}}
     ToPort: {{rule['to_port']}}
  {% if rule['cidr'] is defined %}
     CidrIp: "{{rule['cidr']}}"
  {% endif  %}
  {% if rule['from_group'] is defined %}
     SourceSecurityGroupId:
       Fn::GetAtt:
        - "{{rule['from_group']}}"
        - GroupId
  {% endif  %}
{% endfor %}
{% endfor %}
  DnsZonePrivate:
    Type: "AWS::Route53::HostedZone"
    Properties:
      Name: "{{ aws_dns_zone_private }}"
      VPCs:
        - VPCId:
            Ref: Vpc
          VPCRegion:
            Ref: "AWS::Region"
      HostedZoneConfig:
        Comment: "{{ aws_comment }}"
  {% if secondary_stack is not defined %}
  DnsZonePublic:
    Type: "AWS::Route53::HostedZone"
    Properties:
      Name: "{{ aws_dns_zone_public }}"
      HostedZoneConfig:
        Comment: "{{ aws_comment }}"
  DnsPublicDelegation:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
      - DnsZonePublic
    Properties:
    {% if HostedZoneId is defined %}
      HostedZoneId: "{{ HostedZoneId }}"
    {% else %}
      HostedZoneName: "{{ aws_dns_zone_root }}"
    {% endif %}
      RecordSets:
        - Name: "{{ aws_dns_zone_public }}"
          Type: NS
          TTL: {{ aws_dns_ttl_public }}
          ResourceRecords:
            "Fn::GetAtt":
              - DnsZonePublic
              - NameServers
    {% endif %}
{% for instance in capsule_instances %}
{% if instance['dns_loadbalancer'] | d(false) | bool
  and not instance['unique'] | d(false) | bool %}
  {{instance['name']}}DnsLoadBalancer:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
    {% for c in range(1, (instance['count']|int)+1) %}
      - {{instance['name']}}{{c}}
      {% if instance['public_dns'] %}
      - {{instance['name']}}{{c}}EIP
      {% endif %}
    {% endfor %}
    Properties:
      {% if secondary_stack is defined %}
      HostedZoneName: "{{ aws_dns_zone_public }}"
      {% else %}
      HostedZoneId:
        Ref: DnsZonePublic
      {% endif %}
      RecordSets:
      - Name: "{{instance['name']}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
        Type: A
        TTL: {{ aws_dns_ttl_public }}
        ResourceRecords:
{% for c in range(1,(instance['count'] |int)+1) %}
          - "Fn::GetAtt":
            - {{instance['name']}}{{c}}
            - PublicIp
{% endfor %}
{% endif %}
{% for c in range(1,(instance['count'] |int)+1) %}
  {{instance['name']}}{{loop.index}}:
    Type: "AWS::EC2::Instance"
    Properties:
{% if custom_image is defined %}
      ImageId: {{ custom_image.image_id }}
{% else %}
      ImageId:
        Fn::FindInMap:
        - RegionMapping
        - Ref: AWS::Region
        - {{ instance.image | default(aws_default_image) }}
{% endif %}
      InstanceType: "{{instance['flavor'][cloud_provider]}}"
      KeyName: "{{instance.key_name | default(key_name)}}"
    {% if instance['UserData'] is defined %}
      {{instance['UserData']}}
    {% endif %}
    {% if instance['security_groups'] is defined %}
      SecurityGroupIds:
      {% for sg in instance.security_groups %}
        - Ref: {{ sg }}
      {% endfor %}
    {% else %}
      SecurityGroupIds:
        - Ref: DefaultSG
    {% endif %}
      SubnetId:
        Ref: PublicSubnet
      Tags:
    {% if instance['unique'] | d(false) | bool %}
        - Key: Name
          Value: {{instance['name']}}
        - Key: internaldns
          Value: {{instance['name']}}.{{aws_dns_zone_private_chomped}}
    {% else %}
        - Key: Name
          Value: {{instance['name']}}{{loop.index}}
        - Key: internaldns
          Value: {{instance['name']}}{{loop.index}}.{{aws_dns_zone_private_chomped}}
    {% endif %}
        - Key: "owner"
          Value: "{{ email | default('unknownuser') }}"
        - Key: "Project"
          Value: "{{project_tag}}"
        - Key: "{{project_tag}}"
          Value: "{{ instance['name'] }}"
    {% for tag in instance['tags'] %}
        - Key: {{tag['key']}}
          Value: {{tag['value']}}
    {% endfor %}
      BlockDeviceMappings:
    {% if '/dev/sda1' not in instance.volumes|d([])|json_query('[].device_name')
      and '/dev/sda1' not in instance.volumes|d([])|json_query('[].name')
%}
        - DeviceName: "/dev/sda1"
          Ebs:
            VolumeSize: "{{ instance['rootfs_size'] | default(aws_default_rootfs_size) }}"
            VolumeType: "{{ aws_default_volume_type }}"
    {% endif %}
    {% for vol in instance.volumes|default([]) if vol.enable|d(true) %}
        - DeviceName: "{{ vol.name | default(vol.device_name) }}"
          Ebs:
          {% if cloud_provider in vol and 'type' in vol[cloud_provider] %}
            VolumeType: "{{ vol[cloud_provider].type }}"
          {% else %}
            VolumeType: "{{ aws_default_volume_type }}"
          {% endif %}
            VolumeSize: "{{ vol.size }}"
    {% endfor %}
  {{instance['name']}}{{loop.index}}InternalDns:
    Type: "AWS::Route53::RecordSetGroup"
    Properties:
      HostedZoneId:
        Ref: DnsZonePrivate
      RecordSets:
    {% if instance['unique'] | d(false) | bool %}
        - Name: "{{instance['name']}}.{{aws_dns_zone_private}}"
    {% else %}
        - Name: "{{instance['name']}}{{loop.index}}.{{aws_dns_zone_private}}"
    {% endif %}
          Type: A
          TTL: {{ aws_dns_ttl_private }}
          ResourceRecords:
            - "Fn::GetAtt":
              - {{instance['name']}}{{loop.index}}
              - PrivateIp
{% if instance['public_dns'] %}
  {{instance['name']}}{{loop.index}}EIP:
    Type: "AWS::EC2::EIP"
    DependsOn:
    - VpcGA
    Properties:
      InstanceId:
        Ref: {{instance['name']}}{{loop.index}}
  {{instance['name']}}{{loop.index}}PublicDns:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
      - {{instance['name']}}{{loop.index}}EIP
    Properties:
      {% if secondary_stack is defined %}
      HostedZoneName: "{{ aws_dns_zone_public }}"
      {% else %}
      HostedZoneId:
        Ref: DnsZonePublic
      {% endif %}
      RecordSets:
      {% if instance['unique'] | d(false) | bool %}
        - Name: "{{instance['name']}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
      {% else %}
        - Name: "{{instance['name']}}{{loop.index}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
      {% endif %}
          Type: A
          TTL: {{ aws_dns_ttl_public }}
          ResourceRecords:
          - "Fn::GetAtt":
            - {{instance['name']}}{{loop.index}}
            - PublicIp
{% endif %}
{% endfor %}
{% endfor %}
  {% if secondary_stack is not defined %}
  Route53User:
    Type: AWS::IAM::User
    Properties:
      Policies:
        - PolicyName: Route53Access
          PolicyDocument:
            Statement:
              - Effect: Allow
                Action: route53:GetHostedZone
                Resource: arn:aws:route53:::change/*
              - Effect: Allow
                Action: route53:ListHostedZones
                Resource: "*"
              - Effect: Allow
                Action:
                  - route53:ChangeResourceRecordSets
                  - route53:ListResourceRecordSets
                  - route53:GetHostedZone
                Resource:
                  Fn::Join:
                    - ""
                    - - "arn:aws:route53:::hostedzone/"
                      - Ref: DnsZonePublic
              - Effect: Allow
                Action: route53:GetChange
                Resource: arn:aws:route53:::change/*
  Route53UserAccessKey:
      DependsOn: Route53User
      Type: AWS::IAM::AccessKey
      Properties:
        UserName:
          Ref: Route53User
  {% endif %}
Outputs:
  Route53internalzoneOutput:
    Description: The ID of the internal route 53 zone
    Value:
      Ref: DnsZonePrivate
  {% if secondary_stack is not defined %}
  Route53User:
    Value:
      Ref: Route53User
    Description: IAM User for Route53 (Let's Encrypt)
  Route53UserAccessKey:
    Value:
      Ref: Route53UserAccessKey
    Description: IAM User for Route53 (Let's Encrypt)
  Route53UserSecretAccessKey:
    Value:
      Fn::GetAtt:
        - Route53UserAccessKey
        - SecretAccessKey
    Description: IAM User for Route53 (Let's Encrypt)
  {% endif %}
ansible/configs/satellite-multi-region/files/cloud_providers/default.j2
New file
@@ -0,0 +1,369 @@
#jinja2: lstrip_blocks: "True"
---
AWSTemplateFormatVersion: "2010-09-09"
Mappings:
  RegionMapping: {{ aws_ami_region_mapping | to_json }}
Resources:
  Vpc:
    Type: "AWS::EC2::VPC"
    Properties:
      CidrBlock: "{{ aws_vpc_cidr }}"
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
        - Key: Name
          Value: "{{ aws_vpc_name }}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
  VpcInternetGateway:
    Type: "AWS::EC2::InternetGateway"
  VpcRouteTable:
    Type: "AWS::EC2::RouteTable"
    Properties:
      VpcId:
        Ref: Vpc
  VPCRouteInternetGateway:
    DependsOn: VpcGA
    Type: "AWS::EC2::Route"
    Properties:
      GatewayId:
        Ref: VpcInternetGateway
      DestinationCidrBlock: "0.0.0.0/0"
      RouteTableId:
        Ref: VpcRouteTable
  VpcGA:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId:
        Ref: VpcInternetGateway
      VpcId:
        Ref: Vpc
  PublicSubnet:
    Type: "AWS::EC2::Subnet"
    DependsOn:
      - Vpc
    Properties:
    {% if aws_availability_zone is defined %}
      AvailabilityZone: {{ aws_availability_zone }}
    {% endif %}
      CidrBlock: "{{ aws_public_subnet_cidr }}"
      Tags:
        - Key: Name
          Value: "{{project_tag}}"
        - Key: Application
          Value:
            Ref: "AWS::StackId"
      MapPublicIpOnLaunch: true
      VpcId:
        Ref: Vpc
  PublicSubnetRTA:
    Type: "AWS::EC2::SubnetRouteTableAssociation"
    Properties:
      RouteTableId:
        Ref: VpcRouteTable
      SubnetId:
        Ref: PublicSubnet
{% for security_group in security_groups|list + default_security_groups|list %}
  {{security_group['name']}}:
    Type: "AWS::EC2::SecurityGroup"
    Properties:
      GroupDescription: Host
      VpcId:
        Ref: Vpc
      Tags:
        - Key: Name
          Value: "{{security_group['name']}}"
{% endfor %}
{% for security_group in default_security_groups|list + security_groups|list %}
{% for rule in security_group.rules %}
  {{security_group['name']}}{{rule['name']}}:
    Type: "AWS::EC2::SecurityGroup{{rule['rule_type']}}"
    Properties:
     GroupId:
       Fn::GetAtt:
         - "{{security_group['name']}}"
         - GroupId
     IpProtocol: {{rule['protocol']}}
     FromPort: {{rule['from_port']}}
     ToPort: {{rule['to_port']}}
  {% if rule['cidr'] is defined %}
     CidrIp: "{{rule['cidr']}}"
  {% endif  %}
  {% if rule['from_group'] is defined %}
     SourceSecurityGroupId:
       Fn::GetAtt:
        - "{{rule['from_group']}}"
        - GroupId
  {% endif  %}
{% endfor %}
{% endfor %}
  DnsZonePrivate:
    Type: "AWS::Route53::HostedZone"
    Properties:
      Name: "{{ aws_dns_zone_private }}"
      VPCs:
        - VPCId:
            Ref: Vpc
          VPCRegion:
            Ref: "AWS::Region"
      HostedZoneConfig:
        Comment: "{{ aws_comment }}"
  {% if secondary_stack is not defined %}
  DnsZonePublic:
    Type: "AWS::Route53::HostedZone"
    Properties:
      Name: "{{ aws_dns_zone_public }}"
      HostedZoneConfig:
        Comment: "{{ aws_comment }}"
  DnsPublicDelegation:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
      - DnsZonePublic
    Properties:
    {% if HostedZoneId is defined %}
      HostedZoneId: "{{ HostedZoneId }}"
    {% else %}
      HostedZoneName: "{{ aws_dns_zone_root }}"
    {% endif %}
      RecordSets:
        - Name: "{{ aws_dns_zone_public }}"
          Type: NS
          TTL: {{ aws_dns_ttl_public }}
          ResourceRecords:
            "Fn::GetAtt":
              - DnsZonePublic
              - NameServers
    {% endif %}
{% for instance in instances %}
{% if instance['dns_loadbalancer'] | d(false) | bool
  and not instance['unique'] | d(false) | bool %}
  {{instance['name']}}DnsLoadBalancer:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
    {% for c in range(1, (instance['count']|int)+1) %}
      - {{instance['name']}}{{c}}
      {% if instance['public_dns'] %}
      - {{instance['name']}}{{c}}EIP
      {% endif %}
    {% endfor %}
    Properties:
      {% if secondary_stack is defined %}
      HostedZoneName: "{{ aws_dns_zone_public }}"
      {% else %}
      HostedZoneId:
        Ref: DnsZonePublic
      {% endif %}
      RecordSets:
      - Name: "{{instance['name']}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
        Type: A
        TTL: {{ aws_dns_ttl_public }}
        ResourceRecords:
{% for c in range(1,(instance['count'] |int)+1) %}
          - "Fn::GetAtt":
            - {{instance['name']}}{{c}}
            - PublicIp
{% endfor %}
{% endif %}
{% for c in range(1,(instance['count'] |int)+1) %}
  {{instance['name']}}{{loop.index}}:
    Type: "AWS::EC2::Instance"
    Properties:
{% if custom_image is defined %}
      ImageId: {{ custom_image.image_id }}
{% else %}
      ImageId:
        Fn::FindInMap:
        - RegionMapping
        - Ref: AWS::Region
        - {{ instance.image | default(aws_default_image) }}
{% endif %}
      InstanceType: "{{instance['flavor'][cloud_provider]}}"
      KeyName: "{{instance.key_name | default(key_name)}}"
    {% if instance['UserData'] is defined %}
      {{instance['UserData']}}
    {% endif %}
    {% if instance['security_groups'] is defined %}
      SecurityGroupIds:
      {% for sg in instance.security_groups %}
        - Ref: {{ sg }}
      {% endfor %}
    {% else %}
      SecurityGroupIds:
        - Ref: DefaultSG
    {% endif %}
      SubnetId:
        Ref: PublicSubnet
      Tags:
    {% if instance['unique'] | d(false) | bool %}
        - Key: Name
          Value: {{instance['name']}}
        - Key: internaldns
          Value: {{instance['name']}}.{{aws_dns_zone_private_chomped}}
    {% else %}
        - Key: Name
          Value: {{instance['name']}}{{loop.index}}
        - Key: internaldns
          Value: {{instance['name']}}{{loop.index}}.{{aws_dns_zone_private_chomped}}
    {% endif %}
        - Key: "owner"
          Value: "{{ email | default('unknownuser') }}"
        - Key: "Project"
          Value: "{{project_tag}}"
        - Key: "{{project_tag}}"
          Value: "{{ instance['name'] }}"
    {% for tag in instance['tags'] %}
        - Key: {{tag['key']}}
          Value: {{tag['value']}}
    {% endfor %}
      BlockDeviceMappings:
    {% if '/dev/sda1' not in instance.volumes|d([])|json_query('[].device_name')
      and '/dev/sda1' not in instance.volumes|d([])|json_query('[].name')
%}
        - DeviceName: "/dev/sda1"
          Ebs:
            VolumeSize: "{{ instance['rootfs_size'] | default(aws_default_rootfs_size) }}"
            VolumeType: "{{ aws_default_volume_type }}"
    {% endif %}
    {% for vol in instance.volumes|default([]) if vol.enable|d(true) %}
        - DeviceName: "{{ vol.name | default(vol.device_name) }}"
          Ebs:
          {% if cloud_provider in vol and 'type' in vol[cloud_provider] %}
            VolumeType: "{{ vol[cloud_provider].type }}"
          {% else %}
            VolumeType: "{{ aws_default_volume_type }}"
          {% endif %}
            VolumeSize: "{{ vol.size }}"
    {% endfor %}
  {{instance['name']}}{{loop.index}}InternalDns:
    Type: "AWS::Route53::RecordSetGroup"
    Properties:
      HostedZoneId:
        Ref: DnsZonePrivate
      RecordSets:
    {% if instance['unique'] | d(false) | bool %}
        - Name: "{{instance['name']}}.{{aws_dns_zone_private}}"
    {% else %}
        - Name: "{{instance['name']}}{{loop.index}}.{{aws_dns_zone_private}}"
    {% endif %}
          Type: A
          TTL: {{ aws_dns_ttl_private }}
          ResourceRecords:
            - "Fn::GetAtt":
              - {{instance['name']}}{{loop.index}}
              - PrivateIp
{% if instance['public_dns'] %}
  {{instance['name']}}{{loop.index}}EIP:
    Type: "AWS::EC2::EIP"
    DependsOn:
    - VpcGA
    Properties:
      InstanceId:
        Ref: {{instance['name']}}{{loop.index}}
  {{instance['name']}}{{loop.index}}PublicDns:
    Type: "AWS::Route53::RecordSetGroup"
    DependsOn:
      - {{instance['name']}}{{loop.index}}EIP
    Properties:
      {% if secondary_stack is defined %}
      HostedZoneName: "{{ aws_dns_zone_public }}"
      {% else %}
      HostedZoneId:
        Ref: DnsZonePublic
      {% endif %}
      RecordSets:
      {% if instance['unique'] | d(false) | bool %}
        - Name: "{{instance['name']}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
      {% else %}
        - Name: "{{instance['name']}}{{loop.index}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
      {% endif %}
          Type: A
          TTL: {{ aws_dns_ttl_public }}
          ResourceRecords:
          - "Fn::GetAtt":
            - {{instance['name']}}{{loop.index}}
            - PublicIp
{% endif %}
{% endfor %}
{% endfor %}
  {% if secondary_stack is not defined %}
  Route53User:
    Type: AWS::IAM::User
    Properties:
      Policies:
        - PolicyName: Route53Access
          PolicyDocument:
            Statement:
              - Effect: Allow
                Action: route53:GetHostedZone
                Resource: arn:aws:route53:::change/*
              - Effect: Allow
                Action: route53:ListHostedZones
                Resource: "*"
              - Effect: Allow
                Action:
                  - route53:ChangeResourceRecordSets
                  - route53:ListResourceRecordSets
                  - route53:GetHostedZone
                Resource:
                  Fn::Join:
                    - ""
                    - - "arn:aws:route53:::hostedzone/"
                      - Ref: DnsZonePublic
              - Effect: Allow
                Action: route53:GetChange
                Resource: arn:aws:route53:::change/*
  Route53UserAccessKey:
      DependsOn: Route53User
      Type: AWS::IAM::AccessKey
      Properties:
        UserName:
          Ref: Route53User
  {% endif %}
Outputs:
  Route53internalzoneOutput:
    Description: The ID of the internal route 53 zone
    Value:
      Ref: DnsZonePrivate
  {% if secondary_stack is not defined %}
  Route53User:
    Value:
      Ref: Route53User
    Description: IAM User for Route53 (Let's Encrypt)
  Route53UserAccessKey:
    Value:
      Ref: Route53UserAccessKey
    Description: IAM User for Route53 (Let's Encrypt)
  Route53UserSecretAccessKey:
    Value:
      Fn::GetAtt:
        - Route53UserAccessKey
        - SecretAccessKey
    Description: IAM User for Route53 (Let's Encrypt)
  {% endif %}
ansible/configs/satellite-multi-region/files/etc_hosts_template.j2
New file
@@ -0,0 +1,16 @@
{# This is /etc/hosts file #}
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
{% for host in groups['satellites'] %}
{% if host == inventory_hostname %}
{{ ansible_default_ipv4['address'] }}  {{host}}
{% endif %}
{% endfor %}
{% for host in groups['capsules'] %}
{% if host == inventory_hostname %}
{{ ansible_default_ipv4['address'] }}  {{host}}
{% endif %}
{% endfor %}
ansible/configs/satellite-multi-region/files/hosts_template.j2
New file
@@ -0,0 +1,23 @@
{# These are the satellite hosts #}
[satellites]
{% for host in groups['satellites'] %}
{{host}}
{% endfor %}
[capsules]
{% for host in groups['capsules'] %}
{{host}}
{% endfor %}
[all:vars]
{# ###########################################################################
### Ansible Vars
########################################################################### #}
timeout=60
ansible_become=yes
ansible_user={{remote_user}}
[all:children]
satellites
capsules
ansible/configs/satellite-multi-region/files/repos_template.j2
New file
@@ -0,0 +1,122 @@
{%if inventory_hostname in groups['bastions'] %}
[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch
mirrorlist=http://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=0
#gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-rpms
enabled=1
gpgcheck=0
[rhel-server-rhscl-7-rpms]
name=Red Hat Enterprise Linux 7 RHSCL
baseurl={{own_repo_path}}/{{repo_version}}/rhel-server-rhscl-7-rpms
enabled=1
gpgcheck=0
[rhel-7-server-ansible-2.6-rpms]
name=Red Hat Enterprise Ansible 2.6
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-ansible-2.6-rpms
enabled=1
gpgcheck=0
[rhel-7-server-extras-rpms]
name=Red Hat Enterprise Linux Extra RPMs
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-extras-rpms
enabled=1
gpgcheck=0
{% endif %}
{%if groups['satellites'] is defined %}
{%if inventory_hostname in groups['satellites'] %}
{# satellite repos #}
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-rpms
enabled=1
gpgcheck=0
[rhel-server-rhscl-7-rpms]
name=Red Hat Enterprise Linux 7 RHSCL
baseurl={{own_repo_path}}/{{repo_version}}/rhel-server-rhscl-7-rpms
enabled=1
gpgcheck=0
[rhel-7-server-ansible-2.6-rpms]
name=Red Hat Enterprise Ansible 2.6
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-ansible-2.6-rpms
enabled=1
gpgcheck=0
[rhel-7-server-extras-rpms]
name=Red Hat Enterprise Linux Extra RPMs
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-extras-rpms
enabled=1
gpgcheck=0
[rhel-7-server-satellite-6.4-rpms]
name=Red Hat Enterprise Satellite 6.4
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-satellite-6.4-rpms
enabled=1
gpgcheck=0
[rhel-7-server-satellite-maintenance-6-rpms]
name=Red Hat Enterprise Satellite 6 Maintenance
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-satellite-maintenance-6-rpms
enabled=1
gpgcheck=0
{% endif %}
{% endif %}
{% if groups['capsules'] is defined %}
{% if inventory_hostname in groups['capsules'] %}
{# capsule repos #}
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-rpms
enabled=1
gpgcheck=0
[rhel-server-rhscl-7-rpms]
name=Red Hat Enterprise Linux 7 RHSCL
baseurl={{own_repo_path}}/{{repo_version}}/rhel-server-rhscl-7-rpms
enabled=1
gpgcheck=0
[rhel-7-server-ansible-2.6-rpms]
name=Red Hat Enterprise Ansible 2.6
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-ansible-2.6-rpms
enabled=1
gpgcheck=0
[rhel-7-server-satellite-capsule-6.4-rpms]
name=Red Hat Enterprise Satellite Capsule 6.4
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-satellite-capsule-6.4-rpms
enabled=1
gpgcheck=0
[rhel-7-server-satellite-maintenance-6-rpms]
name=Red Hat Enterprise Satellite 6 Maintenance
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-satellite-maintenance-6-rpms
enabled=1
gpgcheck=0
[rhel-7-server-satellite-tools-6.4-rpms]
name=Red Hat Enterprise Linux Satellite tools 6.4
baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-satellite-tools-6.4-rpms
enabled=1
gpgcheck=0
{% endif %}
{% endif %}
ansible/configs/satellite-multi-region/files/tower_template_inventory.j2
New file
@@ -0,0 +1,54 @@
[tower]
{% for host in groups['towers'] %}
tower{{loop.index}}.{{chomped_zone_internal_dns}} public_host_name=tower{{loop.index}}.{{ guid }}{{subdomain_base_suffix}} ssh_host={{host}}
{% endfor %}
[database]
## This should be replaced by supports[0] name
support1.{{chomped_zone_internal_dns}}
## Add isolated groups only when needed; the IF below emits them only if worker groups exist and have instances.
{% if worker == 'yes' %}
[isolated_group_{{region}}]
{% for host in groups['workers'] %}
worker{{loop.index}}.{{chomped_zone_internal_dns}} public_host_name=worker{{loop.index}}.{{ guid }}{{subdomain_base_suffix}} ssh_host={{host}}
{% endfor %}
[isolated_group_{{region}}:vars]
controller=tower
{% endif %}
[all:vars]
ansible_become=true
admin_password={{tower_admin_password}}
## This should be replaced by supports[0] name
pg_host='support1.{{guid}}.internal'
pg_port='5432'
pg_database='awx'
pg_username='awx'
pg_password={{tower_admin_password}}
rabbitmq_port=5672
rabbitmq_vhost=tower
rabbitmq_username=tower
rabbitmq_password={{ tower_admin_password | regex_replace('[^a-zA-Z0-9]') }}
rabbitmq_cookie=cookiemonster
rabbitmq_use_long_name=true
### For our use, not Tower install use (so you can run ansible command line)
[supports]
{% for host in groups['support'] %}
support{{loop.index}}.{{chomped_zone_internal_dns}} public_host_name=support{{loop.index}}.{{ guid }}{{subdomain_base_suffix}} ssh_host={{host}}
{% endfor %}
{% if worker == 'yes' %}
[workers]
{% for host in groups['workers'] %}
worker{{loop.index}}.{{chomped_zone_internal_dns}} public_host_name=worker{{loop.index}}.{{ guid }}{{subdomain_base_suffix}} ssh_host={{host}}
{% endfor %}
{% endif %}
ansible/configs/satellite-multi-region/post_infra.yml
New file
@@ -0,0 +1,79 @@
- name: Step 002 Post Infrastructure
  hosts: localhost
  gather_facts: false
  become: false
  tasks:
    - debug:
        msg: "Step 002 Post Infrastructure"
    - name: Deploy secondary stacks
      loop: "{{ target_regions[1:] }}"
      vars:
        stack_file: "{{ _region.stack }}"
        aws_region: "{{ _region.region }}"
        aws_vpc_cidr: "{{ _region.vpc_cidr }}"
        aws_public_subnet_cidr: "{{ _region.subnet_cidr }}"
        aws_dns_zone_private: "{{ _region.name }}.{{ guid }}.internal."
        aws_dns_zone_public_prefix: "{{ _region.name }}."
        secondary_stack: yes
      loop_control:
        loop_var: _region
      include_tasks: deploy_stack.yml
- import_playbook: ../../include_vars.yml
  tags:
    - create_inventory
    - must
- name: Step 001.3 Configure Linux Hosts and Wait for Connection
  hosts:
    - all:!windows:!network
  gather_facts: false
  any_errors_fatal: true
  ignore_errors: false
  become: true
  tags:
    - step001
    - step001.3
    - wait_ssh
    - set_hostname
  tasks:
    - name: set facts for remote access
      tags:
        - create_inventory
      set_fact:
        aws_region_final: "{{hostvars['localhost'].aws_region_final}}"
        ansible_ssh_extra_args: "{{ ansible_ssh_extra_args|d() }} -F {{output_dir}}/{{ env_type }}_{{ guid }}_ssh_conf"
    - name: Run infra-ec2-wait_for_linux_hosts Role
      import_role:
        name: infra-ec2-wait_for_linux_hosts
    - name: Run infra-ec2-linux-set-hostname Role
      import_role:
        name: infra-ec2-linux-set-hostname
- name: Step 002 Create VPC peering
  hosts: localhost
  gather_facts: false
  become: false
  tags: vpc_peering
  environment:
    AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
  tasks:
    - include_role:
        name: infra-ec2-vpc-peering
      tags: vpcpeering
      vars:
        vpc_region: "{{ _region[0].region }}"
        peer_region: "{{ _region[1].region }}"
        vpc_private_zone: "{{ _region[0].name }}.{{ guid }}.internal."
        peer_private_zone: "{{ _region[1].name }}.{{ guid }}.internal."
      loop: "{{ target_regions | product(target_regions) | list }}"
      loop_control:
        loop_var: _region
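      # product() yields every ordered pair of regions; the conditions below
      # keep a single unordered pair per combination and drop self-pairs, so
      # each peering is created exactly once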
      when:
        - _region[0].region != _region[1].region
        - _region[0].region < _region[1].region
ansible/configs/satellite-multi-region/post_software.yml
New file
@@ -0,0 +1,31 @@
- name: Step 005 Post Software
  hosts: localhost
  gather_facts: false
  become: false
  tasks:
    - debug:
        msg: "Step 005 Post Software"
- name: Step lab post software deployment
  hosts: bastions
  gather_facts: False
  become: yes
  tags:
    - opentlc_bastion_tasks
  tasks:
    - import_role:
        name: bastion-opentlc-ipa
      when: install_ipa_client|bool
- name: PostSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Post-Software checks completed successfully"
ansible/configs/satellite-multi-region/pre_infra.yml
New file
@@ -0,0 +1,30 @@
- name: Step 000 Pre Infrastructure
  hosts: localhost
  gather_facts: false
  become: false
  tasks:
    - debug:
        msg: "Step 000 Pre Infrastructure"
    - fail:
        msg: |-
          'aws_region' must not be defined.
          This is a multi-region config; please use the 'target_regions' list instead.
      when: aws_region is defined
    - fail:
        msg: "'target_regions' is not defined"
      when: target_regions is not defined
    - fail:
        msg: "'target_regions' must contain at least 1 region."
      when: target_regions | length < 1
    - name: set aws_region from the first target region
      set_fact:
        aws_region: "{{ target_regions[0].region }}"
        aws_vpc_cidr: "{{ target_regions[0].vpc_cidr }}"
        aws_public_subnet_cidr: "{{ target_regions[0].subnet_cidr }}"
        aws_dns_zone_private: "{{ target_regions[0].name }}.{{ guid }}.internal."
        aws_dns_zone_public_prefix: "{{ target_regions[0].name }}."
ansible/configs/satellite-multi-region/pre_software.yml
New file
@@ -0,0 +1,46 @@
- name: Step 003 Pre Software
  hosts: localhost
  gather_facts: false
  become: false
  tasks:
    - debug:
        msg: "Step 003 Pre Software"
    - import_role:
        name: infra-local-create-ssh_key
      when: set_env_authorized_key | bool
- name: Configure all hosts with Repositories
  hosts:
    - all:!windows
  become: true
  gather_facts: False
  tags:
    - step004
    - common_tasks
  roles:
    - { role: "set-repositories", when: 'repo_method is defined' }
    - { role: "set_env_authorized_key", when: 'set_env_authorized_key' }
- name: Configuring Bastion Hosts
  hosts: bastions
  become: true
  roles:
    - { role: "common", when: 'install_common' }
    - {role: "bastion", when: 'install_bastion' }
    - { role: "bastion-opentlc-ipa", when: 'install_ipa_client' }
  tags:
    - step004
    - bastion_tasks
- name: PreSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - presoftware_flight_check
  tasks:
    - debug:
        msg: "Pre-Software checks completed successfully"
ansible/configs/satellite-multi-region/sample_vars.yml
New file
@@ -0,0 +1,93 @@
---
env_type: satellite-multi-region      # Name of config to deploy
output_dir: /tmp/workdir                # Writable working scratch directory
email: name@example.com                 # User info for notifications
guid: defaultguid                            # Unique string used in FQDN
subdomain_base_suffix: .example.opentlc.com      # Your domain used in FQDN
# Cloud specific settings - example given here for AWS
cloud_provider: ec2                     # Which AgnosticD Cloud Provider to use
HostedZoneId: Z3IHLWJZOU9SRT            # You will need to change this
key_name: ocpkey                        # Keyname must exist in AWS
target_regions:
  - region: ap-southeast-2
    stack: default
    name: apac
    vpc_cidr: 10.1.0.0/16
    subnet_cidr: 10.1.0.0/24
  - region: ap-southeast-1
    stack: capsule.j2
    name: emea
    vpc_cidr: 10.2.0.0/16
    subnet_cidr: 10.2.0.0/24
  # - region: ap-southeast-1
  #   stack: default
  #   name: apac
  #   vpc_cidr: 10.3.0.0/16
  #   subnet_cidr: 10.3.0.0/24
###### satellite env related variables ###############
satellite_admin: admin
satellite_admin_password: r3dh4t1!
org: "Default Organization"
subscription_name:  "Employee SKU"
# manifest_file: ~/office_work/manifests/manifest_satellite-vm_1.zip
content_view_name: "capsule server content"
activation_key_name: "capsule_activation_key"
life_cycle_env_name: "Library"
#
########### repo product and name ###############
satellite_repository:
  - organization: "{{org}}"
    product: 'Red Hat Enterprise Linux Server'
    basearch: 'x86_64'
    releasever:  '7Server'
    name: 'Red Hat Enterprise Linux 7 Server (RPMs)'
    # sync_name: 'Red Hat Enterprise Linux 7 Server RPMs x86_64 7Server'
  - organization: "{{org}}"
    product: 'Red Hat Satellite Capsule'
    basearch: 'x86_64'
    name: 'Red Hat Satellite Capsule 6.4 (for RHEL 7 Server) (RPMs)'
    # sync_name: 'Red Hat Satellite Capsule 6.4 for RHEL 7 Server RPMs x86_64'
  - organization: "{{org}}"
    product: 'Red Hat Ansible Engine'
    basearch: 'x86_64'
    name: 'Red Hat Ansible Engine 2.6 RPMs for Red Hat Enterprise Linux 7 Server'
    # sync_name: 'Red Hat Ansible Engine 2.6 RPMs for Red Hat Enterprise Linux 7 Server x86_64'
  - organization: "{{org}}"
    product: 'Red Hat Software Collections for RHEL Server'
    basearch: 'x86_64'
    releasever:  '7Server'
    name: 'Red Hat Software Collections RPMs for Red Hat Enterprise Linux 7 Server'
    # sync_name: 'Red Hat Software Collections RPMs for Red Hat Enterprise Linux 7 Server x86_64 7Server'
  - organization: "{{org}}"
    product: 'Red Hat Enterprise Linux Server'
    basearch: 'x86_64'
    name: 'Red Hat Satellite Maintenance 6 (for RHEL 7 Server) (RPMs)'
    # sync_name: 'Red Hat Satellite Maintenance 6 for RHEL 7 Server RPMs x86_64'
  - organization: "{{org}}"
    product: 'Red Hat Enterprise Linux Server'
    basearch: 'x86_64'
    name: 'Red Hat Satellite Tools 6.4 (for RHEL 7 Server) (RPMs)'
    # sync_name: 'Red Hat Satellite Tools 6.4 for RHEL 7 Server RPMs x86_64'
...
ansible/configs/satellite-multi-region/software.yml
New file
@@ -0,0 +1,52 @@
---
- name: Step 004 Environment specific Software
  hosts: localhost
  gather_facts: False
  become: false
  tasks:
    - debug:
        msg: "Software tasks Started"
- name: Configuring satellite Hosts
  hosts: satellites
  become: True
  gather_facts: True
  pre_tasks:
      - name: Copy /etc/hosts file
        template:
          src: "./files/etc_hosts_template.j2"
          dest: /etc/hosts
  roles:
    - { role: "satellite-installation",                 when: install_satellite }
    - { role: "satellite-manage-manifest",              when: configure_satellite }
    - { role: "satellite-manage-subscription-and-sync", when: configure_satellite }
    - { role: "satellite-manage-content-view",          when: configure_satellite }
    - { role: "satellite-manage-activationkey",         when: configure_satellite }
    - { role: "satellite-manage-capsule-certificate",   when: configure_satellite }
- name: Configuring capsule Hosts
  hosts: capsules
  become: True
  gather_facts: True
  pre_tasks:
      - name: Copy /etc/hosts file
        template:
          src: "./files/etc_hosts_template.j2"
          dest: /etc/hosts
  roles:
    - { role: "satellite-capsule-installation",   when: install_capsule }
    - { role: "satellite-capsule-configuration",  when: configure_capsule }
- name: Software flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
    - post_flight_check
  tasks:
    - debug:
        msg: "Software checks completed successfully"
ansible/configs/satellite-multi-region/start.yml
New file
@@ -0,0 +1,33 @@
---
- import_playbook: ../../include_vars.yml
- name: Start instances in all regions
  hosts: localhost
  gather_facts: false
  become: false
  environment:
    AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
  tasks:
    - debug:
        msg: "Starting instances in all regions"
    - loop: "{{ target_regions | json_query('[].region') }}"
      loop_control:
        loop_var: _region
      ec2:
        instance_tags:
          "aws:cloudformation:stack-name": "{{ project_tag }}"
        state: running
        region: "{{ _region }}"
      # Shell equivalent
      # shell: >-
      #   aws ec2 start-instances
      #   --region {{ _region }}
      #   --instance-ids $(
      #     aws ec2 describe-instances
      #     --filters "Name=tag:aws:cloudformation:stack-name,Values={{ project_tag }}
      #     --query Reservations[*].Instances[*].InstanceId
      #     --region {{_region }}
      #     --output text
      #   )
ansible/configs/satellite-multi-region/stop.yml
New file
@@ -0,0 +1,33 @@
---
- import_playbook: ../../include_vars.yml
- name: Stop instances in all regions
  hosts: localhost
  gather_facts: false
  become: false
  environment:
    AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
    AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
  tasks:
    - debug:
        msg: "Stopping instances in all regions"
    - loop: "{{ target_regions | json_query('[].region') }}"
      loop_control:
        loop_var: _region
      ec2:
        instance_tags:
          "aws:cloudformation:stack-name": "{{ project_tag }}"
        state: stopped
        region: "{{ _region }}"
      # Shell equivalent
      # shell: >-
      #   aws ec2 stop-instances
      #   --region {{ _region }}
      #   --instance-ids $(
      #     aws ec2 describe-instances
      #     --filters "Name=tag:aws:cloudformation:stack-name,Values={{ project_tag }}
      #     --query Reservations[*].Instances[*].InstanceId
      #     --region {{_region }}
      #     --output text
      #   )
ansible/configs/satellite-v64-prod/README.adoc
@@ -1,8 +1,8 @@
-= satellite-vm config
+= satellite-v64-prod config
 == Review the Env_Type variable file
-* This file link:./env_vars.yml[./env_vars.yml] contains all the variables you
+* This file link:./sample_vars.yml[./sample_vars.yml] contains all the variables you
  need to define to control the deployment of your environment.
@@ -10,32 +10,30 @@
 You can run the playbook with the following arguments to overwrite the default variable values:
 From the `ansible_agnostic_deployer/ansible` directory run
-`
+[source,bash]
+----
 ansible-playbook main.yml  \
       -e "guid=${GUID}" \
-      - @./config/satellite-vm-prod/sample_vars.yml
-      -e "rhel_subscription_user=${RHN_USER}"  \
-      -e "rhel_subscription_pass=${RHN_PASS}" \
+      - @./configs/satellite-v64-prod/sample_vars.yml
       -e @~/secrets.yml
-      -e GUID=test01
-=== To Delete an environment
+      -e manifest_file=~/manifest.zip
+      -e guid=test01
+----
+== To Delete an environment
 [source,bash]
 ----
 REGION=us-east-1
 KEYNAME=ocpkey
 GUID=test01
-ENVTYPE=satellite-vm
+ENVTYPE=satellite-v64-prod
 CLOUDPROVIDER=ec2
 ansible-playbook configs/${ENVTYPE}/destroy_env.yml \
         -e "guid=${GUID}" -e "env_type=${ENVTYPE}" \
         -e "cloud_provider=${CLOUDPROVIDER}" \
-        -e "aws_region=${REGION}"  -e "key_name=${KEYNAME}"  \
+        -e "aws_region=${REGION}"
+        -e "key_name=${KEYNAME}"  \
         -e "subdomain_base_suffix=${BASESUFFIX}" \
-        -e @~/secret.yml -vv
+        -e @~/secret.yml -vv
 ----
ansible/configs/satellite-v64-prod/env_vars.yml
@@ -7,13 +7,15 @@
 ### For now, just tagging comments in line with configuration file.
 ### Vars that can be removed
-use_own_repos: false
 ###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
 ###### OR PASS as "-e" args to ansible-playbook command
 ### Common Host settings
+repo_method: file # Other Options are: file, satellite and rhn
+use_own_repos: true
 repo_version: "6.4"
 # Do you want to run a full yum update
@@ -69,7 +71,6 @@
subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"
## Environment Sizing
bastion_instance_type: "t2.medium"
satellite_instance_count: 1
@@ -96,162 +97,52 @@
  - name: SatelliteSG
    rules:
      - name: BastionUDPPorts
        description: "Only from bastion"
        from_port: 0
        to_port: 65535
        protocol: udp
        group: BastionSG
        rule_type: Ingress
      - name: BastionTCPPorts
        description: "Only from bastion"
        from_port: 0
        to_port: 65535
        protocol: tcp
        group: BastionSG
        rule_type: Ingress
      - name: SatSSHPublic
        description: "SSH public"
        from_port: 22
        to_port: 22
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: SatHTTPPorts
        description: "HTTP Public"
        from_port: 80
        to_port: 80
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: SatHTTPSPorts
        description: "HTTPS Public"
        from_port: 443
        to_port: 443
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: SatKatello5646Ports
        description: "Katello/qpid Public"
        from_port: 5646
        to_port: 5646
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: SatKatello5647Ports
        description: "Katello/qpid Public"
        from_port: 5647
        to_port: 5647
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: SatamqpPorts
        description: "amqp Public"
        from_port: 5671
        to_port: 5671
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: SatPuppetPorts
        description: "Puppet Public"
        from_port: 8140
        to_port: 8140
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: SatForemanPorts
        description: "Foreman Smart Proxy Public"
        from_port: 9090
        to_port: 9090
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: SatDNSTCPPorts
        description: "DNS Public"
        from_port: 53
        to_port: 53
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: SatDNSUDPPorts
        description: "DNS Public"
        from_port: 53
        to_port: 53
        protocol: udp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: SatDHCP67Ports
        description: "DHCP Public"
        from_port: 67
        to_port: 67
        protocol: udp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: SatDHCP68Ports
        description: "DHCP Public"
        from_port: 68
        to_port: 68
        protocol: udp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: SatTFTPPorts
        description: "TFTP Public"
        from_port: 69
        to_port: 69
        protocol: udp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
  - name: HostSG
    rules:
      - name: HostUDPPorts
        description: "Only from Itself udp"
        from_port: 0
        to_port: 65535
        protocol: udp
        group: HostSG
        rule_type: Ingress
      - name: Postgresql
        description: "PostgreSql"
        from_port: 5432
        to_port: 5432
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: HostTCPPorts
        description: "Only from Itself tcp"
        from_port: 0
        to_port: 65535
        protocol: tcp
        group: HostSG
        rule_type: Ingress
      - name: TowerUDPPorts
        description: "Only from tower"
        from_port: 0
        to_port: 65535
        protocol: udp
        group: TowerSG
        rule_type: Ingress
      - name: TowerTCPPorts
        description: "Only from tower"
        from_port: 0
        to_port: 65535
        protocol: tcp
        group: TowerSG
        rule_type: Ingress
      - name: BastionUDPPorts
        description: "Only from bastion"
        from_port: 0
        to_port: 65535
        protocol: udp
        group: BastionSG
        from_group: DefaultSG
        rule_type: Ingress
      - name: BastionTCPPorts
        description: "Only from bastion"
        from_port: 0
        to_port: 65535
        protocol: tcp
        group: BastionSG
        from_group: DefaultSG
        rule_type: Ingress
  - name: CapsuleSG
    rules:
      - name: SatHTTPSPorts
        description: "HTTPS Public"
        from_port: 9090
        to_port: 9090
        protocol: tcp
        cidr: "0.0.0.0/0"
        rule_type: Ingress
      - name: BastionUDPPorts
        description: "Only from bastion"
        from_port: 0
        to_port: 65535
        protocol: udp
        from_group: DefaultSG
        rule_type: Ingress
      - name: BastionTCPPorts
        description: "Only from bastion"
        from_port: 0
        to_port: 65535
        protocol: tcp
        from_group: DefaultSG
        rule_type: Ingress
instances:
@@ -262,6 +153,7 @@
    dns_loadbalancer: true
    security_groups:
      - BastionSG
      - DefaultSG
    flavor:
      ec2: "{{bastion_instance_type}}"
    tags:
@@ -276,8 +168,9 @@
    public_dns: true
    security_groups: 
      - SatelliteSG
      - DefaultSG
    flavor:
      "ec2": "{{satellite_instance_type}}"
      ec2: "{{satellite_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "satellites"
@@ -290,17 +183,19 @@
  - name: "capsule"
    count: "{{capsule_instance_count}}"
    security_groups:
      - SatelliteSG
    public_dns: false
      - CapsuleSG
      - DefaultSG
    public_dns: true
    dns_loadbalancer: false
    flavor:
      "ec2": "{{capsule_instance_type}}"
      ec2: "{{capsule_instance_type}}"
    tags:
      - key: "AnsibleGroup"
        value: "capsules"
      - key: "ostype"
        value: "linux"
    subnet: PublicSubnet
#########
@@ -333,17 +228,13 @@
  - rhel-7-server-ansible-2.6-rpms
  - rhel-7-server-extras-rpms
project_tag: "{{ env_type }}-{{ guid }}"
zone_internal_dns: "{{guid}}.internal."
chomped_zone_internal_dns: "{{guid}}.internal"
bastion_public_dns: "bastion.{{subdomain_base}}."
bastion_public_dns_chomped: "bastion.{{subdomain_base}}"
vpcid_cidr_block: "192.168.0.0/16"
vpcid_name_tag: "{{subdomain_base}}"
@@ -372,13 +263,11 @@
rtb_public_name_tag: "{{subdomain_base}}-public"
rtb_private_name_tag: "{{subdomain_base}}-private"
cf_template_description: "{{ env_type }}-{{ guid }} Ansible Agnostic Deployer "
# Cloudformation template selection
stack_file: default
secret_dir: "~/secrets"
env_type: satellite-vm-prod
env_type: satellite-v64-prod
ansible/configs/satellite-v64-prod/files/cloud_providers/ec2_cloud_template.j2
@@ -17,16 +17,20 @@
        - Key: Application
          Value:
            Ref: "AWS::StackId"
  VpcInternetGateway:
    Type: "AWS::EC2::InternetGateway"
  VpcGA:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId:
        Ref: VpcInternetGateway
      VpcId:
        Ref: Vpc
  VpcRouteTable:
    Type: "AWS::EC2::RouteTable"
    Properties:
      VpcId:
        Ref: Vpc
  VPCRouteInternetGateway:
    DependsOn: VpcGA
    Type: "AWS::EC2::Route"
@@ -37,14 +41,6 @@
      RouteTableId:
        Ref: VpcRouteTable
  VpcGA:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId:
        Ref: VpcInternetGateway
      VpcId:
        Ref: Vpc
  PublicSubnet:
    Type: "AWS::EC2::Subnet"
    DependsOn:
@@ -53,7 +49,6 @@
    {% if aws_availability_zone is defined %}
      AvailabilityZone: {{ aws_availability_zone }}
    {% endif %}
      CidrBlock: "{{ aws_public_subnet_cidr }}"
      Tags:
        - Key: Name
@@ -84,7 +79,6 @@
        - Key: Name
          Value: "{{security_group['name']}}"
{% endfor %}
{% for security_group in default_security_groups|list + security_groups|list %}
{% for rule in security_group.rules %}
  {{security_group['name']}}{{rule['name']}}:
@@ -100,7 +94,6 @@
  {% if rule['cidr'] is defined %}
     CidrIp: "{{rule['cidr']}}"
  {% endif  %}
  {% if rule['from_group'] is defined %}
     SourceSecurityGroupId:
       Fn::GetAtt:
@@ -460,7 +453,7 @@
          TTL: {{ aws_dns_ttl_public }}
          ResourceRecords:
          - "Fn::GetAtt":
            - {{instance['name']}}{{loop.index}}.{{capsule_region['name']}}
            - {{instance['name']}}{{loop.index}}{{capsule_region['name']}}
            - PublicIp
{% endif %}
{% endfor %}
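The security-group loop above emits one ingress resource per rule, naming it `<group><rule>` and switching between `CidrIp` and `SourceSecurityGroupId` depending on which keys the rule defines. As a sketch, the `HostSG`/`BastionTCPPorts` rule from env_vars.yml would render roughly as follows (the `AWS::EC2::SecurityGroupIngress` wrapper and `GroupId` wiring are assumptions, since that part of the template is outside this hunk):

[source,yaml]
----
# assumed rendering of the HostSG BastionTCPPorts rule (from_group: DefaultSG)
HostSGBastionTCPPorts:
  Type: "AWS::EC2::SecurityGroupIngress"
  Properties:
    GroupId:
      Fn::GetAtt: [HostSG, GroupId]        # assumption: attaches to owning SG
    IpProtocol: tcp
    FromPort: 0
    ToPort: 65535
    SourceSecurityGroupId:
      Fn::GetAtt: [DefaultSG, GroupId]     # from the rule's from_group key
----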
ansible/configs/satellite-v64-prod/sample_vars.yml
@@ -22,23 +22,23 @@
key_name: ocpkey                       # Keyname must exist in AWS
capsule_instance_count: 2
install_capsule: true
configure_capsule: true
# Target regions for capsule servers
target_regions:
  - name: apac
###### satellite env related variables ###############
# rhel_subscription_user: **************
# rhel_subscription_pass: **********
satellite_admin: admin
satellite_admin_password: r3dh4t1!
org: "Default Organization"
subscription_name:  "Employee SKU"
manifest_file: ~/office_work/manifests/manifest_satellite-vm_1.zip
# manifest_file: ~/office_work/manifests/manifest_satellite-vm_1.zip
content_view_name: "capsule server content"
activation_key_name: "capsule_activation_key"
life_cycle_env_name: "Library"
############ Custom facts that do not need to be set  ###########
#
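The hunk shows only the first `target_regions` entry. Since post_infra.yml pulls a `region` key out of each item via `json_query('[].region')`, each entry presumably carries both a name and an AWS region; a sketch with hypothetical values:

[source,yaml]
----
# hypothetical fuller target_regions list; region values are placeholders
target_regions:
  - name: apac
    region: ap-southeast-1
  - name: emea
    region: eu-central-1
----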
ansible/configs/satellite-v64-prod/software.yml
@@ -18,14 +18,12 @@
          src: "./files/etc_hosts_template.j2"
          dest: /etc/hosts
  roles:
    # Satellite-installation role installs satellite and Configures firewalld
    - { role: "satellite-installation", when: install_satellite }
    # # Uploads manifest, adds & sync repos, creates content-view and activation keys
    - { role: "satellite-manage-manifest", when: configure_satellite }
    - { role: "satellite-installation",                 when: install_satellite }
    - { role: "satellite-manage-manifest",              when: configure_satellite }
    - { role: "satellite-manage-subscription-and-sync", when: configure_satellite } 
    - { role: "satellite-manage-content-view", when: configure_satellite }
    - { role: "satellite-manage-activationkey", when: configure_satellite }
    - { role: "satellite-manage-capsule-certificate", when: configure_satellite }
    - { role: "satellite-manage-content-view",          when: configure_satellite }
    - { role: "satellite-manage-activationkey",         when: configure_satellite }
    - { role: "satellite-manage-capsule-certificate",   when: configure_satellite }
    
    
- name: Configuring capsule Hosts
@@ -38,10 +36,8 @@
          src: "./files/etc_hosts_template.j2"
          dest: /etc/hosts
  roles:
    # Satellite-installation role installs satellite and Configures firewalld
    - { role: "satellite-capsule-installation", when: install_capsule }
    # Uploads manifest, adds & sync repos, creates content-view and activation keys
    - { role: "satellite-capsule-configuration", when: configure_capsule }
    - { role: "satellite-capsule-installation",   when: install_capsule }
    - { role: "satellite-capsule-configuration",  when: configure_capsule }
    
ansible/roles/satellite-capsule-configuration/tasks/main.yml
@@ -2,10 +2,11 @@
- name: Download Cert from Satellite
  get_url:
    url: "https://satellite1.{{guid}}.internal/pub/katello-ca-consumer-latest.noarch.rpm"
    url: "https://{{item}}/pub/katello-ca-consumer-latest.noarch.rpm"
    dest: /root/katello-ca-consumer-latest.noarch.rpm
    mode: 0664
    validate_certs: no
  loop: "{{ groups['satellites'] }}"
- name: Remove rh-amazon-rhui-client package
  tags: packer
@@ -32,9 +33,10 @@
- name: Register with activation-key
  redhat_subscription:
    state: present
    server_hostname: "satellite1.{{guid}}.internal"
    server_hostname: "{{item}}"
    activationkey: "{{activation_key_name}}"
    org_id: "Default_Organization"
  loop: "{{ groups['satellites'] }}"
- name: Disable all repos
  command: subscription-manager repos --disable "*"
@@ -61,7 +63,8 @@
  synchronize:
    src: /root/{{ inventory_hostname }}-certs.tar
    dest: /root
  delegate_to: satellite1.{{guid}}.internal
  delegate_to: "{{item}}"
  loop: "{{ groups['satellites'] }}"
- name: Configure Satellite Capsule
  command: >-
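The change swaps the hardcoded `satellite1.{{guid}}.internal` for a loop over `groups['satellites']`. Note that with more than one satellite the download overwrites the same destination file once per host; if a single satellite is the expected case, indexing the group would avoid the repetition. A minimal sketch under that assumption:

[source,yaml]
----
# minimal sketch, assuming exactly one host in groups['satellites']
- name: Download Cert from Satellite
  get_url:
    url: "https://{{ groups['satellites'][0] }}/pub/katello-ca-consumer-latest.noarch.rpm"
    dest: /root/katello-ca-consumer-latest.noarch.rpm
    mode: 0664
    validate_certs: no
----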
ansible/roles/satellite-manage-activationkey/tasks/main.yml
@@ -12,7 +12,7 @@
- name: Creating fact list from Name of existing Activation Keys
  set_fact:
    list_of_exist_activation_key: "{{ list_of_exist_activation_key + [ item.split(':')[1].lstrip(' ') ] }}"
    list_of_exist_activation_key: "{{ list_of_exist_activation_key | d([]) + [ item.split(':')[1].lstrip(' ') ] }}"
  loop: "{{activation_key_list.stdout_lines}}"
  when: activation_key_list is defined
  tags:
@@ -28,7 +28,7 @@
      --name "{{activation_key_name}}"
      --content-view "{{content_view_name}}"
      --lifecycle-environment "{{life_cycle_env_name}}"
  when: activation_key_name not in list_of_exist_activation_key
  when: 'activation_key_name not in list_of_exist_activation_key| d([])'
  tags:
    - configure_satellite
    - configure_satellite_activationkey
@@ -49,7 +49,7 @@
- name: Creating fact list of subscription ID from existing Activation-Keys 
  set_fact:
    list_of_existing_subscription_in_activation_key: "{{ list_of_existing_subscription_in_activation_key + [ item.split(':')[1].strip(' ') | int  ] }}"
    list_of_existing_subscription_in_activation_key: "{{ list_of_existing_subscription_in_activation_key | d([]) + [ item.split(':')[1].strip(' ') | int  ] }}"
  loop: "{{ list_of_susbscription_in_activation_key.stdout_lines }}"
  when: list_of_susbscription_in_activation_key is defined
  tags:
@@ -64,7 +64,7 @@
      --organization "{{org}}"
      --name "{{activation_key_name}}"
      --subscription-id 1
  when: 1 not in list_of_existing_subscription_in_activation_key
  when: '1 not in list_of_existing_subscription_in_activation_key|d([])'
  tags:
    - configure_satellite
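The recurring `| d([])` edits in these roles all serve the same purpose: the `set_fact` accumulators reference themselves inside a loop, and without a default the first iteration fails on an undefined variable. `d` is Jinja's shorthand for the `default` filter. A standalone sketch of the pattern (the registered variable name is hypothetical):

[source,yaml]
----
# accumulator pattern: d([]) seeds the list on the first iteration
- name: Build a list fact from command output
  set_fact:
    parsed_names: "{{ parsed_names | d([]) + [ item.split(':')[1].lstrip(' ') ] }}"
  loop: "{{ hammer_output.stdout_lines }}"   # hypothetical registered variable
----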
ansible/roles/satellite-manage-activationkey/vars/main.yml
File was deleted
ansible/roles/satellite-manage-capsule-certificate/defaults/main.yml
File was deleted
ansible/roles/satellite-manage-content-view/tasks/main.yml
@@ -11,7 +11,7 @@
- name: Creating list fact of names available in Content-Views
  set_fact: 
    content_view_exist: "{{ content_view_exist + [item.split(':')[1].lstrip(' ')]}}"
    content_view_exist: "{{ content_view_exist | d([]) + [item.split(':')[1].lstrip(' ')]}}"
  loop: "{{content_view_list.stdout_lines}}"
  tags:
    - configure_satellite
@@ -44,7 +44,7 @@
- name: Creating list fact of enabled Repo IDs from subscription 
  set_fact:
    sub_repos_name_exist: "{{ sub_repos_name_exist + [ item.split(':')[1].lstrip(' ')] }}"
    sub_repos_name_exist: "{{ sub_repos_name_exist | d([]) + [ item.split(':')[1].lstrip(' ')] }}"
  loop: "{{repo_list.stdout_lines}}"
  when: repo_list is defined
  tags:
@@ -69,7 +69,7 @@
- name: Creating list fact of existing Repository IDs from Content-View
  set_fact:
    content_view_repos_exist: "{{ content_view_repos_exist + [item.lstrip(' ')]}}"
    content_view_repos_exist: "{{ content_view_repos_exist | d([]) + [item.lstrip(' ')]}}"
  loop: "{{ content_view_repos_list.stdout_lines[0].split(':')[1].split(',') }}"
  when: content_view_repos_list is defined
  tags:
@@ -105,7 +105,7 @@
- name: Creating facts list of published versions of Content-View
  set_fact:
    published_content_view_versions: "{{ published_content_view_versions + [  item.split(':')[1].strip(' ') | int  ] }}"
    published_content_view_versions: "{{ published_content_view_versions | d([]) + [  item.split(':')[1].strip(' ') | int  ] }}"
  loop: "{{ published_versions.stdout_lines }} "
  when: published_versions is defined
  tags:
@@ -119,7 +119,7 @@
      --organization "{{org}}"
      --name "{{content_view_name}}"
      --async
  when: published_content_view_versions | length == 0
  when: published_content_view_versions | d([]) | length == 0
  tags: 
    - configure_satellite
    - configure_satellite_content_view
ansible/roles/satellite-manage-content-view/vars/main.yml
File was deleted
ansible/roles/satellite-manage-manifest/tasks/main.yml
@@ -10,7 +10,7 @@
# - debug: var=subscription_list
- name: Creating list fact of existing subscriptions
  set_fact:
    list_of_existing_subscriptions: "{{ list_of_existing_subscriptions + [ item.split(':').1.lstrip(' ') ] }}"
    list_of_existing_subscriptions: "{{ list_of_existing_subscriptions | d([]) + [ item.split(':').1.lstrip(' ') ] }}"
  loop: "{{ subscription_list.stdout_lines }}"
  when: subscription_list is defined
  tags:
ansible/roles/satellite-manage-manifest/vars/main.yml
File was deleted
ansible/roles/satellite-manage-subscription-and-sync/tasks/main.yml
@@ -13,7 +13,7 @@
           
- name: Creating fact from enabled Repos Name list from subscription of {{org}} 
  set_fact:
    repos_added: "{{ repos_added + [ item.split(':')[1].lstrip(' ') ] }}"
    repos_added: "{{ repos_added | d([]) + [ item.split(':')[1].lstrip(' ') ] }}"
  loop: "{{ repo_name.stdout_lines}}"
  when: repo_name is defined
  tags:
@@ -37,10 +37,10 @@
    --basearch "{{item.basearch}}" 
    --name "{{item.name}}"
    --releasever "{{item.releasever}}"
  when: ((item.sync_name not in repos_added) and
      (item.releasever is defined))
  when:
    - 'item.sync_name not in repos_added|d([])'
    - 'item.releasever is defined'
  loop: "{{ satellite_repository }}"
  # ignore_errors: yes
  tags:
    - configure_satellite
    - configure_satellite_repository
@@ -52,25 +52,44 @@
    --product "{{item.product}}" 
    --basearch "{{item.basearch}}" 
    --name "{{item.name}}"
  when: ((item.sync_name not in repos_added) and
        (item.releasever is not defined ))
  # ignore_errors: yes
  when:
    - 'item.sync_name not in repos_added|d([])'
    - 'item.releasever is not defined'
  loop: "{{ satellite_repository }}"
  tags:
    - configure_satellite
    - configure_satellite_repository
- name: Grab ID of existing repos
  shell: >-
    hammer repository list
    --organization "{{ org }}" | grep yum | awk '{print $1}'
  register: repos
# - debug: var=repos
- name: Sync repo
  command: >-
    hammer repository synchronize
    --organization "{{item.organization}}"
    --product "{{item.product}}"
    --name "{{item.sync_name}}"
    --async
  loop: "{{ satellite_repository }}"
  ignore_errors: yes
    hammer repository synchronize
    --organization "{{ org }}"
    --id "{{ item }}"
  loop: "{{ repos.stdout_lines }}"
  register: sync_result
  retries: 3
  until: "{{ sync_result is  success }} "
  tags:
    - configure_satellite
    - configure_satellite_repository
    - configure_satellite_sync_repo
# - name: Sync repo
#   command: >-
#     hammer repository synchronize
#     --organization "{{item.organization}}"
#     --product "{{item.product}}"
#     --name "{{item.sync_name}}"
#     --async
#   loop: "{{ satellite_repository }}"
#   ignore_errors: yes
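The new sync task retries per repository ID instead of firing every sync `--async` and ignoring errors. `retries` without `delay` falls back to Ansible's default 5-second wait between attempts; a sketch with an explicit delay, which may suit long-running syncs better:

[source,yaml]
----
# sketch of the retry above with an explicit delay (60s is a placeholder)
- name: Sync repo
  command: >-
    hammer repository synchronize
    --organization "{{ org }}"
    --id "{{ item }}"
  loop: "{{ repos.stdout_lines }}"
  register: sync_result
  retries: 3
  delay: 60
  until: sync_result is success
----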
ansible/roles/satellite-manage-subscription-and-sync/vars/main.yml
File was deleted