Wolfgang Kulhanek
2020-01-20 aace47f7718b874c19dec572774d9896bfcf75ae
New config
23 files added
1844 ■■■■■ changed files
ansible/configs/ocp4-cluster-wk/README.adoc 155 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/default_vars.yml 490 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/default_vars_osp.yml 91 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/destroy_env.yml 34 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/files/admin_host_requirements.txt 88 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/files/cloud_providers/osp_cloud_template_master.j2 176 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/files/cloud_providers/osp_cloud_template_nested.j2 96 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/files/clouds.yaml.j2 12 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/files/general-ms.yaml.j2 55 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/files/htpasswd.j2 6 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/files/macos_requirements.txt 71 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/files/pv-registry.yaml.j2 14 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/files/pv-user.yaml.j2 21 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/files/pvc-registry.yaml 13 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/files/repos_template.j2 29 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/files/rfc2136.ini.j2 10 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/files/update_ignition.py 50 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/post_infra.yml 50 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/post_software.yml 30 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/pre_infra.yml 10 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/pre_software.yml 100 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/requirements.yml 6 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/software.yml 237 ●●●●● patch | view | raw | blame | history
ansible/configs/ocp4-cluster-wk/README.adoc
New file
@@ -0,0 +1,155 @@
= OCP 4 Disconnected Install Lab
== Config Description
The following config includes:
* One bastion host for installation
* One utility VM for other services such as container registry and httpd
* DNS and other resources for OCP4
* SSH access setup
== Review the Env_Type variable file
* This file link:./default_vars.yml[./default_vars.yml] contains all the variables you need to define to control the deployment of your environment. These are the defaults.
* Override the defaults for your environment by creating your own myenvironment-variables.yml file, as below.
== Running Ansible Playbook
=== Running Playbook With Environment and Secrets files
You can create yaml files of your desired configs and secrets and execute them:
`ansible-playbook ansible/main.yaml -e @myenvironment-variables.yml  -e@my-secrets.yml`
=== To Delete an environment
Run the `destroy_env.yml` playbook.
Ex: `ansible-playbook ansible/configs/ocp4-cluster-wk/destroy_env.yml -e @myenvironment-variables.yml -e @my-secrets.yml`
TODO: Tie this into the `cloud_providers/osp_destroy_env.yml`
The teardown process is roughly as follows:
* Delete compute & storage resources
** Use `openstack purge`
** ex: `openstack --os-cloud sten2 project purge --keep-project --project sten2-project`
* Delete network resources
** Use `neutron purge`
** ex: `neutron --os-cloud sten2 purge bb4e371f4bf443feb6e4435c8f5df6ae`
* Delete the Heat stack
** If you don't delete the HOT as the user that created it, the keypair will not be deleted
* Delete Keypair if necessary
** TODO: add this functionality
* Cleanup DNS entries for bastion & OpenShift
* Delete project
Software stages in config provide:
* Pull copy of the installer to bastion
* Pull copy of the oc binary to bastion
* Clone ansible for generating the IaaS to bastion
** Go through the IPI created assets and create those?
** Talk to engineering and see what they are creating?
* Install any load balancer to utility VM?
* Install registry to utility VM?
Lab:
* Explain UPI
* Explain OpenStack environment and requirements
* Prep work
* Create IaaS components
* OpenShift install
* OLM install workaround?
== Workstation Setup:
With either setup, you also need to have a `clouds.yaml` file on your system with credentials.
You can store this either in your working directory or in `~/.config/openstack/clouds.yaml`.
=== RHEL:
Follow these steps to prepare an admin host to deploy this config. This has been tested on RHEL 7.7.
sudo subscription-manager register
sudo subscription-manager attach --pool=<yourpool>
sudo subscription-manager repos --disable=* --enable rhel-7-server-optional-rpms \
  --enable rhel-7-server-rpms --enable rhel-7-server-extras-rpms
sudo yum update -y
sudo yum install python-virtualenv git gcc
git clone https://github.com/redhat-cop/agnosticd.git
cd agnosticd
git checkout disco-ocp4
virtualenv ~/venv-openstack
source ~/venv-openstack/bin/activate
pip install -r ./ansible/configs/ocp4-cluster-wk/files/admin_host_requirements.txt
=== MacOS:
# Install python3:
brew install python
# Make sure your path has this in it:
PATH="/usr/local/opt/python/libexec/bin:/usr/local/bin:$PATH"
# Make sure virtualenv and virtualenvwrapper are installed system wide
pip install virtualenv
pip install virtualenvwrapper
# Add this to your .bashrc
export WORKON_HOME=~/.virtualenvs
[ -f /usr/local/bin/virtualenvwrapper.sh ] && source /usr/local/bin/virtualenvwrapper.sh
# To start a new python virtual env
mkvirtualenv venv-openstack
# Activate virtual env
workon venv-openstack
# Clone repo and install python libraries
git clone https://github.com/redhat-cop/agnosticd.git
cd agnosticd
git checkout disco-ocp4
pip install -r ./ansible/configs/ocp4-cluster-wk/files/macos_requirements.txt
User access:
student_name is defined either in sample_vars or from deployer script. This is the account that people will use and will generally match their opentlc ID. lab-user is the default defined in the role
Pre-software:
Student name is set up by the bastion-student-user role
  It generates a password if not defined in student_password
  It creates a user on the bastions
    *We should have it create on all nodes (i.e. utilityVM also)
  It adds student_key to the student_user account on bastion
    *We should have it create on all nodes
    Where do we get student_key from?
  It adds env_authorized_key to the student_user account on bastion
    What is this key actually used for? It gets generated in set_env_authorized_key role
    If this key is dynamically generated per run, can we send it in email?
    What about the guid-infra-key we create in heat template - can we use that instead of this?
    *We should have it create on all nodes
  It enables password auth and passwordless sudo
Nate added tasks to copy priv key, pub key, ssh conf from root > student .ssh directory
  There is probably a better way
student_name account has the {{guid}}key.pub added to its authorized keys
cloud-user has ^ + sucked in ones + one generated by nova
Software:
We connect as ansible_user, which is cloud-user for OSP
We install python Openstack modules using pip3
  We need to make this available for all users, or at least student_name
  Add /usr/local/bin to system wide PATH
To load test, use cloudforms-oob:
./order_svc.sh -t 5 -y -c 'OPENTLC OpenShift 4 Labs' -d "environment=DEV,region=dev_na_osp,check=t,expiration=7,runtime=8" -i 'OpenShift 4 Install VM - OpenStack'
ansible/configs/ocp4-cluster-wk/default_vars.yml
New file
@@ -0,0 +1,490 @@
###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
###### OR PASS as "-e" args to ansible-playbook command
# This is an account that must exist in OpenStack.
# It is used to create projects, access, Heat templates
admin_user: opentlc-mgr
# The output_dir holds all of the files generated during the deployment
# This includes generated Heat templates, SSH config, SSH keys
# This must be an absolute path and no vars (like $HOME or ~)
output_dir: /tmp/output_dir
# The name of the agnosticd config to deploy
env_type: ocp4-cluster
# The {{ guid }} is used everywhere and it is what differentiates otherwise
# identical environments. Make this unique. Usually they are 4 characters, but
# it can be any reasonable length.
guid: mydefault
# The name of the OpenShift cluster that will be deployed.
# This is primarily used if you want to automate the OpenShift deployment.
cluster_name: cluster-{{ guid }}
# Used to add metadata (tags) to OpenStack objects created
project_tag: "{{ env_type }}-{{ guid }}"
# Why is this config being deployed?
# Some valid: development, ilt, production, event
purpose: development
# This should be overwritten based on the user ordering the catalog item
# It will be used by the bastion-student-user role and created on the bastion
student_name: lab-user
# Enable this if you want to create a user on the bastion
# Mutually exclusive with {{ install_ipa_client }}
install_student_user: true
# Enable this if you want to use IPA for user authentication.
# Mutually exclusive with {{ install_student_user }}
install_ipa_client: false
# TODO: What does this really do besides run the role?
set_env_authorized_key: true
env_authorized_key: "{{guid}}key"
key_name: "default_key_name"
# Run the bastion-lite role
install_bastion: true
# This should be obsolete and not required in the current form
install_k8s_modules: false
# FTL is used for grading and solving. It will pull in the external ftl-injector role.
# This might be enabled when we have solvers to run or graders for ILT
install_ftl: false
# TODO: Decide on whether to use sat or give access to repos directly with key
# This will tell Agnosticd to use either:
# satellite, rhn, or file for repos
repo_method: file
# If using satellite, these are needed:
# satellite_url: satellite.opentlc.com
# satellite_activationkey: # This should be stored in secrets
# satellite_org: # This should be stored in secrets
# use_content_view: true
# If using file, these are needed in addition to the repos_template.j2 file:
osrelease: 4.2.0
repo_version: '4.2'
own_repo_path: # Should be defined in secrets
# Packages to install on all of the hosts deployed as part of the agnosticd config
# This invokes the "common" role
install_common: true
# As part of the "common" role, this causes it to do a yum update on the host
update_packages: true
# The packages that will be installed by the "common" role. Only put things
# in this list that are needed, stable, and useful on every node.
common_packages:
  - unzip
  - bash-completion
  - tmux
  - bind-utils
  - wget
  - ansible
  - git
  - vim-enhanced
  - httpd-tools
  - openldap-clients
  - podman
  - tree
# This will run in the post_software phase and run playbooks in the
# software_playbooks directory
software_to_deploy: none
# If you want DNS entries to be created automatically, choose one of these.
# Alternately, they can both be set to false.
use_dynamic_dns: true
# This is not fully implemented yet
# use_route53: false
# Quotas to set for new project that is created
quota_num_instances: 15
quota_num_cores: 72
quota_memory: 163840 # in MB
quota_num_volumes: 25
quota_volumes_gigs: 500
#quota_loadbalancers: #when Octavia is available
#quota_pool: #when Octavia is available
quota_networks: 3
quota_subnets: 3
quota_routers: 3
quota_fip: 5
quota_sg: 10
quota_sg_rules: 100
# Instances to be provisioned in new project
# Provide these as a list.
# Each instance type can have any number of replicas deployed with the same
# configuration.
# Metadata in OpenStack is equivalent to tags in AWS
# These instances will be created with Cinder persistent volumes
instances:
  # Bastion: the only host with a floating IP; users and the deployer
  # connect here first, so it carries both "bastions" and "clientvms" groups.
  - name: bastion
    count: 1
    unique: true
    alt_name: bastion
    image_id: "{{ bastion_instance_image }}"
    floating_ip: true
    flavor:
      osp: "{{ bastion_instance_type }}"
    metadata:
      - AnsibleGroup: "bastions,clientvms"
      - function: bastion
      - user: nate
      - project: "{{ project_tag }}"
      - ostype: linux
      - Purpose: "{{ purpose }}"
    rootfs_size: 30
    network: ocp
    security_groups:
      - bastion_sg
  # Utility VM (disabled): would host a local container registry and httpd
  # for disconnected installs. Uncomment to enable.
  # - name: utilityvm
  #   count: 1
  #   image_id: "{{ utilityvm_instance_image }}"
  #   floating_ip: false
  #   flavor:
  #     osp: "{{ utilityvm_instance_type }}"
  #   metadata:
  #     - AnsibleGroup: "utility"
  #     - function: bastion
  #     - user: nate
  #     - project: "{{ project_tag }}"
  #     - ostype: linux
  #     - Purpose: "{{ purpose }}"
  #   rootfs_size: 50
  #   network: ocp
  #   security_groups:
  #     - utility_sg
# Security groups and associated rules. These will be created as separate
# groups and rules when the Heat template is generated.
security_groups:
  - name: bastion_sg
    description: Bastion security group allows basic icmp and SSH ingress and egress to *
    rules:
    - protocol: icmp
      direction: ingress
    - protocol: tcp
      direction: ingress
      port_range_min: 22
      port_range_max: 22
      remote_ip_prefix: 0.0.0.0/0
  # - name: utility_sg
  #   description: Utility security group allows SSH from bastion and egress to *
  #   rules:
  #   - protocol: icmp
  #     direction: ingress
  #     remote_group: "bastion_sg"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 22
  #     port_range_max: 22
  #     remote_group: "bastion_sg"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 5000
  #     port_range_max: 5000
  #     remote_ip_prefix: "{{ ocp_network_subnet_cidr }}"
  #     description: "local container registry"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 80
  #     port_range_max: 80
  #     remote_ip_prefix: "{{ ocp_network_subnet_cidr }}"
  #     description: "http traffic for ignition files"
  # - name: isolated_sg
  #   description: All instances in the disconnected network
  #   rules:
  #   - protocol: icmp
  #     direction: ingress
  #     remote_group: "bastion_sg"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 22
  #     port_range_max: 22
  #     remote_group: "bastion_sg"
  # - name: master_sg
  #   description: Security group for OpenShift master and bootstrap
  #   rules:
  #   - protocol: icmp
  #     direction: ingress
  #     description: "icmp"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 22623
  #     port_range_max: 22623
  #     remote_ip_prefix: "{{ ocp_network_subnet_cidr }}"
  #     description: "machine config server"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 22
  #     port_range_max: 22
  #     remote_group: "bastion_sg"
  #     description: "SSH"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 53
  #     port_range_max: 53
  #     remote_ip_prefix: "{{ ocp_network_subnet_cidr }}"
  #     description: "DNS (tcp)"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 53
  #     port_range_max: 53
  #     remote_ip_prefix: "{{ ocp_network_subnet_cidr }}"
  #     description: "DNS (udp)"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 5353
  #     port_range_max: 5353
  #     remote_ip_prefix: "{{ ocp_network_subnet_cidr }}"
  #     description: "mDNS"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 6443
  #     port_range_max: 6443
  #     remote_ip_prefix: 0.0.0.0/0
  #     description: "OpenShift API"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 4789
  #     port_range_max: 4789
  #     remote_group: "master_sg"
  #     description: "VXLAN"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 4789
  #     port_range_max: 4789
  #     remote_group: "worker_sg"
  #     description: "VXLAN (worker)"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 6081
  #     port_range_max: 6081
  #     remote_group: "master_sg"
  #     description: "Geneve"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 6081
  #     port_range_max: 6081
  #     remote_group: "worker_sg"
  #     description: "Geneve (worker)"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 6641
  #     port_range_max: 6642
  #     remote_group: "master_sg"
  #     description: "OVNDB"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 6641
  #     port_range_max: 6642
  #     remote_group: "worker_sg"
  #     description: "OVNDB (worker)"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 9000
  #     port_range_max: 9999
  #     remote_group: "master_sg"
  #     description: "Master ingress internal (tcp)"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 9000
  #     port_range_max: 9999
  #     remote_group: "worker_sg"
  #     description: "Master ingress from worker (tcp)"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 9000
  #     port_range_max: 9999
  #     remote_group: "master_sg"
  #     description: "Master ingress internal (udp)"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 9000
  #     port_range_max: 9999
  #     remote_group: "worker_sg"
  #     description: "Master ingress from worker (udp)"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 10259
  #     port_range_max: 10259
  #     remote_group: "master_sg"
  #     description: "Kube Scheduler"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 10259
  #     port_range_max: 10259
  #     remote_group: "worker_sg"
  #     description: "Kube Scheduler (worker)"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 10257
  #     port_range_max: 10257
  #     remote_group: "master_sg"
  #     description: "Kube controller manager"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 10257
  #     port_range_max: 10257
  #     remote_group: "worker_sg"
  #     description: "Kube controller manager (worker)"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 10250
  #     port_range_max: 10250
  #     remote_group: "master_sg"
  #     description: "master ingress kubelet secure"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 10250
  #     port_range_max: 10250
  #     remote_group: "worker_sg"
  #     description: "master ingress kubelet secure from worker"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 2379
  #     port_range_max: 2380
  #     remote_group: "master_sg"
  #     description: "etcd"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 30000
  #     port_range_max: 32767
  #     remote_group: "master_sg"
  #     description: "master ingress services (tcp)"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 30000
  #     port_range_max: 32767
  #     remote_group: "master_sg"
  #     description: "master ingress services (udp)"
  #   - protocol: vrrp
  #     direction: ingress
  #     remote_ip_prefix: "{{ ocp_network_subnet_cidr }}"
  #     description: "VRRP"
  # - name: worker_sg
  #   description: Security group for OpenShift workers
  #   rules:
  #   - protocol: icmp
  #     direction: ingress
  #     description: icmp
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 22
  #     port_range_max: 22
  #     remote_group: "bastion_sg"
  #     description: "SSH"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 5353
  #     port_range_max: 5353
  #     remote_ip_prefix: "{{ ocp_network_subnet_cidr }}"
  #     description: "mDNS"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 80
  #     port_range_max: 80
  #     description: "Ingress HTTP"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 443
  #     port_range_max: 443
  #     description: "Ingress HTTPS"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 1936
  #     port_range_max: 1936
  #     remote_ip_prefix: "{{ ocp_network_subnet_cidr }}"
  #     description: "router stats"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 4789
  #     port_range_max: 4789
  #     remote_group: "master_sg"
  #     description: "VXLAN from master"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 4789
  #     port_range_max: 4789
  #     remote_group: "worker_sg"
  #     description: "VXLAN"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 6081
  #     port_range_max: 6081
  #     remote_group: "master_sg"
  #     description: "Geneve from master"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 6081
  #     port_range_max: 6081
  #     remote_group: "worker_sg"
  #     description: "Geneve"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 9000
  #     port_range_max: 9999
  #     remote_group: "worker_sg"
  #     description: "Worker ingress internal (tcp)"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 9000
  #     port_range_max: 9999
  #     remote_group: "master_sg"
  #     description: "Worker ingress from master (tcp)"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 9000
  #     port_range_max: 9999
  #     remote_group: "master_sg"
  #     description: "Worker ingress from master (udp)"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 9000
  #     port_range_max: 9999
  #     remote_group: "worker_sg"
  #     description: "Worker ingress internal (udp)"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 10250
  #     port_range_max: 10250
  #     remote_group: "master_sg"
  #     description: "master ingress kubelet secure from master"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 10250
  #     port_range_max: 10250
  #     remote_group: "worker_sg"
  #     description: "master ingress kubelet secure"
  #   - protocol: tcp
  #     direction: ingress
  #     port_range_min: 30000
  #     port_range_max: 32767
  #     remote_group: "worker_sg"
  #     description: "worker ingress services (tcp)"
  #   - protocol: udp
  #     direction: ingress
  #     port_range_min: 30000
  #     port_range_max: 32767
  #     remote_group: "worker_sg"
  #     description: "worker ingress services (udp)"
  #   - protocol: vrrp
  #     direction: ingress
  #     remote_ip_prefix: "{{ ocp_network_subnet_cidr }}"
  #     description: "VRRP"
### OCP Specific
worker_instance_count: 2
master_instance_count: 3
install_ocp4: true
ocp4_installer_version: "4.2.0"
subdomain_base_suffix: ".example.opentlc.com"
ansible/configs/ocp4-cluster-wk/default_vars_osp.yml
New file
@@ -0,0 +1,91 @@
# The type of cloud provider this will be deployed to
cloud_provider: osp
# Authentication credentials for OpenStack, used to create the resources.
# These should be included with your secrets, but are listed here for reference
# osp_auth_url:
# osp_auth_username:
# osp_auth_password:
# osp_auth_cloud:
# osp_auth_project_domain: #usually set to "default"
# osp_auth_user_domain: #usually set to "default"
# The name of the project that will be created in OpenStack for the user
osp_project_name: "{{ guid }}-project"
# Set this to true if you need to create a new project in OpenStack
# This should almost always be set to true for OpenShift installations
# If it is set to false, the {{ osp_project_name }} must already exist and
# should be able to run whatever you are deploying
osp_project_create: true
# This is the user that Ansible will use to connect to the nodes it is
# configuring from the admin/control host
ansible_user: cloud-user
remote_user: cloud-user
# The domain that you want to add DNS entries to
osp_cluster_dns_zone: blue.osp.opentlc.com
# The dynamic DNS server you will add entries to.
# NOTE: This is only applicable when {{ use_dynamic_dns }} is true
osp_cluster_dns_server: ddns01.opentlc.com
# Whether to wait for an ack from the DNS servers before continuing
wait_for_dns: true
# Authentication for DDNS
# ddns_key_name:
# ddns_key_secret:
# Set this to true if you want floating IPs (FIPs) provisioned for an
# OpenShift on OpenStack install.
# This will provision an API FIP and an Ingress FIP.
openshift_fip_provision: true
# This requires DDNS or another DNS solution to be configured.
# If enabled, it will add DNS entries for the API and Ingress FIPs.
openshift_fip_dns: true
# The external network in OpenStack where the floating IPs (FIPs) come from
provider_network: external
# If you are deploying OpenShift, this should be set to the network that you
# want to use and will be used to create security groups.
# It will pull the subnet CIDR from the defined network below, based on the
# name you define for {{ ocp_network }}
ocp_network: "ocp"
ocp_network_subnet_cidr: "{{ networks | json_query(query_subnet_cidr) | first }}"
query_subnet_cidr: "[?name=='{{ ocp_network }}'].subnet_cidr"
# A list of the private networks and subnets to create in the project
# You can create as many as you want, but at least one is required.
# Use the name of the networks where appropriate in the instance list
networks:
  - name: ocp
    shared: "false"
    subnet_cidr: 192.168.47.0/24
    gateway_ip: 192.168.47.1
    allocation_start: 192.168.47.10
    allocation_end: 192.168.47.254
    dns_nameservers: []
    create_router: true
# Another example network if you need more than one deployed
  # - name: testnet
  #   shared: "false"
  #   subnet_cidr: 192.47.0.0/24
  #   gateway_ip: 192.47.0.1
  #   allocation_start: 192.47.0.25
  #   allocation_end: 192.47.0.156
  #   dns_nameservers:
  #     - 8.8.8.8
  #     - 1.1.1.1
  #   create_router: true
# These will influence the bastion if it is being deployed
bastion_instance_type: 2c2g30d
bastion_instance_image: rhel-server-7.7-update-2
# These will influence the utility VM, which is primarily used for disconnected
# install, but can be used for anything really.
utilityvm_instance_type: 2c2g30d
utilityvm_instance_image: rhel-server-7.7-update-2
ansible/configs/ocp4-cluster-wk/destroy_env.yml
New file
@@ -0,0 +1,34 @@
---
# Destroy playbook for this config: removes the dynamic DNS records created
# during deployment, then tears down the OpenStack project and its resources.
# Run with the same variable/secrets files that were used to deploy.
- import_playbook: ../../setup_runtime.yml
- name: Teardown OpenStack project and resources
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tasks:
    # Remove the bastion (and any other infra) DNS records.
    # NOTE(review): infra-osp-dns is an external role; presumably
    # _dns_state: absent drives record removal — confirm against the role.
    - name: Run infra-osp-dns
      include_role:
        name: infra-osp-dns
      vars:
        _dns_state: absent
    # Delete the cluster API and wildcard apps A records that were added for
    # the provisioned FIPs. Only runs when both FIP provisioning and dynamic
    # DNS were enabled for this deployment.
    # ddns_key_name / ddns_key_secret are expected to come from secrets.
    - name: Remove DNS entry for OpenShift API and ingress
      nsupdate:
        server: "{{ osp_cluster_dns_server }}"
        zone: "{{ osp_cluster_dns_zone }}"
        record: "{{ item }}.{{ guid }}"
        type: A
        key_name: "{{ ddns_key_name }}"
        key_secret: "{{ ddns_key_secret }}"
        state: absent
      loop:
        - "api"
        - "*.apps"
      when:
        - openshift_fip_provision
        - use_dynamic_dns
    # External role that deletes the OpenStack project and all resources in it.
    - name: Run infra-osp-resources-destroy role
      include_role:
        name: infra-osp-resources-destroy
ansible/configs/ocp4-cluster-wk/files/admin_host_requirements.txt
New file
@@ -0,0 +1,88 @@
ansible==2.8.6
appdirs==1.4.3
asn1crypto==1.2.0
attrs==19.3.0
Babel==2.7.0
bcrypt==3.1.7
certifi==2019.9.11
cffi==1.13.0
chardet==3.0.4
cliff==2.16.0
cmd2==0.8.9
colorama==0.4.1
configparser==4.0.2
contextlib2==0.6.0.post1
cryptography==2.8
debtcollector==1.22.0
decorator==4.4.0
distro==1.4.0
dnspython==1.16.0
dogpile.cache==0.8.0
enum34==1.1.6
funcsigs==1.0.2
functools32==3.2.3.post2
futures==3.3.0
idna==2.8
importlib-metadata==0.23
ipaddress==1.0.23
iso8601==0.1.12
Jinja2==2.10.3
jmespath==0.9.4
jsonpatch==1.24
jsonpointer==2.0
jsonschema==3.1.1
keystoneauth1==3.17.1
MarkupSafe==1.1.1
monotonic==1.5
more-itertools==5.0.0
msgpack==0.6.2
munch==2.3.2
netaddr==0.7.19
netifaces==0.10.9
openstacksdk==0.36.0
os-client-config==1.33.0
os-service-types==1.7.0
osc-lib==1.14.1
oslo.config==6.11.1
oslo.context==2.23.0
oslo.i18n==3.24.0
oslo.log==3.44.1
oslo.serialization==2.29.2
oslo.utils==3.41.2
paramiko==2.6.0
pathlib2==2.3.5
pbr==5.4.3
prettytable==0.7.2
pycparser==2.19
pyinotify==0.9.6
PyNaCl==1.3.0
pyOpenSSL==19.0.0
pyparsing==2.4.2
pyperclip==1.7.0
pyrsistent==0.15.4
python-cinderclient==5.0.0
python-dateutil==2.8.1
python-glanceclient==2.17.0
python-heatclient==1.18.0
python-keystoneclient==3.21.0
python-neutronclient==6.14.0
python-novaclient==15.1.0
python-openstackclient==4.0.0
python-swiftclient==3.8.1
pytz==2019.3
PyYAML==5.1.2
requests==2.22.0
requestsexceptions==1.4.0
rfc3986==1.3.2
scandir==1.10.0
selinux==0.2.1
simplejson==3.16.0
six==1.12.0
stevedore==1.31.0
subprocess32==3.5.4
unicodecsv==0.14.1
urllib3==1.25.6
warlock==1.3.3
wcwidth==0.1.7
wrapt==1.11.2
zipp==0.6.0
ansible/configs/ocp4-cluster-wk/files/cloud_providers/osp_cloud_template_master.j2
New file
@@ -0,0 +1,176 @@
{#
  Top-level Heat Orchestration Template (HOT), rendered by Ansible before
  stack creation.  Builds the per-lab infrastructure: an infra SSH keypair,
  a project user, networks/subnets/routers, security groups and their
  rules, one ResourceGroup per instance type (delegating to the nested
  template), and optional OpenShift API/ingress floating IPs.
#}
heat_template_version: 2018-03-02
description: Top level HOT for creating new project, network resources, and instances. This template relies on ResourceGroups and a nested template that is called to provision instances, ports, & floating IPs.
resources:
{# Infra SSH keypair; its private half is exported in the outputs below. #}
  {{ guid }}-infra_key:
    type: OS::Nova::KeyPair
    properties:
      name: {{ guid }}-infra_key
      save_private_key: true
{# Dedicated Keystone user for the lab, granted _member_ on the project. #}
  {{ guid }}-project_user:
    type: OS::Keystone::User
    properties:
      name: {{ guid }}-user
      password: {{ heat_user_password }}
      domain: Default
  {{ guid }}-project_role_user:
    type: OS::Keystone::UserRoleAssignment
    properties:
      user: { get_resource: {{ guid }}-project_user }
      roles:
        - {project: {{ osp_project_name }}, role: _member_ }
    depends_on:
      - {{ guid }}-project_user
{# One network/subnet (plus an optional router to the provider network)
   per entry of the Ansible `networks` variable. #}
{% for network in networks %}
  {{ network['name'] }}-network:
    type: OS::Neutron::Net
    properties:
      name: "{{ guid }}-{{ network['name'] }}-network"
      shared: {{ network['shared'] }}
  {{ network['name'] }}-subnet:
    type: OS::Neutron::Subnet
    properties:
      name: "{{ guid }}-{{ network['name'] }}-subnet"
      network_id: { get_resource: {{ network['name'] }}-network }
{% if network['dns_nameservers'] is defined %}
      dns_nameservers: {{ network['dns_nameservers'] }}
{% endif %}
      cidr: {{ network['subnet_cidr'] }}
      gateway_ip: {{ network['gateway_ip'] }}
      allocation_pools:
      -  start: {{ network['allocation_start'] }}
         end: {{ network['allocation_end'] }}
{% if network['create_router'] %}
  {{ network['name'] }}-router:
    type: OS::Neutron::Router
    properties:
      name: "{{ guid }}-{{ network['name'] }}-router"
      external_gateway_info:
        network: "{{ provider_network }}"
  {{ network['name'] }}-router_private_interface:
    type: OS::Neutron::RouterInterface
    properties:
      router: { get_resource: {{ network['name'] }}-router }
      subnet: { get_resource: {{ network['name'] }}-subnet }
{% endif %}
{% endfor %}
{# Security groups and their rules from the `security_groups` variable.
   NOTE(review): rules without a `name` get a random 5-character suffix via
   lookup('password', ...), so those resource names change on every render
   of this template — presumably causing Heat to replace the rules on every
   stack update; confirm this is intended. #}
{% for security_group in security_groups %}
  {{ security_group['name'] }}:
    type: OS::Neutron::SecurityGroup
    properties:
      name: {{ guid }}-{{ security_group['name'] }}
{% if security_group['description'] is defined %}
      description: "{{ security_group['description'] }}"
{% endif %}
{% for rule in security_group.rules %}
{% if rule['name'] is defined %}
  {{ security_group['name'] }}-rule_{{ rule['name'] }}:
{% else %}
  {{ security_group['name'] }}-rule_{{ lookup('password', '/dev/null length=5 chars=ascii_letters,digits') }}:
{% endif %}
    type: OS::Neutron::SecurityGroupRule
    properties:
      security_group: { get_resource: {{ security_group['name'] }} }
      direction: {{ rule['direction'] }}
      protocol: {{ rule['protocol'] }}
{% if rule['description'] is defined %}
      description: {{ rule['description'] }}
{% endif %}
{% if rule['port_range_min'] is defined %}
      port_range_min: {{ rule['port_range_min'] }}
{% endif %}
{% if rule['port_range_max'] is defined %}
      port_range_max: {{ rule['port_range_max'] }}
{% endif %}
{% if rule['remote_ip_prefix'] is defined %}
      remote_ip_prefix: {{ rule['remote_ip_prefix'] }}
{% endif %}
{% if rule['remote_group'] is defined %}
      remote_group: { get_resource: {{ rule['remote_group'] }} }
{% endif %}
    depends_on: {{ security_group['name'] }}
{% endfor %}
{% endfor %}
{# One ResourceGroup per instance definition; the nested template
   (heat_nested_template) creates each server, its port and optional FIP. #}
{% for instance in instances %}
  {{instance['name']}}:
    type: OS::Heat::ResourceGroup
    properties:
      count: {{ instance['count'] }}
      resource_def:
        type: {{ heat_nested_template }}
        properties:
          network_private: { get_resource: {{ instance['network'] }}-network }
          volume_size: {{ instance['rootfs_size'] | default(osp_default_rootfs_size) }}
          key_name: { get_resource: {{ guid }}-infra_key }
          security_groups:
{% for security_group in instance.security_groups %}
            - {{ guid }}-{{ security_group }}
{% endfor %}
          provider_network: {{ provider_network}}
{% if instance['count'] > 1 %}
          instance_name: {{ instance['name'] }}_%index%
{% else %}
          instance_name: {{ instance['name'] }}
{% endif %}
          instance_flavor: {{ instance['flavor'].osp }}
          instance_image: {{ instance['image_id'] }}
{% if instance.floating_ip %}
          instance_fip: true
{% else %}
          instance_fip: false
{% endif %}
          instance_metadata:
            guid: "{{ guid }}"
            env_type: "{{ env_type }}"
{% if instance['metadata'] %}
{% for data in instance['metadata'] %}
{% for key, value in data.items() %}
            {{ key }}: {{ value }}
{% endfor %}
{% endfor %}
{% endif %}
    depends_on:
      - {{ instance['network'] }}-router_private_interface
{% for security_group in instance.security_groups %}
      - {{ security_group }}
{% endfor %}
{% endfor %}
{# Floating IPs for the OpenShift API and ingress; post_infra.yml reads
   them back from the stack outputs to create DNS records. #}
{% if openshift_fip_provision %}
  ocp_api_fip:
    type: OS::Neutron::FloatingIP
    properties:
      floating_network: "{{ provider_network }}"
  ocp_ingress_fip:
    type: OS::Neutron::FloatingIP
    properties:
      floating_network: "{{ provider_network }}"
{% endif %}
outputs:
  {{ guid }}-infra_key:
    description: The SSH infra key
    value: { get_attr: [ {{ guid }}-infra_key, private_key ] }
{% if openshift_fip_provision %}
  ocp_api_fip:
    description: The floating IP of the OpenShift API
    value: { get_attr: [ ocp_api_fip, floating_ip_address ] }
  ocp_ingress_fip:
    description: The floating IP of the OpenShift ingress
    value: { get_attr: [ ocp_ingress_fip, floating_ip_address ] }
{% endif %}
ansible/configs/ocp4-cluster-wk/files/cloud_providers/osp_cloud_template_nested.j2
New file
@@ -0,0 +1,96 @@
{#
  Nested HOT, instantiated once per ResourceGroup member by the top-level
  template.  Creates one Neutron port, an optional floating IP (plus its
  port association), and a Nova server booted from a volume created from
  the given image.
#}
heat_template_version: 2018-03-02
description: Nested HOT for creating instances, ports, & floating IPs. This template relies is called by a top level template that is resposible for common resources such as network, router, etc.
parameters:
  network_private:
    type: string
    description: The name of the network created by the top level HOT.
  security_groups:
    type: json
    description: The list of security groups created by the top level HOT. Passed as a list or string?
  provider_network:
    type: string
    description: The provider network where floating IPs will be provisioned from.
  instance_name:
    type: string
    description: The instance name is passed from the top level HOT. It should be appended with an index value if there will be more than one of this type of instance deployed.
  instance_image:
    type: string
  instance_flavor:
    type: string
  instance_metadata:
    type: json
    description: Metadata in OpenStack is the equivilent to tags in a cloud provider such as AWS
  instance_fip:
    type: boolean
    description: When passed from top level HOT, this will determine whether the FIP and FIP association resources are created based on condition.
  volume_size:
    type: number
    description: The size in GB of the volume being created from an image.
  key_name:
    type: string
    description: The SSH key that will be added to the instance.
{# Heat condition gating the fip/fip_association resources below. #}
conditions:
  create_fip:
    get_param: instance_fip
resources:
  port:
    type: OS::Neutron::Port
    properties:
      network: { get_param: network_private }
      security_groups: { get_param: security_groups }
  fip:
    type: OS::Neutron::FloatingIP
    condition: create_fip
    properties:
      floating_network: { get_param: provider_network }
  fip_association:
    type: OS::Neutron::FloatingIPAssociation
    condition: create_fip
    properties:
      floatingip_id: { get_resource: fip }
      port_id: { get_resource: port }
{# Server boots from a new volume created from the image; the volume is
   deleted with the server.  all_ssh_authorized_keys is expanded by Ansible
   at template-render time, then installed by cloud-init on first boot. #}
  instance:
    type: OS::Nova::Server
    properties:
      name: { get_param: instance_name }
      flavor: { get_param: instance_flavor }
      key_name: { get_param: key_name }
      block_device_mapping_v2:
        - image: { get_param: instance_image }
          delete_on_termination: true
          volume_size: { get_param: volume_size }
          boot_index: 0
      user_data: |
        #cloud-config
        ssh_authorized_keys: {{ all_ssh_authorized_keys | to_json }}
      user_data_format: RAW
      networks:
        - port: { get_resource: port }
      metadata:
        get_param: instance_metadata
{#
outputs:
  fip:
    value: { get_attr: [instance, fip ] }
#}
ansible/configs/ocp4-cluster-wk/files/clouds.yaml.j2
New file
@@ -0,0 +1,12 @@
# clouds.yaml for the python-openstackclient, rendered by software.yml into
# the student's ~/.config/openstack/ directory on the bastion.
# NOTE(review): software.yml exports OS_CLOUD as "<guid>-project"; that only
# matches this entry if osp_project_name has that exact value — confirm.
clouds:
  {{ osp_project_name }}:
    auth:
      auth_url: "{{ osp_auth_url }}"
      # Per-lab Keystone user created by the Heat master template.
      username: "{{ guid }}-user"
      project_name: "{{ osp_project_name }}"
      project_id: "{{ hostvars['localhost']['osp_project_info'][0].id }}"
      user_domain_name: "Default"
      password: "{{ hostvars['localhost']['heat_user_password'] }}"
    # NOTE(review): region and interface are hard-coded for the target
    # OpenStack deployment — confirm they match.
    region_name: "regionOne"
    interface: "public"
    identity_api_version: 3
ansible/configs/ocp4-cluster-wk/files/general-ms.yaml.j2
New file
@@ -0,0 +1,55 @@
# MachineSet template for general-purpose worker nodes on OpenStack.
# Copied verbatim (not templated) to the bastion by software.yml; the
# lookup('env', ...) expressions and the `msid` variable are resolved later
# when the student renders the file.
apiVersion: machine.openshift.io/v1beta1
kind: MachineSet
metadata:
  labels:
    machine.openshift.io/cluster-api-cluster: {{ lookup('env', 'INFRA_ID') }}
    machine.openshift.io/cluster-api-machine-role: worker
    machine.openshift.io/cluster-api-machine-type: worker
  name: general-purpose-{{ msid }}
  namespace: openshift-machine-api
spec:
  # Created scaled to zero; presumably scaled up by the student later —
  # confirm against the lab instructions.
  replicas: 0
  selector:
    matchLabels:
      machine.openshift.io/cluster-api-cluster: {{ lookup('env', 'INFRA_ID') }}
      machine.openshift.io/cluster-api-machineset: general-purpose-{{ msid }}
  template:
    metadata:
      creationTimestamp: null
      labels:
        machine.openshift.io/cluster-api-cluster: {{ lookup('env', 'INFRA_ID') }}
        machine.openshift.io/cluster-api-machine-role: worker
        machine.openshift.io/cluster-api-machine-type: worker
        machine.openshift.io/cluster-api-machineset: general-purpose-{{ msid }}
    spec:
      metadata:
        labels:
          # msid doubles as the (synthetic) zone label for the nodes.
          failure-domain.beta.kubernetes.io/region: "east"
          failure-domain.beta.kubernetes.io/zone: "{{ msid }}"
          node-role.kubernetes.io/general-use: ""
      providerSpec:
        value:
          apiVersion: openstackproviderconfig.openshift.io/v1alpha1
          cloudName: openstack
          cloudsSecret:
            name: openstack-cloud-credentials
            namespace: openshift-machine-api
          # NOTE(review): flavor and image names are hard-coded for the
          # target OpenStack deployment — confirm they exist there.
          flavor: 4c12g30d
          image: rhcos-ocp42
          kind: OpenstackProviderSpec
          networks:
          - filter: {}
            subnets:
            - filter:
                name: {{ lookup('env', 'GUID') }}-ocp-subnet
          securityGroups:
          - filter: {}
            name: {{ lookup('env', 'GUID') }}-worker_sg
          serverMetadata:
            Name: {{ lookup('env', 'INFRA_ID') }}-worker
            openshiftClusterID: {{ lookup('env', 'INFRA_ID') }}
          tags:
          - openshiftClusterID={{ lookup('env', 'INFRA_ID') }}
          trunk: false
          userDataSecret:
            name: worker-user-data
ansible/configs/ocp4-cluster-wk/files/htpasswd.j2
New file
@@ -0,0 +1,6 @@
{# htpasswd file for the HTPasswd identity provider: two static demo users,
   the admin user, then generated lab users.  All hashes are APR1. #}
andrew:$apr1$dZPb2ECf$ercevOFO5znrynUfUj4tb/
karla:$apr1$FQx2mX4c$eJc21GuVZWNg1ULF8I2G31
{{admin_user|d('opentlc-mgr')}}:{{admin_password_hash|d('$apr1$glFN48wz$dR9w94PGiQL8qZXcXGd0L0')}}
{# Generates user0..userN where N = max(user_count, 200), so at least 201
   entries always exist — presumably to guarantee a fixed pool of lab
   users regardless of user_count; confirm that minimum is intended. #}
{% for i in range(0, [ (user_count|int), 200 ] | max + 1) %}
user{{i}}:{{user_password_hash|d('$apr1$FmrTsuSa$yducoDpvYq0KEV0ErmwpA1')}}
{% endfor %}
ansible/configs/ocp4-cluster-wk/files/macos_requirements.txt
New file
@@ -0,0 +1,71 @@
ansible==2.8.6
appdirs==1.4.3
asn1crypto==1.2.0
attrs==19.3.0
Babel==2.7.0
certifi==2019.9.11
cffi==1.13.0
chardet==3.0.4
cliff==2.16.0
cmd2==0.8.9
colorama==0.4.1
cryptography==2.8
debtcollector==1.22.0
decorator==4.4.0
dnspython==1.16.0
dogpile.cache==0.8.0
idna==2.8
importlib-metadata==0.23
iso8601==0.1.12
Jinja2==2.10.3
jmespath==0.9.4
jsonpatch==1.24
jsonpointer==2.0
jsonschema==3.1.1
keystoneauth1==3.17.1
MarkupSafe==1.1.1
more-itertools==7.2.0
msgpack==0.6.2
munch==2.3.2
netaddr==0.7.19
netifaces==0.10.9
openstacksdk==0.36.0
os-client-config==1.33.0
os-service-types==1.7.0
osc-lib==1.14.1
oslo.config==6.11.1
oslo.context==2.23.0
oslo.i18n==3.24.0
oslo.log==3.44.1
oslo.serialization==2.29.2
oslo.utils==3.41.2
passlib==1.7.1
pbr==5.4.3
prettytable==0.7.2
pycparser==2.19
pyOpenSSL==19.0.0
pyparsing==2.4.2
pyperclip==1.7.0
pyrsistent==0.15.4
python-cinderclient==5.0.0
python-dateutil==2.8.1
python-glanceclient==2.17.0
python-heatclient==1.18.0
python-keystoneclient==3.21.0
python-neutronclient==6.14.0
python-novaclient==15.1.0
python-openstackclient==4.0.0
python-swiftclient==3.8.1
pytz==2019.3
PyYAML==5.1.2
requests==2.22.0
requestsexceptions==1.4.0
rfc3986==1.3.2
simplejson==3.16.0
six==1.12.0
stevedore==1.31.0
urllib3==1.25.6
warlock==1.3.3
wcwidth==0.1.7
wrapt==1.11.2
zipp==0.6.0
ansible/configs/ocp4-cluster-wk/files/pv-registry.yaml.j2
New file
@@ -0,0 +1,14 @@
# PersistentVolume for the OpenShift internal image registry, backed by the
# NFS server configured on the utility host (see software.yml Step 003).
apiVersion: v1
kind: PersistentVolume
metadata:
  name: registry-storage
spec:
  capacity:
    storage: 100Gi
  accessModes:
  - ReadWriteMany
  nfs:
    # NOTE(review): software.yml creates /srv/nfs/ocp-registry and exports
    # /srv/nfs; this path only resolves if the export root maps to /srv/nfs
    # — confirm, otherwise the path should be /srv/nfs/ocp-registry.
    path: /ocp-registry
    server: {{ nfs_server_address }}
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
ansible/configs/ocp4-cluster-wk/files/pv-user.yaml.j2
New file
@@ -0,0 +1,21 @@
---
{# One PersistentVolume per user volume (vol1..volN), backed by the NFS
   directories created on the utility host by software.yml Step 003.
   Every second volume additionally allows ReadWriteMany. #}
{# Bug fix: range() is end-exclusive, so range(1, user_vols) produced only
   user_vols-1 PVs while the utility host creates user_vols NFS
   directories; add 1 so every exported directory gets a PV. #}
{% for pv in range(1, user_vols|int + 1) %}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: vol{{ pv }}
spec:
  capacity:
    storage: {{ user_vols_size }}
  accessModes:
  - ReadWriteOnce
{% if pv % 2 == 0 %}
  - ReadWriteMany
{% endif %}
  nfs:
    path: /user_vols/vol{{pv}}
    server: {{ nfs_server_address }}
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
---
{% endfor %}
ansible/configs/ocp4-cluster-wk/files/pvc-registry.yaml
New file
@@ -0,0 +1,13 @@
# PersistentVolumeClaim for the internal image registry.  Binds to the NFS
# "registry-storage" PV (pv-registry.yaml.j2) via the nfs storage class and
# the matching ReadWriteMany access mode.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: image-registry-storage
  namespace: openshift-image-registry
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 100Gi
  storageClassName: nfs
  volumeMode: Filesystem
ansible/configs/ocp4-cluster-wk/files/repos_template.j2
New file
@@ -0,0 +1,29 @@
# Yum repository definitions rendered onto hosts by the set-repositories
# role; own_repo_path points at the lab-local package mirror.
# NOTE(review): gpgcheck is disabled for all repos — presumably acceptable
# because packages come from the internal mirror; confirm.
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7
baseurl={{own_repo_path}}/rhel-7-server-rpms
enabled=1
gpgcheck=0
[rhel-7-server-rh-common-rpms]
name=Red Hat Enterprise Linux 7 Common
baseurl={{own_repo_path}}/rhel-7-server-rh-common-rpms
enabled=1
gpgcheck=0
[rhel-7-server-extras-rpms]
name=Red Hat Enterprise Linux 7 Extras
baseurl={{own_repo_path}}/rhel-7-server-extras-rpms
enabled=1
gpgcheck=0
[rhel-7-server-optional-rpms]
name=Red Hat Enterprise Linux 7 Optional
baseurl={{own_repo_path}}/rhel-7-server-optional-rpms
enabled=1
gpgcheck=0
[rhel-7-server-ansible-2.8-rpms]
name=Red Hat Enterprise Linux Ansible (RPMs)
baseurl={{own_repo_path}}/rhel-7-server-ansible-2.8-rpms
enabled=1
gpgcheck=0
ansible/configs/ocp4-cluster-wk/files/rfc2136.ini.j2
New file
@@ -0,0 +1,10 @@
# certbot RFC2136 (dynamic DNS) credentials, rendered to the bastion for
# DNS-01 validation by software.yml Step 004 and removed once certificates
# have been issued.
# Target DNS server
dns_rfc2136_server = {{ osp_cluster_dns_server }}
# Target DNS port
dns_rfc2136_port = 53
# TSIG key name
dns_rfc2136_name = {{ ddns_key_name }}
# TSIG key secret
dns_rfc2136_secret = {{ ddns_key_secret }}
# TSIG key algorithm
# NOTE(review): HMAC-MD5 is cryptographically weak; the certbot rfc2136
# plugin also accepts HMAC-SHA256/512 — upgrade if the DNS server's TSIG
# key allows.
dns_rfc2136_algorithm = HMAC-MD5
ansible/configs/ocp4-cluster-wk/files/update_ignition.py
New file
@@ -0,0 +1,50 @@
"""Append OpenStack bootstrap files to an Ignition config.

Reads ``bootstrap.ign`` from the current directory, appends three file
entries (/etc/hostname, a NetworkManager dhcp-client config, and
dhclient.conf) to ``storage.files``, and writes the result back in place.
The hostname is derived from the ``INFRA_ID`` environment variable,
falling back to ``openshift``.
"""
import base64
import json
import os


def make_ignition_file(path, content):
    """Return an Ignition file entry for *path* with *content* (bytes).

    The content is embedded as a base64 data URI.  Mode 420 is decimal
    for octal 0644.
    """
    encoded = base64.standard_b64encode(content).decode().strip()
    return {
        'path': path,
        'mode': 420,
        'contents': {
            'source': 'data:text/plain;charset=utf-8;base64,' + encoded,
            'verification': {},
        },
        'filesystem': 'root',
    }


def update_ignition(ignition):
    """Append the bootstrap host files to *ignition* (mutated in place).

    Returns the same dict for convenience.
    """
    files = ignition['storage'].get('files', [])
    infra_id = os.environ.get('INFRA_ID', 'openshift').encode()
    # Hostname: "<INFRA_ID>-bootstrap".
    files.append(make_ignition_file('/etc/hostname', infra_id + b'-bootstrap\n'))
    # Force NetworkManager to use dhclient.
    files.append(make_ignition_file(
        '/etc/NetworkManager/conf.d/dhcp-client.conf',
        b'[main]\ndhcp=dhclient\n'))
    # dhclient: hardware-based client id and local resolver first.
    files.append(make_ignition_file(
        '/etc/dhcp/dhclient.conf',
        b'send dhcp-client-identifier = hardware;\n'
        b'prepend domain-name-servers 127.0.0.1;\n'))
    ignition['storage']['files'] = files
    return ignition


if __name__ == '__main__':
    with open('bootstrap.ign', 'r') as f:
        ignition = json.load(f)
    update_ignition(ignition)
    with open('bootstrap.ign', 'w') as f:
        json.dump(ignition, f)
ansible/configs/ocp4-cluster-wk/post_infra.yml
New file
@@ -0,0 +1,50 @@
# Post-infrastructure step: look up the OpenShift API/ingress floating IPs
# from the Heat stack outputs and publish DNS records for them.
- name: Step 002 Post Infrastructure
  hosts: localhost
  connection: local
  become: false
  tags:
  - step002
  - post_infrastructure
  # OpenStack credentials for any modules/CLI invoked from this play.
  environment:
    OS_AUTH_URL: "{{ osp_auth_url }}"
    OS_USERNAME: "{{ osp_auth_username }}"
    OS_PASSWORD: "{{ osp_auth_password }}"
    OS_PROJECT_NAME: "{{ osp_project_name }}"
    OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
    OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
  tasks:
    - name: Create DNS entries for OpenShift FIPs
      debug:
        msg: Currently using {{ osp_cluster_dns_zone }} on server {{ osp_cluster_dns_server }}
      when: openshift_fip_provision
    # Extract the two floating IPs from the registered Heat stack outputs.
    - name: Extract API floating IP from stack outputs
      set_fact:
        ocp_api_fip: "{{ hot_outputs | json_query(query) }}"
      vars:
        query: "outputs[?@.output_key=='ocp_api_fip'].output_value|[0]"
      when: openshift_fip_provision
    - name: Extract ingress floating IP from stack outputs
      set_fact:
        ocp_ingress_fip: "{{ hot_outputs | json_query(query) }}"
      vars:
        query: "outputs[?@.output_key=='ocp_ingress_fip'].output_value|[0]"
      when: openshift_fip_provision
    # Records are relative to the zone: api.<guid> and *.apps.<guid>.
    - name: Add DNS entry for OpenShift API and ingress
      nsupdate:
        server: "{{ osp_cluster_dns_server }}"
        zone: "{{ osp_cluster_dns_zone }}"
        record: "{{ item.dns }}.{{ guid }}"
        type: A
        ttl: 5
        value: "{{ item.name }}"
        key_name: "{{ ddns_key_name }}"
        key_secret: "{{ ddns_key_secret }}"
      loop:
        - name: "{{ ocp_api_fip }}"
          dns: "api"
        - name: "{{ ocp_ingress_fip }}"
          dns: "*.apps"
      loop_control:
        # Bug fix: the label must be a Jinja expression; the bare string
        # "item.name" was printed literally instead of the FIP address.
        label: "{{ item.name }}"
      when: openshift_fip_provision
ansible/configs/ocp4-cluster-wk/post_software.yml
New file
@@ -0,0 +1,30 @@
---
# Final step: emit user.info lines that the provisioning system surfaces to
# the student (bastion SSH access, base domain, and OpenShift FIPs).
# NOTE(review): ocp_api_fip / ocp_ingress_fip and
# hostvars['bastion']['student_password'] must have been set by earlier
# steps (post_infra.yml, bastion-student-user role); undefined values will
# fail templating here.
- name: Deploy workload(s) role on bastion of the shared cluster
  hosts: localhost
  connection: local
  gather_facts: false
  tags:
    - step005
  tasks:
    - name: print out user.info
      debug:
        msg: "{{ item }}"
      loop:
        - "user.info: You can access your bastion via SSH:"
        - "user.info: ssh {{ student_name }}@bastion.{{ guid }}.{{ osp_cluster_dns_zone }}"
        - "user.info: "
        - "user.info: Make sure you use the username '{{ student_name }}' and the password '{{ hostvars['bastion']['student_password'] }}' when prompted."
        - "user.info: "
        - "user.info: Your base domain is '{{ osp_cluster_dns_zone }}'"
        - "user.info: "
        - "user.info: For reference, the floating IPs you will use for OpenShift are:"
        - "user.info: "
        - "user.info: API IP: {{ ocp_api_fip }}"
        - "user.info: API FQDN: api.{{ guid }}.{{ osp_cluster_dns_zone }}"
        - "user.info: "
        - "user.info: Ingress IP: {{ ocp_ingress_fip }}"
        - "user.info: Ingress FQDN: *.apps.{{ guid }}.{{ osp_cluster_dns_zone }}"
    - debug:
        msg: "Post-Software checks completed successfully"
ansible/configs/ocp4-cluster-wk/pre_infra.yml
New file
@@ -0,0 +1,10 @@
# Placeholder pre-infrastructure step; this config needs no preparation
# before the cloud provider runs.
- name: Step 000 Pre Infrastructure
  hosts: localhost
  connection: local
  become: false
  tags:
  # NOTE(review): the play is named "Step 000" but tagged step001 —
  # presumably matching the shared tagging convention; confirm.
  - step001
  - pre_infrastructure
  tasks:
    - debug:
        msg: "Step 000 Pre Infrastructure - Dummy action"
ansible/configs/ocp4-cluster-wk/pre_software.yml
New file
@@ -0,0 +1,100 @@
---
# Pre-software: create the per-environment SSH keypair, apply common host
# configuration, and prepare the bastion for the student user.
- name: Step 003 - Create env key
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
  - step003
  - generate_env_keys
  tasks:
  # `creates:` makes the key generation idempotent across re-runs.
  - name: Generate SSH keys
    shell: ssh-keygen -b 2048 -t rsa -f "{{output_dir}}/{{env_authorized_key}}" -q -N ""
    args:
      creates: "{{output_dir}}/{{env_authorized_key}}"
    when: set_env_authorized_key | bool
  - name: fix permission
    file:
      path: "{{output_dir}}/{{env_authorized_key}}"
      # Quoted so the octal mode is passed as the string "0400" instead of
      # being parsed as a YAML integer.
      mode: "0400"
    when: set_env_authorized_key | bool
  - name: Generate SSH pub key
    shell: ssh-keygen -y -f "{{output_dir}}/{{env_authorized_key}}" > "{{output_dir}}/{{env_authorized_key}}.pub"
    args:
      creates: "{{output_dir}}/{{env_authorized_key}}.pub"
    when: set_env_authorized_key | bool

- name: Configure all hosts with Repositories, Common Files and Set environment key
  hosts:
  # Quoted because the host pattern contains ':' and '!', which YAML could
  # otherwise mis-parse.
  - "all:!windows"
  become: true
  gather_facts: False
  tags:
  - step004
  - common_tasks
  roles:
  - { role: "set-repositories",       when: 'repo_method is defined' }
  - { role: "common",                 when: 'install_common | bool' }
  - { role: "set_env_authorized_key", when: 'set_env_authorized_key | bool' }
  tasks:
    - name: Add GUID to /etc/skel/.bashrc
      lineinfile:
        path: "/etc/skel/.bashrc"
        regexp: "^export GUID"
        line: "export GUID={{ guid }}"

- name: Configuring Bastion Hosts
  hosts: bastions
  become: true
  roles:
  - { role: "bastion-lite",         when: 'install_bastion | bool' }
  - { role: "bastion-student-user", when: 'install_student_user | bool' }
  tags:
  - step004
  - bastion_tasks
  tasks:
    # The three per-file copy tasks were identical except for the file
    # name; collapsed into a single loop.
    - name: Copy SSH key material and config to student user .ssh directory
      copy:
        src: "/root/.ssh/{{ item }}"
        dest: "/home/{{ student_name }}/.ssh/{{ item }}"
        mode: "0600"
        owner: "{{ student_name }}"
        remote_src: true
      loop:
        - "{{ env_authorized_key }}.pem"
        - "{{ env_authorized_key }}.pub"
        - "config"
      when:
        - student_name is defined
        - env_authorized_key is defined

- name: PreSoftware flight-check
  hosts: localhost
  connection: local
  gather_facts: false
  become: false
  tags:
  - flight_check
  tasks:
  - debug:
      msg: "Pre-Software checks completed successfully"
ansible/configs/ocp4-cluster-wk/requirements.yml
New file
@@ -0,0 +1,6 @@
---
# Ansible Galaxy role requirements for this config
# (install with: ansible-galaxy install -r requirements.yml).
# External role to setup grader host virtualenv and FTL grading infra
- src: https://github.com/redhat-gpte-devopsautomation/ftl-injector
  name: ftl-injector
  version: v0.16.0
ansible/configs/ocp4-cluster-wk/software.yml
New file
@@ -0,0 +1,237 @@
---
# Software step 001: prepare the bastion with the OpenStack CLI, client
# configuration, and the resource files the student will use to install
# OpenShift.
- name: Step 001 software
  hosts: bastions
  gather_facts: false
  become: true
  tasks:
    - when: cloud_provider == "osp"
      block:
        - name: Install packages for OpenStack CLI
          package:
            name:
              - gcc
              - python3
              - python3-devel
        - name: Copy requirements.txt
          copy:
            src: "./files/openstack_requirements.txt"
            dest: "/root/requirements.txt"
        - name: Install python requirements for OpenStack CLI
          command: "pip3 install -r /root/requirements.txt"
        - name: Remove requirements.txt
          file:
            path: "/root/requirements.txt"
            state: absent
        # pip3 installs the CLI entry points into /usr/local/bin.
        - name: Add /usr/local/bin to PATH
          copy:
            dest: /etc/profile.d/custom-path.sh
            content: 'PATH=$PATH:/usr/local/bin'
        - name: Create .config directory
          file:
            path: /home/{{ student_name }}/.config/openstack
            state: directory
            owner: "{{ student_name }}"
            group: users
            # Quoted so the octal mode is not parsed as a YAML integer.
            mode: "0744"
        - name: Create clouds.yaml file
          template:
            src: "./files/clouds.yaml.j2"
            dest: "/home/{{ student_name }}/.config/openstack/clouds.yaml"
            owner: "{{ student_name }}"
            mode: "0700"
        - name: Add environment variables for API and Ingress FIPs
          lineinfile:
            path: "/home/{{ student_name }}/.bashrc"
            regexp: "^export {{ item.env_var }}"
            line: "export {{ item.env_var }}={{ item.ip }}"
          loop:
            - ip: "{{ hostvars['localhost']['ocp_api_fip'] }}"
              env_var: "API_FIP"
            - ip: "{{ hostvars['localhost']['ocp_ingress_fip'] }}"
              env_var: "INGRESS_FIP"
          loop_control:
            # Bug fix: label must be templated; the bare string "item.ip"
            # was printed literally for every loop iteration.
            label: "{{ item.ip }}"
          when: openshift_fip_provision
        - name: Add environment variable for DNS domain
          lineinfile:
            path: "/home/{{ student_name }}/.bashrc"
            regexp: "^export OPENSHIFT_DNS_ZONE"
            line: "export OPENSHIFT_DNS_ZONE={{ osp_cluster_dns_zone }}"
          when: openshift_fip_provision
        # NOTE(review): clouds.yaml keys the cloud by osp_project_name;
        # OS_CLOUD=<guid>-project only matches if osp_project_name has
        # that exact value — confirm.
        - name: Add environment variable for OpenStack credentials
          lineinfile:
            path: "/home/{{ student_name }}/.bashrc"
            regexp: "^export OS_CLOUD"
            line: "export OS_CLOUD={{ guid }}-project"
        - name: Create resources directory
          file:
            path: "/home/{{ student_name }}/resources"
            state: directory
            owner: "{{ student_name }}"
            group: users
            mode: "0744"
        - name: Add python script to update ignition
          copy:
            dest: "/home/{{ student_name }}/resources/update_ignition.py"
            src: "./files/update_ignition.py"
            owner: "{{ student_name }}"
        # Copied raw (not templated); the student renders it later.
        - name: Add jinja for machinesets to resources directory
          copy:
            dest: "/home/{{ student_name }}/resources/general-ms.yaml.j2"
            src: "./files/general-ms.yaml.j2"
            owner: "{{ student_name }}"
        - name: Add PV file for OCP registry
          template:
            src: "./files/pv-registry.yaml.j2"
            dest: "/home/{{ student_name }}/resources/pv-registry.yaml"
            owner: "{{ student_name }}"
        - name: Add PV files for user vols
          template:
            src: "./files/pv-user.yaml.j2"
            dest: "/home/{{ student_name }}/resources/pv-user.yaml"
            owner: "{{ student_name }}"
        - name: Add PVC file for OCP registry
          copy:
            src: "./files/pvc-registry.yaml"
            dest: "/home/{{ student_name }}/resources/pvc-registry.yaml"
            owner: "{{ student_name }}"
        # In case we ever use Swift or something similar for bootstrap.ign
        # - name: Create swift container for ignition
        #   command: |
        #     /usr/local/bin/openstack --os-cloud {{ osp_project_name }} container create ignition
        #   become_user: "{{ student_name }}"
        # - name: Set ACL for ignition container
        #   command: |
        #     /usr/local/bin/swift post --read-acl ".r:*,.rlistings" ignition
        #   become_user: "{{ student_name }}"
        #   environment:
        #     OS_AUTH_URL: "{{ osp_auth_url }}"
        #     OS_USERNAME: "{{ guid }}-user"
        #     OS_PASSWORD: "{{ hostvars['localhost']['heat_user_password'] }}"
        #     OS_PROJECT_NAME: "{{ osp_project_name }}"
        #     OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
        # - name: Add signing key for Swift tempurl
        #   command: |
        #     /usr/local/bin/openstack --os-cloud {{ osp_project_name }} object store account set --property Temp-URL-Key=redhatgpte
        #   become_user: "{{ student_name }}"
# Enable rootless podman on bastion and utility hosts.
- name: Step 002 rootless podman
  hosts: bastions,utility
  gather_facts: false
  become: true
  tasks:
    # slirp4netns provides the user-mode network stack for rootless podman.
    - name: Install slirp4netns
      package:
        name: slirp4netns
    # Rootless containers need user namespaces; persisted via
    # /etc/sysctl.d so the setting survives reboots.  28633 is the value
    # commonly recommended for rootless podman — presumably from the RHEL
    # container documentation; confirm.
    - name: update max_user_namespaces in sysctl
      sysctl:
        name: user.max_user_namespaces
        value: "28633"
        sysctl_file: /etc/sysctl.d/userns.conf
        reload: yes
# Configure the utility VM: httpd (for serving the bootstrap ignition) and
# an NFS server backing the registry and user volumes.
- name: Step 003 Utility VM config
  hosts: utility
  gather_facts: false
  become: true
  tasks:
    - name: Install httpd on utilityVM
      package:
        name: httpd
    - name: Start and enable httpd on utilityVM
      service:
        name: httpd
        state: started
        enabled: yes
    - name: Create vols for registry NFS
      file:
        path: /srv/nfs/ocp-registry
        state: directory
        # Bug fix: unquoted `777` was passed as decimal 777 (octal 01411),
        # not rwxrwxrwx; octal modes must be quoted strings with a leading
        # zero.
        mode: "0777"
    - name: Create user vols for NFS
      # Relies on shell brace expansion to create vol1..vol<user_vols>.
      shell: "mkdir -p /srv/nfs/user_vols/vol{1..{{user_vols}}}"
    - name: chmod the user vols
      shell: "chmod -R 777 /srv/nfs"
    # NOTE(review): `state: touch` updates the mtime on every run, so this
    # task always reports changed.
    - name: create exports file
      file:
        path: /etc/exports.d/{{ env_type }}-{{ guid }}.exports
        state: touch
        # Bug fix: unquoted `755` was decimal, not octal 0755.
        mode: "0755"
    # Renamed: this task previously duplicated the name "create exports
    # file" used by the touch task above.
    - name: add export line to exports file
      lineinfile:
        dest: /etc/exports.d/{{ env_type }}-{{ guid }}.exports
        line: '/srv/nfs {{ nfs_exports_config }}'
        state: present
    - name: Enable and start NFS server
      service:
        name: nfs-server
        state: restarted
        enabled: yes
# Request Let's Encrypt certificates for the API host name and the *.apps
# wildcard via DNS-01 (RFC2136 dynamic updates), using the
# host-lets-encrypt-certs-certbot role.
- name: Step 004 LE certs
  hosts: bastions
  gather_facts: false
  become: true
  vars:
    _certbot_dns_provider: "rfc2136"
    _certbot_domain: "api.{{ guid }}.{{ osp_cluster_dns_zone }}"
    _certbot_wildcard_domain: "*.apps.{{ guid }}.{{ osp_cluster_dns_zone }}"
    _certbot_remote_dir: "/home/{{ student_name }}"
    _certbot_remote_dir_owner: "{{ student_name }}"
    _certbot_install_dir: "/home/{{ student_name }}/certificates"
    _certbot_install_dir_owner: "{{ student_name }}"
    # Staging certificates only; set to True for trusted production certs.
    _certbot_production: False
    _certbot_user: "{{ student_name }}"
    _certbot_cache_archive_file: "{{ output_dir|d('/tmp') }}/{{ guid }}-certs.tar.gz"
    _certbot_use_cache: False
    use_python3: "{{ all_use_python3 }}"
  tasks:
    # The TSIG secret is written temporarily and removed after issuance.
    # NOTE(review): no mode is set, so the credentials file is created with
    # the default umask (likely world-readable) — consider mode "0600".
    - name: Copy credentials to host temporarily
      template:
        src: ./files/rfc2136.ini.j2
        dest: /home/{{ _certbot_user }}/.rfc2136.ini
        owner: "{{ _certbot_user }}"
    - import_role:
        name: host-lets-encrypt-certs-certbot
    # _certbot_setup_complete is expected to be set by the imported role.
    - name: Remove credentials once LE certs complete
      file:
        state: absent
        path: /home/{{ _certbot_user }}/.rfc2136.ini
      when: _certbot_setup_complete
    - name: Copy the LE root certs into trusted bundle
      copy:
        dest: /etc/pki/ca-trust/source/anchors/le-chain.pem
        src: /home/{{ student_name }}/certificates/chain.pem
        remote_src: true
    - name: Update CA trust
      command: update-ca-trust