Wolfgang Kulhanek
2020-03-12 6c25b430d6555a75cf639be91233cd536318d88d
[WIP] New way of doing role inputs (#1309)

* Fix spelling

* New Role: ocp4-workload-machinesets, new way to handle input to roles (see the sketch after this list)

* Add verbosity: 2 to debug statements, add recursive=true to combine logic

* Remove OWNERS, add meta/main.yml

* Switch to assert

* Fix debug statements

* Used wrong variable for user password

* Second occurrence of wrong variable

* Fix instance type for AWS

* Add root_volume_size example

* Added user-*.yaml to .gitignore

* Don't remove kubeadmin file. Doesn't hurt to be there.

* Fix taints
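The core change is a common input pattern for the workload roles: each role now ships a `<role>_defaults` dictionary in `defaults/main.yml`, callers override it through `<role>_input` and `<role>_secret` dictionaries, and `workload.yml` merges the three into a single fact that the rest of the role reads. A minimal sketch of that merge, using the ocp4-workload-logging names that appear in the diff below:

[source,yaml]
----
# Sketch of the shared pattern: role defaults are overlaid with
# caller-supplied input and secret dictionaries; recursive=true
# merges nested keys instead of replacing whole sub-dictionaries.
- name: Set up ocp4_workload_logging combined dictionary
  set_fact:
    ocp4_workload_logging: >-
      {{ ocp4_workload_logging_defaults
       | combine(ocp4_workload_logging_input  | default({}),
                 ocp4_workload_logging_secret | default({}),
                 recursive=true) }}
----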
16 files added
16 files modified
953 lines changed
.gitignore 3
ansible/roles/ocp4-workload-authentication/defaults/main.yml 53
ansible/roles/ocp4-workload-authentication/meta/main.yml 14
ansible/roles/ocp4-workload-authentication/tasks/main.yml 1
ansible/roles/ocp4-workload-authentication/tasks/workload.yml 125
ansible/roles/ocp4-workload-authentication/templates/cluster_role_binding.j2 4
ansible/roles/ocp4-workload-authentication/templates/htpasswd.j2 6
ansible/roles/ocp4-workload-authentication/templates/oauth-opentlc-ldap.j2 4
ansible/roles/ocp4-workload-infra-nodes/OWNERS 4
ansible/roles/ocp4-workload-logging/defaults/main.yml 34
ansible/roles/ocp4-workload-logging/meta/main.yml 15
ansible/roles/ocp4-workload-logging/tasks/workload.yml 38
ansible/roles/ocp4-workload-logging/templates/cluster_logging.j2 38
ansible/roles/ocp4-workload-machinesets/defaults/main.yml 88
ansible/roles/ocp4-workload-machinesets/files/cluster-monitoring-config.yml 78
ansible/roles/ocp4-workload-machinesets/meta/main.yml 16
ansible/roles/ocp4-workload-machinesets/readme.adoc 111
ansible/roles/ocp4-workload-machinesets/tasks/main.yml 30
ansible/roles/ocp4-workload-machinesets/tasks/post_workload.yml 8
ansible/roles/ocp4-workload-machinesets/tasks/pre_workload.yml 8
ansible/roles/ocp4-workload-machinesets/tasks/remove_workload.yml 82
ansible/roles/ocp4-workload-machinesets/tasks/workload.yml 73
ansible/roles/ocp4-workload-machinesets/templates/image-registry.j2 14
ansible/roles/ocp4-workload-machinesets/templates/ingress-controller.j2 18
ansible/roles/ocp4-workload-machinesets/templates/machine-config-daemonset.j2 24
ansible/roles/ocp4-workload-machinesets/templates/node-ca-daemonset.j2 24
ansible/roles/ocp4_machineset_config/meta/main.yml 4
ansible/roles/ocp4_machineset_config/tasks/machineset-group-aws.yml 4
ansible/roles/ocp4_machineset_config/tasks/machineset-openstack.yml 2
ansible/roles/ocp4_machineset_config/tasks/set-facts.yml 4
ansible/roles/ocp4_machineset_config/templates/machineset-aws.j2 13
ansible/roles/ocp4_machineset_config/templates/machineset-openstack.j2 13
.gitignore
@@ -26,7 +26,8 @@
.pre-commit-config.yaml
.DS_Store
my_*_vars.yml
user-data.yaml
user-info.yaml
.vscode/settings.json
__pycache__
*.pyc
ansible/roles/ocp4-workload-authentication/defaults/main.yml
@@ -3,30 +3,41 @@
ocp_username: "system:admin"
silent: False
# ocp4_idm_install can be one of none, htpasswd, ldap
ocp4_idm_install: none
ocp4_workload_authentication_defaults:
  # idm_type: 'none', 'ldap' or 'htpasswd'
  # admin_user: wkulhane-redhat.com
  idm_type: htpasswd
# Set up a user from the Authentication Provider with cluster-admin permissions
ocp4_idm_admin_user: opentlc-mgr
  # Base of the users for htpasswd
  htpasswd_user_base: user
  htpasswd_user_count: 100
  # Set a password for all htpasswd users
  # If no password is set, a 16-character random password will be generated
  # htpasswd_user_password: openshift
# LDAP settings
ocp4_idm_ldap_url: ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid
ocp4_idm_ldap_ca_url: http://ipa.opentlc.com/ipa/config/ca.crt
ocp4_idm_ldap_bind_dn: "uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com"
  # admin_user for LDAP will need to exist in LDAP
  # admin_user for htpasswd will be created
  admin_user: admin
  # Set a password for the admin user (only htpasswd)
  # If no password is set, a 16-character random password will be generated
  # htpasswd_admin_password: openshift_admin
# htpasswd settings
# -----------------
# Base of the users for htpasswd
ocp4_idm_htpasswd_user_base: user
ocp4_idm_htpasswd_user_count: 100
  # LDAP settings
  ldap_url: ldaps://ipa1.opentlc.com:636/cn=users,cn=accounts,dc=opentlc,dc=com?uid
  ldap_ca_url: http://ipa.opentlc.com/ipa/config/ca.crt
  ldap_bind_dn: "uid=ose-mwl-auth,cn=users,cn=accounts,dc=opentlc,dc=com"
# Set a password for the Admin User
# If no password set a 16 character random password will be generated
# ocp4_idm_htpasswd_admin_password:
  # Remove Kubeadmin user upon successful installation of Authentication
  remove_kubeadmin: true
# Set a password for all htpasswd users
# If no password set a 16 character random password will be generated
# ocp4_idm_htpasswd_user_password:
# Override the defaults by setting the overrides in
# ocp4_workload_authentication_input: {}
#
# For example to set up LDAP:
# ocp4_workload_authentication_input:
#   idm_type: ldap
#   admin_user: wkulhane-redhat.com
# Remove Kubeadmin user upon successful installation of Authentication
ocp4_idm_remove_kubeadmin: true
# Secret Variables should come from secrets file
# ocp4_workload_authentication_secret:
#   ldap_bind_password: <should come from secrets>
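The trailing comments above sketch how a deployer overrides these defaults. A minimal extra-vars example, assuming the variable names from this defaults file (values are illustrative only, not a recommended configuration):

[source,yaml]
----
# Overrides are merged over ocp4_workload_authentication_defaults
ocp4_workload_authentication_input:
  idm_type: ldap
  admin_user: wkulhane-redhat.com
# Secrets live in a separate dictionary, normally loaded from a secrets file
ocp4_workload_authentication_secret:
  ldap_bind_password: example-bind-password   # placeholder, not a real credential
----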
ansible/roles/ocp4-workload-authentication/meta/main.yml
New file
@@ -0,0 +1,14 @@
---
galaxy_info:
  role_name: ocp4-workload-authentication
  author: Wolfgang Kulhanek
  description: |
    Set up Authentication for OpenShift 4. Either ldap, htpasswd or none can be set up.
    ldap will set up OpenTLC LDAP authentication.
  license: MIT
  min_ansible_version: 2.8
  platforms: []
  galaxy_tags:
  - ocp
  - openshift
dependencies: []
ansible/roles/ocp4-workload-authentication/tasks/main.yml
@@ -1,5 +1,4 @@
---
# Do not modify this file
- name: Running Pre Workload Tasks
ansible/roles/ocp4-workload-authentication/tasks/workload.yml
@@ -1,50 +1,68 @@
---
# Implement your Workload deployment tasks here
- name: Set up ocp4_workload_authentication combined dictionary
  set_fact:
    ocp4_workload_authentication: >-
      {{ ocp4_workload_authentication_defaults
       | combine(ocp4_workload_authentication_input  | default( {} ),
                 ocp4_workload_authentication_secret | default( {} ), recursive=true)
      }}
- name: Print combined role variables
  debug:
    var: ocp4_workload_authentication
    verbosity: 2
- name: Check that ocp4_idm_install is defined
  when:
  - ocp4_idm_install is not defined
  fail:
    msg: "ocp4_idm_install is not defined"
- name: Check that ocp4_workload_authentication.idm_type is defined and valid
  assert:
    that:
    - ocp4_workload_authentication.idm_type is defined
    - ocp4_workload_authentication.idm_type == "none" or ocp4_workload_authentication.idm_type == "htpasswd" or ocp4_workload_authentication.idm_type == "ldap"
    fail_msg: "ocp4_workload_authentication.idm_type is not defined or not in ('none', 'htpasswd', 'ldap')."
- name: Setup HTPasswd Authentication
  when: ocp4_idm_install == "htpasswd"
  when: ocp4_workload_authentication.idm_type == "htpasswd"
  block:
  # Generate Passwords if no passwords specified
  - name: Generate cluster admin password
    when: ocp4_idm_htpasswd_admin_password | d('') | length == 0
    when: ocp4_workload_authentication.htpasswd_admin_password | d('') | length == 0
    set_fact:
      ocp4_idm_htpasswd_admin_password: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters,digits') }}"
      ocp4_workload_authentication_admin_password: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters,digits') }}"
  - name: Use provided admin password
    when: ocp4_workload_authentication.htpasswd_admin_password | d('') | length > 0
    set_fact:
      ocp4_workload_authentication_admin_password: "{{ ocp4_workload_authentication.htpasswd_admin_password }}"
  - name: Generate htpasswd hash for admin user
    shell: >-
      htpasswd -nb "admin" "{{ ocp4_workload_authentication_admin_password }}"|cut -d: -f2
    register: r_htpasswd_line
  - name: Set htpasswd admin password hash
    set_fact:
      ocp4_workload_authentication_admin_password_hash: "{{ r_htpasswd_line.stdout }}"
    when:
    - r_htpasswd_line is succeeded
  - name: Generate user passwords
    when: ocp4_idm_htpasswd_user_password | d('') | length == 0
    when: ocp4_workload_authentication.htpasswd_user_password | d('') | length == 0
    set_fact:
      ocp4_idm_htpasswd_user_password: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters,digits') }}"
      ocp4_workload_authentication_user_password: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters,digits') }}"
  # Generate password hashes
  - name: Use provided user passwords
    when: ocp4_workload_authentication.htpasswd_user_password | d('') | length > 0
    set_fact:
      ocp4_workload_authentication_user_password: "{{ ocp4_workload_authentication.htpasswd_user_password }}"
  - name: Generate htpasswd hash for user passwords
    shell: >-
      htpasswd -nb "userN" "{{ ocp4_idm_htpasswd_user_password }}"|cut -d: -f2
    register: htpasswd_line
      htpasswd -nb "userN" "{{ ocp4_workload_authentication_user_password }}"|cut -d: -f2
    register: r_htpasswd_line
  - name: Set fact ocp4_idm_htpasswd_user_password_hash
  - name: Set htpasswd user password hash
    set_fact:
      ocp4_idm_htpasswd_user_password_hash: "{{ htpasswd_line.stdout }}"
      ocp4_workload_authentication_user_password_hash: "{{ r_htpasswd_line.stdout }}"
    when:
    - htpasswd_line is succeeded
    - r_htpasswd_line is succeeded
  - name: Generate htpasswd hash for ocp4_idm_admin_user
    shell: >-
      htpasswd -nb "admin" "{{ ocp4_idm_htpasswd_admin_password }}"|cut -d: -f2
    register: htpasswd_line
  - name: Set fact ocp4_idm_htpasswd_admin_password_hash
    set_fact:
      ocp4_idm_htpasswd_admin_password_hash: "{{ htpasswd_line.stdout }}"
    when:
    - htpasswd_line is succeeded
  # Generate htpasswd file
  - name: Generate htpasswd file
    template:
      src: "htpasswd.j2"
@@ -66,9 +84,6 @@
  - name: Update OAuth Configuration
    k8s:
      state: present
      merge_type:
      - strategic-merge
      - merge
      definition: "{{ lookup('file', item ) | from_yaml }}"
    loop:
    - ./files/oauth-htpasswd.yaml
@@ -85,30 +100,30 @@
      msg: "{{ item }}"
    loop:
    - "user.info: HTPasswd Authentication is enabled on this cluster."
    - "user.info: {{ ocp4_idm_htpasswd_user_base }}1 .. {{ ocp4_idm_htpasswd_user_base }}{{ ocp4_idm_htpasswd_user_count }} are created."
    - "user.info: User `{{ ocp4_idm_admin_user }}` is cluster admin with password `{{ ocp4_idm_htpasswd_admin_password }}`."
    - "user.info: {{ ocp4_workload_authentication.htpasswd_user_base }}1 .. {{ ocp4_workload_authentication.htpasswd_user_base }}{{ ocp4_workload_authentication.htpasswd_user_count }} are created with password `{{ ocp4_workload_authentication_user_password }}`"
    - "user.info: User `{{ ocp4_workload_authentication.admin_user }}` with password `{{ ocp4_workload_authentication_admin_password }}` is cluster admin."
  - name: Print User Information for each User
    agnosticd_user_info:
      user: "{{ ocp4_idm_htpasswd_user_base }}{{ n }}"
      user: "{{ ocp4_workload_authentication.htpasswd_user_base }}{{ n }}"
      data:
        password: "{{ ocp4_idm_htpasswd_user_password }}"
        login_command: "oc login -u {{ ocp4_idm_htpasswd_user_base }}{{ n }} -p {{ ocp4_idm_htpasswd_user_password }} {{ r_cluster.resources[0].status.apiServerURL }}"
    loop: "{{ range(1, 1 + ocp4_idm_htpasswd_user_count | int) | list }}"
        password: "{{ ocp4_workload_authentication_user_password }}"
        login_command: "oc login -u {{ ocp4_workload_authentication.htpasswd_user_base }}{{ n }} -p {{ ocp4_workload_authentication_user_password }} {{ r_cluster.resources[0].status.apiServerURL }}"
    loop: "{{ range(1, 1 + ocp4_workload_authentication.htpasswd_user_count | int) | list }}"
    loop_control:
      loop_var: n
- name: Setup OpenTLC LDAP Authentication
  when: ocp4_idm_install == "ldap"
  when: ocp4_workload_authentication.idm_type == "ldap"
  block:
  - name: Check for LDAP Bind Password
    when: ocp4_workload_authentication_secret.ldap_bind_password is not defined
    fail:
      msg: LDAP Authentication is configured but LDAP BindPassword (ocp4_idm_ldap_bindPassword) is not defined.
    when: ocp4_idm_ldap_bindPassword is not defined
      msg: LDAP Authentication is configured but LDAP BindPassword (ocp4_workload_authentication_secret.ldap_bind_password) is not defined.
  - name: Get IPA CA Cert
    get_url:
      url: "{{ ocp4_idm_ldap_ca_url }}"
      url: "{{ ocp4_workload_authentication.ldap_ca_url }}"
      dest: "/home/{{ ansible_user }}/ipa-ca.crt"
      mode: 0660
@@ -132,14 +147,11 @@
      namespace: openshift-config
  - name: Create LDAP Bind Password Secret
    shell: "oc create secret generic opentlc-ldap-secret --from-literal=bindPassword=\"{{ ocp4_idm_ldap_bindPassword }}\" -n openshift-config"
    shell: "oc create secret generic opentlc-ldap-secret --from-literal=bindPassword=\"{{ ocp4_workload_authentication_secret.ldap_bind_password }}\" -n openshift-config"
  - name: Update OAuth Configuration
    k8s:
      state: present
      merge_type:
      - strategic-merge
      - merge
      definition: "{{ lookup('template', item ) | from_yaml }}"
    loop:
    - ./templates/oauth-opentlc-ldap.j2
@@ -150,24 +162,21 @@
    loop:
    - "user.info: OpenTLC LDAP Authentication is enabled on this cluster."
    - "user.info: Use your OpenTLC user and Password to log into this cluster."
    - "user.info: User `{{ ocp4_idm_admin_user }}` is cluster admin."
    - "user.info: User `{{ ocp4_workload_authentication.admin_user }}` is cluster admin."
- name: Set up Cluster Admin User
  when:
  - ocp4_idm_install != "none"
  - ocp4_idm_admin_user is defined
  - ocp4_workload_authentication.idm_type != "none"
  - ocp4_workload_authentication.admin_user is defined
  k8s:
    state: present
    merge_type:
    - strategic-merge
    - merge
    definition: "{{ lookup('template', './templates/cluster_role_binding.j2') | from_yaml }}"
- name: Remove kubeadmin User
  when:
  - ocp4_idm_admin_user is defined
  - ocp4_idm_install != "none"
  - ocp4_idm_remove_kubeadmin | bool
  - ocp4_workload_authentication.idm_type != "none"
  - ocp4_workload_authentication.admin_user is defined
  - ocp4_workload_authentication.remove_kubeadmin | bool
  block:
  - name: Remove kubeadmin user secret
    k8s:
@@ -176,10 +185,6 @@
      kind: Secret
      namespace: kube-system
      name: kubeadmin
  - name: Remove kubeadmin file
    file:
      state: absent
      path: "/home/{{ ansible_user }}/{{ cluster_name }}/auth/kubeadmin-password"
# Leave this as the last task in the playbook.
- name: workload tasks complete
ansible/roles/ocp4-workload-authentication/templates/cluster_role_binding.j2
@@ -1,7 +1,7 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: "cluster-admin-{{ ocp4_idm_admin_user }}"
  name: "cluster-admin-{{ ocp4_workload_authentication.admin_user }}"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
@@ -9,4 +9,4 @@
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: "{{ ocp4_idm_admin_user }}"
  name: "{{ ocp4_workload_authentication.admin_user }}"
ansible/roles/ocp4-workload-authentication/templates/htpasswd.j2
@@ -1,6 +1,6 @@
andrew:$apr1$dZPb2ECf$ercevOFO5znrynUfUj4tb/
karla:$apr1$FQx2mX4c$eJc21GuVZWNg1ULF8I2G31
{{ ocp4_idm_admin_user }}:{{ ocp4_idm_htpasswd_admin_password_hash | d('$apr1$glFN48wz$dR9w94PGiQL8qZXcXGd0L0') }}
{% for i in range(1, 1 + ocp4_idm_htpasswd_user_count | int) %}
{{ ocp4_idm_htpasswd_user_base }}{{ i }}:{{ ocp4_idm_htpasswd_user_password_hash | d('$apr1$FmrTsuSa$yducoDpvYq0KEV0ErmwpA1') }}
{{ ocp4_workload_authentication.admin_user }}:{{ ocp4_workload_authentication_admin_password_hash | d('$apr1$glFN48wz$dR9w94PGiQL8qZXcXGd0L0') }}
{% for i in range(1, 1 + ocp4_workload_authentication.htpasswd_user_count | int) %}
{{ ocp4_workload_authentication.htpasswd_user_base }}{{ i }}:{{ ocp4_workload_authentication_user_password_hash | d('$apr1$FmrTsuSa$yducoDpvYq0KEV0ErmwpA1') }}
{% endfor %}
ansible/roles/ocp4-workload-authentication/templates/oauth-opentlc-ldap.j2
@@ -19,10 +19,10 @@
        - cn
        preferredUsername:
        - uid
      bindDN: "{{ ocp4_idm_ldap_bind_dn }}"
      bindDN: "{{ ocp4_workload_authentication.ldap_bind_dn }}"
      bindPassword:
        name: opentlc-ldap-secret
      insecure: false
      ca:
        name: opentlc-ldap-ca-cert
      url: "{{ ocp4_idm_ldap_url }}"
      url: "{{ ocp4_workload_authentication.ldap_url }}"
ansible/roles/ocp4-workload-infra-nodes/OWNERS
New file
@@ -0,0 +1,4 @@
Red Hat GPTE
Author(s):
- Wolfgang Kulhanek (wkulhane@redhat.com)
ansible/roles/ocp4-workload-logging/defaults/main.yml
@@ -3,24 +3,34 @@
ocp_username: opentlc-mgr
silent: False
_logging_elasticsearch_replicas: 1
_logging_elasticsearch_memory_request: "8Gi"
_logging_elasticsearch_storage_request: "50Gi"
# Set up Cluster Logging on regular worker nodes
ocp4_workload_logging_defaults:
  node_role: ""
  elasticsearch_replicas: 1
  elasticsearch_memory_request: "8Gi"
  elasticsearch_storage_request: "50Gi"
# Set the following to specify dedicated nodes for the logging
# The nodes need to be exist (e.g. via the role ocp4-workload-infra-nodes)
# Override the defaults by setting the overrides in
# ocp4_workload_logging_input: {}
#
# If set then the following needs to be true:
# Example: Set up Cluster Logging on dedicated nodes
# In this example `elasticsearch` nodes.
# The nodes need to exist (e.g. via the role ocp4-workload-machinesets)
#
# The following needs to be true
# Node has a label:
#   node-role.kubernetes.io/{{ _logging_use_dedicated_nodes }}: ""
#   node-role.kubernetes.io/{{ ocp4_workload_logging.use_node_role }}: ""
#   e.g. node-role.kubernetes.io/infra: ""
# Node has taints:
#  - key: "{{ _logging_use_dedicated_nodes }}"
#  - key: "{{ ocp4_workload_logging.use_node_role }}"
#    value: reserved
#    effect: NoSchedule
#  - key: "{{ _logging_use_dedicated_nodes }}"
#  - key: "{{ocp4_workload_logging.use_node_role }}"
#    value: reserved
#    effect: NoExecute
# Example:
#   _logging_use_dedicated_nodes: "elasticsearch"
_logging_use_dedicated_nodes: ""
# ocp4_workload_logging_input:
#   node_role: "elasticsearch"
#   elasticsearch_replicas: 1
#   elasticsearch_memory_request: "8Gi"
#   elasticsearch_storage_request: "50Gi"
ansible/roles/ocp4-workload-logging/meta/main.yml
New file
@@ -0,0 +1,15 @@
---
galaxy_info:
  role_name: ocp4-workload-logging
  author: Wolfgang Kulhanek
  description: |
    Set up Cluster Logging stack for OpenShift 4.
    Cluster Logging can be installed on worker nodes or dedicated nodes. Dedicated
    Nodes can be created using the ocp4-workload-machinesets role first.
  license: MIT
  min_ansible_version: 2.8
  platforms: []
  galaxy_tags:
  - ocp
  - openshift
dependencies: []
ansible/roles/ocp4-workload-logging/tasks/workload.yml
@@ -1,4 +1,16 @@
---
- name: Set up ocp4_workload_logging combined dictionary
  set_fact:
    ocp4_workload_logging: >-
      {{ ocp4_workload_logging_defaults
       | combine(ocp4_workload_logging_input  | default( {} ),
                 ocp4_workload_logging_secret | default( {}), recursive=true )
      }}
- name: Print combined role variables
  debug:
    var: ocp4_workload_logging
    verbosity: 2
- name: Check if Elasticsearch Operator is already installed
  k8s_facts:
    api_version: v1
@@ -20,8 +32,6 @@
  - name: Set Elasticsearch channel
    set_fact:
      logging_elasticsearch_channel: "{{ r_eo_channel.resources[0].status.defaultChannel }}"  
    # shell: "oc get packagemanifest elasticsearch-operator -n openshift-marketplace -o jsonpath='{.status.defaultChannel}'"
    # register: r_eo_version
  - name: Print Elasticsearch channel to be installed
    debug:
@@ -58,20 +68,16 @@
    - r_eo_deployment.resources[0].status.availableReplicas is defined
    - r_eo_deployment.resources[0].status.availableReplicas | int == r_eo_deployment.resources[0].spec.replicas | int
  - name: Get current stable channel for Cluster Logging
    k8s_facts:
      api_version: packages.operators.coreos.com/v1
      kind: PackageManifest
      name: cluster-logging
      namespace: openshift-marketplace
    register: r_logging_channel
  - name: Set Cluster Logging channel
    set_fact:
      logging_channel: "{{ r_logging_channel.resources[0].status.defaultChannel }}"
# - name: Get current stable version of Cluster Logging
#   shell: "oc get packagemanifest cluster-logging -n openshift-marketplace -o jsonpath='{.status.defaultChannel}'"
#   register: r_logging_version
- name: Get current stable channel for Cluster Logging
  k8s_facts:
    api_version: packages.operators.coreos.com/v1
    kind: PackageManifest
    name: cluster-logging
    namespace: openshift-marketplace
  register: r_logging_channel
- name: Set Cluster Logging channel
  set_fact:
    logging_channel: "{{ r_logging_channel.resources[0].status.defaultChannel }}"
- name: Print Cluster Logging channel to be installed
  debug:
ansible/roles/ocp4-workload-logging/templates/cluster_logging.j2
@@ -8,20 +8,20 @@
  logStore:
    type: "elasticsearch"
    elasticsearch:
      nodeCount: {{ _logging_elasticsearch_replicas|int }}
{% if _logging_elasticsearch_replicas|int > 1 %}
      nodeCount: {{ ocp4_workload_logging.elasticsearch_replicas|int }}
{% if ocp4_workload_logging.elasticsearch_replicas|int > 1 %}
      redundancyPolicy: "SingleRedundancy"
{% else %}
      redundancyPolicy: "ZeroRedundancy"
{% endif %}
      nodeSelector: 
{% if _logging_use_dedicated_nodes | d("") | length > 0 %}
        "node-role.kubernetes.io/{{ _logging_use_dedicated_nodes }}": ""
{% if ocp4_workload_logging.node_role | d("") | length > 0 %}
        "node-role.kubernetes.io/{{ ocp4_workload_logging.node_role }}": ""
      tolerations:
      - key: "{{ _logging_use_dedicated_nodes }}"
      - key: "{{ ocp4_workload_logging.node_role }}"
        value: reserved
        effect: NoSchedule
      - key: "{{ _logging_use_dedicated_nodes }}"
      - key: "{{ ocp4_workload_logging.node_role }}"
        value: reserved
        effect: NoExecute
{% else %}
@@ -29,22 +29,28 @@
{% endif %}
      resources:
        request:
          memory: "{{ _logging_elasticsearch_memory_request }}"
          memory: "{{ ocp4_workload_logging.elasticsearch_memory_request }}"
      storage:
{% if cloud_provider is match("ec2") %}
        storageClassName: "gp2"
        size: "{{ _logging_elasticsearch_storage_request }}"
{% elif cloud_provider is match("osp") %}
        storageClassName: "standard"
{% else %}
        storageClassName: ""
{% endif %}
        size: "{{ ocp4_workload_logging.elasticsearch_storage_request }}"
  visualization:
    type: "kibana"
    kibana:
      replicas: 1
      nodeSelector: 
{% if _logging_use_dedicated_nodes | d("") | length > 0 %}
        "node-role.kubernetes.io/{{ _logging_use_dedicated_nodes }}": ""
{% if ocp4_workload_logging.node_role | d("") | length > 0 %}
        "node-role.kubernetes.io/{{ ocp4_workload_logging.node_role }}": ""
      tolerations:
      - key: "{{ _logging_use_dedicated_nodes }}"
      - key: "{{ ocp4_workload_logging.node_role }}"
        value: reserved
        effect: NoSchedule
      - key: "{{ _logging_use_dedicated_nodes }}"
      - key: "{{ocp4_workload_logging.node_role }}"
        value: reserved
        effect: NoExecute
{% else %}
@@ -55,13 +61,13 @@
    curator:
      schedule: "30 3 * * *"
      nodeSelector: 
{% if _logging_use_dedicated_nodes | d("") | length > 0 %}
        "node-role.kubernetes.io/{{ _logging_use_dedicated_nodes }}": ""
{% if ocp4_workload_logging.node_role | d("") | length > 0 %}
        "node-role.kubernetes.io/{{ ocp4_workload_logging.node_role }}": ""
      tolerations:
      - key: "{{ _logging_use_dedicated_nodes }}"
      - key: "{{ ocp4_workload_logging.node_role }}"
        value: reserved
        effect: NoSchedule
      - key: "{{ _logging_use_dedicated_nodes }}"
      - key: "{{ ocp4_workload_logging.node_role }}"
        value: reserved
        effect: NoExecute
{% else %}
ansible/roles/ocp4-workload-machinesets/defaults/main.yml
New file
@@ -0,0 +1,88 @@
---
become_override: False
ocp_username: opentlc-mgr
silent: False
# Machineset Groups to be set up
# Roles must be unique in the cluster
# e.g. if you need more worker nodes use worker1, worker2, ... or
#      more descriptive names.
ocp4_workload_machinesets_defaults:
  machineset_groups:
  # Infranodes: Must be named "infra" if
  # desired
  - name: infra
    autoscale: false
    total_replicas: 1
    total_replicas_min: 1
    total_replicas_max: 1
    role: infra
    taints:
    - key: infra
      value: reserved
      effect: NoSchedule
    - key: infra
      value: reserved
      effect: NoExecute
    instance_type: "m5.2xlarge"
# root_volume_size is only available for AWS; it is ignored for OpenStack
#   root_volume_size: "150"
# instance_type for OpenStack
#   instance_type: "4c16g30d"
# Override the defaults by setting the overrides in
# ocp4_workload_machinesets_input: {}
# To add Elasticsearch nodes (for Cluster Logging) add the following to
# the ocp4_workload_machinesets_input:
#
# ocp4_workload_machinesets_input:
# - name: elasticsearch
#   autoscale: false
#   total_replicas: 1
#   total_replicas_min: 1
#   total_replicas_max: 1
#   role: elasticsearch
#   taints:
#   - key: elasticsearch
#     value: reserved
#     effect: NoSchedule
#   - key: elasticsearch
#     value: reserved
#     effect: NoExecute
#   instance_type: "4c16g30d"
#   instance_type: "m5.4xlarge"
# To add another group of worker nodes - with autoscaling enabled add
# the following:
#
# ocp4_workload_machinesets_input:
# - name: worker-scaled
#   autoscale: true
#   total_replicas: 1
#   total_replicas_min: 1
#   total_replicas_max: 5
#   role: worker-scaled
#   taints: {}
#   instance_type: "4c16g30d"
# To add OpenShift Container Storage Nodes add the
# following to the ocp4_workload_machinesets_input.
# Make sure you have enough disk space (and quota):
#
# ocp4_workload_machinesets_input:
# - name: ocs
#   autoscale: false
#   total_replicas: 3
#   total_replicas_min: 3
#   total_replicas_max: 3
#   role: ocs
#   taints:
#   - key: node.ocs.openshift.io/storage
#     value: true
#     effect: NoSchedule
#   node_labels:
#     node-role.kubernetes.io/ocs: ""
#     cluster.ocs.openshift.io/openshift-storage: ""
#   instance_type: "m5.4xlarge"
#   instance_type: "12c32g30d"
ansible/roles/ocp4-workload-machinesets/files/cluster-monitoring-config.yml
New file
@@ -0,0 +1,78 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
data:
  config.yaml: |
    alertmanagerMain:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute
    prometheusK8s:
      retention: 48h
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute
    prometheusOperator:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute
    grafana:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute
    k8sPrometheusAdapter:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute
    kubeStateMetrics:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute
    telemeterClient:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute
ansible/roles/ocp4-workload-machinesets/meta/main.yml
New file
@@ -0,0 +1,16 @@
---
galaxy_info:
  role_name: ocp4-workload-machinesets
  author: Wolfgang Kulhanek
  description: |
    Set up additional MachineSets for OpenShift. One worker machineset must exist
    before this role can be called.
    Currently supports AWS and OpenStack.
  license: MIT
  min_ansible_version: 2.8
  platforms: []
  galaxy_tags:
  - ocp
  - openshift
dependencies:
- ocp4_machineset_config
ansible/roles/ocp4-workload-machinesets/readme.adoc
New file
@@ -0,0 +1,111 @@
= ocp4-workload-machinesets - Create OpenShift 4 MachineSets
== Role overview
* This role creates additional MachineSets in an OpenShift 4 cluster. For each MachineSet group it creates a MachineSet per availability zone found (if the cloud platform supports availability zones) and then scales the MachineSets to the desired number of replicas. Optionally this role also sets up a MachineAutoscaler for the created MachineSet(s). It consists of the following playbooks:
** Playbook: link:./tasks/pre_workload.yml[pre_workload.yml] - Sets up an environment for the workload deployment.
*** Debug task will print out: `pre_workload Tasks completed successfully.`
** Playbook: link:./tasks/workload.yml[workload.yml] - Used to create the infra nodes
*** Debug task will print out: `workload Tasks completed successfully.`
** Playbook: link:./tasks/post_workload.yml[post_workload.yml] - Used to configure the workload after deployment
*** This role doesn't do anything here
*** Debug task will print out: `post_workload Tasks completed successfully.`
** Playbook: link:./tasks/remove_workload.yml[remove_workload.yml] - Used to delete the workload
*** This role removes the infrastructure nodes (DANGER!!!). It will not remove node selectors from infra components. This will have to be done manually.
*** Debug task will print out: `remove_workload Tasks completed successfully.`
== Review the defaults variable file
* This file link:./defaults/main.yml[./defaults/main.yml] contains all the variables you need to define to control the deployment of your workload.
* The variable *ocp_username* is mandatory to assign the workload to the correct OpenShift user.
* A variable *silent=True* can be passed to suppress debug messages.
* You can modify any of these default values by adding `-e "variable_name=variable_value"` to the command line, or by supplying an `ocp4_workload_machinesets_input` dictionary as shown in the example below
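A minimal sketch of such an override, based on the commented examples in link:./defaults/main.yml[./defaults/main.yml]. This assumes the input dictionary mirrors the structure of `ocp4_workload_machinesets_defaults`; note that, with the `combine` filter's default list handling, a supplied `machineset_groups` list replaces the default one, so keep the `infra` group if you still want it:

[source,yaml]
----
# Illustrative extra-vars file, e.g. passed with -e @my_machinesets_vars.yml
ocp4_workload_machinesets_input:
  machineset_groups:
  # Keep the default infra group (the supplied list replaces the defaults)
  - name: infra
    autoscale: false
    total_replicas: 1
    total_replicas_min: 1
    total_replicas_max: 1
    role: infra
    taints:
    - key: infra
      value: reserved
      effect: NoSchedule
    - key: infra
      value: reserved
      effect: NoExecute
    instance_type: m5.2xlarge
  # Additional autoscaled worker group
  - name: worker-scaled
    autoscale: true
    total_replicas: 1
    total_replicas_min: 1
    total_replicas_max: 5
    role: worker-scaled
    taints: []
    instance_type: m5.4xlarge   # instance type depends on the cloud provider (AWS shown)
----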
=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
TARGET_HOST="bastion.ocp43.openshift.opentlc.com"
OCP_USERNAME="opentlc-mgr"
WORKLOAD="ocp4-workload-machinesets"
GUID=1001
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem" \
    -e"ansible_user=ec2-user" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"silent=False" \
    -e"guid=${GUID}" \
    -e"ACTION=create"
----
=== To Delete an environment
----
TARGET_HOST="bastion.ocp43.openshift.opentlc.com"
OCP_USERNAME="opentlc-mgr"
WORKLOAD="ocp4-workload-machinesets"
GUID=1002
# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem" \
    -e"ansible_user=ec2-user" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ACTION=remove"
----
== Other related information:
=== Deploy Workload on OpenShift Cluster from an existing playbook:
[source,yaml]
----
- name: Deploy a workload role on a master host
  hosts: all
  become: true
  gather_facts: False
  tags:
    - step007
  roles:
    - { role: "{{ocp_workload}}", when: 'ocp_workload is defined' }
----
NOTE: You might want to change `hosts: all` to fit your requirements
=== Set up your Ansible inventory file
* You can create an Ansible inventory file to define your connection method to your host (Master/Bastion with `oc` command)
* You can also use the command line to define the hosts directly if your `ssh` configuration is set to connect to the host correctly
* You can also target localhost on the command line if your cluster is already authenticated and configured in your `oc` configuration
.Example inventory file
[source, ini]
----
[gptehosts:vars]
ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem
ansible_user=ec2-user
[gptehosts:children]
openshift
[openshift]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
bastion.cluster3.openshift.opentlc.com
bastion.cluster4.openshift.opentlc.com
[dev]
bastion.cluster1.openshift.opentlc.com
bastion.cluster2.openshift.opentlc.com
[prod]
bastion.cluster3.openshift.opentlc.com
bastion.cluster4.openshift.opentlc.com
----
ansible/roles/ocp4-workload-machinesets/tasks/main.yml
New file
@@ -0,0 +1,30 @@
---
# Do not modify this file
- name: Running Pre Workload Tasks
  include_tasks:
    file: ./pre_workload.yml
    apply:
      become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload Tasks
  include_tasks:
    file: ./workload.yml
    apply:
      become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Post Workload Tasks
  include_tasks:
    file: ./post_workload.yml
    apply:
      become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"
- name: Running Workload removal Tasks
  include_tasks:
    file: ./remove_workload.yml
    apply:
      become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp4-workload-machinesets/tasks/post_workload.yml
New file
@@ -0,0 +1,8 @@
---
# Implement your Post Workload deployment tasks here
# Leave this as the last task in the playbook.
- name: post_workload tasks complete
  debug:
    msg: "Post-Workload Tasks completed successfully."
  when: not silent|bool
ansible/roles/ocp4-workload-machinesets/tasks/pre_workload.yml
New file
@@ -0,0 +1,8 @@
---
# Implement your Pre Workload deployment tasks here
# Leave this as the last task in the playbook.
- name: pre_workload tasks complete
  debug:
    msg: "Pre-Workload tasks completed successfully."
  when: not silent|bool
ansible/roles/ocp4-workload-machinesets/tasks/remove_workload.yml
New file
@@ -0,0 +1,82 @@
# vim: set ft=ansible
---
# Implement your Workload removal tasks here
- name: Find Infra machinesets
  k8s_facts:
    api_version: machine.openshift.io/v1beta1
    kind: MachineSet
    namespace: openshift-machine-api
    label_selectors:
      - agnosticd.redhat.com/machineset-group = infra
  register: r_infra_machinesets
- name: Find Elasticsearch machinesets
  k8s_facts:
    api_version: machine.openshift.io/v1beta1
    kind: MachineSet
    namespace: openshift-machine-api
    label_selectors:
      - agnosticd.redhat.com/machineset-group = elasticsearch
  register: r_elasticsearch_machinesets
- name: Delete infra machinesets
  when: r_infra_machinesets.resources | length | int > 0
  k8s:
    state: absent
    definition: "{{ item }}"
  with_items: "{{ r_infra_machinesets.resources }}"
- name: Delete Elasticsearch machinesets
  when: r_elasticsearch_machinesets.resources | length | int > 0
  k8s:
    state: absent
    definition: "{{ item }}"
  with_items: "{{ r_elasticsearch_machinesets.resources }}"
- name: Print Warning
  debug:
    msg: "WARNING: Make sure to change the node selectors for Ingress Controllers, Image Registry and Monitoring"
# There seems to be no way to remove settings via the k8s module, only to add them. So node selectors etc. need to be removed manually.
# - name: Move Ingress Controllers to Worker Nodes
#   k8s:
#     state: present
#     definition:
#       apiVersion: operator.openshift.io/v1
#       kind: IngressController
#       metadata:
#         name: default
#         namespace: openshift-ingress-operator
#       spec:
#         nodePlacement:
#           nodeSelector:
#             matchLabels:
#               node-role.kubernetes.io/worker: ""
# - name: Move Image Registry to Worker Nodes
#   k8s:
#     state: present
#     definition:
#       apiVersion: imageregistry.operator.openshift.io/v1
#       kind: Config
#       metadata:
#         name: cluster
#       spec:
#         nodeSelector:
#           "node-role.kubernetes.io/worker": ""
# - name: Remove Cluster Monitoring Config Map
#   k8s:
#     state: absent
#     api_version: v1
#     kind: ConfigMap
#     name: cluster-monitoring-config
#     namespace: openshift-monitoring
# Leave this as the last task in the playbook.
- name: remove_workload tasks complete
  debug:
    msg: "Remove Workload tasks completed successfully."
  when: not silent|bool
ansible/roles/ocp4-workload-machinesets/tasks/workload.yml
New file
@@ -0,0 +1,73 @@
---
- name: Set up ocp4_workload_machinesets combined dictionary
  set_fact:
    ocp4_workload_machinesets: >-
      {{ ocp4_workload_machinesets_defaults
       | combine(ocp4_workload_machinesets_input  | default( {} ),
                 ocp4_workload_machinesets_secret | default( {} ), recursive=true )
      }}
- name: Print combined role variables
  debug:
    var: ocp4_workload_machinesets
    verbosity: 2
- name: Configure OpenShift 4 machinesets
  include_role:
    name: ocp4_machineset_config
  vars:
    ocp4_machineset_config_groups: "{{ ocp4_workload_machinesets.machineset_groups }}"
- name: Wait for Nodes to be available
  k8s_facts:
    api_version: v1
    kind: Node
    label_selectors:
    - "node-role.kubernetes.io/{{ item.role }}="
  register: r_nodes
  until:
  - r_nodes.resources | length | int == item.total_replicas | int
  delay: 30
  retries: 20
  loop: "{{ ocp4_workload_machinesets.machineset_groups }}"
# The Machine Config Daemon and Node CA DaemonSets do not include
# universal tolerations, so adding taints to infra and other
# nodes would evict the Machine Config Daemon and Node CA
# pods from those nodes.
# This adds the necessary tolerations.
# The product fix is currently targeted for OpenShift 4.5.
# See https://bugzilla.redhat.com/show_bug.cgi?id=1780318
- name: Fix Machine Config and Node CA Daemon Sets (add Tolerations for new nodes)
  k8s:
    state: present
    merge_type:
    - merge
    definition: "{{ lookup('template', '{{ item }}') }}"
  loop:
  - ./templates/machine-config-daemonset.j2
  - ./templates/node-ca-daemonset.j2
- name: Set up Infra Nodes when one MachineSet group is called 'infra'
  when:
  - ocp4_workload_machinesets.machineset_groups | selectattr('name', 'contains', 'infra')
  block:
  - name: Configure Ingress Controllers and Image Registry
    k8s:
      state: present
      merge_type:
      - merge
      definition: "{{ lookup('template', '{{ item }}') }}"
    loop:
    - ./templates/ingress-controller.j2
    - ./templates/image-registry.j2
  - name: Create Config Map for Cluster Monitoring
    k8s:
      state: present
      definition: "{{ lookup('file', './files/cluster-monitoring-config.yml') }}"
# Leave this as the last task in the playbook.
- name: workload tasks complete
  debug:
    msg: "Workload Tasks completed successfully."
  when: not silent|bool
ansible/roles/ocp4-workload-machinesets/templates/image-registry.j2
New file
@@ -0,0 +1,14 @@
apiVersion: imageregistry.operator.openshift.io/v1
kind: Config
metadata:
  name: cluster
spec:
  nodeSelector:
    "node-role.kubernetes.io/infra": ""
  tolerations:
  - effect: NoSchedule
    key: infra
    value: reserved
  - effect: NoExecute
    key: infra
    value: reserved
ansible/roles/ocp4-workload-machinesets/templates/ingress-controller.j2
New file
@@ -0,0 +1,18 @@
apiVersion: operator.openshift.io/v1
kind: IngressController
metadata:
  name: default
  namespace: openshift-ingress-operator
spec:
  replicas: {{ (ocp4_workload_machinesets.machineset_groups | selectattr('name', 'contains', 'infra')|first).total_replicas | int }}
  nodePlacement:
    nodeSelector:
      matchLabels:
        node-role.kubernetes.io/infra: ""
    tolerations:
    - effect: NoSchedule
      key: infra
      value: reserved
    - effect: NoExecute
      key: infra
      value: reserved
ansible/roles/ocp4-workload-machinesets/templates/machine-config-daemonset.j2
New file
@@ -0,0 +1,24 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: machine-config-daemon
  namespace: openshift-machine-config-operator
spec:
  template:
    spec:
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/etcd
        operator: Exists
{% for item in ocp4_workload_machinesets.machineset_groups %}
{% if item.taints | d("") | length > 0 %}
{% for taint in item.taints %}
      - effect: "{{ taint.effect }}"
        key: "{{ taint.key }}"
        value: "{{ taint.value }}"
{% endfor %}
{% endif %}
{% endfor %}
ansible/roles/ocp4-workload-machinesets/templates/node-ca-daemonset.j2
New file
@@ -0,0 +1,24 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-ca
  namespace: openshift-image-registry
spec:
  template:
    spec:
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/etcd
        operator: Exists
{% for item in ocp4_workload_machinesets.machineset_groups %}
{% if item.taints | d("") | length > 0 %}
{% for taint in item.taints %}
      - effect: "{{ taint.effect }}"
        key: "{{ taint.key }}"
        value: "{{ taint.value }}"
{% endfor %}
{% endif %}
{% endfor %}
ansible/roles/ocp4_machineset_config/meta/main.yml
@@ -2,7 +2,9 @@
galaxy_info:
  role_name: ocp4_machineset_config
  author: Johnathan Kupferer, Wolfgang Kulhanek
  description: Configure OpenShift 4 MachineSets
  description: |
    Create OpenShift 4 MachineSets. One worker MachineSet must exist before this role is executed.
    Currently supports AWS and OpenStack.
  license: MIT
  min_ansible_version: 2.7
  platforms: []
ansible/roles/ocp4_machineset_config/tasks/machineset-group-aws.yml
@@ -15,9 +15,9 @@
    availability_zone_region: "{{ aws_worker_availability_zone.region }}"
    availability_zone_subnet: "{{ aws_worker_availability_zone.subnet }}"
    aws_instance_type: >-
      {{ machineset_group.aws_instance_type | default(default_aws_instance_type) }}
      {{ machineset_group.instance_type | default(default_aws_instance_type) }}
    aws_root_volume_size: >-
      {{ machineset_group.aws_root_volume_size | default(default_aws_root_volume_size) }}
      {{ machineset_group.root_volume_size | default(default_aws_root_volume_size) }}
    machineset_name: >-
      {{ [cluster_label, machineset_group.name, availability_zone] | join('-') }}
    machineset_group_node_labels: >-
ansible/roles/ocp4_machineset_config/tasks/machineset-openstack.yml
@@ -1,5 +1,5 @@
---
- name: Define custom machinesets
- name: Define custom MachineSets
  include_tasks: machineset-group-openstack.yml
  loop: "{{ ocp4_machineset_config_groups }}"
  loop_control:
ansible/roles/ocp4_machineset_config/tasks/set-facts.yml
@@ -1,5 +1,5 @@
---
- name: Get machinesets
- name: Get MachineSets
  k8s_facts:
    api_version: machine.openshift.io/v1beta1
    kind: MachineSet
@@ -23,7 +23,7 @@
    base_worker_machineset_json_query: >-
      [?!contains(keys(metadata.labels), '{{ machineset_group_label }}')]
- name: Print current Machinesets
- name: Print current MachineSets
  debug: var=ocp4_current_machineset_names
- name: Set cluster facts for AWS
ansible/roles/ocp4_machineset_config/templates/machineset-aws.j2
@@ -66,14 +66,13 @@
          tags: {{ aws_worker_tags | to_json }}
          userDataSecret:
            name: worker-user-data
{% if machineset_group.taint | d('') | length > 0 %}
{% if machineset_group.taints | d('') | length > 0 %}
      taints:
      - key: "{{ machineset_group.taint }}"
        value: "reserved"
        effect: "NoSchedule"
      - key: "{{ machineset_group.taint }}"
        value: "reserved"
        effect: "NoExecute"
{% for taint in machineset_group.taints %}
      - key: "{{ taint.key }}"
        value: "{{ taint.value }}"
        effect: "{{ taint.effect }}"
{% endfor %}
{% endif %}
      versions:
        kubelet: ""
ansible/roles/ocp4_machineset_config/templates/machineset-openstack.j2
@@ -55,14 +55,13 @@
          trunk: true
          userDataSecret:
            name: worker-user-data
{% if machineset_group.taint | d('') | length > 0 %}
{% if machineset_group.taints | d('') | length > 0 %}
      taints:
      - key: "{{ machineset_group.taint }}"
        value: "reserved"
        effect: "NoSchedule"
      - key: "{{ machineset_group.taint }}"
        value: "reserved"
        effect: "NoExecute"
{% for taint in machineset_group.taints %}
      - key: "{{ taint.key }}"
        value: "{{ taint.value }}"
        effect: "{{ taint.effect }}"
{% endfor %}
{% endif %}
      versions:
        kubelet: ""