From 93757c5fbd015faf35e12db4464d51b6acc291de Mon Sep 17 00:00:00 2001
From: Wolfgang Kulhanek <wkulhanek@users.noreply.github.com>
Date: Tue, 10 Mar 2020 00:15:41 +0100
Subject: [PATCH] Fix workloads (#1270)

---
 ansible/roles/ocp4_machineset_config/tasks/machineset-aws.yml                             |    2 
 ansible/roles/ocp4_machineset_config/README.adoc                                          |    8 
 ansible/roles/ocp4-workload-infra-nodes/templates/ingress-controller.j2                   |   18 +
 ansible/roles/ocp4-workload-infra-nodes/templates/machine-config-daemonset.j2             |   29 +
 ansible/roles/ocp4-workload-infra-nodes/defaults/main.yml                                 |   10 
 ansible/roles/ocp4_machineset_config/tasks/machineset-group-osp.yml                       |   64 ++++
 ansible/roles/ocp4-workload-logging/templates/logging_subscription.j2                     |   11 
 ansible/roles/ocp4-workload-logging/templates/cluster_logging.j2                          |   58 +++
 ansible/roles/ocp4-workload-infra-nodes/tasks/remove_workload.yml                         |   64 +++
 ansible/roles/ocp4-workload-logging/files/logging_operatorgroup.yaml                      |    3 
 ansible/roles/ocp4_machineset_config/templates/osp-machineset.yml.j2                      |   73 ++++
 ansible/roles/ocp4-workload-logging/files/eo_rolebinding.yaml                             |   14 
 ansible/roles/ocp4-workload-infra-nodes/templates/image-registry.j2                       |   14 
 ansible/roles/ocp4_machineset_config/meta/main.yml                                        |    2 
 ansible/roles/ocp4_machineset_config/tasks/machineset-osp.yml                             |   11 
 ansible/roles/ocp4-workload-logging/tasks/workload.yml                                    |  186 ++++++-----
 ansible/roles/ocp4-workload-logging/files/eo_namespace.yaml                               |    8 
 ansible/roles/ocp4-workload-infra-nodes/templates/node-ca-daemonset.j2                    |   29 +
 ansible/roles/ocp4-workload-logging/files/logging_curator_configmap.yaml                  |    0 
 ansible/roles/ocp4-workload-logging/files/eo_operatorgroup.yaml                           |    6 
 ansible/roles/ocp4-workload-logging/templates/eo_subscription.j2                          |   12 
 ansible/roles/ocp4_machineset_config/templates/aws-machineset.yml.j2                      |   11 
 ansible/roles/ocp4-workload-logging/tasks/remove_workload.yml                             |   62 ---
 ansible/roles/ocp4_machineset_config/vars/main.yml                                        |   11 
 ansible/roles/ocp4_machineset_config/tasks/main.yml                                       |    4 
 ansible/roles/ocp4-workload-logging/files/eo_role.yaml                                    |   16 +
 ansible/roles/ocp4-workload-logging/files/logging_subscription.yaml                       |   15 
 ansible/roles/ocp4-workload-logging/files/logging_namespace.yaml                          |    2 
 ansible/roles/ocp4_machineset_config/tasks/machineset-group-aws.yml                       |    0 
 ansible/roles/ocp4-workload-infra-nodes/files/cluster-monitoring-config.yml               |   78 ++++
 ansible/roles/ocp4-workload-logging/defaults/main.yml                                     |   14 
 ansible/roles/ocp4_machineset_config/defaults/main.yml                                    |    8 
 ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/files/router-with-certs.yaml |    2 
 ansible/roles/ocp4-workload-infra-nodes/tasks/workload.yml                                |   73 ++++
 34 files changed, 725 insertions(+), 193 deletions(-)

diff --git a/ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/files/router-with-certs.yaml b/ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/files/router-with-certs.yaml
index 96f5cef..ab79eac 100644
--- a/ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/files/router-with-certs.yaml
+++ b/ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/files/router-with-certs.yaml
@@ -1,8 +1,6 @@
 apiVersion: operator.openshift.io/v1
 kind: IngressController
 metadata:
-  finalizers:
-  - ingress.openshift.io/ingress-controller
   name: default
   namespace: openshift-ingress-operator
 spec:
diff --git a/ansible/roles/ocp4-workload-infra-nodes/defaults/main.yml b/ansible/roles/ocp4-workload-infra-nodes/defaults/main.yml
index ffb99c2..080c9dd 100644
--- a/ansible/roles/ocp4-workload-infra-nodes/defaults/main.yml
+++ b/ansible/roles/ocp4-workload-infra-nodes/defaults/main.yml
@@ -3,5 +3,11 @@
 ocp_username: opentlc-mgr
 silent: False
 
-_infra_node_replicas: 3
-_infra_node_instance_type: m4.4xlarge
\ No newline at end of file
+_infra_node_replicas: 1
+_infra_node_instance_type: m5.4xlarge
+
+# Create separate Elasticsearch nodes.
+# When false, only infra nodes are created.
+_infra_node_elasticsearch_nodes: false
+_infra_node_elasticsearch_replicas: 1
+_infra_node_elasticsearch_instance_type: m5.4xlarge
\ No newline at end of file
diff --git a/ansible/roles/ocp4-workload-infra-nodes/files/cluster-monitoring-config.yml b/ansible/roles/ocp4-workload-infra-nodes/files/cluster-monitoring-config.yml
new file mode 100644
index 0000000..f46a209
--- /dev/null
+++ b/ansible/roles/ocp4-workload-infra-nodes/files/cluster-monitoring-config.yml
@@ -0,0 +1,78 @@
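+# Moves all OpenShift cluster monitoring components (Alertmanager, Prometheus,
+# Grafana, kube-state-metrics, telemeter client, ...) to the infra nodes and
+# adds tolerations for the infra=reserved NoSchedule/NoExecute taints.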
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cluster-monitoring-config
+  namespace: openshift-monitoring
+data:
+  config.yaml: |
+    alertmanagerMain:
+      nodeSelector:
+        node-role.kubernetes.io/infra: ""
+      tolerations:
+      - key: infra
+        value: reserved
+        effect: NoSchedule
+      - key: infra
+        value: reserved
+        effect: NoExecute
+    prometheusK8s:
+      retention: 48h
+      nodeSelector:
+        node-role.kubernetes.io/infra: ""
+      tolerations:
+      - key: infra
+        value: reserved
+        effect: NoSchedule
+      - key: infra
+        value: reserved
+        effect: NoExecute
+    prometheusOperator:
+      nodeSelector:
+        node-role.kubernetes.io/infra: ""
+      tolerations:
+      - key: infra
+        value: reserved
+        effect: NoSchedule
+      - key: infra
+        value: reserved
+        effect: NoExecute
+    grafana:
+      nodeSelector:
+        node-role.kubernetes.io/infra: ""
+      tolerations:
+      - key: infra
+        value: reserved
+        effect: NoSchedule
+      - key: infra
+        value: reserved
+        effect: NoExecute
+    k8sPrometheusAdapter:
+      nodeSelector:
+        node-role.kubernetes.io/infra: ""
+      tolerations:
+      - key: infra
+        value: reserved
+        effect: NoSchedule
+      - key: infra
+        value: reserved
+        effect: NoExecute
+    kubeStateMetrics:
+      nodeSelector:
+        node-role.kubernetes.io/infra: ""
+      tolerations:
+      - key: infra
+        value: reserved
+        effect: NoSchedule
+      - key: infra
+        value: reserved
+        effect: NoExecute
+    telemeterClient:
+      nodeSelector:
+        node-role.kubernetes.io/infra: ""
+      tolerations:
+      - key: infra
+        value: reserved
+        effect: NoSchedule
+      - key: infra
+        value: reserved
+        effect: NoExecute
diff --git a/ansible/roles/ocp4-workload-infra-nodes/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-infra-nodes/tasks/remove_workload.yml
index c25031c..65fc383 100644
--- a/ansible/roles/ocp4-workload-infra-nodes/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-infra-nodes/tasks/remove_workload.yml
@@ -2,20 +2,76 @@
 ---
 # Implement your Workload removal tasks here
 
-- name: find infra machinesets
+- name: Find Infra machinesets
   k8s_facts:
     api_version: machine.openshift.io/v1beta1
     kind: MachineSet
     namespace: openshift-machine-api
     label_selectors:
       - agnosticd.redhat.com/machineset-group = infra
-  register: machinesets_out
+  register: r_infra_machinesets
 
-- name: delete infra machinesets
+- name: Find Elasticsearch machinesets
+  k8s_facts:
+    api_version: machine.openshift.io/v1beta1
+    kind: MachineSet
+    namespace: openshift-machine-api
+    label_selectors:
+      - agnosticd.redhat.com/machineset-group = elasticsearch
+  register: r_elasticsearch_machinesets
+
+- name: Delete infra machinesets
+  when: r_infra_machinesets.resources | length | int > 0
   k8s:
     state: absent
     definition: "{{ item }}"
-  with_items: "{{ machinesets_out.resources }}" 
+  with_items: "{{ r_infra_machinesets.resources }}" 
+
+- name: Delete Elasticsearch machinesets
+  when: r_elasticsearch_machinesets.resources | length | int > 0
+  k8s:
+    state: absent
+    definition: "{{ item }}"
+  with_items: "{{ r_elasticsearch_machinesets.resources }}" 
+
+- name: Print Warning
+  debug:
+    msg: "WARNING: Make sure to change the node selectors for Ingress Controllers, Image Registry and Monitoring"
+
+# - name: Move Ingress Controllers to Worker Nodes
+#   k8s:
+#     state: present
+#     definition:
+#       apiVersion: operator.openshift.io/v1
+#       kind: IngressController
+#       metadata:
+#         name: default
+#         namespace: openshift-ingress-operator
+#       spec:
+#         nodePlacement:
+#           nodeSelector:
+#             matchLabels:
+#               node-role.kubernetes.io/worker: ""
+
+# - name: Move Image Registry to Worker Nodes
+#   k8s:
+#     state: present
+#     definition:
+#       apiVersion: imageregistry.operator.openshift.io/v1
+#       kind: Config
+#       metadata:
+#         name: cluster
+#       spec:
+#         nodeSelector:
+#           "node-role.kubernetes.io/worker": ""
+
+# - name: Remove Cluster Monitoring Config Map
+#   k8s:
+#     state: absent
+#     api_version: v1
+#     kind: ConfigMap
+#     name: cluster-monitoring-config
+#     namespace: openshift-monitoring
 
 # Leave this as the last task in the playbook.
 - name: remove_workload tasks complete
diff --git a/ansible/roles/ocp4-workload-infra-nodes/tasks/workload.yml b/ansible/roles/ocp4-workload-infra-nodes/tasks/workload.yml
index b43f918..7fec557 100644
--- a/ansible/roles/ocp4-workload-infra-nodes/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-infra-nodes/tasks/workload.yml
@@ -1,4 +1,3 @@
-# vim: set ft=ansible
 ---
 - name: Configure OCP4 infra machinesets
   include_role:
@@ -7,9 +6,79 @@
     ocp4_machineset_config_groups:
     - name: infra
       role: infra
-      aws_instance_type: "{{ _infra_node_instance_type }}"
+      taint: infra
+      instance_type: "{{ _infra_node_instance_type }}"
       total_replicas: "{{ _infra_node_replicas }}"
 
+- name: Configure OCP4 Elasticsearch machinesets
+  when: _infra_node_elasticsearch_nodes | d(False) | bool
+  include_role:
+    name: ocp4_machineset_config
+  vars:
+    ocp4_machineset_config_groups:
+    - name: elasticsearch
+      role: elasticsearch
+      taint: elasticsearch
+      instance_type: "{{ _infra_node_elasticsearch_instance_type }}"
+      total_replicas: "{{ _infra_node_elasticsearch_replicas }}"
+
+- name: Wait for Infra Nodes to be available
+  k8s_facts:
+    api_version: v1
+    kind: Node
+    label_selectors:
+    - node-role.kubernetes.io/infra =
+  register: r_infra_nodes
+  until:
+  - r_infra_nodes.resources | length | int == _infra_node_replicas | int
+  delay: 30
+  retries: 15
+
+- name: Wait for Elasticsearch Nodes to be available
+  when: _infra_node_elasticsearch_nodes | d(False) | bool
+  k8s_facts:
+    api_version: v1
+    kind: Node
+    label_selectors:
+    - node-role.kubernetes.io/elasticsearch =
+  register: r_es_nodes
+  until:
+  - r_es_nodes.resources | length | int == _infra_node_elasticsearch_replicas | int
+  delay: 30
+  retries: 15
+
+# The Machine Config Daemon DaemonSet does not include
+# universal tolerations, so adding taints to infra
+# (and Elasticsearch) nodes would evict the Machine Config
+# Daemon pods from those nodes.
+# These templates add the necessary tolerations.
+# This may not be fixed upstream before OpenShift 4.5.
+# See https://bugzilla.redhat.com/show_bug.cgi?id=1780318
+- name: Fix Machine Config and Node CA Daemon Sets (add Tolerations for Infra and Elasticsearch nodes)
+  k8s:
+    state: present
+    merge_type:
+    - merge
+    definition: "{{ lookup('template', item) }}"
+  loop:
+  - ./templates/machine-config-daemonset.j2
+  - ./templates/node-ca-daemonset.j2
+
+- name: Configure Ingress Controllers and Image Registry
+  k8s:
+    state: present
+    merge_type:
+    - merge
+    definition: "{{ lookup('template', item) }}"
+  loop:
+  - ./templates/ingress-controller.j2
+  - ./templates/image-registry.j2
+
+- name: Create Config Map for Cluster Monitoring
+  k8s:
+    state: present
+    definition: "{{ lookup('file', './files/cluster-monitoring-config.yml') }}"
+
 # Leave this as the last task in the playbook.
 - name: workload tasks complete
   debug:
diff --git a/ansible/roles/ocp4-workload-infra-nodes/templates/image-registry.j2 b/ansible/roles/ocp4-workload-infra-nodes/templates/image-registry.j2
new file mode 100644
index 0000000..49f9da8
--- /dev/null
+++ b/ansible/roles/ocp4-workload-infra-nodes/templates/image-registry.j2
@@ -0,0 +1,14 @@
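+# Merged into the cluster image registry operator configuration: schedules the
+# registry on infra nodes and tolerates the infra=reserved taints.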
+apiVersion: imageregistry.operator.openshift.io/v1
+kind: Config
+metadata:
+  name: cluster
+spec:
+  nodeSelector:
+    "node-role.kubernetes.io/infra": ""
+  tolerations:
+  - effect: NoSchedule
+    key: infra
+    value: reserved
+  - effect: NoExecute
+    key: infra
+    value: reserved
\ No newline at end of file
diff --git a/ansible/roles/ocp4-workload-infra-nodes/templates/ingress-controller.j2 b/ansible/roles/ocp4-workload-infra-nodes/templates/ingress-controller.j2
new file mode 100644
index 0000000..2fccffd
--- /dev/null
+++ b/ansible/roles/ocp4-workload-infra-nodes/templates/ingress-controller.j2
@@ -0,0 +1,18 @@
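+# Merged into the default IngressController: runs one router replica per infra
+# node, pinned to the infra nodes and tolerating the infra=reserved taints.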
+apiVersion: operator.openshift.io/v1
+kind: IngressController
+metadata:
+  name: default
+  namespace: openshift-ingress-operator
+spec:
+  replicas: {{ _infra_node_replicas | int }}
+  nodePlacement:
+    nodeSelector:
+      matchLabels:
+        node-role.kubernetes.io/infra: ""
+    tolerations:
+    - effect: NoSchedule
+      key: infra
+      value: reserved
+    - effect: NoExecute
+      key: infra
+      value: reserved
\ No newline at end of file
diff --git a/ansible/roles/ocp4-workload-infra-nodes/templates/machine-config-daemonset.j2 b/ansible/roles/ocp4-workload-infra-nodes/templates/machine-config-daemonset.j2
new file mode 100644
index 0000000..65954d9
--- /dev/null
+++ b/ansible/roles/ocp4-workload-infra-nodes/templates/machine-config-daemonset.j2
@@ -0,0 +1,29 @@
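+# Applied as a JSON merge patch (merge_type: merge), which replaces the whole
+# tolerations list. The default master/etcd tolerations are therefore restated
+# here alongside the tolerations for the infra (and optional elasticsearch) taints.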
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: machine-config-daemon
+  namespace: openshift-machine-config-operator
+spec:
+  template:
+    spec:
+      tolerations:
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/master
+        operator: Exists
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/etcd
+        operator: Exists
+      - effect: NoSchedule
+        key: infra
+        value: reserved
+      - effect: NoExecute
+        key: infra
+        value: reserved
+{% if _infra_node_elasticsearch_nodes | bool %}
+      - effect: NoSchedule
+        key: elasticsearch
+        value: reserved
+      - effect: NoExecute
+        key: elasticsearch
+        value: reserved    
+{% endif %}
\ No newline at end of file
diff --git a/ansible/roles/ocp4-workload-infra-nodes/templates/node-ca-daemonset.j2 b/ansible/roles/ocp4-workload-infra-nodes/templates/node-ca-daemonset.j2
new file mode 100644
index 0000000..3812327
--- /dev/null
+++ b/ansible/roles/ocp4-workload-infra-nodes/templates/node-ca-daemonset.j2
@@ -0,0 +1,29 @@
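+# Same approach as machine-config-daemonset.j2: restate the full tolerations
+# list for the node-ca DaemonSet so its pods remain on the tainted infra and
+# elasticsearch nodes.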
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: node-ca
+  namespace: openshift-image-registry
+spec:
+  template:
+    spec:
+      tolerations:
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/master
+        operator: Exists
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/etcd
+        operator: Exists
+      - effect: NoSchedule
+        key: infra
+        value: reserved
+      - effect: NoExecute
+        key: infra
+        value: reserved
+{% if _infra_node_elasticsearch_nodes | bool %}
+      - effect: NoSchedule
+        key: elasticsearch
+        value: reserved
+      - effect: NoExecute
+        key: elasticsearch
+        value: reserved    
+{% endif %}
\ No newline at end of file
diff --git a/ansible/roles/ocp4-workload-logging/defaults/main.yml b/ansible/roles/ocp4-workload-logging/defaults/main.yml
index f607165..1cde41d 100644
--- a/ansible/roles/ocp4-workload-logging/defaults/main.yml
+++ b/ansible/roles/ocp4-workload-logging/defaults/main.yml
@@ -3,8 +3,14 @@
 ocp_username: opentlc-mgr
 silent: False
 
-_logging_elasticsearch_replicas: 3
-_logging_elasticsearch_memory_request: "4Gi"
+_logging_elasticsearch_replicas: 1
+_logging_elasticsearch_memory_request: "8Gi"
 _logging_elasticsearch_storage_request: "200Gi"
-_logging_use_infra_nodes: True
-_logging_wait_for_deployment: True
\ No newline at end of file
+
+# Set to true to place pods on nodes with label
+#   node-role.kubernetes.io/infra: ""
+_logging_use_infra_nodes: False
+# Set to true to place pods on nodes with label
+#   node-role.kubernetes.io/elasticsearch: ""
+_logging_use_elasticsearch_nodes: True
+# If both are false, the logging components run on worker nodes.
diff --git a/ansible/roles/ocp4-workload-logging/files/elasticsearch_catalog_source.yaml b/ansible/roles/ocp4-workload-logging/files/elasticsearch_catalog_source.yaml
deleted file mode 100644
index ae240ad..0000000
--- a/ansible/roles/ocp4-workload-logging/files/elasticsearch_catalog_source.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-apiVersion: operators.coreos.com/v1
-kind: CatalogSourceConfig
-metadata:
-  name: elasticsearch-operator
-  namespace: openshift-marketplace
-spec:
-  packages: elasticsearch-operator
-  targetNamespace: openshift-operators
diff --git a/ansible/roles/ocp4-workload-logging/files/elasticsearch_subscription.yaml b/ansible/roles/ocp4-workload-logging/files/elasticsearch_subscription.yaml
deleted file mode 100644
index f333922..0000000
--- a/ansible/roles/ocp4-workload-logging/files/elasticsearch_subscription.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: operators.coreos.com/v1alpha1
-kind: Subscription
-metadata:
-  name: elasticsearch-operator
-  namespace: openshift-operators
-spec:
-  channel: preview
-  installPlanApproval: Automatic
-  name: elasticsearch-operator
-  source: elasticsearch-operator
-  sourceNamespace: openshift-operators
-#  startingCSV: elasticsearch-operator.v4.1.2
diff --git a/ansible/roles/ocp4-workload-logging/files/eo_namespace.yaml b/ansible/roles/ocp4-workload-logging/files/eo_namespace.yaml
new file mode 100644
index 0000000..0f479cd
--- /dev/null
+++ b/ansible/roles/ocp4-workload-logging/files/eo_namespace.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: openshift-operators-redhat 
+  annotations:
+    openshift.io/node-selector: ""
+  labels:
+    openshift.io/cluster-monitoring: "true"
\ No newline at end of file
diff --git a/ansible/roles/ocp4-workload-logging/files/eo_operatorgroup.yaml b/ansible/roles/ocp4-workload-logging/files/eo_operatorgroup.yaml
new file mode 100644
index 0000000..481ec1e
--- /dev/null
+++ b/ansible/roles/ocp4-workload-logging/files/eo_operatorgroup.yaml
@@ -0,0 +1,6 @@
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+  name: openshift-operators-redhat
+  namespace: openshift-operators-redhat 
+spec: {}
\ No newline at end of file
diff --git a/ansible/roles/ocp4-workload-logging/files/eo_role.yaml b/ansible/roles/ocp4-workload-logging/files/eo_role.yaml
new file mode 100644
index 0000000..2f73028
--- /dev/null
+++ b/ansible/roles/ocp4-workload-logging/files/eo_role.yaml
@@ -0,0 +1,16 @@
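+# Lets the prometheus-k8s service account discover and scrape operator metrics
+# (services, endpoints, pods) in the openshift-operators-redhat namespace.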
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: prometheus-k8s
+  namespace: openshift-operators-redhat
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - services
+  - endpoints
+  - pods
+  verbs:
+  - get
+  - list
+  - watch
diff --git a/ansible/roles/ocp4-workload-logging/files/eo_rolebinding.yaml b/ansible/roles/ocp4-workload-logging/files/eo_rolebinding.yaml
new file mode 100644
index 0000000..e20baa8
--- /dev/null
+++ b/ansible/roles/ocp4-workload-logging/files/eo_rolebinding.yaml
@@ -0,0 +1,14 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: prometheus-k8s
+  namespace: openshift-operators-redhat
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: prometheus-k8s
+subjects:
+- kind: ServiceAccount
+  name: prometheus-k8s
+  namespace: openshift-operators-redhat
+
diff --git a/ansible/roles/ocp4-workload-logging/files/logging_catalog_source.yaml b/ansible/roles/ocp4-workload-logging/files/logging_catalog_source.yaml
deleted file mode 100644
index b4ec899..0000000
--- a/ansible/roles/ocp4-workload-logging/files/logging_catalog_source.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-apiVersion: operators.coreos.com/v1
-kind: CatalogSourceConfig
-metadata:
-  name: cluster-logging-operator
-  namespace: openshift-marketplace
-spec:
-  packages: cluster-logging
-  targetNamespace: openshift-logging
diff --git a/ansible/roles/ocp4-workload-logging/files/curator_configmap.yaml b/ansible/roles/ocp4-workload-logging/files/logging_curator_configmap.yaml
similarity index 100%
rename from ansible/roles/ocp4-workload-logging/files/curator_configmap.yaml
rename to ansible/roles/ocp4-workload-logging/files/logging_curator_configmap.yaml
diff --git a/ansible/roles/ocp4-workload-logging/files/namespace.yaml b/ansible/roles/ocp4-workload-logging/files/logging_namespace.yaml
similarity index 83%
rename from ansible/roles/ocp4-workload-logging/files/namespace.yaml
rename to ansible/roles/ocp4-workload-logging/files/logging_namespace.yaml
index 4161fb4..08b3aa9 100644
--- a/ansible/roles/ocp4-workload-logging/files/namespace.yaml
+++ b/ansible/roles/ocp4-workload-logging/files/logging_namespace.yaml
@@ -3,7 +3,7 @@
 metadata:
   name: openshift-logging
   annotations:
-    openshift.io/node-selector: "" 
+    openshift.io/node-selector: ""
   labels:
     openshift.io/cluster-logging: "true"
     openshift.io/cluster-monitoring: "true"
diff --git a/ansible/roles/ocp4-workload-logging/files/operatorgroup.yaml b/ansible/roles/ocp4-workload-logging/files/logging_operatorgroup.yaml
similarity index 80%
rename from ansible/roles/ocp4-workload-logging/files/operatorgroup.yaml
rename to ansible/roles/ocp4-workload-logging/files/logging_operatorgroup.yaml
index 12e1b67..bdcc99b 100644
--- a/ansible/roles/ocp4-workload-logging/files/operatorgroup.yaml
+++ b/ansible/roles/ocp4-workload-logging/files/logging_operatorgroup.yaml
@@ -5,4 +5,7 @@
   namespace: openshift-logging
 spec:
   targetNamespaces:
+  - openshift-logging
+status:
+  namespaces:
   - openshift-logging
\ No newline at end of file
diff --git a/ansible/roles/ocp4-workload-logging/files/logging_subscription.yaml b/ansible/roles/ocp4-workload-logging/files/logging_subscription.yaml
deleted file mode 100644
index bfc4893..0000000
--- a/ansible/roles/ocp4-workload-logging/files/logging_subscription.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: operators.coreos.com/v1alpha1
-kind: Subscription
-metadata:
-  name: cluster-logging
-  namespace: openshift-logging
-  labels:
-    csc-owner-name: installed-redhat-openshift-logging
-    csc-owner-namespace: openshift-marketplace
-spec:
-  channel: preview
-  installPlanApproval: Automatic
-  name: cluster-logging
-  source: cluster-logging-operator
-  sourceNamespace: openshift-logging
-#  startingCSV: clusterlogging.v4.1.2
diff --git a/ansible/roles/ocp4-workload-logging/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-logging/tasks/remove_workload.yml
index 89eaa03..660bd0d 100644
--- a/ansible/roles/ocp4-workload-logging/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-logging/tasks/remove_workload.yml
@@ -1,7 +1,7 @@
 # vim: set ft=ansible
 ---
 # Implement your Workload removal tasks here
-- name: Remove Logging CR
+- name: Remove OpenShift ClusterLogging
   k8s:
     state: absent
     definition:
@@ -13,7 +13,7 @@
 
 # operator nukes all pods once cr is gone
 # waiting for just one to remain is a bit of a hack
-- name: wait for only one pod
+- name: Wait for logging pods to be terminated
   k8s_facts:
     api_version: v1
     kind: Pod
@@ -23,51 +23,7 @@
   retries: 20
   delay: 10
 
-- name: logging operatorgroup
-  k8s:
-    state: absent
-    definition:
-      apiVersion: operators.coreos.com/v1
-      kind: OperatorGroup
-      metadata:
-        name: openshift-logging-operatorgroup
-        namespace: openshift-logging
-
-### elastic catalog source config
-- name: catalog source config
-  k8s:
-    state: absent
-    definition:
-      apiVersion: operators.coreos.com/v1
-      kind: CatalogSourceConfig
-      metadata:
-        name: elasticsearch-operator
-        namespace: openshift-marketplace
-
-### elastic subscription
-- name: elastic subscription
-  k8s:
-    state: absent
-    definition:
-      apiVersion: operators.coreos.com/v1alpha1
-      kind: Subscription
-      metadata:
-        name: elasticsearch-operator
-        namespace: openshift-operators
-
-### logging catalog source config
-- name: logging catalog source config
-  k8s:
-    state: absent
-    definition:
-      apiVersion: operators.coreos.com/v1
-      kind: CatalogSourceConfig
-      metadata:
-        name: cluster-logging-operator
-        namespace: openshift-marketplace
-
-### logging subscription
-- name: logging subscription
+- name: Remove logging subscription
   k8s:
     state: absent
     definition:
@@ -77,7 +33,17 @@
         name: cluster-logging
         namespace: openshift-logging
 
-- name: logging project
+- name: Remove logging operatorgroup
+  k8s:
+    state: absent
+    definition:
+      apiVersion: operators.coreos.com/v1
+      kind: OperatorGroup
+      metadata:
+        name: openshift-logging-operatorgroup
+        namespace: openshift-logging
+
+- name: Remove openshift-logging project
   k8s: 
     state: absent
     definition:
diff --git a/ansible/roles/ocp4-workload-logging/tasks/workload.yml b/ansible/roles/ocp4-workload-logging/tasks/workload.yml
index 3b9ecd1..507dde9 100644
--- a/ansible/roles/ocp4-workload-logging/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-logging/tasks/workload.yml
@@ -1,104 +1,112 @@
-# vim: set ft=ansible
 ---
-# Implement your Workload deployment tasks here
-
-- name: Create OpenShift Objects for Logging
-  ignore_errors: yes
-  retries: 5
-  delay: 10
-  until: r_objects is succeeded
-  register: r_objects
-  k8s:
-    state: present
-    merge_type:
-    - strategic-merge
-    - merge
-    definition: "{{ lookup('file', item ) | from_yaml }}"
-  loop:
-  - ./files/namespace.yaml
-  - ./files/operatorgroup.yaml
-  - ./files/elasticsearch_catalog_source.yaml
-  - ./files/elasticsearch_subscription.yaml
-  - ./files/logging_catalog_source.yaml
-  - ./files/logging_subscription.yaml
-
-- name: Wait for Elasticsearch CRD
+- name: Check if Elasticsearch Operator is already installed
   k8s_facts:
-    api_version: apiextensions.k8s.io/v1beta1
-    kind: CustomResourceDefinition
-    name: elasticsearches.logging.openshift.io
-  register: r_elasticsearch_crd
-  retries: 20
-  delay: 10
-  ignore_errors: yes
-  until: r_elasticsearch_crd.resources | list | length == 1
+    api_version: apps/v1
+    kind: Deployment
+    namespace: "openshift-operators-redhat"
+    name: "elasticsearch-operator"
+  register: r_eo_deployment_exists
 
-- name: Notify user if Elasticsearch deployment failed
-  when: not r_elasticsearch_crd.resources | list | length == 1
-  debug:
-    msg: "user.info: *** Elasticsearch operator could not be installed ***"
-
-- name: Wait for Logging CRD
-  k8s_facts:
-    api_version: apiextensions.k8s.io/v1beta1
-    kind: CustomResourceDefinition
-    name: clusterloggings.logging.openshift.io
-  register: r_logging_crd
-  retries: 20
-  delay: 10
-  ignore_errors: yes
-  until: r_logging_crd.resources | list | length == 1
-
-- name: Notify user if Logging deployment failed
-  when: not r_logging_crd.resources | list | length == 1
-  debug:
-    msg: "user.info: *** Logging operator could not be installed ***"
-
-- name: Deploy Logging
-  when:
-  - r_logging_crd.resources | list | length == 1
-  - r_elasticsearch_crd.resources | list | length == 1
+- name: Install Elasticsearch Operator if not installed
+  when: r_eo_deployment_exists.resources | length | int == 0
   block:
-  - name: Create OpenShift Objects for ClusterLogging
+  - name: Get current stable version of Elasticsearch
+    shell: "oc get packagemanifest elasticsearch-operator -n openshift-marketplace -o jsonpath='{.status.defaultChannel}'"
+    register: r_eo_version
+
+  - name: Print Elasticsearch version to be installed
+    debug:
+      msg: "Elasticsearch version to be installed: {{ r_eo_version.stdout }}"
+
+  - name: Install Elasticsearch operator prerequisites
     k8s:
       state: present
-      merge_type:
-      - strategic-merge
-      - merge
-      definition: "{{ lookup('template', item ) | from_yaml }}"
-    ignore_errors: yes
+      definition: "{{ lookup('file', item ) | from_yaml }}"
     loop:
-    - ./templates/cluster_logging.j2
+    - ./files/eo_namespace.yaml
+    - ./files/eo_operatorgroup.yaml
+    - ./files/eo_role.yaml
+    - ./files/eo_rolebinding.yaml
 
-  - name: Sleep to give the status field a chance to populate
-    when: _logging_wait_for_deployment | bool
-    pause:
-      seconds: 60
-  
-  - name: Wait until Elasticsearch Cluster Status is green
-    when: _logging_wait_for_deployment| bool
+  - name: Install Elasticsearch operator
+    k8s:
+      state: present
+      definition: "{{ lookup('template', item ) | from_yaml }}"
+    loop:
+    - ./templates/eo_subscription.j2
+
+  - name: Wait for Elasticsearch operator to be ready
     k8s_facts:
-      api_version: logging.openshift.io/v1
-      kind: ClusterLogging
-      name: instance
-      namespace: openshift-logging
-    register: r_logging
+      api_version: apps/v1
+      kind: Deployment
+      namespace: "openshift-operators-redhat"
+      name: "elasticsearch-operator"
+    register: r_eo_deployment
     retries: 30
     delay: 10
-    ignore_errors: yes
-    until: 
-    - r_logging.resources[0].status.logStore.elasticsearchStatus[0].clusterHealth == "green"
+    until:
+    - r_eo_deployment.resources | length | int > 0
+    - r_eo_deployment.resources[0].status.availableReplicas is defined
+    - r_eo_deployment.resources[0].status.availableReplicas | int == r_eo_deployment.resources[0].spec.replicas | int
 
-  - name: Update Logging Curator configuration
-    k8s:
-      state: present
-      merge_type:
-      - strategic-merge
-      - merge
-      definition: "{{ lookup('file', item ) | from_yaml }}"
-    ignore_errors: yes
-    loop:
-    - ./files/curator_configmap.yaml
+- name: Get current stable version of Cluster Logging
+  shell: "oc get packagemanifest cluster-logging -n openshift-marketplace -o jsonpath='{.status.defaultChannel}'"
+  register: r_logging_version
+
+- name: Print Cluster Logging version to be installed
+  debug:
+    msg: "Cluster Logging version to be installed: {{ r_logging_version.stdout }}"
+
+- name: Install OpenShift Logging Operator Prerequisites
+  k8s:
+    state: present
+    definition: "{{ lookup('file', item ) | from_yaml }}"
+  loop:
+  - ./files/logging_namespace.yaml
+  - ./files/logging_operatorgroup.yaml
+  - ./files/logging_curator_configmap.yaml
+
+- name: Install OpenShift Logging Operator
+  k8s:
+    state: present
+    definition: "{{ lookup('template', item ) | from_yaml }}"
+  loop:
+  - ./templates/logging_subscription.j2
+
+- name: Wait for Cluster Logging Operator to be ready
+  k8s_facts:
+    api_version: apps/v1
+    kind: Deployment
+    namespace: "openshift-logging"
+    name: "cluster-logging-operator"
+  register: r_logging_deployment
+  retries: 30
+  delay: 10
+  until:
+  - r_logging_deployment.resources | length | int > 0
+  - r_logging_deployment.resources[0].status.availableReplicas is defined
+  - r_logging_deployment.resources[0].status.availableReplicas | int == r_logging_deployment.resources[0].spec.replicas | int
+
+- name: Create OpenShift ClusterLogging
+  k8s:
+    state: present
+    definition: "{{ lookup('template', item ) | from_yaml }}"
+  loop:
+  - ./templates/cluster_logging.j2
+
+- name: Wait until Elasticsearch cluster status is green
+  k8s_facts:
+    api_version: logging.openshift.io/v1
+    kind: ClusterLogging
+    name: instance
+    namespace: openshift-logging
+  register: r_logging
+  retries: 30
+  delay: 10
+  ignore_errors: yes
+  until: 
+  - r_logging.resources[0].status.logStore.elasticsearchStatus[0].cluster.status is defined
+  - r_logging.resources[0].status.logStore.elasticsearchStatus[0].cluster.status == "green"
 
 # Leave this as the last task in the playbook.
 - name: workload tasks complete
diff --git a/ansible/roles/ocp4-workload-logging/templates/cluster_logging.j2 b/ansible/roles/ocp4-workload-logging/templates/cluster_logging.j2
index cdec16b..be715f0 100644
--- a/ansible/roles/ocp4-workload-logging/templates/cluster_logging.j2
+++ b/ansible/roles/ocp4-workload-logging/templates/cluster_logging.j2
@@ -17,6 +17,22 @@
       nodeSelector: 
 {% if _logging_use_infra_nodes|bool %}
         node-role.kubernetes.io/infra: ""
+      tolerations:
+      - key: infra
+        value: reserved
+        effect: NoSchedule
+      - key: infra
+        value: reserved
+        effect: NoExecute
+{% elif _logging_use_elasticsearch_nodes | bool %}
+        node-role.kubernetes.io/elasticsearch: ""
+      tolerations:
+      - key: elasticsearch
+        value: reserved
+        effect: NoSchedule
+      - key: elasticsearch
+        value: reserved
+        effect: NoExecute
 {% else %}
         node-role.kubernetes.io/worker: ""
 {% endif %}
@@ -33,6 +49,22 @@
       nodeSelector: 
 {% if _logging_use_infra_nodes|bool %}
         node-role.kubernetes.io/infra: ""
+      tolerations:
+      - key: infra
+        value: reserved
+        effect: NoSchedule
+      - key: infra
+        value: reserved
+        effect: NoExecute
+{% elif _logging_use_elasticsearch_nodes | bool %}
+        node-role.kubernetes.io/elasticsearch: ""
+      tolerations:
+      - key: elasticsearch
+        value: reserved
+        effect: NoSchedule
+      - key: elasticsearch
+        value: reserved
+        effect: NoExecute
 {% else %}
         node-role.kubernetes.io/worker: ""
 {% endif %}
@@ -43,16 +75,28 @@
       nodeSelector: 
 {% if _logging_use_infra_nodes|bool %}
         node-role.kubernetes.io/infra: ""
+      tolerations:
+      - key: infra
+        value: reserved
+        effect: NoSchedule
+      - key: infra
+        value: reserved
+        effect: NoExecute
+{% elif _logging_use_elasticsearch_nodes | bool %}
+        node-role.kubernetes.io/elasticsearch: ""
+      tolerations:
+      - key: elasticsearch
+        value: reserved
+        effect: NoSchedule
+      - key: elasticsearch
+        value: reserved
+        effect: NoExecute
 {% else %}
         node-role.kubernetes.io/worker: ""
 {% endif %}
   collection:
     logs:
       type: "fluentd"
-      fluentd: {}
-      nodeSelector: 
-{% if _logging_use_infra_nodes|bool %}
-        node-role.kubernetes.io/infra: ""
-{% else %}
-        node-role.kubernetes.io/worker: ""
-{% endif %}
\ No newline at end of file
+      fluentd:
+        tolerations:
+        - operator: Exists
diff --git a/ansible/roles/ocp4-workload-logging/templates/eo_subscription.j2 b/ansible/roles/ocp4-workload-logging/templates/eo_subscription.j2
new file mode 100644
index 0000000..ec6a89d
--- /dev/null
+++ b/ansible/roles/ocp4-workload-logging/templates/eo_subscription.j2
@@ -0,0 +1,12 @@
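+# Subscribes to the Elasticsearch operator from the redhat-operators catalog,
+# using the default channel discovered from its packagemanifest (r_eo_version).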
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: "elasticsearch-operator-{{ r_eo_version.stdout }}"
+  namespace: "openshift-operators-redhat" 
+spec:
+  channel: "{{ r_eo_version.stdout }}"
+  installPlanApproval: "Automatic"
+  source: "redhat-operators"
+  sourceNamespace: "openshift-marketplace"
+  name: "elasticsearch-operator"
+  
\ No newline at end of file
diff --git a/ansible/roles/ocp4-workload-logging/templates/logging_subscription.j2 b/ansible/roles/ocp4-workload-logging/templates/logging_subscription.j2
new file mode 100644
index 0000000..98a05dc
--- /dev/null
+++ b/ansible/roles/ocp4-workload-logging/templates/logging_subscription.j2
@@ -0,0 +1,11 @@
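+# Subscribes to the Cluster Logging operator from the redhat-operators catalog,
+# using the default channel discovered from its packagemanifest (r_logging_version).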
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: cluster-logging
+  namespace: openshift-logging
+spec:
+  channel: "{{ r_logging_version.stdout }}"
+  installPlanApproval: Automatic
+  name: cluster-logging
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
diff --git a/ansible/roles/ocp4_machineset_config/README.adoc b/ansible/roles/ocp4_machineset_config/README.adoc
index 1d4a7f5..d00d63b 100644
--- a/ansible/roles/ocp4_machineset_config/README.adoc
+++ b/ansible/roles/ocp4_machineset_config/README.adoc
@@ -1,7 +1,7 @@
-# ocp4_machineset_config
+# ocp4_machineset_config_aws
 
 OpenShift 4 MachineSet management to implement custom machinesets such as to
-create dedicated compute and infra nodes.
+create dedicated compute and infra nodes on AWS.
 
 This Ansible role will query the cluster for the base worker machinesets
 provisioned by the installer and then manage custom machinesets based on the
@@ -31,14 +31,14 @@
     ocp4_machineset_config_groups:
     - name: compute
       role: compute
-      aws_instance_type: m4.large
+      aws_instance_type: m5.4xlarge
       aws_root_volume_size: 80
       autoscale: true
       total_replicas_min: 3
       total_replicas_max: 30
     - name: infra
       role: infra
-      aws_instance_type: m4.large
+      aws_instance_type: m5.4xlarge
       total_replicas: 2
 ```
 
diff --git a/ansible/roles/ocp4_machineset_config/defaults/main.yml b/ansible/roles/ocp4_machineset_config/defaults/main.yml
index 31dddc4..e5d2a18 100644
--- a/ansible/roles/ocp4_machineset_config/defaults/main.yml
+++ b/ansible/roles/ocp4_machineset_config/defaults/main.yml
@@ -2,10 +2,14 @@
 ocp4_machineset_config_domain: agnosticd.redhat.com
 ocp4_machineset_config_group_label: "{{ ocp4_machineset_config_domain }}/machineset-group"
 ocp4_machineset_config_groups: []
-ocp4_machineset_config_default_aws_instance_type: m4.large
-ocp4_machineset_config_default_aws_root_volume_size: 120
 ocp4_machineset_config_disable_base_worker_machinesets: false
 
+ocp4_machineset_config_default_aws_instance_type: m5.4xlarge
+ocp4_machineset_config_default_aws_root_volume_size: 120
+
+ocp4_machineset_config_default_osp_instance_type: "4c12g30d"
+ocp4_machineset_config_default_osp_root_volume_size: 120
+
 ocp4_cluster_autoscaler_spec:
   scaleDown:
     enabled: true
diff --git a/ansible/roles/ocp4_machineset_config/meta/main.yml b/ansible/roles/ocp4_machineset_config/meta/main.yml
index de093b9..2dfafc0 100644
--- a/ansible/roles/ocp4_machineset_config/meta/main.yml
+++ b/ansible/roles/ocp4_machineset_config/meta/main.yml
@@ -1,7 +1,7 @@
 ---
 galaxy_info:
   role_name: ocp4_machineset_config
-  author: Johnathan Kupferer
+  author: Johnathan Kupferer, Wolfgang Kulhanek
   description: Configure OpenShift 4 MachineSets
   license: MIT
   min_ansible_version: 2.7
diff --git a/ansible/roles/ocp4_machineset_config/tasks/aws.yml b/ansible/roles/ocp4_machineset_config/tasks/machineset-aws.yml
similarity index 96%
rename from ansible/roles/ocp4_machineset_config/tasks/aws.yml
rename to ansible/roles/ocp4_machineset_config/tasks/machineset-aws.yml
index 0d4360e..77062c0 100644
--- a/ansible/roles/ocp4_machineset_config/tasks/aws.yml
+++ b/ansible/roles/ocp4_machineset_config/tasks/machineset-aws.yml
@@ -1,6 +1,6 @@
 ---
 - name: Define custom machinesets
-  include_tasks: aws-machineset-group.yml
+  include_tasks: machineset-group-aws.yml
   loop: "{{ ocp4_machineset_config_groups }}"
   loop_control:
     label: "{{ machineset_group.name }}"
diff --git a/ansible/roles/ocp4_machineset_config/tasks/aws-machineset-group.yml b/ansible/roles/ocp4_machineset_config/tasks/machineset-group-aws.yml
similarity index 100%
rename from ansible/roles/ocp4_machineset_config/tasks/aws-machineset-group.yml
rename to ansible/roles/ocp4_machineset_config/tasks/machineset-group-aws.yml
diff --git a/ansible/roles/ocp4_machineset_config/tasks/machineset-group-osp.yml b/ansible/roles/ocp4_machineset_config/tasks/machineset-group-osp.yml
new file mode 100644
index 0000000..dfbcbf5
--- /dev/null
+++ b/ansible/roles/ocp4_machineset_config/tasks/machineset-group-osp.yml
@@ -0,0 +1,64 @@
+---
+- name: Define {{ machineset_group.name }} machinesets
+  k8s:
+    state: present
+    definition: "{{ lookup('template', 'osp-machineset.yml.j2') | from_yaml }}"
+  # Iterate through availability zones in reverse order
+  # as it makes the math easier to scale zone "a"
+  # before "b" to match expected behavior.
+  loop: "{{ osp_worker_availability_zones[::-1] }}"
+  loop_control:
+    label: "{{ machineset_name }}"
+    loop_var: osp_worker_availability_zone
+    index_var: loop_index
+  vars:
+    availability_zone: "{{ osp_worker_availability_zone.name }}"
+    availability_zone_region: "{{ osp_worker_availability_zone.region }}"
+    availability_zone_subnet: "{{ osp_worker_availability_zone.subnet }}"
+    osp_instance_type: >-
+      {{ machineset_group.osp_instance_type | default(default_osp_instance_type) }}
+    osp_root_volume_size: >-
+      {{ machineset_group.osp_root_volume_size | default(default_osp_root_volume_size) }}
+    machineset_name: >-
+      {{ [cluster_label, machineset_group.name, availability_zone] | join('-') }}
+    machineset_group_node_labels: >-
+      {{ machineset_group.node_labels
+       | default({'node-role.kubernetes.io/' + machineset_group.role: ''}
+           if machineset_group.role|default(False) else {})
+      }}
+    machineset_group_total_replicas: >-
+      {{ machineset_group.total_replicas
+       | default(machineset_group.total_replicas_min)
+       | default(0)
+      }}
+    machineset_replicas: >-
+      {{ (
+        (machineset_group_total_replicas|int + loop_index) / osp_worker_availability_zones|count
+      ) | int }}
+
+- name: Define {{ machineset_group.name }} machineautoscalers
+  k8s:
+    state: present
+    definition: "{{ lookup('template', 'machineautoscaler.yml.j2') | from_yaml }}"
+  # Iterate through availability zones in reverse order as it makes the math
+  # easier to scale zone "a" before "b" to match expected behavior.
+  loop: "{{ osp_worker_availability_zones[::-1] }}"
+  loop_control:
+    label: "{{ machineset_name }}"
+    loop_var: osp_worker_availability_zone
+    index_var: loop_index
+  vars:
+    availability_zone: "{{ osp_worker_availability_zone.name }}"
+    machineset_name: >-
+      {{ [cluster_label, machineset_group.name, availability_zone] | join('-') }}
+    machineset_min_replicas: >-
+      {{ (
+         (machineset_group.total_replicas_min|default(0) + loop_index) /
+         osp_worker_availability_zones|count
+      ) | int }}
+    machineset_max_replicas: >-
+      {{ (
+         (machineset_group.total_replicas_max|default(100) + loop_index) /
+         osp_worker_availability_zones|count
+      ) | int }}
+  when: machineset_group.autoscale | default(False) | bool
diff --git a/ansible/roles/ocp4_machineset_config/tasks/aws.yml b/ansible/roles/ocp4_machineset_config/tasks/machineset-osp.yml
similarity index 85%
copy from ansible/roles/ocp4_machineset_config/tasks/aws.yml
copy to ansible/roles/ocp4_machineset_config/tasks/machineset-osp.yml
index 0d4360e..c96c4e5 100644
--- a/ansible/roles/ocp4_machineset_config/tasks/aws.yml
+++ b/ansible/roles/ocp4_machineset_config/tasks/machineset-osp.yml
@@ -1,20 +1,21 @@
 ---
 - name: Define custom machinesets
-  include_tasks: aws-machineset-group.yml
+  include_tasks: machineset-group-osp.yml
   loop: "{{ ocp4_machineset_config_groups }}"
   loop_control:
     label: "{{ machineset_group.name }}"
     loop_var: machineset_group
   vars:
-    aws_coreos_ami_id: >-
+    osp_coreos_ami_id: >-
       {{ reference_provider_spec_value.ami.id }}
     aws_iam_instance_profile_id: >-
       {{ reference_provider_spec_value.iamInstanceProfile.id }}
-    aws_worker_security_groups: >-
+    osp_worker_security_groups: >-
       {{ reference_provider_spec_value.securityGroups }}
-    aws_worker_tags: >-
+    osp_worker_tags: >-
       {{ reference_provider_spec_value.tags }}
-    aws_worker_availability_zones: >-
+      
+    osp_worker_availability_zones: >-
       {{ ocp4_base_worker_machinesets
        | json_query(availability_zone_json_query)
       }}
diff --git a/ansible/roles/ocp4_machineset_config/tasks/main.yml b/ansible/roles/ocp4_machineset_config/tasks/main.yml
index 2be85f6..347445e 100644
--- a/ansible/roles/ocp4_machineset_config/tasks/main.yml
+++ b/ansible/roles/ocp4_machineset_config/tasks/main.yml
@@ -3,11 +3,11 @@
   include_tasks: set-facts.yml
 
 - name: Disable base worker machinesets
-  include_tasks: disable-base-worker-machinesets.yml
   when: disable_base_worker_machinesets|bool
+  include_tasks: disable-base-worker-machinesets.yml
 
 - name: Configure machinesets for cloud provider
-  include_tasks: "{{ cloud_provider_platform }}.yml"
+  include_tasks: "machineset-{{ cloud_provider_platform }}.yml"
 
 - name: Enable cluster autoscaler
   include_tasks: enable-cluster-autoscaler.yml
diff --git a/ansible/roles/ocp4_machineset_config/templates/aws-machineset.yml.j2 b/ansible/roles/ocp4_machineset_config/templates/aws-machineset.yml.j2
index 572d570..ae93a89 100644
--- a/ansible/roles/ocp4_machineset_config/templates/aws-machineset.yml.j2
+++ b/ansible/roles/ocp4_machineset_config/templates/aws-machineset.yml.j2
@@ -23,7 +23,6 @@
       machine.openshift.io/cluster-api-machineset: {{ machineset_name }}
   template:
     metadata:
-      creationTimestamp: null
       labels:
         {{ machineset_group_label }}: {{ machineset_group.name }}
         machine.openshift.io/cluster-api-cluster: {{ cluster_label }}
@@ -34,7 +33,6 @@
         machine.openshift.io/cluster-api-machineset: {{ machineset_name }}
     spec:
       metadata:
-        creationTimestamp: null
         labels: {{ machineset_group_node_labels | to_json }}
       providerSpec:
         value:
@@ -68,5 +66,14 @@
           tags: {{ aws_worker_tags | to_json }}
           userDataSecret:
             name: worker-user-data
+{% if machineset_group.taint | d('') | length > 0 %}
+      taints:
+      - key: "{{ machineset_group.taint }}"
+        value: "reserved"
+        effect: "NoSchedule"
+      - key: "{{ machineset_group.taint }}"
+        value: "reserved"
+        effect: "NoExecute"
+{% endif %}
       versions:
         kubelet: ""
diff --git a/ansible/roles/ocp4_machineset_config/templates/osp-machineset.yml.j2 b/ansible/roles/ocp4_machineset_config/templates/osp-machineset.yml.j2
new file mode 100644
index 0000000..400cf22
--- /dev/null
+++ b/ansible/roles/ocp4_machineset_config/templates/osp-machineset.yml.j2
@@ -0,0 +1,73 @@
+---
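+# MachineSet template for OpenStack (OSP) clusters, mirroring the AWS template.
+# Assumes the installer's naming convention for the RHCOS image and the nodes
+# subnet (<cluster infra ID>-rhcos and <cluster infra ID>-nodes).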
+apiVersion: machine.openshift.io/v1beta1
+kind: MachineSet
+metadata:
+  name: {{ machineset_name }}
+  namespace: openshift-machine-api
+  labels:
+    {{ machineset_group_label }}: {{ machineset_group.name }}
+    machine.openshift.io/cluster-api-cluster: {{ cluster_label }}
+spec:
+{% if machineset_name not in ocp4_current_machineset_names
+   or not machineset_group.autoscale|default(False)
+%}
+  replicas: {{ machineset_replicas }}
+{% endif %}
+  selector:
+    matchLabels:
+      machine.openshift.io/cluster-api-cluster: {{ cluster_label }}
+{% if 'role' in machineset_group %}
+      machine.openshift.io/cluster-api-machine-role: {{ machineset_group.role }}
+      machine.openshift.io/cluster-api-machine-type: {{ machineset_group.role }}
+{% endif %}
+      machine.openshift.io/cluster-api-machineset: {{ machineset_name }}
+  template:
+    metadata:
+      labels:
+        {{ machineset_group_label }}: {{ machineset_group.name }}
+        machine.openshift.io/cluster-api-cluster: {{ cluster_label }}
+{% if 'role' in machineset_group %}
+        machine.openshift.io/cluster-api-machine-role: {{ machineset_group.role }}
+        machine.openshift.io/cluster-api-machine-type: {{ machineset_group.role }}
+{% endif %}
+        machine.openshift.io/cluster-api-machineset: {{ machineset_name }}
+    spec:
+      metadata:
+        labels: {{ machineset_group_node_labels | to_json }}
+      providerSpec:
+        value:
+          apiVersion: openstackproviderconfig.openshift.io/v1alpha1
+          cloudName: openstack
+          cloudsSecret:
+            name: openstack-cloud-credentials
+            namespace: openshift-machine-api
+          flavor: {{ osp_instance_type }}
+          image: {{ cluster_label }}-rhcos
+          kind: OpenstackProviderSpec
+          metadata:
+            creationTimestamp: null
+          networks:
+          - filter: {}
+            subnets:
+            - filter:
+                name: {{ cluster_label }}-nodes
+                tags: openshiftClusterID={{ cluster_label }}
+          securityGroups: {{ osp_worker_security_groups | to_json }}
+          serverMetadata:
+            Name: {{ cluster_label }}-worker
+            openshiftClusterID: {{ cluster_label }}
+          tags: {{ osp_worker_tags | to_json }}
+          trunk: true
+          userDataSecret:
+            name: worker-user-data
+{% if machineset_group.taint | d('') | length > 0 %}
+      taints:
+      - key: "{{ machineset_group.taint }}"
+        value: "reserved"
+        effect: "NoSchedule"
+      - key: "{{ machineset_group.taint }}"
+        value: "reserved"
+        effect: "NoExecute"
+{% endif %}
+      versions:
+        kubelet: ""
diff --git a/ansible/roles/ocp4_machineset_config/vars/main.yml b/ansible/roles/ocp4_machineset_config/vars/main.yml
index c1451d3..0bae770 100644
--- a/ansible/roles/ocp4_machineset_config/vars/main.yml
+++ b/ansible/roles/ocp4_machineset_config/vars/main.yml
@@ -2,9 +2,16 @@
 config_domain: "{{ ocp4_machineset_config_annotation_domain }}"
 machineset_group_label: "{{ ocp4_machineset_config_group_label }}"
 machineset_groups: "{{ ocp4_machineset_config_groups }}"
+
+disable_base_worker_machinesets: >-
+  {{ ocp4_machineset_config_disable_base_worker_machinesets | bool }}
+
 default_aws_instance_type: >-
   {{ ocp4_machineset_config_default_aws_instance_type }}
 default_aws_root_volume_size: >-
   {{ ocp4_machineset_config_default_aws_root_volume_size }}
-disable_base_worker_machinesets: >-
-  {{ ocp4_machineset_config_disable_base_worker_machinesets | bool }}
+
+default_osp_instance_type: >-
+  {{ ocp4_machineset_config_default_osp_instance_type }}
+default_osp_root_volume_size: >-
+  {{ ocp4_machineset_config_default_osp_root_volume_size }}

--
Gitblit v1.9.3