ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/files/router-with-certs.yaml
@@ -1,8 +1,6 @@
 apiVersion: operator.openshift.io/v1
 kind: IngressController
 metadata:
-  finalizers:
-  - ingress.openshift.io/ingress-controller
   name: default
   namespace: openshift-ingress-operator
 spec:

ansible/roles/ocp4-workload-infra-nodes/defaults/main.yml
@@ -3,5 +3,11 @@
 ocp_username: opentlc-mgr
 silent: False
-_infra_node_replicas: 3
-_infra_node_instance_type: m4.4xlarge
+_infra_node_replicas: 1
+_infra_node_instance_type: m5.4xlarge
+
+# Create separate Elasticsearch Nodes
+# When false only Infranodes will be created
+_infra_node_elasticsearch_nodes: false
+_infra_node_elasticsearch_replicas: 1
+_infra_node_elasticsearch_instance_type: m5.4xlarge

ansible/roles/ocp4-workload-infra-nodes/files/cluster-monitoring-config.yml
New file @@ -0,0 +1,78 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
data:
  config.yaml: |
    alertmanagerMain:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute
    prometheusK8s:
      retention: 48h
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute
    prometheusOperator:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute
    grafana:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute
    k8sPrometheusAdapter:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute
    kubeStateMetrics:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute
    telemeterClient:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      tolerations:
      - key: infra
        value: reserved
        effect: NoSchedule
      - key: infra
        value: reserved
        effect: NoExecute

ansible/roles/ocp4-workload-infra-nodes/tasks/remove_workload.yml
@@ -2,20 +2,76 @@
 ---
 # Implement your Workload removal tasks here
-- name: find infra machinesets
+- name: Find Infra machinesets
   k8s_facts:
     api_version: machine.openshift.io/v1beta1
     kind: MachineSet
     namespace: openshift-machine-api
     label_selectors:
     - agnosticd.redhat.com/machineset-group = infra
-  register: machinesets_out
+  register: r_infra_machinesets
+
+- name: Find Elasticsearch machinesets
+  k8s_facts:
+    api_version: machine.openshift.io/v1beta1
+    kind: MachineSet
+    namespace: openshift-machine-api
+    label_selectors:
+    - agnosticd.redhat.com/machineset-group = elasticsearch
+  register: r_elasticsearch_machinesets
 
-- name: delete infra machinesets
+- name: Delete infra machinesets
+  when: r_infra_machinesets.resources | length | int > 0
   k8s:
     state: absent
     definition: "{{ item }}"
-  with_items: "{{ machinesets_out.resources }}"
+  with_items: "{{ r_infra_machinesets.resources }}"
+
+- name: Delete Elasticsearch machinesets
+  when: r_elasticsearch_machinesets.resources | length | int > 0
+  k8s:
+    state: absent
+    definition: "{{ item }}"
+  with_items: "{{ r_elasticsearch_machinesets.resources }}"
+
+- name: Print Warning
+  debug:
+    msg: "WARNING: Make sure to change the node selectors for Ingress Controllers, Image Registry and Monitoring"
+
+# - name: Move Ingress Controllers to Worker Nodes
+#   k8s:
+#     state: present
+#     definition:
+#       apiVersion: operator.openshift.io/v1
+#       kind: IngressController
+#       metadata:
+#         name: default
+#         namespace: openshift-ingress-operator
+#       spec:
+#         nodePlacement:
+#           nodeSelector:
+#             matchLabels:
+#               node-role.kubernetes.io/worker: ""
+
+# - name: Move Image Registry to Worker Nodes
+#   k8s:
+#     state: present
+#     definition:
+#       apiVersion: imageregistry.operator.openshift.io/v1
+#       kind: Config
+#       metadata:
+#         name: cluster
+#       spec:
+#         nodeSelector:
+#           "node-role.kubernetes.io/worker": ""
+
+# - name: Remove Cluster Monitoring Config Map
+#   k8s:
+#     state: absent
+#     api_version: v1
+#     kind: ConfigMap
+#     name: cluster-monitoring-config
+#     namespace: openshift-monitoring
 
 # Leave this as the last task in the playbook.
 - name: remove_workload tasks complete

ansible/roles/ocp4-workload-infra-nodes/tasks/workload.yml
@@ -1,4 +1,3 @@
-# vim: set ft=ansible
 ---
 - name: Configure OCP4 infra machinesets
   include_role:
@@ -7,9 +6,79 @@
     ocp4_machineset_config_groups:
     - name: infra
       role: infra
-      aws_instance_type: "{{ _infra_node_instance_type }}"
+      taint: infra
+      instance_type: "{{ _infra_node_instance_type }}"
       total_replicas: "{{ _infra_node_replicas }}"
+
+- name: Configure OCP4 Elasticsearch machinesets
+  when: _infra_node_elasticsearch_nodes | d(False) | bool
+  include_role:
+    name: ocp4_machineset_config
+  vars:
+    ocp4_machineset_config_groups:
+    - name: elasticsearch
+      role: elasticsearch
+      taint: elasticsearch
+      instance_type: "{{ _infra_node_elasticsearch_instance_type }}"
+      total_replicas: "{{ _infra_node_elasticsearch_replicas }}"
+
+- name: Wait for Infra Nodes to be available
+  k8s_facts:
+    api_version: v1
+    kind: Node
+    label_selectors:
+    - node-role.kubernetes.io/infra =
+  register: r_infra_nodes
+  until:
+  - r_infra_nodes.resources | length | int == _infra_node_replicas | int
+  delay: 30
+  retries: 15
+
+- name: Wait for Elasticsearch Nodes to be available
+  when: _infra_node_elasticsearch_nodes | d(False) | bool
+  k8s_facts:
+    api_version: v1
+    kind: Node
+    label_selectors:
+    - node-role.kubernetes.io/elasticsearch =
+  register: r_es_nodes
+  until:
+  - r_es_nodes.resources | length | int == _infra_node_elasticsearch_replicas | int
+  delay: 30
+  retries: 15
+
+# The Machine Config Daemon DaemonSet does not include
+# Universal Tolerations. So by adding taints to Infra
+# (and Elasticsearch) nodes the Machine Config Daemon
+# pods would be removed from those nodes.
+# This adds the necessary tolerations.
+# It may be 4.5 before this is fixed.
+# See https://bugzilla.redhat.com/show_bug.cgi?id=1780318
+- name: Fix Machine Config and Node CA Daemon Sets (add Tolerations for Infra and Elasticsearch nodes)
+  k8s:
+    state: present
+    merge_type:
+    - merge
+    definition: "{{ lookup('template', '{{ item }}') }}"
+  loop:
+  - ./templates/machine-config-daemonset.j2
+  - ./templates/node-ca-daemonset.j2
+
+- name: Configure Ingress Controllers and Image Registry
+  k8s:
+    state: present
+    merge_type:
+    - merge
+    definition: "{{ lookup('template', '{{ item }}') }}"
+  loop:
+  - ./templates/ingress-controller.j2
+  - ./templates/image-registry.j2
+
+- name: Create Config Map for Cluster Monitoring
+  k8s:
+    state: present
+    definition: "{{ lookup('file', './files/cluster-monitoring-config.yml') }}"
 
 # Leave this as the last task in the playbook.
 - name: workload tasks complete
+  debug:
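Once the patched DaemonSets roll out, it is worth confirming that the tolerations actually landed before relying on the tainted nodes. A minimal sketch, reusing the `k8s_facts` module already used in the tasks above; the assert task and its message are illustrative and not part of the role:

```yaml
- name: Fetch the patched Machine Config Daemon DaemonSet
  k8s_facts:
    api_version: apps/v1
    kind: DaemonSet
    namespace: openshift-machine-config-operator
    name: machine-config-daemon
  register: r_mcd

- name: Assert that the infra taint is tolerated
  assert:
    that:
    # Look for at least one toleration whose key is "infra"
    - r_mcd.resources[0].spec.template.spec.tolerations
      | selectattr('key', 'defined')
      | selectattr('key', 'equalto', 'infra')
      | list | length > 0
    fail_msg: "machine-config-daemon does not tolerate the infra taint yet"
```

ansible/roles/ocp4-workload-infra-nodes/templates/image-registry.j2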
New file @@ -0,0 +1,14 @@
apiVersion: imageregistry.operator.openshift.io/v1
kind: Config
metadata:
  name: cluster
spec:
  nodeSelector:
    "node-role.kubernetes.io/infra": ""
  tolerations:
  - effect: NoSchedule
    key: infra
    value: reserved
  - effect: NoExecute
    key: infra
    value: reserved

ansible/roles/ocp4-workload-infra-nodes/templates/ingress-controller.j2
New file @@ -0,0 +1,18 @@
apiVersion: operator.openshift.io/v1
kind: IngressController
metadata:
  name: default
  namespace: openshift-ingress-operator
spec:
  replicas: {{ _infra_node_replicas | int }}
  nodePlacement:
    nodeSelector:
      matchLabels:
        node-role.kubernetes.io/infra: ""
    tolerations:
    - effect: NoSchedule
      key: infra
      value: reserved
    - effect: NoExecute
      key: infra
      value: reserved

ansible/roles/ocp4-workload-infra-nodes/templates/machine-config-daemonset.j2
New file @@ -0,0 +1,29 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: machine-config-daemon
  namespace: openshift-machine-config-operator
spec:
  template:
    spec:
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/etcd
        operator: Exists
      - effect: NoSchedule
        key: infra
        value: reserved
      - effect: NoExecute
        key: infra
        value: reserved
{% if _infra_node_elasticsearch_nodes | bool %}
      - effect: NoSchedule
        key: elasticsearch
        value: reserved
      - effect: NoExecute
        key: elasticsearch
        value: reserved
{% endif %}

ansible/roles/ocp4-workload-infra-nodes/templates/node-ca-daemonset.j2
New file @@ -0,0 +1,29 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-ca
  namespace: openshift-image-registry
spec:
  template:
    spec:
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/etcd
        operator: Exists
      - effect: NoSchedule
        key: infra
        value: reserved
      - effect: NoExecute
        key: infra
        value: reserved
{% if _infra_node_elasticsearch_nodes | bool %}
      - effect: NoSchedule
        key: elasticsearch
        value: reserved
      - effect: NoExecute
        key: elasticsearch
        value: reserved
{% endif %}

ansible/roles/ocp4-workload-logging/defaults/main.yml
@@ -3,8 +3,14 @@
 ocp_username: opentlc-mgr
 silent: False
-_logging_elasticsearch_replicas: 3
-_logging_elasticsearch_memory_request: "4Gi"
+_logging_elasticsearch_replicas: 1
+_logging_elasticsearch_memory_request: "8Gi"
 _logging_elasticsearch_storage_request: "200Gi"
-_logging_use_infra_nodes: True
 _logging_wait_for_deployment: True
+
+# Set to true to place pods on nodes with label
+# node-role.kubernetes.io/infra: ""
+_logging_use_infra_nodes: False
+# Set to true to place pods on nodes with label
+# node-role.kubernetes.io/elasticsearch: ""
+_logging_use_elasticsearch_nodes: True
+# If both are false the Logging Components will run on Worker nodes
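The new defaults target the dedicated Elasticsearch nodes. A hypothetical sketch of overriding them when applying the workload from a playbook — variable names come from the defaults above, while the play and host wiring are illustrative only:

```yaml
- hosts: localhost
  tasks:
  - name: Apply the logging workload on infra nodes instead
    include_role:
      name: ocp4-workload-logging
    vars:
      _logging_elasticsearch_replicas: 3
      _logging_use_infra_nodes: True
      _logging_use_elasticsearch_nodes: False
```

ansible/roles/ocp4-workload-logging/files/elasticsearch_catalog_source.yaml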
File was deleted

ansible/roles/ocp4-workload-logging/files/elasticsearch_subscription.yaml
File was deleted

ansible/roles/ocp4-workload-logging/files/eo_namespace.yaml
New file @@ -0,0 +1,8 @@
apiVersion: v1
kind: Namespace
metadata:
  name: openshift-operators-redhat
  annotations:
    openshift.io/node-selector: ""
  labels:
    openshift.io/cluster-monitoring: "true"

ansible/roles/ocp4-workload-logging/files/eo_operatorgroup.yaml
New file @@ -0,0 +1,6 @@
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: openshift-operators-redhat
  namespace: openshift-operators-redhat
spec: {}

ansible/roles/ocp4-workload-logging/files/eo_role.yaml
New file @@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: prometheus-k8s
  namespace: openshift-operators-redhat
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - pods
  verbs:
  - get
  - list
  - watch

ansible/roles/ocp4-workload-logging/files/eo_rolebinding.yaml
New file @@ -0,0 +1,14 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: prometheus-k8s
  namespace: openshift-operators-redhat
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: prometheus-k8s
subjects:
- kind: ServiceAccount
  name: prometheus-k8s
  namespace: openshift-operators-redhat

ansible/roles/ocp4-workload-logging/files/logging_catalog_source.yaml
File was deleted

ansible/roles/ocp4-workload-logging/files/logging_curator_configmap.yaml
ansible/roles/ocp4-workload-logging/files/logging_namespace.yaml
File was renamed from ansible/roles/ocp4-workload-logging/files/namespace.yaml
@@ -3,7 +3,7 @@
 metadata:
   name: openshift-logging
   annotations:
-    openshift.io/node-selector: ""
+    openshift.io/node-selector: ""
   labels:
     openshift.io/cluster-logging: "true"
     openshift.io/cluster-monitoring: "true"

ansible/roles/ocp4-workload-logging/files/logging_operatorgroup.yaml
File was renamed from ansible/roles/ocp4-workload-logging/files/operatorgroup.yaml
@@ -5,4 +5,7 @@
   namespace: openshift-logging
 spec:
   targetNamespaces:
   - openshift-logging
+status:
+  namespaces:
+  - openshift-logging

ansible/roles/ocp4-workload-logging/files/logging_subscription.yaml
File was deleted

ansible/roles/ocp4-workload-logging/tasks/remove_workload.yml
@@ -1,7 +1,7 @@
 # vim: set ft=ansible
 ---
 # Implement your Workload removal tasks here
-- name: Remove Logging CR
+- name: Remove OpenShift ClusterLogging
   k8s:
     state: absent
     definition:
@@ -13,7 +13,7 @@
 # operator nukes all pods once cr is gone
 # waiting for just one to remain is a bit of a hack
-- name: wait for only one pod
+- name: Wait for logging pods to be terminated
   k8s_facts:
     api_version: v1
     kind: Pod
@@ -23,51 +23,7 @@
   retries: 20
   delay: 10
 
-- name: logging operatorgroup
-  k8s:
-    state: absent
-    definition:
-      apiVersion: operators.coreos.com/v1
-      kind: OperatorGroup
-      metadata:
-        name: openshift-logging-operatorgroup
-        namespace: openshift-logging
-
-### elastic catalog source config
-- name: catalog source config
-  k8s:
-    state: absent
-    definition:
-      apiVersion: operators.coreos.com/v1
-      kind: CatalogSourceConfig
-      metadata:
-        name: elasticsearch-operator
-        namespace: openshift-marketplace
-
-### elastic subscription
-- name: elastic subscription
-  k8s:
-    state: absent
-    definition:
-      apiVersion: operators.coreos.com/v1alpha1
-      kind: Subscription
-      metadata:
-        name: elasticsearch-operator
-        namespace: openshift-operators
-
-### logging catalog source config
-- name: logging catalog source config
-  k8s:
-    state: absent
-    definition:
-      apiVersion: operators.coreos.com/v1
-      kind: CatalogSourceConfig
-      metadata:
-        name: cluster-logging-operator
-        namespace: openshift-marketplace
-
-### logging subscription
-- name: logging subscription
+- name: Remove logging subscription
   k8s:
     state: absent
     definition:
@@ -77,7 +33,17 @@
         name: cluster-logging
         namespace: openshift-logging
 
-- name: logging project
+- name: Remove logging operatorgroup
+  k8s:
+    state: absent
+    definition:
+      apiVersion: operators.coreos.com/v1
+      kind: OperatorGroup
+      metadata:
+        name: openshift-logging-operatorgroup
+        namespace: openshift-logging
+
+- name: Remove openshift-logging project
   k8s:
     state: absent
     definition:

ansible/roles/ocp4-workload-logging/tasks/workload.yml
@@ -1,104 +1,112 @@
 # vim: set ft=ansible
 ---
 # Implement your Workload deployment tasks here
-- name: Create OpenShift Objects for Logging
-  ignore_errors: yes
-  retries: 5
-  delay: 10
-  until: r_objects is succeeded
-  register: r_objects
-  k8s:
-    state: present
-    merge_type:
-    - strategic-merge
-    - merge
-    definition: "{{ lookup('file', item ) | from_yaml }}"
-  loop:
-  - ./files/namespace.yaml
-  - ./files/operatorgroup.yaml
-  - ./files/elasticsearch_catalog_source.yaml
-  - ./files/elasticsearch_subscription.yaml
-  - ./files/logging_catalog_source.yaml
-  - ./files/logging_subscription.yaml
-
-- name: Wait for Elasticsearch CRD
-  k8s_facts:
-    api_version: apiextensions.k8s.io/v1beta1
-    kind: CustomResourceDefinition
-    name: elasticsearches.logging.openshift.io
-  register: r_elasticsearch_crd
-  retries: 20
-  delay: 10
-  ignore_errors: yes
-  until: r_elasticsearch_crd.resources | list | length == 1
-
-- name: Notify user if Elasticsearch deployment failed
-  when: not r_elasticsearch_crd.resources | list | length == 1
-  debug:
-    msg: "user.info: *** Elasticsearch operator could not be installed ***"
-
-- name: Wait for Logging CRD
-  k8s_facts:
-    api_version: apiextensions.k8s.io/v1beta1
-    kind: CustomResourceDefinition
-    name: clusterloggings.logging.openshift.io
-  register: r_logging_crd
-  retries: 20
-  delay: 10
-  ignore_errors: yes
-  until: r_logging_crd.resources | list | length == 1
-
-- name: Notify user if Logging deployment failed
-  when: not r_logging_crd.resources | list | length == 1
-  debug:
-    msg: "user.info: *** Logging operator could not be installed ***"
-
-- name: Deploy Logging
-  when:
-  - r_logging_crd.resources | list | length == 1
-  - r_elasticsearch_crd.resources | list | length == 1
-  block:
-  - name: Create OpenShift Objects for ClusterLogging
-    k8s:
-      state: present
-      merge_type:
-      - strategic-merge
-      - merge
-      definition: "{{ lookup('template', item ) | from_yaml }}"
-    ignore_errors: yes
-    loop:
-    - ./templates/cluster_logging.j2
-
-  - name: Sleep to give the status field a chance to populate
-    when: _logging_wait_for_deployment | bool
-    pause:
-      seconds: 60
-
-  - name: Wait until Elasticsearch Cluster Status is green
-    when: _logging_wait_for_deployment| bool
-    k8s_facts:
-      api_version: logging.openshift.io/v1
-      kind: ClusterLogging
-      name: instance
-      namespace: openshift-logging
-    register: r_logging
-    retries: 30
-    delay: 10
-    ignore_errors: yes
-    until:
-    - r_logging.resources[0].status.logStore.elasticsearchStatus[0].clusterHealth == "green"
-
-  - name: Update Logging Curator configuration
-    k8s:
-      state: present
-      merge_type:
-      - strategic-merge
-      - merge
-      definition: "{{ lookup('file', item ) | from_yaml }}"
-    ignore_errors: yes
-    loop:
-    - ./files/curator_configmap.yaml
+- name: Check if Elasticsearch Operator is already installed
+  k8s_facts:
+    api_version: v1
+    kind: Deployment
+    namespace: "openshift-operators-redhat"
+    name: "elasticsearch-operator"
+  register: r_eo_deployment_exists
+
+- name: Install Elasticsearch Operator if not installed
+  when: r_eo_deployment_exists.resources | length | int == 0
+  block:
+  - name: Get current stable version of Elasticsearch
+    shell: "oc get packagemanifest elasticsearch-operator -n openshift-marketplace -o jsonpath='{.status.defaultChannel}'"
+    register: r_eo_version
+
+  - name: Print Elasticsearch version to be installed
+    debug:
+      msg: "Elasticsearch version to be installed: {{ r_eo_version.stdout }}"
+
+  - name: Install Elasticsearch operator prerequisites
+    k8s:
+      state: present
+      definition: "{{ lookup('file', item ) | from_yaml }}"
+    loop:
+    - ./files/eo_namespace.yaml
+    - ./files/eo_operatorgroup.yaml
+    - ./files/eo_role.yaml
+    - ./files/eo_rolebinding.yaml
+
+  - name: Install Elasticsearch operator
+    k8s:
+      state: present
+      definition: "{{ lookup('template', item ) | from_yaml }}"
+    loop:
+    - ./templates/eo_subscription.j2
+
+  - name: Wait for Elasticsearch operator to be ready
+    k8s_facts:
+      api_version: v1
+      kind: Deployment
+      namespace: "openshift-operators-redhat"
+      name: "elasticsearch-operator"
+    register: r_eo_deployment
+    retries: 30
+    delay: 10
+    until:
+    - r_eo_deployment.resources | length | int > 0
+    - r_eo_deployment.resources[0].status.availableReplicas is defined
+    - r_eo_deployment.resources[0].status.availableReplicas | int == r_eo_deployment.resources[0].spec.replicas | int
+
+- name: Get current stable version of Cluster Logging
+  shell: "oc get packagemanifest cluster-logging -n openshift-marketplace -o jsonpath='{.status.defaultChannel}'"
+  register: r_logging_version
+
+- name: Print Cluster Logging version to be installed
+  debug:
+    msg: "Cluster Logging version to be installed: {{ r_logging_version.stdout }}"
+
+- name: Install OpenShift Logging Operator Prerequisites
+  k8s:
+    state: present
+    definition: "{{ lookup('file', item ) | from_yaml }}"
+  loop:
+  - ./files/logging_namespace.yaml
+  - ./files/logging_operatorgroup.yaml
+  - ./files/logging_curator_configmap.yaml
+
+- name: Install OpenShift Logging Operator
+  k8s:
+    state: present
+    definition: "{{ lookup('template', item ) | from_yaml }}"
+  loop:
+  - ./templates/logging_subscription.j2
+
+- name: Wait for Cluster Logging Operator to be ready
+  k8s_facts:
+    api_version: v1
+    kind: Deployment
+    namespace: "openshift-logging"
+    name: "cluster-logging-operator"
+  register: r_logging_deployment
+  retries: 30
+  delay: 10
+  until:
+  - r_logging_deployment.resources | length | int > 0
+  - r_logging_deployment.resources[0].status.availableReplicas is defined
+  - r_logging_deployment.resources[0].status.availableReplicas | int == r_logging_deployment.resources[0].spec.replicas | int
+
+- name: Create OpenShift ClusterLogging
+  k8s:
+    state: present
+    definition: "{{ lookup('template', item ) | from_yaml }}"
+  loop:
+  - ./templates/cluster_logging.j2
+
+- name: Wait until Elasticsearch cluster status is green
+  k8s_facts:
+    api_version: logging.openshift.io/v1
+    kind: ClusterLogging
+    name: instance
+    namespace: openshift-logging
+  register: r_logging
+  retries: 30
+  delay: 10
+  ignore_errors: yes
+  until:
+  - r_logging.resources[0].status.logStore.elasticsearchStatus[0].cluster.status is defined
+  - r_logging.resources[0].status.logStore.elasticsearchStatus[0].cluster.status == "green"
 
 # Leave this as the last task in the playbook.
 - name: workload tasks complete
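The `oc get packagemanifest` calls read each operator's default channel from OLM. The same lookup could be done without shelling out — a sketch, assuming the PackageManifest API (`packages.operators.coreos.com`) is served on the cluster, which is an assumption not taken from this diff:

```yaml
- name: Get default channel of the elasticsearch-operator package
  k8s_facts:
    api_version: packages.operators.coreos.com/v1
    kind: PackageManifest
    namespace: openshift-marketplace
    name: elasticsearch-operator
  register: r_eo_packagemanifest

- name: Set channel fact for the subscription template
  set_fact:
    # Equivalent to the jsonpath '{.status.defaultChannel}' query above
    eo_default_channel: "{{ r_eo_packagemanifest.resources[0].status.defaultChannel }}"
```

ansible/roles/ocp4-workload-logging/templates/cluster_logging.j2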
@@ -17,6 +17,22 @@
       nodeSelector:
 {% if _logging_use_infra_nodes|bool %}
         node-role.kubernetes.io/infra: ""
+      tolerations:
+      - key: infra
+        value: reserved
+        effect: NoSchedule
+      - key: infra
+        value: reserved
+        effect: NoExecute
+{% elif _logging_use_elasticsearch_nodes | bool %}
+        node-role.kubernetes.io/elasticsearch: ""
+      tolerations:
+      - key: elasticsearch
+        value: reserved
+        effect: NoSchedule
+      - key: elasticsearch
+        value: reserved
+        effect: NoExecute
 {% else %}
         node-role.kubernetes.io/worker: ""
 {% endif %}
@@ -33,6 +49,22 @@
       nodeSelector:
 {% if _logging_use_infra_nodes|bool %}
         node-role.kubernetes.io/infra: ""
+      tolerations:
+      - key: infra
+        value: reserved
+        effect: NoSchedule
+      - key: infra
+        value: reserved
+        effect: NoExecute
+{% elif _logging_use_elasticsearch_nodes | bool %}
+        node-role.kubernetes.io/elasticsearch: ""
+      tolerations:
+      - key: elasticsearch
+        value: reserved
+        effect: NoSchedule
+      - key: elasticsearch
+        value: reserved
+        effect: NoExecute
 {% else %}
         node-role.kubernetes.io/worker: ""
 {% endif %}
@@ -43,16 +75,28 @@
       nodeSelector:
 {% if _logging_use_infra_nodes|bool %}
         node-role.kubernetes.io/infra: ""
+      tolerations:
+      - key: infra
+        value: reserved
+        effect: NoSchedule
+      - key: infra
+        value: reserved
+        effect: NoExecute
+{% elif _logging_use_elasticsearch_nodes | bool %}
+        node-role.kubernetes.io/elasticsearch: ""
+      tolerations:
+      - key: elasticsearch
+        value: reserved
+        effect: NoSchedule
+      - key: elasticsearch
+        value: reserved
+        effect: NoExecute
 {% else %}
         node-role.kubernetes.io/worker: ""
 {% endif %}
   collection:
     logs:
       type: "fluentd"
-      fluentd: {}
-      nodeSelector:
-{% if _logging_use_infra_nodes|bool %}
-        node-role.kubernetes.io/infra: ""
-{% else %}
-        node-role.kubernetes.io/worker: ""
-{% endif %}
+      fluentd:
+        tolerations:
+        - operator: Exists
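For instance, with the new defaults (`_logging_use_infra_nodes: False`, `_logging_use_elasticsearch_nodes: True`) the `elif` branch renders each placement block roughly as follows (surrounding indentation depends on the rest of the template):

```yaml
nodeSelector:
  node-role.kubernetes.io/elasticsearch: ""
tolerations:
- key: elasticsearch
  value: reserved
  effect: NoSchedule
- key: elasticsearch
  value: reserved
  effect: NoExecute
```

ansible/roles/ocp4-workload-logging/templates/eo_subscription.j2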
New file @@ -0,0 +1,12 @@
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: "elasticsearch-operator-{{ r_eo_version.stdout }}"
  namespace: "openshift-operators-redhat"
spec:
  channel: "{{ r_eo_version.stdout }}"
  installPlanApproval: "Automatic"
  source: "redhat-operators"
  sourceNamespace: "openshift-marketplace"
  name: "elasticsearch-operator"

ansible/roles/ocp4-workload-logging/templates/logging_subscription.j2
New file @@ -0,0 +1,11 @@
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: cluster-logging
  namespace: openshift-logging
spec:
  channel: "{{ r_logging_version.stdout }}"
  installPlanApproval: Automatic
  name: cluster-logging
  source: redhat-operators
  sourceNamespace: openshift-marketplace

ansible/roles/ocp4_machineset_config/README.adoc
@@ -1,7 +1,7 @@
-# ocp4_machineset_config
+# ocp4_machineset_config_aws
 
 OpenShift 4 MachineSet management to implement custom machinesets such as to
-create dedicated compute and infra nodes.
+create dedicated compute and infra nodes on AWS.
 
 This Ansible role will query the cluster for the base worker machinesets
 provisioned by the installer and then manage custom machinesets based on the
@@ -31,14 +31,14 @@
 ocp4_machineset_config_groups:
 - name: compute
   role: compute
-  aws_instance_type: m4.large
+  aws_instance_type: m5.4xlarge
   aws_root_volume_size: 80
   autoscale: true
   total_replicas_min: 3
   total_replicas_max: 30
 - name: infra
   role: infra
-  aws_instance_type: m4.large
+  aws_instance_type: m5.4xlarge
   total_replicas: 2
 ```
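A minimal sketch of applying the role from a playbook, reusing the group definition from the README above; the play targeting and variable wiring are illustrative, not part of the role:

```yaml
- hosts: localhost
  tasks:
  - name: Manage custom machinesets
    include_role:
      name: ocp4_machineset_config
    vars:
      ocp4_machineset_config_groups:
      - name: infra
        role: infra
        aws_instance_type: m5.4xlarge
        total_replicas: 2
```

ansible/roles/ocp4_machineset_config/defaults/main.yml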
@@ -2,10 +2,14 @@
 ocp4_machineset_config_domain: agnosticd.redhat.com
 ocp4_machineset_config_group_label: "{{ ocp4_machineset_config_domain }}/machineset-group"
 ocp4_machineset_config_groups: []
-ocp4_machineset_config_default_aws_instance_type: m4.large
-ocp4_machineset_config_default_aws_root_volume_size: 120
 ocp4_machineset_config_disable_base_worker_machinesets: false
+ocp4_machineset_config_default_aws_instance_type: m5.4xlarge
+ocp4_machineset_config_default_aws_root_volume_size: 120
+ocp4_machineset_config_default_osp_instance_type: "4c12g30d"
+ocp4_machineset_config_default_osp_root_volume_size: 120
 ocp4_cluster_autoscaler_spec:
   scaleDown:
     enabled: true

ansible/roles/ocp4_machineset_config/meta/main.yml
@@ -1,7 +1,7 @@
 ---
 galaxy_info:
   role_name: ocp4_machineset_config
-  author: Johnathan Kupferer
+  author: Johnathan Kupferer, Wolfgang Kulhanek
   description: Configure OpenShift 4 MachineSets
   license: MIT
   min_ansible_version: 2.7

ansible/roles/ocp4_machineset_config/tasks/machineset-aws.yml
File was renamed from ansible/roles/ocp4_machineset_config/tasks/aws.yml
@@ -1,6 +1,6 @@
 ---
 - name: Define custom machinesets
-  include_tasks: aws-machineset-group.yml
+  include_tasks: machineset-group-aws.yml
   loop: "{{ ocp4_machineset_config_groups }}"
   loop_control:
     label: "{{ machineset_group.name }}"

ansible/roles/ocp4_machineset_config/tasks/machineset-group-aws.yml
ansible/roles/ocp4_machineset_config/tasks/machineset-group-osp.yml
New file @@ -0,0 +1,64 @@
---
- name: Define {{ machineset_group.name }} machinesets
  k8s:
    state: present
    definition: "{{ lookup('template', 'osp-machineset.yml.j2') | from_yaml }}"
  # Iterate through availability zones in reverse order
  # as it makes the math easier to scale zone "a"
  # before "b" to match expected behavior.
  loop: "{{ osp_worker_availability_zones[::-1] }}"
  loop_control:
    label: "{{ machineset_name }}"
    loop_var: osp_worker_availability_zone
    index_var: loop_index
  vars:
    availability_zone: "{{ osp_worker_availability_zone.name }}"
    availability_zone_region: "{{ osp_worker_availability_zone.region }}"
    availability_zone_subnet: "{{ osp_worker_availability_zone.subnet }}"
    osp_instance_type: >-
      {{ machineset_group.osp_instance_type | default(default_osp_instance_type) }}
    osp_root_volume_size: >-
      {{ machineset_group.osp_root_volume_size | default(default_osp_root_volume_size) }}
    machineset_name: >-
      {{ [cluster_label, machineset_group.name, availability_zone] | join('-') }}
    machineset_group_node_labels: >-
      {{ machineset_group.node_labels
       | default({'node-role.kubernetes.io/' + machineset_group.role: ''}
         if machineset_group.role | default(False) else {})
      }}
    machineset_group_total_replicas: >-
      {{ machineset_group.total_replicas | default(machineset_group.total_replicas_min) | default(0) }}
    machineset_replicas: >-
      {{ ((machineset_group_total_replicas | int + loop_index) / osp_worker_availability_zones | count) | int }}

- name: Define {{ machineset_group.name }} machineautoscalers
  k8s:
    state: present
    definition: "{{ lookup('template', 'machineautoscaler.yml.j2') | from_yaml }}"
  # Iterate through availability zones in reverse order as it makes the math
  # easier to scale zone "a" before "b" to match expected behavior.
  loop: "{{ osp_worker_availability_zones[::-1] }}"
  loop_control:
    label: "{{ machineset_name }}"
    loop_var: osp_worker_availability_zone
    index_var: loop_index
  vars:
    availability_zone: "{{ osp_worker_availability_zone.name }}"
    machineset_name: >-
      {{ [cluster_label, machineset_group.name, availability_zone] | join('-') }}
    machineset_min_replicas: >-
      {{ ((machineset_group.total_replicas_min | default(0) + loop_index) / osp_worker_availability_zones | count) | int }}
    machineset_max_replicas: >-
      {{ ((machineset_group.total_replicas_max | default(100) + loop_index) / osp_worker_availability_zones | count) | int }}
  when: machineset_group.autoscale | default(False) | bool
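The reversed iteration plus the `(total + loop_index) / zone_count | int` expression spreads replicas so that earlier-named zones absorb the remainder. A worked example, assuming three zones and `total_replicas: 4` (values chosen purely for illustration):

```yaml
# zones: [a, b, c]  ->  reversed loop order: [c, b, a]
# loop_index 0 (zone c): (4 + 0) / 3 | int = 1 replica
# loop_index 1 (zone b): (4 + 1) / 3 | int = 1 replica
# loop_index 2 (zone a): (4 + 2) / 3 | int = 2 replicas
# Total: 4 replicas, with zone "a" scaled up first, as the comment intends.
```

ansible/roles/ocp4_machineset_config/tasks/machineset-osp.yml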
File was copied from ansible/roles/ocp4_machineset_config/tasks/aws.yml
@@ -1,20 +1,21 @@
 ---
 - name: Define custom machinesets
-  include_tasks: aws-machineset-group.yml
+  include_tasks: machineset-group-osp.yml
   loop: "{{ ocp4_machineset_config_groups }}"
   loop_control:
     label: "{{ machineset_group.name }}"
     loop_var: machineset_group
   vars:
-    aws_coreos_ami_id: >-
+    osp_coreos_ami_id: >-
       {{ reference_provider_spec_value.ami.id }}
-    aws_iam_instance_profile_id: >-
-      {{ reference_provider_spec_value.iamInstanceProfile.id }}
-    aws_worker_security_groups: >-
+    osp_worker_security_groups: >-
       {{ reference_provider_spec_value.securityGroups }}
-    aws_worker_tags: >-
+    osp_worker_tags: >-
       {{ reference_provider_spec_value.tags }}
-    aws_worker_availability_zones: >-
+    osp_worker_availability_zones: >-
       {{ ocp4_base_worker_machinesets | json_query(availability_zone_json_query) }}

ansible/roles/ocp4_machineset_config/tasks/main.yml
@@ -3,11 +3,11 @@
   include_tasks: set-facts.yml
 
 - name: Disable base worker machinesets
-  include_tasks: disable-base-worker-machinesets.yml
   when: disable_base_worker_machinesets|bool
+  include_tasks: disable-base-worker-machinesets.yml
 
 - name: Configure machinesets for cloud provider
-  include_tasks: "{{ cloud_provider_platform }}.yml"
+  include_tasks: "machineset-{{ cloud_provider_platform }}.yml"
 
 - name: Enable cluster autoscaler
   include_tasks: enable-cluster-autoscaler.yml

ansible/roles/ocp4_machineset_config/templates/aws-machineset.yml.j2
@@ -23,7 +23,6 @@
       machine.openshift.io/cluster-api-machineset: {{ machineset_name }}
   template:
     metadata:
-      creationTimestamp: null
       labels:
         {{ machineset_group_label }}: {{ machineset_group.name }}
         machine.openshift.io/cluster-api-cluster: {{ cluster_label }}
@@ -34,7 +33,6 @@
         machine.openshift.io/cluster-api-machineset: {{ machineset_name }}
     spec:
       metadata:
-        creationTimestamp: null
        labels: {{ machineset_group_node_labels | to_json }}
       providerSpec:
         value:
@@ -68,5 +66,14 @@
           tags: {{ aws_worker_tags | to_json }}
           userDataSecret:
             name: worker-user-data
+{% if machineset_group.taint | d('') | length > 0 %}
+      taints:
+      - key: "{{ machineset_group.taint }}"
+        value: "reserved"
+        effect: "NoSchedule"
+      - key: "{{ machineset_group.taint }}"
+        value: "reserved"
+        effect: "NoExecute"
+{% endif %}
       versions:
         kubelet: ""
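With this block in place, a single `taint` key on a machineset group definition is enough to taint every node the group creates. For example, as used by the infra-nodes workload earlier in this change:

```yaml
ocp4_machineset_config_groups:
- name: infra
  role: infra
  taint: infra              # rendered into NoSchedule + NoExecute taints
  instance_type: m5.4xlarge
  total_replicas: 1
```

ansible/roles/ocp4_machineset_config/templates/osp-machineset.yml.j2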
New file @@ -0,0 +1,73 @@
---
apiVersion: machine.openshift.io/v1beta1
kind: MachineSet
metadata:
  name: {{ machineset_name }}
  namespace: openshift-machine-api
  labels:
    {{ machineset_group_label }}: {{ machineset_group.name }}
    machine.openshift.io/cluster-api-cluster: {{ cluster_label }}
spec:
{% if machineset_name not in ocp4_current_machineset_names or not machineset_group.autoscale|default(False) %}
  replicas: {{ machineset_replicas }}
{% endif %}
  selector:
    matchLabels:
      machine.openshift.io/cluster-api-cluster: {{ cluster_label }}
{% if 'role' in machineset_group %}
      machine.openshift.io/cluster-api-machine-role: {{ machineset_group.role }}
      machine.openshift.io/cluster-api-machine-type: {{ machineset_group.role }}
{% endif %}
      machine.openshift.io/cluster-api-machineset: {{ machineset_name }}
  template:
    metadata:
      labels:
        {{ machineset_group_label }}: {{ machineset_group.name }}
        machine.openshift.io/cluster-api-cluster: {{ cluster_label }}
{% if 'role' in machineset_group %}
        machine.openshift.io/cluster-api-machine-role: {{ machineset_group.role }}
        machine.openshift.io/cluster-api-machine-type: {{ machineset_group.role }}
{% endif %}
        machine.openshift.io/cluster-api-machineset: {{ machineset_name }}
    spec:
      metadata:
        labels: {{ machineset_group_node_labels | to_json }}
      providerSpec:
        value:
          apiVersion: openstackproviderconfig.openshift.io/v1alpha1
          cloudName: openstack
          cloudsSecret:
            name: openstack-cloud-credentials
            namespace: openshift-machine-api
          flavor: {{ osp_instance_type }}
          image: {{ cluster_label }}-rhcos
          kind: OpenstackProviderSpec
          metadata:
            creationTimestamp: null
          networks:
          - filter: {}
            subnets:
            - filter:
                name: {{ cluster_label }}-nodes
                tags: openshiftClusterID={{ cluster_label }}
          securityGroups: {{ osp_worker_security_groups | to_json }}
          serverMetadata:
            Name: {{ cluster_label }}-worker
            openshiftClusterID: {{ cluster_label }}
          tags: {{ osp_worker_tags | to_json }}
          trunk: true
          userDataSecret:
            name: worker-user-data
{% if machineset_group.taint | d('') | length > 0 %}
      taints:
      - key: "{{ machineset_group.taint }}"
        value: "reserved"
        effect: "NoSchedule"
      - key: "{{ machineset_group.taint }}"
        value: "reserved"
        effect: "NoExecute"
{% endif %}
      versions:
        kubelet: ""

ansible/roles/ocp4_machineset_config/vars/main.yml
@@ -2,9 +2,16 @@
 config_domain: "{{ ocp4_machineset_config_annotation_domain }}"
 machineset_group_label: "{{ ocp4_machineset_config_group_label }}"
 machineset_groups: "{{ ocp4_machineset_config_groups }}"
-disable_base_worker_machinesets: >-
-  {{ ocp4_machineset_config_disable_base_worker_machinesets | bool }}
 default_aws_instance_type: >-
   {{ ocp4_machineset_config_default_aws_instance_type }}
 default_aws_root_volume_size: >-
   {{ ocp4_machineset_config_default_aws_root_volume_size }}
+disable_base_worker_machinesets: >-
+  {{ ocp4_machineset_config_disable_base_worker_machinesets | bool }}
+default_osp_instance_type: >-
+  {{ ocp4_machineset_config_default_osp_instance_type }}
+default_osp_root_volume_size: >-
+  {{ ocp4_machineset_config_default_osp_root_volume_size }}