ansible/roles/ocp4-workload-istio-controlplane/defaults/main.yml | ●●●●● patch | view | raw | blame | history | |
ansible/roles/ocp4-workload-istio-controlplane/readme.adoc | ●●●●● patch | view | raw | blame | history | |
ansible/roles/ocp4-workload-istio-controlplane/tasks/main.yml | ●●●●● patch | view | raw | blame | history | |
ansible/roles/ocp4-workload-istio-controlplane/tasks/post_workload.yml | ●●●●● patch | view | raw | blame | history | |
ansible/roles/ocp4-workload-istio-controlplane/tasks/pre_workload.yml | ●●●●● patch | view | raw | blame | history | |
ansible/roles/ocp4-workload-istio-controlplane/tasks/remove_workload.yml | ●●●●● patch | view | raw | blame | history | |
ansible/roles/ocp4-workload-istio-controlplane/tasks/workload.yml | ●●●●● patch | view | raw | blame | history |
ansible/roles/ocp4-workload-istio-controlplane/defaults/main.yml
New file @@ -0,0 +1,7 @@
---
# Whether the four task files in tasks/main.yml run with privilege escalation
become_override: false
# OpenShift user the workload is assigned to
ocp_username: opentlc-mgr
# When true, suppresses the "tasks completed successfully." debug messages
silent: false
# NOTE(review): the two _infra_node_* variables are not referenced by any
# task in this role — possibly copied from ocp4-workload-infra-nodes;
# confirm before removing.
_infra_node_replicas: 3
_infra_node_instance_type: m4.4xlarge
ansible/roles/ocp4-workload-istio-controlplane/readme.adoc
New file @@ -0,0 +1,115 @@ = ocp4-workload-istio-controlplane - Deploy the Istio control plane == Role overview * This role deploys the Istio control plane. It consists of the following playbooks: ** Playbook: link:./tasks/pre_workload.yml[pre_workload.yml] - Sets up an environment for the workload deployment. *** Debug task will print out: `pre_workload Tasks completed successfully.` ** Playbook: link:./tasks/workload.yml[workload.yml] - Used to deploy Istio *** Debug task will print out: `workload Tasks completed successfully.` ** Playbook: link:./tasks/post_workload.yml[post_workload.yml] - Used to configure the workload after deployment *** This role doesn't do anything here *** Debug task will print out: `post_workload Tasks completed successfully.` ** Playbook: link:./tasks/remove_workload.yml[remove_workload.yml] - Used to delete the workload *** This role removes the Istio control plane, the operator, and both projects *** Debug task will print out: `remove_workload Tasks completed successfully.` == Review the defaults variable file * This file link:./defaults/main.yml[./defaults/main.yml] contains all the variables you need to define to control the deployment of your workload. * The variable *ocp_username* is mandatory to assign the workload to the correct OpenShift user. * A variable *silent=True* can be passed to suppress debug messages. 
* You can modify any of these default values by adding `-e "variable_name=variable_value"` to the command line === Deploy a Workload with the `ocp-workload` playbook [Mostly for testing] ---- TARGET_HOST="bastion.na311.openshift.opentlc.com" OCP_USERNAME="shacharb-redhat.com" WORKLOAD="ocp4-workload-istio-controlplane" GUID=1001 # a TARGET_HOST is specified in the command line, without using an inventory file ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \ -e"ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem" \ -e"ansible_user=ec2-user" \ -e"ocp_username=${OCP_USERNAME}" \ -e"ocp_workload=${WORKLOAD}" \ -e"silent=False" \ -e"guid=${GUID}" \ -e"ACTION=create" ---- === To Delete an environment ---- TARGET_HOST="bastion.na311.openshift.opentlc.com" OCP_USERNAME="opentlc-mgr" WORKLOAD="ocp4-workload-istio-controlplane" GUID=1002 # a TARGET_HOST is specified in the command line, without using an inventory file ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \ -e"ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem" \ -e"ansible_user=ec2-user" \ -e"ocp_username=${OCP_USERNAME}" \ -e"ocp_workload=${WORKLOAD}" \ -e"guid=${GUID}" \ -e"ACTION=remove" ---- == Other related information: === Deploy Workload on OpenShift Cluster from an existing playbook: [source,yaml] ---- - name: Deploy a workload role on a master host hosts: all become: true gather_facts: False tags: - step007 roles: - { role: "{{ocp_workload}}", when: 'ocp_workload is defined' } ---- NOTE: You might want to change `hosts: all` to fit your requirements === Set up your Ansible inventory file * You can create an Ansible inventory file to define your connection method to your host (Master/Bastion with `oc` command) * You can also use the command line to define the hosts directly if your `ssh` configuration is set to connect to the host correctly * You can also use the command line to use localhost or if your cluster is already authenticated and 
configured in your `oc` configuration .Example inventory file [source, ini] ---- [gptehosts:vars] ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem ansible_user=ec2-user [gptehosts:children] openshift [openshift] bastion.cluster1.openshift.opentlc.com bastion.cluster2.openshift.opentlc.com bastion.cluster3.openshift.opentlc.com bastion.cluster4.openshift.opentlc.com [dev] bastion.cluster1.openshift.opentlc.com bastion.cluster2.openshift.opentlc.com [prod] bastion.cluster3.openshift.opentlc.com bastion.cluster4.openshift.opentlc.com ---- ansible/roles/ocp4-workload-istio-controlplane/tasks/main.yml
New file @@ -0,0 +1,23 @@
---
# Do not modify this file
# Dispatcher: routes the role to the pre/workload/post task files on
# create/provision, or to remove_workload on destroy/remove.
# become_override (defaults/main.yml) controls privilege escalation for all four.

- name: Running Pre Workload Tasks
  import_tasks: ./pre_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"

- name: Running Workload Tasks
  import_tasks: ./workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"

- name: Running Post Workload Tasks
  import_tasks: ./post_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"

- name: Running Workload removal Tasks
  import_tasks: ./remove_workload.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "destroy" or ACTION == "remove"
ansible/roles/ocp4-workload-istio-controlplane/tasks/post_workload.yml
New file @@ -0,0 +1,9 @@
---
# Implement your Post Workload deployment tasks here
# (this role performs no post-deployment configuration)

# Leave this as the last task in the playbook.
- name: post_workload tasks complete
  debug:
    msg: "Post-Workload Tasks completed successfully."
  when: not silent|bool
ansible/roles/ocp4-workload-istio-controlplane/tasks/pre_workload.yml
New file @@ -0,0 +1,9 @@
---
# Implement your Pre Workload deployment tasks here
# (this role performs no pre-deployment setup)

# Leave this as the last task in the playbook.
- name: pre_workload tasks complete
  debug:
    msg: "Pre-Workload tasks completed successfully."
  when: not silent|bool
ansible/roles/ocp4-workload-istio-controlplane/tasks/remove_workload.yml
New file @@ -0,0 +1,98 @@
# vim: set ft=ansible
---
# Implement your Workload removal tasks here
# Tears down what workload.yml created, in reverse order:
# ControlPlane CR -> (wait for pods to drain) -> operator Deployment ->
# ClusterRoleBinding -> ServiceAccount -> ClusterRole -> CRDs -> projects.

- name: remove istio controlplane cr
  k8s:
    state: absent
    definition:
      apiVersion: istio.openshift.com/v1alpha3
      kind: ControlPlane
      metadata:
        name: basic-install
        namespace: istio-system

# The operator deletes all control plane pods once the CR is gone;
# poll until istio-system reports zero pods (up to 100 retries x 10s).
- name: wait for no pods in istio-system
  k8s_facts:
    api_version: v1
    kind: Pod
    namespace: istio-system
  register: istio_pods
  until: istio_pods.resources | length == 0
  retries: 100
  delay: 10

- name: remove istio operator deployment
  k8s:
    state: absent
    definition:
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: istio-operator
        namespace: istio-operator

- name: remove istio operator cluster role binding
  k8s:
    state: absent
    definition:
      kind: ClusterRoleBinding
      apiVersion: rbac.authorization.k8s.io/v1beta1
      metadata:
        name: istio-operator-account-istio-operator-cluster-role-binding

- name: remove istio operator sa
  k8s:
    state: absent
    definition:
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: istio-operator
        namespace: istio-operator

- name: remove istio operator rbac
  k8s:
    state: absent
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRole
      metadata:
        name: istio-operator

- name: remove istio controlplane crd
  k8s:
    state: absent
    definition:
      apiVersion: apiextensions.k8s.io/v1beta1
      kind: CustomResourceDefinition
      metadata:
        name: controlplanes.istio.openshift.com

- name: remove installation crd (to be removed in subsequent release)
  k8s:
    state: absent
    definition:
      apiVersion: apiextensions.k8s.io/v1beta1
      kind: CustomResourceDefinition
      metadata:
        name: installations.istio.openshift.com

- name: remove istio-system and istio-operator projects
  k8s:
    state: absent
    definition:
      apiVersion: project.openshift.io/v1
      kind: Project
      metadata:
        name: "{{ item }}"
  with_items:
  - istio-system
  - istio-operator

# Leave this as the last task in the playbook.
- name: remove_workload tasks complete
  debug:
    msg: "Remove Workload tasks completed successfully."
  when: not silent|bool
ansible/roles/ocp4-workload-istio-controlplane/tasks/workload.yml
New file @@ -0,0 +1,572 @@
# vim: set ft=ansible
---
# Implement your Workload deployment tasks here
# Installs the istio-operator (tech preview 0.10.0) and a basic-install
# ControlPlane CR, then waits for the control plane to come up.

- name: create istio-system and istio-operator projects
  k8s:
    state: present
    definition:
      apiVersion: project.openshift.io/v1
      kind: Project
      metadata:
        name: "{{ item }}"
  with_items:
  - istio-system
  - istio-operator

- name: installation crd (to be removed in subsequent release)
  k8s:
    state: present
    definition:
      apiVersion: apiextensions.k8s.io/v1beta1
      kind: CustomResourceDefinition
      metadata:
        name: installations.istio.openshift.com
      spec:
        group: istio.openshift.com
        names:
          kind: Installation
          plural: installations
          singular: installation
        scope: Namespaced
        subresources:
          status: {}
        version: v1alpha1

- name: istio controlplane crd
  k8s:
    state: present
    definition:
      apiVersion: apiextensions.k8s.io/v1beta1
      kind: CustomResourceDefinition
      metadata:
        name: controlplanes.istio.openshift.com
      spec:
        group: istio.openshift.com
        names:
          kind: ControlPlane
          listKind: ControlPlaneList
          plural: controlplanes
          singular: controlplane
        scope: Namespaced
        subresources:
          status: {}
        version: v1alpha3

# ClusterRole for the operator service account; the rule list mirrors the
# upstream istio-operator install manifest.
- name: istio operator rbac
  k8s:
    state: present
    definition:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: ClusterRole
      metadata:
        name: istio-operator
      rules:
      - apiGroups:
        - ""
        resources:
        - pods
        - services
        - endpoints
        - persistentvolumeclaims
        - events
        - configmaps
        - secrets
        - serviceaccounts
        - namespaces
        - routes
        verbs:
        - '*'
      - apiGroups:
        - apps
        resources:
        - deployments
        - daemonsets
        - replicasets
        - statefulsets
        verbs:
        - '*'
      - apiGroups:
        - autoscaling
        resources:
        - horizontalpodautoscalers
        verbs:
        - '*'
      - apiGroups:
        - extensions
        resources:
        - daemonsets
        - deployments
        verbs:
        - '*'
      - apiGroups:
        - policy
        resources:
        - poddisruptionbudgets
        verbs:
        - '*'
      - apiGroups:
        - admissionregistration.k8s.io
        resources:
        - mutatingwebhookconfigurations
        - validatingwebhookconfigurations
        verbs:
        - '*'
      - apiGroups:
        - certmanager.k8s.io
        resources:
        - clusterissuers
        verbs:
        - '*'
      - apiGroups:
        - rbac.authorization.k8s.io
        resources:
        - clusterrolebindings
        - clusterroles
        - roles
        - rolebindings
        verbs:
        - '*'
      - apiGroups:
        - authentication.istio.io
        resources:
        # for galley, *: get, list, watch
        # for mixer, *: create, get, list, watch
        # for pilot, *: *
        # for istio-authenticated, *: *
        - '*'
        - meshpolicies
        verbs:
        - '*'
      - apiGroups:
        - config.istio.io
        resources:
        # for galley, *: get, list, watch
        # for pilot, *: *
        # for istio-authenticated, *: *
        - '*'
        - attributemanifests
        - handlers
        - logentries
        - rules
        - metrics
        - kuberneteses
        verbs:
        - '*'
      - apiGroups:
        - networking.istio.io
        resources:
        # for galley, *: get, list, watch
        # for pilot, *: *
        # for istio-authenticated, *: *
        - '*'
        - gateways
        - destinationrules
        - virtualservices
        - envoyfilters
        verbs:
        - '*'
      - apiGroups:
        - monitoring.coreos.com
        resources:
        - servicemonitors
        verbs:
        - get
        - create
      - apiGroups:
        - istio.openshift.com
        resources:
        - '*'
        - istiocontrolplanes
        - installations
        verbs:
        - '*'
      - apiGroups:
        - apps.openshift.io
        resources:
        - deploymentconfigs
        verbs:
        - '*'
      - apiGroups:
        - oauth.openshift.io
        resources:
        - oauthclients
        verbs:
        - '*'
      - apiGroups:
        - project.openshift.io
        resources:
        - projects
        - projectrequests
        verbs:
        - '*'
      - apiGroups:
        - route.openshift.io
        resources:
        - routes
        - routes/custom-host
        verbs:
        - '*'
      - apiGroups:
        - security.openshift.io
        resources:
        - securitycontextconstraints
        verbs:
        - '*'
      # for galley (pilot and prometheus also watch nodes)
      - apiGroups:
        - ""
        resources:
        - nodes
        verbs:
        - get
        - list
        - watch
      - apiGroups:
        - extensions
        resources:
        - ingresses
        verbs:
        - get
        - list
        - watch
      - apiGroups:
        - extensions
        - apps
        resources:
        - deployments/finalizers
        resourceNames:
        - istio-galley
        - istio-sidecar-injector
        verbs:
        - update
      # for mixer
      - apiGroups:
        - apiextensions.k8s.io
        resources:
        - customresourcedefinitions
        verbs:
        - get
        - list
        - watch
      - apiGroups:
        - extensions
        resources:
        - replicasets
        verbs:
        - get
        - list
        - watch
      - apiGroups:
        - ""
        resources:
        - replicationcontrollers
        verbs:
        - get
        - list
        - watch
      # for pilot
      # for istio-authenticated, *: *
      - apiGroups:
        - rbac.istio.io
        resources:
        - '*'
        verbs:
        - '*'
        - get
        - list
        - watch
      - apiGroups:
        - apiextensions.k8s.io
        resources:
        - customresourcedefinitions
        verbs:
        - '*'
      - apiGroups:
        - extensions
        resources:
        - ingresses
        - ingresses/status
        verbs:
        - '*'
      # prometheus
      - apiGroups:
        - ""
        resources:
        - nodes/proxy
        verbs:
        - get
        - list
        - watch
      - nonResourceURLs:
        - "/metrics"
        verbs:
        - get
      # citadel
      - apiGroups:
        - authentication.k8s.io
        resources:
        - tokenreviews
        verbs:
        - create
      # kiali
      - apiGroups: [""]
        resources:
        - configmaps
        - endpoints
        - namespaces
        - nodes
        - pods
        - services
        - replicationcontrollers
        verbs:
        - get
        - list
        - watch
      - apiGroups: ["extensions", "apps"]
        resources:
        - deployments
        - statefulsets
        - replicasets
        verbs:
        - get
        - list
        - watch
      - apiGroups: ["autoscaling"]
        resources:
        - horizontalpodautoscalers
        verbs:
        - get
        - list
        - watch
      - apiGroups: ["batch"]
        resources:
        - cronjobs
        - jobs
        verbs:
        - '*'
      - apiGroups: ["project.openshift.io"]
        resources:
        - projects
        verbs:
        - get
      - apiGroups: ["route.openshift.io"]
        resources:
        - routes
        verbs:
        - get
      - apiGroups: ["apps.openshift.io"]
        resources:
        - deploymentconfigs
        verbs:
        - get
        - list
        - watch
      - apiGroups: ["config.istio.io"]
        resources:
        - apikeys
        - authorizations
        - checknothings
        - circonuses
        - deniers
        - fluentds
        - handlers
        - kubernetesenvs
        - kuberneteses
        - listcheckers
        - listentries
        - logentries
        - memquotas
        - metrics
        - opas
        - prometheuses
        - quotas
        - quotaspecbindings
        - quotaspecs
        - rbacs
        - reportnothings
        - rules
        - solarwindses
        - stackdrivers
        - statsds
        - stdios
        verbs:
        - create
        - delete
        - get
        - list
        - patch
        - watch
      - apiGroups: ["networking.istio.io"]
        resources:
        - destinationrules
        - gateways
        - serviceentries
        - virtualservices
        verbs:
        - create
        - delete
        - get
        - list
        - patch
        - watch
      - apiGroups: ["authentication.istio.io"]
        resources:
        - policies
        verbs:
        - create
        - delete
        - get
        - list
        - patch
        - watch
      - apiGroups: ["monitoring.kiali.io"]
        resources:
        - monitoringdashboards
        verbs:
        - get

- name: istio operator sa
  k8s:
    state: present
    definition:
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: istio-operator
        namespace: istio-operator

- name: istio operator cluster role binding
  k8s:
    state: present
    definition:
      kind: ClusterRoleBinding
      apiVersion: rbac.authorization.k8s.io/v1beta1
      metadata:
        name: istio-operator-account-istio-operator-cluster-role-binding
      subjects:
      - kind: ServiceAccount
        namespace: istio-operator
        name: istio-operator
      roleRef:
        kind: ClusterRole
        name: istio-operator
        apiGroup: rbac.authorization.k8s.io

- name: istio operator deployment
  k8s:
    state: present
    definition:
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: istio-operator
        namespace: istio-operator
      spec:
        replicas: 1
        selector:
          matchLabels:
            name: istio-operator
        template:
          metadata:
            labels:
              name: istio-operator
          spec:
            serviceAccountName: istio-operator
            volumes:
            # in-memory discovery cache mounted into the operator container
            - name: discovery-cache
              emptyDir:
                medium: Memory
            containers:
            - name: istio-operator
              image: openshift-istio-tech-preview/istio-operator:0.10.0
              ports:
              - containerPort: 60000
                name: metrics
              command:
              - istio-operator
              - --discoveryCacheDir
              - /home/istio-operator/.kube/cache/discovery
              - "--istioPrefix=openshift-istio-tech-preview/"
              - "--deploymentType=openshift"
              imagePullPolicy: Always
              env:
              # empty WATCH_NAMESPACE -> operator watches all namespaces
              - name: WATCH_NAMESPACE
                value: ""
              - name: POD_NAME
                valueFrom:
                  fieldRef:
                    fieldPath: metadata.name
              - name: OPERATOR_NAME
                value: "istio-operator"
              volumeMounts:
              - name: discovery-cache
                mountPath: /home/istio-operator/.kube/cache/discovery

# Minimal control plane: autoscaling disabled everywhere, 100% trace
# sampling, static kiali credentials (admin/admin).
- name: istio controlplane cr
  k8s:
    state: present
    definition:
      apiVersion: istio.openshift.com/v1alpha3
      kind: ControlPlane
      metadata:
        name: basic-install
        namespace: istio-system
      spec:
        istio:
          global:
            proxy:
              resources:
                requests:
                  cpu: 100m
                  memory: 128Mi
                limits:
                  cpu: 500m
                  memory: 128Mi
          gateways:
            istio-egressgateway:
              autoscaleEnabled: false
            istio-ingressgateway:
              autoscaleEnabled: false
              # NOTE(review): nesting of ior_enabled under
              # istio-ingressgateway inferred from upstream examples —
              # original whitespace was lost; confirm.
              ior_enabled: false
          mixer:
            policy:
              autoscaleEnabled: false
            telemetry:
              autoscaleEnabled: false
              resources:
                requests:
                  cpu: 100m
                  memory: 1G
                limits:
                  cpu: 500m
                  memory: 4G
          pilot:
            autoscaleEnabled: false
            traceSampling: 100.0
          kiali:
            dashboard:
              user: admin
              passphrase: admin

- name: wait up to 5 minutes for istio operator pod to be ready
  shell: "oc get deployment -n istio-operator istio-operator -o jsonpath='{.status.readyReplicas}'"
  register: istio_deployment_status
  until: "istio_deployment_status.stdout | int >= 1"
  retries: 5
  delay: 60

- name: wait up to 8 minutes for the elasticsearch statefulset to exist
  shell: "oc get statefulset elasticsearch -n istio-system"
  register: elasticsearch_set_status
  until: elasticsearch_set_status.rc == 0
  retries: 8
  delay: 60

# jaeger is the last thing to come up because it depends on ES working
- name: wait up to 11 minutes for jaeger-collector to be healthy
  shell: "oc get deployment -n istio-system jaeger-collector -o jsonpath='{.status.readyReplicas}'"
  register: jaeger_collector_ready
  until: "jaeger_collector_ready.stdout | int == 1"
  retries: 11
  delay: 60