Preparing_your_workstation.adoc
@@ -28,7 +28,7 @@
----
# Install basic packages
-yum install -y wget python python-boto unzip boto3 tmux git
+yum install -y wget python python-boto unzip python2-boto3.noarch tmux git ansible

# Another option to configure python boto is:
git clone git://github.com/boto/boto.git

ansible/bfg-1.12.16.jar
Binary files differ
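Since only the package list changes here, a quick import check confirms the new Python bindings are actually usable; the following is an illustrative sketch (the playbook and task are assumptions, not part of this changeset):

[source,yaml]
----
# Illustrative check only: verifies the boto/boto3 bindings installed above.
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Confirm boto and boto3 import cleanly
      command: python -c "import boto, boto3; print(boto3.__version__)"
      changed_when: false
----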
ansible/configs/ansible-cicd-lab/docker_setup.yml
New file
@@ -0,0 +1,24 @@
---
- name: Install Docker.
  package:
    name: docker
    state: present

- name: Add docker group if it doesn't yet exist.
  group:
    name: docker
    state: present
    system: true

- name: Ensure Docker is started and enabled at boot.
  service:
    name: docker
    state: started
    enabled: true

- name: Ensure docker users are added to the docker group.
  user:
    name: "{{ item }}"
    groups: docker
    append: yes
  with_items: "{{ docker_users }}"
ansible/configs/ansible-cicd-lab/env_vars.yml
@@ -41,6 +41,8 @@
install_common: true
software_to_deploy: none
+install_student_user: false
+repo_version: "3.6"

### If you want a Key Pair name created and injected into the hosts,
@@ -77,6 +79,8 @@
HostedZoneId: Z3IHLWJZOU9SRT
# The region to be used, if not specified by -e in the command line
aws_region: ap-southeast-2
+# pick a or c availability zone.
+aws_availability_zone: "{{ aws_region }}{{ ['a','c']|random}}"

# The key that is used to
key_name: "default_key_name"
@@ -88,8 +92,8 @@
## Environment Sizing
bastion_instance_type: "m4.large"
-tower_instance_type: "m4.large"
-cicd_instance_type: "m4.large"
+tower_instance_type: "m4.xlarge"
+cicd_instance_type: "m4.xlarge"
app_instance_type: "m4.large"
appdb_instance_type: "m4.large"
support_instance_type: "m4.large"
@@ -264,13 +268,13 @@
  - extended-choice-parameter # Extended Choice Parameter
  - ansible-tower # Ansible Tower Plugin
jenkins_plugin_timeout: 240 # Jenkins tends to run into timeout while installing plug-ins
-jenkins_admin_password: r3dh4t1!
+jenkins_admin_password: "{{ tower_admin_password }}"
jenkins_protocol: "https"
jenkins_selfsigned_certificate: yes
jenkins_port: 8443
jenkins_home: /var/lib/jenkins
jenkins_keystore_path: "/opt/jenkins/jenkins.jks"
-jenkins_keystore_password: "r3dh4t1!"
+jenkins_keystore_password: "{{ tower_admin_password }}"
jenkins_url_prefix: ""
jenkins_java_options_env_var: JENKINS_JAVA_OPTIONS
jenkins_java_options: "-Djenkins.install.runSetupWizard=false"
@@ -278,6 +282,7 @@
### Tower Variables
tower_setup_version: "3.2.6" # default would be latest, which is dangerous
+tower_admin: admin # don't change this! There are places where no variable is used.
tower_org_name: Acme
tower_project_name: Acme
@@ -292,5 +297,11 @@
### Gogs Variables
ansible_service_mgr: systemd
-gogs_admin_username: cicduser1
-gogs_admin_password: r3dh4t!
+gogs_admin_username: cicduser1 # can't be called admin or it fails
+gogs_admin_password: "{{ tower_admin_password }}"
+
+### Docker variables (Docker is needed by Molecule)
+docker_users:
+  - jenkins
+  - git
+  - "{{ remote_user }}"
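The new `aws_availability_zone` default concatenates the region with a suffix chosen by Jinja's `random` filter; a minimal sketch (region value assumed) of how it resolves:

[source,yaml]
----
# Sketch: with aws_region "ap-southeast-2" this renders as
# "ap-southeast-2a" or "ap-southeast-2c", picked at evaluation time.
- debug:
    msg: "{{ aws_region }}{{ ['a','c'] | random }}"
  vars:
    aws_region: ap-southeast-2
----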
ansible/configs/ansible-cicd-lab/files/cloud_providers/ec2_cloud_template.j2
@@ -105,6 +105,9 @@
    DependsOn:
    - Vpc
    Properties:
+{% if aws_availability_zone is defined %}
+      AvailabilityZone: {{ aws_availability_zone }}
+{% endif %}
      CidrBlock: "192.199.0.0/24"
      Tags:
        - Key: Name
ansible/configs/ansible-cicd-lab/files/hosts_template.j2
@@ -9,10 +9,10 @@
ansible_ssh_private_key_file="~/.ssh/{{guid}}key.pem"
ansible_ssh_common_args="-o StrictHostKeyChecking=no"

[3tierapp:children]
frontends
apps
appdbs
support
towers
+cicd

[towers]
@@ -32,3 +32,9 @@
{% for host in groups['appdbs'] %}
appdb{{loop.index}}.{{chomped_zone_internal_dns}} ansible_ssh_host=appdb{{loop.index}}.{{subdomain_base}}
{% endfor %}
+
+[cicd]
+## These are the cicd
+{% for host in groups['cicd'] %}
+cicd{{loop.index}}.{{chomped_zone_internal_dns}} ansible_ssh_host=cicd{{loop.index}}.{{subdomain_base}}
+{% endfor %}
ansible/configs/ansible-cicd-lab/files/tower_hosts_template.j2
@@ -22,7 +22,8 @@
rabbitmq_vhost=tower
rabbitmq_username=tower
-rabbitmq_password={{tower_admin_password}}
+# because RabbitMQ accepts only alpha-numeric passwords with no special characters
+rabbitmq_password={{ tower_admin_password | regex_replace('[^a-zA-Z0-9]') }}
rabbitmq_cookie=cookiemonster
rabbitmq_use_long_name=true
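The `regex_replace` filter deletes rather than substitutes here because its replacement argument defaults to an empty string; a small sketch with an assumed sample value:

[source,yaml]
----
# Sketch: every character outside [a-zA-Z0-9] is stripped, so an admin
# password of "r3dh4t1!" becomes "r3dh4t1" before RabbitMQ sees it.
- debug:
    msg: "{{ 'r3dh4t1!' | regex_replace('[^a-zA-Z0-9]') }}"
----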
ansible/configs/ansible-cicd-lab/jenkins_ssh_setup.yml
New file
@@ -0,0 +1,25 @@
---
- name: Retrieve SSH File from Bastion
  slurp:
    src: "/root/.ssh/{{ guid }}key.pem"
  register: bastion_ssh_key
  no_log: True
  delegate_to: "{{ bastion_host }}"

- name: Create Jenkins SSH Directory
  file:
    state: directory
    path: /var/lib/jenkins/.ssh
    mode: 0700
    owner: jenkins
    group: jenkins

- name: Set SSH Key
  copy:
    content: "{{ bastion_ssh_key.content | b64decode }}"
    dest: /var/lib/jenkins/.ssh/id_rsa
    owner: jenkins
    group: jenkins
    mode: 0600
  no_log: True
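The slurp-then-copy pattern above works because `slurp` returns file contents base64-encoded; a minimal sketch (path assumed) of the round trip:

[source,yaml]
----
# Sketch: slurp base64-encodes what it reads, so b64decode on the
# registered result restores the exact original bytes.
- slurp:
    src: /etc/hostname
  register: r_file

- debug:
    msg: "{{ r_file.content | b64decode }}"
----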
ansible/configs/ansible-cicd-lab/post_software.yml
@@ -36,6 +36,15 @@
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/host-jenkins-server" }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/molecule" }
+  tasks:
+    - name: include docker_setup tasks
+      include_tasks: docker_setup.yml
+
+  post_tasks:
+    - name: Jenkins SSH Configuration
+      include_tasks: jenkins_ssh_setup.yml
+      vars:
+        bastion_host: "{{ groups['bastions'][0] }}"

- hosts: bastions
  become: false
  vars_files:
ansible/configs/ansible-cicd-lab/pre_software.yml
@@ -25,6 +25,8 @@
  roles:
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion", when: 'install_bastion' }
    - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-opentlc-ipa", when: 'install_ipa_client' }
+    - role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion-student-user"
+      when: install_student_user | bool
  tags:
    - step004
    - bastion_tasks
ansible/configs/ansible-cicd-lab/requirements.yml
@@ -1,5 +1,9 @@
# Use with `ansible-galaxy install --force -r requirements.yml -p ../../roles/`
# (only during development, not during installation)
---
-- src: geerlingguy.jenkins
-  name: host-jenkins-server
+# commented out as it has been customized to support SSL
+#- src: geerlingguy.jenkins
+#  name: host-jenkins-server
+# this installs docker from the Docker Inc. repositories, not what we want
+#- src: geerlingguy.docker
+#  name: host-docker-server
ansible/configs/linklight/README.adoc
@@ -2,19 +2,53 @@
Ansible Linklight is an example of an existing deployer being wrapped by Ansible.
Linklight is capable of deploying multiple different lab environments:

-* Engine (4 nodes, for lightbulb labs)
-* Networking (2 nodes + 2 routers, for linklight labs)
+link:https://github.com/network-automation/linklight[Ansible Linklight Project]
+
+* Engine
+** 4 Linux nodes
+* Networking
+** V1 Networking Lab with 2 Linux nodes, 2 Cisco routers
+* Networking V2
+** V2 Networking Lab with 2 Linux nodes, 4 Cisco routers
+** (This is achieved by setting the variable `networking: true`)

The `linklight` deployer is called by first running a fairly typical
`Ansible Agnostic Deployer` `pre_infra.yml` but at the end of that playbook:

-* cloning the `lightbulb` repo to a specific commit
-* executing the cloned provisioning playbook
-** `linklight` vars are made available via env_vars.yml
+Cloning the `linklight` repo to a specific commit (this is currently hard coded
+into the deployer script) and executing the cloned provisioning playbook.
+`linklight` specific vars are made available via:
+
+* linklight_all_vars.yml
+
+And one of:
+
+* linklight_engine_vars.yml
+* linklight_networking_v1_vars.yml
+* linklight_networking_v2_allcisco_vars.yml
+
+https://github.com/ansible/lightbulb
+
+== How the Different Labs get Configured
+
+For deployment via RHPDS or similar, the deployer script will substitute the
+value of `student_workloads` into a call to the relevant lab var file, e.g.
+
+Deployer Usage:
+
+[source,bash]
+----
+$ mydeploy-script.sh --student_workloads=linklight_engine --more_args ...
+----
+
+[source,bash]
+----
+ansible-playbook main.yml \
+  -e ANSIBLE_REPO_PATH=${ANSIBLE_REPO_PATH} \
+  -e guid=${GUID} \
+  ..... args omitted for brevity
+  -e @configs/linklight/linklight_all_vars.yml \
+  -e @configs/linklight/${STUDENT_WORKLOAD}_vars.yml
+----

== Set up your "Secret" variables
@@ -50,88 +84,76 @@
----

== Review the Env_Type variable file

* This file link:./env_vars.yml[./env_vars.yml] contains all the variables you need to define to control the deployment of your environment.

== Running Ansible Playbook

-You can run the playbook with the following arguments to overwrite the default variable values:
+You can run the playbook with the following arguments

[NOTE]
====
It also sets an ANSIBLE_CONFIG var to load a specific set of vars for these labs.
This is the HIGHEST precedence for `ansible.cfg`, so it will mask any other `ansible.cfg`.
====

[source,bash]
----
-GUID=td07
+export ANSIBLE_CONFIG=configs/linklight/ansible.cfg
+GUID=td08
ENV_TYPE=linklight
CLOUD_PROVIDER=ec2
EC2_REGION=us-east-1
EC2_AVAILABILITY_ZONE=us-east-1a
EC2_NAME_PREFIX=${GUID}
EC2_KEYNAME=${GUID}-link
WORKSHOP_DNS_ZONE=example.opentlc.com
# set NETWORKING to false to deploy Ansible Engine env
NETWORKING=true
LOCAL_SECURITY=false
ADMIN_PASSWORD=ansible
EMAIL_USERS=no
CREATE_LOGIN_PAGE=false
STUDENT_TOTAL=2
USERS='{"users":[{ "name": "Alice", "username": "alice", "email": "alice@example.com" }, { "name": "Bob", "username":"bob", "email": "bob@example.com" }]}'
ANSIBLE_REPO_PATH=$(pwd)

ansible-playbook main.yml \
  -e ANSIBLE_REPO_PATH=${ANSIBLE_REPO_PATH} \
  -e guid=${GUID} \
  -e env_type=${ENV_TYPE} \
  -e networking=${NETWORKING} \
  -e project_tag=${ENV_TYPE}-${GUID} \
  -e ec2_key_name=${EC2_KEYNAME} \
  -e ec2_region=${EC2_REGION} \
  -e ec2_az=${EC2_AVAILABILITY_ZONE} \
  -e ec2_name_prefix=${GUID} \
  -e cloud_provider=${CLOUD_PROVIDER} \
  -e workshop_dns_zone=${WORKSHOP_DNS_ZONE} \
  -e "{'student_total': ${STUDENT_TOTAL}}" \
  -e admin_password=${ADMIN_PASSWORD} \
  -e localsecurity=${LOCAL_SECURITY} \
  -e create_login_page=${CREATE_LOGIN_PAGE} \
  -e email=${EMAIL_USERS} \
  -e users=${USERS} \
  -e software_to_deploy=none \
-  --skip-tags=deploy_infrastructure,post_infra_tasks,pre_software_tasks,deploy_software,post_software,email
+  --skip-tags=deploy_infrastructure,post_infra_tasks,pre_software_tasks,deploy_software,post_software,email \
+  -e @configs/linklight/linklight_all_vars.yml \
+  -e @configs/linklight/linklight_engine_vars.yml
----

-=== Optional Additional Variables
+== Deploying other Linklight Labs

. Ansible Linklight has a new networking lab `_v2` under active development. To deploy this configuration set the following:
+
[source,bash]
----
-e special=all_cisco
----

. To change from the default ssh key behaviour set the following:
+
[source,bash]
----
#use_own_key: true
#env_authorized_key: "{{guid}}key"
#set_env_authorized_key: true
----

=== Networking v1 Lab

To deploy networking v1 substitute the last line above (`-e @configs/linklight/linklight_engine_vars.yml`) for `-e @configs/linklight/linklight_networking_v1_vars.yml`

=== Networking v2 Lab (all Cisco)

To deploy networking v2 substitute the last line above (`-e @configs/linklight/linklight_engine_vars.yml`) for `-e @configs/linklight/linklight_networking_v2_allcisco_vars.yml`

=== To Delete an environment

This deletes the Ansible Engine Lab; see notes below for the Networking labs.

[source,bash]
----
-GUID=td05
+GUID=td08
ENV_TYPE=linklight
EC2_NAME_PREFIX=${GUID}
EC2_REGION=us-east-1
EC2_AVAILABILITY_ZONE=us-east-1a
ANSIBLE_REPO_PATH=$(pwd)
@@ -139,5 +161,12 @@
  -e ANSIBLE_REPO_PATH=${ANSIBLE_REPO_PATH} \
  -e ec2_name_prefix=${GUID} \
  -e ec2_region=${EC2_REGION} \
-  -e ec2_az=${EC2_AVAILABILITY_ZONE}
+  -e ec2_az=${EC2_AVAILABILITY_ZONE} \
+  -e @configs/linklight/linklight_all_vars.yml \
+  -e @configs/linklight/linklight_engine_vars.yml
----

To delete networking v1 substitute the last line above (`-e @configs/linklight/linklight_engine_vars.yml`) for `-e linklight_networking_v1_vars.yml`

To delete networking v2 substitute the last line above (`-e @configs/linklight/linklight_engine_vars.yml`) for `-e linklight_networking_v2_allcisco_vars.yml`
ansible/configs/linklight/ansible.cfg
New file
@@ -0,0 +1,24 @@
[defaults]
# some basic default values...
inventory = hosts
forks = 50
host_key_checking = False
retry_files_enabled = False
no_target_syslog = False
callback_whitelist = time

[ssh_connection]
scp_if_ssh = True

# persistent_connection vars are for Cisco Routers etc
# issues seen with timeouts when deploying regionally
# e.g. ap-southeast-1 etc. Also used by original
# Linklight project.
[persistent_connection]
command_timeout = 1000
connect_timeout = 1000
connect_retry_timeout = 1000
ansible/configs/linklight/destroy_env.yml
@@ -7,4 +7,4 @@
    - "./env_vars.yml"
    - "./env_secret_vars.yml"

-- import_playbook: "{{ ANSIBLE_REPO_PATH }}/workdir/lightbulb/tools/aws_lab_setup/teardown_lab.yml"
+- import_playbook: "{{ ANSIBLE_REPO_PATH }}/workdir/linklight/provisioner/teardown_lab.yml"
ansible/configs/linklight/env_vars.yml
@@ -3,12 +3,8 @@
project_tag: "{{ env_type }}-{{ guid }}"

#
-# Lightbulb comes with its OWN deployer which AAD simply wraps
+# Linklight comes with its OWN deployer which AAD simply wraps
# Hence many of the variables typically used are redundant
#
-# Note: both AAD and Lightbulb use "email" as a var for different
-# purposes. Below the lightbulb usage is used.
-#

###### Variables from the Ansible Lightbulb AWS Provisioner:
@@ -17,7 +13,7 @@
ec2_az: us-east-1a # the availability zone
ec2_name_prefix: "{{ guid }}" # name prefix for all the VMs
-student_total: 2 # amount of work benches to provision
+student_total: 1 # amount of work benches to provision

## Optional Variables
ansible/configs/linklight/linklight_all_vars.yml
New file
@@ -0,0 +1,18 @@
# All linklight lab environments consume this file
#
# - linklight_engine
# - linklight_networking_v1
# - linklight_networking_v2_allcisco

admin_password: ansible                # password used for student account on control node
create_login_page: true                # creates S3 bucket with labs and login details
workshop_dns_zone: example.opentlc.com # Domain for S3 bucket
localsecurity: false
email: no

# vars sourced from elsewhere (CloudForms + Deployer Script)
# should not need to be set - here for completeness
# ec2_region: us-east-1   # region where the nodes will live
# ec2_name_prefix: <GUID> # name prefix for all the VMs
# student_total: 1        # amount of work benches to provision
ansible/configs/linklight/linklight_engine_vars.yml
New file
@@ -0,0 +1,5 @@
# Ansible Engine is the default
#
# No vars need to be passed here

placeholder_var: placeholder # Ansible doesn't like -e @empty-file.yml
ansible/configs/linklight/linklight_networking_v1_vars.yml
New file
@@ -0,0 +1,3 @@
# 2018-09-14 Should only need 1 var, boolean networking

networking: true # if true deploys v1 networking lab
ansible/configs/linklight/linklight_v2_allcisco_vars.yml
New file
@@ -0,0 +1,6 @@
# 2018-09-14 linklight v2 lab needs
# - networking: true   # tells it to deploy a networking lab
# - special: all_cisco # tells it to deploy a 4 router lab

networking: true
special: all_cisco
ansible/configs/linklight/requirements.txt
New file
@@ -0,0 +1,32 @@
ansible==2.6.2
asn1crypto==0.24.0
bcrypt==3.1.4
boto==2.49.0
boto3==1.7.77
botocore==1.10.77
certifi==2018.8.24
cffi==1.11.5
chardet==3.0.4
cryptography==2.3.1
docutils==0.14
enum34==1.1.6
futures==3.2.0
httpie==0.9.9
idna==2.7
ipaddress==1.0.22
Jinja2==2.10
jmespath==0.9.3
MarkupSafe==1.0
netaddr==0.7.19
paramiko==2.4.1
passlib==1.7.1
pyasn1==0.4.4
pycparser==2.18
Pygments==2.2.0
PyNaCl==1.2.1
python-dateutil==2.7.3
PyYAML==3.13
requests==2.19.1
s3transfer==0.1.13
six==1.11.0
urllib3==1.23
ansible/configs/ocp-gpu-single-node/env_vars.yml
@@ -8,15 +8,15 @@
### Vars that can be removed:
# use_satellite: true
-# use_subscription_manager: false
-# use_own_repos: false
+use_subscription_manager: false
+use_own_repos: true

###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
###### OR PASS as "-e" args to ansible-playbook command

### Common Host settings

repo_version: "3.10"
-repo_method: rhn # Other Options are: file, satellite and rhn
+repo_method: file # Other Options are: file, satellite and rhn

#If using repo_method: satellite, you must set these values as well.
# satellite_url: https://satellite.example.com
ansible/configs/ocp-gpu-single-node/files/cloud_providers/ec2_cloud_template.j2
@@ -284,6 +284,23 @@
        - "Fn::GetAtt":
          - {{instance['name']}}{{loop.index}}
          - PublicIp
+# cloud dns is same as the bastion which is our only host
+  {{instance['name']}}{{loop.index}}CloudDNS:
+    Type: "AWS::Route53::RecordSetGroup"
+    DependsOn:
+      - {{instance['name']}}{{loop.index}}EIP
+    Properties:
+      HostedZoneId: {{HostedZoneId}}
+      RecordSets:
+        - Name: "{{cloudapps_dns}}"
+          Type: A
+          TTL: 900
+          ResourceRecords:
+            - Fn::GetAtt:
+              - {{instance['name']}}{{loop.index}}
+              - PublicIp
{% endif %}
{% endfor %}
{% endfor %}
ansible/configs/ocp-ha-lab/files/hosts_template.3.10.34.j2
New file @@ -0,0 +1,316 @@ # # ansible inventory for OpenShift Container Platform 3.10.14 # [OSEv3:vars] ########################################################################### ### Ansible Vars ########################################################################### timeout=60 ansible_user={{ansible_ssh_user}} ansible_become=yes ########################################################################### ### OpenShift Basic Vars ########################################################################### openshift_deployment_type=openshift-enterprise openshift_disable_check="disk_availability,memory_availability,docker_image_availability" oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version} openshift_examples_modify_imagestreams=true {% if container_runtime == "cri-o" %} openshift_use_crio=True openshift_crio_use_rpm=True openshift_crio_enable_docker_gc=True openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'} {% endif %} # Node Groups openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true','runtime={{container_runtime}}']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true','runtime={{container_runtime}}']}, {'name': 'node-config-glusterfs', 'labels': ['runtime={{container_runtime}}']}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true','runtime={{container_runtime}}'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}] # Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. -> These need to go into the above # openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']} # Configure logrotate scripts # See: https://github.com/nickhammond/ansible-logrotate logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] {% if install_nfs|bool %} # Set this line to enable NFS openshift_enable_unsupported_configurations=True {% endif %} ########################################################################### ### OpenShift Cockpit Vars ########################################################################### # Enable cockpit osm_use_cockpit=true osm_cockpit_plugins=['cockpit-kubernetes'] ########################################################################### ### OpenShift Master Vars ########################################################################### openshift_master_api_port={{master_api_port}} openshift_master_console_port={{master_api_port}} openshift_master_cluster_method=native openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal openshift_master_cluster_public_hostname={{master_lb_dns}} openshift_master_default_subdomain={{cloudapps_suffix}} #openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'} openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}} ########################################################################### ### OpenShift Network Vars ########################################################################### osm_cluster_network_cidr=10.1.0.0/16 
openshift_portal_net=172.30.0.0/16 {{multi_tenant_setting}} # os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy' ########################################################################### ### OpenShift Authentication Vars ########################################################################### # htpasswd Authentication openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] openshift_master_htpasswd_file=/root/htpasswd.openshift # LDAP Authentication (download ipa-ca.crt first) # openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}] # openshift_master_ldap_ca_file=/root/ipa-ca.crt ########################################################################### ### OpenShift Metrics and Logging Vars ########################################################################### ######################## # Enable cluster metrics ######################## openshift_metrics_install_metrics={{install_metrics}} {% if install_nfs|bool %} openshift_metrics_storage_kind=nfs openshift_metrics_storage_access_modes=['ReadWriteOnce'] openshift_metrics_storage_nfs_directory=/srv/nfs openshift_metrics_storage_nfs_options='*(rw,root_squash)' openshift_metrics_storage_volume_name=metrics openshift_metrics_storage_volume_size=10Gi openshift_metrics_storage_labels={'storage': 'metrics'} {% endif %} openshift_metrics_cassanda_pvc_storage_class_name='' openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra": "true"} openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra": "true"} openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/infra": "true"} # Store Metrics for 2 days openshift_metrics_duration=2 {% if install_prometheus|bool %} ######################### # Add Prometheus Metrics: ######################### openshift_hosted_prometheus_deploy=true openshift_prometheus_namespace=openshift-metrics openshift_prometheus_node_selector={"node-role.kubernetes.io/infra":"true"} # Prometheus {% if install_glusterfs|bool %} openshift_prometheus_storage_type='pvc' openshift_prometheus_storage_kind=dynamic openshift_prometheus_storage_class='glusterfs-storage-block' openshift_prometheus_storage_volume_size=20Gi openshift_prometheus_storage_access_modes=['ReadWriteOnce'] openshift_prometheus_storage_volume_name=prometheus {% elif install_nfs|bool %} openshift_prometheus_storage_type='emptydir' {% endif %} # For prometheus-alertmanager {% if install_glusterfs|bool %} openshift_prometheus_alertmanager_storage_type='pvc' openshift_prometheus_alertmanager_storage_kind=dynamic openshift_prometheus_alertmanager_storage_class='glusterfs-storage-block' openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] openshift_prometheus_alertmanager_storage_volume_size=10Gi openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager {% elif install_nfs|bool %} openshift_prometheus_alertmanager_storage_type='emptydir' {% 
endif %} # For prometheus-alertbuffer {% if install_glusterfs|bool %} openshift_prometheus_alertbuffer_storage_type='pvc' openshift_prometheus_alertbuffer_storage_kind=dynamic openshift_prometheus_alertbuffer_storage_class='glusterfs-storage-block' openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer openshift_prometheus_alertbuffer_storage_volume_size=10Gi {% elif install_nfs|bool %} openshift_prometheus_alertbuffer_storage_type='emptydir' {% endif %} # Suggested Quotas and limits for Prometheus components: openshift_prometheus_memory_requests=2Gi openshift_prometheus_cpu_requests=750m openshift_prometheus_memory_limit=2Gi openshift_prometheus_cpu_limit=750m openshift_prometheus_alertmanager_memory_requests=300Mi openshift_prometheus_alertmanager_cpu_requests=200m openshift_prometheus_alertmanager_memory_limit=300Mi openshift_prometheus_alertmanager_cpu_limit=200m openshift_prometheus_alertbuffer_memory_requests=300Mi openshift_prometheus_alertbuffer_cpu_requests=200m openshift_prometheus_alertbuffer_memory_limit=300Mi openshift_prometheus_alertbuffer_cpu_limit=200m {# The following file will need to be copied over to the bastion before deployment # There is an example in ocp-workshop/files # openshift_prometheus_additional_rules_file=/root/prometheus_alerts_rules.yml #} # Grafana openshift_grafana_node_selector={"node-role.kubernetes.io/infra":"true"} openshift_grafana_storage_type=pvc openshift_grafana_pvc_size=2Gi openshift_grafana_node_exporter=true {% if install_glusterfs|bool %} openshift_grafana_sc_name=glusterfs-storage {% endif %} {% endif %} # Enable cluster logging ######################## openshift_logging_install_logging={{install_logging}} {% if install_nfs|bool and not install_glusterfs|bool %} openshift_logging_storage_kind=nfs openshift_logging_storage_access_modes=['ReadWriteOnce'] openshift_logging_storage_nfs_directory=/srv/nfs openshift_logging_storage_nfs_options='*(rw,root_squash)' openshift_logging_storage_volume_name=logging openshift_logging_storage_volume_size=10Gi openshift_logging_storage_labels={'storage': 'logging'} openshift_logging_es_pvc_storage_class_name='' {% endif %} {% if install_glusterfs|bool %} openshift_logging_es_pvc_dynamic=true openshift_logging_es_pvc_size=20Gi openshift_logging_es_pvc_storage_class_name='glusterfs-storage-block' {% endif %} openshift_logging_es_memory_limit=8Gi openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra": "true"} openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra": "true"} openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra": "true"} openshift_logging_es_cluster_size=1 openshift_logging_curator_default_days=2 ########################################################################### ### OpenShift Router and Registry Vars ########################################################################### openshift_hosted_router_replicas={{infranode_instance_count}} # openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"} openshift_hosted_registry_replicas=1 {% if install_nfs|bool %} openshift_hosted_registry_storage_kind=nfs openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] openshift_hosted_registry_storage_nfs_directory=/srv/nfs openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' openshift_hosted_registry_storage_volume_name=registry 
openshift_hosted_registry_storage_volume_size=20Gi {% endif %} openshift_hosted_registry_pullthrough=true openshift_hosted_registry_acceptschema2=true openshift_hosted_registry_enforcequota=true ########################################################################### ### OpenShift Service Catalog Vars ########################################################################### openshift_enable_service_catalog=true template_service_broker_install=true # openshift_template_service_broker_namespaces=['openshift'] ansible_service_broker_install=true ansible_service_broker_local_registry_whitelist=['.*-apb$'] ########################################################################### ### OpenShift Hosts ########################################################################### [OSEv3:children] lb masters etcd nodes {% if install_nfs|bool %} nfs {% endif %} {% if install_glusterfs|bool %} #glusterfs {% endif %} [lb] {% for host in groups['loadbalancers'] %} {{ hostvars[host].internaldns }} {% endfor %} [masters] {% for host in groups['masters']|sort %} {{ hostvars[host].internaldns }} {% endfor %} [etcd] {% for host in groups['masters']|sort %} {{ hostvars[host].internaldns }} {% endfor %} [nodes] ## These are the masters {% for host in groups['masters']|sort %} {{ hostvars[host].internaldns }} openshift_node_group_name='node-config-master' {% endfor %} ## These are infranodes {% for host in groups['infranodes']|sort %} {{ hostvars[host].internaldns }} openshift_node_group_name='node-config-infra' {% endfor %} ## These are regular nodes {% for host in groups['nodes']|sort %} {{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute' {% endfor %} ## These are OCS nodes {% for host in groups['support']|sort %} # {{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute' {% endfor %} {% if install_nfs|bool %} [nfs] {% for host in [groups['support']|sort|first] %} {{ hostvars[host].internaldns }} {% endfor %} {% endif %} #[glusterfs] {% for host in groups['support']|sort %} # {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]' {% endfor %} ansible/configs/ocp-ha-lab/files/hosts_template.3.9.41.j2
New file @@ -0,0 +1,235 @@ [OSEv3:vars] ########################################################################### ### Ansible Vars ########################################################################### timeout=60 ansible_become=yes ansible_ssh_user={{ansible_ssh_user}} ########################################################################### ### OpenShift Basic Vars ########################################################################### deployment_type=openshift-enterprise openshift_disable_check="memory_availability" # Default node selectors osm_default_node_selector='env=app' openshift_hosted_infra_selector="env=infra" ########################################################################### ### OpenShift Master Vars ########################################################################### openshift_master_api_port={{master_api_port}} openshift_master_console_port={{master_api_port}} openshift_master_cluster_method=native openshift_master_cluster_hostname=loadbalancer1.{{guid}}.internal openshift_master_cluster_public_hostname={{master_lb_dns}} openshift_master_default_subdomain={{cloudapps_suffix}} #openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'} openshift_master_overwrite_named_certificates={{openshift_master_overwrite_named_certificates}} # Set this line to enable NFS openshift_enable_unsupported_configurations=True ########################################################################### ### OpenShift Network Vars ########################################################################### #osm_cluster_network_cidr=10.1.0.0/16 #openshift_portal_net=172.30.0.0/16 #os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' {{multi_tenant_setting}} ########################################################################### ### OpenShift Authentication Vars ########################################################################### # htpasswd Authentication openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] openshift_master_htpasswd_file=/root/htpasswd.openshift # LDAP Authentication (download ipa-ca.crt first) # openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}] # openshift_master_ldap_ca_file=/root/ipa-ca.crt ########################################################################### ### OpenShift Router and Registry Vars ########################################################################### # Bug in 3.9.30 - Bug 1583500 - Unqualified image is completed with "docker.io" # https://bugzilla.redhat.com/show_bug.cgi?id=1583500 # Workaround: oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version} openshift_examples_modify_imagestreams=true openshift_hosted_router_replicas={{infranode_instance_count}} # openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": 
"/path/to/router.key", "cafile": "/path/to/router-ca.crt"} openshift_hosted_registry_replicas=1 openshift_hosted_registry_storage_kind=nfs openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] openshift_hosted_registry_storage_nfs_directory=/srv/nfs openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' openshift_hosted_registry_storage_volume_name=registry openshift_hosted_registry_storage_volume_size=20Gi openshift_hosted_registry_pullthrough=true openshift_hosted_registry_acceptschema2=true openshift_hosted_registry_enforcequota=true ########################################################################### ### OpenShift Service Catalog Vars ########################################################################### openshift_enable_service_catalog=true template_service_broker_install=true openshift_template_service_broker_namespaces=['openshift'] ansible_service_broker_install=true ansible_service_broker_local_registry_whitelist=['.*-apb$'] openshift_hosted_etcd_storage_kind=nfs openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)" openshift_hosted_etcd_storage_nfs_directory=/srv/nfs openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'} openshift_hosted_etcd_storage_volume_name=etcd-asb openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce'] openshift_hosted_etcd_storage_volume_size=10G ########################################################################### ### OpenShift Metrics and Logging Vars ########################################################################### # Enable cluster metrics openshift_metrics_install_metrics={{install_metrics}} openshift_metrics_storage_kind=nfs openshift_metrics_storage_access_modes=['ReadWriteOnce'] openshift_metrics_storage_nfs_directory=/srv/nfs openshift_metrics_storage_nfs_options='*(rw,root_squash)' openshift_metrics_storage_volume_name=metrics openshift_metrics_storage_volume_size=10Gi openshift_metrics_storage_labels={'storage': 'metrics'} openshift_metrics_cassandra_nodeselector={"env":"infra"} openshift_metrics_hawkular_nodeselector={"env":"infra"} openshift_metrics_heapster_nodeselector={"env":"infra"} # Enable cluster logging openshift_logging_install_logging={{install_logging}} openshift_logging_storage_kind=nfs openshift_logging_storage_access_modes=['ReadWriteOnce'] openshift_logging_storage_nfs_directory=/srv/nfs openshift_logging_storage_nfs_options='*(rw,root_squash)' openshift_logging_storage_volume_name=logging openshift_logging_storage_volume_size=10Gi openshift_logging_storage_labels={'storage': 'logging'} # openshift_logging_kibana_hostname=kibana.{{cloudapps_suffix}} openshift_logging_es_cluster_size=1 openshift_logging_es_nodeselector={"env":"infra"} openshift_logging_kibana_nodeselector={"env":"infra"} openshift_logging_curator_nodeselector={"env":"infra"} ########################################################################### ### OpenShift Prometheus Vars ########################################################################### ## Add Prometheus Metrics: openshift_hosted_prometheus_deploy=true openshift_prometheus_node_selector={"env":"infra"} openshift_prometheus_namespace=openshift-metrics # Prometheus openshift_prometheus_storage_kind=nfs openshift_prometheus_storage_access_modes=['ReadWriteOnce'] openshift_prometheus_storage_nfs_directory=/srv/nfs openshift_prometheus_storage_nfs_options='*(rw,root_squash)' openshift_prometheus_storage_volume_name=prometheus openshift_prometheus_storage_volume_size=10Gi openshift_prometheus_storage_labels={'storage': 
'prometheus'} openshift_prometheus_storage_type='pvc' # For prometheus-alertmanager openshift_prometheus_alertmanager_storage_kind=nfs openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] openshift_prometheus_alertmanager_storage_nfs_directory=/srv/nfs openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)' openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager openshift_prometheus_alertmanager_storage_volume_size=10Gi openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} openshift_prometheus_alertmanager_storage_type='pvc' # For prometheus-alertbuffer openshift_prometheus_alertbuffer_storage_kind=nfs openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] openshift_prometheus_alertbuffer_storage_nfs_directory=/srv/nfs openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)' openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer openshift_prometheus_alertbuffer_storage_volume_size=10Gi openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} openshift_prometheus_alertbuffer_storage_type='pvc' # Necessary because of a bug in the installer on 3.9 openshift_prometheus_node_exporter_image_version=v3.9 ########################################################################### ### OpenShift Hosts ########################################################################### [OSEv3:children] lb masters etcd nodes nfs #glusterfs [lb] {% for host in groups['loadbalancers'] %} {{ hostvars[host].internaldns }} {% endfor %} [masters] {% for host in groups['masters']|sort %} {{ hostvars[host].internaldns }} {% endfor %} [etcd] {% for host in groups['masters']|sort %} {{ hostvars[host].internaldns }} {% endfor %} [nodes] ## These are the masters {% for host in groups['masters']|sort %} {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env':'master', 'cluster': '{{guid}}'}" {% endfor %} ## These are infranodes {% for host in groups['infranodes']|sort %} {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env':'infra', 'cluster': '{{guid}}'}" {% endfor %} ## These are regular nodes {% for host in groups['nodes']|sort %} {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env':'app', 'cluster': '{{guid}}'}" {% endfor %} ## These are CNS nodes {% for host in groups['support']|sort %} # {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env':'glusterfs', 'cluster': '{{guid}}'}" {% endfor %} [nfs] {% set nfshost = groups['support']|sort|first %} {{ hostvars[nfshost].internaldns }} openshift_hostname={{ hostvars[nfshost].internaldns }} #[glusterfs] {% for host in groups['support']|sort %} # {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]' {% endfor %} ansible/configs/ocp-ha-lab/files/labs_hosts_template.3.10.34.j2
New file @@ -0,0 +1,90 @@ # # LAB inventory # ansible inventory for OpenShift Container Platform 3.10.14 # [OSEv3:vars] ########################################################################### ### Ansible Vars ########################################################################### timeout=60 ansible_user={{ansible_ssh_user}} ansible_become=yes ########################################################################### ### OpenShift Basic Vars ########################################################################### openshift_disable_check="disk_availability,memory_availability,docker_image_availability" openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true','runtime={{container_runtime}}']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true','runtime={{container_runtime}}']}, {'name': 'node-config-glusterfs', 'labels': ['runtime={{container_runtime}}']}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true','runtime={{container_runtime}}'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}] # Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. -> These need to go into the above # openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']} # Configure logrotate scripts # See: https://github.com/nickhammond/ansible-logrotate logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] ########################################################################### ### OpenShift Hosts ########################################################################### [OSEv3:children] lb masters etcd nodes {% if install_nfs|bool %} nfs {% endif %} {% if install_glusterfs|bool %} #glusterfs {% endif %} [lb] {% for host in groups['loadbalancers'] %} {{ hostvars[host].internaldns }} {% endfor %} [masters] {% for host in groups['masters']|sort %} {{ hostvars[host].internaldns }} {% endfor %} [etcd] {% for host in groups['masters']|sort %} {{ hostvars[host].internaldns }} {% endfor %} [nodes] ## These are the masters {% for host in groups['masters']|sort %} {{ hostvars[host].internaldns }} openshift_node_group_name='node-config-master' {% endfor %} ## These are infranodes {% for host in groups['infranodes']|sort %} {{ hostvars[host].internaldns }} openshift_node_group_name='node-config-infra' {% endfor %} ## These are regular nodes {% for host in groups['nodes']|sort %} {{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute' {% endfor %} ## These are OCS nodes {% for host in groups['support']|sort %} # {{ hostvars[host].internaldns }} openshift_node_group_name='node-config-compute' {% endfor %} {% if install_nfs|bool %} [nfs] {% for host in [groups['support']|sort|first] %} {{ hostvars[host].internaldns }} {% endfor %} {% endif %} #[glusterfs] {% for host in groups['support']|sort %} # {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]' {% endfor %} ansible/configs/ocp-ha-lab/files/labs_hosts_template.3.9.41.j2
New file @@ -0,0 +1,71 @@ [OSEv3:vars] ########################################################################### ### Ansible Vars ########################################################################### timeout=60 ansible_become=yes ansible_ssh_user={{ansible_ssh_user}} # disable memory check, as we are not a production environment openshift_disable_check="memory_availability" # Set this line to enable NFS openshift_enable_unsupported_configurations=True ########################################################################### ### OpenShift Hosts ########################################################################### [OSEv3:children] lb masters etcd nodes nfs #glusterfs [lb] {% for host in groups['loadbalancers'] %} {{ hostvars[host].internaldns }} {% endfor %} [masters] {% for host in groups['masters']|sort %} {{ hostvars[host].internaldns }} {% endfor %} [etcd] {% for host in groups['masters']|sort %} {{ hostvars[host].internaldns }} {% endfor %} [nodes] ## These are the masters {% for host in groups['masters']|sort %} {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env': 'master', 'cluster': '{{guid}}'}" {% endfor %} ## These are infranodes {% for host in groups['infranodes']|sort %} {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env':'infra', 'cluster': '{{guid}}'}" {% endfor %} ## These are regular nodes {% for host in groups['nodes']|sort %} {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env':'app', 'cluster': '{{guid}}'}" {% endfor %} ## These are CNS nodes {% for host in groups['support']|sort %} # {{ hostvars[host].internaldns }} openshift_hostname={{ hostvars[host].internaldns }} openshift_node_labels="{'env':'glusterfs', 'cluster': '{{guid}}'}" {% endfor %} [nfs] {% set nfshost = groups['support']|sort|first %} {{ hostvars[nfshost].internaldns }} openshift_hostname={{ hostvars[nfshost].internaldns }} #[glusterfs] {% for host in groups['support']|sort %} # {{ hostvars[host].internaldns }} glusterfs_devices='[ "{{ glusterfs_app_device_name }}" ]' {% endfor %} ansible/configs/ocp-workshop/scaleup.yml
@@ -104,7 +104,7 @@
  hosts:
    - "newnodes"
  roles:
-    - "{{ ANSIBLE_REPO_PATH }}/roles/openshift-node"
+    - "{{ ANSIBLE_REPO_PATH }}/roles/host-ocp-node"
  tags:
    - openshift_node_tasks
ansible/configs/quay-enterprise/software.yml
@@ -35,7 +35,7 @@
      qe_quay_ssl_key_file: ""
      qe_quay_ssl_cert_file: ""
    when:
-      - qe_quay_ssl_key_file is undefined or qe_quay_ssl_cert_file is undefined or qe_quay_ssl_key_file=="" or qe_quay_ssl_cert_file==""
+      - qe_quay_ssl_key_file is undefined or qe_quay_ssl_cert_file is undefined

- name: Set up Let's Encrypt Certificates
  hosts:
@@ -63,12 +63,16 @@
  vars:
    - acme_domain: "{{ qe_quay_hostname }}.{{ subdomain_base }}"
    - acme_remote_dir: "/root"
-    - acme_cache_cert_file: "{{ qe_quay_ssl_cert_file }}"
-    - acme_cache_key_file: "{{ qe_quay_ssl_key_file }}"
+    - acme_cache_cert_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.cert"
+    - acme_cache_key_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.key"
+    - acme_cache_archive_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}_acme.tgz"
    - acme_production: "{{ qe_quay_ssl_lets_encrypt_production|d(False)|bool }}"
    - acme_renew_automatically: "{{ qe_quay_ssl_lets_encrypt_renew_automatically|d(False)|bool }}"
    - acme_force_issue: "{{ qe_quay_ssl_lets_encrypt_force_renew|d(False)|bool }}"
+    - name: Set Cert/Key file locations to cached locations
+      set_fact:
+        qe_quay_ssl_key_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.key"
+        qe_quay_ssl_cert_file: "{{ANSIBLE_REPO_PATH}}/workdir/{{guid}}.cert"
@@ -198,7 +202,7 @@
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
  tasks:
-    - name: Set SSL Certificate to generated certificate if no certificate file specified
+    - name: Set SSL Certificate to self signed certificate if no certificate file specified
      set_fact:
        qe_quay_ssl_cert_file: "/tmp/ssl.cert"
      when:
ansible/configs/rhte-ansible-net/README.adoc
@@ -11,7 +11,7 @@
The `linklight` deployer is called by first running a fairly typical
`Ansible Agnostic Deployer` `pre_infra.yml` but at the end of that playbook:

-* cloning the `lightbulb` repo to a specific commit
+* cloning a forked `lightbulb` repo to a specific commit
* executing the cloned provisioning playbook
** `linklight` vars are made available via env_vars.yml
@@ -130,7 +130,7 @@
[source,bash]
----
-GUID=rnet06
+GUID=rnet00
ENV_TYPE=rhte-ansible-net
EC2_NAME_PREFIX=${GUID}
@@ -143,8 +143,11 @@
  -e ANSIBLE_REPO_PATH=${ANSIBLE_REPO_PATH} \
  -e ec2_name_prefix=${GUID} \
  -e ec2_region=${EC2_REGION} \
-  -e ec2_az=${EC2_AVAILABILITY_ZONE}
+  -e s3_state=absent \
+  -e state=absent \
+  -e create_login_page=true \
+  -e ec2_az=${EC2_AVAILABILITY_ZONE} -e workshop_dns_zone=example.opentlc.com \
----
ansible/configs/rhte-oc-cluster-vms/env_vars.yml
@@ -62,7 +62,7 @@
## Environment Sizing

-clientvm_instance_type: "t2.large"
+clientvm_instance_type: "m4.2xlarge"

###### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYMENT
ansible/configs/rhte-oc-cluster-vms/post_software.yml
@@ -90,12 +90,14 @@
  when: r_systemd is failed
  block:
    - name: Reboot VM
-      command: shutdown -r now
+      command: shutdown -r +1
      async: 0
      poll: 0
      ignore_errors: yes

    - name: wait for linux host to be available (retry)
      wait_for_connection:
-        delay: 60
+        delay: 90
        timeout: 200

    - ping:
ansible/configs/rhte-ocp-workshop/files/clientvms_dns.json.j2
New file
@@ -0,0 +1,16 @@
{
  "Comment": "Create ClientVMs DNS zone entries",
  "Changes": [
{% for host in groups['clientvms'] %}
    {
      "Action": "{{DNS_action}}",
      "ResourceRecordSet": {
        "Name": "clientvm{{loop.index}}.{{subdomain_base}}",
        "Type": "A",
        "TTL": 900,
        "ResourceRecords": [ { "Value": "{{hostvars[host].public_ip_address}}" } ]
      }
    }{{ "," if not loop.last else "" }}
{% endfor %}
  ]
}
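The `{{ "," if not loop.last else "" }}` guard is what keeps the rendered change batch valid JSON; a tiny sketch of the same idiom:

[source,yaml]
----
# Sketch: a comma is emitted between items but never after the last one.
- debug:
    msg: "[{% for x in [1, 2, 3] %}{{ x }}{{ ',' if not loop.last else '' }}{% endfor %}]"
# prints: [1,2,3]
----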
ansible/configs/rhte-ocp-workshop/post_infra.yml
@@ -16,7 +16,10 @@
  when:
    - "'clientvms' in groups"
    - groups['clientvms'] | length > 0
    - cloudformation_out_final is defined
+    - cloudformation_out_final.stack_outputs.AutoScalingGroupClientVM is defined
+  tags:
+    - clientvms
  block:
    - name: test cloudformation_out_final
      debug:
@@ -44,24 +47,23 @@
    - name: Run infra-ec2-create-inventory Role
      import_role:
        name: "{{ ANSIBLE_REPO_PATH }}/roles/infra-ec2-create-inventory"

    - name: Run Common SSH Config Generator Role
      import_role:
        name: "{{ANSIBLE_REPO_PATH}}/roles/infra-common-ssh-config-generate"

-    - name: Create DNS record for each clientVM
-      route53:
-        hosted_zone_id: "{{HostedZoneId}}"
-        zone: "{{subdomain_base}}"
-        record: "clientvm{{idx + 1}}.{{subdomain_base}}"
-        state: present
-        type: A
-        ttl: 90
-        value: "{{hostvars[item].public_ip_address}}"
-      with_items: "{{groups['clientvms']}}"
-      ignore_errors: yes
-      loop_control:
-        index_var: idx
-        pause: 2
+    - name: Create JSON file for DNS records for clientVM
+      vars:
+        DNS_action: CREATE
+      template:
+        src: ./files/clientvms_dns.json.j2
+        dest: "{{ANSIBLE_REPO_PATH}}/workdir/{{env_type}}-{{guid}}_clientvms_dns.json"

+    - name: Create clientvms DNS names
+      command: >-
+        aws route53 change-resource-record-sets
+        --hosted-zone-id {{HostedZoneId}}
+        --change-batch file:///{{ANSIBLE_REPO_PATH}}/workdir/{{env_type}}-{{guid}}_clientvms_dns.json

    - name: Rename instance
      ec2_tag:
ansible/configs/rhte-ocp-workshop/post_software.yml
@@ -83,14 +83,14 @@
        -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
      register: changesc_r
      failed_when:
-      - changesc_r.stdout.find('storageclass "glusterfs-storage" not patched') == -1
+      - changesc_r.stdout.find('storageclass "glusterfs-storage" not patched') == -1
+      - changesc_r.rc != 0
      changed_when: changesc_r.stdout.find('storageclass "glusterfs-storage" patched') != -1

    - name: Remove default from glusterfs-storage-block class
      register: changesc_r
      changed_when: changesc_r.stdout.find('storageclass "glusterfs-storage-block" patched') != -1
      failed_when:
-      - changesc_r.stdout.find('storageclass "glusterfs-storage-block" not patched') == -1
+      - changesc_r.stdout.find('storageclass "glusterfs-storage-block" not patched') == -1
+      - changesc_r.rc != 0
      command: >
        oc patch storageclass glusterfs-storage-block
@@ -373,7 +373,7 @@
      - osrelease is version_compare('3.9.0', '>=')
      - osrelease is version_compare('3.9.25', '<=')
      - container_runtime == "cri-o"
-    block:
+    block:
    - name: Patch dockergc DaemonSet
      shell: "oc patch daemonset dockergc --patch='\"spec\": { \"template\": { \"spec\": { \"containers\": [ { \"command\": [ \"/usr/bin/oc\" ], \"name\": \"dockergc\" } ] } } }' -n default"
      ignore_errors: true
@@ -596,6 +598,8 @@
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
+  tags:
+    - infra_workloads
  tasks:
    - name: Install ocp-infra workloads
      when:
@@ -630,6 +632,8 @@
  vars_files:
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml"
    - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml"
+  tags:
+    - student_workloads
  tasks:
    - name: Install ocp-workloads
      when:
ansible/gpte-hosts
New file
@@ -0,0 +1,29 @@
[gptehosts:vars]
ansible_ssh_private_key_file=~/.ssh/opentlc_admin_backdoor.pem
#ansible_ssh_private_key_file=~/.ssh/ocpkey.pem
ansible_ssh_user=ec2-user

[gptehosts:children]
openshift
admin
dev
prod

[admin]
admin.na.shared.opentlc.com #ipa host

[openshift]
bastion.na1.openshift.opentlc.com
bastion.rhpds.openshift.opentlc.com
bastion.dev.openshift.opentlc.com ansible_ssh_host=ec2-34-213-120-47.us-west-2.compute.amazonaws.com
bastion.dev37.openshift.opentlc.com

[dev]
bastion.dev.openshift.opentlc.com
bastion.dev37.openshift.opentlc.com

[prod]
bastion.na1.openshift.opentlc.com
bastion.rhpds.openshift.opentlc.com
ansible/roles/host-gogs-server/tasks/main.yml
@@ -4,4 +4,6 @@
    url: http://{{ ansible_hostname }}:3000/install
    method: POST
    body: "{{ lookup('template', 'templates/gogs_config.j2') }}"
-    status_code: 302
+    status_code:
+      - 302
+      - 404 # because the /install URL disappears after configuration
ansible/roles/host-jenkins-server/tasks/main.yml
@@ -54,7 +54,7 @@
    dest: "{{ jenkins_jar_location }}"
    validate_certs: no
  register: jarfile_get
-  until: "'OK' in jarfile_get.msg or 'file already exists' in jarfile_get.msg"
+  until: "'OK' in jarfile_get.msg or 'file already exists' in jarfile_get.msg or 'Not Modified' in jarfile_get.msg"
  retries: 5
  delay: 10
  check_mode: no
ansible/roles/infra-ec2-template-create/tasks/main.yml
@@ -42,7 +42,7 @@
  register: cloudformation_out
  until: >-
    cloudformation_out is succeeded
-    or (
+    and (
      'output' in cloudformation_out
      and cloudformation_out.output in ["Stack CREATE complete", "Stack is already up-to-date."]
    )
@@ -72,7 +72,7 @@
  register: cloudformation_out_s3
  until: >-
    cloudformation_out is succeeded
-    or (
+    and (
      'output' in cloudformation_out
      and cloudformation_out.output in ["Stack CREATE complete", "Stack is already up-to-date."]
    )
ansible/roles/molecule/defaults/main.yml
@@ -7,3 +7,4 @@
molecule_pip_packages:
  - molecule
+  - docker-py
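For context, a hedged sketch of how a role task might consume the extended `molecule_pip_packages` list (the task itself is illustrative; only the variable comes from this changeset):

[source,yaml]
----
# Illustrative only: the pip module accepts the whole list in one call.
- name: Install Molecule and its Docker driver dependency
  pip:
    name: "{{ molecule_pip_packages }}"
----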
ansible/roles/ocp-workload-3scale-experienced/defaults/main.yml
@@ -20,7 +20,7 @@
quota_secrets: 30
quota_requests_storage: 50Gi

-ocp_apps_domain: apps.{{ocp_domain}}
+ocp_apps_domain: apps.{{subdomain_base}}

build_status_retries: 20
build_status_delay: 20
ansible/roles/ocp-workload-3scale-experienced/readme.adoc
@@ -2,47 +2,7 @@
This workload only sets a clusterquota for a student who needs to provision a 3scale AMP.

-=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
-
-----
-HOST_GUID=dev39
-TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
-WORKLOAD="ocp-workload-3scale-experienced"
-SSH_USERNAME="jbride-redhat.com"
-SSH_PRIVATE_KEY="id_ocp"
-GUID=jb05
-OCP_USERNAME="jbride-redhat.com"
-
-# a TARGET_HOST is specified in the command line, without using an inventory file
-ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
-  -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
-  -e"ansible_ssh_user=${SSH_USERNAME}" \
-  -e"ANSIBLE_REPO_PATH=`pwd`" \
-  -e"ocp_username=${OCP_USERNAME}" \
-  -e"ocp_workload=${WORKLOAD}" \
-  -e"guid=${GUID}" \
-  -e"ocp_user_needs_quota=true" \
-  -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
-  -e"ACTION=create"
-----
-
-=== To Delete an environment
-----
-HOST_GUID=dev39
-TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
-WORKLOAD="ocp-workload-3scale-experienced"
-GUID=jb05
-OCP_USERNAME="jbride-redhat.com"
-
-# a TARGET_HOST is specified in the command line, without using an inventory file
-ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
-  -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
-  -e"ansible_ssh_user=${SSH_USERNAME}" \
-  -e"ANSIBLE_REPO_PATH=`pwd`" \
-  -e"ocp_username=${OCP_USERNAME}" \
-  -e"ocp_workload=${WORKLOAD}" \
-  -e"guid=${GUID}" \
-  -e"ACTION=remove"
-----
+GUID=user1
@@ -51,8 +11,6 @@
  -e"ANSIBLE_REPO_PATH=`pwd`" \
  -e"ocp_workload=${WORKLOAD}" \
  -e"guid=${GUID}" \
-  -e"ocp_user_needs_quota=true" \
-  -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
  -e"ACTION=create"

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
@@ -60,3 +18,5 @@
  -e"ocp_workload=${WORKLOAD}" \
  -e"guid=${GUID}" \
  -e"ACTION=remove"
+----
ansible/roles/ocp-workload-3scale-lifecycle/defaults/main.yml
@@ -18,8 +18,7 @@
quota_secrets: 150
quota_requests_storage: 50Gi

-ocp_domain: "{{subdomain_base}}"
-ocp_apps_domain: "apps.{{ocp_domain}}"
+ocp_apps_domain: "apps.{{subdomain_base}}"

build_status_retries: 20
build_status_delay: 20
ansible/roles/ocp-workload-3scale-multitenant/defaults/main.yml
@@ -5,15 +5,15 @@
  - OPENTLC-PROJECT-PROVISIONERS

# By default, this 3scale multi-tenant environment will be owned by admin1
-ocp_username: admin1
+ocp_username: adm1

-ocp_user_needs_quota: true
+ocp_user_needs_quota: True

-quota_requests_cpu: 10
-quota_limits_cpu: 20
+quota_requests_cpu: 20
+quota_limits_cpu: 30

-quota_requests_memory: '10Gi'
-quota_limits_memory: '23Gi'
+quota_requests_memory: '20Gi'
+quota_limits_memory: '30Gi'

quota_configmaps: 15
quota_pods: 30
@@ -29,26 +29,32 @@
deploy_status_retries: 15
deploy_status_delay: 20

-ocp_domain: "{{subdomain_base}}"
-ocp_apps_domain: "apps.{{ocp_domain}}"
+ocp_apps_domain: "apps.{{subdomain_base}}"

lab_name: 3scale-mt
-ocp_project: "{{lab_name}}-{{guid}}"
+ocp_project: "{{lab_name}}-{{ocp_username}}"
ocp_user_passwd: r3dh4t1!

-threescale_template: https://raw.githubusercontent.com/gpe-mw-training/3scale_onpremise_implementation_labs/master/resources/3scale-amp-2.2.yml
+threescale_template: https://raw.githubusercontent.com/gpe-mw-training/3scale_onpremise_implementation_labs/master/resources/rhte/3scale-amp-2.2-higher-limits.yml
+#threescale_template: https://raw.githubusercontent.com/gpe-mw-training/3scale_onpremise_implementation_labs/master/resources/3scale-amp-2.2.yml
modified_template_path: /tmp/amp.yml

amp_admin_passwd: r3dh4t1!
master_passwd: r3dh4t1!
master_access_token: wtqhhsly
new_app_output: "/tmp/{{ocp_project}}-out.log"

-# new tenants
+###### new tenants #######
+provision_tenants: False
+create_gws_with_each_tenant: false
tenant_output_dir: /tmp/3scale_tenants
tenant_provisioning_log_file: tenant_provisioning.log
tenant_provisioning_results_file: user_info_file.txt
start_tenant: 1
end_tenant: 100
tenantAdminPasswd: r3dh4t1!
-create_gws_with_each_tenant: false
+###################################
ansible/roles/ocp-workload-3scale-multitenant/readme.adoc
ansible/roles/ocp-workload-3scale-multitenant/readme.adoc
@@ -8,14 +8,14 @@
. This workload only needs to be executed once per OCP cluster
. This workload provisions a single centralized 3scale multi-tenant app in a single OCP namespace called: 3scale-mt-$GUID
. The OCP namespace for 3scale multi-tenant app will be owned by the following non-existent user:
.. admin1
.. admin1 will be assigned a clusterquota so as to manage limits and requests assigned to 3scale
.. adm0
.. adm0 will be assigned a clusterquota so as to manage limits and requests assigned to 3scale

=== Tenants

This workload can optionally create multiple tenants in this single multi-tenant 3scale. If so, then ensure the following when invoking this ansible:

. specify TAGS as either "all" or "tenants"
. specify ACTION = "tenant_mgmt"
. specify "start_tenant" and "end_tenant" variables
. set value of CREATE_GWS_WITH_EACH_TENANT (true / false) to automate provisioning of a staging and production GW for each tenant
@@ -24,9 +24,6 @@
-----
# valid values: "tenants"
TAGS=tenants

# Tenant related variables
START_TENANT=1
END_TENANT=1
@@ -34,24 +31,32 @@
WORKLOAD="ocp-workload-3scale-multitenant"
REGION=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$REGION.openshift.opentlc.com
GUID=adm0

# API manager provision
ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=$GUID" \
    -e"ACTION=create" \
    -e"ocp_domain=$OCP_DOMAIN" \
    -e"ACTION=create"

# Tenant Management
ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"ACTION=tenant_mgmt" \
    -e"start_tenant=$START_TENANT" \
    -e"end_tenant=$END_TENANT" \
    -e"create_gws_with_each_tenant=$CREATE_GWS_WITH_EACH_TENANT" \
    -t $TAGS
    -e"create_gws_with_each_tenant=$CREATE_GWS_WITH_EACH_TENANT"

# Delete
ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=$GUID" \
    -e"ACTION=remove"
-----

-----
-e"subdomain_base=$SUB_DOMAIN"
-----
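After a tenant rollout, the script's progress and the generated tenant credentials can be checked on the host that ran the playbook. A quick sketch, assuming the default output paths from this role's defaults/main.yml:

----
# Defaults come from tenant_output_dir, tenant_provisioning_log_file,
# and tenant_provisioning_results_file in defaults/main.yml
tail -f /tmp/3scale_tenants/tenant_provisioning.log   # rollout progress
cat /tmp/3scale_tenants/user_info_file.txt            # per-tenant access details
----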
ansible/roles/ocp-workload-3scale-multitenant/tasks/main.yml
@@ -14,6 +14,11 @@
  become: "{{ become_override | bool }}"
  when: ACTION == "create" or ACTION == "provision"

- name: Running Tenant Provision Tasks
  include: ./tenant_mgmt.yml
  become: "{{ become_override | bool }}"
  when: ACTION == "tenant_mgmt"

- name: Running Workload removal Tasks
  include: ./remove_workload.yml
  become: "{{ become_override | bool }}"
ansible/roles/ocp-workload-3scale-multitenant/tasks/remove_workload.yml
@@ -3,30 +3,37 @@
  debug:
    msg: "Pre-Software checks completed successfully - Removed"

- name: Remove user Quota - oc delete clusterresourcequota "clusterquota-{{ocp_username}}-{{ocp_project}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{ocp_project}}
  ignore_errors: true

- name: Remove user Project
  shell: "oc delete project {{ocp_project}}"

##### TENANT Management #######
- name: Tenant removal starting
  debug:
    msg:
      - "tenant_output_dir: {{tenant_output_dir}}"
      - "tenant_provisioning_log_file = {{tenant_output_dir}}/{{tenant_provisioning_log_file}}"
      - "tenant_provisioning_results_file = {{tenant_output_dir}}/{{tenant_provisioning_results_file}}"
      - "start and end tenants = {{start_tenant}} {{end_tenant}}"
      - "create API Gateways for each tenant = {{create_gws_with_each_tenant}}"

- name: Copy tenant provisioning script to known path
  template:
    src: templates/manage_tenants.sh
    dest: /tmp/manage_tenants.sh
    mode: 0755
  tags: all,tenants
  vars:
    create_tenants: "false"

- name: Create tenants
- name: Remove tenants
  shell: "/tmp/manage_tenants.sh"
  tags: all,tenants
  ignore_errors: true
##############################################

- name: Remove user Quota - oc delete clusterresourcequota "clusterquota-{{ocp_username}}-{{ocp_project}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{ocp_project}}
  ignore_errors: true

- name: Remove user Project
  shell: "oc delete project {{ocp_project}}"

- name: post_workload Tasks Complete
  debug:
ansible/roles/ocp-workload-3scale-multitenant/tasks/tenant_mgmt.yml
New file
@@ -0,0 +1,30 @@
---
- name: Tenant provisioning starting
  debug:
    msg:
      - "tenant_output_dir: {{tenant_output_dir}}"
      - "tenant_provisioning_log_file = {{tenant_output_dir}}/{{tenant_provisioning_log_file}}"
      - "tenant_provisioning_results_file = {{tenant_output_dir}}/{{tenant_provisioning_results_file}}"
      - "start and end tenants = {{start_tenant}} {{end_tenant}}"
      - "create API Gateways for each tenant = {{create_gws_with_each_tenant}}"

- name: Copy tenant provisioning script to known path
  template:
    src: templates/manage_tenants.sh
    dest: /tmp/manage_tenants.sh
    mode: 0755
  vars:
    create_tenants: "true"

- name: Create tenants
  shell: "/tmp/manage_tenants.sh"

- name: Creation of tenants complete
  debug:
    msg: Creation of tenants complete. output files available at {{tenant_output_dir}}

- name: Tenant Rollout Complete
  debug:
    msg: Tenant Rollout Complete
ansible/roles/ocp-workload-3scale-multitenant/tasks/workload.yml
@@ -27,7 +27,7 @@
  when: api_project_result is failed

- name: "Label namespace"
  command: "oc label namespace {{ocp_project}} AAD='{{guid}}'"
  command: "oc label namespace {{ocp_project}} AAD='{{ocp_username}}'"
  when: api_project_result is failed

- name: Make sure we go back to default project
@@ -162,37 +162,9 @@
    pod_to_wait:
      - zync
  when: api_project_result is failed

###################################################################
################        Tenant Rollout        #####################

- name: Tenant provisioning starting
  debug:
    msg:
      - "tenant_output_dir: {{tenant_output_dir}}"
      - "tenant_provisioning_log_file = {{tenant_output_dir}}/{{tenant_provisioning_log_file}}"
      - "tenant_provisioning_results_file = {{tenant_output_dir}}/{{tenant_provisioning_results_file}}"
      - "start and end tenants = {{start_tenant}} {{end_tenant}}"
      - "create API Gateways for each tenant = {{create_gws_with_each_tenant}}"
  tags: tenants

- name: Copy tenant provisioning script to known path
  template:
    src: templates/manage_tenants.sh
    dest: /tmp/manage_tenants.sh
    mode: 0755
  tags: tenants
  vars:
    create_tenants: "true"

- name: Create tenants
  shell: "/tmp/manage_tenants.sh"
  tags: tenants

- name: Creation of tenants complete
  debug:
    msg: Creation of tenants complete. output files available at {{tenant_output_dir}}
  tags: tenants

- name: Annotate the empty project as requested by user
  shell: "oc annotate namespace {{ocp_project}} openshift.io/requester={{ocp_username}} --overwrite"
###################################################################
ansible/roles/ocp-workload-3scale-multitenant/templates/manage_tenants.sh
@@ -11,7 +11,7 @@
master_access_token={{master_access_token}}
tenantAdminPasswd={{tenantAdminPasswd}}
create_tenant_url=https://{{ocp_project}}-master-admin.{{ocp_apps_domain}}/master/api/providers.xml
delete_tenant_url=https://{{ocp_project}}-master-admin.{{ocp_apps_domain}}/master/api/providers.xml
delete_tenant_sub_url=https://{{ocp_project}}-master-admin.{{ocp_apps_domain}}/master/api/providers/
output_dir={{tenant_output_dir}}
user_info_file=$output_dir/{{tenant_provisioning_results_file}}
log_file=$output_dir/{{tenant_provisioning_log_file}}
@@ -31,7 +31,7 @@
curl -o $output_dir/3scale-apicast.yml https://raw.githubusercontent.com/gpe-mw-training/3scale_onpremise_implementation_labs/master/resources/rhte/3scale-apicast.yml

for i in $(seq ${startTenant} ${endTenant}) ; do
    orgName=user$i-3scale-mt;
    orgName=user$i-{{ocp_project}};
    tenantAdminId=user$i;
    output_file=$orgName-tenant-signup.xml
@@ -161,22 +161,23 @@
echo -en "\n\nDeleting tenants $startTenant through $endTenant \n" > $log_file

for i in $(seq ${startTenant} ${endTenant}) ; do
    orgName=user$i-3scale-mt;
    orgName=user$i-{{ocp_project}};
    tenantAdminId=user$i;

    #1) delete tenant project
    oc adm new-project $tenantAdminId-gw >> $log_file
    oc delete project $tenantAdminId-gw >> $log_file

    #2) delete routes
    oc delete route $orgName-provider -n {{ocp_project}} >> $log_file
    oc delete route $orgName-developer -n {{ocp_project}} >> $log_file

    #3) delete tenant in 3scale API Manager
    curl -k \
        -X DELETE \
        -d access_token=$master_access_token \
        -d org_name=$orgName \
        $delete_tenant_url >> $log_file
    # delete_tenants_url=$delete_tenants_sub + $tenantId +".xml"
    #curl -k \
    #    -X DELETE \
    #    -d access_token=$master_access_token \
    #    -d org_name=$orgName \
    #    $delete_tenant_url >> $log_file
done
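The commented-out block above hints at the intended replacement: deleting a tenant by its numeric ID against the new `delete_tenant_sub_url` rather than by org name. A hedged sketch of what that call might look like (capturing `$tenantId` from the signup XML at create time is an assumption; it is not wired up in this patch):

----
# Sketch only: DELETE /master/api/providers/{id}.xml is the per-tenant
# form of the endpoint; tenantId must be recorded during tenant signup.
curl -k \
    -X DELETE \
    -d access_token=$master_access_token \
    ${delete_tenant_sub_url}${tenantId}.xml >> $log_file
----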
ansible/roles/ocp-workload-appmod-migration/ilt_provision.sh
@@ -5,10 +5,11 @@
WORKLOAD="ocp-workload-appmod-migration"
LOG_FILE=/tmp/$WORKLOAD
HOST_GUID=`oc whoami --show-server | cut -d'.' -f2`

for var in $@
do
    case "$var" in
        --HOST_GUID=*) HOST_GUID=`echo $var | cut -f2 -d\=` ;;
        --START_PROJECT_NUM=*) START_PROJECT_NUM=`echo $var | cut -f2 -d\=` ;;
        --END_PROJECT_NUM=*) END_PROJECT_NUM=`echo $var | cut -f2 -d\=` ;;
        -h) HELP=true ;;
@@ -17,12 +18,6 @@
done

function ensurePreReqs() {
    if [ "x$HOST_GUID" == "x" ]; then
        echo -en "must pass parameter: --HOST_GUID=<ocp host GUID> . \n\n"
        help
        exit 1;
    fi

    LOG_FILE=$LOG_FILE-$HOST_GUID-$START_PROJECT_NUM-$END_PROJECT_NUM.log
    echo -en "starting\n\n" > $LOG_FILE
@@ -31,11 +26,10 @@

function help() {
    echo -en "\n\nOPTIONS:";
    echo -en "\n\t--HOST_GUID=*           REQUIRED: specify GUID of target OCP environment)"
    echo -en "\n\t--START_PROJECT_NUM=*   OPTIONAL: specify # of first OCP project to provision (default = 1))"
    echo -en "\n\t--END_PROJECT_NUM=*     OPTIONAL: specify # of OCP projects to provision (default = 1))"
    echo -en "\n\t-h                      this help manual"
    echo -en "\n\n\nExample: ./roles/$WORKLOAD/ilt_provision.sh --HOST_GUID=dev39 --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
    echo -en "\n\n\nExample: ./roles/$WORKLOAD/ilt_provision.sh --START_PROJECT_NUM=1 --END_PROJECT_NUM=1\n\n"
}
@@ -71,7 +65,6 @@
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=true" \
    -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
    -e"ACTION=create" >> $LOG_FILE

if [ $? -ne 0 ]; then
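The new default derivation of HOST_GUID is worth spelling out: `oc whoami --show-server` prints the API URL of the currently logged-in cluster, and the second dot-separated field of that URL is the environment GUID, so the flag becomes optional. For example:

----
# e.g. oc whoami --show-server  ->  https://master.dev39.openshift.opentlc.com:443
oc whoami --show-server | cut -d'.' -f2   # -> dev39
----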
ansible/roles/ocp-workload-appmod-migration/readme.adoc
@@ -6,15 +6,12 @@
WORKLOAD="ocp-workload-appmod-migration"
GUID=1
OCP_USERNAME="user$GUID"
HOST_GUID=dev39
OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=true" \
    -e"ocp_domain=$OCP_DOMAIN" \
    -e"ACTION=create"

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
@@ -25,47 +22,3 @@
    -e"ACTION=remove"
-----

=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-appmod-migration"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
GUID=gptetraining01
OCP_USERNAME="gpsetraining1"

# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
    -e"ansible_ssh_user=${SSH_USERNAME}" \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=true" \
    -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
    -e"ACTION=create"
----

=== To Delete an environment
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-appmod-migration"
GUID=gptetraining01
OCP_USERNAME="gpsetraining1"

# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
    -e"ansible_ssh_user=${SSH_USERNAME}" \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ACTION=remove"
----
ansible/roles/ocp-workload-appmod-migration/tasks/remove_workload.yml
@@ -20,7 +20,6 @@
- name: Remove user Projects - oc get projects
  command: "oc get projects -o json"
  register: all_projects

- name: Remove user Projects - Convert output to json
  set_fact:
ansible/roles/ocp-workload-appmod-migration/tasks/workload.yml
@@ -39,6 +39,7 @@
- name: Delete default limitrange
  shell: |
    oc delete limitrange {{ocp_constraints_project}}-core-resource-limits -n {{ocp_constraints_project}}
  ignore_errors: true

- name: Create a new limitrange
  template:
ansible/roles/ocp-workload-bxms-pam/defaults/main.yml
@@ -25,6 +25,7 @@
deploy_status_retries: 20
deploy_status_delay: 20

kie_admin_passwd: test1234!
MAVEN_REPO_URL: http://nexus3.default.svc.cluster.local:8081/repository/maven-public/
ansible/roles/ocp-workload-bxms-pam/readme.adoc
@@ -10,15 +10,11 @@
WORKLOAD="ocp-workload-bxms-pam"
GUID=jb45
HOST_GUID=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"ocp_user_needs_quota=true" \
    -e"guid=$GUID" \
    -e"ocp_domain=$OCP_DOMAIN" \
    -e"ACTION=create" \
    -e"ocp_username=dtorresf-redhat.com"
@@ -30,50 +26,8 @@
    -e"ocp_username=dtorresf-redhat.com"
-----

-----
-e"subdomain_base=$SUB_DOMAIN"
-----

=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
GUID=jb45
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-bxms-pam"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"

# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
    -e"ansible_ssh_user=${SSH_USERNAME}" \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=true" \
    -e"ocp_apps_domain=apps.${HOST_GUID}.openshift.opentlc.com" \
    -e"ACTION=create"
----

=== To Delete an environment
----
GUID=jb45
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
OCP_USERNAME="jbride-redhat.com"
WORKLOAD="ocp-workload-bxms-pam"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"

# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
    -e"ansible_ssh_user=${SSH_USERNAME}" \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ACTION=remove"
----
ansible/roles/ocp-workload-fuse-ignite/defaults/main.yml
@@ -22,7 +22,7 @@
quota_secrets: 30
quota_requests_storage: 50Gi

ocp_apps_domain: apps.{{ocp_domain}}
ocp_apps_domain: apps.{{subdomain_base}}

build_status_retries: 20
build_status_delay: 20
ansible/roles/ocp-workload-fuse-ignite/ilt_provision.sh
@@ -66,43 +66,6 @@
    done
}

# Execute Ansible using the oc client on remote bastion node of an OCP workshop environment
function executeAnsibleViaBastion() {
    TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
    SSH_USERNAME="jbride-redhat.com"
    SSH_PRIVATE_KEY="id_ocp"

    # NOTE: Ensure you have ssh'd (as $SSH_USERNAME) into the bastion node of your OCP cluster environment at $TARGET_HOST and logged in using opentlc-mgr account:
    #    oc login https://master.$HOST_GUID.openshift.opentlc.com -u opentlc-mgr

    GUID=$PROJECT_PREFIX$GUID
    echo -en "\n\nexecuteAnsibleViaBastion(): Provisioning project with GUID = $GUID and OCP_USERNAME = $OCP_USERNAME\n" >> $LOG_FILE

    ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
        -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
        -e"ansible_ssh_user=${SSH_USERNAME}" \
        -e"ANSIBLE_REPO_PATH=`pwd`" \
        -e"ocp_username=${OCP_USERNAME}" \
        -e"ocp_workload=${WORKLOAD}" \
        -e"guid=${GUID}" \
        -e"ocp_user_needs_quota=true" \
        -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
        -e"POSTGRESQL_MEMORY_LIMIT=$POSTGRESQL_MEMORY_LIMIT" \
        -e"PROMETHEUS_MEMORY_LIMIT=$PROMETHEUS_MEMORY_LIMIT" \
        -e"META_MEMORY_LIMIT=$META_MEMORY_LIMIT" \
        -e"SERVER_MEMORY_LIMIT=$SERVER_MEMORY_LIMIT" \
        -e"ACTION=create" >> $LOG_FILE

    if [ $? -ne 0 ]; then
        echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n " >> $LOG_FILE
        echo -en "\n\n*** Error provisioning where GUID = $GUID\n\n "
        exit 1;
    fi
}

function executeAnsibleViaLocalhost() {
    GUID=$PROJECT_PREFIX$GUID
@@ -115,7 +78,6 @@
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=true" \
    -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
    -e"POSTGRESQL_MEMORY_LIMIT=$POSTGRESQL_MEMORY_LIMIT" \
    -e"PROMETHEUS_MEMORY_LIMIT=$PROMETHEUS_MEMORY_LIMIT" \
    -e"META_MEMORY_LIMIT=$META_MEMORY_LIMIT" \
ansible/roles/ocp-workload-fuse-ignite/readme.adoc
@@ -1,59 +1,6 @@
= ocp-workload-developer-environment - Sample Config

== Execution using remote (bastion node) oc client

=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-fuse-ignite"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
GUID=jb05
POSTGRESQL_MEMORY_LIMIT=512Mi
PROMETHEUS_MEMORY_LIMIT=255Mi
META_MEMORY_LIMIT=1Gi
SERVER_MEMORY_LIMIT=2Gi
OCP_USERNAME="jbride-redhat.com"

# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
    -e"ansible_ssh_user=${SSH_USERNAME}" \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=true" \
    -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
    -e"POSTGRESQL_MEMORY_LIMIT=$POSTGRESQL_MEMORY_LIMIT" \
    -e"PROMETHEUS_MEMORY_LIMIT=$PROMETHEUS_MEMORY_LIMIT" \
    -e"META_MEMORY_LIMIT=$META_MEMORY_LIMIT" \
    -e"SERVER_MEMORY_LIMIT=$SERVER_MEMORY_LIMIT" \
    -e"ACTION=create"
----

=== To Delete an environment
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-fuse-ignite"
GUID=jb05
OCP_USERNAME="jbride-redhat.com"

# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
    -e"ansible_ssh_user=${SSH_USERNAME}" \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ACTION=remove"
----

== Execution using localhost oc client
----
WORKLOAD="ocp-workload-fuse-ignite"
HOST_GUID=dev39
@@ -62,13 +9,11 @@
PROMETHEUS_MEMORY_LIMIT=255Mi
META_MEMORY_LIMIT=1Gi
SERVER_MEMORY_LIMIT=2Gi
OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"ocp_user_needs_quota=true" \
    -e"guid=$GUID" \
    -e"ocp_domain=$OCP_DOMAIN" \
    -e"POSTGRESQL_MEMORY_LIMIT=$POSTGRESQL_MEMORY_LIMIT" \
    -e"PROMETHEUS_MEMORY_LIMIT=$PROMETHEUS_MEMORY_LIMIT" \
    -e"META_MEMORY_LIMIT=$META_MEMORY_LIMIT" \
ansible/roles/ocp-workload-fuse-ignite/tasks/workload.yml
@@ -46,7 +46,7 @@
  shell: |
    oc new-app {{ignite_template_name}} \
        -p ROUTE_HOSTNAME=fuse.{{ocp_project}}.{{ocp_apps_domain}} \
        -p OPENSHIFT_MASTER=https://master.{{ocp_domain}} \
        -p OPENSHIFT_MASTER=https://master.{{subdomain_base}} \
        -p OPENSHIFT_PROJECT={{ocp_project}} \
        -p POSTGRESQL_MEMORY_LIMIT={{POSTGRESQL_MEMORY_LIMIT}} \
        -p PROMETHEUS_MEMORY_LIMIT={{PROMETHEUS_MEMORY_LIMIT}} \
ansible/roles/ocp-workload-fuse-on-ocp/defaults/main.yml
@@ -22,7 +22,7 @@
quota_secrets: 30
quota_requests_storage: 50Gi

ocp_apps_domain: apps.{{ocp_domain}}
ocp_apps_domain: apps.{{subdomain_base}}

build_status_retries: 20
build_status_delay: 20
ansible/roles/ocp-workload-fuse-on-ocp/ilt_provision.sh
@@ -80,7 +80,6 @@
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=true" \
    -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
    -e"ACTION=create" >> $LOG_FILE

if [ $? -ne 0 ]; then
ansible/roles/ocp-workload-fuse-on-ocp/readme.adoc
@@ -20,7 +20,6 @@
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=true" \
    -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
    -e"ACTION=create"
----
ansible/roles/ocp-workload-iot-demo/tasks/workload.yml
@@ -109,11 +109,11 @@
- name: Print Console URL
  debug:
    msg: "user.info: EC Console: http://ec-console-{{ocp_project}}.{{ocp_apps_domain}}"
    msg: "user.info: EC Console: http://ec-console-{{ocp_project}}.{{ocp_apps_domain}} UserID: ec-sys Password: ec-password"

- name: Print MQTT Broker URL
  debug:
    msg: "user.info: EC Broker MQTT: mqtt://ec-broker-mqtt.{{ocp_project}}.{{ocp_apps_domain}}:{{mqtt_port}}"
    msg: "user.info: EC Broker MQTT: mqtt://ec-broker-mqtt.{{ocp_project}}.{{ocp_apps_domain}}:{{mqtt_port}} Account: Red-Hat Login: ec-sys Password: ec-password"

# Not used yet, disable until it is relevant
#- name: Print MQTTS Broker URL
@@ -131,7 +131,7 @@
- name: Print ESF Virtual Gateway URL
  debug:
    msg: "user.info: ESF Virtual Gateway: http://esf-{{ocp_project}}.{{ocp_apps_domain}}"
    msg: "user.info: ESF Virtual Gateway: http://esf-{{ocp_project}}.{{ocp_apps_domain}} UserID: admin Password: admin"

- name: Expose virtual gateway web ui
  shell: "oc expose svc/esf"
ansible/roles/ocp-workload-istio-community/defaults/main.yml
@@ -1,6 +1,5 @@
---
become_override: false
ocp_username: jbride-redhat.com
ocp_user_needs_quota: false

ocp_user_groups:
@@ -19,11 +18,12 @@
quota_secrets: 150
quota_requests_storage: 50Gi

ocp_domain: "{{subdomain_base}}"
ocp_apps_domain: "apps.{{ocp_domain}}"
ocp_apps_domain: "apps.{{subdomain_base}}"

build_status_retries: 20
build_status_delay: 20
deploy_status_retries: 15
deploy_status_delay: 20

lab_name: istio_community
ansible/roles/ocp-workload-istio-community/readme.adoc
@@ -13,26 +13,20 @@
-----
WORKLOAD="ocp-workload-istio-community"
GUID=admin1
OCP_USERNAME="admin$GUID"
HOST_GUID=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com
NEEDS_QUOTA=true
OCP_USERNAME=adm0

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ACTION=create" \
    -e"ocp_user_needs_quota=${NEEDS_QUOTA}" \
    -e"ocp_domain=$OCP_DOMAIN" \
    -e"ACTION=create"

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ACTION=remove"
ansible/roles/ocp-workload-istio-community/tasks/pre_workload.yml
@@ -13,7 +13,7 @@
- name: Create user Quota - clusterresourcequota
  shell: |
    oc create clusterquota clusterquota-"{{ocp_username}}-{{guid}}" \
    oc create clusterquota clusterquota-"{{ocp_username}}-{{lab_name}}" \
        --project-annotation-selector=openshift.io/requester="{{ocp_username}}" \
        --hard requests.cpu="{{quota_requests_cpu}}" \
        --hard limits.cpu="{{quota_limits_cpu}}" \
ansible/roles/ocp-workload-istio-community/tasks/remove_workload.yml
@@ -3,12 +3,12 @@
  debug:
    msg: "Pre-Software checks completed successfully - Removed"

- name: Remove user Quota - oc delete clusterresourcequota "clusterquota-{{ocp_username}}-{{guid}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{guid}}
- name: Remove user Quota - oc delete clusterresourcequota "clusterquota-{{ocp_username}}-{{lab_name}}"
  shell: oc delete clusterresourcequota clusterquota-{{ocp_username}}-{{lab_name}}
  ignore_errors: true

- name: Remove any lingering tmp files
  shell: "rm -rf /tmp/{{guid}}"
  shell: "rm -rf /tmp/{{lab_name}}"

- name: Remove user Project
  shell: "oc delete project istio-system"
ansible/roles/ocp-workload-istio-community/tasks/workload.yml
@@ -60,7 +60,7 @@
###########################################################

- name: "Label namespace"
  command: "oc label namespace {{ocp_project}} AAD='{{guid}}'"
  command: "oc label namespace {{ocp_project}} AAD='{{lab_name}}'"

- name: Annotate the empty project as requested by user
  shell: "oc annotate namespace {{ocp_project}} openshift.io/requester={{ocp_username}} --overwrite"
ansible/roles/ocp-workload-mw-rh-sso/defaults/main.yml
File was renamed from ansible/roles/ocp-workload-rh-sso/defaults/main.yml
@@ -19,8 +19,7 @@
quota_secrets: 150
quota_requests_storage: 50Gi

ocp_domain: "{{subdomain_base}}"
ocp_apps_domain: "apps.{{ocp_domain}}"
ocp_apps_domain: "apps.{{subdomain_base}}"

build_status_retries: 20
build_status_delay: 20
ansible/roles/ocp-workload-mw-rh-sso/readme.adoc
File was renamed from ansible/roles/ocp-workload-rh-sso/readme.adoc
@@ -17,14 +17,12 @@
GUID=jb45
OCP_USERNAME="jbride-redhat.com"
HOST_GUID=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"ocp_user_needs_quota=true" \
    -e"guid=$GUID" \
    -e"ocp_domain=$OCP_DOMAIN" \
    -e"ACTION=create" \
    -e"ocp_username=${OCP_USERNAME}"
@@ -92,4 +90,4 @@
Author Information
------------------
dtorresf, honghuac
dtorresf, honghuac
ansible/roles/ocp-workload-mw-rh-sso/tasks/main.yml
ansible/roles/ocp-workload-mw-rh-sso/tasks/post_workload.yml
ansible/roles/ocp-workload-mw-rh-sso/tasks/pre_workload.yml
ansible/roles/ocp-workload-mw-rh-sso/tasks/remove_workload.yml
ansible/roles/ocp-workload-mw-rh-sso/tasks/wait_for_build.yml
ansible/roles/ocp-workload-mw-rh-sso/tasks/wait_for_deploy.yml
ansible/roles/ocp-workload-mw-rh-sso/tasks/workload.yml
ansible/roles/ocp-workload-rhte-mw-api-biz/defaults/main.yml
@@ -19,8 +19,7 @@
quota_secrets: 30
quota_requests_storage: 50Gi

ocp_domain: "{{subdomain_base}}"
ocp_apps_domain: "apps.{{ocp_domain}}"
ocp_apps_domain: "apps.{{subdomain_base}}"

build_status_retries: 20
build_status_delay: 20
ansible/roles/ocp-workload-rhte-mw-api-biz/tasks/workload.yml
@@ -111,7 +111,7 @@
  shell: |
    oc new-app {{ignite_template_name}} \
        -p ROUTE_HOSTNAME=fuse.{{ocp_project}}.{{ocp_apps_domain}} \
        -p OPENSHIFT_MASTER=https://master.{{ocp_domain}} \
        -p OPENSHIFT_MASTER=https://master.{{subdomain_base}} \
        -p OPENSHIFT_PROJECT={{fuseonline_cp_project}} \
        -p POSTGRESQL_MEMORY_LIMIT={{POSTGRESQL_MEMORY_LIMIT}} \
        -p PROMETHEUS_MEMORY_LIMIT={{PROMETHEUS_MEMORY_LIMIT}} \
ansible/roles/ocp-workload-rhte-mw-api-mesh/defaults/main.yml
@@ -18,8 +18,6 @@
quota_secrets: 150
quota_requests_storage: 50Gi

ocp_domain: "{{subdomain_base}}"
ocp_apps_domain: "apps.{{ocp_domain}}"

build_status_retries: 20
build_status_delay: 20
ansible/roles/ocp-workload-rhte-mw-api-mesh/ilt_provision.sh
@@ -77,7 +77,6 @@
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=true" \
    -e"ocp_domain=$OCP_DOMAIN" \
    -e"ACTION=create" >> $LOG_FILE

if [ $? -ne 0 ];
ansible/roles/ocp-workload-rhte-mw-api-mesh/readme.adoc
@@ -9,16 +9,13 @@
-----
WORKLOAD="ocp-workload-rhte-mw-api-mesh"
GUID=1
OCP_USERNAME="user$GUID"
REGION=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$REGION.openshift.opentlc.com
GUID=a1001
OCP_USERNAME="developer"

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_domain=$OCP_DOMAIN" \
    -e"ACTION=create"

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
ansible/roles/ocp-workload-rhte-mw-api-mesh/tasks/remove_workload.yml
@@ -11,9 +11,6 @@
  shell: oc delete clusterresourcequota {{cluster_quota_name}}
  ignore_errors: true

- name: Remove any lingering tmp files
  shell: "rm -rf /tmp/{{guid}}"

- name: Remove user Project
  shell: "oc delete project {{ocp_project}}"
  ignore_errors: true
ansible/roles/ocp-workload-rhte-mw-api-mesh/tasks/workload.yml
@@ -46,13 +46,10 @@
  shell: "oc create configmap app-config --from-file=/tmp/app-config.yaml -n {{ocp_project}}"

- stat: path=/tmp/coolstore-catalog-mongodb-persistent.yaml
  register: coolstore_file

- name: Copy catalog service (with db) template to known path
  template:
    src: templates/coolstore-catalog-mongodb-persistent.yaml
    dest: /tmp/coolstore-catalog-mongodb-persistent.yaml
  when: coolstore_file.stat.exists == False

- name: Build and create catalog service
  shell: |
ansible/roles/ocp-workload-rhte-mw-api-mesh/templates/coolstore-catalog-mongodb-persistent.yaml
@@ -104,7 +104,13 @@
          each image to be.","price":44.30});
        failurePolicy: ignore
        timeoutSeconds: 600
      resources: {}
      resources:
        limits:
          cpu: 250m
          memory: 1Gi
        requests:
          cpu: 100m
          memory: 256Mi
    type: Recreate
  template:
    metadata:
@@ -126,7 +132,7 @@
          value: ${CATALOG_DATABASE}
        - name: MONGODB_ADMIN_PASSWORD
          value: ${CATALOG_DB_PASSWORD}
        image: mongodb
        image: mongodb:3.4
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 10
ansible/roles/ocp-workload-rhte-mw-bfield-migration/readme.adoc
@@ -7,14 +7,11 @@
GUID=1
OCP_USERNAME="user$GUID"
HOST_GUID=dev39
OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=true" \
    -e"ocp_domain=$OCP_DOMAIN" \
    -e"ACTION=create"

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
@@ -25,47 +22,3 @@
    -e"ACTION=remove"
-----

=== Deploy a Workload with the `ocp-workload` playbook [Mostly for testing]
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-appmod-migration"
SSH_USERNAME="jbride-redhat.com"
SSH_PRIVATE_KEY="id_ocp"
GUID=gptetraining01
OCP_USERNAME="gpsetraining1"

# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
    -e"ansible_ssh_user=${SSH_USERNAME}" \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=true" \
    -e"ocp_domain=$HOST_GUID.openshift.opentlc.com" \
    -e"ACTION=create"
----

=== To Delete an environment
----
HOST_GUID=dev39
TARGET_HOST="bastion.$HOST_GUID.openshift.opentlc.com"
WORKLOAD="ocp-workload-appmod-migration"
GUID=gptetraining01
OCP_USERNAME="gpsetraining1"

# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/${SSH_PRIVATE_KEY}" \
    -e"ansible_ssh_user=${SSH_USERNAME}" \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ACTION=remove"
----
ansible/roles/ocp-workload-rhte-mw-msa-mesh/defaults/main.yml
@@ -18,8 +18,7 @@
quota_secrets: 150
quota_requests_storage: 50Gi

ocp_domain: "{{subdomain_base}}"
ocp_apps_domain: "apps.{{ocp_domain}}"
ocp_apps_domain: "apps.{{subdomain_base}}"

build_status_retries: 20
build_status_delay: 20
ansible/roles/ocp-workload-rhte-mw-msa-mesh/readme.adoc
@@ -12,14 +12,12 @@
GUID=1
OCP_USERNAME="user$GUID"
HOST_GUID=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=true" \
    -e"ocp_domain=$OCP_DOMAIN" \
    -e"ACTION=create"

ansible-playbook -i localhost, -c local ./configs/ocp-workloads/ocp-workload.yml \
ansible/roles/ocp-workload-rhte-mw-msa-orchestration/defaults/main.yml
@@ -19,8 +19,7 @@
quota_secrets: 100
quota_requests_storage: 80Gi

ocp_domain: "{{subdomain_base}}"
ocp_apps_domain: "apps.{{ocp_domain}}"
ocp_apps_domain: "apps.{{subdomain_base}}"

build_status_retries: 20
build_status_delay: 20
ansible/roles/ocp-workload-rhte-mw-msa-orchestration/ilt_provision.sh
@@ -5,7 +5,6 @@
WORKLOAD="ocp-workload-rhte-mw-msa-orchestration"
LOG_FILE=/tmp/$WORKLOAD
HOST_GUID=`oc whoami --show-server | cut -d'.' -f 2`
OCP_DOMAIN=$HOST_GUID.openshift.opentlc.com

PATH_TO_AAD_ROOT=$TRAINING/gpte/ansible_agnostic_deployer/ansible
@@ -77,7 +76,6 @@
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=True" \
    -e"ocp_domain=$OCP_DOMAIN" \
    -e"ACTION=create" >> $LOG_FILE

if [ $? -ne 0 ];
ansible/roles/ocp-workload-rhte-mw-op-intel/defaults/main.yml
@@ -19,8 +19,7 @@
quota_secrets: 100
quota_requests_storage: 50Gi

ocp_domain: "{{subdomain_base}}"
ocp_apps_domain: "apps.{{ocp_domain}}"
ocp_apps_domain: "apps.{{subdomain_base}}"

build_status_retries: 20
build_status_delay: 20
ansible/roles/ocp-workload-rhte-mw-op-intel/readme.adoc
@@ -15,7 +15,6 @@
    -e"ocp_workload=${WORKLOAD}" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_user_needs_quota=true" \
    -e"ocp_domain=${OCP_DOMAIN}" \
    -e"ACTION=create"
ansible/software_playbooks/tower.yml
@@ -49,7 +49,7 @@
    dest: "{{tower_inventory_path}}"

- name: unarchive the latest tower software
  unarchive:
    src: "https://releases.ansible.com/ansible-tower/setup/ansible-tower-setup-latest.tar.gz"
    src: "https://releases.ansible.com/ansible-tower/setup/ansible-tower-setup-{{ tower_setup_version | default('latest') }}.tar.gz"
    dest: /opt/tower
    remote_src: yes
@@ -59,7 +59,7 @@

- name: print out tower Installer
  debug:
    var: "{{tower_installer_path}}"
    var: tower_installer_path

- name: Add log path to Ansible configuration
  lineinfile:
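With the versioned URL in place, a specific installer bundle can be pinned at invocation time while unpinned runs keep the old latest-tarball behavior. A sketch (the version value is a placeholder, not a tested release):

----
# Pin a specific Tower setup bundle; omitting the var falls back to 'latest'
ansible-playbook software_playbooks/tower.yml -e "tower_setup_version=X.Y.Z"
----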
gpte-hosts
New file
@@ -0,0 +1,29 @@
[gptehosts:vars]
#ansible_ssh_private_key_file=~/.ssh/opentlc_admin_backdoor.pem
ansible_ssh_private_key_file=~/.ssh/ocpkey.pem
ansible_ssh_user=ec2-user

[gptehosts:children]
openshift
admin
dev
prod

[admin]
admin.na.shared.opentlc.com
#ipa host

[openshift]
bastion.na1.openshift.opentlc.com
bastion.rhpds.openshift.opentlc.com
bastion.dev.openshift.opentlc.com ansible_ssh_host=ec2-34-213-120-47.us-west-2.compute.amazonaws.com
bastion.dev1.openshift.opentlc.com

[dev]
bastion.dev.openshift.opentlc.com
bastion.dev1.openshift.opentlc.com

[prod]
bastion.na1.openshift.opentlc.com
bastion.rhpds.openshift.opentlc.com
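A quick way to sanity-check the new inventory is an ad-hoc ping against one of its groups, assuming the key referenced in [gptehosts:vars] is present locally:

----
# Confirm reachability of the dev bastions defined in gpte-hosts
ansible -i gpte-hosts dev -m ping
----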
scripts/bump_version.sh
@@ -4,7 +4,7 @@
    echo "$0 [CONFIG] [STAGE]"
    echo
    echo "CONFIG: ALL | ocp-workshop | ocp-demo-lab | ans-tower-lab | ..."
    echo "STAGE: test | prod"
    echo "STAGE: test | prod | rhte"
}

if [ -z "${1}" ] || [ -z "${2}" ]; then
tagging
New file
@@ -0,0 +1,66 @@
git tag -l

export TAG=three-tier-app-test-1.4
export TAG_COMMENT="Test ocp-ha-lab with new ec2 method"
git tag -a ${TAG} -m "${TAG_COMMENT}"
git push origin ${TAG}

export TAG=three-tier-app-test-1.3
export TAG_COMMENT="Test three-tier-app with new ec2 method"
git tag -a ${TAG} -m "${TAG_COMMENT}"
git push origin ${TAG}

export TAG=ocp-workshop-test-1.2
export TAG_COMMENT="Test ocp-workshop with new ec2 method"
git tag -a ${TAG} -m "${TAG_COMMENT}"
git push origin ${TAG}

ocp-ha-lab-test-1.1
three-tier-app-test-1.3
ocp-workshop-test-1.2

## Tested
CONFIG=ocp-demo-lab
CONFIG=ans-tower-lab

ans-tower-lab
auth-playground-lab
ocp-demo-lab
ocp-ha-lab
ocp-operations
ocp-workshop
three-tier-app

### THIS DOESN'T WORK ANYMORE
USER=sborenst
CONFIG=rhte-oc-cluster-vms
COMMENT="updated config for rhte - changed clientvmsize"
ENV=rhte

LATEST_TAG=`git tag -l | grep $CONFIG | grep $ENV | sort -n | tail -1 | awk -F"-" '{print $NF}'`
echo LATEST_TAG IS $LATEST_TAG

NEW_TAG_VERSION=`perl -E "say $LATEST_TAG + 0.1"`
echo NEW_TAG_VERSION IS $NEW_TAG_VERSION

NEW_TAG="${CONFIG}-${ENV}-$NEW_TAG_VERSION"
echo NEW_TAG IS $NEW_TAG

export TAG_COMMENT="$ENV $CONFIG $COMMENT . By $USER"
echo TAG_COMMENT IS $TAG_COMMENT

echo git tag -a ${NEW_TAG} -m "${TAG_COMMENT}"
echo git push origin ${NEW_TAG}

git tag -a ${NEW_TAG} -m "${TAG_COMMENT}"
git push origin ${NEW_TAG}

:verify_host_key
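The tag-bumping recipe in the scratch notes above can be wrapped into a small function; a sketch under the same assumptions the notes make (existing tags end in a single numeric component, and versions bump by 0.1):

----
# Sketch: derive and push the next CONFIG-ENV-N.N tag the same way the notes do
bump_tag() {
    local config=$1 env=$2 comment=$3
    local latest=$(git tag -l | grep "$config" | grep "$env" | sort -n | tail -1 | awk -F'-' '{print $NF}')
    local next=$(perl -E "say $latest + 0.1")
    git tag -a "${config}-${env}-${next}" -m "$comment"
    git push origin "${config}-${env}-${next}"
}
----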
test_oc.yml
New file
@@ -0,0 +1,16 @@
---
- name: test oc module
  hosts: localhost
  tasks:
    - debug:
        msg: "test"
    - name: Create project
      oc:
        state: present
        inline:
          kind: ProjectRequest
          metadata:
            name: ansibletestproject
            displayName: Ansible Test Project
            description: This project was created using Ansible
        token: eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImFuc2libGUtc2EtdG9rZW4tcTVuMGwiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiYW5zaWJsZS1zYSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImM5ZGE2ODBmLWNmMGItMTFlNy1iNDQ4LTAyZWU0MDVkMDFhMiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmFuc2libGUtc2EifQ.Yrz_YCQK8pN079uqU60zM30iGyCyBr_gZ8Z30vatfCQPQghv5TSMRV-f9EfZyyXOK0-uzJc7bzNWrd_x0WVv_3bvvMNjdbI386Ib0g1FCD8amwn4wSXCX5scFKznXxVyvDqFOLSgerjnImiBn3pkX6maPVowDqwo4ZTfmHMN3bmzWrZAQrRz6ecGeFXACoAEooTJexccXzEgdPWI6-D2G73BJdpkGzf8TJZVLpg-U0o9ANW3qXaEJ-3W2hqkcwbyVwcRZ4aHhmxXaOm3MrMRJbjswuhe6HPlXnE4Bcraq_HpuBtIUEhyrVR5_BaPYg4-ET1KUY80g3CYhKNdceW2Pg
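The test playbook runs against localhost with whatever service-account token is baked in, so it is only meaningful against the cluster that issued that token. A usage sketch:

----
# Sketch: the embedded token is environment-specific and would need replacing
ansible-playbook test_oc.yml
----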
tests/scenarii/rhte-oc-cluster-vms.yml
New file
@@ -0,0 +1,35 @@
---
guid: testocclustervms
env_type: rhte-oc-cluster-vms
key_name: john
email: john-smith@redhat.com
cloud_provider: ec2
aws_region: us-east-1
HostedZoneId: ...
subdomain_base_suffix: .openshift.opentlc.com
bastion_instance_type: t2.large
master_instance_type: m4.4xlarge
infranode_instance_type: m4.4xlarge
node_instance_type: m4.4xlarge
support_instance_type: t2.large
node_instance_count: 1
install_ipa_client: false
install_idm: htpasswd
install_zabbix: false
repo_method: file
own_repo_path: "..."
repo_version: "3.10"
software_to_deploy: none
osrelease: 3.10.14
install_openshiftapb: true
run_ocp_diagnostics: false
student_password: redhat
num_users: 60
user_vols: 500
install_lets_encrypt_certificates: false
install_openshiftapb: true
run_ocp_diagnostics: false
install_student_user: true
num_users: 1
student_workloads: ocp-workload-rhte-mw-op-intel
tests/scenarii/rhte-ocp-workshop.yml
New file
@@ -0,0 +1,29 @@
---
guid: testgucorerhte4
env_type: rhte-ocp-workshop
key_name: john
email: john-smith@redhat.com
cloud_provider: ec2
aws_region: us-east-1
HostedZoneId: xxxx
subdomain_base_suffix: .openshift.opentlc.com
bastion_instance_type: t2.large
master_instance_type: m4.4xlarge
infranode_instance_type: m4.4xlarge
node_instance_type: m4.4xlarge
support_instance_type: t2.large
node_instance_count: 1
install_ipa_client: false
install_idm: htpasswd
install_zabbix: false
repo_method: file
own_repo_path: "http://..."
repo_version: "3.10"
software_to_deploy: openshift
osrelease: 3.10.14
install_openshiftapb: true
run_ocp_diagnostics: false
student_password: redhat
num_users: 60
user_vols: 500
install_lets_encrypt_certificates: false
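These scenario files read like ready-made extra-vars payloads for a full deploy. A sketch of feeding one to the deployer (the exact entry-point playbook path is an assumption, not stated in this patch):

----
# Sketch only: pass a test scenario to the deployer as an extra-vars file
ansible-playbook main.yml -e @tests/scenarii/rhte-ocp-workshop.yml
----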
using_roleS_with_hong
New file
@@ -0,0 +1,44 @@
Getting started with Agnostic deployer for OCP Workloads

1. Developer needs to get the repo itself

git clone https://github.com/sborenst/ansible_agnostic_deployer
cd ansible_agnostic_deployer/ansible
git checkout development

2. Admin needs to allow access on the bastion host "dev37"

. Add developer public key to
----
~/.ssh/authorized_keys
----

. Check that the developer can access the bastion host
----
ssh ec2-user@bastion.dev37.openshift.opentlc.com
----

ansible/roles/ocp-workload-developer-environment

=== Read the readme file

=== Test that you can run the roles
----
TARGET_HOST="bastion.dev37.openshift.opentlc.com"
OCP_USERNAME="shacharb-redhat.com"
WORKLOAD="ocp-workload-developer-environment"
GUID=1001

# a TARGET_HOST is specified in the command line, without using an inventory file
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem" \
    -e"ansible_ssh_user=ec2-user" \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ocp_user_needs_quota=true" \
    -e"ACTION=create"
----
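For completeness, tearing the test workload back down mirrors the create call with ACTION=remove, following the same pattern the other workload readmes in this patch use:

----
ansible-playbook -i ${TARGET_HOST}, ./configs/ocp-workloads/ocp-workload.yml \
    -e"ansible_ssh_private_key_file=~/.ssh/keytoyourhost.pem" \
    -e"ansible_ssh_user=ec2-user" \
    -e"ANSIBLE_REPO_PATH=`pwd`" \
    -e"ocp_username=${OCP_USERNAME}" \
    -e"ocp_workload=${WORKLOAD}" \
    -e"guid=${GUID}" \
    -e"ACTION=remove"
----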