## TODO: What variables can we strip out of here to build complex variables?
## i.e. what can we add into group_vars as opposed to config_vars?
## Example: We don't really need "subdomain_base_short". If we want to use it,
## we should just toss it in group_vars/all.
### Also, we should probably just create a variable reference in the README.md.
### For now, we are just tagging comments in line with the configuration file.

### Vars that can be removed:
# use_satellite: true
# use_subscription_manager: false
# use_own_repos: false

###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
###### OR PASS as "-e" args to ansible-playbook command (see the example invocation below)

### Common Host settings

repo_method: file # Options are: file, satellite and rhn

# Do you want to run a full yum update?
update_packages: true

# If using repo_method: satellite, you must set these values as well.
# satellite_url: https://satellite.example.com
# satellite_org: Sat_org_name
# satellite_activationkey: "rhel7basic"

## guid is the deployment's unique identifier. It is appended to all tags,
## files and anything else that distinguishes this environment from another "just like it".
guid: defaultguid

# This is where the ssh_config file will be created; this file is used to
# define how to communicate with all the hosts in the deployment.
deploy_local_ssh_config_location: "{{ANSIBLE_REPO_PATH}}/workdir"

zabbix_host: 23.246.247.58

install_nfs: true
install_bastion: true
install_common: true
install_opentlc_integration: true
software_to_deploy: openshift

deploy_openshift: true
deploy_openshift_post: true
deploy_env_post: true
install_zabbix: false
ocp_report: false
install_ipa_client: false
remove_self_provisioners: false

# you can also use: allow_all, htpasswd, ldap
install_idm: ldap
idm_ca_url: http://ipa.opentlc.com/ipa/config/ca.crt

install_metrics: true
install_logging: true

repo_version: "3.6"
docker_version: "1.12.6"
docker_device: /dev/xvdb
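
## Illustrative only: one way to override some of the variables above with "-e" args
## instead of editing this file, as mentioned in the banner at the top. The playbook
## name (main.yml) and the values shown here are placeholders, not requirements of
## this configuration.
# ansible-playbook main.yml \
#   -e "ANSIBLE_REPO_PATH=$(pwd)" \
#   -e "guid=test01" \
#   -e "repo_method=file" \
#   -e "update_packages=false"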

### If you want a Key Pair name created and injected into the hosts,
# set `set_env_authorized_key` to true and set the key name in `env_authorized_key`.
# You can use the key used to create the environment or your own self-generated key.
# If you set "use_own_key" to false, your PRIVATE key (this is {{key_name}}) will be
# copied to the bastion.
use_own_key: true
env_authorized_key: "{{guid}}key"
ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
set_env_authorized_key: true

# Is this running from Red Hat Ansible Tower?
tower_run: false

admin_user: opentlc-mgr
admin_project: "ocp-workshop"

### AWS EC2 Environment settings

### Route 53 Zone ID (AWS)
# This is the Route53 HostedZoneId where you will create your Public DNS entries.
# This only needs to be defined if your CF template uses route53.
HostedZoneId: Z1TQFSYFZUAO0D

# The region to be used, if not specified by -e in the command line
aws_region: us-east-1

# The name of the AWS EC2 key pair used when launching the instances
key_name: "default_key_name"

## Networking (AWS)
# (see the resolved-name example further below)
subdomain_base_short: "{{ guid }}"
subdomain_base_suffix: ".openshift.opentlc.com"
subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"

## Environment Sizing
bastion_instance_type: "t2.large"
master_instance_type: "c4.4xlarge"
etcd_instance_type: "{{master_instance_type}}"
infranode_instance_type: "c4.4xlarge"
node_instance_type: "c4.4xlarge" # r3.2xlarge
support_instance_type: "c4.xlarge"

node_instance_count: 5
infranode_instance_count: 1
master_instance_count: 1
support_instance_count: 1
# scaleup
new_node_instance_count: 0

###### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYMENT

## This might get removed
env_specific_images:
  - "registry.access.redhat.com/jboss-eap-7/eap70-openshift:latest"

#### Vars for the OpenShift Ansible hosts file
master_api_port: 443
ovs_plugin: "subnet" # This can also be set to: "multitenant"
multi_tenant_setting: "os_sdn_network_plugin_name='redhat/openshift-ovs-{{ovs_plugin}}'"
master_lb_dns: "master.{{subdomain_base}}"
project_request_message: 'To provision Projects you must request access in https://labs.opentlc.com or https://rhpds.redhat.com'
cloudapps_suffix: 'apps.{{subdomain_base}}'
openshift_master_ldap_ca_file: 'openshift_master_ldap_ca_file=/root/ca.crt'

## TODO: This should be registered as a variable. Awk for the OS version (OCP):
## yum info openshift...
osrelease: 3.5.5.15
openshift_master_overwrite_named_certificates: true
timeout: 60
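
## Illustrative only: with "guid=test01" and the default subdomain_base_suffix,
## the templated names above would resolve roughly as follows. These are assumed
## example values, shown only to make the name composition explicit:
# subdomain_base:     test01.openshift.opentlc.com
# master_lb_dns:      master.test01.openshift.opentlc.com
# cloudapps_suffix:   apps.test01.openshift.opentlc.com
# env_authorized_key: test01key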

###### You can, but you usually wouldn't need to.
ansible_ssh_user: ec2-user
remote_user: ec2-user

common_packages:
  - python
  - unzip
  - bash-completion
  - tmux
  - bind-utils
  - wget
  - ansible
  - git
  - vim-enhanced
  - at

rhel_repos:
  - rhel-7-server-rpms
  - rhel-7-server-extras-rpms
  - rhel-7-server-ose-{{repo_version}}-rpms
  - rhel-7-fast-datapath-rpms

# use_subscription_manager: false
# use_own_repos: true
#
# rhn_pool_id_string: OpenShift Container Platform

## NFS Server settings
nfs_vg: nfsvg
nfs_pvs: /dev/xvdb
nfs_export_path: /srv/nfs
nfs_size: 50

nfs_shares:
  - es-storage
  - user-vols
  - jenkins
  - nexus
  - nexus2

ocp_pvs:
  - es-storage
  - nexus
  - nexus2
  - nexus3

user_vols: 200
user_vols_size: 4Gi

cache_images:
  - "registry.access.redhat.com/jboss-eap-7/eap70-openshift:latest"
# - "registry.access.redhat.com/rhscl/mongodb-32-rhel7:latest"
# - "registry.access.redhat.com/openshift3/jenkins-slave-maven-rhel7"
# - "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift"
# - "registry.access.redhat.com/jboss-fuse-6/fis-java-openshift"
# - "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift"
# - "registry.access.redhat.com/openshift3/jenkins-2-rhel7"
# - "registry.access.redhat.com/rhscl/mongodb-32-rhel7"
# - "registry.access.redhat.com/rhscl/nodejs-4-rhel7"
# - "registry.access.redhat.com/rhscl/postgresql-94-rhel7"
# - "registry.access.redhat.com/rhscl/postgresql-95-rhel7"

### CLOUDFORMATIONS vars
project_tag: "{{ env_type }}-{{ guid }}"

create_internal_dns_entries: true
zone_internal_dns: "{{guid}}.internal."
chomped_zone_internal_dns: "{{guid}}.internal"

zone_public_dns: "{{subdomain_base}}."
cloudapps_dns: '*.apps.{{subdomain_base}}.'
master_public_dns: "master.{{subdomain_base}}."
bastion_public_dns: "bastion.{{subdomain_base}}."
certtest_public_dns: "certtest.{{subdomain_base}}."
bastion_public_dns_chomped: "bastion.{{subdomain_base}}"
vpcid_cidr_block: "192.168.0.0/16"
vpcid_name_tag: "{{subdomain_base}}"

az_1_name: "{{ aws_region }}a"
az_2_name: "{{ aws_region }}b"

subnet_private_1_cidr_block: "192.168.2.0/24"
subnet_private_1_az: "{{ az_2_name }}"
subnet_private_1_name_tag: "{{subdomain_base}}-private"

subnet_private_2_cidr_block: "192.168.1.0/24"
subnet_private_2_az: "{{ az_1_name }}"
subnet_private_2_name_tag: "{{subdomain_base}}-private"

subnet_public_1_cidr_block: "192.168.10.0/24"
subnet_public_1_az: "{{ az_1_name }}"
subnet_public_1_name_tag: "{{subdomain_base}}-public"

subnet_public_2_cidr_block: "192.168.20.0/24"
subnet_public_2_az: "{{ az_2_name }}"
subnet_public_2_name_tag: "{{subdomain_base}}-public"

dopt_domain_name: "{{ aws_region }}.compute.internal"

rtb_public_name_tag: "{{subdomain_base}}-public"
rtb_private_name_tag: "{{subdomain_base}}-private"

cf_template_description: "{{ env_type }}-{{ guid }} template"

rootfs_size_node: 50
rootfs_size_infranode: 50
rootfs_size_master: 50
rootfs_size_bastion: 20
rootfs_size_support: 20
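
## Illustrative only: with the defaults above (guid=defaultguid, repo_version=3.6,
## aws_region=us-east-1), a few of the templated values resolve as follows. These
## are assumed example expansions, not additional configuration:
# rhel-7-server-ose-{{repo_version}}-rpms -> rhel-7-server-ose-3.6-rpms
# zone_internal_dns                       -> defaultguid.internal.
# az_1_name / az_2_name                   -> us-east-1a / us-east-1b
# dopt_domain_name                        -> us-east-1.compute.internal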