Mario Vázquez
2019-08-06 9672f1366951df83e90818e90d867eabe34c3a3d
Added helper scripts (#544)

4 files added
1 file modified
239 ■■■■■ changed files
ansible/roles/ocp4-workload-rhte-kubefed-app-portability/files/gen-mongo-certs 97 ●●●●● patch | view | raw | blame | history
ansible/roles/ocp4-workload-rhte-kubefed-app-portability/files/init-lab 21 ●●●●● patch | view | raw | blame | history
ansible/roles/ocp4-workload-rhte-kubefed-app-portability/files/namespace-cleanup 81 ●●●●● patch | view | raw | blame | history
ansible/roles/ocp4-workload-rhte-kubefed-app-portability/files/wait-for-deployment 28 ●●●●● patch | view | raw | blame | history
ansible/roles/ocp4-workload-rhte-kubefed-app-portability/tasks/workload.yml 12 ●●●●● patch | view | raw | blame | history
ansible/roles/ocp4-workload-rhte-kubefed-app-portability/files/gen-mongo-certs
New file
@@ -0,0 +1,97 @@
#!/bin/bash
check_contexts()
{
  # Verify that the cluster1/cluster2/cluster3 kubeconfig contexts exist.
  local context_count
  context_count=$(oc config get-contexts -o name 2>/dev/null | grep -Ec "cluster1|cluster2|cluster3")
  # The "0" prefix keeps the numeric test valid when the count is empty
  # (e.g. oc is not installed).
  if [ "0${context_count}" -ne "03" ]
  then
    echo "Cluster contexts are not properly configured. Please follow: https://github.com/openshift/federation-dev/blob/agnosticd-changes/labs/2.md"
    exit 1
  fi
}
generate_ca_files()
{
  # Write the three cfssl input files used by generate_certs to /var/tmp:
  #   ca-config.json   - signing config (one-year expiry, "kubernetes" profile)
  #   ca-csr.json      - CSR for the self-signed CA
  #   mongodb-csr.json - CSR for the MongoDB server certificate
  # The delimiters are quoted so the JSON bodies are written literally, with
  # no parameter/command expansion. (The original used <<-EOF, which only
  # strips leading TABS; these bodies are space-indented, so nothing was ever
  # stripped — the files keep their two-space indentation either way.)
  cat <<'EOF' > /var/tmp/ca-config.json
  {
    "signing": {
      "default": {
        "expiry": "8760h"
      },
      "profiles": {
        "kubernetes": {
          "usages": ["signing", "key encipherment", "server auth", "client auth"],
          "expiry": "8760h"
        }
      }
    }
  }
EOF
  cat <<'EOF' > /var/tmp/ca-csr.json
  {
    "CN": "Kubernetes",
    "key": {
      "algo": "rsa",
      "size": 2048
    },
    "names": [
      {
        "C": "US",
        "L": "Austin",
        "O": "Kubernetes",
        "OU": "TX",
        "ST": "Texas"
      }
    ]
  }
EOF
  cat <<'EOF' > /var/tmp/mongodb-csr.json
  {
    "CN": "kubernetes",
    "key": {
      "algo": "rsa",
      "size": 2048
    },
    "names": [
      {
        "C": "US",
        "L": "Austin",
        "O": "Kubernetes",
        "OU": "TX",
        "ST": "Texas"
      }
    ]
  }
EOF
}
generate_certs()
{
  # Issue the MongoDB server certificate signed by a freshly generated CA.
  # The SAN list covers localhost, each cluster's external mongo route and
  # the in-cluster service DNS names, so a single PEM works everywhere.
  # Variables are local (the originals leaked into the global environment)
  # and expansions are quoted.
  local namespace=mongo
  local service_name=mongo
  local route_cluster1 route_cluster2 route_cluster3 sans
  # Derive each cluster's "apps.<domain>" suffix from its console route and
  # prepend the mongo route hostname for that cluster.
  route_cluster1=mongo-cluster1.$(oc --context=cluster1 -n openshift-console get route console -o jsonpath='{.status.ingress[*].host}' | sed "s/.*\(apps.*\)/\1/g")
  route_cluster2=mongo-cluster2.$(oc --context=cluster2 -n openshift-console get route console -o jsonpath='{.status.ingress[*].host}' | sed "s/.*\(apps.*\)/\1/g")
  route_cluster3=mongo-cluster3.$(oc --context=cluster3 -n openshift-console get route console -o jsonpath='{.status.ingress[*].host}' | sed "s/.*\(apps.*\)/\1/g")
  sans="localhost,localhost.localdomain,127.0.0.1,${route_cluster1},${route_cluster2},${route_cluster3},${service_name},${service_name}.${namespace},${service_name}.${namespace}.svc.cluster.local"
  # Self-signed CA first, then the server cert; cfssljson writes the
  # /var/tmp/{ca,mongodb}{,-key}.pem files consumed by combine_certs.
  cfssl gencert -initca /var/tmp/ca-csr.json | cfssljson -bare /var/tmp/ca >/dev/null
  cfssl gencert -ca=/var/tmp/ca.pem -ca-key=/var/tmp/ca-key.pem -config=/var/tmp/ca-config.json -hostname="${sans}" -profile=kubernetes /var/tmp/mongodb-csr.json | cfssljson -bare /var/tmp/mongodb >/dev/null
}
combine_certs()
{
  # mongod expects the private key and certificate concatenated into one
  # PEM bundle; key first, certificate second.
  local bundle=/var/tmp/mongo.pem
  cat /var/tmp/mongodb-key.pem /var/tmp/mongodb.pem > "${bundle}"
  echo "MongoDB PEM file generated at ${bundle}"
}
# Entry point: sanity-check the cluster contexts, emit the cfssl input
# files, generate the certificates and bundle the final PEM. The order
# matters: each step consumes files produced by the previous one.
main()
{
  check_contexts
  generate_ca_files
  generate_certs
  combine_certs
}
main "$@"
ansible/roles/ocp4-workload-rhte-kubefed-app-portability/files/init-lab
New file
@@ -0,0 +1,21 @@
#!/bin/bash
# init-lab: clone the federation-dev lab repository and rewrite its example
# manifests to match the local cluster naming (feddemocl* -> cluster*).
LAB_REPO=https://github.com/openshift/federation-dev
LAB_FOLDER=~/federation-dev
if [ -d "${LAB_FOLDER}" ]
then
  # Keep any previous checkout around instead of overwriting it.
  echo "Backing up original lab folder ${LAB_FOLDER}"
  mv "${LAB_FOLDER}" "${LAB_FOLDER}.$RANDOM"
fi
echo "Cloning ${LAB_REPO} in ${LAB_FOLDER}"
# The original discarded the clone's exit status, so a failed clone let the
# sed commands below run against files that do not exist. Fail fast instead.
if ! git clone "${LAB_REPO}" "${LAB_FOLDER}" &> /dev/null
then
  echo "Failed to clone ${LAB_REPO}" >&2
  exit 1
fi
echo "Configuring lab files"
sed -i "s/feddemocl/cluster/g" "${LAB_FOLDER}"/labs/mongo-yaml/*.yaml
sed -i "s/feddemocl/cluster/g" "${LAB_FOLDER}"/labs/pacman-yaml/*.yaml
echo "Lab initialization complete"
ansible/roles/ocp4-workload-rhte-kubefed-app-portability/files/namespace-cleanup
New file
@@ -0,0 +1,81 @@
#!/bin/bash
# namespace-cleanup: remove a federated demo namespace from all clusters.
# GLOBAL VAR
# Namespace where the kubefed control plane (federatedtypeconfig objects)
# lives on the host cluster.
KUBEFED_NAMESPACE=kube-federation-system
usage()
{
  # Print command-line help for this cleanup script.
  printf '%s\n' "$0 [-n|--namespace NAMESPACE]"
  printf '\noptions:\n'
  printf '%s\n' "-n|--namespace - Accepted values: name of the namespace where the demo will be deployed."
  printf '\nexamples:\n'
  printf '%s\n' "cleanup mongo namespace: $0 -n mongo"
}
check_contexts()
{
  # Bail out unless the cluster1/cluster2/cluster3 contexts are configured.
  local found
  found=$(oc config get-contexts -o name 2>/dev/null | grep -Ec "cluster1|cluster2|cluster3")
  # Prefixing "0" keeps the comparison numeric even when the count is empty.
  if [ "0${found}" -ne "03" ]
  then
    echo "Cluster contexts are not properly configured. Please follow: https://github.com/openshift/federation-dev/blob/agnosticd-changes/labs/2.md"
    exit 1
  fi
}
check_namespace()
{
  # Validate the requested namespace: it must be supplied on the command
  # line and must exist on cluster1 (the kubefed host cluster).
  # Fix: ${NAMESPACE_TO_CLEAN} was passed to oc unquoted, so a value with
  # whitespace or glob characters would word-split; all expansions are now
  # quoted.
  if [ -z "${NAMESPACE_TO_CLEAN}" ]
  then
    usage
    exit 1
  fi
  # 'get namespace -o name' prints "namespace/<name>" only when it exists.
  if [ "$(oc --context=cluster1 get namespace "${NAMESPACE_TO_CLEAN}" -o name 2>/dev/null)" != "namespace/${NAMESPACE_TO_CLEAN}" ]
  then
    echo "Namespace ${NAMESPACE_TO_CLEAN} does not exist"
    exit 1
  fi
}
clean_namespace()
{
  # Remove every federated resource from ${NAMESPACE_TO_CLEAN}, then delete
  # the namespace itself on all three member clusters. Reads the globals
  # KUBEFED_NAMESPACE and NAMESPACE_TO_CLEAN; errors from oc are discarded.
  CLUSTERS="cluster1 cluster2 cluster3"
  # Iterate over every registered federated type; federatedtypeconfig
  # objects live in the kubefed control-plane namespace on cluster1.
  for federatedtypeconfig in $(oc --context=cluster1 -n ${KUBEFED_NAMESPACE} get federatedtypeconfig -o name | awk -F "/" '{print $2}')
  do
    # Map the type config to the federated resource kind it manages
    # (e.g. FederatedDeployment).
    FEDERATED_TYPE=$(oc --context=cluster1 -n ${KUBEFED_NAMESPACE} get federatedtypeconfig $federatedtypeconfig -o jsonpath='{.spec.federatedType.kind}')
    for object in $(oc --context=cluster1 -n ${NAMESPACE_TO_CLEAN} get $FEDERATED_TYPE -o name | awk -F "/" '{print $2}')
    do
      echo "Cleaning up $FEDERATED_TYPE"
      # Kick off the delete without waiting, then clear the finalizers so
      # the object is not left stuck behind kubefed's cleanup finalizer.
      oc --context=cluster1 -n ${NAMESPACE_TO_CLEAN} delete $FEDERATED_TYPE $object --wait=false &>/dev/null
      oc --context=cluster1 -n ${NAMESPACE_TO_CLEAN} patch --type=merge $FEDERATED_TYPE $object -p '{"metadata":{"finalizers": []}}' &>/dev/null
    done
  done
  # Finally drop the namespace on every member cluster.
  for cluster in ${CLUSTERS}
  do
    echo "Deleting namespace ${NAMESPACE_TO_CLEAN} from cluster ${cluster}"
    oc --context ${cluster} delete namespace ${NAMESPACE_TO_CLEAN} &>/dev/null
  done
}
# Entry point: verify prerequisites, then wipe the requested namespace.
# Each check exits non-zero on failure, so clean_namespace only runs once
# the contexts and the target namespace have been validated.
main()
{
  check_contexts
  check_namespace
  clean_namespace
}
# Positional option scan: ARGS_ARRAY mirrors "$@" so the value following
# -n/--namespace can be fetched by index (ARGS is always one ahead of the
# current argument). Unknown arguments are ignored, as before.
# Fixes: "$@" and "$arg" were unquoted (arguments containing whitespace or
# glob characters would word-split), and the external 'expr' utility is
# replaced with shell arithmetic.
ARGS_ARRAY=( "$@" )
ARGS=1
for arg in "$@"; do
    case "$arg" in
        -n | --namespace)
            NAMESPACE_TO_CLEAN=${ARGS_ARRAY[$ARGS]}
            ;;
        -h | --help)
            usage
            exit 0
            ;;
    esac
    ARGS=$((ARGS + 1))
done
main
ansible/roles/ocp4-workload-rhte-kubefed-app-portability/files/wait-for-deployment
New file
@@ -0,0 +1,28 @@
#!/bin/bash
# wait-for-deployment CLUSTER NAMESPACE DEPLOYMENT
# Polls the deployment on the given cluster context until the number of
# ready replicas matches the desired replica count, or fails after
# MAX_WAIT seconds.
READY=0
WAIT=0
MAX_WAIT=300
CLUSTER="$1"
DEPLOYMENT_NAMESPACE="$2"
DEPLOYMENT_NAME="$3"
if [ -z "${CLUSTER}" ] || [ -z "${DEPLOYMENT_NAMESPACE}" ] || [ -z "${DEPLOYMENT_NAME}" ]
then
  echo "Usage: $0 CLUSTER NAMESPACE DEPLOYMENT" >&2
  exit 1
fi
echo "Checking if deployment ${DEPLOYMENT_NAME} from namespace ${DEPLOYMENT_NAMESPACE} on cluster ${CLUSTER} is ready"
DESIRED_REPLICAS=$(oc --context="${CLUSTER}" -n "${DEPLOYMENT_NAMESPACE}" get deployment "${DEPLOYMENT_NAME}" -o jsonpath='{ .spec.replicas }')
# Bug fix: if the deployment cannot be read, DESIRED_REPLICAS is empty and
# the original "0$VAR" comparison became 0 -eq 0, reporting "ready"
# immediately for a deployment that does not exist. Fail instead.
if [ -z "${DESIRED_REPLICAS}" ]
then
  echo "Could not read deployment ${DEPLOYMENT_NAME} from namespace ${DEPLOYMENT_NAMESPACE} on cluster ${CLUSTER}" >&2
  exit 1
fi
while [ $READY -eq 0 ]
do
  CLUSTER_REPLICAS_READY=$(oc --context="${CLUSTER}" -n "${DEPLOYMENT_NAMESPACE}" get deployment "${DEPLOYMENT_NAME}" -o jsonpath='{ .status.readyReplicas }')
  # "0" prefix keeps the test numeric while readyReplicas is still unset.
  if [ "0$CLUSTER_REPLICAS_READY" -eq "0$DESIRED_REPLICAS" ]
  then
    echo "Deployment is ready"
    READY=1
  else
    echo "Deployment is not ready yet, waiting... [$WAIT/$MAX_WAIT]"
    sleep 5
    WAIT=$((WAIT + 5))
  fi
  if [ $WAIT -ge $MAX_WAIT ]
  then
    echo "Timeout while waiting deployment ${DEPLOYMENT_NAME} from namespace ${DEPLOYMENT_NAMESPACE} on cluster ${CLUSTER} to become ready"
    exit 1
  fi
done
ansible/roles/ocp4-workload-rhte-kubefed-app-portability/tasks/workload.yml
@@ -71,6 +71,18 @@
    mode: 0755
  become: true
# Install the four lab helper scripts into /usr/local/bin so they are on
# every user's PATH. The sources live in this role's files/ directory;
# mode 0755 makes them executable.
- name: Ensure helper scripts are Deployed
  copy:
    src: "{{ item }}"
    dest: /usr/local/bin/
    mode: 0755
  become: true
  loop:
    - ./files/gen-mongo-certs
    - ./files/init-lab
    - ./files/wait-for-deployment
    - ./files/namespace-cleanup
# Leave this as the last task in the playbook.
- name: workload tasks complete
  debug: