Nate Stephany
2020-03-11 b000b8f5769e47a61b4ea45e8e7a735082751940
Add etcd WAL performance test to OCP4 disconnected config (#1294)

* Add test results block

* final test changes

* final changes for performance test plays
2 files added, 1 file modified, 1 file renamed; 208 lines changed
ansible/configs/ocp4-disconnected-osp-lab/default_vars.yml (6 lines changed)
ansible/configs/ocp4-disconnected-osp-lab/files/fio-test.sh.j2 (13 lines added)
ansible/configs/ocp4-disconnected-osp-lab/files/upload-to-s3.sh (122 lines added)
ansible/configs/ocp4-disconnected-osp-lab/software.yml (67 lines changed)
ansible/configs/ocp4-disconnected-osp-lab/default_vars.yml
File was renamed from ansible/configs/ocp4-disconnected-osp-lab/env_vars.yml
@@ -205,7 +205,13 @@
# This can be used for functional or load testing
test_enable: false
test_results: false
test_pull_secret: FROM_SECRET
test_runs: 50
test_s3_id: FROM_SECRET
test_s3_key: FROM_SECRET
test_s3_bucket: gpte-ocp-perf-test
test_s3_region: us-east-2
# If you are deploying OpenShift, this should be set to the network that you
# want to use and will be used to create security groups.
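The FROM_SECRET values above are placeholders meant to be overridden at launch time rather than committed. A minimal launch sketch, assuming the standard main.yml entry point and a local secrets file (both assumptions, not part of this commit):

    # Hypothetical invocation; secrets.yml would supply test_pull_secret,
    # test_s3_id, and test_s3_key. The guid value is a placeholder.
    ansible-playbook main.yml \
        -e env_type=ocp4-disconnected-osp-lab \
        -e guid=abcd1 \
        -e @~/secrets.yml \
        -e test_enable=true \
        -e test_results=true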
ansible/configs/ocp4-disconnected-osp-lab/files/fio-test.sh.j2
New file
@@ -0,0 +1,13 @@
#!/bin/sh
echo "Installing jq and fio"
dnf install -y jq fio
echo "Creating fio directory"
mkdir -p /var/lib/etcd/fio
echo "Running tests"
# each run appends its 99th-percentile fdatasync latency (ns) to {{ guid }}-fio.out
for i in $(seq 1 {{ test_runs }}); do
    echo "running test $i"
    fio --rw=write --ioengine=sync --fdatasync=1 --directory=/var/lib/etcd/fio --size=22m --bs=2300 --name=mytest --output-format=json+ | jq '.jobs[].sync.lat_ns.percentile."99.000000"' >> {{ guid }}-fio.out
done
echo "Uploading results to S3"
/host/home/core/upload-to-s3.sh '{{ test_s3_id }}' '{{ test_s3_key }}' {{ test_s3_bucket }}@{{ test_s3_region }} ./{{ guid }}-fio.out {{ guid }}-fio.out
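Each line the script appends to {{ guid }}-fio.out is one run's 99th-percentile fdatasync latency in nanoseconds. A post-processing sketch for the downloaded results file; the 10 ms ceiling is etcd's commonly cited WAL guideline, and the file name is a placeholder:

    # Worst 99th-percentile latency across all runs (compare against 10 ms = 10000000 ns)
    sort -n abcd1-fio.out | tail -1
    # Average 99th-percentile latency, converted to milliseconds
    awk '{ s += $1 } END { printf "avg p99 fdatasync: %.2f ms\n", s / NR / 1e6 }' abcd1-fio.out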
ansible/configs/ocp4-disconnected-osp-lab/files/upload-to-s3.sh
New file
@@ -0,0 +1,122 @@
#!/bin/sh
usage()
{
    cat <<USAGE
Simple script uploading a file to S3. Supports AWS signature version 4, custom
region, permissions and mime-types. Uses Content-MD5 header to guarantee
uncorrupted file transfer.
Usage:
  `basename $0` aws_ak aws_sk bucket srcfile targfile [acl] [mime_type]
Where <arg> is one of:
  aws_ak     access key ('' for upload to public writable bucket)
  aws_sk     secret key ('' for upload to public writable bucket)
  bucket     bucket name (with optional @region suffix, default is us-east-1)
  srcfile    path to source file
  targfile   path to target (dir if it ends with '/', relative to bucket root)
  acl        s3 access permissions (default: public-read)
  mime_type  optional mime-type (tries to guess if omitted)
Dependencies:
  To run, this shell script depends on command-line curl and openssl, as well
  as standard Unix tools
Examples:
  To upload file '~/blog/media/image.png' to bucket 'storage' in region
  'eu-central-1' with key (path relative to bucket) 'media/image.png':
    `basename $0` ACCESS SECRET storage@eu-central-1 \\
      ~/blog/media/image.png media/
  To upload file '~/blog/media/image.png' to public-writable bucket 'storage'
  in default region 'us-east-1' with key (path relative to bucket) 'x/y.png':
    `basename $0` '' '' storage ~/blog/image.png x/y.png
USAGE
    exit 0
}
guessmime()
{
    mime=`file -b --mime-type "$1"`
    if [ "$mime" = "text/plain" ]; then
        case $1 in
            *.css)           mime=text/css;;
            *.ttf|*.otf)     mime=application/font-sfnt;;
            *.woff)          mime=application/font-woff;;
            *.woff2)         mime=font/woff2;;
            *rss*.xml|*.rss) mime=application/rss+xml;;
            *)               if head "$1" | grep '<html.*>' >/dev/null; then mime=text/html; fi;;
        esac
    fi
    printf "$mime"
}
if [ $# -lt 5 ]; then usage; fi
# Inputs.
aws_ak="$1"                                                              # access key
aws_sk="$2"                                                              # secret key
bucket=`printf $3 | awk 'BEGIN{FS="@"}{print $1}'`                       # bucket name
region=`printf $3 | awk 'BEGIN{FS="@"}{print ($2==""?"us-east-1":$2)}'`  # region name
srcfile="$4"                                                             # source file
targfile=`echo "$5" | sed "s/\/$/\/$(basename $srcfile)/"`            # target file
acl=${6:-'public-read'}                                                  # s3 perms
mime=${7:-"`guessmime "$srcfile"`"}                                      # mime type
md5=`openssl md5 -binary "$srcfile" | openssl base64`
# Create signature if not public upload.
key_and_sig_args=''
if [ "$aws_ak" != "" ] && [ "$aws_sk" != "" ]; then
    # Need current and file upload expiration date. Handle GNU and BSD date command style to get tomorrow's date.
    date=`date -u +%Y%m%dT%H%M%SZ`
    expdate=`if ! date -v+1d +%Y-%m-%d 2>/dev/null; then date -d tomorrow +%Y-%m-%d; fi`
    expdate_s=`printf $expdate | sed s/-//g` # without dashes, as we need both formats below
    service='s3'
    # Generate policy and sign with secret key following AWS Signature version 4, below
    p=$(cat <<POLICY | openssl base64
{ "expiration": "${expdate}T12:00:00.000Z",
  "conditions": [
    {"acl": "$acl" },
    {"bucket": "$bucket" },
    ["starts-with", "\$key", ""],
    ["starts-with", "\$content-type", ""],
    ["content-length-range", 1, `ls -l -H "$srcfile" | awk '{print $5}' | head -1`],
    {"content-md5": "$md5" },
    {"x-amz-date": "$date" },
    {"x-amz-credential": "$aws_ak/$expdate_s/$region/$service/aws4_request" },
    {"x-amz-algorithm": "AWS4-HMAC-SHA256" }
  ]
}
POLICY
    )
    # AWS4-HMAC-SHA256 signature
    s=`printf "$expdate_s"   | openssl sha256 -hmac "AWS4$aws_sk"           -hex | sed 's/(stdin)= //'`
    s=`printf "$region"      | openssl sha256 -mac HMAC -macopt hexkey:"$s" -hex | sed 's/(stdin)= //'`
    s=`printf "$service"     | openssl sha256 -mac HMAC -macopt hexkey:"$s" -hex | sed 's/(stdin)= //'`
    s=`printf "aws4_request" | openssl sha256 -mac HMAC -macopt hexkey:"$s" -hex | sed 's/(stdin)= //'`
    s=`printf "$p"           | openssl sha256 -mac HMAC -macopt hexkey:"$s" -hex | sed 's/(stdin)= //'`
    key_and_sig_args="-F X-Amz-Credential=$aws_ak/$expdate_s/$region/$service/aws4_request -F X-Amz-Algorithm=AWS4-HMAC-SHA256 -F X-Amz-Signature=$s -F X-Amz-Date=${date}"
fi
# Upload. Supports anonymous upload if bucket is public-writable, and keys are set to ''.
echo "Uploading: $srcfile ($mime) to $bucket:$targfile"
curl                            \
    -# -k                       \
    -F key=$targfile            \
    -F acl=$acl                 \
    $key_and_sig_args           \
    -F "Policy=$p"              \
    -F "Content-MD5=$md5"       \
    -F "Content-Type=$mime"     \
    -F "file=@$srcfile"         \
    https://${bucket}.s3.amazonaws.com/ | cat # pipe through cat so curl displays upload progress bar, *and* response
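For reference, fio-test.sh.j2 above calls this script with positional arguments only; a standalone invocation sketch, using placeholder credentials and the bucket@region form from this commit's defaults:

    # Hypothetical manual upload; the environment variables are placeholders.
    ./upload-to-s3.sh "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" \
        gpte-ocp-perf-test@us-east-2 ./abcd1-fio.out abcd1-fio.out

Note that the default ACL is public-read, so the target bucket must permit that grant (or a stricter acl must be passed as the sixth argument).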
ansible/configs/ocp4-disconnected-osp-lab/software.yml
@@ -272,16 +272,6 @@
            dest: "/etc/openstack/clouds.yaml"
            mode: 0644
        - name: Check if metadata.json exists
          stat:
            path: /home/{{ student_name }}/openstack-upi/metadata.json
          register: r_metadata
        - name: Get the infra ID
          shell: jq -r .infraID $HOME/openstack-upi/metadata.json
          register: r_infra_id
          when: r_metadata.stat.exists
        - name: Run solver for lab 03
          shell: /usr/local/bin/solve_lab ocp4_advanced_deployment 03_1
          register: r_solve_lab_03
@@ -298,4 +288,59 @@
        - name: dump r_solve_lab_03
          debug:
            var: r_solve_lab_03
    - when:
        - test_enable | bool
        - test_results | bool
      block:
        - name: Check if metadata.json exists
          stat:
            path: /home/{{ student_name }}/openstack-upi/metadata.json
          register: r_metadata
        - name: Get the infra ID
          shell: jq -r .infraID /home/{{ student_name }}/openstack-upi/metadata.json
          register: r_infra_id
          when: r_metadata.stat.exists
        - name: Copy upload script to bastion
          copy:
            src: "./files/upload-to-s3.sh"
            dest: "/home/{{ student_name }}/resources/upload-to-s3.sh"
            mode: preserve
        - name: Copy test script to bastion
          template:
            src: "./files/fio-test.sh.j2"
            dest: "/home/{{ student_name }}/resources/fio-test.sh"
            mode: preserve
        - name: Copy files to master-0
          shell: >
            scp -i /home/{{ student_name }}/.ssh/{{ guid }}key.pem
            -F /home/{{ student_name }}/.ssh/config
            /home/{{ student_name }}/resources/{{ item }}
            core@{{ INFRA_ID }}-master-0.example.com:{{ item }}
          loop:
            - "fio-test.sh"
            - "upload-to-s3.sh"
          vars:
            INFRA_ID: "{{ r_infra_id.stdout }}"
        - name: Run test container on master-0
          shell: >
            ssh -i /home/{{ student_name }}/.ssh/{{ guid }}key.pem
            -F /home/{{ student_name }}/.ssh/config
            core@{{ INFRA_ID }}-master-0.example.com
            sudo podman run --privileged --ipc=host --net=host --pid=host
            -v /var/lib/etcd:/var/lib/etcd -v /:/host docker.io/fedora:latest /host/home/core/fio-test.sh
          vars:
            INFRA_ID: "{{ r_infra_id.stdout }}"
        - name: Remove test scripts
          file:
            state: absent
            path: /home/{{ student_name }}/resources/{{ item }}
          loop:
            - "fio-test.sh"
            - "upload-to-s3.sh"