From 8593e4678bd7145424da567c21103adfb9f92631 Mon Sep 17 00:00:00 2001 From: Marek Skalický Date: Nov 30 2018 17:46:13 +0000 Subject: Pull changes from upstream and rebase for: rebuild for latest f30 --- diff --git a/2.4 b/2.4 new file mode 120000 index 0000000..945c9b4 --- /dev/null +++ b/2.4 @@ -0,0 +1 @@ +. \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index fd4a1db..1f47576 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.fedoraproject.org/f29/s2i-core:latest +FROM registry.fedoraproject.org/f30/s2i-core:latest # Apache HTTP Server image. # @@ -10,7 +10,6 @@ FROM registry.fedoraproject.org/f29/s2i-core:latest ENV HTTPD_VERSION=2.4 \ NAME=httpd \ - VERSION=$HTTPD_VERSION \ ARCH=x86_64 ENV SUMMARY="Platform for running Apache httpd $HTTPD_VERSION or building httpd-based application" \ @@ -28,8 +27,7 @@ LABEL summary="$SUMMARY" \ io.openshift.tags="builder,httpd,httpd24" \ com.redhat.component="$NAME" \ name="$FGC/$NAME" \ - version="2.4" \ - architecture="$ARCH" \ + version="$HTTPD_VERSION" \ usage="s2i build https://github.com/sclorg/httpd-container.git --context-dir=examples/sample-test-app/ $FGC/$NAME sample-server" \ maintainer="SoftwareCollections.org " @@ -46,6 +44,7 @@ ENV HTTPD_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/httpd/ \ HTTPD_APP_ROOT=${APP_ROOT} \ HTTPD_CONFIGURATION_PATH=${APP_ROOT}/etc/httpd.d \ HTTPD_MAIN_CONF_PATH=/etc/httpd/conf \ + HTTPD_MAIN_CONF_MODULES_D_PATH=/etc/httpd/conf.modules.d \ HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d \ HTTPD_VAR_RUN=/var/run/httpd \ HTTPD_DATA_PATH=/var/www \ @@ -56,6 +55,7 @@ COPY ./s2i/bin/ $STI_SCRIPTS_PATH COPY ./root / # Generate SSL certs and reset permissions of filesystem to default values +# Reset permissions of filesystem to default values RUN /usr/libexec/httpd-ssl-gencerts && \ /usr/libexec/httpd-prepare && rpm-file-permissions diff --git a/Dockerfile.fedora b/Dockerfile.fedora new file mode 120000 index 0000000..1d1fe94 --- /dev/null +++ 
b/Dockerfile.fedora @@ -0,0 +1 @@ +Dockerfile \ No newline at end of file diff --git a/README.md b/README.md deleted file mode 100644 index eb97041..0000000 --- a/README.md +++ /dev/null @@ -1,121 +0,0 @@ -Apache HTTP Server 2.4 -====================== - -This container image includes Apache HTTP Server 2.4 for OpenShift and general usage. -Users can choose between RHEL, CentOS, and Fedora based images. -The RHEL image is available in the [Red Hat Registry](https://access.redhat.com/containers) -as registry.access.redhat.com/rhscl/httpd-24-rhel7. -The CentOS image is then available on [Docker Hub](https://hub.docker.com/r/centos/httpd-24-centos7/) -as centos/httpd-24-centos7. - - -DESCRIPTION ------------ - -Apache HTTP Server 2.4 available as docker container, is a powerful, efficient, -and extensible web server. Apache supports a variety of features, many implemented as compiled modules -which extend the core functionality. -These can range from server-side programming language support to authentication schemes. -Virtual hosting allows one Apache installation to serve many different Web sites." - - -USAGE ------ - -For this, we will assume that you are using the `rhscl/httpd-24-rhel7` image. -The image can be used as a base image for other applications based on Apache HTTP web server. - -An example of the data on the host for both the examples above, that will be served by -Apache HTTP web server: - -``` -$ ls -lZ /wwwdata/html --rw-r--r--. 1 1001 1001 54321 Jan 01 12:34 index.html --rw-r--r--. 1 1001 1001 5678 Jan 01 12:34 page.html -``` - -If you want to run the image and mount the static pages available in `/wwwdata` on the host -as a docker volume, execute the following command: - -``` -$ docker run -d --name httpd -p 8080:8080 -v /wwwdata:/var/www:Z rhscl/httpd-24-rhel7 -``` - -This will create a container named `httpd` running Apache HTTP Server, serving data from -`/wwwdata` directory. Port 8080 will be exposed and mapped to the host. 
- -If you want to create a new Docker layered image, use [Source-to-Image](https://github.com/openshift/source-to-image), a tool for building/building artifacts from source and injecting into docker images. To create a new Docker image named `httpd-app` using Source-to-Image, while using data available in `/wwwdata` on the host, execute the following command: - -``` -$ s2i build file:///wwwdata/html rhscl/httpd-24-rhel7 httpd-app -``` - -To run such a new image, execute the following command: - -``` -$ docker run -d --name httpd -p 8080:8080 httpd-app -``` - - -CONFIGURATION -------------- - -The Apache HTTP Server container image supports the following configuration variable, which can be set by using the `-e` option with the docker run command: - -| Variable name | Description | -| :---------------------- | ----------------------------------------- | -| `HTTPD_LOG_TO_VOLUME` | By default, httpd logs into standard output, so the logs are accessible by using the docker logs command. When `HTTPD_LOG_TO_VOLUME` is set, httpd logs into `/var/log/httpd24`, which can be mounted to host system using the Docker volumes. This option is only allowed when container is run as UID 0. | - - -If you want to run the image and mount the log files into `/wwwlogs` on the host -as a docker volume, execute the following command: - -``` -$ docker run -d -u 0 -e HTTPD_LOG_TO_VOLUME=1 --name httpd -v /wwwlogs:/var/log/httpd24:Z rhscl/httpd-24-rhel7 -``` - - -VOLUMES -------- - -You can also set the following mount points by passing the `-v /host:/container` flag to Docker. 
- -| Volume mount point | Description | -| :----------------------- | ---------------------------------------------------------------------- | -| `/var/www` | Apache HTTP Server data directory | -| `/var/log/httpd24` | Apache HTTP Server log directory (available only when running as root, path `/var/log/httpd` is used in case of Fedora based image) | - -**Notice: When mouting a directory from the host into the container, ensure that the mounted -directory has the appropriate permissions and that the owner and group of the directory -matches the user UID or name which is running inside the container.** - - -DEFAULT USER ------------- - -By default, Apache HTTP Server container runs as UID 1001. That means the volume mounted directories for the files (if mounted using `-v` option) need to be prepared properly, so the UID 1001 can read them. - -To run the container as a different UID, use `-u` option. For example if you want to run the container as UID 1234, execute the following command: - -``` -docker run -d -u 1234 rhscl/httpd-24-rhel7 -``` - -To log into a volume mounted directory, the container needs to be run as UID 0 (see above). - - - -TROUBLESHOOTING ---------------- -The httpd deamon in the container logs to the standard output by default, so the log is available in the container log. The log can be examined by running: - - docker logs - - -SEE ALSO --------- -Dockerfile and other sources for this container image are available on -https://github.com/sclorg/httpd-container. -In that repository, Dockerfile for CentOS is called Dockerfile, Dockerfile -for RHEL is called Dockerfile.rhel7. 
- diff --git a/README.md b/README.md new file mode 120000 index 0000000..299cf2b --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +root/usr/share/container-scripts/httpd/README.md \ No newline at end of file diff --git a/help.md b/help.md new file mode 120000 index 0000000..42061c0 --- /dev/null +++ b/help.md @@ -0,0 +1 @@ +README.md \ No newline at end of file diff --git a/root/help.1 b/root/help.1 index e9040b3..ddf8343 100644 --- a/root/help.1 +++ b/root/help.1 @@ -113,6 +113,10 @@ The Apache HTTP Server container image supports the following configuration vari By default, httpd logs into standard output, so the logs are accessible by using the docker logs command. When \fB\fCHTTPD\_LOG\_TO\_VOLUME\fR is set, httpd logs into \fB\fC/var/log/httpd24\fR, which can be mounted to host system using the container volumes. This option is only allowed when container is run as UID 0. .PP +\fB\fB\fCHTTPD\_MPM\fR\fP + The variable \fB\fCHTTPD\_MPM\fR can be set to change the default Multi\-Processing Module (MPM) from the package default MPM. + +.PP If you want to run the image and mount the log files into \fB\fC/wwwlogs\fR on the host as a container volume, execute the following command: @@ -126,6 +130,18 @@ $ docker run \-d \-u 0 \-e HTTPD\_LOG\_TO\_VOLUME=1 \-\-name httpd \-v /wwwlogs: .RE .PP +To run an image using the \fB\fCevent\fR MPM (rather than the default \fB\fCprefork\fR), execute the following command: + +.PP +.RS + +.nf +$ docker run \-d \-e HTTPD\_MPM=event \-\-name httpd rhscl/httpd\-24\-rhel7 + +.fi +.RE + +.PP You can also set the following mount points by passing the \fB\fC\-v /host:/container\fR flag to Docker. 
.PP diff --git a/root/usr/libexec/httpd-prepare b/root/usr/libexec/httpd-prepare index f7378cf..e2857da 100755 --- a/root/usr/libexec/httpd-prepare +++ b/root/usr/libexec/httpd-prepare @@ -23,6 +23,7 @@ fi mkdir -p ${HTTPD_CONFIGURATION_PATH} chmod -R a+rwx ${HTTPD_MAIN_CONF_PATH} chmod -R a+rwx ${HTTPD_MAIN_CONF_D_PATH} +chmod -R a+rwx ${HTTPD_MAIN_CONF_MODULES_D_PATH} chmod -R a+r /etc/pki/tls/certs/localhost.crt chmod -R a+r /etc/pki/tls/private/localhost.key mkdir -p ${HTTPD_APP_ROOT}/etc diff --git a/root/usr/share/container-scripts/httpd/README.md b/root/usr/share/container-scripts/httpd/README.md index 9332c20..99eceed 100644 --- a/root/usr/share/container-scripts/httpd/README.md +++ b/root/usr/share/container-scripts/httpd/README.md @@ -80,6 +80,8 @@ The Apache HTTP Server container image supports the following configuration vari **`HTTPD_LOG_TO_VOLUME`** By default, httpd logs into standard output, so the logs are accessible by using the docker logs command. When `HTTPD_LOG_TO_VOLUME` is set, httpd logs into `/var/log/httpd24`, which can be mounted to host system using the container volumes. This option is only allowed when container is run as UID 0. +**`HTTPD_MPM`** + The variable `HTTPD_MPM` can be set to change the default Multi-Processing Module (MPM) from the package default MPM. If you want to run the image and mount the log files into `/wwwlogs` on the host @@ -89,6 +91,12 @@ as a container volume, execute the following command: $ docker run -d -u 0 -e HTTPD_LOG_TO_VOLUME=1 --name httpd -v /wwwlogs:/var/log/httpd24:Z rhscl/httpd-24-rhel7 ``` +To run an image using the `event` MPM (rather than the default `prefork`), execute the following command: + +``` +$ docker run -d -e HTTPD_MPM=event --name httpd rhscl/httpd-24-rhel7 +``` + You can also set the following mount points by passing the `-v /host:/container` flag to Docker. 
**`/var/www`** diff --git a/root/usr/share/container-scripts/httpd/common.sh b/root/usr/share/container-scripts/httpd/common.sh index 6b6b110..1cdeee9 100644 --- a/root/usr/share/container-scripts/httpd/common.sh +++ b/root/usr/share/container-scripts/httpd/common.sh @@ -23,6 +23,8 @@ config_privileged() { chmod 755 ${HTTPD_MAIN_CONF_PATH} && \ chmod 644 ${HTTPD_MAIN_CONF_D_PATH}/* && \ chmod 755 ${HTTPD_MAIN_CONF_D_PATH} && \ + chmod 644 ${HTTPD_MAIN_CONF_MODULES_D_PATH}/* && \ + chmod 755 ${HTTPD_MAIN_CONF_MODULES_D_PATH} && \ chmod 600 /etc/pki/tls/certs/localhost.crt && \ chmod 600 /etc/pki/tls/private/localhost.key && \ chmod 710 ${HTTPD_VAR_RUN} @@ -49,6 +51,15 @@ config_non_privileged() { fi } +config_mpm() { + if [ -v HTTPD_MPM -a -f ${HTTPD_MAIN_CONF_MODULES_D_PATH}/00-mpm.conf ]; then + local mpmconf=${HTTPD_MAIN_CONF_MODULES_D_PATH}/00-mpm.conf + sed -i -e 's,^LoadModule,#LoadModule,' ${mpmconf} + sed -i -e "/LoadModule mpm_${HTTPD_MPM}/s,^#LoadModule,LoadModule," ${mpmconf} + echo "---> Set MPM to ${HTTPD_MPM} in ${mpmconf}" + fi +} + # get_matched_files finds file for image extending function get_matched_files() { local custom_dir default_dir @@ -114,8 +125,11 @@ process_config_files() { process_ssl_certs() { local dir=${1:-.} if [ -d ${dir}/httpd-ssl/private ] && [ -d ${dir}/httpd-ssl/certs ]; then + echo "---> Moving the httpd-ssl directory included in the source to a directory that isn't exposed by httpd..." + mv ${dir}/httpd-ssl ${HTTPD_APP_ROOT} + fi + if [ -d ${HTTPD_APP_ROOT}/httpd-ssl/private ] && [ -d ${HTTPD_APP_ROOT}/httpd-ssl/certs ]; then echo "---> Looking for SSL certs for httpd..." 
- cp -r ${dir}/httpd-ssl ${HTTPD_APP_ROOT} local ssl_cert="$(ls -A ${HTTPD_APP_ROOT}/httpd-ssl/certs/*.pem | head -n 1)" local ssl_private="$(ls -A ${HTTPD_APP_ROOT}/httpd-ssl/private/*.pem | head -n 1)" if [ -f "${ssl_cert}" ] ; then @@ -130,7 +144,6 @@ process_ssl_certs() { sed -i '/^SSLCertificateKeyFile .*/d' ${HTTPD_MAIN_CONF_D_PATH}/ssl.conf fi fi - rm -rf ${dir}/httpd-ssl fi } diff --git a/root/usr/share/container-scripts/httpd/pre-init/10-set-mpm.sh b/root/usr/share/container-scripts/httpd/pre-init/10-set-mpm.sh new file mode 100644 index 0000000..f7de495 --- /dev/null +++ b/root/usr/share/container-scripts/httpd/pre-init/10-set-mpm.sh @@ -0,0 +1,3 @@ +source ${HTTPD_CONTAINER_SCRIPTS_PATH}/common.sh + +config_mpm diff --git a/sources b/sources deleted file mode 100644 index e69de29..0000000 --- a/sources +++ /dev/null diff --git a/test/run b/test/run index 13ac012..d73bc3b 100755 --- a/test/run +++ b/test/run @@ -7,8 +7,10 @@ THISDIR=$(dirname ${BASH_SOURCE[0]}) . ${THISDIR}/utils.sh test_dir="$(readlink -zf $(dirname "${BASH_SOURCE[0]}"))" +. "$test_dir/test-lib.sh" + function _container_is_scl() { - docker inspect --format='{{.Config.Env}}' "${1-$IMAGE_NAME}" | grep -q HTTPD_SCL + docker inspect --format='{{.ContainerConfig.Env}}' "${1-$IMAGE_NAME}" | grep -q HTTPD_SCL return $? 
} @@ -80,7 +82,7 @@ function run_default_page_test() { function run_as_root_test() { # Try running as root - DOCKER_ARGS="-u 0" + DOCKER_ARGS="--user 0" run "create_container test_run_as_root" DOCKER_ARGS= sleep 2 @@ -106,7 +108,7 @@ function _run_log_to_volume_test() { run "ls -d ${logs_dir} || mkdir ${logs_dir}" 0 'Create log directory' run "chown -R 1001:1001 ${logs_dir}" run "chcon -Rvt svirt_sandbox_file_t ${logs_dir}" 0 'Change SELinux context on the log dir' - DOCKER_ARGS="-e HTTPD_LOG_TO_VOLUME=1 -u 0 -v ${logs_dir}:${volume_dir}" + DOCKER_ARGS="-e HTTPD_LOG_TO_VOLUME=1 --user 0 -v ${logs_dir}:${volume_dir}" run "create_container test_log_dir_${variant}" DOCKER_ARGS= sleep 2 @@ -122,7 +124,7 @@ function _run_log_to_volume_test() { function _run_invalid_log_volume_test() { # Check wrong usage of the HTTP_LOG_TO_VOLUME env variable - DOCKER_ARGS="-e HTTPD_LOG_TO_VOLUME=1 -u 1001" + DOCKER_ARGS="-e HTTPD_LOG_TO_VOLUME=1 --user 1001" run "create_container test_log_dir_fail" DOCKER_ARGS= sleep 2 @@ -157,15 +159,31 @@ function _run_data_volume_test() { run "grep -e '^hello$' output" } +function _run_mpm_config_test() { + local mpm=$1 + # Check worker MPM can be configured + DOCKER_ARGS="-e HTTPD_MPM=$mpm --user 1001" + run "create_container test_mpm_${mpm}" + DOCKER_ARGS= + sleep 2 + cid=$(get_cid "test_mpm_$mpm") + run "docker logs $cid | grep -s mpm_${mpm}':notice.*resuming normal operations'" +} + +function run_mpm_config_test() { + for m in worker event prefork; do + _run_mpm_config_test $m + done +} function run_s2i_test() { # Test s2i use case # Since we built the candidate image locally, we don't want S2I attempt to pull # it from Docker hub s2i_args="--pull-policy=never" - run "s2i usage ${s2i_args} ${IMAGE_NAME}" 0 "Testing 's2i usage'" - run "s2i build ${s2i_args} file://${test_dir}/sample-test-app ${IMAGE_NAME} ${IMAGE_NAME}-testapp" 0 "Testing 's2i build'" - DOCKER_ARGS='-u 1000' + run "ct_s2i_usage ${IMAGE_NAME} ${s2i_args}" 0 "Testing 's2i usage'" + 
run "ct_s2i_build_as_df file://${test_dir}/sample-test-app ${IMAGE_NAME} ${IMAGE_NAME}-testapp ${s2i_args}" 0 "Testing 's2i build'" + DOCKER_ARGS='--user 1000' create_container testing-app-s2i ${IMAGE_NAME}-testapp DOCKER_ARGS= sleep 5 @@ -181,8 +199,8 @@ function run_pre_init_test() { # Since we built the candidate image locally, we don't want S2I attempt to pull # it from Docker hub s2i_args="--pull-policy=never" - run "s2i build ${s2i_args} file://${test_dir}/pre-init-test-app ${IMAGE_NAME} ${IMAGE_NAME}-testapp2" 0 "Testing 's2i build' with pre-init script" - DOCKER_ARGS='-u 1000' + run "ct_s2i_build_as_df file://${test_dir}/pre-init-test-app ${IMAGE_NAME} ${IMAGE_NAME}-testapp2 ${s2i_args}" 0 "Testing 's2i build' with pre-init script" + DOCKER_ARGS='--user 1000' create_container testing-app-pre-init ${IMAGE_NAME}-testapp2 DOCKER_ARGS= sleep 5 @@ -198,8 +216,8 @@ function run_self_cert_test() { # Since we built the candidate image locally, we don't want S2I attempt to pull # it from Docker hub s2i_args="--pull-policy=never" - run "s2i build ${s2i_args} file://${test_dir}/self-signed-ssl ${IMAGE_NAME} ${IMAGE_NAME}-self-signed" 0 "Testing 's2i build' with self-signed cert" - DOCKER_ARGS='-u 1000' + run "ct_s2i_build_as_df file://${test_dir}/self-signed-ssl ${IMAGE_NAME} ${IMAGE_NAME}-self-signed ${s2i_args}" 0 "Testing 's2i build' with self-signed cert" + DOCKER_ARGS='--user 1000' create_container testing-self-signed ${IMAGE_NAME}-self-signed DOCKER_ARGS= sleep 5 @@ -266,7 +284,9 @@ run_as_root_test run_log_to_volume_test run_data_volume_test run_s2i_test -run_pre_init_test" +run_pre_init_test +run_mpm_config_test +" test $# -eq 1 -a "${1-}" == --list && echo "$TEST_LIST" && exit 0 diff --git a/test/run-openshift b/test/run-openshift index a9e9eec..c72bc64 100755 --- a/test/run-openshift +++ b/test/run-openshift @@ -24,12 +24,12 @@ ct_os_cluster_up ct_os_test_s2i_app "${IMAGE_NAME}" "${THISDIR}/sample-test-app" . 
'This is a sample s2i application with static content' # test remote example app -ct_os_test_s2i_app "${IMAGE_NAME}" "https://github.com/openshift/httpd-ex#${BRANCH_TO_TEST}" . 'Welcome to your static httpd application on OpenShift' +ct_os_test_s2i_app "${IMAGE_NAME}" "https://github.com/sclorg/httpd-ex#${BRANCH_TO_TEST}" . 'Welcome to your static httpd application on OpenShift' # test template from the example app ct_os_test_template_app "${IMAGE_NAME}" \ "https://raw.githubusercontent.com/openshift/httpd-ex/${BRANCH_TO_TEST}/openshift/templates/httpd.json" \ httpd \ 'Welcome to your static httpd application on OpenShift' \ - 8080 http 200 "-p SOURCE_REPOSITORY_REF=${BRANCH_TO_TEST}" + 8080 http 200 "-p SOURCE_REPOSITORY_REF=${BRANCH_TO_TEST} -p NAME=httpd-testing" diff --git a/test/test-lib-openshift.sh b/test/test-lib-openshift.sh index 53bb9d3..f988ac5 100644 --- a/test/test-lib-openshift.sh +++ b/test/test-lib-openshift.sh @@ -223,7 +223,7 @@ function _ct_os_get_uniq_project_name() { local r while true ; do r=${RANDOM} - mkdir /var/tmp/os-test-${r} &>/dev/null && echo test-${r} && break + mkdir /var/tmp/sclorg-test-${r} &>/dev/null && echo sclorg-test-${r} && break done } @@ -234,6 +234,10 @@ function _ct_os_get_uniq_project_name() { # Expects 'os' command that is properly logged in to the OpenShift cluster. # Not using mktemp, because we cannot use uppercase characters. function ct_os_new_project() { + if [ "${CT_SKIP_NEW_PROJECT:-false}" == 'true' ] ; then + echo "Creating project skipped." 
+    return
+  fi
   local project_name="${1:-$(_ct_os_get_uniq_project_name)}" ; shift || :
   oc new-project ${project_name}
   # let openshift cluster to sync to avoid some race condition errors
@@ -245,17 +249,38 @@
 # Deletes the specified project in the openshfit
 # Arguments: project - project name, uses the current project if omitted
 function ct_os_delete_project() {
+  if [ "${CT_SKIP_NEW_PROJECT:-false}" == 'true' ] ; then
+    echo "Deleting project skipped, cleaning objects only."
+    ct_delete_all_objects
+    return
+  fi
   local project_name="${1:-$(oc project -q)}" ; shift || :
   oc delete project "${project_name}"
 }
 
+# ct_delete_all_objects
+# -----------------
+# Deletes all objects within the project.
+# Handy when we have one project and want to run more tests.
+function ct_delete_all_objects() {
+  for x in bc builds dc is isimage istag po pv pvc rc routes secrets svc ; do
+    oc delete $x --all
+  done
+  # for some objects it takes longer to be really deleted, so a dummy sleep
+  # to avoid some races when other test can see not-yet-deleted objects and can fail
+  sleep 10
+}
+
 # ct_os_docker_login
 # --------------------
 # Logs in into docker daemon
+# Uses global REGISTRY_ADDRESS environment variable for arbitrary registry address.
+# Does not do anything if REGISTRY_ADDRESS is set.
 function ct_os_docker_login() {
+  [ -n "${REGISTRY_ADDRESS:-}" ] && echo "REGISTRY_ADDRESS set, not trying to docker login." && return 0
   # docker login fails with "404 page not found" error sometimes, just try it more times
   for i in `seq 12` ; do
-    docker login -u developer -p $(oc whoami -t) 172.30.1.1:5000 && return 0 || :
+    docker login -u developer -p $(oc whoami -t) ${REGISTRY_ADDRESS:-172.30.1.1:5000} && return 0 || :
     sleep 5
   done
   return 1
@@ -267,11 +292,12 @@ function ct_os_docker_login() {
 # Arguments: image - image name to upload
 # Arguments: imagestream - name and tag to use for the internal registry.
 #                      In the format of name:tag ($image_name:latest by default)
+# Uses global REGISTRY_ADDRESS environment variable for arbitrary registry address.
 function ct_os_upload_image() {
   local input_name="${1}" ; shift
   local image_name=${input_name##*/}
   local imagestream=${1:-$image_name:latest}
-  local output_name="172.30.1.1:5000/$(oc project -q)/$imagestream"
+  local output_name="${REGISTRY_ADDRESS:-172.30.1.1:5000}/$(oc project -q)/$imagestream"
 
   ct_os_docker_login
   docker tag ${input_name} ${output_name}
@@ -295,10 +321,12 @@ function ct_os_install_in_centos() {
 # use "true" for allow remote access to the web-UI,
 # "false" is default
 # Arguments: cluster_version - version of the OpenShift cluster to use, empty
-# means default version of `oc`; example value: v3.7.0;
+# means default version of `oc`; example value: 3.7;
 # also can be specified outside by OC_CLUSTER_VERSION
 function ct_os_cluster_up() {
   ct_os_cluster_running && echo "Cluster already running. Nothing is done." && return 0
+  ct_os_logged_in && echo "Already logged in to a cluster. Nothing is done."
&& return 0 + mkdir -p /var/tmp/openshift local dir="${1:-$(mktemp -d /var/tmp/openshift/os-data-XXXXXX)}" ; shift || : local is_public="${1:-'false'}" ; shift || : @@ -308,7 +336,7 @@ function ct_os_cluster_up() { sed -i "s|OPTIONS='|OPTIONS='--insecure-registry 172.30.0.0/16 |" /etc/sysconfig/docker fi - systemctl stop firewalld + systemctl stop firewalld || : setenforce 0 iptables -F @@ -316,10 +344,25 @@ function ct_os_cluster_up() { local cluster_ip="127.0.0.1" [ "${is_public}" == "true" ] && cluster_ip=$(ct_get_public_ip) + if [ -n "${cluster_version}" ] ; then + # if $cluster_version is not set, we simply use oc that is available + ct_os_set_path_oc "${cluster_version}" + fi + mkdir -p ${dir}/{config,data,pv} - oc cluster up --host-data-dir=${dir}/data --host-config-dir=${dir}/config \ - --host-pv-dir=${dir}/pv --use-existing-config --public-hostname=${cluster_ip} \ - ${cluster_version:+--version=$cluster_version } + case $(oc version| head -n 1) in + "oc v3.1"?.*) + oc cluster up --base-dir="${dir}/data" --public-hostname="${cluster_ip}" + ;; + "oc v3."*) + oc cluster up --host-data-dir="${dir}/data" --host-config-dir="${dir}/config" \ + --host-pv-dir="${dir}/pv" --use-existing-config --public-hostname="${cluster_ip}" + ;; + *) + echo "ERROR: Unexpected oc version." >&2 + return 1 + ;; + esac oc version oc login -u system:admin oc project default @@ -344,6 +387,96 @@ function ct_os_cluster_running() { oc cluster status &>/dev/null } +# ct_os_logged_in +# --------------- +# Returns 0 if logged in to a cluster (remote or local) +function ct_os_logged_in() { + oc whoami >/dev/null +} + +# ct_os_set_path_oc OC_VERSION +# -------------------- +# This is a trick that helps using correct version of the `oc`: +# The input is version of the openshift in format v3.6.0 etc. 
+# If the currently available version of oc is not of this version, +# it first takes a look into /usr/local/oc-/bin directory, +# and if not found there it downloads the community release from github. +# In the end the PATH variable is changed, so the other tests can still use just 'oc'. +# Arguments: oc_version - X.Y part of the version of OSE (e.g. 3.9) +function ct_os_set_path_oc() { + local oc_version=$(ct_os_get_latest_ver $1) + local oc_path + + if oc version | grep -q "oc ${oc_version%.*}." ; then + echo "Binary oc found already available in version ${oc_version}: `which oc` Doing noting." + return 0 + fi + + # first check whether we already have oc available in /usr/local + local installed_oc_path="/usr/local/oc-${oc_version%.*}/bin" + + if [ -x "${installed_oc_path}/oc" ] ; then + oc_path="${installed_oc_path}" + echo "Binary oc found in ${installed_oc_path}" >&2 + else + # oc not available in /usr/local, try to download it from github (community release) + oc_path="/tmp/oc-${oc_version}-bin" + ct_os_download_upstream_oc "${oc_version}" "${oc_path}" + fi + if [ -z "${oc_path}/oc" ] ; then + echo "ERROR: oc not found installed, nor downloaded" >&1 + return 1 + fi + export PATH="${oc_path}:${PATH}" + if ! oc version | grep -q "oc ${oc_version%.*}." 
; then + echo "ERROR: something went wrong, oc located at ${oc_path}, but oc of version ${oc_version} not found in PATH ($PATH)" >&1 + return 1 + else + echo "PATH set correctly, binary oc found in version ${oc_version}: `which oc`" + fi +} + +# ct_os_get_latest_ver VERSION_PART_X +# -------------------- +# Returns full version (vX.Y.Z) from part of the version (X.Y) +# Arguments: vxy - X.Y part of the version +# Returns vX.Y.Z variant of the version +function ct_os_get_latest_ver(){ + local vxy="v$1" + for vz in {3..0} ; do + curl -sif "https://github.com/openshift/origin/releases/tag/${vxy}.${vz}" >/dev/null && echo "${vxy}.${vz}" && return 0 + done + echo "ERROR: version ${vxy} not found in https://github.com/openshift/origin/tags" >&2 + return 1 +} + +# ct_os_download_upstream_oc OC_VERSION OUTPUT_DIR +# -------------------- +# Downloads a particular version of openshift-origin-client-tools from +# github into specified output directory +# Arguments: oc_version - version of OSE (e.g. 
v3.7.2) +# Arguments: output_dir - output directory +function ct_os_download_upstream_oc() { + local oc_version=$1 + local output_dir=$2 + + # check whether we already have the binary in place + [ -x "${output_dir}/oc" ] && return 0 + + mkdir -p "${output_dir}" + # using html output instead of https://api.github.com/repos/openshift/origin/releases/tags/${oc_version}, + # because API is limited for number of queries if not authenticated + tarball=$(curl -si "https://github.com/openshift/origin/releases/tag/${oc_version}" | grep -o -e "openshift-origin-client-tools-${oc_version}-[a-f0-9]*-linux-64bit.tar.gz" | head -n 1) + + # download, unpack the binaries and then put them into output directory + echo "Downloading https://github.com/openshift/origin/releases/download/${oc_version}/${tarball} into ${output_dir}/" >&2 + curl -sL https://github.com/openshift/origin/releases/download/${oc_version}/"${tarball}" | tar -C "${output_dir}" -xz + mv -f "${output_dir}"/"${tarball%.tar.gz}"/* "${output_dir}/" + + rmdir "${output_dir}"/"${tarball%.tar.gz}" +} + + # ct_os_test_s2i_app_func IMAGE APP CONTEXT_DIR CHECK_CMD [OC_ARGS] # -------------------- # Runs [image] and [app] in the openshift and optionally specifies env_params @@ -362,9 +495,10 @@ function ct_os_test_s2i_app_func() { local context_dir=${3} local check_command=${4} local oc_args=${5:-} + local import_image=${6:-} local image_name_no_namespace=${image_name##*/} local service_name="${image_name_no_namespace}-testing" - local image_tagged="${image_name_no_namespace}:testing" + local image_tagged="${image_name_no_namespace}:${VERSION}" if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then echo "ERROR: ct_os_test_s2i_app_func() requires at least 4 arguments that cannot be emtpy." 
>&2 @@ -373,7 +507,19 @@ function ct_os_test_s2i_app_func() { ct_os_new_project # Create a specific imagestream tag for the image so that oc cannot use anything else - ct_os_upload_image "${image_name}" "${image_tagged}" + if [ "${CT_SKIP_UPLOAD_IMAGE:-false}" == 'true' ] ; then + if [ -n "${import_image}" ] ; then + echo "Importing image ${import_image} as ${image_name}:${VERSION}" + oc import-image ${image_name}:${VERSION} --from ${import_image} --confirm + else + echo "Uploading and importing image skipped." + fi + else + if [ -n "${import_image}" ] ; then + echo "Warning: Import image ${import_image} requested, but uploading image ${image_name} instead." + fi + ct_os_upload_image "${image_name}" "${image_tagged}" + fi local app_param="${app}" if [ -d "${app}" ] ; then @@ -435,6 +581,7 @@ function ct_os_test_s2i_app() { local protocol=${6:-http} local response_code=${7:-200} local oc_args=${8:-} + local import_image=${9:-} if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then echo "ERROR: ct_os_test_s2i_app() requires at least 4 arguments that cannot be emtpy." >&2 @@ -444,8 +591,8 @@ function ct_os_test_s2i_app() { ct_os_test_s2i_app_func "${image_name}" \ "${app}" \ "${context_dir}" \ - "ct_test_response '${protocol}://:${port}' '${response_code}' '${expected_output}'" \ - "${oc_args}" + "ct_os_test_response_internal '${protocol}://:${port}' '${response_code}' '${expected_output}'" \ + "${oc_args}" "${import_image}" } # ct_os_test_template_app_func IMAGE APP IMAGE_IN_TEMPLATE CHECK_CMD [OC_ARGS] @@ -471,6 +618,7 @@ function ct_os_test_template_app_func() { local check_command=${4} local oc_args=${5:-} local other_images=${6:-} + local import_image=${7:-} if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then echo "ERROR: ct_os_test_template_app_func() requires at least 4 arguments that cannot be emtpy." 
>&2 @@ -481,27 +629,39 @@ function ct_os_test_template_app_func() { local image_tagged="${name_in_template}:${VERSION}" ct_os_new_project + # Create a specific imagestream tag for the image so that oc cannot use anything else - ct_os_upload_image "${image_name}" "${image_tagged}" - - # upload also other images, that template might need (list of pairs in the format | - local images_tags_a - local i_t - for i_t in ${other_images} ; do - echo "${i_t}" - IFS='|' read -ra image_tag_a <<< "${i_t}" - docker pull "${image_tag_a[0]}" - ct_os_upload_image "${image_tag_a[0]}" "${image_tag_a[1]}" - done + if [ "${CT_SKIP_UPLOAD_IMAGE:-false}" == 'true' ] ; then + if [ -n "${import_image}" ] ; then + echo "Importing image ${import_image} as ${image_name}:${VERSION}" + oc import-image ${image_name}:${VERSION} --from ${import_image} --confirm + else + echo "Uploading and importing image skipped." + fi + else + if [ -n "${import_image}" ] ; then + echo "Warning: Import image ${import_image} requested, but uploading image ${image_name} instead." 
+ fi + ct_os_upload_image "${image_name}" "${image_tagged}" + + # upload also other images, that template might need (list of pairs in the format | + local images_tags_a + local i_t + for i_t in ${other_images} ; do + echo "${i_t}" + IFS='|' read -ra image_tag_a <<< "${i_t}" + docker pull "${image_tag_a[0]}" + ct_os_upload_image "${image_tag_a[0]}" "${image_tag_a[1]}" + done + fi local local_template=$(ct_obtain_input "${template}") + local namespace=${CT_NAMESPACE:-$(oc project -q)} oc new-app ${local_template} \ - -p NAME="${service_name}" \ - -p NAMESPACE="$(oc project -q)" \ + --name "${name_in_template}" \ + -p NAMESPACE="${namespace}" \ ${oc_args} - oc start-build "${service_name}" - ct_os_wait_pod_ready "${service_name}" 300 local ip=$(ct_os_get_service_ip "${service_name}") @@ -549,6 +709,7 @@ function ct_os_test_template_app() { local response_code=${7:-200} local oc_args=${8:-} local other_images=${9:-} + local import_image=${10:-} if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then echo "ERROR: ct_os_test_template_app() requires at least 4 arguments that cannot be emtpy." >&2 @@ -558,27 +719,28 @@ function ct_os_test_template_app() { ct_os_test_template_app_func "${image_name}" \ "${template}" \ "${name_in_template}" \ - "ct_test_response '${protocol}://:${port}' '${response_code}' '${expected_output}'" \ + "ct_os_test_response_internal '${protocol}://:${port}' '${response_code}' '${expected_output}'" \ "${oc_args}" \ - "${other_images}" + "${other_images}" \ + "${import_image}" } -# ct_os_test_image_update IMAGE IS CHECK_CMD OC_ARGS +# ct_os_test_image_update IMAGE_NAME OLD_IMAGE ISTAG CHECK_FUNCTION OC_ARGS # -------------------- # Runs an image update test with [image] uploaded to [is] imagestream -# and checks the services using an arbitrary function provided in [check_cmd]. 
-# Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory) -# Arguments: is - imagestream to upload the images into (compulsory) -# Arguments: check_cmd - command to be run to check functionality of created services (compulsory) +# and checks the services using an arbitrary function provided in [check_function]. +# Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: old_image - valid name of the image from the registry +# Arguments: istag - imagestream to upload the images into (compulsory) +# Arguments: check_function - command to be run to check functionality of created services (compulsory) # Arguments: oc_args - arguments to use during oc new-app (compulsory) ct_os_test_image_update() { local image_name=$1; shift + local old_image=$1; shift local istag=$1; shift local check_function=$1; shift local service_name=${image_name##*/} - local old_image="" ip="" check_command_exp="" registry="" - registry=$(ct_registry_from_os "$OS") - old_image="$registry/$image_name" + local ip="" check_command_exp="" echo "Running image update test for: $image_name" ct_os_new_project @@ -607,3 +769,157 @@ ct_os_test_image_update() { ct_os_delete_project } + +# ct_os_deploy_cmd_image IMAGE_NAME +# -------------------- +# Runs a special command pod, a pod that does nothing, but includes utilities for testing. +# A typical usage is a mysql pod that includes mysql commandline, that we need for testing. +# Running commands inside this command pod is done via ct_os_cmd_image_run function. +# The pod is not run again if already running. 
+# Arguments: image_name - image to be used as a command pod +function ct_os_deploy_cmd_image() { + local image_name=${1} + oc get pod command-app &>/dev/null && echo "command POD already running" && return 0 + echo "command POD not running yet, will start one called command-app" + oc create -f - <" + local sleep_time=3 + local attempt=1 + local result=1 + local status + local response_code + local response_file=$(mktemp /tmp/ct_test_response_XXXXXX) + local util_image_name='python:3.6' + + ct_os_deploy_cmd_image "${util_image_name}" + + while [ ${attempt} -le ${max_attempts} ]; do + ct_os_cmd_image_run "curl --connect-timeout 10 -s -w '%{http_code}' '${url}'" >${response_file} && status=0 || status=1 + if [ ${status} -eq 0 ]; then + response_code=$(cat ${response_file} | tail -c 3) + if [ "${response_code}" -eq "${expected_code}" ]; then + result=0 + fi + cat ${response_file} | grep -qP -e "${body_regexp}" || result=1; + # Some services return 40x code until they are ready, so let's give them + # some chance and not end with failure right away + # Do not wait if we already have expected outcome though + if [ ${result} -eq 0 -o ${attempt} -gt ${ignore_error_attempts} -o ${attempt} -eq ${max_attempts} ] ; then + break + fi + fi + attempt=$(( ${attempt} + 1 )) + sleep ${sleep_time} + done + rm -f ${response_file} + return ${result} +} + +# ct_os_get_image_from_pod +# ------------------------ +# Print image identifier from an existing pod to stdout +# Argument: pod_prefix - prefix or full name of the pod to get image from +ct_os_get_image_from_pod() { + local pod_prefix=$1 ; shift + local pod_name=$(ct_os_get_pod_name $pod_prefix) + oc get "po/${pod_name}" -o yaml | sed -ne 's/^\s*image:\s*\(.*\)\s*$/\1/ p' | head -1 +} + +# ct_os_check_cmd_internal +# ---------------- +# Runs a specified command, checks exit code and compares the output with expected regexp. 
+# That all is done inside an image in the cluster, so the function is used +# typically in clusters that are not accessible outside. +# The check is repeated until timeout. +# Argument: util_image_name - name of the image in the cluster that is used for running the cmd +# Argument: service_name - kubernetes' service name to work with (IP address is taken from this one) +# Argument: check_command - command that is run within the util_image_name container +# Argument: expected_content_match - regexp that must be in the output (use .* to ignore check) +# Argument: timeout - number of seconds to wait till the check succeeds +function ct_os_check_cmd_internal() { + local util_image_name=$1 ; shift + local service_name=$1 ; shift + local check_command=$1 ; shift + local expected_content_match=${1:-.*} ; shift + local timeout=${1:-60} ; shift || : + + : " Service ${service_name} check ..." + + local output + local ret + local ip=$(ct_os_get_service_ip "${service_name}") + local check_command_exp=$(echo "$check_command" | sed -e "s//$ip/g") + + ct_os_deploy_cmd_image $(ct_os_get_image_from_pod "${util_image_name}" | head -n 1) + SECONDS=0 + + echo -n "Waiting for ${service_name} service becoming ready ..." + while true ; do + output=$(ct_os_cmd_image_run "$check_command_exp") + ret=$? + echo "${output}" | grep -qe "${expected_content_match}" || ret=1 + if [ ${ret} -eq 0 ] ; then + echo " PASS" + return 0 + fi + echo -n "." + [ ${SECONDS} -gt ${timeout} ] && break + sleep 3 + done + echo " FAIL" + return 1 +} + diff --git a/test/test-lib.sh b/test/test-lib.sh index dfc63d9..e372870 100644 --- a/test/test-lib.sh +++ b/test/test-lib.sh @@ -208,6 +208,29 @@ function ct_doc_content_old() { } +# ct_npm_works +# -------------------- +# Checks existance of the npm tool and runs it. +function ct_npm_works() { + local tmpdir=$(mktemp -d) + : " Testing npm in the container image" + docker run --rm ${IMAGE_NAME} /bin/bash -c "npm --version" >${tmpdir}/version + + if [ $? 
-ne 0 ] ; then + echo "ERROR: 'npm --version' does not work inside the image ${IMAGE_NAME}." >&2 + return 1 + fi + + docker run --rm ${IMAGE_NAME} /bin/bash -c "npm install jquery && test -f node_modules/jquery/src/jquery.js" + if [ $? -ne 0 ] ; then + echo "ERROR: npm could not install jquery inside the image ${IMAGE_NAME}." >&2 + return 1 + fi + + : " Success!" +} + + # ct_path_append PATH_VARNAME DIRECTORY # ------------------------------------- # Append DIRECTORY to VARIABLE of name PATH_VARNAME, the VARIABLE must consist @@ -284,7 +307,7 @@ function ct_obtain_input() { local output=$(mktemp "/var/tmp/test-input-XXXXXX$extension") if [ -f "${input}" ] ; then - cp "${input}" "${output}" + cp -f "${input}" "${output}" elif [ -d "${input}" ] ; then rm -f "${output}" cp -r -LH "${input}" "${output}" @@ -400,3 +423,85 @@ ct_random_string() | fold -w "${1-10}" \ | head -n 1 ) + +# ct_s2i_usage IMG_NAME [S2I_ARGS] +# ---------------------------- +# Create a container and run the usage script inside +# Argument: IMG_NAME - name of the image to be used for the container run +# Argument: S2I_ARGS - Additional list of source-to-image arguments, currently unused. +ct_s2i_usage() +{ + local img_name=$1; shift + local s2i_args="$*"; + local usage_command="/usr/libexec/s2i/usage" + docker run --rm "$img_name" bash -c "$usage_command" +} + +# ct_s2i_build_as_df APP_PATH SRC_IMAGE DST_IMAGE [S2I_ARGS] +# ---------------------------- +# Create a new s2i app image from local sources in a similar way as source-to-image would have used. +# Argument: APP_PATH - local path to the app sources to be used in the test +# Argument: SRC_IMAGE - image to be used as a base for the s2i build +# Argument: DST_IMAGE - image name to be used during the tagging of the s2i build result +# Argument: S2I_ARGS - Additional list of source-to-image arguments. +# Only used to check for pull-policy=never and environment variable definitions. 
+ct_s2i_build_as_df() +{ + local app_path=$1; shift + local src_image=$1; shift + local dst_image=$1; shift + local s2i_args="$*"; + local local_app=upload/src/ + local local_scripts=upload/scripts/ + local user_id= + local df_name= + local tmpdir= + # Use /tmp to not pollute cwd + tmpdir=$(mktemp -d) + df_name=$(mktemp -p "$tmpdir" Dockerfile.XXXX) + pushd "$tmpdir" + # Check if the image is available locally and try to pull it if it is not + docker images "$src_image" &>/dev/null || echo "$s2i_args" | grep -q "pull-policy=never" || docker pull "$src_image" + user_id=$(docker inspect -f "{{.ContainerConfig.User}}" "$src_image") + # Strip file:// from APP_PATH and copy its contents into current context + mkdir -p "$local_app" + cp -r "${app_path/file:\/\//}/." "$local_app" + [ -d "$local_app/.s2i/bin/" ] && mv "$local_app/.s2i/bin" "$local_scripts" + # Create a Dockerfile named df_name and fill it with proper content + #FIXME: Some commands could be combined into a single layer but not sure if worth the trouble for testing purposes + cat <"$df_name" +FROM $src_image +LABEL "io.openshift.s2i.build.image"="$src_image" \\ + "io.openshift.s2i.build.source-location"="$app_path" +USER root +COPY $local_app /tmp/src +EOF + [ -d "$local_scripts" ] && echo "COPY $local_scripts /tmp/scripts" >> "$df_name" && + echo "RUN chown -R $user_id:0 /tmp/scripts" >>"$df_name" + echo "RUN chown -R $user_id:0 /tmp/src" >>"$df_name" + # Check for custom environment variables inside .s2i/ folder + if [ -e "$local_app/.s2i/environment" ]; then + # Remove any comments and add the contents as ENV commands to the Dockerfile + sed '/^\s*#.*$/d' "$local_app/.s2i/environment" | while read -r line; do + echo "ENV $line" >>"$df_name" + done + fi + # Filter out env var definitions from $s2i_args and create Dockerfile ENV commands out of them + echo "$s2i_args" | grep -o -e '\(-e\|--env\)[[:space:]=]\S*=\S*' | sed -e 's/-e /ENV /' -e 's/--env[ =]/ENV /' >>"$df_name" + echo "USER $user_id" 
>>"$df_name" + # If exists, run the custom assemble script, else default to /usr/libexec/s2i/assemble + if [ -x "$local_scripts/assemble" ]; then + echo "RUN /tmp/scripts/assemble" >>"$df_name" + else + echo "RUN /usr/libexec/s2i/assemble" >>"$df_name" + fi + # If exists, set the custom run script as CMD, else default to /usr/libexec/s2i/run + if [ -x "$local_scripts/run" ]; then + echo "CMD /tmp/scripts/run" >>"$df_name" + else + echo "CMD /usr/libexec/s2i/run" >>"$df_name" + fi + # Run the build and tag the result + docker build -f "$df_name" -t "$dst_image" . + popd +}