Remove scripts which are no longer in use (#5295)

* remove scripts which are no longer in use

Signed-off-by: anandrkskd <anandrkskd@gmail.com>

* cleanup

Signed-off-by: anandrkskd <anandrkskd@gmail.com>

* removing sync-docs.sh as we have moved to new docs

Signed-off-by: anandrkskd <anandrkskd@gmail.com>

* cleanup

Signed-off-by: anandrkskd <anandrkskd@gmail.com>

* remove FIt

Signed-off-by: anandrkskd <anandrkskd@gmail.com>
Anand Kumar Singh
2022-01-10 15:06:50 +05:30
committed by GitHub
parent b20103c9f3
commit e64ecc1f67
14 changed files with 49 additions and 821 deletions

View File

@@ -1,54 +0,0 @@
#!/bin/bash
# fpm needs to be installed (https://github.com/jordansissel/fpm)
BIN_DIR="./dist/bin/"
PKG_DIR="./dist/pkgs/"
mkdir -p $PKG_DIR
# package version, use current date by default (if build from master)
PKG_VERSION=$(date "+%Y%m%d%H%M%S")
# if this is run on Travis, make sure that the binary was built with the correct version
if [[ -n $TRAVIS_TAG ]]; then
echo "Checking if odo version was set to the same version as current tag"
# use sed to get only semver part
bin_version=$(${BIN_DIR}/linux-amd64/odo version --client | head -1 | sed "s/^odo \(.*\) (.*)$/\1/")
if [ "$TRAVIS_TAG" == "${bin_version}" ]; then
echo "OK: odo version output is matching current tag"
else
echo "ERR: TRAVIS_TAG ($TRAVIS_TAG) is not matching 'odo version' (v${bin_version})"
exit 1
fi
# this is built from a tag, which means it is a proper release; use its version for PKG_VERSION
PKG_VERSION=$(echo ${bin_version} | sed "s/^v\(.*\)$/\1/")
fi
# create packages using fpm
fpm -h >/dev/null 2>&1 || {
echo "ERROR: fpm (https://github.com/jordansissel/fpm) is not installed. Can't create linux packages"
exit 1
}
TMP_DIR=$(mktemp -d)
mkdir -p $TMP_DIR/usr/local/bin/
cp $BIN_DIR/linux-amd64/odo $TMP_DIR/usr/local/bin/
echo "creating DEB package"
fpm \
--input-type dir --output-type deb \
--chdir $TMP_DIR \
--name odo --version $PKG_VERSION \
--architecture amd64 \
--maintainer "Tomas Kral <tkral@redhat.com>" \
--package $PKG_DIR
echo "creating RPM package"
fpm \
--input-type dir --output-type rpm \
--chdir $TMP_DIR \
--name odo --version $PKG_VERSION \
--architecture x86_64 --rpm-os linux \
--maintainer "Tomas Kral <tkral@redhat.com>" \
--package $PKG_DIR
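The two fpm invocations above emit a .deb and an .rpm into ./dist/pkgs/. A quick sanity check of the generated packages before upload might look like this (a sketch; the file names below are illustrative, and dpkg/rpm are assumed to be installed):
# inspect metadata and contents of the built packages (example file names)
dpkg --info ./dist/pkgs/odo_1.2.3_amd64.deb
dpkg --contents ./dist/pkgs/odo_1.2.3_amd64.deb
rpm -qpi ./dist/pkgs/odo-1.2.3-1.x86_64.rpm
rpm -qpl ./dist/pkgs/odo-1.2.3-1.x86_64.rpm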

View File

@@ -1,41 +0,0 @@
#!/usr/bin/env bash
# Runs integration tests on K8S cluster hosted in IBM Cloud
shout() {
set +x
echo -e "\n.---------------------------------------\n${1}\n'---------------------------------------\n"
set -x
}
set -ex
# This is one of the variables injected by ci-firewall. Its purpose is to allow scripts to handle uniqueness as needed
SCRIPT_IDENTITY=${SCRIPT_IDENTITY:-"def-id"}
case ${1} in
k8s)
export TEST_EXEC_NODES="24"
ibmcloud login --apikey $IBMC_DEVELOPER_OCLOGIN_APIKEY -a cloud.ibm.com -r eu-de -g "Developer-CI-and-QE"
ibmcloud ks cluster config --cluster $IBMC_K8S_CLUSTER_ID
# Integration tests
shout "| Running integration Tests on Kubernetes cluster in IBM Cloud"
make test-cmd-project
shout "Cleaning up some leftover namespaces"
set +x
for i in $(kubectl get namespace -o name); do
if [[ $i == "namespace/${SCRIPT_IDENTITY}"* ]]; then
kubectl delete $i
fi
done
set -x
odo logout
;;
*)
echo "Need parameter set to k8s"
exit 1
;;
esac
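The cleanup loop above deletes every namespace whose name starts with the ci-firewall-injected SCRIPT_IDENTITY prefix. The same idea as a reusable helper, sketched (the function name is ours; --wait=false just avoids blocking on namespace finalizers):
# delete namespaces created by this test run, matching by name prefix
cleanup_prefixed_namespaces() {
  local prefix="$1"
  for ns in $(kubectl get namespace -o name); do
    # entries look like "namespace/<name>"
    if [[ $ns == "namespace/${prefix}"* ]]; then
      kubectl delete "$ns" --wait=false
    fi
  done
}
cleanup_prefixed_namespaces "${SCRIPT_IDENTITY:-def-id}"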

View File

@@ -1,76 +0,0 @@
#!/usr/bin/env bash
# Sets up requirements to run tests on a K8S cluster hosted in the IBM Cloud
shout() {
set +x
echo -e "\n.---------------------------------------\n${1}\n'---------------------------------------\n"
set -x
}
# Create a bin directory wherever the script runs. This will be where all binaries that need to be in PATH will reside.
export HOME="~/"
export GOPATH="~/go"
export GOBIN="$GOPATH/bin"
mkdir -p $GOBIN
# This is one of the variables injected by ci-firewall. Its purpose is to allow scripts to handle uniqueness as needed
SCRIPT_IDENTITY=${SCRIPT_IDENTITY:-"def-id"}
# Add GOBIN which is the bin dir we created earlier to PATH so any binaries there are automatically available in PATH
export PATH=$PATH:$GOBIN
# Prep for integration/e2e
shout "Building odo binaries"
make bin
# copy built odo to GOBIN
cp -avrf ./odo $GOBIN/
setup_kubeconfig() {
# Login as admin to IBM Cloud and get kubeconfig file for K8S cluster
ibmcloud login --apikey $IBMC_ADMIN_OCLOGIN_APIKEY -a cloud.ibm.com -r eu-de -g "Developer-CI-and-QE"
ibmcloud ks cluster config --cluster $IBMC_K8S_CLUSTER_ID
export KUBECONFIG=$HOME/.kube/config
if [[ ! -f $KUBECONFIG ]]; then
echo "Could not find kubeconfig file"
exit 1
fi
if [[ ! -z $KUBECONFIG ]]; then
# Copy kubeconfig to current directory, to avoid clashes with other test runs
# Read and Write permission to current kubeconfig file
cp $KUBECONFIG "`pwd`/config"
chmod 640 "`pwd`/config"
export KUBECONFIG="`pwd`/config"
fi
}
case ${1} in
k8s)
setup_kubeconfig
;;
*)
echo "<<< Need parameter set to K8S >>>"
exit 1
;;
esac
### Applies to both K8S and minikube
# Setup to find necessary data from cluster setup
## Constants
SETUP_OPERATORS="./scripts/configure-cluster/common/setup-operators.sh"
# The OLM Version
export OLM_VERSION="v0.18.3"
# Enable OLM for running operator tests
curl -sL https://github.com/operator-framework/operator-lifecycle-manager/releases/download/$OLM_VERSION/install.sh | bash -s $OLM_VERSION
set +x
# Get kubectl cluster info
kubectl cluster-info
set -x
# Set kubernetes env var as true, to distinguish the platform inside the tests
export KUBERNETES=true
# Create Operators for Operator tests
sh $SETUP_OPERATORS
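The curl | bash line installs OLM at the pinned OLM_VERSION before the operator setup script runs. A hedged follow-up check that OLM actually came up (not in the original script; the deployment names match a stock OLM install):
# wait for the core OLM deployments to become available
kubectl rollout status -n olm deployment/olm-operator --timeout=120s
kubectl rollout status -n olm deployment/catalog-operator --timeout=120s
kubectl get csv --all-namespaces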

View File

@@ -18,22 +18,12 @@ case $1 in
export SENDQUEUE="amqp.ci.queue.minikube.send"
export SENDTOPIC="amqp.ci.topic.minikube.send"
export EXCHANGE="amqp.ci.exchange.minikube.send"
-export SETUP_SCRIPT="scripts/minikube-minishift-setup-env.sh minikube"
-export RUN_SCRIPT="scripts/minikube-minishift-all-tests.sh minikube"
+export SETUP_SCRIPT="scripts/minikube-minishift-setup-env.sh"
+export RUN_SCRIPT="scripts/minikube-minishift-all-tests.sh"
export TIMEOUT="4h00m"
;;
-k8s)
-#Removing minishift (dropped support for 3.11) and adding K8S on IBM Cloud
-export JOB_NAME="odo-k8s-ibmc-pr-build"
-export SENDQUEUE="amqp.ci.queue.k8sibmc.send"
-export SENDTOPIC="amqp.ci.topic.k8sibmc.send"
-export EXCHANGE="amqp.ci.exchange.k8sibmc.send"
-export SETUP_SCRIPT="scripts/k8sibmc-setup-env.sh k8s"
-export RUN_SCRIPT="scripts/k8sibmc-all-tests.sh k8s"
*)
echo "Must pass minikube or k8s as paramater"
echo "Must pass minikube as paramater"
exit 1
;;
esac

View File

@@ -1,9 +1,9 @@
#!/usr/bin/env bash
shout() {
set +x
echo -e "\n.---------------------------------------\n${1}\n'---------------------------------------\n"
set -x
}
set -ex
@@ -11,47 +11,20 @@ set -ex
# This is one of the variables injected by ci-firewall. Its purpose is to allow scripts to handle uniqueness as needed
SCRIPT_IDENTITY=${SCRIPT_IDENTITY:-"def-id"}
-case ${1} in
-minikube)
# Integration tests
shout "| Running integration Tests on MiniKube"
make test-operator-hub
make test-cmd-project
make test-integration-devfile
shout "Cleaning up some leftover namespaces"
set +x
for i in $(kubectl get namespace -o name); do
if [[ $i == "namespace/${SCRIPT_IDENTITY}"* ]]; then
kubectl delete $i
fi
done
set -x
-odo logout
-;;
-minishift)
-cd $HOME/redhat-developer/odo
-eval $(minishift oc-env)
-shout "| Logging in to minishift..."
-oc login -u developer -p developer --insecure-skip-tls-verify $(minishift ip):8443
-shout "| Executing on minishift: generic, login, component command and plugin handler integration tests"
-make test-integration
-shout "| Executing on minishift: devfile catalog, create, push, watch, delete, registry, exec, test, env, status, config, debug and log command integration tests"
-make test-integration-devfile
-shout "| Executing on minishift: core beta, java, source e2e tests"
-make test-e2e-devfile
-odo logout
-;;
-*)
-echo "Need parameter set to minikube or minishift"
-exit 1
-;;
-esac
odo logout
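With the case dispatch gone, the script no longer takes a positional argument. A sketch of how a CI runner might invoke the pair of scripts now (SCRIPT_IDENTITY is normally injected by ci-firewall; setting it by hand here is our assumption):
./scripts/minikube-minishift-setup-env.sh
SCRIPT_IDENTITY="pr-check-1234" ./scripts/minikube-minishift-all-tests.sh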

View File

@@ -95,46 +95,38 @@ EOF
cd $pwd || return
}
-case ${1} in
-minikube)
mkStatus=$(minikube status)
shout "| Checking if Minikube needs to be started..."
if [[ "$mkStatus" == *"host: Running"* ]] && [[ "$mkStatus" == *"kubelet: Running"* ]]; then
if [[ "$mkStatus" == *"kubeconfig: Misconfigured"* ]]; then
minikube update-context
fi
setup_kubeconfig
kubectl config use-context minikube
else
minikube delete
shout "| Start minikube"
minikube start --vm-driver=docker --container-runtime=docker
setup_kubeconfig
setup_operator
setup_minikube_developer
fi
minikube version
# Setup to find necessary data from cluster setup
## Constants
set +x
# Get kubectl cluster info
kubectl cluster-info
set -x
# Set kubernetes env var as true, to distinguish the platform inside the tests
export KUBERNETES=true
-;;
-*)
-echo "<<< Need parameter set to minikube or minishift >>>"
-exit 1
-;;
-esac
# Create a developer user if it is not created already and change the context to use it after the setup is done
kubectl config get-contexts developer-minikube || setup_minikube_developer
kubectl config use-context developer-minikube
# Create a bin directory wherever the script runs. This will be where all binaries that need to be in PATH will reside.
export HOME=$(pwd)/home
@@ -150,4 +142,4 @@ shout "Building odo binaries"
make bin
# copy built odo to GOBIN
cp -avrf ./odo $GOBIN/
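The status check above matches substrings of minikube's human-readable output. A sketch of an alternative that asks for machine-readable status instead (minikube supports --output json; jq is an extra assumption here):
# restart minikube only if the host is not already running
hostState=$(minikube status --output json | jq -r '.Host')
if [[ "$hostState" != "Running" ]]; then
  minikube delete
  minikube start --vm-driver=docker --container-runtime=docker
fi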

View File

@@ -1,52 +0,0 @@
#!/usr/bin/env bash
shout() {
set +x
echo -e "\n.---------------------------------------\n${1}\n'---------------------------------------\n"
set -x
}
set -ex
export MINISHIFT_GITHUB_API_TOKEN=$MINISHIFT_GITHUB_API_TOKEN_VALUE
msStatus=$(minishift status)
shout "| Checking if Minishift needs to be started..."
if [[ "$msStatus" == *"Does Not Exist"* ]] || [[ "$msStatus" == *"Minishift: Stopped"* ]]
then
shout "| Starting Minishift..."
(minishift start --vm-driver kvm --show-libmachine-logs -v 5)
else
if [[ "$msStatus" == *"OpenShift: Stopped"* ]];
then
shout "| Minishift is running but Openshift is stopped, restarting minishift..."
(minishift stop)
(minishift start --vm-driver kvm --show-libmachine-logs -v 5)
else
if [[ "$msStatus" == *"Running"* ]];
then shout "| Minishift is running"
fi
fi
fi
compList=$(minishift openshift component list)
shout "| Checking if required components need to be installed..."
if [[ "$compList" == *"service-catalog"* ]]
then
shout "| service-catalog already installed "
else
shout "| Installing service-catalog ..."
(minishift openshift component add service-catalog)
fi
if [[ "$compList" == *"automation-service-broker"* ]]
then
shout "| automation-service-broker already installed "
else
shout "| Installing automation-service-broker ..."
(minishift openshift component add automation-service-broker)
fi
if [[ "$compList" == *"template-service-broker"* ]]
then
shout "| template-service-broker already installed "
else
shout "| Installing template-service-broker ..."
(minishift openshift component add template-service-broker)
fi

View File

@@ -1,50 +0,0 @@
#!/bin/sh
## Script for installing and running `oc cluster up`
## Inspired by https://github.com/radanalyticsio/oshinko-cli/blob/master/.travis.yml
## Use this variable to get more control over downloading client binary
OPENSHIFT_CLIENT_BINARY_URL=${OPENSHIFT_CLIENT_BINARY_URL:-'https://github.com/openshift/origin/releases/download/v3.11.0/openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz'}
sudo service docker stop
sudo sed -i -e 's/"mtu": 1460/"mtu": 1460, "insecure-registries": ["172.30.0.0\/16"]/' /etc/docker/daemon.json
sudo cat /etc/docker/daemon.json
sudo service docker start
sudo service docker status
# Docker version that oc cluster up uses
docker version
## download oc binaries
sudo wget $OPENSHIFT_CLIENT_BINARY_URL -O /tmp/openshift-origin-client-tools.tar.gz 2> /dev/null > /dev/null
sudo tar -xvzf /tmp/openshift-origin-client-tools.tar.gz --strip-components=1 -C /usr/local/bin
## Get oc version
oc version
## the command below is needed to get oc working on Ubuntu
OPENSHIFT_CLIENT_VERSION=`echo $OPENSHIFT_CLIENT_BINARY_URL | awk -F '//' '{print $2}' | cut -d '/' -f 6`
sudo docker run -v /:/rootfs -ti --rm --entrypoint=/bin/bash --privileged openshift/origin:$OPENSHIFT_CLIENT_VERSION -c "mv /rootfs/bin/findmnt /rootfs/bin/findmnt.backup"
while true; do
if [ "$1" = "service-catalog" ]; then
oc cluster up --base-dir=$HOME/oscluster
oc cluster add --base-dir=$HOME/oscluster service-catalog
oc cluster add --base-dir=$HOME/oscluster template-service-broker
oc cluster add --base-dir=$HOME/oscluster automation-service-broker
else
oc cluster up
fi
if [ "$?" -eq 0 ]; then
./scripts/travis-check-pods.sh $1
if [ "$?" -eq 0 ]; then
break
fi
fi
echo "Retrying oc cluster up after failure"
oc cluster down
sleep 5
done
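The while loop retries `oc cluster up` indefinitely, tearing the cluster down between attempts, until the pod health check passes. The same retry-with-cleanup pattern isolated as a sketch, with a bounded attempt count (the bound is our addition; the original retries forever):
attempts=0
until oc cluster up && ./scripts/travis-check-pods.sh; do
  attempts=$((attempts + 1))
  if [ "$attempts" -ge 5 ]; then
    echo "giving up after $attempts attempts"
    exit 1
  fi
  echo "Retrying oc cluster up after failure"
  oc cluster down
  sleep 5
done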

View File

@@ -1,142 +0,0 @@
#!/usr/bin/env bash
shout() {
set +x
echo -e "\n!!!!!!!!!!!!!!!!!!!!\n${1}\n!!!!!!!!!!!!!!!!!!!!\n"
set -x
}
set -ex
shout "Setting up some stuff"
# Create a bin directory wherever the script runs. This will be where all binaries that need to be in PATH will reside.
mkdir bin artifacts
# Change the default location of go's bin directory (without affecting GOPATH). This is where compiled binaries will end up by default
# e.g. go get ginkgo later on will produce the ginkgo binary in GOBIN
export GOBIN="`pwd`/bin"
# Set kubeconfig to current dir. This ensures no clashes with other test runs
export KUBECONFIG="`pwd`/config"
export ARTIFACTS_DIR="`pwd`/artifacts"
export CUSTOM_HOMEDIR=$ARTIFACT_DIR
LIBDIR="./scripts/configure-cluster"
LIBCOMMON="$LIBDIR/common"
# This is one of the variables injected by ci-firewall. Its purpose is to allow scripts to handle uniqueness as needed
SCRIPT_IDENTITY=${SCRIPT_IDENTITY:-"def-id"}
export SKIP_USER_LOGIN_TESTS="true"
shout "Getting oc binary"
if [[ $BASE_OS == "linux" ]]; then
set +x
curl --connect-timeout 150 --max-time 2048 -k ${OCP4X_DOWNLOAD_URL}/${ARCH}/${BASE_OS}/oc.tar -o ./oc.tar
set -x
tar -C $GOBIN -xvf ./oc.tar && rm -rf ./oc.tar
else
set +x
curl --connect-timeout 150 --max-time 2048 -k ${OCP4X_DOWNLOAD_URL}/${ARCH}/${BASE_OS}/oc.zip -o ./oc.zip
set -x
if [[ $BASE_OS == "windows" ]]; then
GOBIN="$(cygpath -pw $GOBIN)"
CURRDIR="$(cygpath -pw $WORKDIR)"
powershell -Command "Expand-Archive -Path $CURRDIR\oc.zip -DestinationPath $GOBIN"
chmod +x $GOBIN/*
fi
if [[ $BASE_OS == "mac" ]]; then
unzip ./oc.zip -d $GOBIN && rm -rf ./oc.zip && chmod +x $GOBIN/oc
PATH="$PATH:/usr/local/bin:/usr/local/go/bin"
fi
fi
# Add GOBIN which is the bin dir we created earlier to PATH so any binaries there are automatically available in PATH
export PATH=$PATH:$GOBIN
#-----------------------------------------------------------------------------
shout "Running unit tests"
# Run unit tests
GOFLAGS='-mod=vendor' make test
# Prep for integration/e2e
shout "Building odo binaries"
make bin
# copy built odo to GOBIN
cp -avrf ./odo $GOBIN/
shout "getting ginkgo"
make goget-ginkgo
# Integration tests
shout "Testing against 4x cluster"
shout "Logging into 4x cluster for some setup (logs hidden)"
set +x
oc login -u kubeadmin -p ${OCP4X_KUBEADMIN_PASSWORD} --insecure-skip-tls-verify ${OCP4X_API_URL}
set -x
shout "Doing some presetup"
# Delete any projects with the SCRIPT_IDENTITY prefix. This is GC from previous runs that failed before the end-of-script cleanup
for i in $(oc projects -q); do
if [[ $i == "${SCRIPT_IDENTITY}"* ]]; then
oc delete project $i
fi
done
# Generate random project names for some tests
export REDHAT_OPENJDK11_RHEL8_PROJECT="${SCRIPT_IDENTITY}$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 4 | head -n 1)"
export REDHAT_OPENJDK11_UBI8_PROJECT="${SCRIPT_IDENTITY}$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 4 | head -n 1)"
export REDHAT_NODEJS12_RHEL7_PROJECT="${SCRIPT_IDENTITY}$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 4 | head -n 1)"
export REDHAT_NODEJS12_UBI8_PROJECT="${SCRIPT_IDENTITY}$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 4 | head -n 1)"
export REDHAT_NODEJS14_UBI8_PROJECT="${SCRIPT_IDENTITY}$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 4 | head -n 1)"
# Create the namespaces for the e2e image tests and apply the pull secret to each namespace
for i in `echo "$REDHAT_OPENJDK11_RHEL8_PROJECT $REDHAT_NODEJS12_RHEL7_PROJECT $REDHAT_NODEJS12_UBI8_PROJECT $REDHAT_OPENJDK11_UBI8_PROJECT $REDHAT_NODEJS14_UBI8_PROJECT $REDHAT_POSTGRES_OPERATOR_PROJECT"`; do
# create the namespace
oc new-project $i
# Applying pull secret to the namespace which will be used for pulling images from authenticated registry
oc get secret pull-secret -n openshift-config -o yaml | sed "s/openshift-config/$i/g" | oc apply -f -
# Let developer user have access to the project
oc adm policy add-role-to-user edit developer
done
#---------------------------------------------------------------------
shout "Logging into 4x cluster as developer (logs hidden)"
set +x
oc login -u developer -p ${OCP4X_DEVELOPER_PASSWORD} --insecure-skip-tls-verify ${OCP4X_API_URL}
set -x
# Integration tests
shout "Running integration Tests"
make test-operator-hub || error=true
make test-integration || error=true
make test-integration-devfile || error=true
make test-cmd-login-logout || error=true
make test-cmd-project || error=true
# E2e tests
shout "Running e2e tests"
make test-e2e-all || error=true
# Fail the build if there is any error while test execution
if [ $error ]; then
exit -1
fi
shout "cleaning up post tests"
shout "Logging into 4x cluster for cleanup (logs hidden)"
set +x
oc login -u kubeadmin -p ${OCP4X_KUBEADMIN_PASSWORD} --insecure-skip-tls-verify ${OCP4X_API_URL}
set -x
shout "Cleaning up some leftover projects"
set +x
for i in $(oc projects -q); do
if [[ $i == "${SCRIPT_IDENTITY}"* ]]; then
oc delete project $i
fi
done
set -x
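The `make <target> || error=true` lines let every suite run to completion before the build is failed, instead of aborting on the first red suite. That collect-then-fail pattern in isolation, sketched (target names are from the script above):
error=""
for target in test-operator-hub test-integration test-integration-devfile; do
  make "$target" || error=true   # keep going, but remember the failure
done
# [ $error ] succeeds only if error was ever set
if [ $error ]; then
  exit 1
fi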

View File

@@ -1,9 +0,0 @@
#!/bin/bash
set -x
# Image stream source https://github.com/openshift/library/tree/master/community
# odo supported nodejs image stream
oc apply -n openshift -f tests/image-streams/supported-nodejs.json
# odo supported java image stream
oc apply -n openshift -f tests/image-streams/supported-java.json

View File

@@ -1,155 +0,0 @@
#!/usr/bin/env bash
# This script uses: https://gist.github.com/domenic/ec8b0fc8ab45f39403dd
# which synchronizes any documentation created in the GitHub repo
# to the "gh-pages" branch and thus the website.
#
# In case the above Gist is out of date, here are the instructions:
#
# 1. Generate a NEW SSH key that will be used to commit to the branch. This should have administrative
# privileges to modify your repo. https://help.github.com/articles/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent/
#
# 2. Encrypt the key using travis:
# $ travis encrypt-file deploy_key
# encrypting deploy_key for domenic/travis-encrypt-file-example
# storing result as deploy_key.enc
# storing secure env variables for decryption
#
# Please add the following to your build script (before_install stage in your .travis.yml, for instance):
#
# openssl aes-256-cbc -K $encrypted_0a6446eb3ae3_key -iv $encrypted_0a6446eb3ae3_iv -in super_secret.txt.enc -out super_secret.txt -d
#
# Pro Tip: You can add it automatically by running with --add.
#
# Make sure to add deploy_key.enc to the git repository.
# Make sure not to add deploy_key to the git repository.
# Commit all changes to your .travis.yml.
#
# 3. Make note of the label value and add it to the ENCRYPTION_LABEL environment variable below.
# Ensures that we run on Travis
if [ "$TRAVIS_BRANCH" != "master" ] || [ "$BUILD_DOCS" != "yes" ] || [ "$TRAVIS_SECURE_ENV_VARS" == "false" ] || [ "$TRAVIS_PULL_REQUEST" != "false" ] ; then
echo "Must be: a merged pr on the master branch, BUILD_DOCS=yes, TRAVIS_SECURE_ENV_VARS=false"
exit 0
fi
# Change the below to your credentials
DOCS_REPO_NAME="odo"
DOCS_REPO_URL="git@github.com:redhat-developer/odo.git"
DOCS_REPO_HTTP_URL="http://github.com/redhat-developer/odo"
DOCS_USER="odo-bot"
DOCS_EMAIL="cdrage+odo@redhat.com"
# Your encrypted key values as from Steps 2&3 of the above tutorial
ENCRYPTION_LABEL="0e738444b7d0"
# Things that don't "really" need to be changed
DOCS_KEY="scripts/deploy_key"
DOCS_BRANCH="gh-pages"
DOCS_FOLDER="docs"
# decrypt the private key
ENCRYPTED_KEY_VAR="encrypted_${ENCRYPTION_LABEL}_key"
ENCRYPTED_IV_VAR="encrypted_${ENCRYPTION_LABEL}_iv"
ENCRYPTED_KEY=${!ENCRYPTED_KEY_VAR}
ENCRYPTED_IV=${!ENCRYPTED_IV_VAR}
openssl aes-256-cbc -K $ENCRYPTED_KEY -iv $ENCRYPTED_IV -in "$DOCS_KEY.enc" -out "$DOCS_KEY" -d
chmod 600 "$DOCS_KEY"
eval `ssh-agent -s`
ssh-add "$DOCS_KEY"
# clone the repo
git clone "$DOCS_REPO_URL" "$DOCS_REPO_NAME"
# change to that directory (to prevent accidental pushing to master, etc.)
cd "$DOCS_REPO_NAME"
# switch to gh-pages and grab the docs folder from master
git checkout gh-pages
git checkout master docs
# =========================
# START Modify the documentation
# =========================
# Remove README.md from docs folder as it isn't relevant
rm docs/README.md
# Copy over the original README.md in the root directory
# to use as the index page for "documentation" on the site
cp README.md docs/readme.md
# TODO: Add Slate in the future. Keep this here for reference.
# File reference is going to be built with "Slate"
# cp docs/file-reference.md slate/source/index.html.md
# clean-up the docs and convert to jekyll-friendly docs
cd docs
for filename in *.md; do
if cat $filename | head -n 1 | grep "\-\-\-";
then
echo "$filename already contains Jekyll format"
else
# Remove ".md" from the name
name=${filename::-3}
echo "Adding Jekyll file format to $filename"
jekyll="---
layout: default
permalink: /$name/
redirect_from:
- /docs/$name.md/
---
"
echo -e "$jekyll\n$(cat $filename)" > $filename
fi
done
cd ..
# TODO: Add Slate in the future. Keep this here for reference.
# This builds "slate" our file reference documentation.
#slate="---
#title: Odo File Reference
#
#language_tabs:
# - yaml
#
#toc_footers:
# - <a href='http://openshiftdo.org'>openshiftdo.org</a>
# - <a href='https://github.com/redhat-developer/odo'>odo on GitHub</a>
#
#search: true
#---
#"
#echo -e "$slate\n$(cat slate/source/index.html.md)" > slate/source/index.html.md
#cd slate
#docker run --rm -v $PWD:/usr/src/app/source -w /usr/src/app/source cdrage/slate bundle exec middleman build --clean
#cd ..
# Weird file permissions when building slate (since it's in a docker container)
#sudo chown -R $USER:$USER slate
# remove the old file-reference
#rm -rf file-reference
#mv slate/build file-reference
# =========================
# END Modify the documentation
# =========================
# add relevant user information
git config user.name "$DOCS_USER"
# email assigned
git config user.email "$DOCS_EMAIL"
git add --all
# Check if anything changed, and if it's the case, push to origin/master.
if git commit -m 'Update docs' -m "Commit: $DOCS_REPO_HTTP_URL/commit/$TRAVIS_COMMIT" ; then
git push
fi
# cd back to the original root folder
cd ..
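The per-file loop above prepends Jekyll front matter only when the first line is not already a --- delimiter. That detection step in isolation, as a sketch (the helper name and the sample file path are ours):
has_front_matter() {
  # Jekyll front matter opens with a literal --- on line 1
  head -n 1 "$1" | grep -q -- '---'
}
has_front_matter docs/installation.md || echo "docs/installation.md needs front matter"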

View File

@@ -1,98 +0,0 @@
#!/bin/bash
# Code from https://github.com/radanalyticsio/oshinko-cli/blob/master/travis-check-pods.sh
oc login -u system:admin
oc project default
while true; do
V=$(oc get dc docker-registry --template='{{index .status "latestVersion"}}')
P=$(oc get pod docker-registry-$V-deploy --template='{{index .status "phase"}}')
if [ "$?" -eq 0 ]; then
echo phase is $P for docker-registry deploy $V
if [ "$P" == "Failed" ]; then
echo "registry deploy failed, try again"
oc get pods
oc rollout retry dc/docker-registry
sleep 10
continue
fi
fi
REG=$(oc get pod -l deploymentconfig=docker-registry --template='{{index .items 0 "status" "phase"}}')
if [ "$?" -eq 0 ]; then
break
fi
oc get pods
echo "Waiting for registry pod"
sleep 10
done
while true; do
REG=$(oc get pod -l deploymentconfig=docker-registry --template='{{index .items 0 "status" "phase"}}')
if [ "$?" -ne 0 -o "$REG" == "Error" ]; then
echo "Registy pod is in error state..."
exit 1
fi
if [ "$REG" == "Running" ]; then
break
fi
sleep 5
done
while true; do
V=$(oc get dc router --template='{{index .status "latestVersion"}}')
P=$(oc get pod router-$V-deploy --template='{{index .status "phase"}}')
if [ "$?" -eq 0 ]; then
echo phase is $P for router deploy $V
if [ "$P" == "Failed" ]; then
echo "router deploy failed, try again"
oc get pods
oc rollout retry dc/router
sleep 10
continue
fi
fi
REG=$(oc get pod -l deploymentconfig=router --template='{{index .items 0 "status" "phase"}}')
if [ "$?" -eq 0 ]; then
break
fi
oc get pods
echo "Waiting for router pod"
sleep 10
done
while true; do
REG=$(oc get pod -l deploymentconfig=router --template='{{index .items 0 "status" "phase"}}')
if [ "$?" -ne 0 -o "$REG" == "Error" ]; then
echo "Router pod is in error state..."
exit 1
fi
if [ "$REG" == "Running" ]; then
break
fi
sleep 5
done
echo "Registry and router pods are okay"
if [ "$1" = "service-catalog" ]; then
echo "Waiting for template-service-broker"
while true; do
status=$(oc get clusterservicebroker template-service-broker -o jsonpath='{.status.conditions[0].status}')
if [ "$status" == "True" ]; then
break
fi
sleep 5
done
echo "Waiting for openshift-automation-service-broker"
while true; do
status=$(oc get clusterservicebroker openshift-automation-service-broker -o jsonpath='{.status.conditions[0].status}')
if [ "$status" == "True" ]; then
break
fi
sleep 5
done
fi
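Each loop above polls one resource until it reaches the desired phase, bailing out on Error. A generic form of that polling helper, sketched (the function name and the bounded retry count are ours; the template expression is the one the script already uses):
wait_for_phase() {
  local selector="$1" want="$2" tries=60
  while [ "$tries" -gt 0 ]; do
    phase=$(oc get pod -l "$selector" --template='{{index .items 0 "status" "phase"}}' 2>/dev/null)
    [ "$phase" = "$want" ] && return 0
    [ "$phase" = "Error" ] && return 1
    tries=$((tries - 1))
    sleep 5
  done
  return 1
}
wait_for_phase "deploymentconfig=docker-registry" "Running"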

View File

@@ -1,50 +0,0 @@
#!/bin/bash
# upload linux packages to bintray repositories
# requires $BINTRAY_USER and $BINTRAY_KEY
PKG_DIR="./dist/pkgs/"
if [[ -z "${BINTRAY_USER}" ]] || [[ -z "${BINTRAY_KEY}" ]] ; then
echo "Required variables \$BINTRAY_USER and \$BINTRAY_KEY"
exit 1
fi
# for deb
for pkg in `ls -1 $PKG_DIR/*.deb`; do
filename=$(basename $pkg)
# get version from filename
version=$(expr "$filename" : '.*_\([^_]*\)_.*')
repo="odo-deb-dev"
# if version is semver format upload to releases
if [[ $version =~ [0-9]+\.[0-9]+\.[0-9]+ ]] ; then
repo="odo-deb-releases"
fi
echo "Uploading DEB package $pkg version $version to Bintray $repo"
curl -T $pkg -u $BINTRAY_USER:$BINTRAY_KEY "https://api.bintray.com/content/odo/${repo}/odo/${version}/${filename};deb_distribution=stretch;deb_component=main;deb_architecture=amd64;publish=1"
echo ""
echo ""
done
# for rpm
for pkg in `ls -1 $PKG_DIR/*.rpm`; do
filename=$(basename $pkg)
# get version from filename
version=$(expr "$filename" : '.*-\(.*-[0-9]*\)\.x86_64.*')
repo="odo-rpm-dev"
# if version is semver format upload to releases
if [[ $version =~ [0-9]+\.[0-9]+\.[0-9]+ ]] ; then
repo="odo-rpm-releases"
fi
echo "Uploading RPM package $pkg version $version to Bintray $repo"
curl -T $pkg -u $BINTRAY_USER:$BINTRAY_KEY "https://api.bintray.com/content/odo/${repo}/odo/${version}/${filename};publish=1"
echo ""
echo ""
done
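The expr calls extract the version from each package file name; whether it looks like plain semver then decides between the dev and releases repos. How the two regexes behave on example names (the file names are illustrative):
expr "odo_1.2.3_amd64.deb" : '.*_\([^_]*\)_.*'               # prints 1.2.3
expr "odo-1.2.3-1.x86_64.rpm" : '.*-\(.*-[0-9]*\)\.x86_64.*' # prints 1.2.3-1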