Mirror of https://github.com/ubuntu/microk8s.git (synced 2021-05-23 02:23:41 +03:00)

Enforce double quotes with Black (#1968)
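The change is mechanical: skip-string-normalization = true is removed from the
[tool.black] section of pyproject.toml (the hunk near the end of this diff), and the
tree is re-formatted so that Black rewrites single-quoted string literals to double
quotes. A minimal sketch of what that normalization does, assuming a Black release
that exposes format_str and FileMode:

    # Hedged sketch: reproduce Black's string normalization on one of the
    # lines touched by this commit; format_str and FileMode are assumed to
    # be available, as in contemporary Black releases.
    import black

    src = "complete += ['dashboard-proxy']\n"
    out = black.format_str(src, mode=black.FileMode(line_length=100))
    print(out)  # complete += ["dashboard-proxy"]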
@@ -346,8 +346,8 @@ def _get_microk8s_commands() -> List:
             if c.decode().startswith("microk8s.")
         ]
         complete = mk8s
-        if 'dashboard-proxy' not in mk8s:
-            complete += ['dashboard-proxy']
+        if "dashboard-proxy" not in mk8s:
+            complete += ["dashboard-proxy"]
         complete.sort()
         return complete
     else:
@@ -36,7 +36,7 @@ class Auxiliary(ABC):

         :return: Integer free space
         """
-        return disk_usage(realpath('/')).free
+        return disk_usage(realpath("/")).free

     def is_enough_space(self) -> bool:
         """
@@ -118,12 +118,12 @@ class Windows(Auxiliary):
         """
         try:
             out = subprocess.check_output(
-                ['DISM', '/Online', '/Get-FeatureInfo', '/FeatureName:Microsoft-Hyper-V']
+                ["DISM", "/Online", "/Get-FeatureInfo", "/FeatureName:Microsoft-Hyper-V"]
             )
         except subprocess.CalledProcessError:
             return False

-        if 'State : Disabled' in out.decode():
+        if "State : Disabled" in out.decode():
             return False

         return True
@@ -138,12 +138,12 @@ class Windows(Auxiliary):
         try:
             subprocess.check_call(
                 [
-                    'DISM',
-                    '/Online',
-                    '/Enable-Feature',
-                    '/All',
-                    '/NoRestart',
-                    '/FeatureName:Microsoft-Hyper-V',
+                    "DISM",
+                    "/Online",
+                    "/Enable-Feature",
+                    "/All",
+                    "/NoRestart",
+                    "/FeatureName:Microsoft-Hyper-V",
                 ]
             )
         except subprocess.CalledProcessError as e:
@@ -1,29 +1,29 @@
 MAX_CHARACTERS_WRAP: int = 120
 command_descriptions = {
-    'add-node': "Adds a node to a cluster",
-    'ambassador': "Ambassador API Gateway and Ingress",
-    'cilium': "The cilium client",
-    'config': "Print the kubeconfig",
-    'ctr': "The containerd client",
-    'dashboard-proxy': "Enable the Kubernetes dashboard and proxy to host",
-    'dbctl': "Backup and restore the Kubernetes datastore",
-    'disable': "Disables running add-ons",
-    'enable': "Enables useful add-ons",
-    'helm': "The helm client",
-    'helm3': "The helm3 client",
-    'inspect': "Checks the cluster and gathers logs",
-    'istioctl': "The istio client",
-    'join': "Joins this instance as a node to a cluster",
-    'juju': "The Juju client",
-    'kubectl': "The kubernetes client",
-    'leave': "Disconnects this node from any cluster it has joined",
-    'linkerd': "The linkerd client",
-    'refresh-certs': "Refresh the CA certificates in this deployment",
-    'remove-node': "Removes a node from the cluster",
-    'reset': "Cleans the cluster from all workloads",
-    'start': "Starts the kubernetes cluster",
-    'status': "Displays the status of the cluster",
-    'stop': "Stops the kubernetes cluster",
+    "add-node": "Adds a node to a cluster",
+    "ambassador": "Ambassador API Gateway and Ingress",
+    "cilium": "The cilium client",
+    "config": "Print the kubeconfig",
+    "ctr": "The containerd client",
+    "dashboard-proxy": "Enable the Kubernetes dashboard and proxy to host",
+    "dbctl": "Backup and restore the Kubernetes datastore",
+    "disable": "Disables running add-ons",
+    "enable": "Enables useful add-ons",
+    "helm": "The helm client",
+    "helm3": "The helm3 client",
+    "inspect": "Checks the cluster and gathers logs",
+    "istioctl": "The istio client",
+    "join": "Joins this instance as a node to a cluster",
+    "juju": "The Juju client",
+    "kubectl": "The kubernetes client",
+    "leave": "Disconnects this node from any cluster it has joined",
+    "linkerd": "The linkerd client",
+    "refresh-certs": "Refresh the CA certificates in this deployment",
+    "remove-node": "Removes a node from the cluster",
+    "reset": "Cleans the cluster from all workloads",
+    "start": "Starts the kubernetes cluster",
+    "status": "Displays the status of the cluster",
+    "stop": "Stops the kubernetes cluster",
 }
 DEFAULT_CORES: int = 2
 DEFAULT_MEMORY: int = 4
@@ -80,9 +80,9 @@ def get_kubectl_directory() -> str:
 def get_kubeconfig_path():
     """Return a MicroK8s specific kubeconfig path."""
     if sys.platform == "win32":
-        return os.path.join(os.environ.get('LocalAppData'), "MicroK8s", "config")
+        return os.path.join(os.environ.get("LocalAppData"), "MicroK8s", "config")
     else:
-        return os.path.join(os.path.expanduser('~'), ".microk8s", "config")
+        return os.path.join(os.path.expanduser("~"), ".microk8s", "config")


 def clear_kubeconfig():
@@ -16,5 +16,5 @@

 from cli import microk8s

-if __name__ == '__main__':
+if __name__ == "__main__":
     microk8s.cli()
@@ -1,27 +1,27 @@
 from setuptools import setup

 setup(
-    name='microk8s',
-    version='1.0.2',
-    url='https://github.com/ubuntu/microk8s',
-    license='Apache-2.0',
-    author='Joe Borg',
-    author_email='joseph.borg@canonical.com',
-    description='MicroK8s is a small, fast, single-package Kubernetes for developers, IoT and edge',
-    packages=['cli', 'common', 'vm_providers', 'vm_providers._multipass', 'vm_providers.repo'],
+    name="microk8s",
+    version="1.0.2",
+    url="https://github.com/ubuntu/microk8s",
+    license="Apache-2.0",
+    author="Joe Borg",
+    author_email="joseph.borg@canonical.com",
+    description="MicroK8s is a small, fast, single-package Kubernetes for developers, IoT and edge",
+    packages=["cli", "common", "vm_providers", "vm_providers._multipass", "vm_providers.repo"],
     install_requires=[
-        'click~=7.0',
-        'progressbar33==2.4',
-        'requests==2.20.0',
-        'requests_unixsocket==0.1.5',
-        'simplejson==3.8.2',
-        'toml==0.10.0',
-        'urllib3==1.24.2',
+        "click~=7.0",
+        "progressbar33==2.4",
+        "requests==2.20.0",
+        "requests_unixsocket==0.1.5",
+        "simplejson==3.8.2",
+        "toml==0.10.0",
+        "urllib3==1.24.2",
     ],
-    platforms='any',
+    platforms="any",
     entry_points={
-        'console_scripts': [
-            'microk8s=cli.microk8s:cli',
+        "console_scripts": [
+            "microk8s=cli.microk8s:cli",
         ]
     },
 )
@@ -11,9 +11,9 @@ from cli.microk8s import cli

 class TestClass:
     @pytest.mark.skipif(
-        platform.system() != 'Linux', reason='Add/remove multipass is available on Linux'
+        platform.system() != "Linux", reason="Add/remove multipass is available on Linux"
     )
-    @pytest.mark.skipif(os.getuid() != 0, reason='Add/remove multipass is possible to root')
+    @pytest.mark.skipif(os.getuid() != 0, reason="Add/remove multipass is possible to root")
     @mock.patch("sys.stdin.isatty", return_value=True)
     def test_install_remove_multipass(self, tty_mock):
         """
@@ -24,19 +24,19 @@ class TestClass:
         # making sure we start on a clean machine with multipass
         result = runner.invoke(cli, "uninstall")
         subprocess.check_call("sudo snap install multipass --classic".split())
-        assert os.path.isfile('/snap/bin/multipass')
+        assert os.path.isfile("/snap/bin/multipass")
         assert result.exit_code == 0

         # making sure we start on a clean machine
         result = runner.invoke(cli, "install")
         assert result.exit_code == 0
-        assert os.path.isfile('/snap/bin/multipass')
+        assert os.path.isfile("/snap/bin/multipass")

         result = runner.invoke(cli, "status --wait-ready --timeout=60")
         assert result.exit_code == 0

         result = runner.invoke(cli, "install")
-        assert os.path.isfile('/snap/bin/multipass')
+        assert os.path.isfile("/snap/bin/multipass")
         assert result.exit_code == 0

     def test_all_cli(self):
@@ -155,7 +155,7 @@ class Provider(abc.ABC):
             f.write(kubeconfig)

     def _setup_microk8s(self, specs: Dict) -> None:
-        self.run("snap install microk8s --classic --channel {}".format(specs['channel']).split())
+        self.run("snap install microk8s --classic --channel {}".format(specs["channel"]).split())
         if sys.platform == "win32":
             self.run("snap install microk8s-integrator-windows".split())
         elif sys.platform == "darwin":
@@ -58,9 +58,9 @@ class Multipass(Provider):
     def _launch(self, specs: Dict) -> None:
         image = "18.04"

-        cpus = "{}".format(specs['cpu'])
-        mem = "{}G".format(specs['mem'])
-        disk = "{}G".format(specs['disk'])
+        cpus = "{}".format(specs["cpu"])
+        mem = "{}G".format(specs["mem"])
+        disk = "{}G".format(specs["disk"])

         try_for = 10

@@ -20,13 +20,13 @@ import click

 MIN_MEM_GB = 14
 CONNECTIVITY_CHECKS = [
-    'https://api.jujucharms.com/charmstore/v5/istio-pilot-5/icon.svg',
+    "https://api.jujucharms.com/charmstore/v5/istio-pilot-5/icon.svg",
 ]


 def kubectl_exists(resource):
     try:
-        run('microk8s-kubectl.wrapper', 'get', '-nkubeflow', resource, die=False)
+        run("microk8s-kubectl.wrapper", "get", "-nkubeflow", resource, die=False)
         return True
     except subprocess.CalledProcessError:
         return False
@@ -117,14 +117,14 @@ def check_connectivity():

     try:
         run(
-            'microk8s-kubectl.wrapper',
-            'run',
-            '--rm',
-            '-i',
-            '--restart=Never',
-            '--image=curlimages/curl',
-            'connectivity-check',
-            '--',
+            "microk8s-kubectl.wrapper",
+            "run",
+            "--rm",
+            "-i",
+            "--restart=Never",
+            "--image=curlimages/curl",
+            "connectivity-check",
+            "--",
             url,
             die=False,
             stdout=False,
@@ -138,22 +138,22 @@ def check_connectivity():


 def parse_hostname(hostname: str) -> ParseResult:
-    if '//' in hostname:
+    if "//" in hostname:
         parsed = urlparse(hostname)
     else:
-        parsed = urlparse('//' + hostname)
+        parsed = urlparse("//" + hostname)

     if not parsed.scheme:
-        parsed = parsed._replace(scheme='http')
+        parsed = parsed._replace(scheme="http")

     if not parsed.hostname:
         print("Manual hostname `%s` leaves hostname unspecified" % hostname)
         sys.exit(1)

     if not parsed.port:
-        parsed = parsed._replace(netloc=parsed.hostname or '' + (parsed.port or ''))
+        parsed = parsed._replace(netloc=parsed.hostname or "" + (parsed.port or ""))

-    if parsed.path not in ('', '/'):
+    if parsed.path not in ("", "/"):
         print("WARNING: The path `%s` was set on the hostname, but was ignored." % parsed.path)

     if parsed.params:
@@ -169,7 +169,7 @@ def parse_hostname(hostname: str) -> ParseResult:
             "WARNING: The fragment `%s` was set on the hostname, but was ignored." % parsed.fragment
         )

-    return parsed._replace(path='', params='', query='', fragment='')
+    return parsed._replace(path="", params="", query="", fragment="")


 def get_hostname():
@@ -195,39 +195,39 @@ def get_hostname():

 @click.command()
 @click.option(
-    '--bundle',
-    default='cs:kubeflow-245',
-    help='The Kubeflow bundle to deploy. Can be one of full, lite, edge, or a charm store URL.',
+    "--bundle",
+    default="cs:kubeflow-245",
+    help="The Kubeflow bundle to deploy. Can be one of full, lite, edge, or a charm store URL.",
 )
 @click.option(
-    '--channel',
-    default='stable',
-    type=click.Choice(['stable', 'candidate', 'beta', 'edge']),
-    help='Which channel to deploy the bundle from. In most cases, this should be `stable`.',
+    "--channel",
+    default="stable",
+    type=click.Choice(["stable", "candidate", "beta", "edge"]),
+    help="Which channel to deploy the bundle from. In most cases, this should be `stable`.",
 )
 @click.option(
-    '--debug/--no-debug',
+    "--debug/--no-debug",
     default=False,
-    help='If true, shows more verbose output when enabling Kubeflow.',
+    help="If true, shows more verbose output when enabling Kubeflow.",
 )
 @click.option(
-    '--hostname',
-    help='If set, this hostname is used instead of a hostname generated by MetalLB.',
+    "--hostname",
+    help="If set, this hostname is used instead of a hostname generated by MetalLB.",
 )
 @click.option(
-    '--ignore-min-mem/--no-ignore-min-mem',
+    "--ignore-min-mem/--no-ignore-min-mem",
     default=False,
-    help='If set, overrides the minimum memory check.',
+    help="If set, overrides the minimum memory check.",
 )
 @click.option(
-    '--no-proxy',
-    help='Allows setting the juju-no-proxy configuration option.',
+    "--no-proxy",
+    help="Allows setting the juju-no-proxy configuration option.",
 )
 @click.password_option(
-    envvar='KUBEFLOW_AUTH_PASSWORD',
+    envvar="KUBEFLOW_AUTH_PASSWORD",
     default=get_random_pass,
     prompt=False,
-    help='The Kubeflow dashboard password.',
+    help="The Kubeflow dashboard password.",
 )
 def kubeflow(bundle, channel, debug, hostname, ignore_min_mem, no_proxy, password):
     if os.geteuid() == 0:
@@ -235,12 +235,12 @@ def kubeflow(bundle, channel, debug, hostname, ignore_min_mem, no_proxy, passwor
         print("Try `microk8s enable kubeflow` instead.")
         sys.exit(1)

-    juju_path = Path(os.environ['SNAP_DATA']) / 'juju'
+    juju_path = Path(os.environ["SNAP_DATA"]) / "juju"
     if juju_path.stat().st_gid == 0:
         print("Found bad permissions on %s, fixing..." % juju_path)
         try:
-            run('sudo', 'chgrp', '-R', 'microk8s', str(juju_path), die=False)
-            run('sudo', 'chmod', '-R', '775', str(juju_path), die=False)
+            run("sudo", "chgrp", "-R", "microk8s", str(juju_path), die=False)
+            run("sudo", "chmod", "-R", "775", str(juju_path), die=False)
         except subprocess.CalledProcessError as err:
             print("Encountered error while attempting to fix permissions:")
             print(err)
@@ -276,22 +276,22 @@ def kubeflow(bundle, channel, debug, hostname, ignore_min_mem, no_proxy, passwor
     # should not have to specify a version for those bundles. However, allow the
     # user to specify a full charm store URL if they'd like, such as
     # `cs:kubeflow-lite-123`.
-    if bundle == 'full':
-        bundle = 'cs:kubeflow-245'
-    elif bundle == 'lite':
-        bundle = 'cs:kubeflow-lite-32'
-    elif bundle == 'edge':
-        bundle = 'cs:kubeflow-edge-29'
+    if bundle == "full":
+        bundle = "cs:kubeflow-245"
+    elif bundle == "lite":
+        bundle = "cs:kubeflow-lite-32"
+    elif bundle == "edge":
+        bundle = "cs:kubeflow-edge-29"
     else:
         bundle = bundle

     run("microk8s-status.wrapper", "--wait-ready", debug=debug)
     run(
-        'microk8s-kubectl.wrapper',
-        '-nkube-system',
-        'rollout',
-        'status',
-        'deployment.apps/calico-kube-controllers',
+        "microk8s-kubectl.wrapper",
+        "-nkube-system",
+        "rollout",
+        "status",
+        "deployment.apps/calico-kube-controllers",
         debug=debug,
     )
@@ -307,11 +307,11 @@ def kubeflow(bundle, channel, debug, hostname, ignore_min_mem, no_proxy, passwor

     run("microk8s-status.wrapper", "--wait-ready", debug=debug)
     run(
-        'microk8s-kubectl.wrapper',
-        '-nkube-system',
-        'rollout',
-        'status',
-        'ds/calico-node',
+        "microk8s-kubectl.wrapper",
+        "-nkube-system",
+        "rollout",
+        "status",
+        "ds/calico-node",
         debug=debug,
     )
@@ -366,34 +366,34 @@ def kubeflow(bundle, channel, debug, hostname, ignore_min_mem, no_proxy, passwor
     print("Operator pods ready.")
     print("Waiting for service pods to become ready.")

-    if kubectl_exists('service/pipelines-api'):
-        with tempfile.NamedTemporaryFile(mode='w+') as f:
+    if kubectl_exists("service/pipelines-api"):
+        with tempfile.NamedTemporaryFile(mode="w+") as f:
             json.dump(
                 {
-                    'apiVersion': 'v1',
-                    'kind': 'Service',
-                    'metadata': {'labels': {'juju-app': 'pipelines-api'}, 'name': 'ml-pipeline'},
-                    'spec': {
-                        'ports': [
-                            {'name': 'grpc', 'port': 8887, 'protocol': 'TCP', 'targetPort': 8887},
-                            {'name': 'http', 'port': 8888, 'protocol': 'TCP', 'targetPort': 8888},
+                    "apiVersion": "v1",
+                    "kind": "Service",
+                    "metadata": {"labels": {"juju-app": "pipelines-api"}, "name": "ml-pipeline"},
+                    "spec": {
+                        "ports": [
+                            {"name": "grpc", "port": 8887, "protocol": "TCP", "targetPort": 8887},
+                            {"name": "http", "port": 8888, "protocol": "TCP", "targetPort": 8888},
                         ],
-                        'selector': {'juju-app': 'pipelines-api'},
-                        'type': 'ClusterIP',
+                        "selector": {"juju-app": "pipelines-api"},
+                        "type": "ClusterIP",
                     },
                 },
                 f,
             )
             f.flush()
-            run('microk8s-kubectl.wrapper', 'apply', '-f', f.name)
+            run("microk8s-kubectl.wrapper", "apply", "-f", f.name)

     hostname = parse_hostname(hostname or get_hostname())

-    if kubectl_exists('service/dex-auth'):
+    if kubectl_exists("service/dex-auth"):
         juju("config", "dex-auth", "public-url=%s" % hostname.geturl())
-        juju('config', 'dex-auth', 'static-password=%s' % password)
+        juju("config", "dex-auth", "static-password=%s" % password)

-    if kubectl_exists('service/oidc-gatekeeper'):
+    if kubectl_exists("service/oidc-gatekeeper"):
         juju("config", "oidc-gatekeeper", "public-url=%s" % hostname.geturl())

     retry_run(
@@ -410,7 +410,7 @@ def kubeflow(bundle, channel, debug, hostname, ignore_min_mem, no_proxy, passwor

     print("Congratulations, Kubeflow is now available.")

-    if kubectl_exists('service/istio-ingressgateway'):
+    if kubectl_exists("service/istio-ingressgateway"):
         print(
             textwrap.dedent(
                 """
@@ -444,4 +444,4 @@ def kubeflow(bundle, channel, debug, hostname, ignore_min_mem, no_proxy, passwor


 if __name__ == "__main__":
-    kubeflow(prog_name='microk8s enable kubeflow', auto_envvar_prefix='KUBEFLOW')
+    kubeflow(prog_name="microk8s enable kubeflow", auto_envvar_prefix="KUBEFLOW")
@@ -1,4 +1,3 @@
 [tool.black]
 line-length = 100
 target-version = ['py35']
-skip-string-normalization = true
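Dropping skip-string-normalization = true from the hunk above is what switches the
quote rewriting on: with normalization skipped, Black leaves existing quote characters
untouched. A small sketch of the equivalence, assuming Black's string_normalization
mode flag (the API counterpart of the -S/--skip-string-normalization command-line
option):

    # Hedged sketch: the removed pyproject.toml setting corresponds to
    # string_normalization=False in Black's API.
    import black

    src = "name = 'microk8s'\n"
    kept = black.format_str(src, mode=black.FileMode(string_normalization=False))
    normalized = black.format_str(src, mode=black.FileMode())
    print(kept)        # name = 'microk8s'   (old config: quotes left alone)
    print(normalized)  # name = "microk8s"   (new config: double quotes enforced)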
|
||||
|
||||
@@ -28,7 +28,7 @@ def add_token_with_expiry(token, file, ttl):
|
||||
:param ttl: How long the token should last before expiry, represented in seconds.
|
||||
"""
|
||||
|
||||
with open(file, 'a+') as fp:
|
||||
with open(file, "a+") as fp:
|
||||
if ttl != -1:
|
||||
expiry = int(round(time.time())) + ttl
|
||||
fp.write(token_with_expiry.format(token, expiry))
|
||||
@@ -36,18 +36,18 @@ def add_token_with_expiry(token, file, ttl):
|
||||
fp.write(token_without_expiry.format(token))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
|
||||
# initiate the parser with a description
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Produce a connection string for a node to join the cluster.',
|
||||
prog='microk8s add-node',
|
||||
description="Produce a connection string for a node to join the cluster.",
|
||||
prog="microk8s add-node",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--token-ttl",
|
||||
"-l",
|
||||
help="Specify how long the token is valid, before it expires. "
|
||||
"Value of \"-1\" indicates that the token is usable only once "
|
||||
'Value of "-1" indicates that the token is usable only once '
|
||||
"(i.e. after joining a node, the token becomes invalid)",
|
||||
type=int,
|
||||
default="-1",
|
||||
|
||||
@@ -31,8 +31,8 @@ from flask import Flask, jsonify, request, Response
|
||||
app = Flask(__name__)
|
||||
CLUSTER_API = "cluster/api/v1.0"
|
||||
CLUSTER_API_V2 = "cluster/api/v2.0"
|
||||
snapdata_path = os.environ.get('SNAP_DATA')
|
||||
snap_path = os.environ.get('SNAP')
|
||||
snapdata_path = os.environ.get("SNAP_DATA")
|
||||
snap_path = os.environ.get("SNAP")
|
||||
cluster_tokens_file = "{}/credentials/cluster-tokens.txt".format(snapdata_path)
|
||||
callback_token_file = "{}/credentials/callback-token.txt".format(snapdata_path)
|
||||
callback_tokens_file = "{}/credentials/callback-tokens.txt".format(snapdata_path)
|
||||
@@ -92,12 +92,12 @@ def store_callback_token(node, callback_token):
|
||||
"""
|
||||
tmp_file = "{}.tmp".format(callback_tokens_file)
|
||||
if not os.path.isfile(callback_tokens_file):
|
||||
open(callback_tokens_file, 'a+')
|
||||
open(callback_tokens_file, "a+")
|
||||
os.chmod(callback_tokens_file, 0o600)
|
||||
with open(tmp_file, "w") as backup_fp:
|
||||
os.chmod(tmp_file, 0o600)
|
||||
found = False
|
||||
with open(callback_tokens_file, 'r+') as callback_fp:
|
||||
with open(callback_tokens_file, "r+") as callback_fp:
|
||||
for _, line in enumerate(callback_fp):
|
||||
if line.startswith(node):
|
||||
backup_fp.write("{} {}\n".format(node, callback_token))
|
||||
@@ -126,7 +126,7 @@ def sign_client_cert(cert_request, token):
|
||||
" -days 365".format(csr=req_file, SNAP_DATA=snapdata_path, token=token)
|
||||
)
|
||||
|
||||
with open(req_file, 'w') as fp:
|
||||
with open(req_file, "w") as fp:
|
||||
fp.write(cert_request)
|
||||
subprocess.check_call(sign_cmd.split())
|
||||
with open(
|
||||
@@ -157,7 +157,7 @@ def get_token(name):
|
||||
with open(file) as fp:
|
||||
for _, line in enumerate(fp):
|
||||
if name in line:
|
||||
parts = line.split(',')
|
||||
parts = line.split(",")
|
||||
return parts[0].rstrip()
|
||||
return None
|
||||
|
||||
@@ -175,11 +175,11 @@ def add_kubelet_token(hostname):
|
||||
return old_token.rstrip()
|
||||
|
||||
alpha = string.ascii_letters + string.digits
|
||||
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(32))
|
||||
uid = ''.join(random.SystemRandom().choice(string.digits) for _ in range(8))
|
||||
with open(file, 'a') as fp:
|
||||
token = "".join(random.SystemRandom().choice(alpha) for _ in range(32))
|
||||
uid = "".join(random.SystemRandom().choice(string.digits) for _ in range(8))
|
||||
with open(file, "a") as fp:
|
||||
# TODO double check this format. Why is userid unique?
|
||||
line = "{},system:node:{},kubelet-{},\"system:nodes\"".format(token, hostname, uid)
|
||||
line = '{},system:node:{},kubelet-{},"system:nodes"'.format(token, hostname, uid)
|
||||
fp.write(line + os.linesep)
|
||||
return token.rstrip()
|
||||
|
||||
@@ -208,8 +208,8 @@ def get_arg(key, file):
|
||||
with open(filename) as fp:
|
||||
for _, line in enumerate(fp):
|
||||
if line.startswith(key):
|
||||
args = line.split(' ')
|
||||
args = args[-1].split('=')
|
||||
args = line.split(" ")
|
||||
args = args[-1].split("=")
|
||||
return args[-1].rstrip()
|
||||
return None
|
||||
|
||||
@@ -232,7 +232,7 @@ def is_valid(token_line, token_type=cluster_tokens_file):
|
||||
token_in_file = line.strip()
|
||||
if "|" in line:
|
||||
if not is_token_expired(line):
|
||||
token_in_file = line.strip().split('|')[0]
|
||||
token_in_file = line.strip().split("|")[0]
|
||||
if token == token_in_file:
|
||||
return True
|
||||
return False
|
||||
@@ -269,28 +269,28 @@ def get_node_ep(hostname, remote_addr):
|
||||
return remote_addr
|
||||
|
||||
|
||||
@app.route('/{}/join'.format(CLUSTER_API), methods=['POST'])
|
||||
@app.route("/{}/join".format(CLUSTER_API), methods=["POST"])
|
||||
def join_node_etcd():
|
||||
"""
|
||||
Web call to join a node to the cluster
|
||||
"""
|
||||
if request.headers['Content-Type'] == 'application/json':
|
||||
token = request.json['token']
|
||||
hostname = request.json['hostname']
|
||||
port = request.json['port']
|
||||
callback_token = request.json['callback']
|
||||
if request.headers["Content-Type"] == "application/json":
|
||||
token = request.json["token"]
|
||||
hostname = request.json["hostname"]
|
||||
port = request.json["port"]
|
||||
callback_token = request.json["callback"]
|
||||
else:
|
||||
token = request.form['token']
|
||||
hostname = request.form['hostname']
|
||||
port = request.form['port']
|
||||
callback_token = request.form['callback']
|
||||
token = request.form["token"]
|
||||
hostname = request.form["hostname"]
|
||||
port = request.form["port"]
|
||||
callback_token = request.form["callback"]
|
||||
|
||||
# Remove expired tokens
|
||||
remove_expired_token_from_file(cluster_tokens_file)
|
||||
|
||||
if not is_valid(token):
|
||||
error_msg = {"error": "Invalid token"}
|
||||
return Response(json.dumps(error_msg), mimetype='application/json', status=500)
|
||||
return Response(json.dumps(error_msg), mimetype="application/json", status=500)
|
||||
|
||||
if is_node_running_dqlite():
|
||||
msg = (
|
||||
@@ -298,7 +298,7 @@ def join_node_etcd():
|
||||
"Please retry after enabling HA on this joining node with 'microk8s enable ha-cluster'."
|
||||
)
|
||||
error_msg = {"error": msg}
|
||||
return Response(json.dumps(error_msg), mimetype='application/json', status=501)
|
||||
return Response(json.dumps(error_msg), mimetype="application/json", status=501)
|
||||
|
||||
add_token_to_certs_request(token)
|
||||
# remove token for backwards compatibility way of adding a node
|
||||
@@ -309,11 +309,11 @@ def join_node_etcd():
|
||||
store_callback_token(node_ep, callback_token)
|
||||
|
||||
ca = getCA()
|
||||
etcd_ep = get_arg('--listen-client-urls', 'etcd')
|
||||
api_port = get_arg('--secure-port', 'kube-apiserver')
|
||||
proxy_token = get_token('kube-proxy')
|
||||
etcd_ep = get_arg("--listen-client-urls", "etcd")
|
||||
api_port = get_arg("--secure-port", "kube-apiserver")
|
||||
proxy_token = get_token("kube-proxy")
|
||||
kubelet_token = add_kubelet_token(node_addr)
|
||||
service('restart', 'apiservice')
|
||||
service("restart", "apiservice")
|
||||
if node_addr != hostname:
|
||||
kubelet_args = read_kubelet_args_file(node_addr)
|
||||
else:
|
||||
@@ -330,51 +330,51 @@ def join_node_etcd():
|
||||
)
|
||||
|
||||
|
||||
@app.route('/{}/sign-cert'.format(CLUSTER_API), methods=['POST'])
|
||||
@app.route("/{}/sign-cert".format(CLUSTER_API), methods=["POST"])
|
||||
def sign_cert():
|
||||
"""
|
||||
Web call to sign a certificate
|
||||
"""
|
||||
if request.headers['Content-Type'] == 'application/json':
|
||||
token = request.json['token']
|
||||
cert_request = request.json['request']
|
||||
if request.headers["Content-Type"] == "application/json":
|
||||
token = request.json["token"]
|
||||
cert_request = request.json["request"]
|
||||
else:
|
||||
token = request.form['token']
|
||||
cert_request = request.form['request']
|
||||
token = request.form["token"]
|
||||
cert_request = request.form["request"]
|
||||
|
||||
token = token.strip()
|
||||
if not is_valid(token, certs_request_tokens_file):
|
||||
error_msg = {"error": "Invalid token"}
|
||||
return Response(json.dumps(error_msg), mimetype='application/json', status=500)
|
||||
return Response(json.dumps(error_msg), mimetype="application/json", status=500)
|
||||
|
||||
if is_node_running_dqlite():
|
||||
error_msg = {"error": "Not possible to join. This is an HA dqlite cluster."}
|
||||
return Response(json.dumps(error_msg), mimetype='application/json', status=501)
|
||||
return Response(json.dumps(error_msg), mimetype="application/json", status=501)
|
||||
|
||||
remove_token_from_file(token, certs_request_tokens_file)
|
||||
signed_cert = sign_client_cert(cert_request, token)
|
||||
return jsonify(certificate=signed_cert)
|
||||
|
||||
|
||||
@app.route('/{}/configure'.format(CLUSTER_API), methods=['POST'])
|
||||
@app.route("/{}/configure".format(CLUSTER_API), methods=["POST"])
|
||||
def configure():
|
||||
"""
|
||||
Web call to configure the node
|
||||
"""
|
||||
if request.headers['Content-Type'] == 'application/json':
|
||||
callback_token = request.json['callback']
|
||||
if request.headers["Content-Type"] == "application/json":
|
||||
callback_token = request.json["callback"]
|
||||
configuration = request.json
|
||||
else:
|
||||
callback_token = request.form['callback']
|
||||
configuration = json.loads(request.form['configuration'])
|
||||
callback_token = request.form["callback"]
|
||||
configuration = json.loads(request.form["configuration"])
|
||||
|
||||
callback_token = callback_token.strip()
|
||||
if not is_valid(callback_token, callback_token_file):
|
||||
error_msg = {"error": "Invalid token"}
|
||||
return Response(json.dumps(error_msg), mimetype='application/json', status=500)
|
||||
return Response(json.dumps(error_msg), mimetype="application/json", status=500)
|
||||
|
||||
# We expect something like this:
|
||||
'''
|
||||
"""
|
||||
{
|
||||
"callback": "xyztoken"
|
||||
"service":
|
||||
@@ -410,7 +410,7 @@ def configure():
|
||||
}
|
||||
]
|
||||
}
|
||||
'''
|
||||
"""
|
||||
|
||||
if "service" in configuration:
|
||||
for srv in configuration["service"]:
|
||||
@@ -429,7 +429,7 @@ def configure():
|
||||
if "restart" in srv and srv["restart"]:
|
||||
service_name = get_service_name(srv["name"])
|
||||
print("restarting {}".format(srv["name"]))
|
||||
service('restart', service_name)
|
||||
service("restart", service_name)
|
||||
|
||||
if "addon" in configuration:
|
||||
for addon in configuration["addon"]:
|
||||
@@ -446,7 +446,7 @@ def configure():
|
||||
)
|
||||
|
||||
resp_date = {"result": "ok"}
|
||||
resp = Response(json.dumps(resp_date), status=200, mimetype='application/json')
|
||||
resp = Response(json.dumps(resp_date), status=200, mimetype="application/json")
|
||||
return resp
|
||||
|
||||
|
||||
@@ -472,7 +472,7 @@ def get_dqlite_voters():
|
||||
).split(),
|
||||
timeout=4,
|
||||
)
|
||||
if data['Address'] in out.decode():
|
||||
if data["Address"] in out.decode():
|
||||
break
|
||||
else:
|
||||
print(".", end=" ", flush=True)
|
||||
@@ -501,15 +501,15 @@ def update_dqlite_ip(host):
|
||||
:param : the host others see for this node
|
||||
"""
|
||||
dqlite_port = get_dqlite_port()
|
||||
service('stop', 'apiserver')
|
||||
service("stop", "apiserver")
|
||||
time.sleep(10)
|
||||
|
||||
cluster_dir = "{}/var/kubernetes/backend".format(snapdata_path)
|
||||
# TODO make the port configurable
|
||||
update_data = {'Address': "{}:{}".format(host, dqlite_port)}
|
||||
with open("{}/update.yaml".format(cluster_dir), 'w') as f:
|
||||
update_data = {"Address": "{}:{}".format(host, dqlite_port)}
|
||||
with open("{}/update.yaml".format(cluster_dir), "w") as f:
|
||||
yaml.dump(update_data, f)
|
||||
service('start', 'apiserver')
|
||||
service("start", "apiserver")
|
||||
time.sleep(10)
|
||||
attempts = 12
|
||||
while True:
|
||||
@@ -551,32 +551,32 @@ def get_cluster_certs():
|
||||
return cluster_cert, cluster_key
|
||||
|
||||
|
||||
@app.route('/{}/join'.format(CLUSTER_API_V2), methods=['POST'])
|
||||
@app.route("/{}/join".format(CLUSTER_API_V2), methods=["POST"])
|
||||
def join_node_dqlite():
|
||||
"""
|
||||
Web call to join a node to the cluster
|
||||
"""
|
||||
if request.headers['Content-Type'] == 'application/json':
|
||||
token = request.json['token']
|
||||
port = request.json['port']
|
||||
if request.headers["Content-Type"] == "application/json":
|
||||
token = request.json["token"]
|
||||
port = request.json["port"]
|
||||
else:
|
||||
token = request.form['token']
|
||||
port = request.form['port']
|
||||
token = request.form["token"]
|
||||
port = request.form["port"]
|
||||
|
||||
if not is_valid(token):
|
||||
error_msg = {"error": "Invalid token"}
|
||||
return Response(json.dumps(error_msg), mimetype='application/json', status=500)
|
||||
return Response(json.dumps(error_msg), mimetype="application/json", status=500)
|
||||
|
||||
if not is_node_running_dqlite():
|
||||
error_msg = {"error": "Not possible to join. This is not an HA dqlite cluster."}
|
||||
return Response(json.dumps(error_msg), mimetype='application/json', status=501)
|
||||
return Response(json.dumps(error_msg), mimetype="application/json", status=501)
|
||||
|
||||
agent_port = get_cluster_agent_port()
|
||||
if port != agent_port:
|
||||
error_msg = {
|
||||
"error": "The port of the cluster agent has to be set to {}.".format(agent_port)
|
||||
}
|
||||
return Response(json.dumps(error_msg), mimetype='application/json', status=502)
|
||||
return Response(json.dumps(error_msg), mimetype="application/json", status=502)
|
||||
|
||||
voters = get_dqlite_voters() # type: List[str]
|
||||
# Check if we need to set dqlite with external IP
|
||||
@@ -586,7 +586,7 @@ def join_node_dqlite():
|
||||
callback_token = get_callback_token()
|
||||
remove_token_from_file(token, cluster_tokens_file)
|
||||
node_addr = request.remote_addr
|
||||
api_port = get_arg('--secure-port', 'kube-apiserver')
|
||||
api_port = get_arg("--secure-port", "kube-apiserver")
|
||||
kubelet_args = read_kubelet_args_file()
|
||||
cluster_cert, cluster_key = get_cluster_certs()
|
||||
# Make sure calico can autodetect the right interface for packet routing
|
||||
@@ -603,62 +603,62 @@ def join_node_dqlite():
|
||||
apiport=api_port,
|
||||
kubelet_args=kubelet_args,
|
||||
hostname_override=node_addr,
|
||||
admin_token=get_token('admin'),
|
||||
admin_token=get_token("admin"),
|
||||
)
|
||||
|
||||
|
||||
@app.route('/{}/upgrade'.format(CLUSTER_API), methods=['POST'])
|
||||
@app.route("/{}/upgrade".format(CLUSTER_API), methods=["POST"])
|
||||
def upgrade():
|
||||
"""
|
||||
Web call to upgrade the node
|
||||
"""
|
||||
callback_token = request.json['callback']
|
||||
callback_token = request.json["callback"]
|
||||
callback_token = callback_token.strip()
|
||||
if not is_valid(callback_token, callback_token_file):
|
||||
error_msg = {"error": "Invalid token"}
|
||||
return Response(json.dumps(error_msg), mimetype='application/json', status=500)
|
||||
return Response(json.dumps(error_msg), mimetype="application/json", status=500)
|
||||
|
||||
upgrade_request = request.json["upgrade"]
|
||||
phase = request.json["phase"]
|
||||
|
||||
# We expect something like this:
|
||||
'''
|
||||
"""
|
||||
{
|
||||
"callback": "xyztoken"
|
||||
"phase": "prepare", "commit" or "rollback"
|
||||
"upgrade": "XYZ-upgrade-name"
|
||||
}
|
||||
'''
|
||||
"""
|
||||
if phase == "prepare":
|
||||
upgrade_script = '{}/upgrade-scripts/{}/prepare-node.sh'.format(snap_path, upgrade_request)
|
||||
upgrade_script = "{}/upgrade-scripts/{}/prepare-node.sh".format(snap_path, upgrade_request)
|
||||
if not os.path.isfile(upgrade_script):
|
||||
print("Not ready to execute {}".format(upgrade_script))
|
||||
resp_data = {"result": "not ok"}
|
||||
resp = Response(json.dumps(resp_data), status=404, mimetype='application/json')
|
||||
resp = Response(json.dumps(resp_data), status=404, mimetype="application/json")
|
||||
return resp
|
||||
else:
|
||||
print("Executing {}".format(upgrade_script))
|
||||
subprocess.check_call(upgrade_script)
|
||||
resp_data = {"result": "ok"}
|
||||
resp = Response(json.dumps(resp_data), status=200, mimetype='application/json')
|
||||
resp = Response(json.dumps(resp_data), status=200, mimetype="application/json")
|
||||
return resp
|
||||
|
||||
elif phase == "commit":
|
||||
upgrade_script = '{}/upgrade-scripts/{}/commit-node.sh'.format(snap_path, upgrade_request)
|
||||
upgrade_script = "{}/upgrade-scripts/{}/commit-node.sh".format(snap_path, upgrade_request)
|
||||
print("Ready to execute {}".format(upgrade_script))
|
||||
print("Executing {}".format(upgrade_script))
|
||||
subprocess.check_call(upgrade_script)
|
||||
resp_data = {"result": "ok"}
|
||||
resp = Response(json.dumps(resp_data), status=200, mimetype='application/json')
|
||||
resp = Response(json.dumps(resp_data), status=200, mimetype="application/json")
|
||||
return resp
|
||||
|
||||
elif phase == "rollback":
|
||||
upgrade_script = '{}/upgrade-scripts/{}/rollback-node.sh'.format(snap_path, upgrade_request)
|
||||
upgrade_script = "{}/upgrade-scripts/{}/rollback-node.sh".format(snap_path, upgrade_request)
|
||||
print("Ready to execute {}".format(upgrade_script))
|
||||
print("Executing {}".format(upgrade_script))
|
||||
subprocess.check_call(upgrade_script)
|
||||
resp_data = {"result": "ok"}
|
||||
resp = Response(json.dumps(resp_data), status=200, mimetype='application/json')
|
||||
resp = Response(json.dumps(resp_data), status=200, mimetype="application/json")
|
||||
return resp
|
||||
|
||||
|
||||
@@ -670,7 +670,7 @@ def usage():
|
||||
print("-p, --port: port to listen to (default {})".format(default_port))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
server_cert = "{SNAP_DATA}/certs/server.crt".format(SNAP_DATA=snapdata_path)
|
||||
server_key = "{SNAP_DATA}/certs/server.key".format(SNAP_DATA=snapdata_path)
|
||||
try:
|
||||
|
||||
@@ -20,7 +20,7 @@ def try_set_file_permissions(file):
|
||||
|
||||
os.chmod(file, 0o660)
|
||||
try:
|
||||
shutil.chown(file, group='microk8s')
|
||||
shutil.chown(file, group="microk8s")
|
||||
except LookupError:
|
||||
# not setting the group means only the current user can access the file
|
||||
pass
|
||||
@@ -35,8 +35,8 @@ def remove_expired_token_from_file(file):
|
||||
backup_file = "{}.backup".format(file)
|
||||
# That is a critical section. We need to protect it.
|
||||
# We are safe for now because flask serves one request at a time.
|
||||
with open(backup_file, 'w') as back_fp:
|
||||
with open(file, 'r') as fp:
|
||||
with open(backup_file, "w") as back_fp:
|
||||
with open(file, "r") as fp:
|
||||
for _, line in enumerate(fp):
|
||||
if is_token_expired(line):
|
||||
continue
|
||||
@@ -56,8 +56,8 @@ def remove_token_from_file(token, file):
|
||||
backup_file = "{}.backup".format(file)
|
||||
# That is a critical section. We need to protect it.
|
||||
# We are safe for now because flask serves one request at a time.
|
||||
with open(backup_file, 'w') as back_fp:
|
||||
with open(file, 'r') as fp:
|
||||
with open(backup_file, "w") as back_fp:
|
||||
with open(file, "r") as fp:
|
||||
for _, line in enumerate(fp):
|
||||
# Not considering cluster tokens with expiry in this method.
|
||||
if "|" not in line:
|
||||
@@ -76,7 +76,7 @@ def is_token_expired(token_line):
|
||||
:returns: True if the token is expired, otherwise False
|
||||
"""
|
||||
if "|" in token_line:
|
||||
expiry = token_line.strip().split('|')[1]
|
||||
expiry = token_line.strip().split("|")[1]
|
||||
if int(round(time.time())) > int(expiry):
|
||||
return True
|
||||
|
||||
@@ -89,13 +89,13 @@ def get_callback_token():
|
||||
|
||||
:returns: the token
|
||||
"""
|
||||
snapdata_path = os.environ.get('SNAP_DATA')
|
||||
snapdata_path = os.environ.get("SNAP_DATA")
|
||||
callback_token_file = "{}/credentials/callback-token.txt".format(snapdata_path)
|
||||
if os.path.exists(callback_token_file):
|
||||
with open(callback_token_file) as fp:
|
||||
token = fp.read()
|
||||
else:
|
||||
token = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(64))
|
||||
token = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(64))
|
||||
with open(callback_token_file, "w") as fp:
|
||||
fp.write("{}\n".format(token))
|
||||
try_set_file_permissions(callback_token_file)
|
||||
@@ -120,15 +120,15 @@ def get_dqlite_port():
|
||||
:return: the dqlite port
|
||||
"""
|
||||
# We get the dqlite port from the already existing deployment
|
||||
snapdata_path = os.environ.get('SNAP_DATA')
|
||||
snapdata_path = os.environ.get("SNAP_DATA")
|
||||
cluster_dir = "{}/var/kubernetes/backend".format(snapdata_path)
|
||||
dqlite_info = "{}/info.yaml".format(cluster_dir)
|
||||
port = 19001
|
||||
if os.path.exists(dqlite_info):
|
||||
with open(dqlite_info) as f:
|
||||
data = yaml.load(f, Loader=yaml.FullLoader)
|
||||
if 'Address' in data:
|
||||
port = data['Address'].split(':')[1]
|
||||
if "Address" in data:
|
||||
port = data["Address"].split(":")[1]
|
||||
|
||||
return port
|
||||
|
||||
@@ -140,14 +140,14 @@ def get_cluster_agent_port():
|
||||
:return: the port
|
||||
"""
|
||||
cluster_agent_port = "25000"
|
||||
snapdata_path = os.environ.get('SNAP_DATA')
|
||||
snapdata_path = os.environ.get("SNAP_DATA")
|
||||
filename = "{}/args/cluster-agent".format(snapdata_path)
|
||||
with open(filename) as fp:
|
||||
for _, line in enumerate(fp):
|
||||
if line.startswith("--bind"):
|
||||
port_parse = line.split(' ')
|
||||
port_parse = port_parse[-1].split('=')
|
||||
port_parse = port_parse[-1].split(':')
|
||||
port_parse = line.split(" ")
|
||||
port_parse = port_parse[-1].split("=")
|
||||
port_parse = port_parse[-1].split(":")
|
||||
if len(port_parse) > 1:
|
||||
cluster_agent_port = port_parse[1].rstrip()
|
||||
return cluster_agent_port
|
||||
@@ -157,7 +157,7 @@ def get_internal_ip_from_get_node(node_info):
|
||||
"""
|
||||
Retrieves the InternalIp returned by kubectl get no -o json
|
||||
"""
|
||||
for status_addresses in node_info['status']['addresses']:
|
||||
for status_addresses in node_info["status"]["addresses"]:
|
||||
if status_addresses["type"] == "InternalIP":
|
||||
return status_addresses["address"]
|
||||
|
||||
@@ -182,16 +182,16 @@ def apply_cni_manifest(timeout_insec=60):
|
||||
Apply the CNI yaml. If applying the manifest fails an exception is raised.
|
||||
:param timeout_insec: Try up to timeout seconds to apply the manifest.
|
||||
"""
|
||||
yaml = '{}/args/cni-network/cni.yaml'.format(os.environ.get('SNAP_DATA'))
|
||||
snap_path = os.environ.get('SNAP')
|
||||
cmd = '{}/microk8s-kubectl.wrapper apply -f {}'.format(snap_path, yaml)
|
||||
yaml = "{}/args/cni-network/cni.yaml".format(os.environ.get("SNAP_DATA"))
|
||||
snap_path = os.environ.get("SNAP")
|
||||
cmd = "{}/microk8s-kubectl.wrapper apply -f {}".format(snap_path, yaml)
|
||||
deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout_insec)
|
||||
while True:
|
||||
try:
|
||||
check_output(cmd.split()).strip().decode('utf8')
|
||||
check_output(cmd.split()).strip().decode("utf8")
|
||||
break
|
||||
except CalledProcessError as err:
|
||||
output = err.output.strip().decode('utf8').replace('\\n', '\n')
|
||||
output = err.output.strip().decode("utf8").replace("\\n", "\n")
|
||||
print("Applying {} failed with {}".format(yaml, output))
|
||||
if datetime.datetime.now() > deadline:
|
||||
raise
|
||||
@@ -204,9 +204,9 @@ def cni_is_patched():
|
||||
Detect if the cni.yaml manifest already has the hint for detecting nodes routing paths
|
||||
:return: True if calico knows where the rest of the nodes are.
|
||||
"""
|
||||
yaml = '{}/args/cni-network/cni.yaml'.format(os.environ.get('SNAP_DATA'))
|
||||
yaml = "{}/args/cni-network/cni.yaml".format(os.environ.get("SNAP_DATA"))
|
||||
with open(yaml) as f:
|
||||
if 'can-reach' in f.read():
|
||||
if "can-reach" in f.read():
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
@@ -217,10 +217,10 @@ def patch_cni(ip):
|
||||
Patch the cni.yaml manifest with the proper hint on where the rest of the nodes are
|
||||
:param ip: The IP another k8s node has.
|
||||
"""
|
||||
cni_yaml = '{}/args/cni-network/cni.yaml'.format(os.environ.get('SNAP_DATA'))
|
||||
cni_yaml = "{}/args/cni-network/cni.yaml".format(os.environ.get("SNAP_DATA"))
|
||||
backup_file = "{}.backup".format(cni_yaml)
|
||||
with open(backup_file, 'w') as back_fp:
|
||||
with open(cni_yaml, 'r') as fp:
|
||||
with open(backup_file, "w") as back_fp:
|
||||
with open(cni_yaml, "r") as fp:
|
||||
for _, line in enumerate(fp):
|
||||
if "first-found" in line:
|
||||
line = line.replace("first-found", "can-reach={}".format(ip))
|
||||
@@ -250,10 +250,10 @@ def is_kubelite():
|
||||
"""
|
||||
Do we run kubelite?
|
||||
"""
|
||||
snap_data = os.environ.get('SNAP_DATA')
|
||||
snap_data = os.environ.get("SNAP_DATA")
|
||||
if not snap_data:
|
||||
snap_data = '/var/snap/microk8s/current/'
|
||||
kubelite_lock = '{}/var/lock/lite.lock'.format(snap_data)
|
||||
snap_data = "/var/snap/microk8s/current/"
|
||||
kubelite_lock = "{}/var/lock/lite.lock".format(snap_data)
|
||||
return os.path.exists(kubelite_lock)
|
||||
|
||||
|
||||
|
||||
@@ -17,8 +17,8 @@ from common.utils import (
|
||||
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
CLUSTER_API = "cluster/api/v1.0"
|
||||
snapdata_path = os.environ.get('SNAP_DATA')
|
||||
snap_path = os.environ.get('SNAP')
|
||||
snapdata_path = os.environ.get("SNAP_DATA")
|
||||
snap_path = os.environ.get("SNAP")
|
||||
callback_tokens_file = "{}/credentials/callback-tokens.txt".format(snapdata_path)
|
||||
callback_token_file = "{}/credentials/callback-token.txt".format(snapdata_path)
|
||||
|
||||
@@ -48,7 +48,7 @@ def do_op(remote_op):
|
||||
continue
|
||||
print("Configuring node {}".format(node_ip))
|
||||
# TODO: make port configurable
|
||||
node_ep = "{}:{}".format(node_ip, '25000')
|
||||
node_ep = "{}:{}".format(node_ip, "25000")
|
||||
remote_op["callback"] = token.rstrip()
|
||||
# TODO: handle ssl verification
|
||||
res = requests.post(
|
||||
|
||||
@@ -27,8 +27,8 @@ from common.utils import (
|
||||
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
CLUSTER_API = "cluster/api/v1.0"
|
||||
snapdata_path = os.environ.get('SNAP_DATA')
|
||||
snap_path = os.environ.get('SNAP')
|
||||
snapdata_path = os.environ.get("SNAP_DATA")
|
||||
snap_path = os.environ.get("SNAP")
|
||||
ca_cert_file_via_env = "${SNAP_DATA}/certs/ca.remote.crt"
|
||||
ca_cert_file = "{}/certs/ca.remote.crt".format(snapdata_path)
|
||||
callback_token_file = "{}/credentials/callback-token.txt".format(snapdata_path)
|
||||
@@ -96,9 +96,9 @@ def get_connection_info(master_ip, master_port, token, callback_token=None, clus
|
||||
|
||||
if connection_info.status_code != 200:
|
||||
message = "Error code {}.".format(connection_info.status_code) # type: str
|
||||
if connection_info.headers.get('content-type') == 'application/json':
|
||||
if connection_info.headers.get("content-type") == "application/json":
|
||||
res_data = connection_info.json() # type: Dict[str, str]
|
||||
if 'error' in res_data:
|
||||
if "error" in res_data:
|
||||
message = "{} {}".format(message, res_data["error"])
|
||||
print("Failed to join cluster. {}".format(message))
|
||||
exit(1)
|
||||
@@ -120,8 +120,8 @@ def set_arg(key, value, file):
|
||||
filename = "{}/args/{}".format(snapdata_path, file)
|
||||
filename_remote = "{}/args/{}.remote".format(snapdata_path, file)
|
||||
done = False
|
||||
with open(filename_remote, 'w+') as back_fp:
|
||||
with open(filename, 'r+') as fp:
|
||||
with open(filename_remote, "w+") as back_fp:
|
||||
with open(filename, "r+") as fp:
|
||||
for _, line in enumerate(fp):
|
||||
if line.startswith(key):
|
||||
done = True
|
||||
@@ -157,7 +157,7 @@ def get_etcd_client_cert(master_ip, master_port, token):
|
||||
subprocess.check_call(cmd_cert.split())
|
||||
with open(cer_req_file) as fp:
|
||||
csr = fp.read()
|
||||
req_data = {'token': token, 'request': csr}
|
||||
req_data = {"token": token, "request": csr}
|
||||
# TODO: enable ssl verification
|
||||
signed = requests.post(
|
||||
"https://{}:{}/{}/sign-cert".format(master_ip, master_port, CLUSTER_API),
|
||||
@@ -188,7 +188,7 @@ def update_flannel(etcd, master_ip, master_port, token):
|
||||
set_arg("--etcd-cafile", ca_cert_file_via_env, "flanneld")
|
||||
set_arg("--etcd-certfile", server_cert_file_via_env, "flanneld")
|
||||
set_arg("--etcd-keyfile", "${SNAP_DATA}/certs/server.key", "flanneld")
|
||||
service('restart', 'flanneld')
|
||||
service("restart", "flanneld")
|
||||
|
||||
|
||||
def ca_one_line(ca):
|
||||
@@ -197,7 +197,7 @@ def ca_one_line(ca):
|
||||
:param ca: the ca
|
||||
:return: one line
|
||||
"""
|
||||
return base64.b64encode(ca.encode('utf-8')).decode('utf-8')
|
||||
return base64.b64encode(ca.encode("utf-8")).decode("utf-8")
|
||||
|
||||
|
||||
def create_kubeconfig(token, ca, master_ip, api_port, filename, user):
|
||||
@@ -211,14 +211,14 @@ def create_kubeconfig(token, ca, master_ip, api_port, filename, user):
|
||||
:param filename: the name of the config file
|
||||
:param user: the user to use al login
|
||||
"""
|
||||
snap_path = os.environ.get('SNAP')
|
||||
snap_path = os.environ.get("SNAP")
|
||||
config_template = "{}/microk8s-resources/{}".format(snap_path, "kubelet.config.template")
|
||||
config = "{}/credentials/{}".format(snapdata_path, filename)
|
||||
shutil.copyfile(config, "{}.backup".format(config))
|
||||
try_set_file_permissions("{}.backup".format(config))
|
||||
ca_line = ca_one_line(ca)
|
||||
with open(config_template, 'r') as tfp:
|
||||
with open(config, 'w+') as fp:
|
||||
with open(config_template, "r") as tfp:
|
||||
with open(config, "w+") as fp:
|
||||
config_txt = tfp.read()
|
||||
config_txt = config_txt.replace("CADATA", ca_line)
|
||||
config_txt = config_txt.replace("NAME", user)
|
||||
@@ -243,7 +243,7 @@ def update_kubeproxy(token, ca, master_ip, api_port, hostname_override):
|
||||
set_arg("--master", None, "kube-proxy")
|
||||
if hostname_override:
|
||||
set_arg("--hostname-override", hostname_override, "kube-proxy")
|
||||
service('restart', 'proxy')
|
||||
service("restart", "proxy")
|
||||
|
||||
|
||||
def update_kubelet(token, ca, master_ip, api_port):
|
||||
@@ -257,7 +257,7 @@ def update_kubelet(token, ca, master_ip, api_port):
|
||||
"""
|
||||
create_kubeconfig(token, ca, master_ip, api_port, "kubelet.config", "kubelet")
|
||||
set_arg("--client-ca-file", "${SNAP_DATA}/certs/ca.remote.crt", "kubelet")
|
||||
service('restart', 'kubelet')
|
||||
service("restart", "kubelet")
|
||||
|
||||
|
||||
def store_remote_ca(ca):
|
||||
@@ -266,7 +266,7 @@ def store_remote_ca(ca):
|
||||
|
||||
:param ca: the CA
|
||||
"""
|
||||
with open(ca_cert_file, 'w+') as fp:
|
||||
with open(ca_cert_file, "w+") as fp:
|
||||
fp.write(ca)
|
||||
try_set_file_permissions(ca_cert_file)
|
||||
|
||||
@@ -276,11 +276,11 @@ def mark_cluster_node():
|
||||
Mark a node as being part of a cluster by creating a var/lock/clustered.lock
|
||||
"""
|
||||
lock_file = "{}/var/lock/clustered.lock".format(snapdata_path)
|
||||
open(lock_file, 'a').close()
|
||||
open(lock_file, "a").close()
|
||||
os.chmod(lock_file, 0o700)
|
||||
services = ['etcd', 'apiserver-kicker', 'kubelite']
|
||||
services = ["etcd", "apiserver-kicker", "kubelite"]
|
||||
for s in services:
|
||||
service('restart', s)
|
||||
service("restart", s)
|
||||
|
||||
|
||||
def generate_callback_token():
|
||||
@@ -289,7 +289,7 @@ def generate_callback_token():
|
||||
|
||||
:return: the token
|
||||
"""
|
||||
token = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(64))
|
||||
token = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(64))
|
||||
with open(callback_token_file, "w") as fp:
|
||||
fp.write("{}\n".format(token))
|
||||
|
||||
@@ -367,7 +367,7 @@ def reset_current_dqlite_installation():
|
||||
# 3. wipe out the existing installation
|
||||
my_ep, other_ep = get_dqlite_endpoints()
|
||||
|
||||
service('stop', 'apiserver')
|
||||
service("stop", "apiserver")
|
||||
time.sleep(10)
|
||||
|
||||
delete_dqlite_node(my_ep, other_ep)
|
||||
@@ -386,10 +386,10 @@ def reset_current_dqlite_installation():
|
||||
else:
|
||||
# This node never joined a cluster. A cluster was formed around it.
|
||||
hostname = socket.gethostname() # type: str
|
||||
ip = '127.0.0.1' # type: str
|
||||
ip = "127.0.0.1" # type: str
|
||||
shutil.copy(
|
||||
'{}/microk8s-resources/certs/csr-dqlite.conf.template'.format(snap_path),
|
||||
'{}/var/tmp/csr-dqlite.conf'.format(snapdata_path),
|
||||
"{}/microk8s-resources/certs/csr-dqlite.conf.template".format(snap_path),
|
||||
"{}/var/tmp/csr-dqlite.conf".format(snapdata_path),
|
||||
)
|
||||
subprocess.check_call(
|
||||
"{}/bin/sed -i s/HOSTNAME/{}/g {}/var/tmp/csr-dqlite.conf".format(
|
||||
@@ -406,9 +406,9 @@ def reset_current_dqlite_installation():
|
||||
stderr=subprocess.DEVNULL,
|
||||
)
|
||||
subprocess.check_call(
|
||||
'{0}/usr/bin/openssl req -x509 -newkey rsa:4096 -sha256 -days 3650 -nodes '
|
||||
'-keyout {1}/var/kubernetes/backend/cluster.key '
|
||||
'-out {1}/var/kubernetes/backend/cluster.crt '
|
||||
"{0}/usr/bin/openssl req -x509 -newkey rsa:4096 -sha256 -days 3650 -nodes "
|
||||
"-keyout {1}/var/kubernetes/backend/cluster.key "
|
||||
"-out {1}/var/kubernetes/backend/cluster.crt "
|
||||
'-subj "/CN=k8s" -config {1}/var/tmp/csr-dqlite.conf -extensions v3_ext'.format(
|
||||
snap_path, snapdata_path
|
||||
).split(),
|
||||
@@ -417,11 +417,11 @@ def reset_current_dqlite_installation():
|
||||
)
|
||||
|
||||
# We reset to the default port and address
|
||||
init_data = {'Address': '127.0.0.1:19001'} # type: Dict[str, str]
|
||||
with open("{}/init.yaml".format(cluster_dir), 'w') as f:
|
||||
init_data = {"Address": "127.0.0.1:19001"} # type: Dict[str, str]
|
||||
with open("{}/init.yaml".format(cluster_dir), "w") as f:
|
||||
yaml.dump(init_data, f)
|
||||
|
||||
service('start', 'apiserver')
|
||||
service("start", "apiserver")
|
||||
|
||||
waits = 10 # type: int
|
||||
print("Waiting for node to start.", end=" ", flush=True)
|
||||
@@ -489,7 +489,7 @@ def get_dqlite_endpoints():
|
||||
if netifaces.AF_INET not in netifaces.ifaddresses(interface):
|
||||
continue
|
||||
for link in netifaces.ifaddresses(interface)[netifaces.AF_INET]:
|
||||
local_ips.append(link['addr'])
|
||||
local_ips.append(link["addr"])
|
||||
my_ep = []
|
||||
other_ep = []
|
||||
for ep in ep_addresses:
|
||||
@@ -532,7 +532,7 @@ def is_leader_without_successor():
|
||||
if netifaces.AF_INET not in netifaces.ifaddresses(interface):
|
||||
continue
|
||||
for link in netifaces.ifaddresses(interface)[netifaces.AF_INET]:
|
||||
local_ips.append(link['addr'])
|
||||
local_ips.append(link["addr"])
|
||||
|
||||
is_voter = False
|
||||
for ep in ep_addresses:
|
||||
@@ -560,8 +560,8 @@ def remove_kubelet_token(node):
|
||||
backup_file = "{}.backup".format(file)
|
||||
token = "system:node:{}".format(node)
|
||||
# That is a critical section. We need to protect it.
|
||||
with open(backup_file, 'w') as back_fp:
|
||||
with open(file, 'r') as fp:
|
||||
with open(backup_file, "w") as back_fp:
|
||||
with open(file, "r") as fp:
|
||||
for _, line in enumerate(fp):
|
||||
if token in line:
|
||||
continue
|
||||
@@ -580,13 +580,13 @@ def replace_admin_token(token):
|
||||
file = "{}/credentials/known_tokens.csv".format(snapdata_path)
|
||||
backup_file = "{}.backup".format(file)
|
||||
# That is a critical section. We need to protect it.
|
||||
with open(backup_file, 'w') as back_fp:
|
||||
with open(file, 'r') as fp:
|
||||
with open(backup_file, "w") as back_fp:
|
||||
with open(file, "r") as fp:
|
||||
for _, line in enumerate(fp):
|
||||
if "admin,admin,\"system:masters\"" in line:
|
||||
if 'admin,admin,"system:masters"' in line:
|
||||
continue
|
||||
back_fp.write("{}".format(line))
|
||||
back_fp.write("{},admin,admin,\"system:masters\"\n".format(token))
|
||||
back_fp.write('{},admin,admin,"system:masters"\n'.format(token))
|
||||
|
||||
try_set_file_permissions(backup_file)
|
||||
shutil.copyfile(backup_file, file)
|
||||
@@ -600,11 +600,11 @@ def remove_callback_token(node):
"""
tmp_file = "{}.tmp".format(callback_tokens_file)
if not os.path.isfile(callback_tokens_file):
open(callback_tokens_file, 'a+')
open(callback_tokens_file, "a+")
os.chmod(callback_tokens_file, 0o600)
with open(tmp_file, "w") as backup_fp:
os.chmod(tmp_file, 0o600)
with open(callback_tokens_file, 'r+') as callback_fp:
with open(callback_tokens_file, "r+") as callback_fp:
for _, line in enumerate(callback_fp):
parts = line.split()
if parts[0] == node:
@@ -645,9 +645,9 @@ def remove_dqlite_node(node, force=False):
)
info = json.loads(node_info.decode())
node_address = None
for a in info['status']['addresses']:
if a['type'] == 'InternalIP':
node_address = a['address']
for a in info["status"]["addresses"]:
if a["type"] == "InternalIP":
node_address = a["address"]
break

if not node_address:
@@ -691,7 +691,7 @@ def get_token(name, tokens_file="known_tokens.csv"):
with open(file) as fp:
for line in fp:
if name in line:
parts = line.split(',')
parts = line.split(",")
return parts[0].rstrip()
return None

@@ -707,7 +707,7 @@ def store_cert(filename, payload):
backup_file_with_path = "{}.backup".format(file_with_path)
shutil.copyfile(file_with_path, backup_file_with_path)
try_set_file_permissions(backup_file_with_path)
with open(file_with_path, 'w+') as fp:
with open(file_with_path, "w+") as fp:
fp.write(payload)
try_set_file_permissions(file_with_path)

@@ -719,10 +719,10 @@ def store_cluster_certs(cluster_cert, cluster_key):
:param cluster_cert: the cluster certificate
:param cluster_key: the cluster certificate key
"""
with open(cluster_cert_file, 'w+') as fp:
with open(cluster_cert_file, "w+") as fp:
fp.write(cluster_cert)
try_set_file_permissions(cluster_cert_file)
with open(cluster_key_file, 'w+') as fp:
with open(cluster_key_file, "w+") as fp:
fp.write(cluster_key)
try_set_file_permissions(cluster_key_file)

@@ -747,8 +747,8 @@ def create_admin_kubeconfig(ca, ha_admin_token=None):
shutil.copyfile(config, "{}.backup".format(config))
try_set_file_permissions("{}.backup".format(config))
ca_line = ca_one_line(ca)
with open(config_template, 'r') as tfp:
with open(config, 'w+') as fp:
with open(config_template, "r") as tfp:
with open(config, "w+") as fp:
for _, config_txt in enumerate(tfp):
if config_txt.strip().startswith("username:"):
continue
@@ -805,7 +805,7 @@ def update_dqlite(cluster_cert, cluster_key, voters, host):
:param voters: the dqlite voters
:param host: the hostname others see of this node
"""
service('stop', 'apiserver')
service("stop", "apiserver")
time.sleep(10)
shutil.rmtree(cluster_backup_dir, ignore_errors=True)
shutil.move(cluster_dir, cluster_backup_dir)
@@ -816,14 +816,14 @@ def update_dqlite(cluster_cert, cluster_key, voters, host):
port = 19001
with open("{}/info.yaml".format(cluster_backup_dir)) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
if 'Address' in data:
port = data['Address'].split(':')[1]
if "Address" in data:
port = data["Address"].split(":")[1]

init_data = {'Cluster': voters, 'Address': "{}:{}".format(host, port)}
with open("{}/init.yaml".format(cluster_dir), 'w') as f:
init_data = {"Cluster": voters, "Address": "{}:{}".format(host, port)}
with open("{}/init.yaml".format(cluster_dir), "w") as f:
yaml.dump(init_data, f)

service('start', 'apiserver')
service("start", "apiserver")

waits = 10
print("Waiting for this node to finish joining the cluster.", end=" ", flush=True)
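For reference, yaml.dump serializes the init_data mapping above into a small init.yaml. A sketch with made-up voter addresses (yaml.dump sorts keys alphabetically by default):

import yaml

# Illustrative addresses only; real values come from `voters` and `host`.
init_data = {"Cluster": ["10.0.0.2:19001", "10.0.0.3:19001"], "Address": "10.0.0.1:19001"}
print(yaml.dump(init_data))
# Address: 10.0.0.1:19001
# Cluster:
# - 10.0.0.2:19001
# - 10.0.0.3:19001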
@@ -849,7 +849,7 @@ def update_dqlite(cluster_cert, cluster_key, voters, host):
waits -= 1
print(" ")

with open("{}//certs/csr.conf".format(snapdata_path), 'w') as f:
with open("{}//certs/csr.conf".format(snapdata_path), "w") as f:
f.write("changeme")

restart_all_services()
@@ -869,7 +869,7 @@ def join_dqlite(connection_parts):
print("Contacting cluster at {}".format(master_ip))
info = get_connection_info(master_ip, master_port, token, cluster_type="dqlite")

hostname_override = info['hostname_override']
hostname_override = info["hostname_override"]

store_cert("ca.crt", info["ca"])
store_cert("ca.key", info["ca_key"])
@@ -916,8 +916,8 @@ def join_etcd(connection_parts):
info = get_connection_info(master_ip, master_port, token, callback_token=callback_token)
store_base_kubelet_args(info["kubelet_args"])
hostname_override = None
if 'hostname_override' in info:
hostname_override = info['hostname_override']
if "hostname_override" in info:
hostname_override = info["hostname_override"]
store_remote_ca(info["ca"])
update_flannel(info["etcd"], master_ip, master_port, token)
update_kubeproxy(info["kubeproxy"], info["ca"], master_ip, info["apiport"], hostname_override)

@@ -15,16 +15,16 @@ kubeconfig = "--kubeconfig=" + os.path.expandvars("${SNAP_DATA}/credentials/clie

def get_current_arch():
# architecture mapping
arch_mapping = {'aarch64': 'arm64', 'x86_64': 'amd64'}
arch_mapping = {"aarch64": "arm64", "x86_64": "amd64"}

return arch_mapping[platform.machine()]


def snap_data() -> Path:
try:
return Path(os.environ['SNAP_DATA'])
return Path(os.environ["SNAP_DATA"])
except KeyError:
return Path('/var/snap/microk8s/current')
return Path("/var/snap/microk8s/current")


def run(*args, die=True):
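A quick illustration of the two helpers above, assuming snap_data() and arch_mapping are in scope as defined there:

import os

os.environ.pop("SNAP_DATA", None)
print(snap_data())  # PosixPath('/var/snap/microk8s/current'), the KeyError fallback
# get_current_arch() indexes arch_mapping directly, so it raises KeyError
# on any machine other than aarch64/x86_64 (e.g. armv7l).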
@@ -69,7 +69,7 @@ def is_ha_enabled():

def get_dqlite_info():
cluster_dir = os.path.expandvars("${SNAP_DATA}/var/kubernetes/backend")
snap_path = os.environ.get('SNAP')
snap_path = os.environ.get("SNAP")

info = []

@@ -79,7 +79,7 @@ def get_dqlite_info():
waits = 10
while waits > 0:
try:
with open("{}/info.yaml".format(cluster_dir), mode='r') as f:
with open("{}/info.yaml".format(cluster_dir), mode="r") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
out = subprocess.check_output(
"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt "
@@ -88,7 +88,7 @@ def get_dqlite_info():
).split(),
timeout=4,
)
if data['Address'] in out.decode():
if data["Address"] in out.decode():
break
else:
time.sleep(5)
@@ -112,9 +112,9 @@ def get_dqlite_info():


def is_cluster_locked():
if (snap_data() / 'var/lock/clustered.lock').exists():
click.echo('This MicroK8s deployment is acting as a node in a cluster.')
click.echo('Please use the master node.')
if (snap_data() / "var/lock/clustered.lock").exists():
click.echo("This MicroK8s deployment is acting as a node in a cluster.")
click.echo("Please use the master node.")
sys.exit(1)


@@ -156,8 +156,8 @@ def exit_if_no_permission():


def ensure_started():
if (snap_data() / 'var/lock/stopped.lock').exists():
click.echo('microk8s is not running, try microk8s start', err=True)
if (snap_data() / "var/lock/stopped.lock").exists():
click.echo("microk8s is not running, try microk8s start", err=True)
sys.exit(1)


@@ -177,7 +177,7 @@ def kubectl_get_clusterroles():
def get_available_addons(arch):
addon_dataset = os.path.expandvars("${SNAP}/addon-lists.yaml")
available = []
with open(addon_dataset, 'r') as file:
with open(addon_dataset, "r") as file:
# The FullLoader parameter handles the conversion from YAML
# scalar values to Python the dictionary format
addons = yaml.load(file, Loader=yaml.FullLoader)
@@ -185,7 +185,7 @@ def get_available_addons(arch):
if arch in addon["supported_architectures"]:
available.append(addon)

available = sorted(available, key=lambda k: k['name'])
available = sorted(available, key=lambda k: k["name"])
return available


@@ -230,7 +230,7 @@ def check_help_flag(addons: list) -> bool:
calls to print help text and print out a generic message to that effect.
"""
addon = addons[0]
if any(arg in addons for arg in ('-h', '--help')) and addon != 'kubeflow':
if any(arg in addons for arg in ("-h", "--help")) and addon != "kubeflow":
print("Addon %s does not yet have a help message." % addon)
print("For more information about it, visit https://microk8s.io/docs/addons")
return True
@@ -244,18 +244,18 @@ def xable(action: str, addons: list, xabled_addons: list):
the script names.
"""
actions = Path(__file__).absolute().parent / "../../../actions"
existing_addons = {sh.with_suffix('').name[7:] for sh in actions.glob('enable.*.sh')}
existing_addons = {sh.with_suffix("").name[7:] for sh in actions.glob("enable.*.sh")}

# Backwards compatibility with enabling multiple addons at once, e.g.
# `microk8s.enable foo bar:"baz"`
if all(a.split(':')[0] in existing_addons for a in addons) and len(addons) > 1:
if all(a.split(":")[0] in existing_addons for a in addons) and len(addons) > 1:
for addon in addons:
if addon in xabled_addons and addon != 'kubeflow':
if addon in xabled_addons and addon != "kubeflow":
click.echo("Addon %s is already %sd." % (addon, action))
else:
addon, *args = addon.split(':')
addon, *args = addon.split(":")
wait_for_ready(timeout=30)
p = subprocess.run([str(actions / ('%s.%s.sh' % (action, addon)))] + args)
p = subprocess.run([str(actions / ("%s.%s.sh" % (action, addon)))] + args)
if p.returncode:
sys.exit(p.returncode)
wait_for_ready(timeout=30)
@@ -263,9 +263,9 @@ def xable(action: str, addons: list, xabled_addons: list):
# The new way of xabling addons, that allows for unix-style argument passing,
# such as `microk8s.enable foo --bar`.
else:
addon, *args = addons[0].split(':')
addon, *args = addons[0].split(":")

if addon in xabled_addons and addon != 'kubeflow':
if addon in xabled_addons and addon != "kubeflow":
click.echo("Addon %s is already %sd." % (addon, action))
sys.exit(0)

@@ -285,7 +285,7 @@ def xable(action: str, addons: list, xabled_addons: list):
sys.exit(1)

wait_for_ready(timeout=30)
script = [str(actions / ('%s.%s.sh' % (action, addon)))]
script = [str(actions / ("%s.%s.sh" % (action, addon)))]
if args:
p = subprocess.run(script + args)
else:
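The split(":") idiom in xable above is what carries per-addon arguments through the addon name. A small illustration; the addon name and argument are made up:

addon, *args = "registry:size=40Gi".split(":")
print(addon)  # registry
print(args)   # ['size=40Gi']
print("%s.%s.sh" % ("enable", addon))  # enable.registry.sh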
@@ -42,7 +42,7 @@ def run_command(command):
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
while True:
output = process.stdout.readline()
if (not output or output == '') and process.poll() is not None:
if (not output or output == "") and process.poll() is not None:
break
if output:
print(output.decode().strip())
@@ -56,18 +56,18 @@ def backup(fname=None, debug=False):
:param fname_tar: the tar file
:param debug: show debug output
"""
snap_path = os.environ.get('SNAP')
snap_path = os.environ.get("SNAP")
# snap_path = '/snap/microk8s/current'
# snapdata_path = '/var/snap/microk8s/current'

if not fname:
fname = generate_backup_name()
if fname.endswith('.tar.gz'):
if fname.endswith(".tar.gz"):
fname = fname[:-7]
fname_tar = '{}.tar.gz'.format(fname)
fname_tar = "{}.tar.gz".format(fname)

with tempfile.TemporaryDirectory() as tmpdirname:
backup_cmd = '{}/bin/migrator --mode backup-dqlite --db-dir {}'.format(
backup_cmd = "{}/bin/migrator --mode backup-dqlite --db-dir {}".format(
snap_path, "{}/{}".format(tmpdirname, fname)
)
if debug:
@@ -95,17 +95,17 @@ def restore(fname_tar, debug=False):
:param fname_tar: the tar file
:param debug: show debug output
"""
snap_path = os.environ.get('SNAP')
snap_path = os.environ.get("SNAP")
# snap_path = '/snap/microk8s/current'
with tempfile.TemporaryDirectory() as tmpdirname:
with tarfile.open(fname_tar, "r:gz") as tar:
tar.extractall(path=tmpdirname)
if fname_tar.endswith('.tar.gz'):
if fname_tar.endswith(".tar.gz"):
fname = fname_tar[:-7]
else:
fname = fname_tar
fname = os.path.basename(fname)
restore_cmd = '{}/bin/migrator --mode restore-to-dqlite --db-dir {}'.format(
restore_cmd = "{}/bin/migrator --mode restore-to-dqlite --db-dir {}".format(
snap_path, "{}/{}".format(tmpdirname, fname)
)
if debug:
@@ -120,7 +120,7 @@ def restore(fname_tar, debug=False):
exit(4)


if __name__ == '__main__':
if __name__ == "__main__":
exit_if_no_permission()
is_cluster_locked()

@@ -130,22 +130,22 @@ if __name__ == '__main__':

# initiate the parser with a description
parser = argparse.ArgumentParser(
description="backup and restore the Kubernetes datastore.", prog='microk8s dbctl'
description="backup and restore the Kubernetes datastore.", prog="microk8s dbctl"
)
parser.add_argument('--debug', action='store_true', help='print debug output')
commands = parser.add_subparsers(title='commands', help='backup and restore operations')
parser.add_argument("--debug", action="store_true", help="print debug output")
commands = parser.add_subparsers(title="commands", help="backup and restore operations")
restore_parser = commands.add_parser("restore")
restore_parser.add_argument('backup-file', help='name of file with the backup')
restore_parser.add_argument("backup-file", help="name of file with the backup")
backup_parser = commands.add_parser("backup")
backup_parser.add_argument('-o', metavar='backup-file', help='output filename')
backup_parser.add_argument("-o", metavar="backup-file", help="output filename")
args = parser.parse_args()

if 'backup-file' in args:
fname = vars(args)['backup-file']
if "backup-file" in args:
fname = vars(args)["backup-file"]
print("Restoring from {}".format(fname))
restore(fname, args.debug)
elif 'o' in args:
elif "o" in args:
print("Backing up the datastore")
backup(vars(args)['o'], args.debug)
backup(vars(args)["o"], args.debug)
else:
parser.print_help()
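A sketch of how the dbctl parser above routes its two subcommands; the file names are illustrative, and parser refers to the ArgumentParser built in the hunk:

args = parser.parse_args(["backup", "-o", "backupfile"])
print(vars(args))                 # {'debug': False, 'o': 'backupfile'}
args = parser.parse_args(["restore", "backupfile.tar.gz"])
print(vars(args)["backup-file"])  # backupfile.tar.gz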
@@ -13,8 +13,8 @@ from common.utils import (
from status import get_available_addons, get_current_arch, get_status


@click.command(context_settings={'ignore_unknown_options': True})
@click.argument('addons', nargs=-1, required=True)
@click.command(context_settings={"ignore_unknown_options": True})
@click.argument("addons", nargs=-1, required=True)
def disable(addons):
"""Disables one or more MicroK8s addons.

@@ -34,10 +34,10 @@ def disable(addons):
wait_for_ready(timeout=30)

_, disabled_addons = get_status(get_available_addons(get_current_arch()), True)
disabled_addons = {a['name'] for a in disabled_addons}
disabled_addons = {a["name"] for a in disabled_addons}

xable('disable', addons, disabled_addons)
xable("disable", addons, disabled_addons)


if __name__ == '__main__':
disable(prog_name='microk8s disable')
if __name__ == "__main__":
disable(prog_name="microk8s disable")

@@ -13,8 +13,8 @@ from common.utils import (
from status import get_available_addons, get_current_arch, get_status


@click.command(context_settings={'ignore_unknown_options': True})
@click.argument('addons', nargs=-1, required=True)
@click.command(context_settings={"ignore_unknown_options": True})
@click.argument("addons", nargs=-1, required=True)
def enable(addons):
"""Enables a MicroK8s addon.

@@ -34,10 +34,10 @@ def enable(addons):
wait_for_ready(timeout=30)

enabled_addons, _ = get_status(get_available_addons(get_current_arch()), True)
enabled_addons = {a['name'] for a in enabled_addons}
enabled_addons = {a["name"] for a in enabled_addons}

xable('enable', addons, enabled_addons)
xable("enable", addons, enabled_addons)


if __name__ == '__main__':
enable(prog_name='microk8s enable')
if __name__ == "__main__":
enable(prog_name="microk8s enable")

@@ -4,9 +4,9 @@ import subprocess
from dateutil.parser import parse
import datetime

snapdata_path = os.environ.get('SNAP_DATA')
snap_path = os.environ.get('SNAP')
backup_dir = '{}/var/log/ca-backup/'.format(snapdata_path)
snapdata_path = os.environ.get("SNAP_DATA")
snap_path = os.environ.get("SNAP")
backup_dir = "{}/var/log/ca-backup/".format(snapdata_path)


def exit_if_no_root():
@@ -15,7 +15,7 @@ def exit_if_no_root():
"""
if not os.geteuid() == 0:
click.echo(
'Elevated permissions are needed for this operation. Please run this command with sudo.'
"Elevated permissions are needed for this operation. Please run this command with sudo."
)
exit(50)

@@ -29,12 +29,12 @@ def check_certificate():
)
try:
cert_expire = subprocess.check_output(cmd.split())
cert_expire_date = cert_expire.decode().split('=')
cert_expire_date = cert_expire.decode().split("=")
date = parse(cert_expire_date[1])
diff = date - datetime.datetime.now(datetime.timezone.utc)
click.echo('The CA certificate will expire in {} days.'.format(diff.days))
click.echo("The CA certificate will expire in {} days.".format(diff.days))
except subprocess.CalledProcessError as e:
click.echo('Failed to get CA info. {}'.format(e))
click.echo("Failed to get CA info. {}".format(e))
exit(4)


@@ -43,25 +43,25 @@ def undo_refresh():
Revert last certificate operation
"""
if not os.path.exists(backup_dir):
click.echo('No previous backup found')
click.echo("No previous backup found")
exit(1)

try:
subprocess.check_call('cp -r {}/certs {}/'.format(backup_dir, snapdata_path).split())
subprocess.check_call('cp -r {}/credentials {}'.format(backup_dir, snapdata_path).split())
subprocess.check_call("cp -r {}/certs {}/".format(backup_dir, snapdata_path).split())
subprocess.check_call("cp -r {}/credentials {}".format(backup_dir, snapdata_path).split())
except subprocess.CalledProcessError:
click.echo('Failed to recover certificates')
click.echo("Failed to recover certificates")
exit(4)

try:
subprocess.check_call('{}/microk8s-stop.wrapper'.format(snap_path).split())
subprocess.check_call("{}/microk8s-stop.wrapper".format(snap_path).split())
except subprocess.CalledProcessError:
pass

try:
subprocess.check_call('{}/microk8s-start.wrapper'.format(snap_path).split())
subprocess.check_call("{}/microk8s-start.wrapper".format(snap_path).split())
except subprocess.CalledProcessError:
click.echo('Failed to start MicroK8s after reverting the certificates')
click.echo("Failed to start MicroK8s after reverting the certificates")
exit(4)


@@ -70,7 +70,7 @@ def update_configs():
Update all kubeconfig files used by the client and the services
"""
p = subprocess.Popen(
['bash', '-c', '. {}/actions/common/utils.sh; update_configs'.format(snap_path)]
["bash", "-c", ". {}/actions/common/utils.sh; update_configs".format(snap_path)]
)
p.communicate()

@@ -80,11 +80,11 @@ def take_backup():
Backup the current certificates and credentials
"""
try:
subprocess.check_call('mkdir -p {}'.format(backup_dir).split())
subprocess.check_call('cp -r {}/certs {}'.format(snapdata_path, backup_dir).split())
subprocess.check_call('cp -r {}/credentials {}'.format(snapdata_path, backup_dir).split())
subprocess.check_call("mkdir -p {}".format(backup_dir).split())
subprocess.check_call("cp -r {}/certs {}".format(snapdata_path, backup_dir).split())
subprocess.check_call("cp -r {}/credentials {}".format(snapdata_path, backup_dir).split())
except subprocess.CalledProcessError as e:
click.echo('Failed to backup the current CA. {}'.format(e))
click.echo("Failed to backup the current CA. {}".format(e))
exit(10)


@@ -92,14 +92,14 @@ def produce_certs():
"""
Produce the CA and the rest of the needed certificates (eg service, front-proxy)
"""
subprocess.check_call('rm -rf {}/certs/ca.crt'.format(snapdata_path).split())
subprocess.check_call('rm -rf {}/certs/front-proxy-ca.crt'.format(snapdata_path).split())
subprocess.check_call('rm -rf {}/certs/csr.conf'.format(snapdata_path).split())
subprocess.check_call("rm -rf {}/certs/ca.crt".format(snapdata_path).split())
subprocess.check_call("rm -rf {}/certs/front-proxy-ca.crt".format(snapdata_path).split())
subprocess.check_call("rm -rf {}/certs/csr.conf".format(snapdata_path).split())
p = subprocess.Popen(
['bash', '-c', '. {}/actions/common/utils.sh; produce_certs'.format(snap_path)]
["bash", "-c", ". {}/actions/common/utils.sh; produce_certs".format(snap_path)]
)
p.communicate()
subprocess.check_call('rm -rf .slr'.split())
subprocess.check_call("rm -rf .slr".split())


def refresh_ca():
@@ -130,10 +130,10 @@ def install_certs(ca_dir):
Recreate service certificate and front proxy using a user provided CA
:param ca_dir: path to the ca.crt and ca.key
"""
subprocess.check_call('cp {}/ca.crt {}/certs/'.format(ca_dir, snapdata_path).split())
subprocess.check_call('cp {}/ca.key {}/certs/'.format(ca_dir, snapdata_path).split())
subprocess.check_call("cp {}/ca.crt {}/certs/".format(ca_dir, snapdata_path).split())
subprocess.check_call("cp {}/ca.key {}/certs/".format(ca_dir, snapdata_path).split())
p = subprocess.Popen(
['bash', '-c', '. {}/actions/common/utils.sh; gen_server_cert'.format(snap_path)]
["bash", "-c", ". {}/actions/common/utils.sh; gen_server_cert".format(snap_path)]
)
p.communicate()

@@ -143,28 +143,28 @@ def validate_certificates(ca_dir):
Perform some basic testing of the user provided CA
:param ca_dir: path to the ca.crt and ca.key
"""
if not os.path.isfile('{}/ca.crt'.format(ca_dir)) or not os.path.isfile(
'{}/ca.key'.format(ca_dir)
if not os.path.isfile("{}/ca.crt".format(ca_dir)) or not os.path.isfile(
"{}/ca.key".format(ca_dir)
):
click.echo('Could not find ca.crt and ca.key files in {}'.format(ca_dir))
click.echo("Could not find ca.crt and ca.key files in {}".format(ca_dir))
exit(30)

try:
cmd = '{}/usr/bin/openssl rsa -in {}/ca.key -check -noout -out /dev/null'.format(
cmd = "{}/usr/bin/openssl rsa -in {}/ca.key -check -noout -out /dev/null".format(
snap_path, ca_dir
)
subprocess.check_call(cmd.split(), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError as e:
click.echo('CA private key is invalid. {}'.format(e))
click.echo("CA private key is invalid. {}".format(e))
exit(31)

try:
cmd = '{}/usr/bin/openssl x509 -in {}/ca.crt -text -noout -out /dev/null'.format(
cmd = "{}/usr/bin/openssl x509 -in {}/ca.crt -text -noout -out /dev/null".format(
snap_path, ca_dir
)
subprocess.check_call(cmd.split(), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError as e:
click.echo('CA certificate is invalid. {}'.format(e))
click.echo("CA certificate is invalid. {}".format(e))
exit(32)


@@ -195,21 +195,21 @@ def install_ca(ca_dir):


@click.command(
name='refresh-certs',
help='Replace the CA certificates with the ca.crt and ca.key found in CA_DIR.\n'
'Omit the CA_DIR to auto-generate a new CA.',
name="refresh-certs",
help="Replace the CA certificates with the ca.crt and ca.key found in CA_DIR.\n"
"Omit the CA_DIR to auto-generate a new CA.",
)
@click.argument('ca_dir', required=False, default=None, type=click.Path(exists=True))
@click.option('-u', '--undo', is_flag=True, default=False, help='Revert the last refresh performed')
@click.argument("ca_dir", required=False, default=None, type=click.Path(exists=True))
@click.option("-u", "--undo", is_flag=True, default=False, help="Revert the last refresh performed")
@click.option(
'-c',
'--check',
"-c",
"--check",
is_flag=True,
default=False,
help='Check the expiration time of the installed CA',
help="Check the expiration time of the installed CA",
)
@click.option(
'--help',
"--help",
is_flag=True,
default=False,
)
@@ -256,5 +256,5 @@ Options:
click.echo(msg)


if __name__ == '__main__':
if __name__ == "__main__":
refresh_certs()

@@ -74,11 +74,11 @@ def print_pretty(isReady, enabled_addons, disabled_addons):

print("addons:")
if enabled_addons and len(enabled_addons) > 0:
print('{:>2}{}'.format("", "enabled:"))
print("{:>2}{}".format("", "enabled:"))
for enabled in enabled_addons:
print(console_formatter.format("", enabled["name"], enabled["description"]))
if disabled_addons and len(disabled_addons) > 0:
print('{:>2}{}'.format("", "disabled:"))
print("{:>2}{}".format("", "disabled:"))
for disabled in disabled_addons:
print(console_formatter.format("", disabled["name"], disabled["description"]))
else:
@@ -159,7 +159,7 @@ def get_status(available_addons, isReady):
kube_output = kube_output + cluster_output
for addon in available_addons:
found = False
for row in kube_output.split('\n'):
for row in kube_output.split("\n"):
if is_enabled(addon["check_status"], row):
enabled.append(addon)
found = True
@@ -181,14 +181,14 @@ def ha_cluster_formed(info):
return ha_formed


if __name__ == '__main__':
if __name__ == "__main__":
exit_if_no_permission()
exit_if_stopped()
is_cluster_locked()

# initiate the parser with a description
parser = argparse.ArgumentParser(
description='Microk8s cluster status check.', prog='microk8s status'
description="Microk8s cluster status check.", prog="microk8s status"
)
parser.add_argument(
"--format",
@@ -197,7 +197,7 @@ if __name__ == '__main__':
choices={"pretty", "yaml", "short"},
)
parser.add_argument(
"-w", "--wait-ready", action='store_true', help="wait until the cluster is in ready state"
"-w", "--wait-ready", action="store_true", help="wait until the cluster is in ready state"
)
parser.add_argument(
"-t",
@@ -208,7 +208,7 @@ if __name__ == '__main__':
)
parser.add_argument("-a", "--addon", help="check the status of an addon.", default="all")
parser.add_argument(
"--yaml", action='store_true', help="DEPRECATED, use '--format yaml' instead"
"--yaml", action="store_true", help="DEPRECATED, use '--format yaml' instead"
)

# read arguments from the command line

@@ -9,8 +9,8 @@ from common.utils import exit_if_no_permission, is_cluster_locked

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
CLUSTER_API = "cluster/api/v1.0"
snapdata_path = os.environ.get('SNAP_DATA')
snap_path = os.environ.get('SNAP')
snapdata_path = os.environ.get("SNAP_DATA")
snap_path = os.environ.get("SNAP")


def upgrade_master(upgrade, phase):
@@ -21,7 +21,7 @@ def upgrade_master(upgrade, phase):
:return:
"""
try:
upgrade_script = '{}/upgrade-scripts/{}/{}-master.sh'.format(snap_path, upgrade, phase)
upgrade_script = "{}/upgrade-scripts/{}/{}-master.sh".format(snap_path, upgrade, phase)
if os.path.isfile(upgrade_script):
print("Running {}-upgrade script".format(phase))
out = subprocess.check_output(upgrade_script)
@@ -41,7 +41,7 @@ def node_upgrade(upgrade, phase, node_ep, token):
:return:
"""
try:
upgrade_script = '{}/upgrade-scripts/{}/{}-node.sh'.format(snap_path, upgrade, phase)
upgrade_script = "{}/upgrade-scripts/{}/{}-node.sh".format(snap_path, upgrade, phase)
if os.path.isfile(upgrade_script):
remote_op = {"callback": token, "phase": phase, "upgrade": upgrade}
# TODO: handle ssl verification
@@ -160,7 +160,7 @@ def list_upgrades():
"""
List all available upgrades
"""
upgrades_dir = '{}/upgrade-scripts/'.format(snap_path)
upgrades_dir = "{}/upgrade-scripts/".format(snap_path)
upgrades = [
dI for dI in os.listdir(upgrades_dir) if os.path.isdir(os.path.join(upgrades_dir, dI))
]
@@ -168,20 +168,20 @@ def list_upgrades():
print(u)


if __name__ == '__main__':
if __name__ == "__main__":
exit_if_no_permission()
is_cluster_locked()

# initiate the parser with a description
parser = argparse.ArgumentParser(description='MicroK8s supervised upgrades.', prog='upgrade')
parser = argparse.ArgumentParser(description="MicroK8s supervised upgrades.", prog="upgrade")
parser.add_argument(
"-l", "--list", help="list available upgrades", nargs='?', const=True, type=bool
"-l", "--list", help="list available upgrades", nargs="?", const=True, type=bool
)
parser.add_argument(
"-r", "--run", help="run a specific upgrade script", nargs='?', type=str, default=None
"-r", "--run", help="run a specific upgrade script", nargs="?", type=str, default=None
)
parser.add_argument(
"-u", "--undo", help="rollback a specific upgrade", nargs='?', type=str, default=None
"-u", "--undo", help="rollback a specific upgrade", nargs="?", type=str, default=None
)
args = parser.parse_args()
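One subtlety in the parser above: with nargs="?" and const=True, passing the bare flag stores True, while omitting it leaves the default. A quick illustration, using the parser built in this hunk:

args = parser.parse_args(["--list"])
print(args.list)  # True, the const value is used when the flag carries no argument
args = parser.parse_args([])
print(args.list)  # None, the default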
@@ -53,21 +53,21 @@ class TestAddons(object):
sh.microk8s.enable.foo()

def test_help_text(self):
status = yaml.load(sh.microk8s.status(format='yaml').stdout)
expected = {a['name']: 'disabled' for a in status['addons']}
expected['ha-cluster'] = 'enabled'
status = yaml.load(sh.microk8s.status(format="yaml").stdout)
expected = {a["name"]: "disabled" for a in status["addons"]}
expected["ha-cluster"] = "enabled"

assert expected == {a['name']: a['status'] for a in status['addons']}
assert expected == {a["name"]: a["status"] for a in status["addons"]}

for addon in status['addons']:
sh.microk8s.enable(addon['name'], '--', '--help')
for addon in status["addons"]:
sh.microk8s.enable(addon["name"], "--", "--help")

assert expected == {a['name']: a['status'] for a in status['addons']}
assert expected == {a["name"]: a["status"] for a in status["addons"]}

for addon in status['addons']:
sh.microk8s.disable(addon['name'], '--', '--help')
for addon in status["addons"]:
sh.microk8s.disable(addon["name"], "--", "--help")

assert expected == {a['name']: a['status'] for a in status['addons']}
assert expected == {a["name"]: a["status"] for a in status["addons"]}

def test_basic(self):
"""
@@ -111,19 +111,19 @@ class TestAddons(object):
microk8s_disable("dashboard")
print("Disabling storage")
microk8s_disable("storage:destroy-storage")
'''
"""
We would disable DNS here but this freezes any terminating pods.
We let microk8s reset to do the cleanup.
print("Disabling DNS")
microk8s_disable("dns")
'''
"""

@pytest.mark.skipif(
os.environ.get('UNDER_TIME_PRESSURE') == 'True',
os.environ.get("UNDER_TIME_PRESSURE") == "True",
reason="Skipping GPU tests as we are under time pressure",
)
@pytest.mark.skipif(
platform.machine() != 'x86_64', reason="GPU tests are only relevant in x86 architectures"
platform.machine() != "x86_64", reason="GPU tests are only relevant in x86 architectures"
)
def test_gpu(self):
"""
@@ -142,10 +142,10 @@ class TestAddons(object):
microk8s_disable("gpu")

@pytest.mark.skipif(
platform.machine() != 'x86_64', reason="Istio tests are only relevant in x86 architectures"
platform.machine() != "x86_64", reason="Istio tests are only relevant in x86 architectures"
)
@pytest.mark.skipif(
os.environ.get('UNDER_TIME_PRESSURE') == 'True',
os.environ.get("UNDER_TIME_PRESSURE") == "True",
reason="Skipping istio and knative tests as we are under time pressure",
)
def test_knative_istio(self):
@@ -167,11 +167,11 @@ class TestAddons(object):
microk8s_disable("istio")

@pytest.mark.skipif(
platform.machine() != 'x86_64',
platform.machine() != "x86_64",
reason="Fluentd, prometheus, jaeger tests are only relevant in x86 architectures",
)
@pytest.mark.skipif(
os.environ.get('UNDER_TIME_PRESSURE') == 'True',
os.environ.get("UNDER_TIME_PRESSURE") == "True",
reason="Skipping jaeger, prometheus and fluentd tests as we are under time pressure",
)
def test_monitoring_addons(self):
@@ -194,11 +194,11 @@ class TestAddons(object):
microk8s_disable("fluentd")

@pytest.mark.skipif(
platform.machine() != 'x86_64',
platform.machine() != "x86_64",
reason="Prometheus is only relevant in x86 architectures",
)
@pytest.mark.skipif(
os.environ.get('SKIP_PROMETHEUS') == 'True',
os.environ.get("SKIP_PROMETHEUS") == "True",
reason="Skipping prometheus if it crash loops on lxd",
)
def test_prometheus(self):
@@ -215,10 +215,10 @@ class TestAddons(object):
microk8s_reset()

@pytest.mark.skipif(
platform.machine() != 'x86_64', reason="Cilium tests are only relevant in x86 architectures"
platform.machine() != "x86_64", reason="Cilium tests are only relevant in x86 architectures"
)
@pytest.mark.skipif(
os.environ.get('UNDER_TIME_PRESSURE') == 'True',
os.environ.get("UNDER_TIME_PRESSURE") == "True",
reason="Skipping cilium tests as we are under time pressure",
)
def test_cilium(self):
@@ -229,7 +229,7 @@ class TestAddons(object):
run(
"/snap/bin/microk8s.enable cilium".split(),
stdout=PIPE,
input=b'N\n',
input=b"N\n",
stderr=STDOUT,
check=True,
)
@@ -241,7 +241,7 @@ class TestAddons(object):

@pytest.mark.skip("disabling the test while we work on a 1.20 release")
@pytest.mark.skipif(
os.environ.get('UNDER_TIME_PRESSURE') == 'True',
os.environ.get("UNDER_TIME_PRESSURE") == "True",
reason="Skipping Linkerd tests as we are under time pressure",
)
def test_linkerd(self):
@@ -270,11 +270,11 @@ class TestAddons(object):
@pytest.mark.skip("disabling the kubeflow addon until the new bundle becomes available")
@pytest.mark.skipif(
platform.machine() != 'x86_64',
platform.machine() != "x86_64",
reason="Kubeflow tests are only relevant in x86 architectures",
)
@pytest.mark.skipif(
os.environ.get('UNDER_TIME_PRESSURE') == 'True',
os.environ.get("UNDER_TIME_PRESSURE") == "True",
reason="Skipping kubeflow test as we are under time pressure",
)
def test_kubeflow_addon(self):
@@ -291,11 +291,11 @@ class TestAddons(object):
microk8s_disable("kubeflow")

@pytest.mark.skipif(
platform.machine() != 'x86_64',
platform.machine() != "x86_64",
reason="Metallb tests are only relevant in x86 architectures",
)
@pytest.mark.skipif(
os.environ.get('UNDER_TIME_PRESSURE') == 'True',
os.environ.get("UNDER_TIME_PRESSURE") == "True",
reason="Skipping metallb test as we are under time pressure",
)
def test_metallb_addon(self):
@@ -309,11 +309,11 @@ class TestAddons(object):

@pytest.mark.skip("disabling the test while we work on a 1.20 release")
@pytest.mark.skipif(
platform.machine() != 'x86_64',
platform.machine() != "x86_64",
reason="Ambassador tests are only relevant in x86 architectures",
)
@pytest.mark.skipif(
os.environ.get('UNDER_TIME_PRESSURE') == 'True',
os.environ.get("UNDER_TIME_PRESSURE") == "True",
reason="Skipping ambassador tests as we are under time pressure",
)
def test_ambassador(self):
@@ -329,10 +329,10 @@ class TestAddons(object):
microk8s_disable("ambassador")

@pytest.mark.skipif(
platform.machine() != 'x86_64', reason="Multus tests are only relevant in x86 architectures"
platform.machine() != "x86_64", reason="Multus tests are only relevant in x86 architectures"
)
@pytest.mark.skipif(
os.environ.get('UNDER_TIME_PRESSURE') == 'True',
os.environ.get("UNDER_TIME_PRESSURE") == "True",
reason="Skipping multus tests as we are under time pressure",
)
def test_multus(self):
@@ -369,10 +369,10 @@ class TestAddons(object):
microk8s_disable("traefik")

@pytest.mark.skipif(
platform.machine() != 'x86_64', reason="KEDA tests are only relevant in x86 architectures"
platform.machine() != "x86_64", reason="KEDA tests are only relevant in x86 architectures"
)
@pytest.mark.skipif(
os.environ.get('UNDER_TIME_PRESSURE') == 'True',
os.environ.get("UNDER_TIME_PRESSURE") == "True",
reason="Skipping KEDA tests as we are under time pressure",
)
def test_keda(self):
@@ -390,8 +390,8 @@ class TestAddons(object):
"""
Test backup and restore commands.
"""
print('Checking dbctl backup and restore')
if os.path.exists('backupfile.tar.gz'):
os.remove('backupfile.tar.gz')
print("Checking dbctl backup and restore")
if os.path.exists("backupfile.tar.gz"):
os.remove("backupfile.tar.gz")
check_call("/snap/bin/microk8s.dbctl --debug backup -o backupfile".split())
check_call("/snap/bin/microk8s.dbctl --debug restore backupfile.tar.gz".split())

@@ -11,8 +11,8 @@ from os import path
# the test will attempt a refresh to the channel requested for testing
# reuse_vms = ['vm-ldzcjb', 'vm-nfpgea', 'vm-pkgbtw']
reuse_vms = None
channel_to_test = os.environ.get('CHANNEL_TO_TEST', 'edge/ha-preview')
backend = os.environ.get('BACKEND', None)
channel_to_test = os.environ.get("CHANNEL_TO_TEST", "edge/ha-preview")
backend = os.environ.get("BACKEND", None)


class VM:
@@ -26,43 +26,43 @@ class VM:
If `attach_vm` is provided we just make sure the right MicroK8s is deployed.
:param attach_vm: the name of the VM we want to reuse
"""
rnd_letters = ''.join(random.choice(string.ascii_lowercase) for i in range(6))
rnd_letters = "".join(random.choice(string.ascii_lowercase) for i in range(6))
self.backend = "none"
self.vm_name = "vm-{}".format(rnd_letters)
if attach_vm:
self.vm_name = attach_vm

if path.exists('/snap/bin/multipass') or backend == 'multipass':
print('Creating multipass VM')
if path.exists("/snap/bin/multipass") or backend == "multipass":
print("Creating multipass VM")
self.backend = "multipass"
if not attach_vm:
subprocess.check_call(
'/snap/bin/multipass launch 18.04 -n {} -m 2G'.format(self.vm_name).split()
"/snap/bin/multipass launch 18.04 -n {} -m 2G".format(self.vm_name).split()
)
subprocess.check_call(
'/snap/bin/multipass exec {} -- sudo '
'snap install microk8s --classic --channel {}'.format(
"/snap/bin/multipass exec {} -- sudo "
"snap install microk8s --classic --channel {}".format(
self.vm_name, channel_to_test
).split()
)
else:
subprocess.check_call(
'/snap/bin/multipass exec {} -- sudo '
'snap refresh microk8s --channel {}'.format(
"/snap/bin/multipass exec {} -- sudo "
"snap refresh microk8s --channel {}".format(
self.vm_name, channel_to_test
).split()
)

elif path.exists('/snap/bin/lxc') or backend == 'lxc':
elif path.exists("/snap/bin/lxc") or backend == "lxc":
self.backend = "lxc"
if not attach_vm:
profiles = subprocess.check_output('/snap/bin/lxc profile list'.split())
if 'microk8s' not in profiles.decode():
subprocess.check_call('/snap/bin/lxc profile copy default microk8s'.split())
with open('lxc/microk8s-zfs.profile', "r+") as fp:
profiles = subprocess.check_output("/snap/bin/lxc profile list".split())
if "microk8s" not in profiles.decode():
subprocess.check_call("/snap/bin/lxc profile copy default microk8s".split())
with open("lxc/microk8s-zfs.profile", "r+") as fp:
profile_string = fp.read()
process = subprocess.Popen(
'/snap/bin/lxc profile edit microk8s'.split(),
"/snap/bin/lxc profile edit microk8s".split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
@@ -70,17 +70,17 @@ class VM:
process.stdin.close()

subprocess.check_call(
'/snap/bin/lxc launch -p default -p microk8s ubuntu:18.04 {}'.format(
"/snap/bin/lxc launch -p default -p microk8s ubuntu:18.04 {}".format(
self.vm_name
).split()
)
cmd_prefix = '/snap/bin/lxc exec {} -- script -e -c'.format(self.vm_name).split()
cmd = ['snap install microk8s --classic --channel {}'.format(channel_to_test)]
cmd_prefix = "/snap/bin/lxc exec {} -- script -e -c".format(self.vm_name).split()
cmd = ["snap install microk8s --classic --channel {}".format(channel_to_test)]
time.sleep(20)
subprocess.check_output(cmd_prefix + cmd)
else:
cmd = '/snap/bin/lxc exec {} -- '.format(self.vm_name).split()
cmd.append('sudo snap refresh microk8s --channel {}'.format(channel_to_test))
cmd = "/snap/bin/lxc exec {} -- ".format(self.vm_name).split()
cmd.append("sudo snap refresh microk8s --channel {}".format(channel_to_test))
subprocess.check_call(cmd)

else:
@@ -94,11 +94,11 @@ class VM:
"""
if self.backend == "multipass":
output = subprocess.check_output(
'/snap/bin/multipass exec {} -- sudo ' '{}'.format(self.vm_name, cmd).split()
"/snap/bin/multipass exec {} -- sudo " "{}".format(self.vm_name, cmd).split()
)
return output
elif self.backend == "lxc":
cmd_prefix = '/snap/bin/lxc exec {} -- script -e -c '.format(self.vm_name).split()
cmd_prefix = "/snap/bin/lxc exec {} -- script -e -c ".format(self.vm_name).split()
output = subprocess.check_output(cmd_prefix + [cmd])
return output
else:
@@ -110,11 +110,11 @@ class VM:
"""
print("Destroying VM in {}".format(self.backend))
if self.backend == "multipass":
subprocess.check_call('/snap/bin/multipass stop {}'.format(self.vm_name).split())
subprocess.check_call('/snap/bin/multipass delete {}'.format(self.vm_name).split())
subprocess.check_call("/snap/bin/multipass stop {}".format(self.vm_name).split())
subprocess.check_call("/snap/bin/multipass delete {}".format(self.vm_name).split())
elif self.backend == "lxc":
subprocess.check_call('/snap/bin/lxc stop {}'.format(self.vm_name).split())
subprocess.check_call('/snap/bin/lxc delete {}'.format(self.vm_name).split())
subprocess.check_call("/snap/bin/lxc stop {}".format(self.vm_name).split())
subprocess.check_call("/snap/bin/lxc delete {}".format(self.vm_name).split())


class TestCluster(object):
@@ -130,10 +130,10 @@ class TestCluster(object):
if not reuse_vms:
size = 3
for i in range(0, size):
print('Creating machine {}'.format(i))
print("Creating machine {}".format(i))
vm = VM()
print('Waiting for machine {}'.format(i))
vm.run('/snap/bin/microk8s.status --wait-ready --timeout 120')
print("Waiting for machine {}".format(i))
vm.run("/snap/bin/microk8s.status --wait-ready --timeout 120")
self.VM.append(vm)
else:
for vm_name in reuse_vms:
@@ -141,31 +141,31 @@ class TestCluster(object):

# Form cluster
vm_master = self.VM[0]
connected_nodes = vm_master.run('/snap/bin/microk8s.kubectl get no')
connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no")
for vm in self.VM:
if vm.vm_name in connected_nodes.decode():
continue
else:
print('Adding machine {} to cluster'.format(vm.vm_name))
add_node = vm_master.run('/snap/bin/microk8s.add-node')
endpoint = [ep for ep in add_node.decode().split() if ':25000/' in ep]
vm.run('/snap/bin/microk8s.join {}'.format(endpoint[0]))
print("Adding machine {} to cluster".format(vm.vm_name))
add_node = vm_master.run("/snap/bin/microk8s.add-node")
endpoint = [ep for ep in add_node.decode().split() if ":25000/" in ep]
vm.run("/snap/bin/microk8s.join {}".format(endpoint[0]))

# Wait for nodes to be ready
print('Waiting for nodes to register')
connected_nodes = vm_master.run('/snap/bin/microk8s.kubectl get no')
while 'NotReady' in connected_nodes.decode():
print("Waiting for nodes to register")
connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no")
while "NotReady" in connected_nodes.decode():
time.sleep(5)
connected_nodes = vm_master.run('/snap/bin/microk8s.kubectl get no')
connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no")
print(connected_nodes.decode())

# Wait for CNI pods
print('Waiting for cni')
print("Waiting for cni")
while True:
ready_pods = 0
pods = vm_master.run('/snap/bin/microk8s.kubectl get po -n kube-system -o wide')
pods = vm_master.run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for line in pods.decode().splitlines():
if 'calico' in line and 'Running' in line:
if "calico" in line and "Running" in line:
ready_pods += 1
if ready_pods == (len(self.VM) + 1):
print(pods.decode())
@@ -185,12 +185,12 @@ class TestCluster(object):
"""
Test each node has a calico pod.
"""
print('Checking calico is in all nodes')
pods = self.VM[0].run('/snap/bin/microk8s.kubectl get po -n kube-system -o wide')
print("Checking calico is in all nodes")
pods = self.VM[0].run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for vm in self.VM:
if vm.vm_name not in pods.decode():
assert False
print('Calico found in node {}'.format(vm.vm_name))
print("Calico found in node {}".format(vm.vm_name))

def test_nodes_in_ha(self):
"""
@@ -198,34 +198,34 @@ class TestCluster(object):
"""
# All nodes see the same pods
for vm in self.VM:
pods = vm.run('/snap/bin/microk8s.kubectl get po -n kube-system -o wide')
pods = vm.run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for other_vm in self.VM:
if other_vm.vm_name not in pods.decode():
assert False
print('All nodes see the same pods')
print("All nodes see the same pods")

attempt = 100
while True:
assert attempt > 0
for vm in self.VM:
status = vm.run('/snap/bin/microk8s.status')
status = vm.run("/snap/bin/microk8s.status")
if "high-availability: yes" not in status.decode():
attempt += 1
continue
break

# remove a node
print('Removing machine {}'.format(self.VM[0].vm_name))
self.VM[0].run('/snap/bin/microk8s.leave')
self.VM[1].run('/snap/bin/microk8s.remove-node {}'.format(self.VM[0].vm_name))
print("Removing machine {}".format(self.VM[0].vm_name))
self.VM[0].run("/snap/bin/microk8s.leave")
self.VM[1].run("/snap/bin/microk8s.remove-node {}".format(self.VM[0].vm_name))
# allow for some time for the leader to hand over leadership
time.sleep(10)
attempt = 100
while True:
ready_pods = 0
pods = self.VM[1].run('/snap/bin/microk8s.kubectl get po -n kube-system -o wide')
pods = self.VM[1].run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for line in pods.decode().splitlines():
if 'calico' in line and 'Running' in line:
if "calico" in line and "Running" in line:
ready_pods += 1
if ready_pods == (len(self.VM)):
print(pods.decode())
@@ -234,14 +234,14 @@ class TestCluster(object):
if attempt <= 0:
assert False
time.sleep(5)
print('Checking calico is on the nodes running')
print("Checking calico is on the nodes running")

leftVMs = [self.VM[1], self.VM[2]]
attempt = 100
while True:
assert attempt > 0
for vm in leftVMs:
status = vm.run('/snap/bin/microk8s.status')
status = vm.run("/snap/bin/microk8s.status")
if "high-availability: no" not in status.decode():
attempt += 1
time.sleep(2)
@@ -249,23 +249,23 @@ class TestCluster(object):
break

for vm in leftVMs:
pods = vm.run('/snap/bin/microk8s.kubectl get po -n kube-system -o wide')
pods = vm.run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for other_vm in leftVMs:
if other_vm.vm_name not in pods.decode():
time.sleep(2)
assert False
print('Remaining nodes see the same pods')
print("Remaining nodes see the same pods")

print('Waiting for two ingress to appear')
self.VM[1].run('/snap/bin/microk8s.enable ingress')
print("Waiting for two ingress to appear")
self.VM[1].run("/snap/bin/microk8s.enable ingress")
# wait for two ingress to appear
time.sleep(10)
attempt = 100
while True:
ready_pods = 0
pods = self.VM[1].run('/snap/bin/microk8s.kubectl get po -A -o wide')
pods = self.VM[1].run("/snap/bin/microk8s.kubectl get po -A -o wide")
for line in pods.decode().splitlines():
if 'ingress' in line and 'Running' in line:
if "ingress" in line and "Running" in line:
ready_pods += 1
if ready_pods == (len(self.VM) - 1):
print(pods.decode())
@@ -275,22 +275,22 @@ class TestCluster(object):
assert False
time.sleep(5)

print('Rejoin the node')
add_node = self.VM[1].run('/snap/bin/microk8s.add-node')
endpoint = [ep for ep in add_node.decode().split() if ':25000/' in ep]
self.VM[0].run('/snap/bin/microk8s.join {}'.format(endpoint[0]))
print("Rejoin the node")
add_node = self.VM[1].run("/snap/bin/microk8s.add-node")
endpoint = [ep for ep in add_node.decode().split() if ":25000/" in ep]
self.VM[0].run("/snap/bin/microk8s.join {}".format(endpoint[0]))

print('Waiting for nodes to be ready')
connected_nodes = self.VM[0].run('/snap/bin/microk8s.kubectl get no')
while 'NotReady' in connected_nodes.decode():
print("Waiting for nodes to be ready")
connected_nodes = self.VM[0].run("/snap/bin/microk8s.kubectl get no")
while "NotReady" in connected_nodes.decode():
time.sleep(5)
connected_nodes = self.VM[0].run('/snap/bin/microk8s.kubectl get no')
connected_nodes = self.VM[0].run("/snap/bin/microk8s.kubectl get no")

attempt = 100
while True:
assert attempt > 0
for vm in self.VM:
status = vm.run('/snap/bin/microk8s.status')
status = vm.run("/snap/bin/microk8s.status")
if "high-availability: yes" not in status.decode():
attempt += 1
time.sleep(2)

@@ -7,9 +7,9 @@ from utils import (
run_until_success,
)

upgrade_from = os.environ.get('UPGRADE_MICROK8S_FROM', 'beta')
upgrade_from = os.environ.get("UPGRADE_MICROK8S_FROM", "beta")
# Have UPGRADE_MICROK8S_TO point to a file to upgrade to that file
upgrade_to = os.environ.get('UPGRADE_MICROK8S_TO', 'edge')
upgrade_to = os.environ.get("UPGRADE_MICROK8S_TO", "edge")


class TestUpgradePath(object):
@@ -18,7 +18,7 @@ class TestUpgradePath(object):
"""

@pytest.mark.skipif(
os.environ.get('UNDER_TIME_PRESSURE') == 'True',
os.environ.get("UNDER_TIME_PRESSURE") == "True",
reason="Skipping refresh path test as we are under time pressure",
)
def test_refresh_path(self):
@@ -28,7 +28,7 @@ class TestUpgradePath(object):
"""
start_channel = 16
last_stable_minor = None
if upgrade_from.startswith('latest') or '/' not in upgrade_from:
if upgrade_from.startswith("latest") or "/" not in upgrade_from:
attempt = 0
release_url = "https://dl.k8s.io/release/stable.txt"
while attempt < 10 and not last_stable_minor:
@@ -36,14 +36,14 @@ class TestUpgradePath(object):
if r.status_code == 200:
last_stable_str = r.content.decode().strip()
# We have "v1.18.4" and we need the "18"
last_stable_parts = last_stable_str.split('.')
last_stable_parts = last_stable_str.split(".")
last_stable_minor = int(last_stable_parts[1])
else:
time.sleep(3)
attempt += 1
else:
channel_parts = upgrade_from.split('.')
channel_parts = channel_parts[1].split('/')
channel_parts = upgrade_from.split(".")
channel_parts = channel_parts[1].split("/")
print(channel_parts)
last_stable_minor = int(channel_parts[0])

@@ -74,7 +74,7 @@ class TestUpgradePath(object):
channel_minor += 1

print("Installing {}".format(upgrade_to))
if upgrade_to.endswith('.snap'):
if upgrade_to.endswith(".snap"):
cmd = "sudo snap install {} --classic --dangerous".format(upgrade_to)
else:
cmd = "sudo snap refresh microk8s --channel={}".format(upgrade_to)

@@ -21,10 +21,10 @@ from utils import (
run_until_success,
)

upgrade_from = os.environ.get('UPGRADE_MICROK8S_FROM', 'beta')
upgrade_from = os.environ.get("UPGRADE_MICROK8S_FROM", "beta")
# Have UPGRADE_MICROK8S_TO point to a file to upgrade to that file
upgrade_to = os.environ.get('UPGRADE_MICROK8S_TO', 'edge')
under_time_pressure = os.environ.get('UNDER_TIME_PRESSURE', 'False')
upgrade_to = os.environ.get("UPGRADE_MICROK8S_TO", "edge")
under_time_pressure = os.environ.get("UNDER_TIME_PRESSURE", "False")


class TestUpgrade(object):
@@ -60,58 +60,58 @@ class TestUpgrade(object):
enable = microk8s_enable("dashboard")
assert "Nothing to do for" not in enable
validate_dns_dashboard()
test_matrix['dns_dashboard'] = validate_dns_dashboard
test_matrix["dns_dashboard"] = validate_dns_dashboard
except CalledProcessError:
print('Will not test dns-dashboard')
print("Will not test dns-dashboard")

try:
enable = microk8s_enable("storage")
assert "Nothing to do for" not in enable
validate_storage()
test_matrix['storage'] = validate_storage
test_matrix["storage"] = validate_storage
except CalledProcessError:
print('Will not test storage')
print("Will not test storage")

try:
enable = microk8s_enable("ingress")
assert "Nothing to do for" not in enable
validate_ingress()
test_matrix['ingress'] = validate_ingress
test_matrix["ingress"] = validate_ingress
except CalledProcessError:
print('Will not test ingress')
print("Will not test ingress")

try:
enable = microk8s_enable("gpu")
assert "Nothing to do for" not in enable
validate_gpu()
test_matrix['gpu'] = validate_gpu
test_matrix["gpu"] = validate_gpu
except CalledProcessError:
print('Will not test gpu')
print("Will not test gpu")

try:
enable = microk8s_enable("registry")
assert "Nothing to do for" not in enable
validate_registry()
test_matrix['registry'] = validate_registry
test_matrix["registry"] = validate_registry
except CalledProcessError:
print('Will not test registry')
print("Will not test registry")

try:
validate_forward()
test_matrix['forward'] = validate_forward
test_matrix["forward"] = validate_forward
except CalledProcessError:
print('Will not test port forward')
print("Will not test port forward")

try:
enable = microk8s_enable("metrics-server")
assert "Nothing to do for" not in enable
validate_metrics_server()
test_matrix['metrics_server'] = validate_metrics_server
test_matrix["metrics_server"] = validate_metrics_server
except CalledProcessError:
print('Will not test the metrics server')
print("Will not test the metrics server")

# AMD64 only tests
if platform.machine() == 'x86_64' and under_time_pressure == 'False':
if platform.machine() == "x86_64" and under_time_pressure == "False":
"""
# Prometheus operator on our lxc is crashlooping; disabling the test for now.
try:
@@ -138,17 +138,17 @@ class TestUpgrade(object):
enable = microk8s_enable("fluentd", timeout_insec=30)
assert "Nothing to do for" not in enable
validate_fluentd()
test_matrix['fluentd'] = validate_fluentd
test_matrix["fluentd"] = validate_fluentd
except CalledProcessError:
print('Will not test the fluentd')
print("Will not test the fluentd")

try:
enable = microk8s_enable("jaeger", timeout_insec=30)
assert "Nothing to do for" not in enable
validate_jaeger()
test_matrix['jaeger'] = validate_jaeger
test_matrix["jaeger"] = validate_jaeger
except CalledProcessError:
print('Will not test the jaeger addon')
print("Will not test the jaeger addon")

# We are not testing cilium because we want to test the upgrade of the default CNI
"""
@@ -167,7 +167,7 @@ class TestUpgrade(object):
enable = microk8s_enable("{}:{}".format("metallb", ip_ranges), timeout_insec=500)
assert "MetalLB is enabled" in enable and "Nothing to do for" not in enable
validate_metallb_config(ip_ranges)
test_matrix['metallb'] = validate_metallb_config
test_matrix["metallb"] = validate_metallb_config
except CalledProcessError:
print("Will not test the metallb addon")

@@ -185,7 +185,7 @@ class TestUpgrade(object):
"""

# Refresh the snap to the target
if upgrade_to.endswith('.snap'):
if upgrade_to.endswith(".snap"):
cmd = "sudo snap install {} --classic --dangerous".format(upgrade_to)
else:
cmd = "sudo snap refresh microk8s --channel={}".format(upgrade_to)
@@ -210,14 +210,14 @@ def is_container():

"""
try:
if os.path.isdir('/run/systemd/system'):
container = check_output('sudo systemd-detect-virt --container'.split())
if os.path.isdir("/run/systemd/system"):
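# systemd-detect-virt exits non-zero outside a container, so check_output raises and we land in the except branch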
container = check_output("sudo systemd-detect-virt --container".split())
print("Tests are running in {}".format(container))
return True
except CalledProcessError:
print("systemd-detect-virt did not detect a container")

if os.path.exists('/run/container_type'):
if os.path.exists("/run/container_type"):
return True

try:

@@ -6,7 +6,7 @@ import platform
from subprocess import check_output, CalledProcessError


arch_translate = {'aarch64': 'arm64', 'x86_64': 'amd64'}
arch_translate = {"aarch64": "arm64", "x86_64": "amd64"}


def run_until_success(cmd, timeout_insec=60, err_out=None):
@@ -23,10 +23,10 @@ def run_until_success(cmd, timeout_insec=60, err_out=None):
deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout_insec)
while True:
try:
output = check_output(cmd.split()).strip().decode('utf8')
return output.replace('\\n', '\n')
output = check_output(cmd.split()).strip().decode("utf8")
return output.replace("\\n", "\n")
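# a failing command whose output equals err_out is treated as success; anything else keeps retrying until the deadline computed above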
except CalledProcessError as err:
output = err.output.strip().decode('utf8').replace('\\n', '\n')
output = err.output.strip().decode("utf8").replace("\\n", "\n")
print(output)
if output == err_out:
return output
@@ -47,7 +47,7 @@ def kubectl(cmd, timeout_insec=300, err_out=None):
Returns: the kubectl response in a string

"""
cmd = '/snap/bin/microk8s.kubectl ' + cmd
cmd = "/snap/bin/microk8s.kubectl " + cmd
return run_until_success(cmd, timeout_insec, err_out)


@@ -60,10 +60,10 @@ def docker(cmd):
Returns: the docker response in a string

"""
docker_bin = '/usr/bin/docker'
if os.path.isfile('/snap/bin/microk8s.docker'):
docker_bin = '/snap/bin/microk8s.docker'
cmd = docker_bin + ' ' + cmd
docker_bin = "/usr/bin/docker"
if os.path.isfile("/snap/bin/microk8s.docker"):
docker_bin = "/snap/bin/microk8s.docker"
cmd = docker_bin + " " + cmd
return run_until_success(cmd)


@@ -77,7 +77,7 @@ def kubectl_get(target, timeout_insec=300):
Returns: YAML structured response

"""
cmd = 'get -o yaml ' + target
cmd = "get -o yaml " + target
output = kubectl(cmd, timeout_insec)
return yaml.load(output)
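# note: yaml.load without an explicit Loader is deprecated in PyYAML >= 5.1; yaml.safe_load would be a safer choice here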
@@ -95,22 +95,22 @@ def wait_for_pod_state(
raise TimeoutError(
"Pod {} not in {} after {} seconds.".format(pod, desired_state, timeout_insec)
)
cmd = 'po {} -n {}'.format(pod, namespace)
cmd = "po {} -n {}".format(pod, namespace)
if label:
cmd += ' -l {}'.format(label)
cmd += " -l {}".format(label)
data = kubectl_get(cmd, timeout_insec)
if pod == "":
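# an empty pod name means "select by label": the first matching pod is the one inspected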
if len(data['items']) > 0:
status = data['items'][0]['status']
if len(data["items"]) > 0:
status = data["items"][0]["status"]
else:
status = []
else:
status = data['status']
if 'containerStatuses' in status:
container_status = status['containerStatuses'][0]
state, details = list(container_status['state'].items())[0]
status = data["status"]
if "containerStatuses" in status:
container_status = status["containerStatuses"][0]
state, details = list(container_status["state"].items())[0]
if desired_reason:
reason = details.get('reason')
reason = details.get("reason")
if state == desired_state and reason == desired_reason:
break
elif state == desired_state:
@@ -123,18 +123,18 @@ def wait_for_installation(cluster_nodes=1, timeout_insec=360):
Wait for kubernetes service to appear.
"""
while True:
cmd = 'svc kubernetes'
cmd = "svc kubernetes"
data = kubectl_get(cmd, timeout_insec)
service = data['metadata']['name']
if 'kubernetes' in service:
service = data["metadata"]["name"]
if "kubernetes" in service:
break
else:
time.sleep(3)

while True:
cmd = 'get no'
cmd = "get no"
nodes = kubectl(cmd, timeout_insec)
if nodes.count(' Ready') == cluster_nodes:
if nodes.count(" Ready") == cluster_nodes:
break
else:
time.sleep(3)
@@ -152,9 +152,9 @@ def wait_for_namespace_termination(namespace, timeout_insec=360):
deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout_insec)
while True:
try:
cmd = '/snap/bin/microk8s.kubectl get ns {}'.format(namespace)
check_output(cmd.split()).strip().decode('utf8')
print('Waiting...')
cmd = "/snap/bin/microk8s.kubectl get ns {}".format(namespace)
check_output(cmd.split()).strip().decode("utf8")
print("Waiting...")
except CalledProcessError:
if datetime.datetime.now() > deadline:
raise
@@ -173,13 +173,13 @@ def microk8s_enable(addon, timeout_insec=300):

"""
# NVidia pre-check so as to not wait for a timeout.
if addon == 'gpu':
if addon == "gpu":
nv_out = run_until_success("lsmod", timeout_insec=10)
if "nvidia" not in nv_out:
print("Not a cuda capable system. Will not test gpu addon")
raise CalledProcessError(1, "Nothing to do for gpu")

cmd = '/snap/bin/microk8s.enable {}'.format(addon)
cmd = "/snap/bin/microk8s.enable {}".format(addon)
return run_until_success(cmd, timeout_insec)


@@ -191,7 +191,7 @@ def microk8s_disable(addon):
addon: name of the addon

"""
cmd = '/snap/bin/microk8s.disable {}'.format(addon)
cmd = "/snap/bin/microk8s.disable {}".format(addon)
return run_until_success(cmd, timeout_insec=300)


@@ -199,14 +199,14 @@ def microk8s_clustering_capable():
"""
Are we in a clustering capable microk8s?
"""
return os.path.isfile('/snap/bin/microk8s.join')
return os.path.isfile("/snap/bin/microk8s.join")


def microk8s_reset(cluster_nodes=1):
"""
Call microk8s reset
"""
cmd = '/snap/bin/microk8s.reset'
cmd = "/snap/bin/microk8s.reset"
run_until_success(cmd, timeout_insec=300)
wait_for_installation(cluster_nodes)

@@ -220,6 +220,6 @@ def update_yaml_with_arch(manifest_file):
with open(manifest_file) as f:
s = f.read()

with open(manifest_file, 'w') as f:
s = s.replace('$ARCH', arch)
with open(manifest_file, "w") as f:
s = s.replace("$ARCH", arch)
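# "$ARCH" placeholders in the manifest become e.g. "amd64"; arch presumably comes from the arch_translate mapping above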
f.write(s)

@@ -148,7 +148,7 @@ def validate_ambassador():
Validate the Ambassador API Gateway by creating an ingress rule.
"""
|
||||
|
||||
if platform.machine() != 'x86_64':
|
||||
if platform.machine() != "x86_64":
|
||||
print("Ambassador tests are only relevant in x86 architectures")
|
||||
return
|
||||
|
||||
@@ -173,7 +173,7 @@ def validate_gpu():
|
||||
"""
|
||||
Validate gpu by trying a cuda-add.
|
||||
"""
|
||||
if platform.machine() != 'x86_64':
|
||||
if platform.machine() != "x86_64":
|
||||
print("GPU tests are only relevant in x86 architectures")
|
||||
return
|
||||
|
||||
@@ -197,7 +197,7 @@ def validate_istio():
|
||||
"""
|
||||
Validate istio by deploying the bookinfo app.
|
||||
"""
|
||||
if platform.machine() != 'x86_64':
|
||||
if platform.machine() != "x86_64":
|
||||
print("Istio tests are only relevant in x86 architectures")
|
||||
return
|
||||
|
||||
@@ -223,7 +223,7 @@ def validate_knative():
|
||||
"""
|
||||
Validate Knative by deploying the helloworld-go app.
|
||||
"""
|
||||
if platform.machine() != 'x86_64':
|
||||
if platform.machine() != "x86_64":
|
||||
print("Knative tests are only relevant in x86 architectures")
|
||||
return
|
||||
|
||||
@@ -251,7 +251,7 @@ def validate_registry():
|
||||
wait_for_pod_state("", "container-registry", "running", label="app=registry")
|
||||
pvc_stdout = kubectl("get pvc registry-claim -n container-registry -o yaml")
|
||||
pvc_yaml = yaml.safe_load(pvc_stdout)
|
||||
storage = pvc_yaml['spec']['resources']['requests']['storage']
|
||||
storage = pvc_yaml["spec"]["resources"]["requests"]["storage"]
|
||||
assert re.match("(^[2-9][0-9]{1,}|^[1-9][0-9]{2,})(Gi$)", storage)
|
||||
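# i.e. the registry claim should request at least 20Gi of storage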
docker("pull busybox")
|
||||
docker("tag busybox localhost:32000/my-busybox")
|
||||
@@ -274,8 +274,8 @@ def validate_forward():
|
||||
manifest = os.path.join(here, "templates", "nginx-pod.yaml")
|
||||
kubectl("apply -f {}".format(manifest))
|
||||
wait_for_pod_state("", "default", "running", label="app=nginx")
|
||||
os.system('killall kubectl')
|
||||
os.system('/snap/bin/microk8s.kubectl port-forward pod/nginx 5123:80 &')
|
||||
os.system("killall kubectl")
|
||||
os.system("/snap/bin/microk8s.kubectl port-forward pod/nginx 5123:80 &")
|
||||
attempt = 10
|
||||
while attempt >= 0:
|
||||
try:
|
||||
@@ -313,7 +313,7 @@ def validate_prometheus():
|
||||
"""
|
||||
Validate the prometheus operator
|
||||
"""
|
||||
if platform.machine() != 'x86_64':
|
||||
if platform.machine() != "x86_64":
|
||||
print("Prometheus tests are only relevant in x86 architectures")
|
||||
return
|
||||
|
||||
@@ -325,7 +325,7 @@ def validate_fluentd():
|
||||
"""
|
||||
Validate fluentd
|
||||
"""
|
||||
if platform.machine() != 'x86_64':
|
||||
if platform.machine() != "x86_64":
|
||||
print("Fluentd tests are only relevant in x86 architectures")
|
||||
return
|
||||
|
||||
@@ -338,7 +338,7 @@ def validate_jaeger():
|
||||
"""
|
||||
Validate the jaeger operator
|
||||
"""
|
||||
if platform.machine() != 'x86_64':
|
||||
if platform.machine() != "x86_64":
|
||||
print("Jaeger tests are only relevant in x86 architectures")
|
||||
return
|
||||
|
||||
@@ -361,7 +361,7 @@ def validate_linkerd():
|
||||
"""
|
||||
Validate Linkerd by deploying emojivoto.
|
||||
"""
|
||||
if platform.machine() != 'x86_64':
|
||||
if platform.machine() != "x86_64":
|
||||
print("Linkerd tests are only relevant in x86 architectures")
|
||||
return
|
||||
|
||||
@@ -393,7 +393,7 @@ def validate_rbac():
|
||||
"""
|
||||
Validate RBAC is actually on
|
||||
"""
|
||||
output = kubectl("auth can-i --as=system:serviceaccount:default:default view pod", err_out='no')
|
||||
output = kubectl("auth can-i --as=system:serviceaccount:default:default view pod", err_out="no")
|
||||
assert "no" in output
|
||||
output = kubectl("auth can-i --as=admin --as-group=system:masters view pod")
|
||||
assert "yes" in output
|
||||
@@ -409,7 +409,7 @@ def cilium(cmd, timeout_insec=300, err_out=None):

Returns: the cilium response in a string
"""
cmd = '/snap/bin/microk8s.cilium ' + cmd
cmd = "/snap/bin/microk8s.cilium " + cmd
return run_until_success(cmd, timeout_insec, err_out)


@@ -417,7 +417,7 @@ def validate_cilium():
"""
Validate cilium by deploying the bookinfo app.
"""
if platform.machine() != 'x86_64':
if platform.machine() != "x86_64":
print("Cilium tests are only relevant in x86 architectures")
return

@@ -431,7 +431,7 @@ def validate_cilium():
for attempt in range(0, 10):
kubectl("apply -f {}".format(manifest))
wait_for_pod_state("", "default", "running", label="app=nginx")
output = cilium('endpoint list -o json', timeout_insec=20)
output = cilium("endpoint list -o json", timeout_insec=20)
if "nginx" in output:
kubectl("delete -f {}".format(manifest))
break
@@ -458,9 +458,9 @@ def validate_multus():
manifest = os.path.join(here, "templates", "multus-alpine.yaml")
kubectl("apply -f {}".format(manifest))
wait_for_pod_state("", "default", "running", label="app=multus-alpine")
output = kubectl("exec multus-alpine -- ifconfig eth1", timeout_insec=900, err_out='no')
output = kubectl("exec multus-alpine -- ifconfig eth1", timeout_insec=900, err_out="no")
assert "10.111.111.111" in output
output = kubectl("exec multus-alpine -- ifconfig eth2", timeout_insec=900, err_out='no')
output = kubectl("exec multus-alpine -- ifconfig eth2", timeout_insec=900, err_out="no")
assert "10.222.222.222" in output
kubectl("delete -f {}".format(manifest))
kubectl("delete -f {}".format(networks))
@@ -471,7 +471,7 @@ def validate_kubeflow():
"""
Validate kubeflow
"""
if platform.machine() != 'x86_64':
if platform.machine() != "x86_64":
print("Kubeflow tests are only relevant in x86 architectures")
return

@@ -482,7 +482,7 @@ def validate_metallb_config(ip_ranges="192.168.0.105"):
"""
Validate Metallb
"""
if platform.machine() != 'x86_64':
if platform.machine() != "x86_64":
print("Metallb tests are only relevant in x86 architectures")
return
out = kubectl("get configmap config -n metallb-system -o jsonpath='{.data.config}'")

@@ -16,7 +16,7 @@ class TestMicrok8sBranches(object):
"""
upstream_version = self._upstream_release()
assert upstream_version
version_parts = upstream_version.split('.')
version_parts = upstream_version.split(".")
major_minor_upstream_version = "{}.{}".format(version_parts[0][1:], version_parts[1])
if version_parts[1] != "0":
prev_major_minor_version = "{}.{}".format(