From 454e9c5f8664ea99ccea2417b6cc3ffb238cf834 Mon Sep 17 00:00:00 2001 From: RHE Date: Tue, 26 Dec 2017 13:35:54 +0100 Subject: moon v4 re-organization Change-Id: I73665f739f35ae18175f98d0739567e403c1fa80 Signed-off-by: RHE --- tools/bin/README.md | 5 + tools/bin/bootstrap.py | 235 ++++++++++ tools/bin/build_all.sh | 36 ++ tools/bin/build_all_pip.sh | 16 + tools/bin/delete_orchestrator.sh | 63 +++ tools/bin/moon_lib_update.sh | 43 ++ tools/bin/set_auth.src | 7 + tools/bin/start.sh | 39 ++ tools/moon/moon.conf | 87 ++++ tools/moon_keystone/Dockerfile | 25 ++ tools/moon_keystone/README.md | 26 ++ tools/moon_keystone/run.sh | 81 ++++ tools/moon_kubernetes/README.md | 82 ++++ tools/moon_kubernetes/conf/password_moon.txt | 1 + tools/moon_kubernetes/conf/password_root.txt | 1 + tools/moon_kubernetes/conf/ports.conf | 24 + tools/moon_kubernetes/init_k8s.sh | 33 ++ tools/moon_kubernetes/start_moon.sh | 37 ++ tools/moon_kubernetes/templates/consul.yaml | 33 ++ tools/moon_kubernetes/templates/db.yaml | 84 ++++ tools/moon_kubernetes/templates/keystone.yaml | 39 ++ tools/moon_kubernetes/templates/kube-dns.yaml | 183 ++++++++ .../templates/moon_configuration.yaml | 25 ++ tools/moon_kubernetes/templates/moon_gui.yaml | 42 ++ tools/moon_kubernetes/templates/moon_manager.yaml | 33 ++ .../templates/moon_orchestrator.yaml | 40 ++ tools/openstack/README.md | 73 +++ tools/openstack/glance/policy.json | 62 +++ tools/openstack/nova/policy.json | 488 +++++++++++++++++++++ 29 files changed, 1943 insertions(+) create mode 100644 tools/bin/README.md create mode 100644 tools/bin/bootstrap.py create mode 100644 tools/bin/build_all.sh create mode 100644 tools/bin/build_all_pip.sh create mode 100644 tools/bin/delete_orchestrator.sh create mode 100644 tools/bin/moon_lib_update.sh create mode 100644 tools/bin/set_auth.src create mode 100755 tools/bin/start.sh create mode 100644 tools/moon/moon.conf create mode 100644 tools/moon_keystone/Dockerfile create mode 100644 tools/moon_keystone/README.md create mode 100644 tools/moon_keystone/run.sh create mode 100644 tools/moon_kubernetes/README.md create mode 100644 tools/moon_kubernetes/conf/password_moon.txt create mode 100644 tools/moon_kubernetes/conf/password_root.txt create mode 100644 tools/moon_kubernetes/conf/ports.conf create mode 100644 tools/moon_kubernetes/init_k8s.sh create mode 100644 tools/moon_kubernetes/start_moon.sh create mode 100644 tools/moon_kubernetes/templates/consul.yaml create mode 100644 tools/moon_kubernetes/templates/db.yaml create mode 100644 tools/moon_kubernetes/templates/keystone.yaml create mode 100644 tools/moon_kubernetes/templates/kube-dns.yaml create mode 100644 tools/moon_kubernetes/templates/moon_configuration.yaml create mode 100644 tools/moon_kubernetes/templates/moon_gui.yaml create mode 100644 tools/moon_kubernetes/templates/moon_manager.yaml create mode 100644 tools/moon_kubernetes/templates/moon_orchestrator.yaml create mode 100644 tools/openstack/README.md create mode 100644 tools/openstack/glance/policy.json create mode 100644 tools/openstack/nova/policy.json (limited to 'tools') diff --git a/tools/bin/README.md b/tools/bin/README.md new file mode 100644 index 00000000..3125c468 --- /dev/null +++ b/tools/bin/README.md @@ -0,0 +1,5 @@ +# Automated Tools/Scripts + +## moon_utilities_update +- update moon_utilities to PIP: `./moon_utilities_update.sh upload` +- locally update moon_utilities for each moon Python package: `./moon_utilities_update.sh copy` \ No newline at end of file diff --git a/tools/bin/bootstrap.py 
b/tools/bin/bootstrap.py new file mode 100644 index 00000000..6f2a5e03 --- /dev/null +++ b/tools/bin/bootstrap.py @@ -0,0 +1,235 @@ +import os +import sys +import time +import requests +import yaml +import logging +import json +import base64 +import mysql.connector +import re +import subprocess + +logging.basicConfig(level=logging.INFO) +log = logging.getLogger("moon.bootstrap") +requests_log = logging.getLogger("requests.packages.urllib3") +requests_log.setLevel(logging.WARNING) +requests_log.propagate = True + +if len(sys.argv) == 2: + if os.path.isfile(sys.argv[1]): + CONF_FILENAME = sys.argv[1] + CONSUL_HOST = "consul" + else: + CONF_FILENAME = "moon.conf" + CONSUL_HOST = sys.argv[1] + CONSUL_PORT = 8500 +else: + CONSUL_HOST = sys.argv[1] if len(sys.argv) > 1 else "consul" + CONSUL_PORT = sys.argv[2] if len(sys.argv) > 2 else 8500 + CONF_FILENAME = sys.argv[3] if len(sys.argv) > 3 else "moon.conf" +HEADERS = {"content-type": "application/json"} + + +def search_config_file(): + data_config = None + for _file in ( + CONF_FILENAME, + "conf/moon.conf", + "../moon.conf", + "../conf/moon.conf", + "/etc/moon/moon.conf", + ): + try: + data_config = yaml.safe_load(open(_file)) + except FileNotFoundError: + data_config = None + continue + else: + break + if not data_config: + raise Exception("Configuration file not found...") + return data_config + + +def put(key, value): + url = "http://{host}:{port}/v1/kv/{key}".format(host=CONSUL_HOST, port=CONSUL_PORT, key=key) + log.info(url) + req = requests.put( + url, + headers=HEADERS, + json=value + ) + if req.status_code != 200: + raise Exception("Error connecting to Consul ({}, {})".format(req.status_code, req.text)) + + +def get(key): + url = "http://{host}:{port}/v1/kv/{key}".format(host=CONSUL_HOST, port=CONSUL_PORT, key=key) + req = requests.get(url) + data = req.json() + for item in data: + log.info("{} {} -> {}".format( + req.status_code, + item["Key"], + json.loads(base64.b64decode(item["Value"]).decode("utf-8")) + )) + yield json.loads(base64.b64decode(item["Value"]).decode("utf-8")) + + +def start_consul(data_config): + cmd = ["docker", "run", "-d", "--net=moon", "--name=consul", "--hostname=consul", "-p", "8500:8500", "consul"] + output = subprocess.run(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + if output.returncode != 0: + log.info(" ".join(cmd)) + log.info(output.returncode) + log.error(output.stderr) + log.error(output.stdout) + raise Exception("Error starting Consul container!") + while True: + try: + req = requests.get("http://{}:{}/ui".format(CONSUL_HOST, CONSUL_PORT)) + except requests.exceptions.ConnectionError: + log.info("Waiting for Consul ({}:{})".format(CONSUL_HOST, CONSUL_PORT)) + time.sleep(1) + continue + else: + break + # if req.status_code in (302, 200): + # break + # log.info("Waiting for Consul ({}:{})".format(CONSUL_HOST, CONSUL_PORT)) + # time.sleep(1) + log.info("Consul is up") + + req = requests.get("http://{}:{}/v1/kv/database".format(CONSUL_HOST, CONSUL_PORT)) + if req.status_code == 200: + log.info("Consul is already populated") + return + + put("database", data_config["database"]) + put("messenger", data_config["messenger"]) + put("slave", data_config["slave"]) + put("docker", data_config["docker"]) + put("logging", data_config["logging"]) + put("components_port_start", data_config["components"]["port_start"]) + + for _key, _value in data_config["components"].items(): + if type(_value) is dict: + put("components/{}".format(_key), data_config["components"][_key]) + + for _key, _value in 
data_config["plugins"].items(): + put("plugins/{}".format(_key), data_config["plugins"][_key]) + + for _key, _value in data_config["openstack"].items(): + put("openstack/{}".format(_key), data_config["openstack"][_key]) + + +def start_database(): + cmd = ["docker", "run", "-dti", "--net=moon", "--hostname=db", "--name=db", + "-e", "MYSQL_ROOT_PASSWORD=p4sswOrd1", "-e", "MYSQL_DATABASE=moon", "-e", "MYSQL_USER=moon", + "-e", "MYSQL_PASSWORD=p4sswOrd1", "-p", "3306:3306", "mysql:latest"] + output = subprocess.run(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + if output.returncode != 0: + log.info(cmd) + log.error(output.stderr) + log.error(output.stdout) + raise Exception("Error starting DB container!") + for database in get("database"): + database_url = database['url'] + match = re.search("(?P^[\\w+]+):\/\/(?P\\w+):(?P.+)@(?P\\w+):*(?P\\d*)", + database_url) + config = match.groupdict() + while True: + try: + conn = mysql.connector.connect( + host=config["host"], + user=config["user"], + password=config["password"], + database="moon" + ) + conn.close() + except mysql.connector.errors.InterfaceError: + log.info("Waiting for Database ({})".format(config["host"])) + time.sleep(1) + continue + else: + log.info("Database is up, populating it...") + output = subprocess.run(["moon_db_manager", "upgrade"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + if output.returncode != 0: + raise Exception("Error populating the database!") + break + + +def start_keystone(): + output = subprocess.run(["docker", "run", "-dti", "--net=moon", "--hostname=keystone", "--name=keystone", + "-e", "DB_HOST=db", "-e", "DB_PASSWORD_ROOT=p4sswOrd1", "-p", "35357:35357", + "-p", "5000:5000", "keystone:mitaka"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + if output.returncode != 0: + raise Exception("Error starting Keystone container!") + # TODO: Keystone answers request too quickly + # even if it is not fully loaded + # we must test if a token retrieval is possible or not + # to see if Keystone is truly up and running + for config in get("openstack/keystone"): + while True: + try: + time.sleep(1) + req = requests.get(config["url"]) + except requests.exceptions.ConnectionError: + log.info("Waiting for Keystone ({})".format(config["url"])) + time.sleep(1) + continue + else: + log.info("Keystone is up") + break + + +def start_moon(data_config): + cmds = [ + # ["docker", "run", "-dti", "--net=moon", "--name=wrapper", "--hostname=wrapper", "-p", + # "{0}:{0}".format(data_config['components']['wrapper']['port']), + # data_config['components']['wrapper']['container']], + ["docker", "run", "-dti", "--net=moon", "--name=manager", + "--hostname=manager", "-p", + "{0}:{0}".format(data_config['components']['manager']['port']), + data_config['components']['manager']['container']], + ["docker", "run", "-dti", "--net=moon", "--name=interface", + "--hostname=interface", "-p", + "{0}:{0}".format(data_config['components']['interface']['port']), + data_config['components']['interface']['container']], + ] + for cmd in cmds: + log.warning("Start {}".format(cmd[-1])) + # answer = input() + # if answer.lower() in ("y", "yes", "o", "oui"): + output = subprocess.run(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + time.sleep(3) + if output.returncode != 0: + log.info(" ".join(cmd)) + log.info(output.returncode) + log.error(output.stderr) + log.error(output.stdout) + raise Exception("Error starting {} container!".format(cmd[-1])) + subprocess.run(["docker", "ps"]) + + +def main(): + data_config = 
search_config_file() + subprocess.run(["docker", "rm", "-f", "consul", "db", "manager", "wrapper", "interface", "authz*", "keystone"]) + start_consul(data_config) + start_database() + start_keystone() + start_moon(data_config) + +main() + diff --git a/tools/bin/build_all.sh b/tools/bin/build_all.sh new file mode 100644 index 00000000..5bbf6a19 --- /dev/null +++ b/tools/bin/build_all.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +VERSION=v4.1 +export DOCKER_HOST=tcp://172.88.88.1:2376 + + +mkdir $MOON_HOME/moon_orchestrator/dist 2>/dev/null + +echo Building Moon_Orchestrator +cd $MOON_HOME/moon_orchestrator +docker build -t wukongsun/moon_orchestrator:${VERSION} . + +echo Building Moon_Interface +cd $MOON_HOME/moon_interface +docker build -t wukongsun/moon_interface:${VERSION} . + +echo Building Moon_Security_Router +cd $MOON_HOME/moon_secrouter +docker build -t wukongsun/moon_router:${VERSION} . + +echo Building Moon_Manager +cd $MOON_HOME/moon_manager +docker build -t wukongsun/moon_manager:${VERSION} . + +echo Building Moon_Authz +cd $MOON_HOME/moon_authz +docker build -t wukongsun/moon_authz:${VERSION} . + + +echo Building Moon_DB +cd $MOON_HOME/moon_db +python3 setup.py sdist bdist_wheel > /tmp/moon_db.log + +echo Building Moon_Utilities +cd $MOON_HOME/moon_utilities +python3 setup.py sdist bdist_wheel > /tmp/moon_utilities.log diff --git a/tools/bin/build_all_pip.sh b/tools/bin/build_all_pip.sh new file mode 100644 index 00000000..2b415bf0 --- /dev/null +++ b/tools/bin/build_all_pip.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + + +echo Building Moon_DB +cd $MOON_HOME/moon_db +python3 setup.py sdist bdist_wheel> /tmp/moon_db.log + + +echo Building Moon_Utilities +cd $MOON_HOME/moon_utilities +python3 setup.py sdist bdist_wheel> /tmp/moon_utilities.log + + +echo Building Moon_Orchestrator +cd $MOON_HOME/moon_orchestrator +python3 setup.py sdist bdist_wheel> /tmp/moon_orchestrator.log \ No newline at end of file diff --git a/tools/bin/delete_orchestrator.sh b/tools/bin/delete_orchestrator.sh new file mode 100644 index 00000000..95fcfddd --- /dev/null +++ b/tools/bin/delete_orchestrator.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +set +x + +kubectl delete -n moon -f kubernetes/templates/moon_orchestrator.yaml +for i in $(kubectl get deployments -n moon | grep wrapper | cut -d " " -f 1 | xargs); do + kubectl delete deployments/$i -n moon; +done +for i in $(kubectl get deployments -n moon | grep interface | cut -d " " -f 1 | xargs); do + kubectl delete deployments/$i -n moon; +done +for i in $(kubectl get deployments -n moon | grep authz | cut -d " " -f 1 | xargs); do + kubectl delete deployments/$i -n moon; +done +for i in $(kubectl get services -n moon | grep wrapper | cut -d " " -f 1 | xargs); do + kubectl delete services/$i -n moon; +done +for i in $(kubectl get services -n moon | grep interface | cut -d " " -f 1 | xargs); do + kubectl delete services/$i -n moon; +done +for i in $(kubectl get services -n moon | grep authz | cut -d " " -f 1 | xargs); do + kubectl delete services/$i -n moon; +done + +if [ "$1" = "build" ]; then + + DOCKER_ARGS="" + + cd moon_manager + docker build -t wukongsun/moon_manager:v4.3.1 . ${DOCKER_ARGS} + if [ "$2" = "push" ]; then + docker push wukongsun/moon_manager:v4.3.1 + fi + cd - + + cd moon_orchestrator + docker build -t wukongsun/moon_orchestrator:v4.3 . ${DOCKER_ARGS} + if [ "$2" = "push" ]; then + docker push wukongsun/moon_orchestrator:v4.3 + fi + cd - + + cd moon_interface + docker build -t wukongsun/moon_interface:v4.3 . 
${DOCKER_ARGS} + if [ "$2" = "push" ]; then + docker push wukongsun/moon_interface:v4.3 + fi + cd - + + cd moon_authz + docker build -t wukongsun/moon_authz:v4.3 . ${DOCKER_ARGS} + if [ "$2" = "push" ]; then + docker push wukongsun/moon_authz:v4.3 + fi + cd - + + cd moon_wrapper + docker build -t wukongsun/moon_wrapper:v4.3 . ${DOCKER_ARGS} + if [ "$2" = "push" ]; then + docker push wukongsun/moon_wrapper:v4.3 + fi + cd - +fi diff --git a/tools/bin/moon_lib_update.sh b/tools/bin/moon_lib_update.sh new file mode 100644 index 00000000..3925e336 --- /dev/null +++ b/tools/bin/moon_lib_update.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# usage: moon_update.sh {build,upload,copy} {python_moondb,python_moonutilities} + +CMD=$1 +COMPONENT=$2 +GPG_ID=$3 + +VERSION=${COMPONENT}-$(grep __version__ ${COMPONENT}/${COMPONENT}/__init__.py | cut -d "\"" -f 2) + +cd ${COMPONENT} + +python3 setup.py sdist bdist_wheel + +if [ "$CMD" = "upload" ]; then + # Instead of "A0A96E75", use your own GPG ID + rm dist/*.asc 2>/dev/null + gpg --detach-sign -u "${GPG_ID}" -a dist/${VERSION}-py3-none-any.whl + gpg --detach-sign -u "${GPG_ID}" -a dist/${VERSION/_/-}.tar.gz + twine upload dist/${VERSION}-py3-none-any.whl dist/${VERSION}-py3-none-any.whl.asc + twine upload dist/${VERSION/_/-}.tar.gz dist/${VERSION/_/-}.tar.gz.asc +fi + +rm -f ../moon_manager/dist/${COMPONENT}* +rm -f ../moon_orchestrator/dist/${COMPONENT}* +rm -f ../moon_wrapper/dist/${COMPONENT}* +rm -f ../moon_interface/dist/${COMPONENT}* +rm -f ../moon_authz/dist/${COMPONENT}* + + +if [ "$CMD" = "copy" ]; then + mkdir -p ../moon_manager/dist/ 2>/dev/null + cp -v dist/${VERSION}-py3-none-any.whl ../moon_manager/dist/ + mkdir -p ../moon_orchestrator/dist/ 2>/dev/null + cp -v dist/${VERSION}-py3-none-any.whl ../moon_orchestrator/dist/ + mkdir -p ../moon_wrapper/dist/ 2>/dev/null + cp -v dist/${VERSION}-py3-none-any.whl ../moon_wrapper/dist/ + mkdir -p ../moon_interface/dist/ 2>/dev/null + cp -v dist/${VERSION}-py3-none-any.whl ../moon_interface/dist/ + mkdir -p ../moon_authz/dist/ 2>/dev/null + cp -v dist/${VERSION}-py3-none-any.whl ../moon_authz/dist/ +fi + diff --git a/tools/bin/set_auth.src b/tools/bin/set_auth.src new file mode 100644 index 00000000..d955e30b --- /dev/null +++ b/tools/bin/set_auth.src @@ -0,0 +1,7 @@ +export OS_USERNAME=admin +export OS_PASSWORD=p4ssw0rd +export OS_REGION_NAME=Orange +export OS_TENANT_NAME=admin +export OS_AUTH_URL=http://keystone:5000/v3 +export OS_DOMAIN_NAME=Default +export MOON_URL=http://172.18.0.11:38001 diff --git a/tools/bin/start.sh b/tools/bin/start.sh new file mode 100755 index 00000000..e95ac393 --- /dev/null +++ b/tools/bin/start.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +VERSION=4.1 +export DOCKER_HOST=tcp://172.88.88.1:2376 + +echo -e "\033[31mDeleting previous dockers\033[m" +docker rm -f $(docker ps -a | grep moon | cut -d " " -f 1) 2>/dev/null +docker rm -f messenger db keystone consul 2>/dev/null + +echo -e "\033[32mStarting Messenger\033[m" +docker run -dti --net=moon --hostname messenger --name messenger -e RABBITMQ_DEFAULT_USER=moon -e RABBITMQ_DEFAULT_PASS=p4sswOrd1 -e RABBITMQ_NODENAME=rabbit@messenger -e RABBITMQ_DEFAULT_VHOST=moon -e RABBITMQ_HIPE_COMPILE=1 -p 5671:5671 -p 5672:5672 -p 8080:15672 rabbitmq:3-management + +echo -e "\033[32mStarting DB manager\033[m" +docker run -dti --net=moon --hostname db --name db -e MYSQL_ROOT_PASSWORD=p4sswOrd1 -e MYSQL_DATABASE=moon -e MYSQL_USER=moon -e MYSQL_PASSWORD=p4sswOrd1 -p 3306:3306 mysql:latest + +docker run -d --net=moon --name=consul 
--hostname=consul -p 8500:8500 consul + +echo "waiting for Database (it may takes time)..." +echo -e "\033[35m" +sed '/ready for connections/q' <(docker logs db -f) +echo -e "\033[m" + +echo "waiting for Messenger (it may takes time)..." +echo -e "\033[35m" +sed '/Server startup complete;/q' <(docker logs messenger -f) +echo -e "\033[m" + +docker run -dti --net moon --hostname keystone --name keystone -e DB_HOST=db -e DB_PASSWORD_ROOT=p4sswOrd1 -p 35357:35357 -p 5000:5000 keystone:mitaka + +echo -e "\033[32mConfiguring Moon platform\033[m" +sudo pip install moon_db +moon_db_manager upgrade + +cd ${MOON_HOME}/moon_orchestrator +python3 populate_consul.py + +echo -e "\033[32mStarting Moon platform\033[m" + +docker container run -dti --net moon --hostname orchestrator --name orchestrator wukongsun/moon_orchestrator:${VERSION} diff --git a/tools/moon/moon.conf b/tools/moon/moon.conf new file mode 100644 index 00000000..a5a40ad2 --- /dev/null +++ b/tools/moon/moon.conf @@ -0,0 +1,87 @@ +database: + url: mysql+pymysql://moon:p4sswOrd1@db/moon + driver: sql + +openstack: + keystone: + url: http://keystone:5000/v3 + user: admin + password: p4ssw0rd + domain: default + project: admin + check_token: false + certificate: false + external: + url: http://keystone:30006/v3 + +plugins: + authz: + container: wukongsun/moon_authz:v4.3 + port: 8081 + session: + container: asteroide/session:latest + port: 8082 + +components: + interface: + port: 8080 + bind: 0.0.0.0 + hostname: interface + container: wukongsun/moon_interface:v4.3 + orchestrator: + port: 8083 + bind: 0.0.0.0 + hostname: orchestrator + container: wukongsun/moon_orchestrator:v4.3 + external: + port: 30003 + hostname: orchestrator + wrapper: + port: 8080 + bind: 0.0.0.0 + hostname: wrapper + container: wukongsun/moon_wrapper:v4.3.1 + timeout: 5 + manager: + port: 8082 + bind: 0.0.0.0 + hostname: manager + container: wukongsun/moon_manager:v4.3.1 + external: + port: 30001 + hostname: manager + port_start: 31001 + +logging: + version: 1 + + formatters: + brief: + format: "%(levelname)s %(name)s %(message)-30s" + custom: + format: "%(asctime)-15s %(levelname)s %(name)s %(message)s" + + handlers: + console: + class : logging.StreamHandler + formatter: brief + level : INFO + stream : ext://sys.stdout + file: + class : logging.handlers.RotatingFileHandler + formatter: custom + level : DEBUG + filename: /tmp/moon.log + maxBytes: 1048576 + backupCount: 3 + + loggers: + moon: + level: DEBUG + handlers: [console, file] + propagate: no + + root: + level: ERROR + handlers: [console] + diff --git a/tools/moon_keystone/Dockerfile b/tools/moon_keystone/Dockerfile new file mode 100644 index 00000000..2a43bd92 --- /dev/null +++ b/tools/moon_keystone/Dockerfile @@ -0,0 +1,25 @@ +FROM ubuntu:zesty + +ENV ADMIN_TOKEN=p4ssw0rd +ENV ADMIN_PASSWORD=p4ssw0rd +ENV DB_CONNECTION="mysql+pymysql" +ENV DB_DRIVER=sql +ENV DB_HOST=localhost +ENV DB_DATABASE=keystonedb +ENV DB_USER=keystone +ENV DB_PASSWORD=p4ssw0rd +ENV DB_USER_ROOT=root +ENV DB_PASSWORD_ROOT=p4sswOrd1 +ENV RABBIT_NODE=server +ENV INTERFACE_HOST="http://localhost:3001" + +RUN apt update && apt install apache2 rabbitmq-server keystone python-openstackclient libapache2-mod-wsgi mysql-client -y + +# RUN apt update && apt install iputils-ping net-tools -y + +ADD run.sh /root + +EXPOSE 35357 +EXPOSE 5000 + +CMD ["/bin/bash", "/root/run.sh"] \ No newline at end of file diff --git a/tools/moon_keystone/README.md b/tools/moon_keystone/README.md new file mode 100644 index 00000000..7027324e --- /dev/null +++ 
b/tools/moon_keystone/README.md @@ -0,0 +1,26 @@ +# Keystone container + +## build keystone image + +without proxy: +```bash +docker build -t keystone:mitaka . +``` + +with a proxy: +```bash +docker build --build-arg https_proxy=http://proxy:3128 --build-arg http_proxy=http://proxy:3128 -t keystone:mitaka . +``` + + +### access to the container +```bash +docker container exec -ti keystone /bin/bash +export OS_USERNAME=admin +export OS_PASSWORD=p4ssw0rd +export OS_REGION_NAME=Orange +export OS_TENANT_NAME=admin +export OS_AUTH_URL=http://localhost:5000/v3 +export OS_DOMAIN_NAME=Default +openstack project list +``` \ No newline at end of file diff --git a/tools/moon_keystone/run.sh b/tools/moon_keystone/run.sh new file mode 100644 index 00000000..2a61901e --- /dev/null +++ b/tools/moon_keystone/run.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash + +MY_HOSTNAME=localhost + +echo DB_HOST=$DB_HOST +echo DB_DATABASE=$DB_DATABASE +echo RABBIT_NODE=$RABBIT_NODE +echo RABBIT_NODE=$[RABBIT_NODE] +echo INTERFACE_HOST=$INTERFACE_HOST + +sed "s/#admin_token = /admin_token=$ADMIN_TOKEN/g" -i /etc/keystone/keystone.conf +sed "s/#connection = /connection = $DB_CONNECTION:\/\/$DB_USER:$DB_PASSWORD@$DB_HOST\/$DB_DATABASE/g" -i /etc/keystone/keystone.conf + +cat << EOF | tee -a /etc/keystone/keystone.conf +[cors] +allowed_origin = $INTERFACE_HOST +max_age = 3600 +allow_methods = POST,GET,DELETE +EOF + +until echo status | mysql -h${DB_HOST} -u${DB_USER_ROOT} -p${DB_PASSWORD_ROOT}; do + >&2 echo "MySQL is unavailable - sleeping" + sleep 1 +done + +>&2 echo "Mysql is up - executing command" + +mysql -h $DB_HOST -u$DB_USER_ROOT -p$DB_PASSWORD_ROOT </etc/apt/sources.list.d/kubernetes.list +deb http://apt.kubernetes.io/ kubernetes-xenial main +EOF +apt-get update +apt-get install -y kubelet kubeadm kubectl +``` + +## Moon Deployment +### Creation +Execute the script : `init_k8s.sh` +```bash +sudo bash init_k8s.sh +watch kubectl get po --namespace=kube-system +``` + +Wait until all the kubeadm containers are in the `running` state: +```bash +watch kubectl get po --namespace=kube-system +``` + +You must see something like this: + + $ kubectl get po --namespace=kube-system + NAME READY STATUS RESTARTS AGE + calico-etcd-7qgjb 1/1 Running 0 1h + calico-node-f8zvm 2/2 Running 1 1h + calico-policy-controller-59fc4f7888-ns9kv 1/1 Running 0 1h + etcd-varuna 1/1 Running 0 1h + kube-apiserver-varuna 1/1 Running 0 1h + kube-controller-manager-varuna 1/1 Running 0 1h + kube-dns-bfbb49cd7-rgqxn 3/3 Running 0 1h + kube-proxy-x88wg 1/1 Running 0 1h + kube-scheduler-varuna 1/1 Running 0 1h + + +### Execution +Execute the script : `start_moon.sh` +```bash +sudo bash start_moon.sh +watch kubectl get po --namespace=moon +``` + +Wait until all the Moon containers are in the `running` state: +```bash +watch kubectl get po --namespace=moon +``` + +You must see something like this: + + $ kubectl get po --namespace=moon + NAME READY STATUS RESTARTS AGE + consul-57b6d66975-9qnfx 1/1 Running 0 52m + db-867f9c6666-bq8cf 1/1 Running 0 52m + gui-bc9878b58-q288x 1/1 Running 0 51m + keystone-7d9cdbb69f-bl6ln 1/1 Running 0 52m + manager-5bfbb96988-2nvhd 1/1 Running 0 51m + manager-5bfbb96988-fg8vj 1/1 Running 0 51m + manager-5bfbb96988-w9wnk 1/1 Running 0 51m + orchestrator-65d8fb4574-tnfx2 1/1 Running 0 51m + wrapper-astonishing-748b7dcc4f-ngsvp 1/1 Running 0 51m \ No newline at end of file diff --git a/tools/moon_kubernetes/conf/password_moon.txt b/tools/moon_kubernetes/conf/password_moon.txt new file mode 100644 index 00000000..bb9bcf7d --- 
/dev/null +++ b/tools/moon_kubernetes/conf/password_moon.txt @@ -0,0 +1 @@ +p4sswOrd1 \ No newline at end of file diff --git a/tools/moon_kubernetes/conf/password_root.txt b/tools/moon_kubernetes/conf/password_root.txt new file mode 100644 index 00000000..bb9bcf7d --- /dev/null +++ b/tools/moon_kubernetes/conf/password_root.txt @@ -0,0 +1 @@ +p4sswOrd1 \ No newline at end of file diff --git a/tools/moon_kubernetes/conf/ports.conf b/tools/moon_kubernetes/conf/ports.conf new file mode 100644 index 00000000..487945c0 --- /dev/null +++ b/tools/moon_kubernetes/conf/ports.conf @@ -0,0 +1,24 @@ +manager: + port: 8082 + kport: 30001 +gui: + port: 3000 + kport: 30002 +orchestrator: + port: 8083 + kport: 30003 + +consul: + port: 8500 + kport: 30005 +keystone: + port: 5000 + kport: 30006 + +wrapper: + port: 8080 + kport: 30010 +interface: + port: 8080 +authz: + port: 8081 diff --git a/tools/moon_kubernetes/init_k8s.sh b/tools/moon_kubernetes/init_k8s.sh new file mode 100644 index 00000000..6eb94e78 --- /dev/null +++ b/tools/moon_kubernetes/init_k8s.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +set -x + +sudo kubeadm reset + +sudo swapoff -a + +sudo kubeadm init --pod-network-cidr=192.168.0.0/16 +#sudo kubeadm init --pod-network-cidr=10.244.0.0/16 + +mkdir -p $HOME/.kube +sudo cp -f /etc/kubernetes/admin.conf $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config + +kubectl apply -f http://docs.projectcalico.org/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml +#kubectl apply -f https://raw.githubusercontent.com/projectcalico/canal/master/k8s-install/1.6/rbac.yaml +#kubectl apply -f https://raw.githubusercontent.com/projectcalico/canal/master/k8s-install/1.6/canal.yaml + +#kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml + +kubectl delete deployment kube-dns --namespace=kube-system +kubectl apply -f kubernetes/templates/kube-dns.yaml + +kubectl taint nodes --all node-role.kubernetes.io/master- + +kubectl proxy& +sleep 5 +echo ========================================= +kubectl get po --namespace=kube-system +echo ========================================= + + diff --git a/tools/moon_kubernetes/start_moon.sh b/tools/moon_kubernetes/start_moon.sh new file mode 100644 index 00000000..8121e319 --- /dev/null +++ b/tools/moon_kubernetes/start_moon.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +set -x + +kubectl create namespace moon +kubectl create configmap moon-config --from-file conf/moon.conf -n moon +kubectl create configmap config --from-file ~/.kube/config -n moon +kubectl create secret generic mysql-root-pass --from-file=kubernetes/conf/password_root.txt -n moon +kubectl create secret generic mysql-pass --from-file=kubernetes/conf/password_moon.txt -n moon + +kubectl create -n moon -f kubernetes/templates/consul.yaml +kubectl create -n moon -f kubernetes/templates/db.yaml +kubectl create -n moon -f kubernetes/templates/keystone.yaml + +echo ========================================= +kubectl get pods -n moon +echo ========================================= + +sleep 10 +kubectl create -n moon -f kubernetes/templates/moon_configuration.yaml + +echo Waiting for jobs moonforming +sleep 5 +kubectl get jobs -n moon +kubectl logs -n moon jobs/moonforming + +sleep 5 + +kubectl create -n moon -f kubernetes/templates/moon_manager.yaml + +sleep 2 + +kubectl create -n moon -f kubernetes/templates/moon_orchestrator.yaml + +kubectl create -n moon -f kubernetes/templates/moon_gui.yaml + + diff --git 
a/tools/moon_kubernetes/templates/consul.yaml b/tools/moon_kubernetes/templates/consul.yaml new file mode 100644 index 00000000..f0fb764e --- /dev/null +++ b/tools/moon_kubernetes/templates/consul.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + namespace: moon + name: consul +spec: + replicas: 1 + template: + metadata: + labels: + app: consul + spec: + hostname: consul + containers: + - name: consul + image: consul:latest + ports: + - containerPort: 8500 +--- + +apiVersion: v1 +kind: Service +metadata: + name: consul + namespace: moon +spec: + ports: + - port: 8500 + targetPort: 8500 + nodePort: 30005 + selector: + app: consul + type: NodePort diff --git a/tools/moon_kubernetes/templates/db.yaml b/tools/moon_kubernetes/templates/db.yaml new file mode 100644 index 00000000..38418643 --- /dev/null +++ b/tools/moon_kubernetes/templates/db.yaml @@ -0,0 +1,84 @@ +#apiVersion: v1 +#kind: PersistentVolume +#metadata: +# name: local-pv-1 +# labels: +# type: local +#spec: +# capacity: +# storage: 5Gi +# accessModes: +# - ReadWriteOnce +# hostPath: +# path: /tmp/data/pv-1 +#--- +# +#apiVersion: v1 +#kind: PersistentVolumeClaim +#metadata: +# name: mysql-pv-claim +# labels: +# platform: moon +# app: db +#spec: +# accessModes: +# - ReadWriteOnce +# resources: +# requests: +# storage: 5Gi +#--- + +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + namespace: moon + name: db +spec: + replicas: 1 + strategy: + type: Recreate + template: + metadata: + labels: + app: db + spec: + containers: + - name: db + image: mysql:latest + env: + - name: MYSQL_DATABASE + value: "moon" + - name: MYSQL_USER + value: "moon" + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: mysql-pass + key: password_moon.txt + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: mysql-root-pass + key: password_root.txt + ports: + - containerPort: 3306 + name: mysql +# volumeMounts: +# - name: mysql-persistent-storage +# mountPath: /var/lib/mysql +# volumes: +# - name: mysql-persistent-storage +# persistentVolumeClaim: +# claimName: mysql-pv-claim +--- +apiVersion: v1 +kind: Service +metadata: + namespace: moon + name: db +spec: + ports: + - port: 3306 + selector: + app: db +--- \ No newline at end of file diff --git a/tools/moon_kubernetes/templates/keystone.yaml b/tools/moon_kubernetes/templates/keystone.yaml new file mode 100644 index 00000000..e4218e4c --- /dev/null +++ b/tools/moon_kubernetes/templates/keystone.yaml @@ -0,0 +1,39 @@ +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + namespace: moon + name: keystone +spec: + replicas: 1 + template: + metadata: + labels: + app: keystone + spec: + hostname: keystone + containers: + - name: keystone + image: asteroide/keystone:pike-cors + env: + - name: KEYSTONE_HOSTNAME + value: "127.0.0.1" + - name: KEYSTONE_PORT + value: "30006" + ports: + - containerPort: 35357 + containerPort: 5000 +--- + +apiVersion: v1 +kind: Service +metadata: + name: keystone + namespace: moon +spec: + ports: + - port: 5000 + targetPort: 5000 + nodePort: 30006 + selector: + app: keystone + type: NodePort diff --git a/tools/moon_kubernetes/templates/kube-dns.yaml b/tools/moon_kubernetes/templates/kube-dns.yaml new file mode 100644 index 00000000..c8f18fd8 --- /dev/null +++ b/tools/moon_kubernetes/templates/kube-dns.yaml @@ -0,0 +1,183 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "2" + kubectl.kubernetes.io/last-applied-configuration: | + 
{"apiVersion":"extensions/v1beta1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2017-10-30T09:03:59Z","generation":1,"labels":{"k8s-app":"kube-dns"},"name":"kube-dns","namespace":"kube-system","resourceVersion":"556","selfLink":"/apis/extensions/v1beta1/namespaces/kube-system/deployments/kube-dns","uid":"4433b709-bd51-11e7-a055-80fa5b15034a"},"spec":{"replicas":1,"selector":{"matchLabels":{"k8s-app":"kube-dns"}},"strategy":{"rollingUpdate":{"maxSurge":"10%","maxUnavailable":0},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"k8s-app":"kube-dns"}},"spec":{"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"beta.kubernetes.io/arch","operator":"In","values":["amd64"]}]}]}}},"containers":[{"args":["--domain=cluster.local.","--dns-port=10053","--config-dir=/kube-dns-config","--v=2"],"env":[{"name":"PROMETHEUS_PORT","value":"10055"}],"image":"gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5","imagePullPolicy":"IfNotPresent","livenessProbe":{"failureThreshold":5,"httpGet":{"path":"/healthcheck/kubedns","port":10054,"scheme":"HTTP"},"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"name":"kubedns","ports":[{"containerPort":10053,"name":"dns-local","protocol":"UDP"},{"containerPort":10053,"name":"dns-tcp-local","protocol":"TCP"},{"containerPort":10055,"name":"metrics","protocol":"TCP"}],"readinessProbe":{"failureThreshold":3,"httpGet":{"path":"/readiness","port":8081,"scheme":"HTTP"},"initialDelaySeconds":3,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"resources":{"limits":{"memory":"170Mi"},"requests":{"cpu":"100m","memory":"70Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","volumeMounts":[{"mountPath":"/kube-dns-config","name":"kube-dns-config"}]},{"args":["-v=2","-logtostderr","-configDir=/etc/k8s/dns/dnsmasq-nanny","-restartDnsmasq=true","--","-k","--cache-size=1000","--log-facility=-","--server=/cluster.local/127.0.0.1#10053","--server=/in-addr.arpa/127.0.0.1#10053","--server=/ip6.arpa/127.0.0.1#10053","--server=8.8.8.8"],"image":"gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5","imagePullPolicy":"IfNotPresent","livenessProbe":{"failureThreshold":5,"httpGet":{"path":"/healthcheck/dnsmasq","port":10054,"scheme":"HTTP"},"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"name":"dnsmasq","ports":[{"containerPort":53,"name":"dns","protocol":"UDP"},{"containerPort":53,"name":"dns-tcp","protocol":"TCP"}],"resources":{"requests":{"cpu":"150m","memory":"20Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","volumeMounts":[{"mountPath":"/etc/k8s/dns/dnsmasq-nanny","name":"kube-dns-config"}]},{"args":["--v=2","--logtostderr","--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A","--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A"],"image":"gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5","imagePullPolicy":"IfNotPresent","livenessProbe":{"failureThreshold":5,"httpGet":{"path":"/metrics","port":10054,"scheme":"HTTP"},"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"name":"sidecar","ports":[{"containerPort":10054,"name":"metrics","protocol":"TCP"}],"resources":{"requests":{"cpu":"10m","memory":"20Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"F
ile"}],"dnsPolicy":"Default","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"serviceAccount":"kube-dns","serviceAccountName":"kube-dns","terminationGracePeriodSeconds":30,"tolerations":[{"key":"CriticalAddonsOnly","operator":"Exists"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"}],"volumes":[{"configMap":{"defaultMode":420,"name":"kube-dns","optional":true},"name":"kube-dns-config"}]}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2017-10-30T09:05:11Z","lastUpdateTime":"2017-10-30T09:05:11Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}} + creationTimestamp: 2017-10-30T09:03:59Z + generation: 2 + labels: + k8s-app: kube-dns + name: kube-dns + namespace: kube-system + resourceVersion: "300076" + selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/kube-dns + uid: 4433b709-bd51-11e7-a055-80fa5b15034a +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: kube-dns + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + containers: + - args: + - --domain=cluster.local. + - --dns-port=10053 + - --config-dir=/kube-dns-config + - --v=2 + env: + - name: PROMETHEUS_PORT + value: "10055" + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthcheck/kubedns + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + name: kubedns + ports: + - containerPort: 10053 + name: dns-local + protocol: UDP + - containerPort: 10053 + name: dns-tcp-local + protocol: TCP + - containerPort: 10055 + name: metrics + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readiness + port: 8081 + scheme: HTTP + initialDelaySeconds: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + resources: + limits: + memory: 340Mi + requests: + cpu: 200m + memory: 140Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /kube-dns-config + name: kube-dns-config + - args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k + - --dns-forward-max=300 + - --cache-size=1000 + - --log-facility=- + - --server=/cluster.local/127.0.0.1#10053 + - --server=/in-addr.arpa/127.0.0.1#10053 + - --server=/ip6.arpa/127.0.0.1#10053 + - --server=8.8.8.8 + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthcheck/dnsmasq + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + name: dnsmasq + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + resources: + requests: + cpu: 150m + memory: 20Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/k8s/dns/dnsmasq-nanny + name: kube-dns-config + - args: + - 
--v=2 + - --logtostderr + - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A + - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /metrics + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + name: sidecar + ports: + - containerPort: 10054 + name: metrics + protocol: TCP + resources: + requests: + cpu: 10m + memory: 20Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: Default + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: kube-dns + serviceAccountName: kube-dns + terminationGracePeriodSeconds: 30 + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/master + volumes: + - configMap: + defaultMode: 420 + name: kube-dns + optional: true + name: kube-dns-config diff --git a/tools/moon_kubernetes/templates/moon_configuration.yaml b/tools/moon_kubernetes/templates/moon_configuration.yaml new file mode 100644 index 00000000..3bcaa533 --- /dev/null +++ b/tools/moon_kubernetes/templates/moon_configuration.yaml @@ -0,0 +1,25 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: moonforming + namespace: moon +spec: + template: + metadata: + name: moonforming + spec: + containers: + - name: moonforming + image: asteroide/moonforming:v1.3 + env: + - name: POPULATE_ARGS + value: "--verbose" # debug mode: --debug + volumeMounts: + - name: config-volume + mountPath: /etc/moon + volumes: + - name: config-volume + configMap: + name: moon-config + restartPolicy: Never + #backoffLimit: 4 \ No newline at end of file diff --git a/tools/moon_kubernetes/templates/moon_gui.yaml b/tools/moon_kubernetes/templates/moon_gui.yaml new file mode 100644 index 00000000..2d355216 --- /dev/null +++ b/tools/moon_kubernetes/templates/moon_gui.yaml @@ -0,0 +1,42 @@ +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + namespace: moon + name: gui +spec: + replicas: 1 + template: + metadata: + labels: + app: gui + spec: + hostname: gui + containers: + - name: gui + image: wukongsun/moon_gui:v4.3.1 + env: + - name: MANAGER_HOST + value: "127.0.0.1" + - name: MANAGER_PORT + value: "30001" + - name: KEYSTONE_HOST + value: "127.0.0.1" + - name: KEYSTONE_PORT + value: "30006" + ports: + - containerPort: 80 +--- + +apiVersion: v1 +kind: Service +metadata: + name: gui + namespace: moon +spec: + ports: + - port: 80 + targetPort: 80 + nodePort: 30002 + selector: + app: gui + type: NodePort diff --git a/tools/moon_kubernetes/templates/moon_manager.yaml b/tools/moon_kubernetes/templates/moon_manager.yaml new file mode 100644 index 00000000..9d4a09a8 --- /dev/null +++ b/tools/moon_kubernetes/templates/moon_manager.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: manager + namespace: moon +spec: + replicas: 3 + template: + metadata: + labels: + app: manager + spec: + hostname: manager + containers: + - name: manager + image: wukongsun/moon_manager:v4.3.1 + ports: + - containerPort: 8082 +--- + +apiVersion: v1 +kind: Service +metadata: + name: manager + namespace: moon +spec: + ports: + - port: 8082 + targetPort: 8082 + nodePort: 30001 + selector: + app: manager + type: NodePort diff --git a/tools/moon_kubernetes/templates/moon_orchestrator.yaml 
b/tools/moon_kubernetes/templates/moon_orchestrator.yaml
new file mode 100644
index 00000000..419f2d52
--- /dev/null
+++ b/tools/moon_kubernetes/templates/moon_orchestrator.yaml
@@ -0,0 +1,40 @@
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  namespace: moon
+  name: orchestrator
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: orchestrator
+    spec:
+      hostname: orchestrator
+      containers:
+      - name: orchestrator
+        image: wukongsun/moon_orchestrator:v4.3
+        ports:
+        - containerPort: 8083
+        volumeMounts:
+        - name: config-volume
+          mountPath: /root/.kube
+      volumes:
+      - name: config-volume
+        configMap:
+          name: config
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: orchestrator
+  namespace: moon
+spec:
+  ports:
+  - port: 8083
+    targetPort: 8083
+    nodePort: 30003
+  selector:
+    app: orchestrator
+  type: NodePort
diff --git a/tools/openstack/README.md b/tools/openstack/README.md
new file mode 100644
index 00000000..8b5d06e5
--- /dev/null
+++ b/tools/openstack/README.md
@@ -0,0 +1,73 @@
+# OpenStack
+## Installation
+For the *Moon* platform, you must have the following OpenStack components installed somewhere:
+- *Nova*, see [Nova install](https://docs.openstack.org/mitaka/install-guide-ubuntu/nova-controller-install.html)
+- *Glance*, see [Glance install](https://docs.openstack.org/glance/pike/install/)
+- *Keystone* is automatically installed and configured in the Moon platform.
+After the Moon platform installation, the Keystone server will be available
+at `http://localhost:30006` or `http://<moon_hostname>:30006`.
+
+You can also use your own Keystone server if you want.
+
+## Configuration
+Before updating the configuration of the OpenStack platform, check that the platform
+works without Moon by using the following commands:
+```bash
+# set authentication
+openstack endpoint list
+openstack user list
+openstack server list
+```
+
+To connect the OpenStack platform with the Moon platform, you must update some
+configuration files in Nova and Glance:
+- `/etc/nova/policy.json`
+- `/etc/glance/policy.json`
+
+On some installed platforms, `/etc/nova/policy.json` may be absent, so you have
+to create it. You can find example files in these directories:
+- `${MOON}/tools/openstack/nova/policy.json`
+- `${MOON}/tools/openstack/glance/policy.json`
+
+Each line is mapped to an OpenStack API endpoint; for example, the following line
+allows the user to get details for every virtual machine in the cloud
+(the corresponding shell command is `openstack server list`):
+
+    "os_compute_api:servers:detail": "",
+
+This line indicates that no special authorisation is needed to use this API:
+every user can use it. If you want the Moon platform to handle that authorisation,
+update this line with:
+
+    "os_compute_api:servers:detail": "http://my_hostname:31001/authz"
+
+1) by replacing `my_hostname` with the hostname (or the IP address) of the Moon platform;
+2) by updating the TCP port (default: 31001) with the correct one; a quick manual check of the resulting `/authz` endpoint is sketched below.
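+
+The `http://my_hostname:31001/authz` value turns this policy rule into an HTTP check:
+when the API is called, Nova POSTs the request context to that URL and only allows the
+operation if the Moon wrapper answers positively. A minimal manual check that the wrapper
+endpoint is reachable is sketched below; the payload format is an assumption based on
+oslo.policy's generic HTTP check (JSON-encoded `target` and `credentials` form fields,
+with the literal body `True` expected on success), not a documented Moon API.
+
+```bash
+# Hedged smoke test of the wrapper endpoint configured in policy.json.
+# Replace my_hostname and 31001 with the values used in your policy.json;
+# the target/credentials fields below are illustrative placeholders only.
+curl -i "http://my_hostname:31001/authz" \
+     --data-urlencode 'target={"project_id": "demo_project_id"}' \
+     --data-urlencode 'credentials={"user_id": "demo_user_id"}'
+```
+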
+ +To find this TCP port, use the following command: + + $ kubectl get services -n moon | grep wrapper | cut -d ":" -f 2 | cut -d " " -f 1 + 31002/TCP + +## Tests +Here is a shell script to authenticate to the OpenStack platform as `admin`: +```bash +export OS_USERNAME=admin +export OS_PASSWORD=p4ssw0rd +export OS_REGION_NAME=Orange +export OS_TENANT_NAME=admin +export OS_AUTH_URL=http://moon_hostname:30006/v3 +export OS_DOMAIN_NAME=Default +export OS_IDENTITY_API_VERSION=3 +``` + +For the `demo_user`, use: +```bash +export OS_USERNAME=demo_user +export OS_PASSWORD=your_secret_password +export OS_REGION_NAME=Orange +export OS_TENANT_NAME=demo +export OS_AUTH_URL=http://moon_hostname:30006/v3 +export OS_DOMAIN_NAME=Default +export OS_IDENTITY_API_VERSION=3 +``` diff --git a/tools/openstack/glance/policy.json b/tools/openstack/glance/policy.json new file mode 100644 index 00000000..5505f67f --- /dev/null +++ b/tools/openstack/glance/policy.json @@ -0,0 +1,62 @@ +{ + "context_is_admin": "role:admin", + "default": "role:admin", + + "add_image": "http://my_hostname:31001/authz", + "delete_image": "http://my_hostname:31001/authz", + "get_image": "http://my_hostname:31001/authz", + "get_images": "http://my_hostname:31001/authz", + "modify_image": "http://my_hostname:31001/authz", + "publicize_image": "role:admin", + "communitize_image": "", + "copy_from": "", + + "download_image": "", + "upload_image": "", + + "delete_image_location": "", + "get_image_location": "", + "set_image_location": "", + + "add_member": "", + "delete_member": "", + "get_member": "", + "get_members": "", + "modify_member": "", + + "manage_image_cache": "role:admin", + + "get_task": "role:admin", + "get_tasks": "role:admin", + "add_task": "role:admin", + "modify_task": "role:admin", + + "deactivate": "", + "reactivate": "", + + "get_metadef_namespace": "", + "get_metadef_namespaces":"", + "modify_metadef_namespace":"", + "add_metadef_namespace":"", + + "get_metadef_object":"", + "get_metadef_objects":"", + "modify_metadef_object":"", + "add_metadef_object":"", + + "list_metadef_resource_types":"", + "get_metadef_resource_type":"", + "add_metadef_resource_type_association":"", + + "get_metadef_property":"", + "get_metadef_properties":"", + "modify_metadef_property":"", + "add_metadef_property":"", + + "get_metadef_tag":"", + "get_metadef_tags":"", + "modify_metadef_tag":"", + "add_metadef_tag":"", + "add_metadef_tags":"" + +} diff --git a/tools/openstack/nova/policy.json b/tools/openstack/nova/policy.json new file mode 100644 index 00000000..29763ce3 --- /dev/null +++ b/tools/openstack/nova/policy.json @@ -0,0 +1,488 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "is_admin:True or project_id:%(project_id)s", + "default": "rule:admin_or_owner", + + "cells_scheduler_filter:TargetCellFilter": "is_admin:True", + + "compute:create": "http://my_hostname:31001/authz", + "compute:create:attach_network": "", + "compute:create:attach_volume": "", + "compute:create:forced_host": "is_admin:True", + + "compute:get": "http://my_hostname:31001/authz", + "compute:get_all": "http://my_hostname:31001/authz", + "compute:get_all_tenants": "is_admin:True", + + "compute:update": "", + + "compute:get_instance_metadata": "", + "compute:get_all_instance_metadata": "", + "compute:get_all_instance_system_metadata": "", + "compute:update_instance_metadata": "", + "compute:delete_instance_metadata": "", + + "compute:get_instance_faults": "", + "compute:get_diagnostics": "", + "compute:get_instance_diagnostics": "", + + "compute:start": 
"rule:admin_or_owner", + "compute:stop": "rule:admin_or_owner", + + "compute:get_lock": "", + "compute:lock": "rule:admin_or_owner", + "compute:unlock": "rule:admin_or_owner", + "compute:unlock_override": "rule:admin_api", + + "compute:get_vnc_console": "", + "compute:get_spice_console": "", + "compute:get_rdp_console": "", + "compute:get_serial_console": "", + "compute:get_mks_console": "", + "compute:get_console_output": "", + + "compute:reset_network": "", + "compute:inject_network_info": "", + "compute:add_fixed_ip": "", + "compute:remove_fixed_ip": "", + + "compute:attach_volume": "", + "compute:detach_volume": "", + "compute:swap_volume": "", + + "compute:attach_interface": "", + "compute:detach_interface": "", + + "compute:set_admin_password": "", + + "compute:rescue": "", + "compute:unrescue": "", + + "compute:suspend": "", + "compute:resume": "", + + "compute:pause": "", + "compute:unpause": "", + + "compute:shelve": "", + "compute:shelve_offload": "", + "compute:unshelve": "", + + "compute:snapshot": "", + "compute:snapshot_volume_backed": "", + "compute:backup": "", + + "compute:resize": "", + "compute:confirm_resize": "", + "compute:revert_resize": "", + + "compute:rebuild": "", + "compute:reboot": "", + "compute:delete": "rule:admin_or_owner", + "compute:soft_delete": "rule:admin_or_owner", + "compute:force_delete": "rule:admin_or_owner", + + "compute:security_groups:add_to_instance": "", + "compute:security_groups:remove_from_instance": "", + + "compute:delete": "", + "compute:soft_delete": "", + "compute:force_delete": "", + "compute:restore": "", + + "compute:volume_snapshot_create": "", + "compute:volume_snapshot_delete": "", + + "admin_api": "is_admin:True", + "compute_extension:accounts": "rule:admin_api", + "compute_extension:admin_actions": "rule:admin_api", + "compute_extension:admin_actions:pause": "rule:admin_or_owner", + "compute_extension:admin_actions:unpause": "rule:admin_or_owner", + "compute_extension:admin_actions:suspend": "rule:admin_or_owner", + "compute_extension:admin_actions:resume": "rule:admin_or_owner", + "compute_extension:admin_actions:lock": "rule:admin_or_owner", + "compute_extension:admin_actions:unlock": "rule:admin_or_owner", + "compute_extension:admin_actions:resetNetwork": "rule:admin_api", + "compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api", + "compute_extension:admin_actions:createBackup": "rule:admin_or_owner", + "compute_extension:admin_actions:migrateLive": "rule:admin_api", + "compute_extension:admin_actions:resetState": "rule:admin_api", + "compute_extension:admin_actions:migrate": "rule:admin_api", + "compute_extension:aggregates": "rule:admin_api", + "compute_extension:agents": "rule:admin_api", + "compute_extension:attach_interfaces": "", + "compute_extension:baremetal_nodes": "rule:admin_api", + "compute_extension:cells": "rule:admin_api", + "compute_extension:cells:create": "rule:admin_api", + "compute_extension:cells:delete": "rule:admin_api", + "compute_extension:cells:update": "rule:admin_api", + "compute_extension:cells:sync_instances": "rule:admin_api", + "compute_extension:certificates": "", + "compute_extension:cloudpipe": "rule:admin_api", + "compute_extension:cloudpipe_update": "rule:admin_api", + "compute_extension:config_drive": "", + "compute_extension:console_output": "", + "compute_extension:consoles": "", + "compute_extension:createserverext": "", + "compute_extension:deferred_delete": "", + "compute_extension:disk_config": "", + "compute_extension:evacuate": "rule:admin_api", + 
"compute_extension:extended_server_attributes": "rule:admin_api", + "compute_extension:extended_status": "", + "compute_extension:extended_availability_zone": "", + "compute_extension:extended_ips": "", + "compute_extension:extended_ips_mac": "", + "compute_extension:extended_vif_net": "", + "compute_extension:extended_volumes": "", + "compute_extension:fixed_ips": "rule:admin_api", + "compute_extension:flavor_access": "", + "compute_extension:flavor_access:addTenantAccess": "rule:admin_api", + "compute_extension:flavor_access:removeTenantAccess": "rule:admin_api", + "compute_extension:flavor_disabled": "", + "compute_extension:flavor_rxtx": "", + "compute_extension:flavor_swap": "", + "compute_extension:flavorextradata": "", + "compute_extension:flavorextraspecs:index": "", + "compute_extension:flavorextraspecs:show": "", + "compute_extension:flavorextraspecs:create": "rule:admin_api", + "compute_extension:flavorextraspecs:update": "rule:admin_api", + "compute_extension:flavorextraspecs:delete": "rule:admin_api", + "compute_extension:flavormanage": "rule:admin_api", + "compute_extension:floating_ip_dns": "", + "compute_extension:floating_ip_pools": "", + "compute_extension:floating_ips": "", + "compute_extension:floating_ips_bulk": "rule:admin_api", + "compute_extension:fping": "", + "compute_extension:fping:all_tenants": "rule:admin_api", + "compute_extension:hide_server_addresses": "is_admin:False", + "compute_extension:hosts": "rule:admin_api", + "compute_extension:hypervisors": "rule:admin_api", + "compute_extension:image_size": "", + "compute_extension:instance_actions": "", + "compute_extension:instance_actions:events": "rule:admin_api", + "compute_extension:instance_usage_audit_log": "rule:admin_api", + "compute_extension:keypairs": "", + "compute_extension:keypairs:index": "", + "compute_extension:keypairs:show": "", + "compute_extension:keypairs:create": "", + "compute_extension:keypairs:delete": "", + "compute_extension:multinic": "", + "compute_extension:networks": "rule:admin_api", + "compute_extension:networks:view": "", + "compute_extension:networks_associate": "rule:admin_api", + "compute_extension:os-tenant-networks": "", + "compute_extension:quotas:show": "", + "compute_extension:quotas:update": "rule:admin_api", + "compute_extension:quotas:delete": "rule:admin_api", + "compute_extension:quota_classes": "", + "compute_extension:rescue": "", + "compute_extension:security_group_default_rules": "rule:admin_api", + "compute_extension:security_groups": "", + "compute_extension:server_diagnostics": "rule:admin_api", + "compute_extension:server_groups": "", + "compute_extension:server_password": "", + "compute_extension:server_usage": "", + "compute_extension:services": "rule:admin_api", + "compute_extension:shelve": "", + "compute_extension:shelveOffload": "rule:admin_api", + "compute_extension:simple_tenant_usage:show": "rule:admin_or_owner", + "compute_extension:simple_tenant_usage:list": "rule:admin_api", + "compute_extension:unshelve": "", + "compute_extension:users": "rule:admin_api", + "compute_extension:virtual_interfaces": "", + "compute_extension:virtual_storage_arrays": "", + "compute_extension:volumes": "", + "compute_extension:volume_attachments:index": "", + "compute_extension:volume_attachments:show": "", + "compute_extension:volume_attachments:create": "", + "compute_extension:volume_attachments:update": "", + "compute_extension:volume_attachments:delete": "", + "compute_extension:volumetypes": "", + "compute_extension:availability_zone:list": "", + 
"compute_extension:availability_zone:detail": "rule:admin_api", + "compute_extension:used_limits_for_admin": "rule:admin_api", + "compute_extension:migrations:index": "rule:admin_api", + "compute_extension:os-assisted-volume-snapshots:create": "rule:admin_api", + "compute_extension:os-assisted-volume-snapshots:delete": "rule:admin_api", + "compute_extension:console_auth_tokens": "rule:admin_api", + "compute_extension:os-server-external-events:create": "rule:admin_api", + + "network:get_all": "", + "network:get": "", + "network:create": "", + "network:delete": "", + "network:associate": "", + "network:disassociate": "", + "network:get_vifs_by_instance": "", + "network:allocate_for_instance": "", + "network:deallocate_for_instance": "", + "network:validate_networks": "", + "network:get_instance_uuids_by_ip_filter": "", + "network:get_instance_id_by_floating_address": "", + "network:setup_networks_on_host": "", + "network:get_backdoor_port": "", + + "network:get_floating_ip": "", + "network:get_floating_ip_pools": "", + "network:get_floating_ip_by_address": "", + "network:get_floating_ips_by_project": "", + "network:get_floating_ips_by_fixed_address": "", + "network:allocate_floating_ip": "", + "network:associate_floating_ip": "", + "network:disassociate_floating_ip": "", + "network:release_floating_ip": "", + "network:migrate_instance_start": "", + "network:migrate_instance_finish": "", + + "network:get_fixed_ip": "", + "network:get_fixed_ip_by_address": "", + "network:add_fixed_ip_to_instance": "", + "network:remove_fixed_ip_from_instance": "", + "network:add_network_to_project": "", + "network:get_instance_nw_info": "", + + "network:get_dns_domains": "", + "network:add_dns_entry": "", + "network:modify_dns_entry": "", + "network:delete_dns_entry": "", + "network:get_dns_entries_by_address": "", + "network:get_dns_entries_by_name": "", + "network:create_private_dns_domain": "", + "network:create_public_dns_domain": "", + "network:delete_dns_domain": "", + "network:attach_external_network": "rule:admin_api", + "network:get_vif_by_mac_address": "", + + "os_compute_api:servers:detail:get_all_tenants": "is_admin:True", + "os_compute_api:servers:index:get_all_tenants": "is_admin:True", + "os_compute_api:servers:confirm_resize": "", + "os_compute_api:servers:create": "http://my_hostname:31001/authz", + "os_compute_api:servers:create:attach_network": "", + "os_compute_api:servers:create:attach_volume": "", + "os_compute_api:servers:create:forced_host": "rule:admin_api", + "os_compute_api:servers:delete": "http://my_hostname:31001/authz", + "os_compute_api:servers:update": "http://my_hostname:31001/authz", + "os_compute_api:servers:detail": "http://my_hostname:31001/authz", + "os_compute_api:servers:index": "http://my_hostname:31001/authz", + "os_compute_api:servers:reboot": "http://my_hostname:31001/authz", + "os_compute_api:servers:rebuild": "http://my_hostname:31001/authz", + "os_compute_api:servers:resize": "http://my_hostname:31001/authz", + "os_compute_api:servers:revert_resize": "http://my_hostname:31001/authz", + "os_compute_api:servers:show": "http://my_hostname:31001/authz", + "os_compute_api:servers:create_image": "", + "os_compute_api:servers:create_image:allow_volume_backed": "", + "os_compute_api:servers:start": "rule:admin_or_owner", + "os_compute_api:servers:stop": "rule:admin_or_owner", + "os_compute_api:os-access-ips:discoverable": "", + "os_compute_api:os-access-ips": "", + "os_compute_api:os-admin-actions": "rule:admin_api", + "os_compute_api:os-admin-actions:discoverable": "", 
+ "os_compute_api:os-admin-actions:reset_network": "rule:admin_api", + "os_compute_api:os-admin-actions:inject_network_info": "rule:admin_api", + "os_compute_api:os-admin-actions:reset_state": "rule:admin_api", + "os_compute_api:os-admin-password": "", + "os_compute_api:os-admin-password:discoverable": "", + "os_compute_api:os-aggregates:discoverable": "", + "os_compute_api:os-aggregates:index": "rule:admin_api", + "os_compute_api:os-aggregates:create": "rule:admin_api", + "os_compute_api:os-aggregates:show": "rule:admin_api", + "os_compute_api:os-aggregates:update": "rule:admin_api", + "os_compute_api:os-aggregates:delete": "rule:admin_api", + "os_compute_api:os-aggregates:add_host": "rule:admin_api", + "os_compute_api:os-aggregates:remove_host": "rule:admin_api", + "os_compute_api:os-aggregates:set_metadata": "rule:admin_api", + "os_compute_api:os-agents": "rule:admin_api", + "os_compute_api:os-agents:discoverable": "", + "os_compute_api:os-attach-interfaces": "", + "os_compute_api:os-attach-interfaces:discoverable": "", + "os_compute_api:os-baremetal-nodes": "rule:admin_api", + "os_compute_api:os-baremetal-nodes:discoverable": "", + "os_compute_api:os-block-device-mapping-v1:discoverable": "", + "os_compute_api:os-cells": "rule:admin_api", + "os_compute_api:os-cells:create": "rule:admin_api", + "os_compute_api:os-cells:delete": "rule:admin_api", + "os_compute_api:os-cells:update": "rule:admin_api", + "os_compute_api:os-cells:sync_instances": "rule:admin_api", + "os_compute_api:os-cells:discoverable": "", + "os_compute_api:os-certificates:create": "", + "os_compute_api:os-certificates:show": "", + "os_compute_api:os-certificates:discoverable": "", + "os_compute_api:os-cloudpipe": "rule:admin_api", + "os_compute_api:os-cloudpipe:discoverable": "", + "os_compute_api:os-config-drive": "", + "os_compute_api:os-consoles:discoverable": "", + "os_compute_api:os-consoles:create": "", + "os_compute_api:os-consoles:delete": "", + "os_compute_api:os-consoles:index": "", + "os_compute_api:os-consoles:show": "", + "os_compute_api:os-console-output:discoverable": "", + "os_compute_api:os-console-output": "", + "os_compute_api:os-remote-consoles": "", + "os_compute_api:os-remote-consoles:discoverable": "", + "os_compute_api:os-create-backup:discoverable": "", + "os_compute_api:os-create-backup": "rule:admin_or_owner", + "os_compute_api:os-deferred-delete": "", + "os_compute_api:os-deferred-delete:discoverable": "", + "os_compute_api:os-disk-config": "", + "os_compute_api:os-disk-config:discoverable": "", + "os_compute_api:os-evacuate": "rule:admin_api", + "os_compute_api:os-evacuate:discoverable": "", + "os_compute_api:os-extended-server-attributes": "rule:admin_api", + "os_compute_api:os-extended-server-attributes:discoverable": "", + "os_compute_api:os-extended-status": "", + "os_compute_api:os-extended-status:discoverable": "", + "os_compute_api:os-extended-availability-zone": "", + "os_compute_api:os-extended-availability-zone:discoverable": "", + "os_compute_api:extensions": "", + "os_compute_api:extension_info:discoverable": "", + "os_compute_api:os-extended-volumes": "", + "os_compute_api:os-extended-volumes:discoverable": "", + "os_compute_api:os-fixed-ips": "rule:admin_api", + "os_compute_api:os-fixed-ips:discoverable": "", + "os_compute_api:os-flavor-access": "", + "os_compute_api:os-flavor-access:discoverable": "", + "os_compute_api:os-flavor-access:remove_tenant_access": "rule:admin_api", + "os_compute_api:os-flavor-access:add_tenant_access": "rule:admin_api", + 
"os_compute_api:os-flavor-rxtx": "", + "os_compute_api:os-flavor-rxtx:discoverable": "", + "os_compute_api:flavors:discoverable": "", + "os_compute_api:os-flavor-extra-specs:discoverable": "", + "os_compute_api:os-flavor-extra-specs:index": "", + "os_compute_api:os-flavor-extra-specs:show": "", + "os_compute_api:os-flavor-extra-specs:create": "rule:admin_api", + "os_compute_api:os-flavor-extra-specs:update": "rule:admin_api", + "os_compute_api:os-flavor-extra-specs:delete": "rule:admin_api", + "os_compute_api:os-flavor-manage:discoverable": "", + "os_compute_api:os-flavor-manage": "rule:admin_api", + "os_compute_api:os-floating-ip-dns": "", + "os_compute_api:os-floating-ip-dns:discoverable": "", + "os_compute_api:os-floating-ip-dns:domain:update": "rule:admin_api", + "os_compute_api:os-floating-ip-dns:domain:delete": "rule:admin_api", + "os_compute_api:os-floating-ip-pools": "", + "os_compute_api:os-floating-ip-pools:discoverable": "", + "os_compute_api:os-floating-ips": "", + "os_compute_api:os-floating-ips:discoverable": "", + "os_compute_api:os-floating-ips-bulk": "rule:admin_api", + "os_compute_api:os-floating-ips-bulk:discoverable": "", + "os_compute_api:os-fping": "", + "os_compute_api:os-fping:discoverable": "", + "os_compute_api:os-fping:all_tenants": "rule:admin_api", + "os_compute_api:os-hide-server-addresses": "is_admin:False", + "os_compute_api:os-hide-server-addresses:discoverable": "", + "os_compute_api:os-hosts": "rule:admin_api", + "os_compute_api:os-hosts:discoverable": "", + "os_compute_api:os-hypervisors": "rule:admin_api", + "os_compute_api:os-hypervisors:discoverable": "", + "os_compute_api:images:discoverable": "", + "os_compute_api:image-size": "", + "os_compute_api:image-size:discoverable": "", + "os_compute_api:os-instance-actions": "", + "os_compute_api:os-instance-actions:discoverable": "", + "os_compute_api:os-instance-actions:events": "rule:admin_api", + "os_compute_api:os-instance-usage-audit-log": "rule:admin_api", + "os_compute_api:os-instance-usage-audit-log:discoverable": "", + "os_compute_api:ips:discoverable": "", + "os_compute_api:ips:index": "rule:admin_or_owner", + "os_compute_api:ips:show": "rule:admin_or_owner", + "os_compute_api:os-keypairs:discoverable": "", + "os_compute_api:os-keypairs": "", + "os_compute_api:os-keypairs:index": "rule:admin_api or user_id:%(user_id)s", + "os_compute_api:os-keypairs:show": "rule:admin_api or user_id:%(user_id)s", + "os_compute_api:os-keypairs:create": "rule:admin_api or user_id:%(user_id)s", + "os_compute_api:os-keypairs:delete": "rule:admin_api or user_id:%(user_id)s", + "os_compute_api:limits:discoverable": "", + "os_compute_api:limits": "", + "os_compute_api:os-lock-server:discoverable": "", + "os_compute_api:os-lock-server:lock": "rule:admin_or_owner", + "os_compute_api:os-lock-server:unlock": "rule:admin_or_owner", + "os_compute_api:os-lock-server:unlock:unlock_override": "rule:admin_api", + "os_compute_api:os-migrate-server:discoverable": "", + "os_compute_api:os-migrate-server:migrate": "rule:admin_api", + "os_compute_api:os-migrate-server:migrate_live": "rule:admin_api", + "os_compute_api:os-multinic": "", + "os_compute_api:os-multinic:discoverable": "", + "os_compute_api:os-networks": "rule:admin_api", + "os_compute_api:os-networks:view": "", + "os_compute_api:os-networks:discoverable": "", + "os_compute_api:os-networks-associate": "rule:admin_api", + "os_compute_api:os-networks-associate:discoverable": "", + "os_compute_api:os-pause-server:discoverable": "", + "os_compute_api:os-pause-server:pause": 
"rule:admin_or_owner", + "os_compute_api:os-pause-server:unpause": "rule:admin_or_owner", + "os_compute_api:os-pci:pci_servers": "", + "os_compute_api:os-pci:discoverable": "", + "os_compute_api:os-pci:index": "rule:admin_api", + "os_compute_api:os-pci:detail": "rule:admin_api", + "os_compute_api:os-pci:show": "rule:admin_api", + "os_compute_api:os-personality:discoverable": "", + "os_compute_api:os-preserve-ephemeral-rebuild:discoverable": "", + "os_compute_api:os-quota-sets:discoverable": "", + "os_compute_api:os-quota-sets:show": "rule:admin_or_owner", + "os_compute_api:os-quota-sets:defaults": "", + "os_compute_api:os-quota-sets:update": "rule:admin_api", + "os_compute_api:os-quota-sets:delete": "rule:admin_api", + "os_compute_api:os-quota-sets:detail": "rule:admin_api", + "os_compute_api:os-quota-class-sets:update": "rule:admin_api", + "os_compute_api:os-quota-class-sets:show": "is_admin:True or quota_class:%(quota_class)s", + "os_compute_api:os-quota-class-sets:discoverable": "", + "os_compute_api:os-rescue": "", + "os_compute_api:os-rescue:discoverable": "", + "os_compute_api:os-scheduler-hints:discoverable": "", + "os_compute_api:os-security-group-default-rules:discoverable": "", + "os_compute_api:os-security-group-default-rules": "rule:admin_api", + "os_compute_api:os-security-groups": "", + "os_compute_api:os-security-groups:discoverable": "", + "os_compute_api:os-server-diagnostics": "rule:admin_api", + "os_compute_api:os-server-diagnostics:discoverable": "", + "os_compute_api:os-server-password": "", + "os_compute_api:os-server-password:discoverable": "", + "os_compute_api:os-server-usage": "", + "os_compute_api:os-server-usage:discoverable": "", + "os_compute_api:os-server-groups": "", + "os_compute_api:os-server-groups:discoverable": "", + "os_compute_api:os-services": "rule:admin_api", + "os_compute_api:os-services:discoverable": "", + "os_compute_api:server-metadata:discoverable": "", + "os_compute_api:server-metadata:index": "rule:admin_or_owner", + "os_compute_api:server-metadata:show": "rule:admin_or_owner", + "os_compute_api:server-metadata:delete": "rule:admin_or_owner", + "os_compute_api:server-metadata:create": "rule:admin_or_owner", + "os_compute_api:server-metadata:update": "rule:admin_or_owner", + "os_compute_api:server-metadata:update_all": "rule:admin_or_owner", + "os_compute_api:servers:discoverable": "", + "os_compute_api:os-shelve:shelve": "", + "os_compute_api:os-shelve:shelve:discoverable": "", + "os_compute_api:os-shelve:shelve_offload": "rule:admin_api", + "os_compute_api:os-simple-tenant-usage:discoverable": "", + "os_compute_api:os-simple-tenant-usage:show": "rule:admin_or_owner", + "os_compute_api:os-simple-tenant-usage:list": "rule:admin_api", + "os_compute_api:os-suspend-server:discoverable": "", + "os_compute_api:os-suspend-server:suspend": "rule:admin_or_owner", + "os_compute_api:os-suspend-server:resume": "rule:admin_or_owner", + "os_compute_api:os-tenant-networks": "rule:admin_or_owner", + "os_compute_api:os-tenant-networks:discoverable": "", + "os_compute_api:os-shelve:unshelve": "", + "os_compute_api:os-user-data:discoverable": "", + "os_compute_api:os-virtual-interfaces": "", + "os_compute_api:os-virtual-interfaces:discoverable": "", + "os_compute_api:os-volumes": "", + "os_compute_api:os-volumes:discoverable": "", + "os_compute_api:os-volumes-attachments:index": "", + "os_compute_api:os-volumes-attachments:show": "", + "os_compute_api:os-volumes-attachments:create": "", + "os_compute_api:os-volumes-attachments:update": "", + 
"os_compute_api:os-volumes-attachments:delete": "", + "os_compute_api:os-volumes-attachments:discoverable": "", + "os_compute_api:os-availability-zone:list": "", + "os_compute_api:os-availability-zone:discoverable": "", + "os_compute_api:os-availability-zone:detail": "rule:admin_api", + "os_compute_api:os-used-limits": "rule:admin_api", + "os_compute_api:os-used-limits:discoverable": "", + "os_compute_api:os-migrations:index": "rule:admin_api", + "os_compute_api:os-migrations:discoverable": "", + "os_compute_api:os-assisted-volume-snapshots:create": "rule:admin_api", + "os_compute_api:os-assisted-volume-snapshots:delete": "rule:admin_api", + "os_compute_api:os-assisted-volume-snapshots:discoverable": "", + "os_compute_api:os-console-auth-tokens": "rule:admin_api", + "os_compute_api:os-server-external-events:create": "rule:admin_api" +} -- cgit 1.2.3-korg