Diffstat (limited to 'xci/installer')
-rw-r--r--  xci/installer/kubespray/README | 64
-rwxr-xr-x  xci/installer/kubespray/deploy.sh | 157
-rw-r--r--  xci/installer/kubespray/files/ha/inventory/group_vars/all.yml | 8
-rw-r--r--  xci/installer/kubespray/playbooks/configure-installer.yml | 50
-rw-r--r--  xci/installer/kubespray/playbooks/configure-kubenet.yml | 51
-rw-r--r--  xci/installer/kubespray/playbooks/configure-opnfvhost.yml | 101
-rw-r--r--  xci/installer/kubespray/playbooks/configure-targethosts.yml | 40
-rw-r--r--  xci/installer/kubespray/playbooks/group_vars/all | 54
-rw-r--r--  xci/installer/kubespray/playbooks/post-deployment.yml | 42
-rwxr-xr-x  xci/installer/osa/deploy.sh | 45
-rw-r--r--  xci/installer/osa/files/aio/flavor-vars.yml | 3
-rw-r--r--  xci/installer/osa/files/aio/inventory | 2
-rw-r--r--  xci/installer/osa/files/ansible-role-requirements.yml | 152
-rw-r--r--  xci/installer/osa/files/global-requirement-pins.txt | 13
-rw-r--r--  xci/installer/osa/files/ha/flavor-vars.yml | 39
-rw-r--r--  xci/installer/osa/files/ha/inventory | 11
-rw-r--r--  xci/installer/osa/files/ha/openstack_user_config.yml | 60
-rw-r--r--  xci/installer/osa/files/ha/user_variables.yml | 7
-rw-r--r--  xci/installer/osa/files/mini/flavor-vars.yml | 21
-rw-r--r--  xci/installer/osa/files/mini/inventory | 8
-rw-r--r--  xci/installer/osa/files/mini/user_variables.yml | 7
-rw-r--r--  xci/installer/osa/files/noha/flavor-vars.yml | 27
-rw-r--r--  xci/installer/osa/files/noha/inventory | 9
-rw-r--r--  xci/installer/osa/files/noha/user_variables.yml | 7
-rw-r--r--  xci/installer/osa/files/openstack_services.yml | 158
-rw-r--r--  xci/installer/osa/files/setup-openstack.yml | 4
-rw-r--r--  xci/installer/osa/files/user_variables_proxy.yml | 22
-rw-r--r--  xci/installer/osa/files/user_variables_xci.yml | 17
-rw-r--r--  xci/installer/osa/playbooks/bootstrap-scenarios.yml | 23
-rw-r--r--  xci/installer/osa/playbooks/configure-localhost.yml | 75
-rw-r--r--  xci/installer/osa/playbooks/configure-opnfvhost.yml | 280
-rw-r--r--  xci/installer/osa/playbooks/configure-targethosts.yml | 63
-rw-r--r--  xci/installer/osa/playbooks/post-deployment.yml | 66
-rw-r--r--  xci/installer/osh/README | 50
-rwxr-xr-x  xci/installer/osh/deploy.sh | 170
-rw-r--r--  xci/installer/osh/files/ha/inventory/group_vars/all.yml | 8
-rw-r--r--  xci/installer/osh/playbooks/configure-installer.yml | 51
-rw-r--r--  xci/installer/osh/playbooks/configure-kubenet.yml | 51
-rw-r--r--  xci/installer/osh/playbooks/configure-opnfvhost.yml | 101
-rw-r--r--  xci/installer/osh/playbooks/configure-targethosts.yml | 40
-rw-r--r--  xci/installer/osh/playbooks/group_vars/all.yml | 55
-rw-r--r--  xci/installer/osh/playbooks/install-openstack-helm.yml | 24
-rw-r--r--  xci/installer/osh/playbooks/post-deployment.yml | 42
-rw-r--r--  xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml | 109
-rw-r--r--  xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml | 18
-rw-r--r--  xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml | 130
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml | 12
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service | 11
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml | 130
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml | 31
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml | 33
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2 | 4
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml | 7
53 files changed, 2202 insertions, 561 deletions
diff --git a/xci/installer/kubespray/README b/xci/installer/kubespray/README
new file mode 100644
index 00000000..04202c28
--- /dev/null
+++ b/xci/installer/kubespray/README
@@ -0,0 +1,64 @@
+The file xci/installer/kubespray/files/k8s-cluster.yml is obtained from kubespray.
+You can change its parameters according to your needs.
+When the deployment starts, the file is copied to the right directory and used by kubespray.
+
+For example:
+ kube_network_plugin: calico
+ docker_storage_options: -s overlay2
+ kube_service_addresses: 10.233.0.0/18
+
+Requirements:
+ 1. Performance of hosts
+     There are no official minimum requirements; the following settings are recommended:
+ - VM_CPU=6
+ - VM_DISK=80GB
+ - VM_MEMORY_SIZE=16GB
+
+ 2. Distributions
+ - Ubuntu 16.04
+
+ 3. Packages:
+     - Ansible v2.4 (or newer) and python-netaddr are installed on the machine that will run the Ansible commands
+ - Jinja 2.9 (or newer) is required to run the Ansible Playbooks
+
+ 4. Others:
+ - The target servers must have access to the Internet in order to pull docker images.
+ - The target servers are configured to allow IPv4 forwarding.
+    - Your ssh key must be copied to all the servers that are part of your inventory.
+    - The firewalls are not managed; you'll need to implement your own rules as you usually do. To avoid any issues during the deployment, you should disable your firewall.
+
+Flavors:
+ 1. aio: Single host which acts as the deployment host, master and node.
+ 2. mini: One deployment host, 1 master host and 1 node host.
+ 3. noha: One deployment host, 1 master host and 2 node hosts.
+ 4. ha: One deployment host, 3 master hosts and 2 node hosts.
+
+Components Installed:
+ 1. etcd
+  2. network plugin (choose one of the following; the default is calico)
+ - calico
+ - flannel
+     - contiv
+ - weave
+ 3. kubernetes
+ 4. docker
+
+How to use:
+
+Clone the OPNFV Releng repository
+
+ git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
+
+Change into the directory where the sandbox script is located
+
+ cd releng-xci/xci
+
+Set the variables to run kubespray
+
+ export INSTALLER_TYPE=kubespray
+ export DEPLOY_SCENARIO=k8-nosdn-nofeature
+ export XCI_FLAVOR=mini
+
+Execute the sandbox script
+
+ ./xci-deploy.sh
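
As an illustrative follow-up to the parameter example at the top of this README (a sketch only; it assumes k8s-cluster.yml is present at the stated path and contains the kube_network_plugin line shown above), the network plugin could be switched before running the deployment, e.g. from the top of the releng-xci clone:

    sed -i 's/^kube_network_plugin:.*/kube_network_plugin: flannel/' xci/installer/kubespray/files/k8s-cluster.yml
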
diff --git a/xci/installer/kubespray/deploy.sh b/xci/installer/kubespray/deploy.sh
new file mode 100755
index 00000000..af80b38f
--- /dev/null
+++ b/xci/installer/kubespray/deploy.sh
@@ -0,0 +1,157 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Huawei
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -o errexit
+set -o nounset
+set -o pipefail
+
+K8_XCI_PLAYBOOKS="$(dirname $(realpath ${BASH_SOURCE[0]}))/playbooks"
+export ANSIBLE_ROLES_PATH=$HOME/.ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles
+
+#-------------------------------------------------------------------------------
+# Configure localhost
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng-xci repository
+# - clones kubernetes-incubator/kubespray repository
+# - creates log directory
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring localhost for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \
+ -i dynamic_inventory.py configure-localhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured localhost for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure installer
+#-------------------------------------------------------------------------------
+# TODO: summarize what this playbook does
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring kubespray installer"
+echo "-----------------------------------------------------------------------"
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-installer.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured kubespray installer"
+
+#-------------------------------------------------------------------------------
+# Configure deployment host, opnfv
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - synchronizes the opnfv/releng-xci and kubernetes-incubator/kubespray repositories
+# - generates/prepares ssh keys
+# - copies flavor files to be used by kubespray
+# - installs packages required by kubespray
+#-------------------------------------------------------------------------------
+echo "Info: Configuring opnfv deployment host for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-opnfvhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured opnfv deployment host for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure target hosts for kubespray
+#-------------------------------------------------------------------------------
+# This playbook is only run for all flavors except aio, since aio is configured by configure-opnfvhost.yml
+# This playbook
+# - adds public keys to target hosts
+# - installs packages required by kubespray
+# - configures haproxy service
+#-------------------------------------------------------------------------------
+if [ $XCI_FLAVOR != "aio" ]; then
+ echo "Info: Configuring target hosts for kubespray"
+ echo "-----------------------------------------------------------------------"
+ cd $K8_XCI_PLAYBOOKS
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-targethosts.yml
+ echo "-----------------------------------------------------------------------"
+ echo "Info: Configured target hosts for kubespray"
+fi
+
+
+echo "Info: Using kubespray to deploy the kubernetes cluster"
+echo "-----------------------------------------------------------------------"
+ssh root@$OPNFV_HOST_IP "set -o pipefail; export XCI_FLAVOR=$XCI_FLAVOR; export INSTALLER_TYPE=$INSTALLER_TYPE; \
+ export IDF=/root/releng-xci/xci/var/idf.yml; export PDF=/root/releng-xci/xci/var/pdf.yml; \
+ cd releng-xci/.cache/repos/kubespray/; ansible-playbook \
+ -i inventory/opnfv/dynamic_inventory.py cluster.yml -b | tee setup-kubernetes.log"
+scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \
+ $LOG_PATH/setup-kubernetes.log
+
+
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-kubenet.yml
+echo
+echo "-----------------------------------------------------------------------"
+echo "Info: Kubernetes installation is successfully completed!"
+echo "-----------------------------------------------------------------------"
+
+#-------------------------------------------------------------------------------
+# Execute post-installation tasks
+#-------------------------------------------------------------------------------
+# The post-deployment.yml playbook is used to execute any post-deployment tasks
+# that are required for the scenario under test.
+#-------------------------------------------------------------------------------
+# copy admin.conf
+ssh root@$OPNFV_HOST_IP "mkdir -p ~/.kube/; \
+ cp -f ~/admin.conf ~/.kube/config"
+echo "-----------------------------------------------------------------------"
+echo "Info: Running post-deployment scenario role"
+echo "-----------------------------------------------------------------------"
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ post-deployment.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Post-deployment scenario role execution done"
+echo "-----------------------------------------------------------------------"
+echo
+echo "Login opnfv host ssh root@$OPNFV_HOST_IP
+according to the user-guide to create a service
+https://kubernetes.io/docs/user-guide/walkthrough/k8s201/"
+echo
+echo "-----------------------------------------------------------------------"
+echo "Info: Kubernetes login details"
+echo "-----------------------------------------------------------------------"
+echo
+# Get the dashboard URL
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $OPNFV_HOST_IP;
+then
+ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $OPNFV_HOST_IP;
+echo "known_hosts entry from opnfv host from previous deployment found and deleted"
+fi
+DASHBOARD_SERVICE=$(ssh -q -o StrictHostKeyChecking=no root@$OPNFV_HOST_IP "kubectl get service -n kube-system |grep kubernetes-dashboard")
+DASHBOARD_PORT=$(echo ${DASHBOARD_SERVICE} | awk '{print $5}' |awk -F "[:/]" '{print $2}')
+KUBER_SERVER_URL=$(ssh root@$OPNFV_HOST_IP "grep -r server ~/.kube/config")
+echo "Info: Kubernetes Dashboard URL:"
+echo $KUBER_SERVER_URL | awk '{print $2}'| sed -n "s#:[0-9]*\$#:$DASHBOARD_PORT#p"
+
+# Get the dashboard user and password
+MASTER_IP=$(echo ${KUBER_SERVER_URL} | awk '{print $2}' |awk -F "[:/]" '{print $4}')
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $MASTER_IP;
+then
+ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $MASTER_IP;
+echo "Info: known_hosts entry for master host from previous deployment found and deleted"
+fi
+USER_CSV=$(ssh -q -o StrictHostKeyChecking=no root@$MASTER_IP " cat /etc/kubernetes/users/known_users.csv")
+USERNAME=$(echo $USER_CSV |awk -F ',' '{print $2}')
+PASSWORD=$(echo $USER_CSV |awk -F ',' '{print $1}')
+echo "Info: Dashboard username: ${USERNAME}"
+echo "Info: Dashboard password: ${PASSWORD}"
+
+# vim: set ts=4 sw=4 expandtab:
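
A minimal sketch of how the freshly deployed cluster could be sanity-checked after this script finishes, assuming admin.conf was copied to ~/.kube/config on the opnfv host as done above and OPNFV_HOST_IP is still set:

    ssh root@$OPNFV_HOST_IP "kubectl get nodes -o wide"
    ssh root@$OPNFV_HOST_IP "kubectl get pods --all-namespaces"
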
diff --git a/xci/installer/kubespray/files/ha/inventory/group_vars/all.yml b/xci/installer/kubespray/files/ha/inventory/group_vars/all.yml
new file mode 100644
index 00000000..d1b946a7
--- /dev/null
+++ b/xci/installer/kubespray/files/ha/inventory/group_vars/all.yml
@@ -0,0 +1,8 @@
+---
+loadbalancer_apiserver:
+ address: 192.168.122.222
+ port: 8383
+
+apiserver_loadbalancer_domain_name: 192.168.122.222
+supplementary_addresses_in_ssl_keys:
+ - 192.168.122.222
diff --git a/xci/installer/kubespray/playbooks/configure-installer.yml b/xci/installer/kubespray/playbooks/configure-installer.yml
new file mode 100644
index 00000000..d88ee55c
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/configure-installer.yml
@@ -0,0 +1,50 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ connection: local
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ tasks:
+ - name: delete existing kubespray/inventory/opnfv directory
+ file:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+ state: absent
+
+ - name: copy kubespray/inventory/sample as kubespray/inventory/opnfv
+ copy:
+ src: "{{ xci_path }}/.cache/repos/kubespray/inventory/sample/"
+ dest: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+
+ - name: update kubespray k8s-cluster.yml for xci
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ with_items:
+ - { regexp: "kube_version:.*", line: "kube_version: {{ kubernetes_version }}" }
+ - { regexp: "kubeconfig_localhost:.*", line: "kubeconfig_localhost: true" }
+ - { regexp: "kube_basic_auth:.*", line: "kube_basic_auth: true" }
+ - { regexp: "dashboard_enabled:.*", line: "dashboard_enabled: true" }
+
+# NOTE(fdegir): the reason this task is separate from the task that uses the lineinfile
+# module above is that escaping curly braces does not work with with_items. What happens is
+# that ansible tries to resolve {{ ansible_env.HOME }}, which we don't want since it should
+# point to the home folder of the user executing this task at runtime.
+ - name: update kubespray artifacts_dir
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "artifacts_dir:.*"
+ line: "artifacts_dir: '{{ '{{' }} ansible_env.HOME {{ '}}' }}'"
+
+ - name: change dashboard server type to NodePort
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2"
+ insertafter: 'targetPort'
+ line: " type: NodePort"
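
As a reference for the curly-brace escaping explained in the NOTE above, the task is expected to leave a literal Jinja expression in the rendered file so that kubespray resolves it at its own runtime; a quick, illustrative check from the releng-xci checkout:

    grep '^artifacts_dir' .cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml
    # expected (illustrative): artifacts_dir: '{{ ansible_env.HOME }}'
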
diff --git a/xci/installer/kubespray/playbooks/configure-kubenet.yml b/xci/installer/kubespray/playbooks/configure-kubenet.yml
new file mode 100644
index 00000000..18a126c1
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/configure-kubenet.yml
@@ -0,0 +1,51 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# NOTE(hwoarang) Kubenet expects networking to be prepared by the administrator so it's necessary
+# to do that as part of the node configuration. All we need is to add static routes on every node
+# so cbr0 interfaces can talk to each other.
+- name: Prepare networking for kubenet
+ hosts: k8s-cluster
+ remote_user: root
+ gather_facts: True
+ become: yes
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ tasks:
+ - name: Configure static routes
+ block:
+ - name: Collect cbr0 information from the nodes
+ set_fact:
+ kubenet_xci_static_routes: |-
+ {% set static_routes = [] %}
+ {% for host in groups['k8s-cluster']|select("ne", inventory_hostname) %}
+ {%- set _ = static_routes.append(
+ {'network': (hostvars[host]['ansible_cbr0']['ipv4']['network']+'/'+
+ hostvars[host]['ansible_cbr0']['ipv4']['netmask'])|ipaddr('net'),
+ 'gateway': hostvars[host]['ansible_default_ipv4']['address']}) -%}
+ {% endfor %}
+ {{ static_routes }}
+
+ - name: Add static routes on each node
+ shell: "ip route show | grep -q {{ item.network }} || ip route add {{ item.network }} via {{ item.gateway }}"
+ with_items: "{{ kubenet_xci_static_routes }}"
+ loop_control:
+ label: "{{ item.network }}"
+ when: deploy_scenario.find('k8-nosdn-') != -1
+
+ - name: Ensure rp_filter is disabled on localhost
+ sysctl:
+ name: net.ipv4.conf.all.rp_filter
+ sysctl_set: yes
+ state: present
+ value: "{{ (kubenet_xci_static_routes is defined) | ternary(0, 1) }}"
+ reload: yes
+ delegate_to: localhost
+ run_once: True
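
For clarity, the static-route task above effectively runs a command of the following shape on every node, with the cbr0 subnet and the gateway taken from the other nodes' facts (the addresses below are purely illustrative):

    ip route show | grep -q 10.233.65.0/24 || ip route add 10.233.65.0/24 via 192.168.122.4
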
diff --git a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
new file mode 100644
index 00000000..52e42b06
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
@@ -0,0 +1,101 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_path: "{{ ansible_env.HOME }}/releng-xci"
+ remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{ installer_type }}/files/{{ xci_flavor }}"
+ remote_xci_playbooks: "{{ ansible_env.HOME }}/releng-xci/xci/playbooks"
+
+ roles:
+ - role: bootstrap-host
+ configure_network: xci_flavor != 'aio'
+
+ tasks:
+ - name: Create list of files to copy
+ shell: |
+ git ls-tree -r --name-only HEAD > {{ xci_cache }}/releng-xci.files
+ echo ".git/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/repos/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/xci.env" >> {{ xci_cache }}/releng-xci.files
+ args:
+ executable: /bin/bash
+ chdir: "{{ xci_path }}"
+ changed_when: False
+ delegate_to: 127.0.0.1
+ tags:
+ - skip_ansible_lint
+
+ - name: Copy releng-xci to remote host
+ synchronize:
+ archive: yes
+ src: "{{ xci_path }}/"
+ dest: "{{ remote_xci_path }}"
+ delete: yes
+ rsync_opts:
+ - "--recursive"
+ - "--files-from={{ xci_cache }}/releng-xci.files"
+
+ - name: link xci dynamic inventory to kubespray/inventory/opnfv directory
+ file:
+ src: "{{ remote_xci_playbooks }}/dynamic_inventory.py"
+ path: "{{ remote_xci_path }}/.cache/repos/kubespray/inventory/opnfv/dynamic_inventory.py"
+ state: link
+
+    - name: Download kubectl and place it in /usr/local/bin
+ get_url:
+ url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version }}/bin/linux/amd64/kubectl"
+ dest: /usr/local/bin/kubectl
+ owner: root
+ group: root
+ mode: 0755
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Prepare everything to run the {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+
+ - name: Install required packages
+ package:
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
+ state: present
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+ when: xci_flavor == 'aio'
+
+ - name: pip install required packages
+ pip:
+ name: "{{ item.name }}"
+ version: "{{ item.version | default(omit) }}"
+ with_items:
+ - { name: 'ansible', version: "{{ xci_kube_ansible_pip_version }}" }
+ - { name: 'netaddr' }
+ - { name: 'ansible-modules-hashivault' }
+
+ - name: fetch xci environment
+ copy:
+ src: "{{ xci_path }}/.cache/xci.env"
+ dest: /root/xci.env
+
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
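
A small sketch of how the outcome of this play could be verified on the opnfv host (assumes the play completed; the kubectl version reported depends on the kubernetes_version variable):

    ssh root@$OPNFV_HOST_IP "/usr/local/bin/kubectl version --client"
    ssh root@$OPNFV_HOST_IP "pip list 2>/dev/null | grep -iE 'ansible|netaddr'"
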
diff --git a/xci/installer/kubespray/playbooks/configure-targethosts.yml b/xci/installer/kubespray/playbooks/configure-targethosts.yml
new file mode 100644
index 00000000..2fde9877
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/configure-targethosts.yml
@@ -0,0 +1,40 @@
+---
+- hosts: k8s-cluster
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+
+ roles:
+ - role: bootstrap-host
+
+ tasks:
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
+
+ - name: Install dbus
+ package:
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
+ state: present
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+
+- hosts: kube-master
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ roles:
+ - role: "keepalived"
+ when: xci_flavor == 'ha'
+ - role: "haproxy_server"
+      haproxy_service_configs: "{{ haproxy_default_services }}"
+ when: xci_flavor == 'ha'
diff --git a/xci/installer/kubespray/playbooks/group_vars/all b/xci/installer/kubespray/playbooks/group_vars/all
new file mode 100644
index 00000000..328f8dba
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/group_vars/all
@@ -0,0 +1,54 @@
+keepalived_ubuntu_src: "uca"
+keepalived_uca_apt_repo_url: "{{ uca_apt_repo_url | default('http://ubuntu-cloud.archive.canonical.com/ubuntu') }}"
+
+keepalived_sync_groups:
+ haproxy:
+ instances:
+ - external
+
+haproxy_keepalived_external_interface: "{{ ansible_default_ipv4.interface }}"
+haproxy_keepalived_authentication_password: 'keepalived'
+keepalived_instances:
+ external:
+ interface: "{{ haproxy_keepalived_external_interface }}"
+ state: "BACKUP"
+ virtual_router_id: "{{ haproxy_keepalived_external_virtual_router_id | default ('10') }}"
+ priority: "{{ ((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*100)-((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*50) }}"
+ authentication_password: "{{ haproxy_keepalived_authentication_password }}"
+ vips:
+ - "{{ haproxy_keepalived_external_vip_cidr | default('192.168.122.222/32') }} dev {{ haproxy_keepalived_external_interface }}"
+
+haproxy_default_services:
+ - service:
+ haproxy_service_name: proxy-apiserver
+ haproxy_backend_nodes: "{{ groups['kube-master'] | default([]) }}"
+ haproxy_port: 8383
+ haproxy_backend_port: 6443
+ haproxy_balance_type: tcp
+
+haproxy_bind_on_non_local: "True"
+haproxy_use_keepalived: "True"
+keepalived_selinux_compile_rules:
+ - keepalived_ping
+ - keepalived_haproxy_pid_file
+
+# Ensure that the package state matches the global setting
+haproxy_package_state: "latest"
+
+haproxy_whitelist_networks:
+ - 192.168.0.0/16
+ - 172.16.0.0/12
+ - 10.0.0.0/8
+
+haproxy_galera_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_glance_registry_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_keystone_admin_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_nova_metadata_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_rabbitmq_management_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_git_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_cache_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_octavia_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_ssl: false
+
+internal_lb_vip_address: "192.168.122.222"
+external_lb_vip_address: "{{ internal_lb_vip_address }}"
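
Given the settings above (VIP 192.168.122.222, haproxy front-end port 8383 balancing to the kube-master back ends on 6443), a hedged way to probe the HA API endpoint from the deployment host once the cluster is up:

    curl -k https://192.168.122.222:8383/healthz
    # expected (illustrative): ok
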
diff --git a/xci/installer/kubespray/playbooks/post-deployment.yml b/xci/installer/kubespray/playbooks/post-deployment.yml
new file mode 100644
index 00000000..5c2f7f36
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/post-deployment.yml
@@ -0,0 +1,42 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_scenario_path: "{{ ansible_env.HOME }}/releng-xci/.cache/repos/scenarios/{{ deploy_scenario }}/scenarios/{{ deploy_scenario }}"
+
+ tasks:
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Check if any post-deployment task defined for {{ deploy_scenario }} role
+ stat:
+ path: "{{ remote_xci_scenario_path }}/role/{{ deploy_scenario }}/tasks/post-deployment.yml"
+ register: post_deployment_yml
+
+ - name: Execute post-deployment tasks of {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+ tasks_from: post-deployment
+ when:
+ - post_deployment_yml.stat.exists
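
For reference, with DEPLOY_SCENARIO=k8-nosdn-nofeature as in the kubespray README above, the file checked by the stat task would resolve to a path like the following on the opnfv host (illustrative; the role is only included when the file exists):

    ls ~/releng-xci/.cache/repos/scenarios/k8-nosdn-nofeature/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/post-deployment.yml
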
diff --git a/xci/installer/osa/deploy.sh b/xci/installer/osa/deploy.sh
index b8637f22..8b3a67d0 100755
--- a/xci/installer/osa/deploy.sh
+++ b/xci/installer/osa/deploy.sh
@@ -38,8 +38,8 @@ fi
echo "Info: Configuring localhost for openstack-ansible"
echo "-----------------------------------------------------------------------"
-cd $OSA_XCI_PLAYBOOKS
-ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -e XCI_PATH="${XCI_PATH}" -i inventory configure-localhost.yml
+cd $XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i "localhost," configure-localhost.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured localhost host for openstack-ansible"
@@ -57,7 +57,8 @@ echo "Info: Configured localhost host for openstack-ansible"
echo "Info: Configuring opnfv deployment host for openstack-ansible"
echo "-----------------------------------------------------------------------"
cd $OSA_XCI_PLAYBOOKS
-ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -e XCI_PATH="${XCI_PATH}" -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory \
+ansible-galaxy install -r ${XCI_PATH}/xci/files/requirements.yml -p $HOME/.ansible/roles
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
configure-opnfvhost.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured opnfv deployment host for openstack-ansible"
@@ -67,7 +68,6 @@ echo "Info: Configured opnfv deployment host for openstack-ansible"
#-------------------------------------------------------------------------------
# This playbook is only run for the all flavors except aio since aio is configured
# by an upstream script.
-
# This playbook
# - adds public keys to target hosts
# - configures network
@@ -77,7 +77,7 @@ if [[ $XCI_FLAVOR != "aio" ]]; then
echo "Info: Configuring target hosts for openstack-ansible"
echo "-----------------------------------------------------------------------"
cd $OSA_XCI_PLAYBOOKS
- ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -e XCI_PATH="${XCI_PATH}" -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory \
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
configure-targethosts.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured target hosts"
@@ -90,7 +90,7 @@ fi
#-------------------------------------------------------------------------------
echo "Info: Setting up target hosts for openstack-ansible"
echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_VERBOSITY} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
releng-xci/.cache/repos/openstack-ansible/playbooks/setup-hosts.yml | tee setup-hosts.log "
scp root@$OPNFV_HOST_IP:~/setup-hosts.log $LOG_PATH/setup-hosts.log
echo "-----------------------------------------------------------------------"
@@ -112,7 +112,7 @@ echo "Info: Set up target hosts for openstack-ansible successfuly"
echo "Info: Gathering facts"
echo "-----------------------------------------------------------------------"
ssh root@$OPNFV_HOST_IP "set -o pipefail; cd releng-xci/.cache/repos/openstack-ansible/playbooks; \
- ansible ${XCI_ANSIBLE_VERBOSITY} -m setup -a gather_subset=network,hardware,virtual all"
+ ansible -m setup -a gather_subset=network,hardware,virtual all"
echo "-----------------------------------------------------------------------"
#-------------------------------------------------------------------------------
@@ -123,15 +123,10 @@ echo "-----------------------------------------------------------------------"
echo "Info: Setting up infrastructure"
echo "-----------------------------------------------------------------------"
echo "xci: running ansible playbook setup-infrastructure.yml"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_VERBOSITY} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
releng-xci/.cache/repos/openstack-ansible/playbooks/setup-infrastructure.yml | tee setup-infrastructure.log"
scp root@$OPNFV_HOST_IP:~/setup-infrastructure.log $LOG_PATH/setup-infrastructure.log
echo "-----------------------------------------------------------------------"
-# check the log to see if we have any error
-if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-infrastructure.log; then
- echo "Error: OpenStack node setup failed!"
- exit 1
-fi
#-------------------------------------------------------------------------------
# Verify database cluster
@@ -157,18 +152,30 @@ echo "Info: Database cluster verification successful!"
#-------------------------------------------------------------------------------
echo "Info: Installing OpenStack on target hosts"
echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_VERBOSITY} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
releng-xci/.cache/repos/openstack-ansible/playbooks/setup-openstack.yml | tee opnfv-setup-openstack.log"
scp root@$OPNFV_HOST_IP:~/opnfv-setup-openstack.log $LOG_PATH/opnfv-setup-openstack.log
echo "-----------------------------------------------------------------------"
-# check the log to see if we have any error
-if grep -q 'failed=1\|unreachable=1' $LOG_PATH/opnfv-setup-openstack.log; then
- echo "Error: OpenStack installation failed!"
- exit 1
-fi
+echo
echo "Info: OpenStack installation is successfully completed!"
#-------------------------------------------------------------------------------
+# Execute post-installation tasks
+#-------------------------------------------------------------------------------
+# The post-deployment.yml playbook is used to execute any post-deployment tasks
+# that are required for the scenario under test.
+#-------------------------------------------------------------------------------
+echo "-----------------------------------------------------------------------"
+echo "Info: Running post-deployment scenario role"
+echo "-----------------------------------------------------------------------"
+cd $OSA_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ post-deployment.yml
+echo "-----------------------------------------------------------------------"
+echo
+echo "Info: Post-deployment scenario role execution done"
+
+#-------------------------------------------------------------------------------
# - Getting OpenStack login information
#-------------------------------------------------------------------------------
echo "Info: Openstack login details"
diff --git a/xci/installer/osa/files/aio/flavor-vars.yml b/xci/installer/osa/files/aio/flavor-vars.yml
deleted file mode 100644
index 6ac1e0fe..00000000
--- a/xci/installer/osa/files/aio/flavor-vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# this file is added intentionally in order to simplify putting files in place
-# in future, it might contain vars specific to this flavor
diff --git a/xci/installer/osa/files/aio/inventory b/xci/installer/osa/files/aio/inventory
deleted file mode 100644
index 9a3dd9ee..00000000
--- a/xci/installer/osa/files/aio/inventory
+++ /dev/null
@@ -1,2 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
diff --git a/xci/installer/osa/files/ansible-role-requirements.yml b/xci/installer/osa/files/ansible-role-requirements.yml
index 195244a0..e787aff5 100644
--- a/xci/installer/osa/files/ansible-role-requirements.yml
+++ b/xci/installer/osa/files/ansible-role-requirements.yml
@@ -7,180 +7,180 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# these versions are based on the osa commit 7b3aac28a0a87e5966527829f6b0abcbc2303cc7 on 2017-12-11
-# https://review.openstack.org/gitweb?p=openstack/openstack-ansible.git;a=commit;h=7b3aac28a0a87e5966527829f6b0abcbc2303cc7
+# these versions are based on the osa commit e41b0c40501ea8906fcbdcc7d37ff6ef0cd5cf02 on 2018-12-11
+# https://git.openstack.org/cgit/openstack/openstack-ansible/commit/?h=refs/heads/stable/rocky&id=e41b0c40501ea8906fcbdcc7d37ff6ef0cd5cf02
- name: ansible-hardening
scm: git
src: https://git.openstack.org/openstack/ansible-hardening
- version: 46a94c72518f83d27b25a5fa960dde7130956215
+ version: 14e6bb6a411b6b03bf258144be66845a5831705c
- name: apt_package_pinning
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
- version: eba07d7dd7962d90301c49fc088551f9b35f367a
+ version: 4b2584d699c79ac65acfeb2157a97327df6f0fd6
- name: pip_install
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-pip_install
- version: 32c27505c6e0ee00ea0fb4a1c62240c60f17a0e3
+ version: 671e7129ad3dcf20bdda942842f9f76203bf5a5e
- name: galera_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_client
- version: 9a8302cbba24ea4e5907567e5f93e874d30d79df
+ version: 6dbac51e5b74ffdee429375f6c22739e7a5ef017
- name: galera_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_server
- version: f2bfbd38513ac8d61ba4e02a4d5ef6cbbca259cc
+ version: 7a7036f6d15ce3117a925217b66cba806034bb96
- name: ceph_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
- version: 34a04f7b24c80297866bc5ab56618e2211b1d5f9
+ version: 278aaca502533b33b9714393e47b536654055c58
- name: haproxy_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
- version: 9966fd96fede46c3b00c9e069e402eae90c66f17
+ version: 6bc259471283162b3cb8ec0c4bc736f81254d050
- name: keepalived
scm: git
src: https://github.com/evrardjp/ansible-keepalived
- version: 5deafcab39de162ac1550c58246963974e8dcf4e
+ version: 64764d25ab868417f1138a7b9605f2eb94cbfd02
- name: lxc_container_create
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
- version: 68f81c679be88577633f98e8b9252a62bdcef754
+ version: 14a74f2fb60fa7865cf34f75e3196e802847b9d1
- name: lxc_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
- version: 6b529de0315fe6cd12f6e78c00a5f2f2d3a01e28
+ version: 83e20af591b00fc796eba0e0e1c7650faaa20cd7
- name: memcached_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
- version: ae6f721dc0342e1e7b45ff2448ab51f7539dc01f
+ version: e058c81a44859c7bcd3eeaac49a8f25b423e38a4
- name: openstack_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
- version: 05c7f09d181de1809fd596cc0d879c49e3f86bbf
+ version: 0028cedcccc4913bd1c604404c84be16164d1fe5
- name: os_keystone
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
- version: cd9d4ef7d8614d241fa40ba33c1c205fd2b47fa1
+ version: 5a54cc6ba50875c4068e4cdfe3cb23ae1603e257
- name: openstack_openrc
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
- version: d594c2debc249daa5b7f6f2890f546093efd1ee5
+ version: 805ef5349db7d8af0132b546ff56a36ec80ea7db
- name: os_aodh
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
- version: ce871dee75511f94bfd24dde8f97e573cf6d3ead
+ version: 9b8d7483d69e60f4ae71ceb6a3336ff81f355c38
- name: os_barbican
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
- version: c3e191037d0978479e3cb95a59b2986adab28c69
+ version: f9ce44edb809c92735fa093334fa1d79cc538126
- name: os_ceilometer
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
- version: 55bb04eaad4dd5c7fdad742b3557dc30dc9d45bf
+ version: 221dcccfef3efa1a187678f71c59d81d7e930a92
- name: os_cinder
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
- version: 536dd3446e0fc7fc68ab42b982ac9affc4215787
+ version: a824d8d4dc6de6563f186449838e94c69a869e02
+- name: os_congress
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_congress
+ version: 0e6ccb63dba466bb1b7a11e94db7a420c716c06d
- name: os_designate
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_designate
- version: a65d7a3394aef340ff94587dd0bb48133ed00763
+ version: 74c33e9788607f772d8402c4f5cfc79eb379278b
- name: os_glance
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_glance
- version: 43aa00424f233a6125f7a9216cec42da1d8ca4c5
+ version: 7ec6a11b98715530e3cd5adbf682c2834e3122a8
- name: os_gnocchi
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
- version: b1f7574dc529f8298a983d8d0e09520e90b571a8
+ version: db881f143223723b38f5d197e8e4b6dd4e057c6f
- name: os_heat
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_heat
- version: 0b3eb9348d55d6b1cf077a2c45b297f9a1be730d
+ version: 14b8927123aa9b0cf47f365c1ab9f82147ce4bdc
- name: os_horizon
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
- version: da72526dc1757688ecec8914344e330aaa0be720
+ version: b088034eeaa73ac781fe271588ba03871c88118e
- name: os_ironic
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
- version: a90558f7a216e5e661c5d1a4048dbe30559542d1
+ version: 6ecf38f1296080a33366528ad40d513539138925
- name: os_magnum
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
- version: 736d1707339cb99396578018a6bda7af9184fb02
-- name: os_molteniron
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron
- version: 9b4c104a252c453bcd798fec9dbae7224b3d8001
+ version: 316f22626d242e33ce56fad367ef3570e0d8ab8b
- name: os_neutron
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_neutron
- version: 962cd92243641092412b6ef09a41bbf5e698c4a1
+ version: 3032836715b4055041554583fa2ed685ab076c25
- name: os_nova
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_nova
- version: 53df001c9034f198b9349def3c9158f8bbe43ff3
+ version: 9db5bf5ab6f82c1947d05a1ec7cd6e3ef304760f
- name: os_octavia
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_octavia
- version: 02ad3c68802287a1ba54cf10de085dcd14c324d8
+ version: 508ea6d834153d0eb6da5bd32d10472f483c6dfa
- name: os_rally
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_rally
- version: bc9075dba204e64d11cb397017d32b0c2297eed0
+ version: 8e98112b858ecffbb92c6ae342237af87416b7fa
- name: os_sahara
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
- version: 3c45121050ba21bd284f054d7b82a338f347157f
+ version: ed7aa2d64a2ea3508c7d88a9e869524fdf0e9353
- name: os_swift
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_swift
- version: f31217bb097519f15755f2337165657d7eb6b014
+ version: a88edf84964819870ef990d25b3bfa514186249a
- name: os_tacker
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_tacker
- version: d95902891c4e6200510509c066006c921cfff8df
+ version: bbce8657c13d2545bf632eb81bb78329a5479798
- name: os_tempest
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
- version: 866dedbcba180ca82c3c93823cef3db2d3241d1b
+ version: 08341f4a19b2ed2231b790496c9f7cf2b4eda2e6
- name: os_trove
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_trove
- version: b425fa316999d0863a44126f239a33d8c3fec3a6
+ version: eaca0137de0d3d7bd57a68eecfecf52e3171f591
- name: plugins
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-plugins
- version: d2f60237761646968a4b39b15185fb5c84e7386f
+ version: a84ae0d744047fe41a0c028213de8daa52f72aee
- name: rabbitmq_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
- version: 311f76890c8f99cb0b46958775d84de614609323
+ version: deccf93bdda1aa873b956418168368284509c99b
- name: repo_build
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_build
- version: 59a3f444c263235d8f0f584da8768656179fa02a
+ version: 630a6dfdcb46ba719ddb7fd7a4875259c5602b15
- name: repo_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_server
- version: 7889f37cdd2a90b4b98e8ef2e886f1fd4950fc0a
+ version: dd143b381b2fb94a3ba435f951e8b9338353a48d
- name: rsyslog_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
- version: 310cfe9506d3742be10790533ad0d16100d81498
+ version: ed5e61c8bc2aabb905918bb2751ae985b1cfe229
- name: rsyslog_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
- version: ba7bb699c0c874c7977add86ca308ca18be8f9a8
+ version: 9318bafbe60fed5f026c1e216d693bce745b9f99
- name: sshd
scm: git
src: https://github.com/willshersystems/ansible-sshd
- version: 537b9b2bc2fd7f23301222098344727f8161993c
+ version: d2ba81107ade1cf53c8b93590465c21ad2bc4530
- name: bird
scm: git
src: https://github.com/logan2211/ansible-bird
- version: 5033c412398cf6f98097a9ac274a6f12810c807e
+ version: 0fdb4848b5aca949ffade9be5a2ae254979e673e
- name: etcd
scm: git
src: https://github.com/logan2211/ansible-etcd
@@ -188,40 +188,44 @@
- name: unbound
scm: git
src: https://github.com/logan2211/ansible-unbound
- version: 7be67d6b60718896f0c17a7d4a14b912f72a59ae
+ version: 3bb7414f46b757e943507b65ca4c9f1080a008b0
- name: resolvconf
scm: git
src: https://github.com/logan2211/ansible-resolvconf
- version: d48dd3eea22094b6ecc6aa6ea07279c8e68e28b5
-- name: ceph-defaults
+ version: '1.4'
+- name: ceph-ansible
scm: git
- src: https://github.com/ceph/ansible-ceph-defaults
- version: 19884aaac1bc58921952af955c66602ccca89e93
-- name: ceph-common
+ src: https://github.com/ceph/ceph-ansible
+ version: a5aca6ebbc341feb34b9ec0d73e16aeeedae63ac
+- name: opendaylight
scm: git
- src: https://github.com/ceph/ansible-ceph-common
- version: 08804bd46dff42ebff64e7f27c86f2265fe4d6fc
-- name: ceph-config
+ src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight
+ version: 0aebbc250b34ac5ac14b37bdf9b1a2e1cfaa5a76
+- name: haproxy_endpoints
scm: git
- src: https://github.com/ceph/ansible-ceph-config
- version: e070537f443c3ae5d262835c8b0a7a992850283b
-- name: ceph-mon
+ src: https://github.com/logan2211/ansible-haproxy-endpoints
+ version: 8e3a24a35beb16d717072dc83895c5a1f92689fb
+- name: nspawn_container_create
+ src: https://git.openstack.org/openstack/openstack-ansible-nspawn_container_create
scm: git
- src: https://github.com/ceph/ansible-ceph-mon
- version: 309b7e339e057d56d9dd38bdd61998b900f45ba8
-- name: ceph-mgr
+ version: 2bcf03f1cca550731789d5b53c7d0806ef5f5ff7
+- name: nspawn_hosts
+ src: https://git.openstack.org/openstack/openstack-ansible-nspawn_hosts
scm: git
- src: https://github.com/ceph/ansible-ceph-mgr
- version: fe8f0864500b54cc7c9f897b871ba2cdf1d37096
-- name: ceph-osd
+ version: f69e101b9191682986272b766747f107b8a7a136
+- name: systemd_service
+ src: https://git.openstack.org/openstack/ansible-role-systemd_service
scm: git
- src: https://github.com/ceph/ansible-ceph-osd
- version: e022d6773bc827e75ad051b429dec786a75d68f4
-- name: opendaylight
+ version: a085a50c338b2eeaa87ed50eaaa22564d7c12968
+- name: systemd_mount
+ src: https://git.openstack.org/openstack/ansible-role-systemd_mount
scm: git
- src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight
- version: ef1367ad15ad10ac8cc9416f6fd49fd8b350d377
-- name: haproxy_endpoints
+ version: ee6263b3ce6502712ff4d6fb56474066df1773e4
+- name: systemd_networkd
+ src: https://git.openstack.org/openstack/ansible-role-systemd_networkd
scm: git
- src: https://github.com/logan2211/ansible-haproxy-endpoints
- version: 49901861b16b8afaa9bccdbc649ac956610ff22b
+ version: b024d0a3d97caf06b962a1f19450511b108dc5eb
+- name: python_venv_build
+ src: https://git.openstack.org/openstack/ansible-role-python_venv_build
+ scm: git
+ version: 5fdd8e00633f28606fc531a449d741e8c772a9fc
diff --git a/xci/installer/osa/files/global-requirement-pins.txt b/xci/installer/osa/files/global-requirement-pins.txt
index aa3b1169..ec198a79 100644
--- a/xci/installer/osa/files/global-requirement-pins.txt
+++ b/xci/installer/osa/files/global-requirement-pins.txt
@@ -5,10 +5,17 @@
#
# Use this file with caution!
#
+
+### Pinned for gnocchi's dependency pycradox
+# https://github.com/sileht/pycradox/commit/2209f89fd65ecf31bea8eac6405acce2543e7b84
+Cython<0.28
+
###
### These are pinned to ensure exactly the same behaviour forever! ###
### These pins are updated through the sources-branch-updater script ###
###
-pip==9.0.1
-setuptools==36.6.0
-wheel==0.30.0
+# Bumping pip to version 10 fails in tempest when trying to install
+# packages with an empty list.
+pip==18.0
+setuptools==40.0.0
+wheel==0.31.1
diff --git a/xci/installer/osa/files/ha/flavor-vars.yml b/xci/installer/osa/files/ha/flavor-vars.yml
deleted file mode 100644
index 167502c9..00000000
--- a/xci/installer/osa/files/ha/flavor-vars.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'controller01': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
- 'controller02': {
- 'VLAN_IP': '192.168.122.5',
- 'MGMT_IP': '172.29.236.13',
- 'VXLAN_IP': '172.29.240.13',
- 'STORAGE_IP': '172.29.244.13'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.6',
- 'MGMT_IP': '172.29.236.14',
- 'VXLAN_IP': '172.29.240.14',
- 'STORAGE_IP': '172.29.244.14'
- },
- 'compute01': {
- 'VLAN_IP': '192.168.122.7',
- 'MGMT_IP': '172.29.236.15',
- 'VXLAN_IP': '172.29.240.15',
- 'STORAGE_IP': '172.29.244.15'
- }
-}
diff --git a/xci/installer/osa/files/ha/inventory b/xci/installer/osa/files/ha/inventory
deleted file mode 100644
index 94b1d074..00000000
--- a/xci/installer/osa/files/ha/inventory
+++ /dev/null
@@ -1,11 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-controller01 ansible_ssh_host=192.168.122.4
-controller02 ansible_ssh_host=192.168.122.5
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.6
-compute01 ansible_ssh_host=192.168.122.7
diff --git a/xci/installer/osa/files/ha/openstack_user_config.yml b/xci/installer/osa/files/ha/openstack_user_config.yml
index 360aa5cb..dc2ec183 100644
--- a/xci/installer/osa/files/ha/openstack_user_config.yml
+++ b/xci/installer/osa/files/ha/openstack_user_config.yml
@@ -77,18 +77,18 @@ shared-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# repository (apt cache, python packages, etc)
repo-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
@@ -97,9 +97,9 @@ haproxy_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# rsyslog server
# log_hosts:
@@ -115,18 +115,18 @@ identity_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# cinder api services
storage-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# glance
# The settings here are repeated for each infra host.
@@ -139,27 +139,27 @@ image_hosts:
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
options: "_netdev,auto"
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
options: "_netdev,auto"
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
@@ -170,43 +170,43 @@ compute-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# heat
orchestration_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# horizon
dashboard_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# neutron server, agents (L3, etc)
network_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# nova hypervisors
compute_hosts:
compute00:
- ip: 172.29.236.14
+ ip: 172.29.236.12
compute01:
- ip: 172.29.236.15
+ ip: 172.29.236.13
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
@@ -225,10 +225,10 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
container_vars:
cinder_backends:
limit_container_types: cinder_volume
@@ -238,10 +238,10 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
container_vars:
cinder_backends:
limit_container_types: cinder_volume
@@ -251,5 +251,5 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
diff --git a/xci/installer/osa/files/ha/user_variables.yml b/xci/installer/osa/files/ha/user_variables.yml
index 72960a01..8c2e9f0c 100644
--- a/xci/installer/osa/files/ha/user_variables.yml
+++ b/xci/installer/osa/files/ha/user_variables.yml
@@ -21,6 +21,9 @@
# # Debug and Verbose options.
debug: false
+# package_state: present should give us a better chance to finish
+package_state: present
+
# Allow root logins
security_sshd_permit_root_login: yes
@@ -62,6 +65,7 @@ barbican_wsgi_processes: 2
barbican_wsgi_threads: 1
## Cinder
+cinder_volume_clear: none
cinder_wsgi_processes_max: 2
cinder_wsgi_threads: 1
cinder_wsgi_buffer_size: 16384
@@ -159,7 +163,6 @@ openrc_nova_endpoint_type: "publicURL"
openrc_os_endpoint_type: "publicURL"
openrc_clouds_yml_interface: "public"
openrc_region_name: RegionOne
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+openrc_insecure: true
keystone_service_adminuri_insecure: true
keystone_service_internaluri_insecure: true
diff --git a/xci/installer/osa/files/mini/flavor-vars.yml b/xci/installer/osa/files/mini/flavor-vars.yml
deleted file mode 100644
index 0d446ba2..00000000
--- a/xci/installer/osa/files/mini/flavor-vars.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
-}
diff --git a/xci/installer/osa/files/mini/inventory b/xci/installer/osa/files/mini/inventory
deleted file mode 100644
index eb73e5e3..00000000
--- a/xci/installer/osa/files/mini/inventory
+++ /dev/null
@@ -1,8 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.4
diff --git a/xci/installer/osa/files/mini/user_variables.yml b/xci/installer/osa/files/mini/user_variables.yml
index 9ec9e405..b4d847bc 100644
--- a/xci/installer/osa/files/mini/user_variables.yml
+++ b/xci/installer/osa/files/mini/user_variables.yml
@@ -21,6 +21,9 @@
# # Debug and Verbose options.
debug: false
+# package_state: present gives the deployment a better chance to finish
+package_state: present
+
# Allow root logins
security_sshd_permit_root_login: yes
@@ -62,6 +65,7 @@ barbican_wsgi_processes: 2
barbican_wsgi_threads: 1
## Cinder
+cinder_volume_clear: none
cinder_wsgi_processes_max: 2
cinder_wsgi_threads: 1
cinder_wsgi_buffer_size: 16384
@@ -159,7 +163,6 @@ openrc_nova_endpoint_type: "publicURL"
openrc_os_endpoint_type: "publicURL"
openrc_clouds_yml_interface: "public"
openrc_region_name: RegionOne
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+openrc_insecure: true
keystone_service_adminuri_insecure: true
keystone_service_internaluri_insecure: true
diff --git a/xci/installer/osa/files/noha/flavor-vars.yml b/xci/installer/osa/files/noha/flavor-vars.yml
deleted file mode 100644
index 3c69a34b..00000000
--- a/xci/installer/osa/files/noha/flavor-vars.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
- 'compute01': {
- 'VLAN_IP': '192.168.122.5',
- 'MGMT_IP': '172.29.236.13',
- 'VXLAN_IP': '172.29.240.13',
- 'STORAGE_IP': '172.29.244.13'
- }
-}
diff --git a/xci/installer/osa/files/noha/inventory b/xci/installer/osa/files/noha/inventory
deleted file mode 100644
index b4f9f6d0..00000000
--- a/xci/installer/osa/files/noha/inventory
+++ /dev/null
@@ -1,9 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.4
-compute01 ansible_ssh_host=192.168.122.5
diff --git a/xci/installer/osa/files/noha/user_variables.yml b/xci/installer/osa/files/noha/user_variables.yml
index 66573428..5e7ed83c 100644
--- a/xci/installer/osa/files/noha/user_variables.yml
+++ b/xci/installer/osa/files/noha/user_variables.yml
@@ -21,6 +21,9 @@
# # Debug and Verbose options.
debug: false
+# package_state: present gives the deployment a better chance to finish
+package_state: present
+
# Allow root logins
security_sshd_permit_root_login: yes
@@ -62,6 +65,7 @@ barbican_wsgi_processes: 2
barbican_wsgi_threads: 1
## Cinder
+cinder_volume_clear: none
cinder_wsgi_processes_max: 2
cinder_wsgi_threads: 1
cinder_wsgi_buffer_size: 16384
@@ -159,7 +163,6 @@ openrc_nova_endpoint_type: "publicURL"
openrc_os_endpoint_type: "publicURL"
openrc_clouds_yml_interface: "public"
openrc_region_name: RegionOne
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+openrc_insecure: true
keystone_service_adminuri_insecure: true
keystone_service_internaluri_insecure: true
diff --git a/xci/installer/osa/files/openstack_services.yml b/xci/installer/osa/files/openstack_services.yml
index 86501634..64718e33 100644
--- a/xci/installer/osa/files/openstack_services.yml
+++ b/xci/installer/osa/files/openstack_services.yml
@@ -31,192 +31,270 @@
## Global Requirements
requirements_git_repo: https://git.openstack.org/openstack/requirements
-requirements_git_install_branch: 691711c0effddd9cbaaadba3d494c15bc422fdd5 # HEAD of "master" as of 24.11.2017
+requirements_git_install_branch: 32f8fa388d3b8367320a3308a350f28254a82d65 # HEAD of "stable/rocky" as of 11.12.2018
+requirements_git_track_branch: stable/rocky
## Aodh service
aodh_git_repo: https://git.openstack.org/openstack/aodh
-aodh_git_install_branch: 359043dc774be847cb539d18d13e336d40453e72 # HEAD of "master" as of 24.11.2017
+aodh_git_install_branch: ae5e710cd5ade867ebd0e6666bad95f82d130210 # HEAD of "stable/rocky" as of 11.12.2018
aodh_git_project_group: aodh_all
+aodh_git_track_branch: stable/rocky
## Barbican service
barbican_git_repo: https://git.openstack.org/openstack/barbican
-barbican_git_install_branch: 5617d605f2e12840933e4a9d6417912cdbb811d5 # HEAD of "master" as of 24.11.2017
+barbican_git_install_branch: 0a1a9917e791d0c6fc8534a052700af5f5cbe9d0 # HEAD of "stable/rocky" as of 11.12.2018
barbican_git_project_group: barbican_all
+barbican_git_track_branch: stable/rocky
## Ceilometer service
ceilometer_git_repo: https://git.openstack.org/openstack/ceilometer
-ceilometer_git_install_branch: bd464f1f572ba150f52e284de430d13045dc6c18 # HEAD of "master" as of 24.11.2017
-ceilometer_git_project_group: ceilometer_all
+ceilometer_git_install_branch: 018ff32fe0200a041297c386eb8b381f1bec0e71 # HEAD of "stable/rocky" as of 11.12.2018
+ceilometer_git_project_group: all
+ceilometer_git_track_branch: stable/rocky
## Cinder service
cinder_git_repo: https://git.openstack.org/openstack/cinder
-cinder_git_install_branch: 80558687d0fa55f2adf699e7369ebe3dbc3591bf # HEAD of "master" as of 24.11.2017
+cinder_git_install_branch: 8dbf5d7882a6271514a3075a02cd080e44b709d5 # HEAD of "stable/rocky" as of 11.12.2018
cinder_git_project_group: cinder_all
+cinder_git_track_branch: stable/rocky
## Designate service
designate_git_repo: https://git.openstack.org/openstack/designate
-designate_git_install_branch: 2f75586379e8d611f37e06d385e79d0bc2c84ca1 # HEAD of "master" as of 24.11.2017
+designate_git_install_branch: af1bb8a36a704bb1a226fe5154f828e152ef23e1 # HEAD of "stable/rocky" as of 11.12.2018
designate_git_project_group: designate_all
+designate_git_track_branch: stable/rocky
## Horizon Designate dashboard plugin
designate_dashboard_git_repo: https://git.openstack.org/openstack/designate-dashboard
-designate_dashboard_git_install_branch: 571e127e5f853aa4dbdd377d831e32f8ff81eafe # HEAD of "master" as of 24.11.2017
+designate_dashboard_git_install_branch: faa67c87ad3cd5563da722f13b3adaee5bfe350f # HEAD of "stable/rocky" as of 11.12.2018
designate_dashboard_git_project_group: horizon_all
+designate_dashboard_git_track_branch: stable/rocky
## Dragonflow service
+# please update the branch (SHA) and the comment when stable/rocky is branched on this repo.
dragonflow_git_repo: https://git.openstack.org/openstack/dragonflow
-dragonflow_git_install_branch: 7bf00cf315659252f03f6c65f6159a924da6f978 # HEAD of "master" as of 24.11.2017
+dragonflow_git_install_branch: 945b1e368c651ffa3655f42df724d9f13a7b6b96 # FROZEN HEAD of "master" as of 17.08.2018
dragonflow_git_project_group: neutron_all
+dragonflow_git_track_branch: None
## Glance service
glance_git_repo: https://git.openstack.org/openstack/glance
-glance_git_install_branch: d88bd2ca8ef95810441dae640d3c6b9e79eca353 # HEAD of "master" as of 24.11.2017
+glance_git_install_branch: 4982c24f0aeb64f9d20159e543a90e31fc325dce # HEAD of "stable/rocky" as of 11.12.2018
glance_git_project_group: glance_all
+glance_git_track_branch: stable/rocky
## Heat service
heat_git_repo: https://git.openstack.org/openstack/heat
-heat_git_install_branch: f4a06c2a92a361dbb401107b4ea1ab60972f473e # HEAD of "master" as of 24.11.2017
+heat_git_install_branch: 98eea44d5d91b74e1ab28c052e4fbc4b533d5f83 # HEAD of "stable/rocky" as of 11.12.2018
heat_git_project_group: heat_all
+heat_git_track_branch: stable/rocky
+## Horizon Heat dashboard plugin
+# please update the branch (SHA) and the comment when stable/rocky is branched on this repo.
+heat_dashboard_git_repo: https://git.openstack.org/openstack/heat-dashboard
+heat_dashboard_git_install_branch: bc7f5068bbb6f7974eaffa2d865a859ff0fd0069 # FROZEN HEAD of "master" as of 17.08.2018
+heat_dashboard_git_project_group: horizon_all
+heat_dashboard_git_track_branch: None
## Horizon service
horizon_git_repo: https://git.openstack.org/openstack/horizon
-horizon_git_install_branch: 846d269d90e01e463b510474040e0ad984a5679f # HEAD of "master" as of 24.11.2017
+horizon_git_install_branch: 0ccfce882749998f3a6a7f9bfc6fa74ea346ca53 # HEAD of "stable/rocky" as of 11.12.2018
horizon_git_project_group: horizon_all
+horizon_git_track_branch: stable/rocky
## Horizon Ironic dashboard plugin
ironic_dashboard_git_repo: https://git.openstack.org/openstack/ironic-ui
-ironic_dashboard_git_install_branch: d6199d51171e6c8700663b0b0618ee0adf033b4d # HEAD of "master" as of 24.11.2017
+ironic_dashboard_git_install_branch: c700f3a613f3d78875caf7588e7bdf42a5db83cb # HEAD of "stable/rocky" as of 11.12.2018
ironic_dashboard_git_project_group: horizon_all
+ironic_dashboard_git_track_branch: stable/rocky
## Horizon Magnum dashboard plugin
magnum_dashboard_git_repo: https://git.openstack.org/openstack/magnum-ui
-magnum_dashboard_git_install_branch: 6160d903fae9c652b459c93c218e0ea75924a85d # HEAD of "master" as of 24.11.2017
+magnum_dashboard_git_install_branch: 2e9cb253eaee45a57f07369e432369dbff8fc173 # HEAD of "stable/rocky" as of 11.12.2018
magnum_dashboard_git_project_group: horizon_all
+magnum_dashboard_git_track_branch: stable/rocky
## Horizon LBaaS dashboard plugin
neutron_lbaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-lbaas-dashboard
-neutron_lbaas_dashboard_git_install_branch: ef650294bcc7447d441e6a710c39d64e384e1b27 # HEAD of "master" as of 24.11.2017
+neutron_lbaas_dashboard_git_install_branch: 84fd20a474e8165ddbf5cf4bd14b7eb7da63ed41 # HEAD of "stable/rocky" as of 11.12.2018
neutron_lbaas_dashboard_git_project_group: horizon_all
+neutron_lbaas_dashboard_git_track_branch: stable/rocky
## Horizon FWaaS dashboard plugin
neutron_fwaas_dashboard_git_repo: https://git.openstack.org//openstack/neutron-fwaas-dashboard
-neutron_fwaas_dashboard_git_install_branch: 6de122d4753a6db24d2dc4c22a71e702ed980e82 # HEAD of "master" as of 24.11.2017
+neutron_fwaas_dashboard_git_install_branch: 4adf5599211ef90696da94b2fee3aac730f3b7bc # HEAD of "stable/rocky" as of 11.12.2018
neutron_fwaas_dashboard_git_project_group: horizon_all
+neutron_fwaas_dashboard_git_track_branch: stable/rocky
## Horizon Sahara dashboard plugin
sahara_dashboard_git_repo: https://git.openstack.org/openstack/sahara-dashboard
-sahara_dashboard_git_install_branch: 3e5c59e6229dac8b303029058fcee9d61200ebc8 # HEAD of "master" as of 24.11.2017
+sahara_dashboard_git_install_branch: 6e3f7538ce7779612d8e82b069597c06c2225a77 # HEAD of "stable/rocky" as of 11.12.2018
sahara_dashboard_git_project_group: horizon_all
+sahara_dashboard_git_track_branch: stable/rocky
## Keystone service
keystone_git_repo: https://git.openstack.org/openstack/keystone
-keystone_git_install_branch: 70fe4ec09b55def21361a32c8fa7f12e7c891ab1 # HEAD of "master" as of 24.11.2017
+keystone_git_install_branch: 295ccda8190b39a505c397d2f4d9e4896dc538cf # HEAD of "stable/rocky" as of 11.12.2018
keystone_git_project_group: keystone_all
+keystone_git_track_branch: stable/rocky
## Neutron service
neutron_git_repo: https://git.openstack.org/openstack/neutron
-neutron_git_install_branch: d1277c1630570ca45b490c48371e3f7e97be78c3 # HEAD of "master" as of 24.11.2017
+neutron_git_install_branch: ae2ef681403d1f103170ea70df1010f006244752 # HEAD of "stable/rocky" as of 11.12.2018
neutron_git_project_group: neutron_all
+neutron_git_track_branch: stable/rocky
neutron_lbaas_git_repo: https://git.openstack.org/openstack/neutron-lbaas
-neutron_lbaas_git_install_branch: b1123e7a759248dfa63afdf8b86aafd692572ebd # HEAD of "master" as of 24.11.2017
+neutron_lbaas_git_install_branch: 1353bad713fd97418a9984016da49df8cfa8825b # HEAD of "stable/rocky" as of 11.12.2018
neutron_lbaas_git_project_group: neutron_all
+neutron_lbaas_git_track_branch: stable/rocky
neutron_vpnaas_git_repo: https://git.openstack.org/openstack/neutron-vpnaas
-neutron_vpnaas_git_install_branch: 79e4eb81dd05588bcf68b92d46c62f0d26153542 # HEAD of "master" as of 24.11.2017
+neutron_vpnaas_git_install_branch: 0876f4dfe7e2f57305110e035efa753bfb711a3f # HEAD of "stable/rocky" as of 11.12.2018
neutron_vpnaas_git_project_group: neutron_all
+neutron_vpnaas_git_track_branch: stable/rocky
neutron_fwaas_git_repo: https://git.openstack.org/openstack/neutron-fwaas
-neutron_fwaas_git_install_branch: 74eac2ca2980e6162d9c88ee6bd48830386c392a # HEAD of "master" as of 24.11.2017
+neutron_fwaas_git_install_branch: 5ece265b65247ee81a9335d5a685fa9f0a68b0fc # HEAD of "stable/rocky" as of 11.12.2018
neutron_fwaas_git_project_group: neutron_all
+neutron_fwaas_git_track_branch: stable/rocky
neutron_dynamic_routing_git_repo: https://git.openstack.org/openstack/neutron-dynamic-routing
-neutron_dynamic_routing_git_install_branch: 183c3fa4840d22be1974534eb9e1b28b552f4a42 # HEAD of "master" as of 24.11.2017
+neutron_dynamic_routing_git_install_branch: ae3a01ca1fd6270fc27b3c6bae11afc0f17563d5 # HEAD of "stable/rocky" as of 11.12.2018
neutron_dynamic_routing_git_project_group: neutron_all
+neutron_dynamic_routing_git_track_branch: stable/rocky
+# Networking Calico is following master
networking_calico_git_repo: https://git.openstack.org/openstack/networking-calico
-networking_calico_git_install_branch: 9688df1a3d1d8b3fd9ba367e82fe6b0559416728 # HEAD of "master" as of 24.11.2017
+networking_calico_git_install_branch: 79c7e00360ddb5fd3c38e60e5bbb3399928d9172 # HEAD of "master" as of 11.12.2018
networking_calico_git_project_group: neutron_all
+networking_calico_git_track_branch: stable/rocky
+
+networking_odl_git_repo: https://git.openstack.org/openstack/networking-odl
+networking_odl_git_install_branch: 1cef1f0939a405eea4cb87e712794e8fa26b5166 # HEAD of "stable/rocky" as of 11.12.2018
+networking_odl_git_project_group: neutron_all
+networking_odl_git_track_branch: stable/rocky
+
+networking_ovn_git_repo: https://git.openstack.org/openstack/networking-ovn
+networking_ovn_git_install_branch: e077aa93b1dc244b59864236d7c673f852e4e3ba # HEAD of "stable/rocky" as of 11.12.2018
+networking_ovn_git_project_group: neutron_all
+
+# BGPVPN is frozen until further notice due to
+# https://github.com/openstack/networking-bgpvpn/commit/e9a0ea199b47f76f69545e04bdb4db44869c388b#diff-b4ef698db8ca845e5845c4618278f29a
+networking_bgpvpn_git_repo: https://git.openstack.org/openstack/networking-bgpvpn
+networking_bgpvpn_git_install_branch: 3b93ddacd390d92fb144e5660324d4da064ad9a4 # FROZEN HEAD of "stable/rocky" as of 31.03.2018
+networking_bgpvpn_git_project_group: neutron_all
+networking_bgpvpn_git_track_branch: None
+
+networking_sfc_git_repo: https://git.openstack.org/openstack/networking-sfc
+networking_sfc_git_install_branch: f0eddef3d53bbad417038f9d32b196ace2ebd0b2 # HEAD of "stable/rocky" as of 11.12.2018
+networking_sfc_git_project_group: neutron_all
+networking_sfc_git_track_branch: stable/rocky
+
## Nova service
nova_git_repo: https://git.openstack.org/openstack/nova
-nova_git_install_branch: 22a790ef45b0523e8cf2ed97d14e050431c90fd9 # HEAD of "master" as of 24.11.2017
+nova_git_install_branch: 8066142a1e381536291232250b3237e5c01ed1f4 # HEAD of "stable/rocky" as of 11.12.2018
nova_git_project_group: nova_all
+nova_git_track_branch: stable/rocky
## PowerVM Virt Driver
nova_powervm_git_repo: https://git.openstack.org/openstack/nova-powervm
-nova_powervm_git_install_branch: f2de4441e39b0f66cf31f854b228e9e7037f04de # HEAD of "master" as of 24.11.2017
+nova_powervm_git_install_branch: 984b122668161703eee33918d570c61ae9c5b1ca # HEAD of "stable/rocky" as of 11.12.2018
nova_powervm_git_project_group: nova_all
+nova_powervm_git_track_branch: stable/rocky
## LXD Virt Driver
+# please update the branch (SHA) and the comment when stable/rocky is branched on this repo.
nova_lxd_git_repo: https://git.openstack.org/openstack/nova-lxd
-nova_lxd_git_install_branch: e498de603b31c189fd32a6067d45a36575b96b0a # HEAD of "master" as of 24.11.2017
+nova_lxd_git_install_branch: bc8d540c95b3209321658000fd74b0e5065a7ee2 # FROZEN HEAD of "master" as of 17.08.2018
nova_lxd_git_project_group: nova_all
+nova_lxd_git_track_branch: None
## Sahara service
sahara_git_repo: https://git.openstack.org/openstack/sahara
-sahara_git_install_branch: 395856c513b1efad82db8fa78fb1cbfe0f3a6749 # HEAD of "master" as of 24.11.2017
+sahara_git_install_branch: ddb518fd81b82308bdd01e58ebf6ed7a48c544ae # HEAD of "stable/rocky" as of 11.12.2018
sahara_git_project_group: sahara_all
+sahara_git_track_branch: stable/rocky
## Swift service
swift_git_repo: https://git.openstack.org/openstack/swift
-swift_git_install_branch: 3135878d2fe9909f49fcadeeb9cc6c6933d06127 # HEAD of "master" as of 24.11.2017
+swift_git_install_branch: 7fdf66ab70da705774a4ae9c328a3e762bb2f3b4 # HEAD of "stable/rocky" as of 11.12.2018
swift_git_project_group: swift_all
+swift_git_track_branch: stable/rocky
## Swift3 middleware
+# please remove this when the swift role is configured without this middleware (and uses swift code only)
swift_swift3_git_repo: https://git.openstack.org/openstack/swift3
-swift_swift3_git_install_branch: 1fb6a30ee59a16cd4b6c49bab963ff9e3f974580 # HEAD of "master" as of 24.11.2017
+swift_swift3_git_install_branch: 90db5d1510b2a770387961e7bf0fbeae8101ba45 # FROZEN HEAD of "master" as of 17.08.2018
swift_swift3_git_project_group: swift_all
+swift_swift3_git_track_branch: None
## Ironic service
ironic_git_repo: https://git.openstack.org/openstack/ironic
-ironic_git_install_branch: 27ce77142bfb9ac56e85db37e0923a0eb47f2f7a # HEAD of "master" as of 24.11.2017
+ironic_git_install_branch: 6a6c0d882fe8ac299d18df75d2bbd111b170ad48 # HEAD of "stable/rocky" as of 11.12.2018
ironic_git_project_group: ironic_all
+ironic_git_track_branch: stable/rocky
+
## Magnum service
magnum_git_repo: https://git.openstack.org/openstack/magnum
-magnum_git_install_branch: 4bf3b3263870a4ec81cf372713cacec446b3ee84 # HEAD of "master" as of 24.11.2017
+magnum_git_install_branch: 765e207a5d3a45b8523cb2c34e5d74541da481e6 # HEAD of "stable/rocky" as of 11.12.2018
magnum_git_project_group: magnum_all
+magnum_git_track_branch: stable/rocky
+
## Trove service
trove_git_repo: https://git.openstack.org/openstack/trove
-trove_git_install_branch: b09d0eb3135047891a369d3c0eb2c6e9ae649f5b # HEAD of "master" as of 24.11.2017
+trove_git_install_branch: 2953676e81fc22099e72ea7d0f27002a59aa779f # HEAD of "stable/rocky" as of 11.12.2018
trove_git_project_group: trove_all
+trove_git_track_branch: stable/rocky
## Horizon Trove dashboard plugin
trove_dashboard_git_repo: https://git.openstack.org/openstack/trove-dashboard
-trove_dashboard_git_install_branch: 14a4609606d42cae827b8fc6b44453caea258976 # HEAD of "master" as of 24.11.2017
+trove_dashboard_git_install_branch: c6482d8f7ebeb980a99cc89593245be381675984 # HEAD of "stable/rocky" as of 11.12.2018
trove_dashboard_git_project_group: horizon_all
+trove_dashboard_git_track_branch: stable/rocky
+
## Octavia service
octavia_git_repo: https://git.openstack.org/openstack/octavia
-octavia_git_install_branch: bb9bb2d05b268cff9846e0a09ad3940be5fe5a80 # HEAD of "master" as of 24.11.2017
+octavia_git_install_branch: ec4c88e23ebeb786491158682f9a7dd42928f97a # HEAD of "stable/rocky" as of 12.14.2018
octavia_git_project_group: octavia_all
+octavia_git_track_branch: stable/rocky
-## Molteniron service
-molteniron_git_repo: https://git.openstack.org/openstack/molteniron
-molteniron_git_install_branch: 094276cda77d814d07ad885e7d63de8d1243750a # HEAD of "master" as of 24.11.2017
-molteniron_git_project_group: molteniron_all
## Tacker service
tacker_git_repo: https://git.openstack.org/openstack/tacker
-tacker_git_install_branch: cc03b5d952527b8cad2e2e309a97d55afb1ca559 # HEAD of "master" as of 24.11.2017
+tacker_git_install_branch: 279b1a2840b9f28377476e0d11ca83ce2e88a0b2 # HEAD of "stable/rocky" as of 11.12.2018
tacker_git_project_group: tacker_all
+tacker_git_track_branch: stable/rocky
+
+## Congress service
+congress_git_repo: https://git.openstack.org/openstack/congress
+congress_git_install_branch: 6862ac9f356a5403e1e37050e12f032f661bae96 # HEAD of "stable/rocky" as of 11.12.2018
+congress_git_project_group: congress_all
+congress_git_track_branch: stable/rocky
+
+## Horizon Octavia dashboard plugin
+octavia_dashboard_git_repo: https://git.openstack.org/openstack/octavia-dashboard
+octavia_dashboard_git_install_branch: 80766f9390492c24de38911d7240c5490c7ef562 # HEAD of "stable/rocky" as of 11.12.2018
+octavia_dashboard_git_project_group: horizon_all
+octavia_dashboard_git_track_branch: stable/rocky
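Every service pinned in this file now follows the same four-variable pattern; a generic sketch of that pattern (the "example" service name is a placeholder, not an entry from this file):

    ## Example service
    example_git_repo: https://git.openstack.org/openstack/example
    example_git_install_branch: <sha>        # HEAD of "stable/rocky" as of 11.12.2018
    example_git_project_group: example_all
    example_git_track_branch: stable/rocky   # set to None for repos frozen on master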
diff --git a/xci/installer/osa/files/setup-openstack.yml b/xci/installer/osa/files/setup-openstack.yml
index c2cb1c79..904215b7 100644
--- a/xci/installer/osa/files/setup-openstack.yml
+++ b/xci/installer/osa/files/setup-openstack.yml
@@ -19,9 +19,13 @@
- include: os-nova-install.yml
- include: os-neutron-install.yml
- include: os-heat-install.yml
+- include: os-ceilometer-install.yml
- include: os-horizon-install.yml
+ when: not core_openstack | default(False)
- include: os-swift-install.yml
- include: os-ironic-install.yml
+ when: not core_openstack | default(False)
+- include: os-barbican-install.yml
- include: os-tacker-install.yml
- include: os-tempest-install.yml
when: (tempest_install | default(False)) | bool or (tempest_run | default(False)) | bool
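The new conditionals rely on a core_openstack flag that configure-opnfvhost.yml (changed later in this diff) writes into user_variables.yml from core_openstack_install; a minimal sketch of how the gate behaves, assuming the flag is set to true:

    # With core_openstack: true (set in user_variables.yml by configure-opnfvhost.yml),
    # the when expression evaluates to false and the optional service is skipped:
    - include: os-horizon-install.yml
      when: not core_openstack | default(False)   # the default keeps horizon installed when the flag is absent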
diff --git a/xci/installer/osa/files/user_variables_proxy.yml b/xci/installer/osa/files/user_variables_proxy.yml
new file mode 100644
index 00000000..d25c3181
--- /dev/null
+++ b/xci/installer/osa/files/user_variables_proxy.yml
@@ -0,0 +1,22 @@
+---
+# Copyright 2018, Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+http_proxy_env_url: ""
+no_proxy_env: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},{% for host in groups['all_containers'] %}{{ hostvars[host]['container_address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
+global_environment_variables:
+ HTTP_PROXY: "{{ http_proxy_env_url }}"
+ NO_PROXY: "{{ no_proxy_env }}"
+ http_proxy: "{{ http_proxy_env_url }}"
+ no_proxy: "{{ no_proxy_env }}"
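For reference, the Jinja expression for no_proxy_env above renders into a flat comma-separated list; a hypothetical rendering (the proxy URL and all addresses below are illustrative, not taken from this diff):

    no_proxy_env: "localhost,127.0.0.1,172.29.236.222,192.168.122.220,172.29.236.11,172.29.236.12"
    global_environment_variables:
      HTTP_PROXY: "http://proxy.example.com:8080"
      NO_PROXY: "localhost,127.0.0.1,172.29.236.222,192.168.122.220,172.29.236.11,172.29.236.12"
      http_proxy: "http://proxy.example.com:8080"
      no_proxy: "localhost,127.0.0.1,172.29.236.222,192.168.122.220,172.29.236.11,172.29.236.12"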
diff --git a/xci/installer/osa/files/user_variables_xci.yml b/xci/installer/osa/files/user_variables_xci.yml
new file mode 100644
index 00000000..1d69f532
--- /dev/null
+++ b/xci/installer/osa/files/user_variables_xci.yml
@@ -0,0 +1,17 @@
+---
+# Copyright 2018, SUSE LINUX GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+debug: False
+install_method: source
diff --git a/xci/installer/osa/playbooks/bootstrap-scenarios.yml b/xci/installer/osa/playbooks/bootstrap-scenarios.yml
deleted file mode 100644
index 98acf73b..00000000
--- a/xci/installer/osa/playbooks/bootstrap-scenarios.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-#
-# This file is aimed to be used by scenarios to plug into the XCI.
-# Ideally, all they need to do at this point is to include their
-# role using a statement like the following one
-#
-# - name: Include foobar role
-# include_role:
-# name: "foobar"
-# when: DEPLOY_SCENARIO == "foobar"
-
-- name: Prepare everything to run the os-nosdn-nofeature scenario
- include_role:
- name: "os-nosdn-nofeature"
- when: DEPLOY_SCENARIO == 'os-nosdn-nofeature'
-- name: Prepare everything to run the os-odl-nofeature scenario
- include_role:
- name: "os-odl-nofeature"
- when: DEPLOY_SCENARIO == 'os-odl-nofeature'
-- name: Prepare everything to run the os-odl-sfc scenario
- include_role:
- name: "os-odl-sfc"
- when: DEPLOY_SCENARIO == 'os-odl-sfc'
diff --git a/xci/installer/osa/playbooks/configure-localhost.yml b/xci/installer/osa/playbooks/configure-localhost.yml
deleted file mode 100644
index caa5d673..00000000
--- a/xci/installer/osa/playbooks/configure-localhost.yml
+++ /dev/null
@@ -1,75 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- hosts: localhost
- connection: local
-
- pre_tasks:
- - name: Load distribution variables
- include_vars:
- file: "{{ item }}"
- failed_when: false
- with_items:
- - "{{ XCI_PATH }}/xci/var/opnfv.yml"
- - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
-
- - name: cleanup leftovers of previous deployment
- file:
- path: "{{ item }}"
- state: absent
- recurse: no
- with_items:
- - "{{ XCI_CACHE }}/repos"
- - "{{ LOG_PATH }} "
- - "{{ OPNFV_SSH_HOST_KEYS_PATH }}"
-
- roles:
- - role: clone-repository
- project: "openstack/openstack-ansible-openstack_openrc"
- repo: "{{ OPENSTACK_OSA_OPENRC_GIT_URL }}"
- dest: roles/openstack-ansible-openstack_openrc
- version: "master"
- - role: clone-repository
- project: "openstack/openstack-ansible"
- repo: "{{ OPENSTACK_OSA_GIT_URL }}"
- dest: "{{ XCI_CACHE }}/repos/openstack-ansible"
- version: "{{ OPENSTACK_OSA_VERSION }}"
-
- tasks:
- - name: create log directory {{LOG_PATH}}
- file:
- path: "{{LOG_PATH}}"
- state: directory
- recurse: no
- - name: check if certificate directory /etc/ssl/certs exists already
- stat: path=/etc/ssl/certs
- register: check_etc_ssl_certs
- - name: create certificate directory /etc/ssl/certs
- become: true
- file:
- path: "/etc/ssl/certs"
- state: directory
- when: check_etc_ssl_certs.stat.exists == false
- - name: create key directory /etc/ssl/private
- become: true
- file:
- path: "/etc/ssl/private"
- state: directory
- - name: generate self signed certificate
- command: openssl req -new -nodes -x509 -subj "{{ XCI_SSL_SUBJECT }}" -days 3650 -keyout "/etc/ssl/private/xci.key" -out "/etc/ssl/certs/xci.crt" -extensions v3_ca
- become: true
- - name: Synchronize local development OSA repository to XCI paths
- # command module is much faster than the copy module
- synchronize:
- src: "{{ OPENSTACK_OSA_DEV_PATH }}"
- dest: "{{ XCI_CACHE }}/repos/openstack-ansible"
- recursive: yes
- delete: yes
- when:
- - OPENSTACK_OSA_DEV_PATH != ""
diff --git a/xci/installer/osa/playbooks/configure-opnfvhost.yml b/xci/installer/osa/playbooks/configure-opnfvhost.yml
index de922d3c..07ad683b 100644
--- a/xci/installer/osa/playbooks/configure-opnfvhost.yml
+++ b/xci/installer/osa/playbooks/configure-opnfvhost.yml
@@ -10,176 +10,190 @@
- hosts: opnfv
remote_user: root
vars_files:
- - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ - "{{ xci_path }}/xci/installer/osa/files/openstack_services.yml"
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
+ HTTP_PROXY: "{{ lookup('env','http_proxy') }}"
+ HTTPS_PROXY: "{{ lookup('env','https_proxy') }}"
+ NO_PROXY: "{{ lookup('env','no_proxy') }}"
pre_tasks:
- name: Load distribution variables
include_vars:
file: "{{ item }}"
with_items:
- - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
- - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
- name: Set facts for remote deployment
set_fact:
remote_xci_path: "{{ ansible_env.HOME }}/releng-xci"
- remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{XCI_INSTALLER}}/files/{{ XCI_FLAVOR }}"
+ remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{installer_type}}/files/{{ xci_flavor }}"
remote_xci_playbooks: "{{ ansible_env.HOME }}/releng-xci/xci/playbooks"
roles:
- - role: configure-network
- when: XCI_FLAVOR != "aio"
+ - role: bootstrap-host
+ configure_network: xci_flavor != 'aio'
+ - role: ruzickap.proxy_settings
+ proxy_settings_http_proxy: "{{ lookup('env','http_proxy') }}"
+ proxy_settings_https_proxy: "{{ lookup('env','https_proxy') }}"
+ proxy_settings_ftp_proxy: "{{ lookup('env','ftp_proxy') }}"
+ proxy_settings_no_proxy: "{{ lookup('env','no_proxy') }}"
tasks:
- - name: generate SSH keys
- shell: ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""
+ - name: Create list of files to copy
+ shell: |
+ git ls-tree -r --name-only HEAD > {{ xci_cache }}/releng-xci.files
+ echo ".git/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/repos/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/xci.env" >> {{ xci_cache }}/releng-xci.files
args:
- creates: "{{ ansible_env.HOME }}/.ssh/id_rsa"
- - name: fetch public key
- fetch:
- src: "{{ ansible_env.HOME }}/.ssh/id_rsa.pub"
- dest: "{{ XCI_PATH }}/xci/files/authorized_keys"
- flat: yes
+ executable: /bin/bash
+ chdir: "{{ xci_path }}"
+ changed_when: False
+ delegate_to: 127.0.0.1
+ tags:
+ - skip_ansible_lint
+
- name: Copy releng-xci to remote host
synchronize:
- src: "{{ XCI_PATH }}/"
+ archive: yes
+ src: "{{ xci_path }}/"
dest: "{{ remote_xci_path }}"
- recursive: yes
delete: yes
- - name: copy flavor inventory
- shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/inventory {{ remote_xci_playbooks }}"
- - name: copy openstack_deploy
- shell: "/bin/cp -rf {{OPENSTACK_OSA_PATH}}/etc/openstack_deploy {{OPENSTACK_OSA_ETC_PATH}}"
- - name: copy openstack_user_config.yml
- shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/openstack_user_config.yml {{OPENSTACK_OSA_ETC_PATH}}"
- failed_when: false
- - name: copy all user override files
- shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/user_variables.yml {{OPENSTACK_OSA_ETC_PATH}}"
- failed_when: false
- - name: copy cinder.yml
- shell: "/bin/cp -rf {{ remote_xci_path }}/xci/installer/osa/files/cinder.yml {{OPENSTACK_OSA_ETC_PATH}}/env.d"
- - name: Configure AIO tempest
+ rsync_opts:
+ - "--recursive"
+ - "--files-from={{ xci_cache }}/releng-xci.files"
+
+ - name: Re-create OpenStack-Ansible /etc directory
+ file:
+ path: "{{ openstack_osa_etc_path }}"
+ state: "{{ item }}"
+ with_items:
+ - absent
+ - directory
+
+ - name: Remove upstream OpenStack-Ansible files
+ file:
+ path: "{{ openstack_osa_path }}/playbooks/{{ item }}"
+ state: absent
+ with_items:
+ - inventory
+ - setup-openstack.yml
+
+ - name: Copy OpenStack-Ansible configuration files
+ command: "/bin/cp -rf {{ item.src }} {{ item.dest }}"
+ args:
+ creates: "{{ item.dest }}/{{ item.src | basename }}"
+ with_items:
+ - { src: "{{ openstack_osa_path }}/etc/openstack_deploy/env.d", dest: "{{ openstack_osa_etc_path }}" }
+ - { src: "{{ openstack_osa_path }}/etc/openstack_deploy/conf.d", dest: "{{ openstack_osa_etc_path }}" }
+ - { src: "{{ openstack_osa_path }}/etc/openstack_deploy/user_secrets.yml", dest: "{{ openstack_osa_etc_path }}" }
+ - { src: "{{ remote_xci_flavor_files }}/openstack_user_config.yml", dest: "{{ openstack_osa_etc_path }}" }
+ - { src: "{{ remote_xci_flavor_files }}/user_variables.yml", dest: "{{ openstack_osa_etc_path }}" }
+ - { src: "{{ remote_xci_flavor_files }}/ceph.yml", dest: "{{ openstack_osa_etc_path }}/conf.d/", cond: xci_ceph_enabled }
+ - { src: "{{ remote_xci_flavor_files }}/user_ceph.yml", dest: "{{ openstack_osa_etc_path }}/user_ceph.yml", cond: xci_ceph_enabled }
+ - { src: "{{ remote_xci_flavor_files }}/user_variables_ceph.yml", dest: "{{ openstack_osa_etc_path }}/user_variables_ceph.yml", cond: xci_ceph_enabled }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/cinder.yml", dest: "{{ openstack_osa_etc_path }}/env.d" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/user_variables_xci.yml", dest: "{{ openstack_osa_etc_path }}/user_variables_xci.yml" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/user_variables_proxy.yml", dest: "{{ openstack_osa_etc_path }}/user_variables_proxy.yml", cond: "{{ lookup('env', 'http_proxy') != '' }}" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/setup-openstack.yml", dest: "{{ openstack_osa_path }}/playbooks" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/ansible-role-requirements.yml", dest: "{{openstack_osa_path}}/ansible-role-requirements.yml", cond: "{{ openstack_osa_version != 'master' }}" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/global-requirement-pins.txt", dest: "{{openstack_osa_path}}/global-requirement-pins.txt", cond: "{{ openstack_osa_version != 'master' }}" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/openstack_services.yml", dest: "{{ openstack_osa_path }}/playbooks/defaults/repo_packages/openstack_services.yml", cond: "{{ openstack_osa_version != 'master' }}" }
+ when: item.cond is not defined or (item.cond is defined and item.cond | bool)
+ loop_control:
+ label: "{{ item.src }}"
+
+ - name: Configure OpenStack-Ansible components
lineinfile:
- path: "{{ OPENSTACK_OSA_ETC_PATH }}/user_variables.yml"
- line: "{{ item }}: {{ RUN_TEMPEST | bool }}"
+ path: "{{ openstack_osa_etc_path }}/user_variables.yml"
+ line: "{{ item.component }}: {{ item.value }}"
state: present
with_items:
- - "tempest_install"
- - "tempest_run"
- - block:
- - name: copy ceph.yml
- shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/ceph.yml {{OPENSTACK_OSA_ETC_PATH}}/conf.d/"
- - name: copy user_ceph.yml
- shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/user_ceph.yml {{OPENSTACK_OSA_ETC_PATH}}/user_ceph.yml"
- - name: copy user_variables_ceph.yml
- shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/user_variables_ceph.yml {{OPENSTACK_OSA_ETC_PATH}}/user_variables_ceph.yml"
- when: XCI_CEPH_ENABLED == "true"
- # TODO: We need to get rid of this as soon as the issue is fixed upstream
- - name: change the haproxy state from disable to enable
- replace:
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/os-keystone-install.yml"
- regexp: '(\s+)haproxy_state: disabled'
- replace: '\1haproxy_state: enabled'
- - name: copy OPNFV OpenStack playbook
- shell: "/bin/cp -rf {{ remote_xci_path }}/xci/installer/osa/files/setup-openstack.yml {{OPENSTACK_OSA_PATH}}/playbooks"
- - name: copy pinned versions of OSA Roles and global requirements
- shell: "/bin/cp -rf {{ remote_xci_path }}/xci/installer/osa/files/{{ item }} {{OPENSTACK_OSA_PATH}}/{{ item }}"
- with_items:
- - "ansible-role-requirements.yml"
- - "global-requirement-pins.txt"
- when:
- - OPENSTACK_OSA_VERSION != "master"
- - name: copy pinned versions of OpenStack services
- shell: "/bin/cp -rf {{ remote_xci_path }}/xci/installer/osa/files/openstack_services.yml {{OPENSTACK_OSA_PATH}}/playbooks/defaults/repo_packages/openstack_services.yml"
+ - { component: "tempest_install", value: "{{ run_tempest | bool }}" }
+ - { component: "tempest_run", value: "{{ run_tempest | bool }}" }
+ - { component: "core_openstack", value: "{{ core_openstack_install | bool }}" }
+
+ - name: "Configure http_proxy_env_url"
+ lineinfile:
+ path: "{{openstack_osa_etc_path}}/user_variables_proxy.yml"
+ regexp: "^http_proxy_env_url:.*"
+ line: "{{ 'http_proxy_env_url: ' + lookup('env','http_proxy') }}"
when:
- - OPENSTACK_OSA_VERSION != "master"
- - include: bootstrap-scenarios.yml
+ - lookup('env','http_proxy') != ""
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Prepare everything to run the {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+
- name: bootstrap ansible on opnfv host
command: "/bin/bash ./scripts/bootstrap-ansible.sh"
args:
- chdir: "{{OPENSTACK_OSA_PATH}}"
- - name: install python Crypto module
- package:
- name: "{{ python_crypto_package_name }}"
- - name: install PyYAML
+ creates: "/usr/local/bin/openstack-ansible"
+ chdir: "{{openstack_osa_path}}"
+
+ - name: install opnfv pip required packages
pip:
- name: pyyaml
+ name: "{{ item }}"
state: present
- - name: generate password token
- command: "python pw-token-gen.py --file {{OPENSTACK_OSA_ETC_PATH}}/user_secrets.yml"
- args:
- chdir: "{{OPENSTACK_OSA_PATH}}/scripts"
- - name: check if certificate directory /etc/ssl/certs exists already
- stat: path=/etc/ssl/certs
- register: check_etc_ssl_certs
- - name: create certificate directory /etc/ssl/certs
+ extra_args: '-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt'
+ with_items:
+ - pyyaml
+ - python-neutronclient
+ - python-openstackclient
+ - name: Install ARA callback plugin in OSA virtualenv
+ pip:
+ name: ara
+ version: 0.16.4
+ state: present
+ extra_args: '-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt'
+ executable: '/opt/ansible-runtime/bin/pip'
+ - name: Determine ARA callback location
+ command: "/opt/ansible-runtime/bin/python -c 'import os,ara; print(os.path.dirname(ara.__file__))'"
+ changed_when: False
+ register: _ara_install_dir
+ - name: Create local Ansible plugins directory
file:
- path: "/etc/ssl/certs"
+ path: "{{ ansible_env.HOME }}/.ansible/plugins/callback/ara"
state: directory
- when: check_etc_ssl_certs.stat.exists == false
- - name: create key directory /etc/ssl/private
+ - name: Configure ARA callback
file:
- path: "/etc/ssl/private"
- state: directory
- - name: copy certificate to /etc/ssl/certs
- copy:
- src: "/etc/ssl/certs/xci.crt"
- dest: "/etc/ssl/certs/"
- - name: read remote key from /etc/ssl/private
- set_fact:
- xci_ssl_key: "{{ lookup('pipe', 'sudo cat /etc/ssl/private/xci.key' ) }}"
- - name: copy key to /etc/ssl/private
- copy:
- content: "{{ xci_ssl_key }}"
- dest: "/etc/ssl/private/xci.key"
- become: true
- - name: install opnfv required packages
- package:
- name: "{{ opnfv_required_packages }}"
- state: latest
- # Docker is needed for functest
- - name: Ensure Docker service is started and enabled
- service:
- name: "{{ docker_service_name }}"
- state: started
- enabled: yes
- - name: install opnfv required pip packages
- pip:
- name: "{{ opnfv_required_pip }}"
- state: present
-
-- hosts: localhost
- remote_user: root
-
- tasks:
- - name: Append public keys to authorized_keys
- shell: "/bin/cat {{ ansible_env.HOME }}/.ssh/id_rsa.pub >> {{ XCI_PATH }}/xci/files/authorized_keys"
+ path: "{{ ansible_env.HOME }}/.ansible/plugins/callback/ara/callbacks"
+ src: "{{ _ara_install_dir.stdout }}/plugins/callbacks"
+ force: yes
+ state: link
+ - name: generate password token
+ command: "python pw-token-gen.py --file {{openstack_osa_etc_path}}/user_secrets.yml"
+ args:
+ chdir: "{{openstack_osa_path}}/scripts"
+ changed_when: True
-- hosts: opnfv
- remote_user: root
- vars_files:
- - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+ - name: fetch xci environment
+ copy:
+ src: "{{ xci_path }}/.cache/xci.env"
+ dest: /root/xci.env
- pre_tasks:
- - name: Load distribution variables
+ - name: Reload OpenStack-Ansible variables
include_vars:
- file: "{{ item }}"
- failed_when: false
- with_items:
- - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
- - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
- - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/user_variables.yml"
- roles:
- - role: "openstack-ansible-openstack_openrc"
+ file: "{{ xci_flavor_ansible_file_path }}/user_variables.yml"
- tasks:
- - name: add extra insecure flag to generated openrc
- blockinfile:
- dest: "{{ ansible_env.HOME }}/openrc"
- block: |
- export OS_INSECURE=true
+ - name: Generate openrc
+ include_role:
+ name: "openstack-ansible-openstack_openrc"
- name: fetch generated openrc
fetch:
src: "{{ ansible_env.HOME }}/openrc"
- dest: "{{ XCI_PATH }}/.cache/openrc"
+ dest: "{{ xci_path }}/.cache/openrc"
flat: true
+
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
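The single copy loop introduced above replaces a series of per-file copy tasks; the optional cond key is evaluated per item, so a file is only copied when its condition holds. A minimal standalone sketch of the same pattern, with hypothetical file names and flag:

    - name: Copy configuration files (sketch of the cond pattern)
      command: "/bin/cp -rf {{ item.src }} {{ item.dest }}"
      args:
        creates: "{{ item.dest }}/{{ item.src | basename }}"
      with_items:
        - { src: "/tmp/always.yml", dest: "/etc/demo" }                            # no cond: always copied
        - { src: "/tmp/optional.yml", dest: "/etc/demo", cond: "{{ some_flag }}" } # copied only when some_flag is true
      when: item.cond is not defined or (item.cond is defined and item.cond | bool)
      loop_control:
        label: "{{ item.src }}"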
diff --git a/xci/installer/osa/playbooks/configure-targethosts.yml b/xci/installer/osa/playbooks/configure-targethosts.yml
index fb43a920..dfa17696 100644
--- a/xci/installer/osa/playbooks/configure-targethosts.yml
+++ b/xci/installer/osa/playbooks/configure-targethosts.yml
@@ -1,49 +1,36 @@
---
-- hosts: all
- remote_user: root
- tasks:
- - name: add public key to host
- copy:
- src: "{{ XCI_PATH }}/xci/files/authorized_keys"
- dest: /root/.ssh/authorized_keys
-
-- hosts: controller
+- hosts: openstack
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
+ HTTP_PROXY: "{{ lookup('env','http_proxy') }}"
+ HTTPS_PROXY: "{{ lookup('env','https_proxy') }}"
+ NO_PROXY: "{{ lookup('env','no_proxy') }}"
remote_user: root
vars_files:
- - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+ - "{{ xci_path }}/xci/var/opnfv.yml"
pre_tasks:
- name: Load distribution variables
include_vars:
file: "{{ item }}"
with_items:
- - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
- - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
- roles:
- - role: configure-network
- # we need to force sync time with ntp or the nodes will be out of sync timewise
- - role: synchronize-time
-
-- hosts: compute
- remote_user: root
- vars_files:
- - "{{ XCI_PATH }}/xci/var/opnfv.yml"
-
- pre_tasks:
- - name: Load distribution variables
- include_vars:
- file: "{{ item }}"
- with_items:
- - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
- - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
- roles:
- - role: configure-network
- # we need to force sync time with ntp or the nodes will be out of sync timewise
- - role: synchronize-time
- - role: configure-ceph
- when: XCI_CEPH_ENABLED == "true"
-
-- hosts: compute00
- remote_user: root
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
roles:
+ - role: ruzickap.proxy_settings
+ proxy_settings_http_proxy: "{{ lookup('env','http_proxy') }}"
+ proxy_settings_https_proxy: "{{ lookup('env','https_proxy') }}"
+ proxy_settings_ftp_proxy: "{{ lookup('env','ftp_proxy') }}"
+ proxy_settings_no_proxy: "{{ lookup('env','no_proxy') }}"
+ - role: bootstrap-host
- role: configure-nfs
+ when:
+ - "'compute' in group_names"
+ - role: configure-ceph
+ when:
+ - xci_ceph_enabled == "true"
+ - "'compute' in group_names"
+ tasks:
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
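Collapsing the separate controller and compute plays into one play over the openstack group works because each role can be gated on group membership; a minimal sketch of that pattern (group and role names as in the hunk above):

    - hosts: openstack
      remote_user: root
      roles:
        - role: bootstrap-host              # runs on every host in the play
        - role: configure-nfs
          when:
            - "'compute' in group_names"    # runs only on hosts that are also in the compute group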
diff --git a/xci/installer/osa/playbooks/post-deployment.yml b/xci/installer/osa/playbooks/post-deployment.yml
new file mode 100644
index 00000000..36c052c9
--- /dev/null
+++ b/xci/installer/osa/playbooks/post-deployment.yml
@@ -0,0 +1,66 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ - "{{ xci_path }}/xci/installer/osa/files/openstack_services.yml"
+ - "{{ xci_path }}/xci/installer/osa/files/{{ xci_flavor }}/user_variables.yml"
+
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
+ HTTP_PROXY: "{{ lookup('env','http_proxy') }}"
+ HTTPS_PROXY: "{{ lookup('env','https_proxy') }}"
+ NO_PROXY: "{{ lookup('env','no_proxy') }}"
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_scenario_path: "{{ ansible_env.HOME }}/releng-xci/.cache/repos/scenarios/{{ deploy_scenario }}/scenarios/{{ deploy_scenario }}"
+
+ roles:
+ - role: ruzickap.proxy_settings
+ proxy_settings_http_proxy: "{{ lookup('env','http_proxy') }}"
+ proxy_settings_https_proxy: "{{ lookup('env','https_proxy') }}"
+ proxy_settings_ftp_proxy: "{{ lookup('env','ftp_proxy') }}"
+ proxy_settings_no_proxy: "{{ lookup('env','no_proxy') }}"
+
+ tasks:
+ - name: "Configure http_proxy_env_url"
+ lineinfile:
+ path: "{{openstack_osa_etc_path}}/user_variables_proxy.yml"
+ regexp: "^http_proxy_env_url:.*"
+ line: "{{ 'http_proxy_env_url: ' + lookup('env','http_proxy') }}"
+ when:
+ - lookup('env','http_proxy') != ""
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Check if any post-deployment task defined for {{ deploy_scenario }} role
+ stat:
+ path: "{{ remote_xci_scenario_path }}/role/{{ deploy_scenario }}/tasks/post-deployment.yml"
+ register: post_deployment_yml
+
+ - name: Execute post-deployment tasks of {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+ tasks_from: post-deployment
+ when:
+ - post_deployment_yml.stat.exists
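A scenario opts into this hook simply by shipping a post-deployment.yml under its role's tasks directory; a hypothetical example for the os-nosdn-nofeature scenario (the task shown is illustrative only):

    # <scenario repo>/role/os-nosdn-nofeature/tasks/post-deployment.yml
    ---
    - name: Example post-deployment check
      command: openstack endpoint list
      changed_when: false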
diff --git a/xci/installer/osh/README b/xci/installer/osh/README
new file mode 100644
index 00000000..902ac10e
--- /dev/null
+++ b/xci/installer/osh/README
@@ -0,0 +1,50 @@
+Requirements:
+ 1. Performance of hosts
+ There are no official performance requirements. The following settings are recommended:
+ - VM_CPU=6
+ - VM_DISK=80GB
+ - VM_MEMORY_SIZE=16GB
+
+ 2. Distributions
+ - Ubuntu 16.04
+
+ 3. Packages:
+ - Ansible v2.4 (or newer) and python-netaddr are installed on the machine that will run Ansible commands
+ - Jinja 2.9 (or newer) is required to run the Ansible Playbooks
+
+ 4. Others:
+ - The target servers must have access to the Internet in order to pull docker images.
+ - The target servers must be configured to allow IPv4 forwarding.
+ - Your ssh key must be copied to all servers that are part of your inventory.
+ - Firewalls are not managed by the deployment; you will need to implement your own rules. To avoid issues during the deployment, disable the firewall.
+
+Flavors:
+ 1. mini: One deployment host, one master host and one node host.
+ 2. noha: One deployment host, one master host and two node hosts.
+
+Components Installed:
+ 1. etcd
+ 2. network plugins:
+ - calico
+ 3. kubernetes
+ 4. docker
+
+How to use:
+
+Clone the OPNFV releng-xci repository
+
+ git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
+
+Change into the directory where the sandbox script is located
+
+ cd releng-xci/xci
+
+Set the variables required to run openstack-helm
+
+ export INSTALLER_TYPE=osh
+ export DEPLOY_SCENARIO=k8-calico-nofeature
+ export XCI_FLAVOR=mini
+
+Execute the sandbox script
+
+ ./xci-deploy.sh
diff --git a/xci/installer/osh/deploy.sh b/xci/installer/osh/deploy.sh
new file mode 100755
index 00000000..e56845b8
--- /dev/null
+++ b/xci/installer/osh/deploy.sh
@@ -0,0 +1,170 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Huawei
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -o errexit
+set -o nounset
+set -o pipefail
+
+OSH_XCI_PLAYBOOKS="$(dirname $(realpath ${BASH_SOURCE[0]}))/playbooks"
+export ANSIBLE_ROLES_PATH=$HOME/.ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles
+
+#-------------------------------------------------------------------------------
+# Configure localhost
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng-xci repository
+# - clones kubernetes-incubator/kubespray repository
+# - creates log directory
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring localhost for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \
+ -i dynamic_inventory.py configure-localhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured localhost for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure installer
+#-------------------------------------------------------------------------------
+# TODO: summarize what this playbook does
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring kubespray installer"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-installer.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured kubespray installer"
+
+#-------------------------------------------------------------------------------
+# Configure deployment host, opnfv
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - synchronizes the opnfv/releng-xci and kubernetes-incubator/kubespray repositories
+# - generates/prepares ssh keys
+# - copies flavor files to be used by kubespray
+# - installs packages required by kubespray
+#-------------------------------------------------------------------------------
+echo "Info: Configuring opnfv deployment host for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-opnfvhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured opnfv deployment host for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure target hosts for kubespray
+#-------------------------------------------------------------------------------
+# This playbook is run for all flavors except aio, since aio is configured by configure-opnfvhost.yml
+# This playbook
+# - adds public keys to target hosts
+# - installs packages required by kubespray
+# - configures haproxy service
+#-------------------------------------------------------------------------------
+if [ $XCI_FLAVOR != "aio" ]; then
+ echo "Info: Configuring target hosts for kubespray"
+ echo "-----------------------------------------------------------------------"
+ cd $OSH_XCI_PLAYBOOKS
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-targethosts.yml
+ echo "-----------------------------------------------------------------------"
+ echo "Info: Configured target hosts for kubespray"
+fi
+
+
+echo "Info: Using kubespray to deploy the kubernetes cluster"
+echo "-----------------------------------------------------------------------"
+ssh root@$OPNFV_HOST_IP "set -o pipefail; export XCI_FLAVOR=$XCI_FLAVOR; export INSTALLER_TYPE=$INSTALLER_TYPE; \
+ export IDF=/root/releng-xci/xci/var/idf.yml; export PDF=/root/releng-xci/xci/var/pdf.yml; \
+ cd releng-xci/.cache/repos/kubespray/; ansible-playbook \
+ -i inventory/opnfv/dynamic_inventory.py cluster.yml -b | tee setup-kubernetes.log"
+scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \
+ $LOG_PATH/setup-kubernetes.log
+
+
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-kubenet.yml
+echo
+echo "-----------------------------------------------------------------------"
+echo "Info: Kubernetes installation is successfully completed!"
+echo "-----------------------------------------------------------------------"
+
+#-------------------------------------------------------------------------------
+# Execute post-installation tasks
+#-------------------------------------------------------------------------------
+# Playbook post-deployment.yml is used to execute any post-deployment tasks that
+# are required for the scenario under test.
+#-------------------------------------------------------------------------------
+# copy admin.conf
+ssh root@$OPNFV_HOST_IP "mkdir -p ~/.kube/;\
+ cp -f ~/admin.conf ~/.kube/config;"
+
+echo "-----------------------------------------------------------------------"
+echo "Info: Running post-deployment scenario role"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ post-deployment.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Post-deployment scenario role execution done"
+echo "-----------------------------------------------------------------------"
+echo
+echo "Login opnfv host ssh root@$OPNFV_HOST_IP
+according to the user-guide to create a service
+https://kubernetes.io/docs/user-guide/walkthrough/k8s201/"
+echo
+echo "-----------------------------------------------------------------------"
+echo "Info: Kubernetes login details"
+echo "-----------------------------------------------------------------------"
+echo
+# Get the dashboard URL
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $OPNFV_HOST_IP;
+then
+ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $OPNFV_HOST_IP;
+echo "Info: known_hosts entry for opnfv host from previous deployment found and deleted"
+fi
+DASHBOARD_SERVICE=$(ssh -q -o StrictHostKeyChecking=no root@$OPNFV_HOST_IP "kubectl get service -n kube-system |grep kubernetes-dashboard")
+DASHBOARD_PORT=$(echo ${DASHBOARD_SERVICE} | awk '{print $5}' |awk -F "[:/]" '{print $2}')
+KUBER_SERVER_URL=$(ssh root@$OPNFV_HOST_IP "grep -r server ~/.kube/config")
+echo "Info: Kubernetes Dashboard URL:"
+echo $KUBER_SERVER_URL | awk '{print $2}'| sed -n "s#:[0-9]*\$#:$DASHBOARD_PORT#p"
+
+# Get the dashboard user and password
+MASTER_IP=$(echo ${KUBER_SERVER_URL} | awk '{print $2}' |awk -F "[:/]" '{print $4}')
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $MASTER_IP;
+then
+ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $MASTER_IP;
+echo "Info: known_hosts entry for master host from previous deployment found and deleted"
+fi
+USER_CSV=$(ssh -q -o StrictHostKeyChecking=no root@$MASTER_IP " cat /etc/kubernetes/users/known_users.csv")
+USERNAME=$(echo $USER_CSV |awk -F ',' '{print $2}')
+PASSWORD=$(echo $USER_CSV |awk -F ',' '{print $1}')
+echo "Info: Dashboard username: ${USERNAME}"
+echo "Info: Dashboard password: ${PASSWORD}"
+
+echo "-----------------------------------------------------------------------"
+echo "Info: Continue with running the openstack-helm installation"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -v -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ install-openstack-helm.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Openstack-helm installation execution done"
+echo "-----------------------------------------------------------------------"
+echo
+
+
+# vim: set ts=4 sw=4 expandtab:
diff --git a/xci/installer/osh/files/ha/inventory/group_vars/all.yml b/xci/installer/osh/files/ha/inventory/group_vars/all.yml
new file mode 100644
index 00000000..d1b946a7
--- /dev/null
+++ b/xci/installer/osh/files/ha/inventory/group_vars/all.yml
@@ -0,0 +1,8 @@
+---
+loadbalancer_apiserver:
+ address: 192.168.122.222
+ port: 8383
+
+apiserver_loadbalancer_domain_name: 192.168.122.222
+supplementary_addresses_in_ssl_keys:
+ - 192.168.122.222
diff --git a/xci/installer/osh/playbooks/configure-installer.yml b/xci/installer/osh/playbooks/configure-installer.yml
new file mode 100644
index 00000000..383f55fc
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-installer.yml
@@ -0,0 +1,51 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2019 Ericsson Software Technology and Others
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ connection: local
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ tasks:
+ - name: delete existing kubespray/inventory/opnfv directory
+ file:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+ state: absent
+
+ - name: copy kubespray/inventory/sample as kubespray/inventory/opnfv
+ copy:
+ src: "{{ xci_path }}/.cache/repos/kubespray/inventory/sample/"
+ dest: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+
+ - name: update kubespray k8s-cluster.yml for xci
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ with_items:
+ - { regexp: "kube_version:.*", line: "kube_version: {{ kubernetes_version }}" }
+ - { regexp: "kubeconfig_localhost:.*", line: "kubeconfig_localhost: true" }
+ - { regexp: "kube_basic_auth:.*", line: "kube_basic_auth: true" }
+ - { regexp: "dashboard_enabled:.*", line: "dashboard_enabled: true" }
+
+# NOTE(fdegir): this task is kept separate from the lineinfile task above because escaping
+# curly braces does not work with with_items: ansible would try to resolve {{ ansible_env.HOME }}
+# at templating time, whereas it should point to the home folder of the user executing this
+# task at runtime.
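+# The line that ends up in k8s-cluster.yml is therefore: artifacts_dir: '{{ ansible_env.HOME }}'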
+ - name: update kubespray artifacts_dir
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "artifacts_dir:.*"
+ line: "artifacts_dir: '{{ '{{' }} ansible_env.HOME {{ '}}' }}'"
+
+ - name: change dashboard server type to NodePort
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2"
+ insertafter: 'targetPort'
+ line: " type: NodePort"
diff --git a/xci/installer/osh/playbooks/configure-kubenet.yml b/xci/installer/osh/playbooks/configure-kubenet.yml
new file mode 100644
index 00000000..18a126c1
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-kubenet.yml
@@ -0,0 +1,51 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# NOTE(hwoarang): kubenet expects networking to be prepared by the administrator, so it has to be
+# done as part of the node configuration. All we need to do is add static routes on every node
+# so that the cbr0 interfaces can talk to each other.
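+# For example, if another node's cbr0 network is 10.233.65.0/24 and its primary address is
+# 192.168.122.4, every other node gets a route equivalent to:
+#   ip route add 10.233.65.0/24 via 192.168.122.4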
+- name: Prepare networking for kubenet
+ hosts: k8s-cluster
+ remote_user: root
+ gather_facts: True
+ become: yes
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ tasks:
+ - name: Configure static routes
+ block:
+ - name: Collect cbr0 information from the nodes
+ set_fact:
+ kubenet_xci_static_routes: |-
+ {% set static_routes = [] %}
+ {% for host in groups['k8s-cluster']|select("ne", inventory_hostname) %}
+ {%- set _ = static_routes.append(
+ {'network': (hostvars[host]['ansible_cbr0']['ipv4']['network']+'/'+
+ hostvars[host]['ansible_cbr0']['ipv4']['netmask'])|ipaddr('net'),
+ 'gateway': hostvars[host]['ansible_default_ipv4']['address']}) -%}
+ {% endfor %}
+ {{ static_routes }}
+
+ - name: Add static routes on each node
+ shell: "ip route show | grep -q {{ item.network }} || ip route add {{ item.network }} via {{ item.gateway }}"
+ with_items: "{{ kubenet_xci_static_routes }}"
+ loop_control:
+ label: "{{ item.network }}"
+ when: deploy_scenario.find('k8-nosdn-') != -1
+
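+ # rp_filter is relaxed (0) on the deployment host only when the kubenet static routes above
+ # were configured; otherwise it is left enabled (1).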
+ - name: Ensure rp_filter is disabled on localhost
+ sysctl:
+ name: net.ipv4.conf.all.rp_filter
+ sysctl_set: yes
+ state: present
+ value: "{{ (kubenet_xci_static_routes is defined) | ternary(0, 1) }}"
+ reload: yes
+ delegate_to: localhost
+ run_once: True
diff --git a/xci/installer/osh/playbooks/configure-opnfvhost.yml b/xci/installer/osh/playbooks/configure-opnfvhost.yml
new file mode 100644
index 00000000..52e42b06
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-opnfvhost.yml
@@ -0,0 +1,101 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_path: "{{ ansible_env.HOME }}/releng-xci"
+ remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{ installer_type }}/files/{{ xci_flavor }}"
+ remote_xci_playbooks: "{{ ansible_env.HOME }}/releng-xci/xci/playbooks"
+
+ roles:
+ - role: bootstrap-host
+ configure_network: xci_flavor != 'aio'
+
+ tasks:
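+ # Build the list of files rsync should copy: every git-tracked file plus the .git metadata,
+ # the cached repositories and the cached XCI environment file.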
+ - name: Create list of files to copy
+ shell: |
+ git ls-tree -r --name-only HEAD > {{ xci_cache }}/releng-xci.files
+ echo ".git/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/repos/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/xci.env" >> {{ xci_cache }}/releng-xci.files
+ args:
+ executable: /bin/bash
+ chdir: "{{ xci_path }}"
+ changed_when: False
+ delegate_to: 127.0.0.1
+ tags:
+ - skip_ansible_lint
+
+ - name: Copy releng-xci to remote host
+ synchronize:
+ archive: yes
+ src: "{{ xci_path }}/"
+ dest: "{{ remote_xci_path }}"
+ delete: yes
+ rsync_opts:
+ - "--recursive"
+ - "--files-from={{ xci_cache }}/releng-xci.files"
+
+ - name: link xci dynamic inventory to kubespray/inventory/opnfv directory
+ file:
+ src: "{{ remote_xci_playbooks }}/dynamic_inventory.py"
+ path: "{{ remote_xci_path }}/.cache/repos/kubespray/inventory/opnfv/dynamic_inventory.py"
+ state: link
+
+ - name: Download kubectl and place it to /usr/local/bin
+ get_url:
+ url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version }}/bin/linux/amd64/kubectl"
+ dest: /usr/local/bin/kubectl
+ owner: root
+ group: root
+ mode: 0755
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Prepare everything to run the {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+
+ - name: Install required packages
+ package:
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
+ state: present
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+ when: xci_flavor == 'aio'
+
+ - name: pip install required packages
+ pip:
+ name: "{{ item.name }}"
+ version: "{{ item.version | default(omit) }}"
+ with_items:
+ - { name: 'ansible', version: "{{ xci_kube_ansible_pip_version }}" }
+ - { name: 'netaddr' }
+ - { name: 'ansible-modules-hashivault' }
+
+ - name: fetch xci environment
+ copy:
+ src: "{{ xci_path }}/.cache/xci.env"
+ dest: /root/xci.env
+
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
diff --git a/xci/installer/osh/playbooks/configure-targethosts.yml b/xci/installer/osh/playbooks/configure-targethosts.yml
new file mode 100644
index 00000000..2fde9877
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-targethosts.yml
@@ -0,0 +1,40 @@
+---
+- hosts: k8s-cluster
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+
+ roles:
+ - role: bootstrap-host
+
+ tasks:
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
+
+ - name: Install dbus
+ package:
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
+ state: present
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+
+- hosts: kube-master
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ roles:
+ - role: "keepalived"
+ when: xci_flavor == 'ha'
+ - role: "haproxy_server"
+ haproxy_service_configs: "{{ haproxy_default_services }}"
+ when: xci_flavor == 'ha'
diff --git a/xci/installer/osh/playbooks/group_vars/all.yml b/xci/installer/osh/playbooks/group_vars/all.yml
new file mode 100644
index 00000000..7453bdab
--- /dev/null
+++ b/xci/installer/osh/playbooks/group_vars/all.yml
@@ -0,0 +1,55 @@
+---
+keepalived_ubuntu_src: "uca"
+keepalived_uca_apt_repo_url: "{{ uca_apt_repo_url | default('http://ubuntu-cloud.archive.canonical.com/ubuntu') }}"
+
+keepalived_sync_groups:
+ haproxy:
+ instances:
+ - external
+
+haproxy_keepalived_external_interface: "{{ ansible_default_ipv4.interface }}"
+haproxy_keepalived_authentication_password: 'keepalived'
+keepalived_instances:
+ external:
+ interface: "{{ haproxy_keepalived_external_interface }}"
+ state: "BACKUP"
+ virtual_router_id: "{{ haproxy_keepalived_external_virtual_router_id | default ('10') }}"
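+ # The expression below evaluates to (number of play hosts - host index) * 50, giving hosts
+ # earlier in the play order a higher keepalived priority.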
+ priority: "{{ ((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*100)-((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*50) }}"
+ authentication_password: "{{ haproxy_keepalived_authentication_password }}"
+ vips:
+ - "{{ haproxy_keepalived_external_vip_cidr | default('192.168.122.222/32') }} dev {{ haproxy_keepalived_external_interface }}"
+
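+# HAProxy fronts the Kubernetes API: the VIP listens on port 8383 and balances to the
+# kube-apiserver on port 6443 of the kube-master nodes.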
+haproxy_default_services:
+ - service:
+ haproxy_service_name: proxy-apiserver
+ haproxy_backend_nodes: "{{ groups['kube-master'] | default([]) }}"
+ haproxy_port: 8383
+ haproxy_backend_port: 6443
+ haproxy_balance_type: tcp
+
+haproxy_bind_on_non_local: "True"
+haproxy_use_keepalived: "True"
+keepalived_selinux_compile_rules:
+ - keepalived_ping
+ - keepalived_haproxy_pid_file
+
+# Ensure that the package state matches the global setting
+haproxy_package_state: "latest"
+
+haproxy_whitelist_networks:
+ - 192.168.0.0/16
+ - 172.16.0.0/12
+ - 10.0.0.0/8
+
+haproxy_galera_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_glance_registry_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_keystone_admin_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_nova_metadata_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_rabbitmq_management_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_git_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_cache_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_octavia_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_ssl: false
+
+internal_lb_vip_address: "192.168.122.222"
+external_lb_vip_address: "{{ internal_lb_vip_address }}"
diff --git a/xci/installer/osh/playbooks/install-openstack-helm.yml b/xci/installer/osh/playbooks/install-openstack-helm.yml
new file mode 100644
index 00000000..a16572a5
--- /dev/null
+++ b/xci/installer/osh/playbooks/install-openstack-helm.yml
@@ -0,0 +1,24 @@
+---
+- hosts: kube-node
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ roles:
+ - role: prepare-kube-nodes-osh
+
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ roles:
+ - role: prepare-opnfvhost-osh
+ - role: prepare-osh
+ - role: install-osh-mini
+ when: xci_flavor == 'mini'
+ environment:
+ - CONTAINER_DISTRO_NAME: "{{ container_distro_name }}"
+ - CONTAINER_DISTRO_VERSION: "{{ container_distro_version }}"
+ - OPENSTACK_RELEASE: "{{ openstack_osh_version }}"
+ - role: install-osh-noha
+ when: xci_flavor == 'noha'
diff --git a/xci/installer/osh/playbooks/post-deployment.yml b/xci/installer/osh/playbooks/post-deployment.yml
new file mode 100644
index 00000000..5c2f7f36
--- /dev/null
+++ b/xci/installer/osh/playbooks/post-deployment.yml
@@ -0,0 +1,42 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_scenario_path: "{{ ansible_env.HOME }}/releng-xci/.cache/repos/scenarios/{{ deploy_scenario }}/scenarios/{{ deploy_scenario }}"
+
+ tasks:
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Check if any post-deployment task defined for {{ deploy_scenario }} role
+ stat:
+ path: "{{ remote_xci_scenario_path }}/role/{{ deploy_scenario }}/tasks/post-deployment.yml"
+ register: post_deployment_yml
+
+ - name: Execute post-deployment tasks of {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+ tasks_from: post-deployment
+ when:
+ - post_deployment_yml.stat.exists
diff --git a/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml b/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml
new file mode 100644
index 00000000..e5df54fa
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml
@@ -0,0 +1,109 @@
+---
+
+- name: Setup Clients
+ command: ./tools/deployment/common/setup-client.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy the ingress controller
+ command: ./tools/deployment/component/common/ingress.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy MariaDB
+ command: ./tools/deployment/component/common/mariadb.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy memcached
+ command: ./tools/deployment/component/common/memcached.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy RabbitMQ
+ command: ./tools/deployment/component/common/rabbitmq.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Update nfs-provisioner helm-chart
+ shell: helm dependency update nfs-provisioner
+ args:
+ chdir: /root/repos/openstack-helm-infra
+ executable: /bin/bash
+ tags:
+ - skip_ansible_lint
+
+- name: Deploy nfs-provisioner
+ command: ./tools/deployment/component/nfs-provisioner/nfs-provisioner.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Keystone
+ command: ./tools/deployment/component/keystone/keystone.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Heat
+ command: ./tools/deployment/component/heat/heat.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Glance
+ command: ./tools/deployment/component/glance/glance.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy OpenvSwitch
+ command: ./tools/deployment/component/compute-kit/openvswitch.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Libvirt
+ command: ./tools/deployment/component/compute-kit/libvirt.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Add br-vxlan as the tunnel interface
+ lineinfile:
+ path: /root/repos/openstack-helm/tools/deployment/component/compute-kit/compute-kit.sh
+ regexp: 'tunnel: docker0'
+ line: ' tunnel: br-vxlan'
+
+- name: Deploy Compute Kit (Nova and Neutron)
+ command: ./tools/deployment/component/compute-kit/compute-kit.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Copy script to the worker node
+ command: "scp -o \"StrictHostKeyChecking no\" tools/deployment/developer/ceph/170-setup-gateway.sh root@{{ hostvars.node1.ip }}:170-setup-gateway.sh"
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Setup the gateway to the public network at worker node
+ command: /root/170-setup-gateway.sh
+ changed_when: false
+ delegate_to: node1
+
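+# Route the openstack-helm public network (172.24.4.0/24) through the worker node that was
+# just set up as its gateway.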
+- name: Add a route from opnfv to worker node for the public network
+ command: ip route add 172.24.4.0/24 via 192.168.122.4
+ changed_when: false
+
+# Deployment validation
+- name: Exercise the cloud
+ command: ./tools/deployment/developer/common/900-use-it.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
diff --git a/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml b/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml
new file mode 100644
index 00000000..03c02a83
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml
@@ -0,0 +1,18 @@
+---
+# Copyright 2019, SUSE Linux GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cacheable: yes
+container_distro_name: "{{ (osh_distro=='opensuse') | ternary('opensuse', 'ubuntu') }}"
+container_distro_version: "{{ (osh_distro=='opensuse') | ternary('15', 'xenial') }}"
diff --git a/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml b/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml
new file mode 100644
index 00000000..befdcfce
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml
@@ -0,0 +1,130 @@
+---
+- name: Setup Clients
+ command: ./tools/deployment/multinode/010-setup-client.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy the ingress controller
+ command: ./tools/deployment/multinode/020-ingress.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Ceph
+ command: ./tools/deployment/multinode/030-ceph.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Activate the openstack namespace to be able to use Ceph
+ command: ./tools/deployment/multinode/040-ceph-ns-activate.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy MariaDB
+ command: ./tools/deployment/multinode/050-mariadb.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy RabbitMQ
+ command: ./tools/deployment/multinode/060-rabbitmq.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy memcached
+ command: ./tools/deployment/multinode/070-memcached.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Keystone
+ command: ./tools/deployment/multinode/080-keystone.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Horizon
+ command: ./tools/deployment/multinode/085-horizon.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Rados Gateway for object store
+ command: ./tools/deployment/multinode/090-ceph-radosgateway.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Glance
+ command: ./tools/deployment/multinode/100-glance.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Cinder
+ command: ./tools/deployment/multinode/110-cinder.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy OpenvSwitch
+ command: ./tools/deployment/multinode/120-openvswitch.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Libvirt
+ command: ./tools/deployment/multinode/130-libvirt.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Add br-vxlan as the tunnel interface
+ lineinfile:
+ path: /root/repos/openstack-helm/tools/deployment/multinode/140-compute-kit.sh
+ regexp: 'NETWORK_TUNNEL_DEV="$(network_tunnel_dev)"'
+ line: 'NETWORK_TUNNEL_DEV=br-vxlan'
+
+- name: Deploy Compute Kit (Nova and Neutron)
+ command: ./tools/deployment/multinode/140-compute-kit.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Heat
+ command: ./tools/deployment/multinode/150-heat.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Barbican
+ command: ./tools/deployment/multinode/160-barbican.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Copy script to the worker node
+ command: "scp -o \"StrictHostKeyChecking no\" tools/deployment/developer/ceph/170-setup-gateway.sh root@{{ hostvars.node1.ip }}:170-setup-gateway.sh"
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Setup the gateway to the public network at worker node
+ command: /root/170-setup-gateway.sh
+ changed_when: false
+ delegate_to: node1
+
+- name: Add a route from opnfv to worker node for the public network
+ command: ip route add 172.24.4.0/24 via 192.168.122.4
+ changed_when: false
+
+# Deployment validation
+- name: Exercise the cloud
+ command: ./tools/deployment/developer/common/900-use-it.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
diff --git a/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml
new file mode 100644
index 00000000..ff0aff60
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Install packages in kubernetes nodes
+ package:
+ name: "{{ packages }}"
+ state: present
+ changed_when: false
+ vars:
+ packages:
+ - ceph-common
+ - rbd-nbd
+ - apparmor
+ - nfs-common
diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service
new file mode 100644
index 00000000..c3988d6f
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Helm Server
+After=network.target
+
+[Service]
+User=root
+Restart=always
+ExecStart=/usr/bin/helm serve
+
+[Install]
+WantedBy=multi-user.target
diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml
new file mode 100644
index 00000000..72ae821f
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml
@@ -0,0 +1,130 @@
+---
+- name: Set kubernetes service account permissions
+ command: "kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default"
+ changed_when: false
+
+- name: Set kubernetes node labels
+ command: "kubectl label nodes {{ item }} {{ node_labels[item]|join(' ') }}"
+ changed_when: false
+ with_items: "{{ groups['kube-node'] }}"
+
+- name: Create directories
+ file:
+ path: /root/{{ item }}
+ state: directory
+ with_items:
+ ['repos', 'tmp', '.helm/repository/local']
+
+- name: Rename bifrost clouds file to get it out of precedence
+ command: "mv .config/openstack/clouds.yaml .config/openstack/clouds.yaml.bifrost"
+ changed_when: false
+
+- name: Clone openstack-helm
+ git:
+ repo: "{{ osh_git_url }}"
+ dest: /root/repos/openstack-helm
+ version: "{{ osh_version }}"
+ update: true
+ force: true
+ register: git_clone
+ until: git_clone is success
+ retries: 2
+ delay: 5
+
+- name: Fix dns nameserver for openstack installation (mini flavor)
+ lineinfile:
+ path: /root/repos/openstack-helm/tools/gate/files/heat-public-net-deployment.yaml
+ regexp: '10\.96\.0\.10'
+ line: " - 10.233.0.3"
+
+- name: Fix dns nameserver for openstack installation (noha flavor)
+ lineinfile:
+ path: /root/repos/openstack-helm/tempest/values.yaml
+ regexp: 'dns_servers'
+ line: " dns_servers: 10.233.0.3"
+
+- name: Clone openstack-helm-infra
+ git:
+ repo: "{{ osh_infra_git_url }}"
+ dest: /root/repos/openstack-helm-infra
+ version: "{{ osh_infra_version }}"
+ update: true
+ force: true
+ register: git_clone
+ until: git_clone is success
+ retries: 2
+ delay: 5
+
+- name: Get helm
+ get_url:
+ url: "{{ osh_helm_binary_url }}/helm-{{ osh_helm_binary_version }}-linux-amd64.tar.gz"
+ dest: tmp
+
+- name: Uncompress helm package
+ command: "tar zxvf tmp/helm-{{ osh_helm_binary_version }}-linux-amd64.tar.gz --strip-components=1 -C tmp/"
+ changed_when: false
+ tags:
+ - skip_ansible_lint
+
+- name: Put helm in system binaries
+ copy:
+ src: tmp/helm
+ dest: /usr/bin/helm
+ remote_src: yes
+ mode: 0755
+
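+# 'helm serve' (Helm v2) publishes the charts under ~/.helm/repository/local as a chart
+# repository on port 8879; the 'local' repo added further below points at it.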
+- name: Create helm-serve service file
+ copy:
+ src: helm-serve.service
+ dest: "/etc/systemd/system/helm-serve.service"
+ mode: 0640
+
+- name: Start helm-serve service
+ service:
+ name: helm-serve
+ state: started
+ enabled: yes
+
+- name: Wait for helm-serve service to start
+ wait_for:
+ port: 8879
+ host: 127.0.0.1
+
+- name: Install pyhelm
+ pip:
+ name: pyhelm
+
+- name: Init helm
+ command: "helm init"
+ changed_when: false
+
+- name: Remove stable (external) service from helm
+ command: "helm repo remove stable"
+ changed_when: false
+
+- name: Add local repositories service to helm
+ command: "helm repo add local http://localhost:8879/charts"
+ changed_when: false
+
+- name: Make charts from infra
+ make:
+ chdir: /root/repos/openstack-helm-infra
+ target: "{{ item }}"
+ with_items:
+ - helm-toolkit
+ - ingress
+ - mariadb
+ - rabbitmq
+ - memcached
+ - ceph-mon
+ - ceph-osd
+ - ceph-client
+ - ceph-provisioners
+ - ceph-rgw
+ - openvswitch
+ - libvirt
+
+- name: Install packages
+ package:
+ name: "{{ required_packages }}"
+ state: present
diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml
new file mode 100644
index 00000000..979c3329
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml
@@ -0,0 +1,31 @@
+---
+required_packages:
+- patch
+- ipcalc
+- jq
+- nmap
+- bc
+
+node_labels:
+ node1:
+ - openstack-control-plane=enabled
+ - openstack-compute-node={{ (xci_flavor == 'mini') | ternary('enabled', 'disabled') }}
+ - openstack-helm-node-class=primary
+ - openvswitch=enabled
+ - linuxbridge=enabled
+ - ceph-mon=enabled
+ - ceph-osd=enabled
+ - ceph-mds=enabled
+ - ceph-mgr=enabled
+ - ceph-rgw=enabled
+ node2:
+ - openstack-control-plane={{ (xci_flavor == 'noha') | ternary('disabled', 'enabled') }}
+ - openstack-compute-node=enabled
+ - openstack-helm-node-class=secondary
+ - openvswitch=enabled
+ - linuxbridge=enabled
+ - ceph-mon=enabled
+ - ceph-osd=enabled
+ - ceph-mds=enabled
+ - ceph-mgr=enabled
+ - ceph-rgw=enabled
diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml
new file mode 100644
index 00000000..453a815c
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml
@@ -0,0 +1,33 @@
+---
+- name: Write new resolv.conf file
+ template:
+ src: resolv.conf.j2
+ dest: /etc/resolv.conf
+
+- name: Make resolv.conf immutable
+ shell: "chattr +i /etc/resolv.conf"
+ changed_when: false
+ args:
+ executable: /bin/bash
+ tags:
+ - skip_ansible_lint
+
+# TODO: Fetch the value from a file generated by the k8s deployer
+- name: Get kube service addresses
+ shell: "grep -r 'kube_service_addresses:' /root/releng-xci/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml | awk '{print $2}'"
+ changed_when: false
+ args:
+ executable: /bin/bash
+ register: kube_service_addresses
+ tags:
+ - skip_ansible_lint
+
+# This rule allows the openstack client in the OPNFV VM to reach openstack
+- name: Update routing table with kube service addresses
+ shell: "ip route add {{ kube_service_addresses.stdout }} via 192.168.122.3 dev br-vlan onlink"
+ changed_when: false
+ args:
+ executable: /bin/bash
+ tags:
+ - skip_ansible_lint
+
diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2 b/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2
new file mode 100644
index 00000000..ae706e02
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2
@@ -0,0 +1,4 @@
+{{ dns_var }}
+{% for nameserver in external_dns_nameservers %}
+nameserver {{ nameserver }}
+{% endfor %}
diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml b/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml
new file mode 100644
index 00000000..4d6f9cbb
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml
@@ -0,0 +1,7 @@
+---
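+# 10.233.0.3 is the cluster DNS service IP that kubespray places inside its default
+# kube_service_addresses range (10.233.0.0/18).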
+kube_dns_ip: "10.233.0.3"
+external_dns_nameservers:
+- '{{kube_dns_ip}}'
+- '192.168.122.1'
+dns_var: "search svc.cluster.local cluster.local"
+