Diffstat (limited to 'deploy')
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh | 6
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml (renamed from deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install.yml) | 0
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_RedHat.yml | 49
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/tasks/main.yml | 10
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/templates/ceph.repo | 7
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/vars/RedHat.yml | 14
-rw-r--r--  deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh | 2
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/common/tasks/main.yml | 15
-rw-r--r--  deploy/adapters/ansible/roles/common/templates/ntp.conf | 16
-rw-r--r--  deploy/adapters/ansible/roles/dashboard/tasks/main.yml | 10
-rw-r--r--  deploy/adapters/ansible/roles/dashboard/vars/Debian.yml | 9
-rw-r--r--  deploy/adapters/ansible/roles/dashboard/vars/RedHat.yml | 11
-rw-r--r--  deploy/adapters/ansible/roles/ha/files/notify.sh | 4
-rw-r--r--  deploy/adapters/ansible/roles/ha/tasks/main.yml | 6
-rw-r--r--  deploy/adapters/ansible/roles/ha/templates/failover.j2 | 65
-rw-r--r--  deploy/adapters/ansible/roles/ha/templates/haproxy.cfg | 20
-rw-r--r--  deploy/adapters/ansible/roles/ha/templates/keepalived.conf | 42
-rw-r--r--  deploy/adapters/ansible/roles/keystone/templates/keystone_init | 8
-rw-r--r--  deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml | 12
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight | 31
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.service | 13
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/vars/RedHat.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/setup-network/files/setup_networks/log.py | 41
-rwxr-xr-x  deploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init | 20
-rw-r--r--  deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py | 77
-rw-r--r--  deploy/adapters/ansible/roles/setup-network/tasks/main.yml | 55
-rw-r--r--  deploy/adapters/ansible/roles/setup-network/templates/network.cfg | 4
-rw-r--r--  deploy/conf/baremetal_cluster_sh.yml | 43
-rw-r--r--  deploy/conf/network_cfg.yaml | 25
-rw-r--r--  deploy/host_vm.sh | 61
31 files changed, 417 insertions(+), 267 deletions(-)
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh b/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
index 2535f364..7a6cc1e2 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
+++ b/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
@@ -19,15 +19,15 @@ fi
if [ -L "/dev/ceph-volumes/ceph0" ]; then
echo "remove lv vg"
-lvremove /dev/ceph-volumes/ceph0
-vgremove ceph-volumes
+lvremove -f /dev/ceph-volumes/ceph0
+vgremove -f ceph-volumes
rm -r /dev/ceph-volumes
fi
losetup -d /dev/loop0
echo "vgcreate"
-vgcreate ceph-volumes $(sudo losetup --show -f /ceph/images/ceph-volumes.img)
+vgcreate -y ceph-volumes $(sudo losetup --show -f /ceph/images/ceph-volumes.img)
echo "lvcreate"
sudo lvcreate -L9G -nceph0 ceph-volumes
echo "mkfs"
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml
index f897c944..f897c944 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_RedHat.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_RedHat.yml
new file mode 100644
index 00000000..ed88cd5c
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_RedHat.yml
@@ -0,0 +1,49 @@
+---
+- name: create ceph cluster
+ shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy new {{ ceph_cluster_hosts.stdout_lines[0] }}
+ tags:
+ - create_ceph_cluster
+
+- name: install ceph on every node, including the jumpserver
+ shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy install --no-adjust-repos --repo-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa --gpg-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa/ceph_key_release.asc {{ ceph_cluster_hosts.stdout_lines[0]}}
+
+- name: create monitor node in controller group
+ shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy --overwrite-conf mon create-initial
+
+- name: copy create_osd.sh to host1
+ copy: src=create_osd.sh dest=~/create_osd.sh mode=0777
+ tags:
+ - create_osd
+
+- name: copy create_osd.sh to other nodes
+ shell: scp -o StrictHostKeyChecking=no ~/create_osd.sh {{ item }}:~/
+ with_items:
+ - "{{ groups['controller'] }}"
+ tags:
+ - create_osd
+
+- name: create osd
+ shell: ssh -o StrictHostKeyChecking=no -t {{ item }} "~/create_osd.sh"
+ with_items:
+ - "{{ groups['controller'] }}"
+ tags:
+ - create_osd
+
+- name: prepare create osd
+ shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy --repo-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa --gpg-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa/ceph_key_release.asc osd prepare {{ item }}:/var/local/osd
+ with_items:
+ - "{{ groups['controller'] }}"
+ tags:
+ - create_osd
+
+
+- name: activate osd
+ shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy --repo-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa --gpg-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa/ceph_key_release.asc osd activate {{ item }}:/var/local/osd
+ with_items:
+ - "{{ groups['controller'] }}"
+ tags:
+ - create_osd
+ - activate_osd
+
+- name: create admin node
+ shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy admin {{ ceph_cluster_hosts.stdout_lines[0] }}
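
These tasks drive ceph-deploy from the first controller and pin --repo-url/--gpg-url to the local mirror at 10.1.0.12, so nodes never reach out to ceph.com. A hedged post-install check, run on the same admin node once the playbook finishes (plain ceph CLI; exact output depends on the cluster):

    $ ceph -s          # expect HEALTH_OK (or HEALTH_WARN until all OSDs report in)
    $ ceph osd tree    # one OSD per controller, backed by /var/local/osd
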
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/main.yml
index a1a9127d..b50b38c8 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/main.yml
@@ -6,8 +6,14 @@
- ceph_setup_env
- ceph_install
-- include: ceph_install.yml
- when: inventory_hostname == "{{ groups['controller'][0] }}"
+- include: ceph_install_Debian.yml
+ when: inventory_hostname == groups['controller'][0] and ansible_os_family == "Debian"
+ tags:
+ - ceph_deploy
+ - ceph_install
+
+- include: ceph_install_RedHat.yml
+ when: inventory_hostname == groups['controller'][0] and ansible_os_family == "RedHat"
tags:
- ceph_deploy
- ceph_install
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/templates/ceph.repo b/deploy/adapters/ansible/roles/ceph-deploy/templates/ceph.repo
deleted file mode 100644
index e6b7c7d9..00000000
--- a/deploy/adapters/ansible/roles/ceph-deploy/templates/ceph.repo
+++ /dev/null
@@ -1,7 +0,0 @@
-[ceph-noarch]
-name=Ceph noarch packages
-baseurl=http://ceph.com/rpm-giant/el6/noarch
-enabled=1
-gpgcheck=1
-type=rpm-md
-gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/vars/RedHat.yml b/deploy/adapters/ansible/roles/ceph-deploy/vars/RedHat.yml
index 5ed6cc10..fd607d38 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/vars/RedHat.yml
@@ -1,18 +1,8 @@
---
packages:
+ - ceph-radosgw
+ - fcgi
- ceph-deploy
- - python-flask
- - libgoogle-perftools4
- - libleveldb1
- - liblttng-ust0
- - libsnappy1
- - librbd1
- - librados2
- - python-ceph
- ceph
- - ceph-mds
- - ceph-common
- - ceph-fs-common
- - gdisk
services: []
diff --git a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh
index abe4d06a..bc92bac0 100644
--- a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh
+++ b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh
@@ -2,5 +2,5 @@ keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-role-add --user=cinder --tenant=service --role=admin
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=cinder --type=volume --description="OpenStack Block Storage"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ public_vip.ip }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s
diff --git a/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml b/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
index 3700bcdd..b64024da 100644
--- a/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
@@ -43,6 +43,10 @@
shell: losetup {{ first_lo.stdout }} /mnt/cinder-volumes
when: cindervolumes.stdout != 'cinder-volumes'
+- name: destroy GPT label
+ shell: dd if=/dev/urandom of=/dev/sdb bs=4M count=1
+ ignore_errors: True
+
- name: create physical and group volumes
lvg: vg=cinder-volumes pvs={{ physical_device }}
vg_options=--force
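
The dd task exists because a leftover GPT label on the data disk makes the subsequent lvg task fail; overwriting the first 4 MiB destroys the primary GPT header. Note that GPT also keeps a backup header at the end of the disk, which this does not touch. A sketch of a more thorough wipe, assuming the same /dev/sdb device as the task above (wipefs ships with util-linux, sgdisk with gdisk):

    $ wipefs -a /dev/sdb          # drop filesystem/LVM/GPT signatures
    $ sgdisk --zap-all /dev/sdb   # clear primary and backup GPT structures
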
diff --git a/deploy/adapters/ansible/roles/common/tasks/main.yml b/deploy/adapters/ansible/roles/common/tasks/main.yml
index b58bb44d..ead70aed 100644
--- a/deploy/adapters/ansible/roles/common/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/common/tasks/main.yml
@@ -36,13 +36,20 @@
- name: update pip.conf
template: src=pip.conf dest=~/.pip/pip.conf
-- name: sync between sys clock and hard clock
- command: su -s /bin/sh -c "service {{ ntp_service }} stop; ntpd -gq; hwclock --systohc"
- ignore_errors: True
-
- name: update ntp conf
template: src=ntp.conf dest=/etc/ntp.conf backup=yes
+- name: stop ntp
+ service: name={{ ntp_service }} state=stopped enabled=yes
+
+- name: run ntpdate once for the initial time sync
+ shell: ntpdate {{ ntp_server }}
+ ignore_errors: True
+
+- name: sync sys clock to hard clock
+ shell: hwclock --systohc
+ ignore_errors: True
+
- name: restart ntp
service: name={{ ntp_service }} state=restarted enabled=yes
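
The reordered tasks implement the usual cold-start sequence: ntpd cannot step a clock that is far off, so it is stopped, the clock is stepped once with ntpdate, the result is copied to the hardware clock, and only then is ntpd restarted for gradual discipline. The equivalent shell sequence, with the server name standing in for {{ ntp_server }}:

    $ service ntp stop
    $ ntpdate 0.pool.ntp.org   # one-shot step while the daemon is down
    $ hwclock --systohc        # persist the corrected time across reboots
    $ service ntp restart
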
diff --git a/deploy/adapters/ansible/roles/common/templates/ntp.conf b/deploy/adapters/ansible/roles/common/templates/ntp.conf
index c6138092..2d560be2 100644
--- a/deploy/adapters/ansible/roles/common/templates/ntp.conf
+++ b/deploy/adapters/ansible/roles/common/templates/ntp.conf
@@ -16,14 +16,12 @@ filegen clockstats file clockstats type day enable
# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board
# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for
# more information.
-server {{ NTP_SERVER_LOCAL }}
-server 0.ubuntu.pool.ntp.org
-server 1.ubuntu.pool.ntp.org
-server 2.ubuntu.pool.ntp.org
-server 3.ubuntu.pool.ntp.org
+server {{ ntp_server }}
+server {{ internal_vip.ip }}
-# Use Ubuntu's ntp server as a fallback.
-server ntp.ubuntu.com
+# Use local server as a fallback.
+server 127.127.1.0 # local clock
+fudge 127.127.1.0 stratum 10
# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
# details. The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
@@ -34,8 +32,8 @@ server ntp.ubuntu.com
# up blocking replies from your own upstream servers.
# By default, exchange time with everybody, but don't allow configuration.
-restrict -4 default kod notrap nomodify nopeer noquery
-restrict -6 default kod notrap nomodify nopeer noquery
+restrict -4 default kod notrap nomodify
+restrict -6 default kod notrap nomodify
# Local users may interrogate the ntp server more closely.
restrict 127.0.0.1
diff --git a/deploy/adapters/ansible/roles/dashboard/tasks/main.yml b/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
index 2cad1174..dd5c6fd0 100644
--- a/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
@@ -1,6 +1,16 @@
---
- include_vars: "{{ ansible_os_family }}.yml"
+- name: install http packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: http_packages
+
+- name: set http config
+ lineinfile: dest={{ http_config_file }} regexp='^Listen 80' line='Listen {{ internal_ip }}:80'
+
+- name: restart http services
+ service: name={{ http_service }} state=restarted enabled=yes
+
- name: install dashboard packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
with_items: packages | union(packages_noarch)
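
Rebinding the web server to the internal IP frees port 80 on the public VIP for the proxy-dashboard haproxy frontend added in this change. A hedged spot check on a Debian controller (paths differ on RedHat per the vars files below):

    $ grep '^Listen' /etc/apache2/ports.conf   # expect: Listen <internal_ip>:80
    $ ss -tlnp | grep ':80 '                   # apache on internal_ip, haproxy on the VIP
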
diff --git a/deploy/adapters/ansible/roles/dashboard/vars/Debian.yml b/deploy/adapters/ansible/roles/dashboard/vars/Debian.yml
index fee64c38..97c4af40 100644
--- a/deploy/adapters/ansible/roles/dashboard/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/dashboard/vars/Debian.yml
@@ -1,8 +1,13 @@
---
-packages:
+http_packages:
- apache2
- libapache2-mod-wsgi
+http_service: apache2
+
+packages: []
+
services:
- - apache2
- memcached
+
+http_config_file: "/etc/apache2/ports.conf"
diff --git a/deploy/adapters/ansible/roles/dashboard/vars/RedHat.yml b/deploy/adapters/ansible/roles/dashboard/vars/RedHat.yml
index f0acce9b..5e84901c 100644
--- a/deploy/adapters/ansible/roles/dashboard/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/dashboard/vars/RedHat.yml
@@ -1,8 +1,13 @@
---
-packages:
+http_packages:
- httpd
+
+http_service: httpd
+
+packages:
- mod_wsgi
- python-memcached
-services:
- - httpd
+services: []
+
+http_config_file: "/etc/httpd/conf/httpd.conf"
diff --git a/deploy/adapters/ansible/roles/ha/files/notify.sh b/deploy/adapters/ansible/roles/ha/files/notify.sh
deleted file mode 100644
index 5edffe84..00000000
--- a/deploy/adapters/ansible/roles/ha/files/notify.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-python /usr/local/bin/failover.py $1
-mysql -uroot -e"flush hosts"
-service mysql restart
diff --git a/deploy/adapters/ansible/roles/ha/tasks/main.yml b/deploy/adapters/ansible/roles/ha/tasks/main.yml
index edd5e6dd..668f6847 100644
--- a/deploy/adapters/ansible/roles/ha/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ha/tasks/main.yml
@@ -57,12 +57,6 @@
- name: copy galera_chk file
copy: src=galera_chk dest=/usr/local/bin/galera_chk mode=0777
-- name: copy notify file
- copy: src=notify.sh dest=/usr/local/bin/notify.sh mode=0777
-
-- name: copy notify template file
- template: src=failover.j2 dest=/usr/local/bin/failover.py mode=0777
-
- name: add network service
lineinfile: dest=/etc/services state=present
line="mysqlchk 9200/tcp"
diff --git a/deploy/adapters/ansible/roles/ha/templates/failover.j2 b/deploy/adapters/ansible/roles/ha/templates/failover.j2
deleted file mode 100644
index 3b08cf2d..00000000
--- a/deploy/adapters/ansible/roles/ha/templates/failover.j2
+++ /dev/null
@@ -1,65 +0,0 @@
-import ConfigParser, os, socket
-import logging as LOG
-import pxssh
-import sys
-import re
-
-LOG_FILE="/var/log/mysql_failover"
-try:
- os.remove(LOG_FILE)
-except:
- pass
-
-LOG.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename=LOG_FILE,level=LOG.DEBUG)
-ha_vip = {{ internal_vip.ip }}
-LOG.info("ha_vip: %s" % ha_vip)
-
-#ha_vip = "10.1.0.50"
-galera_path = '/etc/mysql/conf.d/wsrep.cnf'
-pattern = re.compile(r"gcomm://(?P<prev_ip>.*)")
-
-def ssh_get_hostname(ip):
- try:
- s = pxssh.pxssh()
- s.login("%s" % ip, "root", "root")
- s.sendline('hostname') # run a command
- s.prompt() # match the prompt
- result = s.before.strip() # print everything before the prompt.
- return result.split(os.linesep)[1]
- except pxssh.ExceptionPxssh as e:
- LOG.error("pxssh failed on login.")
- raise
-
-def failover(mode):
- config = ConfigParser.ConfigParser()
- config.optionxform = str
- config.readfp(open(galera_path))
- wsrep_cluster_address = config.get("mysqld", "wsrep_cluster_address")
- wsrep_cluster_address = pattern.match(wsrep_cluster_address).groupdict()["prev_ip"]
-
- LOG.info("old wsrep_cluster_address = %s" % wsrep_cluster_address)
-
- if mode == "master":
- # refresh wsrep_cluster_address to null
- LOG.info("I'm being master, set wsrep_cluster_address to null")
- wsrep_cluster_address = ""
-
- elif mode == "backup":
- # refresh wsrep_cluster_address to master int ip
- hostname = ssh_get_hostname(ha_vip)
- wsrep_cluster_address = socket.gethostbyname(hostname)
- LOG.info("I'm being slave, set wsrep_cluster_address to master internal ip")
-
- LOG.info("new wsrep_cluster_address = %s" % wsrep_cluster_address)
- wsrep_cluster_address = "gcomm://%s" % wsrep_cluster_address
- config.set("mysqld", "wsrep_cluster_address", wsrep_cluster_address)
- with open(galera_path, 'wb') as fp:
- #config.write(sys.stdout)
- config.write(fp)
-
- os.system("service mysql restart")
- LOG.info("failover success!!!")
-
-if __name__ == "__main__":
- LOG.debug("call me: %s" % sys.argv)
- failover(sys.argv[1])
diff --git a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
index f1a2312c..27aa5b24 100644
--- a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
+++ b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
@@ -27,6 +27,7 @@ defaults
listen proxy-glance_registry_cluster
bind {{ internal_vip.ip }}:9191
+ bind {{ public_vip.ip }}:9191
option tcpka
option tcplog
balance source
@@ -36,6 +37,7 @@ listen proxy-glance_registry_cluster
listen proxy-glance_api_cluster
bind {{ internal_vip.ip }}:9292
+ bind {{ public_vip.ip }}:9292
option tcpka
option httpchk
option tcplog
@@ -46,6 +48,7 @@ listen proxy-glance_api_cluster
listen proxy-nova-novncproxy
bind {{ internal_vip.ip }}:6080
+ bind {{ public_vip.ip }}:6080
option tcpka
option tcplog
balance source
@@ -55,6 +58,7 @@ listen proxy-nova-novncproxy
listen proxy-network
bind {{ internal_vip.ip }}:9696
+ bind {{ public_vip.ip }}:9696
option tcpka
option tcplog
balance source
@@ -64,6 +68,7 @@ listen proxy-network
listen proxy-volume
bind {{ internal_vip.ip }}:8776
+ bind {{ public_vip.ip }}:8776
option tcpka
option httpchk
option tcplog
@@ -74,6 +79,7 @@ listen proxy-volume
listen proxy-keystone_admin_cluster
bind {{ internal_vip.ip }}:35357
+ bind {{ public_vip.ip }}:35357
option tcpka
option httpchk
option tcplog
@@ -84,6 +90,7 @@ listen proxy-keystone_admin_cluster
listen proxy-keystone_public_internal_cluster
bind {{ internal_vip.ip }}:5000
+ bind {{ public_vip.ip }}:5000
option tcpka
option httpchk
option tcplog
@@ -94,6 +101,7 @@ listen proxy-keystone_public_internal_cluster
listen proxy-nova_compute_api_cluster
bind {{ internal_vip.ip }}:8774
+ bind {{ public_vip.ip }}:8774
mode tcp
option httpchk
option tcplog
@@ -104,6 +112,7 @@ listen proxy-nova_compute_api_cluster
listen proxy-nova_metadata_api_cluster
bind {{ internal_vip.ip }}:8775
+ bind {{ public_vip.ip }}:8775
option tcpka
option tcplog
balance source
@@ -113,6 +122,7 @@ listen proxy-nova_metadata_api_cluster
listen proxy-cinder_api_cluster
bind {{ internal_vip.ip }}:8776
+ bind {{ public_vip.ip }}:8776
mode tcp
option httpchk
option tcplog
@@ -121,6 +131,16 @@ listen proxy-cinder_api_cluster
server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5
{% endfor %}
+listen proxy-dashboard
+ bind {{ public_vip.ip }}:80
+ option tcpka
+ option httpchk
+ option tcplog
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:80 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
listen stats
mode http
bind 0.0.0.0:8888
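
Every frontend now binds both VIPs, so external clients use the public address while services keep talking over the internal one. A hedged smoke test from outside the cluster (VIP addresses are placeholders):

    $ curl -s http://<public_vip>:5000/v2.0/   # keystone public API via the public VIP
    $ curl -s http://<public_vip>:80/          # dashboard via the new proxy-dashboard frontend
    $ curl -s http://<public_vip>:8888/        # haproxy stats page (binds 0.0.0.0)
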
diff --git a/deploy/adapters/ansible/roles/ha/templates/keepalived.conf b/deploy/adapters/ansible/roles/ha/templates/keepalived.conf
index f1e6db5d..a2e008a7 100644
--- a/deploy/adapters/ansible/roles/ha/templates/keepalived.conf
+++ b/deploy/adapters/ansible/roles/ha/templates/keepalived.conf
@@ -19,30 +19,24 @@ vrrp_instance internal_vip {
virtual_ipaddress {
{{ internal_vip.ip }}/{{ internal_vip.netmask }} dev {{ internal_vip.interface }}
}
+}
- notify_master "/usr/local/bin/notify.sh master"
- notify_backup "/usr/local/bin/notify.sh backup"
+vrrp_instance public_vip {
+ interface {{ network_cfg.public_vip.interface }}
+ virtual_router_id {{ vrouter_id_public }}
+ state BACKUP
+ nopreempt
+ preempt_delay 30
+ advert_int 1
+ priority 100
-}
+ authentication {
+ auth_type PASS
+ auth_pass 4321
+ }
-#vrrp_instance public_vip {
-# interface {{ network_cfg.public_vip.interface }}
-# virtual_router_id {{ vrouter_id_public }}
-# state BACKUP
-# nopreempt
-# preempt_delay 30
-# advert_int 1
-# priority 100
-#
-# authentication {
-# auth_type PASS
-# auth_pass 4321
-# }
-#
-# virtual_ipaddress {
-# {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }} dev {{ network_cfg.public_vip.interface }}
-# }
-#
-#}
-#
-# notify_backup "/usr/local/bin/notify.sh backup"
+ virtual_ipaddress {
+ {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }} dev {{ network_cfg.public_vip.interface }}
+ }
+
+}
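
With the public_vip instance uncommented, keepalived now manages both addresses; both instances start as BACKUP with nopreempt, so whichever node wins the first election holds the VIP until it fails. A hedged failover check (interface and address are deployment-specific):

    $ ip addr show | grep <public_vip>   # find the node currently holding the VIP
    $ service keepalived stop            # on that node; the VIP should move shortly
    $ ip addr show | grep <public_vip>   # re-run on a peer to confirm the takeover
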
diff --git a/deploy/adapters/ansible/roles/keystone/templates/keystone_init b/deploy/adapters/ansible/roles/keystone/templates/keystone_init
index d9cc65a9..c7e22324 100644
--- a/deploy/adapters/ansible/roles/keystone/templates/keystone_init
+++ b/deploy/adapters/ansible/roles/keystone/templates/keystone_init
@@ -22,7 +22,7 @@ keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}
# register keystone
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=keystone --type=identity --description="OpenStack Identity"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ internal_vip.ip }}:5000/v2.0 --internalurl=http://{{ internal_vip.ip }}:5000/v2.0 --adminurl=http://{{ internal_vip.ip }}:35357/v2.0
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ public_vip.ip }}:5000/v2.0 --internalurl=http://{{ internal_vip.ip }}:5000/v2.0 --adminurl=http://{{ internal_vip.ip }}:35357/v2.0
# Create a glance user that the Image Service can use to authenticate with the Identity service
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name=glance --pass={{ GLANCE_PASS }} --email=glance@example.com
@@ -30,7 +30,7 @@ keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}
#Register the Image Service with the Identity service so that other OpenStack services can locate it
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=glance --type=image --description="OpenStack Image Service"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ internal_vip.ip }}:9292 --internalurl=http://{{ internal_vip.ip }}:9292 --adminurl=http://{{ internal_vip.ip }}:9292
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ public_vip.ip }}:9292 --internalurl=http://{{ internal_vip.ip }}:9292 --adminurl=http://{{ internal_vip.ip }}:9292
#Create a nova user that Compute uses to authenticate with the Identity Service
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name=nova --pass={{ NOVA_PASS }} --email=nova@example.com
@@ -38,10 +38,10 @@ keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}
# register Compute with the Identity Service so that other OpenStack services can locate it
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=nova --type=compute --description="OpenStack Compute"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ public_vip.ip }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s
# register neutron user, role and service
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name neutron --pass {{ NEUTRON_PASS }} --email neutron@example.com
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-role-add --user neutron --tenant service --role admin
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name neutron --type network --description "OpenStack Networking"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ internal_vip.ip }}:9696 --adminurl http://{{ internal_vip.ip }}:9696 --internalurl http://{{ internal_vip.ip }}:9696
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ public_vip.ip }}:9696 --adminurl http://{{ internal_vip.ip }}:9696 --internalurl http://{{ internal_vip.ip }}:9696
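
Each endpoint-create now advertises the public VIP in --publicurl while keeping --internalurl and --adminurl on the internal VIP, matching the new haproxy binds. A hedged verification with the same keystone v2 CLI this script uses:

    $ keystone --os-token=<ADMIN_TOKEN> \
          --os-endpoint=http://<internal_vip>:35357/v2.0 endpoint-list
    # publicurl entries should carry the public VIP; internalurl/adminurl the internal VIP
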
diff --git a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml
index 991e33cc..26758f58 100644
--- a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml
+++ b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml
@@ -9,10 +9,12 @@
notify:
- restart neutron control services
-- name: restart neutron-server
- service: name=neutron-server state=restarted enabled=yes
- delegate_to: "{{ item }}"
- run_once: True
- with_items: groups['controller']
+- name: restart first neutron-server
+ service: name=neutron-server state=restarted enabled=yes
+ when: inventory_hostname == groups['controller'][0]
+
+- name: restart other neutron-server
+ service: name=neutron-server state=restarted enabled=yes
+ when: inventory_hostname != groups['controller'][0]
- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight b/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight
new file mode 100755
index 00000000..90a267d6
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight
@@ -0,0 +1,31 @@
+#!/bin/bash
+# chkconfig: 345 98 2
+# description: ODL controller
+# OpenDaylight service controller script
+export JAVA_HOME=/usr/lib/jvm/java-8-oracle
+USER=odl
+
+cd /opt/opendaylight-0.3.0
+case "$1" in
+ start)
+ /bin/su -m $USER -s /bin/bash -c ./bin/start
+ ;;
+ stop)
+ /bin/su -m $USER -s /bin/bash -c ./bin/stop
+ ;;
+ status)
+ PID=`ps aux | grep java | grep karaf | awk '{print $2}'`
+ if test -z $PID
+ then
+ echo "ODL is down..."
+ exit 1
+ else
+ echo "ODL is running... PID $PID"
+ exit 0
+ fi
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|status}"
+ exit 1
+ ;;
+esac
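
One fragile spot in the status branch above: ps aux | grep java | grep karaf can match unrelated processes and return several PIDs, and the unquoted test -z $PID then fails with a syntax error. A hedged hardening of just that branch (pgrep is standard procps; the karaf pattern is an assumption):

    PID=$(pgrep -f 'java.*karaf')   # does not match its own grep
    if [ -z "$PID" ]; then          # quoted, so multiple PIDs are handled
        echo "ODL is down..."; exit 1
    else
        echo "ODL is running... PID $PID"; exit 0
    fi
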
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.service b/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.service
deleted file mode 100755
index a4adeeec..00000000
--- a/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=OpenDaylight SDN Controller
-Documentation=https://wiki.opendaylight.org/view/Main_Page http://www.opendaylight.org/
-After=network.service
-
-[Service]
-Type=forking
-ExecStart=/opt/opendaylight-0.3.0/bin/start
-User=odl
-Group=odl
-
-[Install]
-WantedBy=multi-user.target
diff --git a/deploy/adapters/ansible/roles/odl_cluster/vars/RedHat.yml b/deploy/adapters/ansible/roles/odl_cluster/vars/RedHat.yml
index c0dfede1..c125f89e 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/vars/RedHat.yml
@@ -9,5 +9,5 @@ compute_packages:
service_file:
- src: opendaylight.service
- dst: /usr/lib/systemd/system/opendaylight.service
+ src: opendaylight
+ dst: /etc/init.d/opendaylight
diff --git a/deploy/adapters/ansible/roles/setup-network/files/setup_networks/log.py b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/log.py
new file mode 100644
index 00000000..fffeb589
--- /dev/null
+++ b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/log.py
@@ -0,0 +1,41 @@
+import logging
+import os
+loggers = {}
+log_dir="/var/log/setup_network"
+try:
+ os.makedirs(log_dir)
+except:
+ pass
+
+def getLogger(name):
+ if name in loggers:
+ return loggers[name]
+
+ logger = logging.getLogger(name)
+ logger.setLevel(logging.DEBUG)
+
+ # create file handler which logs even debug messages
+ log_file = "%s/%s.log" % (log_dir, name)
+ try:
+ os.remove(log_file)
+ except:
+ pass
+
+ fh = logging.FileHandler(log_file)
+ fh.setLevel(logging.DEBUG)
+
+ # create console handler with a higher log level
+ ch = logging.StreamHandler()
+ ch.setLevel(logging.ERROR)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+ ch.setFormatter(formatter)
+ fh.setFormatter(formatter)
+
+ # add the handlers to logger
+ logger.addHandler(ch)
+ logger.addHandler(fh)
+
+ loggers[name] = logger
+ return logger
diff --git a/deploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init
new file mode 100755
index 00000000..c27a8bf8
--- /dev/null
+++ b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/net_init
@@ -0,0 +1,20 @@
+#!/bin/bash
+## BEGIN INIT INFO
+# Provides: anamon.init
+# Default-Start: 3 5
+# Default-Stop: 0 1 2 4 6
+# Required-Start: $network
+# Short-Description: Starts the cobbler anamon boot notification program
+# Description: anamon runs the first time a machine is booted after
+# installation.
+### END INIT INFO
+
+#
+# anamon.init: Starts the cobbler post-install boot notification program
+#
+# chkconfig: 35 0 6
+#
+# description: anamon runs the first time a machine is booted after
+# installation.
+#
+python /opt/setup_networks/setup_networks.py
diff --git a/deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py
new file mode 100644
index 00000000..5e6520af
--- /dev/null
+++ b/deploy/adapters/ansible/roles/setup-network/files/setup_networks/setup_networks.py
@@ -0,0 +1,77 @@
+import yaml
+import netaddr
+import os
+import log as logging
+
+LOG = logging.getLogger("net-init")
+config_path = os.path.join(os.path.dirname(__file__), "network.cfg")
+#from socket import AF_INET
+#from pyroute2 import IPRoute
+#from pyroute2 import IPRouteRequest
+
+#ip = IPRoute()
+def setup_bondings(bond_mappings):
+ print(bond_mappings)
+
+def add_vlan_link(interface, ifname, vlan_id):
+ LOG.info("add_vlan_link enter")
+ #idx = ip.link_lookup(ifname=interface)[0]
+ #ip.link_create(ifname=ifname,
+ # kind="vlan",
+ # vlan_id=vlan_id,
+ # link=idx)
+ cmd = "ip link add link %s name %s type vlan id %s; " % (ifname, interface, vlan_id)
+ cmd += "ip link set %s up; ip link set %s up" % (interface, ifname)
+ LOG.info("add_vlan_link: cmd=%s" % cmd)
+ os.system(cmd)
+
+def add_ovs_port(ovs_br, ifname, vlan_id=None):
+ LOG.info("add_ovs_port enter")
+ cmd = "ovs-vsctl --may-exist add-port %s %s" % (ovs_br, ifname)
+ if vlan_id:
+ cmd += " tag=%s" % vlan_id
+ cmd += " -- set Interface %s type=internal;" % ifname
+ cmd += "ip link set %s up;" % ifname
+ LOG.info("add_ovs_port: cmd=%s" % cmd)
+ os.system(cmd)
+
+def setup_intfs(sys_intf_mappings):
+ LOG.info("setup_intfs enter")
+ for intf_name, intf_info in sys_intf_mappings.items():
+ if intf_info["type"] == "vlan":
+ add_vlan_link(intf_name, intf_info["interface"], intf_info["vlan_tag"])
+ elif intf_info["type"] == "ovs":
+ add_ovs_port(intf_info["interface"], intf_name, vlan_id=intf_info.get("vlan_tag"))
+ else:
+ pass
+
+def setup_ips(ip_settings, sys_intf_mappings):
+ LOG.info("setup_ips enter")
+ for intf_info in ip_settings.values():
+ network = netaddr.IPNetwork(intf_info["cidr"])
+ if sys_intf_mappings[intf_info["name"]]["type"] == "ovs":
+ intf_name = intf_info["name"]
+ else:
+ intf_name = intf_info["alias"]
+ cmd = "ip addr add %s/%s brd %s dev %s;" \
+ % (intf_info["ip"], intf_info["netmask"], str(network.broadcast), intf_name)
+ if "gw" in intf_info:
+ cmd += "ip route add default via %s dev %s" % (intf_info["gw"], intf_name)
+ LOG.info("setup_ips: cmd=%s" % cmd)
+ os.system(cmd)
+ #idx = ip.link_lookup(ifname=intf_name)[0]
+ #ip.addr('add',
+ # index=idx,
+ # address=intf_info["ip"],
+ # broadcast=str(network.broadcast),
+ # prefixlen=intf_info["netmask"])
+
+def main(config):
+ setup_bondings(config["bond_mappings"])
+ setup_intfs(config["sys_intf_mappings"])
+ setup_ips(config["ip_settings"], config["sys_intf_mappings"])
+
+if __name__ == "__main__":
+ os.system("service openvswitch-switch status|| service openvswitch-switch start")
+ config = yaml.load(open(config_path))
+ main(config)
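
Note the parameter naming in add_vlan_link: the caller passes (logical name, physical interface, tag) into a signature named (interface, ifname, vlan_id), so the names read swapped, but the substitution order in the command string compensates and the generated command is correct. For the mgmt mapping in network_cfg.yaml the script ends up issuing roughly the following (IP values are examples):

    $ ip link add link eth1 name mgmt type vlan id 101
    $ ip link set mgmt up; ip link set eth1 up
    $ ip addr add 10.1.0.50/24 brd 10.1.0.255 dev mgmt
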
diff --git a/deploy/adapters/ansible/roles/setup-network/tasks/main.yml b/deploy/adapters/ansible/roles/setup-network/tasks/main.yml
index 8df1ac3a..8667a9b1 100644
--- a/deploy/adapters/ansible/roles/setup-network/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/setup-network/tasks/main.yml
@@ -14,30 +14,31 @@
with_items: "{{ network_cfg['provider_net_mappings'] }}"
when: 'item["type"] == "ovs"'
-- name: setup sys intf
- shell: ip link del {{ item.key }}; \
- ip link add link {{ item.value["interface"] }} name {{ item.key }} type vlan id {{ item.value["vlan_tag"] }}; \
- ip link set {{ item.value["interface"] }} up
- when: '"vlan_tag" in item.value and item.value["type"] == "vlan"'
- with_dict: "{{ sys_intf_mappings }}"
-
-
-- name: set sys intf ip
- shell: ip addr del {{ item.value["ip"] }}/{{ item.value["netmask"] }} dev {{ item.value["alias"] }}; \
- ip addr add {{ item.value["ip"] }}/{{ item.value["netmask"] }} dev {{ item.value["alias"] }}; \
- ip link set {{ item.value["alias"] }} up
- with_dict: "{{ host_ip_settings }}"
-
-- name: set gateway
- shell: ip route del default; \
- ip route add default via {{ item.value["gw"] }} dev {{ item.key }}
- when: '"gw" in item.value'
- with_dict: "{{ host_ip_settings }}"
-
-- name: copy net config
- template: src=my_configs.debian dest=/etc/network/interfaces.d/my_configs.cfg
- when: ansible_os_family == "Debian"
-
-- name: source net config
- lineinfile: dest=/etc/network/interfaces line='source /etc/network/interfaces.d/my_configs.cfg'
- when: ansible_os_family == "Debian"
+- name: ensure script dir exist
+ shell: mkdir -p /opt/setup_networks
+
+- name: copy scripts
+ copy: src={{ item }} dest=/opt/setup_networks
+ with_items:
+ - setup_networks/log.py
+ - setup_networks/setup_networks.py
+
+- name: copy boot scripts
+ copy: src={{ item }} dest=/etc/init.d mode=0755
+ with_items:
+ - setup_networks/net_init
+
+- name: copy config files
+ template: src=network.cfg dest=/opt/setup_networks
+
+- name: make sure python lib exist
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items:
+ - python-yaml
+ - python-netaddr
+
+- name: run scripts
+ shell: python /opt/setup_networks/setup_networks.py
+
+- name: add to boot scripts
+ service: name=net_init enabled=yes
diff --git a/deploy/adapters/ansible/roles/setup-network/templates/network.cfg b/deploy/adapters/ansible/roles/setup-network/templates/network.cfg
new file mode 100644
index 00000000..230d10a9
--- /dev/null
+++ b/deploy/adapters/ansible/roles/setup-network/templates/network.cfg
@@ -0,0 +1,4 @@
+bond_mappings: {{ network_cfg["bond_mappings"] }}
+ip_settings: {{ ip_settings[inventory_hostname] }}
+sys_intf_mappings: {{ sys_intf_mappings }}
+
diff --git a/deploy/conf/baremetal_cluster_sh.yml b/deploy/conf/baremetal_cluster_sh.yml
new file mode 100644
index 00000000..1078cb5c
--- /dev/null
+++ b/deploy/conf/baremetal_cluster_sh.yml
@@ -0,0 +1,43 @@
+
+TYPE: baremetal
+FLAVOR: cluster
+POWER_TOOL: ipmitool
+
+ipmiUser: root
+ipmiPass: Huawei@123
+
+hosts:
+ - name: host1
+ mac: 'D8:49:0B:DA:2A:28'
+ ipmiUser: root
+ ipmiPass: Huawei@123
+ ipmiIp: 192.168.2.145
+ roles:
+ - controller
+ - ha
+
+ - name: host2
+ mac: 'D8:49:0B:DA:5B:5D'
+ ipmiIp: 192.168.2.155
+ roles:
+ - controller
+ - ha
+
+ - name: host3
+ mac: 'D8:49:0B:DA:5A:B7'
+ ipmiIp: 192.168.2.165
+ roles:
+ - controller
+ - ha
+
+ - name: host4
+ mac: 'D8:49:0B:DA:58:99'
+ ipmiIp: 192.168.2.175
+ roles:
+ - compute
+
+ - name: host5
+ mac: 'D8:49:0B:DA:56:85'
+ ipmiIp: 192.168.2.185
+ roles:
+ - compute
diff --git a/deploy/conf/network_cfg.yaml b/deploy/conf/network_cfg.yaml
index a5f2c791..d79ff8e5 100644
--- a/deploy/conf/network_cfg.yaml
+++ b/deploy/conf/network_cfg.yaml
@@ -13,7 +13,7 @@ provider_net_mappings:
sys_intf_mappings:
- name: mgmt
interface: eth1
- vlan_tag: 2
+ vlan_tag: 101
type: vlan
role:
- controller
@@ -21,7 +21,7 @@ sys_intf_mappings:
- name: storage
interface: eth1
- vlan_tag: 3
+ vlan_tag: 102
type: vlan
role:
- controller
@@ -29,8 +29,7 @@ sys_intf_mappings:
- name: external
interface: br-prv
- vlan_tag: 4
- type: vlan
+ type: ovs
role:
- controller
- compute
@@ -56,10 +55,10 @@ ip_settings:
- name: external
ip_ranges:
- - - "172.16.3.2"
- - "172.16.3.100"
- cidr: "172.16.3.0/24"
- gw: "172.16.3.1"
+ - - "192.168.50.210"
+ - "192.168.50.220"
+ cidr: "192.168.50.0/24"
+ gw: "192.168.50.1"
role:
- controller
- compute
@@ -70,7 +69,7 @@ internal_vip:
interface: mgmt
public_vip:
- ip: 172.16.3.222
+ ip: 192.168.50.240
netmask: "24"
interface: external
@@ -84,7 +83,7 @@ public_net_info:
router: router-ext
enable_dhcp: False
no_gateway: False
- external_gw: "172.16.3.1"
- floating_ip_cidr: "172.16.3.0/24"
- floating_ip_start: "172.16.3.100"
- floating_ip_end: "172.16.3.254"
+ external_gw: "192.168.50.1"
+ floating_ip_cidr: "192.168.50.0/24"
+ floating_ip_start: "192.168.50.221"
+ floating_ip_end: "192.168.50.231"
diff --git a/deploy/host_vm.sh b/deploy/host_vm.sh
deleted file mode 100644
index 0754b1f4..00000000
--- a/deploy/host_vm.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-host_vm_dir=$WORK_DIR/vm
-function tear_down_machines() {
- for i in $HOSTNAMES; do
- sudo virsh destroy $i
- sudo virsh undefine $i
- rm -rf $host_vm_dir/$i
- done
-}
-
-function reboot_hosts() {
- log_warn "reboot_hosts do nothing"
-}
-
-function launch_host_vms() {
- old_ifs=$IFS
- IFS=,
- tear_down_machines
- #function_bod
- mac_array=($machines)
- log_info "bringing up pxe boot vms"
- i=0
- for host in $HOSTNAMES; do
- log_info "creating vm disk for instance $host"
- vm_dir=$host_vm_dir/$host
- mkdir -p $vm_dir
- sudo qemu-img create -f raw $vm_dir/disk.img ${VIRT_DISK}
- # create vm xml
- sed -e "s/REPLACE_MEM/$VIRT_MEM/g" \
- -e "s/REPLACE_CPU/$VIRT_CPUS/g" \
- -e "s/REPLACE_NAME/$host/g" \
- -e "s#REPLACE_IMAGE#$vm_dir/disk.img#g" \
- -e "s/REPLACE_BOOT_MAC/${mac_array[i]}/g" \
- -e "s/REPLACE_BRIDGE_MGMT/br_install/g" \
- -e "s/REPLACE_BRIDGE_TENANT/br_install/g" \
- -e "s/REPLACE_BRIDGE_PUBLIC/br_install/g" \
- -e "s/REPLACE_BRIDGE_STORAGE/br_install/g" \
- $COMPASS_DIR/deploy/template/vm/host.xml\
- > $vm_dir/libvirt.xml
-
- sudo virsh define $vm_dir/libvirt.xml
- sudo virsh start $host
- let i=i+1
- done
- IFS=$old_ifs
-}
-
-function get_host_macs() {
- local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
- local mac_generator=${COMPASS_DIR}/deploy/mac_generator.sh
- local machines=
-
- chmod +x $mac_generator
- mac_array=`$mac_generator $VIRT_NUMBER`
- machines=`echo $mac_array|sed 's/ /,/g'`
-
- echo "test: true" >> $config_file
- echo "pxe_boot_macs: [${machines}]" >> $config_file
-
- echo $machines
-}
-