Diffstat (limited to 'deploy')
-rw-r--r--  deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml | 15
-rwxr-xr-x  deploy/adapters/ansible/openstack_liberty/roles/odl_cluster/tasks/odl_controller.yml | 4
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/HA-ansible-multinodes.yml | 15
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/roles/ext-network/tasks/main.yml | 3
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/roles/keystone/tasks/keystone_install.yml | 9
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/log.py | 41
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/net_init | 20
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/setup_networks.py | 73
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/main.yml | 69
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/keepalived.conf | 47
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/my_configs.debian | 14
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/network.cfg | 5
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml | 14
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/tasks/keystone_install.yml | 10
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/odl_controller.yml | 16
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/openvswitch.yml | 5
-rw-r--r--  deploy/adapters/ansible/roles/compute-recovery/tasks/main.yml | 26
-rw-r--r--  deploy/adapters/ansible/roles/compute-recovery/vars/Debian.yml (renamed from deploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/Debian.yml) | 16
-rw-r--r--  deploy/adapters/ansible/roles/compute-recovery/vars/RedHat.yml (renamed from deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/templates/admin-openrc-v3.sh) | 16
-rw-r--r--  deploy/adapters/ansible/roles/compute-recovery/vars/main.yml | 10
-rw-r--r--  deploy/adapters/ansible/roles/controller-recovery/tasks/main.yml | 26
-rw-r--r--  deploy/adapters/ansible/roles/controller-recovery/vars/Debian.yml | 40
-rw-r--r--  deploy/adapters/ansible/roles/controller-recovery/vars/RedHat.yml | 39
-rw-r--r--  deploy/adapters/ansible/roles/controller-recovery/vars/main.yml | 11
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/main.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/heat/tasks/heat_install.yml | 21
-rw-r--r--  deploy/adapters/ansible/roles/heat/templates/heat_debian.j2 | 28
-rw-r--r--  deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/keystone/templates/admin-openrc-v3.sh (renamed from deploy/adapters/ansible/openstack_mitaka/roles/keystone/templates/admin-openrc-v3.sh) | 0
-rw-r--r--  deploy/adapters/ansible/roles/keystone/vars/Debian.yml | 1
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml | 16
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Debian (renamed from deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg) | 0
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Redhat | 59
-rw-r--r--  deploy/adapters/ansible/roles/setup-network/tasks/main.yml | 2
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-nosdn-nofeature-ha.yml | 4
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-ocl-nofeature-ha.yml | 4
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-moon-ha.yml | 4
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-nofeature-ha.yml | 4
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-odl_l3-nofeature-ha.yml | 4
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-onos-nofeature-ha.yml | 4
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-onos-sfc-ha.yml | 4
-rwxr-xr-x  deploy/network.sh | 20
42 files changed, 680 insertions(+), 44 deletions(-)
diff --git a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
index 27aef19c..7f61a1cf 100644
--- a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
+++ b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
@@ -232,3 +232,18 @@
max_fail_percentage: 0
roles:
- ext-network
+
+- hosts: controller
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - controller-recovery
+
+- hosts: compute
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - compute-recovery
+
diff --git a/deploy/adapters/ansible/openstack_liberty/roles/odl_cluster/tasks/odl_controller.yml b/deploy/adapters/ansible/openstack_liberty/roles/odl_cluster/tasks/odl_controller.yml
index dff7d00b..3c7032d1 100755
--- a/deploy/adapters/ansible/openstack_liberty/roles/odl_cluster/tasks/odl_controller.yml
+++ b/deploy/adapters/ansible/openstack_liberty/roles/odl_cluster/tasks/odl_controller.yml
@@ -237,7 +237,9 @@
shell: crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin;
when: odl_l3_agent == "Enable"
-
+- name: configure metadata for l3 configuration
+ shell: crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True;
+ when: odl_l3_agent == "Enable"
- name: drop and recreate neutron database
shell: mysql -e "drop database if exists neutron;";
diff --git a/deploy/adapters/ansible/openstack_mitaka/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack_mitaka/HA-ansible-multinodes.yml
index 25b469e0..7ef467ee 100644
--- a/deploy/adapters/ansible/openstack_mitaka/HA-ansible-multinodes.yml
+++ b/deploy/adapters/ansible/openstack_mitaka/HA-ansible-multinodes.yml
@@ -241,3 +241,18 @@
max_fail_percentage: 0
roles:
- tacker
+
+- hosts: controller
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - controller-recovery
+
+- hosts: compute
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - compute-recovery
+
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/tasks/main.yml
index 4d2afc24..b52b9178 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/tasks/main.yml
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/tasks/main.yml
@@ -17,6 +17,9 @@
- name: restart neutron server
service: name=neutron-server state=restarted enabled=yes
+- name: wait for neutron time
+ shell: "sleep 10"
+
- name: create external net
neutron_network:
login_username: ADMIN
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/openstack_mitaka/roles/keystone/tasks/keystone_install.yml
index ea6926f4..ba4fc28e 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/tasks/keystone_install.yml
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/keystone/tasks/keystone_install.yml
@@ -26,6 +26,15 @@
state=absent
when: ansible_os_family == "Debian"
+- name: disable boot auto start
+ file:
+ path={{ item }}
+ state=absent
+ with_items:
+ - /etc/init.d/keystone
+ - /etc/init/keystone.conf
+ when: ansible_os_family == "Debian"
+
- name: generate keystone service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/log.py b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/log.py
new file mode 100644
index 00000000..fffeb589
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/log.py
@@ -0,0 +1,41 @@
+import logging
+import os
+loggers = {}
+log_dir="/var/log/setup_network"
+try:
+ os.makedirs(log_dir)
+except:
+ pass
+
+def getLogger(name):
+ if name in loggers:
+ return loggers[name]
+
+ logger = logging.getLogger(name)
+ logger.setLevel(logging.DEBUG)
+
+ # create file handler which logs even debug messages
+ log_file = "%s/%s.log" % (log_dir, name)
+ try:
+ os.remove(log_file)
+ except:
+ pass
+
+ fh = logging.FileHandler(log_file)
+ fh.setLevel(logging.DEBUG)
+
+ # create console handler with a higher log level
+ ch = logging.StreamHandler()
+ ch.setLevel(logging.ERROR)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+ ch.setFormatter(formatter)
+ fh.setFormatter(formatter)
+
+ # add the handlers to logger
+ logger.addHandler(ch)
+ logger.addHandler(fh)
+
+ loggers[name] = logger
+ return logger
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/net_init b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/net_init
new file mode 100755
index 00000000..c27a8bf8
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/net_init
@@ -0,0 +1,20 @@
+#!/bin/bash
+## BEGIN INIT INFO
+# Provides: anamon.init
+# Default-Start: 3 5
+# Default-Stop: 0 1 2 4 6
+# Required-Start: $network
+# Short-Description: Starts the cobbler anamon boot notification program
+# Description: anamon runs the first time a machine is booted after
+# installation.
+## END INIT INFO
+
+#
+# anamon.init: Starts the cobbler post-install boot notification program
+#
+# chkconfig: 35 0 6
+#
+# description: anamon runs the first time a machine is booted after
+# installation.
+#
+python /opt/setup_networks/setup_networks.py
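
The onos_cluster tasks below copy this init script into /etc/init.d/ and enable it through the service module; done by hand on a Debian-family node the equivalent would be roughly the sketch below (the update-rc.d/chkconfig commands are an assumption, not taken from the role):

    install -m 0755 setup_networks/net_init /etc/init.d/net_init
    update-rc.d net_init defaults            # Debian/Ubuntu
    # chkconfig --add net_init && chkconfig net_init on    # RedHat family
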
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/setup_networks.py b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/setup_networks.py
new file mode 100644
index 00000000..e58d6c72
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/setup_networks.py
@@ -0,0 +1,73 @@
+import yaml
+import netaddr
+import os
+import log as logging
+
+LOG = logging.getLogger("net-init")
+config_path = os.path.join(os.path.dirname(__file__), "network.cfg")
+
+def setup_bondings(bond_mappings):
+ print bond_mappings
+
+def add_vlan_link(interface, ifname, vlan_id):
+ LOG.info("add_vlan_link enter")
+ cmd = "ip link add link %s name %s type vlan id %s; " % (ifname, interface, vlan_id)
+ cmd += "ip link set %s up; ip link set %s up" % (interface, ifname)
+ LOG.info("add_vlan_link: cmd=%s" % cmd)
+ os.system(cmd)
+
+def add_ovs_port(ovs_br, ifname, uplink, vlan_id=None):
+ LOG.info("add_ovs_port enter")
+ cmd = "ovs-vsctl --may-exist add-port %s %s" % (ovs_br, ifname)
+ if vlan_id:
+ cmd += " tag=%s" % vlan_id
+ cmd += " -- set Interface %s type=internal;" % ifname
+ cmd += "ip link set dev %s address `ip link show %s |awk '/link\/ether/{print $2}'`;" \
+ % (ifname, uplink)
+ cmd += "ip link set %s up;" % ifname
+ LOG.info("add_ovs_port: cmd=%s" % cmd)
+ os.system(cmd)
+
+def setup_intfs(sys_intf_mappings, uplink_map):
+ LOG.info("setup_intfs enter")
+ for intf_name, intf_info in sys_intf_mappings.items():
+ if intf_info["type"] == "vlan":
+ add_vlan_link(intf_name, intf_info["interface"], intf_info["vlan_tag"])
+ elif intf_info["type"] == "ovs":
+ add_ovs_port(
+ intf_info["interface"],
+ intf_name,
+ uplink_map[intf_info["interface"]],
+ vlan_id=intf_info.get("vlan_tag"))
+ else:
+ pass
+
+def setup_ips(ip_settings, sys_intf_mappings):
+ LOG.info("setup_ips enter")
+ for intf_info in ip_settings.values():
+ network = netaddr.IPNetwork(intf_info["cidr"])
+ if sys_intf_mappings[intf_info["name"]]["type"] == "ovs":
+ intf_name = intf_info["name"]
+ else:
+ intf_name = intf_info["alias"]
+ cmd = "ip addr add %s/%s brd %s dev %s;" \
+ % (intf_info["ip"], intf_info["netmask"], str(network.broadcast),intf_name)
+ if "gw" in intf_info:
+ cmd += "route del default;"
+ cmd += "ip route add default via %s dev %s" % (intf_info["gw"], intf_name)
+ LOG.info("setup_ips: cmd=%s" % cmd)
+ os.system(cmd)
+
+def main(config):
+ uplink_map = {}
+ setup_bondings(config["bond_mappings"])
+ for provider_net in config["provider_net_mappings"]:
+ uplink_map[provider_net['name']] = provider_net['interface']
+
+ setup_intfs(config["sys_intf_mappings"], uplink_map)
+ setup_ips(config["ip_settings"], config["sys_intf_mappings"])
+
+if __name__ == "__main__":
+ os.system("service openvswitch-switch status|| service openvswitch-switch start")
+ config = yaml.load(open(config_path))
+ main(config)
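
setup_networks.py reads a network.cfg rendered next to it (see the template further down) and recreates the VLAN links, OVS internal ports, and IP addresses it describes. A hedged sketch of a manual run plus verification; br-prv is a hypothetical bridge name standing in for whatever provider_net_mappings defines:

    mkdir -p /opt/setup_networks
    cp setup_networks/log.py setup_networks/setup_networks.py /opt/setup_networks/
    python /opt/setup_networks/setup_networks.py   # expects /opt/setup_networks/network.cfg
    ovs-vsctl list-ports br-prv                    # hypothetical bridge; internal ports should be listed
    ip -4 addr show                                # addresses and default route should match ip_settings
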
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/main.yml
index bda2ba30..6b619057 100755
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/main.yml
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/main.yml
@@ -40,13 +40,74 @@
- name: Stop the Open vSwitch service and clear existing OVSDB
shell: >
- ovs-vsctl del-br br-int ;
- ovs-vsctl del-br br-tun ;
- ovs-vsctl del-manager ;
- ip link delete onos_port1 type veth peer name onos_port2;
+ service openvswitch-switch stop ;
+ rm -rf /var/log/openvswitch/* ;
+ rm -rf /etc/openvswitch/conf.db ;
+ service openvswitch-switch start ;
when: groups['onos']|length !=0
ignore_errors: True
+##################################################################
+########### Recover External network #################
+##################################################################
+
+- name: add ovs bridge
+ openvswitch_bridge: bridge={{ item["name"] }} state=present
+ with_items: "{{ network_cfg['provider_net_mappings'] }}"
+ when: item["type"] == "ovs" and groups['onos']|length !=0
+
+- name: add ovs uplink
+ openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }} state=present
+ with_items: "{{ network_cfg['provider_net_mappings'] }}"
+ when: item["type"] == "ovs" and groups['onos']|length !=0
+
+- name: add ovs uplink
+ shell: ip link set {{ item["interface"] }} up
+ with_items: "{{ network_cfg['provider_net_mappings'] }}"
+ when: item["type"] == "ovs" and groups['onos']|length !=0
+
+- name: ensure script dir exist
+ shell: mkdir -p /opt/setup_networks
+ when: groups['onos']|length !=0
+
+- name: copy scripts
+ copy: src={{ item }} dest=/opt/setup_networks
+ with_items:
+ - setup_networks/log.py
+ - setup_networks/setup_networks.py
+ when: groups['onos']|length !=0
+
+- name: copy boot scripts
+ copy: src={{ item }} dest=/etc/init.d/ mode=0755
+ with_items:
+ - setup_networks/net_init
+ when: groups['onos']|length !=0
+
+- name: copy config files
+ template: src=network.cfg dest=/opt/setup_networks
+ when: groups['onos']|length !=0
+
+- name: make sure python lib exist
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items:
+ - python-yaml
+ - python-netaddr
+ when: groups['onos']|length !=0
+
+- name: run scripts
+ shell: python /opt/setup_networks/setup_networks.py
+ when: groups['onos']|length !=0
+
+- name: add to boot scripts
+ service: name=net_init enabled=yes
+ when: groups['onos']|length !=0
+##################################################################
+
+- name: restart keepalived to recover external IP
+ shell: service keepalived restart
+ when: inventory_hostname in groups['onos']
+ ignore_errors: True
+
- name: Install ONOS Cluster on Controller
include: onos_controller.yml
when: inventory_hostname in groups['onos'] and onos_sfc == "Disable"
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/keepalived.conf b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/keepalived.conf
new file mode 100644
index 00000000..4ccf1c43
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/keepalived.conf
@@ -0,0 +1,47 @@
+global_defs {
+ router_id {{ inventory_hostname }}
+}
+
+vrrp_sync_group VG1 {
+ group {
+ internal_vip
+ public_vip
+ }
+}
+
+vrrp_instance internal_vip {
+ interface {{ internal_vip.interface }}
+ virtual_router_id {{ vrouter_id_internal }}
+ state BACKUP
+ nopreempt
+ advert_int 1
+ priority {{ 50 + (host_index[inventory_hostname] * 50) }}
+
+ authentication {
+ auth_type PASS
+ auth_pass 1234
+ }
+
+ virtual_ipaddress {
+ {{ internal_vip.ip }}/{{ internal_vip.netmask }} dev {{ internal_vip.interface }}
+ }
+}
+
+vrrp_instance public_vip {
+ interface br-ex
+ virtual_router_id {{ vrouter_id_public }}
+ state BACKUP
+ nopreempt
+ advert_int 1
+ priority {{ 50 + (host_index[inventory_hostname] * 50) }}
+
+ authentication {
+ auth_type PASS
+ auth_pass 4321
+ }
+
+ virtual_ipaddress {
+ {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }} dev br-ex
+ }
+
+}
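
The template configures every ONOS controller as a BACKUP instance whose priority rises with host_index, so a single node ends up holding each VIP. A quick check of which node carries the public VIP after the role restarts keepalived (a sketch, assuming the br-ex device named in the template):

    service keepalived restart
    ip -4 addr show dev br-ex    # the public VIP appears only on the current MASTER
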
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/my_configs.debian b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/my_configs.debian
new file mode 100644
index 00000000..5ab1519b
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/my_configs.debian
@@ -0,0 +1,14 @@
+{%- for alias, intf in host_ip_settings.items() %}
+
+auto {{ alias }}
+iface {{ alias }} inet static
+ address {{ intf["ip"] }}
+ netmask {{ intf["netmask"] }}
+{% if "gw" in intf %}
+ gateway {{ intf["gw"] }}
+{% endif %}
+{% if intf["name"] == alias %}
+ pre-up ip link set {{ sys_intf_mappings[alias]["interface"] }} up
+ pre-up ip link add link {{ sys_intf_mappings[alias]["interface"] }} name {{ alias }} type vlan id {{ sys_intf_mappings[alias]["vlan_tag"] }}
+{% endif %}
+{% endfor %}
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/network.cfg b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/network.cfg
new file mode 100644
index 00000000..75ba90cb
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/network.cfg
@@ -0,0 +1,5 @@
+bond_mappings: {{ network_cfg["bond_mappings"] }}
+ip_settings: {{ ip_settings[inventory_hostname] }}
+sys_intf_mappings: {{ sys_intf_mappings }}
+provider_net_mappings: {{ network_cfg["provider_net_mappings"] }}
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml
index 1300ab64..c6b1c9ab 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/HA-ansible-multinodes.yml
@@ -243,3 +243,17 @@
roles:
- ext-network
+- hosts: controller
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - controller-recovery
+
+- hosts: compute
+ remote_user: root
+ accelerate: true
+ max_fail_percentage: 0
+ roles:
+ - compute-recovery
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/tasks/keystone_install.yml
index ea6926f4..79d02729 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/tasks/keystone_install.yml
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/tasks/keystone_install.yml
@@ -26,6 +26,16 @@
state=absent
when: ansible_os_family == "Debian"
+- name: disable boot auto start
+ file:
+ path={{ item }}
+ state=absent
+ with_items:
+ - /etc/init.d/keystone
+ - /etc/init/keystone.conf
+ - /lib/systemd/system/keystone.service
+ when: ansible_os_family == "Debian"
+
- name: generate keystone service list
lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/odl_controller.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/odl_controller.yml
index 9de5f478..4cf7948a 100755
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/odl_controller.yml
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/odl_controller.yml
@@ -89,11 +89,21 @@
- name: create karaf config
template:
- src: org.apache.karaf.features.cfg
+ src: org.apache.karaf.features.cfg.Debian
dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
owner: odl
group: odl
mode: 0775
+ when: ansible_os_family == "Debian"
+
+- name: create karaf config
+ template:
+ src: org.apache.karaf.features.cfg.Redhat
+ dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
+ owner: odl
+ group: odl
+ mode: 0775
+ when: ansible_os_family == "RedHat"
- name: create tomcat config
template:
@@ -269,7 +279,9 @@
shell: crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin;
when: odl_l3_agent == "Enable"
-
+- name: configure metadata for l3 configuration
+ shell: crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True;
+ when: odl_l3_agent == "Enable"
- name: drop and recreate neutron database
shell: mysql -e "drop database if exists neutron;";
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/openvswitch.yml
index 021143fc..b8cb6c91 100755
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/openvswitch.yml
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/openvswitch.yml
@@ -151,3 +151,8 @@
- name: execute ml2 configuration script
command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
+
+#- name: change odl password for moon
+# shell: crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_odl password {{ ADMIN_PASS }}
+# when: moon == "Enable"
+
diff --git a/deploy/adapters/ansible/roles/compute-recovery/tasks/main.yml b/deploy/adapters/ansible/roles/compute-recovery/tasks/main.yml
new file mode 100644
index 00000000..b333b9e0
--- /dev/null
+++ b/deploy/adapters/ansible/roles/compute-recovery/tasks/main.yml
@@ -0,0 +1,26 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: Register RECOVERY
+ set_fact: RECOVERY_ENV={{RECOVERY_ENV | default('False')}}
+ tags:
+ - recovery
+
+- include_vars: "{{ ansible_os_family }}.yml"
+ when: RECOVERY_ENV
+ tags:
+ - recovery
+
+- name: restart compute services
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: compute_services | union(compute_services_noarch)
+ when: RECOVERY_ENV
+ tags:
+ - recovery
+
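
Both recovery roles gate every task on RECOVERY_ENV and tag it with "recovery", so they stay inert during a normal deploy. A hedged sketch of a recovery-only run; the inventory path is a placeholder, not something defined by this change:

    ansible-playbook -i inventory.yml HA-ansible-multinodes.yml \
        --tags recovery -e RECOVERY_ENV=True
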
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/Debian.yml b/deploy/adapters/ansible/roles/compute-recovery/vars/Debian.yml
index b8d8e7c2..23200a50 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/compute-recovery/vars/Debian.yml
@@ -7,15 +7,9 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
+compute_services:
+ - nova-compute
+ - neutron-openvswitch-agent
+ - cinder-volume
+ - ceilometer-agent-compute
-cron_path: "/var/spool/cron/crontabs"
-
-packages:
- - keystone
- - python-openstackclient
-
-services:
- - apache2
-
-apache_config_dir: /etc/apache2
-http_service_name: apache2
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/templates/admin-openrc-v3.sh b/deploy/adapters/ansible/roles/compute-recovery/vars/RedHat.yml
index c3d863e8..c96b062a 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/templates/admin-openrc-v3.sh
+++ b/deploy/adapters/ansible/roles/compute-recovery/vars/RedHat.yml
@@ -6,14 +6,10 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Verify the Identity Service installation
-export OS_PASSWORD={{ ADMIN_PASS }}
-export OS_TENANT_NAME=admin
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v3
-export OS_IDENTITY_API_VERSION=3
-export OS_USERNAME=admin
-export OS_VOLUME_API_VERSION=2
-export OS_USER_DOMAIN_NAME=Default
-export OS_PROJECT_DOMAIN_NAME=Default
-
+---
+compute_services:
+ - openstack-nova-compute
+ - neutron-openvswitch-agent
+ - openstack-cinder-volume
+ - openstack-ceilometer-compute
diff --git a/deploy/adapters/ansible/roles/compute-recovery/vars/main.yml b/deploy/adapters/ansible/roles/compute-recovery/vars/main.yml
new file mode 100644
index 00000000..c2c0d8ee
--- /dev/null
+++ b/deploy/adapters/ansible/roles/compute-recovery/vars/main.yml
@@ -0,0 +1,10 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+compute_services_noarch: []
diff --git a/deploy/adapters/ansible/roles/controller-recovery/tasks/main.yml b/deploy/adapters/ansible/roles/controller-recovery/tasks/main.yml
new file mode 100644
index 00000000..ccf3e46b
--- /dev/null
+++ b/deploy/adapters/ansible/roles/controller-recovery/tasks/main.yml
@@ -0,0 +1,26 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: Register RECOVERY
+ set_fact: RECOVERY_ENV={{RECOVERY_ENV | default('False')}}
+ tags:
+ - recovery
+
+- include_vars: "{{ ansible_os_family }}.yml"
+ when: RECOVERY_ENV
+ tags:
+ - recovery
+
+- name: restart controller services
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: controller_services | union(controller_services_noarch)
+ when: RECOVERY_ENV
+ tags:
+ - recovery
+
diff --git a/deploy/adapters/ansible/roles/controller-recovery/vars/Debian.yml b/deploy/adapters/ansible/roles/controller-recovery/vars/Debian.yml
new file mode 100644
index 00000000..34675f6b
--- /dev/null
+++ b/deploy/adapters/ansible/roles/controller-recovery/vars/Debian.yml
@@ -0,0 +1,40 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+controller_services:
+ - keepalived
+ - apache2
+ - nova-api
+ - nova-cert
+ - nova-conductor
+ - nova-consoleauth
+ - nova-novncproxy
+ - nova-scheduler
+ - neutron-server
+ - cinder-api
+ - cinder-scheduler
+ - glance-registry
+ - glance-api
+ - openvswitch-switch
+ - neutron-openvswitch-agent
+ - neutron-l3-agent
+ - neutron-dhcp-agent
+ - neutron-metadata-agent
+ - ceilometer-agent-central
+ - ceilometer-agent-notification
+ - ceilometer-api
+ - ceilometer-collector
+ - heat-api
+ - heat-api-cfn
+ - heat-engine
+ - aodh-api
+ - aodh-notifier
+ - aodh-evaluator
+ - aodh-listener
+
diff --git a/deploy/adapters/ansible/roles/controller-recovery/vars/RedHat.yml b/deploy/adapters/ansible/roles/controller-recovery/vars/RedHat.yml
new file mode 100644
index 00000000..35c0a955
--- /dev/null
+++ b/deploy/adapters/ansible/roles/controller-recovery/vars/RedHat.yml
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+controller_services:
+ - keepalived
+ - httpd
+ - openstack-nova-api
+ - openstack-nova-cert
+ - openstack-nova-conductor
+ - openstack-nova-consoleauth
+ - openstack-nova-novncproxy
+ - openstack-nova-scheduler
+ - neutron-server
+ - openstack-cinder-api
+ - openstack-cinder-scheduler
+ - openstack-glance-api
+ - openstack-glance-registry
+ - neutron-openvswitch-agent
+ - neutron-l3-agent
+ - neutron-dhcp-agent
+ - neutron-metadata-agent
+ - openstack-ceilometer-central
+ - openstack-ceilometer-notification
+ - openstack-ceilometer-api
+ - openstack-ceilometer-collector
+ - openstack-heat-api
+ - openstack-heat-api-cfn
+ - openstack-heat-engine
+ - openstack-aodh-api
+ - openstack-aodh-notifier
+ - openstack-aodh-evaluator
+ - openstack-aodh-listener
+
diff --git a/deploy/adapters/ansible/roles/controller-recovery/vars/main.yml b/deploy/adapters/ansible/roles/controller-recovery/vars/main.yml
new file mode 100644
index 00000000..22af29f4
--- /dev/null
+++ b/deploy/adapters/ansible/roles/controller-recovery/vars/main.yml
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+controller_services_noarch: []
+
diff --git a/deploy/adapters/ansible/roles/database/tasks/main.yml b/deploy/adapters/ansible/roles/database/tasks/main.yml
index 124b2639..f28da23b 100644
--- a/deploy/adapters/ansible/roles/database/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/main.yml
@@ -10,9 +10,13 @@
- include_vars: "{{ ansible_os_family }}.yml"
tags:
- test_mongo
+ - recovery
- include: mariadb_install.yml
+
- include: mariadb_cluster.yml
+ tags:
+ - recovery
- include: mariadb_config.yml
when:
diff --git a/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml b/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
index 1fbada8c..a6e76c74 100644
--- a/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
+++ b/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
@@ -17,6 +17,18 @@
lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
+# '
+
+- name: create heat user domain
+ shell: >
+ . /opt/admin-openrc-v3.sh;
+ openstack domain create --description "Stack projects and users" heat;
+ openstack user create --domain heat --password {{ HEAT_PASS }} heat_domain_admin;
+ openstack role add --domain heat --user-domain heat --user heat_domain_admin admin;
+ openstack role create heat_stack_owner;
+ openstack role add --project demo --user demo heat_stack_owner;
+ when: inventory_hostname == groups['controller'][0] and ansible_os_family == "Debian"
+
- name: update heat conf
template: src=heat.j2
dest=/etc/heat/heat.conf
@@ -24,4 +36,13 @@
notify:
- restart heat service
- remove heat-sqlite-db
+ when: ansible_os_family == "RedHat"
+- name: update heat conf
+ template: src=heat_debian.j2
+ dest=/etc/heat/heat.conf
+ backup=yes
+ notify:
+ - restart heat service
+ - remove heat-sqlite-db
+ when: ansible_os_family == "Debian"
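
The "create heat user domain" task above bootstraps the stack-user domain that stack_user_domain_name points at in the Debian template below. A minimal sketch of verifying it afterwards, using the admin-openrc-v3.sh file the keystone role now installs under /opt:

    . /opt/admin-openrc-v3.sh
    openstack domain show heat
    openstack role assignment list --user heat_domain_admin --user-domain heat --names
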
diff --git a/deploy/adapters/ansible/roles/heat/templates/heat_debian.j2 b/deploy/adapters/ansible/roles/heat/templates/heat_debian.j2
new file mode 100644
index 00000000..62df9fd9
--- /dev/null
+++ b/deploy/adapters/ansible/roles/heat/templates/heat_debian.j2
@@ -0,0 +1,28 @@
+[DEFAULT]
+heat_metadata_server_url = http://{{ internal_vip.ip }}:8000
+heat_waitcondition_server_url = http://{{ internal_vip.ip }}:8000/v1/waitcondition
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+log_dir = /var/log/heat
+stack_domain_admin = heat_domain_admin
+stack_domain_admin_password = {{ HEAT_PASS }}
+stack_user_domain_name = heat
+
+[database]
+connection = mysql://heat:{{ HEAT_DBPASS }}@{{ db_host }}/heat
+idle_timeout = 30
+use_db_reconnect = True
+pool_timeout = 10
+
+[ec2authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+identity_uri = http://{{ internal_vip.ip }}:35357
+admin_tenant_name = service
+admin_user = heat
+admin_password = {{ HEAT_PASS }}
+
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
index ffae8ff0..ea6926f4 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
@@ -83,5 +83,6 @@
with_items:
- admin-openrc.sh
- demo-openrc.sh
+ - admin-openrc-v3.sh
- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/templates/admin-openrc-v3.sh b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc-v3.sh
index c3d863e8..c3d863e8 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/templates/admin-openrc-v3.sh
+++ b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc-v3.sh
diff --git a/deploy/adapters/ansible/roles/keystone/vars/Debian.yml b/deploy/adapters/ansible/roles/keystone/vars/Debian.yml
index 67547277..b8d8e7c2 100644
--- a/deploy/adapters/ansible/roles/keystone/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/keystone/vars/Debian.yml
@@ -12,6 +12,7 @@ cron_path: "/var/spool/cron/crontabs"
packages:
- keystone
+ - python-openstackclient
services:
- apache2
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
index 15513b30..f0ca1054 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
@@ -89,11 +89,21 @@
- name: create karaf config
template:
- src: org.apache.karaf.features.cfg
+ src: org.apache.karaf.features.cfg.Debian
dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
owner: odl
group: odl
mode: 0775
+ when: ansible_os_family == "Debian"
+
+- name: create karaf config
+ template:
+ src: org.apache.karaf.features.cfg.Redhat
+ dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
+ owner: odl
+ group: odl
+ mode: 0775
+ when: ansible_os_family == "RedHat"
- name: create tomcat config
template:
@@ -256,7 +266,9 @@
shell: crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin;
when: odl_l3_agent == "Enable"
-
+- name: configure metadata for l3 configuration
+ shell: crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True;
+ when: odl_l3_agent == "Enable"
- name: drop and recreate neutron database
shell: mysql -e "drop database if exists neutron;";
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Debian
index 4bbd33c3..4bbd33c3 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Debian
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Redhat b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Redhat
new file mode 100755
index 00000000..0691a984
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Redhat
@@ -0,0 +1,59 @@
+################################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+################################################################################
+
+#
+# Defines if the startlvl should be respected during feature startup. The default value is true. The default
+# behavior for 2.x is false (!) for this property
+#
+# Be aware that this property is deprecated and will be removed in Karaf 4.0. So, if you need to
+# set this to false, please use this only as a temporary solution!
+#
+#respectStartLvlDuringFeatureStartup=true
+
+
+#
+# Defines if the startlvl should be respected during feature uninstall. The default value is true.
+# If true, means stop bundles respecting the descend order of start level in a certain feature.
+#
+#respectStartLvlDuringFeatureUninstall=true
+
+#
+# Comma separated list of features repositories to register by default
+#
+featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.4.2-Beryllium-SR2/xml/features
+#featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.4.0-Beryllium/xml/features
+#featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.4.0-Beryllium-RC1/xml/features
+#mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.3.3-Lithium-SR3/xml/features
+#mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.3.2-Lithium-SR2/xml/features
+
+#
+# Comma separated list of features to install at startup
+#
+#featuresBoot=config,standard,region,package,kar,ssh,management,odl-restconf-all,odl-aaa-authn,odl-dlux-all,odl-ovsdb-openstack
+featuresBoot=config,standard,region,package,kar,ssh,management,odl-ovsdb-openstack
+
+#,odl-restconf-all,odl-aaa-authn,odl-dlux-all
+
+# odl-base-all,odl-restconf,odl-ovsdb-openstack,odl-dlux-all,odl-mdsal-apidocs
+#,odl-mdsal-clustering,odl-openflowplugin-flow-services
+
+#
+# Defines if the boot features are started in asynchronous mode (in a dedicated thread)
+#
+featuresBootAsynchronous=false
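
featuresBoot above pulls odl-ovsdb-openstack in at startup. One way to confirm the features actually installed once the controller is up (a sketch, assuming the stock Karaf ssh console on port 8101 with the default karaf credentials):

    ssh -p 8101 karaf@127.0.0.1 "feature:list -i | grep ovsdb"
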
diff --git a/deploy/adapters/ansible/roles/setup-network/tasks/main.yml b/deploy/adapters/ansible/roles/setup-network/tasks/main.yml
index 727b24ea..7873c073 100644
--- a/deploy/adapters/ansible/roles/setup-network/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/setup-network/tasks/main.yml
@@ -55,6 +55,8 @@
- name: run scripts
shell: python /opt/setup_networks/setup_networks.py
+ tags:
+ - recovery
- name: add to boot scripts
service: name=net_init enabled=yes
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-nofeature-ha.yml
index b13b76f9..8d5794f7 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-nofeature-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-nofeature-ha.yml
@@ -31,9 +31,9 @@ hosts:
- ceph-mon
- name: host3
- mac: 'D8:49:0B:DA:67:1F'
+ mac: '78:D7:52:A0:B1:99'
interfaces:
- - eth1: 'D8:49:0B:DA:67:20'
+ - eth1: '78:D7:52:A0:B1:9A'
ipmiIp: 172.16.130.29
ipmiPass: Huawei@123
roles:
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-ocl-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-ocl-nofeature-ha.yml
index 9eb4adae..bd10dc6d 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-ocl-nofeature-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-ocl-nofeature-ha.yml
@@ -31,9 +31,9 @@ hosts:
- ceph-osd
- name: host3
- mac: 'D8:49:0B:DA:67:1F'
+ mac: '78:D7:52:A0:B1:99'
interfaces:
- - eth1: 'D8:49:0B:DA:67:20'
+ - eth1: '78:D7:52:A0:B1:9A'
ipmiIp: 172.16.130.29
ipmiPass: Huawei@123
roles:
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-moon-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-moon-ha.yml
index 16739a8b..2286bc6a 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-moon-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-moon-ha.yml
@@ -35,9 +35,9 @@ hosts:
- ceph-mon
- name: host3
- mac: 'D8:49:0B:DA:67:1F'
+ mac: '78:D7:52:A0:B1:99'
interfaces:
- - eth1: 'D8:49:0B:DA:67:20'
+ - eth1: '78:D7:52:A0:B1:9A'
ipmiIp: 172.16.130.29
ipmiPass: Huawei@123
roles:
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-nofeature-ha.yml
index 98b24a1e..241ea650 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-nofeature-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-nofeature-ha.yml
@@ -33,9 +33,9 @@ hosts:
- ceph-mon
- name: host3
- mac: 'D8:49:0B:DA:67:1F'
+ mac: '78:D7:52:A0:B1:99'
interfaces:
- - eth1: 'D8:49:0B:DA:67:20'
+ - eth1: '78:D7:52:A0:B1:9A'
ipmiIp: 172.16.130.29
ipmiPass: Huawei@123
roles:
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-odl_l3-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-odl_l3-nofeature-ha.yml
index 2c578faa..f7d2b587 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-odl_l3-nofeature-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-odl_l3-nofeature-ha.yml
@@ -35,9 +35,9 @@ hosts:
- ceph-mon
- name: host3
- mac: 'D8:49:0B:DA:67:1F'
+ mac: '78:D7:52:A0:B1:99'
interfaces:
- - eth1: 'D8:49:0B:DA:67:20'
+ - eth1: '78:D7:52:A0:B1:9A'
ipmiIp: 172.16.130.29
ipmiPass: Huawei@123
roles:
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-onos-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-onos-nofeature-ha.yml
index aec1ff09..09d59628 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-onos-nofeature-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-onos-nofeature-ha.yml
@@ -33,9 +33,9 @@ hosts:
- ceph-mon
- name: host3
- mac: 'D8:49:0B:DA:67:1F'
+ mac: '78:D7:52:A0:B1:99'
interfaces:
- - eth1: 'D8:49:0B:DA:67:20'
+ - eth1: '78:D7:52:A0:B1:9A'
ipmiIp: 172.16.130.29
ipmiPass: Huawei@123
roles:
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-onos-sfc-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-onos-sfc-ha.yml
index 98ae5eba..e7956d9b 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-onos-sfc-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-onos-sfc-ha.yml
@@ -35,9 +35,9 @@ hosts:
- ceph-mon
- name: host3
- mac: 'D8:49:0B:DA:67:1F'
+ mac: '78:D7:52:A0:B1:99'
interfaces:
- - eth1: 'D8:49:0B:DA:67:20'
+ - eth1: '78:D7:52:A0:B1:9A'
ipmiIp: 172.16.130.29
ipmiPass: Huawei@123
roles:
diff --git a/deploy/network.sh b/deploy/network.sh
index e3230fa9..46b8c023 100755
--- a/deploy/network.sh
+++ b/deploy/network.sh
@@ -104,6 +104,23 @@ function setup_baremetal_net() {
setup_bridge_net install $INSTALL_NIC
}
+function setup_network_boot_scripts() {
+ sudo cp $COMPASS_DIR/deploy/network.sh /usr/sbin/network_setup
+ sudo chmod +777 /usr/sbin/network_setup
+ sudo cat << EOF >> /usr/sbin/network_setup
+
+sleep 2
+save_network_info
+clear_forward_rejct_rules
+EOF
+ sudo chmod 755 /usr/sbin/network_setup
+
+ egrep -R "^/usr/sbin/network_setup" /etc/rc.local
+ if [[ $? != 0 ]]; then
+ sudo sed -i '/^exit 0/i\/usr\/sbin\/network_setup' /etc/rc.local
+ fi
+}
+
function create_nets() {
setup_nat_net mgmt $MGMT_GW $MGMT_MASK $MGMT_IP_START $MGMT_IP_END
@@ -113,4 +130,7 @@ function create_nets() {
# create external network
setup_bridge_external
clear_forward_rejct_rules
+
+ setup_network_boot_scripts
}
+
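
setup_network_boot_scripts copies this script to /usr/sbin/network_setup, appends calls to save_network_info and clear_forward_rejct_rules, and hooks it into /etc/rc.local ahead of "exit 0". A quick sanity check on the jump host after deployment (a sketch of what the generated pieces should contain):

    tail -n 4 /usr/sbin/network_setup                 # should end with the appended save/clear calls
    grep -n '^/usr/sbin/network_setup' /etc/rc.local  # the rc.local hook added above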