From 93f8d97757dfe37d262a95148ac94f7a2c78a64e Mon Sep 17 00:00:00 2001
From: Alexandru Avadanii
Date: Wed, 23 Aug 2017 15:38:06 +0200
Subject: p/fuel: Rebase after authorized_keys fix upstream

Change-Id: Id53c2513b52d05a44dffda0fda40f8df259e226b
Signed-off-by: Alexandru Avadanii
---
 ...-network-parametrize-Openstack-public-net.patch |  45 ---
 ...-salt-formulas-Add-enable-armband-formula.patch |   2 +-
 ...-config-AArch64-baremetal-Use-arm64-image.patch |  38 +++
 ...s-maas-Add-mcp.rsa.pub-to-authorized_keys.patch |  31 ---
 ...s-baremetal-AArch64-virtio-NIC-names-sync.patch | 308 +++++++++++++++++++++
 ...-config-AArch64-baremetal-Use-arm64-image.patch |  38 ---
 ...s-baremetal-AArch64-virtio-NIC-names-sync.patch | 308 ---------------------
 ...-Add-curtin_userdata_arm64_generic_xenial.patch | 104 +++++++
 ...-Add-curtin_userdata_arm64_generic_xenial.patch | 104 -------
 ...twork-public-mgmt-Use-arm-pod3-POD-config.patch | 165 +++++++++++
 ...lasses-baremetal-arm-pod3-config-hardcode.patch |  95 +++++++
 ...twork-public-mgmt-Use-arm-pod3-POD-config.patch | 165 -----------
 ...lasses-baremetal-arm-pod3-config-hardcode.patch |  95 -------
 ...aas-boot-resources-Add-arm64-architecture.patch |  31 +++
 .../0013-libvirt-Use-libvirt-unix_sock_group.patch |  50 ++++
 ...aas-boot-resources-Add-arm64-architecture.patch |  31 ---
 .../0014-libvirt-Use-libvirt-unix_sock_group.patch |  50 ----
 ...lt-formulas-armband-Extend-libvirt_domain.patch |  97 +++++++
 ...lt-formulas-armband-Extend-libvirt_domain.patch |  97 -------
 ...-virtng.py-virt.sls-Extend-libvirt_domain.patch | 198 +++++++++++
 ...p-salt-formulas-armband-AArch64-bootstrap.patch |  82 ++++++
 ...-virtng.py-virt.sls-Extend-libvirt_domain.patch | 198 -------------
 ...p-salt-formulas-armband-AArch64-bootstrap.patch |  82 ------
 .../0017-seedng-module-Add-AArch64-repo.patch      |  54 ++++
 .../0018-seedng-module-Add-AArch64-repo.patch      |  54 ----
 25 files changed, 1223 insertions(+), 1299 deletions(-)
 delete mode 100644 patches/opnfv-fuel/0000-states-network-parametrize-Openstack-public-net.patch
 create mode 100644 patches/opnfv-fuel/0007-mcp-config-AArch64-baremetal-Use-arm64-image.patch
 delete mode 100644 patches/opnfv-fuel/0007-states-maas-Add-mcp.rsa.pub-to-authorized_keys.patch
 create mode 100644 patches/opnfv-fuel/0008-classes-baremetal-AArch64-virtio-NIC-names-sync.patch
 delete mode 100644 patches/opnfv-fuel/0008-mcp-config-AArch64-baremetal-Use-arm64-image.patch
 delete mode 100644 patches/opnfv-fuel/0009-classes-baremetal-AArch64-virtio-NIC-names-sync.patch
 create mode 100644 patches/opnfv-fuel/0009-maas-Add-curtin_userdata_arm64_generic_xenial.patch
 delete mode 100644 patches/opnfv-fuel/0010-maas-Add-curtin_userdata_arm64_generic_xenial.patch
 create mode 100644 patches/opnfv-fuel/0010-network-public-mgmt-Use-arm-pod3-POD-config.patch
 create mode 100644 patches/opnfv-fuel/0011-classes-baremetal-arm-pod3-config-hardcode.patch
 delete mode 100644 patches/opnfv-fuel/0011-network-public-mgmt-Use-arm-pod3-POD-config.patch
 delete mode 100644 patches/opnfv-fuel/0012-classes-baremetal-arm-pod3-config-hardcode.patch
 create mode 100644 patches/opnfv-fuel/0012-maas-boot-resources-Add-arm64-architecture.patch
 create mode 100644 patches/opnfv-fuel/0013-libvirt-Use-libvirt-unix_sock_group.patch
 delete mode 100644 patches/opnfv-fuel/0013-maas-boot-resources-Add-arm64-architecture.patch
 delete mode 100644 patches/opnfv-fuel/0014-libvirt-Use-libvirt-unix_sock_group.patch
 create mode 100644 patches/opnfv-fuel/0014-mcp-salt-formulas-armband-Extend-libvirt_domain.patch
 delete mode 100644 patches/opnfv-fuel/0015-mcp-salt-formulas-armband-Extend-libvirt_domain.patch
 create mode 100644 patches/opnfv-fuel/0015-virtng.py-virt.sls-Extend-libvirt_domain.patch
 create mode 100644 patches/opnfv-fuel/0016-mcp-salt-formulas-armband-AArch64-bootstrap.patch
 delete mode 100644 patches/opnfv-fuel/0016-virtng.py-virt.sls-Extend-libvirt_domain.patch
 delete mode 100644 patches/opnfv-fuel/0017-mcp-salt-formulas-armband-AArch64-bootstrap.patch
 create mode 100644 patches/opnfv-fuel/0017-seedng-module-Add-AArch64-repo.patch
 delete mode 100644 patches/opnfv-fuel/0018-seedng-module-Add-AArch64-repo.patch

diff --git a/patches/opnfv-fuel/0000-states-network-parametrize-Openstack-public-net.patch b/patches/opnfv-fuel/0000-states-network-parametrize-Openstack-public-net.patch
deleted file mode 100644
index 4d776bcd..00000000
--- a/patches/opnfv-fuel/0000-states-network-parametrize-Openstack-public-net.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From: Alexandru Avadanii
-Date: Thu, 17 Aug 2017 18:54:16 +0200
-Subject: [PATCH] states/network: parametrize Openstack public net
-
-Determine public network based on public IPs of compute nodes.
-
-Change-Id: I5a6b29a0458b0b839f8fdb3e32616a41d7a621f7
-Signed-off-by: Alexandru Avadanii
----
- mcp/config/states/networks | 28 +++++++++++++++++++++++++---
- 1 file changed, 25 insertions(+), 3 deletions(-)
-
-diff --git a/mcp/config/states/networks b/mcp/config/states/networks
-index 205e0a9..6f294ce 100755
---- a/mcp/config/states/networks
-+++ b/mcp/config/states/networks
-@@ -1,3 +1,25 @@
---salt 'ctl01*' cmd.run ". /root/keystonercv3; openstack compute service list; openstack network agent list; openstack stack list; openstack volume service list"
---salt 'ctl01*' cmd.run ". /root/keystonercv3; openstack network create --external --default --provider-network-type flat --provider-physical-network physnet1 floating_net"
---salt 'ctl01*' cmd.run ". /root/keystonercv3; openstack subnet create --gateway 10.16.0.1 --no-dhcp --allocation-pool start=10.16.0.130,end=10.16.0.254 --network floating_net --subnet-range 10.16.0.0/24 floating_subnet"
-+#!/bin/bash
-+
-+# Determine public network based on external IPs from compute node
-+# NOTE: mask currently hardcoded to /24
-+PUBLIC_NET=$(salt --out yaml 'cmp*' pillar.get _param:external_address | \
-+ awk --re-interval '/[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}/{print $2; exit}')
-+
-+[ -n "${PUBLIC_NET}" ] || PUBLIC_NET=$(salt --out yaml 'cmp*' \
-+ pillar.get _param:openstack_compute_node01_external_address | \
-+ awk --re-interval '/[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}/{print $2; exit}')
-+
-+[ -n "${PUBLIC_NET}" ] && PUBLIC_NET="${PUBLIC_NET%.*}.0/24" || PUBLIC_NET="10.16.0.0/24"
-+
-+salt 'ctl01*' cmd.run ". /root/keystonercv3; \
-+ openstack compute service list; \
-+ openstack network agent list; \
-+ openstack stack list; \
-+ openstack volume service list"
-+salt 'ctl01*' cmd.run ". /root/keystonercv3; \
-+ openstack network create --external --default --provider-network-type flat \
-+ --provider-physical-network physnet1 floating_net"
-+salt 'ctl01*' cmd.run ". 
/root/keystonercv3; \ -+ openstack subnet create --gateway ${PUBLIC_NET%.*}.1 --no-dhcp \ -+ --allocation-pool start=${PUBLIC_NET%.*}.130,end=${PUBLIC_NET%.*}.254 \ -+ --network floating_net --subnet-range ${PUBLIC_NET} floating_subnet" diff --git a/patches/opnfv-fuel/0004-mcp-salt-formulas-Add-enable-armband-formula.patch b/patches/opnfv-fuel/0004-mcp-salt-formulas-Add-enable-armband-formula.patch index 7bc8baa8..b053b699 100644 --- a/patches/opnfv-fuel/0004-mcp-salt-formulas-Add-enable-armband-formula.patch +++ b/patches/opnfv-fuel/0004-mcp-salt-formulas-Add-enable-armband-formula.patch @@ -36,7 +36,7 @@ Signed-off-by: Alexandru Avadanii create mode 100644 mcp/salt-formulas/armband/vgabios.sls diff --git a/mcp/config/states/maas b/mcp/config/states/maas -index 52a9b77..9a27a39 100755 +index fecd991..85cee8c 100755 --- a/mcp/config/states/maas +++ b/mcp/config/states/maas @@ -51,6 +51,7 @@ wait_for "! salt '*' test.ping | tee /dev/stderr | fgrep -q 'Not connected'" diff --git a/patches/opnfv-fuel/0007-mcp-config-AArch64-baremetal-Use-arm64-image.patch b/patches/opnfv-fuel/0007-mcp-config-AArch64-baremetal-Use-arm64-image.patch new file mode 100644 index 00000000..cd959da3 --- /dev/null +++ b/patches/opnfv-fuel/0007-mcp-config-AArch64-baremetal-Use-arm64-image.patch @@ -0,0 +1,38 @@ +From: Alexandru Avadanii +Date: Sun, 6 Aug 2017 19:34:06 +0200 +Subject: [PATCH] mcp/config: AArch64 baremetal: Use arm64 image + +While at it, bump default vCPU number from 2 to 6. + +Signed-off-by: Alexandru Avadanii +--- + mcp/config/scenario/baremetal/defaults.yaml | 4 ++-- + mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml | 2 +- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/mcp/config/scenario/baremetal/defaults.yaml b/mcp/config/scenario/baremetal/defaults.yaml +index b841e88..17fbbfc 100644 +--- a/mcp/config/scenario/baremetal/defaults.yaml ++++ b/mcp/config/scenario/baremetal/defaults.yaml +@@ -1,6 +1,6 @@ +-base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img ++base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-uefi1.img + virtual: + default: +- vcpus: 2 ++ vcpus: 6 + ram: 4096 + +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml +index e3d47b0..93e2de0 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml +@@ -51,7 +51,7 @@ parameters: + address: ${_param:single_address} + netmask: 255.255.255.0 + +- salt_control_xenial_image: http://apt.mirantis.com/images/ubuntu-16-04-x64-latest.qcow2 ++ salt_control_xenial_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-uefi1.img + salt_api_password_hash: "$6$WV0P1shnoDh2gI/Z$22/Bcd7ffMv0jDlFpT63cAU4PiXHz9pjXwngToKwqAsgoeK4HNR3PiKaushjxp3JsQ8hNoJmAC6TxzVqfV8WH/" + linux: + network: diff --git a/patches/opnfv-fuel/0007-states-maas-Add-mcp.rsa.pub-to-authorized_keys.patch b/patches/opnfv-fuel/0007-states-maas-Add-mcp.rsa.pub-to-authorized_keys.patch deleted file mode 100644 index 8265abef..00000000 --- a/patches/opnfv-fuel/0007-states-maas-Add-mcp.rsa.pub-to-authorized_keys.patch +++ /dev/null @@ -1,31 +0,0 @@ -From: Alexandru Avadanii -Date: Tue, 22 Aug 2017 22:04:28 +0200 -Subject: [PATCH] states/maas: Add mcp.rsa.pub to authorized_keys - -Add our mcp.rsa.pub RSA key to all nodes, including VCP VMs. 
-This is required for functest to be able to fetch openrc. - -While at it, add retry wrappers for more VCP VM state.sls calls. - -Change-Id: I34f79848c52e36de8d981055880321a081420874 -Signed-off-by: Alexandru Avadanii -Signed-off-by: Guillermo Herrero ---- - mcp/config/states/maas | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/mcp/config/states/maas b/mcp/config/states/maas -index 52a9b77..fecd991 100755 ---- a/mcp/config/states/maas -+++ b/mcp/config/states/maas -@@ -69,6 +69,9 @@ while [ $rc -ne 0 ]; do - sleep 5 - done - --salt -C '* and not cfg01* and not mas01*' saltutil.sync_all -+wait_for "salt -C '* and not cfg01* and not mas01*' ssh.set_auth_key ${SUDO_USER} \ -+ $(awk 'NR==1{print $2}' $(eval echo ~${SUDO_USER}/.ssh/authorized_keys))" -+ -+wait_for "salt -C '* and not cfg01* and not mas01*' saltutil.sync_all" - salt -C '* and not cfg01* and not mas01*' state.apply salt - wait_for "salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp" diff --git a/patches/opnfv-fuel/0008-classes-baremetal-AArch64-virtio-NIC-names-sync.patch b/patches/opnfv-fuel/0008-classes-baremetal-AArch64-virtio-NIC-names-sync.patch new file mode 100644 index 00000000..045c22d0 --- /dev/null +++ b/patches/opnfv-fuel/0008-classes-baremetal-AArch64-virtio-NIC-names-sync.patch @@ -0,0 +1,308 @@ +From: Alexandru Avadanii +Date: Sun, 6 Aug 2017 20:42:47 +0200 +Subject: [PATCH] classes: baremetal: AArch64: virtio NIC names sync + +grep -e "ens[[:digit:]]" -R . -l | \ + xargs sed -i \ + -e 's/ens3/enp1s0/g' \ + -e 's/ens4/enp2s0/g' \ + -e 's/ens5/enp3s0/g' \ + -e 's/ens6/enp4s0/g' + +Since AArch64 will be using virtio-net-pci NIC model for guests, +predictable interface naming yields a slightly different scheme. + +Update all configuration to reflect this. + +NOTE: Above configuration is expected with libvirt 3.x, which puts +each NIC on a separate PCIe bus (which also imposes virtio modern +is used). 
+ +Signed-off-by: Alexandru Avadanii +--- + .../baremetal-mcp-ocata-ovs-ha/infra/config.yml | 4 ++-- + .../baremetal-mcp-ocata-ovs-ha/infra/kvm.yml | 18 ++++++++++++------ + .../baremetal-mcp-ocata-ovs-ha/infra/maas.yml | 8 ++++---- + .../openstack/compute.yml | 21 ++++++++++----------- + .../openstack/control.yml | 6 +++--- + .../openstack/dashboard.yml | 4 ++-- + .../openstack/database.yml | 6 +++--- + .../openstack/message_queue.yml | 6 +++--- + .../baremetal-mcp-ocata-ovs-ha/openstack/proxy.yml | 6 +++--- + .../openstack/telemetry.yml | 6 +++--- + 10 files changed, 45 insertions(+), 40 deletions(-) + +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/config.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/config.yml +index ce2c951..7d95ebc 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/config.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/config.yml +@@ -32,8 +32,8 @@ parameters: + linux: + network: + interface: +- ens3: ${_param:linux_dhcp_interface} +- ens4: ${_param:linux_single_interface} ++ enp1s0: ${_param:linux_dhcp_interface} ++ enp2s0: ${_param:linux_single_interface} + salt: + master: + accept_policy: open_mode +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml +index 1608c65..8677a79 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml +@@ -25,7 +25,8 @@ parameters: + cluster_node03_address: ${_param:infra_kvm_node03_address} + keepalived_vip_interface: br-ctl + keepalived_vip_virtual_router_id: 69 +- deploy_nic: enp6s0 ++ deploy_nic: eth0 ++ trunk_nic: eth1 + salt: + control: + size: #RAM 4096,8192,16384,32768,65536 +@@ -115,7 +116,7 @@ parameters: + linux: + network: + interface: +- eth3: ++ eth0: + enabled: true + type: eth + proto: manual +@@ -123,6 +124,11 @@ parameters: + netmask: 255.255.255.0 + name: ${_param:deploy_nic} + noifupdown: true ++ eth1: ++ enabled: true ++ type: eth ++ proto: manual ++ name: ${_param:trunk_nic} + br-mgmt: + enabled: true + proto: dhcp +@@ -133,13 +139,13 @@ parameters: + use_interfaces: + - ${_param:deploy_nic} + noifupdown: true +- vlan300: ++ vlan2183: + enabled: true + proto: manual + type: vlan +- name: ${_param:deploy_nic}.300 ++ name: ${_param:trunk_nic}.2183 + use_interfaces: +- - ${_param:deploy_nic} ++ - ${_param:trunk_nic} + br-ctl: + enabled: true + type: bridge +@@ -147,4 +153,4 @@ parameters: + address: ${_param:single_address} + netmask: 255.255.255.0 + use_interfaces: +- - ${_param:deploy_nic}.300 ++ - ${_param:trunk_nic}.2183 +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml +index d193469..73215ee 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml +@@ -4,10 +4,10 @@ classes: + - cluster.baremetal-mcp-ocata-ovs-ha.infra + parameters: + _param: +- dhcp_interface: ens3 +- primary_interface: ens4 +- pxe_interface: ens5 +- external_interface: ens6 ++ dhcp_interface: enp1s0 ++ primary_interface: enp2s0 ++ pxe_interface: enp3s0 ++ external_interface: enp4s0 + interface_mtu: 1500 + # MaaS has issues using MTU > 1500 for PXE interface + pxe_interface_mtu: 1500 +diff --git 
a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/compute.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/compute.yml +index 070ab78..e932f0d 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/compute.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/compute.yml +@@ -19,9 +19,8 @@ parameters: + cluster_node03_hostname: ctl03 + cluster_node03_address: ${_param:openstack_control_node03_address} + nova_vncproxy_url: https://${_param:cluster_public_host}:6080 +- mgmt_nic: enp6s0 +- tenant_nic: enp7s0 +- external_nic: enp8s0 ++ mgmt_nic: eth0 ++ tenant_nic: eth1 + linux_system_codename: xenial + interface_mtu: 1500 + keepalived_vip_interface: br-ctl +@@ -53,19 +52,19 @@ parameters: + netmask: 255.255.255.0 + mtu: 1500 + use_interfaces: +- - ${_param:tenant_nic}.302 +- vlan300: ++ - ${_param:tenant_nic}.2185 ++ vlan2183: + enabled: true + proto: manual + type: vlan +- name: ${_param:mgmt_nic}.300 ++ name: ${_param:tenant_nic}.2183 + use_interfaces: +- - ${_param:mgmt_nic} +- vlan302: ++ - ${_param:tenant_nic} ++ vlan2185: + enabled: true + proto: manual + type: vlan +- name: ${_param:tenant_nic}.302 ++ name: ${_param:tenant_nic}.2185 + use_interfaces: + - ${_param:tenant_nic} + br-ctl: +@@ -75,7 +74,7 @@ parameters: + address: ${_param:single_address} + netmask: 255.255.255.0 + use_interfaces: +- - ${_param:mgmt_nic}.300 ++ - ${_param:tenant_nic}.2183 + br-floating: + enabled: true + type: ovs_bridge +@@ -92,6 +91,6 @@ parameters: + address: ${_param:external_address} + netmask: 255.255.255.0 + use_interfaces: +- - ${_param:external_nic} ++ - ${_param:tenant_nic} + use_ovs_ports: + - float-to-ex +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/control.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/control.yml +index 6f47f8a..5ba1411 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/control.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/control.yml +@@ -23,7 +23,7 @@ classes: + - cluster.baremetal-mcp-ocata-ovs-ha.infra + parameters: + _param: +- keepalived_vip_interface: ens3 ++ keepalived_vip_interface: enp2s0 + keepalived_vip_virtual_router_id: 50 + cluster_vip_address: ${_param:openstack_control_address} + cluster_local_address: ${_param:single_address} +@@ -37,8 +37,8 @@ parameters: + linux: + network: + interface: +- ens2: ${_param:linux_dhcp_interface} +- ens3: ${_param:linux_single_interface} ++ enp1s0: ${_param:linux_dhcp_interface} ++ enp2s0: ${_param:linux_single_interface} + bind: + server: + control: +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/dashboard.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/dashboard.yml +index 39cc073..18f622c 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/dashboard.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/dashboard.yml +@@ -7,5 +7,5 @@ parameters: + linux: + network: + interface: +- ens2: ${_param:linux_dhcp_interface} +- ens3: ${_param:linux_single_interface} ++ enp1s0: ${_param:linux_dhcp_interface} ++ enp2s0: ${_param:linux_single_interface} +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/database.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/database.yml +index dabda49..7a6934b 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/database.yml 
++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/database.yml +@@ -16,7 +16,7 @@ classes: + - cluster.baremetal-mcp-ocata-ovs-ha + parameters: + _param: +- keepalived_vip_interface: ens3 ++ keepalived_vip_interface: enp2s0 + keepalived_vip_virtual_router_id: 80 + galera_server_cluster_name: openstack_cluster + cluster_vip_address: ${_param:openstack_database_address} +@@ -30,5 +30,5 @@ parameters: + linux: + network: + interface: +- ens2: ${_param:linux_dhcp_interface} +- ens3: ${_param:linux_single_interface} ++ enp1s0: ${_param:linux_dhcp_interface} ++ enp2s0: ${_param:linux_single_interface} +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/message_queue.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/message_queue.yml +index 881644b..f510a7b 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/message_queue.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/message_queue.yml +@@ -7,7 +7,7 @@ classes: + - cluster.baremetal-mcp-ocata-ovs-ha + parameters: + _param: +- keepalived_vip_interface: ens3 ++ keepalived_vip_interface: enp2s0 + keepalived_vip_virtual_router_id: 90 + cluster_vip_address: ${_param:openstack_message_queue_address} + cluster_local_address: ${_param:single_address} +@@ -20,5 +20,5 @@ parameters: + linux: + network: + interface: +- ens2: ${_param:linux_dhcp_interface} +- ens3: ${_param:linux_single_interface} ++ enp1s0: ${_param:linux_dhcp_interface} ++ enp2s0: ${_param:linux_single_interface} +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/proxy.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/proxy.yml +index 2147741..4e25b78 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/proxy.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/proxy.yml +@@ -15,7 +15,7 @@ classes: + # - cluster.baremetal-mcp-ocata-ovs-ha.stacklight.proxy + parameters: + _param: +- keepalived_vip_interface: ens3 ++ keepalived_vip_interface: enp2s0 + keepalived_vip_virtual_router_id: 240 + nginx_proxy_ssl: + enabled: true +@@ -27,8 +27,8 @@ parameters: + linux: + network: + interface: +- ens2: ${_param:linux_dhcp_interface} +- ens3: ${_param:linux_single_interface} ++ enp1s0: ${_param:linux_dhcp_interface} ++ enp2s0: ${_param:linux_single_interface} + system: + package: + libapache2-mod-wsgi: +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/telemetry.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/telemetry.yml +index 505ee7f..6ad13ae 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/telemetry.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/telemetry.yml +@@ -10,7 +10,7 @@ classes: + - cluster.baremetal-mcp-ocata-ovs-ha.infra + parameters: + _param: +- keepalived_vip_interface: ens3 ++ keepalived_vip_interface: enp2s0 + keepalived_vip_virtual_router_id: 230 + cluster_vip_address: ${_param:openstack_telemetry_address} + cluster_local_address: ${_param:single_address} +@@ -23,5 +23,5 @@ parameters: + linux: + network: + interface: +- ens2: ${_param:linux_dhcp_interface} +- ens3: ${_param:linux_single_interface} ++ enp1s0: ${_param:linux_dhcp_interface} ++ enp2s0: ${_param:linux_single_interface} diff --git a/patches/opnfv-fuel/0008-mcp-config-AArch64-baremetal-Use-arm64-image.patch b/patches/opnfv-fuel/0008-mcp-config-AArch64-baremetal-Use-arm64-image.patch deleted 
file mode 100644 index cd959da3..00000000 --- a/patches/opnfv-fuel/0008-mcp-config-AArch64-baremetal-Use-arm64-image.patch +++ /dev/null @@ -1,38 +0,0 @@ -From: Alexandru Avadanii -Date: Sun, 6 Aug 2017 19:34:06 +0200 -Subject: [PATCH] mcp/config: AArch64 baremetal: Use arm64 image - -While at it, bump default vCPU number from 2 to 6. - -Signed-off-by: Alexandru Avadanii ---- - mcp/config/scenario/baremetal/defaults.yaml | 4 ++-- - mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml | 2 +- - 2 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/mcp/config/scenario/baremetal/defaults.yaml b/mcp/config/scenario/baremetal/defaults.yaml -index b841e88..17fbbfc 100644 ---- a/mcp/config/scenario/baremetal/defaults.yaml -+++ b/mcp/config/scenario/baremetal/defaults.yaml -@@ -1,6 +1,6 @@ --base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img -+base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-uefi1.img - virtual: - default: -- vcpus: 2 -+ vcpus: 6 - ram: 4096 - -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml -index e3d47b0..93e2de0 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml -@@ -51,7 +51,7 @@ parameters: - address: ${_param:single_address} - netmask: 255.255.255.0 - -- salt_control_xenial_image: http://apt.mirantis.com/images/ubuntu-16-04-x64-latest.qcow2 -+ salt_control_xenial_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-uefi1.img - salt_api_password_hash: "$6$WV0P1shnoDh2gI/Z$22/Bcd7ffMv0jDlFpT63cAU4PiXHz9pjXwngToKwqAsgoeK4HNR3PiKaushjxp3JsQ8hNoJmAC6TxzVqfV8WH/" - linux: - network: diff --git a/patches/opnfv-fuel/0009-classes-baremetal-AArch64-virtio-NIC-names-sync.patch b/patches/opnfv-fuel/0009-classes-baremetal-AArch64-virtio-NIC-names-sync.patch deleted file mode 100644 index 045c22d0..00000000 --- a/patches/opnfv-fuel/0009-classes-baremetal-AArch64-virtio-NIC-names-sync.patch +++ /dev/null @@ -1,308 +0,0 @@ -From: Alexandru Avadanii -Date: Sun, 6 Aug 2017 20:42:47 +0200 -Subject: [PATCH] classes: baremetal: AArch64: virtio NIC names sync - -grep -e "ens[[:digit:]]" -R . -l | \ - xargs sed -i \ - -e 's/ens3/enp1s0/g' \ - -e 's/ens4/enp2s0/g' \ - -e 's/ens5/enp3s0/g' \ - -e 's/ens6/enp4s0/g' - -Since AArch64 will be using virtio-net-pci NIC model for guests, -predictable interface naming yields a slightly different scheme. - -Update all configuration to reflect this. - -NOTE: Above configuration is expected with libvirt 3.x, which puts -each NIC on a separate PCIe bus (which also imposes virtio modern -is used). 
- -Signed-off-by: Alexandru Avadanii ---- - .../baremetal-mcp-ocata-ovs-ha/infra/config.yml | 4 ++-- - .../baremetal-mcp-ocata-ovs-ha/infra/kvm.yml | 18 ++++++++++++------ - .../baremetal-mcp-ocata-ovs-ha/infra/maas.yml | 8 ++++---- - .../openstack/compute.yml | 21 ++++++++++----------- - .../openstack/control.yml | 6 +++--- - .../openstack/dashboard.yml | 4 ++-- - .../openstack/database.yml | 6 +++--- - .../openstack/message_queue.yml | 6 +++--- - .../baremetal-mcp-ocata-ovs-ha/openstack/proxy.yml | 6 +++--- - .../openstack/telemetry.yml | 6 +++--- - 10 files changed, 45 insertions(+), 40 deletions(-) - -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/config.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/config.yml -index ce2c951..7d95ebc 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/config.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/config.yml -@@ -32,8 +32,8 @@ parameters: - linux: - network: - interface: -- ens3: ${_param:linux_dhcp_interface} -- ens4: ${_param:linux_single_interface} -+ enp1s0: ${_param:linux_dhcp_interface} -+ enp2s0: ${_param:linux_single_interface} - salt: - master: - accept_policy: open_mode -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml -index 1608c65..8677a79 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml -@@ -25,7 +25,8 @@ parameters: - cluster_node03_address: ${_param:infra_kvm_node03_address} - keepalived_vip_interface: br-ctl - keepalived_vip_virtual_router_id: 69 -- deploy_nic: enp6s0 -+ deploy_nic: eth0 -+ trunk_nic: eth1 - salt: - control: - size: #RAM 4096,8192,16384,32768,65536 -@@ -115,7 +116,7 @@ parameters: - linux: - network: - interface: -- eth3: -+ eth0: - enabled: true - type: eth - proto: manual -@@ -123,6 +124,11 @@ parameters: - netmask: 255.255.255.0 - name: ${_param:deploy_nic} - noifupdown: true -+ eth1: -+ enabled: true -+ type: eth -+ proto: manual -+ name: ${_param:trunk_nic} - br-mgmt: - enabled: true - proto: dhcp -@@ -133,13 +139,13 @@ parameters: - use_interfaces: - - ${_param:deploy_nic} - noifupdown: true -- vlan300: -+ vlan2183: - enabled: true - proto: manual - type: vlan -- name: ${_param:deploy_nic}.300 -+ name: ${_param:trunk_nic}.2183 - use_interfaces: -- - ${_param:deploy_nic} -+ - ${_param:trunk_nic} - br-ctl: - enabled: true - type: bridge -@@ -147,4 +153,4 @@ parameters: - address: ${_param:single_address} - netmask: 255.255.255.0 - use_interfaces: -- - ${_param:deploy_nic}.300 -+ - ${_param:trunk_nic}.2183 -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml -index d193469..73215ee 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml -@@ -4,10 +4,10 @@ classes: - - cluster.baremetal-mcp-ocata-ovs-ha.infra - parameters: - _param: -- dhcp_interface: ens3 -- primary_interface: ens4 -- pxe_interface: ens5 -- external_interface: ens6 -+ dhcp_interface: enp1s0 -+ primary_interface: enp2s0 -+ pxe_interface: enp3s0 -+ external_interface: enp4s0 - interface_mtu: 1500 - # MaaS has issues using MTU > 1500 for PXE interface - pxe_interface_mtu: 1500 -diff --git 
a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/compute.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/compute.yml -index 070ab78..e932f0d 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/compute.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/compute.yml -@@ -19,9 +19,8 @@ parameters: - cluster_node03_hostname: ctl03 - cluster_node03_address: ${_param:openstack_control_node03_address} - nova_vncproxy_url: https://${_param:cluster_public_host}:6080 -- mgmt_nic: enp6s0 -- tenant_nic: enp7s0 -- external_nic: enp8s0 -+ mgmt_nic: eth0 -+ tenant_nic: eth1 - linux_system_codename: xenial - interface_mtu: 1500 - keepalived_vip_interface: br-ctl -@@ -53,19 +52,19 @@ parameters: - netmask: 255.255.255.0 - mtu: 1500 - use_interfaces: -- - ${_param:tenant_nic}.302 -- vlan300: -+ - ${_param:tenant_nic}.2185 -+ vlan2183: - enabled: true - proto: manual - type: vlan -- name: ${_param:mgmt_nic}.300 -+ name: ${_param:tenant_nic}.2183 - use_interfaces: -- - ${_param:mgmt_nic} -- vlan302: -+ - ${_param:tenant_nic} -+ vlan2185: - enabled: true - proto: manual - type: vlan -- name: ${_param:tenant_nic}.302 -+ name: ${_param:tenant_nic}.2185 - use_interfaces: - - ${_param:tenant_nic} - br-ctl: -@@ -75,7 +74,7 @@ parameters: - address: ${_param:single_address} - netmask: 255.255.255.0 - use_interfaces: -- - ${_param:mgmt_nic}.300 -+ - ${_param:tenant_nic}.2183 - br-floating: - enabled: true - type: ovs_bridge -@@ -92,6 +91,6 @@ parameters: - address: ${_param:external_address} - netmask: 255.255.255.0 - use_interfaces: -- - ${_param:external_nic} -+ - ${_param:tenant_nic} - use_ovs_ports: - - float-to-ex -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/control.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/control.yml -index 6f47f8a..5ba1411 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/control.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/control.yml -@@ -23,7 +23,7 @@ classes: - - cluster.baremetal-mcp-ocata-ovs-ha.infra - parameters: - _param: -- keepalived_vip_interface: ens3 -+ keepalived_vip_interface: enp2s0 - keepalived_vip_virtual_router_id: 50 - cluster_vip_address: ${_param:openstack_control_address} - cluster_local_address: ${_param:single_address} -@@ -37,8 +37,8 @@ parameters: - linux: - network: - interface: -- ens2: ${_param:linux_dhcp_interface} -- ens3: ${_param:linux_single_interface} -+ enp1s0: ${_param:linux_dhcp_interface} -+ enp2s0: ${_param:linux_single_interface} - bind: - server: - control: -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/dashboard.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/dashboard.yml -index 39cc073..18f622c 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/dashboard.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/dashboard.yml -@@ -7,5 +7,5 @@ parameters: - linux: - network: - interface: -- ens2: ${_param:linux_dhcp_interface} -- ens3: ${_param:linux_single_interface} -+ enp1s0: ${_param:linux_dhcp_interface} -+ enp2s0: ${_param:linux_single_interface} -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/database.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/database.yml -index dabda49..7a6934b 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/database.yml 
-+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/database.yml -@@ -16,7 +16,7 @@ classes: - - cluster.baremetal-mcp-ocata-ovs-ha - parameters: - _param: -- keepalived_vip_interface: ens3 -+ keepalived_vip_interface: enp2s0 - keepalived_vip_virtual_router_id: 80 - galera_server_cluster_name: openstack_cluster - cluster_vip_address: ${_param:openstack_database_address} -@@ -30,5 +30,5 @@ parameters: - linux: - network: - interface: -- ens2: ${_param:linux_dhcp_interface} -- ens3: ${_param:linux_single_interface} -+ enp1s0: ${_param:linux_dhcp_interface} -+ enp2s0: ${_param:linux_single_interface} -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/message_queue.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/message_queue.yml -index 881644b..f510a7b 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/message_queue.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/message_queue.yml -@@ -7,7 +7,7 @@ classes: - - cluster.baremetal-mcp-ocata-ovs-ha - parameters: - _param: -- keepalived_vip_interface: ens3 -+ keepalived_vip_interface: enp2s0 - keepalived_vip_virtual_router_id: 90 - cluster_vip_address: ${_param:openstack_message_queue_address} - cluster_local_address: ${_param:single_address} -@@ -20,5 +20,5 @@ parameters: - linux: - network: - interface: -- ens2: ${_param:linux_dhcp_interface} -- ens3: ${_param:linux_single_interface} -+ enp1s0: ${_param:linux_dhcp_interface} -+ enp2s0: ${_param:linux_single_interface} -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/proxy.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/proxy.yml -index 2147741..4e25b78 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/proxy.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/proxy.yml -@@ -15,7 +15,7 @@ classes: - # - cluster.baremetal-mcp-ocata-ovs-ha.stacklight.proxy - parameters: - _param: -- keepalived_vip_interface: ens3 -+ keepalived_vip_interface: enp2s0 - keepalived_vip_virtual_router_id: 240 - nginx_proxy_ssl: - enabled: true -@@ -27,8 +27,8 @@ parameters: - linux: - network: - interface: -- ens2: ${_param:linux_dhcp_interface} -- ens3: ${_param:linux_single_interface} -+ enp1s0: ${_param:linux_dhcp_interface} -+ enp2s0: ${_param:linux_single_interface} - system: - package: - libapache2-mod-wsgi: -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/telemetry.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/telemetry.yml -index 505ee7f..6ad13ae 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/telemetry.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/telemetry.yml -@@ -10,7 +10,7 @@ classes: - - cluster.baremetal-mcp-ocata-ovs-ha.infra - parameters: - _param: -- keepalived_vip_interface: ens3 -+ keepalived_vip_interface: enp2s0 - keepalived_vip_virtual_router_id: 230 - cluster_vip_address: ${_param:openstack_telemetry_address} - cluster_local_address: ${_param:single_address} -@@ -23,5 +23,5 @@ parameters: - linux: - network: - interface: -- ens2: ${_param:linux_dhcp_interface} -- ens3: ${_param:linux_single_interface} -+ enp1s0: ${_param:linux_dhcp_interface} -+ enp2s0: ${_param:linux_single_interface} diff --git a/patches/opnfv-fuel/0009-maas-Add-curtin_userdata_arm64_generic_xenial.patch b/patches/opnfv-fuel/0009-maas-Add-curtin_userdata_arm64_generic_xenial.patch new 
file mode 100644 index 00000000..aa521a1f --- /dev/null +++ b/patches/opnfv-fuel/0009-maas-Add-curtin_userdata_arm64_generic_xenial.patch @@ -0,0 +1,104 @@ +From: Alexandru Avadanii +Date: Mon, 7 Aug 2017 19:45:01 +0200 +Subject: [PATCH] maas: Add curtin_userdata_arm64_generic_xenial + +Add AArch64 specific configuration. + +Signed-off-by: Alexandru Avadanii +--- + ...-Add-curtin_userdata_arm64_generic_xenial.patch | 76 ++++++++++++++++++++++ + mcp/patches/patches.list | 1 + + 2 files changed, 77 insertions(+) + create mode 100644 mcp/patches/0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch + +diff --git a/mcp/patches/0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch b/mcp/patches/0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch +new file mode 100644 +index 0000000..b969e07 +--- /dev/null ++++ b/mcp/patches/0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch +@@ -0,0 +1,76 @@ ++From: Alexandru Avadanii ++Date: Sat, 5 Aug 2017 02:03:01 +0200 ++Subject: [PATCH] maas: Add curtin_userdata_arm64_generic_xenial ++ ++Based on curtin_userdata_amd64_generic_xenial, add new arm64 ++specific configuration file: ++- curtin_userdata_arm64_generic_xenial ++ ++Signed-off-by: Alexandru Avadanii ++--- ++ ++diff --git a/maas/region.sls b/maas/region.sls ++--- a/maas/region.sls +++++ b/maas/region.sls ++@@ -70,6 +70,18 @@ ++ - require: ++ - pkg: maas_region_packages ++ +++/etc/maas/preseeds/curtin_userdata_arm64_generic_xenial: +++ file.managed: +++ - source: salt://maas/files/curtin_userdata_arm64_generic_xenial +++ - template: jinja +++ - user: root +++ - group: root +++ - mode: 644 +++ - context: +++ salt_master_ip: {{ region.salt_master_ip }} +++ - require: +++ - pkg: maas_region_packages +++ ++ maas_region_services: ++ service.running: ++ - enable: true ++diff --git a/maas/files/curtin_userdata_arm64_generic_xenial b/maas/files/curtin_userdata_arm64_generic_xenial ++new file mode 100644 ++--- /dev/null +++++ b/maas/files/curtin_userdata_arm64_generic_xenial ++@@ -0,0 +1,38 @@ +++{%- from "maas/map.jinja" import server with context %} +++{% raw %} +++#cloud-config +++debconf_selections: +++ maas: | +++ {{for line in str(curtin_preseed).splitlines()}} +++ {{line}} +++ {{endfor}} +++{{if third_party_drivers and driver}} +++early_commands: +++ {{py: key_string = ''.join(['\\x%x' % x for x in map(ord, driver['key_binary'])])}} +++ driver_00_get_key: /bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg +++ driver_01_add_key: ["apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"] +++ driver_02_add: ["add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"] +++ driver_03_update_install: ["sh", "-c", "apt-get update --quiet && apt-get --assume-yes install {{driver['package']}}"] +++ driver_04_load: ["sh", "-c", "depmod && modprobe {{driver['module']}}"] +++{{endif}} +++late_commands: +++ maas: [wget, '--no-proxy', {{node_disable_pxe_url|escape.json}}, '--post-data', {{node_disable_pxe_data|escape.json}}, '-O', '/dev/null'] +++ apt_00_set_repo: ["curtin", "in-target", "--", "sh", "-c", "echo 'deb [arch=arm64] http://linux.enea.com/saltstack/apt/ubuntu/16.04/arm64/latest xenial main' >> /etc/apt/sources.list"] +++ apt_01_set_gpg: ["curtin", "in-target", "--", "sh", "-c", "echo 
'LS0tLS1CRUdJTiBQR1AgUFVCTElDIEtFWSBCTE9DSy0tLS0tClZlcnNpb246IEdudVBHIHYxCgptUUVOQkZhZ0Fyb0JDQURXYm9OSWp1RjZsQjFtV3YyK0VidnFZM2xLbDVtTEtocjJEblNVa0tlSFVQQnY4Z05NCnFLOFEwMEFNSXlQaXlFaGdqQStkV2l6Wis1YUJneG9pWTdvTWVMSjJYeW0zNlUvOFNZcTJCV2QzU0dDYk1Ob3oKU0pEeERVU00vSEZWczZhdEYxTTNEWTlvTjY1aFNWbnU0dXk1VHU2YXNmNms0cmhBeWswejQrcFJjUEJDdTJ2cQptbkdpM0NPTS8rOVBTaHJFS2VWT3g1VzJ2Ukp5d1VGdXE4RUR2UW5Sb0owR3ZNMjhKaUpJYW53MTdZd0lQeGhnCkJLWlZwWmphbjVYK2loVk1Yd0EyaC9HL0ZTNU9taGQ1MFJxVjZMV1NZczk0VkpKZ1lxSHg4VU1tN2l6Y3hJK1AKY3QzSWNiRDE5NWJQYkorU2J1aUZlNDVaTHNkWTFNeUdpVTJCQUJFQkFBRzBLMFZ1WldFZ1FYSnRZbUZ1WkNCRQpaWFp2Y0hNZ1ZHVmhiU0E4WVhKdFltRnVaRUJsYm1WaExtTnZiVDZKQVQ0RUV3RUNBQ2dGQWxhZ0Fyb0NHd01GCkNRUENad0FHQ3drSUJ3TUNCaFVJQWdrS0N3UVdBZ01CQWg0QkFoZUFBQW9KRU42cmtMcDVpckhSc0c4SC8yUDIKaE82akZIUFJEMXU4ajl1Zk85QXhvSW5hRUc5R2VSanV2TWMxdENVem92WHZUczEwNlRaMGFzNk1iQUoxUzZ1NQpFOFV5UTErVkdlcHZkSUpQWHgzUEN4TG1VdDZXSXFtZE9kcUZyeEljbG9oaWcva2FyZGkzTmZYMU1CdmxFVi9jCjdRNTFINDNocmxNcU1wcWdZMUJtKzUzUExDNHVqamxESkJ0Sk9FVTlka2kzMTliVW1uZytnTzlCQWxqREpGWHYKSnFZNitQL2VyN0lnQ2JSWFVYWWZ5Snpnang5SndsS2JtZFE4UW5DclpqRi9WSElXNDAvbXU1SUlKdUZ2dUN0aQp0Y29nK1NOU2xEbUNPUyt3RThDQ29qZXVKcXVwSU9jejl6eXBWR3pldXMvTjVRNUVFRmU3R1lXWVMvNU5LVWtFCjFUdXVGZVpLdTVOSkM1cmt3SVU9Cj03cDYwCi0tLS0tRU5EIFBHUCBQVUJMSUMgS0VZIEJMT0NLLS0tLS0K'|base64 -d|apt-key add -"] +++ apt_03_update: ["curtin", "in-target", "--", "apt-get", "update"] +++ salt_01_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "salt-minion"] +++{% endraw %} +++ salt_02_hostname_set: ["curtin", "in-target", "--", "echo", "{% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}"] +++ salt_03_hostname_get: ["curtin", "in-target", "--", "sh", "-c", "echo 'id: {% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}' >> /etc/salt/minion"] +++ salt_04_master: ["curtin", "in-target", "--", "sh", "-c", "echo 'master: {{ salt_master_ip }}' >> /etc/salt/minion"] +++{% raw %} +++{{if third_party_drivers and driver}} +++ driver_00_key_get: curtin in-target -- sh -c "/bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg" +++ driver_02_key_add: ["curtin", "in-target", "--", "apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"] +++ driver_03_add: ["curtin", "in-target", "--", "add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"] +++ driver_04_update_install: ["curtin", "in-target", "--", "apt-get", "update", "--quiet"] +++ driver_05_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "{{driver['package']}}"] +++ driver_06_depmod: ["curtin", "in-target", "--", "depmod"] +++ driver_07_update_initramfs: ["curtin", "in-target", "--", "update-initramfs", "-u"] +++{{endif}} +++{% endraw %} +diff --git a/mcp/patches/patches.list b/mcp/patches/patches.list +index 1a651cf..c66ecb0 100644 +--- a/mcp/patches/patches.list ++++ b/mcp/patches/patches.list +@@ -5,3 +5,4 @@ + /usr/share/salt-formulas/env: 0005-maas-module-Obtain-fabric-ID-from-CIDR.patch + /usr/share/salt-formulas/env: 0006-maas-module-Add-VLAN-DHCP-enable-support.patch + /usr/share/salt-formulas/env: 0007-linux.network.interface-noifupdown-support.patch ++/usr/share/salt-formulas/env: 0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch diff --git a/patches/opnfv-fuel/0010-maas-Add-curtin_userdata_arm64_generic_xenial.patch b/patches/opnfv-fuel/0010-maas-Add-curtin_userdata_arm64_generic_xenial.patch deleted file mode 100644 index aa521a1f..00000000 --- a/patches/opnfv-fuel/0010-maas-Add-curtin_userdata_arm64_generic_xenial.patch +++ 
/dev/null @@ -1,104 +0,0 @@ -From: Alexandru Avadanii -Date: Mon, 7 Aug 2017 19:45:01 +0200 -Subject: [PATCH] maas: Add curtin_userdata_arm64_generic_xenial - -Add AArch64 specific configuration. - -Signed-off-by: Alexandru Avadanii ---- - ...-Add-curtin_userdata_arm64_generic_xenial.patch | 76 ++++++++++++++++++++++ - mcp/patches/patches.list | 1 + - 2 files changed, 77 insertions(+) - create mode 100644 mcp/patches/0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch - -diff --git a/mcp/patches/0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch b/mcp/patches/0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch -new file mode 100644 -index 0000000..b969e07 ---- /dev/null -+++ b/mcp/patches/0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch -@@ -0,0 +1,76 @@ -+From: Alexandru Avadanii -+Date: Sat, 5 Aug 2017 02:03:01 +0200 -+Subject: [PATCH] maas: Add curtin_userdata_arm64_generic_xenial -+ -+Based on curtin_userdata_amd64_generic_xenial, add new arm64 -+specific configuration file: -+- curtin_userdata_arm64_generic_xenial -+ -+Signed-off-by: Alexandru Avadanii -+--- -+ -+diff --git a/maas/region.sls b/maas/region.sls -+--- a/maas/region.sls -++++ b/maas/region.sls -+@@ -70,6 +70,18 @@ -+ - require: -+ - pkg: maas_region_packages -+ -++/etc/maas/preseeds/curtin_userdata_arm64_generic_xenial: -++ file.managed: -++ - source: salt://maas/files/curtin_userdata_arm64_generic_xenial -++ - template: jinja -++ - user: root -++ - group: root -++ - mode: 644 -++ - context: -++ salt_master_ip: {{ region.salt_master_ip }} -++ - require: -++ - pkg: maas_region_packages -++ -+ maas_region_services: -+ service.running: -+ - enable: true -+diff --git a/maas/files/curtin_userdata_arm64_generic_xenial b/maas/files/curtin_userdata_arm64_generic_xenial -+new file mode 100644 -+--- /dev/null -++++ b/maas/files/curtin_userdata_arm64_generic_xenial -+@@ -0,0 +1,38 @@ -++{%- from "maas/map.jinja" import server with context %} -++{% raw %} -++#cloud-config -++debconf_selections: -++ maas: | -++ {{for line in str(curtin_preseed).splitlines()}} -++ {{line}} -++ {{endfor}} -++{{if third_party_drivers and driver}} -++early_commands: -++ {{py: key_string = ''.join(['\\x%x' % x for x in map(ord, driver['key_binary'])])}} -++ driver_00_get_key: /bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg -++ driver_01_add_key: ["apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"] -++ driver_02_add: ["add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"] -++ driver_03_update_install: ["sh", "-c", "apt-get update --quiet && apt-get --assume-yes install {{driver['package']}}"] -++ driver_04_load: ["sh", "-c", "depmod && modprobe {{driver['module']}}"] -++{{endif}} -++late_commands: -++ maas: [wget, '--no-proxy', {{node_disable_pxe_url|escape.json}}, '--post-data', {{node_disable_pxe_data|escape.json}}, '-O', '/dev/null'] -++ apt_00_set_repo: ["curtin", "in-target", "--", "sh", "-c", "echo 'deb [arch=arm64] http://linux.enea.com/saltstack/apt/ubuntu/16.04/arm64/latest xenial main' >> /etc/apt/sources.list"] -++ apt_01_set_gpg: ["curtin", "in-target", "--", "sh", "-c", "echo 
'LS0tLS1CRUdJTiBQR1AgUFVCTElDIEtFWSBCTE9DSy0tLS0tClZlcnNpb246IEdudVBHIHYxCgptUUVOQkZhZ0Fyb0JDQURXYm9OSWp1RjZsQjFtV3YyK0VidnFZM2xLbDVtTEtocjJEblNVa0tlSFVQQnY4Z05NCnFLOFEwMEFNSXlQaXlFaGdqQStkV2l6Wis1YUJneG9pWTdvTWVMSjJYeW0zNlUvOFNZcTJCV2QzU0dDYk1Ob3oKU0pEeERVU00vSEZWczZhdEYxTTNEWTlvTjY1aFNWbnU0dXk1VHU2YXNmNms0cmhBeWswejQrcFJjUEJDdTJ2cQptbkdpM0NPTS8rOVBTaHJFS2VWT3g1VzJ2Ukp5d1VGdXE4RUR2UW5Sb0owR3ZNMjhKaUpJYW53MTdZd0lQeGhnCkJLWlZwWmphbjVYK2loVk1Yd0EyaC9HL0ZTNU9taGQ1MFJxVjZMV1NZczk0VkpKZ1lxSHg4VU1tN2l6Y3hJK1AKY3QzSWNiRDE5NWJQYkorU2J1aUZlNDVaTHNkWTFNeUdpVTJCQUJFQkFBRzBLMFZ1WldFZ1FYSnRZbUZ1WkNCRQpaWFp2Y0hNZ1ZHVmhiU0E4WVhKdFltRnVaRUJsYm1WaExtTnZiVDZKQVQ0RUV3RUNBQ2dGQWxhZ0Fyb0NHd01GCkNRUENad0FHQ3drSUJ3TUNCaFVJQWdrS0N3UVdBZ01CQWg0QkFoZUFBQW9KRU42cmtMcDVpckhSc0c4SC8yUDIKaE82akZIUFJEMXU4ajl1Zk85QXhvSW5hRUc5R2VSanV2TWMxdENVem92WHZUczEwNlRaMGFzNk1iQUoxUzZ1NQpFOFV5UTErVkdlcHZkSUpQWHgzUEN4TG1VdDZXSXFtZE9kcUZyeEljbG9oaWcva2FyZGkzTmZYMU1CdmxFVi9jCjdRNTFINDNocmxNcU1wcWdZMUJtKzUzUExDNHVqamxESkJ0Sk9FVTlka2kzMTliVW1uZytnTzlCQWxqREpGWHYKSnFZNitQL2VyN0lnQ2JSWFVYWWZ5Snpnang5SndsS2JtZFE4UW5DclpqRi9WSElXNDAvbXU1SUlKdUZ2dUN0aQp0Y29nK1NOU2xEbUNPUyt3RThDQ29qZXVKcXVwSU9jejl6eXBWR3pldXMvTjVRNUVFRmU3R1lXWVMvNU5LVWtFCjFUdXVGZVpLdTVOSkM1cmt3SVU9Cj03cDYwCi0tLS0tRU5EIFBHUCBQVUJMSUMgS0VZIEJMT0NLLS0tLS0K'|base64 -d|apt-key add -"] -++ apt_03_update: ["curtin", "in-target", "--", "apt-get", "update"] -++ salt_01_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "salt-minion"] -++{% endraw %} -++ salt_02_hostname_set: ["curtin", "in-target", "--", "echo", "{% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}"] -++ salt_03_hostname_get: ["curtin", "in-target", "--", "sh", "-c", "echo 'id: {% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}' >> /etc/salt/minion"] -++ salt_04_master: ["curtin", "in-target", "--", "sh", "-c", "echo 'master: {{ salt_master_ip }}' >> /etc/salt/minion"] -++{% raw %} -++{{if third_party_drivers and driver}} -++ driver_00_key_get: curtin in-target -- sh -c "/bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg" -++ driver_02_key_add: ["curtin", "in-target", "--", "apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"] -++ driver_03_add: ["curtin", "in-target", "--", "add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"] -++ driver_04_update_install: ["curtin", "in-target", "--", "apt-get", "update", "--quiet"] -++ driver_05_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "{{driver['package']}}"] -++ driver_06_depmod: ["curtin", "in-target", "--", "depmod"] -++ driver_07_update_initramfs: ["curtin", "in-target", "--", "update-initramfs", "-u"] -++{{endif}} -++{% endraw %} -diff --git a/mcp/patches/patches.list b/mcp/patches/patches.list -index 1a651cf..c66ecb0 100644 ---- a/mcp/patches/patches.list -+++ b/mcp/patches/patches.list -@@ -5,3 +5,4 @@ - /usr/share/salt-formulas/env: 0005-maas-module-Obtain-fabric-ID-from-CIDR.patch - /usr/share/salt-formulas/env: 0006-maas-module-Add-VLAN-DHCP-enable-support.patch - /usr/share/salt-formulas/env: 0007-linux.network.interface-noifupdown-support.patch -+/usr/share/salt-formulas/env: 0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch diff --git a/patches/opnfv-fuel/0010-network-public-mgmt-Use-arm-pod3-POD-config.patch b/patches/opnfv-fuel/0010-network-public-mgmt-Use-arm-pod3-POD-config.patch new file mode 100644 index 00000000..beb90533 --- /dev/null +++ b/patches/opnfv-fuel/0010-network-public-mgmt-Use-arm-pod3-POD-config.patch @@ 
-0,0 +1,165 @@ +From: Alexandru Avadanii +Date: Tue, 8 Aug 2017 03:49:53 +0200 +Subject: [PATCH] network: public, mgmt: Use arm-pod3 POD config + +Move MaaS IPs to x.y.z.2 (upstream Fuel uses x.y.z.3). + +Signed-off-by: Alexandru Avadanii +--- + ci/deploy.sh | 2 +- + mcp/config/states/maas | 2 +- + .../baremetal-mcp-ocata-ovs-ha/infra/init.yml | 22 ++++---- + .../baremetal-mcp-ocata-ovs-ha/openstack/init.yml | 60 +++++++++++----------- + 4 files changed, 43 insertions(+), 43 deletions(-) + +diff --git a/ci/deploy.sh b/ci/deploy.sh +index cf7b3b3..2b4e724 100755 +--- a/ci/deploy.sh ++++ b/ci/deploy.sh +@@ -139,7 +139,7 @@ URI_REGEXP='(file|https?|ftp)://.*' + + export SSH_KEY=${SSH_KEY:-mcp.rsa} + export SALT_MASTER=${SALT_MASTER_IP:-192.168.10.100} +-export MAAS_IP=${MAAS_IP:-192.168.10.3} ++export MAAS_IP=${MAAS_IP:-192.168.10.2} + export SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ${SSH_KEY}" + + # Variables below are disabled for now, to be re-introduced or removed later +diff --git a/mcp/config/states/maas b/mcp/config/states/maas +index 85cee8c..46bad6f 100755 +--- a/mcp/config/states/maas ++++ b/mcp/config/states/maas +@@ -20,7 +20,7 @@ salt -C 'mas01*' state.apply linux.network.interface + salt -C 'mas01*' state.apply maas.pxe_nat + salt -C 'mas01*' state.apply maas.cluster + salt -C 'cfg01*' cmd.run \ +- "route add -net 192.168.11.0/24 gw ${MAAS_IP:-192.168.10.3}" ++ "route add -net 192.168.11.0/24 gw ${MAAS_IP:-192.168.10.2}" + + wait_for "salt -C 'mas01*' state.apply maas.region" + +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml +index 93e2de0..1d75356 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml +@@ -16,19 +16,19 @@ parameters: + infra_maas_database_password: opnfv_secret + + # infra service addresses +- infra_config_address: 10.167.4.100 ++ infra_config_address: 172.16.10.100 + infra_config_deploy_address: 192.168.10.100 +- infra_maas_node01_address: 10.167.4.3 +- infra_maas_node01_deploy_address: 192.168.11.3 +- infra_maas_node01_external_address: 10.16.0.3 +- infra_compute_node01_address: 10.167.4.141 +- infra_compute_node02_address: 10.167.4.142 +- infra_compute_node03_address: 10.167.4.143 ++ infra_maas_node01_address: 172.16.10.2 ++ infra_maas_node01_deploy_address: 192.168.11.2 ++ infra_maas_node01_external_address: 10.0.8.2 ++ infra_compute_node01_address: 172.16.10.141 ++ infra_compute_node02_address: 172.16.10.142 ++ infra_compute_node03_address: 172.16.10.143 + +- infra_kvm_address: 10.167.4.140 +- infra_kvm_node01_address: 10.167.4.141 +- infra_kvm_node02_address: 10.167.4.142 +- infra_kvm_node03_address: 10.167.4.143 ++ infra_kvm_address: 172.16.10.140 ++ infra_kvm_node01_address: 172.16.10.141 ++ infra_kvm_node02_address: 172.16.10.142 ++ infra_kvm_node03_address: 172.16.10.143 + + infra_maas_node01_hostname: mas01 + infra_kvm_node01_hostname: kvm01 +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/init.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/init.yml +index 9c2bc7b..90e0cf3 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/init.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/init.yml +@@ -3,9 +3,9 @@ parameters: + + openstack_version: ocata + +- openstack_gateway_node01_address: 10.167.4.124 +- 
openstack_gateway_node02_address: 10.167.4.125 +- openstack_gateway_node03_address: 10.167.4.126 ++ openstack_gateway_node01_address: 172.16.10.124 ++ openstack_gateway_node02_address: 172.16.10.125 ++ openstack_gateway_node03_address: 172.16.10.126 + openstack_gateway_node01_tenant_address: 10.1.0.6 + openstack_gateway_node02_tenant_address: 10.1.0.7 + openstack_gateway_node03_tenant_address: 10.1.0.9 +@@ -14,21 +14,21 @@ parameters: + openstack_gateway_node03_hostname: gtw03 + + # openstack service addresses +- openstack_proxy_address: 10.167.4.80 +- openstack_proxy_node01_address: 10.167.4.81 +- openstack_proxy_node02_address: 10.167.4.82 +- openstack_control_address: 10.167.4.10 +- openstack_control_node01_address: 10.167.4.11 +- openstack_control_node02_address: 10.167.4.12 +- openstack_control_node03_address: 10.167.4.13 +- openstack_database_address: 10.167.4.50 +- openstack_database_node01_address: 10.167.4.51 +- openstack_database_node02_address: 10.167.4.52 +- openstack_database_node03_address: 10.167.4.53 +- openstack_message_queue_address: 10.167.4.40 +- openstack_message_queue_node01_address: 10.167.4.41 +- openstack_message_queue_node02_address: 10.167.4.42 +- openstack_message_queue_node03_address: 10.167.4.43 ++ openstack_proxy_address: 172.16.10.80 ++ openstack_proxy_node01_address: 172.16.10.81 ++ openstack_proxy_node02_address: 172.16.10.82 ++ openstack_control_address: 172.16.10.10 ++ openstack_control_node01_address: 172.16.10.11 ++ openstack_control_node02_address: 172.16.10.12 ++ openstack_control_node03_address: 172.16.10.13 ++ openstack_database_address: 172.16.10.50 ++ openstack_database_node01_address: 172.16.10.51 ++ openstack_database_node02_address: 172.16.10.52 ++ openstack_database_node03_address: 172.16.10.53 ++ openstack_message_queue_address: 172.16.10.40 ++ openstack_message_queue_node01_address: 172.16.10.41 ++ openstack_message_queue_node02_address: 172.16.10.42 ++ openstack_message_queue_node03_address: 172.16.10.43 + + + openstack_telemetry_hostname: mdb +@@ -36,23 +36,23 @@ parameters: + openstack_telemetry_node02_hostname: mdb02 + openstack_telemetry_node03_hostname: mdb03 + +- openstack_telemetry_address: 10.167.4.75 +- openstack_telemetry_node01_address: 10.167.4.76 +- openstack_telemetry_node02_address: 10.167.4.77 +- openstack_telemetry_node03_address: 10.167.4.78 ++ openstack_telemetry_address: 172.16.10.75 ++ openstack_telemetry_node01_address: 172.16.10.76 ++ openstack_telemetry_node02_address: 172.16.10.77 ++ openstack_telemetry_node03_address: 172.16.10.78 + + # OpenStack Compute +- openstack_compute_node01_single_address: 10.167.4.101 +- openstack_compute_node02_single_address: 10.167.4.102 +- openstack_compute_node03_single_address: 10.167.4.103 +- openstack_compute_node01_control_address: 10.167.4.101 +- openstack_compute_node02_control_address: 10.167.4.102 +- openstack_compute_node03_control_address: 10.167.4.103 ++ openstack_compute_node01_single_address: 172.16.10.101 ++ openstack_compute_node02_single_address: 172.16.10.102 ++ openstack_compute_node03_single_address: 172.16.10.103 ++ openstack_compute_node01_control_address: 172.16.10.101 ++ openstack_compute_node02_control_address: 172.16.10.102 ++ openstack_compute_node03_control_address: 172.16.10.103 + openstack_compute_node01_tenant_address: 10.1.0.101 + openstack_compute_node02_tenant_address: 10.1.0.102 + openstack_compute_node03_tenant_address: 10.1.0.103 +- openstack_compute_node01_external_address: 172.30.10.2 +- openstack_compute_node02_external_address: 172.30.10.3 ++ 
openstack_compute_node01_external_address: 10.0.8.101 ++ openstack_compute_node02_external_address: 10.0.8.102 + + # openstack service hostnames + openstack_proxy_hostname: prx diff --git a/patches/opnfv-fuel/0011-classes-baremetal-arm-pod3-config-hardcode.patch b/patches/opnfv-fuel/0011-classes-baremetal-arm-pod3-config-hardcode.patch new file mode 100644 index 00000000..229ea3a0 --- /dev/null +++ b/patches/opnfv-fuel/0011-classes-baremetal-arm-pod3-config-hardcode.patch @@ -0,0 +1,95 @@ +From: Alexandru Avadanii +Date: Thu, 10 Aug 2017 18:32:37 +0200 +Subject: [PATCH] classes: baremetal: arm-pod3 config hardcode + +Populate MaaS configuration with node information for arm-pod3. + +FIXME: Read all this info from PDF (Pod Descriptor File) later. + +Signed-off-by: Alexandru Avadanii +--- + .../baremetal-mcp-ocata-ovs-ha/infra/maas.yml | 40 +++++++++++----------- + 1 file changed, 20 insertions(+), 20 deletions(-) + +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml +index 73215ee..26115fe 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml +@@ -48,57 +48,57 @@ parameters: + machines: + kvm01: + interface: +- mac: "00:25:b5:a0:00:2a" ++ mac: "e0:ff:f7:00:08:ae" + power_parameters: +- power_address: "172.30.8.75" +- power_password: "octopus" ++ power_address: "10.0.8.3" ++ power_password: "admin" + power_type: ipmi + power_user: "admin" +- architecture: 'amd64/generic' ++ architecture: 'arm64/generic' + distro_series: xenial + hwe_kernel: hwe-16.04 + kvm02: + interface: +- mac: "00:25:b5:a0:00:3a" ++ mac: "e0:ff:f7:00:08:b1" + power_parameters: +- power_address: "172.30.8.65" +- power_password: "octopus" ++ power_address: "10.0.8.4" ++ power_password: "admin" + power_type: ipmi + power_user: "admin" +- architecture: 'amd64/generic' ++ architecture: 'arm64/generic' + distro_series: xenial + hwe_kernel: hwe-16.04 + kvm03: + interface: +- mac: "00:25:b5:a0:00:4a" ++ mac: "e0:ff:f7:00:08:bd" + power_parameters: +- power_address: "172.30.8.74" +- power_password: "octopus" ++ power_address: "10.0.8.5" ++ power_password: "admin" + power_type: ipmi + power_user: "admin" +- architecture: 'amd64/generic' ++ architecture: 'arm64/generic' + distro_series: xenial + hwe_kernel: hwe-16.04 + cmp001: + interface: +- mac: "00:25:b5:a0:00:5a" ++ mac: "e0:ff:f7:00:08:c6" + power_parameters: +- power_address: "172.30.8.73" +- power_password: "octopus" ++ power_address: "10.0.8.6" ++ power_password: "admin" + power_type: ipmi + power_user: "admin" +- architecture: 'amd64/generic' ++ architecture: 'arm64/generic' + distro_series: xenial + hwe_kernel: hwe-16.04 + cmp002: + interface: +- mac: "00:25:b5:a0:00:6a" ++ mac: "e0:ff:f7:00:08:cf" + power_parameters: +- power_address: "172.30.8.72" +- power_password: "octopus" ++ power_address: "10.0.8.7" ++ power_password: "admin" + power_type: ipmi + power_user: "admin" +- architecture: 'amd64/generic' ++ architecture: 'arm64/generic' + distro_series: xenial + hwe_kernel: hwe-16.04 + linux: diff --git a/patches/opnfv-fuel/0011-network-public-mgmt-Use-arm-pod3-POD-config.patch b/patches/opnfv-fuel/0011-network-public-mgmt-Use-arm-pod3-POD-config.patch deleted file mode 100644 index 95e39265..00000000 --- a/patches/opnfv-fuel/0011-network-public-mgmt-Use-arm-pod3-POD-config.patch +++ /dev/null @@ -1,165 +0,0 @@ -From: Alexandru Avadanii -Date: Tue, 8 Aug 2017 
03:49:53 +0200 -Subject: [PATCH] network: public, mgmt: Use arm-pod3 POD config - -Move MaaS IPs to x.y.z.2 (upstream Fuel uses x.y.z.3). - -Signed-off-by: Alexandru Avadanii ---- - ci/deploy.sh | 2 +- - mcp/config/states/maas | 2 +- - .../baremetal-mcp-ocata-ovs-ha/infra/init.yml | 22 ++++---- - .../baremetal-mcp-ocata-ovs-ha/openstack/init.yml | 60 +++++++++++----------- - 4 files changed, 43 insertions(+), 43 deletions(-) - -diff --git a/ci/deploy.sh b/ci/deploy.sh -index cf7b3b3..2b4e724 100755 ---- a/ci/deploy.sh -+++ b/ci/deploy.sh -@@ -139,7 +139,7 @@ URI_REGEXP='(file|https?|ftp)://.*' - - export SSH_KEY=${SSH_KEY:-mcp.rsa} - export SALT_MASTER=${SALT_MASTER_IP:-192.168.10.100} --export MAAS_IP=${MAAS_IP:-192.168.10.3} -+export MAAS_IP=${MAAS_IP:-192.168.10.2} - export SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ${SSH_KEY}" - - # Variables below are disabled for now, to be re-introduced or removed later -diff --git a/mcp/config/states/maas b/mcp/config/states/maas -index 9a27a39..f83f4a7 100755 ---- a/mcp/config/states/maas -+++ b/mcp/config/states/maas -@@ -20,7 +20,7 @@ salt -C 'mas01*' state.apply linux.network.interface - salt -C 'mas01*' state.apply maas.pxe_nat - salt -C 'mas01*' state.apply maas.cluster - salt -C 'cfg01*' cmd.run \ -- "route add -net 192.168.11.0/24 gw ${MAAS_IP:-192.168.10.3}" -+ "route add -net 192.168.11.0/24 gw ${MAAS_IP:-192.168.10.2}" - - wait_for "salt -C 'mas01*' state.apply maas.region" - -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml -index 93e2de0..1d75356 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml -@@ -16,19 +16,19 @@ parameters: - infra_maas_database_password: opnfv_secret - - # infra service addresses -- infra_config_address: 10.167.4.100 -+ infra_config_address: 172.16.10.100 - infra_config_deploy_address: 192.168.10.100 -- infra_maas_node01_address: 10.167.4.3 -- infra_maas_node01_deploy_address: 192.168.11.3 -- infra_maas_node01_external_address: 10.16.0.3 -- infra_compute_node01_address: 10.167.4.141 -- infra_compute_node02_address: 10.167.4.142 -- infra_compute_node03_address: 10.167.4.143 -+ infra_maas_node01_address: 172.16.10.2 -+ infra_maas_node01_deploy_address: 192.168.11.2 -+ infra_maas_node01_external_address: 10.0.8.2 -+ infra_compute_node01_address: 172.16.10.141 -+ infra_compute_node02_address: 172.16.10.142 -+ infra_compute_node03_address: 172.16.10.143 - -- infra_kvm_address: 10.167.4.140 -- infra_kvm_node01_address: 10.167.4.141 -- infra_kvm_node02_address: 10.167.4.142 -- infra_kvm_node03_address: 10.167.4.143 -+ infra_kvm_address: 172.16.10.140 -+ infra_kvm_node01_address: 172.16.10.141 -+ infra_kvm_node02_address: 172.16.10.142 -+ infra_kvm_node03_address: 172.16.10.143 - - infra_maas_node01_hostname: mas01 - infra_kvm_node01_hostname: kvm01 -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/init.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/init.yml -index 9c2bc7b..90e0cf3 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/init.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/init.yml -@@ -3,9 +3,9 @@ parameters: - - openstack_version: ocata - -- openstack_gateway_node01_address: 10.167.4.124 -- openstack_gateway_node02_address: 10.167.4.125 -- 
openstack_gateway_node03_address: 10.167.4.126 -+ openstack_gateway_node01_address: 172.16.10.124 -+ openstack_gateway_node02_address: 172.16.10.125 -+ openstack_gateway_node03_address: 172.16.10.126 - openstack_gateway_node01_tenant_address: 10.1.0.6 - openstack_gateway_node02_tenant_address: 10.1.0.7 - openstack_gateway_node03_tenant_address: 10.1.0.9 -@@ -14,21 +14,21 @@ parameters: - openstack_gateway_node03_hostname: gtw03 - - # openstack service addresses -- openstack_proxy_address: 10.167.4.80 -- openstack_proxy_node01_address: 10.167.4.81 -- openstack_proxy_node02_address: 10.167.4.82 -- openstack_control_address: 10.167.4.10 -- openstack_control_node01_address: 10.167.4.11 -- openstack_control_node02_address: 10.167.4.12 -- openstack_control_node03_address: 10.167.4.13 -- openstack_database_address: 10.167.4.50 -- openstack_database_node01_address: 10.167.4.51 -- openstack_database_node02_address: 10.167.4.52 -- openstack_database_node03_address: 10.167.4.53 -- openstack_message_queue_address: 10.167.4.40 -- openstack_message_queue_node01_address: 10.167.4.41 -- openstack_message_queue_node02_address: 10.167.4.42 -- openstack_message_queue_node03_address: 10.167.4.43 -+ openstack_proxy_address: 172.16.10.80 -+ openstack_proxy_node01_address: 172.16.10.81 -+ openstack_proxy_node02_address: 172.16.10.82 -+ openstack_control_address: 172.16.10.10 -+ openstack_control_node01_address: 172.16.10.11 -+ openstack_control_node02_address: 172.16.10.12 -+ openstack_control_node03_address: 172.16.10.13 -+ openstack_database_address: 172.16.10.50 -+ openstack_database_node01_address: 172.16.10.51 -+ openstack_database_node02_address: 172.16.10.52 -+ openstack_database_node03_address: 172.16.10.53 -+ openstack_message_queue_address: 172.16.10.40 -+ openstack_message_queue_node01_address: 172.16.10.41 -+ openstack_message_queue_node02_address: 172.16.10.42 -+ openstack_message_queue_node03_address: 172.16.10.43 - - - openstack_telemetry_hostname: mdb -@@ -36,23 +36,23 @@ parameters: - openstack_telemetry_node02_hostname: mdb02 - openstack_telemetry_node03_hostname: mdb03 - -- openstack_telemetry_address: 10.167.4.75 -- openstack_telemetry_node01_address: 10.167.4.76 -- openstack_telemetry_node02_address: 10.167.4.77 -- openstack_telemetry_node03_address: 10.167.4.78 -+ openstack_telemetry_address: 172.16.10.75 -+ openstack_telemetry_node01_address: 172.16.10.76 -+ openstack_telemetry_node02_address: 172.16.10.77 -+ openstack_telemetry_node03_address: 172.16.10.78 - - # OpenStack Compute -- openstack_compute_node01_single_address: 10.167.4.101 -- openstack_compute_node02_single_address: 10.167.4.102 -- openstack_compute_node03_single_address: 10.167.4.103 -- openstack_compute_node01_control_address: 10.167.4.101 -- openstack_compute_node02_control_address: 10.167.4.102 -- openstack_compute_node03_control_address: 10.167.4.103 -+ openstack_compute_node01_single_address: 172.16.10.101 -+ openstack_compute_node02_single_address: 172.16.10.102 -+ openstack_compute_node03_single_address: 172.16.10.103 -+ openstack_compute_node01_control_address: 172.16.10.101 -+ openstack_compute_node02_control_address: 172.16.10.102 -+ openstack_compute_node03_control_address: 172.16.10.103 - openstack_compute_node01_tenant_address: 10.1.0.101 - openstack_compute_node02_tenant_address: 10.1.0.102 - openstack_compute_node03_tenant_address: 10.1.0.103 -- openstack_compute_node01_external_address: 172.30.10.2 -- openstack_compute_node02_external_address: 172.30.10.3 -+ openstack_compute_node01_external_address: 
10.0.8.101 -+ openstack_compute_node02_external_address: 10.0.8.102 - - # openstack service hostnames - openstack_proxy_hostname: prx diff --git a/patches/opnfv-fuel/0012-classes-baremetal-arm-pod3-config-hardcode.patch b/patches/opnfv-fuel/0012-classes-baremetal-arm-pod3-config-hardcode.patch deleted file mode 100644 index 229ea3a0..00000000 --- a/patches/opnfv-fuel/0012-classes-baremetal-arm-pod3-config-hardcode.patch +++ /dev/null @@ -1,95 +0,0 @@ -From: Alexandru Avadanii -Date: Thu, 10 Aug 2017 18:32:37 +0200 -Subject: [PATCH] classes: baremetal: arm-pod3 config hardcode - -Populate MaaS configuration with node information for arm-pod3. - -FIXME: Read all this info from PDF (Pod Descriptor File) later. - -Signed-off-by: Alexandru Avadanii ---- - .../baremetal-mcp-ocata-ovs-ha/infra/maas.yml | 40 +++++++++++----------- - 1 file changed, 20 insertions(+), 20 deletions(-) - -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml -index 73215ee..26115fe 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml -@@ -48,57 +48,57 @@ parameters: - machines: - kvm01: - interface: -- mac: "00:25:b5:a0:00:2a" -+ mac: "e0:ff:f7:00:08:ae" - power_parameters: -- power_address: "172.30.8.75" -- power_password: "octopus" -+ power_address: "10.0.8.3" -+ power_password: "admin" - power_type: ipmi - power_user: "admin" -- architecture: 'amd64/generic' -+ architecture: 'arm64/generic' - distro_series: xenial - hwe_kernel: hwe-16.04 - kvm02: - interface: -- mac: "00:25:b5:a0:00:3a" -+ mac: "e0:ff:f7:00:08:b1" - power_parameters: -- power_address: "172.30.8.65" -- power_password: "octopus" -+ power_address: "10.0.8.4" -+ power_password: "admin" - power_type: ipmi - power_user: "admin" -- architecture: 'amd64/generic' -+ architecture: 'arm64/generic' - distro_series: xenial - hwe_kernel: hwe-16.04 - kvm03: - interface: -- mac: "00:25:b5:a0:00:4a" -+ mac: "e0:ff:f7:00:08:bd" - power_parameters: -- power_address: "172.30.8.74" -- power_password: "octopus" -+ power_address: "10.0.8.5" -+ power_password: "admin" - power_type: ipmi - power_user: "admin" -- architecture: 'amd64/generic' -+ architecture: 'arm64/generic' - distro_series: xenial - hwe_kernel: hwe-16.04 - cmp001: - interface: -- mac: "00:25:b5:a0:00:5a" -+ mac: "e0:ff:f7:00:08:c6" - power_parameters: -- power_address: "172.30.8.73" -- power_password: "octopus" -+ power_address: "10.0.8.6" -+ power_password: "admin" - power_type: ipmi - power_user: "admin" -- architecture: 'amd64/generic' -+ architecture: 'arm64/generic' - distro_series: xenial - hwe_kernel: hwe-16.04 - cmp002: - interface: -- mac: "00:25:b5:a0:00:6a" -+ mac: "e0:ff:f7:00:08:cf" - power_parameters: -- power_address: "172.30.8.72" -- power_password: "octopus" -+ power_address: "10.0.8.7" -+ power_password: "admin" - power_type: ipmi - power_user: "admin" -- architecture: 'amd64/generic' -+ architecture: 'arm64/generic' - distro_series: xenial - hwe_kernel: hwe-16.04 - linux: diff --git a/patches/opnfv-fuel/0012-maas-boot-resources-Add-arm64-architecture.patch b/patches/opnfv-fuel/0012-maas-boot-resources-Add-arm64-architecture.patch new file mode 100644 index 00000000..517dd232 --- /dev/null +++ b/patches/opnfv-fuel/0012-maas-boot-resources-Add-arm64-architecture.patch @@ -0,0 +1,31 @@ +From: Alexandru Avadanii +Date: Fri, 11 Aug 2017 00:07:29 +0200 +Subject: [PATCH] maas: 
boot-resources: Add arm64 architecture + +Signed-off-by: Alexandru Avadanii +--- + mcp/patches/0003-maas-region-force-artifact-download.patch | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/mcp/patches/0003-maas-region-force-artifact-download.patch b/mcp/patches/0003-maas-region-force-artifact-download.patch +index f0ce50f..9997552 100644 +--- a/mcp/patches/0003-maas-region-force-artifact-download.patch ++++ b/mcp/patches/0003-maas-region-force-artifact-download.patch +@@ -42,7 +42,7 @@ diff --git a/maas/files/maas-artifact-sync.sh b/maas/files/maas-artifact-sync.sh + new file mode 100644 + --- /dev/null + +++ b/maas/files/maas-artifact-sync.sh +-@@ -0,0 +1,21 @@ ++@@ -0,0 +1,26 @@ + +{%- from "maas/map.jinja" import region with context %} + +#!/bin/bash + +function wait_for { +@@ -64,3 +64,8 @@ new file mode 100644 + +wait_for 90 "! maas opnfv boot-resources is-importing | grep -q 'true'" + +maas opnfv rack-controllers import-boot-images || exit 3 + +wait_for 30 "test -d /var/lib/maas/boot-resources/current/ubuntu/amd64" +++maas opnfv boot-source-selection update 1 1 arches='amd64' arches='arm64' || exit 4 +++maas opnfv boot-resources import || exit 2 +++wait_for 90 "! maas opnfv boot-resources is-importing | grep -q 'true'" +++maas opnfv rack-controllers import-boot-images || exit 3 +++wait_for 30 "test -d /var/lib/maas/boot-resources/current/ubuntu/arm64" diff --git a/patches/opnfv-fuel/0013-libvirt-Use-libvirt-unix_sock_group.patch b/patches/opnfv-fuel/0013-libvirt-Use-libvirt-unix_sock_group.patch new file mode 100644 index 00000000..de3b16bd --- /dev/null +++ b/patches/opnfv-fuel/0013-libvirt-Use-libvirt-unix_sock_group.patch @@ -0,0 +1,50 @@ +From: Alexandru Avadanii +Date: Sun, 20 Aug 2017 18:18:53 +0200 +Subject: [PATCH] libvirt: Use "libvirt" unix_sock_group + +On AArch64, libvirt 3.x is used, which uses "libvirt" by default, +so change old "libvirtd" in . + +Signed-off-by: Alexandru Avadanii +--- + ...libvirt-unix_sock_group-s-libvirtd-libvirt.patch | 21 +++++++++++++++++++++ + mcp/patches/patches.list | 1 + + 2 files changed, 22 insertions(+) + create mode 100644 mcp/patches/0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch + +diff --git a/mcp/patches/0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch b/mcp/patches/0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch +new file mode 100644 +index 0000000..003b249 +--- /dev/null ++++ b/mcp/patches/0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch +@@ -0,0 +1,21 @@ ++From: Alexandru Avadanii ++Date: Sun, 20 Aug 2017 02:03:01 +0200 ++Subject: [PATCH] libvirt: unix_sock_group: s/libvirtd/libvirt/ ++ ++On AArch64, libvirt 3.x is used, which uses "libvirt" by default. ++ ++Signed-off-by: Alexandru Avadanii ++--- ++ ++diff --git a/libvirt/files/libvirtd.conf.Debian b/libvirt/files/libvirtd.conf.Debian ++--- a/libvirt/files/libvirtd.conf.Debian +++++ b/libvirt/files/libvirtd.conf.Debian ++@@ -81,7 +81,7 @@ ++ # without becoming root. ++ # ++ # This is restricted to 'root' by default. ++-unix_sock_group = "libvirtd" +++unix_sock_group = "libvirt" ++ ++ # Set the UNIX socket permissions for the R/O socket. 
This is used ++ # for monitoring VM status only +diff --git a/mcp/patches/patches.list b/mcp/patches/patches.list +index c66ecb0..a42f513 100644 +--- a/mcp/patches/patches.list ++++ b/mcp/patches/patches.list +@@ -6,3 +6,4 @@ + /usr/share/salt-formulas/env: 0006-maas-module-Add-VLAN-DHCP-enable-support.patch + /usr/share/salt-formulas/env: 0007-linux.network.interface-noifupdown-support.patch + /usr/share/salt-formulas/env: 0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch ++/usr/share/salt-formulas/env: 0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch diff --git a/patches/opnfv-fuel/0013-maas-boot-resources-Add-arm64-architecture.patch b/patches/opnfv-fuel/0013-maas-boot-resources-Add-arm64-architecture.patch deleted file mode 100644 index 517dd232..00000000 --- a/patches/opnfv-fuel/0013-maas-boot-resources-Add-arm64-architecture.patch +++ /dev/null @@ -1,31 +0,0 @@ -From: Alexandru Avadanii -Date: Fri, 11 Aug 2017 00:07:29 +0200 -Subject: [PATCH] maas: boot-resources: Add arm64 architecture - -Signed-off-by: Alexandru Avadanii ---- - mcp/patches/0003-maas-region-force-artifact-download.patch | 7 ++++++- - 1 file changed, 6 insertions(+), 1 deletion(-) - -diff --git a/mcp/patches/0003-maas-region-force-artifact-download.patch b/mcp/patches/0003-maas-region-force-artifact-download.patch -index f0ce50f..9997552 100644 ---- a/mcp/patches/0003-maas-region-force-artifact-download.patch -+++ b/mcp/patches/0003-maas-region-force-artifact-download.patch -@@ -42,7 +42,7 @@ diff --git a/maas/files/maas-artifact-sync.sh b/maas/files/maas-artifact-sync.sh - new file mode 100644 - --- /dev/null - +++ b/maas/files/maas-artifact-sync.sh --@@ -0,0 +1,21 @@ -+@@ -0,0 +1,26 @@ - +{%- from "maas/map.jinja" import region with context %} - +#!/bin/bash - +function wait_for { -@@ -64,3 +64,8 @@ new file mode 100644 - +wait_for 90 "! maas opnfv boot-resources is-importing | grep -q 'true'" - +maas opnfv rack-controllers import-boot-images || exit 3 - +wait_for 30 "test -d /var/lib/maas/boot-resources/current/ubuntu/amd64" -++maas opnfv boot-source-selection update 1 1 arches='amd64' arches='arm64' || exit 4 -++maas opnfv boot-resources import || exit 2 -++wait_for 90 "! maas opnfv boot-resources is-importing | grep -q 'true'" -++maas opnfv rack-controllers import-boot-images || exit 3 -++wait_for 30 "test -d /var/lib/maas/boot-resources/current/ubuntu/arm64" diff --git a/patches/opnfv-fuel/0014-libvirt-Use-libvirt-unix_sock_group.patch b/patches/opnfv-fuel/0014-libvirt-Use-libvirt-unix_sock_group.patch deleted file mode 100644 index de3b16bd..00000000 --- a/patches/opnfv-fuel/0014-libvirt-Use-libvirt-unix_sock_group.patch +++ /dev/null @@ -1,50 +0,0 @@ -From: Alexandru Avadanii -Date: Sun, 20 Aug 2017 18:18:53 +0200 -Subject: [PATCH] libvirt: Use "libvirt" unix_sock_group - -On AArch64, libvirt 3.x is used, which uses "libvirt" by default, -so change old "libvirtd" in . 
- -Signed-off-by: Alexandru Avadanii ---- - ...libvirt-unix_sock_group-s-libvirtd-libvirt.patch | 21 +++++++++++++++++++++ - mcp/patches/patches.list | 1 + - 2 files changed, 22 insertions(+) - create mode 100644 mcp/patches/0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch - -diff --git a/mcp/patches/0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch b/mcp/patches/0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch -new file mode 100644 -index 0000000..003b249 ---- /dev/null -+++ b/mcp/patches/0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch -@@ -0,0 +1,21 @@ -+From: Alexandru Avadanii -+Date: Sun, 20 Aug 2017 02:03:01 +0200 -+Subject: [PATCH] libvirt: unix_sock_group: s/libvirtd/libvirt/ -+ -+On AArch64, libvirt 3.x is used, which uses "libvirt" by default. -+ -+Signed-off-by: Alexandru Avadanii -+--- -+ -+diff --git a/libvirt/files/libvirtd.conf.Debian b/libvirt/files/libvirtd.conf.Debian -+--- a/libvirt/files/libvirtd.conf.Debian -++++ b/libvirt/files/libvirtd.conf.Debian -+@@ -81,7 +81,7 @@ -+ # without becoming root. -+ # -+ # This is restricted to 'root' by default. -+-unix_sock_group = "libvirtd" -++unix_sock_group = "libvirt" -+ -+ # Set the UNIX socket permissions for the R/O socket. This is used -+ # for monitoring VM status only -diff --git a/mcp/patches/patches.list b/mcp/patches/patches.list -index c66ecb0..a42f513 100644 ---- a/mcp/patches/patches.list -+++ b/mcp/patches/patches.list -@@ -6,3 +6,4 @@ - /usr/share/salt-formulas/env: 0006-maas-module-Add-VLAN-DHCP-enable-support.patch - /usr/share/salt-formulas/env: 0007-linux.network.interface-noifupdown-support.patch - /usr/share/salt-formulas/env: 0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch -+/usr/share/salt-formulas/env: 0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch diff --git a/patches/opnfv-fuel/0014-mcp-salt-formulas-armband-Extend-libvirt_domain.patch b/patches/opnfv-fuel/0014-mcp-salt-formulas-armband-Extend-libvirt_domain.patch new file mode 100644 index 00000000..b1e675b2 --- /dev/null +++ b/patches/opnfv-fuel/0014-mcp-salt-formulas-armband-Extend-libvirt_domain.patch @@ -0,0 +1,97 @@ +From: Alexandru Avadanii +Date: Sun, 20 Aug 2017 22:41:26 +0200 +Subject: [PATCH] mcp: salt-formulas: armband: Extend libvirt_domain + +Add new state in armband salt formula that extends salt's virt +libvirt_domain.jinja template with support for: +- hw_firmware_type; +- virt_machine_model; +- cpu_model; + +These will later be leveraged via salt virt formula with AArch64 +specific values. + +Signed-off-by: Alexandru Avadanii +--- + mcp/config/states/maas | 2 +- + .../armband/files/libvirt_domain.jinja.diff | 46 ++++++++++++++++++++++ + .../armband/libvirt_domain_template.sls | 4 ++ + 3 files changed, 51 insertions(+), 1 deletion(-) + create mode 100644 mcp/salt-formulas/armband/files/libvirt_domain.jinja.diff + create mode 100644 mcp/salt-formulas/armband/libvirt_domain_template.sls + +diff --git a/mcp/config/states/maas b/mcp/config/states/maas +index 46bad6f..9fe7de0 100755 +--- a/mcp/config/states/maas ++++ b/mcp/config/states/maas +@@ -51,7 +51,7 @@ wait_for "! 
salt '*' test.ping | tee /dev/stderr | fgrep -q 'Not connected'" + + salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp + +-salt -C 'kvm*' state.sls armband ++salt -C 'kvm*' state.sls armband,armband.libvirt_domain_template + salt -C 'kvm*' state.sls libvirt + + salt -C '* and not cfg01* and not mas01*' state.apply salt +diff --git a/mcp/salt-formulas/armband/files/libvirt_domain.jinja.diff b/mcp/salt-formulas/armband/files/libvirt_domain.jinja.diff +new file mode 100644 +index 0000000..4e7fad5 +--- /dev/null ++++ b/mcp/salt-formulas/armband/files/libvirt_domain.jinja.diff +@@ -0,0 +1,46 @@ ++From: Alexandru Avadanii ++Date: Sun Aug 20 18:18:53 2017 +0200 ++Subject: [PATCH] libvirt_domain.jinja: Add AArch64 support ++ ++Salt virt state relies on a Jinja template to create a libvirt ++XML definition for each new VM. ++This template needs to be extended with a few specific options ++for AArch64: ++- UEFI loader support (pflash); ++- custom machine model (e.g. 'virt-2.9'), since AArch64 defaults to ++ 'integratorcp'; ++- custom cpu model; ++ ++Allow all these to be parametrized from the salt virt formula, ++which we will also adapt to allow the params to be passed via our ++reclass model. ++ ++Signed-off-by: Alexandru Avadanii ++--- ++ ++diff --git a/usr/lib/python2.7/dist-packages/salt/templates/virt/libvirt_domain.jinja b/usr/lib/python2.7/dist-packages/salt/templates/virt/libvirt_domain.jinja ++--- a/usr/lib/python2.7/dist-packages/salt/templates/virt/libvirt_domain.jinja +++++ b/usr/lib/python2.7/dist-packages/salt/templates/virt/libvirt_domain.jinja ++@@ -3,11 +3,22 @@ ++ {{ cpu }} ++ {{ mem }} ++ +++ {% if custom_virt_machine %} +++ hvm +++ {% else %} ++ hvm +++ {% endif %} +++ {% if os_loader_type == 'pflash' %} +++ {{ os_loader }} +++ {{ os_loader_nvram }} +++ {% endif %} ++ {% for dev in boot_dev %} ++ ++ {% endfor %} ++ +++ {% if cpu_mode == 'custom' %} +++ +++ {% endif %} ++ ++ {% for diskname, disk in disks.items() %} ++ +diff --git a/mcp/salt-formulas/armband/libvirt_domain_template.sls b/mcp/salt-formulas/armband/libvirt_domain_template.sls +new file mode 100644 +index 0000000..7cc8e22 +--- /dev/null ++++ b/mcp/salt-formulas/armband/libvirt_domain_template.sls +@@ -0,0 +1,4 @@ ++/usr/lib/python2.7/dist-packages/salt/templates/virt/libvirt_domain.jinja: ++ file.patch: ++ - source: salt://armband/files/libvirt_domain.jinja.diff ++ - hash: "9d02cd8aa04497c2d75c72840a5c6c41" diff --git a/patches/opnfv-fuel/0015-mcp-salt-formulas-armband-Extend-libvirt_domain.patch b/patches/opnfv-fuel/0015-mcp-salt-formulas-armband-Extend-libvirt_domain.patch deleted file mode 100644 index dda55e42..00000000 --- a/patches/opnfv-fuel/0015-mcp-salt-formulas-armband-Extend-libvirt_domain.patch +++ /dev/null @@ -1,97 +0,0 @@ -From: Alexandru Avadanii -Date: Sun, 20 Aug 2017 22:41:26 +0200 -Subject: [PATCH] mcp: salt-formulas: armband: Extend libvirt_domain - -Add new state in armband salt formula that extends salt's virt -libvirt_domain.jinja template with support for: -- hw_firmware_type; -- virt_machine_model; -- cpu_model; - -These will later be leveraged via salt virt formula with AArch64 -specific values. 
- -Signed-off-by: Alexandru Avadanii ---- - mcp/config/states/maas | 2 +- - .../armband/files/libvirt_domain.jinja.diff | 46 ++++++++++++++++++++++ - .../armband/libvirt_domain_template.sls | 4 ++ - 3 files changed, 51 insertions(+), 1 deletion(-) - create mode 100644 mcp/salt-formulas/armband/files/libvirt_domain.jinja.diff - create mode 100644 mcp/salt-formulas/armband/libvirt_domain_template.sls - -diff --git a/mcp/config/states/maas b/mcp/config/states/maas -index f83f4a7..bc8d04f 100755 ---- a/mcp/config/states/maas -+++ b/mcp/config/states/maas -@@ -51,7 +51,7 @@ wait_for "! salt '*' test.ping | tee /dev/stderr | fgrep -q 'Not connected'" - - salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp - --salt -C 'kvm*' state.sls armband -+salt -C 'kvm*' state.sls armband,armband.libvirt_domain_template - salt -C 'kvm*' state.sls libvirt - - salt -C '* and not cfg01* and not mas01*' state.apply salt -diff --git a/mcp/salt-formulas/armband/files/libvirt_domain.jinja.diff b/mcp/salt-formulas/armband/files/libvirt_domain.jinja.diff -new file mode 100644 -index 0000000..4e7fad5 ---- /dev/null -+++ b/mcp/salt-formulas/armband/files/libvirt_domain.jinja.diff -@@ -0,0 +1,46 @@ -+From: Alexandru Avadanii -+Date: Sun Aug 20 18:18:53 2017 +0200 -+Subject: [PATCH] libvirt_domain.jinja: Add AArch64 support -+ -+Salt virt state relies on a Jinja template to create a libvirt -+XML definition for each new VM. -+This template needs to be extended with a few specific options -+for AArch64: -+- UEFI loader support (pflash); -+- custom machine model (e.g. 'virt-2.9'), since AArch64 defaults to -+ 'integratorcp'; -+- custom cpu model; -+ -+Allow all these to be parametrized from the salt virt formula, -+which we will also adapt to allow the params to be passed via our -+reclass model. 
-+ -+Signed-off-by: Alexandru Avadanii -+--- -+ -+diff --git a/usr/lib/python2.7/dist-packages/salt/templates/virt/libvirt_domain.jinja b/usr/lib/python2.7/dist-packages/salt/templates/virt/libvirt_domain.jinja -+--- a/usr/lib/python2.7/dist-packages/salt/templates/virt/libvirt_domain.jinja -++++ b/usr/lib/python2.7/dist-packages/salt/templates/virt/libvirt_domain.jinja -+@@ -3,11 +3,22 @@ -+ {{ cpu }} -+ {{ mem }} -+ -++ {% if custom_virt_machine %} -++ hvm -++ {% else %} -+ hvm -++ {% endif %} -++ {% if os_loader_type == 'pflash' %} -++ {{ os_loader }} -++ {{ os_loader_nvram }} -++ {% endif %} -+ {% for dev in boot_dev %} -+ -+ {% endfor %} -+ -++ {% if cpu_mode == 'custom' %} -++ -++ {% endif %} -+ -+ {% for diskname, disk in disks.items() %} -+ -diff --git a/mcp/salt-formulas/armband/libvirt_domain_template.sls b/mcp/salt-formulas/armband/libvirt_domain_template.sls -new file mode 100644 -index 0000000..7cc8e22 ---- /dev/null -+++ b/mcp/salt-formulas/armband/libvirt_domain_template.sls -@@ -0,0 +1,4 @@ -+/usr/lib/python2.7/dist-packages/salt/templates/virt/libvirt_domain.jinja: -+ file.patch: -+ - source: salt://armband/files/libvirt_domain.jinja.diff -+ - hash: "9d02cd8aa04497c2d75c72840a5c6c41" diff --git a/patches/opnfv-fuel/0015-virtng.py-virt.sls-Extend-libvirt_domain.patch b/patches/opnfv-fuel/0015-virtng.py-virt.sls-Extend-libvirt_domain.patch new file mode 100644 index 00000000..1a64c6d6 --- /dev/null +++ b/patches/opnfv-fuel/0015-virtng.py-virt.sls-Extend-libvirt_domain.patch @@ -0,0 +1,198 @@ +From: Alexandru Avadanii +Date: Mon, 21 Aug 2017 01:10:16 +0200 +Subject: [PATCH] virtng.py, virt.sls: Extend libvirt_domain + +Extend _modules/virtng.py, salt/control/virt.sls with support for: +- hw_firmware_type; +- virt_machine_model; +- cpu_model; + +This functionality relies on the corresponding changes to be +implemented in libvirt_domain.jinja template. + +These will later be leveraged via our reclass model with AArch64 +specific values. + +Signed-off-by: Alexandru Avadanii +--- + .../0103-virtng-module-Extend-libvirt_domain.patch | 54 ++++++++++++++++++++++ + ...4-salt-control-virt-Extend-libvirt_domain.patch | 51 ++++++++++++++++++++ + mcp/patches/patches.list | 2 + + .../baremetal-mcp-ocata-ovs-ha/infra/kvm.yml | 15 ++++++ + 4 files changed, 122 insertions(+) + create mode 100644 mcp/patches/0103-virtng-module-Extend-libvirt_domain.patch + create mode 100644 mcp/patches/0104-salt-control-virt-Extend-libvirt_domain.patch + +diff --git a/mcp/patches/0103-virtng-module-Extend-libvirt_domain.patch b/mcp/patches/0103-virtng-module-Extend-libvirt_domain.patch +new file mode 100644 +index 0000000..706b67d +--- /dev/null ++++ b/mcp/patches/0103-virtng-module-Extend-libvirt_domain.patch +@@ -0,0 +1,54 @@ ++From: Alexandru Avadanii ++Date: Mon, 21 Aug 2017 02:03:01 +0200 ++Subject: [PATCH] virtng: module: Extend libvirt_domain ++ ++Extend virtng.py with support for passing down new params: ++- hw_firmware_type; ++- virt_machine_model; ++- cpu_model; ++ ++This functionality relies on the corresponding changes to be ++implemented in libvirt_domain.jinja template. ++ ++These will later be leveraged via salt virt formula with AArch64 ++specific values. 
++ ++Signed-off-by: Alexandru Avadanii ++--- ++ ++diff --git a/_modules/virtng.py b/_modules/virtng.py ++--- a/_modules/virtng.py +++++ b/_modules/virtng.py ++@@ -56,6 +56,10 @@ ++ ++ VIRT_DEFAULT_HYPER = 'kvm' ++ +++DEFAULT_UEFI_LOADER_PATH = { +++ "x86_64": "/usr/share/OVMF/OVMF_CODE.fd", +++ "aarch64": "/usr/share/AAVMF/AAVMF_CODE.fd" +++} ++ ++ def __virtual__(): ++ if not HAS_ALL_IMPORTS: ++@@ -227,6 +231,21 @@ ++ # TODO: make bus and model parameterized, this works for 64-bit Linux ++ context['controller_model'] = 'lsilogic' ++ +++ # TODO: limit cpu_model, hw_firmware_type, virt_machine_type to qemu/kvm +++ # FIXME: parametrize hardcoded path for NVRAM storage +++ if 'hw_firmware_type' in kwargs and kwargs['hw_firmware_type'] == 'uefi': +++ context['os_loader_type'] = 'pflash' +++ context['os_loader'] = DEFAULT_UEFI_LOADER_PATH[os.uname()[-1]] +++ context['os_loader_nvram'] = '/var/lib/libvirt/qemu/nvram/{0}_VARS.fd'.format(name) +++ +++ if 'virt_machine_model' in kwargs: +++ context['custom_virt_machine'] = True +++ context['virt_machine_model'] = kwargs['virt_machine_model'] +++ +++ if 'cpu_model' in kwargs: +++ context['cpu_mode'] = 'custom' +++ context['cpu_model'] = kwargs['cpu_model'] +++ ++ if 'boot_dev' in kwargs: ++ context['boot_dev'] = [] ++ for dev in kwargs['boot_dev'].split(): +diff --git a/mcp/patches/0104-salt-control-virt-Extend-libvirt_domain.patch b/mcp/patches/0104-salt-control-virt-Extend-libvirt_domain.patch +new file mode 100644 +index 0000000..a9fe11b +--- /dev/null ++++ b/mcp/patches/0104-salt-control-virt-Extend-libvirt_domain.patch +@@ -0,0 +1,51 @@ ++From: Alexandru Avadanii ++Date: Mon, 21 Aug 2017 02:03:01 +0200 ++Subject: [PATCH] salt: control: virt: Extend libvirt_domain ++ ++Extend salt/control/virt.sls with support for new params: ++- hw_firmware_type; ++- virt_machine_model; ++- cpu_model; ++ ++This functionality relies on the corresponding changes to be ++implemented in libvirt_domain.jinja template, as well as in ++salt custom py module virtng.py. ++ ++These will later be leveraged via reclass model with AArch64 ++specific values. ++ ++FIXME: Move all new params to different section, as "size" is ++definitely not the best choice. 
++ ++Example reclass model usage: ++salt: ++ control: ++ size: ++ openstack.example_vm_type: ++ hw_firmware_type: uefi ++ virt_machine_model: virt ++ cpu_model: host-passthrough ++ ++Signed-off-by: Alexandru Avadanii ++--- ++ ++diff --git a/salt/control/virt.sls b/salt/control/virt.sls ++--- a/salt/control/virt.sls +++++ b/salt/control/virt.sls ++@@ -44,6 +44,16 @@ ++ seed: True ++ serial_type: pty ++ console: True +++ # FIXME(armband): Move these 3 params to different section +++ {%- if size.hw_firmware_type is defined %} +++ hw_firmware_type: {{ size.hw_firmware_type }} +++ {%- endif %} +++ {%- if size.virt_machine_model is defined %} +++ virt_machine_model: {{ size.virt_machine_model }} +++ {%- endif %} +++ {%- if size.cpu_model is defined %} +++ cpu_model: {{ size.cpu_model }} +++ {%- endif %} ++ - unless: virsh list --all --name| grep -E "^{{ node_name }}.{{ cluster.domain }}$" ++ ++ #salt_control_seed_{{ cluster_name }}_{{ node_name }}: +diff --git a/mcp/patches/patches.list b/mcp/patches/patches.list +index a42f513..67b9c82 100644 +--- a/mcp/patches/patches.list ++++ b/mcp/patches/patches.list +@@ -7,3 +7,5 @@ + /usr/share/salt-formulas/env: 0007-linux.network.interface-noifupdown-support.patch + /usr/share/salt-formulas/env: 0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch + /usr/share/salt-formulas/env: 0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch ++/usr/share/salt-formulas/env: 0103-virtng-module-Extend-libvirt_domain.patch ++/usr/share/salt-formulas/env: 0104-salt-control-virt-Extend-libvirt_domain.patch +diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml +index 8677a79..914908d 100644 +--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml ++++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml +@@ -34,26 +34,41 @@ parameters: + openstack.control: + cpu: 6 + ram: 8192 ++ hw_firmware_type: uefi ++ virt_machine_model: virt ++ cpu_model: host-passthrough + disk_profile: small + net_profile: default + openstack.database: + cpu: 6 + ram: 8192 ++ hw_firmware_type: uefi ++ virt_machine_model: virt ++ cpu_model: host-passthrough + disk_profile: large + net_profile: default + openstack.message_queue: + cpu: 6 + ram: 8192 ++ hw_firmware_type: uefi ++ virt_machine_model: virt ++ cpu_model: host-passthrough + disk_profile: small + net_profile: default + openstack.telemetry: + cpu: 4 + ram: 4096 ++ hw_firmware_type: uefi ++ virt_machine_model: virt ++ cpu_model: host-passthrough + disk_profile: xxlarge + net_profile: default + openstack.proxy: + cpu: 4 + ram: 4096 ++ hw_firmware_type: uefi ++ virt_machine_model: virt ++ cpu_model: host-passthrough + disk_profile: small + net_profile: default + # stacklight.log: diff --git a/patches/opnfv-fuel/0016-mcp-salt-formulas-armband-AArch64-bootstrap.patch b/patches/opnfv-fuel/0016-mcp-salt-formulas-armband-AArch64-bootstrap.patch new file mode 100644 index 00000000..dc8993fc --- /dev/null +++ b/patches/opnfv-fuel/0016-mcp-salt-formulas-armband-AArch64-bootstrap.patch @@ -0,0 +1,82 @@ +From: Alexandru Avadanii +Date: Mon, 21 Aug 2017 20:53:03 +0200 +Subject: [PATCH] mcp: salt-formulas: armband: AArch64 bootstrap + +Recent changes in salt bootstrap script from [1] whitelist a +fixed pool of known architectures. Add "arm64" to that list on the +fly, as part of `config.gather_bootstrap_script`. 
+ +NOTE: This change will be leveraged by passing a custom DEB repo to +the bootstrap script with `-R linux.enea.com/saltstack`. + +NOTE: After running this new state, salt-minion should be restarted +to pick up the changes, so we'll run it before rebooting kvm nodes. + +[1] http://bootstrap.saltstack.com + +Signed-off-by: Alexandru Avadanii +--- + mcp/config/states/maas | 1 + + .../armband/bootstrap_script_arm64.sls | 4 +++ + mcp/salt-formulas/armband/files/cloud.py.diff | 29 ++++++++++++++++++++++ + 3 files changed, 34 insertions(+) + create mode 100644 mcp/salt-formulas/armband/bootstrap_script_arm64.sls + create mode 100644 mcp/salt-formulas/armband/files/cloud.py.diff + +diff --git a/mcp/config/states/maas b/mcp/config/states/maas +index 9fe7de0..0cf4f68 100755 +--- a/mcp/config/states/maas ++++ b/mcp/config/states/maas +@@ -46,6 +46,7 @@ salt -C '* and not cfg01* and not mas01*' saltutil.sync_all + + salt -C 'kvm*' pkg.install bridge-utils + salt -C 'kvm*' state.apply linux.network ++salt -C 'kvm*' state.apply armband.bootstrap_script_arm64 + salt -C 'kvm*' system.reboot + wait_for "! salt '*' test.ping | tee /dev/stderr | fgrep -q 'Not connected'" + +diff --git a/mcp/salt-formulas/armband/bootstrap_script_arm64.sls b/mcp/salt-formulas/armband/bootstrap_script_arm64.sls +new file mode 100644 +index 0000000..845dedd +--- /dev/null ++++ b/mcp/salt-formulas/armband/bootstrap_script_arm64.sls +@@ -0,0 +1,4 @@ ++/usr/lib/python2.7/dist-packages/salt/utils/cloud.py: ++ file.patch: ++ - source: salt://armband/files/cloud.py.diff ++ - hash: "ecd450b187156c1f6a91ea272fd668b0" +diff --git a/mcp/salt-formulas/armband/files/cloud.py.diff b/mcp/salt-formulas/armband/files/cloud.py.diff +new file mode 100644 +index 0000000..75c3281 +--- /dev/null ++++ b/mcp/salt-formulas/armband/files/cloud.py.diff +@@ -0,0 +1,29 @@ ++From: Alexandru Avadanii ++Date: Sun Aug 20 18:18:53 2017 +0200 ++Subject: [PATCH] cloud.py: Allow AArch64 arch in salt bootstrap ++ ++Recent changes in salt bootstrap script from [1] whitelist a ++fixed pool of known architectures. Add "arm64" to that list on the ++fly, as part of `config.gather_bootstrap_script`. ++ ++NOTE: This change will be leveraged by passing a custom DEB repo to ++the bootstrap script with `-R linux.enea.com/saltstack`. 
++ ++[1] http://bootstrap.saltstack.com ++ ++Signed-off-by: Alexandru Avadanii ++--- ++ ++diff --git a//usr/lib/python2.7/dist-packages/salt/utils/cloud.py b//usr/lib/python2.7/dist-packages/salt/utils/cloud.py ++--- a//usr/lib/python2.7/dist-packages/salt/utils/cloud.py +++++ b//usr/lib/python2.7/dist-packages/salt/utils/cloud.py ++@@ -2772,6 +2772,9 @@ ++ if not script_content: ++ raise ValueError('No content in bootstrap script !') ++ +++ # NOTE(armband): edit bootstrap script on the fly to allow AArch64 +++ script_content = script_content.replace('"amd64")', '"amd64"|"arm64")') +++ ++ # Get the path to the built-in deploy scripts directory ++ builtin_deploy_dir = os.path.join( ++ os.path.dirname(__file__), diff --git a/patches/opnfv-fuel/0016-virtng.py-virt.sls-Extend-libvirt_domain.patch b/patches/opnfv-fuel/0016-virtng.py-virt.sls-Extend-libvirt_domain.patch deleted file mode 100644 index 1a64c6d6..00000000 --- a/patches/opnfv-fuel/0016-virtng.py-virt.sls-Extend-libvirt_domain.patch +++ /dev/null @@ -1,198 +0,0 @@ -From: Alexandru Avadanii -Date: Mon, 21 Aug 2017 01:10:16 +0200 -Subject: [PATCH] virtng.py, virt.sls: Extend libvirt_domain - -Extend _modules/virtng.py, salt/control/virt.sls with support for: -- hw_firmware_type; -- virt_machine_model; -- cpu_model; - -This functionality relies on the corresponding changes to be -implemented in libvirt_domain.jinja template. - -These will later be leveraged via our reclass model with AArch64 -specific values. - -Signed-off-by: Alexandru Avadanii ---- - .../0103-virtng-module-Extend-libvirt_domain.patch | 54 ++++++++++++++++++++++ - ...4-salt-control-virt-Extend-libvirt_domain.patch | 51 ++++++++++++++++++++ - mcp/patches/patches.list | 2 + - .../baremetal-mcp-ocata-ovs-ha/infra/kvm.yml | 15 ++++++ - 4 files changed, 122 insertions(+) - create mode 100644 mcp/patches/0103-virtng-module-Extend-libvirt_domain.patch - create mode 100644 mcp/patches/0104-salt-control-virt-Extend-libvirt_domain.patch - -diff --git a/mcp/patches/0103-virtng-module-Extend-libvirt_domain.patch b/mcp/patches/0103-virtng-module-Extend-libvirt_domain.patch -new file mode 100644 -index 0000000..706b67d ---- /dev/null -+++ b/mcp/patches/0103-virtng-module-Extend-libvirt_domain.patch -@@ -0,0 +1,54 @@ -+From: Alexandru Avadanii -+Date: Mon, 21 Aug 2017 02:03:01 +0200 -+Subject: [PATCH] virtng: module: Extend libvirt_domain -+ -+Extend virtng.py with support for passing down new params: -+- hw_firmware_type; -+- virt_machine_model; -+- cpu_model; -+ -+This functionality relies on the corresponding changes to be -+implemented in libvirt_domain.jinja template. -+ -+These will later be leveraged via salt virt formula with AArch64 -+specific values. 
-+ -+Signed-off-by: Alexandru Avadanii -+--- -+ -+diff --git a/_modules/virtng.py b/_modules/virtng.py -+--- a/_modules/virtng.py -++++ b/_modules/virtng.py -+@@ -56,6 +56,10 @@ -+ -+ VIRT_DEFAULT_HYPER = 'kvm' -+ -++DEFAULT_UEFI_LOADER_PATH = { -++ "x86_64": "/usr/share/OVMF/OVMF_CODE.fd", -++ "aarch64": "/usr/share/AAVMF/AAVMF_CODE.fd" -++} -+ -+ def __virtual__(): -+ if not HAS_ALL_IMPORTS: -+@@ -227,6 +231,21 @@ -+ # TODO: make bus and model parameterized, this works for 64-bit Linux -+ context['controller_model'] = 'lsilogic' -+ -++ # TODO: limit cpu_model, hw_firmware_type, virt_machine_type to qemu/kvm -++ # FIXME: parametrize hardcoded path for NVRAM storage -++ if 'hw_firmware_type' in kwargs and kwargs['hw_firmware_type'] == 'uefi': -++ context['os_loader_type'] = 'pflash' -++ context['os_loader'] = DEFAULT_UEFI_LOADER_PATH[os.uname()[-1]] -++ context['os_loader_nvram'] = '/var/lib/libvirt/qemu/nvram/{0}_VARS.fd'.format(name) -++ -++ if 'virt_machine_model' in kwargs: -++ context['custom_virt_machine'] = True -++ context['virt_machine_model'] = kwargs['virt_machine_model'] -++ -++ if 'cpu_model' in kwargs: -++ context['cpu_mode'] = 'custom' -++ context['cpu_model'] = kwargs['cpu_model'] -++ -+ if 'boot_dev' in kwargs: -+ context['boot_dev'] = [] -+ for dev in kwargs['boot_dev'].split(): -diff --git a/mcp/patches/0104-salt-control-virt-Extend-libvirt_domain.patch b/mcp/patches/0104-salt-control-virt-Extend-libvirt_domain.patch -new file mode 100644 -index 0000000..a9fe11b ---- /dev/null -+++ b/mcp/patches/0104-salt-control-virt-Extend-libvirt_domain.patch -@@ -0,0 +1,51 @@ -+From: Alexandru Avadanii -+Date: Mon, 21 Aug 2017 02:03:01 +0200 -+Subject: [PATCH] salt: control: virt: Extend libvirt_domain -+ -+Extend salt/control/virt.sls with support for new params: -+- hw_firmware_type; -+- virt_machine_model; -+- cpu_model; -+ -+This functionality relies on the corresponding changes to be -+implemented in libvirt_domain.jinja template, as well as in -+salt custom py module virtng.py. -+ -+These will later be leveraged via reclass model with AArch64 -+specific values. -+ -+FIXME: Move all new params to different section, as "size" is -+definitely not the best choice. 
-+ -+Example reclass model usage: -+salt: -+ control: -+ size: -+ openstack.example_vm_type: -+ hw_firmware_type: uefi -+ virt_machine_model: virt -+ cpu_model: host-passthrough -+ -+Signed-off-by: Alexandru Avadanii -+--- -+ -+diff --git a/salt/control/virt.sls b/salt/control/virt.sls -+--- a/salt/control/virt.sls -++++ b/salt/control/virt.sls -+@@ -44,6 +44,16 @@ -+ seed: True -+ serial_type: pty -+ console: True -++ # FIXME(armband): Move these 3 params to different section -++ {%- if size.hw_firmware_type is defined %} -++ hw_firmware_type: {{ size.hw_firmware_type }} -++ {%- endif %} -++ {%- if size.virt_machine_model is defined %} -++ virt_machine_model: {{ size.virt_machine_model }} -++ {%- endif %} -++ {%- if size.cpu_model is defined %} -++ cpu_model: {{ size.cpu_model }} -++ {%- endif %} -+ - unless: virsh list --all --name| grep -E "^{{ node_name }}.{{ cluster.domain }}$" -+ -+ #salt_control_seed_{{ cluster_name }}_{{ node_name }}: -diff --git a/mcp/patches/patches.list b/mcp/patches/patches.list -index a42f513..67b9c82 100644 ---- a/mcp/patches/patches.list -+++ b/mcp/patches/patches.list -@@ -7,3 +7,5 @@ - /usr/share/salt-formulas/env: 0007-linux.network.interface-noifupdown-support.patch - /usr/share/salt-formulas/env: 0101-maas-Add-curtin_userdata_arm64_generic_xenial.patch - /usr/share/salt-formulas/env: 0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch -+/usr/share/salt-formulas/env: 0103-virtng-module-Extend-libvirt_domain.patch -+/usr/share/salt-formulas/env: 0104-salt-control-virt-Extend-libvirt_domain.patch -diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml -index 8677a79..914908d 100644 ---- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml -+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml -@@ -34,26 +34,41 @@ parameters: - openstack.control: - cpu: 6 - ram: 8192 -+ hw_firmware_type: uefi -+ virt_machine_model: virt -+ cpu_model: host-passthrough - disk_profile: small - net_profile: default - openstack.database: - cpu: 6 - ram: 8192 -+ hw_firmware_type: uefi -+ virt_machine_model: virt -+ cpu_model: host-passthrough - disk_profile: large - net_profile: default - openstack.message_queue: - cpu: 6 - ram: 8192 -+ hw_firmware_type: uefi -+ virt_machine_model: virt -+ cpu_model: host-passthrough - disk_profile: small - net_profile: default - openstack.telemetry: - cpu: 4 - ram: 4096 -+ hw_firmware_type: uefi -+ virt_machine_model: virt -+ cpu_model: host-passthrough - disk_profile: xxlarge - net_profile: default - openstack.proxy: - cpu: 4 - ram: 4096 -+ hw_firmware_type: uefi -+ virt_machine_model: virt -+ cpu_model: host-passthrough - disk_profile: small - net_profile: default - # stacklight.log: diff --git a/patches/opnfv-fuel/0017-mcp-salt-formulas-armband-AArch64-bootstrap.patch b/patches/opnfv-fuel/0017-mcp-salt-formulas-armband-AArch64-bootstrap.patch deleted file mode 100644 index febf0943..00000000 --- a/patches/opnfv-fuel/0017-mcp-salt-formulas-armband-AArch64-bootstrap.patch +++ /dev/null @@ -1,82 +0,0 @@ -From: Alexandru Avadanii -Date: Mon, 21 Aug 2017 20:53:03 +0200 -Subject: [PATCH] mcp: salt-formulas: armband: AArch64 bootstrap - -Recent changes in salt bootstrap script from [1] whitelist a -fixed pool of known architectures. Add "arm64" to that list on the -fly, as part of `config.gather_bootstrap_script`. 
- -NOTE: This change will be leveraged by passing a custom DEB repo to -the bootstrap script with `-R linux.enea.com/saltstack`. - -NOTE: After running this new state, salt-minion should be restarted -to pick up the changes, so we'll run it before rebooting kvm nodes. - -[1] http://bootstrap.saltstack.com - -Signed-off-by: Alexandru Avadanii ---- - mcp/config/states/maas | 1 + - .../armband/bootstrap_script_arm64.sls | 4 +++ - mcp/salt-formulas/armband/files/cloud.py.diff | 29 ++++++++++++++++++++++ - 3 files changed, 34 insertions(+) - create mode 100644 mcp/salt-formulas/armband/bootstrap_script_arm64.sls - create mode 100644 mcp/salt-formulas/armband/files/cloud.py.diff - -diff --git a/mcp/config/states/maas b/mcp/config/states/maas -index bc8d04f..3cea11d 100755 ---- a/mcp/config/states/maas -+++ b/mcp/config/states/maas -@@ -46,6 +46,7 @@ salt -C '* and not cfg01* and not mas01*' saltutil.sync_all - - salt -C 'kvm*' pkg.install bridge-utils - salt -C 'kvm*' state.apply linux.network -+salt -C 'kvm*' state.apply armband.bootstrap_script_arm64 - salt -C 'kvm*' system.reboot - wait_for "! salt '*' test.ping | tee /dev/stderr | fgrep -q 'Not connected'" - -diff --git a/mcp/salt-formulas/armband/bootstrap_script_arm64.sls b/mcp/salt-formulas/armband/bootstrap_script_arm64.sls -new file mode 100644 -index 0000000..845dedd ---- /dev/null -+++ b/mcp/salt-formulas/armband/bootstrap_script_arm64.sls -@@ -0,0 +1,4 @@ -+/usr/lib/python2.7/dist-packages/salt/utils/cloud.py: -+ file.patch: -+ - source: salt://armband/files/cloud.py.diff -+ - hash: "ecd450b187156c1f6a91ea272fd668b0" -diff --git a/mcp/salt-formulas/armband/files/cloud.py.diff b/mcp/salt-formulas/armband/files/cloud.py.diff -new file mode 100644 -index 0000000..75c3281 ---- /dev/null -+++ b/mcp/salt-formulas/armband/files/cloud.py.diff -@@ -0,0 +1,29 @@ -+From: Alexandru Avadanii -+Date: Sun Aug 20 18:18:53 2017 +0200 -+Subject: [PATCH] cloud.py: Allow AArch64 arch in salt bootstrap -+ -+Recent changes in salt bootstrap script from [1] whitelist a -+fixed pool of known architectures. Add "arm64" to that list on the -+fly, as part of `config.gather_bootstrap_script`. -+ -+NOTE: This change will be leveraged by passing a custom DEB repo to -+the bootstrap script with `-R linux.enea.com/saltstack`. -+ -+[1] http://bootstrap.saltstack.com -+ -+Signed-off-by: Alexandru Avadanii -+--- -+ -+diff --git a//usr/lib/python2.7/dist-packages/salt/utils/cloud.py b//usr/lib/python2.7/dist-packages/salt/utils/cloud.py -+--- a//usr/lib/python2.7/dist-packages/salt/utils/cloud.py -++++ b//usr/lib/python2.7/dist-packages/salt/utils/cloud.py -+@@ -2772,6 +2772,9 @@ -+ if not script_content: -+ raise ValueError('No content in bootstrap script !') -+ -++ # NOTE(armband): edit bootstrap script on the fly to allow AArch64 -++ script_content = script_content.replace('"amd64")', '"amd64"|"arm64")') -++ -+ # Get the path to the built-in deploy scripts directory -+ builtin_deploy_dir = os.path.join( -+ os.path.dirname(__file__), diff --git a/patches/opnfv-fuel/0017-seedng-module-Add-AArch64-repo.patch b/patches/opnfv-fuel/0017-seedng-module-Add-AArch64-repo.patch new file mode 100644 index 00000000..330d416a --- /dev/null +++ b/patches/opnfv-fuel/0017-seedng-module-Add-AArch64-repo.patch @@ -0,0 +1,54 @@ +From: Alexandru Avadanii +Date: Mon, 21 Aug 2017 20:42:00 +0200 +Subject: [PATCH] seedng: module: Add AArch64 repo + +salt custom py module seedng.py should use custom repo arg +"-R linux.enea.com/saltstack" on AArch64 nodes. 
+ +Signed-off-by: Alexandru Avadanii +--- + .../0105-seedng-module-Add-AArch64-repo.patch | 25 ++++++++++++++++++++++ + mcp/patches/patches.list | 1 + + 2 files changed, 26 insertions(+) + create mode 100644 mcp/patches/0105-seedng-module-Add-AArch64-repo.patch + +diff --git a/mcp/patches/0105-seedng-module-Add-AArch64-repo.patch b/mcp/patches/0105-seedng-module-Add-AArch64-repo.patch +new file mode 100644 +index 0000000..e191421 +--- /dev/null ++++ b/mcp/patches/0105-seedng-module-Add-AArch64-repo.patch +@@ -0,0 +1,25 @@ ++From: Alexandru Avadanii ++Date: Mon, 21 Aug 2017 02:03:01 +0200 ++Subject: [PATCH] seedng: module: Add AArch64 repo ++ ++salt custom py module seedng.py should use custom repo arg ++"-R linux.enea.com/saltstack" on AArch64 nodes. ++ ++Signed-off-by: Alexandru Avadanii ++--- ++ ++diff --git a/_modules/seedng.py b/_modules/seedng.py ++--- a/_modules/seedng.py +++++ b/_modules/seedng.py ++@@ -256,8 +256,10 @@ ++ boot_, tmppath = (prep_bootstrap(mpt) ++ or salt.syspaths.BOOTSTRAP) ++ # Exec the chroot command +++ cmdR = '-R linux.enea.com/saltstack' if os.uname()[-1] == 'aarch64' else '' ++ cmd = 'if type salt-minion; then exit 0; ' ++- cmd += 'else sh {0} -c /tmp; fi'.format(os.path.join(tmppath, 'bootstrap-salt.sh')) +++ cmd += 'else sh {0} {1} -c /tmp; fi'.format( +++ os.path.join(tmppath, 'bootstrap-salt.sh'), cmdR) ++ return not __salt__['cmd.run_chroot'](mpt, cmd, python_shell=True)['retcode'] ++ ++ +diff --git a/mcp/patches/patches.list b/mcp/patches/patches.list +index 67b9c82..b4b2b30 100644 +--- a/mcp/patches/patches.list ++++ b/mcp/patches/patches.list +@@ -9,3 +9,4 @@ + /usr/share/salt-formulas/env: 0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch + /usr/share/salt-formulas/env: 0103-virtng-module-Extend-libvirt_domain.patch + /usr/share/salt-formulas/env: 0104-salt-control-virt-Extend-libvirt_domain.patch ++/usr/share/salt-formulas/env: 0105-seedng-module-Add-AArch64-repo.patch diff --git a/patches/opnfv-fuel/0018-seedng-module-Add-AArch64-repo.patch b/patches/opnfv-fuel/0018-seedng-module-Add-AArch64-repo.patch deleted file mode 100644 index 330d416a..00000000 --- a/patches/opnfv-fuel/0018-seedng-module-Add-AArch64-repo.patch +++ /dev/null @@ -1,54 +0,0 @@ -From: Alexandru Avadanii -Date: Mon, 21 Aug 2017 20:42:00 +0200 -Subject: [PATCH] seedng: module: Add AArch64 repo - -salt custom py module seedng.py should use custom repo arg -"-R linux.enea.com/saltstack" on AArch64 nodes. - -Signed-off-by: Alexandru Avadanii ---- - .../0105-seedng-module-Add-AArch64-repo.patch | 25 ++++++++++++++++++++++ - mcp/patches/patches.list | 1 + - 2 files changed, 26 insertions(+) - create mode 100644 mcp/patches/0105-seedng-module-Add-AArch64-repo.patch - -diff --git a/mcp/patches/0105-seedng-module-Add-AArch64-repo.patch b/mcp/patches/0105-seedng-module-Add-AArch64-repo.patch -new file mode 100644 -index 0000000..e191421 ---- /dev/null -+++ b/mcp/patches/0105-seedng-module-Add-AArch64-repo.patch -@@ -0,0 +1,25 @@ -+From: Alexandru Avadanii -+Date: Mon, 21 Aug 2017 02:03:01 +0200 -+Subject: [PATCH] seedng: module: Add AArch64 repo -+ -+salt custom py module seedng.py should use custom repo arg -+"-R linux.enea.com/saltstack" on AArch64 nodes. 
-+ -+Signed-off-by: Alexandru Avadanii -+--- -+ -+diff --git a/_modules/seedng.py b/_modules/seedng.py -+--- a/_modules/seedng.py -++++ b/_modules/seedng.py -+@@ -256,8 +256,10 @@ -+ boot_, tmppath = (prep_bootstrap(mpt) -+ or salt.syspaths.BOOTSTRAP) -+ # Exec the chroot command -++ cmdR = '-R linux.enea.com/saltstack' if os.uname()[-1] == 'aarch64' else '' -+ cmd = 'if type salt-minion; then exit 0; ' -+- cmd += 'else sh {0} -c /tmp; fi'.format(os.path.join(tmppath, 'bootstrap-salt.sh')) -++ cmd += 'else sh {0} {1} -c /tmp; fi'.format( -++ os.path.join(tmppath, 'bootstrap-salt.sh'), cmdR) -+ return not __salt__['cmd.run_chroot'](mpt, cmd, python_shell=True)['retcode'] -+ -+ -diff --git a/mcp/patches/patches.list b/mcp/patches/patches.list -index 67b9c82..b4b2b30 100644 ---- a/mcp/patches/patches.list -+++ b/mcp/patches/patches.list -@@ -9,3 +9,4 @@ - /usr/share/salt-formulas/env: 0102-libvirt-unix_sock_group-s-libvirtd-libvirt.patch - /usr/share/salt-formulas/env: 0103-virtng-module-Extend-libvirt_domain.patch - /usr/share/salt-formulas/env: 0104-salt-control-virt-Extend-libvirt_domain.patch -+/usr/share/salt-formulas/env: 0105-seedng-module-Add-AArch64-repo.patch -- cgit 1.2.3-korg
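As a rough post-deploy sanity check for the AArch64-specific changes carried by this series, something like the following could be run from the salt master (an illustrative sketch only, not part of the patches; the salt targets mirror the 'kvm*'/'mas01*' conventions used in mcp/config/states/maas, and the file paths are assumptions about where the patched formulas land on the minions):

  # Assumed paths and targets; adjust to the actual pod.
  # 0013/0102: unix_sock_group should now read "libvirt" on the kvm nodes
  salt -C 'kvm*' cmd.run "grep -n unix_sock_group /etc/libvirt/libvirtd.conf"
  # 0012: MAAS on mas01 should have imported arm64 boot resources alongside amd64
  salt -C 'mas01*' cmd.run "ls /var/lib/maas/boot-resources/current/ubuntu/"
  # 0016: the patched salt cloud.py on the kvm nodes should mention arm64 in its arch whitelist edit
  salt -C 'kvm*' cmd.run "grep -c arm64 /usr/lib/python2.7/dist-packages/salt/utils/cloud.py"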