Diffstat (limited to 'ci')
-rwxr-xr-x | ci/02-deploybundle.sh | 26
-rwxr-xr-x | ci/03-maasdeploy.sh | 30
-rw-r--r-- | ci/config_tpl/juju2/bundle_tpl/ceilometer.yaml | 1
-rw-r--r-- | ci/config_tpl/juju2/bundle_tpl/ceph.yaml | 5
-rw-r--r-- | ci/config_tpl/juju2/bundle_tpl/cinder.yaml | 1
-rw-r--r-- | ci/config_tpl/juju2/bundle_tpl/glance.yaml | 1
-rw-r--r-- | ci/config_tpl/juju2/bundle_tpl/oclphase1.yaml | 2
-rw-r--r-- | ci/config_tpl/juju2/bundle_tpl/openstack-dashboard.yaml | 1
-rw-r--r-- | ci/config_tpl/juju2/bundle_tpl/spaces.yaml | 32
-rw-r--r-- | ci/config_tpl/juju2/bundlek8_tpl/bundle.yaml | 2
-rw-r--r-- | ci/config_tpl/juju2/bundlek8_tpl/easyrsa.yaml | 2
-rw-r--r-- | ci/config_tpl/juju2/bundlek8_tpl/etcd.yaml | 2
-rw-r--r-- | ci/config_tpl/juju2/bundlek8_tpl/flannel.yaml | 4
-rw-r--r-- | ci/config_tpl/juju2/bundlek8_tpl/kubernetes.yaml | 4
-rw-r--r-- | ci/config_tpl/juju2/bundlek8_tpl/ovn.yaml | 5
-rw-r--r-- | ci/config_tpl/juju2/bundlek8_tpl/spaces.yaml | 5
-rwxr-xr-x | ci/deploy.sh | 39
17 files changed, 81 insertions, 81 deletions
diff --git a/ci/02-deploybundle.sh b/ci/02-deploybundle.sh
index 1bb0084c..0d4166e5 100755
--- a/ci/02-deploybundle.sh
+++ b/ci/02-deploybundle.sh
@@ -151,25 +151,13 @@ fi
 #keep the back in cloud for later debugging.
 pastebinit bundles.yaml || true

-if [[ "$jujuver" < "2" ]]; then
-    echo "... Deployment Started ...."
-    juju-deployer -vW -d -t 7200 -r 5 -c bundles.yaml $opnfvdistro-"$openstack"-nodes
-    count=`juju status nodes --format=short | grep nodes | wc -l`
-    c=0
-    while [ $c -lt $count ]; do
-        juju ssh nodes/$c 'echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p' || true
-        juju ssh nodes-compute/$c 'echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p' || true
-        juju ssh nodes/$c 'echo 2048 | sudo tee /proc/sys/fs/inotify/max_user_instances' || true
-        juju ssh nodes-compute/$c 'echo 2048 | sudo tee /proc/sys/fs/inotify/max_user_instances' || true
-        let c+=1
-    done
+# with JUJU 2.0 bundles has to be deployed only once.
+juju deploy bundles.yaml --debug
+sleep 120
+check_status allocating

-    juju-deployer -vW -d -t 7200 -r 5 -c bundles.yaml $opnfvdistro-"$openstack" || true
-else
-    # with JUJU 2.0 bundles has to be deployed only once.
-    juju deploy bundles.yaml --debug
-    sleep 120
-    check_status allocating
+# need to revisit later if not needed we will remove the below.
+openfile_fix() {
     # seeing issue related to number of open files.
     count=`juju status nodes --format=short | grep nodes | wc -l`
     c=0
@@ -180,7 +168,7 @@ else
         juju ssh nodes-compute/$c 'echo 2048 | sudo tee /proc/sys/fs/inotify/max_user_instances' || true
         let c+=1
     done
-fi
+}

 if [ "$opnfvsdn" = "ocl" ]
 then
diff --git a/ci/03-maasdeploy.sh b/ci/03-maasdeploy.sh
index dcf4b46b..d74214bf 100755
--- a/ci/03-maasdeploy.sh
+++ b/ci/03-maasdeploy.sh
@@ -255,9 +255,9 @@ configuremaas(){

    #create the required spaces.
    maas $PROFILE space update 0 name=default || true
-    for space in unused admin-api internal-api public-api \
+    for space in admin-api internal-api public-api \
                 storage-access storage-cluster admin \
-                 tenant-data tenant-api tenant-public oam-space
+                 tenant-data tenant-api tenant-public os-api
    do
        echo_info "Creating the space $space"
        maas $PROFILE spaces create name=$space || true
@@ -281,7 +281,7 @@ configuremaas(){

setupspacenetwork(){
    #get space, subnet and vlan and create accordingly.
-    #for type in pxe admin data storage external floating public; do
+    #for type in admin osapi data storage external floating public; do
    nettypes=`cat labconfig.json | jq '.opnfv.spaces[]'.type | cut -d \" -f 2`
    for type in $nettypes; do
        config_done=0
@@ -335,13 +335,13 @@ setupspacenetwork(){
            fi
        fi
        case "$type" in
-            'pxe') JUJU_SPACE="oam-space"; DHCP='enabled' ;;
            'admin') JUJU_SPACE="internal-api"; DHCP='enabled' ;;
            'data') JUJU_SPACE="tenant-data"; DHCP='' ;;
            'public') JUJU_SPACE="public-api"; DHCP='' ;;
            'storage') JUJU_SPACE="storage-cluster"; DHCP='' ;;
            'storageaccess') JUJU_SPACE="storage-data"; DHCP='' ;;
            'floating') JUJU_SPACE="tenant-public"; DHCP='' ;;
+            'osapi') JUJU_SPACE="os-api"; DHCP='' ;;
            *) JUJU_SPACE='default'; DHCP='OFF'; echo_info " >>> Unknown SPACE" ;;
        esac
        JUJU_SPACE_ID=$(maas $PROFILE spaces read | jq -r ".[] | select(.name==\"$JUJU_SPACE\")".id)
@@ -352,7 +352,7 @@ setupspacenetwork(){
                maas $PROFILE vlan update $NET_FABRIC_ID $JUJU_VLAN_VID space=$JUJU_SPACE_ID
            fi
        fi
-        if ([ $type == "admin" ] || [ $type == "pxe" ]); then
+        if ([ $type == "admin" ]); then
            # If we have a network, we create it
            if ([ $NET_FABRIC_ID ]); then
                # Set ranges
@@ -409,10 +409,9 @@ addnodes(){

    brid=`brctl show | grep 8000 | cut -d "8" -f 1 | tr "\n" " " | tr " " " " | tr -s " "`
    ADMIN_BR=`cat labconfig.json | jq '.opnfv.spaces[] | select(.type=="admin")'.bridge | cut -d \" -f 2 `
-    PXE_BR=`cat labconfig.json | jq '.opnfv.spaces[] | select(.type=="admin")'.bridge | cut -d \" -f 2 `

    for feature in $brid; do
-        if ([ "$feature" == "$ADMIN_BR" ] || [ "$feature" == "$PXE_BR" ]); then
+        if ([ "$feature" == "$ADMIN_BR" ]); then
            netw=$netw" --network bridge="$feature",model=virtio"
        else
            netw=$netw
@@ -489,8 +488,6 @@ addnodes(){
        units=$(($units - 1));
        NODE_NAME=`cat labconfig.json | jq ".lab.racks[].nodes[$units].name" | cut -d \" -f 2 `
        MAC_ADDRESS=`cat labconfig.json | jq ".lab.racks[].nodes[$units].nics[] | select(.spaces[]==\"admin\").mac"[0] | cut -d \" -f 2 `
-        MAC_ADDRESS_PXE=`cat labconfig.json | jq ".lab.racks[].nodes[$units].nics[] | select(.spaces[]==\"pxe\").mac"[0] | cut -d \" -f 2 `
-        #MAC_ADDRESS1=`cat labconfig.json | jq ".lab.racks[].nodes[$units].nics[] | select(.spaces[]==\"floating\").mac"[0] | cut -d \" -f 2 `
        POWER_TYPE=`cat labconfig.json | jq ".lab.racks[].nodes[$units].power.type" | cut -d \" -f 2 `
        POWER_IP=`cat labconfig.json | jq ".lab.racks[].nodes[$units].power.address" | cut -d \" -f 2 `
        POWER_USER=`cat labconfig.json | jq ".lab.racks[].nodes[$units].power.user" | cut -d \" -f 2 `
@@ -508,17 +505,10 @@ addnodes(){

        NODE_ARC="$NODE_ARCHES/generic"

        echo_info "Creating node $NODE_NAME"
-        if ([ $MAC_ADDRESS_PXE ] && ["$MAC_ADDRESS_PXE" != "null" ]); then
-            maas $PROFILE machines create autodetect_nodegroup='yes' name=$NODE_NAME \
-                hostname=$NODE_NAME power_type=$POWER_TYPE power_parameters_power_address=$POWER_IP \
-                power_parameters_power_user=$POWER_USER power_parameters_power_pass=$POWER_PASS \
-                mac_addresses=$MAC_ADDRESS_PXE architecture=$NODE_ARC
-        else
-            maas $PROFILE machines create autodetect_nodegroup='yes' name=$NODE_NAME \
-                hostname=$NODE_NAME power_type=$POWER_TYPE power_parameters_power_address=$POWER_IP \
-                power_parameters_power_user=$POWER_USER power_parameters_power_pass=$POWER_PASS \
-                mac_addresses=$MAC_ADDRESS architecture=$NODE_ARC
-        fi
+        maas $PROFILE machines create autodetect_nodegroup='yes' name=$NODE_NAME \
+            hostname=$NODE_NAME power_type=$POWER_TYPE power_parameters_power_address=$POWER_IP \
+            power_parameters_power_user=$POWER_USER power_parameters_power_pass=$POWER_PASS \
+            mac_addresses=$MAC_ADDRESS architecture=$NODE_ARC
    done
 fi
diff --git a/ci/config_tpl/juju2/bundle_tpl/ceilometer.yaml b/ci/config_tpl/juju2/bundle_tpl/ceilometer.yaml
index b34bec24..de6a1214 100644
--- a/ci/config_tpl/juju2/bundle_tpl/ceilometer.yaml
+++ b/ci/config_tpl/juju2/bundle_tpl/ceilometer.yaml
@@ -1,6 +1,7 @@
   mongodb:
     charm: ./{{ ubuntu.release }}/mongodb
     num_units: 1
+    constraints: *oam-space-constr
 {% if os.service.bindings %}
     bindings:
       "": *internal-space
diff --git a/ci/config_tpl/juju2/bundle_tpl/ceph.yaml b/ci/config_tpl/juju2/bundle_tpl/ceph.yaml
index 87591a42..cdbe4869 100644
--- a/ci/config_tpl/juju2/bundle_tpl/ceph.yaml
+++ b/ci/config_tpl/juju2/bundle_tpl/ceph.yaml
@@ -28,8 +28,8 @@
 {% endfor %}
 {% else %}
       - "lxd:nodes/0"
-      - "lxd:nodes/1"
-      - "lxd:nodes/2"
+      - "lxd:nodes/0"
+      - "lxd:nodes/0"
 {% endif %}
 {% endif %}
   ceph-osd:
@@ -83,6 +83,7 @@
     charm: "./{{ ubuntu.release }}/ceph-radosgw"
     num_units: {{ unit_qty() if os.beta.hacluster_ceph_radosgw else 1 }}
 {% if os.service.bindings %}
+    constraints: *ceph-access-constr
     bindings:
       "": *oam-space
       public: *public-space
diff --git a/ci/config_tpl/juju2/bundle_tpl/cinder.yaml b/ci/config_tpl/juju2/bundle_tpl/cinder.yaml
index 027fc045..0313f07c 100644
--- a/ci/config_tpl/juju2/bundle_tpl/cinder.yaml
+++ b/ci/config_tpl/juju2/bundle_tpl/cinder.yaml
@@ -6,6 +6,7 @@
     num_units: {{ unit_qty() }}
 {% endif %}
 {% if os.service.bindings %}
+    constraints: *ceph-access-constr
     bindings:
       "": *oam-space
       public: *public-space
diff --git a/ci/config_tpl/juju2/bundle_tpl/glance.yaml b/ci/config_tpl/juju2/bundle_tpl/glance.yaml
index 198cefb7..725c436e 100644
--- a/ci/config_tpl/juju2/bundle_tpl/glance.yaml
+++ b/ci/config_tpl/juju2/bundle_tpl/glance.yaml
@@ -2,6 +2,7 @@
     charm: "./{{ ubuntu.release }}/glance"
     num_units: {{ unit_qty() }}
 {% if os.service.bindings %}
+    constraints: *ceph-access-constr
     bindings:
       "": *oam-space
       public: *public-space
diff --git a/ci/config_tpl/juju2/bundle_tpl/oclphase1.yaml b/ci/config_tpl/juju2/bundle_tpl/oclphase1.yaml
index 588eeb2b..e27b0d48 100644
--- a/ci/config_tpl/juju2/bundle_tpl/oclphase1.yaml
+++ b/ci/config_tpl/juju2/bundle_tpl/oclphase1.yaml
@@ -105,7 +105,7 @@
     charm: ./{{ ubuntu.release }}/contrail-keystone-auth
     num_units: 1
     bindings:
-      "": internal-api
+      "": *oam-space
     to:
      - "lxd:nodes/0"

diff --git a/ci/config_tpl/juju2/bundle_tpl/openstack-dashboard.yaml b/ci/config_tpl/juju2/bundle_tpl/openstack-dashboard.yaml
index 5e240e24..9132096e 100644
--- a/ci/config_tpl/juju2/bundle_tpl/openstack-dashboard.yaml
+++ b/ci/config_tpl/juju2/bundle_tpl/openstack-dashboard.yaml
@@ -2,6 +2,7 @@
     charm: "./{{ ubuntu.release }}/openstack-dashboard"
     num_units: {{ unit_qty() }}
 {% if os.service.bindings %}
+    constraints: *oam-space-constr
     bindings:
       "": *public-space
       shared-db: *internal-space
diff --git a/ci/config_tpl/juju2/bundle_tpl/spaces.yaml b/ci/config_tpl/juju2/bundle_tpl/spaces.yaml
index 63f624f3..3023dfc0 100644
--- a/ci/config_tpl/juju2/bundle_tpl/spaces.yaml
+++ b/ci/config_tpl/juju2/bundle_tpl/spaces.yaml
@@ -1,34 +1,35 @@
-
-
 {% if os.release == 'mitaka' %}
     openstack-origin: &openstack-origin distro
 {% else %}
     openstack-origin: &openstack-origin cloud:{{ ubuntu.release }}-{{ os.release }}
 {% endif %}
-
     openstack-region: &openstack-region {{ os.region }}
     worker-multiplier: &worker-multiplier {{ os.api.worker_multiplier }}
     data-port: &data-port br-data:{{ opnfv.ext_port }}

     # OAM - Operations, Administration and Maintenance
-{% if opnfv.spaces_dict.pxe is defined %}
-    oam-space: &oam-space oam-space
-{% else %}
     oam-space: &oam-space internal-api
-{% endif %}

+    # This is OpenStack Internal network; for internalURL endpoints
     # This is OpenStack Admin network; for adminURL endpoints
+{% if opnfv.spaces_dict.osapi is defined %}
+    internal-space: &internal-space os-api
+    admin-space: &admin-space os-api
+{% else %}
+    internal-space: &internal-space internal-api
     admin-space: &admin-space internal-api
+{% endif %}

     # This is OpenStack Public network; for publicURL endpoints
 {% if opnfv.spaces_dict.public is defined %}
     public-space: &public-space public-api
 {% else %}
+{% if opnfv.spaces_dict.osapi is defined %}
+    public-space: &public-space os-api
+{% else %}
     public-space: &public-space internal-api
 {% endif %}
-
-    # This is OpenStack Internal network; for internalURL endpoints
-    internal-space: &internal-space internal-api
+{% endif %}

     # This is the overlay network
 {% if opnfv.spaces_dict.data is defined %}
@@ -39,7 +40,13 @@

     # CEPH configuration
     # CEPH access network
-    ceph-public-space: &ceph-public-space internal-api
+{% if opnfv.spaces_dict.storageaccess is defined %}
+    ceph-public-space: &ceph-public-space storage-access-space
+    ceph-access-constr: &ceph-access-constr spaces=storage-access-space
+{% else %}
+    ceph-public-space: &ceph-public-space internal-api
+    ceph-access-constr: &ceph-access-constr spaces=internal-api
+{% endif %}

     # CEPH replication network
 {% if opnfv.spaces_dict.storage is defined %}
@@ -49,8 +56,7 @@
 {% endif %}

     # Workaround for 'only one default binding supported'
-    oam-space-constr: &oam-space-constr spaces=oam-space
-    ceph-access-constr: &ceph-access-constr spaces=ceph-access-space
+    oam-space-constr: &oam-space-constr spaces=internal-api

     # CEPH OSD and journal devices; temporary workaround for #1674148
 {% if os.lxd %}
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/bundle.yaml b/ci/config_tpl/juju2/bundlek8_tpl/bundle.yaml
index 96a27cfb..b4ed14ac 100644
--- a/ci/config_tpl/juju2/bundlek8_tpl/bundle.yaml
+++ b/ci/config_tpl/juju2/bundlek8_tpl/bundle.yaml
@@ -6,6 +6,8 @@
 # deployment before moving to production.
 #
   series: {{ ubuntu.release }}
+  variables:
+{% include 'spaces.yaml' %}
   services:
     nodes:
       charm: "cs:{{ ubuntu.release }}/ubuntu"
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/easyrsa.yaml b/ci/config_tpl/juju2/bundlek8_tpl/easyrsa.yaml
index ea46e424..87802a0e 100644
--- a/ci/config_tpl/juju2/bundlek8_tpl/easyrsa.yaml
+++ b/ci/config_tpl/juju2/bundlek8_tpl/easyrsa.yaml
@@ -3,7 +3,7 @@
     num_units: 1
 {% if os.service.bindings %}
     bindings:
-      "": internal-api
+      "": *oam-space
 {% endif %}
     to:
 {% if k8.network.controller == 'ovn' %}
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/etcd.yaml b/ci/config_tpl/juju2/bundlek8_tpl/etcd.yaml
index d902adf8..0908e3a1 100644
--- a/ci/config_tpl/juju2/bundlek8_tpl/etcd.yaml
+++ b/ci/config_tpl/juju2/bundlek8_tpl/etcd.yaml
@@ -11,7 +11,7 @@
 {% endif %}
 {% if os.service.bindings %}
     bindings:
-      "": internal-api
+      "": *oam-space
 {% endif %}
     to:
 {% if k8.network.controller == 'ovn' %}
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/flannel.yaml b/ci/config_tpl/juju2/bundlek8_tpl/flannel.yaml
index 561e0880..79b89c7f 100644
--- a/ci/config_tpl/juju2/bundlek8_tpl/flannel.yaml
+++ b/ci/config_tpl/juju2/bundlek8_tpl/flannel.yaml
@@ -3,5 +3,5 @@
     charm: cs:~containers/flannel
 {% if os.service.bindings %}
     bindings:
-      "": internal-api
-{% endif %}
\ No newline at end of file
+      "": *oam-space
+{% endif %}
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/kubernetes.yaml b/ci/config_tpl/juju2/bundlek8_tpl/kubernetes.yaml
index 933c1ef7..645ab157 100644
--- a/ci/config_tpl/juju2/bundlek8_tpl/kubernetes.yaml
+++ b/ci/config_tpl/juju2/bundlek8_tpl/kubernetes.yaml
@@ -4,7 +4,7 @@
     expose: true
 {% if os.service.bindings %}
     bindings:
-      "": internal-api
+      "": *oam-space
 {% endif %}
 {% if k8.network.controller == 'ovn' %}
     options:
@@ -23,7 +23,7 @@
     expose: true
 {% if os.service.bindings %}
     bindings:
-      "": internal-api
+      "": *oam-space
 {% endif %}
 {% if k8.network.controller == 'ovn' %}
     options:
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/ovn.yaml b/ci/config_tpl/juju2/bundlek8_tpl/ovn.yaml
index 978c07c9..98def5ae 100644
--- a/ci/config_tpl/juju2/bundlek8_tpl/ovn.yaml
+++ b/ci/config_tpl/juju2/bundlek8_tpl/ovn.yaml
@@ -3,7 +3,8 @@
     charm: "cs:~aakashkt/ovn-15"
 {% if os.service.bindings %}
     bindings:
-      "": internal-api
+      "": *oam-space
 {% endif %}
     options:
-      gateway-physical-interface: "none"
\ No newline at end of file
+      gateway-physical-interface: "none"
+
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/spaces.yaml b/ci/config_tpl/juju2/bundlek8_tpl/spaces.yaml
new file mode 100644
index 00000000..17dbd7da
--- /dev/null
+++ b/ci/config_tpl/juju2/bundlek8_tpl/spaces.yaml
@@ -0,0 +1,5 @@
+
+
+    # OAM - Operations, Administration and Maintenance
+    oam-space: &oam-space internal-api
+
diff --git a/ci/deploy.sh b/ci/deploy.sh
index f9c1b2d0..b013b33c 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -228,29 +228,32 @@ echo_info "Configuring public access"

 # translate bundle.yaml to json
 python -c 'import sys, yaml, json; json.dump(yaml.load(sys.stdin), sys.stdout, indent=4)' < bundles.yaml > bundles.json
-# get services list having a public interface
-srv_list=$(cat bundles.json | jq -r ".services | to_entries[] | {\"key\": .key, \"value\": .value[\"bindings\"]} | select (.value!=null) | select(.value[] | contains(\"public-api\"))".key)
-# get cnt list from service list
-cnt_list=$(for cnt in $srv_list; do juju status $cnt --format=json | jq -r ".machines[].containers | to_entries[]".key; done)
-# get public network gateway (supposing it is the first ip of the network)
-public_api_gw=$(cat labconfig.json | jq --raw-output ".opnfv.spaces[] | select(.type==\"public\")".gateway)
-admin_gw=$(cat labconfig.json | jq --raw-output ".opnfv.spaces[] | select(.type==\"admin\")".gateway)
-
-if ([ $admin_gw ] && [ $admin_gw != "null" ]); then
-    # set default gateway to public api gateway
-    for cnt in $cnt_list; do
-        echo_info "Changing default gateway on $cnt"
-        if ([ $public_api_gw ] && [ $public_api_gw != "null" ]); then
-            juju ssh $cnt "sudo ip r d default && sudo ip r a default via $public_api_gw";
-            juju ssh $cnt "gw_dev=\$(ip r l | grep 'via $public_api_gw' | cut -d \ -f5) &&\
+
+public_service() {
+    # get services list having a public interface
+    srv_list=$(cat bundles.json | jq -r ".services | to_entries[] | {\"key\": .key, \"value\": .value[\"bindings\"]} | select (.value!=null) | select(.value[] | contains(\"public-api\"))".key)
+    # get cnt list from service list
+    cnt_list=$(for cnt in $srv_list; do juju status $cnt --format=json | jq -r ".machines[].containers | to_entries[]".key; done)
+    # get public network gateway (supposing it is the first ip of the network)
+    public_api_gw=$(cat labconfig.json | jq --raw-output ".opnfv.spaces[] | select(.type==\"public\")".gateway)
+    admin_gw=$(cat labconfig.json | jq --raw-output ".opnfv.spaces[] | select(.type==\"admin\")".gateway)
+
+    if ([ $admin_gw ] && [ $admin_gw != "null" ]); then
+        # set default gateway to public api gateway
+        for cnt in $cnt_list; do
+            echo_info "Changing default gateway on $cnt"
+            if ([ $public_api_gw ] && [ $public_api_gw != "null" ]); then
+                juju ssh $cnt "sudo ip r d default && sudo ip r a default via $public_api_gw";
+                juju ssh $cnt "gw_dev=\$(ip r l | grep 'via $public_api_gw' | cut -d \ -f5) &&\
                 sudo cp /etc/network/interfaces /etc/network/interfaces.bak &&\
                 echo 'removing old default gateway' &&\
                 sudo perl -i -pe 's/^\ *gateway $admin_gw\n$//' /etc/network/interfaces &&\
                 sudo perl -i -pe \"s/iface \$gw_dev inet static/iface \$gw_dev inet static\\n gateway $public_api_gw/\" /etc/network/interfaces \
                ";
-        fi
-    done
-fi
+            fi
+        done
+    fi
+}

 # Configuring deployment
 if ([ $opnfvmodel == "openstack" ]); then
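
For reference, the changes above replace the old pxe/oam-space handling with a new osapi space type that maps to the MAAS space os-api, and the bundle templates branch on opnfv.spaces_dict.osapi. Below is a minimal sketch of how a matching labconfig.json entry would be discovered, reusing the jq query already present in 03-maasdeploy.sh; the labconfig fragment values are hypothetical placeholders, not taken from any real lab file:

    #!/bin/bash
    # Hypothetical labconfig.json fragment (placeholder values for illustration only):
    #   { "opnfv": { "spaces": [ { "type": "osapi", "bridge": "brOsapi",
    #                              "cidr": "10.4.9.0/24", "gateway": "10.4.9.1" } ] } }
    # setupspacenetwork() discovers space types with this exact query:
    nettypes=`cat labconfig.json | jq '.opnfv.spaces[]'.type | cut -d \" -f 2`
    for type in $nettypes; do
        # an 'osapi' entry maps to JUJU_SPACE="os-api" with DHCP disabled,
        # just as 'admin' maps to "internal-api" in the case statement above
        echo "space type found in labconfig: $type"
    done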