18 files changed, 778 insertions, 44 deletions
diff --git a/jjb/releng/opnfv-docker.sh b/jjb/releng/opnfv-docker.sh
index 9bd711bc6..5d73a9d70 100644
--- a/jjb/releng/opnfv-docker.sh
+++ b/jjb/releng/opnfv-docker.sh
@@ -17,14 +17,16 @@
 echo "Starting opnfv-docker for $DOCKER_REPO_NAME ..."
 echo "--------------------------------------------------------"
 echo
-
-if [[ -n $(ps -ef|grep 'docker build'|grep -v grep) ]]; then
-    echo "There is already another build process in progress:"
-    echo $(ps -ef|grep 'docker build'|grep -v grep)
-    # Abort this job since it will collide and might mess up the current one.
-    echo "Aborting..."
-    exit 1
-fi
+count=30 # docker build jobs might take up to ~30 min
+while [[ -n `ps -ef|grep 'docker build'|grep -v grep` ]]; do
+    echo "Build in progress. Waiting..."
+    sleep 60
+    count=$(( $count - 1 ))
+    if [ $count -eq 0 ]; then
+        echo "Timeout. Aborting..."
+        exit 1
+    fi
+done
 
 # Remove previous running containers if exist
 if [[ -n "$(docker ps -a | grep $DOCKER_REPO_NAME)" ]]; then
diff --git a/prototypes/xci/file/ha/configure-targethosts.yml b/prototypes/xci/file/ha/configure-targethosts.yml
new file mode 100644
index 000000000..6dc147f3b
--- /dev/null
+++ b/prototypes/xci/file/ha/configure-targethosts.yml
@@ -0,0 +1,36 @@
+---
+- hosts: all
+  remote_user: root
+  tasks:
+    - name: add public key to host
+      copy:
+        src: ../file/authorized_keys
+        dest: /root/.ssh/authorized_keys
+    - name: configure modules
+      copy:
+        src: ../file/modules
+        dest: /etc/modules
+
+- hosts: controller
+  remote_user: root
+  vars_files:
+    - ../var/{{ ansible_os_family }}.yml
+    - ../var/flavor-vars.yml
+  roles:
+    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+
+- hosts: compute
+  remote_user: root
+  vars_files:
+    - ../var/{{ ansible_os_family }}.yml
+    - ../var/flavor-vars.yml
+  roles:
+    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+
+- hosts: compute01
+  remote_user: root
+  # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
+  roles:
+    - role: configure-nfs
diff --git a/prototypes/xci/file/ha/flavor-vars.yml b/prototypes/xci/file/ha/flavor-vars.yml
index e69de29bb..3cd1d6246 100644
--- a/prototypes/xci/file/ha/flavor-vars.yml
+++ b/prototypes/xci/file/ha/flavor-vars.yml
@@ -0,0 +1,37 @@
+---
+host_info: {
+    'opnfv': {
+        'MGMT_IP': '172.29.236.10',
+        'VLAN_IP': '192.168.122.2',
+        'STORAGE_IP': '172.29.244.10'
+    },
+    'controller00': {
+        'MGMT_IP': '172.29.236.11',
+        'VLAN_IP': '192.168.122.3',
+        'STORAGE_IP': '172.29.244.11'
+    },
+    'controller01': {
+        'MGMT_IP': '172.29.236.12',
+        'VLAN_IP': '192.168.122.4',
+        'STORAGE_IP': '172.29.244.12'
+    },
+    'controller02': {
+        'MGMT_IP': '172.29.236.13',
+        'VLAN_IP': '192.168.122.5',
+        'STORAGE_IP': '172.29.244.13'
+    },
+    'compute00': {
+        'MGMT_IP': '172.29.236.14',
+        'VLAN_IP': '192.168.122.6',
+        'STORAGE_IP': '172.29.244.14',
+        'VLAN_IP_SECOND': '173.29.241.1',
+        'VXLAN_IP': '172.29.240.14'
+    },
+    'compute01': {
+        'MGMT_IP': '172.29.236.15',
+        'VLAN_IP': '192.168.122.7',
+        'STORAGE_IP': '172.29.244.15',
+        'VLAN_IP_SECOND': '173.29.241.2',
+        'VXLAN_IP': '172.29.240.15'
+    }
+}
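The HA flavor-vars file above gives every node its management, VLAN and storage addresses, and the compute nodes additionally carry VXLAN and secondary VLAN addresses. A small standalone sanity check along the following lines (illustrative only, not part of the patch; it assumes PyYAML is available and takes the file path as an argument) can catch a missing key before the Ansible templates consume host_info:

# sanity_check_flavor_vars.py -- illustrative only, not part of this patch.
# Assumes PyYAML is installed and the flavor-vars.yml layout shown above.
import sys

import yaml

REQUIRED = {'MGMT_IP', 'VLAN_IP', 'STORAGE_IP'}
COMPUTE_EXTRA = {'VXLAN_IP', 'VLAN_IP_SECOND'}


def check(path):
    with open(path) as handle:
        host_info = yaml.safe_load(handle)['host_info']
    errors = []
    for host, ips in host_info.items():
        expected = REQUIRED | (COMPUTE_EXTRA if host.startswith('compute') else set())
        missing = expected - set(ips)
        if missing:
            errors.append('{}: missing {}'.format(host, sorted(missing)))
    return errors


if __name__ == '__main__':
    problems = check(sys.argv[1] if len(sys.argv) > 1 else 'flavor-vars.yml')
    for line in problems:
        print(line)
    sys.exit(1 if problems else 0)

Run against the file above it prints nothing and exits 0; dropping, say, VXLAN_IP from compute00 would be reported.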
diff --git a/prototypes/xci/file/ha/inventory b/prototypes/xci/file/ha/inventory
index e69de29bb..94b1d074d 100644
--- a/prototypes/xci/file/ha/inventory
+++ b/prototypes/xci/file/ha/inventory
@@ -0,0 +1,11 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+controller01 ansible_ssh_host=192.168.122.4
+controller02 ansible_ssh_host=192.168.122.5
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.6
+compute01 ansible_ssh_host=192.168.122.7
diff --git a/prototypes/xci/file/ha/openstack_user_config.yml b/prototypes/xci/file/ha/openstack_user_config.yml
index e69de29bb..43e88c0d0 100644
--- a/prototypes/xci/file/ha/openstack_user_config.yml
+++ b/prototypes/xci/file/ha/openstack_user_config.yml
@@ -0,0 +1,278 @@
+---
+cidr_networks:
+  container: 172.29.236.0/22
+  tunnel: 172.29.240.0/22
+  storage: 172.29.244.0/22
+
+used_ips:
+  - "172.29.236.1,172.29.236.50"
+  - "172.29.240.1,172.29.240.50"
+  - "172.29.244.1,172.29.244.50"
+  - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+  internal_lb_vip_address: 172.29.236.222
+  external_lb_vip_address: 192.168.122.220
+  tunnel_bridge: "br-vxlan"
+  management_bridge: "br-mgmt"
+  provider_networks:
+    - network:
+        container_bridge: "br-mgmt"
+        container_type: "veth"
+        container_interface: "eth1"
+        ip_from_q: "container"
+        type: "raw"
+        group_binds:
+          - all_containers
+          - hosts
+        is_container_address: true
+        is_ssh_address: true
+    - network:
+        container_bridge: "br-vxlan"
+        container_type: "veth"
+        container_interface: "eth10"
+        ip_from_q: "tunnel"
+        type: "vxlan"
+        range: "1:1000"
+        net_name: "vxlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth12"
+        host_bind_override: "eth12"
+        type: "flat"
+        net_name: "flat"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth11"
+        type: "vlan"
+        range: "1:1"
+        net_name: "vlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-storage"
+        container_type: "veth"
+        container_interface: "eth2"
+        ip_from_q: "storage"
+        type: "raw"
+        group_binds:
+          - glance_api
+          - cinder_api
+          - cinder_volume
+          - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# rsyslog server
+# log_hosts:
+#   log1:
+#     ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# cinder api services
+storage-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.15"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+  controller01:
+    ip: 172.29.236.12
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.15"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+  controller02:
+    ip: 172.29.236.13
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.15"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# heat
+orchestration_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# horizon
+dashboard_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# neutron server, agents (L3, etc)
+network_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# ceilometer (telemetry API)
+metering-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# aodh (telemetry alarm service)
+metering-alarm_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# gnocchi (telemetry metrics storage)
+metrics_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# nova hypervisors
+compute_hosts:
+  compute00:
+    ip: 172.29.236.14
+  compute01:
+    ip: 172.29.236.15
+
+# ceilometer compute agent (telemetry)
+metering-compute_hosts:
+  compute00:
+    ip: 172.29.236.14
+  compute01:
+    ip: 172.29.236.15
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        lvm:
+          volume_group: cinder-volumes
+          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+          volume_backend_name: LVM_iSCSI
+          iscsi_ip_address: "172.29.244.11"
+  controller01:
+    ip: 172.29.236.12
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        lvm:
+          volume_group: cinder-volumes
+          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+          volume_backend_name: LVM_iSCSI
+          iscsi_ip_address: "172.29.244.12"
+  controller02:
+    ip: 172.29.236.13
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        lvm:
+          volume_group: cinder-volumes
+          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+          volume_backend_name: LVM_iSCSI
+          iscsi_ip_address: "172.29.244.13"
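openstack_user_config.yml first declares the container, tunnel and storage CIDRs and then reserves the low addresses of each range in used_ips so that the OpenStack-Ansible dynamic inventory does not assign them to containers (the fourth reservation, 172.29.248.x, falls outside the three declared networks and is presumably held for an additional network that is not defined here). A standalone sketch of a consistency check between the two sections, assuming Python 3 with the stdlib ipaddress module (illustrative, not part of the patch):

# Illustrative consistency check for cidr_networks vs. used_ips -- not part of the patch.
import ipaddress

cidr_networks = {
    'container': '172.29.236.0/22',
    'tunnel': '172.29.240.0/22',
    'storage': '172.29.244.0/22',
}
used_ips = [
    '172.29.236.1,172.29.236.50',
    '172.29.240.1,172.29.240.50',
    '172.29.244.1,172.29.244.50',
    '172.29.248.1,172.29.248.50',
]

networks = [ipaddress.ip_network(cidr) for cidr in cidr_networks.values()]
for entry in used_ips:
    start, end = (ipaddress.ip_address(ip) for ip in entry.split(','))
    covered = any(start in net and end in net for net in networks)
    print('{:<30} covered by a declared CIDR: {}'.format(entry, covered))

As written it reports the first three reservations as covered and the 172.29.248.x range as not covered, which matches the observation above.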
diff --git a/prototypes/xci/file/ha/user_variables.yml b/prototypes/xci/file/ha/user_variables.yml
new file mode 100644
index 000000000..65cbcc11b
--- /dev/null
+++ b/prototypes/xci/file/ha/user_variables.yml
@@ -0,0 +1,27 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
diff --git a/prototypes/xci/file/noha/configure-targethosts.yml b/prototypes/xci/file/noha/configure-targethosts.yml
new file mode 100644
index 000000000..6dc147f3b
--- /dev/null
+++ b/prototypes/xci/file/noha/configure-targethosts.yml
@@ -0,0 +1,36 @@
+---
+- hosts: all
+  remote_user: root
+  tasks:
+    - name: add public key to host
+      copy:
+        src: ../file/authorized_keys
+        dest: /root/.ssh/authorized_keys
+    - name: configure modules
+      copy:
+        src: ../file/modules
+        dest: /etc/modules
+
+- hosts: controller
+  remote_user: root
+  vars_files:
+    - ../var/{{ ansible_os_family }}.yml
+    - ../var/flavor-vars.yml
+  roles:
+    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+
+- hosts: compute
+  remote_user: root
+  vars_files:
+    - ../var/{{ ansible_os_family }}.yml
+    - ../var/flavor-vars.yml
+  roles:
+    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+
+- hosts: compute01
+  remote_user: root
+  # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
+  roles:
+    - role: configure-nfs
diff --git a/prototypes/xci/file/noha/flavor-vars.yml b/prototypes/xci/file/noha/flavor-vars.yml
index e69de29bb..7f52d343a 100644
--- a/prototypes/xci/file/noha/flavor-vars.yml
+++ b/prototypes/xci/file/noha/flavor-vars.yml
@@ -0,0 +1,27 @@
+---
+host_info: {
+    'opnfv': {
+        'MGMT_IP': '172.29.236.10',
+        'VLAN_IP': '192.168.122.2',
+        'STORAGE_IP': '172.29.244.10'
+    },
+    'controller00': {
+        'MGMT_IP': '172.29.236.11',
+        'VLAN_IP': '192.168.122.3',
+        'STORAGE_IP': '172.29.244.11'
+    },
+    'compute00': {
+        'MGMT_IP': '172.29.236.12',
+        'VLAN_IP': '192.168.122.4',
+        'VLAN_IP_SECOND': '173.29.241.1',
+        'VXLAN_IP': '172.29.240.12',
+        'STORAGE_IP': '172.29.244.12'
+    },
+    'compute01': {
+        'MGMT_IP': '172.29.236.13',
+        'VLAN_IP': '192.168.122.5',
+        'VLAN_IP_SECOND': '173.29.241.2',
+        'VXLAN_IP': '172.29.240.13',
+        'STORAGE_IP': '172.29.244.13'
+    }
+}
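Compared with the HA flavor, the noha flavor above trims the layout to one controller and two computes. The static inventory that follows simply publishes each node's VLAN_IP as ansible_ssh_host, which the sketch below makes explicit by deriving the inventory text from host_info (illustrative only, not part of the patch; group membership is inferred from host-name prefixes, and host_info is trimmed to the keys the sketch needs):

# Illustrative: derive an inventory in the style shown below from host_info.
# Not part of the patch.
host_info = {
    'opnfv': {'VLAN_IP': '192.168.122.2'},
    'controller00': {'VLAN_IP': '192.168.122.3'},
    'compute00': {'VLAN_IP': '192.168.122.4'},
    'compute01': {'VLAN_IP': '192.168.122.5'},
}


def render_inventory(info):
    groups = {'opnfv': [], 'controller': [], 'compute': []}
    for host in sorted(info):
        group = next(g for g in groups if host.startswith(g))
        groups[group].append('{} ansible_ssh_host={}'.format(host, info[host]['VLAN_IP']))
    return '\n\n'.join('[{}]\n{}'.format(g, '\n'.join(lines))
                       for g, lines in groups.items())


print(render_inventory(host_info))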
diff --git a/prototypes/xci/file/noha/inventory b/prototypes/xci/file/noha/inventory
index e69de29bb..b4f9f6d0c 100644
--- a/prototypes/xci/file/noha/inventory
+++ b/prototypes/xci/file/noha/inventory
@@ -0,0 +1,9 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.4
+compute01 ansible_ssh_host=192.168.122.5
diff --git a/prototypes/xci/file/noha/openstack_user_config.yml b/prototypes/xci/file/noha/openstack_user_config.yml
index e69de29bb..999741580 100644
--- a/prototypes/xci/file/noha/openstack_user_config.yml
+++ b/prototypes/xci/file/noha/openstack_user_config.yml
@@ -0,0 +1,190 @@
+---
+cidr_networks:
+  container: 172.29.236.0/22
+  tunnel: 172.29.240.0/22
+  storage: 172.29.244.0/22
+
+used_ips:
+  - "172.29.236.1,172.29.236.50"
+  - "172.29.240.1,172.29.240.50"
+  - "172.29.244.1,172.29.244.50"
+  - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+  internal_lb_vip_address: 172.29.236.11
+  external_lb_vip_address: 192.168.122.3
+  tunnel_bridge: "br-vxlan"
+  management_bridge: "br-mgmt"
+  provider_networks:
+    - network:
+        container_bridge: "br-mgmt"
+        container_type: "veth"
+        container_interface: "eth1"
+        ip_from_q: "container"
+        type: "raw"
+        group_binds:
+          - all_containers
+          - hosts
+        is_container_address: true
+        is_ssh_address: true
+    - network:
+        container_bridge: "br-vxlan"
+        container_type: "veth"
+        container_interface: "eth10"
+        ip_from_q: "tunnel"
+        type: "vxlan"
+        range: "1:1000"
+        net_name: "vxlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth12"
+        host_bind_override: "eth12"
+        type: "flat"
+        net_name: "flat"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth11"
+        type: "vlan"
+        range: "1:1"
+        net_name: "vlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-storage"
+        container_type: "veth"
+        container_interface: "eth2"
+        ip_from_q: "storage"
+        type: "raw"
+        group_binds:
+          - glance_api
+          - cinder_api
+          - cinder_volume
+          - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+#   log1:
+#     ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.13"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# ceilometer (telemetry API)
+metering-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# aodh (telemetry alarm service)
+metering-alarm_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# gnocchi (telemetry metrics storage)
+metrics_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+  compute00:
+    ip: 172.29.236.12
+  compute01:
+    ip: 172.29.236.13
+
+# ceilometer compute agent (telemetry)
+metering-compute_hosts:
+  compute00:
+    ip: 172.29.236.12
+  compute01:
+    ip: 172.29.236.13
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        lvm:
+          volume_group: cinder-volumes
+          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+          volume_backend_name: LVM_iSCSI
+          iscsi_ip_address: "172.29.244.11"
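The most visible differences from the HA variant are that the infrastructure and OpenStack service groups now list controller00 alone, and that glance mounts its NFS share from compute01's storage address (172.29.244.13 here, 172.29.244.15 in the HA file). Because the per-service blocks are pure repetition, they could also be generated instead of hand-maintained; a sketch of that idea (illustrative, not part of the patch; assumes PyYAML, with service names taken from the files above):

# Illustrative generator for the repetitive "*_hosts" blocks -- not part of the patch.
import yaml

SERVICES = ['shared-infra', 'repo-infra', 'haproxy', 'identity', 'storage-infra',
            'compute-infra', 'orchestration', 'dashboard', 'network',
            'metering-infra', 'metering-alarm', 'metrics']


def host_groups(controllers):
    """controllers: mapping of host name -> management IP."""
    return {
        service + '_hosts': {name: {'ip': ip} for name, ip in controllers.items()}
        for service in SERVICES
    }


# noha uses a single controller; the HA flavor would pass all three controllers here.
print(yaml.safe_dump(host_groups({'controller00': '172.29.236.11'}),
                     default_flow_style=False))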
diff --git a/prototypes/xci/file/noha/user_variables.yml b/prototypes/xci/file/noha/user_variables.yml
new file mode 100644
index 000000000..e4a63a257
--- /dev/null
+++ b/prototypes/xci/file/noha/user_variables.yml
@@ -0,0 +1,27 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
diff --git a/prototypes/xci/file/user_variables.yml b/prototypes/xci/file/user_variables.yml
deleted file mode 100644
index e69de29bb..000000000
--- a/prototypes/xci/file/user_variables.yml
+++ /dev/null
diff --git a/prototypes/xci/playbooks/configure-opnfvhost.yml b/prototypes/xci/playbooks/configure-opnfvhost.yml
index abebd1d7f..6689c8dc7 100644
--- a/prototypes/xci/playbooks/configure-opnfvhost.yml
+++ b/prototypes/xci/playbooks/configure-opnfvhost.yml
@@ -17,6 +17,8 @@
     - role: remove-folders
     - { role: clone-repository, project: "opnfv/releng", repo: "{{ OPNFV_RELENG_GIT_URL }}", dest: "{{ OPNFV_RELENG_PATH }}", version: "{{ OPNFV_RELENG_VERSION }}" }
     - { role: clone-repository, project: "openstack/openstack-ansible", repo: "{{ OPENSTACK_OSA_GIT_URL }}", dest: "{{ OPENSTACK_OSA_PATH }}", version: "{{ OPENSTACK_OSA_VERSION }}" }
+    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
+    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/opnfv.interface.j2", dest: "/etc/network/interfaces" }
   tasks:
     - name: generate SSH keys
       shell: ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""
@@ -48,9 +50,6 @@
       shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/setup-openstack.yml {{OPENSTACK_OSA_PATH}}/playbooks"
     - name: copy OPNFV role requirements
       shell: "/bin/cp -rf {{OPNFV_RELENG_PATH}}/prototypes/xci/file/ansible-role-requirements.yml {{OPENSTACK_OSA_PATH}}"
-  roles:
-    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/opnfv.interface.j2", dest: "/etc/network/interfaces" }
 - hosts: localhost
   remote_user: root
   tasks:
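One relationship worth keeping in mind across the flavor files: the keepalived VIP CIDRs in each user_variables.yml must agree with the *_lb_vip_address values in the same flavor's openstack_user_config.yml (192.168.122.3 and 172.29.236.11 for noha, 192.168.122.220 and 172.29.236.222 for ha). A standalone cross-check along these lines (illustrative, not part of the patch; assumes PyYAML and the directory layout used above) makes that explicit:

# Illustrative cross-check of the keepalived VIPs against the LB VIP addresses.
# Not part of the patch.
import yaml


def check_flavor(flavor_dir):
    with open(flavor_dir + '/openstack_user_config.yml') as handle:
        overrides = yaml.safe_load(handle)['global_overrides']
    with open(flavor_dir + '/user_variables.yml') as handle:
        user_vars = yaml.safe_load(handle)
    pairs = [
        ('external', overrides['external_lb_vip_address'],
         user_vars['haproxy_keepalived_external_vip_cidr']),
        ('internal', overrides['internal_lb_vip_address'],
         user_vars['haproxy_keepalived_internal_vip_cidr']),
    ]
    for scope, vip, cidr in pairs:
        ok = cidr.split('/')[0] == vip
        print('{}: {} vs {} -> {}'.format(scope, vip, cidr, 'OK' if ok else 'MISMATCH'))


check_flavor('prototypes/xci/file/noha')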
diff --git a/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml
new file mode 100644
index 000000000..b188f4dbb
--- /dev/null
+++ b/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml
@@ -0,0 +1,36 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# TODO: this is for xenial and needs to be adjusted for different distros
+- block:
+    - name: make NFS dir
+      file:
+        dest: /images
+        mode: 777
+        state: directory
+    - name: configure NFS service
+      lineinfile:
+        dest: /etc/services
+        state: present
+        create: yes
+        line: "{{ item }}"
+      with_items:
+        - "nfs 2049/tcp"
+        - "nfs 2049/udp"
+    - name: configure NFS exports on ubuntu xenial
+      copy:
+        src: ../file/exports
+        dest: /etc/exports
+      when: ansible_distribution_release == "xenial"
+    # TODO: the service name might be different on other distros and needs to be adjusted
+    - name: restart ubuntu xenial NFS service
+      service:
+        name: nfs-kernel-server
+        state: restarted
+      when: ansible_distribution_release == "xenial"
diff --git a/utils/test/testapi/opnfv_testapi/common/raises.py b/utils/test/testapi/opnfv_testapi/common/raises.py
new file mode 100644
index 000000000..ed3a84ee0
--- /dev/null
+++ b/utils/test/testapi/opnfv_testapi/common/raises.py
@@ -0,0 +1,31 @@
+import httplib
+
+from tornado import web
+
+
+class Raiser(object):
+    code = httplib.OK
+
+    def __init__(self, reason):
+        raise web.HTTPError(self.code, reason)
+
+
+class BadRequest(Raiser):
+    code = httplib.BAD_REQUEST
+
+
+class Forbidden(Raiser):
+    code = httplib.FORBIDDEN
+
+
+class NotFound(Raiser):
+    code = httplib.NOT_FOUND
+
+
+class Unauthorized(Raiser):
+    code = httplib.UNAUTHORIZED
+
+
+class CodeTBD(object):
+    def __init__(self, code, reason):
+        raise web.HTTPError(code, reason)
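The new raises module centralizes HTTP error raising for the TestAPI: each subclass pins an httplib status code, and merely constructing an instance raises tornado's web.HTTPError, so call sites read like plain function calls. The handlers.py changes that follow use it exactly this way; a minimal usage sketch (illustrative, written in the same Python 2 style as the code, and assuming the opnfv_testapi package and tornado are importable):

# Illustrative usage of the new raises helpers -- mirrors the handler changes below.
import httplib

from tornado import web

from opnfv_testapi.common import raises


def lookup(record):
    if record is None:
        # Constructing the class raises web.HTTPError(httplib.NOT_FOUND, ...) immediately.
        raises.NotFound("[{}] not exist in table [{}]".format('query', 'results'))
    return record


try:
    lookup(None)
except web.HTTPError as exc:
    assert exc.status_code == httplib.NOT_FOUND  # 404

A constructor that never returns an object is a little unusual; module-level helper functions would work too, but the class form keeps each status code declared exactly once per error type.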
diff --git a/utils/test/testapi/opnfv_testapi/resources/handlers.py b/utils/test/testapi/opnfv_testapi/resources/handlers.py
index bf8a92b54..c2b1a6476 100644
--- a/utils/test/testapi/opnfv_testapi/resources/handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/handlers.py
@@ -22,13 +22,13 @@
 from datetime import datetime
 import functools
-import httplib
 import json
 
 from tornado import gen
 from tornado import web
 
 import models
+from opnfv_testapi.common import raises
 from opnfv_testapi.tornado_swagger import swagger
 
 DEFAULT_REPRESENTATION = "application/json"
 
@@ -56,9 +56,7 @@ class GenericApiHandler(web.RequestHandler):
         try:
             self.json_args = json.loads(self.request.body)
         except (ValueError, KeyError, TypeError) as error:
-            raise web.HTTPError(httplib.BAD_REQUEST,
-                                "Bad Json format [{}]".
-                                format(error))
+            raises.BadRequest("Bad Json format [{}]".format(error))
 
     def finish_request(self, json_object=None):
         if json_object:
@@ -83,13 +81,11 @@ class GenericApiHandler(web.RequestHandler):
             try:
                 token = self.request.headers['X-Auth-Token']
             except KeyError:
-                raise web.HTTPError(httplib.UNAUTHORIZED,
-                                    "No Authentication Header.")
+                raises.Unauthorized("No Authentication Header.")
             query = {'access_token': token}
             check = yield self._eval_db_find_one(query, 'tokens')
             if not check:
-                raise web.HTTPError(httplib.FORBIDDEN,
-                                    "Invalid Token.")
+                raises.Forbidden("Invalid Token.")
             ret = yield gen.coroutine(method)(self, *args, **kwargs)
             raise gen.Return(ret)
         return wrapper
@@ -101,14 +97,13 @@ class GenericApiHandler(web.RequestHandler):
         :param db_checks: [(table, exist, query, error)]
         """
         if self.json_args is None:
-            raise web.HTTPError(httplib.BAD_REQUEST, "no body")
+            raises.BadRequest('no body')
 
         data = self.table_cls.from_dict(self.json_args)
         for miss in miss_checks:
             miss_data = data.__getattribute__(miss)
             if miss_data is None or miss_data == '':
-                raise web.HTTPError(httplib.BAD_REQUEST,
-                                    '{} missing'.format(miss))
+                raises.BadRequest('{} missing'.format(miss))
 
         for k, v in kwargs.iteritems():
             data.__setattr__(k, v)
@@ -117,7 +112,7 @@ class GenericApiHandler(web.RequestHandler):
             check = yield self._eval_db_find_one(query(data), table)
             if (exist and not check) or (not exist and check):
                 code, message = error(data)
-                raise web.HTTPError(code, message)
+                raises.CodeTBD(code, message)
 
         if self.table != 'results':
             data.creation_date = datetime.now()
@@ -153,18 +148,16 @@ class GenericApiHandler(web.RequestHandler):
     def _get_one(self, query):
         data = yield self._eval_db_find_one(query)
         if data is None:
-            raise web.HTTPError(httplib.NOT_FOUND,
-                                "[{}] not exist in table [{}]"
-                                .format(query, self.table))
+            raises.NotFound("[{}] not exist in table [{}]"
+                            .format(query, self.table))
         self.finish_request(self.format_data(data))
 
     @authenticate
     def _delete(self, query):
         data = yield self._eval_db_find_one(query)
         if data is None:
-            raise web.HTTPError(httplib.NOT_FOUND,
-                                "[{}] not exit in table [{}]"
-                                .format(query, self.table))
+            raises.NotFound("[{}] not exit in table [{}]"
+                            .format(query, self.table))
         yield self._eval_db(self.table, 'remove', query)
         self.finish_request()
 
@@ -172,14 +165,13 @@ class GenericApiHandler(web.RequestHandler):
     @authenticate
     def _update(self, query, db_keys):
         if self.json_args is None:
-            raise web.HTTPError(httplib.BAD_REQUEST, "No payload")
+            raises.BadRequest("No payload")
 
         # check old data exist
         from_data = yield self._eval_db_find_one(query)
         if from_data is None:
-            raise web.HTTPError(httplib.NOT_FOUND,
-                                "{} could not be found in table [{}]"
-                                .format(query, self.table))
+            raises.NotFound("{} could not be found in table [{}]"
+                            .format(query, self.table))
         data = self.table_cls.from_dict(from_data)
 
         # check new data exist
@@ -187,9 +179,8 @@ class GenericApiHandler(web.RequestHandler):
         if not equal:
             to_data = yield self._eval_db_find_one(new_query)
             if to_data is not None:
-                raise web.HTTPError(httplib.FORBIDDEN,
-                                    "{} already exists in table [{}]"
-                                    .format(new_query, self.table))
+                raises.Forbidden("{} already exists in table [{}]"
+                                 .format(new_query, self.table))
 
         # we merge the whole document """
         edit_request = self._update_requests(data)
@@ -206,7 +197,7 @@ class GenericApiHandler(web.RequestHandler):
             request = self._update_request(request, k, v,
                                            data.__getattribute__(k))
         if not request:
-            raise web.HTTPError(httplib.FORBIDDEN, "Nothing to update")
raises.Forbidden("Nothing to update") edit_request = data.format() edit_request.update(request) diff --git a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py index 44b9f8c07..3e78057ce 100644 --- a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py @@ -11,8 +11,8 @@ from datetime import timedelta import httplib from bson import objectid -from tornado import web +from opnfv_testapi.common import raises from opnfv_testapi.resources import handlers from opnfv_testapi.resources import result_models from opnfv_testapi.tornado_swagger import swagger @@ -30,8 +30,7 @@ class GenericResultHandler(handlers.GenericApiHandler): try: value = int(value) except: - raise web.HTTPError(httplib.BAD_REQUEST, - '{} must be int'.format(key)) + raises.BadRequest('{} must be int'.format(key)) return value def set_query(self): diff --git a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py index a2856dbd7..9d0233c77 100644 --- a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py @@ -1,8 +1,7 @@ import functools import httplib -from tornado import web - +from opnfv_testapi.common import raises from opnfv_testapi.resources import handlers import opnfv_testapi.resources.scenario_models as models from opnfv_testapi.tornado_swagger import swagger @@ -185,8 +184,7 @@ class ScenarioGURHandler(GenericScenarioHandler): def _update_requests_rename(self, data): data.name = self._term.get('name') if not data.name: - raise web.HTTPError(httplib.BAD_REQUEST, - "new scenario name is not provided") + raises.BadRequest("new scenario name is not provided") def _update_requests_add_installer(self, data): data.installers.append(models.ScenarioInstaller.from_dict(self._term)) |