Diffstat (limited to 'deploy/adapters/ansible/roles')
22 files changed, 171 insertions, 158 deletions
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh b/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
index 3b4e687d..13e5fd8a 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
+++ b/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
@@ -9,7 +9,7 @@ if [ ! -d "/ceph/images" ]; then
     mkdir -p /ceph/images
 fi

-rm /ceph/images/ceph-volumes.img
+rm -f /ceph/images/ceph-volumes.img

 if [ ! -f "/ceph/images/ceph-volumes.img" ]; then
     echo "create ceph-volumes.img"
@@ -18,7 +18,6 @@ sgdisk -g --clear /ceph/images/ceph-volumes.img
 fi

 #safe check
-ps -ef |grep create_osd.sh |awk '{print $2}' |xargs kill -9
 ps -ef |grep lvremove |awk '{print $2}' |xargs kill -9
 ps -ef |grep vgremove |awk '{print $2}' |xargs kill -9
 ps -ef |grep vgcreate |awk '{print $2}' |xargs kill -9
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
index 34ce6fdc..b437d4a6 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
@@ -1,21 +1,21 @@
 ---
 - name: modify glance-api.conf for ceph
-  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(default_store\).*/\1 = rbd/' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a stores = rbd\nrbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8' /etc/glance/glance-api.conf && sudo glance-control api restart"
+  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(default_store\).*/\1 = rbd/' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a stores = rbd\nrbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf && sudo glance-control api restart"
   with_items:
     - "{{ groups['controller'] }}"
   tags:
     - ceph_conf_glance

 - name: modify cinder.conf for ceph
-  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/' /etc/cinder/cinder.conf && sed -i '/^\[DEFAULT/a rbd_pool = volumes\nrbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_flatten_volume_from_snapshot = false\nrbd_max_clone_depth = 5\nrbd_store_chunk_size = 4\nrados_connect_timeout = -1\nglance_api_version = 2\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}' /etc/cinder/cinder.conf && sudo service cinder-volume restart"
+  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/' /etc/cinder/cinder.conf && sed -i '/^\[DEFAULT/a rbd_pool = volumes\nrbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_flatten_volume_from_snapshot = false\nrbd_max_clone_depth = 5\nrbd_store_chunk_size = 4\nrados_connect_timeout = -1\nglance_api_version = 2\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}' /etc/cinder/cinder.conf && sudo service {{ cinder_service }} restart"
   with_items:
     - "{{ groups['compute'] }}"
   tags:
     - ceph_conf_cinder

 - name: modify nova.conf for ceph
-  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(images_type\).*/\1 = rbd/' /etc/nova/nova-compute.conf && sed -i '/^\[libvirt/a images_rbd_pool = vms\nimages_rbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}\ndisk_cachemodes=\"network=writeback\"\nlive_migration_flag=\"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"' /etc/nova/nova-compute.conf && sudo service nova-compute restart"
+  shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(images_type\).*/\1 = rbd/' /etc/nova/nova-compute.conf && sed -i '/^\[libvirt/a images_rbd_pool = vms\nimages_rbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}\ndisk_cachemodes=\"network=writeback\"\nlive_migration_flag=\"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"' /etc/nova/nova-compute.conf && sudo service {{ nova_service }} restart"
   with_items:
     - "{{ groups['compute'] }}"
   tags:
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml
index 41c24709..a2ff030b 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_pre.yml
@@ -1,7 +1,7 @@
 ---
 - name: create pool
-  shell: ceph osd pool create {{ item }} 128
+  shell: ceph osd pool create {{ item }} 80
   with_items:
     - volumes
     - images
@@ -54,6 +54,6 @@
     - "{{ groups['compute'] }}"
   tags:
     - ceph_copy_secret
-
+  ignore_errors: True
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/vars/Debian.yml b/deploy/adapters/ansible/roles/ceph-deploy/vars/Debian.yml
index 5ed6cc10..04eda092 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/vars/Debian.yml
@@ -16,3 +16,6 @@ packages:
   - gdisk

 services: []
+
+cinder_service: cinder-volume
+nova_service: nova-compute
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/vars/RedHat.yml b/deploy/adapters/ansible/roles/ceph-deploy/vars/RedHat.yml
index fd607d38..14e8428e 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/vars/RedHat.yml
@@ -6,3 +6,6 @@ packages:
   - ceph

 services: []
+
+cinder_service: openstack-cinder-volume
+nova_service: openstack-nova-compute
diff --git a/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml b/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml
index 77029b81..5bce443c 100644
--- a/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml
+++ b/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml
@@ -3,22 +3,8 @@
   template: src=cinder.conf dest=/etc/cinder/cinder.conf

 - name: sync cinder db
-  shell: su -s /bin/sh -c "cinder-manage db sync" cinder && cinder
-  register: result
-  run_once: True
-  until: result.rc == 0
-  retries: 5
-  delay: 3
+  cinder_manage: action=dbsync
   notify:
     - restart cinder control serveice

 - meta: flush_handlers
-
-- name: upload cinder keystone register script
-  template: src=cinder_init.sh dest=/opt/cinder_init.sh mode=0744
-
-- name: run cinder register script
-  shell: for i in {0..5}; do /opt/cinder_init.sh && touch cinder_init_complete; if [ $? != 0 ]; then sleep 5; else break; fi; done
-  run_once: True
-  args:
-    creates: cinder_init_complete
diff --git a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh
deleted file mode 100644
index bc92bac0..00000000
--- a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name=cinder --pass={{ CINDER_PASS }} --email=cinder@example.com
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-role-add --user=cinder --tenant=service --role=admin
-
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=cinder --type=volume --description="OpenStack Block Storage"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ public_vip.ip }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s
-
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb.yml
index 37c9a2c9..9a9ebda5 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mariadb.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb.yml
@@ -28,9 +28,7 @@
   register: mysql_init_complete

 - name: restart first mysql server
-  shell: service mysql restart --wsrep-cluster-address="gcomm://"
-  args:
-    creates: /opt/mysql_init_complete
+  shell: service mysql restart --wsrep-cluster-address="gcomm://" && touch /opt/mysql_init_complete
   when: inventory_hostname == haproxy_hosts.keys()[0] and mysql_init_complete.stat.exists == False
   tags:
     - mysql_restart
@@ -40,9 +38,7 @@
   #delay: 5

 - name: restart other mysql server
-  shell: service mysql restart
-  args:
-    creates: /opt/mysql_init_complete
+  shell: service mysql restart && touch /opt/mysql_init_complete
   tags:
     - mysql_restart
   when: inventory_hostname != haproxy_hosts.keys()[0] and mysql_init_complete.stat.exists == False
diff --git a/deploy/adapters/ansible/roles/database/tasks/mongodb.yml b/deploy/adapters/ansible/roles/database/tasks/mongodb.yml
index a0edcce8..ca61e905 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mongodb.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mongodb.yml
@@ -19,7 +19,7 @@
 - name: manually restart mongodb server
   service: name=mongodb state=restarted

-- wait_for: port=27017 delay=3 timeout=10 host={{ internal_vip.ip }}
+- wait_for: port=27017 delay=3 timeout=30 host={{ internal_vip.ip }}

 - name: create mongodb user
   run_once: True
diff --git a/deploy/adapters/ansible/roles/glance/tasks/glance_config.yml b/deploy/adapters/ansible/roles/glance/tasks/glance_config.yml
index e2390d0e..548c2309 100644
--- a/deploy/adapters/ansible/roles/glance/tasks/glance_config.yml
+++ b/deploy/adapters/ansible/roles/glance/tasks/glance_config.yml
@@ -1,36 +1,7 @@
 ---
-- name: init glance db version
-  shell: glance-manage db_version_control 0
-  run_once: True
-  when: ansible_os_family == "Debian"
-
 - name: sync glance db
-  shell: sleep 15; su -s /bin/sh -c "glance-manage db_sync" glance
-  run_once: True
-  register: result
-  until: result.rc == 0
-  retries: 5
-  delay: 3
+  glance_manage: action=dbsync
   notify:
     - restart glance services

 - meta: flush_handlers
-
-- name: place image upload script
-  template: src=image_upload.sh dest=/opt/image_upload.sh mode=0744
-
-- name: get image http server
-  shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
-  register: http_server
-
-- name: download cirros image file
-  get_url: url="http://{{ http_server.stdout_lines[0] }}/image/{{ build_in_image_name }}" dest=/opt/{{ build_in_image_name }}
-
-- name: wait for 9292 port to become available
-  wait_for: host={{ image_host }} port=9292 delay=5
-
-- name: run image upload
-  run_once: True
-  shell: for i in {0..5}; do /opt/image_upload.sh && touch image_upload_completed; if [ $? != 0 ] ;then sleep 5; else break;fi;done
-  args:
-    creates: image_upload_completed
diff --git a/deploy/adapters/ansible/roles/glance/tasks/glance_install.yml b/deploy/adapters/ansible/roles/glance/tasks/glance_install.yml
index 294b35e8..6d2b7053 100644
--- a/deploy/adapters/ansible/roles/glance/tasks/glance_install.yml
+++ b/deploy/adapters/ansible/roles/glance/tasks/glance_install.yml
@@ -18,4 +18,3 @@

 - name: remove default sqlite db
   shell: rm /var/lib/glance/glance.sqlite || touch glance.sqllite.db.removed
-
diff --git a/deploy/adapters/ansible/roles/glance/tasks/main.yml b/deploy/adapters/ansible/roles/glance/tasks/main.yml
index 64b8ca9d..7cbcfa96 100644
--- a/deploy/adapters/ansible/roles/glance/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/glance/tasks/main.yml
@@ -17,4 +17,3 @@
     - config
     - glance_config
     - glance
-
diff --git a/deploy/adapters/ansible/roles/ha/templates/keepalived.conf b/deploy/adapters/ansible/roles/ha/templates/keepalived.conf
index a2e008a7..d6f424cd 100644
--- a/deploy/adapters/ansible/roles/ha/templates/keepalived.conf
+++ b/deploy/adapters/ansible/roles/ha/templates/keepalived.conf
@@ -2,6 +2,13 @@ global_defs {
   router_id {{ inventory_hostname }}
 }

+vrrp_sync_group VG1 {
+  group {
+    internal_vip
+    public_vip
+  }
+}
+
 vrrp_instance internal_vip {
   interface {{ internal_vip.interface }}
   virtual_router_id {{ vrouter_id_internal }}
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml b/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml
index 78ac970b..f69a83cb 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/keystone_config.yml
@@ -1,18 +1,52 @@
 ---
 - name: keystone-manage db-sync
-  shell: su -s /bin/sh -c "keystone-manage db_sync"
-  register: result
-  run_once: True
-  until: result.rc == 0
-  retries: 5
-  delay: 3
+  keystone_manage: action=dbsync

-- name: place keystone init script under /opt/
-  template: src=keystone_init dest=/opt/keystone_init mode=0744
+- name: wait for keystone ready
+  wait_for: port=35357 delay=3 timeout=10 host={{ internal_vip.ip }}

-- name: run keystone_init
-  run_once: True
-  shell: /opt/keystone_init && touch keystone_init_complete || keystone_init_failed
-  args:
-    creates: keystone_init_complete
+- name: cron job to purge expired tokens hourly
+  cron:
+    name: 'purge expired tokens'
+    special_time: hourly
+    job: '/usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1'
+- name: add tenants
+  keystone_user:
+    token: "{{ ADMIN_TOKEN }}"
+    endpoint: "http://{{ internal_ip }}:35357/v2.0"
+    tenant: "{{ item.tenant }}"
+    tenant_description: "{{ item.tenant_description }}"
+  with_items: "{{ os_users }}"
+
+- name: add users
+  keystone_user:
+    token: "{{ ADMIN_TOKEN }}"
+    endpoint: "http://{{ internal_ip }}:35357/v2.0"
+    user: "{{ item.user }}"
+    tenant: "{{ item.tenant }}"
+    password: "{{ item.password }}"
+    email: "{{ item.email }}"
+  with_items: "{{ os_users }}"
+
+- name: grant roles
+  keystone_user:
+    token: "{{ ADMIN_TOKEN }}"
+    endpoint: "http://{{ internal_ip }}:35357/v2.0"
+    user: "{{ item.user }}"
+    role: "{{ item.role }}"
+    tenant: "{{ item.tenant }}"
+  with_items: "{{ os_users }}"
+
+- name: add endpoints
+  keystone_service:
+    token: "{{ ADMIN_TOKEN }}"
+    endpoint: "http://{{ internal_ip }}:35357/v2.0"
+    name: "{{ item.name }}"
+    type: "{{ item.type }}"
+    region: "{{ item.region}}"
+    description: "{{ item.description }}"
+    publicurl: "{{ item.publicurl }}"
+    internalurl: "{{ item.internalurl }}"
+    adminurl: "{{ item.adminurl }}"
+  with_items: "{{ os_services }}"
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
index 32d2b6be..e4488016 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
@@ -15,12 +15,6 @@
 - name: delete sqlite database
   shell: rm /var/lib/keystone/keystone.db || echo sqllite database already removed

-- name: cron job to purge expired tokens hourly
-  shell: (crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' >> {{ cron_path }}/keystone
-
-- name: modify keystone cron rights
-  file: path={{ cron_path }}/keystone mode=0600
-
 - name: keystone source files
   template: src={{ item }} dest=/opt/{{ item }}
   with_items:
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/main.yml b/deploy/adapters/ansible/roles/keystone/tasks/main.yml
index 3ff37342..aa3ff1d5 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/main.yml
@@ -6,6 +6,7 @@
     - keystone

 - include: keystone_config.yml
+  when: inventory_hostname == groups['controller'][0]
   tags:
     - config
     - keystone_config
diff --git a/deploy/adapters/ansible/roles/keystone/templates/keystone_init b/deploy/adapters/ansible/roles/keystone/templates/keystone_init
deleted file mode 100644
index 0ea6e08a..00000000
--- a/deploy/adapters/ansible/roles/keystone/templates/keystone_init
+++ /dev/null
@@ -1,54 +0,0 @@
-set -e
-while ! keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-list; do
-  echo "not ready"
-  sleep 1
-done
-echo "keystone is ready"
-
-# create an administrative user
-
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 role-create --name=admin
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 tenant-create --name=admin --description="Admin Tenant"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name=admin --pass={{ ADMIN_PASS }} --tenant=admin --email=admin@admin.com
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-role-add --user=admin --tenant=admin --role=admin
-
-# create a normal user
-
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 tenant-create --name=demo --description="Demo Tenant"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name=demo --pass={{ DEMO_PASS }} --tenant=demo --email=DEMO_EMAIL
-
-# create a service tenant
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 tenant-create --name=service --description="Service Tenant"
-
-# regist keystone
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=keystone --type=identity --description="OpenStack Identity"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ public_vip.ip }}:5000/v2.0 --internalurl=http://{{ internal_vip.ip }}:5000/v2.0 --adminurl=http://{{ internal_vip.ip }}:35357/v2.0
-
-# Create a glance user that the Image Service can use to authenticate with the Identity service
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name=glance --pass={{ GLANCE_PASS }} --email=glance@example.com
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-role-add --user=glance --tenant=service --role=admin
-
-#Register the Image Service with the Identity service so that other OpenStack services can locate it
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=glance --type=image --description="OpenStack Image Service"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ public_vip.ip }}:9292 --internalurl=http://{{ internal_vip.ip }}:9292 --adminurl=http://{{ internal_vip.ip }}:9292
-
-#Create a nova user that Compute uses to authenticate with the Identity Service
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name=nova --pass={{ NOVA_PASS }} --email=nova@example.com
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-role-add --user=nova --tenant=service --role=admin
-
-# register Compute with the Identity Service so that other OpenStack services can locate it
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=nova --type=compute --description="OpenStack Compute"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ public_vip.ip }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s
-
-# register netron user, role and service
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name neutron --pass {{ NEUTRON_PASS }} --email neutron@example.com
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-role-add --user neutron --tenant service --role admin
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name neutron --type network --description "OpenStack Networking"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ public_vip.ip }}:9696 --adminurl http://{{ internal_vip.ip }}:9696 --internalurl http://{{ internal_vip.ip }}:9696
-
-
-# register ceilometer related
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name ceilometer --pass {{ CEILOMETER_PASS }} --email ceilometer@example.com
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-role-add --user ceilometer --tenant service --role admin
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name ceilometer --type metering --description "OpenStack Telemetry"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ metering / {print $2}') --publicurl http://{{ public_vip.ip }}:8777 --adminurl http://{{ internal_vip.ip }}:8777 --internalurl http://{{ internal_vip.ip }}:8777
diff --git a/deploy/adapters/ansible/roles/keystone/vars/main.yml b/deploy/adapters/ansible/roles/keystone/vars/main.yml
index cc24916a..dc3ca498 100644
--- a/deploy/adapters/ansible/roles/keystone/vars/main.yml
+++ b/deploy/adapters/ansible/roles/keystone/vars/main.yml
@@ -3,4 +3,102 @@
 packages_noarch:
   - python-keystoneclient

 services_noarch: []
+os_services:
+  - name: keystone
+    type: identity
+    region: regionOne
+    description: "OpenStack Identity"
+    publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
+    internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
+    adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
+  - name: glance
+    type: image
+    region: regionOne
+    description: "OpenStack Image Service"
+    publicurl: "http://{{ public_vip.ip }}:9292"
+    internalurl: "http://{{ internal_vip.ip }}:9292"
+    adminurl: "http://{{ internal_vip.ip }}:9292"
+
+  - name: nova
+    type: compute
+    region: regionOne
+    description: "OpenStack Compute"
+    publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
+    internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
+    adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
+
+  - name: neutron
+    type: network
+    region: regionOne
+    description: "OpenStack Networking"
+    publicurl: "http://{{ public_vip.ip }}:9696"
+    internalurl: "http://{{ internal_vip.ip }}:9696"
+    adminurl: "http://{{ internal_vip.ip }}:9696"
+
+  - name: ceilometer
+    type: metering
+    region: regionOne
+    description: "OpenStack Telemetry"
+    publicurl: "http://{{ public_vip.ip }}:8777/v2.0"
+    internalurl: "http://{{ internal_vip.ip }}:8777/v2.0"
+    adminurl: "http://{{ internal_vip.ip }}:8777/v2.0"
+
+  - name: cinder
+    type: volume
+    region: regionOne
+    description: "OpenStack Block Storage"
+    publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
+    internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+    adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+
+
+os_users:
+  - user: admin
+    password: "{{ ADMIN_PASS }}"
+    email: admin@admin.com
+    role: admin
+    tenant: admin
+    tenant_description: "Admin Tenant"
+
+  - user: glance
+    password: "{{ GLANCE_PASS }}"
+    email: glance@admin.com
+    role: admin
+    tenant: service
+    tenant_description: "Service Tenant"
+
+  - user: nova
+    password: "{{ NOVA_PASS }}"
+    email: nova@admin.com
+    role: admin
+    tenant: service
+    tenant_description: "Service Tenant"
+
+  - user: keystone
+    password: "{{ keystone_PASS }}"
+    email: keystone@admin.com
+    role: admin
+    tenant: service
+    tenant_description: "Service Tenant"
+
+  - user: neutron
+    password: "{{ NEUTRON_PASS }}"
+    email: neutron@admin.com
+    role: admin
+    tenant: service
+    tenant_description: "Service Tenant"
+
+  - user: ceilometer
+    password: "{{ CEILOMETER_PASS }}"
+    email: ceilometer@admin.com
+    role: admin
+    tenant: service
+    tenant_description: "Service Tenant"
+
+  - user: cinder
+    password: "{{ CINDER_PASS }}"
+    email: cinder@admin.com
+    role: admin
+    tenant: service
+    tenant_description: "Service Tenant"
diff --git a/deploy/adapters/ansible/roles/neutron-compute/templates/neutron_init.sh b/deploy/adapters/ansible/roles/neutron-compute/templates/neutron_init.sh
deleted file mode 100644
index b92e202f..00000000
--- a/deploy/adapters/ansible/roles/neutron-compute/templates/neutron_init.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
-
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}}
-
diff --git a/deploy/adapters/ansible/roles/neutron-controller/templates/neutron_init.sh b/deploy/adapters/ansible/roles/neutron-controller/templates/neutron_init.sh
deleted file mode 100644
index b92e202f..00000000
--- a/deploy/adapters/ansible/roles/neutron-controller/templates/neutron_init.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
-
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}}
-
diff --git a/deploy/adapters/ansible/roles/neutron-network/templates/neutron_init.sh b/deploy/adapters/ansible/roles/neutron-network/templates/neutron_init.sh
deleted file mode 100644
index b92e202f..00000000
--- a/deploy/adapters/ansible/roles/neutron-network/templates/neutron_init.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
-
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}}
-
diff --git a/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml b/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml
index df2d5dad..7085ddcf 100644
--- a/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml
+++ b/deploy/adapters/ansible/roles/nova-controller/tasks/nova_config.yml
@@ -1,11 +1,6 @@
 ---
 - name: nova db sync
-  command: su -s /bin/sh -c "nova-manage db sync" nova
-  register: result
-  until: result.rc == 0
-  run_once: True
-  retries: 5
-  delay: 3
+  nova_manage: action=dbsync
   notify:
     - restart nova service