From a07334031b4b4b055a19c971d0cc3164f95d5a0b Mon Sep 17 00:00:00 2001
From: baigk
Date: Tue, 22 Sep 2015 22:57:22 +0800
Subject: add public vip for external access

JIRA: COMPASS-69

Change-Id: I7c2b6a026d2fb002174aa5f0a619d9fe6982e528
Signed-off-by: baigk
---
 .../cinder-controller/templates/cinder_init.sh     |  2 +-
 .../ansible/roles/dashboard/tasks/main.yml         |  3 +
 .../ansible/roles/dashboard/templates/ports.j2     | 15 +++++
 deploy/adapters/ansible/roles/ha/files/notify.sh   |  4 --
 deploy/adapters/ansible/roles/ha/tasks/main.yml    |  6 --
 .../ansible/roles/ha/templates/failover.j2         | 65 ----------------------
 .../ansible/roles/ha/templates/haproxy.cfg         | 14 +++++
 .../ansible/roles/ha/templates/keepalived.conf     | 42 ++++++--------
 .../ansible/roles/keystone/templates/keystone_init |  8 +--
 .../neutron-controller/tasks/neutron_config.yml    | 12 ++--
 deploy/conf/baremetal_cluster_sh.yml               | 43 ++++++++++++++
 deploy/host_vm.sh                                  | 61 --------------------
 12 files changed, 105 insertions(+), 170 deletions(-)
 create mode 100644 deploy/adapters/ansible/roles/dashboard/templates/ports.j2
 delete mode 100644 deploy/adapters/ansible/roles/ha/files/notify.sh
 delete mode 100644 deploy/adapters/ansible/roles/ha/templates/failover.j2
 create mode 100644 deploy/conf/baremetal_cluster_sh.yml
 delete mode 100644 deploy/host_vm.sh

diff --git a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh
index abe4d06a..bc92bac0 100644
--- a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh
+++ b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder_init.sh
@@ -2,5 +2,5 @@ keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-role-add --user=cinder --tenant=service --role=admin
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=cinder --type=volume --description="OpenStack Block Storage"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ public_vip.ip }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8776/v1/%\(tenant_id\)s
diff --git a/deploy/adapters/ansible/roles/dashboard/tasks/main.yml b/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
index 2cad1174..9206fda4 100644
--- a/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
@@ -5,6 +5,9 @@
   action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
   with_items: packages | union(packages_noarch)
 
+- name: set apache2 config
+  template: src=ports.j2 dest=/etc/apache2/ports.conf backup=yes
+
 - name: remove ubuntu theme
   action: "{{ ansible_pkg_mgr }} name=openstack-dashboard-ubuntu-theme state=absent"
diff --git a/deploy/adapters/ansible/roles/dashboard/templates/ports.j2 b/deploy/adapters/ansible/roles/dashboard/templates/ports.j2
new file mode 100644
index 00000000..0bfa0428
--- /dev/null
+++ b/deploy/adapters/ansible/roles/dashboard/templates/ports.j2
@@ -0,0 +1,15 @@
+# if you just change the port or add more ports here, you will likely also
+# have to change the VirtualHost statement in
+# /etc/apache2/sites-enabled/000-default.conf
+
+Listen {{ internal_ip }}:80
+
+<IfModule ssl_module>
+    Listen 443
+</IfModule>
+
+<IfModule mod_gnutls.c>
+    Listen 443
+</IfModule>
+
+# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
diff --git a/deploy/adapters/ansible/roles/ha/files/notify.sh b/deploy/adapters/ansible/roles/ha/files/notify.sh
deleted file mode 100644
index 5edffe84..00000000
--- a/deploy/adapters/ansible/roles/ha/files/notify.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-python /usr/local/bin/failover.py $1
-mysql -uroot -e"flush hosts"
-service mysql restart
diff --git a/deploy/adapters/ansible/roles/ha/tasks/main.yml b/deploy/adapters/ansible/roles/ha/tasks/main.yml
index edd5e6dd..668f6847 100644
--- a/deploy/adapters/ansible/roles/ha/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ha/tasks/main.yml
@@ -57,12 +57,6 @@
 - name: copy galera_chk file
   copy: src=galera_chk dest=/usr/local/bin/galera_chk mode=0777
 
-- name: copy notify file
-  copy: src=notify.sh dest=/usr/local/bin/notify.sh mode=0777
-
-- name: copy notify template file
-  template: src=failover.j2 dest=/usr/local/bin/failover.py mode=0777
-
 - name: add network service
   lineinfile: dest=/etc/services state=present line="mysqlchk 9200/tcp"
diff --git a/deploy/adapters/ansible/roles/ha/templates/failover.j2 b/deploy/adapters/ansible/roles/ha/templates/failover.j2
deleted file mode 100644
index 3b08cf2d..00000000
--- a/deploy/adapters/ansible/roles/ha/templates/failover.j2
+++ /dev/null
@@ -1,65 +0,0 @@
-import ConfigParser, os, socket
-import logging as LOG
-import pxssh
-import sys
-import re
-
-LOG_FILE="/var/log/mysql_failover"
-try:
-    os.remove(LOG_FILE)
-except:
-    pass
-
-LOG.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename=LOG_FILE,level=LOG.DEBUG)
-ha_vip = {{ internal_vip.ip }}
-LOG.info("ha_vip: %s" % ha_vip)
-
-#ha_vip = "10.1.0.50"
-galera_path = '/etc/mysql/conf.d/wsrep.cnf'
-pattern = re.compile(r"gcomm://(?P<prev_ip>.*)")
-
-def ssh_get_hostname(ip):
-    try:
-        s = pxssh.pxssh()
-        s.login("%s" % ip, "root", "root")
-        s.sendline('hostname')     # run a command
-        s.prompt()                 # match the prompt
-        result = s.before.strip()  # print everything before the prompt.
-        return result.split(os.linesep)[1]
-    except pxssh.ExceptionPxssh as e:
-        LOG.error("pxssh failed on login.")
-        raise
-
-def failover(mode):
-    config = ConfigParser.ConfigParser()
-    config.optionxform = str
-    config.readfp(open(galera_path))
-    wsrep_cluster_address = config.get("mysqld", "wsrep_cluster_address")
-    wsrep_cluster_address = pattern.match(wsrep_cluster_address).groupdict()["prev_ip"]
-
-    LOG.info("old wsrep_cluster_address = %s" % wsrep_cluster_address)
-
-    if mode == "master":
-        # refresh wsrep_cluster_address to null
-        LOG.info("I'm being master, set wsrep_cluster_address to null")
-        wsrep_cluster_address = ""
-
-    elif mode == "backup":
-        # refresh wsrep_cluster_address to master int ip
-        hostname = ssh_get_hostname(ha_vip)
-        wsrep_cluster_address = socket.gethostbyname(hostname)
-        LOG.info("I'm being slave, set wsrep_cluster_address to master internal ip")
-
-    LOG.info("new wsrep_cluster_address = %s" % wsrep_cluster_address)
-    wsrep_cluster_address = "gcomm://%s" % wsrep_cluster_address
-    config.set("mysqld", "wsrep_cluster_address", wsrep_cluster_address)
-    with open(galera_path, 'wb') as fp:
-        #config.write(sys.stdout)
-        config.write(fp)
-
-    os.system("service mysql restart")
-    LOG.info("failover success!!!")
-
-if __name__ == "__main__":
-    LOG.debug("call me: %s" % sys.argv)
-    failover(sys.argv[1])
diff --git a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
index f1a2312c..8f026fa4 100644
--- a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
+++ b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
@@ -36,6 +36,7 @@ listen proxy-glance_registry_cluster
 
 listen proxy-glance_api_cluster
     bind {{ internal_vip.ip }}:9292
+    bind {{ public_vip.ip }}:9292
     option tcpka
     option httpchk
     option tcplog
@@ -94,6 +95,7 @@ listen proxy-keystone_public_internal_cluster
 
 listen proxy-nova_compute_api_cluster
     bind {{ internal_vip.ip }}:8774
+    bind {{ public_vip.ip }}:8774
     mode tcp
     option httpchk
     option tcplog
@@ -104,6 +106,7 @@ listen proxy-nova_compute_api_cluster
 
 listen proxy-nova_metadata_api_cluster
     bind {{ internal_vip.ip }}:8775
+    bind {{ public_vip.ip }}:8775
     option tcpka
     option tcplog
     balance source
@@ -113,6 +116,7 @@ listen proxy-nova_metadata_api_cluster
 
 listen proxy-cinder_api_cluster
     bind {{ internal_vip.ip }}:8776
+    bind {{ public_vip.ip }}:8776
     mode tcp
     option httpchk
     option tcplog
@@ -121,6 +125,16 @@ listen proxy-cinder_api_cluster
     server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5
 {% endfor %}
 
+listen proxy-dashboard
+    bind {{ public_vip.ip }}:80
+    option tcpka
+    option httpchk
+    option tcplog
+    balance source
+{% for host,ip in haproxy_hosts.items() %}
+    server {{ host }} {{ ip }}:80 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
 listen stats
     mode http
     bind 0.0.0.0:8888
diff --git a/deploy/adapters/ansible/roles/ha/templates/keepalived.conf b/deploy/adapters/ansible/roles/ha/templates/keepalived.conf
index f1e6db5d..a2e008a7 100644
--- a/deploy/adapters/ansible/roles/ha/templates/keepalived.conf
+++ b/deploy/adapters/ansible/roles/ha/templates/keepalived.conf
@@ -19,30 +19,24 @@ vrrp_instance internal_vip {
     virtual_ipaddress {
         {{ internal_vip.ip }}/{{ internal_vip.netmask }} dev {{ internal_vip.interface }}
     }
+}
 
-    notify_master "/usr/local/bin/notify.sh master"
-    notify_backup "/usr/local/bin/notify.sh backup"
+vrrp_instance public_vip {
+    interface {{ network_cfg.public_vip.interface }}
+    virtual_router_id {{ vrouter_id_public }}
+    state BACKUP
+    nopreempt
+    preempt_delay 30
+    advert_int 1
+    priority 100
 
-}
+    authentication {
+        auth_type PASS
+        auth_pass 4321
+    }
-
-#vrrp_instance public_vip {
-#    interface {{ network_cfg.public_vip.interface }}
-#    virtual_router_id {{ vrouter_id_public }}
-#    state BACKUP
-#    nopreempt
-#    preempt_delay 30
-#    advert_int 1
-#    priority 100
-#
-#    authentication {
-#        auth_type PASS
-#        auth_pass 4321
-#    }
-#
-#    virtual_ipaddress {
-#        {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }} dev {{ network_cfg.public_vip.interface }}
-#    }
-#
-#}
-#
-# notify_backup "/usr/local/bin/notify.sh backup"
+    virtual_ipaddress {
+        {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }} dev {{ network_cfg.public_vip.interface }}
+    }
+
+}
diff --git a/deploy/adapters/ansible/roles/keystone/templates/keystone_init b/deploy/adapters/ansible/roles/keystone/templates/keystone_init
index d9cc65a9..c7e22324 100644
--- a/deploy/adapters/ansible/roles/keystone/templates/keystone_init
+++ b/deploy/adapters/ansible/roles/keystone/templates/keystone_init
@@ -22,7 +22,7 @@ keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}
 
 # regist keystone
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=keystone --type=identity --description="OpenStack Identity"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ internal_vip.ip }}:5000/v2.0 --internalurl=http://{{ internal_vip.ip }}:5000/v2.0 --adminurl=http://{{ internal_vip.ip }}:35357/v2.0
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ public_vip.ip }}:5000/v2.0 --internalurl=http://{{ internal_vip.ip }}:5000/v2.0 --adminurl=http://{{ internal_vip.ip }}:35357/v2.0
 
 # Create a glance user that the Image Service can use to authenticate with the Identity service
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name=glance --pass={{ GLANCE_PASS }} --email=glance@example.com
@@ -30,7 +30,7 @@ keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}
 
 #Register the Image Service with the Identity service so that other OpenStack services can locate it
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=glance --type=image --description="OpenStack Image Service"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ internal_vip.ip }}:9292 --internalurl=http://{{ internal_vip.ip }}:9292 --adminurl=http://{{ internal_vip.ip }}:9292
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ public_vip.ip }}:9292 --internalurl=http://{{ internal_vip.ip }}:9292 --adminurl=http://{{ internal_vip.ip }}:9292
 
 #Create a nova user that Compute uses to authenticate with the Identity Service
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name=nova --pass={{ NOVA_PASS }} --email=nova@example.com
@@ -38,10 +38,10 @@ keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}
 
 # register Compute with the Identity Service so that other OpenStack services can locate it
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name=nova --type=compute --description="OpenStack Compute"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ public_vip.ip }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ internal_vip.ip }}:8774/v2/%\(tenant_id\)s
 
 # register netron user, role and service
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-create --name neutron --pass {{ NEUTRON_PASS }} --email neutron@example.com
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 user-role-add --user neutron --tenant service --role admin
 keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-create --name neutron --type network --description "OpenStack Networking"
-keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ internal_vip.ip }}:9696 --adminurl http://{{ internal_vip.ip }}:9696 --internalurl http://{{ internal_vip.ip }}:9696
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ internal_vip.ip }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ public_vip.ip }}:9696 --adminurl http://{{ internal_vip.ip }}:9696 --internalurl http://{{ internal_vip.ip }}:9696
diff --git a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml
index 991e33cc..26758f58 100644
--- a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml
+++ b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_config.yml
@@ -9,10 +9,12 @@
   notify:
     - restart neutron control services
 
-- name: restart neutron-server
-  service: name=neutron-server state=restarted enabled=yes
-  delegate_to: "{{ item }}"
-  run_once: True
-  with_items: groups['controller']
+- name: restart first neutron-server
+  service: name=neutron-server state=restarted enabled=yes
+  when: inventory_hostname == groups['controller'][0]
+
+- name: restart other neutron-server
+  service: name=neutron-server state=restarted enabled=yes
+  when: inventory_hostname != groups['controller'][0]
 
 - meta: flush_handlers
diff --git a/deploy/conf/baremetal_cluster_sh.yml b/deploy/conf/baremetal_cluster_sh.yml
new file mode 100644
index 00000000..1078cb5c
--- /dev/null
+++ b/deploy/conf/baremetal_cluster_sh.yml
@@ -0,0 +1,43 @@
+
+TYPE: baremetal
+FLAVOR: cluster
+POWER_TOOL: ipmitool
+
+ipmiUser: root
+ipmiPass: Huawei@123
+
+hosts:
+  - name: host1
+    mac: 'D8:49:0B:DA:2A:28'
+    ipmiUser: root
+    ipmiPass: Huawei@123
+    ipmiIp: 192.168.2.145
+    roles:
+      - controller
+      - ha
+
+  - name: host2
+    mac: 'D8:49:0B:DA:5B:5D'
+    ipmiIp: 192.168.2.155
+    roles:
+      - controller
+      - ha
+
+  - name: host3
+    mac: 'D8:49:0B:DA:5A:B7'
+    ipmiIp: 192.168.2.165
+    roles:
+      - controller
+      - ha
+
+  - name: host4
+    mac: 'D8:49:0B:DA:58:99'
+    ipmiIp: 192.168.2.175
+    roles:
+      - compute
+
+  - name: host5
+    mac: 'D8:49:0B:DA:56:85'
+    ipmiIp: 192.168.2.185
+    roles:
+      - compute
diff --git a/deploy/host_vm.sh b/deploy/host_vm.sh
deleted file mode 100644
index 0754b1f4..00000000
--- a/deploy/host_vm.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-host_vm_dir=$WORK_DIR/vm
-function tear_down_machines() {
-    for i in $HOSTNAMES; do
-        sudo virsh destroy $i
-        sudo virsh undefine $i
-        rm -rf $host_vm_dir/$i
-    done
-}
-
-function reboot_hosts() {
-    log_warn "reboot_hosts do nothing"
-}
-
-function launch_host_vms() {
-    old_ifs=$IFS
-    IFS=,
-    tear_down_machines
-    #function_bod
-    mac_array=($machines)
-    log_info "bringing up pxe boot vms"
-    i=0
-    for host in $HOSTNAMES; do
-        log_info "creating vm disk for instance $host"
-        vm_dir=$host_vm_dir/$host
-        mkdir -p $vm_dir
-        sudo qemu-img create -f raw $vm_dir/disk.img ${VIRT_DISK}
-        # create vm xml
-        sed -e "s/REPLACE_MEM/$VIRT_MEM/g" \
-            -e "s/REPLACE_CPU/$VIRT_CPUS/g" \
-            -e "s/REPLACE_NAME/$host/g" \
-            -e "s#REPLACE_IMAGE#$vm_dir/disk.img#g" \
-            -e "s/REPLACE_BOOT_MAC/${mac_array[i]}/g" \
-            -e "s/REPLACE_BRIDGE_MGMT/br_install/g" \
-            -e "s/REPLACE_BRIDGE_TENANT/br_install/g" \
-            -e "s/REPLACE_BRIDGE_PUBLIC/br_install/g" \
-            -e "s/REPLACE_BRIDGE_STORAGE/br_install/g" \
-            $COMPASS_DIR/deploy/template/vm/host.xml \
-            > $vm_dir/libvirt.xml
-
-        sudo virsh define $vm_dir/libvirt.xml
-        sudo virsh start $host
-        let i=i+1
-    done
-    IFS=$old_ifs
-}
-
-function get_host_macs() {
-    local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
-    local mac_generator=${COMPASS_DIR}/deploy/mac_generator.sh
-    local machines=
-
-    chmod +x $mac_generator
-    mac_array=`$mac_generator $VIRT_NUMBER`
-    machines=`echo $mac_array|sed 's/ /,/g'`
-
-    echo "test: true" >> $config_file
-    echo "pxe_boot_macs: [${machines}]" >> $config_file
-
-    echo $machines
-}