@@ -67,3 +67,9 @@
   sudo: True
   roles:
     - monitor
+
+- hosts: all
+  remote_user: root
+  sudo: True
+  roles:
+    - secgroup
@@ -3,6 +3,7 @@
   template: src=cinder.conf dest=/etc/cinder/cinder.conf

 - name: sync cinder db
-  cinder_manage: action=dbsync
+  #cinder_manage: action=dbsync
+  shell: cinder-manage db sync
   notify:
     - restart cinder control serveice
@@ -1,3 +1,3 @@
 if [[ ! -f /var/cinder.img ]]; then
-  dd if=/dev/zero of=/var/cinder.img bs=1 count=1 seek=$1
+  dd if=/dev/zero of=/var/cinder.img bs=1 count=0 seek=$1
 fi
@@ -2,5 +2,5 @@ size=`df /var | awk '$3 ~ /[0-9]+/ { print $4 }'`;
 if [[ $size -gt 2000000000 ]]; then
     echo -n 2000000000000;
 else
-    echo -n $((size * 1000));
+    echo -n $((size * 1000 / 512 * 512));
 fi
@@ -7,6 +7,7 @@
 maridb_packages:
   - MariaDB-Galera-server
   - MariaDB-client
   - galera
+  - MySQL-python

 services: []
@@ -1,6 +1,8 @@
 ---
 - name: sync glance db
-  glance_manage: action=dbsync
+  #glance_manage: action=dbsync
+  shell: glance-manage db sync
+  ignore_errors: True
   notify:
     - restart glance services

@@ -13,8 +13,6 @@
   with_items:
     - glance-api.conf
     - glance-registry.conf
-  notify:
-    - restart glance services

 - name: remove default sqlite db
   shell: rm /var/lib/glance/glance.sqlite || touch glance.sqllite.db.removed
@@ -17,7 +17,7 @@
     line="/opt/images *(rw,insecure,sync,all_squash)"
   run_once: True

-- name: restart nfs service
+- name: restart compass nfs service
   local_action: service name={{ item }} state=restarted enabled=yes
   with_items:
     - rpcbind
@@ -32,6 +32,10 @@
   shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
   register: ip_info

+- name: restart host nfs service
+  service: name={{ item }} state=restarted enabled=yes
+  with_items: '{{ nfs_services }}'
+
 - name: mount image directory
   shell: |
     mount -t nfs -onfsvers=3 {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images
@@ -6,6 +6,7 @@ packages:
 nfs_packages:
   - nfs-common
+nfs_services: []

 services:
   - glance-registry
   - glance-api
@@ -6,6 +6,10 @@
 nfs_packages:
   - nfs-utils
   - rpcbind
+nfs_services:
+  - rpcbind
+  - rpc-statd
+
 services:
   - openstack-glance-api
   - openstack-glance-registry
@@ -1,6 +1,7 @@
 ---
 - name: keystone-manage db-sync
-  keystone_manage: action=dbsync
+  #keystone_manage: action=dbsync
+  shell: keystone-manage db_sync

 - name: wait for keystone ready
   wait_for: port=35357 delay=3 timeout=10 host={{ internal_vip.ip }}
@@ -1,5 +1,6 @@
 ---
 packages:
+  - openstack-neutron
   - openstack-neutron-ml2
   - openstack-neutron-openvswitch

@@ -17,6 +17,12 @@
 - name: shut down and disable Neutron's openvswitch agent services
   service: name=neutron-plugin-openvswitch-agent state=stopped

+- name: Stop the Open vSwitch service and clear existing OVSDB
+  shell: >
+    ovs-vsctl del-br br-int ;
+    ovs-vsctl del-br br-tun ;
+    ovs-vsctl del-manager ;
+
 #- name: remove Neutron's openvswitch agent services
 #  shell: >
 #    update-rc.d neutron-plugin-openvswitch-agent remove
@@ -0,0 +1,10 @@
+---
+- name: restart controller relation service
+  service: name={{ item }} state=restarted enabled=yes
+  ignore_errors: True
+  with_items: controller_services
+
+- name: restart compute relation service
+  service: name={{ item }} state=restarted enabled=yes
+  ignore_errors: True
+  with_items: compute_services
@@ -0,0 +1,10 @@
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+  tags: secgroup
+
+- debug: msg={{ enable_secgroup }}
+  tags: secgroup
+
+- include: secgroup.yml
+  when: '{{ enable_secgroup }} == False'
+  tags: secgroup
@@ -0,0 +1,27 @@
+---
+- name: make sure template dir exists
+  file: path=/opt/os_templates state=directory mode=0755
+  tags: secgroup
+
+- name: copy configs
+  template: src={{ item.src}} dest=/opt/os_templates
+  with_items: "{{ configs_templates }}"
+  tags: secgroup
+
+- name: update controller configs
+  shell: '[ -f {{ item.1 }} ] && crudini --merge {{ item.1 }} < /opt/os_templates/{{ item.0.src }} || /bin/true'
+  tags: secgroup
+  with_subelements:
+    - configs_templates
+    - dest
+  notify: restart controller relation service
+  when: inventory_hostname in "{{ groups['controller'] }}"
+
+- name: update compute configs
+  shell: '[ -f {{ item.1 }} ] && crudini --merge {{ item.1 }} < /opt/os_templates/{{ item.0.src }} || /bin/true'
+  tags: secgroup
+  with_subelements:
+    - configs_templates
+    - dest
+  notify: restart compute relation service
+  when: inventory_hostname in "{{ groups['compute'] }}"
@@ -0,0 +1,4 @@
+[securitygroup]
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = False
+
@@ -0,0 +1,3 @@
+[DEFAULT]
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = nova
@@ -0,0 +1,27 @@
+---
+configs_templates:
+  - src: nova.j2
+    dest:
+      - /etc/nova/nova.conf
+  - src: neutron.j2
+    dest:
+      - /etc/neutron/plugins/ml2/ml2_conf.ini
+      - /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
+      - /etc/neutron/plugins/ml2/restproxy.ini
+
+controller_services:
+  - nova-api
+  - nova-cert
+  - nova-conductor
+  - nova-consoleauth
+  - nova-novncproxy
+  - nova-scheduler
+  - neutron-server
+  - neutron-plugin-openvswitch-agent
+  - neutron-l3-agent
+  - neutron-dhcp-agent
+  - neutron-metadata-agent
+
+compute_services:
+  - nova-compute
+  - neutron-plugin-openvswitch-agent
@@ -0,0 +1,27 @@
+---
+configs_templates:
+  - src: nova.j2
+    dest:
+      - /etc/nova/nova.conf
+  - src: neutron.j2
+    dest:
+      - /etc/neutron/plugins/ml2/ml2_conf.ini
+      - /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
+      - /etc/neutron/plugins/ml2/restproxy.ini
+
+controller_services:
+  - openstack-nova-api
+  - openstack-nova-cert
+  - openstack-nova-conductor
+  - openstack-nova-consoleauth
+  - openstack-nova-novncproxy
+  - openstack-nova-scheduler
+  - neutron-openvswitch-agent
+  - neutron-l3-agent
+  - neutron-dhcp-agent
+  - neutron-metadata-agent
+  - neutron-server
+
+compute_services:
+  - openstack-nova-compute
+  - neutron-openvswitch-agent
@@ -0,0 +1,3 @@
+---
+packages_noarch: []
+metering_secret: 1c5df72079b31fb47747
@@ -201,6 +201,9 @@ opts = [
     cfg.StrOpt('cluster_vip',
               help='cluster ip address',
               default=''),
+    cfg.StrOpt('enable_secgroup',
+              help='enable security group',
+              default='true'),
     cfg.StrOpt('network_cfg',
               help='netowrk config file',
               default=''),
@@ -695,17 +698,11 @@ class CompassClient(object):
         )
         """
         package_config['ha_proxy'] = {}
-
-        #TODO, we need two vip
-        if CONF.cluster_pub_vip:
-            package_config["ha_proxy"]["pub_vip"] = CONF.cluster_pub_vip
-
-        if CONF.cluster_prv_vip:
-            package_config["ha_proxy"]["prv_vip"] = CONF.cluster_prv_vip
-
         if CONF.cluster_vip:
             package_config["ha_proxy"]["vip"] = CONF.cluster_vip

+        package_config['enable_secgroup'] = (CONF.enable_secgroup == "true")
+
         status, resp = self.client.update_cluster_config(
             cluster_id, package_config=package_config)
         LOG.info(
@@ -5,8 +5,8 @@ export INSTALL_GW=${INSTALL_GW:-10.1.0.1}
 export INSTALL_IP_START=${INSTALL_IP_START:-10.1.0.1}
 export INSTALL_IP_END=${INSTALL_IP_END:-10.1.0.254}
 export MGMT_IP=${MGMT_IP:-192.168.200.2}
-export MGMT_MASK=${MAGMT_MASK:-255.255.252.0}
-export MGMT_GW=${MAGMT_GW:-192.168.200.1}
+export MGMT_MASK=${MGMT_MASK:-255.255.252.0}
+export MGMT_GW=${MGMT_GW:-192.168.200.1}
 export MGMT_IP_START=${MGMT_IP_START:-192.168.200.3}
 export MGMT_IP_END=${MGMT_IP_END:-192.168.200.254}
 export EXTERNAL_NIC=${EXTERNAL_NIC:-eth0}
@@ -18,6 +18,7 @@ export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24"
 export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-'10.1.0.50'}
 export MANAGEMENT_INTERFACE=${MANAGEMENT_INTERFACE:-eth0}
 export DASHBOARD_URL=""
+export ENABLE_SECGROUP="false"

 function next_ip {
   ip_addr=$1
@@ -22,6 +22,7 @@ function deploy_host(){
     --host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" \
     --machines=${machines//\'} --switch_credential="${SWITCH_CREDENTIAL}" --deploy_type="${TYPE}" \
     --deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" \
-    --cluster_vip="${VIP}" --network_cfg="$NETWORK" --neutron_cfg="$NEUTRON"
+    --cluster_vip="${VIP}" --network_cfg="$NETWORK" --neutron_cfg="$NEUTRON" \
+    --enable_secgroup="${ENABLE_SECGROUP}"
 }