summaryrefslogtreecommitdiffstats
path: root/deploy/compass_vm.sh
blob: dc391acc97fcc848092986e01e4e113caeffe767 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
#!/bin/bash
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Working directory holding all Compass VM artifacts (disk image, ISO, keys).
# Assumes WORK_DIR is exported by the calling deploy script — TODO confirm.
compass_vm_dir=$WORK_DIR/vm/compass
# Private SSH key copied off the boot ISO (see launch_compass); used for
# passwordless root logins into the Compass VM.
rsa_file=$compass_vm_dir/boot.rsa
# Shared SSH options: never record/verify host keys (the VM is rebuilt each
# deploy, so its host key changes) and authenticate with the boot key.
ssh_args="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $rsa_file"
# Destroy any existing Compass VM and remove its working directory.
# Globals:  compass_vm_dir (read)
# Outputs:  progress via log_info; virsh/umount noise is suppressed because
#           every step is best-effort cleanup (the VM may not exist yet).
# Returns:  0 (cleanup failures are intentionally ignored)
function tear_down_compass() {
    sudo virsh destroy compass > /dev/null 2>&1
    sudo virsh undefine compass > /dev/null 2>&1

    sudo umount "$compass_vm_dir/old" > /dev/null 2>&1
    sudo umount "$compass_vm_dir/new" > /dev/null 2>&1

    # ${var:?} aborts if compass_vm_dir is unset/empty, so this can never
    # degenerate into 'rm -rf /' style damage.
    sudo rm -rf "${compass_vm_dir:?}"

    log_info "tear_down_compass success!!!"
}

# Deploy the Compass core services by running the no-docker playbook
# through the shared install_compass helper.
function install_compass_core() {
    local playbook="compass_nodocker.yml"
    install_compass "$playbook"
}

# Register the target machines' PXE MAC addresses with the Compass installer
# and run the machine-registration playbook.
# Globals:  WORK_DIR (read), machines (read — comma-separated MAC list,
#           presumably set by the caller; verify against deploy entry point)
# Returns:  install_compass's status (non-zero if the playbook failed)
function set_compass_machine() {
    local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all

    # Drop any stale entries before appending the current values, so repeated
    # deploys do not accumulate duplicate keys in the group_vars file.
    sed -i -e '/test: true/d' -e '/pxe_boot_macs/d' "$config_file"
    echo "test: true" >> "$config_file"
    echo "pxe_boot_macs: [${machines}]" >> "$config_file"

    install_compass "compass_machine.yml"
}

# Run one compass-install ansible playbook against the Compass VM.
# Arguments: $1 - playbook filename under compass-install/install/
# Globals:   compass_vm_dir, rsa_file, WORK_DIR, COMPASS_SERVER, MGMT_IP (read)
# Returns:   the ansible-playbook exit status (0 on success)
function install_compass() {
    local inventory_file=$compass_vm_dir/inventory.file
    local exit_status

    # Point the installer at the Compass server address for this deploy.
    sed -i "s/mgmt_next_ip:.*/mgmt_next_ip: ${COMPASS_SERVER}/g" "$WORK_DIR/installer/compass-install/install/group_vars/all"
    echo "compass_nodocker ansible_ssh_host=$MGMT_IP ansible_ssh_port=22" > "$inventory_file"
    PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_HOST_KEY_CHECKING=false ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=60s' ansible-playbook -e pipeline=true --private-key=$rsa_file --user=root --connection=ssh --inventory-file=$inventory_file $WORK_DIR/installer/compass-install/install/$1
    exit_status=$?
    rm "$inventory_file"
    # Propagate the playbook result explicitly instead of relying on
    # '/bin/false' happening to be the function's last command.
    return "$exit_status"
}

# Poll the Compass VM over SSH until the OS install finishes or times out.
# Arguments: $1 - timeout budget in seconds (one probe per second)
# Globals:   ssh_args, MGMT_IP (read)
# Exits:     with status 1 if the VM is still unreachable after $1 attempts.
function wait_ok() {
    set +x
    log_info "wait_compass_ok enter"
    # Forget any stale host key from a previous deploy of the same address.
    ssh-keygen -f "/root/.ssh/known_hosts" -R $MGMT_IP >/dev/null 2>&1
    retry=0
    # ssh_args is intentionally unquoted: it carries several -o options that
    # must be split into separate words.
    while ! timeout 1s ssh $ssh_args root@$MGMT_IP "exit" >/dev/null 2>&1; do
        log_progress "os install time used: $((retry * 100 / $1))%"
        sleep 1
        retry=$((retry + 1))
        if [[ $retry -ge $1 ]]; then
            # One last unsilenced probe so the failure reason reaches the log.
            timeout 1s ssh $ssh_args root@$MGMT_IP "exit"
            log_error "os install time out"
            exit 1
        fi
    done
    set -x
    log_warn "os install time used: 100%"
    log_info "wait_compass_ok exit"
}

# Build a customized Compass boot ISO, define and start the Compass VM with
# libvirt, then wait for the OS install and deploy the Compass core.
# Globals:  compass_vm_dir, rsa_file, WORK_DIR, COMPASS_DIR, plus the
#           REPLACE_* network variables baked into isolinux.cfg (all read)
# Exits:    1 on virsh start failure, OS-install timeout, or playbook failure.
function launch_compass() {
    local old_mnt=$compass_vm_dir/old
    local new_mnt=$compass_vm_dir/new
    local old_iso=$WORK_DIR/iso/centos.iso
    local new_iso=$compass_vm_dir/centos.iso

    log_info "launch_compass enter"
    tear_down_compass

    set -e
    mkdir -p "$compass_vm_dir" "$old_mnt"
    sudo mount -o loop "$old_iso" "$old_mnt"
    # Copy the read-only ISO tree into a writable directory so it can be
    # customized; 'cpio -pd' creates $new_mnt and its subdirectories.
    cd "$old_mnt"; find . | cpio -pd "$new_mnt"; cd -

    sudo umount "$old_mnt"

    chmod 755 -R "$new_mnt"

    cp "$COMPASS_DIR/util/isolinux.cfg" "$new_mnt/isolinux/" -f
    cp "$COMPASS_DIR/util/ks.cfg" "$new_mnt/isolinux/" -f

    # Bake the deployment network settings into the boot-loader config.
    sed -i -e "s/REPLACE_MGMT_IP/$MGMT_IP/g" \
           -e "s/REPLACE_MGMT_NETMASK/$MGMT_MASK/g" \
           -e "s/REPLACE_GW/$MGMT_GW/g" \
           -e "s/REPLACE_INSTALL_IP/$COMPASS_SERVER/g" \
           -e "s/REPLACE_INSTALL_NETMASK/$INSTALL_MASK/g" \
           -e "s/REPLACE_COMPASS_EXTERNAL_NETMASK/$COMPASS_EXTERNAL_MASK/g" \
           -e "s/REPLACE_COMPASS_EXTERNAL_IP/$COMPASS_EXTERNAL_IP/g" \
           -e "s/REPLACE_COMPASS_EXTERNAL_GW/$COMPASS_EXTERNAL_GW/g" \
           "$new_mnt/isolinux/isolinux.cfg"

    # DNS servers are optional; substitute only when the caller provided them.
    if [[ -n $COMPASS_DNS1 ]]; then
        sed -i -e "s/REPLACE_COMPASS_DNS1/$COMPASS_DNS1/g" "$new_mnt/isolinux/isolinux.cfg"
    fi

    if [[ -n $COMPASS_DNS2 ]]; then
        sed -i -e "s/REPLACE_COMPASS_DNS2/$COMPASS_DNS2/g" "$new_mnt/isolinux/isolinux.cfg"
    fi

    # Fresh passwordless key pair: the key goes onto the ISO for the installed
    # system, and a local copy becomes $rsa_file for later root SSH logins.
    ssh-keygen -f "$new_mnt/bootstrap/boot.rsa" -t rsa -N ''
    cp "$new_mnt/bootstrap/boot.rsa" "$rsa_file"

    rm -rf "$new_mnt/.rr_moved" "$new_mnt/rr_moved"
    # Re-master the customized tree into a bootable ISO.
    sudo mkisofs -quiet -r -J -R -b isolinux/isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -hide-rr-moved -x "lost+found:" -o "$new_iso" "$new_mnt"

    rm -rf "$old_mnt" "$new_mnt"

    qemu-img create -f qcow2 "$compass_vm_dir/disk.img" 100G

    # Render the libvirt domain XML from the template. Write it under
    # $compass_vm_dir so the path matches the 'virsh define' below (the
    # original wrote to $WORK_DIR/vm/compass, the same directory only by
    # construction of compass_vm_dir).
    sed -e "s/REPLACE_MEM/$COMPASS_VIRT_MEM/g" \
        -e "s/REPLACE_CPU/$COMPASS_VIRT_CPUS/g" \
        -e "s#REPLACE_IMAGE#$compass_vm_dir/disk.img#g" \
        -e "s#REPLACE_ISO#$compass_vm_dir/centos.iso#g" \
        -e "s/REPLACE_NET_MGMT/mgmt/g" \
        -e "s/REPLACE_NET_INSTALL/install/g" \
        -e "s/REPLACE_NET_EXTERNAL/external/g" \
        "$COMPASS_DIR/deploy/template/vm/compass.xml" \
        > "$compass_vm_dir/libvirt.xml"

    sudo virsh define "$compass_vm_dir/libvirt.xml"
    # Test the start inside the 'if' itself: with 'set -e' active, a bare
    # failing 'virsh start' would abort the script before the original
    # post-hoc $? check could ever run (it was dead code).
    if ! sudo virsh start compass; then
        log_error "virsh start compass failed"
        exit 1
    fi

    if ! wait_ok 500; then
        log_error "install os timeout"
        exit 1
    fi

    if ! install_compass_core; then
        log_error "install compass core failed"
        exit 1
    fi

    set +e
    log_info "launch_compass exit"
}
ass="p">('ceilometer_backend')) == 'mongodb' { include ::mongodb::globals include ::mongodb::server $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') $mongo_node_string = join($mongo_node_ips_with_port, ',') $mongodb_replset = hiera('mongodb::server::replset') $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" if downcase(hiera('bootstrap_nodeid')) == $::hostname { mongodb_replset { $mongodb_replset : members => $mongo_node_ips_with_port, } } } # Redis $redis_node_ips = hiera('redis_node_ips') $redis_master_hostname = downcase(hiera('bootstrap_nodeid')) if $redis_master_hostname == $::hostname { $slaveof = undef } else { $slaveof = "${redis_master_hostname} 6379" } class {'::redis' : slaveof => $slaveof, } if count($redis_node_ips) > 1 { Class['::tripleo::redis_notification'] -> Service['redis-sentinel'] include ::redis::sentinel include ::tripleo::redis_notification } if str2bool(hiera('enable_galera', 'true')) { $mysql_config_file = '/etc/my.cnf.d/galera.cnf' } else { $mysql_config_file = '/etc/my.cnf.d/server.cnf' } # TODO Galara class { 'mysql::server': config_file => $mysql_config_file, override_options => { 'mysqld' => { 'bind-address' => hiera('mysql_bind_host'), 'max_connections' => '1024', 'open_files_limit' => '-1', }, } } # FIXME: this should only occur on the bootstrap host (ditto for db syncs) # Create all the database schemas # Example DSN format: mysql://user:password@host/dbname $allowed_hosts = ['%',hiera('mysql_bind_host')] $keystone_dsn = split(hiera('keystone::database_connection'), '[@:/?]') class { 'keystone::db::mysql': user => $keystone_dsn[3], password => $keystone_dsn[4], host => $keystone_dsn[5], dbname => $keystone_dsn[6], allowed_hosts => $allowed_hosts, } $glance_dsn = split(hiera('glance::api::database_connection'), '[@:/?]') class { 'glance::db::mysql': user => $glance_dsn[3], password => $glance_dsn[4], host => $glance_dsn[5], dbname => $glance_dsn[6], 
allowed_hosts => $allowed_hosts, } $nova_dsn = split(hiera('nova::database_connection'), '[@:/?]') class { 'nova::db::mysql': user => $nova_dsn[3], password => $nova_dsn[4], host => $nova_dsn[5], dbname => $nova_dsn[6], allowed_hosts => $allowed_hosts, } $neutron_dsn = split(hiera('neutron::server::database_connection'), '[@:/?]') class { 'neutron::db::mysql': user => $neutron_dsn[3], password => $neutron_dsn[4], host => $neutron_dsn[5], dbname => $neutron_dsn[6], allowed_hosts => $allowed_hosts, } $cinder_dsn = split(hiera('cinder::database_connection'), '[@:/?]') class { 'cinder::db::mysql': user => $cinder_dsn[3], password => $cinder_dsn[4], host => $cinder_dsn[5], dbname => $cinder_dsn[6], allowed_hosts => $allowed_hosts, } $heat_dsn = split(hiera('heat::database_connection'), '[@:/?]') class { 'heat::db::mysql': user => $heat_dsn[3], password => $heat_dsn[4], host => $heat_dsn[5], dbname => $heat_dsn[6], allowed_hosts => $allowed_hosts, } if downcase(hiera('ceilometer_backend')) == 'mysql' { $ceilometer_dsn = split(hiera('ceilometer_mysql_conn_string'), '[@:/?]') class { 'ceilometer::db::mysql': user => $ceilometer_dsn[3], password => $ceilometer_dsn[4], host => $ceilometer_dsn[5], dbname => $ceilometer_dsn[6], allowed_hosts => $allowed_hosts, } } $rabbit_nodes = hiera('rabbit_node_ips') if count($rabbit_nodes) > 1 { class { '::rabbitmq': config_cluster => true, cluster_nodes => $rabbit_nodes, tcp_keepalive => false, config_kernel_variables => hiera('rabbitmq_kernel_variables'), config_variables => hiera('rabbitmq_config_variables'), environment_variables => hiera('rabbitmq_environment'), } rabbitmq_policy { 'ha-all@/': pattern => '^(?!amq\.).*', definition => { 'ha-mode' => 'all', }, } } else { include ::rabbitmq } # pre-install swift here so we can build rings include ::swift $cinder_enable_rbd_backend = hiera('cinder_enable_rbd_backend', false) $enable_ceph = $cinder_enable_rbd_backend if $enable_ceph { class { 'ceph::profile::params': mon_initial_members 
=> downcase(hiera('ceph_mon_initial_members')) } include ::ceph::profile::mon } if str2bool(hiera('enable_ceph_storage', 'false')) { if str2bool(hiera('ceph_osd_selinux_permissive', true)) { exec { 'set selinux to permissive on boot': command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", path => ["/usr/bin", "/usr/sbin"], } exec { 'set selinux to permissive': command => "setenforce 0", onlyif => "which setenforce && getenforce | grep -i 'enforcing'", path => ["/usr/bin", "/usr/sbin"], } -> Class['ceph::profile::osd'] } include ::ceph::profile::client include ::ceph::profile::osd } } #END STEP 2 if hiera('step') >= 3 { include ::keystone #TODO: need a cleanup-keystone-tokens.sh solution here keystone_config { 'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2'; } file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]: ensure => 'directory', owner => 'keystone', group => 'keystone', require => Package['keystone'], } file { '/etc/keystone/ssl/certs/signing_cert.pem': content => hiera('keystone_signing_certificate'), owner => 'keystone', group => 'keystone', notify => Service['keystone'], require => File['/etc/keystone/ssl/certs'], } file { '/etc/keystone/ssl/private/signing_key.pem': content => hiera('keystone_signing_key'), owner => 'keystone', group => 'keystone', notify => Service['keystone'], require => File['/etc/keystone/ssl/private'], } file { '/etc/keystone/ssl/certs/ca.pem': content => hiera('keystone_ca_certificate'), owner => 'keystone', group => 'keystone', notify => Service['keystone'], require => File['/etc/keystone/ssl/certs'], } $glance_backend = downcase(hiera('glance_backend', 'swift')) case $glance_backend { swift: { $backend_store = 'glance.store.swift.Store' } file: { $backend_store = 'glance.store.filesystem.Store' } rbd: { $backend_store = 'glance.store.rbd.Store' } default: { 
fail('Unrecognized glance_backend parameter.') } } $http_store = ['glance.store.http.Store'] $glance_store = concat($http_store, $backend_store) # TODO: notifications, scrubber, etc. include ::glance class { 'glance::api': known_stores => $glance_store } include ::glance::registry include join(['::glance::backend::', $glance_backend]) include ::nova include ::nova::api include ::nova::cert include ::nova::conductor include ::nova::consoleauth include ::nova::network::neutron include ::nova::vncproxy include ::nova::scheduler include ::neutron include ::neutron::server include ::neutron::agents::l3 include ::neutron::agents::dhcp include ::neutron::agents::metadata file { '/etc/neutron/dnsmasq-neutron.conf': content => hiera('neutron_dnsmasq_options'), owner => 'neutron', group => 'neutron', notify => Service['neutron-dhcp-service'], require => Package['neutron'], } class { 'neutron::plugins::ml2': flat_networks => split(hiera('neutron_flat_networks'), ','), tenant_network_types => [hiera('neutron_tenant_network_type')], } class { 'neutron::agents::ml2::ovs': bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), tunnel_types => split(hiera('neutron_tunnel_types'), ','), } Service['neutron-server'] -> Service['neutron-dhcp-service'] Service['neutron-server'] -> Service['neutron-l3'] Service['neutron-server'] -> Service['neutron-ovs-agent-service'] Service['neutron-server'] -> Service['neutron-metadata'] include ::cinder include ::cinder::api include ::cinder::glance include ::cinder::scheduler include ::cinder::volume class {'cinder::setup_test_volume': size => join([hiera('cinder_lvm_loop_device_size'), 'M']), } $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true) if $cinder_enable_iscsi { $cinder_iscsi_backend = 'tripleo_iscsi' cinder::backend::iscsi { $cinder_iscsi_backend : iscsi_ip_address => hiera('cinder_iscsi_ip_address'), iscsi_helper => hiera('cinder_iscsi_helper'), } } if $enable_ceph { Ceph_pool { pg_num => 
hiera('ceph::profile::params::osd_pool_default_pg_num'), pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'), size => hiera('ceph::profile::params::osd_pool_default_size'), } $ceph_pools = hiera('ceph_pools') ceph::pool { $ceph_pools : } } if $cinder_enable_rbd_backend { $cinder_rbd_backend = 'tripleo_ceph' cinder_config { "${cinder_rbd_backend}/host": value => 'hostgroup'; } cinder::backend::rbd { $cinder_rbd_backend : rbd_pool => 'volumes', rbd_user => 'openstack', rbd_secret_uuid => hiera('ceph::profile::params::fsid'), require => Ceph::Pool['volumes'], } } if hiera('cinder_enable_netapp_backend', false) { $cinder_netapp_backend = hiera('cinder::backend::netapp::title') cinder_config { "${cinder_netapp_backend}/host": value => 'hostgroup'; } if hiera('cinder_netapp_nfs_shares', undef) { $cinder_netapp_nfs_shares = split(hiera('cinder_netapp_nfs_shares', undef), ',') } cinder::backend::netapp { $cinder_netapp_backend : nfs_shares => $cinder_netapp_nfs_shares, } } $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend]) class { '::cinder::backends' : enabled_backends => $cinder_enabled_backends, } # swift proxy include ::memcached include ::swift::proxy include ::swift::proxy::proxy_logging include ::swift::proxy::healthcheck include ::swift::proxy::cache include ::swift::proxy::keystone include ::swift::proxy::authtoken include ::swift::proxy::staticweb include ::swift::proxy::ratelimit include ::swift::proxy::catch_errors include ::swift::proxy::tempurl include ::swift::proxy::formpost # swift storage if str2bool(hiera('enable_swift_storage', 'true')) { class {'swift::storage::all': mount_check => str2bool(hiera('swift_mount_check')) } if(!defined(File['/srv/node'])) { file { '/srv/node': ensure => directory, owner => 'swift', group => 'swift', require => Package['openstack-swift'], } } $swift_components = ['account', 'container', 'object'] swift::storage::filter::recon { 
$swift_components : } swift::storage::filter::healthcheck { $swift_components : } } # Ceilometer $ceilometer_backend = downcase(hiera('ceilometer_backend')) case $ceilometer_backend { /mysql/ : { $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string') } default : { $ceilometer_database_connection = $ceilometer_mongodb_conn_string } } include ::ceilometer include ::ceilometer::api include ::ceilometer::agent::notification include ::ceilometer::agent::central include ::ceilometer::alarm::notifier include ::ceilometer::alarm::evaluator include ::ceilometer::expirer include ::ceilometer::collector include ceilometer::agent::auth class { '::ceilometer::db' : database_connection => $ceilometer_database_connection, } Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } # Heat include ::heat include ::heat::api include ::heat::api_cfn include ::heat::api_cloudwatch include ::heat::engine # Horizon $vhost_params = { add_listen => false } class { 'horizon': cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), vhost_extra_params => $vhost_params, } $snmpd_user = hiera('snmpd_readonly_user_name') snmp::snmpv3_user { $snmpd_user: authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } class { 'snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } hiera_include('controller_classes') } #END STEP 3