From 209dbb5628f30cd7034e83b29fc38605503c6a83 Mon Sep 17 00:00:00 2001
From: grakiss
Date: Fri, 6 Nov 2015 17:55:14 +0800
Subject: using cinder volume instead of creating new img

JIRA: COMPASS-117
- use the already created cinder-volumes volume group; no need to create a ceph-volumes image

Change-Id: I6e8a93258596b7823ec37cad301d0fa57bbd95c3
Signed-off-by: grakiss
---
 .../ansible/roles/ceph-deploy/files/create_osd.sh  | 25 +++++-----------------
 .../ceph-deploy/tasks/ceph_install_Debian.yml      |  5 ++++-
 .../ceph-deploy/tasks/ceph_install_RedHat.yml      |  9 ++++++---
 3 files changed, 15 insertions(+), 24 deletions(-)

diff --git a/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh b/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
index 02860bde..d0c631fb 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
+++ b/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
@@ -5,17 +5,6 @@
 umount /var/local/osd
 rm -r /var/local/osd
 fi
-if [ ! -d "/ceph/images" ]; then
-mkdir -p /ceph/images
-fi
-
-rm -f /ceph/images/ceph-volumes.img
-
-if [ ! -f "/ceph/images/ceph-volumes.img" ]; then
-echo "create ceph-volumes.img"
-dd if=/dev/zero of=/ceph/images/ceph-volumes.img bs=1K seek=$(df / | awk '$3 ~ /[0-9]+/ { print $4 }') count=0 oflag=direct
-sgdisk -g --clear /ceph/images/ceph-volumes.img
-fi
 
 #safe check
 ps -ef |grep lvremove |awk '{print $2}' |xargs kill -9
@@ -23,24 +12,20 @@
 ps -ef |grep vgremove |awk '{print $2}' |xargs kill -9
 ps -ef |grep vgcreate |awk '{print $2}' |xargs kill -9
 ps -ef |grep lvcreate |awk '{print $2}' |xargs kill -9
 
-if [ -L "/dev/ceph-volumes/ceph0" ]; then
+if [ -L "/dev/cinder-volumes/ceph0" ]; then
 echo "remove lv vg"
-lvremove -f /dev/ceph-volumes/ceph0
-vgremove -f ceph-volumes
-rm -r /dev/ceph-volumes
+lvremove -f /dev/cinder-volumes/ceph0
 fi
 
-echo "vgcreate"
-vgcreate -y ceph-volumes $(losetup --show -f /ceph/images/ceph-volumes.img)
 echo "lvcreate"
-lvcreate -l 100%FREE -nceph0 ceph-volumes
+lvcreate -l 100%FREE -nceph0 cinder-volumes
 
 echo "mkfs"
-mkfs.xfs -f /dev/ceph-volumes/ceph0
+mkfs.xfs -f /dev/cinder-volumes/ceph0
 
 if [ ! -d "/var/local/osd" ]; then
 echo "mount osd"
 mkdir -p /var/local/osd
-mount /dev/ceph-volumes/ceph0 /var/local/osd
+mount /dev/cinder-volumes/ceph0 /var/local/osd
 fi
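
For readers who prefer not to apply the hunks mentally, the OSD-preparation flow in create_osd.sh after this change reads roughly as sketched below. Only the region the diff touches is reconstructed; the vgs guard on the first line is not part of the patch and is shown only to make the new precondition explicit, namely that a cinder-volumes volume group must already exist on the node.

    # Sketch of the post-patch flow, reconstructed from the hunks above.
    # The script no longer creates a loopback image or its own VG; it carves
    # the OSD store out of the pre-existing cinder-volumes volume group.
    vgs cinder-volumes || { echo "cinder-volumes VG not found"; exit 1; }   # guard not in the patch

    # remove a leftover ceph0 LV from a previous run, if any
    if [ -L "/dev/cinder-volumes/ceph0" ]; then
        lvremove -f /dev/cinder-volumes/ceph0
    fi

    # create, format and mount the OSD backing store
    lvcreate -l 100%FREE -nceph0 cinder-volumes
    mkfs.xfs -f /dev/cinder-volumes/ceph0
    mkdir -p /var/local/osd
    mount /dev/cinder-volumes/ceph0 /var/local/osd

Because lvcreate -l 100%FREE allocates every free extent left in the group, ceph0 ends up with whatever space Cinder has not already claimed at the time the script runs.
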
-d "/var/local/osd" ]; then echo "mount osd" mkdir -p /var/local/osd -mount /dev/ceph-volumes/ceph0 /var/local/osd +mount /dev/cinder-volumes/ceph0 /var/local/osd fi diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml index 3959d466..744120b8 100644 --- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml +++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml @@ -4,6 +4,9 @@ tags: - create_ceph_cluster +- name: default config for ceph + shell: cd {{ ceph_cluster_dir[0] }} && echo "osd_journal_size = 1024" >> ceph.conf && echo "osd_pool_default_size = 2" >> ceph.conf + - name: install ceph for every nodes includes jumpserver shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy install {{ ceph_cluster_hosts.stdout_lines[0]}} @@ -11,7 +14,7 @@ shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy --overwrite-conf mon create-initial - name: gather keys - shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy gatherkeys {{ ceph_cluster_hosts.stdout_lines[0] }} + shell: sleep 5 && cd {{ ceph_cluster_dir[0] }} && ceph-deploy gatherkeys {{ inventory_hostname }} ignore_errors: True - name: copy create_osd.sh to host1 diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_RedHat.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_RedHat.yml index 97d925d6..385c7571 100644 --- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_RedHat.yml +++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_RedHat.yml @@ -4,6 +4,9 @@ tags: - create_ceph_cluster +- name: default config for ceph + shell: cd {{ ceph_cluster_dir[0] }} && echo "osd_journal_size = 1024" >> ceph.conf && echo "osd_pool_default_size = 2" >> ceph.conf + - name: install ceph for every nodes includes jumpserver shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy install --no-adjust-repos --repo-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa --gpg-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa/ceph_key_release.asc {{ ceph_cluster_hosts.stdout_lines[0]}} @@ -11,7 +14,7 @@ shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy --overwrite-conf mon create-initial - name: gather keys - shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy gatherkeys {{ ceph_cluster_hosts.stdout_lines[0] }} + shell: sleep 5 && cd {{ ceph_cluster_dir[0] }} && ceph-deploy gatherkeys {{ inventory_hostname }} ignore_errors: True - name: copy create_osd.sh to host1 @@ -34,7 +37,7 @@ - create_osd - name: prepare create osd - shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy --repo-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa --gpg-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa/ceph_key_release.asc osd prepare {{ item }}:/var/local/osd + shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy osd prepare {{ item }}:/var/local/osd with_items: - "{{ groups['compute'] }}" tags: @@ -42,7 +45,7 @@ - name: activate osd - shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy --repo-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa --gpg-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa/ceph_key_release.asc osd activate {{ item }}:/var/local/osd + shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy osd activate {{ item }}:/var/local/osd with_items: - "{{ groups['compute'] }}" tags: -- cgit 1.2.3-korg