author    grakiss <grakiss.wanglei@huawei.com>      2016-01-13 14:59:11 +0800
committer Justin chi <chigang@huawei.com>           2016-01-30 03:34:26 +0000
commit    951cec74b7b8cef5db2f2bda7375653fedaedbf8 (patch)
tree      219fdc31014836b059597bd6439dadbe6c6cb59b /deploy
parent    ffcccb6b50fe7e5954188b44f6666f7ac3dee3da (diff)
deploy CEPH nodes in parallel
JIRA: COMPASS-264

- do not use ceph-deploy
- no central node is needed to deploy Ceph

Change-Id: I0f9c459060a2fccbad73a323697ec23ea2115410
Signed-off-by: grakiss <grakiss.wanglei@huawei.com>
(cherry picked from commit 2616357bbf1d5cfc1f7e3923e12b8adfc5627fc6)
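For context, the new roles replace ceph-deploy with Ceph's manual bootstrap sequence. Condensed from the tasks in this patch, the per-cluster flow is roughly as follows (a summary sketch of the commands the roles run, not a standalone installer; angle-bracket values are placeholders):

    # first monitor only (ceph-config/tasks/create_config.yml)
    uuidgen                                   # becomes the cluster fsid in ceph.conf
    ceph-authtool --create-keyring /tmp/ceph.mon.keyring \
        --gen-key -n mon. --cap mon 'allow *'
    ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring \
        --gen-key -n client.admin --set-uid=0 \
        --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
    ceph-authtool /tmp/ceph.mon.keyring \
        --import-keyring /etc/ceph/ceph.client.admin.keyring

    # every monitor (create_monmap.j2 + ceph-mon/tasks/install_mon.yml)
    monmaptool --create --clobber --fsid <fsid> --add <mon> <mgmt-ip>:6789 /tmp/monmap
    ceph-mon --mkfs -i <hostname> --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
    start ceph-mon id=<hostname>

    # every OSD node (ceph-osd/tasks/install_osd.yml)
    ceph-disk prepare --fs-type xfs /var/local/osd
    ceph-disk activate /var/local/osd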
Diffstat (limited to 'deploy')
-rw-r--r--  deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml                | 27
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-config/files/create_osd.sh              | 31
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-config/tasks/create_config.yml          | 59
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-config/tasks/main.yml                   |  5
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-config/templates/ceph.j2                | 25
-rw-r--r--  deploy/adapters/ansible/roles/ceph-config/templates/create_monmap.j2       |  5
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-config/templates/dump_var.j2            |  8
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml    |  2
-rw-r--r--  deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml               | 21
-rw-r--r--  deploy/adapters/ansible/roles/ceph-mon/tasks/main.yml                      |  5
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_conf.yml | 32
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_pre.yml  | 69
-rw-r--r--  deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml                | 18
-rw-r--r--  deploy/adapters/ansible/roles/ceph-openstack/templates/secret.j2           |  6
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml               | 22
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-openstack/vars/RedHat.yml               | 12
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-openstack/vars/main.yml                 |  5
-rwxr-xr-x  deploy/adapters/ansible/roles/ceph-osd/files/create_osd.sh                 | 31
-rw-r--r--  deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml               | 14
-rw-r--r--  deploy/adapters/ansible/roles/ceph-osd/tasks/main.yml                      |  5
-rw-r--r--  deploy/adapters/ansible/roles/ceph-purge/tasks/main.yml                    | 27
21 files changed, 427 insertions, 2 deletions
diff --git a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
index f60d61f3..e03fa9fe 100644
--- a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
+++ b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
@@ -113,8 +113,33 @@
- hosts: ceph_adm
remote_user: root
accelerate: true
+ roles: []
+ # - ceph-deploy
+
+- hosts: all
+ remote_user: root
+ accelerate: true
+ roles:
+ - ceph-purge
+ - ceph-config
+
+- hosts: ceph_mon
+ remote_user: root
+ accelerate: true
+ roles:
+ - ceph-mon
+
+- hosts: ceph_osd
+ remote_user: root
+ accelerate: true
+ roles:
+ - ceph-osd
+
+- hosts: all
+ remote_user: root
+ accelerate: true
roles:
- - ceph-deploy
+ - ceph-openstack
- hosts: all
remote_user: root
diff --git a/deploy/adapters/ansible/roles/ceph-config/files/create_osd.sh b/deploy/adapters/ansible/roles/ceph-config/files/create_osd.sh
new file mode 100755
index 00000000..2c9e57f1
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-config/files/create_osd.sh
@@ -0,0 +1,31 @@
+if [ -d "/var/local/osd" ]; then
+echo "clear /var/local/osd"
+rm -r /var/local/osd/
+umount /var/local/osd
+rm -r /var/local/osd
+fi
+
+
+#safe check
+ps -ef |grep lvremove |awk '{print $2}' |xargs kill -9
+ps -ef |grep vgremove |awk '{print $2}' |xargs kill -9
+ps -ef |grep vgcreate |awk '{print $2}' |xargs kill -9
+ps -ef |grep lvcreate |awk '{print $2}' |xargs kill -9
+
+if [ -L "/dev/storage-volumes/ceph0" ]; then
+echo "remove lv vg"
+lvremove -f /dev/storage-volumes/ceph0
+fi
+
+
+echo "lvcreate"
+lvcreate -l 100%FREE -nceph0 storage-volumes
+echo "mkfs"
+mkfs.xfs -f /dev/storage-volumes/ceph0
+
+if [ ! -d "/var/local/osd" ]; then
+echo "mount osd"
+mkdir -p /var/local/osd
+mount /dev/storage-volumes/ceph0 /var/local/osd
+fi
+
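A caveat on the script above: it deletes the contents of /var/local/osd while the volume is still mounted and only unmounts afterwards, and the "ps -ef | grep ..." pipelines also match the grep processes themselves, so the kill -9 can target the wrong PIDs. A more defensive variant would look like this (an untested sketch, not part of the commit):

    # unmount first, then remove the mount point
    if mountpoint -q /var/local/osd; then
        umount /var/local/osd
    fi
    rm -rf /var/local/osd

    # pkill -x matches the exact command name, so it cannot kill itself
    for cmd in lvremove vgremove vgcreate lvcreate; do
        pkill -9 -x "$cmd" || true
    done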
diff --git a/deploy/adapters/ansible/roles/ceph-config/tasks/create_config.yml b/deploy/adapters/ansible/roles/ceph-config/tasks/create_config.yml
new file mode 100755
index 00000000..0822239e
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-config/tasks/create_config.yml
@@ -0,0 +1,59 @@
+- name: gen ceph fsid
+ shell: uuidgen
+ register: ceph_fsid
+ run_once: true
+
+- name: gen ceph conf
+ local_action:
+ module: "template"
+ src: "ceph.j2"
+ dest: "/tmp/ceph.conf"
+ run_once: true
+
+- name: "make directory for ceph config file"
+ file: path="/etc/ceph" state="directory"
+
+- name: copy ceph conf to dest mon node
+ copy: src="/tmp/ceph.conf" dest="/etc/ceph/ceph.conf"
+
+- name: install ceph-related packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items:
+ - ceph
+
+- name: gen create monmap script
+ local_action: template src="create_monmap.j2" dest="/tmp/create_monmap.sh" mode=0755
+ run_once: true
+
+- name: create monmap
+ script: /tmp/create_monmap.sh
+ when: inventory_hostname in groups['ceph_mon']
+
+- name: create mon.keyring
+ shell: "ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'"
+ when: inventory_hostname == groups['ceph_mon'][0]
+
+- name: create admin.keyring
+ shell: "ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'"
+ when: inventory_hostname == groups['ceph_mon'][0]
+
+- name: Add the client.admin key to the ceph.mon.keyring
+ shell: "ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring"
+ when: inventory_hostname == groups['ceph_mon'][0]
+
+- name: fetch mon.keyring to local
+ fetch: src="/tmp/ceph.mon.keyring" dest="/tmp/ceph.mon.keyring" flat=yes
+ when: inventory_hostname == groups['ceph_mon'][0]
+
+- name: fetch client.admin.keyring to local
+ fetch: src="/etc/ceph/ceph.client.admin.keyring" dest="/tmp/ceph.client.admin.keyring" flat=yes
+ when: inventory_hostname == groups['ceph_mon'][0]
+
+- name: copy mon.keyring to remote nodes
+ copy: src="/tmp/ceph.mon.keyring" dest="/tmp/ceph.mon.keyring"
+
+- name: copy admin.keyring to remote nodes
+ copy: src="/tmp/ceph.client.admin.keyring" dest="/etc/ceph/ceph.client.admin.keyring"
+
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/ceph-config/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-config/tasks/main.yml
new file mode 100755
index 00000000..3512dfb8
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-config/tasks/main.yml
@@ -0,0 +1,5 @@
+- include: create_config.yml
+ tags:
+ - ceph_config
+ - ceph_deploy
+ - ceph_mon
diff --git a/deploy/adapters/ansible/roles/ceph-config/templates/ceph.j2 b/deploy/adapters/ansible/roles/ceph-config/templates/ceph.j2
new file mode 100755
index 00000000..bd0e3f5d
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-config/templates/ceph.j2
@@ -0,0 +1,25 @@
+[global]
+fsid = {{ ceph_fsid.stdout }}
+mon initial members = {{ groups["ceph_mon"] | join(", ")}}
+mon host =
+{%- for host in groups["ceph_mon"] -%}
+{{ ', ' if not loop.first else ''}}{{ ip_settings[host].mgmt.ip }}
+{%- endfor %}
+
+public network = {{ mgmt_cidr }}
+cluster network = {{ storage_cidr }}
+
+auth cluster required = cephx
+auth service required = cephx
+auth client required = cephx
+
+osd journal size = 1024
+filestore xattr use omap = true
+osd pool default size = 1
+osd pool default min size = 1
+osd pool default pg num = 333
+osd pool default pgp num = 333
+osd crush chooseleaf type = 1
+
+debug mon = 1
+debug ms = 0
diff --git a/deploy/adapters/ansible/roles/ceph-config/templates/create_monmap.j2 b/deploy/adapters/ansible/roles/ceph-config/templates/create_monmap.j2
new file mode 100644
index 00000000..7d1eb9d7
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-config/templates/create_monmap.j2
@@ -0,0 +1,5 @@
+monmaptool --create --clobber --fsid {{ ceph_fsid.stdout }}
+{%- for host in groups['ceph_mon']%}
+ --add {{host}} {{ ip_settings[host].mgmt.ip }}:6789
+{%- endfor %}
+ /tmp/monmap
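Under Ansible's default Jinja settings (trim_blocks enabled), the "{%- ... %}" whitespace control should collapse this template into a single command. Rendered for a hypothetical three-monitor cluster (hostnames, addresses, and fsid invented for illustration):

    monmaptool --create --clobber --fsid a1b2c3d4-0000-4000-8000-000000000000 \
        --add host1 10.1.0.50:6789 --add host2 10.1.0.51:6789 \
        --add host3 10.1.0.52:6789 /tmp/monmap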
diff --git a/deploy/adapters/ansible/roles/ceph-config/templates/dump_var.j2 b/deploy/adapters/ansible/roles/ceph-config/templates/dump_var.j2
new file mode 100755
index 00000000..a4a9b155
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-config/templates/dump_var.j2
@@ -0,0 +1,8 @@
+HOSTVARS (ANSIBLE GATHERED, group_vars, host_vars) :
+
+{{ hostvars[inventory_hostname] | to_yaml }}
+
+PLAYBOOK VARS:
+
+{{ vars | to_yaml }}
+
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
index 1e1b5863..ab010266 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_openstack_conf.yml
@@ -8,7 +8,7 @@
ignore_errors: True
- name: modify glance-api.conf for ceph
- shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(default_store\).*/\1 = rbd/g' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a rbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf"
+ shell: ssh -o StrictHostKeyChecking=no {{ item }} -t "sed -i 's/^\(default_store\).*/\1 = rbd/g' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a stores = rbd\nrbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf"
with_items:
- "{{ groups['controller'] }}"
tags:
diff --git a/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml b/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml
new file mode 100644
index 00000000..e2fc8ebb
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-mon/tasks/install_mon.yml
@@ -0,0 +1,21 @@
+
+- name: Create a default data directory
+ file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}" state="directory"
+
+- name: Populate the monitor daemon
+ shell: "ceph-mon --mkfs -i {{ inventory_hostname }} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring"
+
+- name: Touch the done file
+ file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}/done" state="touch"
+
+- name: start mon daemon
+ shell: start ceph-mon id={{ inventory_hostname }}
+
+- name: auto start ceph-mon
+ file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}/upstart" state="touch"
+
+- name: sleep for creating osd keyring
+ shell: sleep 10
+
+- name: fetch osd keyring
+ fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes
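The fixed "sleep 10" assumes the monitor writes /var/lib/ceph/bootstrap-osd/ceph.keyring within ten seconds of starting. If that ever proves flaky, polling for the file is more deterministic (a sketch of the idea; Ansible's wait_for module would express the same thing):

    # wait up to 60s for the monitor to create the bootstrap-osd keyring
    for i in $(seq 1 60); do
        [ -f /var/lib/ceph/bootstrap-osd/ceph.keyring ] && break
        sleep 1
    done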
diff --git a/deploy/adapters/ansible/roles/ceph-mon/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-mon/tasks/main.yml
new file mode 100644
index 00000000..666cfe6a
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-mon/tasks/main.yml
@@ -0,0 +1,5 @@
+- include: install_mon.yml
+ when: inventory_hostname in groups["ceph_mon"]
+ tags:
+ - ceph_mon
+ - ceph_deploy
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_conf.yml b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_conf.yml
new file mode 100755
index 00000000..ebe54fe9
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_conf.yml
@@ -0,0 +1,32 @@
+---
+- name: chown of glance/api.log
+ shell: chown -R glance:glance /var/log/glance
+ when: inventory_hostname in groups['controller']
+ tags:
+ - ceph_conf_glance
+ ignore_errors: True
+
+- name: modify glance-api.conf for ceph
+ shell: sed -i 's/^\(default_store\).*/\1 = rbd/g' /etc/glance/glance-api.conf && sed -i '/^\[glance_store/a rbd_store_pool = images\nrbd_store_user = glance\nrbd_store_ceph_conf = /etc/ceph/ceph.conf\nrbd_store_chunk_size = 8\nshow_image_direct_url=True' /etc/glance/glance-api.conf
+ when: inventory_hostname in groups['controller']
+ tags:
+ - ceph_conf_glance
+
+- name: restart glance
+ shell: rm -f /var/log/glance/api.log && chown -R glance:glance /var/log/glance && service {{ glance_service }} restart
+ when: inventory_hostname in groups['controller']
+ tags:
+ - ceph_conf_glance
+ ignore_errors: True
+
+- name: modify cinder.conf for ceph
+ shell: sed -i 's/^\(volume_driver\).*/\1 = cinder.volume.drivers.rbd.RBDDriver/g' /etc/cinder/cinder.conf && sed -i 's/^\(rbd_secret_uuid\).*/\1 = {{ ceph_uuid.stdout_lines[0] }}/g' /etc/cinder/cinder.conf && sed -i '/^\[DEFAULT/a rbd_pool = volumes\nrbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_flatten_volume_from_snapshot = false\nrbd_max_clone_depth = 5\nrbd_store_chunk_size = 4\nrados_connect_timeout = -1\nglance_api_version = 2\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}' /etc/cinder/cinder.conf && service {{ cinder_service }} restart
+ when: inventory_hostname in groups['compute']
+ tags:
+ - ceph_conf_cinder
+
+- name: modify nova.conf for ceph
+ shell: sed -i 's/^\(images_type\).*/\1 = rbd/g' /etc/nova/nova-compute.conf && sed -i 's/^\(rbd_secret_uuid\).*/\1 = {{ ceph_uuid.stdout_lines[0] }}/g' /etc/nova/nova-compute.conf && sed -i '/^\[libvirt/a images_rbd_pool = vms\nimages_rbd_ceph_conf = /etc/ceph/ceph.conf\nrbd_user = cinder\nrbd_secret_uuid = {{ ceph_uuid.stdout_lines[0] }}\ndisk_cachemodes=\"network=writeback\"\nlive_migration_flag=\"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"' /etc/nova/nova-compute.conf && service {{ nova_service }} restart
+ when: inventory_hostname in groups['compute']
+ tags:
+ - ceph_conf_nova
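After the sed edits above, a quick spot check on a compute node confirms that the libvirt section picked up the RBD settings (the uuid is whatever ceph_openstack_pre.yml generated):

    grep -A8 '^\[libvirt' /etc/nova/nova-compute.conf
    # expect lines like:
    #   images_rbd_pool = vms
    #   rbd_user = cinder
    #   rbd_secret_uuid = <generated uuid>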
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_pre.yml b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_pre.yml
new file mode 100755
index 00000000..8433d7cb
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-openstack/tasks/ceph_openstack_pre.yml
@@ -0,0 +1,69 @@
+---
+- name: gen ceph uuid
+ shell: uuidgen
+ register: ceph_uuid
+ run_once: True
+ tags:
+ - ceph_copy_secret
+
+- name: gen template secret.xml
+ local_action:
+ module: "template"
+ src: "secret.j2"
+ dest: "/tmp/secret.xml"
+ mode: "0777"
+ run_once: True
+ tags:
+ - ceph_copy_secret
+
+- name: create pool
+ shell: ceph osd pool create {{ item }} 50
+ with_items:
+ - volumes
+ - images
+ - backups
+ - vms
+ run_once: True
+
+- name: create ceph users for openstack
+ shell: ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' && ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
+ run_once: True
+
+- name: send glance key to controller nodes
+ shell: ceph auth get-or-create client.glance | tee /etc/ceph/ceph.client.glance.keyring && chown glance:glance /etc/ceph/ceph.client.glance.keyring
+ when: inventory_hostname in groups['controller']
+
+- name: send cinder key to compute nodes
+ shell: ceph auth get-or-create client.cinder | tee /etc/ceph/ceph.client.cinder.keyring && chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
+ when: inventory_hostname in groups['compute']
+ tags:
+ - ceph_send_key
+
+- name: copy cinder key to compute node
+ shell: ceph auth get-key client.cinder | tee client.cinder.key
+ when: inventory_hostname in groups['compute']
+ tags:
+ - ceph_copy_secret
+
+- name: copy secret.xml to compute nodes
+ copy: src="/tmp/secret.xml" dest="~/secret.xml"
+ when: inventory_hostname in groups['compute']
+ tags:
+ - ceph_copy_secret
+
+- name: undefine libvirt secret in case of repeatedly execute ceph_deploy
+ shell: "virsh secret-list | awk '$1 ~ /[0-9]+/ {print $1}' | xargs virsh secret-undefine"
+ when: inventory_hostname in groups['compute']
+ tags:
+ - ceph_copy_secret
+ ignore_errors: True
+
+
+- name: create key for libvirt on compute nodes
+ shell: "virsh secret-define --file ~/secret.xml && virsh secret-set-value --secret {{ ceph_uuid.stdout_lines[0] }} --base64 $(cat client.cinder.key)"
+ when: inventory_hostname in groups['compute']
+ tags:
+ - ceph_copy_secret
+ ignore_errors: True
+
+
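To verify the secret took effect on a compute node, the standard virsh commands can be used (shown for completeness; <uuid> is the generated ceph_uuid):

    virsh secret-list                        # should list the uuid with usage 'ceph'
    virsh secret-get-value --secret <uuid>   # prints the base64 cinder key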
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml
new file mode 100644
index 00000000..1f295028
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-openstack/tasks/main.yml
@@ -0,0 +1,18 @@
+- include_vars: "{{ ansible_os_family }}.yml"
+ tags:
+ - ceph_deploy
+ - ceph_openstack_pre
+ - ceph_openstack_conf
+ - ceph_openstack
+
+- include: ceph_openstack_pre.yml
+ tags:
+ - ceph_deploy
+ - ceph_openstack_pre
+ - ceph_openstack
+
+- include: ceph_openstack_conf.yml
+ tags:
+ - ceph_deploy
+ - ceph_openstack_conf
+ - ceph_openstack
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/templates/secret.j2 b/deploy/adapters/ansible/roles/ceph-openstack/templates/secret.j2
new file mode 100644
index 00000000..a0ffc6e3
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-openstack/templates/secret.j2
@@ -0,0 +1,6 @@
+<secret ephemeral='no' private='no'>
+ <uuid>{{ ceph_uuid.stdout_lines[0] }}</uuid>
+ <usage type='ceph'>
+ <name>client.cinder secret</name>
+ </usage>
+</secret>
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml b/deploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml
new file mode 100755
index 00000000..86fecb79
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-openstack/vars/Debian.yml
@@ -0,0 +1,22 @@
+---
+packages:
+ - ceph-deploy
+ - python-flask
+ - libgoogle-perftools4
+ - libleveldb1
+ - liblttng-ust0
+ - libsnappy1
+ - librbd1
+ - librados2
+ - python-ceph
+ - ceph
+ - ceph-mds
+ - ceph-common
+ - ceph-fs-common
+ - gdisk
+
+services: []
+
+cinder_service: cinder-volume
+nova_service: nova-compute
+glance_service: glance-api
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/vars/RedHat.yml b/deploy/adapters/ansible/roles/ceph-openstack/vars/RedHat.yml
new file mode 100755
index 00000000..a2c18ba6
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-openstack/vars/RedHat.yml
@@ -0,0 +1,12 @@
+---
+packages:
+ - ceph-radosgw
+ - fcgi
+ - ceph-deploy
+ - ceph
+
+services: []
+
+cinder_service: openstack-cinder-volume
+nova_service: openstack-nova-compute
+glance_service: openstack-glance-api
diff --git a/deploy/adapters/ansible/roles/ceph-openstack/vars/main.yml b/deploy/adapters/ansible/roles/ceph-openstack/vars/main.yml
new file mode 100755
index 00000000..71ace4a5
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-openstack/vars/main.yml
@@ -0,0 +1,5 @@
+---
+packages_noarch: []
+
+ceph_cluster_dir:
+ - /root/ceph-cluster
diff --git a/deploy/adapters/ansible/roles/ceph-osd/files/create_osd.sh b/deploy/adapters/ansible/roles/ceph-osd/files/create_osd.sh
new file mode 100755
index 00000000..2c9e57f1
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-osd/files/create_osd.sh
@@ -0,0 +1,31 @@
+if [ -d "/var/local/osd" ]; then
+echo "clear /var/local/osd"
+rm -r /var/local/osd/
+umount /var/local/osd
+rm -r /var/local/osd
+fi
+
+
+#safe check
+ps -ef |grep lvremove |awk '{print $2}' |xargs kill -9
+ps -ef |grep vgremove |awk '{print $2}' |xargs kill -9
+ps -ef |grep vgcreate |awk '{print $2}' |xargs kill -9
+ps -ef |grep lvcreate |awk '{print $2}' |xargs kill -9
+
+if [ -L "/dev/storage-volumes/ceph0" ]; then
+echo "remove lv vg"
+lvremove -f /dev/storage-volumes/ceph0
+fi
+
+
+echo "lvcreate"
+lvcreate -l 100%FREE -nceph0 storage-volumes
+echo "mkfs"
+mkfs.xfs -f /dev/storage-volumes/ceph0
+
+if [ ! -d "/var/local/osd" ]; then
+echo "mount osd"
+mkdir -p /var/local/osd
+mount /dev/storage-volumes/ceph0 /var/local/osd
+fi
+
diff --git a/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml b/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml
new file mode 100644
index 00000000..bf49c50e
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-osd/tasks/install_osd.yml
@@ -0,0 +1,14 @@
+---
+
+- name: create osd lv and mount it on /var/local/osd
+ script: create_osd.sh
+
+- name: copy osd keyring
+ copy: src="/tmp/ceph.osd.keyring" dest="/var/lib/ceph/bootstrap-osd/ceph.keyring"
+
+- name: prepare osd disk
+ shell: ceph-disk prepare --fs-type xfs /var/local/osd
+
+- name: activate osd node
+ shell: ceph-disk activate /var/local/osd
+
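Once these tasks have run on every ceph_osd host, cluster state can be checked from any node holding the admin keyring (standard Ceph CLI, shown for completeness):

    ceph -s          # health should improve as OSDs come up and peer
    ceph osd tree    # expect one osd entry per ceph_osd host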
diff --git a/deploy/adapters/ansible/roles/ceph-osd/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-osd/tasks/main.yml
new file mode 100644
index 00000000..d6c46640
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-osd/tasks/main.yml
@@ -0,0 +1,5 @@
+- include: install_osd.yml
+ when: inventory_hostname in groups["ceph_osd"]
+ tags:
+ - ceph_osd
+ - ceph_deploy
diff --git a/deploy/adapters/ansible/roles/ceph-purge/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-purge/tasks/main.yml
new file mode 100644
index 00000000..8bf3a3ed
--- /dev/null
+++ b/deploy/adapters/ansible/roles/ceph-purge/tasks/main.yml
@@ -0,0 +1,27 @@
+- name: clear tmp files
+ local_action: shell rm -rf /tmp/ceph*
+ tags:
+ - ceph_purge
+ - ceph_deploy
+
+- name: install ceph-related packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items:
+ - ceph-deploy
+ tags:
+ - ceph_purge
+ - ceph_deploy
+
+- name: purge ceph
+ shell: "ceph-deploy purge {{ inventory_hostname }}; ceph-deploy purgedata {{ inventory_hostname }}; ceph-deploy forgetkeys"
+ tags:
+ - ceph_purge
+ - ceph_deploy
+
+- name: remove monmap
+ file: path="/tmp/monmap" state="absent"
+ tags:
+ - ceph_purge
+ - ceph_deploy
+
+
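Because every new role is tagged, the Ceph flow can be re-run in isolation. Assuming the usual ansible-playbook invocation against this playbook and an inventory that defines the ceph_mon and ceph_osd groups (inventory path is a placeholder):

    ansible-playbook -i <inventory> \
        deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml \
        --tags ceph_deploy
    # or --tags ceph_purge to wipe a previous attempt before retrying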