Diffstat (limited to 'src/ceph/qa/workunits')
-rw-r--r--  src/ceph/qa/workunits/Makefile | 4
-rwxr-xr-x  src/ceph/qa/workunits/caps/mon_commands.sh | 25
-rw-r--r--  src/ceph/qa/workunits/ceph-disk/60-ceph-by-partuuid.rules | 29
-rwxr-xr-x  src/ceph/qa/workunits/ceph-disk/ceph-disk-no-lockbox | 4608
-rw-r--r--  src/ceph/qa/workunits/ceph-disk/ceph-disk-test.py | 777
-rwxr-xr-x  src/ceph/qa/workunits/ceph-disk/ceph-disk.sh | 46
-rwxr-xr-x  src/ceph/qa/workunits/ceph-helpers-root.sh | 92
-rwxr-xr-x  src/ceph/qa/workunits/ceph-tests/ceph-admin-commands.sh | 14
-rwxr-xr-x  src/ceph/qa/workunits/cephtool/test.sh | 2621
-rwxr-xr-x  src/ceph/qa/workunits/cephtool/test_daemon.sh | 43
-rwxr-xr-x  src/ceph/qa/workunits/cls/test_cls_hello.sh | 5
-rwxr-xr-x  src/ceph/qa/workunits/cls/test_cls_journal.sh | 6
-rwxr-xr-x  src/ceph/qa/workunits/cls/test_cls_lock.sh | 5
-rwxr-xr-x  src/ceph/qa/workunits/cls/test_cls_numops.sh | 5
-rwxr-xr-x  src/ceph/qa/workunits/cls/test_cls_rbd.sh | 6
-rwxr-xr-x  src/ceph/qa/workunits/cls/test_cls_refcount.sh | 5
-rwxr-xr-x  src/ceph/qa/workunits/cls/test_cls_rgw.sh | 8
-rwxr-xr-x  src/ceph/qa/workunits/cls/test_cls_sdk.sh | 5
-rw-r--r--  src/ceph/qa/workunits/direct_io/.gitignore | 3
-rw-r--r--  src/ceph/qa/workunits/direct_io/Makefile | 11
-rwxr-xr-x  src/ceph/qa/workunits/direct_io/big.sh | 6
-rw-r--r--  src/ceph/qa/workunits/direct_io/direct_io_test.c | 312
-rwxr-xr-x  src/ceph/qa/workunits/direct_io/misc.sh | 16
-rw-r--r--  src/ceph/qa/workunits/direct_io/test_short_dio_read.c | 57
-rw-r--r--  src/ceph/qa/workunits/direct_io/test_sync_io.c | 250
-rw-r--r--  src/ceph/qa/workunits/erasure-code/.gitignore | 2
-rw-r--r--  src/ceph/qa/workunits/erasure-code/bench.html | 34
-rwxr-xr-x  src/ceph/qa/workunits/erasure-code/bench.sh | 188
-rwxr-xr-x  src/ceph/qa/workunits/erasure-code/encode-decode-non-regression.sh | 39
-rw-r--r--  src/ceph/qa/workunits/erasure-code/examples.css | 97
-rw-r--r--  src/ceph/qa/workunits/erasure-code/jquery.flot.categories.js | 190
-rw-r--r--  src/ceph/qa/workunits/erasure-code/jquery.flot.js | 3168
-rw-r--r--  src/ceph/qa/workunits/erasure-code/jquery.js | 9472
-rw-r--r--  src/ceph/qa/workunits/erasure-code/plot.js | 82
-rw-r--r--  src/ceph/qa/workunits/false.sh | 3
-rw-r--r--  src/ceph/qa/workunits/fs/.gitignore | 1
-rw-r--r--  src/ceph/qa/workunits/fs/Makefile | 11
-rwxr-xr-x  src/ceph/qa/workunits/fs/misc/acl.sh | 50
-rwxr-xr-x  src/ceph/qa/workunits/fs/misc/chmod.sh | 60
-rwxr-xr-x  src/ceph/qa/workunits/fs/misc/direct_io.py | 50
-rwxr-xr-x  src/ceph/qa/workunits/fs/misc/dirfrag.sh | 52
-rwxr-xr-x  src/ceph/qa/workunits/fs/misc/filelock_deadlock.py | 72
-rwxr-xr-x  src/ceph/qa/workunits/fs/misc/filelock_interrupt.py | 87
-rwxr-xr-x  src/ceph/qa/workunits/fs/misc/i_complete_vs_rename.sh | 31
-rwxr-xr-x  src/ceph/qa/workunits/fs/misc/layout_vxattrs.sh | 116
-rwxr-xr-x  src/ceph/qa/workunits/fs/misc/mkpool_layout_vxattrs.sh | 15
-rwxr-xr-x  src/ceph/qa/workunits/fs/misc/multiple_rsync.sh | 25
-rwxr-xr-x  src/ceph/qa/workunits/fs/misc/trivial_sync.sh | 7
-rwxr-xr-x  src/ceph/qa/workunits/fs/misc/xattrs.sh | 14
-rwxr-xr-x  src/ceph/qa/workunits/fs/multiclient_sync_read_eof.py | 44
-rwxr-xr-x  src/ceph/qa/workunits/fs/norstats/kernel_untar_tar.sh | 26
-rwxr-xr-x  src/ceph/qa/workunits/fs/quota/quota.sh | 129
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snap-rm-diff.sh | 11
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-0.sh | 27
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-1.sh | 31
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-2.sh | 61
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-authwb.sh | 14
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-capwb.sh | 35
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-dir-rename.sh | 19
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-double-null.sh | 25
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-estale.sh | 15
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-git-ceph.sh | 35
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-intodir.sh | 24
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh | 44
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-parents.sh | 41
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-snap-rename.sh | 35
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh | 26
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-upchildrealms.sh | 30
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/snaptest-xattrwb.sh | 31
-rwxr-xr-x  src/ceph/qa/workunits/fs/snaps/untar_snap_rm.sh | 20
-rw-r--r--  src/ceph/qa/workunits/fs/test_o_trunc.c | 45
-rwxr-xr-x  src/ceph/qa/workunits/fs/test_o_trunc.sh | 7
-rwxr-xr-x  src/ceph/qa/workunits/fs/test_python.sh | 6
-rwxr-xr-x  src/ceph/qa/workunits/hadoop/repl.sh | 42
-rwxr-xr-x  src/ceph/qa/workunits/hadoop/terasort.sh | 76
-rwxr-xr-x  src/ceph/qa/workunits/hadoop/wordcount.sh | 35
-rwxr-xr-x  src/ceph/qa/workunits/kernel_untar_build.sh | 20
-rwxr-xr-x  src/ceph/qa/workunits/libcephfs-java/test.sh | 39
-rwxr-xr-x  src/ceph/qa/workunits/libcephfs/test.sh | 6
-rwxr-xr-x  src/ceph/qa/workunits/mgr/test_localpool.sh | 21
-rwxr-xr-x  src/ceph/qa/workunits/mon/auth_caps.sh | 130
-rw-r--r--  src/ceph/qa/workunits/mon/caps.py | 366
-rwxr-xr-x  src/ceph/qa/workunits/mon/caps.sh | 55
-rwxr-xr-x  src/ceph/qa/workunits/mon/crush_ops.sh | 205
-rwxr-xr-x  src/ceph/qa/workunits/mon/osd.sh | 24
-rwxr-xr-x  src/ceph/qa/workunits/mon/ping.py | 114
-rwxr-xr-x  src/ceph/qa/workunits/mon/pool_ops.sh | 49
-rwxr-xr-x  src/ceph/qa/workunits/mon/rbd_snaps_ops.sh | 61
-rwxr-xr-x  src/ceph/qa/workunits/mon/test_mon_config_key.py | 481
-rwxr-xr-x  src/ceph/qa/workunits/objectstore/test_fuse.sh | 129
-rwxr-xr-x  src/ceph/qa/workunits/osdc/stress_objectcacher.sh | 28
-rwxr-xr-x  src/ceph/qa/workunits/post-file.sh | 7
-rwxr-xr-x  src/ceph/qa/workunits/rados/clone.sh | 13
-rwxr-xr-x  src/ceph/qa/workunits/rados/load-gen-big.sh | 10
-rwxr-xr-x  src/ceph/qa/workunits/rados/load-gen-mix-small-long.sh | 10
-rwxr-xr-x  src/ceph/qa/workunits/rados/load-gen-mix-small.sh | 10
-rwxr-xr-x  src/ceph/qa/workunits/rados/load-gen-mix.sh | 10
-rwxr-xr-x  src/ceph/qa/workunits/rados/load-gen-mostlyread.sh | 10
-rwxr-xr-x  src/ceph/qa/workunits/rados/stress_watch.sh | 7
-rwxr-xr-x  src/ceph/qa/workunits/rados/test.sh | 51
-rwxr-xr-x  src/ceph/qa/workunits/rados/test_alloc_hint.sh | 176
-rwxr-xr-x  src/ceph/qa/workunits/rados/test_cache_pool.sh | 170
-rwxr-xr-x  src/ceph/qa/workunits/rados/test_envlibrados_for_rocksdb.sh | 96
-rwxr-xr-x  src/ceph/qa/workunits/rados/test_hang.sh | 8
-rwxr-xr-x  src/ceph/qa/workunits/rados/test_health_warnings.sh | 75
-rwxr-xr-x  src/ceph/qa/workunits/rados/test_pool_access.sh | 23
-rwxr-xr-x  src/ceph/qa/workunits/rados/test_pool_quota.sh | 68
-rwxr-xr-x  src/ceph/qa/workunits/rados/test_python.sh | 4
-rwxr-xr-x  src/ceph/qa/workunits/rados/test_rados_timeouts.sh | 47
-rwxr-xr-x  src/ceph/qa/workunits/rados/test_rados_tool.sh | 575
-rwxr-xr-x  src/ceph/qa/workunits/rados/test_tmap_to_omap.sh | 28
-rwxr-xr-x  src/ceph/qa/workunits/rbd/cli_generic.sh | 470
-rwxr-xr-x  src/ceph/qa/workunits/rbd/concurrent.sh | 375
-rwxr-xr-x  src/ceph/qa/workunits/rbd/diff.sh | 52
-rwxr-xr-x  src/ceph/qa/workunits/rbd/diff_continuous.sh | 59
-rwxr-xr-x  src/ceph/qa/workunits/rbd/huge-tickets.sh | 41
-rwxr-xr-x  src/ceph/qa/workunits/rbd/image_read.sh | 677
-rwxr-xr-x  src/ceph/qa/workunits/rbd/import_export.sh | 233
-rwxr-xr-x  src/ceph/qa/workunits/rbd/issue-20295.sh | 18
-rwxr-xr-x  src/ceph/qa/workunits/rbd/journal.sh | 310
-rwxr-xr-x  src/ceph/qa/workunits/rbd/kernel.sh | 89
-rwxr-xr-x  src/ceph/qa/workunits/rbd/krbd_data_pool.sh | 203
-rwxr-xr-x  src/ceph/qa/workunits/rbd/krbd_exclusive_option.sh | 165
-rwxr-xr-x  src/ceph/qa/workunits/rbd/krbd_fallocate.sh | 124
-rwxr-xr-x  src/ceph/qa/workunits/rbd/krbd_stable_pages_required.sh | 17
-rwxr-xr-x  src/ceph/qa/workunits/rbd/map-snapshot-io.sh | 17
-rwxr-xr-x  src/ceph/qa/workunits/rbd/map-unmap.sh | 44
-rwxr-xr-x  src/ceph/qa/workunits/rbd/merge_diff.sh | 474
-rwxr-xr-x  src/ceph/qa/workunits/rbd/notify_master.sh | 5
-rwxr-xr-x  src/ceph/qa/workunits/rbd/notify_slave.sh | 5
-rwxr-xr-x  src/ceph/qa/workunits/rbd/permissions.sh | 148
-rwxr-xr-x  src/ceph/qa/workunits/rbd/qemu-iotests.sh | 45
-rwxr-xr-x  src/ceph/qa/workunits/rbd/qemu_dynamic_features.sh | 48
-rwxr-xr-x  src/ceph/qa/workunits/rbd/qemu_rebuild_object_map.sh | 36
-rwxr-xr-x  src/ceph/qa/workunits/rbd/rbd-ggate.sh | 182
-rwxr-xr-x  src/ceph/qa/workunits/rbd/rbd-nbd.sh | 189
-rwxr-xr-x  src/ceph/qa/workunits/rbd/rbd_mirror.sh | 433
-rwxr-xr-x  src/ceph/qa/workunits/rbd/rbd_mirror_ha.sh | 207
-rwxr-xr-x  src/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh | 910
-rwxr-xr-x  src/ceph/qa/workunits/rbd/rbd_mirror_stress.sh | 186
-rwxr-xr-x  src/ceph/qa/workunits/rbd/read-flags.sh | 60
-rwxr-xr-x  src/ceph/qa/workunits/rbd/run_devstack_tempest.sh | 122
-rwxr-xr-x  src/ceph/qa/workunits/rbd/set_ro.py | 113
-rwxr-xr-x  src/ceph/qa/workunits/rbd/simple_big.sh | 12
-rwxr-xr-x  src/ceph/qa/workunits/rbd/smalliobench.sh | 18
-rwxr-xr-x  src/ceph/qa/workunits/rbd/test_admin_socket.sh | 152
-rwxr-xr-x  src/ceph/qa/workunits/rbd/test_librbd.sh | 9
-rwxr-xr-x  src/ceph/qa/workunits/rbd/test_librbd_api.sh | 4
-rwxr-xr-x  src/ceph/qa/workunits/rbd/test_librbd_python.sh | 12
-rwxr-xr-x  src/ceph/qa/workunits/rbd/test_lock_fence.sh | 47
-rwxr-xr-x  src/ceph/qa/workunits/rbd/test_rbd_mirror.sh | 9
-rwxr-xr-x  src/ceph/qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh | 37
-rwxr-xr-x  src/ceph/qa/workunits/rbd/verify_pool.sh | 27
-rwxr-xr-x  src/ceph/qa/workunits/rename/all.sh | 36
-rwxr-xr-x  src/ceph/qa/workunits/rename/dir_pri_nul.sh | 28
-rwxr-xr-x  src/ceph/qa/workunits/rename/dir_pri_pri.sh | 11
-rw-r--r--  src/ceph/qa/workunits/rename/plan.txt | 111
-rwxr-xr-x  src/ceph/qa/workunits/rename/prepare.sh | 21
-rwxr-xr-x  src/ceph/qa/workunits/rename/pri_nul.sh | 11
-rwxr-xr-x  src/ceph/qa/workunits/rename/pri_pri.sh | 12
-rwxr-xr-x  src/ceph/qa/workunits/rename/pri_rem.sh | 31
-rwxr-xr-x  src/ceph/qa/workunits/rename/rem_nul.sh | 29
-rwxr-xr-x  src/ceph/qa/workunits/rename/rem_pri.sh | 29
-rwxr-xr-x  src/ceph/qa/workunits/rename/rem_rem.sh | 61
-rwxr-xr-x  src/ceph/qa/workunits/rest/test-restful.sh | 16
-rwxr-xr-x  src/ceph/qa/workunits/rest/test.py | 424
-rwxr-xr-x  src/ceph/qa/workunits/rest/test_mgr_rest_api.py | 94
-rwxr-xr-x  src/ceph/qa/workunits/restart/test-backtraces.py | 262
-rwxr-xr-x  src/ceph/qa/workunits/rgw/run-s3tests.sh | 82
-rwxr-xr-x  src/ceph/qa/workunits/rgw/s3_bucket_quota.pl | 393
-rwxr-xr-x  src/ceph/qa/workunits/rgw/s3_multipart_upload.pl | 151
-rwxr-xr-x  src/ceph/qa/workunits/rgw/s3_user_quota.pl | 191
-rw-r--r--  src/ceph/qa/workunits/rgw/s3_utilities.pm | 220
-rwxr-xr-x  src/ceph/qa/workunits/suites/blogbench.sh | 15
-rwxr-xr-x  src/ceph/qa/workunits/suites/bonnie.sh | 11
-rwxr-xr-x  src/ceph/qa/workunits/suites/cephfs_journal_tool_smoke.sh | 92
-rwxr-xr-x  src/ceph/qa/workunits/suites/dbench-short.sh | 5
-rwxr-xr-x  src/ceph/qa/workunits/suites/dbench.sh | 6
-rwxr-xr-x  src/ceph/qa/workunits/suites/ffsb.sh | 22
-rwxr-xr-x  src/ceph/qa/workunits/suites/fio.sh | 42
-rwxr-xr-x  src/ceph/qa/workunits/suites/fsstress.sh | 20
-rwxr-xr-x  src/ceph/qa/workunits/suites/fsx.sh | 16
-rwxr-xr-x  src/ceph/qa/workunits/suites/fsync-tester.sh | 12
-rwxr-xr-x  src/ceph/qa/workunits/suites/iogen.sh | 17
-rwxr-xr-x  src/ceph/qa/workunits/suites/iozone-sync.sh | 22
-rwxr-xr-x  src/ceph/qa/workunits/suites/iozone.sh | 7
-rwxr-xr-x  src/ceph/qa/workunits/suites/pjd.sh | 17
-rw-r--r--  src/ceph/qa/workunits/suites/random_write.32.ffsb | 48
-rwxr-xr-x  src/ceph/qa/workunits/suites/wac.sh | 12
-rwxr-xr-x  src/ceph/qa/workunits/true.sh | 3
190 files changed, 0 insertions, 35849 deletions
diff --git a/src/ceph/qa/workunits/Makefile b/src/ceph/qa/workunits/Makefile
deleted file mode 100644
index f75f5df..0000000
--- a/src/ceph/qa/workunits/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-DIRS = direct_io fs
-
-all:
-	for d in $(DIRS) ; do ( cd $$d ; $(MAKE) all ) ; done
diff --git a/src/ceph/qa/workunits/caps/mon_commands.sh b/src/ceph/qa/workunits/caps/mon_commands.sh
deleted file mode 100755
index 5b5bce6..0000000
--- a/src/ceph/qa/workunits/caps/mon_commands.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/sh -ex
-
-ceph-authtool --create-keyring k --gen-key -p --name client.xx
-ceph auth add -i k client.xx mon "allow command foo; allow command bar *; allow command baz ...; allow command foo add * mon allow\\ rwx osd allow\\ *"
-
-( ceph -k k -n client.xx foo || true ) | grep 'unrecog'
-( ceph -k k -n client.xx foo ooo || true ) | grep 'Access denied'
-( ceph -k k -n client.xx fo || true ) | grep 'Access denied'
-( ceph -k k -n client.xx fooo || true ) | grep 'Access denied'
-
-( ceph -k k -n client.xx bar || true ) | grep 'Access denied'
-( ceph -k k -n client.xx bar a || true ) | grep 'unrecog'
-( ceph -k k -n client.xx bar a b c || true ) | grep 'Access denied'
-( ceph -k k -n client.xx ba || true ) | grep 'Access denied'
-( ceph -k k -n client.xx barr || true ) | grep 'Access denied'
-
-( ceph -k k -n client.xx baz || true ) | grep -v 'Access denied'
-( ceph -k k -n client.xx baz a || true ) | grep -v 'Access denied'
-( ceph -k k -n client.xx baz a b || true ) | grep -v 'Access denied'
-
-( ceph -k k -n client.xx foo add osd.1 -i k mon 'allow rwx' osd 'allow *' || true ) | grep 'unrecog'
-( ceph -k k -n client.xx foo add osd a b c -i k mon 'allow rwx' osd 'allow *' || true ) | grep 'Access denied'
-( ceph -k k -n client.xx foo add osd a b c -i k mon 'allow *' || true ) | grep 'Access denied'
-
-echo OK
\ No newline at end of file
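The script above exercises the three shapes of mon command caps: "allow command foo" (the bare command only), "allow command bar *" (exactly one argument, any value), and "allow command baz ..." (any arguments, including none). A minimal sketch of those matching semantics, purely illustrative and not Ceph's actual MonCap parser:

    # Illustrative sketch of mon command-cap matching; not Ceph's implementation.
    def cap_allows(cap, cmd):
        # cap and cmd are token lists, e.g. cap=['bar', '*'], cmd=['bar', 'a']
        if cap and cap[-1] == '...':           # 'baz ...' allows any arguments
            return cmd[:len(cap) - 1] == cap[:-1]
        if len(cap) != len(cmd):               # 'foo' denies 'foo ooo'
            return False
        return all(c == '*' or c == a for c, a in zip(cap, cmd))

    assert cap_allows(['foo'], ['foo'])
    assert not cap_allows(['foo'], ['foo', 'ooo'])
    assert cap_allows(['bar', '*'], ['bar', 'a'])
    assert not cap_allows(['bar', '*'], ['bar', 'a', 'b', 'c'])
    assert cap_allows(['baz', '...'], ['baz', 'a', 'b'])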
diff --git a/src/ceph/qa/workunits/ceph-disk/60-ceph-by-partuuid.rules b/src/ceph/qa/workunits/ceph-disk/60-ceph-by-partuuid.rules
deleted file mode 100644
index 1ed0b12..0000000
--- a/src/ceph/qa/workunits/ceph-disk/60-ceph-by-partuuid.rules
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Make sure /dev/disk/by-partuuid is populated
-#
-
-# forward scsi device event to corresponding block device
-ACTION=="change", SUBSYSTEM=="scsi", ENV{DEVTYPE}=="scsi_device", TEST=="block", ATTR{block/*/uevent}="change"
-
-ACTION=="remove", GOTO="persistent_storage_end_two"
-
-SUBSYSTEM!="block", GOTO="persistent_storage_end_two"
-
-# skip rules for inappropriate block devices
-KERNEL=="fd*|mtd*|nbd*|gnbd*|btibm*|md*", GOTO="persistent_storage_end_two"
-
-# ignore partitions that span the entire disk
-TEST=="whole_disk", GOTO="persistent_storage_end_two"
-
-# for partitions import parent information
-ENV{DEVTYPE}=="partition", IMPORT{parent}="ID_*"
-
-# skip unpartitioned removable media devices from drivers which do not send "change" events
-ENV{DEVTYPE}=="disk", KERNEL!="sd*|sr*", ATTR{removable}=="1", GOTO="persistent_storage_end_two"
-
-# probe filesystem metadata of disks
-KERNEL!="sr*", IMPORT{program}="/sbin/blkid -o udev -p $tempnode"
-
-ENV{ID_PART_ENTRY_SCHEME}=="gpt", ENV{ID_PART_ENTRY_UUID}=="?*", SYMLINK+="disk/by-partuuid/$env{ID_PART_ENTRY_UUID}"
-
-LABEL="persistent_storage_end_two"
diff --git a/src/ceph/qa/workunits/ceph-disk/ceph-disk-no-lockbox b/src/ceph/qa/workunits/ceph-disk/ceph-disk-no-lockbox
deleted file mode 100755
index b9c1c6c..0000000
--- a/src/ceph/qa/workunits/ceph-disk/ceph-disk-no-lockbox
+++ /dev/null
@@ -1,4608 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (C) 2015 Red Hat <contact@redhat.com>
-# Copyright (C) 2014 Inktank <info@inktank.com>
-# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
-# Copyright (C) 2014 Catalyst.net Ltd
-#
-# Author: Loic Dachary <loic@dachary.org>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Library Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Library Public License for more details.
-#
-# THIS IS ceph-disk AS OF dc5a9053ce69c0630091774f16ce421da67d26fb v10.0.3-2247-gdc5a905
-# PRIOR TO THE INTRODUCTION OF THE LOCKBOX VOLUME TO STORE KEY FETCHING
-# STRATEGIES
-#
-import argparse
-import errno
-import fcntl
-import json
-import logging
-import os
-import platform
-import re
-import subprocess
-import stat
-import sys
-import tempfile
-import uuid
-import time
-import shlex
-import pwd
-import grp
-
-CEPH_OSD_ONDISK_MAGIC = 'ceph osd volume v026'
-
-PTYPE = {
-    'regular': {
-        'journal': {
-            # identical because creating a journal is atomic
-            'ready': '45b0969e-9b03-4f30-b4c6-b4b80ceff106',
-            'tobe': '45b0969e-9b03-4f30-b4c6-b4b80ceff106',
-        },
-        'block': {
-            # identical because creating a block is atomic
-            'ready': 'cafecafe-9b03-4f30-b4c6-b4b80ceff106',
-            'tobe': 'cafecafe-9b03-4f30-b4c6-b4b80ceff106',
-        },
-        'osd': {
-            'ready': '4fbd7e29-9d25-41b8-afd0-062c0ceff05d',
-            'tobe': '89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be',
-        },
-    },
-    'luks': {
-        'journal': {
-            'ready': '45b0969e-9b03-4f30-b4c6-35865ceff106',
-            'tobe': '89c57f98-2fe5-4dc0-89c1-35865ceff2be',
-        },
-        'block': {
-            'ready': 'cafecafe-9b03-4f30-b4c6-35865ceff106',
-            'tobe': '89c57f98-2fe5-4dc0-89c1-35865ceff2be',
-        },
-        'osd': {
-            'ready': '4fbd7e29-9d25-41b8-afd0-35865ceff05d',
-            'tobe': '89c57f98-2fe5-4dc0-89c1-5ec00ceff2be',
-        },
-    },
-    'plain': {
-        'journal': {
-            'ready': '45b0969e-9b03-4f30-b4c6-5ec00ceff106',
-            'tobe': '89c57f98-2fe5-4dc0-89c1-35865ceff2be',
-        },
-        'block': {
-            'ready': 'cafecafe-9b03-4f30-b4c6-5ec00ceff106',
-            'tobe': '89c57f98-2fe5-4dc0-89c1-35865ceff2be',
-        },
-        'osd': {
-            'ready': '4fbd7e29-9d25-41b8-afd0-5ec00ceff05d',
-            'tobe': '89c57f98-2fe5-4dc0-89c1-5ec00ceff2be',
-        },
-    },
-    'mpath': {
-        'journal': {
-            'ready': '45b0969e-8ae0-4982-bf9d-5a8d867af560',
-            'tobe': '45b0969e-8ae0-4982-bf9d-5a8d867af560',
-        },
-        'block': {
-            'ready': 'cafecafe-8ae0-4982-bf9d-5a8d867af560',
-            'tobe': 'cafecafe-8ae0-4982-bf9d-5a8d867af560',
-        },
-        'osd': {
-            'ready': '4fbd7e29-8ae0-4982-bf9d-5a8d867af560',
-            'tobe': '89c57f98-8ae0-4982-bf9d-5a8d867af560',
-        },
-    },
-}
-
-
-class Ptype(object):
-
-    @staticmethod
-    def get_ready_by_type(what):
-        return [x['ready'] for x in PTYPE[what].values()]
-
-    @staticmethod
-    def get_ready_by_name(name):
-        return [x[name]['ready'] for x in PTYPE.values()]
-
-    @staticmethod
-    def is_regular_space(ptype):
-        return Ptype.is_what_space('regular', ptype)
-
-    @staticmethod
-    def is_mpath_space(ptype):
-        return Ptype.is_what_space('mpath', ptype)
-
-    @staticmethod
-    def is_plain_space(ptype):
-        return Ptype.is_what_space('plain', ptype)
-
-    @staticmethod
-    def is_luks_space(ptype):
-        return Ptype.is_what_space('luks', ptype)
-
-    @staticmethod
-    def is_what_space(what, ptype):
-        for name in Space.NAMES:
-            if ptype == PTYPE[what][name]['ready']:
-                return True
-        return False
-
-    @staticmethod
-    def space_ptype_to_name(ptype):
-        for what in PTYPE.values():
-            for name in Space.NAMES:
-                if ptype == what[name]['ready']:
-                    return name
-        raise ValueError('ptype ' + ptype + ' not found')
-
-    @staticmethod
-    def is_dmcrypt_space(ptype):
-        for name in Space.NAMES:
-            if Ptype.is_dmcrypt(ptype, name):
-                return True
-        return False
-
-    @staticmethod
-    def is_dmcrypt(ptype, name):
-        for what in ('plain', 'luks'):
-            if ptype == PTYPE[what][name]['ready']:
-                return True
-        return False
-
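The GUID table and the Ptype helpers together map a partition type code to its role and encryption flavor. An illustrative round trip, assuming PTYPE, Ptype and Space from this file are in scope:

    # Illustrative only: role lookup for a 'ready' regular journal GUID.
    ptype = PTYPE['regular']['journal']['ready']
    assert Ptype.space_ptype_to_name(ptype) == 'journal'
    assert Ptype.is_regular_space(ptype)
    assert not Ptype.is_dmcrypt_space(ptype)   # only plain/luks GUIDs are dm-crypt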
-DEFAULT_FS_TYPE = 'xfs'
-SYSFS = '/sys'
-
-"""
-OSD STATUS Definition
-"""
-OSD_STATUS_OUT_DOWN = 0
-OSD_STATUS_OUT_UP = 1
-OSD_STATUS_IN_DOWN = 2
-OSD_STATUS_IN_UP = 3
-
-MOUNT_OPTIONS = dict(
-    btrfs='noatime,user_subvol_rm_allowed',
-    # user_xattr is default ever since linux 2.6.39 / 3.0, but we'll
-    # delay a moment before removing it fully because we did have some
-    # issues with ext4 before the xattrs-in-leveldb work, and it seemed
-    # that user_xattr helped
-    ext4='noatime,user_xattr',
-    xfs='noatime,inode64',
-)
-
-MKFS_ARGS = dict(
-    btrfs=[
-        # btrfs requires -f, for the same reason as xfs (see comment below)
-        '-f',
-        '-m', 'single',
-        '-l', '32768',
-        '-n', '32768',
-    ],
-    xfs=[
-        # xfs insists on not overwriting previous fs; even if we wipe
-        # partition table, we often recreate it exactly the same way,
-        # so we'll see ghosts of filesystems past
-        '-f',
-        '-i', 'size=2048',
-    ],
-)
-
-INIT_SYSTEMS = [
- 'upstart',
- 'sysvinit',
- 'systemd',
- 'auto',
- 'none',
-]
-
-STATEDIR = '/var/lib/ceph'
-
-SYSCONFDIR = '/etc/ceph'
-
-prepare_lock = None
-activate_lock = None
-SUPPRESS_PREFIX = None
-
-# only warn once about some things
-warned_about = {}
-
-# Nuke the TERM variable to avoid confusing any subprocesses we call.
-# For example, libreadline will print weird control sequences for some
-# TERM values.
-if 'TERM' in os.environ:
- del os.environ['TERM']
-
-LOG_NAME = __name__
-if LOG_NAME == '__main__':
- LOG_NAME = os.path.basename(sys.argv[0])
-LOG = logging.getLogger(LOG_NAME)
-
-# Allow user-preferred values for subprocess user and group
-CEPH_PREF_USER = None
-CEPH_PREF_GROUP = None
-
-
-class filelock(object):
-    def __init__(self, fn):
-        self.fn = fn
-        self.fd = None
-
-    def acquire(self):
-        assert not self.fd
-        self.fd = file(self.fn, 'w')
-        fcntl.lockf(self.fd, fcntl.LOCK_EX)
-
-    def release(self):
-        assert self.fd
-        fcntl.lockf(self.fd, fcntl.LOCK_UN)
-        self.fd = None
-
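filelock is how ceph-disk serializes concurrent prepare/activate runs; later in the file it is instantiated once per operation (prepare_lock, activate_lock). A usage sketch; the lock path here is an assumption, not quoted from this file:

    # Sketch: exclusive critical section via filelock; path is hypothetical.
    lock = filelock('/var/lib/ceph/tmp/ceph-disk.prepare.lock')
    lock.acquire()                  # blocks until fcntl grants LOCK_EX
    try:
        pass                        # partition / mkfs / mount work goes here
    finally:
        lock.release()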
-
-class Error(Exception):
- """
- Error
- """
-
- def __str__(self):
- doc = self.__doc__.strip()
- return ': '.join([doc] + [str(a) for a in self.args])
-
-
-class MountError(Error):
- """
- Mounting filesystem failed
- """
-
-
-class UnmountError(Error):
- """
- Unmounting filesystem failed
- """
-
-
-class BadMagicError(Error):
- """
- Does not look like a Ceph OSD, or incompatible version
- """
-
-
-class TruncatedLineError(Error):
- """
- Line is truncated
- """
-
-
-class TooManyLinesError(Error):
- """
- Too many lines
- """
-
-
-class FilesystemTypeError(Error):
- """
- Cannot discover filesystem type
- """
-
-
-class CephDiskException(Exception):
- """
- A base exception for ceph-disk to provide custom (ad-hoc) messages that
- will be caught and dealt with when main() is executed
- """
- pass
-
-
-class ExecutableNotFound(CephDiskException):
- """
- Exception to report on executables not available in PATH
- """
- pass
-
-
-def is_systemd():
- """
- Detect whether systemd is running
- """
- with file('/proc/1/comm', 'rb') as i:
- for line in i:
- if 'systemd' in line:
- return True
- return False
-
-
-def is_upstart():
- """
- Detect whether upstart is running
- """
- (out, err, _) = command(['init', '--version'])
- if 'upstart' in out:
- return True
- return False
-
-
-def maybe_mkdir(*a, **kw):
- """
- Creates a new directory if it doesn't exist; removes any
- existing symlink before creating the directory.
- """
- # remove any symlink, if it is there..
- if os.path.exists(*a) and stat.S_ISLNK(os.lstat(*a).st_mode):
- LOG.debug('Removing old symlink at %s', *a)
- os.unlink(*a)
- try:
- os.mkdir(*a, **kw)
- except OSError, e:
- if e.errno == errno.EEXIST:
- pass
- else:
- raise
-
-
-def which(executable):
- """find the location of an executable"""
- if 'PATH' in os.environ:
- envpath = os.environ['PATH']
- else:
- envpath = os.defpath
- PATH = envpath.split(os.pathsep)
-
- locations = PATH + [
- '/usr/local/bin',
- '/bin',
- '/usr/bin',
- '/usr/local/sbin',
- '/usr/sbin',
- '/sbin',
- ]
-
- for location in locations:
- executable_path = os.path.join(location, executable)
- if (os.path.isfile(executable_path) and
- os.access(executable_path, os.X_OK)):
- return executable_path
-
-
-def _get_command_executable(arguments):
- """
- Return the full path for an executable, raise if the executable is not
- found. If the executable has already a full path do not perform any checks.
- """
- if arguments[0].startswith('/'): # an absolute path
- return arguments
- executable = which(arguments[0])
- if not executable:
- command_msg = 'Could not run command: %s' % ' '.join(arguments)
- executable_msg = '%s not in path.' % arguments[0]
- raise ExecutableNotFound('%s %s' % (executable_msg, command_msg))
-
- # swap the old executable for the new one
- arguments[0] = executable
- return arguments
-
-
-def command(arguments, **kwargs):
- """
- Safely execute a ``subprocess.Popen`` call making sure that the
- executable exists and raising a helpful error message
- if it does not.
-
- .. note:: This should be the preferred way of calling ``subprocess.Popen``
- since it provides the caller with the safety net of making sure that
- executables *will* be found and will error nicely otherwise.
-
- This returns the output of the command and the return code of the
- process in a tuple: (output, returncode).
- """
- arguments = _get_command_executable(arguments)
- LOG.info('Running command: %s' % ' '.join(arguments))
- process = subprocess.Popen(
- arguments,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- **kwargs)
- out, err = process.communicate()
- return out, err, process.returncode
-
-
-def command_check_call(arguments):
- """
- Safely execute a ``subprocess.check_call`` call making sure that the
- executable exists and raising a helpful error message if it does not.
-
- .. note:: This should be the preferred way of calling
- ``subprocess.check_call`` since it provides the caller with the safety net
- of making sure that executables *will* be found and will error nicely
- otherwise.
- """
- arguments = _get_command_executable(arguments)
- LOG.info('Running command: %s', ' '.join(arguments))
- return subprocess.check_call(arguments)
-
-
-def platform_distro():
- """
- Returns a normalized, lower case string without any leading nor trailing
- whitespace that represents the distribution name of the current machine.
- """
- distro = platform_information()[0] or ''
- return distro.strip().lower()
-
-
-def platform_information():
- distro, release, codename = platform.linux_distribution()
- # this could be an empty string in Debian
- if not codename and 'debian' in distro.lower():
- debian_codenames = {
- '8': 'jessie',
- '7': 'wheezy',
- '6': 'squeeze',
- }
- major_version = release.split('.')[0]
- codename = debian_codenames.get(major_version, '')
-
- # In order to support newer jessie/sid or wheezy/sid strings we test
- # this if sid is buried in the minor, we should use sid anyway.
- if not codename and '/' in release:
- major, minor = release.split('/')
- if minor == 'sid':
- codename = minor
- else:
- codename = major
-
- return (
- str(distro).strip(),
- str(release).strip(),
- str(codename).strip()
- )
-
-#
-# An alternative block_path implementation would be
-#
-# name = basename(dev)
-# return /sys/devices/virtual/block/$name
-#
-# It is however more fragile because it relies on the fact
-# that the basename of the device the user will use always
-# matches the one the driver will use. On Ubuntu 14.04, for
-# instance, when multipath creates a partition table on
-#
-# /dev/mapper/353333330000007d0 -> ../dm-0
-#
-# it will create partition devices named
-#
-# /dev/mapper/353333330000007d0-part1
-#
-# which is the same device as /dev/dm-1 but not a symbolic
-# link to it:
-#
-# ubuntu@other:~$ ls -l /dev/mapper /dev/dm-1
-# brw-rw---- 1 root disk 252, 1 Aug 15 17:52 /dev/dm-1
-# lrwxrwxrwx 1 root root 7 Aug 15 17:52 353333330000007d0 -> ../dm-0
-# brw-rw---- 1 root disk 252, 1 Aug 15 17:52 353333330000007d0-part1
-#
-# Using the basename in this case fails.
-#
-
-
-def block_path(dev):
- path = os.path.realpath(dev)
- rdev = os.stat(path).st_rdev
- (M, m) = (os.major(rdev), os.minor(rdev))
- return "{sysfs}/dev/block/{M}:{m}".format(sysfs=SYSFS, M=M, m=m)
-
-
-def get_dm_uuid(dev):
- uuid_path = os.path.join(block_path(dev), 'dm', 'uuid')
- LOG.debug("get_dm_uuid " + dev + " uuid path is " + uuid_path)
- if not os.path.exists(uuid_path):
- return False
- uuid = open(uuid_path, 'r').read()
- LOG.debug("get_dm_uuid " + dev + " uuid is " + uuid)
- return uuid
-
-
-def is_mpath(dev):
- """
- True if the path is managed by multipath
- """
- uuid = get_dm_uuid(dev)
- return (uuid and
- (re.match('part\d+-mpath-', uuid) or
- re.match('mpath-', uuid)))
-
-
-def get_dev_name(path):
- """
- get device name from path. e.g.::
-
- /dev/sda -> sda, /dev/cciss/c0d1 -> cciss!c0d1
-
- a device "name" is something like::
-
- sdb
- cciss!c0d1
-
- """
- assert path.startswith('/dev/')
- base = path[5:]
- return base.replace('/', '!')
-
-
-def get_dev_path(name):
- """
- get a path (/dev/...) from a name (cciss!c0d1)
- a device "path" is something like::
-
- /dev/sdb
- /dev/cciss/c0d1
-
- """
- return '/dev/' + name.replace('!', '/')
-
-
-def get_dev_relpath(name):
- """
- get a relative path to /dev from a name (cciss!c0d1)
- """
- return name.replace('!', '/')
-
-
-def get_dev_size(dev, size='megabytes'):
- """
- Attempt to get the size of a device so that we can prevent errors
- from actions to devices that are smaller, and improve error reporting.
-
- Because we want to avoid breakage in case this approach is not robust, we
- will issue a warning if we failed to get the size.
-
- :param size: bytes or megabytes
- :param dev: the device to calculate the size
- """
- fd = os.open(dev, os.O_RDONLY)
- dividers = {'bytes': 1, 'megabytes': 1024 * 1024}
- try:
- device_size = os.lseek(fd, 0, os.SEEK_END)
- divider = dividers.get(size, 1024 * 1024) # default to megabytes
- return device_size / divider
- except Exception as error:
- LOG.warning('failed to get size of %s: %s' % (dev, str(error)))
- finally:
- os.close(fd)
-
-
-def get_partition_mpath(dev, pnum):
- part_re = "part{pnum}-mpath-".format(pnum=pnum)
- partitions = list_partitions_mpath(dev, part_re)
- if partitions:
- return partitions[0]
- else:
- return None
-
-
-def get_partition_dev(dev, pnum):
- """
- get the device name for a partition
-
- assume that partitions are named like the base dev,
- with a number, and optionally
- some intervening characters (like 'p'). e.g.,
-
- sda 1 -> sda1
- cciss/c0d1 1 -> cciss!c0d1p1
- """
- partname = None
- if is_mpath(dev):
- partname = get_partition_mpath(dev, pnum)
- else:
- name = get_dev_name(os.path.realpath(dev))
- for f in os.listdir(os.path.join('/sys/block', name)):
- if f.startswith(name) and f.endswith(str(pnum)):
- # we want the shortest name that starts with the base name
- # and ends with the partition number
- if not partname or len(f) < len(partname):
- partname = f
- if partname:
- return get_dev_path(partname)
- else:
- raise Error('partition %d for %s does not appear to exist' %
- (pnum, dev))
-
-
-def list_all_partitions():
- """
- Return a list of devices and partitions
- """
- names = os.listdir('/sys/block')
- dev_part_list = {}
- for name in names:
- # /dev/fd0 may hang http://tracker.ceph.com/issues/6827
- if re.match(r'^fd\d$', name):
- continue
- dev_part_list[name] = list_partitions(get_dev_path(name))
- return dev_part_list
-
-
-def list_partitions(dev):
- dev = os.path.realpath(dev)
- if is_mpath(dev):
- return list_partitions_mpath(dev)
- else:
- return list_partitions_device(dev)
-
-
-def list_partitions_mpath(dev, part_re="part\d+-mpath-"):
- p = block_path(dev)
- partitions = []
- holders = os.path.join(p, 'holders')
- for holder in os.listdir(holders):
- uuid_path = os.path.join(holders, holder, 'dm', 'uuid')
- uuid = open(uuid_path, 'r').read()
- LOG.debug("list_partitions_mpath: " + uuid_path + " uuid = " + uuid)
- if re.match(part_re, uuid):
- partitions.append(holder)
- return partitions
-
-
-def list_partitions_device(dev):
- """
- Return a list of partitions on the given device name
- """
- partitions = []
- basename = get_dev_name(dev)
- for name in os.listdir(block_path(dev)):
- if name.startswith(basename):
- partitions.append(name)
- return partitions
-
-
-def get_partition_base(dev):
- """
- Get the base device for a partition
- """
- dev = os.path.realpath(dev)
- if not stat.S_ISBLK(os.lstat(dev).st_mode):
- raise Error('not a block device', dev)
-
- name = get_dev_name(dev)
- if os.path.exists(os.path.join('/sys/block', name)):
- raise Error('not a partition', dev)
-
- # find the base
- for basename in os.listdir('/sys/block'):
- if os.path.exists(os.path.join('/sys/block', basename, name)):
- return get_dev_path(basename)
- raise Error('no parent device for partition', dev)
-
-
-def is_partition_mpath(dev):
- uuid = get_dm_uuid(dev)
- return bool(re.match('part\d+-mpath-', uuid))
-
-
-def partnum_mpath(dev):
- uuid = get_dm_uuid(dev)
- return re.findall('part(\d+)-mpath-', uuid)[0]
-
-
-def get_partition_base_mpath(dev):
- slave_path = os.path.join(block_path(dev), 'slaves')
- slaves = os.listdir(slave_path)
- assert slaves
- name_path = os.path.join(slave_path, slaves[0], 'dm', 'name')
- name = open(name_path, 'r').read().strip()
- return os.path.join('/dev/mapper', name)
-
-
-def is_partition(dev):
- """
- Check whether a given device path is a partition or a full disk.
- """
- if is_mpath(dev):
- return is_partition_mpath(dev)
-
- dev = os.path.realpath(dev)
- st = os.lstat(dev)
- if not stat.S_ISBLK(st.st_mode):
- raise Error('not a block device', dev)
-
- name = get_dev_name(dev)
- if os.path.exists(os.path.join('/sys/block', name)):
- return False
-
- # make sure it is a partition of something else
- major = os.major(st.st_rdev)
- minor = os.minor(st.st_rdev)
- if os.path.exists('/sys/dev/block/%d:%d/partition' % (major, minor)):
- return True
-
- raise Error('not a disk or partition', dev)
-
-
-def is_mounted(dev):
- """
- Check if the given device is mounted.
- """
- dev = os.path.realpath(dev)
- with file('/proc/mounts', 'rb') as proc_mounts:
- for line in proc_mounts:
- fields = line.split()
- if len(fields) < 3:
- continue
- mounts_dev = fields[0]
- path = fields[1]
- if mounts_dev.startswith('/') and os.path.exists(mounts_dev):
- mounts_dev = os.path.realpath(mounts_dev)
- if mounts_dev == dev:
- return path
- return None
-
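is_mounted() answers by scanning /proc/mounts, whose lines are "device mountpoint fstype options dump pass"; only the first two fields matter here. For example (sketch; the line is made up):

    # Sketch: the split is_mounted() performs on a /proc/mounts line.
    line = '/dev/sdb1 /var/lib/ceph/osd/ceph-0 xfs rw,noatime,inode64 0 0'
    fields = line.split()
    mounts_dev, path = fields[0], fields[1]
    assert (mounts_dev, path) == ('/dev/sdb1', '/var/lib/ceph/osd/ceph-0')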
-
-def is_held(dev):
- """
- Check if a device is held by another device (e.g., a dm-crypt mapping)
- """
- assert os.path.exists(dev)
- if is_mpath(dev):
- return []
-
- dev = os.path.realpath(dev)
- base = get_dev_name(dev)
-
- # full disk?
- directory = '/sys/block/{base}/holders'.format(base=base)
- if os.path.exists(directory):
- return os.listdir(directory)
-
- # partition?
- part = base
- while len(base):
- directory = '/sys/block/{base}/{part}/holders'.format(
- part=part, base=base)
- if os.path.exists(directory):
- return os.listdir(directory)
- base = base[:-1]
- return []
-
-
-def verify_not_in_use(dev, check_partitions=False):
- """
- Verify that a given device (path) is not in use (e.g. mounted or
- held by device-mapper).
-
- :raises: Error if device is in use.
- """
- assert os.path.exists(dev)
- if is_mounted(dev):
- raise Error('Device is mounted', dev)
- holders = is_held(dev)
- if holders:
- raise Error('Device %s is in use by a device-mapper '
- 'mapping (dm-crypt?)' % dev, ','.join(holders))
-
- if check_partitions and not is_partition(dev):
- for partname in list_partitions(dev):
- partition = get_dev_path(partname)
- if is_mounted(partition):
- raise Error('Device is mounted', partition)
- holders = is_held(partition)
- if holders:
- raise Error('Device %s is in use by a device-mapper '
- 'mapping (dm-crypt?)'
- % partition, ','.join(holders))
-
-
-def must_be_one_line(line):
- """
- Checks if given line is really one single line.
-
- :raises: TruncatedLineError or TooManyLinesError
- :return: Content of the line, or None if line isn't valid.
- """
- if line[-1:] != '\n':
- raise TruncatedLineError(line)
- line = line[:-1]
- if '\n' in line:
- raise TooManyLinesError(line)
- return line
-
-
-def read_one_line(parent, name):
- """
- Read a file whose sole contents are a single line.
-
- Strips the newline.
-
- :return: Contents of the line, or None if file did not exist.
- """
- path = os.path.join(parent, name)
- try:
- line = file(path, 'rb').read()
- except IOError as e:
- if e.errno == errno.ENOENT:
- return None
- else:
- raise
-
- try:
- line = must_be_one_line(line)
- except (TruncatedLineError, TooManyLinesError) as e:
- raise Error(
- 'File is corrupt: {path}: {msg}'.format(
- path=path,
- msg=e,
- )
- )
- return line
-
-
-def write_one_line(parent, name, text):
- """
- Write a file whose sole contents are a single line.
-
- Adds a newline.
- """
- path = os.path.join(parent, name)
- tmp = '{path}.{pid}.tmp'.format(path=path, pid=os.getpid())
- with file(tmp, 'wb') as tmp_file:
- tmp_file.write(text + '\n')
- os.fsync(tmp_file.fileno())
- path_set_context(tmp)
- os.rename(tmp, path)
-
-
-def init_get():
- """
- Get the init system using 'ceph-detect-init'
- """
- init = _check_output(
- args=[
- 'ceph-detect-init',
- '--default', 'sysvinit',
- ],
- )
- init = must_be_one_line(init)
- return init
-
-
-def check_osd_magic(path):
- """
- Check that this path has the Ceph OSD magic.
-
- :raises: BadMagicError if this does not look like a Ceph OSD data
- dir.
- """
- magic = read_one_line(path, 'magic')
- if magic is None:
- # probably not mkfs'ed yet
- raise BadMagicError(path)
- if magic != CEPH_OSD_ONDISK_MAGIC:
- raise BadMagicError(path)
-
-
-def check_osd_id(osd_id):
- """
- Ensures osd id is numeric.
- """
- if not re.match(r'^[0-9]+$', osd_id):
- raise Error('osd id is not numeric', osd_id)
-
-
-def allocate_osd_id(
-    cluster,
-    fsid,
-    keyring,
-):
-    """
-    Allocates an OSD id on the given cluster.
-
-    :raises: Error if the call to allocate the OSD id fails.
-    :return: The allocated OSD id.
-    """
-
-    LOG.debug('Allocating OSD id...')
-    try:
-        osd_id = _check_output(
-            args=[
-                'ceph',
-                '--cluster', cluster,
-                '--name', 'client.bootstrap-osd',
-                '--keyring', keyring,
-                'osd', 'create', '--concise',
-                fsid,
-            ],
-        )
-    except subprocess.CalledProcessError as e:
-        raise Error('ceph osd create failed', e, e.output)
-    osd_id = must_be_one_line(osd_id)
-    check_osd_id(osd_id)
-    return osd_id
-
-
-def get_osd_id(path):
- """
- Gets the OSD id of the OSD at the given path.
- """
- osd_id = read_one_line(path, 'whoami')
- if osd_id is not None:
- check_osd_id(osd_id)
- return osd_id
-
-
-def get_ceph_user():
- global CEPH_PREF_USER
-
- if CEPH_PREF_USER is not None:
- try:
- pwd.getpwnam(CEPH_PREF_USER)
- return CEPH_PREF_USER
- except KeyError:
- print "No such user: " + CEPH_PREF_USER
- sys.exit(2)
- else:
- try:
- pwd.getpwnam('ceph')
- return 'ceph'
- except KeyError:
- return 'root'
-
-
-def get_ceph_group():
- global CEPH_PREF_GROUP
-
- if CEPH_PREF_GROUP is not None:
- try:
- grp.getgrnam(CEPH_PREF_GROUP)
- return CEPH_PREF_GROUP
- except KeyError:
- print "No such group: " + CEPH_PREF_GROUP
- sys.exit(2)
- else:
- try:
- grp.getgrnam('ceph')
- return 'ceph'
- except KeyError:
- return 'root'
-
-
-def path_set_context(path):
- # restore selinux context to default policy values
- if which('restorecon'):
- command(['restorecon', '-R', path])
-
- # if ceph user exists, set owner to ceph
- if get_ceph_user() == 'ceph':
- command(['chown', '-R', 'ceph:ceph', path])
-
-
-def _check_output(args=None, **kwargs):
- out, err, ret = command(args, **kwargs)
- if ret:
- cmd = args[0]
- error = subprocess.CalledProcessError(ret, cmd)
- error.output = out + err
- raise error
- return out
-
-
-def get_conf(cluster, variable):
- """
- Get the value of the given configuration variable from the
- cluster.
-
- :raises: Error if call to ceph-conf fails.
- :return: The variable value or None.
- """
- try:
- out, err, ret = command(
- [
- 'ceph-conf',
- '--cluster={cluster}'.format(
- cluster=cluster,
- ),
- '--name=osd.',
- '--lookup',
- variable,
- ],
- close_fds=True,
- )
- except OSError as e:
- raise Error('error executing ceph-conf', e, err)
- if ret == 1:
- # config entry not found
- return None
- elif ret != 0:
- raise Error('getting variable from configuration failed')
- value = out.split('\n', 1)[0]
- # don't differentiate between "var=" and no var set
- if not value:
- return None
- return value
-
-
-def get_conf_with_default(cluster, variable):
- """
- Get a config value that is known to the C++ code.
-
- This will fail if called on variables that are not defined in
- common config options.
- """
- try:
- out = _check_output(
- args=[
- 'ceph-osd',
- '--cluster={cluster}'.format(
- cluster=cluster,
- ),
- '--show-config-value={variable}'.format(
- variable=variable,
- ),
- ],
- close_fds=True,
- )
- except subprocess.CalledProcessError as e:
- raise Error(
- 'getting variable from configuration failed',
- e,
- )
-
- value = str(out).split('\n', 1)[0]
- return value
-
-
-def get_fsid(cluster):
- """
- Get the fsid of the cluster.
-
- :return: The fsid or raises Error.
- """
- fsid = get_conf_with_default(cluster=cluster, variable='fsid')
- if fsid is None:
- raise Error('getting cluster uuid from configuration failed')
- return fsid.lower()
-
-
-def get_dmcrypt_key_path(
- _uuid,
- key_dir,
- luks
-):
- """
- Get path to dmcrypt key file.
-
- :return: Path to the dmcrypt key file, callers should check for existence.
- """
- if luks:
- path = os.path.join(key_dir, _uuid + ".luks.key")
- else:
- path = os.path.join(key_dir, _uuid)
-
- return path
-
-
-def get_or_create_dmcrypt_key(
- _uuid,
- key_dir,
- key_size,
- luks
-):
- """
- Get path to existing dmcrypt key or create a new key file.
-
- :return: Path to the dmcrypt key file.
- """
- path = get_dmcrypt_key_path(_uuid, key_dir, luks)
- if os.path.exists(path):
- return path
-
- # make a new key
- try:
- if not os.path.exists(key_dir):
- os.makedirs(key_dir, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
- with file('/dev/urandom', 'rb') as i:
- key = i.read(key_size / 8)
- fd = os.open(path, os.O_WRONLY | os.O_CREAT,
- stat.S_IRUSR | stat.S_IWUSR)
- assert os.write(fd, key) == len(key)
- os.close(fd)
- return path
- except:
- raise Error('unable to read or create dm-crypt key', path)
-
-
-def _dmcrypt_map(
- rawdev,
- keypath,
- _uuid,
- cryptsetup_parameters,
- luks,
- format_dev=False,
-):
- """
- Maps a device to a dmcrypt device.
-
- :return: Path to the dmcrypt device.
- """
- dev = '/dev/mapper/' + _uuid
- luksFormat_args = [
- 'cryptsetup',
- '--batch-mode',
- '--key-file',
- keypath,
- 'luksFormat',
- rawdev,
- ] + cryptsetup_parameters
-
- luksOpen_args = [
- 'cryptsetup',
- '--key-file',
- keypath,
- 'luksOpen',
- rawdev,
- _uuid,
- ]
-
- create_args = [
- 'cryptsetup',
- '--key-file',
- keypath,
- 'create',
- _uuid,
- rawdev,
- ] + cryptsetup_parameters
-
- try:
- if luks:
- if format_dev:
- command_check_call(luksFormat_args)
- command_check_call(luksOpen_args)
- else:
- # Plain mode has no format function, nor any validation
- # that the key is correct.
- command_check_call(create_args)
- # set proper ownership of mapped device
- command_check_call(['chown', 'ceph:ceph', dev])
- return dev
-
- except subprocess.CalledProcessError as e:
- raise Error('unable to map device', rawdev, e)
-
-
-def dmcrypt_unmap(
- _uuid
-):
- """
- Removes the dmcrypt device with the given UUID.
- """
- retries = 0
- while True:
- try:
- command_check_call(['cryptsetup', 'remove', _uuid])
- break
- except subprocess.CalledProcessError as e:
- if retries == 10:
- raise Error('unable to unmap device', _uuid, e)
- else:
- time.sleep(0.5 + retries * 1.0)
- retries += 1
-
-
-def mount(
- dev,
- fstype,
- options,
-):
- """
- Mounts a device with the given filesystem type and
- mount options to a tempfile path under /var/lib/ceph/tmp.
- """
- # sanity check: none of the arguments are None
- if dev is None:
- raise ValueError('dev may not be None')
- if fstype is None:
- raise ValueError('fstype may not be None')
-
- # pick best-of-breed mount options based on fs type
- if options is None:
- options = MOUNT_OPTIONS.get(fstype, '')
-
- # mount
- path = tempfile.mkdtemp(
- prefix='mnt.',
- dir=STATEDIR + '/tmp',
- )
- try:
- LOG.debug('Mounting %s on %s with options %s', dev, path, options)
- command_check_call(
- [
- 'mount',
- '-t', fstype,
- '-o', options,
- '--',
- dev,
- path,
- ],
- )
- if which('restorecon'):
- command(
- [
- 'restorecon',
- path,
- ],
- )
- except subprocess.CalledProcessError as e:
- try:
- os.rmdir(path)
- except (OSError, IOError):
- pass
- raise MountError(e)
-
- return path
-
-
-def unmount(
- path,
-):
- """
- Unmounts and removes the given mount point.
- """
- retries = 0
- while True:
- try:
- LOG.debug('Unmounting %s', path)
- command_check_call(
- [
- '/bin/umount',
- '--',
- path,
- ],
- )
- break
- except subprocess.CalledProcessError as e:
- # on failure, retry 3 times with incremental backoff
- if retries == 3:
- raise UnmountError(e)
- else:
- time.sleep(0.5 + retries * 1.0)
- retries += 1
-
- os.rmdir(path)
-
-
-###########################################
-
-def extract_parted_partition_numbers(partitions):
- numbers_as_strings = re.findall('^\d+', partitions, re.MULTILINE)
- return map(int, numbers_as_strings)
-
-
-def get_free_partition_index(dev):
- """
- Get the next free partition index on a given device.
-
- :return: Index number (> 1 if there is already a partition on the device)
- or 1 if there is no partition table.
- """
- try:
- lines = _check_output(
- args=[
- 'parted',
- '--machine',
- '--',
- dev,
- 'print',
- ],
- )
- except subprocess.CalledProcessError as e:
- LOG.info('cannot read partition index; assume it '
- 'isn\'t present\n (Error: %s)' % e)
- return 1
-
- if not lines:
- raise Error('parted failed to output anything')
- LOG.debug('get_free_partition_index: analyzing ' + lines)
- if ('CHS;' not in lines and
- 'CYL;' not in lines and
- 'BYT;' not in lines):
- raise Error('parted output expected to contain one of ' +
- 'CHS; CYL; or BYT; : ' + lines)
- if os.path.realpath(dev) not in lines:
- raise Error('parted output expected to contain ' + dev + ': ' + lines)
- _, partitions = lines.split(os.path.realpath(dev))
- partition_numbers = extract_parted_partition_numbers(partitions)
- if partition_numbers:
- return max(partition_numbers) + 1
- else:
- return 1
-
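parted --machine prints a 'BYT;' (or CHS;/CYL;) header, the device row, then one colon-separated row per partition with the partition number first; get_free_partition_index() splits on the device path and hands the tail to extract_parted_partition_numbers(). A sketch with made-up output, assuming the two functions above are in scope (Python 2, like the rest of this file):

    # Sketch: sample machine-readable parted output (values invented).
    sample = ('BYT;\n'
              '/dev/sdb:100GB:scsi:512:512:gpt:ACME Disk;\n'
              '1:1049kB:5000MB:5000MB:xfs:ceph data:;\n'
              '2:5000MB:10.0GB:5000MB::ceph journal:;\n')
    _, partitions = sample.split('/dev/sdb')
    assert extract_parted_partition_numbers(partitions) == [1, 2]
    # the next free index would then be max([1, 2]) + 1 == 3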
-
-def check_journal_reqs(args):
- _, _, allows_journal = command([
- 'ceph-osd', '--check-allows-journal',
- '-i', '0',
- '--cluster', args.cluster,
- ])
- _, _, wants_journal = command([
- 'ceph-osd', '--check-wants-journal',
- '-i', '0',
- '--cluster', args.cluster,
- ])
- _, _, needs_journal = command([
- 'ceph-osd', '--check-needs-journal',
- '-i', '0',
- '--cluster', args.cluster,
- ])
- return (not allows_journal, not wants_journal, not needs_journal)
-
-
-def update_partition(dev, description):
- """
- Must be called after modifying a partition table so the kernel
- knows about the change and fires udev events accordingly. A side
- effect of partprobe is to remove partitions and add them again.
- The first udevadm settle waits for ongoing udev events to
- complete, just in case one of them relies on an existing partition
- on dev. The second udevadm settle guarantees to the caller that
- all udev events related to the partition table change have been
- processed, i.e. the 95-ceph-osd.rules actions and mode changes,
- group changes etc. are complete.
- """
- LOG.debug('Calling partprobe on %s device %s', description, dev)
- partprobe_ok = False
- error = 'unknown error'
- for i in (1, 2, 3, 4, 5):
- command_check_call(['udevadm', 'settle', '--timeout=600'])
- try:
- _check_output(['partprobe', dev])
- partprobe_ok = True
- break
- except subprocess.CalledProcessError as e:
- error = e.output
- if ('unable to inform the kernel' not in error and
- 'Device or resource busy' not in error):
- raise
- LOG.debug('partprobe %s failed : %s (ignored, waiting 60s)'
- % (dev, error))
- time.sleep(60)
- if not partprobe_ok:
- raise Error('partprobe %s failed : %s' % (dev, error))
- command_check_call(['udevadm', 'settle', '--timeout=600'])
-
-
-def zap(dev):
- """
- Destroy the partition table and content of a given disk.
- """
- dev = os.path.realpath(dev)
- dmode = os.stat(dev).st_mode
- if not stat.S_ISBLK(dmode) or is_partition(dev):
- raise Error('not full block device; cannot zap', dev)
- try:
- LOG.debug('Zapping partition table on %s', dev)
-
- # try to wipe out any GPT partition table backups. sgdisk
- # isn't too thorough.
- lba_size = 4096
- size = 33 * lba_size
- with file(dev, 'wb') as dev_file:
- dev_file.seek(-size, os.SEEK_END)
- dev_file.write(size * '\0')
-
- command_check_call(
- [
- 'sgdisk',
- '--zap-all',
- '--',
- dev,
- ],
- )
- command_check_call(
- [
- 'sgdisk',
- '--clear',
- '--mbrtogpt',
- '--',
- dev,
- ],
- )
-
- update_partition(dev, 'zapped')
-
- except subprocess.CalledProcessError as e:
- raise Error(e)
-
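zap() clears both copies of the GPT: sgdisk --zap-all plus a manual wipe of the disk's tail, where the backup GPT header and partition table live. The arithmetic behind the wipe (illustrative restatement of the constants above):

    # The backup GPT occupies the final 33 logical blocks; with the
    # conservative 4096-byte LBA size assumed above, zap() zeroes:
    lba_size = 4096
    size = 33 * lba_size     # bytes written at dev_file.seek(-size, SEEK_END)
    assert size == 135168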
-
-def adjust_symlink(target, path):
- create = True
- if os.path.lexists(path):
- try:
- mode = os.lstat(path).st_mode
- if stat.S_ISREG(mode):
- LOG.debug('Removing old file %s', path)
- os.unlink(path)
- elif stat.S_ISLNK(mode):
- old = os.readlink(path)
- if old != target:
- LOG.debug('Removing old symlink %s -> %s', path, old)
- os.unlink(path)
- else:
- create = False
- except:
- raise Error('unable to remove (or adjust) old file (symlink)',
- path)
- if create:
- LOG.debug('Creating symlink %s -> %s', path, target)
- try:
- os.symlink(target, path)
- except:
- raise Error('unable to create symlink %s -> %s' % (path, target))
-
-
-class Device(object):
-
- def __init__(self, path, args):
- self.args = args
- self.path = path
- self.dev_size = None
- self.partitions = {}
- self.ptype_map = None
- assert not is_partition(self.path)
-
- def create_partition(self, uuid, name, size=0, num=0):
- ptype = self.ptype_tobe_for_name(name)
- if num == 0:
- num = get_free_partition_index(dev=self.path)
- if size > 0:
- new = '--new={num}:0:+{size}M'.format(num=num, size=size)
- if size > self.get_dev_size():
- LOG.error('refusing to create %s on %s' % (name, self.path))
- LOG.error('%s size (%sM) is bigger than device (%sM)'
- % (name, size, self.get_dev_size()))
- raise Error('%s device size (%sM) is not big enough for %s'
- % (self.path, self.get_dev_size(), name))
- else:
- new = '--largest-new={num}'.format(num=num)
-
- LOG.debug('Creating %s partition num %d size %d on %s',
- name, num, size, self.path)
- command_check_call(
- [
- 'sgdisk',
- new,
- '--change-name={num}:ceph {name}'.format(num=num, name=name),
- '--partition-guid={num}:{uuid}'.format(num=num, uuid=uuid),
- '--typecode={num}:{uuid}'.format(num=num, uuid=ptype),
- '--mbrtogpt',
- '--',
- self.path,
- ]
- )
- update_partition(self.path, 'created')
- return num
-
- def ptype_tobe_for_name(self, name):
- if name == 'data':
- name = 'osd'
- if self.ptype_map is None:
- partition = DevicePartition.factory(
- path=self.path, dev=None, args=self.args)
- self.ptype_map = partition.ptype_map
- return self.ptype_map[name]['tobe']
-
- def get_partition(self, num):
- if num not in self.partitions:
- dev = get_partition_dev(self.path, num)
- partition = DevicePartition.factory(
- path=self.path, dev=dev, args=self.args)
- partition.set_partition_number(num)
- self.partitions[num] = partition
- return self.partitions[num]
-
- def get_dev_size(self):
- if self.dev_size is None:
- self.dev_size = get_dev_size(self.path)
- return self.dev_size
-
- @staticmethod
- def factory(path, args):
- return Device(path, args)
-
-
-class DevicePartition(object):
-
- def __init__(self, args):
- self.args = args
- self.num = None
- self.rawdev = None
- self.dev = None
- self.uuid = None
- self.ptype_map = None
- self.ptype = None
- self.set_variables_ptype()
-
- def get_uuid(self):
- if self.uuid is None:
- self.uuid = get_partition_uuid(self.rawdev)
- return self.uuid
-
- def get_ptype(self):
- if self.ptype is None:
- self.ptype = get_partition_type(self.rawdev)
- return self.ptype
-
- def set_partition_number(self, num):
- self.num = num
-
- def get_partition_number(self):
- return self.num
-
- def set_dev(self, dev):
- self.dev = dev
- self.rawdev = dev
-
- def get_dev(self):
- return self.dev
-
- def get_rawdev(self):
- return self.rawdev
-
- def set_variables_ptype(self):
- self.ptype_map = PTYPE['regular']
-
- def ptype_for_name(self, name):
- return self.ptype_map[name]['ready']
-
- @staticmethod
- def factory(path, dev, args):
- dmcrypt_type = CryptHelpers.get_dmcrypt_type(args)
- if ((path is not None and is_mpath(path)) or
- (dev is not None and is_mpath(dev))):
- partition = DevicePartitionMultipath(args)
- elif dmcrypt_type == 'luks':
- partition = DevicePartitionCryptLuks(args)
- elif dmcrypt_type == 'plain':
- partition = DevicePartitionCryptPlain(args)
- else:
- partition = DevicePartition(args)
- partition.set_dev(dev)
- return partition
-
-
-class DevicePartitionMultipath(DevicePartition):
-
- def set_variables_ptype(self):
- self.ptype_map = PTYPE['mpath']
-
-
-class DevicePartitionCrypt(DevicePartition):
-
- def __init__(self, args):
- super(DevicePartitionCrypt, self).__init__(args)
- self.osd_dm_keypath = None
- self.cryptsetup_parameters = CryptHelpers.get_cryptsetup_parameters(
- self.args)
- self.dmcrypt_type = CryptHelpers.get_dmcrypt_type(self.args)
- self.dmcrypt_keysize = CryptHelpers.get_dmcrypt_keysize(self.args)
-
- def setup_crypt(self):
- pass
-
- def map(self):
- self.setup_crypt()
- self.dev = _dmcrypt_map(
- rawdev=self.rawdev,
- keypath=self.osd_dm_keypath,
- _uuid=self.get_uuid(),
- cryptsetup_parameters=self.cryptsetup_parameters,
- luks=self.luks(),
- format_dev=True,
- )
-
- def unmap(self):
- self.setup_crypt()
- dmcrypt_unmap(self.get_uuid())
- self.dev = self.rawdev
-
- def format(self):
- self.setup_crypt()
- self.map()
- self.unmap()
-
-
-class DevicePartitionCryptPlain(DevicePartitionCrypt):
-
- def luks(self):
- return False
-
- def setup_crypt(self):
- if self.osd_dm_keypath is not None:
- return
-
- self.cryptsetup_parameters += ['--key-size', str(self.dmcrypt_keysize)]
-
- self.osd_dm_keypath = get_or_create_dmcrypt_key(
- self.get_uuid(), self.args.dmcrypt_key_dir,
- self.dmcrypt_keysize, False)
-
- def set_variables_ptype(self):
- self.ptype_map = PTYPE['plain']
-
-
-class DevicePartitionCryptLuks(DevicePartitionCrypt):
-
- def luks(self):
- return True
-
- def setup_crypt(self):
- if self.osd_dm_keypath is not None:
- return
-
- if self.dmcrypt_keysize == 1024:
- # We don't force this into the cryptsetup_parameters,
- # as we want the cryptsetup defaults
- # to prevail for the actual LUKS key lengths.
- pass
- else:
- self.cryptsetup_parameters += ['--key-size',
- str(self.dmcrypt_keysize)]
-
- self.osd_dm_keypath = get_or_create_dmcrypt_key(
- self.get_uuid(), self.args.dmcrypt_key_dir,
- self.dmcrypt_keysize, True)
-
- def set_variables_ptype(self):
- self.ptype_map = PTYPE['luks']
-
-
-class Prepare(object):
-
- @staticmethod
- def parser():
- parser = argparse.ArgumentParser(add_help=False)
- parser.add_argument(
- '--cluster',
- metavar='NAME',
- default='ceph',
- help='cluster name to assign this disk to',
- )
- parser.add_argument(
- '--cluster-uuid',
- metavar='UUID',
- help='cluster uuid to assign this disk to',
- )
- parser.add_argument(
- '--osd-uuid',
- metavar='UUID',
- help='unique OSD uuid to assign this disk to',
- )
- parser.add_argument(
- '--dmcrypt',
- action='store_true', default=None,
- help='encrypt DATA and/or JOURNAL devices with dm-crypt',
- )
- parser.add_argument(
- '--dmcrypt-key-dir',
- metavar='KEYDIR',
- default='/etc/ceph/dmcrypt-keys',
- help='directory where dm-crypt keys are stored',
- )
- return parser
-
- @staticmethod
- def set_subparser(subparsers):
- parents = [
- Prepare.parser(),
- PrepareData.parser(),
- ]
- parents.extend(PrepareFilestore.parent_parsers())
- parents.extend(PrepareBluestore.parent_parsers())
- parser = subparsers.add_parser(
- 'prepare',
- parents=parents,
- help='Prepare a directory or disk for a Ceph OSD',
- )
- parser.set_defaults(
- func=Prepare.main,
- )
- return parser
-
- def prepare(self):
- prepare_lock.acquire()
- self.prepare_locked()
- prepare_lock.release()
-
- @staticmethod
- def factory(args):
- if args.bluestore:
- return PrepareBluestore(args)
- else:
- return PrepareFilestore(args)
-
- @staticmethod
- def main(args):
- Prepare.factory(args).prepare()
-
-
-class PrepareFilestore(Prepare):
-
- def __init__(self, args):
- self.data = PrepareFilestoreData(args)
- self.journal = PrepareJournal(args)
-
- @staticmethod
- def parent_parsers():
- return [
- PrepareJournal.parser(),
- ]
-
- def prepare_locked(self):
- self.data.prepare(self.journal)
-
-
-class PrepareBluestore(Prepare):
-
- def __init__(self, args):
- self.data = PrepareBluestoreData(args)
- self.block = PrepareBluestoreBlock(args)
-
- @staticmethod
- def parser():
- parser = argparse.ArgumentParser(add_help=False)
- parser.add_argument(
- '--bluestore',
- action='store_true', default=None,
- help='bluestore objectstore',
- )
- parser.add_argument(
- '--filestore',
- action='store_true', default=True,
- help='IGNORED FORWARD COMPATIBILITY HACK',
- )
- return parser
-
- @staticmethod
- def parent_parsers():
- return [
- PrepareBluestore.parser(),
- PrepareBluestoreBlock.parser(),
- ]
-
- def prepare_locked(self):
- self.data.prepare(self.block)
-
-
-class Space(object):
-
- NAMES = ('block', 'journal')
-
-
-class PrepareSpace(object):
-
- NONE = 0
- FILE = 1
- DEVICE = 2
-
- def __init__(self, args):
- self.args = args
- self.set_type()
- self.space_size = self.get_space_size()
- if (getattr(self.args, self.name) and
- getattr(self.args, self.name + '_uuid') is None):
- setattr(self.args, self.name + '_uuid', str(uuid.uuid4()))
- self.space_symlink = None
- self.space_dmcrypt = None
-
- def set_type(self):
- name = self.name
- args = self.args
- dmode = os.stat(args.data).st_mode
- if (self.wants_space() and
- stat.S_ISBLK(dmode) and
- not is_partition(args.data) and
- getattr(args, name) is None and
- getattr(args, name + '_file') is None):
- LOG.info('Will colocate %s with data on %s',
- name, args.data)
- setattr(args, name, args.data)
-
- if getattr(args, name) is None:
- if getattr(args, name + '_dev'):
- raise Error('%s is unspecified; not a block device' %
- name.capitalize(), getattr(args, name))
- self.type = self.NONE
- return
-
- if not os.path.exists(getattr(args, name)):
- if getattr(args, name + '_dev'):
- raise Error('%s does not exist; not a block device' %
- name.capitalize(), getattr(args, name))
- self.type = self.FILE
- return
-
- mode = os.stat(getattr(args, name)).st_mode
- if stat.S_ISBLK(mode):
- if getattr(args, name + '_file'):
- raise Error('%s is not a regular file' % name.capitalize(),
- getattr(args, name))
- self.type = self.DEVICE
- return
-
- if stat.S_ISREG(mode):
- if getattr(args, name + '_dev'):
- raise Error('%s is not a block device' % name.capitalize(),
- getattr(args, name))
- self.type = self.FILE
- return
-
- raise Error('%s %s is neither a block device nor regular file' %
- (name.capitalize(), getattr(args, name)))
-
- def is_none(self):
- return self.type == self.NONE
-
- def is_file(self):
- return self.type == self.FILE
-
- def is_device(self):
- return self.type == self.DEVICE
-
- @staticmethod
- def parser(name):
- parser = argparse.ArgumentParser(add_help=False)
- parser.add_argument(
- '--%s-uuid' % name,
- metavar='UUID',
- help='unique uuid to assign to the %s' % name,
- )
- parser.add_argument(
- '--%s-file' % name,
- action='store_true', default=None,
- help='verify that %s is a file' % name.upper(),
- )
- parser.add_argument(
- '--%s-dev' % name,
- action='store_true', default=None,
- help='verify that %s is a block device' % name.upper(),
- )
- parser.add_argument(
- name,
- metavar=name.upper(),
- nargs='?',
- help=('path to OSD %s disk block device;' % name +
- ' leave out to store %s in file' % name),
- )
- return parser
-
- def wants_space(self):
- return True
-
- def populate_data_path(self, path):
- if self.type == self.DEVICE:
- self.populate_data_path_device(path)
- elif self.type == self.FILE:
- self.populate_data_path_file(path)
- elif self.type == self.NONE:
- pass
- else:
- raise Error('unexpected type ', self.type)
-
- def populate_data_path_file(self, path):
- space_uuid = self.name + '_uuid'
- if getattr(self.args, space_uuid) is not None:
- write_one_line(path, space_uuid,
- getattr(self.args, space_uuid))
-
- def populate_data_path_device(self, path):
- self.populate_data_path_file(path)
- if self.space_symlink is not None:
- adjust_symlink(self.space_symlink,
- os.path.join(path, self.name))
-
- if self.space_dmcrypt is not None:
- adjust_symlink(self.space_dmcrypt,
- os.path.join(path, self.name + '_dmcrypt'))
- else:
- try:
- os.unlink(os.path.join(path, self.name + '_dmcrypt'))
- except OSError:
- pass
-
- def prepare(self):
- if self.type == self.DEVICE:
- self.prepare_device()
- elif self.type == self.FILE:
- self.prepare_file()
- elif self.type == self.NONE:
- pass
- else:
- raise Error('unexpected type ', self.type)
-
- def prepare_file(self):
- if not os.path.exists(getattr(self.args, self.name)):
- LOG.debug('Creating %s file %s with size 0'
- ' (ceph-osd will resize and allocate)',
- self.name,
- getattr(self.args, self.name))
- with open(getattr(self.args, self.name), 'wb'):
- pass
-
- LOG.debug('%s is file %s',
- self.name.capitalize(),
- getattr(self.args, self.name))
- LOG.warning('OSD will not be hot-swappable if %s is '
- 'not the same device as the osd data' %
- self.name)
- self.space_symlink = getattr(self.args, self.name)
-
- def prepare_device(self):
- reusing_partition = False
-
- if is_partition(getattr(self.args, self.name)):
- LOG.debug('%s %s is a partition',
- self.name.capitalize(), getattr(self.args, self.name))
- partition = DevicePartition.factory(
- path=None, dev=getattr(self.args, self.name), args=self.args)
- if isinstance(partition, DevicePartitionCrypt):
- raise Error(getattr(self.args, self.name) +
- ' partition already exists'
- ' and --dmcrypt specified')
- LOG.warning('OSD will not be hot-swappable' +
- ' if ' + self.name + ' is not' +
- ' the same device as the osd data')
- if partition.get_ptype() == partition.ptype_for_name(self.name):
- LOG.debug('%s %s was previously prepared with '
- 'ceph-disk. Reusing it.',
- self.name.capitalize(),
- getattr(self.args, self.name))
- reusing_partition = True
- # Read and reuse the partition uuid from this journal's
- # previous life. We reuse the uuid instead of changing it
- # because udev does not reliably notice changes to an
- # existing partition's GUID. See
- # http://tracker.ceph.com/issues/10146
- setattr(self.args, self.name + '_uuid', partition.get_uuid())
- LOG.debug('Reusing %s with uuid %s',
- self.name,
- getattr(self.args, self.name + '_uuid'))
- else:
- LOG.warning('%s %s was not prepared with '
- 'ceph-disk. Symlinking directly.',
- self.name.capitalize(),
- getattr(self.args, self.name))
- self.space_symlink = getattr(self.args, self.name)
- return
-
- self.space_symlink = '/dev/disk/by-partuuid/{uuid}'.format(
- uuid=getattr(self.args, self.name + '_uuid'))
-
- if self.args.dmcrypt:
- self.space_dmcrypt = self.space_symlink
- self.space_symlink = '/dev/mapper/{uuid}'.format(
- uuid=getattr(self.args, self.name + '_uuid'))
-
- if reusing_partition:
- # confirm that the space_symlink exists. It should since
- # this was an active space
- # in the past. Continuing otherwise would be futile.
- assert os.path.exists(self.space_symlink)
- return
-
- num = self.desired_partition_number()
-
- if num == 0:
- LOG.warning('OSD will not be hot-swappable if %s '
- 'is not the same device as the osd data',
- self.name)
-
- device = Device.factory(getattr(self.args, self.name), self.args)
- num = device.create_partition(
- uuid=getattr(self.args, self.name + '_uuid'),
- name=self.name,
- size=self.space_size,
- num=num)
-
- partition = device.get_partition(num)
-
- LOG.debug('%s is GPT partition %s',
- self.name.capitalize(),
- self.space_symlink)
-
- if isinstance(partition, DevicePartitionCrypt):
- partition.format()
-
- command_check_call(
- [
- 'sgdisk',
- '--typecode={num}:{uuid}'.format(
- num=num,
- uuid=partition.ptype_for_name(self.name),
- ),
- '--',
- getattr(self.args, self.name),
- ],
- )
-
- LOG.debug('%s is GPT partition %s',
- self.name.capitalize(),
- self.space_symlink)
-
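-# For reference, PrepareSpace.parser('journal') above yields these flags:
-#   --journal-uuid UUID, --journal-file, --journal-dev, and an optional
-# positional JOURNAL. Omitting JOURNAL on a whole-disk data device makes
-# set_type() colocate the journal with the data (see the LOG.info there).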
-
-class PrepareJournal(PrepareSpace):
-
- def __init__(self, args):
- self.name = 'journal'
- (self.allows_journal,
- self.wants_journal,
- self.needs_journal) = check_journal_reqs(args)
-
- if args.journal and not self.allows_journal:
- raise Error('journal specified but not allowed by osd backend')
-
- super(PrepareJournal, self).__init__(args)
-
- def wants_space(self):
- return self.wants_journal
-
- def get_space_size(self):
- return int(get_conf_with_default(
- cluster=self.args.cluster,
- variable='osd_journal_size',
- ))
-
- def desired_partition_number(self):
- if self.args.journal == self.args.data:
- # we're sharing the disk between osd data and journal;
- # make journal be partition number 2
- num = 2
- else:
- num = 0
- return num
-
- @staticmethod
- def parser():
- return PrepareSpace.parser('journal')
-
-
-class PrepareBluestoreBlock(PrepareSpace):
-
- def __init__(self, args):
- self.name = 'block'
- super(PrepareBluestoreBlock, self).__init__(args)
-
- def get_space_size(self):
- return 0 # get as much space as possible
-
- def desired_partition_number(self):
- if self.args.block == self.args.data:
- num = 2
- else:
- num = 0
- return num
-
- @staticmethod
- def parser():
- return PrepareSpace.parser('block')
-
-
-class CryptHelpers(object):
-
- @staticmethod
- def get_cryptsetup_parameters(args):
- cryptsetup_parameters_str = get_conf(
- cluster=args.cluster,
- variable='osd_cryptsetup_parameters',
- )
- if cryptsetup_parameters_str is None:
- return []
- else:
- return shlex.split(cryptsetup_parameters_str)
-
- @staticmethod
- def get_dmcrypt_keysize(args):
- dmcrypt_keysize_str = get_conf(
- cluster=args.cluster,
- variable='osd_dmcrypt_key_size',
- )
- dmcrypt_type = CryptHelpers.get_dmcrypt_type(args)
- if dmcrypt_type == 'luks':
- if dmcrypt_keysize_str is None:
- # LUKS hashes the 'passphrase' stored in .luks.key into
- # the actual key, so set a large default that remains a
- # reasonable value even if it is not updated for some
- # time.
- #
- return 1024
- else:
- return int(dmcrypt_keysize_str)
- elif dmcrypt_type == 'plain':
- if dmcrypt_keysize_str is None:
- # This value is hard-coded in the udev script
- return 256
- else:
- LOG.warning('ensure the 95-ceph-osd.rules file has '
- 'been copied to /etc/udev/rules.d '
- 'and modified to call cryptsetup '
- 'with --key-size=%s' % dmcrypt_keysize_str)
- return int(dmcrypt_keysize_str)
- else:
- return 0
-
- @staticmethod
- def get_dmcrypt_type(args):
- if args.dmcrypt:
- dmcrypt_type = get_conf(
- cluster=args.cluster,
- variable='osd_dmcrypt_type',
- )
-
- if dmcrypt_type is None or dmcrypt_type == 'luks':
- return 'luks'
- elif dmcrypt_type == 'plain':
- return 'plain'
- else:
- raise Error('invalid osd_dmcrypt_type parameter '
- '(must be luks or plain): ', dmcrypt_type)
- else:
- return None
-
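-# Sketch of how CryptHelpers resolves dm-crypt settings, assuming only the
-# defaults above (no osd_dmcrypt_* options set in the conf):
-#   get_dmcrypt_type(args)    -> 'luks' when --dmcrypt, else None
-#   get_dmcrypt_keysize(args) -> 1024 for luks, 256 for plain, 0 otherwise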
-
-class PrepareData(object):
-
- FILE = 1
- DEVICE = 2
-
- def __init__(self, args):
-
- self.args = args
- self.partition = None
- self.set_type()
- if self.args.cluster_uuid is None:
- self.args.cluster_uuid = get_fsid(cluster=self.args.cluster)
-
- if self.args.osd_uuid is None:
- self.args.osd_uuid = str(uuid.uuid4())
-
- def set_type(self):
- dmode = os.stat(self.args.data).st_mode
-
- if stat.S_ISDIR(dmode):
- self.type = self.FILE
- elif stat.S_ISBLK(dmode):
- self.type = self.DEVICE
- else:
- raise Error('not a dir or block device', self.args.data)
-
- def is_file(self):
- return self.type == self.FILE
-
- def is_device(self):
- return self.type == self.DEVICE
-
- @staticmethod
- def parser():
- parser = argparse.ArgumentParser(add_help=False)
- parser.add_argument(
- '--fs-type',
- help='file system type to use (e.g. "ext4")',
- )
- parser.add_argument(
- '--zap-disk',
- action='store_true', default=None,
- help='destroy the partition table (and content) of a disk',
- )
- parser.add_argument(
- '--data-dir',
- action='store_true', default=None,
- help='verify that DATA is a dir',
- )
- parser.add_argument(
- '--data-dev',
- action='store_true', default=None,
- help='verify that DATA is a block device',
- )
- parser.add_argument(
- 'data',
- metavar='DATA',
- help='path to OSD data (a disk block device or directory)',
- )
- return parser
-
- def populate_data_path_file(self, path, *to_prepare_list):
- self.populate_data_path(path, *to_prepare_list)
-
- def populate_data_path(self, path, *to_prepare_list):
- if os.path.exists(os.path.join(path, 'magic')):
- LOG.debug('Data dir %s already exists', path)
- return
- else:
- LOG.debug('Preparing osd data dir %s', path)
-
- if self.args.osd_uuid is None:
- self.args.osd_uuid = str(uuid.uuid4())
-
- write_one_line(path, 'ceph_fsid', self.args.cluster_uuid)
- write_one_line(path, 'fsid', self.args.osd_uuid)
- write_one_line(path, 'magic', CEPH_OSD_ONDISK_MAGIC)
-
- for to_prepare in to_prepare_list:
- to_prepare.populate_data_path(path)
-
- def prepare(self, *to_prepare_list):
- if self.type == self.DEVICE:
- self.prepare_device(*to_prepare_list)
- elif self.type == self.FILE:
- self.prepare_file(*to_prepare_list)
- else:
- raise Error('unexpected type ', self.type)
-
- def prepare_file(self, *to_prepare_list):
-
- if not os.path.exists(self.args.data):
- raise Error('data path for directory does not exist',
- self.args.data)
-
- if self.args.data_dev:
- raise Error('data path is not a block device', self.args.data)
-
- for to_prepare in to_prepare_list:
- to_prepare.prepare()
-
- self.populate_data_path_file(self.args.data, *to_prepare_list)
-
- def sanity_checks(self):
- if not os.path.exists(self.args.data):
- raise Error('data path for device does not exist',
- self.args.data)
- verify_not_in_use(self.args.data, True)
-
- def set_variables(self):
- if self.args.fs_type is None:
- self.args.fs_type = get_conf(
- cluster=self.args.cluster,
- variable='osd_mkfs_type',
- )
- if self.args.fs_type is None:
- self.args.fs_type = get_conf(
- cluster=self.args.cluster,
- variable='osd_fs_type',
- )
- if self.args.fs_type is None:
- self.args.fs_type = DEFAULT_FS_TYPE
-
- self.mkfs_args = get_conf(
- cluster=self.args.cluster,
- variable='osd_mkfs_options_{fstype}'.format(
- fstype=self.args.fs_type,
- ),
- )
- if self.mkfs_args is None:
- self.mkfs_args = get_conf(
- cluster=self.args.cluster,
- variable='osd_fs_mkfs_options_{fstype}'.format(
- fstype=self.args.fs_type,
- ),
- )
-
- self.mount_options = get_conf(
- cluster=self.args.cluster,
- variable='osd_mount_options_{fstype}'.format(
- fstype=self.args.fs_type,
- ),
- )
- if self.mount_options is None:
- self.mount_options = get_conf(
- cluster=self.args.cluster,
- variable='osd_fs_mount_options_{fstype}'.format(
- fstype=self.args.fs_type,
- ),
- )
- else:
- # remove whitespaces
- self.mount_options = "".join(self.mount_options.split())
-
- if self.args.osd_uuid is None:
- self.args.osd_uuid = str(uuid.uuid4())
-
- def prepare_device(self, *to_prepare_list):
- self.sanity_checks()
- self.set_variables()
- if self.args.zap_disk is not None:
- zap(self.args.data)
-
- def create_data_partition(self):
- device = Device.factory(self.args.data, self.args)
- partition_number = 1
- device.create_partition(uuid=self.args.osd_uuid,
- name='data',
- num=partition_number,
- size=self.get_space_size())
- return device.get_partition(partition_number)
-
- def set_data_partition(self):
- if is_partition(self.args.data):
- LOG.debug('OSD data device %s is a partition',
- self.args.data)
- self.partition = DevicePartition.factory(
- path=None, dev=self.args.data, args=self.args)
- ptype = self.partition.get_ptype()
- ready = Ptype.get_ready_by_name('osd')
- if ptype not in ready:
- LOG.warning('incorrect partition UUID: %s, expected one of %s'
- % (ptype, str(ready)))
- else:
- LOG.debug('Creating osd partition on %s',
- self.args.data)
- self.partition = self.create_data_partition()
-
- def populate_data_path_device(self, *to_prepare_list):
- partition = self.partition
-
- if isinstance(partition, DevicePartitionCrypt):
- partition.map()
-
- try:
- args = [
- 'mkfs',
- '-t',
- self.args.fs_type,
- ]
- if self.mkfs_args is not None:
- args.extend(self.mkfs_args.split())
- if self.args.fs_type == 'xfs':
- args.extend(['-f']) # always force
- else:
- args.extend(MKFS_ARGS.get(self.args.fs_type, []))
- args.extend([
- '--',
- partition.get_dev(),
- ])
- try:
- LOG.debug('Creating %s fs on %s',
- self.args.fs_type, partition.get_dev())
- command_check_call(args)
- except subprocess.CalledProcessError as e:
- raise Error(e)
-
- path = mount(dev=partition.get_dev(),
- fstype=self.args.fs_type,
- options=self.mount_options)
-
- try:
- self.populate_data_path(path, *to_prepare_list)
- finally:
- path_set_context(path)
- unmount(path)
- finally:
- if isinstance(partition, DevicePartitionCrypt):
- partition.unmap()
-
- if not is_partition(self.args.data):
- try:
- command_check_call(
- [
- 'sgdisk',
- '--typecode=%d:%s' % (partition.get_partition_number(),
- partition.ptype_for_name('osd')),
- '--',
- self.args.data,
- ],
- )
- except subprocess.CalledProcessError as e:
- raise Error(e)
- update_partition(self.args.data, 'prepared')
- command_check_call(['udevadm', 'trigger',
- '--action=add',
- '--sysname-match',
- os.path.basename(partition.rawdev)])
-
-
-class PrepareFilestoreData(PrepareData):
-
- def get_space_size(self):
- return 0 # get as much space as possible
-
- def prepare_device(self, *to_prepare_list):
- super(PrepareFilestoreData, self).prepare_device(*to_prepare_list)
- for to_prepare in to_prepare_list:
- to_prepare.prepare()
- self.set_data_partition()
- self.populate_data_path_device(*to_prepare_list)
-
- def populate_data_path(self, path, *to_prepare_list):
- super(PrepareFilestoreData, self).populate_data_path(path,
- *to_prepare_list)
- write_one_line(path, 'type', 'filestore')
-
-
-class PrepareBluestoreData(PrepareData):
-
- def get_space_size(self):
- return 100 # MB
-
- def prepare_device(self, *to_prepare_list):
- super(PrepareBluestoreData, self).prepare_device(*to_prepare_list)
- self.set_data_partition()
- for to_prepare in to_prepare_list:
- to_prepare.prepare()
- self.populate_data_path_device(*to_prepare_list)
-
- def populate_data_path(self, path, *to_prepare_list):
- super(PrepareBluestoreData, self).populate_data_path(path,
- *to_prepare_list)
- write_one_line(path, 'type', 'bluestore')
-
-
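-# After a successful prepare, the osd data dir holds (non-exhaustive):
-# ceph_fsid, fsid, magic, and a 'type' file containing 'filestore' or
-# 'bluestore'; mkfs() below reads 'type' to pick the ceph-osd invocation.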
-def mkfs(
- path,
- cluster,
- osd_id,
- fsid,
- keyring,
-):
- monmap = os.path.join(path, 'activate.monmap')
- command_check_call(
- [
- 'ceph',
- '--cluster', cluster,
- '--name', 'client.bootstrap-osd',
- '--keyring', keyring,
- 'mon', 'getmap', '-o', monmap,
- ],
- )
-
- osd_type = read_one_line(path, 'type')
-
- if osd_type == 'bluestore':
- command_check_call(
- [
- 'ceph-osd',
- '--cluster', cluster,
- '--mkfs',
- '--mkkey',
- '-i', osd_id,
- '--monmap', monmap,
- '--osd-data', path,
- '--osd-uuid', fsid,
- '--keyring', os.path.join(path, 'keyring'),
- '--setuser', get_ceph_user(),
- '--setgroup', get_ceph_group(),
- ],
- )
- else:
- command_check_call(
- [
- 'ceph-osd',
- '--cluster', cluster,
- '--mkfs',
- '--mkkey',
- '-i', osd_id,
- '--monmap', monmap,
- '--osd-data', path,
- '--osd-journal', os.path.join(path, 'journal'),
- '--osd-uuid', fsid,
- '--keyring', os.path.join(path, 'keyring'),
- '--setuser', get_ceph_user(),
- '--setgroup', get_ceph_group(),
- ],
- )
-
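-# The filestore branch above corresponds roughly to this manual command
-# (paths are illustrative):
-#   ceph-osd --cluster ceph --mkfs --mkkey -i <osd-id> \
-#       --monmap <datadir>/activate.monmap --osd-data <datadir> \
-#       --osd-journal <datadir>/journal --osd-uuid <fsid> \
-#       --keyring <datadir>/keyring --setuser ceph --setgroup ceph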
-
-def auth_key(
- path,
- cluster,
- osd_id,
- keyring,
-):
- try:
- # try dumpling+ cap scheme
- command_check_call(
- [
- 'ceph',
- '--cluster', cluster,
- '--name', 'client.bootstrap-osd',
- '--keyring', keyring,
- 'auth', 'add', 'osd.{osd_id}'.format(osd_id=osd_id),
- '-i', os.path.join(path, 'keyring'),
- 'osd', 'allow *',
- 'mon', 'allow profile osd',
- ],
- )
- except subprocess.CalledProcessError as err:
- if err.returncode == errno.EINVAL:
- # try old cap scheme
- command_check_call(
- [
- 'ceph',
- '--cluster', cluster,
- '--name', 'client.bootstrap-osd',
- '--keyring', keyring,
- 'auth', 'add', 'osd.{osd_id}'.format(osd_id=osd_id),
- '-i', os.path.join(path, 'keyring'),
- 'osd', 'allow *',
- 'mon', 'allow rwx',
- ],
- )
- else:
- raise
-
-
-def get_mount_point(cluster, osd_id):
- parent = STATEDIR + '/osd'
- return os.path.join(
- parent,
- '{cluster}-{osd_id}'.format(cluster=cluster, osd_id=osd_id),
- )
-
-
-def move_mount(
- dev,
- path,
- cluster,
- osd_id,
- fstype,
- mount_options,
-):
- LOG.debug('Moving mount to final location...')
- osd_data = get_mount_point(cluster, osd_id)
- maybe_mkdir(osd_data)
-
- # pick best-of-breed mount options based on fs type
- if mount_options is None:
- mount_options = MOUNT_OPTIONS.get(fstype, '')
-
- # we really want to mount --move, but that is not supported when
- # the parent mount is shared, as it is by default on RH, Fedora,
- # and probably others. Also, --bind doesn't properly manipulate
- # /etc/mtab, which *still* isn't a symlink to /proc/mounts despite
- # this being 2013. Instead, mount the original device at the final
- # location.
- command_check_call(
- [
- '/bin/mount',
- '-o',
- mount_options,
- '--',
- dev,
- osd_data,
- ],
- )
- command_check_call(
- [
- '/bin/umount',
- '-l', # lazy, in case someone else is peeking at the
- # wrong moment
- '--',
- path,
- ],
- )
-
-
-def start_daemon(
- cluster,
- osd_id,
-):
- LOG.debug('Starting %s osd.%s...', cluster, osd_id)
-
- path = (STATEDIR + '/osd/{cluster}-{osd_id}').format(
- cluster=cluster, osd_id=osd_id)
-
- try:
- if os.path.exists(os.path.join(path, 'upstart')):
- command_check_call(
- [
- '/sbin/initctl',
- # use emit, not start, because start would fail if the
- # instance was already running
- 'emit',
- # since the daemon starting doesn't guarantee much about
- # the service being operational anyway, don't bother
- # waiting for it
- '--no-wait',
- '--',
- 'ceph-osd',
- 'cluster={cluster}'.format(cluster=cluster),
- 'id={osd_id}'.format(osd_id=osd_id),
- ],
- )
- elif os.path.exists(os.path.join(path, 'sysvinit')):
- if os.path.exists('/usr/sbin/service'):
- svc = '/usr/sbin/service'
- else:
- svc = '/sbin/service'
- command_check_call(
- [
- svc,
- 'ceph',
- '--cluster',
- '{cluster}'.format(cluster=cluster),
- 'start',
- 'osd.{osd_id}'.format(osd_id=osd_id),
- ],
- )
- elif os.path.exists(os.path.join(path, 'systemd')):
- command_check_call(
- [
- 'systemctl',
- 'enable',
- 'ceph-osd@{osd_id}'.format(osd_id=osd_id),
- ],
- )
- command_check_call(
- [
- 'systemctl',
- 'start',
- 'ceph-osd@{osd_id}'.format(osd_id=osd_id),
- ],
- )
- else:
- raise Error('{cluster} osd.{osd_id} is not tagged '
- 'with an init system'.format(
- cluster=cluster,
- osd_id=osd_id,
- ))
- except subprocess.CalledProcessError as e:
- raise Error('ceph osd start failed', e)
-
-
-def stop_daemon(
- cluster,
- osd_id,
-):
- LOG.debug('Stopping %s osd.%s...', cluster, osd_id)
-
- path = (STATEDIR + '/osd/{cluster}-{osd_id}').format(
- cluster=cluster, osd_id=osd_id)
-
- try:
- if os.path.exists(os.path.join(path, 'upstart')):
- command_check_call(
- [
- '/sbin/initctl',
- 'stop',
- 'ceph-osd',
- 'cluster={cluster}'.format(cluster=cluster),
- 'id={osd_id}'.format(osd_id=osd_id),
- ],
- )
- elif os.path.exists(os.path.join(path, 'sysvinit')):
- svc = which('service')
- command_check_call(
- [
- svc,
- 'ceph',
- '--cluster',
- '{cluster}'.format(cluster=cluster),
- 'stop',
- 'osd.{osd_id}'.format(osd_id=osd_id),
- ],
- )
- elif os.path.exists(os.path.join(path, 'systemd')):
- command_check_call(
- [
- 'systemctl',
- 'disable',
- 'ceph-osd@{osd_id}'.format(osd_id=osd_id),
- ],
- )
- command_check_call(
- [
- 'systemctl',
- 'stop',
- 'ceph-osd@{osd_id}'.format(osd_id=osd_id),
- ],
- )
- else:
- raise Error('{cluster} osd.{osd_id} is not tagged with an init '
- ' system'.format(cluster=cluster, osd_id=osd_id))
- except subprocess.CalledProcessError as e:
- raise Error('ceph osd stop failed', e)
-
-
-def detect_fstype(
- dev,
-):
- fstype = _check_output(
- args=[
- '/sbin/blkid',
- # we don't want stale cached results
- '-p',
- '-s', 'TYPE',
- '-o', 'value',
- '--',
- dev,
- ],
- )
- fstype = must_be_one_line(fstype)
- return fstype
-
-
-def dmcrypt_map(dev, dmcrypt_key_dir):
- ptype = get_partition_type(dev)
- if ptype in Ptype.get_ready_by_type('plain'):
- luks = False
- cryptsetup_parameters = ['--key-size', '256']
- elif ptype in Ptype.get_ready_by_type('luks'):
- luks = True
- cryptsetup_parameters = []
- else:
- raise Error('--dmcrypt called for dev %s with invalid ptype %s'
- % (dev, ptype))
- part_uuid = get_partition_uuid(dev)
- dmcrypt_key_path = get_dmcrypt_key_path(part_uuid, dmcrypt_key_dir, luks)
- return _dmcrypt_map(
- rawdev=dev,
- keypath=dmcrypt_key_path,
- _uuid=part_uuid,
- cryptsetup_parameters=cryptsetup_parameters,
- luks=luks,
- format_dev=False,
- )
-
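-# Note the asymmetry above: for 'plain' dm-crypt the keyfile is the raw key
-# (hence the fixed --key-size 256, matching the udev rules), while for LUKS
-# the keyfile is a passphrase from which cryptsetup derives the actual key.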
-
-def mount_activate(
- dev,
- activate_key_template,
- init,
- dmcrypt,
- dmcrypt_key_dir,
- reactivate=False,
-):
-
- if dmcrypt:
- part_uuid = get_partition_uuid(dev)
- dev = dmcrypt_map(dev, dmcrypt_key_dir)
- try:
- fstype = detect_fstype(dev=dev)
- except (subprocess.CalledProcessError,
- TruncatedLineError,
- TooManyLinesError) as e:
- raise FilesystemTypeError(
- 'device {dev}'.format(dev=dev),
- e,
- )
-
- # TODO always using mount options from cluster=ceph for
- # now; see http://tracker.newdream.net/issues/3253
- mount_options = get_conf(
- cluster='ceph',
- variable='osd_mount_options_{fstype}'.format(
- fstype=fstype,
- ),
- )
-
- if mount_options is None:
- mount_options = get_conf(
- cluster='ceph',
- variable='osd_fs_mount_options_{fstype}'.format(
- fstype=fstype,
- ),
- )
-
- # remove whitespaces from mount_options
- if mount_options is not None:
- mount_options = "".join(mount_options.split())
-
- path = mount(dev=dev, fstype=fstype, options=mount_options)
-
- # check whether the disk was deactivated; if so, refuse to
- # activate it again unless --reactivate was given.
- if os.path.exists(os.path.join(path, 'deactive')):
- # log to syslog so udev-triggered failures are easy to spot
- if not reactivate:
- unmount(path)
- # unmap the dmcrypt device again, because it will be mapped
- # again at the bootup stage (due to the deactivate flag)
- if '/dev/mapper/' in dev:
- part_uuid = dev.replace('/dev/mapper/', '')
- dmcrypt_unmap(part_uuid)
- LOG.info('OSD deactivated! reactivate with: --reactivate')
- raise Error('OSD deactivated! reactivate with: --reactivate')
- # remember that this osd was deactivated so we can reactivate it
- deactive = True
- else:
- deactive = False
-
- osd_id = None
- cluster = None
- try:
- (osd_id, cluster) = activate(path, activate_key_template, init)
-
- # Now activated successfully. If we got --reactivate and the
- # osd had been deactivated, remove the deactive file.
- if deactive and reactivate:
- os.remove(os.path.join(path, 'deactive'))
- LOG.info('Remove `deactive` file.')
-
- # check if the disk is already active, or if something else is already
- # mounted there
- active = False
- other = False
- src_dev = os.stat(path).st_dev
- try:
- dst_dev = os.stat((STATEDIR + '/osd/{cluster}-{osd_id}').format(
- cluster=cluster,
- osd_id=osd_id)).st_dev
- if src_dev == dst_dev:
- active = True
- else:
- parent_dev = os.stat(STATEDIR + '/osd').st_dev
- if dst_dev != parent_dev:
- other = True
- elif os.listdir(get_mount_point(cluster, osd_id)):
- LOG.info(get_mount_point(cluster, osd_id) +
- " is not empty, won't override")
- other = True
-
- except OSError:
- pass
-
- if active:
- LOG.info('%s osd.%s already mounted in position; unmounting ours.'
- % (cluster, osd_id))
- unmount(path)
- elif other:
- raise Error('another %s osd.%s already mounted in position '
- '(old/different cluster instance?); unmounting ours.'
- % (cluster, osd_id))
- else:
- move_mount(
- dev=dev,
- path=path,
- cluster=cluster,
- osd_id=osd_id,
- fstype=fstype,
- mount_options=mount_options,
- )
- return (cluster, osd_id)
-
- except:
- LOG.error('Failed to activate')
- unmount(path)
- raise
- finally:
- # remove our temp dir
- if os.path.exists(path):
- os.rmdir(path)
-
-
-def activate_dir(
- path,
- activate_key_template,
- init,
-):
-
- if not os.path.exists(path):
- raise Error(
- 'directory %s does not exist' % path
- )
-
- (osd_id, cluster) = activate(path, activate_key_template, init)
-
- if init not in (None, 'none'):
- canonical = (STATEDIR + '/osd/{cluster}-{osd_id}').format(
- cluster=cluster,
- osd_id=osd_id)
- if path != canonical:
- # symlink it from the proper location
- create = True
- if os.path.lexists(canonical):
- old = os.readlink(canonical)
- if old != path:
- LOG.debug('Removing old symlink %s -> %s', canonical, old)
- try:
- os.unlink(canonical)
- except:
- raise Error('unable to remove old symlink', canonical)
- else:
- create = False
- if create:
- LOG.debug('Creating symlink %s -> %s', canonical, path)
- try:
- os.symlink(path, canonical)
- except:
- raise Error('unable to create symlink %s -> %s'
- % (canonical, path))
-
- return (cluster, osd_id)
-
-
-def find_cluster_by_uuid(_uuid):
- """
- Find a cluster name by searching /etc/ceph/*.conf for a conf file
- with the right uuid.
- """
- _uuid = _uuid.lower()
- no_fsid = []
- if not os.path.exists(SYSCONFDIR):
- return None
- for conf_file in os.listdir(SYSCONFDIR):
- if not conf_file.endswith('.conf'):
- continue
- cluster = conf_file[:-5]
- try:
- fsid = get_fsid(cluster)
- except Error as e:
- if e.message != 'getting cluster uuid from configuration failed':
- raise e
- no_fsid.append(cluster)
- else:
- if fsid == _uuid:
- return cluster
- # be tolerant of /etc/ceph/ceph.conf without an fsid defined.
- if len(no_fsid) == 1 and no_fsid[0] == 'ceph':
- LOG.warning('No fsid defined in ' + SYSCONFDIR +
- '/ceph.conf; using anyway')
- return 'ceph'
- return None
-
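-# Example (made-up fsid): if /etc/ceph/prod.conf defines
-#   fsid = 6a335264-2ae7-4a84-9dbd-30d950e9e728
-# then find_cluster_by_uuid('6A335264-2AE7-4A84-9DBD-30D950E9E728')
-# returns 'prod'; matching is done on the lowercased uuid.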
-
-def activate(
- path,
- activate_key_template,
- init,
-):
-
- check_osd_magic(path)
-
- ceph_fsid = read_one_line(path, 'ceph_fsid')
- if ceph_fsid is None:
- raise Error('No cluster uuid assigned.')
- LOG.debug('Cluster uuid is %s', ceph_fsid)
-
- cluster = find_cluster_by_uuid(ceph_fsid)
- if cluster is None:
- raise Error('No cluster conf found in ' + SYSCONFDIR +
- ' with fsid %s' % ceph_fsid)
- LOG.debug('Cluster name is %s', cluster)
-
- fsid = read_one_line(path, 'fsid')
- if fsid is None:
- raise Error('No OSD uuid assigned.')
- LOG.debug('OSD uuid is %s', fsid)
-
- keyring = activate_key_template.format(cluster=cluster,
- statedir=STATEDIR)
-
- osd_id = get_osd_id(path)
- if osd_id is None:
- osd_id = allocate_osd_id(
- cluster=cluster,
- fsid=fsid,
- keyring=keyring,
- )
- write_one_line(path, 'whoami', osd_id)
- LOG.debug('OSD id is %s', osd_id)
-
- if not os.path.exists(os.path.join(path, 'ready')):
- LOG.debug('Initializing OSD...')
- # re-running mkfs is safe, so just run until it completes
- mkfs(
- path=path,
- cluster=cluster,
- osd_id=osd_id,
- fsid=fsid,
- keyring=keyring,
- )
-
- if init not in (None, 'none'):
- if init == 'auto':
- conf_val = get_conf(
- cluster=cluster,
- variable='init'
- )
- if conf_val is not None:
- init = conf_val
- else:
- init = init_get()
-
- LOG.debug('Marking with init system %s', init)
- with open(os.path.join(path, init), 'w'):
- pass
-
- # remove markers for others, just in case.
- for other in INIT_SYSTEMS:
- if other != init:
- try:
- os.unlink(os.path.join(path, other))
- except OSError:
- pass
-
- if not os.path.exists(os.path.join(path, 'active')):
- LOG.debug('Authorizing OSD key...')
- auth_key(
- path=path,
- cluster=cluster,
- osd_id=osd_id,
- keyring=keyring,
- )
- write_one_line(path, 'active', 'ok')
- LOG.debug('%s osd.%s data dir is ready at %s', cluster, osd_id, path)
- return (osd_id, cluster)
-
-
-def main_activate(args):
- cluster = None
- osd_id = None
-
- if not os.path.exists(args.path):
- raise Error('%s does not exist' % args.path)
-
- if is_suppressed(args.path):
- LOG.info('suppressed activate request on %s', args.path)
- return
-
- activate_lock.acquire() # noqa
- try:
- mode = os.stat(args.path).st_mode
- if stat.S_ISBLK(mode):
- if (is_partition(args.path) and
- (get_partition_type(args.path) ==
- PTYPE['mpath']['osd']['ready']) and
- not is_mpath(args.path)):
- raise Error('%s is not a multipath block device' %
- args.path)
- (cluster, osd_id) = mount_activate(
- dev=args.path,
- activate_key_template=args.activate_key_template,
- init=args.mark_init,
- dmcrypt=args.dmcrypt,
- dmcrypt_key_dir=args.dmcrypt_key_dir,
- reactivate=args.reactivate,
- )
- osd_data = get_mount_point(cluster, osd_id)
-
- elif stat.S_ISDIR(mode):
- (cluster, osd_id) = activate_dir(
- path=args.path,
- activate_key_template=args.activate_key_template,
- init=args.mark_init,
- )
- osd_data = args.path
-
- else:
- raise Error('%s is not a directory or block device' % args.path)
-
- if (not args.no_start_daemon and args.mark_init == 'none'):
- command_check_call(
- [
- 'ceph-osd',
- '--cluster={cluster}'.format(cluster=cluster),
- '--id={osd_id}'.format(osd_id=osd_id),
- '--osd-data={path}'.format(path=osd_data),
- '--osd-journal={path}/journal'.format(path=osd_data),
- ],
- )
-
- if (not args.no_start_daemon and
- args.mark_init not in (None, 'none')):
-
- start_daemon(
- cluster=cluster,
- osd_id=osd_id,
- )
-
- finally:
- activate_lock.release() # noqa
-
-
-###########################
-
-def _mark_osd_out(cluster, osd_id):
- LOG.info('Preparing to mark osd.%d out...', osd_id)
- command([
- 'ceph',
- 'osd',
- 'out',
- 'osd.%d' % osd_id,
- ])
-
-
-def _check_osd_status(cluster, osd_id):
- """
- report the osd status:
- 00(0) : means OSD OUT AND DOWN
- 01(1) : means OSD OUT AND UP
- 10(2) : means OSD IN AND DOWN
- 11(3) : means OSD IN AND UP
- """
- LOG.info("Checking osd id: %s ..." % osd_id)
- found = False
- status_code = 0
- out, err, ret = command([
- 'ceph',
- 'osd',
- 'dump',
- '--cluster={cluster}'.format(
- cluster=cluster,
- ),
- '--format',
- 'json',
- ])
- out_json = json.loads(out)
- for item in out_json[u'osds']:
- if item.get(u'osd') == int(osd_id):
- found = True
- if item.get(u'in') == 1:
- status_code += 2
- if item.get(u'up') == 1:
- status_code += 1
- if not found:
- raise Error('Could not find osd.%s in osd tree!' % osd_id)
- return status_code
-
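-# Example: an osd listed in `ceph osd dump` as {"osd": 3, "in": 1, "up": 1}
-# yields status_code 3 (OSD_STATUS_IN_UP); {"in": 0, "up": 0} yields 0.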
-
-def _remove_osd_directory_files(mounted_path, cluster):
- """
- Remove the 'ready', 'active', and INIT-specific files.
- """
- if os.path.exists(os.path.join(mounted_path, 'ready')):
- os.remove(os.path.join(mounted_path, 'ready'))
- LOG.info('Remove `ready` file.')
- else:
- LOG.info('`ready` file is already removed.')
-
- if os.path.exists(os.path.join(mounted_path, 'active')):
- os.remove(os.path.join(mounted_path, 'active'))
- LOG.info('Remove `active` file.')
- else:
- LOG.info('`active` file is already removed.')
-
- # Determine the init system in use (from the conf if set, otherwise
- # autodetected) and remove its marker file (e.g. `upstart`, `sysvinit`).
- conf_val = get_conf(
- cluster=cluster,
- variable='init'
- )
- if conf_val is not None:
- init = conf_val
- else:
- init = init_get()
- os.remove(os.path.join(mounted_path, init))
- LOG.info('Remove `%s` file.', init)
- return
-
-
-def main_deactivate(args):
- activate_lock.acquire() # noqa
- try:
- main_deactivate_locked(args)
- finally:
- activate_lock.release() # noqa
-
-
-def main_deactivate_locked(args):
- osd_id = args.deactivate_by_id
- path = args.path
- target_dev = None
- dmcrypt = False
- devices = list_devices()
-
- # list all devices and find the one we need
- for device in devices:
- if 'partitions' in device:
- for dev_part in device.get('partitions'):
- if (osd_id and
- 'whoami' in dev_part and
- dev_part['whoami'] == osd_id):
- target_dev = dev_part
- elif (path and
- 'path' in dev_part and
- dev_part['path'] == path):
- target_dev = dev_part
- if not target_dev:
- raise Error('Cannot find any matching device!')
-
- # set up all the variables we need
- osd_id = target_dev['whoami']
- part_type = target_dev['ptype']
- mounted_path = target_dev['mount']
- if Ptype.is_dmcrypt(part_type, 'osd'):
- dmcrypt = True
-
- # Do not do anything if osd is already down.
- status_code = _check_osd_status(args.cluster, osd_id)
- if status_code == OSD_STATUS_IN_UP:
- if args.mark_out is True:
- _mark_osd_out(args.cluster, int(osd_id))
- stop_daemon(args.cluster, osd_id)
- elif status_code == OSD_STATUS_IN_DOWN:
- if args.mark_out is True:
- _mark_osd_out(args.cluster, int(osd_id))
- LOG.info("OSD already out/down. Do not do anything now.")
- return
- elif status_code == OSD_STATUS_OUT_UP:
- stop_daemon(args.cluster, osd_id)
- elif status_code == OSD_STATUS_OUT_DOWN:
- LOG.info("OSD already out/down. Do not do anything now.")
- return
-
- # remove 'ready', 'active', and INIT-specific files.
- _remove_osd_directory_files(mounted_path, args.cluster)
-
- # Write the `deactive` marker to the osd directory.
- with open(os.path.join(mounted_path, 'deactive'), 'w'):
- path_set_context(os.path.join(mounted_path, 'deactive'))
-
- unmount(mounted_path)
- LOG.info("Umount `%s` successfully.", mounted_path)
-
- if dmcrypt:
- dmcrypt_unmap(target_dev['uuid'])
- for name in Space.NAMES:
- if name + '_uuid' in target_dev:
- dmcrypt_unmap(target_dev[name + '_uuid'])
-
-###########################
-
-
-def _remove_from_crush_map(cluster, osd_id):
- LOG.info("Prepare to remove osd.%s from crush map..." % osd_id)
- command([
- 'ceph',
- 'osd',
- 'crush',
- 'remove',
- 'osd.%s' % osd_id,
- ])
-
-
-def _delete_osd_auth_key(cluster, osd_id):
- LOG.info("Prepare to delete osd.%s cephx key..." % osd_id)
- command([
- 'ceph',
- 'auth',
- 'del',
- 'osd.%s' % osd_id,
- ])
-
-
-def _deallocate_osd_id(cluster, osd_id):
- LOG.info("Prepare to deallocate the osd-id: %s..." % osd_id)
- command([
- 'ceph',
- 'osd',
- 'rm',
- '%s' % osd_id,
- ])
-
-
-def destroy_lookup_device(args, predicate, description):
- devices = list_devices()
- for device in devices:
- for partition in device.get('partitions', []):
- if partition['dmcrypt']:
- dmcrypt_path = dmcrypt_map(partition['path'],
- args.dmcrypt_key_dir)
- list_dev_osd(dmcrypt_path, {}, partition)
- dmcrypt_unmap(partition['uuid'])
- if predicate(partition):
- return partition
- raise Error('found no device matching ', description)
-
-
-def main_destroy(args):
- osd_id = args.destroy_by_id
- path = args.path
- dmcrypt = False
- target_dev = None
-
- if path:
- if not is_partition(path):
- raise Error(path + " must be a partition device")
- path = os.path.realpath(path)
-
- if path:
- target_dev = destroy_lookup_device(
- args, lambda x: x.get('path') == path,
- path)
- elif osd_id:
- target_dev = destroy_lookup_device(
- args, lambda x: x.get('whoami') == osd_id,
- 'osd id ' + str(osd_id))
-
- osd_id = target_dev['whoami']
- dev_path = target_dev['path']
- if Ptype.is_dmcrypt(target_dev['ptype'], 'osd'):
- dmcrypt = True
- if target_dev['ptype'] == PTYPE['mpath']['osd']['ready']:
- base_dev = get_partition_base_mpath(dev_path)
- else:
- base_dev = get_partition_base(dev_path)
-
- # We cannot destroy an osd until it has been deactivated
- status_code = _check_osd_status(args.cluster, osd_id)
- if status_code != OSD_STATUS_OUT_DOWN and \
- status_code != OSD_STATUS_IN_DOWN:
- raise Error("Could not destroy the active osd. (osd-id: %s)" %
- osd_id)
-
- # Remove OSD from crush map
- _remove_from_crush_map(args.cluster, osd_id)
-
- # Remove OSD cephx key
- _delete_osd_auth_key(args.cluster, osd_id)
-
- # Deallocate OSD ID
- _deallocate_osd_id(args.cluster, osd_id)
-
- # we remove the crypt map and device mapper (if dmcrypt is True)
- if dmcrypt:
- for name in Space.NAMES:
- if target_dev.get(name + '_uuid'):
- dmcrypt_unmap(target_dev[name + '_uuid'])
-
- # Check the zap flag. If set, zap the base device to destroy
- # the osd data.
- if args.zap is True:
- # erase the osd data
- LOG.info("Prepare to zap the device %s" % base_dev)
- zap(base_dev)
-
-
-def get_space_osd_uuid(name, path):
- if not os.path.exists(path):
- raise Error('%s does not exist' % path)
-
- mode = os.stat(path).st_mode
- if not stat.S_ISBLK(mode):
- raise Error('%s is not a block device' % path)
-
- if (is_partition(path) and
- get_partition_type(path) in (PTYPE['mpath']['journal']['ready'],
- PTYPE['mpath']['block']['ready']) and
- not is_mpath(path)):
- raise Error('%s is not a multipath block device' %
- path)
-
- try:
- out = _check_output(
- args=[
- 'ceph-osd',
- '--get-device-fsid',
- path,
- ],
- close_fds=True,
- )
- except subprocess.CalledProcessError as e:
- raise Error(
- 'failed to get osd uuid/fsid from %s' % name,
- e,
- )
- value = str(out).split('\n', 1)[0]
- LOG.debug('%s %s has OSD UUID %s', name.capitalize(), path, value)
- return value
-
-
-def main_activate_space(name, args):
- if not os.path.exists(args.dev):
- raise Error('%s does not exist' % args.dev)
-
- cluster = None
- osd_id = None
- osd_uuid = None
- dev = None
- activate_lock.acquire() # noqa
- try:
- if args.dmcrypt:
- dev = dmcrypt_map(args.dev, args.dmcrypt_key_dir)
- else:
- dev = args.dev
- # FIXME: For an encrypted journal dev, does this return the
- # cyphertext or plaintext dev uuid!? Also, if the journal is
- # encrypted, is the data partition also always encrypted, or
- # are mixed pairs supported!?
- osd_uuid = get_space_osd_uuid(name, dev)
- path = os.path.join('/dev/disk/by-partuuid/', osd_uuid.lower())
-
- if is_suppressed(path):
- LOG.info('suppressed activate request on %s', path)
- return
-
- (cluster, osd_id) = mount_activate(
- dev=path,
- activate_key_template=args.activate_key_template,
- init=args.mark_init,
- dmcrypt=args.dmcrypt,
- dmcrypt_key_dir=args.dmcrypt_key_dir,
- reactivate=args.reactivate,
- )
-
- start_daemon(
- cluster=cluster,
- osd_id=osd_id,
- )
-
- finally:
- activate_lock.release() # noqa
-
-
-###########################
-
-
-def main_activate_all(args):
- dir = '/dev/disk/by-parttypeuuid'
- LOG.debug('Scanning %s', dir)
- if not os.path.exists(dir):
- return
- err = False
- for name in os.listdir(dir):
- if name.find('.') < 0:
- continue
- (tag, uuid) = name.split('.')
-
- if tag in Ptype.get_ready_by_name('osd'):
-
- if Ptype.is_dmcrypt(tag, 'osd'):
- path = os.path.join('/dev/mapper', uuid)
- else:
- path = os.path.join(dir, name)
-
- if is_suppressed(path):
- LOG.info('suppressed activate request on %s', path)
- continue
-
- LOG.info('Activating %s', path)
- activate_lock.acquire() # noqa
- try:
- # never map dmcrypt cyphertext devices
- (cluster, osd_id) = mount_activate(
- dev=path,
- activate_key_template=args.activate_key_template,
- init=args.mark_init,
- dmcrypt=False,
- dmcrypt_key_dir='',
- )
- start_daemon(
- cluster=cluster,
- osd_id=osd_id,
- )
-
- except Exception as e:
- print >> sys.stderr, '{prog}: {msg}'.format(
- prog=args.prog,
- msg=e,
- )
- err = True
-
- finally:
- activate_lock.release() # noqa
- if err:
- raise Error('One or more partitions failed to activate')
-
-
-###########################
-
-def is_swap(dev):
- dev = os.path.realpath(dev)
- with open('/proc/swaps', 'rb') as proc_swaps:
- for line in proc_swaps.readlines()[1:]:
- fields = line.split()
- if len(fields) < 3:
- continue
- swaps_dev = fields[0]
- if swaps_dev.startswith('/') and os.path.exists(swaps_dev):
- swaps_dev = os.path.realpath(swaps_dev)
- if swaps_dev == dev:
- return True
- return False
-
-
-def get_oneliner(base, name):
- path = os.path.join(base, name)
- if os.path.isfile(path):
- with open(path, 'r') as _file:
- return _file.readline().rstrip()
- return None
-
-
-def get_dev_fs(dev):
- fscheck, _, _ = command(
- [
- 'blkid',
- '-s',
- 'TYPE',
- dev,
- ],
- )
- if 'TYPE' in fscheck:
- fstype = fscheck.split()[1].split('"')[1]
- return fstype
- else:
- return None
-
-
-def split_dev_base_partnum(dev):
- if is_mpath(dev):
- partnum = partnum_mpath(dev)
- base = get_partition_base_mpath(dev)
- else:
- b = block_path(dev)
- partnum = open(os.path.join(b, 'partition')).read().strip()
- base = get_partition_base(dev)
- return (base, partnum)
-
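-# e.g. split_dev_base_partnum('/dev/sdb2') returns ('/dev/sdb', '2')
-# (illustrative; the partition number is read from sysfs as a string,
-# and multipath devices go through partnum_mpath instead).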
-
-def get_partition_type(part):
- return get_blkid_partition_info(part, 'ID_PART_ENTRY_TYPE')
-
-
-def get_partition_uuid(part):
- return get_blkid_partition_info(part, 'ID_PART_ENTRY_UUID')
-
-
-def get_blkid_partition_info(dev, what=None):
- out, _, _ = command(
- [
- 'blkid',
- '-o',
- 'udev',
- '-p',
- dev,
- ]
- )
- p = {}
- for line in out.splitlines():
- (key, value) = line.split('=')
- p[key] = value
- if what:
- return p.get(what)
- else:
- return p
-
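-# `blkid -o udev -p <dev>` prints KEY=VALUE lines; an abridged example for
-# a ceph data partition:
-#   ID_PART_ENTRY_TYPE=4fbd7e29-9d25-41b8-afd0-062c0ceff05d
-#   ID_PART_ENTRY_UUID=<the partition's own GUID>
-# which the two helpers above reduce to simple dict lookups.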
-
-def more_osd_info(path, uuid_map, desc):
- desc['ceph_fsid'] = get_oneliner(path, 'ceph_fsid')
- if desc['ceph_fsid']:
- desc['cluster'] = find_cluster_by_uuid(desc['ceph_fsid'])
- desc['whoami'] = get_oneliner(path, 'whoami')
- for name in Space.NAMES:
- uuid = get_oneliner(path, name + '_uuid')
- if uuid:
- desc[name + '_uuid'] = uuid.lower()
- if desc[name + '_uuid'] in uuid_map:
- desc[name + '_dev'] = uuid_map[desc[name + '_uuid']]
-
-
-def list_dev_osd(dev, uuid_map, desc):
- desc['mount'] = is_mounted(dev)
- desc['fs_type'] = get_dev_fs(dev)
- desc['state'] = 'unprepared'
- if desc['mount']:
- desc['state'] = 'active'
- more_osd_info(desc['mount'], uuid_map, desc)
- elif desc['fs_type']:
- try:
- tpath = mount(dev=dev, fstype=desc['fs_type'], options='')
- if tpath:
- try:
- magic = get_oneliner(tpath, 'magic')
- if magic is not None:
- desc['magic'] = magic
- desc['state'] = 'prepared'
- more_osd_info(tpath, uuid_map, desc)
- finally:
- unmount(tpath)
- except MountError:
- pass
-
-
-def list_format_more_osd_info_plain(dev):
- desc = []
- if dev.get('ceph_fsid'):
- if dev.get('cluster'):
- desc.append('cluster ' + dev['cluster'])
- else:
- desc.append('unknown cluster ' + dev['ceph_fsid'])
- if dev.get('whoami'):
- desc.append('osd.%s' % dev['whoami'])
- for name in Space.NAMES:
- if dev.get(name + '_dev'):
- desc.append(name + ' %s' % dev[name + '_dev'])
- return desc
-
-
-def list_format_dev_plain(dev, prefix=''):
- desc = []
- if dev['ptype'] == PTYPE['regular']['osd']['ready']:
- desc = (['ceph data', dev['state']] +
- list_format_more_osd_info_plain(dev))
- elif Ptype.is_dmcrypt(dev['ptype'], 'osd'):
- dmcrypt = dev['dmcrypt']
- if not dmcrypt['holders']:
- desc = ['ceph data (dmcrypt %s)' % dmcrypt['type'],
- 'not currently mapped']
- elif len(dmcrypt['holders']) == 1:
- holder = get_dev_path(dmcrypt['holders'][0])
- desc = ['ceph data (dmcrypt %s %s)' %
- (dmcrypt['type'], holder)]
- desc += list_format_more_osd_info_plain(dev)
- else:
- desc = ['ceph data (dmcrypt %s)' % dmcrypt['type'],
- 'holders: ' + ','.join(dmcrypt['holders'])]
- elif Ptype.is_regular_space(dev['ptype']):
- name = Ptype.space_ptype_to_name(dev['ptype'])
- desc.append('ceph ' + name)
- if dev.get(name + '_for'):
- desc.append('for %s' % dev[name + '_for'])
- elif Ptype.is_dmcrypt_space(dev['ptype']):
- name = Ptype.space_ptype_to_name(dev['ptype'])
- dmcrypt = dev['dmcrypt']
- if dmcrypt['holders'] and len(dmcrypt['holders']) == 1:
- holder = get_dev_path(dmcrypt['holders'][0])
- desc = ['ceph ' + name + ' (dmcrypt %s %s)' %
- (dmcrypt['type'], holder)]
- else:
- desc = ['ceph ' + name + ' (dmcrypt %s)' % dmcrypt['type']]
- if dev.get(name + '_for'):
- desc.append('for %s' % dev[name + '_for'])
- else:
- desc.append(dev['type'])
- if dev.get('fs_type'):
- desc.append(dev['fs_type'])
- elif dev.get('ptype'):
- desc.append(dev['ptype'])
- if dev.get('mount'):
- desc.append('mounted on %s' % dev['mount'])
- return '%s%s %s' % (prefix, dev['path'], ', '.join(desc))
-
-
-def list_format_plain(devices):
- lines = []
- for device in devices:
- if device.get('partitions'):
- lines.append('%s :' % device['path'])
- for p in sorted(device['partitions']):
- lines.append(list_format_dev_plain(dev=p,
- prefix=' '))
- else:
- lines.append(list_format_dev_plain(dev=device,
- prefix=''))
- return "\n".join(lines)
-
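-# Illustrative `ceph-disk list` plain output (device names made up):
-#   /dev/sdb :
-#    /dev/sdb1 ceph data, active, cluster ceph, osd.0, journal /dev/sdb2
-#    /dev/sdb2 ceph journal, for /dev/sdb1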
-
-def list_dev(dev, uuid_map, space_map):
- info = {
- 'path': dev,
- 'dmcrypt': {},
- }
-
- info['is_partition'] = is_partition(dev)
- if info['is_partition']:
- ptype = get_partition_type(dev)
- info['uuid'] = get_partition_uuid(dev)
- else:
- ptype = 'unknown'
- info['ptype'] = ptype
- LOG.info("list_dev(dev = " + dev + ", ptype = " + str(ptype) + ")")
- if ptype in (PTYPE['regular']['osd']['ready'],
- PTYPE['mpath']['osd']['ready']):
- info['type'] = 'data'
- if ptype == PTYPE['mpath']['osd']['ready']:
- info['multipath'] = True
- list_dev_osd(dev, uuid_map, info)
- elif ptype == PTYPE['plain']['osd']['ready']:
- holders = is_held(dev)
- info['type'] = 'data'
- info['dmcrypt']['holders'] = holders
- info['dmcrypt']['type'] = 'plain'
- if len(holders) == 1:
- list_dev_osd(get_dev_path(holders[0]), uuid_map, info)
- elif ptype == PTYPE['luks']['osd']['ready']:
- holders = is_held(dev)
- info['type'] = 'data'
- info['dmcrypt']['holders'] = holders
- info['dmcrypt']['type'] = 'LUKS'
- if len(holders) == 1:
- list_dev_osd(get_dev_path(holders[0]), uuid_map, info)
- elif Ptype.is_regular_space(ptype) or Ptype.is_mpath_space(ptype):
- name = Ptype.space_ptype_to_name(ptype)
- info['type'] = name
- if ptype == PTYPE['mpath'][name]['ready']:
- info['multipath'] = True
- if info.get('uuid') in space_map:
- info[name + '_for'] = space_map[info['uuid']]
- elif Ptype.is_plain_space(ptype):
- name = Ptype.space_ptype_to_name(ptype)
- holders = is_held(dev)
- info['type'] = name
- info['dmcrypt']['type'] = 'plain'
- info['dmcrypt']['holders'] = holders
- if info.get('uuid') in space_map:
- info[name + '_for'] = space_map[info['uuid']]
- elif Ptype.is_luks_space(ptype):
- name = Ptype.space_ptype_to_name(ptype)
- holders = is_held(dev)
- info['type'] = name
- info['dmcrypt']['type'] = 'LUKS'
- info['dmcrypt']['holders'] = holders
- if info.get('uuid') in space_map:
- info[name + '_for'] = space_map[info['uuid']]
- else:
- path = is_mounted(dev)
- fs_type = get_dev_fs(dev)
- if is_swap(dev):
- info['type'] = 'swap'
- else:
- info['type'] = 'other'
- if fs_type:
- info['fs_type'] = fs_type
- if path:
- info['mount'] = path
-
- return info
-
-
-def list_devices():
- partmap = list_all_partitions()
-
- uuid_map = {}
- space_map = {}
- for base, parts in sorted(partmap.iteritems()):
- for p in parts:
- dev = get_dev_path(p)
- part_uuid = get_partition_uuid(dev)
- if part_uuid:
- uuid_map[part_uuid] = dev
- ptype = get_partition_type(dev)
- LOG.debug("main_list: " + dev +
- " ptype = " + str(ptype) +
- " uuid = " + str(part_uuid))
- if ptype in Ptype.get_ready_by_name('osd'):
- if Ptype.is_dmcrypt(ptype, 'osd'):
- holders = is_held(dev)
- if len(holders) != 1:
- continue
- dev_to_mount = get_dev_path(holders[0])
- else:
- dev_to_mount = dev
-
- fs_type = get_dev_fs(dev_to_mount)
- if fs_type is not None:
- try:
- tpath = mount(dev=dev_to_mount,
- fstype=fs_type, options='')
- try:
- for name in Space.NAMES:
- space_uuid = get_oneliner(tpath,
- name + '_uuid')
- if space_uuid:
- space_map[space_uuid.lower()] = dev
- finally:
- unmount(tpath)
- except MountError:
- pass
-
- LOG.debug("main_list: " + str(partmap) + ", uuid_map = " +
- str(uuid_map) + ", space_map = " + str(space_map))
-
- devices = []
- for base, parts in sorted(partmap.iteritems()):
- if parts:
- disk = {'path': get_dev_path(base)}
- partitions = []
- for p in sorted(parts):
- partitions.append(list_dev(get_dev_path(p),
- uuid_map,
- space_map))
- disk['partitions'] = partitions
- devices.append(disk)
- else:
- device = list_dev(get_dev_path(base), uuid_map, space_map)
- device['path'] = get_dev_path(base)
- devices.append(device)
- LOG.debug("list_devices: " + str(devices))
- return devices
-
-
-def main_list(args):
- devices = list_devices()
- if args.path:
- paths = []
- for path in args.path:
- if os.path.exists(path):
- paths.append(os.path.realpath(path))
- else:
- paths.append(path)
- selected_devices = []
- for device in devices:
- for path in paths:
- if re.search(path + '$', device['path']):
- selected_devices.append(device)
- else:
- selected_devices = devices
- if args.format == 'json':
- print json.dumps(selected_devices)
- else:
- output = list_format_plain(selected_devices)
- if output:
- print output
-
-
-###########################
-#
-# Mark devices that we want to suppress activates on with a
-# file like
-#
-# /var/lib/ceph/tmp/suppress-activate.sdb
-#
-# where the last bit is the sanitized device name (/dev/X without the
- # /dev/ prefix) and the is_suppressed() check matches a prefix. That
-# means suppressing sdb will stop activate on sdb1, sdb2, etc.
-#
-
-def is_suppressed(path):
- disk = os.path.realpath(path)
- try:
- if (not disk.startswith('/dev/') or
- not stat.S_ISBLK(os.lstat(disk).st_mode)):
- return False
- base = get_dev_name(disk)
- while len(base):
- if os.path.exists(SUPPRESS_PREFIX + base): # noqa
- return True
- base = base[:-1]
- except:
- return False
- return False  # no suppress marker found
-
-
-def set_suppress(path):
- disk = os.path.realpath(path)
- if not os.path.exists(disk):
- raise Error('does not exist', path)
- if not stat.S_ISBLK(os.lstat(disk).st_mode):
- raise Error('not a block device', path)
- base = get_dev_name(disk)
-
- with open(SUPPRESS_PREFIX + base, 'w'): # noqa
- pass
- LOG.info('set suppress flag on %s', base)
-
-
-def unset_suppress(path):
- disk = os.path.realpath(path)
- if not os.path.exists(disk):
- raise Error('does not exist', path)
- if not stat.S_ISBLK(os.lstat(disk).st_mode):
- raise Error('not a block device', path)
- assert disk.startswith('/dev/')
- base = get_dev_name(disk)
-
- fn = SUPPRESS_PREFIX + base # noqa
- if not os.path.exists(fn):
- raise Error('not marked as suppressed', path)
-
- try:
- os.unlink(fn)
- LOG.info('unset suppress flag on %s', base)
- except OSError as e:
- raise Error('failed to unsuppress', e)
-
-
-def main_suppress(args):
- set_suppress(args.path)
-
-
-def main_unsuppress(args):
- unset_suppress(args.path)
-
-
-def main_zap(args):
- for dev in args.dev:
- zap(dev)
-
-
-def main_trigger(args):
- LOG.debug("main_trigger: " + str(args))
- if is_systemd() and not args.sync:
- # http://www.freedesktop.org/software/systemd/man/systemd-escape.html
- escaped_dev = args.dev[1:].replace('-', '\\x2d')
- service = 'ceph-disk@{dev}.service'.format(dev=escaped_dev)
- LOG.info('systemd detected, triggering %s' % service)
- command(
- [
- 'systemctl',
- '--no-block',
- 'restart',
- service,
- ]
- )
- return
- if is_upstart() and not args.sync:
- LOG.info('upstart detected, triggering ceph-disk task')
- command(
- [
- 'initctl',
- 'emit',
- 'ceph-disk',
- 'dev={dev}'.format(dev=args.dev),
- 'pid={pid}'.format(pid=os.getpid()),
- ]
- )
- return
-
- parttype = get_partition_type(args.dev)
- partid = get_partition_uuid(args.dev)
-
- LOG.info('trigger {dev} parttype {parttype} uuid {partid}'.format(
- dev=args.dev,
- parttype=parttype,
- partid=partid,
- ))
-
- if parttype in (PTYPE['regular']['osd']['ready'],
- PTYPE['mpath']['osd']['ready']):
- command(
- [
- 'ceph-disk',
- 'activate',
- args.dev,
- ]
- )
- elif parttype in (PTYPE['regular']['journal']['ready'],
- PTYPE['mpath']['journal']['ready']):
- command(
- [
- 'ceph-disk',
- 'activate-journal',
- args.dev,
- ]
- )
-
- # journals are easy: map, chown, activate-journal
- elif parttype == PTYPE['plain']['journal']['ready']:
- command(
- [
- '/sbin/cryptsetup',
- '--key-file',
- '/etc/ceph/dmcrypt-keys/{partid}'.format(partid=partid),
- '--key-size',
- '256',
- 'create',
- partid,
- args.dev,
- ]
- )
- newdev = '/dev/mapper/' + partid
- count = 0
- while not os.path.exists(newdev) and count <= 10:
- time.sleep(1)
- count += 1
- command(
- [
- '/bin/chown',
- 'ceph:ceph',
- newdev,
- ]
- )
- command(
- [
- '/usr/sbin/ceph-disk',
- 'activate-journal',
- newdev,
- ]
- )
- elif parttype == PTYPE['luks']['journal']['ready']:
- command(
- [
- '/sbin/cryptsetup',
- '--key-file',
- '/etc/ceph/dmcrypt-keys/{partid}.luks.key'.format(
- partid=partid),
- 'luksOpen',
- args.dev,
- partid,
- ]
- )
- newdev = '/dev/mapper/' + partid
- count = 0
- while not os.path.exists(newdev) and count <= 10:
- time.sleep(1)
- count += 1
- command(
- [
- '/bin/chown',
- 'ceph:ceph',
- newdev,
- ]
- )
- command(
- [
- '/usr/sbin/ceph-disk',
- 'activate-journal',
- newdev,
- ]
- )
-
- elif parttype in (PTYPE['regular']['block']['ready'],
- PTYPE['mpath']['block']['ready']):
- command(
- [
- 'ceph-disk',
- 'activate-block',
- args.dev,
- ]
- )
-
- # blocks are easy: map, chown, activate-block
- elif parttype == PTYPE['plain']['block']['ready']:
- command(
- [
- '/sbin/cryptsetup',
- '--key-file',
- '/etc/ceph/dmcrypt-keys/{partid}'.format(partid=partid),
- '--key-size',
- '256',
- 'create',
- partid,
- args.dev,
- ]
- )
- newdev = '/dev/mapper/' + partid
- count = 0
- while not os.path.exists(newdev) and count <= 10:
- time.sleep(1)
- count += 1
- command(
- [
- '/bin/chown',
- 'ceph:ceph',
- newdev,
- ]
- )
- command(
- [
- '/usr/sbin/ceph-disk',
- 'activate-block',
- newdev,
- ]
- )
- elif parttype == PTYPE['luks']['block']['ready']:
- command(
- [
- '/sbin/cryptsetup',
- '--key-file',
- '/etc/ceph/dmcrypt-keys/{partid}.luks.key'.format(
- partid=partid),
- 'luksOpen',
- args.dev,
- partid,
- ]
- )
- newdev = '/dev/mapper/' + partid
- count = 0
- while not os.path.exists(newdev) and count <= 10:
- time.sleep(1)
- count += 1
- command(
- [
- '/bin/chown',
- 'ceph:ceph',
- newdev,
- ]
- )
- command(
- [
- '/usr/sbin/ceph-disk',
- 'activate-block',
- newdev,
- ]
- )
-
- # osd data: map, activate
- elif parttype == PTYPE['plain']['osd']['ready']:
- command(
- [
- '/sbin/cryptsetup',
- '--key-file',
- '/etc/ceph/dmcrypt-keys/{partid}'.format(partid=partid),
- '--key-size',
- '256',
- 'create',
- partid,
- args.dev,
- ]
- )
- newdev = '/dev/mapper/' + partid
- count = 0
- while not os.path.exists(newdev) and count <= 10:
- time.sleep(1)
- count += 1
- command(
- [
- '/usr/sbin/ceph-disk',
- 'activate',
- newdev,
- ]
- )
-
- elif parttype == PTYPE['luks']['osd']['ready']:
- command(
- [
- '/sbin/cryptsetup',
- '--key-file',
- '/etc/ceph/dmcrypt-keys/{partid}.luks.key'.format(
- partid=partid),
- 'luksOpen',
- args.dev,
- partid,
- ]
- )
- newdev = '/dev/mapper/' + partid
- count = 0
- while not os.path.exists(newdev) and count <= 10:
- time.sleep(1)
- count += 1
- command(
- [
- '/usr/sbin/ceph-disk',
- 'activate',
- newdev,
- ]
- )
-
- else:
- raise Error('unrecognized partition type %s' % parttype)
-
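-# Note on the escaping in main_trigger above: the '-' -> '\x2d' substitution
-# follows the systemd unit-name escaping rules linked there, so e.g. a
-# device named 'dm-1' appears in the unit name as 'dm\x2d1'.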
-
-def setup_statedir(dir):
- # XXX The following use of globals makes linting
- # really hard. Global state in Python is iffy and
- # should be avoided.
- global STATEDIR
- STATEDIR = dir
-
- if not os.path.exists(STATEDIR):
- os.mkdir(STATEDIR)
- if not os.path.exists(STATEDIR + "/tmp"):
- os.mkdir(STATEDIR + "/tmp")
-
- global prepare_lock
- prepare_lock = filelock(STATEDIR + '/tmp/ceph-disk.prepare.lock')
-
- global activate_lock
- activate_lock = filelock(STATEDIR + '/tmp/ceph-disk.activate.lock')
-
- global SUPPRESS_PREFIX
- SUPPRESS_PREFIX = STATEDIR + '/tmp/suppress-activate.'
-
-
-def setup_sysconfdir(dir):
- global SYSCONFDIR
- SYSCONFDIR = dir
-
-
-def parse_args(argv):
- parser = argparse.ArgumentParser(
- 'ceph-disk',
- )
- parser.add_argument(
- '-v', '--verbose',
- action='store_true', default=None,
- help='be more verbose',
- )
- parser.add_argument(
- '--log-stdout',
- action='store_true', default=None,
- help='log to stdout',
- )
- parser.add_argument(
- '--prepend-to-path',
- metavar='PATH',
- default='/usr/bin',
- help=('prepend PATH to $PATH for backward compatibility '
- '(default /usr/bin)'),
- )
- parser.add_argument(
- '--statedir',
- metavar='PATH',
- default='/var/lib/ceph',
- help=('directory in which ceph state is preserved '
- '(default /var/lib/ceph)'),
- )
- parser.add_argument(
- '--sysconfdir',
- metavar='PATH',
- default='/etc/ceph',
- help=('directory in which ceph configuration files are found '
- '(default /etc/ceph)'),
- )
- parser.add_argument(
- '--setuser',
- metavar='USER',
- default=None,
- help='use the given user for subprocesses, rather than ceph or root'
- )
- parser.add_argument(
- '--setgroup',
- metavar='GROUP',
- default=None,
- help='use the given group for subprocesses, rather than ceph or root'
- )
- parser.set_defaults(
- # we want to hold on to this, for later
- prog=parser.prog,
- )
-
- subparsers = parser.add_subparsers(
- title='subcommands',
- description='valid subcommands',
- help='sub-command help',
- )
-
- Prepare.set_subparser(subparsers)
- make_activate_parser(subparsers)
- make_activate_block_parser(subparsers)
- make_activate_journal_parser(subparsers)
- make_activate_all_parser(subparsers)
- make_list_parser(subparsers)
- make_suppress_parser(subparsers)
- make_deactivate_parser(subparsers)
- make_destroy_parser(subparsers)
- make_zap_parser(subparsers)
- make_trigger_parser(subparsers)
-
- args = parser.parse_args(argv)
- return args
-
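Each make_*_parser function below registers one sub-command and records its entry point via set_defaults(func=...); main() then dispatches with args.func(args). A self-contained sketch of that argparse pattern, using made-up names:

    import argparse

    def main_frob(args):
        print('would operate on', args.dev)

    parser = argparse.ArgumentParser('demo')
    subparsers = parser.add_subparsers(title='subcommands')
    frob = subparsers.add_parser('frob', help='Frob a device')
    frob.add_argument('dev', help='device')
    frob.set_defaults(func=main_frob)

    args = parser.parse_args(['frob', '/dev/vdb1'])
    args.func(args)  # dispatches to main_frob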
-
-def make_trigger_parser(subparsers):
- trigger_parser = subparsers.add_parser(
- 'trigger',
-        help='Trigger an event (called by udev)')
- trigger_parser.add_argument(
- 'dev',
- help=('device'),
- )
- trigger_parser.add_argument(
- '--sync',
- action='store_true', default=None,
- help=('do operation synchronously; do not trigger systemd'),
- )
- trigger_parser.set_defaults(
- func=main_trigger,
- )
- return trigger_parser
-
-
-def make_activate_parser(subparsers):
- activate_parser = subparsers.add_parser(
- 'activate',
- help='Activate a Ceph OSD')
- activate_parser.add_argument(
- '--mount',
- action='store_true', default=None,
- help='mount a block device [deprecated, ignored]',
- )
- activate_parser.add_argument(
- '--activate-key',
- metavar='PATH',
- help='bootstrap-osd keyring path template (%(default)s)',
- dest='activate_key_template',
- )
- activate_parser.add_argument(
- '--mark-init',
- metavar='INITSYSTEM',
- help='init system to manage this dir',
- default='auto',
- choices=INIT_SYSTEMS,
- )
- activate_parser.add_argument(
- '--no-start-daemon',
- action='store_true', default=None,
- help='do not start the daemon',
- )
- activate_parser.add_argument(
- 'path',
- metavar='PATH',
- help='path to block device or directory',
- )
- activate_parser.add_argument(
- '--dmcrypt',
- action='store_true', default=None,
- help='map DATA and/or JOURNAL devices with dm-crypt',
- )
- activate_parser.add_argument(
- '--dmcrypt-key-dir',
- metavar='KEYDIR',
- default='/etc/ceph/dmcrypt-keys',
- help='directory where dm-crypt keys are stored',
- )
- activate_parser.add_argument(
- '--reactivate',
- action='store_true', default=False,
-        help='activate the deactivated OSD',
- )
- activate_parser.set_defaults(
- activate_key_template='{statedir}/bootstrap-osd/{cluster}.keyring',
- func=main_activate,
- )
- return activate_parser
-
-
-def make_activate_block_parser(subparsers):
- return make_activate_space_parser('block', subparsers)
-
-
-def make_activate_journal_parser(subparsers):
- return make_activate_space_parser('journal', subparsers)
-
-
-def make_activate_space_parser(name, subparsers):
- activate_space_parser = subparsers.add_parser(
- 'activate-%s' % name,
- help='Activate an OSD via its %s device' % name)
- activate_space_parser.add_argument(
- 'dev',
- metavar='DEV',
- help='path to %s block device' % name,
- )
- activate_space_parser.add_argument(
- '--activate-key',
- metavar='PATH',
- help='bootstrap-osd keyring path template (%(default)s)',
- dest='activate_key_template',
- )
- activate_space_parser.add_argument(
- '--mark-init',
- metavar='INITSYSTEM',
- help='init system to manage this dir',
- default='auto',
- choices=INIT_SYSTEMS,
- )
- activate_space_parser.add_argument(
- '--dmcrypt',
- action='store_true', default=None,
-        help=('map data and/or auxiliary (journal, etc.) '
- 'devices with dm-crypt'),
- )
- activate_space_parser.add_argument(
- '--dmcrypt-key-dir',
- metavar='KEYDIR',
- default='/etc/ceph/dmcrypt-keys',
- help='directory where dm-crypt keys are stored',
- )
- activate_space_parser.add_argument(
- '--reactivate',
- action='store_true', default=False,
-        help='activate the deactivated OSD',
- )
- activate_space_parser.set_defaults(
- activate_key_template='{statedir}/bootstrap-osd/{cluster}.keyring',
- func=lambda args: main_activate_space(name, args),
- )
- return activate_space_parser
-
-
-def make_activate_all_parser(subparsers):
- activate_all_parser = subparsers.add_parser(
- 'activate-all',
- help='Activate all tagged OSD partitions')
- activate_all_parser.add_argument(
- '--activate-key',
- metavar='PATH',
- help='bootstrap-osd keyring path template (%(default)s)',
- dest='activate_key_template',
- )
- activate_all_parser.add_argument(
- '--mark-init',
- metavar='INITSYSTEM',
- help='init system to manage this dir',
- default='auto',
- choices=INIT_SYSTEMS,
- )
- activate_all_parser.set_defaults(
- activate_key_template='{statedir}/bootstrap-osd/{cluster}.keyring',
- func=main_activate_all,
- )
- return activate_all_parser
-
-
-def make_list_parser(subparsers):
- list_parser = subparsers.add_parser(
- 'list',
- help='List disks, partitions, and Ceph OSDs')
- list_parser.add_argument(
- '--format',
- help='output format',
- default='plain',
- choices=['json', 'plain'],
- )
- list_parser.add_argument(
- 'path',
- metavar='PATH',
- nargs='*',
- help='path to block devices, relative to /sys/block',
- )
- list_parser.set_defaults(
- func=main_list,
- )
- return list_parser
-
-
-def make_suppress_parser(subparsers):
- suppress_parser = subparsers.add_parser(
- 'suppress-activate',
- help='Suppress activate on a device (prefix)')
- suppress_parser.add_argument(
- 'path',
- metavar='PATH',
- help='path to block device or directory',
- )
- suppress_parser.set_defaults(
- func=main_suppress,
- )
-
- unsuppress_parser = subparsers.add_parser(
- 'unsuppress-activate',
- help='Stop suppressing activate on a device (prefix)')
- unsuppress_parser.add_argument(
- 'path',
- metavar='PATH',
- help='path to block device or directory',
- )
- unsuppress_parser.set_defaults(
- func=main_unsuppress,
- )
- return suppress_parser
-
-
-def make_deactivate_parser(subparsers):
- deactivate_parser = subparsers.add_parser(
- 'deactivate',
- help='Deactivate a Ceph OSD')
- deactivate_parser.add_argument(
- '--cluster',
- metavar='NAME',
- default='ceph',
- help='cluster name to assign this disk to',
- )
- deactivate_parser.add_argument(
- 'path',
- metavar='PATH',
- nargs='?',
- help='path to block device or directory',
- )
- deactivate_parser.add_argument(
- '--deactivate-by-id',
- metavar='<id>',
-        help='ID of OSD to deactivate'
- )
- deactivate_parser.add_argument(
- '--mark-out',
- action='store_true', default=False,
- help='option to mark the osd out',
- )
- deactivate_parser.set_defaults(
- func=main_deactivate,
- )
-
-
-def make_destroy_parser(subparsers):
- destroy_parser = subparsers.add_parser(
- 'destroy',
- help='Destroy a Ceph OSD')
- destroy_parser.add_argument(
- '--cluster',
- metavar='NAME',
- default='ceph',
- help='cluster name to assign this disk to',
- )
- destroy_parser.add_argument(
- 'path',
- metavar='PATH',
- nargs='?',
- help='path to block device or directory',
- )
- destroy_parser.add_argument(
- '--destroy-by-id',
- metavar='<id>',
- help='ID of OSD to destroy'
- )
- destroy_parser.add_argument(
- '--dmcrypt-key-dir',
- metavar='KEYDIR',
- default='/etc/ceph/dmcrypt-keys',
-        help=('directory where dm-crypt keys are stored '
-              '(leave this at the default unless you know '
-              'why it needs to be changed)'),
- )
- destroy_parser.add_argument(
- '--zap',
- action='store_true', default=False,
- help='option to erase data and partition',
- )
- destroy_parser.set_defaults(
- func=main_destroy,
- )
-
-
-def make_zap_parser(subparsers):
- zap_parser = subparsers.add_parser(
- 'zap',
- help='Zap/erase/destroy a device\'s partition table (and contents)')
- zap_parser.add_argument(
- 'dev',
- metavar='DEV',
- nargs='+',
- help='path to block device',
- )
- zap_parser.set_defaults(
- func=main_zap,
- )
- return zap_parser
-
-
-def main(argv):
- args = parse_args(argv)
-
- setup_logging(args.verbose, args.log_stdout)
-
- if args.prepend_to_path != '':
- path = os.environ.get('PATH', os.defpath)
- os.environ['PATH'] = args.prepend_to_path + ":" + path
-
- setup_statedir(args.statedir)
- setup_sysconfdir(args.sysconfdir)
-
- global CEPH_PREF_USER
- CEPH_PREF_USER = args.setuser
- global CEPH_PREF_GROUP
- CEPH_PREF_GROUP = args.setgroup
-
- if args.verbose:
- args.func(args)
- else:
- main_catch(args.func, args)
-
-
-def setup_logging(verbose, log_stdout):
- loglevel = logging.WARNING
- if verbose:
- loglevel = logging.DEBUG
-
- if log_stdout:
- ch = logging.StreamHandler(stream=sys.stdout)
- ch.setLevel(loglevel)
- formatter = logging.Formatter('%(filename)s: %(message)s')
- ch.setFormatter(formatter)
- LOG.addHandler(ch)
- LOG.setLevel(loglevel)
- else:
- logging.basicConfig(
- level=loglevel,
- )
-
-
-def main_catch(func, args):
-
- try:
- func(args)
-
- except Error as e:
- raise SystemExit(
- '{prog}: {msg}'.format(
- prog=args.prog,
- msg=e,
- )
- )
-
- except CephDiskException as error:
- exc_name = error.__class__.__name__
- raise SystemExit(
- '{prog} {exc_name}: {msg}'.format(
- prog=args.prog,
- exc_name=exc_name,
- msg=error,
- )
- )
-
-
-def run():
- main(sys.argv[1:])
-
-if __name__ == '__main__':
- main(sys.argv[1:])
- warned_about = {}
diff --git a/src/ceph/qa/workunits/ceph-disk/ceph-disk-test.py b/src/ceph/qa/workunits/ceph-disk/ceph-disk-test.py
deleted file mode 100644
index 637fa90..0000000
--- a/src/ceph/qa/workunits/ceph-disk/ceph-disk-test.py
+++ /dev/null
@@ -1,777 +0,0 @@
-#
-# Copyright (C) 2015, 2016 Red Hat <contact@redhat.com>
-#
-# Author: Loic Dachary <loic@dachary.org>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Library Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Library Public License for more details.
-#
-# When debugging these tests (must be root), here are a few useful commands:
-#
-# export PATH=.:..:$PATH
-# ceph-disk.sh # run once to prepare the environment as it would be by teuthology
-# ln -sf /home/ubuntu/ceph/src/ceph-disk/ceph_disk/main.py $(which ceph-disk)
-# ln -sf /home/ubuntu/ceph/udev/95-ceph-osd.rules /lib/udev/rules.d/95-ceph-osd.rules
-# ln -sf /home/ubuntu/ceph/systemd/ceph-disk@.service /usr/lib/systemd/system/ceph-disk@.service
-# ceph-disk.conf will be silently ignored if it is a symbolic link or a hard link; logs are in /var/log/upstart
-# cp /home/ubuntu/ceph/src/upstart/ceph-disk.conf /etc/init/ceph-disk.conf
-# id=3 ; ceph-disk deactivate --deactivate-by-id $id ; ceph-disk destroy --purge --zap --destroy-by-id $id
-# py.test -s -v -k test_activate_dmcrypt_luks ceph-disk-test.py
-#
-# CentOS 7
-# udevadm monitor --property & tail -f /var/log/messages
-# udev rules messages are logged in /var/log/messages
-# systemctl stop ceph-osd@2
-# systemctl start ceph-osd@2
-#
-# udevadm monitor --property & tail -f /var/log/syslog /var/log/upstart/* # on Ubuntu 14.04
-# udevadm test --action=add /block/vdb/vdb1 # verify the udev rule is run as expected
-# udevadm control --reload # when changing the udev rules
-# sudo /usr/sbin/ceph-disk -v trigger /dev/vdb1 # activates if vdb1 is data
-#
-# integration tests coverage
-# pip install coverage
-# perl -pi -e 's|"ceph-disk |"coverage run --source=/usr/sbin/ceph-disk --append /usr/sbin/ceph-disk |' ceph-disk-test.py
-# rm -f .coverage ; py.test -s -v ceph-disk-test.py
-# coverage report --show-missing
-#
-import argparse
-import json
-import logging
-import configobj
-import os
-import pytest
-import re
-import subprocess
-import sys
-import tempfile
-import time
-import uuid
-
-LOG = logging.getLogger('CephDisk')
-
-
-class CephDisk:
-
- def __init__(self):
- self.conf = configobj.ConfigObj('/etc/ceph/ceph.conf')
-
- def save_conf(self):
- self.conf.write(open('/etc/ceph/ceph.conf', 'wb'))
-
- @staticmethod
- def helper(command):
- command = "ceph-helpers-root.sh " + command
- return CephDisk.sh(command)
-
- @staticmethod
- def sh(command):
- LOG.debug(":sh: " + command)
- proc = subprocess.Popen(
- args=command,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- shell=True,
- bufsize=1)
- output, _ = proc.communicate()
- if proc.poll():
- LOG.warning(output.decode('utf-8'))
- raise subprocess.CalledProcessError(
- returncode=proc.returncode,
- cmd=command,
- output=output,
- )
- lines = []
- for line in output.decode('utf-8').split('\n'):
- if 'dangerous and experimental' in line:
- LOG.debug('SKIP dangerous and experimental')
- continue
- lines.append(line)
- LOG.debug(line.strip().encode('ascii', 'ignore'))
- return "".join(lines)
-
- def unused_disks(self, pattern='[vs]d.'):
- names = [x for x in os.listdir("/sys/block") if re.match(pattern, x)]
- if not names:
- return []
- disks = json.loads(
- self.sh("ceph-disk list --format json " + " ".join(names)))
- unused = []
- for disk in disks:
- if 'partitions' not in disk:
- unused.append(disk['path'])
- return unused
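unused_disks() above and the get_*_partition() helpers below all key off the JSON printed by 'ceph-disk list --format json'. The fields consumed by this test look roughly like the following (an abridged illustration, not the complete schema):

    example = [
        {'path': '/dev/vdb',
         'partitions': [{'path': '/dev/vdb1', 'type': 'data',
                         'state': 'active', 'uuid': '<osd uuid>',
                         'journal_dev': '/dev/vdb2'}]},
        {'path': '/dev/vdc'},  # no 'partitions' key: an unused disk
    ]
    unused = [d['path'] for d in example if 'partitions' not in d]
    assert unused == ['/dev/vdc']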
-
- def ensure_sd(self):
- LOG.debug(self.unused_disks('sd.'))
- if self.unused_disks('sd.'):
- return
- modprobe = "modprobe scsi_debug vpd_use_hostno=0 add_host=1 dev_size_mb=300 ; udevadm settle"
- try:
- self.sh(modprobe)
-        except Exception:
- self.helper("install linux-image-extra-3.13.0-61-generic")
- self.sh(modprobe)
-
- def unload_scsi_debug(self):
- self.sh("rmmod scsi_debug || true")
-
- def get_lockbox(self):
- disks = json.loads(self.sh("ceph-disk list --format json"))
- for disk in disks:
- if 'partitions' in disk:
- for partition in disk['partitions']:
- if partition.get('type') == 'lockbox':
- return partition
- raise Exception("no lockbox found " + str(disks))
-
- def get_osd_partition(self, uuid):
- disks = json.loads(self.sh("ceph-disk list --format json"))
- for disk in disks:
- if 'partitions' in disk:
- for partition in disk['partitions']:
- if partition.get('uuid') == uuid:
- return partition
- raise Exception("uuid = " + uuid + " not found in " + str(disks))
-
- def get_journal_partition(self, uuid):
- return self.get_space_partition('journal', uuid)
-
- def get_block_partition(self, uuid):
- return self.get_space_partition('block', uuid)
-
- def get_blockdb_partition(self, uuid):
- return self.get_space_partition('block.db', uuid)
-
- def get_blockwal_partition(self, uuid):
- return self.get_space_partition('block.wal', uuid)
-
- def get_space_partition(self, name, uuid):
- data_partition = self.get_osd_partition(uuid)
- space_dev = data_partition[name + '_dev']
- disks = json.loads(self.sh("ceph-disk list --format json"))
- for disk in disks:
- if 'partitions' in disk:
- for partition in disk['partitions']:
- if partition['path'] == space_dev:
- if name + '_for' in partition:
- assert partition[
- name + '_for'] == data_partition['path']
- return partition
- raise Exception(
- name + " for uuid = " + uuid + " not found in " + str(disks))
-
- def destroy_osd(self, uuid):
- id = self.sh("ceph osd create " + uuid).strip()
- self.sh("""
- set -xe
- ceph-disk --verbose deactivate --deactivate-by-id {id}
- ceph-disk --verbose destroy --purge --destroy-by-id {id} --zap
- """.format(id=id))
-
- def deactivate_osd(self, uuid):
- id = self.sh("ceph osd create " + uuid).strip()
- self.sh("""
- set -xe
- ceph-disk --verbose deactivate --once --deactivate-by-id {id}
- """.format(id=id))
-
- @staticmethod
- def osd_up_predicate(osds, uuid):
- for osd in osds:
- if osd['uuid'] == uuid and 'up' in osd['state']:
- return True
- return False
-
- @staticmethod
- def wait_for_osd_up(uuid):
- CephDisk.wait_for_osd(uuid, CephDisk.osd_up_predicate, 'up')
-
- @staticmethod
- def osd_down_predicate(osds, uuid):
- found = False
- for osd in osds:
- if osd['uuid'] == uuid:
- found = True
- if 'down' in osd['state'] or ['exists'] == osd['state']:
- return True
- return not found
-
- @staticmethod
- def wait_for_osd_down(uuid):
- CephDisk.wait_for_osd(uuid, CephDisk.osd_down_predicate, 'down')
-
- @staticmethod
- def wait_for_osd(uuid, predicate, info):
- LOG.info("wait_for_osd " + info + " " + uuid)
- for delay in (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024):
- dump = json.loads(CephDisk.sh("ceph osd dump -f json"))
- if predicate(dump['osds'], uuid):
- return True
- time.sleep(delay)
- raise Exception('timeout waiting for osd ' + uuid + ' to be ' + info)
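The doubling delays bound the total wait at 1 + 2 + ... + 1024 = 2047 seconds (roughly 34 minutes) across 11 polls. The same schedule as a generic sketch:

    import time

    def wait_until(predicate, delays=tuple(2 ** n for n in range(11))):
        # exponential backoff: cheap early polls, bounded total sleep
        for delay in delays:
            if predicate():
                return True
            time.sleep(delay)
        raise Exception('timeout')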
-
- def check_osd_status(self, uuid, space_name=None):
- data_partition = self.get_osd_partition(uuid)
- assert data_partition['type'] == 'data'
- assert data_partition['state'] == 'active'
- if space_name is not None:
- space_partition = self.get_space_partition(space_name, uuid)
- assert space_partition
-
-
-class TestCephDisk(object):
-
- def setup_class(self):
- logging.basicConfig(level=logging.DEBUG)
- c = CephDisk()
- if c.sh("lsb_release -si").strip() == 'CentOS':
- c.helper("install multipath-tools device-mapper-multipath")
- c.conf['global']['pid file'] = '/var/run/ceph/$cluster-$name.pid'
- #
- # Avoid json parsing interference
- #
- c.conf['global']['debug monc'] = 0
- #
-        # objectstore
- #
- c.conf['global']['osd journal size'] = 100
- #
- # bluestore
- #
- c.conf['global']['bluestore fsck on mount'] = 'true'
- c.save_conf()
-
- def setup(self):
- c = CephDisk()
- for key in ('osd objectstore', 'osd dmcrypt type'):
- if key in c.conf['global']:
- del c.conf['global'][key]
- c.save_conf()
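The tests steer cluster behaviour by rewriting /etc/ceph/ceph.conf through configobj, as setup_class and setup do above. The same pattern against a scratch file, for illustration:

    import configobj

    conf = configobj.ConfigObj('/tmp/demo-ceph.conf')
    conf['global'] = {'osd journal size': 100}   # becomes a [global] section
    conf.write(open('/tmp/demo-ceph.conf', 'wb'))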
-
- def test_deactivate_reactivate_osd(self):
- c = CephDisk()
- disk = c.unused_disks()[0]
- osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose zap " + disk)
- c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
- " " + disk)
- c.wait_for_osd_up(osd_uuid)
- device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
- assert len(device['partitions']) == 2
- c.check_osd_status(osd_uuid, 'journal')
- data_partition = c.get_osd_partition(osd_uuid)
- c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
- c.wait_for_osd_down(osd_uuid)
- c.sh("ceph-disk --verbose activate " + data_partition['path'] + " --reactivate")
- # check again
- c.wait_for_osd_up(osd_uuid)
- device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
- assert len(device['partitions']) == 2
- c.check_osd_status(osd_uuid, 'journal')
- c.helper("pool_read_write")
- c.destroy_osd(osd_uuid)
-
- def test_destroy_osd_by_id(self):
- c = CephDisk()
- disk = c.unused_disks()[0]
- osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid + " " + disk)
- c.wait_for_osd_up(osd_uuid)
- c.check_osd_status(osd_uuid)
- c.destroy_osd(osd_uuid)
-
- def test_destroy_osd_by_dev_path(self):
- c = CephDisk()
- disk = c.unused_disks()[0]
- osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid + " " + disk)
- c.wait_for_osd_up(osd_uuid)
- partition = c.get_osd_partition(osd_uuid)
- assert partition['type'] == 'data'
- assert partition['state'] == 'active'
- c.sh("ceph-disk --verbose deactivate " + partition['path'])
- c.wait_for_osd_down(osd_uuid)
- c.sh("ceph-disk --verbose destroy --purge " + partition['path'] + " --zap")
-
- def test_deactivate_reactivate_dmcrypt_plain(self):
- c = CephDisk()
- c.conf['global']['osd dmcrypt type'] = 'plain'
- c.save_conf()
- osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
- data_partition = c.get_osd_partition(osd_uuid)
- c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
- c.wait_for_osd_down(osd_uuid)
- c.sh("ceph-disk --verbose activate-journal " + data_partition['journal_dev'] +
- " --reactivate" + " --dmcrypt")
- c.wait_for_osd_up(osd_uuid)
- c.check_osd_status(osd_uuid, 'journal')
- c.destroy_osd(osd_uuid)
- c.save_conf()
-
- def test_deactivate_reactivate_dmcrypt_luks(self):
- c = CephDisk()
- osd_uuid = self.activate_dmcrypt('ceph-disk')
- data_partition = c.get_osd_partition(osd_uuid)
- lockbox_partition = c.get_lockbox()
- c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
- c.wait_for_osd_down(osd_uuid)
- c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
- c.sh("ceph-disk --verbose activate-journal " + data_partition['journal_dev'] +
- " --reactivate" + " --dmcrypt")
- c.wait_for_osd_up(osd_uuid)
- c.check_osd_status(osd_uuid, 'journal')
- c.destroy_osd(osd_uuid)
-
- def test_activate_dmcrypt_plain_no_lockbox(self):
- c = CephDisk()
- c.conf['global']['osd dmcrypt type'] = 'plain'
- c.save_conf()
- osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
- c.destroy_osd(osd_uuid)
- c.save_conf()
-
- def test_activate_dmcrypt_luks_no_lockbox(self):
- c = CephDisk()
- osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
- c.destroy_osd(osd_uuid)
-
- def test_activate_dmcrypt_luks_with_lockbox(self):
- c = CephDisk()
- osd_uuid = self.activate_dmcrypt('ceph-disk')
- c.destroy_osd(osd_uuid)
-
- def test_activate_lockbox(self):
- c = CephDisk()
- osd_uuid = self.activate_dmcrypt('ceph-disk')
- lockbox = c.get_lockbox()
- assert lockbox['state'] == 'active'
- c.sh("umount " + lockbox['path'])
- lockbox = c.get_lockbox()
- assert lockbox['state'] == 'prepared'
- c.sh("ceph-disk --verbose trigger " + lockbox['path'])
- success = False
- for delay in (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024):
- lockbox = c.get_lockbox()
- if lockbox['state'] == 'active':
- success = True
- break
- time.sleep(delay)
- if not success:
- raise Exception('timeout waiting for lockbox ' + lockbox['path'])
- c.destroy_osd(osd_uuid)
-
- def activate_dmcrypt(self, ceph_disk):
- c = CephDisk()
- disk = c.unused_disks()[0]
- osd_uuid = str(uuid.uuid1())
- journal_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose zap " + disk)
- c.sh(ceph_disk + " --verbose prepare --filestore " +
- " --osd-uuid " + osd_uuid +
- " --journal-uuid " + journal_uuid +
- " --dmcrypt " +
- " " + disk)
- c.wait_for_osd_up(osd_uuid)
- c.check_osd_status(osd_uuid, 'journal')
- return osd_uuid
-
- def test_trigger_dmcrypt_journal_lockbox(self):
- c = CephDisk()
- osd_uuid = self.activate_dmcrypt('ceph-disk')
- data_partition = c.get_osd_partition(osd_uuid)
- lockbox_partition = c.get_lockbox()
- c.deactivate_osd(osd_uuid)
- c.wait_for_osd_down(osd_uuid)
- with pytest.raises(subprocess.CalledProcessError):
- # fails because the lockbox is not mounted yet
- c.sh("ceph-disk --verbose trigger --sync " + data_partition['journal_dev'])
- c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
- c.wait_for_osd_up(osd_uuid)
- c.destroy_osd(osd_uuid)
-
- def test_trigger_dmcrypt_data_lockbox(self):
- c = CephDisk()
- osd_uuid = self.activate_dmcrypt('ceph-disk')
- data_partition = c.get_osd_partition(osd_uuid)
- lockbox_partition = c.get_lockbox()
- c.deactivate_osd(osd_uuid)
- c.wait_for_osd_down(osd_uuid)
- with pytest.raises(subprocess.CalledProcessError):
- # fails because the lockbox is not mounted yet
- c.sh("ceph-disk --verbose trigger --sync " + data_partition['path'])
- c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
- c.wait_for_osd_up(osd_uuid)
- c.destroy_osd(osd_uuid)
-
- def test_trigger_dmcrypt_lockbox(self):
- c = CephDisk()
- osd_uuid = self.activate_dmcrypt('ceph-disk')
- data_partition = c.get_osd_partition(osd_uuid)
- lockbox_partition = c.get_lockbox()
- c.deactivate_osd(osd_uuid)
- c.wait_for_osd_down(osd_uuid)
- c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
- c.wait_for_osd_up(osd_uuid)
- c.destroy_osd(osd_uuid)
-
- def test_activate_no_journal(self):
- c = CephDisk()
- disk = c.unused_disks()[0]
- osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose zap " + disk)
- c.conf['global']['osd objectstore'] = 'memstore'
- c.save_conf()
- c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
- " " + disk)
- c.wait_for_osd_up(osd_uuid)
- device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
- assert len(device['partitions']) == 1
- partition = device['partitions'][0]
- assert partition['type'] == 'data'
- assert partition['state'] == 'active'
- assert 'journal_dev' not in partition
- c.helper("pool_read_write")
- c.destroy_osd(osd_uuid)
- c.save_conf()
-
- def test_activate_with_journal_dev_no_symlink(self):
- c = CephDisk()
- disk = c.unused_disks()[0]
- osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose zap " + disk)
- c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
- " " + disk)
- c.wait_for_osd_up(osd_uuid)
- device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
- assert len(device['partitions']) == 2
- c.check_osd_status(osd_uuid, 'journal')
- c.helper("pool_read_write")
- c.destroy_osd(osd_uuid)
-
- def test_activate_bluestore(self):
- c = CephDisk()
- disk = c.unused_disks()[0]
- osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose zap " + disk)
- c.conf['global']['osd objectstore'] = 'bluestore'
- c.save_conf()
- c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
- " " + disk)
- c.wait_for_osd_up(osd_uuid)
- device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
- assert len(device['partitions']) == 2
- c.check_osd_status(osd_uuid, 'block')
- c.helper("pool_read_write")
- c.destroy_osd(osd_uuid)
- c.sh("ceph-disk --verbose zap " + disk)
-
- def test_activate_bluestore_seperated_block_db_wal(self):
- c = CephDisk()
- disk1 = c.unused_disks()[0]
- disk2 = c.unused_disks()[1]
- osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)
- c.conf['global']['osd objectstore'] = 'bluestore'
- c.save_conf()
- c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
- " " + disk1 + " --block.db " + disk2 + " --block.wal " + disk2)
- c.wait_for_osd_up(osd_uuid)
- device = json.loads(c.sh("ceph-disk list --format json " + disk1))[0]
- assert len(device['partitions']) == 2
- device = json.loads(c.sh("ceph-disk list --format json " + disk2))[0]
- assert len(device['partitions']) == 2
- c.check_osd_status(osd_uuid, 'block')
- c.check_osd_status(osd_uuid, 'block.wal')
- c.check_osd_status(osd_uuid, 'block.db')
- c.helper("pool_read_write")
- c.destroy_osd(osd_uuid)
- c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)
-
- def test_activate_bluestore_reuse_db_wal_partition(self):
- c = CephDisk()
- disks = c.unused_disks()
- block_disk = disks[0]
- db_wal_disk = disks[1]
- #
- # Create an OSD with two disks (one for block,
-        # the other for block.db and block.wal) and then destroy it.
- #
- osd_uuid1 = str(uuid.uuid1())
- c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)
- c.conf['global']['osd objectstore'] = 'bluestore'
- c.save_conf()
- c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
- osd_uuid1 + " " + block_disk + " --block.db " + db_wal_disk +
- " --block.wal " + db_wal_disk)
- c.wait_for_osd_up(osd_uuid1)
- blockdb_partition = c.get_blockdb_partition(osd_uuid1)
- blockdb_path = blockdb_partition['path']
- blockwal_partition = c.get_blockwal_partition(osd_uuid1)
- blockwal_path = blockwal_partition['path']
- c.destroy_osd(osd_uuid1)
- c.sh("ceph-disk --verbose zap " + block_disk)
- #
- # Create another OSD with the block.db and block.wal partition
- # of the previous OSD
- #
- osd_uuid2 = str(uuid.uuid1())
- c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
- osd_uuid2 + " " + block_disk + " --block.db " + blockdb_path +
- " --block.wal " + blockwal_path)
- c.wait_for_osd_up(osd_uuid2)
- device = json.loads(c.sh("ceph-disk list --format json " + block_disk))[0]
- assert len(device['partitions']) == 2
- device = json.loads(c.sh("ceph-disk list --format json " + db_wal_disk))[0]
- assert len(device['partitions']) == 2
- c.check_osd_status(osd_uuid2, 'block')
- c.check_osd_status(osd_uuid2, 'block.wal')
- c.check_osd_status(osd_uuid2, 'block.db')
- blockdb_partition = c.get_blockdb_partition(osd_uuid2)
- blockwal_partition = c.get_blockwal_partition(osd_uuid2)
- #
- # Verify the previous OSD partition has been reused
- #
- assert blockdb_partition['path'] == blockdb_path
- assert blockwal_partition['path'] == blockwal_path
- c.destroy_osd(osd_uuid2)
- c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)
-
- def test_activate_with_journal_dev_is_symlink(self):
- c = CephDisk()
- disk = c.unused_disks()[0]
- osd_uuid = str(uuid.uuid1())
- tempdir = tempfile.mkdtemp()
- symlink = os.path.join(tempdir, 'osd')
- os.symlink(disk, symlink)
- c.sh("ceph-disk --verbose zap " + symlink)
- c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
- " " + symlink)
- c.wait_for_osd_up(osd_uuid)
- device = json.loads(c.sh("ceph-disk list --format json " + symlink))[0]
- assert len(device['partitions']) == 2
- data_partition = c.get_osd_partition(osd_uuid)
- assert data_partition['type'] == 'data'
- assert data_partition['state'] == 'active'
- journal_partition = c.get_journal_partition(osd_uuid)
- assert journal_partition
- c.helper("pool_read_write")
- c.destroy_osd(osd_uuid)
- c.sh("ceph-disk --verbose zap " + symlink)
- os.unlink(symlink)
- os.rmdir(tempdir)
-
- def test_activate_journal_file(self):
- c = CephDisk()
- disks = c.unused_disks()
- data_disk = disks[0]
- #
- # /var/lib/ceph/osd is required otherwise it may violate
- # restrictions enforced by systemd regarding the directories
- # which ceph-osd is allowed to read/write
- #
- tempdir = tempfile.mkdtemp(dir='/var/lib/ceph/osd')
- c.sh("chown ceph:ceph " + tempdir + " || true")
- journal_file = os.path.join(tempdir, 'journal')
- osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
- " " + data_disk + " " + journal_file)
- c.wait_for_osd_up(osd_uuid)
- device = json.loads(
- c.sh("ceph-disk list --format json " + data_disk))[0]
- assert len(device['partitions']) == 1
- partition = device['partitions'][0]
- assert journal_file == os.readlink(
- os.path.join(partition['mount'], 'journal'))
- c.check_osd_status(osd_uuid)
- c.helper("pool_read_write 1") # 1 == pool size
- c.destroy_osd(osd_uuid)
- c.sh("ceph-disk --verbose zap " + data_disk)
- os.unlink(journal_file)
- os.rmdir(tempdir)
-
- def test_activate_separated_journal(self):
- c = CephDisk()
- disks = c.unused_disks()
- data_disk = disks[0]
- journal_disk = disks[1]
- osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
- c.helper("pool_read_write 1") # 1 == pool size
- c.destroy_osd(osd_uuid)
- c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)
-
- def test_activate_separated_journal_dev_is_symlink(self):
- c = CephDisk()
- disks = c.unused_disks()
- data_disk = disks[0]
- journal_disk = disks[1]
- tempdir = tempfile.mkdtemp()
- data_symlink = os.path.join(tempdir, 'osd')
- os.symlink(data_disk, data_symlink)
- journal_symlink = os.path.join(tempdir, 'journal')
- os.symlink(journal_disk, journal_symlink)
- osd_uuid = self.activate_separated_journal(
- data_symlink, journal_symlink)
- c.helper("pool_read_write 1") # 1 == pool size
- c.destroy_osd(osd_uuid)
- c.sh("ceph-disk --verbose zap " + data_symlink + " " + journal_symlink)
- os.unlink(data_symlink)
- os.unlink(journal_symlink)
- os.rmdir(tempdir)
-
- def activate_separated_journal(self, data_disk, journal_disk):
- c = CephDisk()
- osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
- " " + data_disk + " " + journal_disk)
- c.wait_for_osd_up(osd_uuid)
- device = json.loads(
- c.sh("ceph-disk list --format json " + data_disk))[0]
- assert len(device['partitions']) == 1
- c.check_osd_status(osd_uuid, 'journal')
- return osd_uuid
-
- #
- # Create an OSD and get a journal partition from a disk that
- # already contains a journal partition which is in use. Updates of
- # the kernel partition table may behave differently when a
- # partition is in use. See http://tracker.ceph.com/issues/7334 for
- # more information.
- #
- def test_activate_two_separated_journal(self):
- c = CephDisk()
- disks = c.unused_disks()
- data_disk = disks[0]
- other_data_disk = disks[1]
- journal_disk = disks[2]
- osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
- other_osd_uuid = self.activate_separated_journal(
- other_data_disk, journal_disk)
- #
-        # read/write can only succeed if the two OSDs are up because
-        # the pool needs two OSDs
- #
- c.helper("pool_read_write 2") # 2 == pool size
- c.destroy_osd(osd_uuid)
- c.destroy_osd(other_osd_uuid)
- c.sh("ceph-disk --verbose zap " + data_disk + " " +
- journal_disk + " " + other_data_disk)
-
- #
- # Create an OSD and reuse an existing journal partition
- #
- def test_activate_reuse_journal(self):
- c = CephDisk()
- disks = c.unused_disks()
- data_disk = disks[0]
- journal_disk = disks[1]
- #
- # Create an OSD with a separated journal and destroy it.
- #
- osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
- journal_partition = c.get_journal_partition(osd_uuid)
- journal_path = journal_partition['path']
- c.destroy_osd(osd_uuid)
- c.sh("ceph-disk --verbose zap " + data_disk)
- osd_uuid = str(uuid.uuid1())
- #
- # Create another OSD with the journal partition of the previous OSD
- #
- c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
- " " + data_disk + " " + journal_path)
- c.helper("pool_read_write 1") # 1 == pool size
- c.wait_for_osd_up(osd_uuid)
- device = json.loads(
- c.sh("ceph-disk list --format json " + data_disk))[0]
- assert len(device['partitions']) == 1
- c.check_osd_status(osd_uuid)
- journal_partition = c.get_journal_partition(osd_uuid)
- #
- # Verify the previous OSD partition has been reused
- #
- assert journal_partition['path'] == journal_path
- c.destroy_osd(osd_uuid)
- c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)
-
- def test_activate_multipath(self):
- c = CephDisk()
- if c.sh("lsb_release -si").strip() != 'CentOS':
- pytest.skip(
- "see issue https://bugs.launchpad.net/ubuntu/+source/multipath-tools/+bug/1488688")
- c.ensure_sd()
- #
- # Figure out the name of the multipath device
- #
- disk = c.unused_disks('sd.')[0]
- c.sh("mpathconf --enable || true")
- c.sh("multipath " + disk)
- holders = os.listdir(
- "/sys/block/" + os.path.basename(disk) + "/holders")
- assert 1 == len(holders)
- name = open("/sys/block/" + holders[0] + "/dm/name").read()
- multipath = "/dev/mapper/" + name
- #
- # Prepare the multipath device
- #
- osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose zap " + multipath)
- c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
- " " + multipath)
- c.wait_for_osd_up(osd_uuid)
- device = json.loads(
- c.sh("ceph-disk list --format json " + multipath))[0]
- assert len(device['partitions']) == 2
- data_partition = c.get_osd_partition(osd_uuid)
- assert data_partition['type'] == 'data'
- assert data_partition['state'] == 'active'
- journal_partition = c.get_journal_partition(osd_uuid)
- assert journal_partition
- c.helper("pool_read_write")
- c.destroy_osd(osd_uuid)
- c.sh("udevadm settle")
- c.sh("multipath -F")
- c.unload_scsi_debug()
-
-
-class CephDiskTest(CephDisk):
-
- def main(self, argv):
- parser = argparse.ArgumentParser(
- 'ceph-disk-test',
- )
- parser.add_argument(
- '-v', '--verbose',
- action='store_true', default=None,
- help='be more verbose',
- )
- parser.add_argument(
- '--destroy-osd',
-            help='stop, umount and destroy the OSD with this id',
- )
- args = parser.parse_args(argv)
-
- if args.verbose:
- logging.basicConfig(level=logging.DEBUG)
-
- if args.destroy_osd:
- dump = json.loads(CephDisk.sh("ceph osd dump -f json"))
- osd_uuid = None
- for osd in dump['osds']:
- if str(osd['osd']) == args.destroy_osd:
- osd_uuid = osd['uuid']
- if osd_uuid:
- self.destroy_osd(osd_uuid)
- else:
- raise Exception("cannot find OSD " + args.destroy_osd +
- " ceph osd dump -f json")
- return
-
-if __name__ == '__main__':
- sys.exit(CephDiskTest().main(sys.argv[1:]))
diff --git a/src/ceph/qa/workunits/ceph-disk/ceph-disk.sh b/src/ceph/qa/workunits/ceph-disk/ceph-disk.sh
deleted file mode 100755
index 7102efb..0000000
--- a/src/ceph/qa/workunits/ceph-disk/ceph-disk.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-if [ -f $(dirname $0)/../ceph-helpers-root.sh ]; then
- source $(dirname $0)/../ceph-helpers-root.sh
-else
- echo "$(dirname $0)/../ceph-helpers-root.sh does not exist."
- exit 1
-fi
-
-install python-pytest || true
-install pytest || true
-
-# complete the cluster setup done by the teuthology ceph task
-sudo chown $(id -u) /etc/ceph/ceph.conf
-if ! test -f /etc/ceph/ceph.client.admin.keyring ; then
- sudo cp /etc/ceph/ceph.keyring /etc/ceph/ceph.client.admin.keyring
-fi
-if ! sudo test -f /var/lib/ceph/bootstrap-osd/ceph.keyring ; then
- sudo ceph-create-keys --id a
-fi
-sudo ceph osd crush rm osd.0 || true
-sudo ceph osd crush rm osd.1 || true
-
-sudo cp $(dirname $0)/60-ceph-by-partuuid.rules /lib/udev/rules.d
-sudo udevadm control --reload
-
-perl -pi -e 's|pid file.*|pid file = /var/run/ceph/\$cluster-\$name.pid|' /etc/ceph/ceph.conf
-
-PATH=$(dirname $0):$(dirname $0)/..:$PATH
-
-: ${PYTHON:=python}
-PY_VERSION=$($PYTHON --version 2>&1)
-
-if ! ${PYTHON} -m pytest --version > /dev/null 2>&1; then
- echo "py.test not installed for ${PY_VERSION}"
- exit 1
-fi
-
-sudo env PATH=$(dirname $0):$(dirname $0)/..:$PATH PYTHONWARNINGS=ignore ${PYTHON} -m pytest -s -v $(dirname $0)/ceph-disk-test.py
-result=$?
-
-sudo rm -f /lib/udev/rules.d/60-ceph-by-partuuid.rules
-# own whatever was created as a side effect of the py.test run
-# so that it can successfully be removed later on by a non privileged
-# process
-sudo chown -R $(id -u) $(dirname $0)
-exit $result
diff --git a/src/ceph/qa/workunits/ceph-helpers-root.sh b/src/ceph/qa/workunits/ceph-helpers-root.sh
deleted file mode 100755
index f65f591..0000000
--- a/src/ceph/qa/workunits/ceph-helpers-root.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 Red Hat <contact@redhat.com>
-#
-# Author: Loic Dachary <loic@dachary.org>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Library Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Library Public License for more details.
-#
-
-#######################################################################
-
-function install() {
- for package in "$@" ; do
- install_one $package
- done
- return 0
-}
-
-function install_one() {
- case $(lsb_release -si) in
- Ubuntu|Debian|Devuan)
- sudo apt-get install -y "$@"
- ;;
- CentOS|Fedora|RedHatEnterpriseServer)
- sudo yum install -y "$@"
- ;;
- *SUSE*)
- sudo zypper --non-interactive install "$@"
- ;;
- *)
- echo "$(lsb_release -si) is unknown, $@ will have to be installed manually."
- ;;
- esac
-}
-
-#######################################################################
-
-function control_osd() {
- local action=$1
- local id=$2
-
- local init=$(ceph-detect-init)
-
- case $init in
- upstart)
- sudo service ceph-osd $action id=$id
- ;;
- systemd)
- sudo systemctl $action ceph-osd@$id
- ;;
- *)
- echo ceph-detect-init returned an unknown init system: $init >&2
- return 1
- ;;
- esac
- return 0
-}
-
-#######################################################################
-
-function pool_read_write() {
- local size=${1:-1}
- local dir=/tmp
- local timeout=360
- local test_pool=test_pool
-
- ceph osd pool delete $test_pool $test_pool --yes-i-really-really-mean-it || return 1
- ceph osd pool create $test_pool 4 || return 1
- ceph osd pool set $test_pool size $size || return 1
- ceph osd pool set $test_pool min_size $size || return 1
- ceph osd pool application enable $test_pool rados
-
- echo FOO > $dir/BAR
- timeout $timeout rados --pool $test_pool put BAR $dir/BAR || return 1
- timeout $timeout rados --pool $test_pool get BAR $dir/BAR.copy || return 1
- diff $dir/BAR $dir/BAR.copy || return 1
- ceph osd pool delete $test_pool $test_pool --yes-i-really-really-mean-it || return 1
-}
-
-#######################################################################
-
-set -x
-
-"$@"
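The bare "$@" on the last line turns this helper library into a small CLI: 'ceph-helpers-root.sh pool_read_write 2' runs the shell function pool_read_write with argument 2, which is how CephDisk.helper() in ceph-disk-test.py calls into it. A Python analogue of that name-based dispatch, for illustration:

    import sys

    def pool_read_write(size='1'):
        print('would exercise a replicated pool of size', size)

    if __name__ == '__main__':
        # argv[1] names the function to run; the rest are its arguments
        globals()[sys.argv[1]](*sys.argv[2:])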
diff --git a/src/ceph/qa/workunits/ceph-tests/ceph-admin-commands.sh b/src/ceph/qa/workunits/ceph-tests/ceph-admin-commands.sh
deleted file mode 100755
index 4d850c3..0000000
--- a/src/ceph/qa/workunits/ceph-tests/ceph-admin-commands.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh -e
-
-#check ceph health
-ceph -s
-#list pools
-rados lspools
-#list rbd images
-ceph osd pool create rbd 128 128
-rbd ls
-#check that the monitors work
-ceph osd set nodown
-ceph osd unset nodown
-
-exit 0
diff --git a/src/ceph/qa/workunits/cephtool/test.sh b/src/ceph/qa/workunits/cephtool/test.sh
deleted file mode 100755
index 1534417..0000000
--- a/src/ceph/qa/workunits/cephtool/test.sh
+++ /dev/null
@@ -1,2621 +0,0 @@
-#!/bin/bash -x
-# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
-# vim: ts=8 sw=8 ft=bash smarttab
-
-source $(dirname $0)/../../standalone/ceph-helpers.sh
-
-set -e
-set -o functrace
-PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
-SUDO=${SUDO:-sudo}
-export CEPH_DEV=1
-
-function get_admin_socket()
-{
- local client=$1
-
- if test -n "$CEPH_ASOK_DIR";
- then
- echo $(get_asok_dir)/$client.asok
- else
- local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
- echo "/var/run/ceph/$cluster-$client.asok"
- fi
-}
-
-function check_no_osd_down()
-{
- ! ceph osd dump | grep ' down '
-}
-
-function wait_no_osd_down()
-{
- max_run=300
- for i in $(seq 1 $max_run) ; do
- if ! check_no_osd_down ; then
- echo "waiting for osd(s) to come back up ($i/$max_run)"
- sleep 1
- else
- break
- fi
- done
- check_no_osd_down
-}
-
-function expect_false()
-{
- set -x
- if "$@"; then return 1; else return 0; fi
-}
-
-
-TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
-trap "rm -fr $TEMP_DIR" 0
-
-TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
-
-#
-# retry_eagain max cmd args ...
-#
-# retry cmd args ... if it exits on error and its output contains the
-# string EAGAIN, at most $max times
-#
-function retry_eagain()
-{
- local max=$1
- shift
- local status
- local tmpfile=$TEMP_DIR/retry_eagain.$$
- local count
- for count in $(seq 1 $max) ; do
- status=0
- "$@" > $tmpfile 2>&1 || status=$?
- if test $status = 0 ||
- ! grep --quiet EAGAIN $tmpfile ; then
- break
- fi
- sleep 1
- done
- if test $count = $max ; then
- echo retried with non zero exit status, $max times: "$@" >&2
- fi
- cat $tmpfile
- rm $tmpfile
- return $status
-}
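retry_eagain retries a command whose output contains EAGAIN, smoothing over transient monitor unavailability. The same idea in Python (a sketch for Python 3.7+, not a drop-in replacement for the shell helper):

    import subprocess
    import time

    def retry_eagain(max_tries, *cmd):
        for _ in range(max_tries):
            proc = subprocess.run(cmd, capture_output=True, text=True)
            done = proc.returncode == 0 or 'EAGAIN' not in proc.stdout + proc.stderr
            if done:
                break
            time.sleep(1)
        print(proc.stdout, end='')
        return proc.returncode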
-
-#
-# map_enxio_to_eagain cmd arg ...
-#
-# add EAGAIN to the output of cmd arg ... if the output contains
-# ENXIO.
-#
-function map_enxio_to_eagain()
-{
- local status=0
- local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
-
- "$@" > $tmpfile 2>&1 || status=$?
- if test $status != 0 &&
- grep --quiet ENXIO $tmpfile ; then
- echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
- fi
- cat $tmpfile
- rm $tmpfile
- return $status
-}
-
-function check_response()
-{
- expected_string=$1
- retcode=$2
- expected_retcode=$3
- if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
- echo "return code invalid: got $retcode, expected $expected_retcode" >&2
- exit 1
- fi
-
- if ! grep --quiet -- "$expected_string" $TMPFILE ; then
- echo "Didn't find $expected_string in output" >&2
- cat $TMPFILE >&2
- exit 1
- fi
-}
-
-function get_config_value_or_die()
-{
- local target config_opt raw val
-
- target=$1
- config_opt=$2
-
- raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
- if [[ $? -ne 0 ]]; then
- echo "error obtaining config opt '$config_opt' from '$target': $raw"
- exit 1
- fi
-
- raw=`echo $raw | sed -e 's/[{} "]//g'`
- val=`echo $raw | cut -f2 -d:`
-
- echo "$val"
- return 0
-}
-
-function expect_config_value()
-{
- local target config_opt expected_val val
- target=$1
- config_opt=$2
- expected_val=$3
-
- val=$(get_config_value_or_die $target $config_opt)
-
- if [[ "$val" != "$expected_val" ]]; then
- echo "expected '$expected_val', got '$val'"
- exit 1
- fi
-}
-
-function ceph_watch_start()
-{
- local whatch_opt=--watch
-
- if [ -n "$1" ]; then
- whatch_opt=--watch-$1
- if [ -n "$2" ]; then
- whatch_opt+=" --watch-channel $2"
- fi
- fi
-
- CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
- ceph $whatch_opt > $CEPH_WATCH_FILE &
- CEPH_WATCH_PID=$!
-
- # wait until the "ceph" client is connected and receiving
- # log messages from monitor
- for i in `seq 3`; do
- grep -q "cluster" $CEPH_WATCH_FILE && break
- sleep 1
- done
-}
-
-function ceph_watch_wait()
-{
- local regexp=$1
- local timeout=30
-
- if [ -n "$2" ]; then
- timeout=$2
- fi
-
- for i in `seq ${timeout}`; do
- grep -q "$regexp" $CEPH_WATCH_FILE && break
- sleep 1
- done
-
- kill $CEPH_WATCH_PID
-
- if ! grep "$regexp" $CEPH_WATCH_FILE; then
- echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
- cat $CEPH_WATCH_FILE >&2
- return 1
- fi
-}
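ceph_watch_start and ceph_watch_wait bracket a test with a background 'ceph --watch' process whose output accumulates in a file, which is then polled for an expected log line. A rough Python equivalent of the pattern (assuming the ceph CLI is on PATH):

    import subprocess
    import time

    def watch_for(pattern, logpath='/tmp/ceph.watch', timeout=30):
        # start a background watcher, then poll its log for the pattern
        log = open(logpath, 'w')
        proc = subprocess.Popen(['ceph', '--watch'], stdout=log)
        try:
            for _ in range(timeout):
                if pattern in open(logpath).read():
                    return True
                time.sleep(1)
            return False
        finally:
            proc.terminate()
            log.close()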
-
-function test_mon_injectargs()
-{
- CEPH_ARGS='--mon_debug_dump_location the.dump' ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
- check_response "osd_enable_op_tracker = 'false'"
- ! grep "the.dump" $TMPFILE || return 1
- ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' >& $TMPFILE || return 1
- check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '500'"
- ceph tell osd.0 injectargs --no-osd_enable_op_tracker >& $TMPFILE || return 1
- check_response "osd_enable_op_tracker = 'false'"
- ceph tell osd.0 injectargs -- --osd_enable_op_tracker >& $TMPFILE || return 1
- check_response "osd_enable_op_tracker = 'true'"
- ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' >& $TMPFILE || return 1
- check_response "osd_enable_op_tracker = 'true' osd_op_history_duration = '600'"
- expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
- ceph tell osd.0 injectargs -- '--osd_op_history_duration'
-
- ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200' >& $TMPFILE || return 1
- check_response "osd_deep_scrub_interval = '2419200.000000' (not observed, change may require restart)"
-
- ceph tell osd.0 injectargs -- '--mon_probe_timeout 2' >& $TMPFILE || return 1
- check_response "mon_probe_timeout = '2.000000' (not observed, change may require restart)"
-
- ceph tell osd.0 injectargs -- '--mon-lease 6' >& $TMPFILE || return 1
- check_response "mon_lease = '6.000000' (not observed, change may require restart)"
-
- # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
- expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 >& $TMPFILE || return 1
- check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
-}
-
-function test_mon_injectargs_SI()
-{
- # Test SI units during injectargs and 'config set'
-  # We only aim to test that the units are parsed correctly;
-  # we don't intend to test whether the options being set
-  # actually expect SI units to be passed.
- # Keep in mind that all integer based options (i.e., INT,
- # LONG, U32, U64) will accept SI unit modifiers.
- initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
- $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
- expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
- $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
- expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
- $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
- expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
- $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
- check_response "'10F': (22) Invalid argument"
- # now test with injectargs
- ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
- expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
- ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
- expect_config_value "mon.a" "mon_pg_warn_min_objects" 10240
- ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
- expect_config_value "mon.a" "mon_pg_warn_min_objects" 1073741824
- expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
- expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
- $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
-}
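The expected values above pin down the unit semantics: the suffixes are base-2 multipliers, so 10K = 10 * 1024 = 10240 and 1G = 2^30 = 1073741824, while an unrecognized suffix such as 10F is rejected. A tiny parser with the same behaviour, as a sketch:

    def parse_si(value):
        units = {'K': 1 << 10, 'M': 1 << 20, 'G': 1 << 30, 'T': 1 << 40}
        if value and value[-1] in units:
            return int(value[:-1]) * units[value[-1]]
        return int(value)  # raises ValueError for inputs like '10F'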
-
-function test_tiering_agent()
-{
- local slow=slow_eviction
- local fast=fast_eviction
- ceph osd pool create $slow 1 1
- ceph osd pool application enable $slow rados
- ceph osd pool create $fast 1 1
- ceph osd tier add $slow $fast
- ceph osd tier cache-mode $fast writeback
- ceph osd tier set-overlay $slow $fast
- ceph osd pool set $fast hit_set_type bloom
- rados -p $slow put obj1 /etc/group
- ceph osd pool set $fast target_max_objects 1
- ceph osd pool set $fast hit_set_count 1
- ceph osd pool set $fast hit_set_period 5
- # wait for the object to be evicted from the cache
- local evicted
- evicted=false
- for i in `seq 1 300` ; do
- if ! rados -p $fast ls | grep obj1 ; then
- evicted=true
- break
- fi
- sleep 1
- done
- $evicted # assert
- # the object is proxy read and promoted to the cache
- rados -p $slow get obj1 - >/dev/null
- # wait for the promoted object to be evicted again
- evicted=false
- for i in `seq 1 300` ; do
- if ! rados -p $fast ls | grep obj1 ; then
- evicted=true
- break
- fi
- sleep 1
- done
- $evicted # assert
- ceph osd tier remove-overlay $slow
- ceph osd tier remove $slow $fast
- ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
- ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
-}
-
-function test_tiering_1()
-{
- # tiering
- ceph osd pool create slow 2
- ceph osd pool application enable slow rados
- ceph osd pool create slow2 2
- ceph osd pool application enable slow2 rados
- ceph osd pool create cache 2
- ceph osd pool create cache2 2
- ceph osd tier add slow cache
- ceph osd tier add slow cache2
- expect_false ceph osd tier add slow2 cache
- # test some state transitions
- ceph osd tier cache-mode cache writeback
- expect_false ceph osd tier cache-mode cache forward
- ceph osd tier cache-mode cache forward --yes-i-really-mean-it
- expect_false ceph osd tier cache-mode cache readonly
- ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
- expect_false ceph osd tier cache-mode cache forward
- ceph osd tier cache-mode cache forward --yes-i-really-mean-it
- ceph osd tier cache-mode cache none
- ceph osd tier cache-mode cache writeback
- ceph osd tier cache-mode cache proxy
- ceph osd tier cache-mode cache writeback
- expect_false ceph osd tier cache-mode cache none
- expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
- # test with dirty objects in the tier pool
- # tier pool currently set to 'writeback'
- rados -p cache put /etc/passwd /etc/passwd
- flush_pg_stats
- # 1 dirty object in pool 'cache'
- ceph osd tier cache-mode cache proxy
- expect_false ceph osd tier cache-mode cache none
- expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
- ceph osd tier cache-mode cache writeback
- # remove object from tier pool
- rados -p cache rm /etc/passwd
- rados -p cache cache-flush-evict-all
- flush_pg_stats
- # no dirty objects in pool 'cache'
- ceph osd tier cache-mode cache proxy
- ceph osd tier cache-mode cache none
- ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
- TRIES=0
- while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
- do
- grep 'currently creating pgs' $TMPFILE
- TRIES=$(( $TRIES + 1 ))
- test $TRIES -ne 60
- sleep 3
- done
- expect_false ceph osd pool set cache pg_num 4
- ceph osd tier cache-mode cache none
- ceph osd tier set-overlay slow cache
- expect_false ceph osd tier set-overlay slow cache2
- expect_false ceph osd tier remove slow cache
- ceph osd tier remove-overlay slow
- ceph osd tier set-overlay slow cache2
- ceph osd tier remove-overlay slow
- ceph osd tier remove slow cache
- ceph osd tier add slow2 cache
- expect_false ceph osd tier set-overlay slow cache
- ceph osd tier set-overlay slow2 cache
- ceph osd tier remove-overlay slow2
- ceph osd tier remove slow2 cache
- ceph osd tier remove slow cache2
-
- # make sure a non-empty pool fails
- rados -p cache2 put /etc/passwd /etc/passwd
- while ! ceph df | grep cache2 | grep ' 1 ' ; do
- echo waiting for pg stats to flush
- sleep 2
- done
- expect_false ceph osd tier add slow cache2
- ceph osd tier add slow cache2 --force-nonempty
- ceph osd tier remove slow cache2
-
- ceph osd pool ls | grep cache2
- ceph osd pool ls -f json-pretty | grep cache2
- ceph osd pool ls detail | grep cache2
- ceph osd pool ls detail -f json-pretty | grep cache2
-
- ceph osd pool delete slow slow --yes-i-really-really-mean-it
- ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
- ceph osd pool delete cache cache --yes-i-really-really-mean-it
- ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
-}
-
-function test_tiering_2()
-{
- # make sure we can't clobber snapshot state
- ceph osd pool create snap_base 2
- ceph osd pool application enable snap_base rados
- ceph osd pool create snap_cache 2
- ceph osd pool mksnap snap_cache snapname
- expect_false ceph osd tier add snap_base snap_cache
- ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
- ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
-}
-
-function test_tiering_3()
-{
- # make sure we can't create snapshot on tier
- ceph osd pool create basex 2
- ceph osd pool application enable basex rados
- ceph osd pool create cachex 2
- ceph osd tier add basex cachex
- expect_false ceph osd pool mksnap cache snapname
- ceph osd tier remove basex cachex
- ceph osd pool delete basex basex --yes-i-really-really-mean-it
- ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
-}
-
-function test_tiering_4()
-{
- # make sure we can't create an ec pool tier
- ceph osd pool create eccache 2 2 erasure
- expect_false ceph osd set-require-min-compat-client bobtail
- ceph osd pool create repbase 2
- ceph osd pool application enable repbase rados
- expect_false ceph osd tier add repbase eccache
- ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
- ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
-}
-
-function test_tiering_5()
-{
- # convenient add-cache command
- ceph osd pool create slow 2
- ceph osd pool application enable slow rados
- ceph osd pool create cache3 2
- ceph osd tier add-cache slow cache3 1024000
- ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
- ceph osd tier remove slow cache3 2> $TMPFILE || true
- check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
- ceph osd tier remove-overlay slow
- ceph osd tier remove slow cache3
- ceph osd pool ls | grep cache3
- ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
- ! ceph osd pool ls | grep cache3 || exit 1
- ceph osd pool delete slow slow --yes-i-really-really-mean-it
-}
-
-function test_tiering_6()
-{
-  # check whether add-cache works
- ceph osd pool create datapool 2
- ceph osd pool application enable datapool rados
- ceph osd pool create cachepool 2
- ceph osd tier add-cache datapool cachepool 1024000
- ceph osd tier cache-mode cachepool writeback
- rados -p datapool put object /etc/passwd
- rados -p cachepool stat object
- rados -p cachepool cache-flush object
- rados -p datapool stat object
- ceph osd tier remove-overlay datapool
- ceph osd tier remove datapool cachepool
- ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
- ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
-}
-
-function test_tiering_7()
-{
- # protection against pool removal when used as tiers
- ceph osd pool create datapool 2
- ceph osd pool application enable datapool rados
- ceph osd pool create cachepool 2
- ceph osd tier add-cache datapool cachepool 1024000
- ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
- check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
- ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
- check_response "EBUSY: pool 'datapool' has tiers cachepool"
- ceph osd tier remove-overlay datapool
- ceph osd tier remove datapool cachepool
- ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
- ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
-}
-
-function test_tiering_8()
-{
- ## check cache tier health checks
- ceph osd set notieragent
- ceph osd pool create datapool 2
- ceph osd pool application enable datapool rados
- ceph osd pool create cache4 2
- ceph osd tier add-cache datapool cache4 1024000
- ceph osd tier cache-mode cache4 writeback
- tmpfile=$(mktemp|grep tmp)
- dd if=/dev/zero of=$tmpfile bs=4K count=1
- ceph osd pool set cache4 target_max_objects 200
- ceph osd pool set cache4 target_max_bytes 1000000
- rados -p cache4 put foo1 $tmpfile
- rados -p cache4 put foo2 $tmpfile
- rm -f $tmpfile
- flush_pg_stats
- ceph df | grep datapool | grep ' 2 '
- ceph osd tier remove-overlay datapool
- ceph osd tier remove datapool cache4
- ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
- ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
- ceph osd unset notieragent
-}
-
-function test_tiering_9()
-{
- # make sure 'tier remove' behaves as we expect
- # i.e., removing a tier from a pool that is not its base pool only
- # results in a 'pool foo is now (or already was) not a tier of bar'
- # message
- #
- ceph osd pool create basepoolA 2
- ceph osd pool application enable basepoolA rados
- ceph osd pool create basepoolB 2
- ceph osd pool application enable basepoolB rados
- poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
- poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
-
- ceph osd pool create cache5 2
- ceph osd pool create cache6 2
- ceph osd tier add basepoolA cache5
- ceph osd tier add basepoolB cache6
- ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
- ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
- ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
- ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"
-
- ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
- ! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
- ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
- ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1
-
- ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
- ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1
-
- ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
- ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
- ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
- ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
-}
-
-function test_auth()
-{
- ceph auth add client.xx mon allow osd "allow *"
- ceph auth export client.xx >client.xx.keyring
- ceph auth add client.xx -i client.xx.keyring
- rm -f client.xx.keyring
- ceph auth list | grep client.xx
- ceph auth ls | grep client.xx
- ceph auth get client.xx | grep caps | grep mon
- ceph auth get client.xx | grep caps | grep osd
- ceph auth get-key client.xx
- ceph auth print-key client.xx
- ceph auth print_key client.xx
- ceph auth caps client.xx osd "allow rw"
- expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
- ceph auth get client.xx | grep osd | grep "allow rw"
- ceph auth export | grep client.xx
- ceph auth export -o authfile
- ceph auth import -i authfile
- ceph auth export -o authfile2
- diff authfile authfile2
- rm authfile authfile2
- ceph auth del client.xx
- expect_false ceph auth get client.xx
-
- # (almost) interactive mode
- echo -e 'auth add client.xx mon allow osd "allow *"\n' | ceph
- ceph auth get client.xx
- # script mode
- echo 'auth del client.xx' | ceph
- expect_false ceph auth get client.xx
-
- #
- # get / set auid
- #
- local auid=444
- ceph-authtool --create-keyring --name client.TEST --gen-key --set-uid $auid TEST-keyring
- expect_false ceph auth import --in-file TEST-keyring
- rm TEST-keyring
- ceph-authtool --create-keyring --name client.TEST --gen-key --cap mon "allow r" --set-uid $auid TEST-keyring
- ceph auth import --in-file TEST-keyring
- rm TEST-keyring
- ceph auth get client.TEST > $TMPFILE
- check_response "auid = $auid"
- ceph --format json-pretty auth get client.TEST > $TMPFILE
- check_response '"auid": '$auid
- ceph auth ls > $TMPFILE
- check_response "auid: $auid"
- ceph --format json-pretty auth ls > $TMPFILE
- check_response '"auid": '$auid
- ceph auth del client.TEST
-}
-
-function test_auth_profiles()
-{
- ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
- mgr 'allow profile read-only'
- ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
- mgr 'allow profile read-write'
- ceph auth add client.xx-profile-rd mon 'allow profile role-definer'
-
- ceph auth export > client.xx.keyring
-
- # read-only is allowed all read-only commands (auth excluded)
- ceph -n client.xx-profile-ro -k client.xx.keyring status
- ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
- ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
- ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
- ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
- # read-only gets access denied for rw commands or auth commands
- ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
- check_response "EACCES: access denied"
- ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
- check_response "EACCES: access denied"
- ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
- check_response "EACCES: access denied"
-
- # read-write is allowed for all read-write commands (except auth)
- ceph -n client.xx-profile-rw -k client.xx.keyring status
- ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
- ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
- ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
- ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
- ceph -n client.xx-profile-rw -k client.xx.keyring log foo
- ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
- ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
- # read-write gets access denied for auth commands
- ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
- check_response "EACCES: access denied"
-
- # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
- ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
- ceph -n client.xx-profile-rd -k client.xx.keyring auth export
- ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
- ceph -n client.xx-profile-rd -k client.xx.keyring status
- ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
- check_response "EACCES: access denied"
- ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
- check_response "EACCES: access denied"
- # read-only 'mon' subsystem commands are allowed
- ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
- # but read-write 'mon' commands are not
- ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
- check_response "EACCES: access denied"
- ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
- check_response "EACCES: access denied"
- ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
- check_response "EACCES: access denied"
- ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
- check_response "EACCES: access denied"
-
- ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
- ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
-
- # add a new role-definer with the existing role-definer
- ceph -n client.xx-profile-rd -k client.xx.keyring \
- auth add client.xx-profile-rd2 mon 'allow profile role-definer'
- ceph -n client.xx-profile-rd -k client.xx.keyring \
- auth export > client.xx.keyring.2
- # remove old role-definer using the new role-definer
- ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
- auth del client.xx-profile-rd
- # remove the remaining role-definer with admin
- ceph auth del client.xx-profile-rd2
- rm -f client.xx.keyring client.xx.keyring.2
-}
-
-function test_mon_caps()
-{
- ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
- chmod +r $TEMP_DIR/ceph.client.bug.keyring
- ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
- ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
-
- rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
- check_response "Permission denied"
-
- rm -rf $TEMP_DIR/ceph.client.bug.keyring
- ceph auth del client.bug
- ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
- chmod +r $TEMP_DIR/ceph.client.bug.keyring
- ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
- ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
- ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
- rados lspools --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
- check_response "Permission denied"
-}
-
-function test_mon_misc()
-{
- # with and without verbosity
- ceph osd dump | grep '^epoch'
- ceph --concise osd dump | grep '^epoch'
-
- ceph osd df | grep 'MIN/MAX VAR'
-
- # df
- ceph df > $TMPFILE
- grep GLOBAL $TMPFILE
- grep -v DIRTY $TMPFILE
- ceph df detail > $TMPFILE
- grep DIRTY $TMPFILE
- ceph df --format json > $TMPFILE
- grep 'total_bytes' $TMPFILE
- grep -v 'dirty' $TMPFILE
- ceph df detail --format json > $TMPFILE
- grep 'rd_bytes' $TMPFILE
- grep 'dirty' $TMPFILE
- ceph df --format xml | grep '<total_bytes>'
- ceph df detail --format xml | grep '<rd_bytes>'
-
- ceph fsid
- ceph health
- ceph health detail
- ceph health --format json-pretty
- ceph health detail --format xml-pretty
-
- ceph time-sync-status
-
- ceph node ls
- for t in mon osd mds ; do
- ceph node ls $t
- done
-
- ceph_watch_start
- mymsg="this is a test log message $$.$(date)"
- ceph log "$mymsg"
- ceph log last | grep "$mymsg"
- ceph log last 100 | grep "$mymsg"
- ceph_watch_wait "$mymsg"
-
- ceph mgr dump
- ceph mgr module ls
- ceph mgr module enable restful
- expect_false ceph mgr module enable foodne
- ceph mgr module enable foodne --force
- ceph mgr module disable foodne
- ceph mgr module disable foodnebizbangbash
-
- ceph mon metadata a
- ceph mon metadata
- ceph mon count-metadata ceph_version
- ceph mon versions
-
- ceph mgr metadata
- ceph mgr versions
- ceph mgr count-metadata ceph_version
-
- ceph versions
-
- ceph node ls
-}
-
-function check_mds_active()
-{
- fs_name=$1
- ceph fs get $fs_name | grep active
-}
-
-function wait_mds_active()
-{
- fs_name=$1
- max_run=300
- for i in $(seq 1 $max_run) ; do
- if ! check_mds_active $fs_name ; then
- echo "waiting for an active MDS daemon ($i/$max_run)"
- sleep 5
- else
- break
- fi
- done
- check_mds_active $fs_name
-}
-
-function get_mds_gids()
-{
- fs_name=$1
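- # print the GIDs of all MDS daemons in the named fs as one
- # space-separated line, parsed out of the mdsmap JSON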
- ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])"
-}
-
-function fail_all_mds()
-{
- fs_name=$1
- ceph fs set $fs_name cluster_down true
- mds_gids=$(get_mds_gids $fs_name)
- for mds_gid in $mds_gids ; do
- ceph mds fail $mds_gid
- done
- if check_mds_active $fs_name ; then
- echo "An active MDS remains, something went wrong"
- ceph fs get $fs_name
- exit 1
- fi
-}
-
-function remove_all_fs()
-{
- existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])")
- for fs_name in $existing_fs ; do
- echo "Removing fs ${fs_name}..."
- fail_all_mds $fs_name
- echo "Removing existing filesystem '${fs_name}'..."
- ceph fs rm $fs_name --yes-i-really-mean-it
- echo "Removed '${fs_name}'."
- done
-}
-
-# So that tests requiring MDS can skip if one is not configured
-# in the cluster at all
-function mds_exists()
-{
- ceph auth ls | grep "^mds"
-}
-
-# some of the commands are just not idempotent.
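-# When CEPH_CLI_TEST_DUP_COMMAND is set, the harness re-issues each command
-# to exercise idempotency; this wrapper temporarily unsets the variable so a
-# non-idempotent command (e.g. respawn) runs exactly once.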
-function without_test_dup_command()
-{
- if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
- "$@"
- else
- local saved=${CEPH_CLI_TEST_DUP_COMMAND}
- unset CEPH_CLI_TEST_DUP_COMMAND
- "$@"
- CEPH_CLI_TEST_DUP_COMMAND=$saved
- fi
-}
-
-function test_mds_tell()
-{
- local FS_NAME=cephfs
- if ! mds_exists ; then
- echo "Skipping test, no MDS found"
- return
- fi
-
- remove_all_fs
- ceph osd pool create fs_data 10
- ceph osd pool create fs_metadata 10
- ceph fs new $FS_NAME fs_metadata fs_data
- wait_mds_active $FS_NAME
-
- # Test injectargs by GID
- old_mds_gids=$(get_mds_gids $FS_NAME)
- echo Old GIDs: $old_mds_gids
-
- for mds_gid in $old_mds_gids ; do
- ceph tell mds.$mds_gid injectargs "--debug-mds 20"
- done
- expect_false ceph tell mds.a injectargs mds_max_file_recover -1
-
- # Test respawn by rank
- without_test_dup_command ceph tell mds.0 respawn
- new_mds_gids=$old_mds_gids
- while [ "$new_mds_gids" = "$old_mds_gids" ] ; do
- sleep 5
- new_mds_gids=$(get_mds_gids $FS_NAME)
- done
- echo New GIDs: $new_mds_gids
-
- # Test respawn by ID
- without_test_dup_command ceph tell mds.a respawn
- new_mds_gids=$old_mds_gids
- while [ "$new_mds_gids" = "$old_mds_gids" ] ; do
- sleep 5
- new_mds_gids=$(get_mds_gids $FS_NAME)
- done
- echo New GIDs: $new_mds_gids
-
- remove_all_fs
- ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
- ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
-}
-
-function test_mon_mds()
-{
- local FS_NAME=cephfs
- remove_all_fs
-
- ceph osd pool create fs_data 10
- ceph osd pool create fs_metadata 10
- ceph fs new $FS_NAME fs_metadata fs_data
-
- ceph fs set $FS_NAME cluster_down true
- ceph fs set $FS_NAME cluster_down false
-
- # Legacy commands, act on default fs
- ceph mds cluster_down
- ceph mds cluster_up
-
- ceph mds compat rm_incompat 4
- ceph mds compat rm_incompat 4
-
- # We don't want any MDSs to be up, their activity can interfere with
- # the "current_epoch + 1" checking below if they're generating updates
- fail_all_mds $FS_NAME
-
- ceph mds compat show
- expect_false ceph mds deactivate 2
- ceph mds dump
- ceph fs dump
- ceph fs get $FS_NAME
- for mds_gid in $(get_mds_gids $FS_NAME) ; do
- ceph mds metadata $mds_gid
- done
- ceph mds metadata
- ceph mds versions
- ceph mds count-metadata os
-
- # XXX mds fail, but how do you undo it?
- mdsmapfile=$TEMP_DIR/mdsmap.$$
- current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
- [ -s $mdsmapfile ]
- rm $mdsmapfile
-
- ceph osd pool create data2 10
- ceph osd pool create data3 10
- data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
- data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
- ceph mds add_data_pool $data2_pool
- ceph mds add_data_pool $data3_pool
- ceph mds add_data_pool 100 >& $TMPFILE || true
- check_response "Error ENOENT"
- ceph mds add_data_pool foobarbaz >& $TMPFILE || true
- check_response "Error ENOENT"
- ceph mds remove_data_pool $data2_pool
- ceph mds remove_data_pool $data3_pool
- ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
- ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
- ceph mds set allow_multimds false
- expect_false ceph mds set_max_mds 4
- ceph mds set allow_multimds true
- ceph mds set_max_mds 4
- ceph mds set_max_mds 3
- ceph mds set_max_mds 256
- expect_false ceph mds set_max_mds 257
- ceph mds set max_mds 4
- ceph mds set max_mds 256
- expect_false ceph mds set max_mds 257
- expect_false ceph mds set max_mds asdf
- expect_false ceph mds set inline_data true
- ceph mds set inline_data true --yes-i-really-mean-it
- ceph mds set inline_data yes --yes-i-really-mean-it
- ceph mds set inline_data 1 --yes-i-really-mean-it
- expect_false ceph mds set inline_data --yes-i-really-mean-it
- ceph mds set inline_data false
- ceph mds set inline_data no
- ceph mds set inline_data 0
- expect_false ceph mds set inline_data asdf
- ceph mds set max_file_size 1048576
- expect_false ceph mds set max_file_size 123asdf
-
- expect_false ceph mds set allow_new_snaps
- expect_false ceph mds set allow_new_snaps true
- ceph mds set allow_new_snaps true --yes-i-really-mean-it
- ceph mds set allow_new_snaps 0
- ceph mds set allow_new_snaps false
- ceph mds set allow_new_snaps no
- expect_false ceph mds set allow_new_snaps taco
-
- # we should never be able to add EC pools as data or metadata pools
- # create an ec-pool...
- ceph osd pool create mds-ec-pool 10 10 erasure
- set +e
- ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
- check_response 'erasure-code' $? 22
- set -e
- ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
- data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
- metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')
-
- fail_all_mds $FS_NAME
-
- set +e
- # Check that rmfailed requires confirmation
- expect_false ceph mds rmfailed 0
- ceph mds rmfailed 0 --yes-i-really-mean-it
- set -e
-
- # Check that `newfs` is no longer permitted
- expect_false ceph mds newfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
-
- # Check that 'fs reset' runs
- ceph fs reset $FS_NAME --yes-i-really-mean-it
-
- # Check that creating a second FS fails by default
- ceph osd pool create fs_metadata2 10
- ceph osd pool create fs_data2 10
- set +e
- expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
- set -e
-
- # Check that setting enable_multiple enables creation of second fs
- ceph fs flag set enable_multiple true --yes-i-really-mean-it
- ceph fs new cephfs2 fs_metadata2 fs_data2
-
- # Clean up multi-fs stuff
- fail_all_mds cephfs2
- ceph fs rm cephfs2 --yes-i-really-mean-it
- ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
- ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it
-
- fail_all_mds $FS_NAME
-
- # Clean up to enable subsequent fs new tests
- ceph fs rm $FS_NAME --yes-i-really-mean-it
-
- set +e
- ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
- check_response 'erasure-code' $? 22
- ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
- check_response 'erasure-code' $? 22
- ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
- check_response 'erasure-code' $? 22
- set -e
-
- # ... now create a cache tier in front of the EC pool...
- ceph osd pool create mds-tier 2
- ceph osd tier add mds-ec-pool mds-tier
- ceph osd tier set-overlay mds-ec-pool mds-tier
- tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')
-
- # Use of a readonly tier should be forbidden
- ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
- set +e
- ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
- check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
- set -e
-
- # Use of a writeback tier should enable FS creation
- ceph osd tier cache-mode mds-tier writeback
- ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
-
- # While a FS exists using the tiered pools, I should not be allowed
- # to remove the tier
- set +e
- ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
- check_response 'in use by CephFS' $? 16
- ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
- check_response 'in use by CephFS' $? 16
- set -e
-
- fail_all_mds $FS_NAME
- ceph fs rm $FS_NAME --yes-i-really-mean-it
-
- # ... but we should be forbidden from using the cache pool in the FS directly.
- set +e
- ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
- check_response 'in use as a cache tier' $? 22
- ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
- check_response 'in use as a cache tier' $? 22
- ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
- check_response 'in use as a cache tier' $? 22
- set -e
-
- # Clean up tier + EC pools
- ceph osd tier remove-overlay mds-ec-pool
- ceph osd tier remove mds-ec-pool mds-tier
-
- # Create a FS using the 'cache' pool now that it's no longer a tier
- ceph fs new $FS_NAME fs_metadata mds-tier --force
-
- # We should be forbidden from using this pool as a tier now that
- # it's in use for CephFS
- set +e
- ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
- check_response 'in use by CephFS' $? 16
- set -e
-
- fail_all_mds $FS_NAME
- ceph fs rm $FS_NAME --yes-i-really-mean-it
-
- # We should be permitted to use an EC pool with overwrites enabled
- # as the data pool...
- ceph osd pool set mds-ec-pool allow_ec_overwrites true
- ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
- fail_all_mds $FS_NAME
- ceph fs rm $FS_NAME --yes-i-really-mean-it
-
- # ...but not as the metadata pool
- set +e
- ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
- check_response 'erasure-code' $? 22
- set -e
-
- ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
-
- # Create a FS and check that we can subsequently add a cache tier to it
- ceph fs new $FS_NAME fs_metadata fs_data --force
-
- # Adding overlay to FS pool should be permitted, RADOS clients handle this.
- ceph osd tier add fs_metadata mds-tier
- ceph osd tier cache-mode mds-tier writeback
- ceph osd tier set-overlay fs_metadata mds-tier
-
- # Removing tier should be permitted because the underlying pool is
- # replicated (#11504 case)
- ceph osd tier cache-mode mds-tier proxy
- ceph osd tier remove-overlay fs_metadata
- ceph osd tier remove fs_metadata mds-tier
- ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
-
- # Clean up FS
- fail_all_mds $FS_NAME
- ceph fs rm $FS_NAME --yes-i-really-mean-it
-
- ceph mds stat
- # ceph mds tell mds.a getmap
- # ceph mds rm
- # ceph mds rmfailed
- # ceph mds set_state
- # ceph mds stop
-
- ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
- ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
-}
-
-function test_mon_mds_metadata()
-{
- local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
- test "$nmons" -gt 0
-
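- # parse mdsmap dump lines of the (hypothetical) form
- # 4233: 127.0.0.1:6813/2032 'a' mds.0.7 up:active seq 5
- # into "gid id rank" triples for the checks below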
- ceph mds dump |
- sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
- while read gid id rank; do
- ceph mds metadata ${gid} | grep '"hostname":'
- ceph mds metadata ${id} | grep '"hostname":'
- ceph mds metadata ${rank} | grep '"hostname":'
-
- local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
- test "$n" -eq "$nmons"
- done
-
- expect_false ceph mds metadata UNKNOWN
-}
-
-function test_mon_mon()
-{
- # print help message
- ceph --help mon
- # no mon add/remove
- ceph mon dump
- ceph mon getmap -o $TEMP_DIR/monmap.$$
- [ -s $TEMP_DIR/monmap.$$ ]
- # ceph mon tell
- ceph mon_status
-
- # test mon features
- ceph mon feature ls
- ceph mon feature set kraken --yes-i-really-mean-it
- expect_false ceph mon feature set abcd
- expect_false ceph mon feature set abcd --yes-i-really-mean-it
-}
-
-function gen_secrets_file()
-{
- # let's assume we can have the following types
- # all - generates both cephx and lockbox, with mock dm-crypt key
- # cephx - only cephx
- # no_cephx - lockbox and dm-crypt, no cephx
- # no_lockbox - dm-crypt and cephx, no lockbox
- # empty - empty file
- # empty_json - correct json, empty map
- # bad_json - bad json :)
- #
- local t=$1
- if [[ -z "$t" ]]; then
- t="all"
- fi
-
- fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
- echo $fn
- if [[ "$t" == "empty" ]]; then
- return 0
- fi
-
- echo "{" > $fn
- if [[ "$t" == "bad_json" ]]; then
- echo "asd: ; }" >> $fn
- return 0
- elif [[ "$t" == "empty_json" ]]; then
- echo "}" >> $fn
- return 0
- fi
-
- cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
- lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
- dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""
-
- if [[ "$t" == "all" ]]; then
- echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
- elif [[ "$t" == "cephx" ]]; then
- echo "$cephx_secret" >> $fn
- elif [[ "$t" == "no_cephx" ]]; then
- echo "$lb_secret,$dmcrypt_key" >> $fn
- elif [[ "$t" == "no_lockbox" ]]; then
- echo "$cephx_secret,$dmcrypt_key" >> $fn
- else
- echo "unknown gen_secrets_file() type \'$fn\'"
- return 1
- fi
- echo "}" >> $fn
- return 0
-}
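-
-# A minimal usage sketch for gen_secrets_file(): it prints the generated
-# filename, so callers capture it with command substitution. The keys shown
-# are placeholders, not real secrets.
-#
-# fn=$(gen_secrets_file "all")
-# cat $fn # { "cephx_secret": "AQD...", "cephx_lockbox_secret": "AQD...",
-# # "dmcrypt_key": "AQD..." }
-# rm $fn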
-
-function test_mon_osd_create_destroy()
-{
- ceph osd new 2>&1 | grep 'EINVAL'
- ceph osd new '' -1 2>&1 | grep 'EINVAL'
- ceph osd new '' 10 2>&1 | grep 'EINVAL'
-
- old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
-
- old_osds=$(ceph osd ls)
- num_osds=$(ceph osd ls | wc -l)
-
- uuid=$(uuidgen)
- id=$(ceph osd new $uuid 2>/dev/null)
-
- for i in $old_osds; do
- [[ "$i" != "$id" ]]
- done
-
- ceph osd find $id
-
- id2=`ceph osd new $uuid 2>/dev/null`
-
- [[ $id2 == $id ]]
-
- ceph osd new $uuid $id
-
- id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
- ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST
-
- uuid2=$(uuidgen)
- id2=$(ceph osd new $uuid2)
- ceph osd find $id2
- [[ "$id2" != "$id" ]]
-
- ceph osd new $uuid $id2 2>&1 | grep EEXIST
- ceph osd new $uuid2 $id2
-
- # test with secrets
- empty_secrets=$(gen_secrets_file "empty")
- empty_json=$(gen_secrets_file "empty_json")
- all_secrets=$(gen_secrets_file "all")
- cephx_only=$(gen_secrets_file "cephx")
- no_cephx=$(gen_secrets_file "no_cephx")
- no_lockbox=$(gen_secrets_file "no_lockbox")
- bad_json=$(gen_secrets_file "bad_json")
-
- # empty secrets should be idempotent
- new_id=$(ceph osd new $uuid $id -i $empty_secrets)
- [[ "$new_id" == "$id" ]]
-
- # empty json, thus empty secrets
- new_id=$(ceph osd new $uuid $id -i $empty_json)
- [[ "$new_id" == "$id" ]]
-
- ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'
-
- ceph osd rm $id
- ceph osd rm $id2
- ceph osd setmaxosd $old_maxosd
-
- ceph osd new $uuid -i $bad_json 2>&1 | grep 'EINVAL'
- ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
- ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'
-
- osds=$(ceph osd ls)
- id=$(ceph osd new $uuid -i $all_secrets)
- for i in $osds; do
- [[ "$i" != "$id" ]]
- done
-
- ceph osd find $id
-
- # validate secrets and dm-crypt are set
- k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
- s=$(cat $all_secrets | jq '.cephx_secret')
- [[ $k == $s ]]
- k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
- jq '.key')
- s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
- [[ $k == $s ]]
- ceph config-key exists dm-crypt/osd/$uuid/luks
-
- osds=$(ceph osd ls)
- id2=$(ceph osd new $uuid2 -i $cephx_only)
- for i in $osds; do
- [[ "$i" != "$id2" ]]
- done
-
- ceph osd find $id2
- k=$(ceph auth get-key osd.$id2 --format=json-pretty 2>/dev/null | jq '.key')
- s=$(cat $cephx_only | jq '.cephx_secret')
- [[ $k == $s ]]
- expect_false ceph auth get-key client.osd-lockbox.$uuid2
- expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks
-
- ceph osd destroy osd.$id2 --yes-i-really-mean-it
- ceph osd destroy $id2 --yes-i-really-mean-it
- ceph osd find $id2
- expect_false ceph auth get-key osd.$id2
- ceph osd dump | grep osd.$id2 | grep destroyed
-
- id3=$id2
- uuid3=$(uuidgen)
- ceph osd new $uuid3 $id3 -i $all_secrets
- ceph osd dump | grep osd.$id3 | expect_false grep destroyed
- ceph auth get-key client.osd-lockbox.$uuid3
- ceph auth get-key osd.$id3
- ceph config-key exists dm-crypt/osd/$uuid3/luks
-
- ceph osd purge osd.$id3 --yes-i-really-mean-it
- expect_false ceph osd find $id2
- expect_false ceph auth get-key osd.$id2
- expect_false ceph auth get-key client.osd-lockbox.$uuid3
- expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
- ceph osd purge osd.$id3 --yes-i-really-mean-it
- ceph osd purge osd.$id3 --yes-i-really-mean-it # idempotent
-
- ceph osd purge osd.$id --yes-i-really-mean-it
- ceph osd purge 123456 --yes-i-really-mean-it
- expect_false ceph osd find $id
- expect_false ceph auth get-key osd.$id
- expect_false ceph auth get-key client.osd-lockbox.$uuid
- expect_false ceph config-key exists dm-crypt/osd/$uuid/luks
-
- rm $empty_secrets $empty_json $all_secrets $cephx_only \
- $no_cephx $no_lockbox $bad_json
-
- for i in $(ceph osd ls); do
- [[ "$i" != "$id" ]]
- [[ "$i" != "$id2" ]]
- [[ "$i" != "$id3" ]]
- done
-
- [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
- ceph osd setmaxosd $old_maxosd
-}
-
-function test_mon_config_key()
-{
- key=asdfasdfqwerqwreasdfuniquesa123df
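- # 'grep -c' prints a match count instead of failing the pipeline, and the
- # pipeline's status comes from the final grep, so 'grep 0' / 'grep 1'
- # asserts the exact count under 'set -e'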
- ceph config-key list | grep -c $key | grep 0
- ceph config-key get $key | grep -c bar | grep 0
- ceph config-key set $key bar
- ceph config-key get $key | grep bar
- ceph config-key list | grep -c $key | grep 1
- ceph config-key dump | grep $key | grep bar
- ceph config-key rm $key
- expect_false ceph config-key get $key
- ceph config-key list | grep -c $key | grep 0
- ceph config-key dump | grep -c $key | grep 0
-}
-
-function test_mon_osd()
-{
- #
- # osd blacklist
- #
- bl=192.168.0.1:0/1000
- ceph osd blacklist add $bl
- ceph osd blacklist ls | grep $bl
- ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
- ceph osd dump --format=json-pretty | grep $bl
- ceph osd dump | grep "^blacklist $bl"
- ceph osd blacklist rm $bl
- ceph osd blacklist ls | expect_false grep $bl
-
- bl=192.168.0.1
- # test without nonce, invalid nonce
- ceph osd blacklist add $bl
- ceph osd blacklist ls | grep $bl
- ceph osd blacklist rm $bl
- ceph osd blacklist ls | expect_false grep $bl
- expect_false "ceph osd blacklist $bl/-1"
- expect_false "ceph osd blacklist $bl/foo"
-
- # test with wrong address
- expect_false "ceph osd blacklist 1234.56.78.90/100"
-
- # Test `clear`
- ceph osd blacklist add $bl
- ceph osd blacklist ls | grep $bl
- ceph osd blacklist clear
- ceph osd blacklist ls | expect_false grep $bl
-
- #
- # osd crush
- #
- ceph osd crush reweight-all
- ceph osd crush tunables legacy
- ceph osd crush show-tunables | grep argonaut
- ceph osd crush tunables bobtail
- ceph osd crush show-tunables | grep bobtail
- ceph osd crush tunables firefly
- ceph osd crush show-tunables | grep firefly
-
- ceph osd crush set-tunable straw_calc_version 0
- ceph osd crush get-tunable straw_calc_version | grep 0
- ceph osd crush set-tunable straw_calc_version 1
- ceph osd crush get-tunable straw_calc_version | grep 1
-
- #
- # require-min-compat-client
- expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
- ceph osd set-require-min-compat-client luminous
- ceph osd dump | grep 'require_min_compat_client luminous'
-
- #
- # osd scrub
- #
- # how do I tell when these are done?
- ceph osd scrub 0
- ceph osd deep-scrub 0
- ceph osd repair 0
-
- for f in noup nodown noin noout noscrub nodeep-scrub nobackfill norebalance norecover notieragent full
- do
- ceph osd set $f
- ceph osd unset $f
- done
- expect_false ceph osd unset sortbitwise # cannot be unset
- expect_false ceph osd set bogus
- expect_false ceph osd unset bogus
- ceph osd require-osd-release luminous
- # can't lower (or use new command for anything but jewel)
- expect_false ceph osd require-osd-release jewel
- # these are no-ops but should succeed.
- ceph osd set require_jewel_osds
- ceph osd set require_kraken_osds
- expect_false ceph osd unset require_jewel_osds
-
- ceph osd set noup
- ceph osd down 0
- ceph osd dump | grep 'osd.0 down'
- ceph osd unset noup
- max_run=1000
- for ((i=0; i < $max_run; i++)); do
- if ! ceph osd dump | grep 'osd.0 up'; then
- echo "waiting for osd.0 to come back up ($i/$max_run)"
- sleep 1
- else
- break
- fi
- done
- ceph osd dump | grep 'osd.0 up'
-
- ceph osd dump | grep 'osd.0 up'
- # ceph osd find expects the OsdName, so both ints and osd.n should work.
- ceph osd find 1
- ceph osd find osd.1
- expect_false ceph osd find osd.xyz
- expect_false ceph osd find xyz
- expect_false ceph osd find 0.1
- ceph --format plain osd find 1 # falls back to json-pretty
- if [ `uname` == Linux ]; then
- ceph osd metadata 1 | grep 'distro'
- ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
- fi
- ceph osd out 0
- ceph osd dump | grep 'osd.0.*out'
- ceph osd in 0
- ceph osd dump | grep 'osd.0.*in'
- ceph osd find 0
-
- ceph osd add-nodown 0 1
- ceph health detail | grep 'NODOWN'
- ceph osd rm-nodown 0 1
- ! ceph health detail | grep 'NODOWN'
-
- ceph osd out 0 # so we can mark it as noin later
- ceph osd add-noin 0
- ceph health detail | grep 'NOIN'
- ceph osd rm-noin 0
- ! ceph health detail | grep 'NOIN'
- ceph osd in 0
-
- ceph osd add-noout 0
- ceph health detail | grep 'NOOUT'
- ceph osd rm-noout 0
- ! ceph health detail | grep 'NOOUT'
-
- # test osd id parse
- expect_false ceph osd add-noup 797er
- expect_false ceph osd add-nodown u9uwer
- expect_false ceph osd add-noin 78~15
- expect_false ceph osd add-noout 0 all 1
-
- expect_false ceph osd rm-noup 1234567
- expect_false ceph osd rm-nodown fsadf7
- expect_false ceph osd rm-noin 0 1 any
- expect_false ceph osd rm-noout 790-fd
-
- ids=`ceph osd ls-tree default`
- for osd in $ids
- do
- ceph osd add-nodown $osd
- ceph osd add-noout $osd
- done
- ceph -s | grep 'NODOWN'
- ceph -s | grep 'NOOUT'
- ceph osd rm-nodown any
- ceph osd rm-noout all
- ! ceph -s | grep 'NODOWN'
- ! ceph -s | grep 'NOOUT'
-
- # make sure mark out preserves weight
- ceph osd reweight osd.0 .5
- ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
- ceph osd out 0
- ceph osd in 0
- ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
-
- f=$TEMP_DIR/osdmap.$$
- ceph osd getmap -o $f
- [ -s $f ]
- rm $f
- save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
- [ "$save" -gt 0 ]
- ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
- ceph osd setmaxosd 10
- ceph osd getmaxosd | grep 'max_osd = 10'
- ceph osd setmaxosd $save
- ceph osd getmaxosd | grep "max_osd = $save"
-
- for id in `ceph osd ls` ; do
- retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
- done
-
- ceph osd rm 0 2>&1 | grep 'EBUSY'
-
- local old_osds=$(echo $(ceph osd ls))
- id=`ceph osd create`
- ceph osd find $id
- ceph osd lost $id --yes-i-really-mean-it
- expect_false ceph osd setmaxosd $id
- local new_osds=$(echo $(ceph osd ls))
- for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
- ceph osd rm $id
- done
-
- uuid=`uuidgen`
- id=`ceph osd create $uuid`
- id2=`ceph osd create $uuid`
- [ "$id" = "$id2" ]
- ceph osd rm $id
-
- ceph --help osd
-
- # reset max_osd.
- ceph osd setmaxosd $id
- ceph osd getmaxosd | grep "max_osd = $save"
- local max_osd=$save
-
- ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
- ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
-
- id=`ceph osd create $uuid $max_osd`
- [ "$id" = "$max_osd" ]
- ceph osd find $id
- max_osd=$((max_osd + 1))
- ceph osd getmaxosd | grep "max_osd = $max_osd"
-
- ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
- ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
- id2=`ceph osd create $uuid`
- [ "$id" = "$id2" ]
- id2=`ceph osd create $uuid $id`
- [ "$id" = "$id2" ]
-
- uuid=`uuidgen`
- local gap_start=$max_osd
- id=`ceph osd create $uuid $((gap_start + 100))`
- [ "$id" = "$((gap_start + 100))" ]
- max_osd=$((id + 1))
- ceph osd getmaxosd | grep "max_osd = $max_osd"
-
- ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
-
- #
- # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
- # is repeated and consumes two osd ids, not just one.
- #
- local next_osd=$gap_start
- id=`ceph osd create $(uuidgen)`
- [ "$id" = "$next_osd" ]
-
- next_osd=$((id + 1))
- id=`ceph osd create $(uuidgen) $next_osd`
- [ "$id" = "$next_osd" ]
-
- local new_osds=$(echo $(ceph osd ls))
- for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
- [ $id -ge $save ]
- ceph osd rm $id
- done
- ceph osd setmaxosd $save
-
- ceph osd ls
- ceph osd pool create data 10
- ceph osd pool application enable data rados
- ceph osd lspools | grep data
- ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
- ceph osd map data foo namespace | grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
- ceph osd pool delete data data --yes-i-really-really-mean-it
-
- ceph osd pause
- ceph osd dump | grep 'flags.*pauserd,pausewr'
- ceph osd unpause
-
- ceph osd tree
- ceph osd tree up
- ceph osd tree down
- ceph osd tree in
- ceph osd tree out
- ceph osd tree destroyed
- ceph osd tree up in
- ceph osd tree up out
- ceph osd tree down in
- ceph osd tree down out
- ceph osd tree out down
- expect_false ceph osd tree up down
- expect_false ceph osd tree up destroyed
- expect_false ceph osd tree down destroyed
- expect_false ceph osd tree up down destroyed
- expect_false ceph osd tree in out
- expect_false ceph osd tree up foo
-
- ceph osd metadata
- ceph osd count-metadata os
- ceph osd versions
-
- ceph osd perf
- ceph osd blocked-by
-
- ceph osd stat | grep up,
-}
-
-function test_mon_crush()
-{
- f=$TEMP_DIR/map.$$
- epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
- [ -s $f ]
- [ "$epoch" -gt 1 ]
- nextepoch=$(( $epoch + 1 ))
- echo epoch $epoch nextepoch $nextepoch
- rm -f $f.epoch
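- # setcrushmap takes the current epoch as a guard against races: a stale
- # or future epoch is rejected, and a successful set bumps the epoch, so
- # both asserts below expect nextepoch back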
- expect_false ceph osd setcrushmap $nextepoch -i $f
- gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
- echo gotepoch $gotepoch
- [ "$gotepoch" -eq "$nextepoch" ]
- # should be idempotent
- gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
- echo epoch $gotepoch
- [ "$gotepoch" -eq "$nextepoch" ]
- rm $f
-}
-
-function test_mon_osd_pool()
-{
- #
- # osd pool
- #
- ceph osd pool create data 10
- ceph osd pool application enable data rados
- ceph osd pool mksnap data datasnap
- rados -p data lssnap | grep datasnap
- ceph osd pool rmsnap data datasnap
- expect_false ceph osd pool rmsnap pool_fake snapshot
- ceph osd pool delete data data --yes-i-really-really-mean-it
-
- ceph osd pool create data2 10
- ceph osd pool application enable data2 rados
- ceph osd pool rename data2 data3
- ceph osd lspools | grep data3
- ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
-
- ceph osd pool create replicated 12 12 replicated
- ceph osd pool create replicated 12 12 replicated
- ceph osd pool create replicated 12 12 # default is replicated
- ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num
- ceph osd pool application enable replicated rados
- # should fail because the type is not the same
- expect_false ceph osd pool create replicated 12 12 erasure
- ceph osd lspools | grep replicated
- ceph osd pool create ec_test 1 1 erasure
- ceph osd pool application enable ec_test rados
- set +e
- ceph osd count-metadata osd_objectstore | grep 'bluestore'
- if [ $? -eq 1 ]; then # enabling ec_overwrites on a non-bluestore pool should fail
- ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
- check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
- else
- ceph osd pool set ec_test allow_ec_overwrites true || return 1
- expect_false ceph osd pool set ec_test allow_ec_overwrites false
- fi
- set -e
- ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
- ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
-}
-
-function test_mon_osd_pool_quota()
-{
- #
- # test osd pool set/get quota
- #
-
- # create tmp pool
- ceph osd pool create tmp-quota-pool 36
- ceph osd pool application enable tmp-quota-pool rados
- #
- # set erroneous quotas
- #
- expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
- expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
- expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
- #
- # set valid quotas
- #
- ceph osd pool set-quota tmp-quota-pool max_bytes 10
- ceph osd pool set-quota tmp-quota-pool max_objects 10M
- #
- # get quotas
- #
- ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10B'
- ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10240k objects'
- #
- # get quotas in json-pretty format
- #
- ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
- grep '"quota_max_objects":.*10485760'
- ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
- grep '"quota_max_bytes":.*10'
- #
- # reset pool quotas
- #
- ceph osd pool set-quota tmp-quota-pool max_bytes 0
- ceph osd pool set-quota tmp-quota-pool max_objects 0
- #
- # test N/A quotas
- #
- ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
- ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
- #
- # cleanup tmp pool
- ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
-}
-
-function test_mon_pg()
-{
- # Make sure we start healthy.
- wait_for_health_ok
-
- ceph pg debug unfound_objects_exist
- ceph pg debug degraded_pgs_exist
- ceph pg deep-scrub 1.0
- ceph pg dump
- ceph pg dump pgs_brief --format=json
- ceph pg dump pgs --format=json
- ceph pg dump pools --format=json
- ceph pg dump osds --format=json
- ceph pg dump sum --format=json
- ceph pg dump all --format=json
- ceph pg dump pgs_brief osds --format=json
- ceph pg dump pools osds pgs_brief --format=json
- ceph pg dump_json
- ceph pg dump_pools_json
- ceph pg dump_stuck inactive
- ceph pg dump_stuck unclean
- ceph pg dump_stuck stale
- ceph pg dump_stuck undersized
- ceph pg dump_stuck degraded
- ceph pg ls
- ceph pg ls 1
- ceph pg ls stale
- expect_false ceph pg ls scrubq
- ceph pg ls active stale repair recovering
- ceph pg ls 1 active
- ceph pg ls 1 active stale
- ceph pg ls-by-primary osd.0
- ceph pg ls-by-primary osd.0 1
- ceph pg ls-by-primary osd.0 active
- ceph pg ls-by-primary osd.0 active stale
- ceph pg ls-by-primary osd.0 1 active stale
- ceph pg ls-by-osd osd.0
- ceph pg ls-by-osd osd.0 1
- ceph pg ls-by-osd osd.0 active
- ceph pg ls-by-osd osd.0 active stale
- ceph pg ls-by-osd osd.0 1 active stale
- ceph pg ls-by-pool rbd
- ceph pg ls-by-pool rbd active stale
- # can't test this...
- # ceph pg force_create_pg
- ceph pg getmap -o $TEMP_DIR/map.$$
- [ -s $TEMP_DIR/map.$$ ]
- ceph pg map 1.0 | grep acting
- ceph pg repair 1.0
- ceph pg scrub 1.0
-
- ceph osd set-full-ratio .962
- ceph osd dump | grep '^full_ratio 0.962'
- ceph osd set-backfillfull-ratio .912
- ceph osd dump | grep '^backfillfull_ratio 0.912'
- ceph osd set-nearfull-ratio .892
- ceph osd dump | grep '^nearfull_ratio 0.892'
-
- # Check health status
- ceph osd set-nearfull-ratio .913
- ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
- ceph health detail | grep OSD_OUT_OF_ORDER_FULL
- ceph osd set-nearfull-ratio .892
- ceph osd set-backfillfull-ratio .963
- ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
- ceph health detail | grep OSD_OUT_OF_ORDER_FULL
- ceph osd set-backfillfull-ratio .912
-
- # Check injected full results
- $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull
- wait_for_health "OSD_NEARFULL"
- ceph health detail | grep "osd.0 is near full"
- $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
- wait_for_health_ok
-
- $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull
- wait_for_health "OSD_BACKFILLFULL"
- ceph health detail | grep "osd.1 is backfill full"
- $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none
- wait_for_health_ok
-
- $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe
- # failsafe and full are the same as far as the monitor is concerned
- wait_for_health "OSD_FULL"
- ceph health detail | grep "osd.2 is full"
- $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none
- wait_for_health_ok
-
- $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full
- wait_for_health "OSD_FULL"
- ceph health detail | grep "osd.0 is full"
- $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none
- wait_for_health_ok
-
- ceph pg stat | grep 'pgs:'
- ceph pg 1.0 query
- ceph tell 1.0 query
- ceph quorum enter
- ceph quorum_status
- ceph report | grep osd_stats
- ceph status
- ceph -s
-
- #
- # tell osd version
- #
- ceph tell osd.0 version
- expect_false ceph tell osd.9999 version
- expect_false ceph tell osd.foo version
-
- # back to pg stuff
-
- ceph tell osd.0 dump_pg_recovery_stats | grep Started
-
- ceph osd reweight 0 0.9
- expect_false ceph osd reweight 0 -1
- ceph osd reweight osd.0 1
-
- ceph osd primary-affinity osd.0 .9
- expect_false ceph osd primary-affinity osd.0 -2
- expect_false ceph osd primary-affinity osd.9999 .5
- ceph osd primary-affinity osd.0 1
-
- ceph osd pool set rbd size 2
- ceph osd pg-temp 1.0 0 1
- ceph osd pg-temp 1.0 osd.1 osd.0
- expect_false ceph osd pg-temp 1.0 0 1 2
- expect_false ceph osd pg-temp asdf qwer
- expect_false ceph osd pg-temp 1.0 asdf
- expect_false ceph osd pg-temp 1.0
-
- # don't test ceph osd primary-temp for now
-}
-
-function test_mon_osd_pool_set()
-{
- TEST_POOL_GETSET=pool_getset
- ceph osd pool create $TEST_POOL_GETSET 1
- ceph osd pool application enable $TEST_POOL_GETSET rados
- wait_for_clean
- ceph osd pool get $TEST_POOL_GETSET all
-
- for s in pg_num pgp_num size min_size crush_rule; do
- ceph osd pool get $TEST_POOL_GETSET $s
- done
-
- old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
- (( new_size = old_size + 1 ))
- ceph osd pool set $TEST_POOL_GETSET size $new_size
- ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
- ceph osd pool set $TEST_POOL_GETSET size $old_size
-
- ceph osd pool create pool_erasure 1 1 erasure
- ceph osd pool application enable pool_erasure rados
- wait_for_clean
- set +e
- ceph osd pool set pool_erasure size 4444 2>$TMPFILE
- check_response 'not change the size'
- set -e
- ceph osd pool get pool_erasure erasure_code_profile
-
- auid=5555
- ceph osd pool set $TEST_POOL_GETSET auid $auid
- ceph osd pool get $TEST_POOL_GETSET auid | grep $auid
- ceph --format=xml osd pool get $TEST_POOL_GETSET auid | grep $auid
- ceph osd pool set $TEST_POOL_GETSET auid 0
-
- for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
- ceph osd pool set $TEST_POOL_GETSET $flag false
- ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
- ceph osd pool set $TEST_POOL_GETSET $flag true
- ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
- ceph osd pool set $TEST_POOL_GETSET $flag 1
- ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
- ceph osd pool set $TEST_POOL_GETSET $flag 0
- ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
- expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
- expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
- done
-
- ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
- ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
- ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
- ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
- ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
-
- ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
- ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
- ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
- ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
- ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
-
- ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
- ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
- ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
- ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
- ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
-
- ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
- ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
- ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
- ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
- ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
-
- ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
- ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
- ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
- ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
- ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
-
- ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
- ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
- ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
- ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
- ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
-
- ceph osd pool set $TEST_POOL_GETSET nopgchange 1
- expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
- expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
- ceph osd pool set $TEST_POOL_GETSET nopgchange 0
- ceph osd pool set $TEST_POOL_GETSET pg_num 10
- wait_for_clean
- ceph osd pool set $TEST_POOL_GETSET pgp_num 10
-
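- # the monitor caps a single pg_num increase at roughly
- # num_osds * mon_osd_max_split_count (32 by default), which the formula
- # below mirrors; one PG past that cap must be rejected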
- old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
- new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
- ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
- ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
- wait_for_clean
- old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
- new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32 + 1))
- expect_false ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
-
- ceph osd pool set $TEST_POOL_GETSET nosizechange 1
- expect_false ceph osd pool set $TEST_POOL_GETSET size 2
- expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
- ceph osd pool set $TEST_POOL_GETSET nosizechange 0
- ceph osd pool set $TEST_POOL_GETSET size 2
- wait_for_clean
- ceph osd pool set $TEST_POOL_GETSET min_size 2
-
- expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
- ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
-
- expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
- ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
-
- ceph osd pool get rbd crush_rule | grep 'crush_rule: '
-
- ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
- ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
- ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
- ceph osd pool set $TEST_POOL_GETSET compression_mode unset
- ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
-
- ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
- ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
- ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
- ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
- ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
-
- ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
- expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
- expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
- ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
- ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
- ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
- ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
-
- ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
- ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
- ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
- ceph osd pool set $TEST_POOL_GETSET csum_type unset
- ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
-
- for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
- ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
- ceph osd pool set $TEST_POOL_GETSET $size 100
- ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
- ceph osd pool set $TEST_POOL_GETSET $size 0
- ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
- done
-
- ceph osd pool set $TEST_POOL_GETSET nodelete 1
- expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
- ceph osd pool set $TEST_POOL_GETSET nodelete 0
- ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
-}
-
-function test_mon_osd_tiered_pool_set()
-{
- # this is really a tier pool
- ceph osd pool create real-tier 2
- ceph osd tier add rbd real-tier
-
- ceph osd pool set real-tier hit_set_type explicit_hash
- ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
- ceph osd pool set real-tier hit_set_type explicit_object
- ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
- ceph osd pool set real-tier hit_set_type bloom
- ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
- expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
- ceph osd pool set real-tier hit_set_period 123
- ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
- ceph osd pool set real-tier hit_set_count 12
- ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
- ceph osd pool set real-tier hit_set_fpp .01
- ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
-
- ceph osd pool set real-tier target_max_objects 123
- ceph osd pool get real-tier target_max_objects | \
- grep 'target_max_objects:[ \t]\+123'
- ceph osd pool set real-tier target_max_bytes 123456
- ceph osd pool get real-tier target_max_bytes | \
- grep 'target_max_bytes:[ \t]\+123456'
- ceph osd pool set real-tier cache_target_dirty_ratio .123
- ceph osd pool get real-tier cache_target_dirty_ratio | \
- grep 'cache_target_dirty_ratio:[ \t]\+0.123'
- expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
- expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
- ceph osd pool set real-tier cache_target_dirty_high_ratio .123
- ceph osd pool get real-tier cache_target_dirty_high_ratio | \
- grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
- expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
- expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
- ceph osd pool set real-tier cache_target_full_ratio .123
- ceph osd pool get real-tier cache_target_full_ratio | \
- grep 'cache_target_full_ratio:[ \t]\+0.123'
- ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
- ceph osd pool set real-tier cache_target_full_ratio 1.0
- ceph osd pool set real-tier cache_target_full_ratio 0
- expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
- ceph osd pool set real-tier cache_min_flush_age 123
- ceph osd pool get real-tier cache_min_flush_age | \
- grep 'cache_min_flush_age:[ \t]\+123'
- ceph osd pool set real-tier cache_min_evict_age 234
- ceph osd pool get real-tier cache_min_evict_age | \
- grep 'cache_min_evict_age:[ \t]\+234'
-
- # this is not a tier pool
- ceph osd pool create fake-tier 2
- ceph osd pool application enable fake-tier rados
- wait_for_clean
-
- expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
- expect_false ceph osd pool get fake-tier hit_set_type
- expect_false ceph osd pool set fake-tier hit_set_type explicit_object
- expect_false ceph osd pool get fake-tier hit_set_type
- expect_false ceph osd pool set fake-tier hit_set_type bloom
- expect_false ceph osd pool get fake-tier hit_set_type
- expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
- expect_false ceph osd pool set fake-tier hit_set_period 123
- expect_false ceph osd pool get fake-tier hit_set_period
- expect_false ceph osd pool set fake-tier hit_set_count 12
- expect_false ceph osd pool get fake-tier hit_set_count
- expect_false ceph osd pool set fake-tier hit_set_fpp .01
- expect_false ceph osd pool get fake-tier hit_set_fpp
-
- expect_false ceph osd pool set fake-tier target_max_objects 123
- expect_false ceph osd pool get fake-tier target_max_objects
- expect_false ceph osd pool set fake-tier target_max_bytes 123456
- expect_false ceph osd pool get fake-tier target_max_bytes
- expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
- expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
- expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
- expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
- expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
- expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
- expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
- expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
- expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
- expect_false ceph osd pool get fake-tier cache_target_full_ratio
- expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
- expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
- expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
- expect_false ceph osd pool set fake-tier cache_min_flush_age 123
- expect_false ceph osd pool get fake-tier cache_min_flush_age
- expect_false ceph osd pool set fake-tier cache_min_evict_age 234
- expect_false ceph osd pool get fake-tier cache_min_evict_age
-
- ceph osd tier remove rbd real-tier
- ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
- ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
-}
-
-function test_mon_osd_erasure_code()
-{
-
-  ceph osd erasure-code-profile set fooprofile a=b c=d
-  ceph osd erasure-code-profile set fooprofile a=b c=d # setting the same values again is idempotent
-  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f # changing a profile requires --force
-  ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
-  ceph osd erasure-code-profile set fooprofile a=b c=d e=f # idempotent with the new values
-  expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
-  # the legacy ruleset-* option names are accepted in luminous only
-  ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
-  ceph osd erasure-code-profile set barprofile crush-failure-domain=host
- # clean up
- ceph osd erasure-code-profile rm fooprofile
- ceph osd erasure-code-profile rm barprofile
-}
-
-function test_mon_osd_misc()
-{
- set +e
-
- # expect error about missing 'pool' argument
- ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
-
- # expect error about unused argument foo
- ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
-
- # expect "not in range" for invalid full ratio
- ceph pg set_full_ratio 95 2>$TMPFILE; check_response 'not in range' $? 22
-
- # expect "not in range" for invalid overload percentage
- ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
-
- set -e
-
- ceph osd reweight-by-utilization 110
- ceph osd reweight-by-utilization 110 .5
- expect_false ceph osd reweight-by-utilization 110 0
- expect_false ceph osd reweight-by-utilization 110 -0.1
- ceph osd test-reweight-by-utilization 110 .5 --no-increasing
- ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
- expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
- expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
- ceph osd reweight-by-pg 110
- ceph osd test-reweight-by-pg 110 .5
- ceph osd reweight-by-pg 110 rbd
- ceph osd reweight-by-pg 110 .5 rbd
- expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
-}
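-
-# Reference for the reweight arguments exercised above (inferred from the
-# expectations in test_mon_osd_misc, not from authoritative docs):
-#   ceph osd [test-]reweight-by-utilization <overload%> [<max_change>] [<max_osds>]
-# where <overload%> must exceed 100 and the optional <max_change> fraction
-# and <max_osds> count must both be positive.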
-
-function test_mon_heap_profiler()
-{
- do_test=1
- set +e
- # expect 'heap' commands to be correctly parsed
- ceph heap stats 2>$TMPFILE
- if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
- echo "tcmalloc not enabled; skip heap profiler test"
- do_test=0
- fi
- set -e
-
- [[ $do_test -eq 0 ]] && return 0
-
- ceph heap start_profiler
- ceph heap dump
- ceph heap stop_profiler
- ceph heap release
-}
-
-function test_admin_heap_profiler()
-{
- do_test=1
- set +e
- # expect 'heap' commands to be correctly parsed
- ceph heap stats 2>$TMPFILE
- if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
- echo "tcmalloc not enabled; skip heap profiler test"
- do_test=0
- fi
- set -e
-
- [[ $do_test -eq 0 ]] && return 0
-
- local admin_socket=$(get_admin_socket osd.0)
-
- $SUDO ceph --admin-daemon $admin_socket heap start_profiler
- $SUDO ceph --admin-daemon $admin_socket heap dump
- $SUDO ceph --admin-daemon $admin_socket heap stop_profiler
- $SUDO ceph --admin-daemon $admin_socket heap release
-}
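-
-# test_mon_heap_profiler and test_admin_heap_profiler duplicate the same
-# tcmalloc probe; a sketch of a shared helper (hypothetical, not wired into
-# the tests above, which are kept self-contained):
-function heap_profiler_enabled()
-{
-  local rc
-  set +e
-  ceph heap stats 2>$TMPFILE
-  rc=$?
-  set -e
-  if [[ $rc -eq 22 ]] && grep -q 'tcmalloc not enabled' $TMPFILE; then
-    return 1
-  fi
-  return 0
-}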
-
-function test_osd_bench()
-{
-  # test osd bench limits
-  # As we should not rely on defaults (as they may change over time),
-  # let's inject some values and perform some simple tests
-  # max iops: 10             # 10 IOPS
-  # max throughput: 10485760 # 10 MB/s
-  # max block size: 2097152  # 2 MB
-  # duration: 10             # 10 seconds
-
- local args="\
- --osd-bench-duration 10 \
- --osd-bench-max-block-size 2097152 \
- --osd-bench-large-size-max-throughput 10485760 \
- --osd-bench-small-size-max-iops 10"
- ceph tell osd.0 injectargs ${args## }
-
-  # anything with a bs larger than 2097152 must fail
-  expect_false ceph tell osd.0 bench 1 2097153
-  # but a bs equal to the injected osd-bench-max-block-size must succeed
-  ceph tell osd.0 bench 1 2097152
-
-  # we assume 1MB as a large bs; anything lower is a small bs
-  # for a 4096-byte bs over 10 seconds we are limited by IOPS:
-  # max count = 10 IOPS * 10 s * 4096 bytes = 409600 bytes
-
-  # more than max count must not be allowed
-  expect_false ceph tell osd.0 bench 409601 4096
-  # but exactly 409600 must succeed
- ceph tell osd.0 bench 409600 4096
-
- # for a large bs, we are limited by throughput.
-  # for a 2MB block size over 10 seconds, assuming 10MB/s throughput,
-  # the max count will be 10 MB/s * 10 s = 100 MB
- # max count: 104857600 (bytes)
-
- # more than max count must not be allowed
- expect_false ceph tell osd.0 bench 104857601 2097152
- # up to max count must be allowed
- ceph tell osd.0 bench 104857600 2097152
-}
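-
-# A sketch of how the caps injected in test_osd_bench combine (illustration
-# only; the 1MB small/large threshold mirrors the comment in the test):
-function osd_bench_max_count()
-{
-  local bs=$1
-  if [ $bs -lt 1048576 ]; then
-    echo $((10 * 10 * bs))    # small bs: iops limit * duration * bs
-  else
-    echo $((10485760 * 10))   # large bs: throughput limit * duration
-  fi
-}
-# e.g. osd_bench_max_count 4096 => 409600; osd_bench_max_count 2097152 => 104857600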
-
-function test_osd_negative_filestore_merge_threshold()
-{
- $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
- expect_config_value "osd.0" "filestore_merge_threshold" -1
-}
-
-function test_mon_tell()
-{
- ceph tell mon.a version
- ceph tell mon.b version
- expect_false ceph tell mon.foo version
-
- sleep 1
-
- ceph_watch_start debug audit
- ceph tell mon.a version
- ceph_watch_wait 'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
-
- ceph_watch_start debug audit
- ceph tell mon.b version
- ceph_watch_wait 'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch'
-}
-
-function test_mon_ping()
-{
- ceph ping mon.a
- ceph ping mon.b
- expect_false ceph ping mon.foo
-
- ceph ping mon.\*
-}
-
-function test_mon_deprecated_commands()
-{
- # current DEPRECATED commands are:
- # ceph compact
- # ceph scrub
- # ceph sync force
- #
- # Testing should be accomplished by setting
- # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
- # each one of these commands.
-
- ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete'
- expect_false ceph tell mon.a compact 2> $TMPFILE
- check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
-
- expect_false ceph tell mon.a scrub 2> $TMPFILE
- check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
-
- expect_false ceph tell mon.a sync force 2> $TMPFILE
- check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
-
- ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete'
-}
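-
-# The three deprecated-command checks above repeat the same
-# expect_false/check_response pair; a sketch of a shared helper
-# (hypothetical, kept out of the test to leave it self-contained):
-function expect_obsolete()
-{
-  expect_false ceph tell mon.a "$@" 2> $TMPFILE
-  check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
-}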
-
-function test_mon_cephdf_commands()
-{
-  # ceph df detail:
-  # pool section:
-  # RAW USED: the approximate raw space consumed by each pool, out of the raw total
-
- ceph osd pool create cephdf_for_test 32 32 replicated
- ceph osd pool application enable cephdf_for_test rados
- ceph osd pool set cephdf_for_test size 2
-
- dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
- rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
-
-  # wait for the newly written object to be visible
- for i in `seq 1 10`; do
- rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
- sleep 1
- done
-  # "rados ls" goes straight to the OSDs, but "ceph df" is served by the mon,
-  # so we need to sync the mon with the OSDs first
- flush_pg_stats
- local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
- cal_raw_used_size=`ceph df detail --format=json | jq "$jq_filter.raw_bytes_used"`
- raw_used_size=`ceph df detail --format=json | jq "$jq_filter.bytes_used * 2"`
-
- ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
- rm ./cephdf_for_test
-
-  # i.e. the reported raw usage must equal bytes_used * pool size
-  expect_false test $cal_raw_used_size != $raw_used_size
-}
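-
-# The check above is a double negative: expect_false test X != Y asserts
-# X == Y. A more direct sketch of the same invariant (hypothetical helper;
-# the field names are the ones used in the jq filter above):
-function raw_used_matches_replicas()
-{
-  local pool=$1 size=$2
-  local filter='.pools | .[] | select(.name == "'$pool'") | .stats'
-  local raw=$(ceph df detail --format=json | jq "$filter.raw_bytes_used")
-  local used=$(ceph df detail --format=json | jq "$filter.bytes_used * $size")
-  test "$raw" = "$used"
-}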
-
-function test_mon_pool_application()
-{
- ceph osd pool create app_for_test 10
-
- ceph osd pool application enable app_for_test rbd
- expect_false ceph osd pool application enable app_for_test rgw
- ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
- ceph osd pool ls detail | grep "application rbd,rgw"
- ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
-
- expect_false ceph osd pool application set app_for_test cephfs key value
- ceph osd pool application set app_for_test rbd key1 value1
- ceph osd pool application set app_for_test rbd key2 value2
- ceph osd pool application set app_for_test rgw key1 value1
- ceph osd pool application get app_for_test rbd key1 | grep 'value1'
- ceph osd pool application get app_for_test rbd key2 | grep 'value2'
- ceph osd pool application get app_for_test rgw key1 | grep 'value1'
-
- ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'
-
- ceph osd pool application rm app_for_test rgw key1
- ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
- ceph osd pool application rm app_for_test rbd key2
- ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
- ceph osd pool application rm app_for_test rbd key1
- ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
- ceph osd pool application rm app_for_test rbd key1 # should be idempotent
-
- expect_false ceph osd pool application disable app_for_test rgw
- ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
- ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
- ceph osd pool ls detail | grep "application rbd"
- ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'
-
- ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
- ceph osd pool ls detail | grep -v "application "
- ceph osd pool ls detail --format=json | grep '"application_metadata":{}'
-
- ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
-}
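-
-# For reference, the application metadata matched with grep above can be
-# pulled directly with jq (a sketch; the pool_name field name is an
-# assumption about what "ceph osd pool ls detail --format=json" emits):
-#   ceph osd pool ls detail --format=json | \
-#     jq '.[] | select(.pool_name == "app_for_test") | .application_metadata'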
-
-function test_mon_tell_help_command()
-{
- ceph tell mon.a help
-
- # wrong target
- expect_false ceph tell mon.zzz help
-}
-
-function test_mon_stdin_stdout()
-{
- echo foo | ceph config-key set test_key -i -
- ceph config-key get test_key -o - | grep -c foo | grep -q 1
-}
-
-function test_osd_tell_help_command()
-{
- ceph tell osd.1 help
- expect_false ceph tell osd.100 help
-}
-
-function test_osd_compact()
-{
- ceph tell osd.1 compact
- $SUDO ceph daemon osd.1 compact
-}
-
-function test_mds_tell_help_command()
-{
- local FS_NAME=cephfs
- if ! mds_exists ; then
- echo "Skipping test, no MDS found"
- return
- fi
-
- remove_all_fs
- ceph osd pool create fs_data 10
- ceph osd pool create fs_metadata 10
- ceph fs new $FS_NAME fs_metadata fs_data
- wait_mds_active $FS_NAME
-
- ceph tell mds.a help
- expect_false ceph tell mds.z help
-
- remove_all_fs
- ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
- ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
-}
-
-function test_mgr_tell()
-{
- ceph tell mgr help
- #ceph tell mgr fs status # see http://tracker.ceph.com/issues/20761
- ceph tell mgr osd status
-}
-
-#
-# New tests should be added to the TESTS array below
-#
-# Individual tests may be run using the '-t <testname>' argument;
-# '-t <testname>' may be given as many times as needed
-#
-# Tests will be run in the order presented in the TESTS array, or in
-# the order specified by the '-t <testname>' options.
-#
-# '-l' will list all the available test names
-# '-h' will show usage
-#
-# The test maintains backward compatibility: not specifying arguments
-# will run all tests following the order they appear in the TESTS array.
-#
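-# Example invocations (illustrative):
-#
-#   ./test.sh -l                       # list available tests
-#   ./test.sh -t mon_tell -t mon_ping  # run two specific tests, in order
-#   ./test.sh --test-osd               # run every test in OSD_TESTS
-#   ./test.sh                          # run everything in TESTS order
-#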
-
-set +x
-MON_TESTS+=" mon_injectargs"
-MON_TESTS+=" mon_injectargs_SI"
-for i in `seq 9`; do
- MON_TESTS+=" tiering_$i";
-done
-MON_TESTS+=" auth"
-MON_TESTS+=" auth_profiles"
-MON_TESTS+=" mon_misc"
-MON_TESTS+=" mon_mon"
-MON_TESTS+=" mon_osd"
-MON_TESTS+=" mon_config_key"
-MON_TESTS+=" mon_crush"
-MON_TESTS+=" mon_osd_create_destroy"
-MON_TESTS+=" mon_osd_pool"
-MON_TESTS+=" mon_osd_pool_quota"
-MON_TESTS+=" mon_pg"
-MON_TESTS+=" mon_osd_pool_set"
-MON_TESTS+=" mon_osd_tiered_pool_set"
-MON_TESTS+=" mon_osd_erasure_code"
-MON_TESTS+=" mon_osd_misc"
-MON_TESTS+=" mon_heap_profiler"
-MON_TESTS+=" mon_tell"
-MON_TESTS+=" mon_ping"
-MON_TESTS+=" mon_deprecated_commands"
-MON_TESTS+=" mon_caps"
-MON_TESTS+=" mon_cephdf_commands"
-MON_TESTS+=" mon_tell_help_command"
-MON_TESTS+=" mon_stdin_stdout"
-
-OSD_TESTS+=" osd_bench"
-OSD_TESTS+=" osd_negative_filestore_merge_threshold"
-OSD_TESTS+=" tiering_agent"
-OSD_TESTS+=" admin_heap_profiler"
-OSD_TESTS+=" osd_tell_help_command"
-OSD_TESTS+=" osd_compact"
-
-MDS_TESTS+=" mds_tell"
-MDS_TESTS+=" mon_mds"
-MDS_TESTS+=" mon_mds_metadata"
-MDS_TESTS+=" mds_tell_help_command"
-
-MGR_TESTS+=" mgr_tell"
-
-TESTS+=$MON_TESTS
-TESTS+=$OSD_TESTS
-TESTS+=$MDS_TESTS
-TESTS+=$MGR_TESTS
-
-#
-# "main" follows
-#
-
-function list_tests()
-{
- echo "AVAILABLE TESTS"
- for i in $TESTS; do
- echo " $i"
- done
-}
-
-function usage()
-{
- echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
-}
-
-tests_to_run=()
-
-sanity_check=true
-
-while [[ $# -gt 0 ]]; do
- opt=$1
-
- case "$opt" in
- "-l" )
- do_list=1
- ;;
- "--asok-does-not-need-root" )
- SUDO=""
- ;;
- "--no-sanity-check" )
- sanity_check=false
- ;;
- "--test-mon" )
- tests_to_run+="$MON_TESTS"
- ;;
- "--test-osd" )
- tests_to_run+="$OSD_TESTS"
- ;;
- "--test-mds" )
- tests_to_run+="$MDS_TESTS"
- ;;
- "--test-mgr" )
- tests_to_run+="$MGR_TESTS"
- ;;
- "-t" )
- shift
- if [[ -z "$1" ]]; then
- echo "missing argument to '-t'"
- usage ;
- exit 1
- fi
- tests_to_run+=" $1"
- ;;
- "-h" )
- usage ;
- exit 0
- ;;
- esac
- shift
-done
-
-if [[ $do_list -eq 1 ]]; then
- list_tests ;
- exit 0
-fi
-
-ceph osd pool create rbd 10
-
-if test -z "$tests_to_run" ; then
- tests_to_run="$TESTS"
-fi
-
-if $sanity_check ; then
- wait_no_osd_down
-fi
-for i in $tests_to_run; do
- if $sanity_check ; then
- check_no_osd_down
- fi
- set -x
- test_${i}
- set +x
-done
-if $sanity_check ; then
- check_no_osd_down
-fi
-
-set -x
-
-echo OK
diff --git a/src/ceph/qa/workunits/cephtool/test_daemon.sh b/src/ceph/qa/workunits/cephtool/test_daemon.sh
deleted file mode 100755
index 413f708..0000000
--- a/src/ceph/qa/workunits/cephtool/test_daemon.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash -x
-
-set -e
-
-expect_false()
-{
- set -x
- if "$@"; then return 1; else return 0; fi
-}
-
-echo note: assuming mon.a is on the current host
-
-# can set to 'sudo ./ceph' to execute tests from current dir for development
-CEPH=${CEPH:-'sudo ceph'}
-
-${CEPH} daemon mon.a version | grep version
-
-# get debug_ms setting and strip it, painfully for reuse
-old_ms=$(${CEPH} daemon mon.a config get debug_ms | \
- grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g')
-${CEPH} daemon mon.a config set debug_ms 13
-new_ms=$(${CEPH} daemon mon.a config get debug_ms | \
- grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g')
-[ "$new_ms" = "13/13" ]
-${CEPH} daemon mon.a config set debug_ms $old_ms
-new_ms=$(${CEPH} daemon mon.a config get debug_ms | \
- grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g')
-[ "$new_ms" = "$old_ms" ]
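-
-# (the raw "config get" output is JSON along the lines of
-#  { "debug_ms": "0\/5" } -- the exact shape is an assumption here;
-#  the sed above keeps only the value, e.g. 0/5)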
-
-# unregistered/non-existent command
-expect_false ${CEPH} daemon mon.a bogus_command_blah foo
-
-set +e
-OUTPUT=$(${CEPH} -c /not/a/ceph.conf daemon mon.a help 2>&1)
-# look for EINVAL
-if [ $? != 22 ] ; then exit 1; fi
-if ! echo "$OUTPUT" | grep -q '.*open.*/not/a/ceph.conf'; then
- echo "didn't find expected error in bad conf search"
- exit 1
-fi
-set -e
-
-echo OK
diff --git a/src/ceph/qa/workunits/cls/test_cls_hello.sh b/src/ceph/qa/workunits/cls/test_cls_hello.sh
deleted file mode 100755
index 0a2e096..0000000
--- a/src/ceph/qa/workunits/cls/test_cls_hello.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh -e
-
-ceph_test_cls_hello
-
-exit 0
diff --git a/src/ceph/qa/workunits/cls/test_cls_journal.sh b/src/ceph/qa/workunits/cls/test_cls_journal.sh
deleted file mode 100755
index 9aa7450..0000000
--- a/src/ceph/qa/workunits/cls/test_cls_journal.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh -e
-
-GTEST_FILTER=${CLS_JOURNAL_GTEST_FILTER:-*}
-ceph_test_cls_journal --gtest_filter=${GTEST_FILTER}
-
-exit 0
diff --git a/src/ceph/qa/workunits/cls/test_cls_lock.sh b/src/ceph/qa/workunits/cls/test_cls_lock.sh
deleted file mode 100755
index c145270..0000000
--- a/src/ceph/qa/workunits/cls/test_cls_lock.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh -e
-
-ceph_test_cls_lock
-
-exit 0
diff --git a/src/ceph/qa/workunits/cls/test_cls_numops.sh b/src/ceph/qa/workunits/cls/test_cls_numops.sh
deleted file mode 100755
index dcbafca..0000000
--- a/src/ceph/qa/workunits/cls/test_cls_numops.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh -e
-
-ceph_test_cls_numops
-
-exit 0
diff --git a/src/ceph/qa/workunits/cls/test_cls_rbd.sh b/src/ceph/qa/workunits/cls/test_cls_rbd.sh
deleted file mode 100755
index fd4bec0..0000000
--- a/src/ceph/qa/workunits/cls/test_cls_rbd.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh -e
-
-GTEST_FILTER=${CLS_RBD_GTEST_FILTER:-*}
-ceph_test_cls_rbd --gtest_filter=${GTEST_FILTER}
-
-exit 0
diff --git a/src/ceph/qa/workunits/cls/test_cls_refcount.sh b/src/ceph/qa/workunits/cls/test_cls_refcount.sh
deleted file mode 100755
index d722f5a..0000000
--- a/src/ceph/qa/workunits/cls/test_cls_refcount.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh -e
-
-ceph_test_cls_refcount
-
-exit 0
diff --git a/src/ceph/qa/workunits/cls/test_cls_rgw.sh b/src/ceph/qa/workunits/cls/test_cls_rgw.sh
deleted file mode 100755
index 257338a..0000000
--- a/src/ceph/qa/workunits/cls/test_cls_rgw.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh -e
-
-ceph_test_cls_rgw
-#ceph_test_cls_rgw_meta
-#ceph_test_cls_rgw_log
-#ceph_test_cls_rgw_opstate
-
-exit 0
diff --git a/src/ceph/qa/workunits/cls/test_cls_sdk.sh b/src/ceph/qa/workunits/cls/test_cls_sdk.sh
deleted file mode 100755
index f1ccdc3..0000000
--- a/src/ceph/qa/workunits/cls/test_cls_sdk.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh -e
-
-ceph_test_cls_sdk
-
-exit 0
diff --git a/src/ceph/qa/workunits/direct_io/.gitignore b/src/ceph/qa/workunits/direct_io/.gitignore
deleted file mode 100644
index 80f1fd1..0000000
--- a/src/ceph/qa/workunits/direct_io/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-/direct_io_test
-/test_sync_io
-/test_short_dio_read
diff --git a/src/ceph/qa/workunits/direct_io/Makefile b/src/ceph/qa/workunits/direct_io/Makefile
deleted file mode 100644
index 20fec0b..0000000
--- a/src/ceph/qa/workunits/direct_io/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-CFLAGS = -Wall -Wextra -D_GNU_SOURCE
-
-TARGETS = direct_io_test test_sync_io test_short_dio_read
-
-.c:
- $(CC) $(CFLAGS) $@.c -o $@
-
-all: $(TARGETS)
-
-clean:
- rm $(TARGETS)
diff --git a/src/ceph/qa/workunits/direct_io/big.sh b/src/ceph/qa/workunits/direct_io/big.sh
deleted file mode 100755
index 43bd6d7..0000000
--- a/src/ceph/qa/workunits/direct_io/big.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh -ex
-
-echo "test large (16MB) dio write"
-dd if=/dev/zero of=foo.big bs=16M count=1 oflag=direct
-
-echo OK
diff --git a/src/ceph/qa/workunits/direct_io/direct_io_test.c b/src/ceph/qa/workunits/direct_io/direct_io_test.c
deleted file mode 100644
index ccfbbb8..0000000
--- a/src/ceph/qa/workunits/direct_io/direct_io_test.c
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Ceph - scalable distributed file system
- *
- * Copyright (C) 2011 New Dream Network
- *
- * This is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License version 2.1, as published by the Free Software
- * Foundation. See file COPYING.
- *
- */
-
-#include <errno.h>
-#include <inttypes.h>
-#include <fcntl.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <time.h>
-#include <unistd.h>
-
-/*
- * direct_io_test
- *
- * This test does some I/O using O_DIRECT.
- *
- * Semantics of O_DIRECT can be found at http://lwn.net/Articles/348739/
- *
- */
-
-static int g_num_pages = 100;
-
-static int g_duration = 10;
-
-struct chunk {
- uint64_t offset;
- uint64_t pad0;
- uint64_t pad1;
- uint64_t pad2;
- uint64_t pad3;
- uint64_t pad4;
- uint64_t pad5;
- uint64_t not_offset;
-} __attribute__((packed));
-
-static int page_size;
-
-static char temp_file[] = "direct_io_temp_file_XXXXXX";
-
-static int safe_write(int fd, const void *buf, signed int len)
-{
- const char *b = (const char*)buf;
- /* Handle EINTR and short writes */
- while (1) {
- int res = write(fd, b, len);
- if (res < 0) {
-		if (res < 0) {
-			int err = errno;
-			if (err != EINTR) {
-				return err;
-			}
-			/* interrupted; retry so res == -1 cannot corrupt len and b */
-			continue;
-		}
- b += res;
- if (len <= 0)
- return 0;
- }
-}
-
-static int do_read(int fd, char *buf, int buf_sz)
-{
- /* We assume no short reads or EINTR. It's not really clear how
- * those things interact with O_DIRECT. */
- int ret = read(fd, buf, buf_sz);
- if (ret < 0) {
- int err = errno;
- printf("do_read: error: %d (%s)\n", err, strerror(err));
- return err;
- }
- if (ret != buf_sz) {
- printf("do_read: short read\n");
- return -EIO;
- }
- return 0;
-}
-
-static int setup_temp_file(void)
-{
- int fd;
- int64_t num_chunks, i;
-
- if (page_size % sizeof(struct chunk)) {
-		printf("setup_temp_file: page_size doesn't divide evenly "
-		       "into data blocks.\n");
- return -EINVAL;
- }
-
- fd = mkstemp(temp_file);
- if (fd < 0) {
- int err = errno;
-		printf("setup_temp_file: mkstemp failed with error %d\n", err);
- return err;
- }
-
- num_chunks = g_num_pages * (page_size / sizeof(struct chunk));
- for (i = 0; i < num_chunks; ++i) {
- int ret;
- struct chunk c;
- memset(&c, 0, sizeof(c));
- c.offset = i * sizeof(struct chunk);
- c.pad0 = 0;
- c.pad1 = 1;
- c.pad2 = 2;
- c.pad3 = 3;
- c.pad4 = 4;
- c.pad5 = 5;
- c.not_offset = ~c.offset;
- ret = safe_write(fd, &c, sizeof(struct chunk));
- if (ret) {
-			printf("setup_temp_file: safe_write failed with "
-			       "error: %d\n", ret);
- TEMP_FAILURE_RETRY(close(fd));
- unlink(temp_file);
- return ret;
- }
- }
- TEMP_FAILURE_RETRY(close(fd));
- return 0;
-}
-
-static int verify_chunk(const struct chunk *c, uint64_t offset)
-{
- if (c->offset != offset) {
-		printf("verify_chunk(%" PRId64 "): bad offset value (got: %"
-		       PRId64 ", expected: %" PRId64 ")\n", offset, c->offset, offset);
- return EIO;
- }
- if (c->pad0 != 0) {
- printf("verify_chunk(%" PRId64 "): bad pad0 value\n", offset);
- return EIO;
- }
- if (c->pad1 != 1) {
- printf("verify_chunk(%" PRId64 "): bad pad1 value\n", offset);
- return EIO;
- }
- if (c->pad2 != 2) {
- printf("verify_chunk(%" PRId64 "): bad pad2 value\n", offset);
- return EIO;
- }
- if (c->pad3 != 3) {
- printf("verify_chunk(%" PRId64 "): bad pad3 value\n", offset);
- return EIO;
- }
- if (c->pad4 != 4) {
- printf("verify_chunk(%" PRId64 "): bad pad4 value\n", offset);
- return EIO;
- }
- if (c->pad5 != 5) {
- printf("verify_chunk(%" PRId64 "): bad pad5 value\n", offset);
- return EIO;
- }
- if (c->not_offset != ~offset) {
- printf("verify_chunk(%" PRId64 "): bad not_offset value\n",
- offset);
- return EIO;
- }
- return 0;
-}
-
-static int do_o_direct_reads(void)
-{
- int fd, ret;
- unsigned int i;
- void *buf = 0;
- time_t cur_time, end_time;
- ret = posix_memalign(&buf, page_size, page_size);
- if (ret) {
- printf("do_o_direct_reads: posix_memalign returned %d\n", ret);
- goto done;
- }
-
- fd = open(temp_file, O_RDONLY | O_DIRECT);
- if (fd < 0) {
- ret = errno;
- printf("do_o_direct_reads: error opening fd: %d\n", ret);
- goto free_buf;
- }
-
- // read the first chunk and see if it looks OK
- ret = do_read(fd, buf, page_size);
- if (ret)
- goto close_fd;
- ret = verify_chunk((struct chunk*)buf, 0);
- if (ret)
- goto close_fd;
-
- // read some random chunks and see how they look
- cur_time = time(NULL);
- end_time = cur_time + g_duration;
- i = 0;
- do {
- time_t next_time;
- uint64_t offset;
- int page;
- unsigned int seed;
-
- seed = i++;
- page = rand_r(&seed) % g_num_pages;
- offset = page;
- offset *= page_size;
- if (lseek64(fd, offset, SEEK_SET) == -1) {
- int err = errno;
- printf("lseek64(%" PRId64 ") failed: error %d (%s)\n",
- offset, err, strerror(err));
- goto close_fd;
- }
- ret = do_read(fd, buf, page_size);
- if (ret)
- goto close_fd;
- ret = verify_chunk((struct chunk*)buf, offset);
- if (ret)
- goto close_fd;
- next_time = time(NULL);
- if (next_time > cur_time) {
- printf(".");
- }
- cur_time = next_time;
- } while (time(NULL) < end_time);
-
- printf("\ndo_o_direct_reads: SUCCESS\n");
-close_fd:
- TEMP_FAILURE_RETRY(close(fd));
-free_buf:
- free(buf);
-done:
- return ret;
-}
-
-static void usage(char *argv0)
-{
- printf("%s: tests direct I/O\n", argv0);
- printf("-d <seconds>: sets duration to <seconds>\n");
- printf("-h: this help\n");
- printf("-p <pages>: sets number of pages to allocate\n");
-}
-
-static void parse_args(int argc, char *argv[])
-{
- int c;
- while ((c = getopt (argc, argv, "d:hp:")) != -1) {
- switch (c) {
- case 'd':
- g_duration = atoi(optarg);
- if (g_duration <= 0) {
-				printf("tried to set invalid value of "
-				       "g_duration: %d\n", g_duration);
- exit(1);
- }
- break;
- case 'h':
- usage(argv[0]);
- exit(0);
- break;
- case 'p':
- g_num_pages = atoi(optarg);
- if (g_num_pages <= 0) {
- printf("tried to set invalid value of "
- "g_num_pages: %d\n", g_num_pages);
- exit(1);
- }
- break;
- case '?':
- usage(argv[0]);
- exit(1);
- break;
- default:
- usage(argv[0]);
- exit(1);
- break;
- }
- }
-}
-
-int main(int argc, char *argv[])
-{
- int ret;
-
- parse_args(argc, argv);
-
- setvbuf(stdout, NULL, _IONBF, 0);
-
- page_size = getpagesize();
-
- ret = setup_temp_file();
- if (ret) {
- printf("setup_temp_file failed with error %d\n", ret);
- goto done;
- }
-
- ret = do_o_direct_reads();
- if (ret) {
- printf("do_o_direct_reads failed with error %d\n", ret);
- goto unlink_temp_file;
- }
-
-unlink_temp_file:
- unlink(temp_file);
-done:
- return ret;
-}
diff --git a/src/ceph/qa/workunits/direct_io/misc.sh b/src/ceph/qa/workunits/direct_io/misc.sh
deleted file mode 100755
index 6de080d..0000000
--- a/src/ceph/qa/workunits/direct_io/misc.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh -ex
-
-# a few test cases from henry
-echo "test read from hole"
-dd if=/dev/zero of=dd3 bs=1 seek=1048576 count=0
-dd if=dd3 of=/tmp/ddout1 skip=8 bs=512 count=2 iflag=direct
-dd if=/dev/zero of=/tmp/dd3 bs=512 count=2
-cmp /tmp/dd3 /tmp/ddout1
-
-echo "test O_DIRECT read-back of a buffered write at a non-page-aligned offset"
-dd if=/dev/urandom of=/tmp/dd10 bs=500 count=1
-dd if=/tmp/dd10 of=dd10 bs=512 seek=8388 count=1
-dd if=dd10 of=/tmp/dd10out bs=512 skip=8388 count=1 iflag=direct
-cmp /tmp/dd10 /tmp/dd10out
-
-echo OK
diff --git a/src/ceph/qa/workunits/direct_io/test_short_dio_read.c b/src/ceph/qa/workunits/direct_io/test_short_dio_read.c
deleted file mode 100644
index 5024855..0000000
--- a/src/ceph/qa/workunits/direct_io/test_short_dio_read.c
+++ /dev/null
@@ -1,57 +0,0 @@
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <errno.h>
-#include <string.h>
-#include <stdlib.h>
-
-int main()
-{
- char buf[409600];
- ssize_t r;
- int err;
- int fd = open("shortfile", O_WRONLY|O_CREAT, 0644);
-
- if (fd < 0) {
- err = errno;
- printf("error: open() failed with: %d (%s)\n", err, strerror(err));
- exit(err);
- }
-
- printf("writing first 3 bytes of 10k file\n");
- r = write(fd, "foo", 3);
- if (r == -1) {
- err = errno;
- printf("error: write() failed with: %d (%s)\n", err, strerror(err));
- close(fd);
- exit(err);
- }
- r = ftruncate(fd, 10000);
- if (r == -1) {
- err = errno;
- printf("error: ftruncate() failed with: %d (%s)\n", err, strerror(err));
- close(fd);
- exit(err);
- }
-
- fsync(fd);
- close(fd);
-
- printf("reading O_DIRECT\n");
- fd = open("shortfile", O_RDONLY|O_DIRECT);
- if (fd < 0) {
- err = errno;
- printf("error: open() failed with: %d (%s)\n", err, strerror(err));
- exit(err);
- }
-
- r = read(fd, buf, sizeof(buf));
- close(fd);
-
- printf("got %d\n", (int)r);
- if (r != 10000)
- return 1;
- return 0;
-}
diff --git a/src/ceph/qa/workunits/direct_io/test_sync_io.c b/src/ceph/qa/workunits/direct_io/test_sync_io.c
deleted file mode 100644
index f393fa6..0000000
--- a/src/ceph/qa/workunits/direct_io/test_sync_io.c
+++ /dev/null
@@ -1,250 +0,0 @@
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <inttypes.h>
-#include <linux/types.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-#include <errno.h>
-
-//#include "../client/ioctl.h"
-
-#include <linux/ioctl.h>
-#define CEPH_IOCTL_MAGIC 0x97
-#define CEPH_IOC_SYNCIO _IO(CEPH_IOCTL_MAGIC, 5)
-
-void write_pattern()
-{
- printf("writing pattern\n");
-
- uint64_t i;
- int r;
-
- int fd = open("foo", O_CREAT|O_WRONLY, 0644);
- if (fd < 0) {
- r = errno;
- printf("write_pattern: error: open() failed with: %d (%s)\n", r, strerror(r));
- exit(r);
- }
- for (i=0; i<1048576 * sizeof(i); i += sizeof(i)) {
- r = write(fd, &i, sizeof(i));
- if (r == -1) {
- r = errno;
- printf("write_pattern: error: write() failed with: %d (%s)\n", r, strerror(r));
- break;
- }
- }
-
- close(fd);
-}
-
-int verify_pattern(char *buf, size_t len, uint64_t off)
-{
- size_t i;
-
- for (i = 0; i < len; i += sizeof(uint64_t)) {
- uint64_t expected = i + off;
- uint64_t actual = *(uint64_t*)(buf + i);
- if (expected != actual) {
- printf("error: offset %llu had %llu\n", (unsigned long long)expected,
- (unsigned long long)actual);
- exit(1);
- }
- }
- return 0;
-}
-
-void generate_pattern(void *buf, size_t len, uint64_t offset)
-{
- uint64_t *v = buf;
- size_t i;
-
-	for (i = 0; i < len / sizeof(*v); i++)	/* sizeof(*v): element size, not pointer size */
-		v[i] = i * sizeof(*v) + offset;
- verify_pattern(buf, len, offset);
-}
-
-int read_file(int buf_align, uint64_t offset, int len, int direct) {
-
- printf("read_file buf_align %d offset %llu len %d\n", buf_align,
- (unsigned long long)offset, len);
- void *rawbuf;
- int r;
- int flags;
- int err = 0;
-
- if(direct)
- flags = O_RDONLY|O_DIRECT;
- else
- flags = O_RDONLY;
-
- int fd = open("foo", flags);
- if (fd < 0) {
- err = errno;
- printf("read_file: error: open() failed with: %d (%s)\n", err, strerror(err));
- exit(err);
- }
-
- if (!direct)
- ioctl(fd, CEPH_IOC_SYNCIO);
-
- if ((r = posix_memalign(&rawbuf, 4096, len + buf_align)) != 0) {
- printf("read_file: error: posix_memalign failed with %d", r);
- close(fd);
- exit (r);
- }
-
- void *buf = (char *)rawbuf + buf_align;
- memset(buf, 0, len);
- r = pread(fd, buf, len, offset);
- if (r == -1) {
- err = errno;
- printf("read_file: error: pread() failed with: %d (%s)\n", err, strerror(err));
- goto out;
- }
- r = verify_pattern(buf, len, offset);
-
-out:
- close(fd);
- free(rawbuf);
- return r;
-}
-
-int read_direct(int buf_align, uint64_t offset, int len)
-{
- printf("read_direct buf_align %d offset %llu len %d\n", buf_align,
- (unsigned long long)offset, len);
- return read_file(buf_align, offset, len, 1);
-}
-
-int read_sync(int buf_align, uint64_t offset, int len)
-{
- printf("read_sync buf_align %d offset %llu len %d\n", buf_align,
- (unsigned long long)offset, len);
- return read_file(buf_align, offset, len, 0);
-}
-
-int write_file(int buf_align, uint64_t offset, int len, int direct)
-{
- printf("write_file buf_align %d offset %llu len %d\n", buf_align,
- (unsigned long long)offset, len);
- void *rawbuf;
- int r;
- int err = 0;
- int flags;
- if (direct)
- flags = O_WRONLY|O_DIRECT|O_CREAT;
- else
- flags = O_WRONLY|O_CREAT;
-
- int fd = open("foo", flags, 0644);
- if (fd < 0) {
- int err = errno;
- printf("write_file: error: open() failed with: %d (%s)\n", err, strerror(err));
- exit(err);
- }
-
- if ((r = posix_memalign(&rawbuf, 4096, len + buf_align)) != 0) {
- printf("write_file: error: posix_memalign failed with %d", r);
- err = r;
- goto out_close;
- }
-
- if (!direct)
- ioctl(fd, CEPH_IOC_SYNCIO);
-
- void *buf = (char *)rawbuf + buf_align;
-
- generate_pattern(buf, len, offset);
-
- r = pwrite(fd, buf, len, offset);
- close(fd);
-
- fd = open("foo", O_RDONLY);
- if (fd < 0) {
- err = errno;
- printf("write_file: error: open() failed with: %d (%s)\n", err, strerror(err));
- free(rawbuf);
- goto out_unlink;
- }
- void *buf2 = malloc(len);
- if (!buf2) {
- err = -ENOMEM;
- printf("write_file: error: malloc failed\n");
- goto out_free;
- }
-
- memset(buf2, 0, len);
- r = pread(fd, buf2, len, offset);
- if (r == -1) {
- err = errno;
- printf("write_file: error: pread() failed with: %d (%s)\n", err, strerror(err));
- goto out_free_buf;
- }
- r = verify_pattern(buf2, len, offset);
-
-out_free_buf:
- free(buf2);
-out_free:
- free(rawbuf);
-out_close:
- close(fd);
-out_unlink:
- unlink("foo");
- if (err)
- exit(err);
- return r;
-}
-
-int write_direct(int buf_align, uint64_t offset, int len)
-{
- printf("write_direct buf_align %d offset %llu len %d\n", buf_align,
- (unsigned long long)offset, len);
- return write_file (buf_align, offset, len, 1);
-}
-
-int write_sync(int buf_align, uint64_t offset, int len)
-{
- printf("write_sync buf_align %d offset %llu len %d\n", buf_align,
- (unsigned long long)offset, len);
- return write_file (buf_align, offset, len, 0);
-}
-
-int main(int argc, char **argv)
-{
- uint64_t i, j, k;
- int read = 1;
- int write = 1;
-
- if (argc >= 2 && strcmp(argv[1], "read") == 0)
- write = 0;
- if (argc >= 2 && strcmp(argv[1], "write") == 0)
- read = 0;
-
- if (read) {
- write_pattern();
-
- for (i = 0; i < 4096; i += 512)
- for (j = 4*1024*1024 - 4096; j < 4*1024*1024 + 4096; j += 512)
- for (k = 1024; k <= 16384; k *= 2) {
- read_direct(i, j, k);
- read_sync(i, j, k);
- }
-
- }
- unlink("foo");
- if (write) {
- for (i = 0; i < 4096; i += 512)
- for (j = 4*1024*1024 - 4096 + 512; j < 4*1024*1024 + 4096; j += 512)
- for (k = 1024; k <= 16384; k *= 2) {
- write_direct(i, j, k);
- write_sync(i, j, k);
- }
- }
-
-
- return 0;
-}
diff --git a/src/ceph/qa/workunits/erasure-code/.gitignore b/src/ceph/qa/workunits/erasure-code/.gitignore
deleted file mode 100644
index 7e563b8..0000000
--- a/src/ceph/qa/workunits/erasure-code/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.log
-*.trs
diff --git a/src/ceph/qa/workunits/erasure-code/bench.html b/src/ceph/qa/workunits/erasure-code/bench.html
deleted file mode 100644
index 3b4b6c7..0000000
--- a/src/ceph/qa/workunits/erasure-code/bench.html
+++ /dev/null
@@ -1,34 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd" >
-<html>
- <head>
- <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
- <title>Erasure Code Plugins Benchmarks</title>
- <link href="examples.css" rel="stylesheet" type="text/css">
- <script language="javascript" type="text/javascript" src="jquery.js"></script>
- <script language="javascript" type="text/javascript" src="jquery.flot.js"></script>
- <script language="javascript" type="text/javascript" src="jquery.flot.categories.js"></script>
- <script language="javascript" type="text/javascript" src="bench.js"></script>
- <script language="javascript" type="text/javascript" src="plot.js"></script>
- </head>
- <body>
-
- <div id="header">
- <h2>Erasure Code Plugins Benchmarks</h2>
- </div>
-
- <div id="content">
-
- <div class="demo-container">
- <div id="encode" class="demo-placeholder"></div>
- </div>
- <p>encode: Y = GB/s, X = K/M</p>
-
- <div class="demo-container">
- <div id="decode" class="demo-placeholder"></div>
- </div>
- <p>decode: Y = GB/s, X = K/M/erasures</p>
-
- </div>
-
- </body>
-</html>
diff --git a/src/ceph/qa/workunits/erasure-code/bench.sh b/src/ceph/qa/workunits/erasure-code/bench.sh
deleted file mode 100755
index e2bec8e..0000000
--- a/src/ceph/qa/workunits/erasure-code/bench.sh
+++ /dev/null
@@ -1,188 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 Red Hat <contact@redhat.com>
-# Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
-#
-# Author: Loic Dachary <loic@dachary.org>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Library Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Library Public License for more details.
-#
-# Test that it works from sources with:
-#
-# CEPH_ERASURE_CODE_BENCHMARK=src/ceph_erasure_code_benchmark \
-# PLUGIN_DIRECTORY=src/.libs \
-# qa/workunits/erasure-code/bench.sh fplot jerasure |
-# tee qa/workunits/erasure-code/bench.js
-#
-# This should start immediately and display:
-#
-# ...
-# [ '2/1', .48035538612887358583 ],
-# [ '3/2', .21648470405675016626 ],
-# etc.
-#
-# and complete within a few seconds. The result can then be displayed with:
-#
-# firefox qa/workunits/erasure-code/bench.html
-#
-# Once it is confirmed to work, it can be run with a more significant
-# volume of data so that the measures are more reliable:
-#
-# TOTAL_SIZE=$((4 * 1024 * 1024 * 1024)) \
-# CEPH_ERASURE_CODE_BENCHMARK=src/ceph_erasure_code_benchmark \
-# PLUGIN_DIRECTORY=src/.libs \
-# qa/workunits/erasure-code/bench.sh fplot jerasure |
-# tee qa/workunits/erasure-code/bench.js
-#
-set -e
-
-export PATH=/sbin:$PATH
-
-: ${VERBOSE:=false}
-: ${CEPH_ERASURE_CODE_BENCHMARK:=ceph_erasure_code_benchmark}
-: ${PLUGIN_DIRECTORY:=/usr/lib/ceph/erasure-code}
-: ${PLUGINS:=isa jerasure}
-: ${TECHNIQUES:=vandermonde cauchy}
-: ${TOTAL_SIZE:=$((1024 * 1024))}
-: ${SIZE:=4096}
-: ${PARAMETERS:=--parameter jerasure-per-chunk-alignment=true}
-
-function bench_header() {
- echo -e "seconds\tKB\tplugin\tk\tm\twork.\titer.\tsize\teras.\tcommand."
-}
-
-function bench() {
- local plugin=$1
- shift
- local k=$1
- shift
- local m=$1
- shift
- local workload=$1
- shift
- local iterations=$1
- shift
- local size=$1
- shift
- local erasures=$1
- shift
- command=$(echo $CEPH_ERASURE_CODE_BENCHMARK \
- --plugin $plugin \
- --workload $workload \
- --iterations $iterations \
- --size $size \
- --erasures $erasures \
- --parameter k=$k \
- --parameter m=$m \
- --erasure-code-dir $PLUGIN_DIRECTORY)
- result=$($command "$@")
- echo -e "$result\t$plugin\t$k\t$m\t$workload\t$iterations\t$size\t$erasures\t$command ""$@"
-}
-
-function packetsize() {
- local k=$1
- local w=$2
- local vector_wordsize=$3
- local size=$4
-
- local p=$(( ($size / $k / $w / $vector_wordsize ) * $vector_wordsize))
- if [ $p -gt 3100 ] ; then
- p=3100
- fi
- echo $p
-}
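-# e.g. packetsize 2 8 16 4096 => (4096/2/8/16)*16 = 256, and any result
-# above 3100 is clamped to 3100 (illustrative; matches the call in bench_run)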
-
-function bench_run() {
- local plugin=jerasure
- local w=8
- local VECTOR_WORDSIZE=16
- local ks="2 3 4 6 10"
- declare -A k2ms
- k2ms[2]="1"
- k2ms[3]="2"
- k2ms[4]="2 3"
- k2ms[6]="2 3 4"
- k2ms[10]="3 4"
- for technique in ${TECHNIQUES} ; do
- for plugin in ${PLUGINS} ; do
- eval technique_parameter=\$${plugin}2technique_${technique}
- echo "serie encode_${technique}_${plugin}"
- for k in $ks ; do
- for m in ${k2ms[$k]} ; do
- bench $plugin $k $m encode $(($TOTAL_SIZE / $SIZE)) $SIZE 0 \
- --parameter packetsize=$(packetsize $k $w $VECTOR_WORDSIZE $SIZE) \
- ${PARAMETERS} \
- --parameter technique=$technique_parameter
-
- done
- done
- done
- done
- for technique in ${TECHNIQUES} ; do
- for plugin in ${PLUGINS} ; do
- eval technique_parameter=\$${plugin}2technique_${technique}
- echo "serie decode_${technique}_${plugin}"
- for k in $ks ; do
- for m in ${k2ms[$k]} ; do
- echo
- for erasures in $(seq 1 $m) ; do
- bench $plugin $k $m decode $(($TOTAL_SIZE / $SIZE)) $SIZE $erasures \
- --parameter packetsize=$(packetsize $k $w $VECTOR_WORDSIZE $SIZE) \
- ${PARAMETERS} \
- --parameter technique=$technique_parameter
- done
- done
- done
- done
- done
-}
-
-function fplot() {
- local serie
- bench_run | while read seconds total plugin k m workload iteration size erasures rest ; do
- if [ -z $seconds ] ; then
- echo null,
- elif [ $seconds = serie ] ; then
- if [ "$serie" ] ; then
- echo '];'
- fi
- local serie=`echo $total | sed 's/cauchy_\([0-9]\)/cauchy_good_\1/g'`
- echo "var $serie = ["
- else
- local x
- if [ $workload = encode ] ; then
- x=$k/$m
- else
- x=$k/$m/$erasures
- fi
- echo "[ '$x', " $(echo "( $total / 1024 / 1024 ) / $seconds" | bc -ql) " ], "
- fi
- done
- echo '];'
-}
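-# fplot emits JavaScript arrays for bench.html, one per serie, e.g.
-# (illustrative, matching the sample in the header comment):
-#   var encode_vandermonde_isa = [
-#   [ '2/1', .48035538612887358583 ],
-#   ...
-#   ];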
-
-function main() {
- bench_header
- bench_run
-}
-
-if [ "$1" = fplot ] ; then
- "$@"
-else
- main
-fi
-# Local Variables:
-# compile-command: "\
-# CEPH_ERASURE_CODE_BENCHMARK=../../../src/ceph_erasure_code_benchmark \
-# PLUGIN_DIRECTORY=../../../src/.libs \
-# ./bench.sh
-# "
-# End:
diff --git a/src/ceph/qa/workunits/erasure-code/encode-decode-non-regression.sh b/src/ceph/qa/workunits/erasure-code/encode-decode-non-regression.sh
deleted file mode 100755
index 2a65d59..0000000
--- a/src/ceph/qa/workunits/erasure-code/encode-decode-non-regression.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash -ex
-#
-# Copyright (C) 2014 Red Hat <contact@redhat.com>
-#
-# Author: Loic Dachary <loic@dachary.org>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Library Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Library Public License for more details.
-#
-
-: ${CORPUS:=https://github.com/ceph/ceph-erasure-code-corpus.git}
-: ${DIRECTORY:=$CEPH_ROOT/ceph-erasure-code-corpus}
-
-# when running from sources, the current directory must have precedence
-export PATH=:$PATH
-
-if ! test -d $DIRECTORY ; then
- git clone $CORPUS $DIRECTORY
-fi
-
-my_version=v$(ceph --version | cut -f3 -d ' ')
-
-all_versions=$( (ls -d $DIRECTORY/v* ; echo $DIRECTORY/$my_version) | sort)
-
-for version in $all_versions ; do
- if test -d $version ; then
- $version/non-regression.sh
- fi
- if test $version = $DIRECTORY/$my_version ; then
- break
- fi
-done
diff --git a/src/ceph/qa/workunits/erasure-code/examples.css b/src/ceph/qa/workunits/erasure-code/examples.css
deleted file mode 100644
index ee47247..0000000
--- a/src/ceph/qa/workunits/erasure-code/examples.css
+++ /dev/null
@@ -1,97 +0,0 @@
-* { padding: 0; margin: 0; vertical-align: top; }
-
-body {
- background: url(background.png) repeat-x;
- font: 18px/1.5em "proxima-nova", Helvetica, Arial, sans-serif;
-}
-
-a { color: #069; }
-a:hover { color: #28b; }
-
-h2 {
- margin-top: 15px;
- font: normal 32px "omnes-pro", Helvetica, Arial, sans-serif;
-}
-
-h3 {
- margin-left: 30px;
- font: normal 26px "omnes-pro", Helvetica, Arial, sans-serif;
- color: #666;
-}
-
-p {
- margin-top: 10px;
-}
-
-button {
- font-size: 18px;
- padding: 1px 7px;
-}
-
-input {
- font-size: 18px;
-}
-
-input[type=checkbox] {
- margin: 7px;
-}
-
-#header {
- position: relative;
- width: 900px;
- margin: auto;
-}
-
-#header h2 {
- margin-left: 10px;
- vertical-align: middle;
- font-size: 42px;
- font-weight: bold;
- text-decoration: none;
- color: #000;
-}
-
-#content {
- width: 880px;
- margin: 0 auto;
- padding: 10px;
-}
-
-#footer {
- margin-top: 25px;
- margin-bottom: 10px;
- text-align: center;
- font-size: 12px;
- color: #999;
-}
-
-.demo-container {
- box-sizing: border-box;
- width: 850px;
- height: 450px;
- padding: 20px 15px 15px 15px;
- margin: 15px auto 30px auto;
- border: 1px solid #ddd;
- background: #fff;
- background: linear-gradient(#f6f6f6 0, #fff 50px);
- background: -o-linear-gradient(#f6f6f6 0, #fff 50px);
- background: -ms-linear-gradient(#f6f6f6 0, #fff 50px);
- background: -moz-linear-gradient(#f6f6f6 0, #fff 50px);
- background: -webkit-linear-gradient(#f6f6f6 0, #fff 50px);
- box-shadow: 0 3px 10px rgba(0,0,0,0.15);
- -o-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
- -ms-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
- -moz-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
- -webkit-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
-}
-
-.demo-placeholder {
- width: 100%;
- height: 100%;
- font-size: 14px;
- line-height: 1.2em;
-}
-
-.legend table {
- border-spacing: 5px;
-} \ No newline at end of file
diff --git a/src/ceph/qa/workunits/erasure-code/jquery.flot.categories.js b/src/ceph/qa/workunits/erasure-code/jquery.flot.categories.js
deleted file mode 100644
index 2f9b257..0000000
--- a/src/ceph/qa/workunits/erasure-code/jquery.flot.categories.js
+++ /dev/null
@@ -1,190 +0,0 @@
-/* Flot plugin for plotting textual data or categories.
-
-Copyright (c) 2007-2014 IOLA and Ole Laursen.
-Licensed under the MIT license.
-
-Consider a dataset like [["February", 34], ["March", 20], ...]. This plugin
-allows you to plot such a dataset directly.
-
-To enable it, you must specify mode: "categories" on the axis with the textual
-labels, e.g.
-
- $.plot("#placeholder", data, { xaxis: { mode: "categories" } });
-
-By default, the labels are ordered as they are met in the data series. If you
-need a different ordering, you can specify "categories" on the axis options
-and list the categories there:
-
- xaxis: {
- mode: "categories",
- categories: ["February", "March", "April"]
- }
-
-If you need to customize the distances between the categories, you can specify
-"categories" as an object mapping labels to values
-
- xaxis: {
- mode: "categories",
- categories: { "February": 1, "March": 3, "April": 4 }
- }
-
-If you don't specify all categories, the remaining categories will be numbered
-from the max value plus 1 (with a spacing of 1 between each).
-
-Internally, the plugin works by transforming the input data through an auto-
-generated mapping where the first category becomes 0, the second 1, etc.
-Hence, a point like ["February", 34] becomes [0, 34] internally in Flot (this
-is visible in hover and click events that return numbers rather than the
-category labels). The plugin also overrides the tick generator to spit out the
-categories as ticks instead of the values.
-
-If you need to map a value back to its label, the mapping is always accessible
-as "categories" on the axis object, e.g. plot.getAxes().xaxis.categories.
-
-*/
-
-(function ($) {
- var options = {
- xaxis: {
- categories: null
- },
- yaxis: {
- categories: null
- }
- };
-
- function processRawData(plot, series, data, datapoints) {
- // if categories are enabled, we need to disable
- // auto-transformation to numbers so the strings are intact
- // for later processing
-
- var xCategories = series.xaxis.options.mode == "categories",
- yCategories = series.yaxis.options.mode == "categories";
-
- if (!(xCategories || yCategories))
- return;
-
- var format = datapoints.format;
-
- if (!format) {
- // FIXME: auto-detection should really not be defined here
- var s = series;
- format = [];
- format.push({ x: true, number: true, required: true });
- format.push({ y: true, number: true, required: true });
-
- if (s.bars.show || (s.lines.show && s.lines.fill)) {
- var autoscale = !!((s.bars.show && s.bars.zero) || (s.lines.show && s.lines.zero));
- format.push({ y: true, number: true, required: false, defaultValue: 0, autoscale: autoscale });
- if (s.bars.horizontal) {
- delete format[format.length - 1].y;
- format[format.length - 1].x = true;
- }
- }
-
- datapoints.format = format;
- }
-
- for (var m = 0; m < format.length; ++m) {
- if (format[m].x && xCategories)
- format[m].number = false;
-
- if (format[m].y && yCategories)
- format[m].number = false;
- }
- }
-
- function getNextIndex(categories) {
- var index = -1;
-
- for (var v in categories)
- if (categories[v] > index)
- index = categories[v];
-
- return index + 1;
- }
-
- function categoriesTickGenerator(axis) {
- var res = [];
- for (var label in axis.categories) {
- var v = axis.categories[label];
- if (v >= axis.min && v <= axis.max)
- res.push([v, label]);
- }
-
- res.sort(function (a, b) { return a[0] - b[0]; });
-
- return res;
- }
-
- function setupCategoriesForAxis(series, axis, datapoints) {
- if (series[axis].options.mode != "categories")
- return;
-
- if (!series[axis].categories) {
- // parse options
- var c = {}, o = series[axis].options.categories || {};
- if ($.isArray(o)) {
- for (var i = 0; i < o.length; ++i)
- c[o[i]] = i;
- }
- else {
- for (var v in o)
- c[v] = o[v];
- }
-
- series[axis].categories = c;
- }
-
- // fix ticks
- if (!series[axis].options.ticks)
- series[axis].options.ticks = categoriesTickGenerator;
-
- transformPointsOnAxis(datapoints, axis, series[axis].categories);
- }
-
- function transformPointsOnAxis(datapoints, axis, categories) {
- // go through the points, transforming them
- var points = datapoints.points,
- ps = datapoints.pointsize,
- format = datapoints.format,
- formatColumn = axis.charAt(0),
- index = getNextIndex(categories);
-
- for (var i = 0; i < points.length; i += ps) {
- if (points[i] == null)
- continue;
-
- for (var m = 0; m < ps; ++m) {
- var val = points[i + m];
-
- if (val == null || !format[m][formatColumn])
- continue;
-
- if (!(val in categories)) {
- categories[val] = index;
- ++index;
- }
-
- points[i + m] = categories[val];
- }
- }
- }
-
- function processDatapoints(plot, series, datapoints) {
- setupCategoriesForAxis(series, "xaxis", datapoints);
- setupCategoriesForAxis(series, "yaxis", datapoints);
- }
-
- function init(plot) {
- plot.hooks.processRawData.push(processRawData);
- plot.hooks.processDatapoints.push(processDatapoints);
- }
-
- $.plot.plugins.push({
- init: init,
- options: options,
- name: 'categories',
- version: '1.0'
- });
-})(jQuery);
diff --git a/src/ceph/qa/workunits/erasure-code/jquery.flot.js b/src/ceph/qa/workunits/erasure-code/jquery.flot.js
deleted file mode 100644
index 39f3e4c..0000000
--- a/src/ceph/qa/workunits/erasure-code/jquery.flot.js
+++ /dev/null
@@ -1,3168 +0,0 @@
-/* Javascript plotting library for jQuery, version 0.8.3.
-
-Copyright (c) 2007-2014 IOLA and Ole Laursen.
-Licensed under the MIT license.
-
-*/
-
-// first an inline dependency, jquery.colorhelpers.js, we inline it here
-// for convenience
-
-/* Plugin for jQuery for working with colors.
- *
- * Version 1.1.
- *
- * Inspiration from jQuery color animation plugin by John Resig.
- *
- * Released under the MIT license by Ole Laursen, October 2009.
- *
- * Examples:
- *
- * $.color.parse("#fff").scale('rgb', 0.25).add('a', -0.5).toString()
- * var c = $.color.extract($("#mydiv"), 'background-color');
- * console.log(c.r, c.g, c.b, c.a);
- * $.color.make(100, 50, 25, 0.4).toString() // returns "rgba(100,50,25,0.4)"
- *
- * Note that .scale() and .add() return the same modified object
- * instead of making a new one.
- *
- * V. 1.1: Fix error handling so e.g. parsing an empty string does
- * produce a color rather than just crashing.
- */
-(function($){$.color={};$.color.make=function(r,g,b,a){var o={};o.r=r||0;o.g=g||0;o.b=b||0;o.a=a!=null?a:1;o.add=function(c,d){for(var i=0;i<c.length;++i)o[c.charAt(i)]+=d;return o.normalize()};o.scale=function(c,f){for(var i=0;i<c.length;++i)o[c.charAt(i)]*=f;return o.normalize()};o.toString=function(){if(o.a>=1){return"rgb("+[o.r,o.g,o.b].join(",")+")"}else{return"rgba("+[o.r,o.g,o.b,o.a].join(",")+")"}};o.normalize=function(){function clamp(min,value,max){return value<min?min:value>max?max:value}o.r=clamp(0,parseInt(o.r),255);o.g=clamp(0,parseInt(o.g),255);o.b=clamp(0,parseInt(o.b),255);o.a=clamp(0,o.a,1);return o};o.clone=function(){return $.color.make(o.r,o.b,o.g,o.a)};return o.normalize()};$.color.extract=function(elem,css){var c;do{c=elem.css(css).toLowerCase();if(c!=""&&c!="transparent")break;elem=elem.parent()}while(elem.length&&!$.nodeName(elem.get(0),"body"));if(c=="rgba(0, 0, 0, 0)")c="transparent";return $.color.parse(c)};$.color.parse=function(str){var res,m=$.color.make;if(res=/rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(str))return m(parseInt(res[1],10),parseInt(res[2],10),parseInt(res[3],10));if(res=/rgba\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(str))return m(parseInt(res[1],10),parseInt(res[2],10),parseInt(res[3],10),parseFloat(res[4]));if(res=/rgb\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*\)/.exec(str))return m(parseFloat(res[1])*2.55,parseFloat(res[2])*2.55,parseFloat(res[3])*2.55);if(res=/rgba\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(str))return m(parseFloat(res[1])*2.55,parseFloat(res[2])*2.55,parseFloat(res[3])*2.55,parseFloat(res[4]));if(res=/#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(str))return m(parseInt(res[1],16),parseInt(res[2],16),parseInt(res[3],16));if(res=/#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(str))return m(parseInt(res[1]+res[1],16),parseInt(res[2]+res[2],16),parseInt(res[3]+res[3],16));var name=$.trim(str).toLowerCase();if(name=="transparent")return m(255,255,255,0);else{res=lookupColors[name]||[0,0,0];return m(res[0],res[1],res[2])}};var lookupColors={aqua:[0,255,255],azure:[240,255,255],beige:[245,245,220],black:[0,0,0],blue:[0,0,255],brown:[165,42,42],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgrey:[169,169,169],darkgreen:[0,100,0],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkviolet:[148,0,211],fuchsia:[255,0,255],gold:[255,215,0],green:[0,128,0],indigo:[75,0,130],khaki:[240,230,140],lightblue:[173,216,230],lightcyan:[224,255,255],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightyellow:[255,255,224],lime:[0,255,0],magenta:[255,0,255],maroon:[128,0,0],navy:[0,0,128],olive:[128,128,0],orange:[255,165,0],pink:[255,192,203],purple:[128,0,128],violet:[128,0,128],red:[255,0,0],silver:[192,192,192],white:[255,255,255],yellow:[255,255,0]}})(jQuery);
-
-// the actual Flot code
-(function($) {
-
- // Cache the prototype hasOwnProperty for faster access
-
- var hasOwnProperty = Object.prototype.hasOwnProperty;
-
- // A shim to provide 'detach' to jQuery versions prior to 1.4. Using a DOM
- // operation produces the same effect as detach, i.e. removing the element
- // without touching its jQuery data.
-
- // Do not merge this into Flot 0.9, since it requires jQuery 1.4.4+.
-
- if (!$.fn.detach) {
- $.fn.detach = function() {
- return this.each(function() {
- if (this.parentNode) {
- this.parentNode.removeChild( this );
- }
- });
- };
- }
-
- ///////////////////////////////////////////////////////////////////////////
- // The Canvas object is a wrapper around an HTML5 <canvas> tag.
- //
- // @constructor
- // @param {string} cls List of classes to apply to the canvas.
- // @param {element} container Element onto which to append the canvas.
- //
- // Requiring a container is a little iffy, but unfortunately canvas
- // operations don't work unless the canvas is attached to the DOM.
-
- function Canvas(cls, container) {
-
- var element = container.children("." + cls)[0];
-
- if (element == null) {
-
- element = document.createElement("canvas");
- element.className = cls;
-
- $(element).css({ direction: "ltr", position: "absolute", left: 0, top: 0 })
- .appendTo(container);
-
- // If HTML5 Canvas isn't available, fall back to [Ex|Flash]canvas
-
- if (!element.getContext) {
- if (window.G_vmlCanvasManager) {
- element = window.G_vmlCanvasManager.initElement(element);
- } else {
- throw new Error("Canvas is not available. If you're using IE with a fall-back such as Excanvas, then there's either a mistake in your conditional include, or the page has no DOCTYPE and is rendering in Quirks Mode.");
- }
- }
- }
-
- this.element = element;
-
- var context = this.context = element.getContext("2d");
-
- // Determine the screen's ratio of physical to device-independent
- // pixels. This is the ratio between the canvas width that the browser
- // advertises and the number of pixels actually present in that space.
-
- // The iPhone 4, for example, has a device-independent width of 320px,
- // but its screen is actually 640px wide. It therefore has a pixel
- // ratio of 2, while most normal devices have a ratio of 1.
-
- var devicePixelRatio = window.devicePixelRatio || 1,
- backingStoreRatio =
- context.webkitBackingStorePixelRatio ||
- context.mozBackingStorePixelRatio ||
- context.msBackingStorePixelRatio ||
- context.oBackingStorePixelRatio ||
- context.backingStorePixelRatio || 1;
-
- this.pixelRatio = devicePixelRatio / backingStoreRatio;
-
- // Size the canvas to match the internal dimensions of its container
-
- this.resize(container.width(), container.height());
-
- // Collection of HTML div layers for text overlaid onto the canvas
-
- this.textContainer = null;
- this.text = {};
-
- // Cache of text fragments and metrics, so we can avoid expensively
- // re-calculating them when the plot is re-rendered in a loop.
-
- this._textCache = {};
- }
-
- // Resizes the canvas to the given dimensions.
- //
- // @param {number} width New width of the canvas, in pixels.
- // @param {number} height New height of the canvas, in pixels.
-
- Canvas.prototype.resize = function(width, height) {
-
- if (width <= 0 || height <= 0) {
- throw new Error("Invalid dimensions for plot, width = " + width + ", height = " + height);
- }
-
- var element = this.element,
- context = this.context,
- pixelRatio = this.pixelRatio;
-
- // Resize the canvas, increasing its density based on the display's
- // pixel ratio; basically giving it more pixels without increasing the
- // size of its element, to take advantage of the fact that retina
- // displays have that many more pixels in the same advertised space.
-
- // Resizing should reset the state (excanvas seems to be buggy though)
-
- if (this.width != width) {
- element.width = width * pixelRatio;
- element.style.width = width + "px";
- this.width = width;
- }
-
- if (this.height != height) {
- element.height = height * pixelRatio;
- element.style.height = height + "px";
- this.height = height;
- }
-
- // Save the context, so we can reset in case we get replotted. The
- // restore ensures that we're really back at the initial state, and
- // should be safe even if we haven't saved the initial state yet.
-
- context.restore();
- context.save();
-
- // Scale the coordinate space to match the display density; so even though we
- // may have twice as many pixels, we still want lines and other drawing to
- // appear at the same size; the extra pixels will just make them crisper.
-
- context.scale(pixelRatio, pixelRatio);
- };
-
- // Clears the entire canvas area, not including any overlaid HTML text
-
- Canvas.prototype.clear = function() {
- this.context.clearRect(0, 0, this.width, this.height);
- };
-
- // Finishes rendering the canvas, including managing the text overlay.
-
- Canvas.prototype.render = function() {
-
- var cache = this._textCache;
-
- // For each text layer, add elements marked as active that haven't
- // already been rendered, and remove those that are no longer active.
-
- for (var layerKey in cache) {
- if (hasOwnProperty.call(cache, layerKey)) {
-
- var layer = this.getTextLayer(layerKey),
- layerCache = cache[layerKey];
-
- layer.hide();
-
- for (var styleKey in layerCache) {
- if (hasOwnProperty.call(layerCache, styleKey)) {
- var styleCache = layerCache[styleKey];
- for (var key in styleCache) {
- if (hasOwnProperty.call(styleCache, key)) {
-
- var positions = styleCache[key].positions;
-
- for (var i = 0, position; position = positions[i]; i++) {
- if (position.active) {
- if (!position.rendered) {
- layer.append(position.element);
- position.rendered = true;
- }
- } else {
- positions.splice(i--, 1);
- if (position.rendered) {
- position.element.detach();
- }
- }
- }
-
- if (positions.length == 0) {
- delete styleCache[key];
- }
- }
- }
- }
- }
-
- layer.show();
- }
- }
- };
-
- // Creates (if necessary) and returns the text overlay container.
- //
- // @param {string} classes String of space-separated CSS classes used to
- // uniquely identify the text layer.
- // @return {object} The jQuery-wrapped text-layer div.
-
- Canvas.prototype.getTextLayer = function(classes) {
-
- var layer = this.text[classes];
-
- // Create the text layer if it doesn't exist
-
- if (layer == null) {
-
- // Create the text layer container, if it doesn't exist
-
- if (this.textContainer == null) {
- this.textContainer = $("<div class='flot-text'></div>")
- .css({
- position: "absolute",
- top: 0,
- left: 0,
- bottom: 0,
- right: 0,
- 'font-size': "smaller",
- color: "#545454"
- })
- .insertAfter(this.element);
- }
-
- layer = this.text[classes] = $("<div></div>")
- .addClass(classes)
- .css({
- position: "absolute",
- top: 0,
- left: 0,
- bottom: 0,
- right: 0
- })
- .appendTo(this.textContainer);
- }
-
- return layer;
- };
-
- // Creates (if necessary) and returns a text info object.
- //
- // The object looks like this:
- //
- // {
- // width: Width of the text's wrapper div.
- // height: Height of the text's wrapper div.
- // element: The jQuery-wrapped HTML div containing the text.
- // positions: Array of positions at which this text is drawn.
- // }
- //
- // The positions array contains objects that look like this:
- //
- // {
- // active: Flag indicating whether the text should be visible.
- // rendered: Flag indicating whether the text is currently visible.
- // element: The jQuery-wrapped HTML div containing the text.
- // x: X coordinate at which to draw the text.
- // y: Y coordinate at which to draw the text.
- // }
- //
- // Each position after the first receives a clone of the original element.
- //
- // The idea is that the width, height, and general 'identity' of the
- // text is constant no matter where it is placed; the placements are a
- // secondary property.
- //
- // Canvas maintains a cache of recently-used text info objects; getTextInfo
- // either returns the cached element or creates a new entry.
- //
- // @param {string} layer A string of space-separated CSS classes uniquely
- // identifying the layer containing this text.
- // @param {string} text Text string to retrieve info for.
- // @param {(string|object)=} font Either a string of space-separated CSS
- // classes or a font-spec object, defining the text's font and style.
- // @param {number=} angle Angle at which to rotate the text, in degrees.
- // Angle is currently unused; it will be implemented in the future.
- // @param {number=} width Maximum width of the text before it wraps.
- // @return {object} a text info object.
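- //
- // A sketch of a typical call (sizes are illustrative, since the real
- // values depend on the page's CSS; 'surface' stands for a Canvas
- // instance such as the plot's base canvas):
- //
- // var info = surface.getTextInfo("flot-x-axis", "100",
- // "flot-tick-label tickLabel");
- // // info -> { width: 20, height: 14, element: [div], positions: [] }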
-
- Canvas.prototype.getTextInfo = function(layer, text, font, angle, width) {
-
- var textStyle, layerCache, styleCache, info;
-
- // Cast the value to a string, in case we were given a number or such
-
- text = "" + text;
-
- // If the font is a font-spec object, generate a CSS font definition
-
- if (typeof font === "object") {
- textStyle = font.style + " " + font.variant + " " + font.weight + " " + font.size + "px/" + font.lineHeight + "px " + font.family;
- } else {
- textStyle = font;
- }
-
- // Retrieve (or create) the cache for the text's layer and styles
-
- layerCache = this._textCache[layer];
-
- if (layerCache == null) {
- layerCache = this._textCache[layer] = {};
- }
-
- styleCache = layerCache[textStyle];
-
- if (styleCache == null) {
- styleCache = layerCache[textStyle] = {};
- }
-
- info = styleCache[text];
-
- // If we can't find a matching element in our cache, create a new one
-
- if (info == null) {
-
- var element = $("<div></div>").html(text)
- .css({
- position: "absolute",
- 'max-width': width,
- top: -9999
- })
- .appendTo(this.getTextLayer(layer));
-
- if (typeof font === "object") {
- element.css({
- font: textStyle,
- color: font.color
- });
- } else if (typeof font === "string") {
- element.addClass(font);
- }
-
- info = styleCache[text] = {
- width: element.outerWidth(true),
- height: element.outerHeight(true),
- element: element,
- positions: []
- };
-
- element.detach();
- }
-
- return info;
- };
-
- // Adds a text string to the canvas text overlay.
- //
- // The text isn't drawn immediately; it is marked for rendering, which will
- // result in its addition to the canvas on the next render pass.
- //
- // @param {string} layer A string of space-separated CSS classes uniquely
- // identifying the layer containing this text.
- // @param {number} x X coordinate at which to draw the text.
- // @param {number} y Y coordinate at which to draw the text.
- // @param {string} text Text string to draw.
- // @param {(string|object)=} font Either a string of space-separated CSS
- // classes or a font-spec object, defining the text's font and style.
- // @param {number=} angle Angle at which to rotate the text, in degrees.
- // Angle is currently unused; it will be implemented in the future.
- // @param {number=} width Maximum width of the text before it wraps.
- // @param {string=} halign Horizontal alignment of the text; either "left",
- // "center" or "right".
- // @param {string=} valign Vertical alignment of the text; either "top",
- // "middle" or "bottom".
-
- Canvas.prototype.addText = function(layer, x, y, text, font, angle, width, halign, valign) {
-
- var info = this.getTextInfo(layer, text, font, angle, width),
- positions = info.positions;
-
- // Tweak the div's position to match the text's alignment
-
- if (halign == "center") {
- x -= info.width / 2;
- } else if (halign == "right") {
- x -= info.width;
- }
-
- if (valign == "middle") {
- y -= info.height / 2;
- } else if (valign == "bottom") {
- y -= info.height;
- }
-
- // Determine whether this text already exists at this position.
- // If so, mark it for inclusion in the next render pass.
-
- for (var i = 0, position; position = positions[i]; i++) {
- if (position.x == x && position.y == y) {
- position.active = true;
- return;
- }
- }
-
- // If the text doesn't exist at this position, create a new entry
-
- // For the very first position we'll re-use the original element,
- // while for subsequent ones we'll clone it.
-
- position = {
- active: true,
- rendered: false,
- element: positions.length ? info.element.clone() : info.element,
- x: x,
- y: y
- };
-
- positions.push(position);
-
- // Move the element to its final position within the container
-
- position.element.css({
- top: Math.round(y),
- left: Math.round(x),
- 'text-align': halign // In case the text wraps
- });
- };
-
- // Removes one or more text strings from the canvas text overlay.
- //
- // If no parameters are given, all text within the layer is removed.
- //
- // Note that the text is not immediately removed; it is simply marked as
- // inactive, which will result in its removal on the next render pass.
- // This avoids the performance penalty for 'clear and redraw' behavior,
- // where we potentially get rid of all text on a layer, but will likely
- // add back most or all of it later, as when redrawing axes, for example.
- //
- // @param {string} layer A string of space-separated CSS classes uniquely
- // identifying the layer containing this text.
- // @param {number=} x X coordinate of the text.
- // @param {number=} y Y coordinate of the text.
- // @param {string=} text Text string to remove.
- // @param {(string|object)=} font Either a string of space-separated CSS
- // classes or a font-spec object, defining the text's font and style.
- // @param {number=} angle Angle at which the text is rotated, in degrees.
- // Angle is currently unused; it will be implemented in the future.
-
- Canvas.prototype.removeText = function(layer, x, y, text, font, angle) {
- if (text == null) {
- var layerCache = this._textCache[layer];
- if (layerCache != null) {
- for (var styleKey in layerCache) {
- if (hasOwnProperty.call(layerCache, styleKey)) {
- var styleCache = layerCache[styleKey];
- for (var key in styleCache) {
- if (hasOwnProperty.call(styleCache, key)) {
- var positions = styleCache[key].positions;
- for (var i = 0, position; position = positions[i]; i++) {
- position.active = false;
- }
- }
- }
- }
- }
- }
- } else {
- var positions = this.getTextInfo(layer, text, font, angle).positions;
- for (var i = 0, position; position = positions[i]; i++) {
- if (position.x == x && position.y == y) {
- position.active = false;
- }
- }
- }
- };
-
- ///////////////////////////////////////////////////////////////////////////
- // The top-level container for the entire plot.
-
- function Plot(placeholder, data_, options_, plugins) {
- // data is in the form:
- // [ series1, series2 ... ]
- // where series is either just the data as [ [x1, y1], [x2, y2], ... ]
- // or { data: [ [x1, y1], [x2, y2], ... ], label: "some label", ... }
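- // e.g. (an illustrative mix of both forms):
- // [ [ [0, 3], [1, 5], [2, 4] ],
- // { data: [ [0, 12], [1, 16], [2, 11] ], label: "pressure" } ]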
-
- var series = [],
- options = {
- // the color theme used for graphs
- colors: ["#edc240", "#afd8f8", "#cb4b4b", "#4da74d", "#9440ed"],
- legend: {
- show: true,
- noColumns: 1, // number of columns in legend table
- labelFormatter: null, // fn: string -> string
- labelBoxBorderColor: "#ccc", // border color for the little label boxes
- container: null, // container (as jQuery object) to put legend in, null means default on top of graph
- position: "ne", // position of default legend container within plot
- margin: 5, // distance from grid edge to default legend container within plot
- backgroundColor: null, // null means auto-detect
- backgroundOpacity: 0.85, // set to 0 to avoid background
- sorted: null // default to no legend sorting
- },
- xaxis: {
- show: null, // null = auto-detect, true = always, false = never
- position: "bottom", // or "top"
- mode: null, // null or "time"
- font: null, // null (derived from CSS in placeholder) or object like { size: 11, lineHeight: 13, style: "italic", weight: "bold", family: "sans-serif", variant: "small-caps" }
- color: null, // base color, labels, ticks
- tickColor: null, // possibly different color of ticks, e.g. "rgba(0,0,0,0.15)"
- transform: null, // null or f: number -> number to transform axis
- inverseTransform: null, // if transform is set, this should be the inverse function
- min: null, // min. value to show, null means set automatically
- max: null, // max. value to show, null means set automatically
- autoscaleMargin: null, // margin in % to add if auto-setting min/max
- ticks: null, // either [1, 3] or [[1, "a"], 3] or (fn: axis info -> ticks) or app. number of ticks for auto-ticks
- tickFormatter: null, // fn: number -> string
- labelWidth: null, // size of tick labels in pixels
- labelHeight: null,
- reserveSpace: null, // whether to reserve space even if axis isn't shown
- tickLength: null, // size in pixels of ticks, or "full" for whole line
- alignTicksWithAxis: null, // axis number or null for no sync
- tickDecimals: null, // no. of decimals, null means auto
- tickSize: null, // number or [number, "unit"]
- minTickSize: null // number or [number, "unit"]
- },
- yaxis: {
- autoscaleMargin: 0.02,
- position: "left" // or "right"
- },
- xaxes: [],
- yaxes: [],
- series: {
- points: {
- show: false,
- radius: 3,
- lineWidth: 2, // in pixels
- fill: true,
- fillColor: "#ffffff",
- symbol: "circle" // or callback
- },
- lines: {
- // we don't put in show: false so we can see
- // whether lines were actively disabled
- lineWidth: 2, // in pixels
- fill: false,
- fillColor: null,
- steps: false
- // Omit 'zero', so we can later default its value to
- // match that of the 'fill' option.
- },
- bars: {
- show: false,
- lineWidth: 2, // in pixels
- barWidth: 1, // in units of the x axis
- fill: true,
- fillColor: null,
- align: "left", // "left", "right", or "center"
- horizontal: false,
- zero: true
- },
- shadowSize: 3,
- highlightColor: null
- },
- grid: {
- show: true,
- aboveData: false,
- color: "#545454", // primary color used for outline and labels
- backgroundColor: null, // null for transparent, else color
- borderColor: null, // set if different from the grid color
- tickColor: null, // color for the ticks, e.g. "rgba(0,0,0,0.15)"
- margin: 0, // distance from the canvas edge to the grid
- labelMargin: 5, // in pixels
- axisMargin: 8, // in pixels
- borderWidth: 2, // in pixels
- minBorderMargin: null, // in pixels, null means taken from points radius
- markings: null, // array of ranges or fn: axes -> array of ranges
- markingsColor: "#f4f4f4",
- markingsLineWidth: 2,
- // interactive stuff
- clickable: false,
- hoverable: false,
- autoHighlight: true, // highlight in case mouse is near
- mouseActiveRadius: 10 // how far the mouse can be away to activate an item
- },
- interaction: {
- redrawOverlayInterval: 1000/60 // time between updates, -1 means in same flow
- },
- hooks: {}
- },
- surface = null, // the canvas for the plot itself
- overlay = null, // canvas for interactive stuff on top of plot
- eventHolder = null, // jQuery object that events should be bound to
- ctx = null, octx = null,
- xaxes = [], yaxes = [],
- plotOffset = { left: 0, right: 0, top: 0, bottom: 0},
- plotWidth = 0, plotHeight = 0,
- hooks = {
- processOptions: [],
- processRawData: [],
- processDatapoints: [],
- processOffset: [],
- drawBackground: [],
- drawSeries: [],
- draw: [],
- bindEvents: [],
- drawOverlay: [],
- shutdown: []
- },
- plot = this;
-
- // public functions
- plot.setData = setData;
- plot.setupGrid = setupGrid;
- plot.draw = draw;
- plot.getPlaceholder = function() { return placeholder; };
- plot.getCanvas = function() { return surface.element; };
- plot.getPlotOffset = function() { return plotOffset; };
- plot.width = function () { return plotWidth; };
- plot.height = function () { return plotHeight; };
- plot.offset = function () {
- var o = eventHolder.offset();
- o.left += plotOffset.left;
- o.top += plotOffset.top;
- return o;
- };
- plot.getData = function () { return series; };
- plot.getAxes = function () {
- var res = {}, i;
- $.each(xaxes.concat(yaxes), function (_, axis) {
- if (axis)
- res[axis.direction + (axis.n != 1 ? axis.n : "") + "axis"] = axis;
- });
- return res;
- };
- plot.getXAxes = function () { return xaxes; };
- plot.getYAxes = function () { return yaxes; };
- plot.c2p = canvasToAxisCoords;
- plot.p2c = axisToCanvasCoords;
- plot.getOptions = function () { return options; };
- plot.highlight = highlight;
- plot.unhighlight = unhighlight;
- plot.triggerRedrawOverlay = triggerRedrawOverlay;
- plot.pointOffset = function(point) {
- return {
- left: parseInt(xaxes[axisNumber(point, "x") - 1].p2c(+point.x) + plotOffset.left, 10),
- top: parseInt(yaxes[axisNumber(point, "y") - 1].p2c(+point.y) + plotOffset.top, 10)
- };
- };
- plot.shutdown = shutdown;
- plot.destroy = function () {
- shutdown();
- placeholder.removeData("plot").empty();
-
- series = [];
- options = null;
- surface = null;
- overlay = null;
- eventHolder = null;
- ctx = null;
- octx = null;
- xaxes = [];
- yaxes = [];
- hooks = null;
- highlights = [];
- plot = null;
- };
- plot.resize = function () {
- var width = placeholder.width(),
- height = placeholder.height();
- surface.resize(width, height);
- overlay.resize(width, height);
- };
-
- // public attributes
- plot.hooks = hooks;
-
- // initialize
- initPlugins(plot);
- parseOptions(options_);
- setupCanvases();
- setData(data_);
- setupGrid();
- draw();
- bindEvents();
-
-
- function executeHooks(hook, args) {
- args = [plot].concat(args);
- for (var i = 0; i < hook.length; ++i)
- hook[i].apply(this, args);
- }
-
- function initPlugins() {
-
- // References to key classes, allowing plugins to modify them
-
- var classes = {
- Canvas: Canvas
- };
-
- for (var i = 0; i < plugins.length; ++i) {
- var p = plugins[i];
- p.init(plot, classes);
- if (p.options)
- $.extend(true, options, p.options);
- }
- }
-
- function parseOptions(opts) {
-
- $.extend(true, options, opts);
-
- // $.extend merges arrays, rather than replacing them. When fewer
- // colors are provided than the size of the default palette, we
- // end up with those colors plus the remaining defaults, which is
- // not expected behavior; avoid it by replacing them here.
-
- if (opts && opts.colors) {
- options.colors = opts.colors;
- }
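-
- // For instance (hypothetical values): with the default five-color
- // palette, passing { colors: ["#f00", "#0f0"] } through $.extend
- // alone would leave ["#f00", "#0f0", "#cb4b4b", "#4da74d", "#9440ed"];
- // the assignment above restores the intended ["#f00", "#0f0"].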
-
- if (options.xaxis.color == null)
- options.xaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString();
- if (options.yaxis.color == null)
- options.yaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString();
-
- if (options.xaxis.tickColor == null) // grid.tickColor for back-compatibility
- options.xaxis.tickColor = options.grid.tickColor || options.xaxis.color;
- if (options.yaxis.tickColor == null) // grid.tickColor for back-compatibility
- options.yaxis.tickColor = options.grid.tickColor || options.yaxis.color;
-
- if (options.grid.borderColor == null)
- options.grid.borderColor = options.grid.color;
- if (options.grid.tickColor == null)
- options.grid.tickColor = $.color.parse(options.grid.color).scale('a', 0.22).toString();
-
- // Fill in defaults for axis options, including any unspecified
- // font-spec fields, if a font-spec was provided.
-
- // If no x/y axis options were provided, create one of each anyway,
- // since the rest of the code assumes that they exist.
-
- var i, axisOptions, axisCount,
- fontSize = placeholder.css("font-size"),
- fontSizeDefault = fontSize ? +fontSize.replace("px", "") : 13,
- fontDefaults = {
- style: placeholder.css("font-style"),
- size: Math.round(0.8 * fontSizeDefault),
- variant: placeholder.css("font-variant"),
- weight: placeholder.css("font-weight"),
- family: placeholder.css("font-family")
- };
-
- axisCount = options.xaxes.length || 1;
- for (i = 0; i < axisCount; ++i) {
-
- axisOptions = options.xaxes[i];
- if (axisOptions && !axisOptions.tickColor) {
- axisOptions.tickColor = axisOptions.color;
- }
-
- axisOptions = $.extend(true, {}, options.xaxis, axisOptions);
- options.xaxes[i] = axisOptions;
-
- if (axisOptions.font) {
- axisOptions.font = $.extend({}, fontDefaults, axisOptions.font);
- if (!axisOptions.font.color) {
- axisOptions.font.color = axisOptions.color;
- }
- if (!axisOptions.font.lineHeight) {
- axisOptions.font.lineHeight = Math.round(axisOptions.font.size * 1.15);
- }
- }
- }
-
- axisCount = options.yaxes.length || 1;
- for (i = 0; i < axisCount; ++i) {
-
- axisOptions = options.yaxes[i];
- if (axisOptions && !axisOptions.tickColor) {
- axisOptions.tickColor = axisOptions.color;
- }
-
- axisOptions = $.extend(true, {}, options.yaxis, axisOptions);
- options.yaxes[i] = axisOptions;
-
- if (axisOptions.font) {
- axisOptions.font = $.extend({}, fontDefaults, axisOptions.font);
- if (!axisOptions.font.color) {
- axisOptions.font.color = axisOptions.color;
- }
- if (!axisOptions.font.lineHeight) {
- axisOptions.font.lineHeight = Math.round(axisOptions.font.size * 1.15);
- }
- }
- }
-
- // backwards compatibility, to be removed in future
- if (options.xaxis.noTicks && options.xaxis.ticks == null)
- options.xaxis.ticks = options.xaxis.noTicks;
- if (options.yaxis.noTicks && options.yaxis.ticks == null)
- options.yaxis.ticks = options.yaxis.noTicks;
- if (options.x2axis) {
- options.xaxes[1] = $.extend(true, {}, options.xaxis, options.x2axis);
- options.xaxes[1].position = "top";
- // Override the inherited min/max to allow the axis to auto-scale
- if (options.x2axis.min == null) {
- options.xaxes[1].min = null;
- }
- if (options.x2axis.max == null) {
- options.xaxes[1].max = null;
- }
- }
- if (options.y2axis) {
- options.yaxes[1] = $.extend(true, {}, options.yaxis, options.y2axis);
- options.yaxes[1].position = "right";
- // Override the inherited min/max to allow the axis to auto-scale
- if (options.y2axis.min == null) {
- options.yaxes[1].min = null;
- }
- if (options.y2axis.max == null) {
- options.yaxes[1].max = null;
- }
- }
- if (options.grid.coloredAreas)
- options.grid.markings = options.grid.coloredAreas;
- if (options.grid.coloredAreasColor)
- options.grid.markingsColor = options.grid.coloredAreasColor;
- if (options.lines)
- $.extend(true, options.series.lines, options.lines);
- if (options.points)
- $.extend(true, options.series.points, options.points);
- if (options.bars)
- $.extend(true, options.series.bars, options.bars);
- if (options.shadowSize != null)
- options.series.shadowSize = options.shadowSize;
- if (options.highlightColor != null)
- options.series.highlightColor = options.highlightColor;
-
- // save options on axes for future reference
- for (i = 0; i < options.xaxes.length; ++i)
- getOrCreateAxis(xaxes, i + 1).options = options.xaxes[i];
- for (i = 0; i < options.yaxes.length; ++i)
- getOrCreateAxis(yaxes, i + 1).options = options.yaxes[i];
-
- // add hooks from options
- for (var n in hooks)
- if (options.hooks[n] && options.hooks[n].length)
- hooks[n] = hooks[n].concat(options.hooks[n]);
-
- executeHooks(hooks.processOptions, [options]);
- }
-
- function setData(d) {
- series = parseData(d);
- fillInSeriesOptions();
- processData();
- }
-
- function parseData(d) {
- var res = [];
- for (var i = 0; i < d.length; ++i) {
- var s = $.extend(true, {}, options.series);
-
- if (d[i].data != null) {
- s.data = d[i].data; // move the data instead of deep-copy
- delete d[i].data;
-
- $.extend(true, s, d[i]);
-
- d[i].data = s.data;
- }
- else
- s.data = d[i];
- res.push(s);
- }
-
- return res;
- }
-
- function axisNumber(obj, coord) {
- var a = obj[coord + "axis"];
- if (typeof a == "object") // if we got a real axis, extract number
- a = a.n;
- if (typeof a != "number")
- a = 1; // default to first axis
- return a;
- }
-
- function allAxes() {
- // return flat array without annoying null entries
- return $.grep(xaxes.concat(yaxes), function (a) { return a; });
- }
-
- function canvasToAxisCoords(pos) {
- // return an object with x/y corresponding to all used axes
- var res = {}, i, axis;
- for (i = 0; i < xaxes.length; ++i) {
- axis = xaxes[i];
- if (axis && axis.used)
- res["x" + axis.n] = axis.c2p(pos.left);
- }
-
- for (i = 0; i < yaxes.length; ++i) {
- axis = yaxes[i];
- if (axis && axis.used)
- res["y" + axis.n] = axis.c2p(pos.top);
- }
-
- if (res.x1 !== undefined)
- res.x = res.x1;
- if (res.y1 !== undefined)
- res.y = res.y1;
-
- return res;
- }
-
- function axisToCanvasCoords(pos) {
- // get canvas coords from the first pair of x/y found in pos
- var res = {}, i, axis, key;
-
- for (i = 0; i < xaxes.length; ++i) {
- axis = xaxes[i];
- if (axis && axis.used) {
- key = "x" + axis.n;
- if (pos[key] == null && axis.n == 1)
- key = "x";
-
- if (pos[key] != null) {
- res.left = axis.p2c(pos[key]);
- break;
- }
- }
- }
-
- for (i = 0; i < yaxes.length; ++i) {
- axis = yaxes[i];
- if (axis && axis.used) {
- key = "y" + axis.n;
- if (pos[key] == null && axis.n == 1)
- key = "y";
-
- if (pos[key] != null) {
- res.top = axis.p2c(pos[key]);
- break;
- }
- }
- }
-
- return res;
- }
-
- function getOrCreateAxis(axes, number) {
- if (!axes[number - 1])
- axes[number - 1] = {
- n: number, // save the number for future reference
- direction: axes == xaxes ? "x" : "y",
- options: $.extend(true, {}, axes == xaxes ? options.xaxis : options.yaxis)
- };
-
- return axes[number - 1];
- }
-
- function fillInSeriesOptions() {
-
- var neededColors = series.length, maxIndex = -1, i;
-
- // Subtract the number of series that already have fixed colors or
- // color indexes from the number that we still need to generate.
-
- for (i = 0; i < series.length; ++i) {
- var sc = series[i].color;
- if (sc != null) {
- neededColors--;
- if (typeof sc == "number" && sc > maxIndex) {
- maxIndex = sc;
- }
- }
- }
-
- // If any of the series have fixed color indexes, then we need to
- // generate at least as many colors as the highest index.
-
- if (neededColors <= maxIndex) {
- neededColors = maxIndex + 1;
- }
-
- // Generate all the colors, using first the option colors and then
- // variations on those colors once they're exhausted.
-
- var c, colors = [], colorPool = options.colors,
- colorPoolSize = colorPool.length, variation = 0;
-
- for (i = 0; i < neededColors; i++) {
-
- c = $.color.parse(colorPool[i % colorPoolSize] || "#666");
-
- // Each time we exhaust the colors in the pool we adjust
- // a scaling factor used to produce more variations on
- // those colors. The factor alternates negative/positive
- // to produce lighter/darker colors.
-
- // Reset the variation after every few cycles, or else
- // it will end up producing only white or black colors.
-
- if (i % colorPoolSize == 0 && i) {
- if (variation >= 0) {
- if (variation < 0.5) {
- variation = -variation - 0.2;
- } else variation = 0;
- } else variation = -variation;
- }
-
- colors[i] = c.scale('rgb', 1 + variation);
- }
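-
- // An illustrative trace of the loop above: with the default
- // five-color palette, series 5-9 are scaled by 0.8 (darker),
- // 10-14 by 1.2 (lighter), then 0.6, 1.4, 0.4 and 1.6 for the
- // following cycles, before the reset returns series 35-39 to 1.0.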
-
- // Finalize the series options, filling in their colors
-
- var colori = 0, s;
- for (i = 0; i < series.length; ++i) {
- s = series[i];
-
- // assign colors
- if (s.color == null) {
- s.color = colors[colori].toString();
- ++colori;
- }
- else if (typeof s.color == "number")
- s.color = colors[s.color].toString();
-
- // turn on lines automatically in case nothing is set
- if (s.lines.show == null) {
- var v, show = true;
- for (v in s)
- if (s[v] && s[v].show) {
- show = false;
- break;
- }
- if (show)
- s.lines.show = true;
- }
-
- // If nothing was provided for lines.zero, default it to match
- // lines.fill, since areas by default should extend to zero.
-
- if (s.lines.zero == null) {
- s.lines.zero = !!s.lines.fill;
- }
-
- // setup axes
- s.xaxis = getOrCreateAxis(xaxes, axisNumber(s, "x"));
- s.yaxis = getOrCreateAxis(yaxes, axisNumber(s, "y"));
- }
- }
-
- function processData() {
- var topSentry = Number.POSITIVE_INFINITY,
- bottomSentry = Number.NEGATIVE_INFINITY,
- fakeInfinity = Number.MAX_VALUE,
- i, j, k, m, length,
- s, points, ps, x, y, axis, val, f, p,
- data, format;
-
- function updateAxis(axis, min, max) {
- if (min < axis.datamin && min != -fakeInfinity)
- axis.datamin = min;
- if (max > axis.datamax && max != fakeInfinity)
- axis.datamax = max;
- }
-
- $.each(allAxes(), function (_, axis) {
- // init axis
- axis.datamin = topSentry;
- axis.datamax = bottomSentry;
- axis.used = false;
- });
-
- for (i = 0; i < series.length; ++i) {
- s = series[i];
- s.datapoints = { points: [] };
-
- executeHooks(hooks.processRawData, [ s, s.data, s.datapoints ]);
- }
-
- // first pass: clean and copy data
- for (i = 0; i < series.length; ++i) {
- s = series[i];
-
- data = s.data;
- format = s.datapoints.format;
-
- if (!format) {
- format = [];
- // find out how to copy
- format.push({ x: true, number: true, required: true });
- format.push({ y: true, number: true, required: true });
-
- if (s.bars.show || (s.lines.show && s.lines.fill)) {
- var autoscale = !!((s.bars.show && s.bars.zero) || (s.lines.show && s.lines.zero));
- format.push({ y: true, number: true, required: false, defaultValue: 0, autoscale: autoscale });
- if (s.bars.horizontal) {
- delete format[format.length - 1].y;
- format[format.length - 1].x = true;
- }
- }
-
- s.datapoints.format = format;
- }
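-
- // Illustrative summary of the branch above: a plain line series
- // gets just the two required slots, so pointsize is 2 and a point
- // is stored as [x, y]. Bars and filled lines append a third slot
- // with defaultValue 0, giving [x, y, 0]; horizontal bars mark that
- // slot as an x value instead.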
-
- if (s.datapoints.pointsize != null)
- continue; // already filled in
-
- s.datapoints.pointsize = format.length;
-
- ps = s.datapoints.pointsize;
- points = s.datapoints.points;
-
- var insertSteps = s.lines.show && s.lines.steps;
- s.xaxis.used = s.yaxis.used = true;
-
- for (j = k = 0; j < data.length; ++j, k += ps) {
- p = data[j];
-
- var nullify = p == null;
- if (!nullify) {
- for (m = 0; m < ps; ++m) {
- val = p[m];
- f = format[m];
-
- if (f) {
- if (f.number && val != null) {
- val = +val; // convert to number
- if (isNaN(val))
- val = null;
- else if (val == Infinity)
- val = fakeInfinity;
- else if (val == -Infinity)
- val = -fakeInfinity;
- }
-
- if (val == null) {
- if (f.required)
- nullify = true;
-
- if (f.defaultValue != null)
- val = f.defaultValue;
- }
- }
-
- points[k + m] = val;
- }
- }
-
- if (nullify) {
- for (m = 0; m < ps; ++m) {
- val = points[k + m];
- if (val != null) {
- f = format[m];
- // extract min/max info
- if (f.autoscale !== false) {
- if (f.x) {
- updateAxis(s.xaxis, val, val);
- }
- if (f.y) {
- updateAxis(s.yaxis, val, val);
- }
- }
- }
- points[k + m] = null;
- }
- }
- else {
- // a little bit of line-specific stuff that
- // perhaps shouldn't be here, but lacking
- // better means...
- if (insertSteps && k > 0
- && points[k - ps] != null
- && points[k - ps] != points[k]
- && points[k - ps + 1] != points[k + 1]) {
- // copy the point to make room for a middle point
- for (m = 0; m < ps; ++m)
- points[k + ps + m] = points[k + m];
-
- // middle point has same y
- points[k + 1] = points[k - ps + 1];
-
- // we've added a point, better reflect that
- k += ps;
- }
- }
- }
- }
-
- // give the hooks a chance to run
- for (i = 0; i < series.length; ++i) {
- s = series[i];
-
- executeHooks(hooks.processDatapoints, [ s, s.datapoints]);
- }
-
- // second pass: find datamax/datamin for auto-scaling
- for (i = 0; i < series.length; ++i) {
- s = series[i];
- points = s.datapoints.points;
- ps = s.datapoints.pointsize;
- format = s.datapoints.format;
-
- var xmin = topSentry, ymin = topSentry,
- xmax = bottomSentry, ymax = bottomSentry;
-
- for (j = 0; j < points.length; j += ps) {
- if (points[j] == null)
- continue;
-
- for (m = 0; m < ps; ++m) {
- val = points[j + m];
- f = format[m];
- if (!f || f.autoscale === false || val == fakeInfinity || val == -fakeInfinity)
- continue;
-
- if (f.x) {
- if (val < xmin)
- xmin = val;
- if (val > xmax)
- xmax = val;
- }
- if (f.y) {
- if (val < ymin)
- ymin = val;
- if (val > ymax)
- ymax = val;
- }
- }
- }
-
- if (s.bars.show) {
- // make sure we've got room for the bar on the dancing floor
- var delta;
-
- switch (s.bars.align) {
- case "left":
- delta = 0;
- break;
- case "right":
- delta = -s.bars.barWidth;
- break;
- default:
- delta = -s.bars.barWidth / 2;
- }
-
- if (s.bars.horizontal) {
- ymin += delta;
- ymax += delta + s.bars.barWidth;
- }
- else {
- xmin += delta;
- xmax += delta + s.bars.barWidth;
- }
- }
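-
- // E.g. (illustrative numbers): with align "center" and barWidth 1,
- // vertical bars over x values spanning [0, 10] widen the axis data
- // range to [-0.5, 10.5] so the outermost bars are not clipped.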
-
- updateAxis(s.xaxis, xmin, xmax);
- updateAxis(s.yaxis, ymin, ymax);
- }
-
- $.each(allAxes(), function (_, axis) {
- if (axis.datamin == topSentry)
- axis.datamin = null;
- if (axis.datamax == bottomSentry)
- axis.datamax = null;
- });
- }
-
- function setupCanvases() {
-
- // Make sure the placeholder is clear of everything except canvases
- // from a previous plot in this container that we'll try to re-use.
-
- placeholder.css("padding", 0) // padding messes up the positioning
- .children().filter(function(){
- return !$(this).hasClass("flot-overlay") && !$(this).hasClass('flot-base');
- }).remove();
-
- if (placeholder.css("position") == 'static')
- placeholder.css("position", "relative"); // for positioning labels and overlay
-
- surface = new Canvas("flot-base", placeholder);
- overlay = new Canvas("flot-overlay", placeholder); // overlay canvas for interactive features
-
- ctx = surface.context;
- octx = overlay.context;
-
- // define which element we're listening for events on
- eventHolder = $(overlay.element).unbind();
-
- // If we're re-using a plot object, shut down the old one
-
- var existing = placeholder.data("plot");
-
- if (existing) {
- existing.shutdown();
- overlay.clear();
- }
-
- // save in case we get replotted
- placeholder.data("plot", plot);
- }
-
- function bindEvents() {
- // bind events
- if (options.grid.hoverable) {
- eventHolder.mousemove(onMouseMove);
-
- // Use bind, rather than .mouseleave, because we officially
- // still support jQuery 1.2.6, which doesn't define a shortcut
- // for mouseenter or mouseleave. This was a bug/oversight that
- // was fixed somewhere around 1.3.x. We can return to using
- // .mouseleave when we drop support for 1.2.6.
-
- eventHolder.bind("mouseleave", onMouseLeave);
- }
-
- if (options.grid.clickable)
- eventHolder.click(onClick);
-
- executeHooks(hooks.bindEvents, [eventHolder]);
- }
-
- function shutdown() {
- if (redrawTimeout)
- clearTimeout(redrawTimeout);
-
- eventHolder.unbind("mousemove", onMouseMove);
- eventHolder.unbind("mouseleave", onMouseLeave);
- eventHolder.unbind("click", onClick);
-
- executeHooks(hooks.shutdown, [eventHolder]);
- }
-
- function setTransformationHelpers(axis) {
- // set helper functions on the axis, assumes plot area
- // has been computed already
-
- function identity(x) { return x; }
-
- var s, m, t = axis.options.transform || identity,
- it = axis.options.inverseTransform;
-
- // precompute how much the axis is scaling a point
- // in canvas space
- if (axis.direction == "x") {
- s = axis.scale = plotWidth / Math.abs(t(axis.max) - t(axis.min));
- m = Math.min(t(axis.max), t(axis.min));
- }
- else {
- s = axis.scale = plotHeight / Math.abs(t(axis.max) - t(axis.min));
- s = -s;
- m = Math.max(t(axis.max), t(axis.min));
- }
-
- // data point to canvas coordinate
- if (t == identity) // slight optimization
- axis.p2c = function (p) { return (p - m) * s; };
- else
- axis.p2c = function (p) { return (t(p) - m) * s; };
- // canvas coordinate to data point
- if (!it)
- axis.c2p = function (c) { return m + c / s; };
- else
- axis.c2p = function (c) { return it(m + c / s); };
- }
-
- function measureTickLabels(axis) {
-
- var opts = axis.options,
- ticks = axis.ticks || [],
- labelWidth = opts.labelWidth || 0,
- labelHeight = opts.labelHeight || 0,
- maxWidth = labelWidth || (axis.direction == "x" ? Math.floor(surface.width / (ticks.length || 1)) : null),
- legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis",
- layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles,
- font = opts.font || "flot-tick-label tickLabel";
-
- for (var i = 0; i < ticks.length; ++i) {
-
- var t = ticks[i];
-
- if (!t.label)
- continue;
-
- var info = surface.getTextInfo(layer, t.label, font, null, maxWidth);
-
- labelWidth = Math.max(labelWidth, info.width);
- labelHeight = Math.max(labelHeight, info.height);
- }
-
- axis.labelWidth = opts.labelWidth || labelWidth;
- axis.labelHeight = opts.labelHeight || labelHeight;
- }
-
- function allocateAxisBoxFirstPhase(axis) {
- // find the bounding box of the axis by looking at label
- // widths/heights and ticks, making room by diminishing the
- // plotOffset; this first phase only looks at one
- // dimension per axis, since the other dimension depends on the
- // other axes and so will have to wait
-
- var lw = axis.labelWidth,
- lh = axis.labelHeight,
- pos = axis.options.position,
- isXAxis = axis.direction === "x",
- tickLength = axis.options.tickLength,
- axisMargin = options.grid.axisMargin,
- padding = options.grid.labelMargin,
- innermost = true,
- outermost = true,
- first = true,
- found = false;
-
- // Determine the axis's position in its direction and on its side
-
- $.each(isXAxis ? xaxes : yaxes, function(i, a) {
- if (a && (a.show || a.reserveSpace)) {
- if (a === axis) {
- found = true;
- } else if (a.options.position === pos) {
- if (found) {
- outermost = false;
- } else {
- innermost = false;
- }
- }
- if (!found) {
- first = false;
- }
- }
- });
-
- // The outermost axis on each side has no margin
-
- if (outermost) {
- axisMargin = 0;
- }
-
- // The ticks for the first axis in each direction stretch across
-
- if (tickLength == null) {
- tickLength = first ? "full" : 5;
- }
-
- if (!isNaN(+tickLength))
- padding += +tickLength;
-
- if (isXAxis) {
- lh += padding;
-
- if (pos == "bottom") {
- plotOffset.bottom += lh + axisMargin;
- axis.box = { top: surface.height - plotOffset.bottom, height: lh };
- }
- else {
- axis.box = { top: plotOffset.top + axisMargin, height: lh };
- plotOffset.top += lh + axisMargin;
- }
- }
- else {
- lw += padding;
-
- if (pos == "left") {
- axis.box = { left: plotOffset.left + axisMargin, width: lw };
- plotOffset.left += lw + axisMargin;
- }
- else {
- plotOffset.right += lw + axisMargin;
- axis.box = { left: surface.width - plotOffset.right, width: lw };
- }
- }
-
- // save for future reference
- axis.position = pos;
- axis.tickLength = tickLength;
- axis.box.padding = padding;
- axis.innermost = innermost;
- }
-
- function allocateAxisBoxSecondPhase(axis) {
- // now that all axis boxes have been placed in one
- // dimension, we can set the remaining dimension coordinates
- if (axis.direction == "x") {
- axis.box.left = plotOffset.left - axis.labelWidth / 2;
- axis.box.width = surface.width - plotOffset.left - plotOffset.right + axis.labelWidth;
- }
- else {
- axis.box.top = plotOffset.top - axis.labelHeight / 2;
- axis.box.height = surface.height - plotOffset.bottom - plotOffset.top + axis.labelHeight;
- }
- }
-
- function adjustLayoutForThingsStickingOut() {
- // possibly adjust plot offset to ensure everything stays
- // inside the canvas and isn't clipped off
-
- var minMargin = options.grid.minBorderMargin,
- axis, i;
-
- // check stuff from the plot (FIXME: this should just read
- // a value from the series, otherwise it's impossible to
- // customize)
- if (minMargin == null) {
- minMargin = 0;
- for (i = 0; i < series.length; ++i)
- minMargin = Math.max(minMargin, 2 * (series[i].points.radius + series[i].points.lineWidth/2));
- }
-
- var margins = {
- left: minMargin,
- right: minMargin,
- top: minMargin,
- bottom: minMargin
- };
-
- // check axis labels; note we don't check the actual
- // labels but instead use the overall width/height, so the
- // layout doesn't jump around as much between replots
- $.each(allAxes(), function (_, axis) {
- if (axis.reserveSpace && axis.ticks && axis.ticks.length) {
- if (axis.direction === "x") {
- margins.left = Math.max(margins.left, axis.labelWidth / 2);
- margins.right = Math.max(margins.right, axis.labelWidth / 2);
- } else {
- margins.bottom = Math.max(margins.bottom, axis.labelHeight / 2);
- margins.top = Math.max(margins.top, axis.labelHeight / 2);
- }
- }
- });
-
- plotOffset.left = Math.ceil(Math.max(margins.left, plotOffset.left));
- plotOffset.right = Math.ceil(Math.max(margins.right, plotOffset.right));
- plotOffset.top = Math.ceil(Math.max(margins.top, plotOffset.top));
- plotOffset.bottom = Math.ceil(Math.max(margins.bottom, plotOffset.bottom));
- }
-
- function setupGrid() {
- var i, axes = allAxes(), showGrid = options.grid.show;
-
- // Initialize the plot's offset from the edge of the canvas
-
- for (var a in plotOffset) {
- var margin = options.grid.margin || 0;
- plotOffset[a] = typeof margin == "number" ? margin : margin[a] || 0;
- }
-
- executeHooks(hooks.processOffset, [plotOffset]);
-
- // If the grid is visible, add its border width to the offset
-
- for (var a in plotOffset) {
- if(typeof(options.grid.borderWidth) == "object") {
- plotOffset[a] += showGrid ? options.grid.borderWidth[a] : 0;
- }
- else {
- plotOffset[a] += showGrid ? options.grid.borderWidth : 0;
- }
- }
-
- $.each(axes, function (_, axis) {
- var axisOpts = axis.options;
- axis.show = axisOpts.show == null ? axis.used : axisOpts.show;
- axis.reserveSpace = axisOpts.reserveSpace == null ? axis.show : axisOpts.reserveSpace;
- setRange(axis);
- });
-
- if (showGrid) {
-
- var allocatedAxes = $.grep(axes, function (axis) {
- return axis.show || axis.reserveSpace;
- });
-
- $.each(allocatedAxes, function (_, axis) {
- // make the ticks
- setupTickGeneration(axis);
- setTicks(axis);
- snapRangeToTicks(axis, axis.ticks);
- // find labelWidth/Height for axis
- measureTickLabels(axis);
- });
-
- // with all dimensions calculated, we can compute the
- // axis bounding boxes, starting from the outside
- // (reverse order)
- for (i = allocatedAxes.length - 1; i >= 0; --i)
- allocateAxisBoxFirstPhase(allocatedAxes[i]);
-
- // make sure we've got enough space for things that
- // might stick out
- adjustLayoutForThingsStickingOut();
-
- $.each(allocatedAxes, function (_, axis) {
- allocateAxisBoxSecondPhase(axis);
- });
- }
-
- plotWidth = surface.width - plotOffset.left - plotOffset.right;
- plotHeight = surface.height - plotOffset.bottom - plotOffset.top;
-
- // now that we've got the proper plot dimensions, we can compute the scaling
- $.each(axes, function (_, axis) {
- setTransformationHelpers(axis);
- });
-
- if (showGrid) {
- drawAxisLabels();
- }
-
- insertLegend();
- }
-
- function setRange(axis) {
- var opts = axis.options,
- min = +(opts.min != null ? opts.min : axis.datamin),
- max = +(opts.max != null ? opts.max : axis.datamax),
- delta = max - min;
-
- if (delta == 0.0) {
- // degenerate case
- var widen = max == 0 ? 1 : 0.01;
-
- if (opts.min == null)
- min -= widen;
- // always widen max if we couldn't widen min to ensure we
- // don't fall into min == max which doesn't work
- if (opts.max == null || opts.min != null)
- max += widen;
- }
- else {
- // consider autoscaling
- var margin = opts.autoscaleMargin;
- if (margin != null) {
- if (opts.min == null) {
- min -= delta * margin;
- // make sure we don't go below zero if all values
- // are positive
- if (min < 0 && axis.datamin != null && axis.datamin >= 0)
- min = 0;
- }
- if (opts.max == null) {
- max += delta * margin;
- if (max > 0 && axis.datamax != null && axis.datamax <= 0)
- max = 0;
- }
- }
- }
- axis.min = min;
- axis.max = max;
- }
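-
- // E.g. (illustrative): a series whose y values are all 5 produces
- // delta == 0, so the range is widened to [4.99, 5.01]; if the
- // values are all 0 the range becomes [-1, 1] instead.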
-
- function setupTickGeneration(axis) {
- var opts = axis.options;
-
- // estimate number of ticks
- var noTicks;
- if (typeof opts.ticks == "number" && opts.ticks > 0)
- noTicks = opts.ticks;
- else
- // heuristic based on the model a*sqrt(x) fitted to
- // some data points that seemed reasonable
- noTicks = 0.3 * Math.sqrt(axis.direction == "x" ? surface.width : surface.height);
-
- var delta = (axis.max - axis.min) / noTicks,
- dec = -Math.floor(Math.log(delta) / Math.LN10),
- maxDec = opts.tickDecimals;
-
- if (maxDec != null && dec > maxDec) {
- dec = maxDec;
- }
-
- var magn = Math.pow(10, -dec),
- norm = delta / magn, // norm is between 1.0 and 10.0
- size;
-
- if (norm < 1.5) {
- size = 1;
- } else if (norm < 3) {
- size = 2;
- // special case for 2.5, requires an extra decimal
- if (norm > 2.25 && (maxDec == null || dec + 1 <= maxDec)) {
- size = 2.5;
- ++dec;
- }
- } else if (norm < 7.5) {
- size = 5;
- } else {
- size = 10;
- }
-
- size *= magn;
-
- if (opts.minTickSize != null && size < opts.minTickSize) {
- size = opts.minTickSize;
- }
-
- axis.delta = delta;
- axis.tickDecimals = Math.max(0, maxDec != null ? maxDec : dec);
- axis.tickSize = opts.tickSize || size;
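-
- // A worked example (illustrative): for an 800px-wide x axis spanning
- // 0-100, noTicks = 0.3 * sqrt(800) ~ 8.5 and delta ~ 11.8, so
- // dec = -1, magn = 10 and norm ~ 1.18 < 1.5; size becomes 1 * 10 = 10,
- // giving ticks at 0, 10, ..., 100 with tickDecimals 0.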
-
- // Time mode was moved to a plug-in in 0.8, and since so many people use it
- // we'll add an especially friendly reminder to make sure they included it.
-
- if (opts.mode == "time" && !axis.tickGenerator) {
- throw new Error("Time mode requires the flot.time plugin.");
- }
-
- // Flot supports base-10 axes; any other mode is handled by a plug-in,
- // like flot.time.js.
-
- if (!axis.tickGenerator) {
-
- axis.tickGenerator = function (axis) {
-
- var ticks = [],
- start = floorInBase(axis.min, axis.tickSize),
- i = 0,
- v = Number.NaN,
- prev;
-
- do {
- prev = v;
- v = start + i * axis.tickSize;
- ticks.push(v);
- ++i;
- } while (v < axis.max && v != prev);
- return ticks;
- };
-
- axis.tickFormatter = function (value, axis) {
-
- var factor = axis.tickDecimals ? Math.pow(10, axis.tickDecimals) : 1;
- var formatted = "" + Math.round(value * factor) / factor;
-
- // If tickDecimals was specified, ensure that we have exactly that
- // much precision; otherwise default to the value's own precision.
-
- if (axis.tickDecimals != null) {
- var decimal = formatted.indexOf(".");
- var precision = decimal == -1 ? 0 : formatted.length - decimal - 1;
- if (precision < axis.tickDecimals) {
- return (precision ? formatted : formatted + ".") + ("" + factor).substr(1, axis.tickDecimals - precision);
- }
- }
-
- return formatted;
- };
- }
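-
- // E.g. with tickDecimals 2 the default formatter turns 1.5 into
- // "1.50" and 0 into "0.00"; with tickDecimals 1 it yields "1.5",
- // padding only when the rounded value has fewer decimals than
- // requested (an illustrative trace of the code above).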
-
- if ($.isFunction(opts.tickFormatter))
- axis.tickFormatter = function (v, axis) { return "" + opts.tickFormatter(v, axis); };
-
- if (opts.alignTicksWithAxis != null) {
- var otherAxis = (axis.direction == "x" ? xaxes : yaxes)[opts.alignTicksWithAxis - 1];
- if (otherAxis && otherAxis.used && otherAxis != axis) {
- // consider snapping min/max to outermost nice ticks
- var niceTicks = axis.tickGenerator(axis);
- if (niceTicks.length > 0) {
- if (opts.min == null)
- axis.min = Math.min(axis.min, niceTicks[0]);
- if (opts.max == null && niceTicks.length > 1)
- axis.max = Math.max(axis.max, niceTicks[niceTicks.length - 1]);
- }
-
- axis.tickGenerator = function (axis) {
- // copy ticks, scaled to this axis
- var ticks = [], v, i;
- for (i = 0; i < otherAxis.ticks.length; ++i) {
- v = (otherAxis.ticks[i].v - otherAxis.min) / (otherAxis.max - otherAxis.min);
- v = axis.min + v * (axis.max - axis.min);
- ticks.push(v);
- }
- return ticks;
- };
-
- // we might need an extra decimal since forced
- // ticks don't necessarily fit naturally
- if (!axis.mode && opts.tickDecimals == null) {
- var extraDec = Math.max(0, -Math.floor(Math.log(axis.delta) / Math.LN10) + 1),
- ts = axis.tickGenerator(axis);
-
- // only proceed if the tick interval rounded
- // with an extra decimal doesn't give us a
- // zero at the end
- if (!(ts.length > 1 && /\..*0$/.test((ts[1] - ts[0]).toFixed(extraDec))))
- axis.tickDecimals = extraDec;
- }
- }
- }
- }
-
- function setTicks(axis) {
- var oticks = axis.options.ticks, ticks = [];
- if (oticks == null || (typeof oticks == "number" && oticks > 0))
- ticks = axis.tickGenerator(axis);
- else if (oticks) {
- if ($.isFunction(oticks))
- // generate the ticks
- ticks = oticks(axis);
- else
- ticks = oticks;
- }
-
- // clean up/labelify the supplied ticks, copy them over
- var i, v;
- axis.ticks = [];
- for (i = 0; i < ticks.length; ++i) {
- var label = null;
- var t = ticks[i];
- if (typeof t == "object") {
- v = +t[0];
- if (t.length > 1)
- label = t[1];
- }
- else
- v = +t;
- if (label == null)
- label = axis.tickFormatter(v, axis);
- if (!isNaN(v))
- axis.ticks.push({ v: v, label: label });
- }
- }
-
- function snapRangeToTicks(axis, ticks) {
- if (axis.options.autoscaleMargin && ticks.length > 0) {
- // snap to ticks
- if (axis.options.min == null)
- axis.min = Math.min(axis.min, ticks[0].v);
- if (axis.options.max == null && ticks.length > 1)
- axis.max = Math.max(axis.max, ticks[ticks.length - 1].v);
- }
- }
-
- function draw() {
-
- surface.clear();
-
- executeHooks(hooks.drawBackground, [ctx]);
-
- var grid = options.grid;
-
- // draw background, if any
- if (grid.show && grid.backgroundColor)
- drawBackground();
-
- if (grid.show && !grid.aboveData) {
- drawGrid();
- }
-
- for (var i = 0; i < series.length; ++i) {
- executeHooks(hooks.drawSeries, [ctx, series[i]]);
- drawSeries(series[i]);
- }
-
- executeHooks(hooks.draw, [ctx]);
-
- if (grid.show && grid.aboveData) {
- drawGrid();
- }
-
- surface.render();
-
- // A draw implies that either the axes or data have changed, so we
- // should probably update the overlay highlights as well.
-
- triggerRedrawOverlay();
- }
-
- function extractRange(ranges, coord) {
- var axis, from, to, key, axes = allAxes();
-
- for (var i = 0; i < axes.length; ++i) {
- axis = axes[i];
- if (axis.direction == coord) {
- key = coord + axis.n + "axis";
- if (!ranges[key] && axis.n == 1)
- key = coord + "axis"; // support x1axis as xaxis
- if (ranges[key]) {
- from = ranges[key].from;
- to = ranges[key].to;
- break;
- }
- }
- }
-
- // backwards-compat stuff - to be removed in future
- if (!ranges[key]) {
- axis = coord == "x" ? xaxes[0] : yaxes[0];
- from = ranges[coord + "1"];
- to = ranges[coord + "2"];
- }
-
- // auto-reverse as an added bonus
- if (from != null && to != null && from > to) {
- var tmp = from;
- from = to;
- to = tmp;
- }
-
- return { from: from, to: to, axis: axis };
- }
-
- function drawBackground() {
- ctx.save();
- ctx.translate(plotOffset.left, plotOffset.top);
-
- ctx.fillStyle = getColorOrGradient(options.grid.backgroundColor, plotHeight, 0, "rgba(255, 255, 255, 0)");
- ctx.fillRect(0, 0, plotWidth, plotHeight);
- ctx.restore();
- }
-
- function drawGrid() {
- var i, axes, bw, bc;
-
- ctx.save();
- ctx.translate(plotOffset.left, plotOffset.top);
-
- // draw markings
- var markings = options.grid.markings;
- if (markings) {
- if ($.isFunction(markings)) {
- axes = plot.getAxes();
- // xmin etc. is backwards compatibility, to be
- // removed in the future
- axes.xmin = axes.xaxis.min;
- axes.xmax = axes.xaxis.max;
- axes.ymin = axes.yaxis.min;
- axes.ymax = axes.yaxis.max;
-
- markings = markings(axes);
- }
-
- for (i = 0; i < markings.length; ++i) {
- var m = markings[i],
- xrange = extractRange(m, "x"),
- yrange = extractRange(m, "y");
-
- // fill in missing
- if (xrange.from == null)
- xrange.from = xrange.axis.min;
- if (xrange.to == null)
- xrange.to = xrange.axis.max;
- if (yrange.from == null)
- yrange.from = yrange.axis.min;
- if (yrange.to == null)
- yrange.to = yrange.axis.max;
-
- // clip
- if (xrange.to < xrange.axis.min || xrange.from > xrange.axis.max ||
- yrange.to < yrange.axis.min || yrange.from > yrange.axis.max)
- continue;
-
- xrange.from = Math.max(xrange.from, xrange.axis.min);
- xrange.to = Math.min(xrange.to, xrange.axis.max);
- yrange.from = Math.max(yrange.from, yrange.axis.min);
- yrange.to = Math.min(yrange.to, yrange.axis.max);
-
- var xequal = xrange.from === xrange.to,
- yequal = yrange.from === yrange.to;
-
- if (xequal && yequal) {
- continue;
- }
-
- // then draw
- xrange.from = Math.floor(xrange.axis.p2c(xrange.from));
- xrange.to = Math.floor(xrange.axis.p2c(xrange.to));
- yrange.from = Math.floor(yrange.axis.p2c(yrange.from));
- yrange.to = Math.floor(yrange.axis.p2c(yrange.to));
-
- if (xequal || yequal) {
- var lineWidth = m.lineWidth || options.grid.markingsLineWidth,
- subPixel = lineWidth % 2 ? 0.5 : 0;
- ctx.beginPath();
- ctx.strokeStyle = m.color || options.grid.markingsColor;
- ctx.lineWidth = lineWidth;
- if (xequal) {
- ctx.moveTo(xrange.to + subPixel, yrange.from);
- ctx.lineTo(xrange.to + subPixel, yrange.to);
- } else {
- ctx.moveTo(xrange.from, yrange.to + subPixel);
- ctx.lineTo(xrange.to, yrange.to + subPixel);
- }
- ctx.stroke();
- } else {
- ctx.fillStyle = m.color || options.grid.markingsColor;
- ctx.fillRect(xrange.from, yrange.to,
- xrange.to - xrange.from,
- yrange.from - yrange.to);
- }
- }
- }
-
- // draw the ticks
- axes = allAxes();
- bw = options.grid.borderWidth;
-
- for (var j = 0; j < axes.length; ++j) {
- var axis = axes[j], box = axis.box,
- t = axis.tickLength, x, y, xoff, yoff;
- if (!axis.show || axis.ticks.length == 0)
- continue;
-
- ctx.lineWidth = 1;
-
- // find the edges
- if (axis.direction == "x") {
- x = 0;
- if (t == "full")
- y = (axis.position == "top" ? 0 : plotHeight);
- else
- y = box.top - plotOffset.top + (axis.position == "top" ? box.height : 0);
- }
- else {
- y = 0;
- if (t == "full")
- x = (axis.position == "left" ? 0 : plotWidth);
- else
- x = box.left - plotOffset.left + (axis.position == "left" ? box.width : 0);
- }
-
- // draw tick bar
- if (!axis.innermost) {
- ctx.strokeStyle = axis.options.color;
- ctx.beginPath();
- xoff = yoff = 0;
- if (axis.direction == "x")
- xoff = plotWidth + 1;
- else
- yoff = plotHeight + 1;
-
- if (ctx.lineWidth == 1) {
- if (axis.direction == "x") {
- y = Math.floor(y) + 0.5;
- } else {
- x = Math.floor(x) + 0.5;
- }
- }
-
- ctx.moveTo(x, y);
- ctx.lineTo(x + xoff, y + yoff);
- ctx.stroke();
- }
-
- // draw ticks
-
- ctx.strokeStyle = axis.options.tickColor;
-
- ctx.beginPath();
- for (i = 0; i < axis.ticks.length; ++i) {
- var v = axis.ticks[i].v;
-
- xoff = yoff = 0;
-
- if (isNaN(v) || v < axis.min || v > axis.max
- // skip those lying on the axes if we got a border
- || (t == "full"
- && ((typeof bw == "object" && bw[axis.position] > 0) || bw > 0)
- && (v == axis.min || v == axis.max)))
- continue;
-
- if (axis.direction == "x") {
- x = axis.p2c(v);
- yoff = t == "full" ? -plotHeight : t;
-
- if (axis.position == "top")
- yoff = -yoff;
- }
- else {
- y = axis.p2c(v);
- xoff = t == "full" ? -plotWidth : t;
-
- if (axis.position == "left")
- xoff = -xoff;
- }
-
- if (ctx.lineWidth == 1) {
- if (axis.direction == "x")
- x = Math.floor(x) + 0.5;
- else
- y = Math.floor(y) + 0.5;
- }
-
- ctx.moveTo(x, y);
- ctx.lineTo(x + xoff, y + yoff);
- }
-
- ctx.stroke();
- }
-
-
- // draw border
- if (bw) {
- // If either borderWidth or borderColor is an object, then draw the border
- // line by line instead of as one rectangle
- bc = options.grid.borderColor;
- if(typeof bw == "object" || typeof bc == "object") {
- if (typeof bw !== "object") {
- bw = {top: bw, right: bw, bottom: bw, left: bw};
- }
- if (typeof bc !== "object") {
- bc = {top: bc, right: bc, bottom: bc, left: bc};
- }
-
- if (bw.top > 0) {
- ctx.strokeStyle = bc.top;
- ctx.lineWidth = bw.top;
- ctx.beginPath();
- ctx.moveTo(0 - bw.left, 0 - bw.top/2);
- ctx.lineTo(plotWidth, 0 - bw.top/2);
- ctx.stroke();
- }
-
- if (bw.right > 0) {
- ctx.strokeStyle = bc.right;
- ctx.lineWidth = bw.right;
- ctx.beginPath();
- ctx.moveTo(plotWidth + bw.right / 2, 0 - bw.top);
- ctx.lineTo(plotWidth + bw.right / 2, plotHeight);
- ctx.stroke();
- }
-
- if (bw.bottom > 0) {
- ctx.strokeStyle = bc.bottom;
- ctx.lineWidth = bw.bottom;
- ctx.beginPath();
- ctx.moveTo(plotWidth + bw.right, plotHeight + bw.bottom / 2);
- ctx.lineTo(0, plotHeight + bw.bottom / 2);
- ctx.stroke();
- }
-
- if (bw.left > 0) {
- ctx.strokeStyle = bc.left;
- ctx.lineWidth = bw.left;
- ctx.beginPath();
- ctx.moveTo(0 - bw.left/2, plotHeight + bw.bottom);
- ctx.lineTo(0 - bw.left/2, 0);
- ctx.stroke();
- }
- }
- else {
- ctx.lineWidth = bw;
- ctx.strokeStyle = options.grid.borderColor;
- ctx.strokeRect(-bw/2, -bw/2, plotWidth + bw, plotHeight + bw);
- }
- }
-
- ctx.restore();
- }
-
- function drawAxisLabels() {
-
- $.each(allAxes(), function (_, axis) {
- var box = axis.box,
- legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis",
- layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles,
- font = axis.options.font || "flot-tick-label tickLabel",
- tick, x, y, halign, valign;
-
- // Remove text before checking for axis.show and ticks.length;
- // otherwise plugins, like flot-tickrotor, that draw their own
- // tick labels will end up with both theirs and the defaults.
-
- surface.removeText(layer);
-
- if (!axis.show || axis.ticks.length == 0)
- return;
-
- for (var i = 0; i < axis.ticks.length; ++i) {
-
- tick = axis.ticks[i];
- if (!tick.label || tick.v < axis.min || tick.v > axis.max)
- continue;
-
- if (axis.direction == "x") {
- halign = "center";
- x = plotOffset.left + axis.p2c(tick.v);
- if (axis.position == "bottom") {
- y = box.top + box.padding;
- } else {
- y = box.top + box.height - box.padding;
- valign = "bottom";
- }
- } else {
- valign = "middle";
- y = plotOffset.top + axis.p2c(tick.v);
- if (axis.position == "left") {
- x = box.left + box.width - box.padding;
- halign = "right";
- } else {
- x = box.left + box.padding;
- }
- }
-
- surface.addText(layer, x, y, tick.label, font, null, null, halign, valign);
- }
- });
- }
-
- function drawSeries(series) {
- if (series.lines.show)
- drawSeriesLines(series);
- if (series.bars.show)
- drawSeriesBars(series);
- if (series.points.show)
- drawSeriesPoints(series);
- }
-
- function drawSeriesLines(series) {
- function plotLine(datapoints, xoffset, yoffset, axisx, axisy) {
- var points = datapoints.points,
- ps = datapoints.pointsize,
- prevx = null, prevy = null;
-
- ctx.beginPath();
- for (var i = ps; i < points.length; i += ps) {
- var x1 = points[i - ps], y1 = points[i - ps + 1],
- x2 = points[i], y2 = points[i + 1];
-
- if (x1 == null || x2 == null)
- continue;
-
- // clip with ymin
- if (y1 <= y2 && y1 < axisy.min) {
- if (y2 < axisy.min)
- continue; // line segment is outside
- // compute new intersection point
- x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
- y1 = axisy.min;
- }
- else if (y2 <= y1 && y2 < axisy.min) {
- if (y1 < axisy.min)
- continue;
- x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
- y2 = axisy.min;
- }
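- // Editor's worked example (not in upstream flot): with
- // axisy.min == 0, a segment from (1, -2) to (3, 2) is
- // clipped to x1 = (0 - (-2)) / (2 - (-2)) * (3 - 1) + 1 = 2
- // and y1 = 0, i.e. it now starts where the original line
- // crosses the y = 0 edge, at (2, 0).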
-
- // clip with ymax
- if (y1 >= y2 && y1 > axisy.max) {
- if (y2 > axisy.max)
- continue;
- x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
- y1 = axisy.max;
- }
- else if (y2 >= y1 && y2 > axisy.max) {
- if (y1 > axisy.max)
- continue;
- x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
- y2 = axisy.max;
- }
-
- // clip with xmin
- if (x1 <= x2 && x1 < axisx.min) {
- if (x2 < axisx.min)
- continue;
- y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
- x1 = axisx.min;
- }
- else if (x2 <= x1 && x2 < axisx.min) {
- if (x1 < axisx.min)
- continue;
- y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
- x2 = axisx.min;
- }
-
- // clip with xmax
- if (x1 >= x2 && x1 > axisx.max) {
- if (x2 > axisx.max)
- continue;
- y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
- x1 = axisx.max;
- }
- else if (x2 >= x1 && x2 > axisx.max) {
- if (x1 > axisx.max)
- continue;
- y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
- x2 = axisx.max;
- }
-
- if (x1 != prevx || y1 != prevy)
- ctx.moveTo(axisx.p2c(x1) + xoffset, axisy.p2c(y1) + yoffset);
-
- prevx = x2;
- prevy = y2;
- ctx.lineTo(axisx.p2c(x2) + xoffset, axisy.p2c(y2) + yoffset);
- }
- ctx.stroke();
- }
-
- function plotLineArea(datapoints, axisx, axisy) {
- var points = datapoints.points,
- ps = datapoints.pointsize,
- bottom = Math.min(Math.max(0, axisy.min), axisy.max),
- i = 0, top, areaOpen = false,
- ypos = 1, segmentStart = 0, segmentEnd = 0;
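- // Editor's note (not in upstream flot): "bottom" clamps 0
- // into [axisy.min, axisy.max], so the fill baseline is the
- // y = 0 axis when visible and the nearest plot edge otherwise.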
-
- // we process each segment in two passes: first in the
- // forward direction to sketch out the top, then, once we
- // hit the end, backwards to sketch the bottom
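- // Editor's sketch of the two passes (assuming the filled-line
- // point format [x, y, bottom], i.e. ps == 3): the forward pass
- // walks p0 -> p1 -> ... -> pN along the y values (ypos == 1);
- // at the end ps flips sign and ypos becomes 2, so the reverse
- // pass walks pN -> ... -> p0 along the bottom values before
- // ctx.fill() closes the polygon.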
- while (true) {
- if (ps > 0 && i > points.length + ps)
- break;
-
- i += ps; // ps is negative if going backwards
-
- var x1 = points[i - ps],
- y1 = points[i - ps + ypos],
- x2 = points[i], y2 = points[i + ypos];
-
- if (areaOpen) {
- if (ps > 0 && x1 != null && x2 == null) {
- // at turning point
- segmentEnd = i;
- ps = -ps;
- ypos = 2;
- continue;
- }
-
- if (ps < 0 && i == segmentStart + ps) {
- // done with the reverse sweep
- ctx.fill();
- areaOpen = false;
- ps = -ps;
- ypos = 1;
- i = segmentStart = segmentEnd + ps;
- continue;
- }
- }
-
- if (x1 == null || x2 == null)
- continue;
-
- // clip x values
-
- // clip with xmin
- if (x1 <= x2 && x1 < axisx.min) {
- if (x2 < axisx.min)
- continue;
- y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
- x1 = axisx.min;
- }
- else if (x2 <= x1 && x2 < axisx.min) {
- if (x1 < axisx.min)
- continue;
- y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
- x2 = axisx.min;
- }
-
- // clip with xmax
- if (x1 >= x2 && x1 > axisx.max) {
- if (x2 > axisx.max)
- continue;
- y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
- x1 = axisx.max;
- }
- else if (x2 >= x1 && x2 > axisx.max) {
- if (x1 > axisx.max)
- continue;
- y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
- x2 = axisx.max;
- }
-
- if (!areaOpen) {
- // open area
- ctx.beginPath();
- ctx.moveTo(axisx.p2c(x1), axisy.p2c(bottom));
- areaOpen = true;
- }
-
- // now first check the case where both are outside
- if (y1 >= axisy.max && y2 >= axisy.max) {
- ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.max));
- ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.max));
- continue;
- }
- else if (y1 <= axisy.min && y2 <= axisy.min) {
- ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.min));
- ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.min));
- continue;
- }
-
- // otherwise it's a bit more complicated: there might
- // be a flat maxed-out rectangle first, then a
- // triangular cutout (or the reverse); to find these we
- // keep track of the current x values
- var x1old = x1, x2old = x2;
-
- // clip the y values, without shortcutting, we
- // go through all cases in turn
-
- // clip with ymin
- if (y1 <= y2 && y1 < axisy.min && y2 >= axisy.min) {
- x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
- y1 = axisy.min;
- }
- else if (y2 <= y1 && y2 < axisy.min && y1 >= axisy.min) {
- x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
- y2 = axisy.min;
- }
-
- // clip with ymax
- if (y1 >= y2 && y1 > axisy.max && y2 <= axisy.max) {
- x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
- y1 = axisy.max;
- }
- else if (y2 >= y1 && y2 > axisy.max && y1 <= axisy.max) {
- x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
- y2 = axisy.max;
- }
-
- // if the x value was changed we got a rectangle
- // to fill
- if (x1 != x1old) {
- ctx.lineTo(axisx.p2c(x1old), axisy.p2c(y1));
- // it goes to (x1, y1), but we fill that below
- }
-
- // fill the triangular section; this sometimes results
- // in redundant points if (x1, y1) hasn't changed from
- // the previous line, but we just ignore that
- ctx.lineTo(axisx.p2c(x1), axisy.p2c(y1));
- ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2));
-
- // fill the other rectangle if it's there
- if (x2 != x2old) {
- ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2));
- ctx.lineTo(axisx.p2c(x2old), axisy.p2c(y2));
- }
- }
- }
-
- ctx.save();
- ctx.translate(plotOffset.left, plotOffset.top);
- ctx.lineJoin = "round";
-
- var lw = series.lines.lineWidth,
- sw = series.shadowSize;
- // FIXME: consider another form of shadow when filling is turned on
- if (lw > 0 && sw > 0) {
- // draw shadow as a thick and thin line with transparency
- ctx.lineWidth = sw;
- ctx.strokeStyle = "rgba(0,0,0,0.1)";
- // position shadow at angle from the mid of line
- var angle = Math.PI/18;
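- // Editor's note (not in upstream flot): Math.PI/18 is 10
- // degrees, so the sin/cos offsets below push the shadow
- // slightly sideways and mostly downwards (positive y points
- // down on canvas).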
- plotLine(series.datapoints, Math.sin(angle) * (lw/2 + sw/2), Math.cos(angle) * (lw/2 + sw/2), series.xaxis, series.yaxis);
- ctx.lineWidth = sw/2;
- plotLine(series.datapoints, Math.sin(angle) * (lw/2 + sw/4), Math.cos(angle) * (lw/2 + sw/4), series.xaxis, series.yaxis);
- }
-
- ctx.lineWidth = lw;
- ctx.strokeStyle = series.color;
- var fillStyle = getFillStyle(series.lines, series.color, 0, plotHeight);
- if (fillStyle) {
- ctx.fillStyle = fillStyle;
- plotLineArea(series.datapoints, series.xaxis, series.yaxis);
- }
-
- if (lw > 0)
- plotLine(series.datapoints, 0, 0, series.xaxis, series.yaxis);
- ctx.restore();
- }
-
- function drawSeriesPoints(series) {
- function plotPoints(datapoints, radius, fillStyle, offset, shadow, axisx, axisy, symbol) {
- var points = datapoints.points, ps = datapoints.pointsize;
-
- for (var i = 0; i < points.length; i += ps) {
- var x = points[i], y = points[i + 1];
- if (x == null || x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max)
- continue;
-
- ctx.beginPath();
- x = axisx.p2c(x);
- y = axisy.p2c(y) + offset;
- if (symbol == "circle")
- ctx.arc(x, y, radius, 0, shadow ? Math.PI : Math.PI * 2, false);
- else
- symbol(ctx, x, y, radius, shadow);
- ctx.closePath();
-
- if (fillStyle) {
- ctx.fillStyle = fillStyle;
- ctx.fill();
- }
- ctx.stroke();
- }
- }
-
- ctx.save();
- ctx.translate(plotOffset.left, plotOffset.top);
-
- var lw = series.points.lineWidth,
- sw = series.shadowSize,
- radius = series.points.radius,
- symbol = series.points.symbol;
-
- // If the user sets the line width to 0, we change it to a very
- // small value. A line width of 0 seems to force the default of 1.
- // Doing the conditional here allows the shadow setting to still be
- // optional even with a lineWidth of 0.
-
- if( lw == 0 )
- lw = 0.0001;
-
- if (lw > 0 && sw > 0) {
- // draw shadow in two steps
- var w = sw / 2;
- ctx.lineWidth = w;
- ctx.strokeStyle = "rgba(0,0,0,0.1)";
- plotPoints(series.datapoints, radius, null, w + w/2, true,
- series.xaxis, series.yaxis, symbol);
-
- ctx.strokeStyle = "rgba(0,0,0,0.2)";
- plotPoints(series.datapoints, radius, null, w/2, true,
- series.xaxis, series.yaxis, symbol);
- }
-
- ctx.lineWidth = lw;
- ctx.strokeStyle = series.color;
- plotPoints(series.datapoints, radius,
- getFillStyle(series.points, series.color), 0, false,
- series.xaxis, series.yaxis, symbol);
- ctx.restore();
- }
-
- function drawBar(x, y, b, barLeft, barRight, fillStyleCallback, axisx, axisy, c, horizontal, lineWidth) {
- var left, right, bottom, top,
- drawLeft, drawRight, drawTop, drawBottom,
- tmp;
-
- // in horizontal mode, we start the bar from the left
- // instead of from the bottom so it appears to be
- // horizontal rather than vertical
- if (horizontal) {
- drawBottom = drawRight = drawTop = true;
- drawLeft = false;
- left = b;
- right = x;
- top = y + barLeft;
- bottom = y + barRight;
-
- // account for negative bars
- if (right < left) {
- tmp = right;
- right = left;
- left = tmp;
- drawLeft = true;
- drawRight = false;
- }
- }
- else {
- drawLeft = drawRight = drawTop = true;
- drawBottom = false;
- left = x + barLeft;
- right = x + barRight;
- bottom = b;
- top = y;
-
- // account for negative bars
- if (top < bottom) {
- tmp = top;
- top = bottom;
- bottom = tmp;
- drawBottom = true;
- drawTop = false;
- }
- }
-
- // clip
- if (right < axisx.min || left > axisx.max ||
- top < axisy.min || bottom > axisy.max)
- return;
-
- if (left < axisx.min) {
- left = axisx.min;
- drawLeft = false;
- }
-
- if (right > axisx.max) {
- right = axisx.max;
- drawRight = false;
- }
-
- if (bottom < axisy.min) {
- bottom = axisy.min;
- drawBottom = false;
- }
-
- if (top > axisy.max) {
- top = axisy.max;
- drawTop = false;
- }
-
- left = axisx.p2c(left);
- bottom = axisy.p2c(bottom);
- right = axisx.p2c(right);
- top = axisy.p2c(top);
-
- // fill the bar
- if (fillStyleCallback) {
- c.fillStyle = fillStyleCallback(bottom, top);
- c.fillRect(left, top, right - left, bottom - top);
- }
-
- // draw outline
- if (lineWidth > 0 && (drawLeft || drawRight || drawTop || drawBottom)) {
- c.beginPath();
-
- // FIXME: inline moveTo is buggy with excanvas
- c.moveTo(left, bottom);
- if (drawLeft)
- c.lineTo(left, top);
- else
- c.moveTo(left, top);
- if (drawTop)
- c.lineTo(right, top);
- else
- c.moveTo(right, top);
- if (drawRight)
- c.lineTo(right, bottom);
- else
- c.moveTo(right, bottom);
- if (drawBottom)
- c.lineTo(left, bottom);
- else
- c.moveTo(left, bottom);
- c.stroke();
- }
- }
-
- function drawSeriesBars(series) {
- function plotBars(datapoints, barLeft, barRight, fillStyleCallback, axisx, axisy) {
- var points = datapoints.points, ps = datapoints.pointsize;
-
- for (var i = 0; i < points.length; i += ps) {
- if (points[i] == null)
- continue;
- drawBar(points[i], points[i + 1], points[i + 2], barLeft, barRight, fillStyleCallback, axisx, axisy, ctx, series.bars.horizontal, series.bars.lineWidth);
- }
- }
-
- ctx.save();
- ctx.translate(plotOffset.left, plotOffset.top);
-
- // FIXME: figure out a way to add shadows (for instance along the right edge)
- ctx.lineWidth = series.bars.lineWidth;
- ctx.strokeStyle = series.color;
-
- var barLeft;
-
- switch (series.bars.align) {
- case "left":
- barLeft = 0;
- break;
- case "right":
- barLeft = -series.bars.barWidth;
- break;
- default:
- barLeft = -series.bars.barWidth / 2;
- }
-
- var fillStyleCallback = series.bars.fill ? function (bottom, top) { return getFillStyle(series.bars, series.color, bottom, top); } : null;
- plotBars(series.datapoints, barLeft, barLeft + series.bars.barWidth, fillStyleCallback, series.xaxis, series.yaxis);
- ctx.restore();
- }
-
- function getFillStyle(filloptions, seriesColor, bottom, top) {
- var fill = filloptions.fill;
- if (!fill)
- return null;
-
- if (filloptions.fillColor)
- return getColorOrGradient(filloptions.fillColor, bottom, top, seriesColor);
-
- var c = $.color.parse(seriesColor);
- c.a = typeof fill == "number" ? fill : 0.4;
- c.normalize();
- return c.toString();
- }
-
- function insertLegend() {
-
- if (options.legend.container != null) {
- $(options.legend.container).html("");
- } else {
- placeholder.find(".legend").remove();
- }
-
- if (!options.legend.show) {
- return;
- }
-
- var fragments = [], entries = [], rowStarted = false,
- lf = options.legend.labelFormatter, s, label;
-
- // Build a list of legend entries, with each having a label and a color
-
- for (var i = 0; i < series.length; ++i) {
- s = series[i];
- if (s.label) {
- label = lf ? lf(s.label, s) : s.label;
- if (label) {
- entries.push({
- label: label,
- color: s.color
- });
- }
- }
- }
-
- // Sort the legend using either the default or a custom comparator
-
- if (options.legend.sorted) {
- if ($.isFunction(options.legend.sorted)) {
- entries.sort(options.legend.sorted);
- } else if (options.legend.sorted == "reverse") {
- entries.reverse();
- } else {
- var ascending = options.legend.sorted != "descending";
- entries.sort(function(a, b) {
- return a.label == b.label ? 0 : (
- (a.label < b.label) != ascending ? 1 : -1 // Logical XOR
- );
- });
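- // Editor's worked case (not in upstream flot): with
- // ascending === true and a.label < b.label, the test
- // (true != true) is false, so the comparator returns -1
- // and "a" sorts first; flipping either operand flips the
- // result, hence the "logical XOR" remark above.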
- }
- }
-
- // Generate markup for the list of entries, in their final order
-
- for (var i = 0; i < entries.length; ++i) {
-
- var entry = entries[i];
-
- if (i % options.legend.noColumns == 0) {
- if (rowStarted)
- fragments.push('</tr>');
- fragments.push('<tr>');
- rowStarted = true;
- }
-
- fragments.push(
- '<td class="legendColorBox"><div style="border:1px solid ' + options.legend.labelBoxBorderColor + ';padding:1px"><div style="width:4px;height:0;border:5px solid ' + entry.color + ';overflow:hidden"></div></div></td>' +
- '<td class="legendLabel">' + entry.label + '</td>'
- );
- }
-
- if (rowStarted)
- fragments.push('</tr>');
-
- if (fragments.length == 0)
- return;
-
- var table = '<table style="font-size:smaller;color:' + options.grid.color + '">' + fragments.join("") + '</table>';
- if (options.legend.container != null)
- $(options.legend.container).html(table);
- else {
- var pos = "",
- p = options.legend.position,
- m = options.legend.margin;
- if (m[0] == null)
- m = [m, m];
- if (p.charAt(0) == "n")
- pos += 'top:' + (m[1] + plotOffset.top) + 'px;';
- else if (p.charAt(0) == "s")
- pos += 'bottom:' + (m[1] + plotOffset.bottom) + 'px;';
- if (p.charAt(1) == "e")
- pos += 'right:' + (m[0] + plotOffset.right) + 'px;';
- else if (p.charAt(1) == "w")
- pos += 'left:' + (m[0] + plotOffset.left) + 'px;';
- var legend = $('<div class="legend">' + table.replace('style="', 'style="position:absolute;' + pos +';') + '</div>').appendTo(placeholder);
- if (options.legend.backgroundOpacity != 0.0) {
- // put in the transparent background
- // separately to avoid blended labels and
- // label boxes
- var c = options.legend.backgroundColor;
- if (c == null) {
- c = options.grid.backgroundColor;
- if (c && typeof c == "string")
- c = $.color.parse(c);
- else
- c = $.color.extract(legend, 'background-color');
- c.a = 1;
- c = c.toString();
- }
- var div = legend.children();
- $('<div style="position:absolute;width:' + div.width() + 'px;height:' + div.height() + 'px;' + pos +'background-color:' + c + ';"> </div>').prependTo(legend).css('opacity', options.legend.backgroundOpacity);
- }
- }
- }
-
-
- // interactive features
-
- var highlights = [],
- redrawTimeout = null;
-
- // returns the data item the mouse is over, or null if none is found
- function findNearbyItem(mouseX, mouseY, seriesFilter) {
- var maxDistance = options.grid.mouseActiveRadius,
- smallestDistance = maxDistance * maxDistance + 1,
- item = null, foundPoint = false, i, j, ps;
-
- for (i = series.length - 1; i >= 0; --i) {
- if (!seriesFilter(series[i]))
- continue;
-
- var s = series[i],
- axisx = s.xaxis,
- axisy = s.yaxis,
- points = s.datapoints.points,
- mx = axisx.c2p(mouseX), // precompute some stuff to make the loop faster
- my = axisy.c2p(mouseY),
- maxx = maxDistance / axisx.scale,
- maxy = maxDistance / axisy.scale;
-
- ps = s.datapoints.pointsize;
- // with inverse transforms, we can't use the maxx/maxy
- // optimization, sadly
- if (axisx.options.inverseTransform)
- maxx = Number.MAX_VALUE;
- if (axisy.options.inverseTransform)
- maxy = Number.MAX_VALUE;
-
- if (s.lines.show || s.points.show) {
- for (j = 0; j < points.length; j += ps) {
- var x = points[j], y = points[j + 1];
- if (x == null)
- continue;
-
- // For points and lines, the cursor must be within a
- // certain distance to the data point
- if (x - mx > maxx || x - mx < -maxx ||
- y - my > maxy || y - my < -maxy)
- continue;
-
- // We have to calculate distances in pixels, not in
- // data units, because the scales of the axes may be different
- var dx = Math.abs(axisx.p2c(x) - mouseX),
- dy = Math.abs(axisy.p2c(y) - mouseY),
- dist = dx * dx + dy * dy; // we save the sqrt
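- // Editor's note: both dist and smallestDistance stay
- // squared (smallestDistance starts at maxDistance^2 + 1
- // above), so comparing them is equivalent to comparing
- // true distances without ever calling Math.sqrt.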
-
- // use strict < so that, combined with the reverse
- // series loop above, the last (topmost) series wins ties
- if (dist < smallestDistance) {
- smallestDistance = dist;
- item = [i, j / ps];
- }
- }
- }
-
- if (s.bars.show && !item) { // no other point can be nearby
-
- var barLeft, barRight;
-
- switch (s.bars.align) {
- case "left":
- barLeft = 0;
- break;
- case "right":
- barLeft = -s.bars.barWidth;
- break;
- default:
- barLeft = -s.bars.barWidth / 2;
- }
-
- barRight = barLeft + s.bars.barWidth;
-
- for (j = 0; j < points.length; j += ps) {
- var x = points[j], y = points[j + 1], b = points[j + 2];
- if (x == null)
- continue;
-
- // for a bar graph, the cursor must be inside the bar
- if (series[i].bars.horizontal ?
- (mx <= Math.max(b, x) && mx >= Math.min(b, x) &&
- my >= y + barLeft && my <= y + barRight) :
- (mx >= x + barLeft && mx <= x + barRight &&
- my >= Math.min(b, y) && my <= Math.max(b, y)))
- item = [i, j / ps];
- }
- }
- }
-
- if (item) {
- i = item[0];
- j = item[1];
- ps = series[i].datapoints.pointsize;
-
- return { datapoint: series[i].datapoints.points.slice(j * ps, (j + 1) * ps),
- dataIndex: j,
- series: series[i],
- seriesIndex: i };
- }
-
- return null;
- }
-
- function onMouseMove(e) {
- if (options.grid.hoverable)
- triggerClickHoverEvent("plothover", e,
- function (s) { return s["hoverable"] != false; });
- }
-
- function onMouseLeave(e) {
- if (options.grid.hoverable)
- triggerClickHoverEvent("plothover", e,
- function (s) { return false; });
- }
-
- function onClick(e) {
- triggerClickHoverEvent("plotclick", e,
- function (s) { return s["clickable"] != false; });
- }
-
- // trigger click or hover event (they send the same parameters
- // so we share their code)
- function triggerClickHoverEvent(eventname, event, seriesFilter) {
- var offset = eventHolder.offset(),
- canvasX = event.pageX - offset.left - plotOffset.left,
- canvasY = event.pageY - offset.top - plotOffset.top,
- pos = canvasToAxisCoords({ left: canvasX, top: canvasY });
-
- pos.pageX = event.pageX;
- pos.pageY = event.pageY;
-
- var item = findNearbyItem(canvasX, canvasY, seriesFilter);
-
- if (item) {
- // fill in mouse pos for any listeners out there
- item.pageX = parseInt(item.series.xaxis.p2c(item.datapoint[0]) + offset.left + plotOffset.left, 10);
- item.pageY = parseInt(item.series.yaxis.p2c(item.datapoint[1]) + offset.top + plotOffset.top, 10);
- }
-
- if (options.grid.autoHighlight) {
- // clear auto-highlights
- for (var i = 0; i < highlights.length; ++i) {
- var h = highlights[i];
- if (h.auto == eventname &&
- !(item && h.series == item.series &&
- h.point[0] == item.datapoint[0] &&
- h.point[1] == item.datapoint[1]))
- unhighlight(h.series, h.point);
- }
-
- if (item)
- highlight(item.series, item.datapoint, eventname);
- }
-
- placeholder.trigger(eventname, [ pos, item ]);
- }
-
- function triggerRedrawOverlay() {
- var t = options.interaction.redrawOverlayInterval;
- if (t == -1) { // skip event queue
- drawOverlay();
- return;
- }
-
- if (!redrawTimeout)
- redrawTimeout = setTimeout(drawOverlay, t);
- }
-
- function drawOverlay() {
- redrawTimeout = null;
-
- // draw highlights
- octx.save();
- overlay.clear();
- octx.translate(plotOffset.left, plotOffset.top);
-
- var i, hi;
- for (i = 0; i < highlights.length; ++i) {
- hi = highlights[i];
-
- if (hi.series.bars.show)
- drawBarHighlight(hi.series, hi.point);
- else
- drawPointHighlight(hi.series, hi.point);
- }
- octx.restore();
-
- executeHooks(hooks.drawOverlay, [octx]);
- }
-
- function highlight(s, point, auto) {
- if (typeof s == "number")
- s = series[s];
-
- if (typeof point == "number") {
- var ps = s.datapoints.pointsize;
- point = s.datapoints.points.slice(ps * point, ps * (point + 1));
- }
-
- var i = indexOfHighlight(s, point);
- if (i == -1) {
- highlights.push({ series: s, point: point, auto: auto });
-
- triggerRedrawOverlay();
- }
- else if (!auto)
- highlights[i].auto = false;
- }
-
- function unhighlight(s, point) {
- if (s == null && point == null) {
- highlights = [];
- triggerRedrawOverlay();
- return;
- }
-
- if (typeof s == "number")
- s = series[s];
-
- if (typeof point == "number") {
- var ps = s.datapoints.pointsize;
- point = s.datapoints.points.slice(ps * point, ps * (point + 1));
- }
-
- var i = indexOfHighlight(s, point);
- if (i != -1) {
- highlights.splice(i, 1);
-
- triggerRedrawOverlay();
- }
- }
-
- function indexOfHighlight(s, p) {
- for (var i = 0; i < highlights.length; ++i) {
- var h = highlights[i];
- if (h.series == s && h.point[0] == p[0]
- && h.point[1] == p[1])
- return i;
- }
- return -1;
- }
-
- function drawPointHighlight(series, point) {
- var x = point[0], y = point[1],
- axisx = series.xaxis, axisy = series.yaxis,
- highlightColor = (typeof series.highlightColor === "string") ? series.highlightColor : $.color.parse(series.color).scale('a', 0.5).toString();
-
- if (x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max)
- return;
-
- var pointRadius = series.points.radius + series.points.lineWidth / 2;
- octx.lineWidth = pointRadius;
- octx.strokeStyle = highlightColor;
- var radius = 1.5 * pointRadius;
- x = axisx.p2c(x);
- y = axisy.p2c(y);
-
- octx.beginPath();
- if (series.points.symbol == "circle")
- octx.arc(x, y, radius, 0, 2 * Math.PI, false);
- else
- series.points.symbol(octx, x, y, radius, false);
- octx.closePath();
- octx.stroke();
- }
-
- function drawBarHighlight(series, point) {
- var highlightColor = (typeof series.highlightColor === "string") ? series.highlightColor : $.color.parse(series.color).scale('a', 0.5).toString(),
- fillStyle = highlightColor,
- barLeft;
-
- switch (series.bars.align) {
- case "left":
- barLeft = 0;
- break;
- case "right":
- barLeft = -series.bars.barWidth;
- break;
- default:
- barLeft = -series.bars.barWidth / 2;
- }
-
- octx.lineWidth = series.bars.lineWidth;
- octx.strokeStyle = highlightColor;
-
- drawBar(point[0], point[1], point[2] || 0, barLeft, barLeft + series.bars.barWidth,
- function () { return fillStyle; }, series.xaxis, series.yaxis, octx, series.bars.horizontal, series.bars.lineWidth);
- }
-
- function getColorOrGradient(spec, bottom, top, defaultColor) {
- if (typeof spec == "string")
- return spec;
- else {
- // assume this is a gradient spec; IE currently only
- // supports a simple vertical gradient properly, so that's
- // what we support too
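- // Editor's example of such a spec (per flot's documented
- // fillColor format): { colors: [ { opacity: 0.8 },
- // { brightness: 0.6 } ] } fades the series color from
- // 80% opacity at the top to 60% brightness at the bottom.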
- var gradient = ctx.createLinearGradient(0, top, 0, bottom);
-
- for (var i = 0, l = spec.colors.length; i < l; ++i) {
- var c = spec.colors[i];
- if (typeof c != "string") {
- var co = $.color.parse(defaultColor);
- if (c.brightness != null)
- co = co.scale('rgb', c.brightness);
- if (c.opacity != null)
- co.a *= c.opacity;
- c = co.toString();
- }
- gradient.addColorStop(i / (l - 1), c);
- }
-
- return gradient;
- }
- }
- }
-
- // Add the plot function to the top level of the jQuery object
-
- $.plot = function(placeholder, data, options) {
- //var t0 = new Date();
- var plot = new Plot($(placeholder), data, options, $.plot.plugins);
- //(window.console ? console.log : alert)("time used (msecs): " + ((new Date()).getTime() - t0.getTime()));
- return plot;
- };
-
- $.plot.version = "0.8.3";
-
- $.plot.plugins = [];
-
- // Also add the plot function as a chainable property
-
- $.fn.plot = function(data, options) {
- return this.each(function() {
- $.plot(this, data, options);
- });
- };
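- // Editor's usage sketch (illustrative only, not part of the
- // library source; the placeholder id and series label are
- // made up):
- //
- //   var plot = $.plot("#placeholder",
- //       [ { label: "cpu", data: [ [0, 1], [1, 3], [2, 2] ] } ],
- //       { series: { lines: { show: true } } });
- //
- // or, via the chainable form: $("#placeholder").plot(data, options);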
-
- // round to nearby lower multiple of base
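- // (editor's example: floorInBase(7.3, 2) === 2 * Math.floor(3.65) === 6)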
- function floorInBase(n, base) {
- return base * Math.floor(n / base);
- }
-
-})(jQuery);
diff --git a/src/ceph/qa/workunits/erasure-code/jquery.js b/src/ceph/qa/workunits/erasure-code/jquery.js
deleted file mode 100644
index 8c24ffc..0000000
--- a/src/ceph/qa/workunits/erasure-code/jquery.js
+++ /dev/null
@@ -1,9472 +0,0 @@
-/*!
- * jQuery JavaScript Library v1.8.3
- * http://jquery.com/
- *
- * Includes Sizzle.js
- * http://sizzlejs.com/
- *
- * Copyright 2012 jQuery Foundation and other contributors
- * Released under the MIT license
- * http://jquery.org/license
- *
- * Date: Tue Nov 13 2012 08:20:33 GMT-0500 (Eastern Standard Time)
- */
-(function( window, undefined ) {
-var
- // A central reference to the root jQuery(document)
- rootjQuery,
-
- // The deferred used on DOM ready
- readyList,
-
- // Use the correct document accordingly with window argument (sandbox)
- document = window.document,
- location = window.location,
- navigator = window.navigator,
-
- // Map over jQuery in case of overwrite
- _jQuery = window.jQuery,
-
- // Map over the $ in case of overwrite
- _$ = window.$,
-
- // Save a reference to some core methods
- core_push = Array.prototype.push,
- core_slice = Array.prototype.slice,
- core_indexOf = Array.prototype.indexOf,
- core_toString = Object.prototype.toString,
- core_hasOwn = Object.prototype.hasOwnProperty,
- core_trim = String.prototype.trim,
-
- // Define a local copy of jQuery
- jQuery = function( selector, context ) {
- // The jQuery object is actually just the init constructor 'enhanced'
- return new jQuery.fn.init( selector, context, rootjQuery );
- },
-
- // Used for matching numbers
- core_pnum = /[\-+]?(?:\d*\.|)\d+(?:[eE][\-+]?\d+|)/.source,
-
- // Used for detecting and trimming whitespace
- core_rnotwhite = /\S/,
- core_rspace = /\s+/,
-
- // Make sure we trim BOM and NBSP (here's looking at you, Safari 5.0 and IE)
- rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,
-
- // A simple way to check for HTML strings
- // Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
- rquickExpr = /^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)/,
-
- // Match a standalone tag
- rsingleTag = /^<(\w+)\s*\/?>(?:<\/\1>|)$/,
-
- // JSON RegExp
- rvalidchars = /^[\],:{}\s]*$/,
- rvalidbraces = /(?:^|:|,)(?:\s*\[)+/g,
- rvalidescape = /\\(?:["\\\/bfnrt]|u[\da-fA-F]{4})/g,
- rvalidtokens = /"[^"\\\r\n]*"|true|false|null|-?(?:\d\d*\.|)\d+(?:[eE][\-+]?\d+|)/g,
-
- // Matches dashed string for camelizing
- rmsPrefix = /^-ms-/,
- rdashAlpha = /-([\da-z])/gi,
-
- // Used by jQuery.camelCase as callback to replace()
- fcamelCase = function( all, letter ) {
- return ( letter + "" ).toUpperCase();
- },
-
- // The ready event handler and self cleanup method
- DOMContentLoaded = function() {
- if ( document.addEventListener ) {
- document.removeEventListener( "DOMContentLoaded", DOMContentLoaded, false );
- jQuery.ready();
- } else if ( document.readyState === "complete" ) {
- // we're here because readyState === "complete" in oldIE
- // which is good enough for us to call the dom ready!
- document.detachEvent( "onreadystatechange", DOMContentLoaded );
- jQuery.ready();
- }
- },
-
- // [[Class]] -> type pairs
- class2type = {};
-
-jQuery.fn = jQuery.prototype = {
- constructor: jQuery,
- init: function( selector, context, rootjQuery ) {
- var match, elem, ret, doc;
-
- // Handle $(""), $(null), $(undefined), $(false)
- if ( !selector ) {
- return this;
- }
-
- // Handle $(DOMElement)
- if ( selector.nodeType ) {
- this.context = this[0] = selector;
- this.length = 1;
- return this;
- }
-
- // Handle HTML strings
- if ( typeof selector === "string" ) {
- if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) {
- // Assume that strings that start and end with <> are HTML and skip the regex check
- match = [ null, selector, null ];
-
- } else {
- match = rquickExpr.exec( selector );
- }
-
- // Match html or make sure no context is specified for #id
- if ( match && (match[1] || !context) ) {
-
- // HANDLE: $(html) -> $(array)
- if ( match[1] ) {
- context = context instanceof jQuery ? context[0] : context;
- doc = ( context && context.nodeType ? context.ownerDocument || context : document );
-
- // scripts is true for back-compat
- selector = jQuery.parseHTML( match[1], doc, true );
- if ( rsingleTag.test( match[1] ) && jQuery.isPlainObject( context ) ) {
- this.attr.call( selector, context, true );
- }
-
- return jQuery.merge( this, selector );
-
- // HANDLE: $(#id)
- } else {
- elem = document.getElementById( match[2] );
-
- // Check parentNode to catch when Blackberry 4.6 returns
- // nodes that are no longer in the document #6963
- if ( elem && elem.parentNode ) {
- // Handle the case where IE and Opera return items
- // by name instead of ID
- if ( elem.id !== match[2] ) {
- return rootjQuery.find( selector );
- }
-
- // Otherwise, we inject the element directly into the jQuery object
- this.length = 1;
- this[0] = elem;
- }
-
- this.context = document;
- this.selector = selector;
- return this;
- }
-
- // HANDLE: $(expr, $(...))
- } else if ( !context || context.jquery ) {
- return ( context || rootjQuery ).find( selector );
-
- // HANDLE: $(expr, context)
- // (which is just equivalent to: $(context).find(expr))
- } else {
- return this.constructor( context ).find( selector );
- }
-
- // HANDLE: $(function)
- // Shortcut for document ready
- } else if ( jQuery.isFunction( selector ) ) {
- return rootjQuery.ready( selector );
- }
-
- if ( selector.selector !== undefined ) {
- this.selector = selector.selector;
- this.context = selector.context;
- }
-
- return jQuery.makeArray( selector, this );
- },
-
- // Start with an empty selector
- selector: "",
-
- // The current version of jQuery being used
- jquery: "1.8.3",
-
- // The default length of a jQuery object is 0
- length: 0,
-
- // The number of elements contained in the matched element set
- size: function() {
- return this.length;
- },
-
- toArray: function() {
- return core_slice.call( this );
- },
-
- // Get the Nth element in the matched element set OR
- // Get the whole matched element set as a clean array
- get: function( num ) {
- return num == null ?
-
- // Return a 'clean' array
- this.toArray() :
-
- // Return just the object
- ( num < 0 ? this[ this.length + num ] : this[ num ] );
- },
-
- // Take an array of elements and push it onto the stack
- // (returning the new matched element set)
- pushStack: function( elems, name, selector ) {
-
- // Build a new jQuery matched element set
- var ret = jQuery.merge( this.constructor(), elems );
-
- // Add the old object onto the stack (as a reference)
- ret.prevObject = this;
-
- ret.context = this.context;
-
- if ( name === "find" ) {
- ret.selector = this.selector + ( this.selector ? " " : "" ) + selector;
- } else if ( name ) {
- ret.selector = this.selector + "." + name + "(" + selector + ")";
- }
-
- // Return the newly-formed element set
- return ret;
- },
-
- // Execute a callback for every element in the matched set.
- // (You can seed the arguments with an array of args, but this is
- // only used internally.)
- each: function( callback, args ) {
- return jQuery.each( this, callback, args );
- },
-
- ready: function( fn ) {
- // Add the callback
- jQuery.ready.promise().done( fn );
-
- return this;
- },
-
- eq: function( i ) {
- i = +i;
- return i === -1 ?
- this.slice( i ) :
- this.slice( i, i + 1 );
- },
-
- first: function() {
- return this.eq( 0 );
- },
-
- last: function() {
- return this.eq( -1 );
- },
-
- slice: function() {
- return this.pushStack( core_slice.apply( this, arguments ),
- "slice", core_slice.call(arguments).join(",") );
- },
-
- map: function( callback ) {
- return this.pushStack( jQuery.map(this, function( elem, i ) {
- return callback.call( elem, i, elem );
- }));
- },
-
- end: function() {
- return this.prevObject || this.constructor(null);
- },
-
- // For internal use only.
- // Behaves like an Array's method, not like a jQuery method.
- push: core_push,
- sort: [].sort,
- splice: [].splice
-};
-
-// Give the init function the jQuery prototype for later instantiation
-jQuery.fn.init.prototype = jQuery.fn;
-
-jQuery.extend = jQuery.fn.extend = function() {
- var options, name, src, copy, copyIsArray, clone,
- target = arguments[0] || {},
- i = 1,
- length = arguments.length,
- deep = false;
-
- // Handle a deep copy situation
- if ( typeof target === "boolean" ) {
- deep = target;
- target = arguments[1] || {};
- // skip the boolean and the target
- i = 2;
- }
-
- // Handle case when target is a string or something (possible in deep copy)
- if ( typeof target !== "object" && !jQuery.isFunction(target) ) {
- target = {};
- }
-
- // extend jQuery itself if only one argument is passed
- if ( length === i ) {
- target = this;
- --i;
- }
-
- for ( ; i < length; i++ ) {
- // Only deal with non-null/undefined values
- if ( (options = arguments[ i ]) != null ) {
- // Extend the base object
- for ( name in options ) {
- src = target[ name ];
- copy = options[ name ];
-
- // Prevent never-ending loop
- if ( target === copy ) {
- continue;
- }
-
- // Recurse if we're merging plain objects or arrays
- if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) {
- if ( copyIsArray ) {
- copyIsArray = false;
- clone = src && jQuery.isArray(src) ? src : [];
-
- } else {
- clone = src && jQuery.isPlainObject(src) ? src : {};
- }
-
- // Never move original objects, clone them
- target[ name ] = jQuery.extend( deep, clone, copy );
-
- // Don't bring in undefined values
- } else if ( copy !== undefined ) {
- target[ name ] = copy;
- }
- }
- }
- }
-
- // Return the modified object
- return target;
-};
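-// Editor's usage sketch (illustrative, not part of the original
-// source): jQuery.extend( true, {}, defaults, userOptions )
-// deep-merges userOptions over defaults into a fresh object, while
-// a single-argument call such as
-// jQuery.extend({ myHelper: function() {} }) extends jQuery itself.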
-
-jQuery.extend({
- noConflict: function( deep ) {
- if ( window.$ === jQuery ) {
- window.$ = _$;
- }
-
- if ( deep && window.jQuery === jQuery ) {
- window.jQuery = _jQuery;
- }
-
- return jQuery;
- },
-
- // Is the DOM ready to be used? Set to true once it occurs.
- isReady: false,
-
- // A counter to track how many items to wait for before
- // the ready event fires. See #6781
- readyWait: 1,
-
- // Hold (or release) the ready event
- holdReady: function( hold ) {
- if ( hold ) {
- jQuery.readyWait++;
- } else {
- jQuery.ready( true );
- }
- },
-
- // Handle when the DOM is ready
- ready: function( wait ) {
-
- // Abort if there are pending holds or we're already ready
- if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) {
- return;
- }
-
- // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443).
- if ( !document.body ) {
- return setTimeout( jQuery.ready, 1 );
- }
-
- // Remember that the DOM is ready
- jQuery.isReady = true;
-
- // If a normal DOM Ready event fired, decrement, and wait if need be
- if ( wait !== true && --jQuery.readyWait > 0 ) {
- return;
- }
-
- // If there are functions bound, execute them
- readyList.resolveWith( document, [ jQuery ] );
-
- // Trigger any bound ready events
- if ( jQuery.fn.trigger ) {
- jQuery( document ).trigger("ready").off("ready");
- }
- },
-
- // See test/unit/core.js for details concerning isFunction.
- // Since version 1.3, DOM methods and functions like alert
- // aren't supported. They return false on IE (#2968).
- isFunction: function( obj ) {
- return jQuery.type(obj) === "function";
- },
-
- isArray: Array.isArray || function( obj ) {
- return jQuery.type(obj) === "array";
- },
-
- isWindow: function( obj ) {
- return obj != null && obj == obj.window;
- },
-
- isNumeric: function( obj ) {
- return !isNaN( parseFloat(obj) ) && isFinite( obj );
- },
-
- type: function( obj ) {
- return obj == null ?
- String( obj ) :
- class2type[ core_toString.call(obj) ] || "object";
- },
-
- isPlainObject: function( obj ) {
- // Must be an Object.
- // Because of IE, we also have to check the presence of the constructor property.
- // Make sure that DOM nodes and window objects don't pass through, as well
- if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) {
- return false;
- }
-
- try {
- // If the constructor property is inherited (not own), it must be Object's
- if ( obj.constructor &&
- !core_hasOwn.call(obj, "constructor") &&
- !core_hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) {
- return false;
- }
- } catch ( e ) {
- // IE8,9 Will throw exceptions on certain host objects #9897
- return false;
- }
-
- // Own properties are enumerated first, so to speed things up,
- // if the last one is own, then all properties are own.
-
- var key;
- for ( key in obj ) {}
-
- return key === undefined || core_hasOwn.call( obj, key );
- },
-
- isEmptyObject: function( obj ) {
- var name;
- for ( name in obj ) {
- return false;
- }
- return true;
- },
-
- error: function( msg ) {
- throw new Error( msg );
- },
-
- // data: string of html
- // context (optional): If specified, the fragment will be created in this context, defaults to document
- // scripts (optional): If true, will include scripts passed in the html string
- parseHTML: function( data, context, scripts ) {
- var parsed;
- if ( !data || typeof data !== "string" ) {
- return null;
- }
- if ( typeof context === "boolean" ) {
- scripts = context;
- context = 0;
- }
- context = context || document;
-
- // Single tag
- if ( (parsed = rsingleTag.exec( data )) ) {
- return [ context.createElement( parsed[1] ) ];
- }
-
- parsed = jQuery.buildFragment( [ data ], context, scripts ? null : [] );
- return jQuery.merge( [],
- (parsed.cacheable ? jQuery.clone( parsed.fragment ) : parsed.fragment).childNodes );
- },
-
- parseJSON: function( data ) {
- if ( !data || typeof data !== "string") {
- return null;
- }
-
- // Make sure leading/trailing whitespace is removed (IE can't handle it)
- data = jQuery.trim( data );
-
- // Attempt to parse using the native JSON parser first
- if ( window.JSON && window.JSON.parse ) {
- return window.JSON.parse( data );
- }
-
- // Make sure the incoming data is actual JSON
- // Logic borrowed from http://json.org/json2.js
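- // Editor's note: the three replaces below neutralize escape
- // sequences, string/number/keyword tokens, and "[" runs, so
- // the input only passes rvalidchars if what remains is just
- // "]", ":", ",", "{", "}" and whitespace.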
- if ( rvalidchars.test( data.replace( rvalidescape, "@" )
- .replace( rvalidtokens, "]" )
- .replace( rvalidbraces, "")) ) {
-
- return ( new Function( "return " + data ) )();
-
- }
- jQuery.error( "Invalid JSON: " + data );
- },
-
- // Cross-browser xml parsing
- parseXML: function( data ) {
- var xml, tmp;
- if ( !data || typeof data !== "string" ) {
- return null;
- }
- try {
- if ( window.DOMParser ) { // Standard
- tmp = new DOMParser();
- xml = tmp.parseFromString( data , "text/xml" );
- } else { // IE
- xml = new ActiveXObject( "Microsoft.XMLDOM" );
- xml.async = "false";
- xml.loadXML( data );
- }
- } catch( e ) {
- xml = undefined;
- }
- if ( !xml || !xml.documentElement || xml.getElementsByTagName( "parsererror" ).length ) {
- jQuery.error( "Invalid XML: " + data );
- }
- return xml;
- },
-
- noop: function() {},
-
- // Evaluates a script in a global context
- // Workarounds based on findings by Jim Driscoll
- // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context
- globalEval: function( data ) {
- if ( data && core_rnotwhite.test( data ) ) {
- // We use execScript on Internet Explorer
- // We use an anonymous function so that context is window
- // rather than jQuery in Firefox
- ( window.execScript || function( data ) {
- window[ "eval" ].call( window, data );
- } )( data );
- }
- },
-
- // Convert dashed to camelCase; used by the css and data modules
- // Microsoft forgot to hump their vendor prefix (#9572)
- camelCase: function( string ) {
- return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase );
- },
-
- nodeName: function( elem, name ) {
- return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase();
- },
-
- // args is for internal usage only
- each: function( obj, callback, args ) {
- var name,
- i = 0,
- length = obj.length,
- isObj = length === undefined || jQuery.isFunction( obj );
-
- if ( args ) {
- if ( isObj ) {
- for ( name in obj ) {
- if ( callback.apply( obj[ name ], args ) === false ) {
- break;
- }
- }
- } else {
- for ( ; i < length; ) {
- if ( callback.apply( obj[ i++ ], args ) === false ) {
- break;
- }
- }
- }
-
- // A special, fast case for the most common use of each
- } else {
- if ( isObj ) {
- for ( name in obj ) {
- if ( callback.call( obj[ name ], name, obj[ name ] ) === false ) {
- break;
- }
- }
- } else {
- for ( ; i < length; ) {
- if ( callback.call( obj[ i ], i, obj[ i++ ] ) === false ) {
- break;
- }
- }
- }
- }
-
- return obj;
- },
-
- // Use native String.trim function wherever possible
- trim: core_trim && !core_trim.call("\uFEFF\xA0") ?
- function( text ) {
- return text == null ?
- "" :
- core_trim.call( text );
- } :
-
- // Otherwise use our own trimming functionality
- function( text ) {
- return text == null ?
- "" :
- ( text + "" ).replace( rtrim, "" );
- },
-
- // results is for internal usage only
- makeArray: function( arr, results ) {
- var type,
- ret = results || [];
-
- if ( arr != null ) {
- // The window, strings (and functions) also have 'length'
- // Tweaked logic slightly to handle Blackberry 4.7 RegExp issues #6930
- type = jQuery.type( arr );
-
- if ( arr.length == null || type === "string" || type === "function" || type === "regexp" || jQuery.isWindow( arr ) ) {
- core_push.call( ret, arr );
- } else {
- jQuery.merge( ret, arr );
- }
- }
-
- return ret;
- },
-
- inArray: function( elem, arr, i ) {
- var len;
-
- if ( arr ) {
- if ( core_indexOf ) {
- return core_indexOf.call( arr, elem, i );
- }
-
- len = arr.length;
- i = i ? i < 0 ? Math.max( 0, len + i ) : i : 0;
-
- for ( ; i < len; i++ ) {
- // Skip accessing in sparse arrays
- if ( i in arr && arr[ i ] === elem ) {
- return i;
- }
- }
- }
-
- return -1;
- },
-
- merge: function( first, second ) {
- var l = second.length,
- i = first.length,
- j = 0;
-
- if ( typeof l === "number" ) {
- for ( ; j < l; j++ ) {
- first[ i++ ] = second[ j ];
- }
-
- } else {
- while ( second[j] !== undefined ) {
- first[ i++ ] = second[ j++ ];
- }
- }
-
- first.length = i;
-
- return first;
- },
-
- grep: function( elems, callback, inv ) {
- var retVal,
- ret = [],
- i = 0,
- length = elems.length;
- inv = !!inv;
-
- // Go through the array, only saving the items
- // that pass the validator function
- for ( ; i < length; i++ ) {
- retVal = !!callback( elems[ i ], i );
- if ( inv !== retVal ) {
- ret.push( elems[ i ] );
- }
- }
-
- return ret;
- },
-
- // arg is for internal usage only
- map: function( elems, callback, arg ) {
- var value, key,
- ret = [],
- i = 0,
- length = elems.length,
- // jquery objects are treated as arrays
- isArray = elems instanceof jQuery || length !== undefined && typeof length === "number" && ( ( length > 0 && elems[ 0 ] && elems[ length -1 ] ) || length === 0 || jQuery.isArray( elems ) ) ;
-
- // Go through the array, translating each of the items to their new values
- if ( isArray ) {
- for ( ; i < length; i++ ) {
- value = callback( elems[ i ], i, arg );
-
- if ( value != null ) {
- ret[ ret.length ] = value;
- }
- }
-
- // Go through every key on the object, translating each value
- } else {
- for ( key in elems ) {
- value = callback( elems[ key ], key, arg );
-
- if ( value != null ) {
- ret[ ret.length ] = value;
- }
- }
- }
-
- // Flatten any nested arrays
- return ret.concat.apply( [], ret );
- },
-
- // A global GUID counter for objects
- guid: 1,
-
- // Bind a function to a context, optionally partially applying any
- // arguments.
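- // Editor's usage sketch (illustrative): jQuery.proxy( obj.method,
- // obj ) returns a wrapper that always runs with `this === obj`;
- // the alternative form jQuery.proxy( obj, "method" ) is handled
- // by the string branch below.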
- proxy: function( fn, context ) {
- var tmp, args, proxy;
-
- if ( typeof context === "string" ) {
- tmp = fn[ context ];
- context = fn;
- fn = tmp;
- }
-
- // Quick check to determine if target is callable, in the spec
- // this throws a TypeError, but we will just return undefined.
- if ( !jQuery.isFunction( fn ) ) {
- return undefined;
- }
-
- // Simulated bind
- args = core_slice.call( arguments, 2 );
- proxy = function() {
- return fn.apply( context, args.concat( core_slice.call( arguments ) ) );
- };
-
- // Set the guid of unique handler to the same of original handler, so it can be removed
- proxy.guid = fn.guid = fn.guid || jQuery.guid++;
-
- return proxy;
- },
-
- // Multifunctional method to get and set values of a collection
- // The value(s) can optionally be executed if they are functions
- access: function( elems, fn, key, value, chainable, emptyGet, pass ) {
- var exec,
- bulk = key == null,
- i = 0,
- length = elems.length;
-
- // Sets many values
- if ( key && typeof key === "object" ) {
- for ( i in key ) {
- jQuery.access( elems, fn, i, key[i], 1, emptyGet, value );
- }
- chainable = 1;
-
- // Sets one value
- } else if ( value !== undefined ) {
- // Optionally, function values get executed if exec is true
- exec = pass === undefined && jQuery.isFunction( value );
-
- if ( bulk ) {
- // Bulk operations only iterate when executing function values
- if ( exec ) {
- exec = fn;
- fn = function( elem, key, value ) {
- return exec.call( jQuery( elem ), value );
- };
-
- // Otherwise they run against the entire set
- } else {
- fn.call( elems, value );
- fn = null;
- }
- }
-
- if ( fn ) {
- for (; i < length; i++ ) {
- fn( elems[i], key, exec ? value.call( elems[i], i, fn( elems[i], key ) ) : value, pass );
- }
- }
-
- chainable = 1;
- }
-
- return chainable ?
- elems :
-
- // Gets
- bulk ?
- fn.call( elems ) :
- length ? fn( elems[0], key ) : emptyGet;
- },
-
- now: function() {
- return ( new Date() ).getTime();
- }
-});
-
-jQuery.ready.promise = function( obj ) {
- if ( !readyList ) {
-
- readyList = jQuery.Deferred();
-
- // Catch cases where $(document).ready() is called after the browser event has already occurred.
- // we once tried to use readyState "interactive" here, but it caused issues like the one
- // discovered by ChrisS here: http://bugs.jquery.com/ticket/12282#comment:15
- if ( document.readyState === "complete" ) {
- // Handle it asynchronously to allow scripts the opportunity to delay ready
- setTimeout( jQuery.ready, 1 );
-
- // Standards-based browsers support DOMContentLoaded
- } else if ( document.addEventListener ) {
- // Use the handy event callback
- document.addEventListener( "DOMContentLoaded", DOMContentLoaded, false );
-
- // A fallback to window.onload, that will always work
- window.addEventListener( "load", jQuery.ready, false );
-
- // If IE event model is used
- } else {
- // Ensure firing before onload, maybe late but safe also for iframes
- document.attachEvent( "onreadystatechange", DOMContentLoaded );
-
- // A fallback to window.onload, that will always work
- window.attachEvent( "onload", jQuery.ready );
-
- // If IE and not a frame
- // continually check to see if the document is ready
- var top = false;
-
- try {
- top = window.frameElement == null && document.documentElement;
- } catch(e) {}
-
- if ( top && top.doScroll ) {
- (function doScrollCheck() {
- if ( !jQuery.isReady ) {
-
- try {
- // Use the trick by Diego Perini
- // http://javascript.nwbox.com/IEContentLoaded/
- top.doScroll("left");
- } catch(e) {
- return setTimeout( doScrollCheck, 50 );
- }
-
- // and execute any waiting functions
- jQuery.ready();
- }
- })();
- }
- }
- }
- return readyList.promise( obj );
-};
-
-// Populate the class2type map
-jQuery.each("Boolean Number String Function Array Date RegExp Object".split(" "), function(i, name) {
- class2type[ "[object " + name + "]" ] = name.toLowerCase();
-});
-
-// All jQuery objects should point back to these
-rootjQuery = jQuery(document);
-// String to Object options format cache
-var optionsCache = {};
-
-// Convert String-formatted options into Object-formatted ones and store in cache
-function createOptions( options ) {
- var object = optionsCache[ options ] = {};
- jQuery.each( options.split( core_rspace ), function( _, flag ) {
- object[ flag ] = true;
- });
- return object;
-}
-
-/*
- * Create a callback list using the following parameters:
- *
- * options: an optional list of space-separated options that will change how
- * the callback list behaves or a more traditional option object
- *
- * By default a callback list will act like an event callback list and can be
- * "fired" multiple times.
- *
- * Possible options:
- *
- * once: will ensure the callback list can only be fired once (like a Deferred)
- *
- * memory: will keep track of previous values and will call any callback added
- * after the list has been fired right away with the latest "memorized"
- * values (like a Deferred)
- *
- * unique: will ensure a callback can only be added once (no duplicate in the list)
- *
- * stopOnFalse: stop calling the remaining callbacks when one returns false
- *
- */
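-// Editor's usage sketch (illustrative, not part of the original
-// source):
-//
-//   var callbacks = jQuery.Callbacks( "once memory" );
-//   callbacks.add( function( msg ) { console.log( msg ); } );
-//   callbacks.fire( "hello" ); // logs "hello"; later fire() calls
-//                              // are ignored ("once") while later
-//                              // add()s run immediately ("memory")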
-jQuery.Callbacks = function( options ) {
-
- // Convert options from String-formatted to Object-formatted if needed
- // (we check in cache first)
- options = typeof options === "string" ?
- ( optionsCache[ options ] || createOptions( options ) ) :
- jQuery.extend( {}, options );
-
- var // Last fire value (for non-forgettable lists)
- memory,
- // Flag to know if list was already fired
- fired,
- // Flag to know if list is currently firing
- firing,
- // First callback to fire (used internally by add and fireWith)
- firingStart,
- // End of the loop when firing
- firingLength,
- // Index of currently firing callback (modified by remove if needed)
- firingIndex,
- // Actual callback list
- list = [],
- // Stack of fire calls for repeatable lists
- stack = !options.once && [],
- // Fire callbacks
- fire = function( data ) {
- memory = options.memory && data;
- fired = true;
- firingIndex = firingStart || 0;
- firingStart = 0;
- firingLength = list.length;
- firing = true;
- for ( ; list && firingIndex < firingLength; firingIndex++ ) {
- if ( list[ firingIndex ].apply( data[ 0 ], data[ 1 ] ) === false && options.stopOnFalse ) {
- memory = false; // To prevent further calls using add
- break;
- }
- }
- firing = false;
- if ( list ) {
- if ( stack ) {
- if ( stack.length ) {
- fire( stack.shift() );
- }
- } else if ( memory ) {
- list = [];
- } else {
- self.disable();
- }
- }
- },
- // Actual Callbacks object
- self = {
- // Add a callback or a collection of callbacks to the list
- add: function() {
- if ( list ) {
- // First, we save the current length
- var start = list.length;
- (function add( args ) {
- jQuery.each( args, function( _, arg ) {
- var type = jQuery.type( arg );
- if ( type === "function" ) {
- if ( !options.unique || !self.has( arg ) ) {
- list.push( arg );
- }
- } else if ( arg && arg.length && type !== "string" ) {
- // Inspect recursively
- add( arg );
- }
- });
- })( arguments );
- // Do we need to add the callbacks to the
- // current firing batch?
- if ( firing ) {
- firingLength = list.length;
- // With memory, if we're not firing then
- // we should call right away
- } else if ( memory ) {
- firingStart = start;
- fire( memory );
- }
- }
- return this;
- },
- // Remove a callback from the list
- remove: function() {
- if ( list ) {
- jQuery.each( arguments, function( _, arg ) {
- var index;
- while( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) {
- list.splice( index, 1 );
- // Handle firing indexes
- if ( firing ) {
- if ( index <= firingLength ) {
- firingLength--;
- }
- if ( index <= firingIndex ) {
- firingIndex--;
- }
- }
- }
- });
- }
- return this;
- },
- // Check whether a given callback is in the list
- has: function( fn ) {
- return jQuery.inArray( fn, list ) > -1;
- },
- // Remove all callbacks from the list
- empty: function() {
- list = [];
- return this;
- },
- // Have the list do nothing anymore
- disable: function() {
- list = stack = memory = undefined;
- return this;
- },
- // Is it disabled?
- disabled: function() {
- return !list;
- },
- // Lock the list in its current state
- lock: function() {
- stack = undefined;
- if ( !memory ) {
- self.disable();
- }
- return this;
- },
- // Is it locked?
- locked: function() {
- return !stack;
- },
- // Call all callbacks with the given context and arguments
- fireWith: function( context, args ) {
- args = args || [];
- args = [ context, args.slice ? args.slice() : args ];
- if ( list && ( !fired || stack ) ) {
- if ( firing ) {
- stack.push( args );
- } else {
- fire( args );
- }
- }
- return this;
- },
- // Call all the callbacks with the given arguments
- fire: function() {
- self.fireWith( this, arguments );
- return this;
- },
- // To know if the callbacks have already been called at least once
- fired: function() {
- return !!fired;
- }
- };
-
- return self;
-};
-jQuery.extend({
-
- Deferred: function( func ) {
- var tuples = [
- // action, add listener, listener list, final state
- [ "resolve", "done", jQuery.Callbacks("once memory"), "resolved" ],
- [ "reject", "fail", jQuery.Callbacks("once memory"), "rejected" ],
- [ "notify", "progress", jQuery.Callbacks("memory") ]
- ],
- state = "pending",
- promise = {
- state: function() {
- return state;
- },
- always: function() {
- deferred.done( arguments ).fail( arguments );
- return this;
- },
- then: function( /* fnDone, fnFail, fnProgress */ ) {
- var fns = arguments;
- return jQuery.Deferred(function( newDefer ) {
- jQuery.each( tuples, function( i, tuple ) {
- var action = tuple[ 0 ],
- fn = fns[ i ];
- // deferred[ done | fail | progress ] for forwarding actions to newDefer
- deferred[ tuple[1] ]( jQuery.isFunction( fn ) ?
- function() {
- var returned = fn.apply( this, arguments );
- if ( returned && jQuery.isFunction( returned.promise ) ) {
- returned.promise()
- .done( newDefer.resolve )
- .fail( newDefer.reject )
- .progress( newDefer.notify );
- } else {
- newDefer[ action + "With" ]( this === deferred ? newDefer : this, [ returned ] );
- }
- } :
- newDefer[ action ]
- );
- });
- fns = null;
- }).promise();
- },
- // Get a promise for this deferred
- // If obj is provided, the promise aspect is added to the object
- promise: function( obj ) {
- return obj != null ? jQuery.extend( obj, promise ) : promise;
- }
- },
- deferred = {};
-
- // Keep pipe for back-compat
- promise.pipe = promise.then;
-
- // Add list-specific methods
- jQuery.each( tuples, function( i, tuple ) {
- var list = tuple[ 2 ],
- stateString = tuple[ 3 ];
-
- // promise[ done | fail | progress ] = list.add
- promise[ tuple[1] ] = list.add;
-
- // Handle state
- if ( stateString ) {
- list.add(function() {
- // state = [ resolved | rejected ]
- state = stateString;
-
- // [ reject_list | resolve_list ].disable; progress_list.lock
- }, tuples[ i ^ 1 ][ 2 ].disable, tuples[ 2 ][ 2 ].lock );
- }
-
- // deferred[ resolve | reject | notify ] = list.fire
- deferred[ tuple[0] ] = list.fire;
- deferred[ tuple[0] + "With" ] = list.fireWith;
- });
-
- // Make the deferred a promise
- promise.promise( deferred );
-
- // Call given func if any
- if ( func ) {
- func.call( deferred, deferred );
- }
-
- // All done!
- return deferred;
- },
-
- // Deferred helper
- when: function( subordinate /* , ..., subordinateN */ ) {
- var i = 0,
- resolveValues = core_slice.call( arguments ),
- length = resolveValues.length,
-
- // the count of uncompleted subordinates
- remaining = length !== 1 || ( subordinate && jQuery.isFunction( subordinate.promise ) ) ? length : 0,
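- // Editor's note: with a single non-thenable argument,
- // remaining starts at 0, so the master deferred below
- // resolves immediately with that value.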
-
- // the master Deferred. If resolveValues consist of only a single Deferred, just use that.
- deferred = remaining === 1 ? subordinate : jQuery.Deferred(),
-
- // Update function for both resolve and progress values
- updateFunc = function( i, contexts, values ) {
- return function( value ) {
- contexts[ i ] = this;
- values[ i ] = arguments.length > 1 ? core_slice.call( arguments ) : value;
- if( values === progressValues ) {
- deferred.notifyWith( contexts, values );
- } else if ( !( --remaining ) ) {
- deferred.resolveWith( contexts, values );
- }
- };
- },
-
- progressValues, progressContexts, resolveContexts;
-
- // add listeners to Deferred subordinates; treat others as resolved
- if ( length > 1 ) {
- progressValues = new Array( length );
- progressContexts = new Array( length );
- resolveContexts = new Array( length );
- for ( ; i < length; i++ ) {
- if ( resolveValues[ i ] && jQuery.isFunction( resolveValues[ i ].promise ) ) {
- resolveValues[ i ].promise()
- .done( updateFunc( i, resolveContexts, resolveValues ) )
- .fail( deferred.reject )
- .progress( updateFunc( i, progressContexts, progressValues ) );
- } else {
- --remaining;
- }
- }
- }
-
- // if we're not waiting on anything, resolve the master
- if ( !remaining ) {
- deferred.resolveWith( resolveContexts, resolveValues );
- }
-
- return deferred.promise();
- }
-});
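-
-// Usage sketch (illustrative only): a Deferred resolves at most once, and
-// jQuery.when() aggregates several promises into one master promise.
-//   var d = jQuery.Deferred();
-//   d.promise().done(function( v ) { console.log( "got", v ); }); // fires on resolve
-//   d.resolve( 42 );                                              // state() -> "resolved"
-//   jQuery.when( d, jQuery.Deferred().resolve( "ok" ) )
-//       .done(function( a, b ) { console.log( a, b ); });         // 42 "ok"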
-jQuery.support = (function() {
-
- var support,
- all,
- a,
- select,
- opt,
- input,
- fragment,
- eventName,
- i,
- isSupported,
- clickFn,
- div = document.createElement("div");
-
- // Setup
- div.setAttribute( "className", "t" );
- div.innerHTML = " <link/><table></table><a href='/a'>a</a><input type='checkbox'/>";
-
- // Support tests won't run in some limited or non-browser environments
- all = div.getElementsByTagName("*");
- a = div.getElementsByTagName("a")[ 0 ];
- if ( !all || !a || !all.length ) {
- return {};
- }
-
- // First batch of tests
- select = document.createElement("select");
- opt = select.appendChild( document.createElement("option") );
- input = div.getElementsByTagName("input")[ 0 ];
-
- a.style.cssText = "top:1px;float:left;opacity:.5";
- support = {
- // IE strips leading whitespace when .innerHTML is used
- leadingWhitespace: ( div.firstChild.nodeType === 3 ),
-
- // Make sure that tbody elements aren't automatically inserted
- // IE will insert them into empty tables
- tbody: !div.getElementsByTagName("tbody").length,
-
- // Make sure that link elements get serialized correctly by innerHTML
- // This requires a wrapper element in IE
- htmlSerialize: !!div.getElementsByTagName("link").length,
-
- // Get the style information from getAttribute
- // (IE uses .cssText instead)
- style: /top/.test( a.getAttribute("style") ),
-
- // Make sure that URLs aren't manipulated
- // (IE normalizes it by default)
- hrefNormalized: ( a.getAttribute("href") === "/a" ),
-
- // Make sure that element opacity exists
- // (IE uses filter instead)
- // Use a regex to work around a WebKit issue. See #5145
- opacity: /^0.5/.test( a.style.opacity ),
-
- // Verify style float existence
- // (IE uses styleFloat instead of cssFloat)
- cssFloat: !!a.style.cssFloat,
-
- // Make sure that if no value is specified for a checkbox
- // that it defaults to "on".
- // (WebKit defaults to "" instead)
- checkOn: ( input.value === "on" ),
-
- // Make sure that a selected-by-default option has a working selected property.
-		// (WebKit defaults to false instead of true; IE does too when it's in an optgroup)
- optSelected: opt.selected,
-
- // Test setAttribute on camelCase class. If it works, we need attrFixes when doing get/setAttribute (ie6/7)
- getSetAttribute: div.className !== "t",
-
- // Tests for enctype support on a form (#6743)
- enctype: !!document.createElement("form").enctype,
-
- // Makes sure cloning an html5 element does not cause problems
- // Where outerHTML is undefined, this still works
- html5Clone: document.createElement("nav").cloneNode( true ).outerHTML !== "<:nav></:nav>",
-
- // jQuery.support.boxModel DEPRECATED in 1.8 since we don't support Quirks Mode
- boxModel: ( document.compatMode === "CSS1Compat" ),
-
- // Will be defined later
- submitBubbles: true,
- changeBubbles: true,
- focusinBubbles: false,
- deleteExpando: true,
- noCloneEvent: true,
- inlineBlockNeedsLayout: false,
- shrinkWrapBlocks: false,
- reliableMarginRight: true,
- boxSizingReliable: true,
- pixelPosition: false
- };
-
- // Make sure checked status is properly cloned
- input.checked = true;
- support.noCloneChecked = input.cloneNode( true ).checked;
-
- // Make sure that the options inside disabled selects aren't marked as disabled
- // (WebKit marks them as disabled)
- select.disabled = true;
- support.optDisabled = !opt.disabled;
-
- // Test to see if it's possible to delete an expando from an element
- // Fails in Internet Explorer
- try {
- delete div.test;
- } catch( e ) {
- support.deleteExpando = false;
- }
-
- if ( !div.addEventListener && div.attachEvent && div.fireEvent ) {
- div.attachEvent( "onclick", clickFn = function() {
- // Cloning a node shouldn't copy over any
- // bound event handlers (IE does this)
- support.noCloneEvent = false;
- });
- div.cloneNode( true ).fireEvent("onclick");
- div.detachEvent( "onclick", clickFn );
- }
-
- // Check if a radio maintains its value
- // after being appended to the DOM
- input = document.createElement("input");
- input.value = "t";
- input.setAttribute( "type", "radio" );
- support.radioValue = input.value === "t";
-
- input.setAttribute( "checked", "checked" );
-
- // #11217 - WebKit loses check when the name is after the checked attribute
- input.setAttribute( "name", "t" );
-
- div.appendChild( input );
- fragment = document.createDocumentFragment();
- fragment.appendChild( div.lastChild );
-
- // WebKit doesn't clone checked state correctly in fragments
- support.checkClone = fragment.cloneNode( true ).cloneNode( true ).lastChild.checked;
-
- // Check if a disconnected checkbox will retain its checked
- // value of true after appended to the DOM (IE6/7)
- support.appendChecked = input.checked;
-
- fragment.removeChild( input );
- fragment.appendChild( div );
-
- // Technique from Juriy Zaytsev
- // http://perfectionkills.com/detecting-event-support-without-browser-sniffing/
- // We only care about the case where non-standard event systems
- // are used, namely in IE. Short-circuiting here helps us to
- // avoid an eval call (in setAttribute) which can cause CSP
- // to go haywire. See: https://developer.mozilla.org/en/Security/CSP
- if ( div.attachEvent ) {
- for ( i in {
- submit: true,
- change: true,
- focusin: true
- }) {
- eventName = "on" + i;
- isSupported = ( eventName in div );
- if ( !isSupported ) {
- div.setAttribute( eventName, "return;" );
- isSupported = ( typeof div[ eventName ] === "function" );
- }
- support[ i + "Bubbles" ] = isSupported;
- }
- }
-
- // Run tests that need a body at doc ready
- jQuery(function() {
- var container, div, tds, marginDiv,
- divReset = "padding:0;margin:0;border:0;display:block;overflow:hidden;",
- body = document.getElementsByTagName("body")[0];
-
- if ( !body ) {
- // Return for frameset docs that don't have a body
- return;
- }
-
- container = document.createElement("div");
- container.style.cssText = "visibility:hidden;border:0;width:0;height:0;position:static;top:0;margin-top:1px";
- body.insertBefore( container, body.firstChild );
-
- // Construct the test element
- div = document.createElement("div");
- container.appendChild( div );
-
- // Check if table cells still have offsetWidth/Height when they are set
- // to display:none and there are still other visible table cells in a
- // table row; if so, offsetWidth/Height are not reliable for use when
- // determining if an element has been hidden directly using
- // display:none (it is still safe to use offsets if a parent element is
- // hidden; don safety goggles and see bug #4512 for more information).
- // (only IE 8 fails this test)
- div.innerHTML = "<table><tr><td></td><td>t</td></tr></table>";
- tds = div.getElementsByTagName("td");
- tds[ 0 ].style.cssText = "padding:0;margin:0;border:0;display:none";
- isSupported = ( tds[ 0 ].offsetHeight === 0 );
-
- tds[ 0 ].style.display = "";
- tds[ 1 ].style.display = "none";
-
- // Check if empty table cells still have offsetWidth/Height
- // (IE <= 8 fail this test)
- support.reliableHiddenOffsets = isSupported && ( tds[ 0 ].offsetHeight === 0 );
-
- // Check box-sizing and margin behavior
- div.innerHTML = "";
- div.style.cssText = "box-sizing:border-box;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;";
- support.boxSizing = ( div.offsetWidth === 4 );
- support.doesNotIncludeMarginInBodyOffset = ( body.offsetTop !== 1 );
-
-		// NOTE: To any future maintainer, we guard on window.getComputedStyle
-		// because jsdom on node.js will break without it.
- if ( window.getComputedStyle ) {
- support.pixelPosition = ( window.getComputedStyle( div, null ) || {} ).top !== "1%";
- support.boxSizingReliable = ( window.getComputedStyle( div, null ) || { width: "4px" } ).width === "4px";
-
- // Check if div with explicit width and no margin-right incorrectly
- // gets computed margin-right based on width of container. For more
- // info see bug #3333
- // Fails in WebKit before Feb 2011 nightlies
- // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right
- marginDiv = document.createElement("div");
- marginDiv.style.cssText = div.style.cssText = divReset;
- marginDiv.style.marginRight = marginDiv.style.width = "0";
- div.style.width = "1px";
- div.appendChild( marginDiv );
- support.reliableMarginRight =
- !parseFloat( ( window.getComputedStyle( marginDiv, null ) || {} ).marginRight );
- }
-
- if ( typeof div.style.zoom !== "undefined" ) {
- // Check if natively block-level elements act like inline-block
- // elements when setting their display to 'inline' and giving
- // them layout
- // (IE < 8 does this)
- div.innerHTML = "";
- div.style.cssText = divReset + "width:1px;padding:1px;display:inline;zoom:1";
- support.inlineBlockNeedsLayout = ( div.offsetWidth === 3 );
-
- // Check if elements with layout shrink-wrap their children
- // (IE 6 does this)
- div.style.display = "block";
- div.style.overflow = "visible";
- div.innerHTML = "<div></div>";
- div.firstChild.style.width = "5px";
- support.shrinkWrapBlocks = ( div.offsetWidth !== 3 );
-
- container.style.zoom = 1;
- }
-
- // Null elements to avoid leaks in IE
- body.removeChild( container );
- container = div = tds = marginDiv = null;
- });
-
- // Null elements to avoid leaks in IE
- fragment.removeChild( div );
- all = a = select = opt = input = fragment = div = null;
-
- return support;
-})();
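-
-// Usage sketch (illustrative only): the feature flags computed above gate
-// browser workarounds elsewhere in the library, and plugins can read them too.
-//   if ( !jQuery.support.opacity ) {
-//       // fall back to IE filter-based opacity handling
-//   }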
-var rbrace = /(?:\{[\s\S]*\}|\[[\s\S]*\])$/,
- rmultiDash = /([A-Z])/g;
-
-jQuery.extend({
- cache: {},
-
- deletedIds: [],
-
- // Remove at next major release (1.9/2.0)
- uuid: 0,
-
- // Unique for each copy of jQuery on the page
- // Non-digits removed to match rinlinejQuery
- expando: "jQuery" + ( jQuery.fn.jquery + Math.random() ).replace( /\D/g, "" ),
-
- // The following elements throw uncatchable exceptions if you
- // attempt to add expando properties to them.
- noData: {
- "embed": true,
-		// Ban all objects except for Flash (which handles expandos)
- "object": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",
- "applet": true
- },
-
- hasData: function( elem ) {
- elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ];
- return !!elem && !isEmptyDataObject( elem );
- },
-
- data: function( elem, name, data, pvt /* Internal Use Only */ ) {
- if ( !jQuery.acceptData( elem ) ) {
- return;
- }
-
- var thisCache, ret,
- internalKey = jQuery.expando,
- getByName = typeof name === "string",
-
- // We have to handle DOM nodes and JS objects differently because IE6-7
- // can't GC object references properly across the DOM-JS boundary
- isNode = elem.nodeType,
-
- // Only DOM nodes need the global jQuery cache; JS object data is
- // attached directly to the object so GC can occur automatically
- cache = isNode ? jQuery.cache : elem,
-
- // Only defining an ID for JS objects if its cache already exists allows
- // the code to shortcut on the same path as a DOM node with no cache
- id = isNode ? elem[ internalKey ] : elem[ internalKey ] && internalKey;
-
- // Avoid doing any more work than we need to when trying to get data on an
- // object that has no data at all
- if ( (!id || !cache[id] || (!pvt && !cache[id].data)) && getByName && data === undefined ) {
- return;
- }
-
- if ( !id ) {
- // Only DOM nodes need a new unique ID for each element since their data
- // ends up in the global cache
- if ( isNode ) {
- elem[ internalKey ] = id = jQuery.deletedIds.pop() || jQuery.guid++;
- } else {
- id = internalKey;
- }
- }
-
- if ( !cache[ id ] ) {
- cache[ id ] = {};
-
- // Avoids exposing jQuery metadata on plain JS objects when the object
- // is serialized using JSON.stringify
- if ( !isNode ) {
- cache[ id ].toJSON = jQuery.noop;
- }
- }
-
- // An object can be passed to jQuery.data instead of a key/value pair; this gets
- // shallow copied over onto the existing cache
- if ( typeof name === "object" || typeof name === "function" ) {
- if ( pvt ) {
- cache[ id ] = jQuery.extend( cache[ id ], name );
- } else {
- cache[ id ].data = jQuery.extend( cache[ id ].data, name );
- }
- }
-
- thisCache = cache[ id ];
-
- // jQuery data() is stored in a separate object inside the object's internal data
- // cache in order to avoid key collisions between internal data and user-defined
- // data.
- if ( !pvt ) {
- if ( !thisCache.data ) {
- thisCache.data = {};
- }
-
- thisCache = thisCache.data;
- }
-
- if ( data !== undefined ) {
- thisCache[ jQuery.camelCase( name ) ] = data;
- }
-
- // Check for both converted-to-camel and non-converted data property names
- // If a data property was specified
- if ( getByName ) {
-
-		// First try to find as-is property data
- ret = thisCache[ name ];
-
- // Test for null|undefined property data
- if ( ret == null ) {
-
- // Try to find the camelCased property
- ret = thisCache[ jQuery.camelCase( name ) ];
- }
- } else {
- ret = thisCache;
- }
-
- return ret;
- },
-
- removeData: function( elem, name, pvt /* Internal Use Only */ ) {
- if ( !jQuery.acceptData( elem ) ) {
- return;
- }
-
- var thisCache, i, l,
-
- isNode = elem.nodeType,
-
- // See jQuery.data for more information
- cache = isNode ? jQuery.cache : elem,
- id = isNode ? elem[ jQuery.expando ] : jQuery.expando;
-
- // If there is already no cache entry for this object, there is no
- // purpose in continuing
- if ( !cache[ id ] ) {
- return;
- }
-
- if ( name ) {
-
- thisCache = pvt ? cache[ id ] : cache[ id ].data;
-
- if ( thisCache ) {
-
- // Support array or space separated string names for data keys
- if ( !jQuery.isArray( name ) ) {
-
- // try the string as a key before any manipulation
- if ( name in thisCache ) {
- name = [ name ];
- } else {
-
- // split the camel cased version by spaces unless a key with the spaces exists
- name = jQuery.camelCase( name );
- if ( name in thisCache ) {
- name = [ name ];
- } else {
- name = name.split(" ");
- }
- }
- }
-
- for ( i = 0, l = name.length; i < l; i++ ) {
- delete thisCache[ name[i] ];
- }
-
- // If there is no data left in the cache, we want to continue
- // and let the cache object itself get destroyed
- if ( !( pvt ? isEmptyDataObject : jQuery.isEmptyObject )( thisCache ) ) {
- return;
- }
- }
- }
-
- // See jQuery.data for more information
- if ( !pvt ) {
- delete cache[ id ].data;
-
- // Don't destroy the parent cache unless the internal data object
- // had been the only thing left in it
- if ( !isEmptyDataObject( cache[ id ] ) ) {
- return;
- }
- }
-
- // Destroy the cache
- if ( isNode ) {
- jQuery.cleanData( [ elem ], true );
-
- // Use delete when supported for expandos or `cache` is not a window per isWindow (#10080)
- } else if ( jQuery.support.deleteExpando || cache != cache.window ) {
- delete cache[ id ];
-
- // When all else fails, null
- } else {
- cache[ id ] = null;
- }
- },
-
- // For internal use only.
- _data: function( elem, name, data ) {
- return jQuery.data( elem, name, data, true );
- },
-
- // A method for determining if a DOM node can handle the data expando
- acceptData: function( elem ) {
- var noData = elem.nodeName && jQuery.noData[ elem.nodeName.toLowerCase() ];
-
- // nodes accept data unless otherwise specified; rejection can be conditional
- return !noData || noData !== true && elem.getAttribute("classid") === noData;
- }
-});
-
-jQuery.fn.extend({
- data: function( key, value ) {
- var parts, part, attr, name, l,
- elem = this[0],
- i = 0,
- data = null;
-
- // Gets all values
- if ( key === undefined ) {
- if ( this.length ) {
- data = jQuery.data( elem );
-
- if ( elem.nodeType === 1 && !jQuery._data( elem, "parsedAttrs" ) ) {
- attr = elem.attributes;
- for ( l = attr.length; i < l; i++ ) {
- name = attr[i].name;
-
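-					// !indexOf() is truthy only for index 0, i.e. attribute names starting with "data-"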
- if ( !name.indexOf( "data-" ) ) {
- name = jQuery.camelCase( name.substring(5) );
-
- dataAttr( elem, name, data[ name ] );
- }
- }
- jQuery._data( elem, "parsedAttrs", true );
- }
- }
-
- return data;
- }
-
- // Sets multiple values
- if ( typeof key === "object" ) {
- return this.each(function() {
- jQuery.data( this, key );
- });
- }
-
- parts = key.split( ".", 2 );
- parts[1] = parts[1] ? "." + parts[1] : "";
- part = parts[1] + "!";
-
- return jQuery.access( this, function( value ) {
-
- if ( value === undefined ) {
- data = this.triggerHandler( "getData" + part, [ parts[0] ] );
-
- // Try to fetch any internally stored data first
- if ( data === undefined && elem ) {
- data = jQuery.data( elem, key );
- data = dataAttr( elem, key, data );
- }
-
- return data === undefined && parts[1] ?
- this.data( parts[0] ) :
- data;
- }
-
- parts[1] = value;
- this.each(function() {
- var self = jQuery( this );
-
- self.triggerHandler( "setData" + part, parts );
- jQuery.data( this, key, value );
- self.triggerHandler( "changeData" + part, parts );
- });
- }, null, value, arguments.length > 1, null, false );
- },
-
- removeData: function( key ) {
- return this.each(function() {
- jQuery.removeData( this, key );
- });
- }
-});
-
-function dataAttr( elem, key, data ) {
- // If nothing was found internally, try to fetch any
- // data from the HTML5 data-* attribute
- if ( data === undefined && elem.nodeType === 1 ) {
-
- var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase();
-
- data = elem.getAttribute( name );
-
- if ( typeof data === "string" ) {
- try {
- data = data === "true" ? true :
- data === "false" ? false :
- data === "null" ? null :
- // Only convert to a number if it doesn't change the string
- +data + "" === data ? +data :
- rbrace.test( data ) ? jQuery.parseJSON( data ) :
- data;
- } catch( e ) {}
-
- // Make sure we set the data so it isn't changed later
- jQuery.data( elem, key, data );
-
- } else {
- data = undefined;
- }
- }
-
- return data;
-}
-
-// checks a cache object for emptiness
-function isEmptyDataObject( obj ) {
- var name;
- for ( name in obj ) {
-
- // if the public data object is empty, the private is still empty
- if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) {
- continue;
- }
- if ( name !== "toJSON" ) {
- return false;
- }
- }
-
- return true;
-}
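-
-// Usage sketch (illustrative only): dataAttr() above coerces HTML5 data-*
-// strings, so markup such as data-open="true" data-count="42" round-trips as types.
-//   jQuery( "#box" ).data( "open" );  // true (boolean, not the string "true")
-//   jQuery( "#box" ).data( "count" ); // 42   (number, not the string "42")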
-jQuery.extend({
- queue: function( elem, type, data ) {
- var queue;
-
- if ( elem ) {
- type = ( type || "fx" ) + "queue";
- queue = jQuery._data( elem, type );
-
- // Speed up dequeue by getting out quickly if this is just a lookup
- if ( data ) {
- if ( !queue || jQuery.isArray(data) ) {
- queue = jQuery._data( elem, type, jQuery.makeArray(data) );
- } else {
- queue.push( data );
- }
- }
- return queue || [];
- }
- },
-
- dequeue: function( elem, type ) {
- type = type || "fx";
-
- var queue = jQuery.queue( elem, type ),
- startLength = queue.length,
- fn = queue.shift(),
- hooks = jQuery._queueHooks( elem, type ),
- next = function() {
- jQuery.dequeue( elem, type );
- };
-
- // If the fx queue is dequeued, always remove the progress sentinel
- if ( fn === "inprogress" ) {
- fn = queue.shift();
- startLength--;
- }
-
- if ( fn ) {
-
- // Add a progress sentinel to prevent the fx queue from being
- // automatically dequeued
- if ( type === "fx" ) {
- queue.unshift( "inprogress" );
- }
-
- // clear up the last queue stop function
- delete hooks.stop;
- fn.call( elem, next, hooks );
- }
-
- if ( !startLength && hooks ) {
- hooks.empty.fire();
- }
- },
-
- // not intended for public consumption - generates a queueHooks object, or returns the current one
- _queueHooks: function( elem, type ) {
- var key = type + "queueHooks";
- return jQuery._data( elem, key ) || jQuery._data( elem, key, {
- empty: jQuery.Callbacks("once memory").add(function() {
- jQuery.removeData( elem, type + "queue", true );
- jQuery.removeData( elem, key, true );
- })
- });
- }
-});
-
-jQuery.fn.extend({
- queue: function( type, data ) {
- var setter = 2;
-
- if ( typeof type !== "string" ) {
- data = type;
- type = "fx";
- setter--;
- }
-
- if ( arguments.length < setter ) {
- return jQuery.queue( this[0], type );
- }
-
- return data === undefined ?
- this :
- this.each(function() {
- var queue = jQuery.queue( this, type, data );
-
-				// ensure a queueHooks object exists for this queue
- jQuery._queueHooks( this, type );
-
- if ( type === "fx" && queue[0] !== "inprogress" ) {
- jQuery.dequeue( this, type );
- }
- });
- },
- dequeue: function( type ) {
- return this.each(function() {
- jQuery.dequeue( this, type );
- });
- },
-	// Based on the plugin by Clint Helfers, with permission.
- // http://blindsignals.com/index.php/2009/07/jquery-delay/
- delay: function( time, type ) {
- time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time;
- type = type || "fx";
-
- return this.queue( type, function( next, hooks ) {
- var timeout = setTimeout( next, time );
- hooks.stop = function() {
- clearTimeout( timeout );
- };
- });
- },
- clearQueue: function( type ) {
- return this.queue( type || "fx", [] );
- },
- // Get a promise resolved when queues of a certain type
- // are emptied (fx is the type by default)
- promise: function( type, obj ) {
- var tmp,
- count = 1,
- defer = jQuery.Deferred(),
- elements = this,
- i = this.length,
- resolve = function() {
- if ( !( --count ) ) {
- defer.resolveWith( elements, [ elements ] );
- }
- };
-
- if ( typeof type !== "string" ) {
- obj = type;
- type = undefined;
- }
- type = type || "fx";
-
- while( i-- ) {
- tmp = jQuery._data( elements[ i ], type + "queueHooks" );
- if ( tmp && tmp.empty ) {
- count++;
- tmp.empty.add( resolve );
- }
- }
- resolve();
- return defer.promise( obj );
- }
-});
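-
-// Usage sketch (illustrative only): per-element queues drive animation
-// sequencing; delay() relies on the queueHooks "stop" slot set above.
-//   jQuery( "#box" ).fadeOut().delay( 200 ).fadeIn(); // queued on "fx"
-//   jQuery( "#box" ).queue( "fx" ).length;            // entries still pending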
-var nodeHook, boolHook, fixSpecified,
- rclass = /[\t\r\n]/g,
- rreturn = /\r/g,
- rtype = /^(?:button|input)$/i,
- rfocusable = /^(?:button|input|object|select|textarea)$/i,
- rclickable = /^a(?:rea|)$/i,
- rboolean = /^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i,
- getSetAttribute = jQuery.support.getSetAttribute;
-
-jQuery.fn.extend({
- attr: function( name, value ) {
- return jQuery.access( this, jQuery.attr, name, value, arguments.length > 1 );
- },
-
- removeAttr: function( name ) {
- return this.each(function() {
- jQuery.removeAttr( this, name );
- });
- },
-
- prop: function( name, value ) {
- return jQuery.access( this, jQuery.prop, name, value, arguments.length > 1 );
- },
-
- removeProp: function( name ) {
- name = jQuery.propFix[ name ] || name;
- return this.each(function() {
- // try/catch handles cases where IE balks (such as removing a property on window)
- try {
- this[ name ] = undefined;
- delete this[ name ];
- } catch( e ) {}
- });
- },
-
- addClass: function( value ) {
- var classNames, i, l, elem,
- setClass, c, cl;
-
- if ( jQuery.isFunction( value ) ) {
- return this.each(function( j ) {
- jQuery( this ).addClass( value.call(this, j, this.className) );
- });
- }
-
- if ( value && typeof value === "string" ) {
- classNames = value.split( core_rspace );
-
- for ( i = 0, l = this.length; i < l; i++ ) {
- elem = this[ i ];
-
- if ( elem.nodeType === 1 ) {
- if ( !elem.className && classNames.length === 1 ) {
- elem.className = value;
-
- } else {
- setClass = " " + elem.className + " ";
-
- for ( c = 0, cl = classNames.length; c < cl; c++ ) {
- if ( setClass.indexOf( " " + classNames[ c ] + " " ) < 0 ) {
- setClass += classNames[ c ] + " ";
- }
- }
- elem.className = jQuery.trim( setClass );
- }
- }
- }
- }
-
- return this;
- },
-
- removeClass: function( value ) {
- var removes, className, elem, c, cl, i, l;
-
- if ( jQuery.isFunction( value ) ) {
- return this.each(function( j ) {
- jQuery( this ).removeClass( value.call(this, j, this.className) );
- });
- }
- if ( (value && typeof value === "string") || value === undefined ) {
- removes = ( value || "" ).split( core_rspace );
-
- for ( i = 0, l = this.length; i < l; i++ ) {
- elem = this[ i ];
- if ( elem.nodeType === 1 && elem.className ) {
-
- className = (" " + elem.className + " ").replace( rclass, " " );
-
- // loop over each item in the removal list
- for ( c = 0, cl = removes.length; c < cl; c++ ) {
-					// Remove until there is nothing left to remove
- while ( className.indexOf(" " + removes[ c ] + " ") >= 0 ) {
- className = className.replace( " " + removes[ c ] + " " , " " );
- }
- }
- elem.className = value ? jQuery.trim( className ) : "";
- }
- }
- }
-
- return this;
- },
-
- toggleClass: function( value, stateVal ) {
- var type = typeof value,
- isBool = typeof stateVal === "boolean";
-
- if ( jQuery.isFunction( value ) ) {
- return this.each(function( i ) {
- jQuery( this ).toggleClass( value.call(this, i, this.className, stateVal), stateVal );
- });
- }
-
- return this.each(function() {
- if ( type === "string" ) {
- // toggle individual class names
- var className,
- i = 0,
- self = jQuery( this ),
- state = stateVal,
- classNames = value.split( core_rspace );
-
- while ( (className = classNames[ i++ ]) ) {
- // check each className given, space separated list
- state = isBool ? state : !self.hasClass( className );
- self[ state ? "addClass" : "removeClass" ]( className );
- }
-
- } else if ( type === "undefined" || type === "boolean" ) {
- if ( this.className ) {
- // store className if set
- jQuery._data( this, "__className__", this.className );
- }
-
- // toggle whole className
- this.className = this.className || value === false ? "" : jQuery._data( this, "__className__" ) || "";
- }
- });
- },
-
- hasClass: function( selector ) {
- var className = " " + selector + " ",
- i = 0,
- l = this.length;
- for ( ; i < l; i++ ) {
- if ( this[i].nodeType === 1 && (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) >= 0 ) {
- return true;
- }
- }
-
- return false;
- },
-
- val: function( value ) {
- var hooks, ret, isFunction,
- elem = this[0];
-
- if ( !arguments.length ) {
- if ( elem ) {
- hooks = jQuery.valHooks[ elem.type ] || jQuery.valHooks[ elem.nodeName.toLowerCase() ];
-
- if ( hooks && "get" in hooks && (ret = hooks.get( elem, "value" )) !== undefined ) {
- return ret;
- }
-
- ret = elem.value;
-
- return typeof ret === "string" ?
- // handle most common string cases
- ret.replace(rreturn, "") :
- // handle cases where value is null/undef or number
- ret == null ? "" : ret;
- }
-
- return;
- }
-
- isFunction = jQuery.isFunction( value );
-
- return this.each(function( i ) {
- var val,
- self = jQuery(this);
-
- if ( this.nodeType !== 1 ) {
- return;
- }
-
- if ( isFunction ) {
- val = value.call( this, i, self.val() );
- } else {
- val = value;
- }
-
- // Treat null/undefined as ""; convert numbers to string
- if ( val == null ) {
- val = "";
- } else if ( typeof val === "number" ) {
- val += "";
- } else if ( jQuery.isArray( val ) ) {
- val = jQuery.map(val, function ( value ) {
- return value == null ? "" : value + "";
- });
- }
-
- hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ];
-
- // If set returns undefined, fall back to normal setting
- if ( !hooks || !("set" in hooks) || hooks.set( this, val, "value" ) === undefined ) {
- this.value = val;
- }
- });
- }
-});
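-
-// Usage sketch (illustrative only): the class helpers above are whitespace-
-// delimited, and val() normalizes null/undefined to "".
-//   jQuery( "p" ).addClass( "note hot" ).toggleClass( "hot" );
-//   jQuery( "input:first" ).val( null ).val(); // ""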
-
-jQuery.extend({
- valHooks: {
- option: {
- get: function( elem ) {
- // attributes.value is undefined in Blackberry 4.7 but
- // uses .value. See #6932
- var val = elem.attributes.value;
- return !val || val.specified ? elem.value : elem.text;
- }
- },
- select: {
- get: function( elem ) {
- var value, option,
- options = elem.options,
- index = elem.selectedIndex,
- one = elem.type === "select-one" || index < 0,
- values = one ? null : [],
- max = one ? index + 1 : options.length,
- i = index < 0 ?
- max :
- one ? index : 0;
-
- // Loop through all the selected options
- for ( ; i < max; i++ ) {
- option = options[ i ];
-
- // oldIE doesn't update selected after form reset (#2551)
- if ( ( option.selected || i === index ) &&
- // Don't return options that are disabled or in a disabled optgroup
- ( jQuery.support.optDisabled ? !option.disabled : option.getAttribute("disabled") === null ) &&
- ( !option.parentNode.disabled || !jQuery.nodeName( option.parentNode, "optgroup" ) ) ) {
-
- // Get the specific value for the option
- value = jQuery( option ).val();
-
- // We don't need an array for one selects
- if ( one ) {
- return value;
- }
-
- // Multi-Selects return an array
- values.push( value );
- }
- }
-
- return values;
- },
-
- set: function( elem, value ) {
- var values = jQuery.makeArray( value );
-
- jQuery(elem).find("option").each(function() {
- this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0;
- });
-
- if ( !values.length ) {
- elem.selectedIndex = -1;
- }
- return values;
- }
- }
- },
-
- // Unused in 1.8, left in so attrFn-stabbers won't die; remove in 1.9
- attrFn: {},
-
- attr: function( elem, name, value, pass ) {
- var ret, hooks, notxml,
- nType = elem.nodeType;
-
- // don't get/set attributes on text, comment and attribute nodes
- if ( !elem || nType === 3 || nType === 8 || nType === 2 ) {
- return;
- }
-
- if ( pass && jQuery.isFunction( jQuery.fn[ name ] ) ) {
- return jQuery( elem )[ name ]( value );
- }
-
- // Fallback to prop when attributes are not supported
- if ( typeof elem.getAttribute === "undefined" ) {
- return jQuery.prop( elem, name, value );
- }
-
- notxml = nType !== 1 || !jQuery.isXMLDoc( elem );
-
- // All attributes are lowercase
- // Grab necessary hook if one is defined
- if ( notxml ) {
- name = name.toLowerCase();
- hooks = jQuery.attrHooks[ name ] || ( rboolean.test( name ) ? boolHook : nodeHook );
- }
-
- if ( value !== undefined ) {
-
- if ( value === null ) {
- jQuery.removeAttr( elem, name );
- return;
-
- } else if ( hooks && "set" in hooks && notxml && (ret = hooks.set( elem, value, name )) !== undefined ) {
- return ret;
-
- } else {
- elem.setAttribute( name, value + "" );
- return value;
- }
-
- } else if ( hooks && "get" in hooks && notxml && (ret = hooks.get( elem, name )) !== null ) {
- return ret;
-
- } else {
-
- ret = elem.getAttribute( name );
-
- // Non-existent attributes return null, we normalize to undefined
- return ret === null ?
- undefined :
- ret;
- }
- },
-
- removeAttr: function( elem, value ) {
- var propName, attrNames, name, isBool,
- i = 0;
-
- if ( value && elem.nodeType === 1 ) {
-
- attrNames = value.split( core_rspace );
-
- for ( ; i < attrNames.length; i++ ) {
- name = attrNames[ i ];
-
- if ( name ) {
- propName = jQuery.propFix[ name ] || name;
- isBool = rboolean.test( name );
-
- // See #9699 for explanation of this approach (setting first, then removal)
- // Do not do this for boolean attributes (see #10870)
- if ( !isBool ) {
- jQuery.attr( elem, name, "" );
- }
- elem.removeAttribute( getSetAttribute ? name : propName );
-
- // Set corresponding property to false for boolean attributes
- if ( isBool && propName in elem ) {
- elem[ propName ] = false;
- }
- }
- }
- }
- },
-
- attrHooks: {
- type: {
- set: function( elem, value ) {
- // We can't allow the type property to be changed (since it causes problems in IE)
- if ( rtype.test( elem.nodeName ) && elem.parentNode ) {
- jQuery.error( "type property can't be changed" );
- } else if ( !jQuery.support.radioValue && value === "radio" && jQuery.nodeName(elem, "input") ) {
- // Setting the type on a radio button after the value resets the value in IE6-9
-				// Reset value to its default in case type is set after value
- // This is for element creation
- var val = elem.value;
- elem.setAttribute( "type", value );
- if ( val ) {
- elem.value = val;
- }
- return value;
- }
- }
- },
- // Use the value property for back compat
- // Use the nodeHook for button elements in IE6/7 (#1954)
- value: {
- get: function( elem, name ) {
- if ( nodeHook && jQuery.nodeName( elem, "button" ) ) {
- return nodeHook.get( elem, name );
- }
- return name in elem ?
- elem.value :
- null;
- },
- set: function( elem, value, name ) {
- if ( nodeHook && jQuery.nodeName( elem, "button" ) ) {
- return nodeHook.set( elem, value, name );
- }
- // Does not return so that setAttribute is also used
- elem.value = value;
- }
- }
- },
-
- propFix: {
- tabindex: "tabIndex",
- readonly: "readOnly",
- "for": "htmlFor",
- "class": "className",
- maxlength: "maxLength",
- cellspacing: "cellSpacing",
- cellpadding: "cellPadding",
- rowspan: "rowSpan",
- colspan: "colSpan",
- usemap: "useMap",
- frameborder: "frameBorder",
- contenteditable: "contentEditable"
- },
-
- prop: function( elem, name, value ) {
- var ret, hooks, notxml,
- nType = elem.nodeType;
-
- // don't get/set properties on text, comment and attribute nodes
- if ( !elem || nType === 3 || nType === 8 || nType === 2 ) {
- return;
- }
-
- notxml = nType !== 1 || !jQuery.isXMLDoc( elem );
-
- if ( notxml ) {
- // Fix name and attach hooks
- name = jQuery.propFix[ name ] || name;
- hooks = jQuery.propHooks[ name ];
- }
-
- if ( value !== undefined ) {
- if ( hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) {
- return ret;
-
- } else {
- return ( elem[ name ] = value );
- }
-
- } else {
- if ( hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) {
- return ret;
-
- } else {
- return elem[ name ];
- }
- }
- },
-
- propHooks: {
- tabIndex: {
- get: function( elem ) {
- // elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set
- // http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/
- var attributeNode = elem.getAttributeNode("tabindex");
-
- return attributeNode && attributeNode.specified ?
- parseInt( attributeNode.value, 10 ) :
- rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ?
- 0 :
- undefined;
- }
- }
- }
-});
-
-// Hook for boolean attributes
-boolHook = {
- get: function( elem, name ) {
- // Align boolean attributes with corresponding properties
- // Fall back to attribute presence where some booleans are not supported
- var attrNode,
- property = jQuery.prop( elem, name );
- return property === true || typeof property !== "boolean" && ( attrNode = elem.getAttributeNode(name) ) && attrNode.nodeValue !== false ?
- name.toLowerCase() :
- undefined;
- },
- set: function( elem, value, name ) {
- var propName;
- if ( value === false ) {
- // Remove boolean attributes when set to false
- jQuery.removeAttr( elem, name );
- } else {
-			// value is true, since at this point we know its type is boolean and it is not false
- // Set boolean attributes to the same name and set the DOM property
- propName = jQuery.propFix[ name ] || name;
- if ( propName in elem ) {
- // Only set the IDL specifically if it already exists on the element
- elem[ propName ] = true;
- }
-
- elem.setAttribute( name, name.toLowerCase() );
- }
- return name;
- }
-};
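-
-// Usage sketch (illustrative only): via boolHook, boolean attributes are
-// removed when set to false and normalized to their lowercase name otherwise.
-//   jQuery( ":checkbox" ).attr( "checked", true );  // checked="checked"
-//   jQuery( ":checkbox" ).attr( "checked", false ); // attribute removed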
-
-// IE6/7 do not support getting/setting some attributes with get/setAttribute
-if ( !getSetAttribute ) {
-
- fixSpecified = {
- name: true,
- id: true,
- coords: true
- };
-
- // Use this for any attribute in IE6/7
- // This fixes almost every IE6/7 issue
- nodeHook = jQuery.valHooks.button = {
- get: function( elem, name ) {
- var ret;
- ret = elem.getAttributeNode( name );
- return ret && ( fixSpecified[ name ] ? ret.value !== "" : ret.specified ) ?
- ret.value :
- undefined;
- },
- set: function( elem, value, name ) {
- // Set the existing or create a new attribute node
- var ret = elem.getAttributeNode( name );
- if ( !ret ) {
- ret = document.createAttribute( name );
- elem.setAttributeNode( ret );
- }
- return ( ret.value = value + "" );
- }
- };
-
-	// Set width and height to auto instead of 0 on empty string (Bug #8150)
- // This is for removals
- jQuery.each([ "width", "height" ], function( i, name ) {
- jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], {
- set: function( elem, value ) {
- if ( value === "" ) {
- elem.setAttribute( name, "auto" );
- return value;
- }
- }
- });
- });
-
-	// Set contenteditable to false on removals (#10429)
- // Setting to empty string throws an error as an invalid value
- jQuery.attrHooks.contenteditable = {
- get: nodeHook.get,
- set: function( elem, value, name ) {
- if ( value === "" ) {
- value = "false";
- }
- nodeHook.set( elem, value, name );
- }
- };
-}
-
-
-// Some attributes require a special call on IE
-if ( !jQuery.support.hrefNormalized ) {
- jQuery.each([ "href", "src", "width", "height" ], function( i, name ) {
- jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], {
- get: function( elem ) {
- var ret = elem.getAttribute( name, 2 );
- return ret === null ? undefined : ret;
- }
- });
- });
-}
-
-if ( !jQuery.support.style ) {
- jQuery.attrHooks.style = {
- get: function( elem ) {
- // Return undefined in the case of empty string
- // Normalize to lowercase since IE uppercases css property names
- return elem.style.cssText.toLowerCase() || undefined;
- },
- set: function( elem, value ) {
- return ( elem.style.cssText = value + "" );
- }
- };
-}
-
-// Safari mis-reports the default selected property of an option
-// Accessing the parent's selectedIndex property fixes it
-if ( !jQuery.support.optSelected ) {
- jQuery.propHooks.selected = jQuery.extend( jQuery.propHooks.selected, {
- get: function( elem ) {
- var parent = elem.parentNode;
-
- if ( parent ) {
- parent.selectedIndex;
-
- // Make sure that it also works with optgroups, see #5701
- if ( parent.parentNode ) {
- parent.parentNode.selectedIndex;
- }
- }
- return null;
- }
- });
-}
-
-// IE6/7 call enctype encoding
-if ( !jQuery.support.enctype ) {
- jQuery.propFix.enctype = "encoding";
-}
-
-// Radios and checkboxes getter/setter
-if ( !jQuery.support.checkOn ) {
- jQuery.each([ "radio", "checkbox" ], function() {
- jQuery.valHooks[ this ] = {
- get: function( elem ) {
-			// Handle the case where WebKit returns "" instead of "on" when a value isn't specified
- return elem.getAttribute("value") === null ? "on" : elem.value;
- }
- };
- });
-}
-jQuery.each([ "radio", "checkbox" ], function() {
- jQuery.valHooks[ this ] = jQuery.extend( jQuery.valHooks[ this ], {
- set: function( elem, value ) {
- if ( jQuery.isArray( value ) ) {
- return ( elem.checked = jQuery.inArray( jQuery(elem).val(), value ) >= 0 );
- }
- }
- });
-});
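-
-// Usage sketch (illustrative only): with the hook above, passing an array to
-// val() checks a radio/checkbox exactly when its own value appears in the array.
-//   jQuery( "input[name=opts]" ).val( [ "a", "c" ] ); // checks the "a" and "c" boxes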
-var rformElems = /^(?:textarea|input|select)$/i,
- rtypenamespace = /^([^\.]*|)(?:\.(.+)|)$/,
- rhoverHack = /(?:^|\s)hover(\.\S+|)\b/,
- rkeyEvent = /^key/,
- rmouseEvent = /^(?:mouse|contextmenu)|click/,
- rfocusMorph = /^(?:focusinfocus|focusoutblur)$/,
- hoverHack = function( events ) {
- return jQuery.event.special.hover ? events : events.replace( rhoverHack, "mouseenter$1 mouseleave$1" );
- };
-
-/*
- * Helper functions for managing events -- not part of the public interface.
- * Props to Dean Edwards' addEvent library for many of the ideas.
- */
-jQuery.event = {
-
- add: function( elem, types, handler, data, selector ) {
-
- var elemData, eventHandle, events,
- t, tns, type, namespaces, handleObj,
- handleObjIn, handlers, special;
-
- // Don't attach events to noData or text/comment nodes (allow plain objects tho)
- if ( elem.nodeType === 3 || elem.nodeType === 8 || !types || !handler || !(elemData = jQuery._data( elem )) ) {
- return;
- }
-
- // Caller can pass in an object of custom data in lieu of the handler
- if ( handler.handler ) {
- handleObjIn = handler;
- handler = handleObjIn.handler;
- selector = handleObjIn.selector;
- }
-
- // Make sure that the handler has a unique ID, used to find/remove it later
- if ( !handler.guid ) {
- handler.guid = jQuery.guid++;
- }
-
- // Init the element's event structure and main handler, if this is the first
- events = elemData.events;
- if ( !events ) {
- elemData.events = events = {};
- }
- eventHandle = elemData.handle;
- if ( !eventHandle ) {
- elemData.handle = eventHandle = function( e ) {
- // Discard the second event of a jQuery.event.trigger() and
- // when an event is called after a page has unloaded
- return typeof jQuery !== "undefined" && (!e || jQuery.event.triggered !== e.type) ?
- jQuery.event.dispatch.apply( eventHandle.elem, arguments ) :
- undefined;
- };
- // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events
- eventHandle.elem = elem;
- }
-
- // Handle multiple events separated by a space
- // jQuery(...).bind("mouseover mouseout", fn);
- types = jQuery.trim( hoverHack(types) ).split( " " );
- for ( t = 0; t < types.length; t++ ) {
-
- tns = rtypenamespace.exec( types[t] ) || [];
- type = tns[1];
- namespaces = ( tns[2] || "" ).split( "." ).sort();
-
- // If event changes its type, use the special event handlers for the changed type
- special = jQuery.event.special[ type ] || {};
-
- // If selector defined, determine special event api type, otherwise given type
- type = ( selector ? special.delegateType : special.bindType ) || type;
-
- // Update special based on newly reset type
- special = jQuery.event.special[ type ] || {};
-
- // handleObj is passed to all event handlers
- handleObj = jQuery.extend({
- type: type,
- origType: tns[1],
- data: data,
- handler: handler,
- guid: handler.guid,
- selector: selector,
- needsContext: selector && jQuery.expr.match.needsContext.test( selector ),
- namespace: namespaces.join(".")
- }, handleObjIn );
-
- // Init the event handler queue if we're the first
- handlers = events[ type ];
- if ( !handlers ) {
- handlers = events[ type ] = [];
- handlers.delegateCount = 0;
-
- // Only use addEventListener/attachEvent if the special events handler returns false
- if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) {
- // Bind the global event handler to the element
- if ( elem.addEventListener ) {
- elem.addEventListener( type, eventHandle, false );
-
- } else if ( elem.attachEvent ) {
- elem.attachEvent( "on" + type, eventHandle );
- }
- }
- }
-
- if ( special.add ) {
- special.add.call( elem, handleObj );
-
- if ( !handleObj.handler.guid ) {
- handleObj.handler.guid = handler.guid;
- }
- }
-
- // Add to the element's handler list, delegates in front
- if ( selector ) {
- handlers.splice( handlers.delegateCount++, 0, handleObj );
- } else {
- handlers.push( handleObj );
- }
-
- // Keep track of which events have ever been used, for event optimization
- jQuery.event.global[ type ] = true;
- }
-
- // Nullify elem to prevent memory leaks in IE
- elem = null;
- },
-
- global: {},
-
- // Detach an event or set of events from an element
- remove: function( elem, types, handler, selector, mappedTypes ) {
-
- var t, tns, type, origType, namespaces, origCount,
- j, events, special, eventType, handleObj,
- elemData = jQuery.hasData( elem ) && jQuery._data( elem );
-
- if ( !elemData || !(events = elemData.events) ) {
- return;
- }
-
- // Once for each type.namespace in types; type may be omitted
- types = jQuery.trim( hoverHack( types || "" ) ).split(" ");
- for ( t = 0; t < types.length; t++ ) {
- tns = rtypenamespace.exec( types[t] ) || [];
- type = origType = tns[1];
- namespaces = tns[2];
-
- // Unbind all events (on this namespace, if provided) for the element
- if ( !type ) {
- for ( type in events ) {
- jQuery.event.remove( elem, type + types[ t ], handler, selector, true );
- }
- continue;
- }
-
- special = jQuery.event.special[ type ] || {};
- type = ( selector? special.delegateType : special.bindType ) || type;
- eventType = events[ type ] || [];
- origCount = eventType.length;
- namespaces = namespaces ? new RegExp("(^|\\.)" + namespaces.split(".").sort().join("\\.(?:.*\\.|)") + "(\\.|$)") : null;
-
- // Remove matching events
- for ( j = 0; j < eventType.length; j++ ) {
- handleObj = eventType[ j ];
-
- if ( ( mappedTypes || origType === handleObj.origType ) &&
- ( !handler || handler.guid === handleObj.guid ) &&
- ( !namespaces || namespaces.test( handleObj.namespace ) ) &&
- ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) {
- eventType.splice( j--, 1 );
-
- if ( handleObj.selector ) {
- eventType.delegateCount--;
- }
- if ( special.remove ) {
- special.remove.call( elem, handleObj );
- }
- }
- }
-
- // Remove generic event handler if we removed something and no more handlers exist
- // (avoids potential for endless recursion during removal of special event handlers)
- if ( eventType.length === 0 && origCount !== eventType.length ) {
- if ( !special.teardown || special.teardown.call( elem, namespaces, elemData.handle ) === false ) {
- jQuery.removeEvent( elem, type, elemData.handle );
- }
-
- delete events[ type ];
- }
- }
-
- // Remove the expando if it's no longer used
- if ( jQuery.isEmptyObject( events ) ) {
- delete elemData.handle;
-
- // removeData also checks for emptiness and clears the expando if empty
- // so use it instead of delete
- jQuery.removeData( elem, "events", true );
- }
- },
-
- // Events that are safe to short-circuit if no handlers are attached.
-	// Native DOM events should not be added; they may have inline handlers.
- customEvent: {
- "getData": true,
- "setData": true,
- "changeData": true
- },
-
- trigger: function( event, data, elem, onlyHandlers ) {
- // Don't do events on text and comment nodes
- if ( elem && (elem.nodeType === 3 || elem.nodeType === 8) ) {
- return;
- }
-
- // Event object or event type
- var cache, exclusive, i, cur, old, ontype, special, handle, eventPath, bubbleType,
- type = event.type || event,
- namespaces = [];
-
- // focus/blur morphs to focusin/out; ensure we're not firing them right now
- if ( rfocusMorph.test( type + jQuery.event.triggered ) ) {
- return;
- }
-
- if ( type.indexOf( "!" ) >= 0 ) {
- // Exclusive events trigger only for the exact event (no namespaces)
- type = type.slice(0, -1);
- exclusive = true;
- }
-
- if ( type.indexOf( "." ) >= 0 ) {
- // Namespaced trigger; create a regexp to match event type in handle()
- namespaces = type.split(".");
- type = namespaces.shift();
- namespaces.sort();
- }
-
- if ( (!elem || jQuery.event.customEvent[ type ]) && !jQuery.event.global[ type ] ) {
- // No jQuery handlers for this event type, and it can't have inline handlers
- return;
- }
-
- // Caller can pass in an Event, Object, or just an event type string
- event = typeof event === "object" ?
- // jQuery.Event object
- event[ jQuery.expando ] ? event :
- // Object literal
- new jQuery.Event( type, event ) :
- // Just the event type (string)
- new jQuery.Event( type );
-
- event.type = type;
- event.isTrigger = true;
- event.exclusive = exclusive;
- event.namespace = namespaces.join( "." );
- event.namespace_re = event.namespace? new RegExp("(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)") : null;
- ontype = type.indexOf( ":" ) < 0 ? "on" + type : "";
-
- // Handle a global trigger
- if ( !elem ) {
-
- // TODO: Stop taunting the data cache; remove global events and always attach to document
- cache = jQuery.cache;
- for ( i in cache ) {
- if ( cache[ i ].events && cache[ i ].events[ type ] ) {
- jQuery.event.trigger( event, data, cache[ i ].handle.elem, true );
- }
- }
- return;
- }
-
- // Clean up the event in case it is being reused
- event.result = undefined;
- if ( !event.target ) {
- event.target = elem;
- }
-
- // Clone any incoming data and prepend the event, creating the handler arg list
- data = data != null ? jQuery.makeArray( data ) : [];
- data.unshift( event );
-
- // Allow special events to draw outside the lines
- special = jQuery.event.special[ type ] || {};
- if ( special.trigger && special.trigger.apply( elem, data ) === false ) {
- return;
- }
-
- // Determine event propagation path in advance, per W3C events spec (#9951)
- // Bubble up to document, then to window; watch for a global ownerDocument var (#9724)
- eventPath = [[ elem, special.bindType || type ]];
- if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) {
-
- bubbleType = special.delegateType || type;
- cur = rfocusMorph.test( bubbleType + type ) ? elem : elem.parentNode;
- for ( old = elem; cur; cur = cur.parentNode ) {
- eventPath.push([ cur, bubbleType ]);
- old = cur;
- }
-
- // Only add window if we got to document (e.g., not plain obj or detached DOM)
- if ( old === (elem.ownerDocument || document) ) {
- eventPath.push([ old.defaultView || old.parentWindow || window, bubbleType ]);
- }
- }
-
- // Fire handlers on the event path
- for ( i = 0; i < eventPath.length && !event.isPropagationStopped(); i++ ) {
-
- cur = eventPath[i][0];
- event.type = eventPath[i][1];
-
- handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" );
- if ( handle ) {
- handle.apply( cur, data );
- }
- // Note that this is a bare JS function and not a jQuery handler
- handle = ontype && cur[ ontype ];
- if ( handle && jQuery.acceptData( cur ) && handle.apply && handle.apply( cur, data ) === false ) {
- event.preventDefault();
- }
- }
- event.type = type;
-
- // If nobody prevented the default action, do it now
- if ( !onlyHandlers && !event.isDefaultPrevented() ) {
-
- if ( (!special._default || special._default.apply( elem.ownerDocument, data ) === false) &&
- !(type === "click" && jQuery.nodeName( elem, "a" )) && jQuery.acceptData( elem ) ) {
-
-			// Call a native DOM method on the target with the same name as the event.
- // Can't use an .isFunction() check here because IE6/7 fails that test.
- // Don't do default actions on window, that's where global variables be (#6170)
- // IE<9 dies on focus/blur to hidden element (#1486)
- if ( ontype && elem[ type ] && ((type !== "focus" && type !== "blur") || event.target.offsetWidth !== 0) && !jQuery.isWindow( elem ) ) {
-
- // Don't re-trigger an onFOO event when we call its FOO() method
- old = elem[ ontype ];
-
- if ( old ) {
- elem[ ontype ] = null;
- }
-
- // Prevent re-triggering of the same event, since we already bubbled it above
- jQuery.event.triggered = type;
- elem[ type ]();
- jQuery.event.triggered = undefined;
-
- if ( old ) {
- elem[ ontype ] = old;
- }
- }
- }
- }
-
- return event.result;
- },
-
- dispatch: function( event ) {
-
- // Make a writable jQuery.Event from the native event object
- event = jQuery.event.fix( event || window.event );
-
- var i, j, cur, ret, selMatch, matched, matches, handleObj, sel, related,
- handlers = ( (jQuery._data( this, "events" ) || {} )[ event.type ] || []),
- delegateCount = handlers.delegateCount,
- args = core_slice.call( arguments ),
- run_all = !event.exclusive && !event.namespace,
- special = jQuery.event.special[ event.type ] || {},
- handlerQueue = [];
-
- // Use the fix-ed jQuery.Event rather than the (read-only) native event
- args[0] = event;
- event.delegateTarget = this;
-
- // Call the preDispatch hook for the mapped type, and let it bail if desired
- if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) {
- return;
- }
-
- // Determine handlers that should run if there are delegated events
- // Avoid non-left-click bubbling in Firefox (#3861)
- if ( delegateCount && !(event.button && event.type === "click") ) {
-
- for ( cur = event.target; cur != this; cur = cur.parentNode || this ) {
-
- // Don't process clicks (ONLY) on disabled elements (#6911, #8165, #11382, #11764)
- if ( cur.disabled !== true || event.type !== "click" ) {
- selMatch = {};
- matches = [];
- for ( i = 0; i < delegateCount; i++ ) {
- handleObj = handlers[ i ];
- sel = handleObj.selector;
-
- if ( selMatch[ sel ] === undefined ) {
- selMatch[ sel ] = handleObj.needsContext ?
- jQuery( sel, this ).index( cur ) >= 0 :
- jQuery.find( sel, this, null, [ cur ] ).length;
- }
- if ( selMatch[ sel ] ) {
- matches.push( handleObj );
- }
- }
- if ( matches.length ) {
- handlerQueue.push({ elem: cur, matches: matches });
- }
- }
- }
- }
-
- // Add the remaining (directly-bound) handlers
- if ( handlers.length > delegateCount ) {
- handlerQueue.push({ elem: this, matches: handlers.slice( delegateCount ) });
- }
-
- // Run delegates first; they may want to stop propagation beneath us
- for ( i = 0; i < handlerQueue.length && !event.isPropagationStopped(); i++ ) {
- matched = handlerQueue[ i ];
- event.currentTarget = matched.elem;
-
- for ( j = 0; j < matched.matches.length && !event.isImmediatePropagationStopped(); j++ ) {
- handleObj = matched.matches[ j ];
-
- // Triggered event must either 1) be non-exclusive and have no namespace, or
- // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace).
- if ( run_all || (!event.namespace && !handleObj.namespace) || event.namespace_re && event.namespace_re.test( handleObj.namespace ) ) {
-
- event.data = handleObj.data;
- event.handleObj = handleObj;
-
- ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler )
- .apply( matched.elem, args );
-
- if ( ret !== undefined ) {
- event.result = ret;
- if ( ret === false ) {
- event.preventDefault();
- event.stopPropagation();
- }
- }
- }
- }
- }
-
- // Call the postDispatch hook for the mapped type
- if ( special.postDispatch ) {
- special.postDispatch.call( this, event );
- }
-
- return event.result;
- },
-
- // Includes some event props shared by KeyEvent and MouseEvent
- // *** attrChange attrName relatedNode srcElement are not normalized, non-W3C, deprecated, will be removed in 1.8 ***
- props: "attrChange attrName relatedNode srcElement altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),
-
- fixHooks: {},
-
- keyHooks: {
- props: "char charCode key keyCode".split(" "),
- filter: function( event, original ) {
-
- // Add which for key events
- if ( event.which == null ) {
- event.which = original.charCode != null ? original.charCode : original.keyCode;
- }
-
- return event;
- }
- },
-
- mouseHooks: {
- props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),
- filter: function( event, original ) {
- var eventDoc, doc, body,
- button = original.button,
- fromElement = original.fromElement;
-
- // Calculate pageX/Y if missing and clientX/Y available
- if ( event.pageX == null && original.clientX != null ) {
- eventDoc = event.target.ownerDocument || document;
- doc = eventDoc.documentElement;
- body = eventDoc.body;
-
- event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 );
- event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 );
- }
-
- // Add relatedTarget, if necessary
- if ( !event.relatedTarget && fromElement ) {
- event.relatedTarget = fromElement === event.target ? original.toElement : fromElement;
- }
-
- // Add which for click: 1 === left; 2 === middle; 3 === right
- // Note: button is not normalized, so don't use it
- if ( !event.which && button !== undefined ) {
- event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 2 : 0 ) ) );
- }
-
- return event;
- }
- },
-
- fix: function( event ) {
- if ( event[ jQuery.expando ] ) {
- return event;
- }
-
- // Create a writable copy of the event object and normalize some properties
- var i, prop,
- originalEvent = event,
- fixHook = jQuery.event.fixHooks[ event.type ] || {},
- copy = fixHook.props ? this.props.concat( fixHook.props ) : this.props;
-
- event = jQuery.Event( originalEvent );
-
- for ( i = copy.length; i; ) {
- prop = copy[ --i ];
- event[ prop ] = originalEvent[ prop ];
- }
-
- // Fix target property, if necessary (#1925, IE 6/7/8 & Safari2)
- if ( !event.target ) {
- event.target = originalEvent.srcElement || document;
- }
-
- // Target should not be a text node (#504, Safari)
- if ( event.target.nodeType === 3 ) {
- event.target = event.target.parentNode;
- }
-
- // For mouse/key events, metaKey==false if it's undefined (#3368, #11328; IE6/7/8)
- event.metaKey = !!event.metaKey;
-
- return fixHook.filter? fixHook.filter( event, originalEvent ) : event;
- },
-
- special: {
- load: {
- // Prevent triggered image.load events from bubbling to window.load
- noBubble: true
- },
-
- focus: {
- delegateType: "focusin"
- },
- blur: {
- delegateType: "focusout"
- },
-
- beforeunload: {
- setup: function( data, namespaces, eventHandle ) {
-				// We only want to do this special case on window objects
- if ( jQuery.isWindow( this ) ) {
- this.onbeforeunload = eventHandle;
- }
- },
-
- teardown: function( namespaces, eventHandle ) {
- if ( this.onbeforeunload === eventHandle ) {
- this.onbeforeunload = null;
- }
- }
- }
- },
-
- simulate: function( type, elem, event, bubble ) {
- // Piggyback on a donor event to simulate a different one.
- // Fake originalEvent to avoid donor's stopPropagation, but if the
- // simulated event prevents default then we do the same on the donor.
- var e = jQuery.extend(
- new jQuery.Event(),
- event,
- { type: type,
- isSimulated: true,
- originalEvent: {}
- }
- );
- if ( bubble ) {
- jQuery.event.trigger( e, null, elem );
- } else {
- jQuery.event.dispatch.call( elem, e );
- }
- if ( e.isDefaultPrevented() ) {
- event.preventDefault();
- }
- }
-};
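-// Illustrative note (not in the original source): the special-event shims
-// later in this file use simulate() to piggyback on a donor event, e.g.
-//
-//   // re-fire a native "change" on the parent so delegated handlers see it:
-//   // jQuery.event.simulate( "change", this.parentNode, event, true );
-//
-// Passing true requests a full bubbling trigger; false dispatches on the
-// element alone.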
-
-// Some plugins are still using this, but it's undocumented/deprecated and will be removed.
-// The 1.7 special event interface should provide all the hooks needed now.
-jQuery.event.handle = jQuery.event.dispatch;
-
-jQuery.removeEvent = document.removeEventListener ?
- function( elem, type, handle ) {
- if ( elem.removeEventListener ) {
- elem.removeEventListener( type, handle, false );
- }
- } :
- function( elem, type, handle ) {
- var name = "on" + type;
-
- if ( elem.detachEvent ) {
-
- // #8545, #7054, preventing memory leaks for custom events in IE6-8
-			// detachEvent needs a property on the element, named after the event, to properly expose it to GC
- if ( typeof elem[ name ] === "undefined" ) {
- elem[ name ] = null;
- }
-
- elem.detachEvent( name, handle );
- }
- };
-
-jQuery.Event = function( src, props ) {
- // Allow instantiation without the 'new' keyword
- if ( !(this instanceof jQuery.Event) ) {
- return new jQuery.Event( src, props );
- }
-
- // Event object
- if ( src && src.type ) {
- this.originalEvent = src;
- this.type = src.type;
-
- // Events bubbling up the document may have been marked as prevented
- // by a handler lower down the tree; reflect the correct value.
- this.isDefaultPrevented = ( src.defaultPrevented || src.returnValue === false ||
- src.getPreventDefault && src.getPreventDefault() ) ? returnTrue : returnFalse;
-
- // Event type
- } else {
- this.type = src;
- }
-
- // Put explicitly provided properties onto the event object
- if ( props ) {
- jQuery.extend( this, props );
- }
-
- // Create a timestamp if incoming event doesn't have one
- this.timeStamp = src && src.timeStamp || jQuery.now();
-
- // Mark it as fixed
- this[ jQuery.expando ] = true;
-};
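-// Illustrative sketch (hypothetical selector): jQuery.Event wraps a native
-// event or builds a synthetic one from a type string plus extra properties:
-//
-//   var e = jQuery.Event( "keydown", { keyCode: 13 } );
-//   jQuery( "#input" ).trigger( e );
-//   if ( e.isDefaultPrevented() ) { /* some handler called e.preventDefault() */ }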
-
-function returnFalse() {
- return false;
-}
-function returnTrue() {
- return true;
-}
-
-// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding
-// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html
-jQuery.Event.prototype = {
- preventDefault: function() {
- this.isDefaultPrevented = returnTrue;
-
- var e = this.originalEvent;
- if ( !e ) {
- return;
- }
-
- // if preventDefault exists run it on the original event
- if ( e.preventDefault ) {
- e.preventDefault();
-
- // otherwise set the returnValue property of the original event to false (IE)
- } else {
- e.returnValue = false;
- }
- },
- stopPropagation: function() {
- this.isPropagationStopped = returnTrue;
-
- var e = this.originalEvent;
- if ( !e ) {
- return;
- }
- // if stopPropagation exists run it on the original event
- if ( e.stopPropagation ) {
- e.stopPropagation();
- }
- // otherwise set the cancelBubble property of the original event to true (IE)
- e.cancelBubble = true;
- },
- stopImmediatePropagation: function() {
- this.isImmediatePropagationStopped = returnTrue;
- this.stopPropagation();
- },
- isDefaultPrevented: returnFalse,
- isPropagationStopped: returnFalse,
- isImmediatePropagationStopped: returnFalse
-};
-
-// Create mouseenter/leave events using mouseover/out and event-time checks
-jQuery.each({
- mouseenter: "mouseover",
- mouseleave: "mouseout"
-}, function( orig, fix ) {
- jQuery.event.special[ orig ] = {
- delegateType: fix,
- bindType: fix,
-
- handle: function( event ) {
- var ret,
- target = this,
- related = event.relatedTarget,
- handleObj = event.handleObj,
- selector = handleObj.selector;
-
-			// For mouseenter/leave call the handler if related is outside the target.
- // NB: No relatedTarget if the mouse left/entered the browser window
- if ( !related || (related !== target && !jQuery.contains( target, related )) ) {
- event.type = handleObj.origType;
- ret = handleObj.handler.apply( this, arguments );
- event.type = fix;
- }
- return ret;
- }
- };
-});
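-// What the mapping above enables (illustrative; selector is hypothetical):
-// delegated mouseenter/mouseleave, which do not bubble natively, ride on
-// bubbling mouseover/mouseout plus the relatedTarget containment check:
-//
-//   jQuery( "#menu" ).on( "mouseenter", "li", function() {
-//       jQuery( this ).addClass( "hover" );
-//   });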
-
-// IE submit delegation
-if ( !jQuery.support.submitBubbles ) {
-
- jQuery.event.special.submit = {
- setup: function() {
- // Only need this for delegated form submit events
- if ( jQuery.nodeName( this, "form" ) ) {
- return false;
- }
-
-			// Lazy-add a submit handler when a descendant form may be submitted
- jQuery.event.add( this, "click._submit keypress._submit", function( e ) {
- // Node name check avoids a VML-related crash in IE (#9807)
- var elem = e.target,
- form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? elem.form : undefined;
- if ( form && !jQuery._data( form, "_submit_attached" ) ) {
- jQuery.event.add( form, "submit._submit", function( event ) {
- event._submit_bubble = true;
- });
- jQuery._data( form, "_submit_attached", true );
- }
- });
- // return undefined since we don't need an event listener
- },
-
- postDispatch: function( event ) {
- // If form was submitted by the user, bubble the event up the tree
- if ( event._submit_bubble ) {
- delete event._submit_bubble;
- if ( this.parentNode && !event.isTrigger ) {
- jQuery.event.simulate( "submit", this.parentNode, event, true );
- }
- }
- },
-
- teardown: function() {
- // Only need this for delegated form submit events
- if ( jQuery.nodeName( this, "form" ) ) {
- return false;
- }
-
- // Remove delegated handlers; cleanData eventually reaps submit handlers attached above
- jQuery.event.remove( this, "._submit" );
- }
- };
-}
-
-// IE change delegation and checkbox/radio fix
-if ( !jQuery.support.changeBubbles ) {
-
- jQuery.event.special.change = {
-
- setup: function() {
-
- if ( rformElems.test( this.nodeName ) ) {
- // IE doesn't fire change on a check/radio until blur; trigger it on click
- // after a propertychange. Eat the blur-change in special.change.handle.
- // This still fires onchange a second time for check/radio after blur.
- if ( this.type === "checkbox" || this.type === "radio" ) {
- jQuery.event.add( this, "propertychange._change", function( event ) {
- if ( event.originalEvent.propertyName === "checked" ) {
- this._just_changed = true;
- }
- });
- jQuery.event.add( this, "click._change", function( event ) {
- if ( this._just_changed && !event.isTrigger ) {
- this._just_changed = false;
- }
- // Allow triggered, simulated change events (#11500)
- jQuery.event.simulate( "change", this, event, true );
- });
- }
- return false;
- }
- // Delegated event; lazy-add a change handler on descendant inputs
- jQuery.event.add( this, "beforeactivate._change", function( e ) {
- var elem = e.target;
-
- if ( rformElems.test( elem.nodeName ) && !jQuery._data( elem, "_change_attached" ) ) {
- jQuery.event.add( elem, "change._change", function( event ) {
- if ( this.parentNode && !event.isSimulated && !event.isTrigger ) {
- jQuery.event.simulate( "change", this.parentNode, event, true );
- }
- });
- jQuery._data( elem, "_change_attached", true );
- }
- });
- },
-
- handle: function( event ) {
- var elem = event.target;
-
-			// Swallow native change events from checkbox/radio; we already triggered them above
- if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) {
- return event.handleObj.handler.apply( this, arguments );
- }
- },
-
- teardown: function() {
- jQuery.event.remove( this, "._change" );
-
- return !rformElems.test( this.nodeName );
- }
- };
-}
-
-// Create "bubbling" focus and blur events
-if ( !jQuery.support.focusinBubbles ) {
- jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) {
-
- // Attach a single capturing handler while someone wants focusin/focusout
- var attaches = 0,
- handler = function( event ) {
- jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true );
- };
-
- jQuery.event.special[ fix ] = {
- setup: function() {
- if ( attaches++ === 0 ) {
- document.addEventListener( orig, handler, true );
- }
- },
- teardown: function() {
- if ( --attaches === 0 ) {
- document.removeEventListener( orig, handler, true );
- }
- }
- };
- });
-}
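-// Illustrative usage (hypothetical selector): with the shim above, delegated
-// focus/blur handlers work even though those events do not bubble natively;
-// a single capturing listener on the document re-fires each event as a
-// bubbling "focusin"/"focusout":
-//
-//   jQuery( document ).on( "focus", "input", function() { /* ... */ });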
-
-jQuery.fn.extend({
-
- on: function( types, selector, data, fn, /*INTERNAL*/ one ) {
- var origFn, type;
-
- // Types can be a map of types/handlers
- if ( typeof types === "object" ) {
- // ( types-Object, selector, data )
- if ( typeof selector !== "string" ) { // && selector != null
- // ( types-Object, data )
- data = data || selector;
- selector = undefined;
- }
- for ( type in types ) {
- this.on( type, selector, data, types[ type ], one );
- }
- return this;
- }
-
- if ( data == null && fn == null ) {
- // ( types, fn )
- fn = selector;
- data = selector = undefined;
- } else if ( fn == null ) {
- if ( typeof selector === "string" ) {
- // ( types, selector, fn )
- fn = data;
- data = undefined;
- } else {
- // ( types, data, fn )
- fn = data;
- data = selector;
- selector = undefined;
- }
- }
- if ( fn === false ) {
- fn = returnFalse;
- } else if ( !fn ) {
- return this;
- }
-
- if ( one === 1 ) {
- origFn = fn;
- fn = function( event ) {
- // Can use an empty set, since event contains the info
- jQuery().off( event );
- return origFn.apply( this, arguments );
- };
- // Use same guid so caller can remove using origFn
- fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
- }
- return this.each( function() {
- jQuery.event.add( this, types, fn, data, selector );
- });
- },
- one: function( types, selector, data, fn ) {
- return this.on( types, selector, data, fn, 1 );
- },
- off: function( types, selector, fn ) {
- var handleObj, type;
- if ( types && types.preventDefault && types.handleObj ) {
- // ( event ) dispatched jQuery.Event
- handleObj = types.handleObj;
- jQuery( types.delegateTarget ).off(
- handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType,
- handleObj.selector,
- handleObj.handler
- );
- return this;
- }
- if ( typeof types === "object" ) {
- // ( types-object [, selector] )
- for ( type in types ) {
- this.off( type, selector, types[ type ] );
- }
- return this;
- }
- if ( selector === false || typeof selector === "function" ) {
- // ( types [, fn] )
- fn = selector;
- selector = undefined;
- }
- if ( fn === false ) {
- fn = returnFalse;
- }
- return this.each(function() {
- jQuery.event.remove( this, types, fn, selector );
- });
- },
-
- bind: function( types, data, fn ) {
- return this.on( types, null, data, fn );
- },
- unbind: function( types, fn ) {
- return this.off( types, null, fn );
- },
-
- live: function( types, data, fn ) {
- jQuery( this.context ).on( types, this.selector, data, fn );
- return this;
- },
- die: function( types, fn ) {
- jQuery( this.context ).off( types, this.selector || "**", fn );
- return this;
- },
-
- delegate: function( selector, types, data, fn ) {
- return this.on( types, selector, data, fn );
- },
- undelegate: function( selector, types, fn ) {
- // ( namespace ) or ( selector, types [, fn] )
- return arguments.length === 1 ? this.off( selector, "**" ) : this.off( types, selector || "**", fn );
- },
-
- trigger: function( type, data ) {
- return this.each(function() {
- jQuery.event.trigger( type, data, this );
- });
- },
- triggerHandler: function( type, data ) {
- if ( this[0] ) {
- return jQuery.event.trigger( type, data, this[0], true );
- }
- },
-
- toggle: function( fn ) {
- // Save reference to arguments for access in closure
- var args = arguments,
- guid = fn.guid || jQuery.guid++,
- i = 0,
- toggler = function( event ) {
- // Figure out which function to execute
- var lastToggle = ( jQuery._data( this, "lastToggle" + fn.guid ) || 0 ) % i;
- jQuery._data( this, "lastToggle" + fn.guid, lastToggle + 1 );
-
- // Make sure that clicks stop
- event.preventDefault();
-
- // and execute the function
- return args[ lastToggle ].apply( this, arguments ) || false;
- };
-
- // link all the functions, so any of them can unbind this click handler
- toggler.guid = guid;
- while ( i < args.length ) {
- args[ i++ ].guid = guid;
- }
-
- return this.click( toggler );
- },
-
- hover: function( fnOver, fnOut ) {
- return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver );
- }
-});
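-// Equivalences implemented above (illustrative; selectors are hypothetical):
-//
-//   jQuery( "#list" ).on( "click", "li", fn );  // delegated, same as .delegate( "li", "click", fn )
-//   jQuery( "#list" ).on( "click", fn );        // direct, same as .bind( "click", fn )
-//   jQuery( "#list" ).one( "click", fn );       // removed after the first call
-//   jQuery( "#list" ).off( "click", "li", fn ); // remove by type/selector/handler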
-
-jQuery.each( ("blur focus focusin focusout load resize scroll unload click dblclick " +
- "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " +
- "change select submit keydown keypress keyup error contextmenu").split(" "), function( i, name ) {
-
- // Handle event binding
- jQuery.fn[ name ] = function( data, fn ) {
- if ( fn == null ) {
- fn = data;
- data = null;
- }
-
- return arguments.length > 0 ?
- this.on( name, null, data, fn ) :
- this.trigger( name );
- };
-
- if ( rkeyEvent.test( name ) ) {
- jQuery.event.fixHooks[ name ] = jQuery.event.keyHooks;
- }
-
- if ( rmouseEvent.test( name ) ) {
- jQuery.event.fixHooks[ name ] = jQuery.event.mouseHooks;
- }
-});
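-// Each name in the loop above becomes both a binder and a trigger, and key/
-// mouse events get their fixHooks wired (illustrative; selector hypothetical):
-//
-//   jQuery( "#btn" ).click( fn );  // same as .on( "click", null, null, fn )
-//   jQuery( "#btn" ).click();      // no arguments: same as .trigger( "click" )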
-/*!
- * Sizzle CSS Selector Engine
- * Copyright 2012 jQuery Foundation and other contributors
- * Released under the MIT license
- * http://sizzlejs.com/
- */
-(function( window, undefined ) {
-
-var cachedruns,
- assertGetIdNotName,
- Expr,
- getText,
- isXML,
- contains,
- compile,
- sortOrder,
- hasDuplicate,
- outermostContext,
-
- baseHasDuplicate = true,
- strundefined = "undefined",
-
- expando = ( "sizcache" + Math.random() ).replace( ".", "" ),
-
- Token = String,
- document = window.document,
- docElem = document.documentElement,
- dirruns = 0,
- done = 0,
- pop = [].pop,
- push = [].push,
- slice = [].slice,
- // Use a stripped-down indexOf if a native one is unavailable
- indexOf = [].indexOf || function( elem ) {
- var i = 0,
- len = this.length;
- for ( ; i < len; i++ ) {
- if ( this[i] === elem ) {
- return i;
- }
- }
- return -1;
- },
-
- // Augment a function for special use by Sizzle
- markFunction = function( fn, value ) {
- fn[ expando ] = value == null || value;
- return fn;
- },
-
- createCache = function() {
- var cache = {},
- keys = [];
-
- return markFunction(function( key, value ) {
- // Only keep the most recent entries
- if ( keys.push( key ) > Expr.cacheLength ) {
- delete cache[ keys.shift() ];
- }
-
- // Retrieve with (key + " ") to avoid collision with native Object.prototype properties (see Issue #157)
- return (cache[ key + " " ] = value);
- }, cache );
- },
-
- classCache = createCache(),
- tokenCache = createCache(),
- compilerCache = createCache(),
-
- // Regex
-
- // Whitespace characters http://www.w3.org/TR/css3-selectors/#whitespace
- whitespace = "[\\x20\\t\\r\\n\\f]",
- // http://www.w3.org/TR/css3-syntax/#characters
- characterEncoding = "(?:\\\\.|[-\\w]|[^\\x00-\\xa0])+",
-
- // Loosely modeled on CSS identifier characters
- // An unquoted value should be a CSS identifier (http://www.w3.org/TR/css3-selectors/#attribute-selectors)
- // Proper syntax: http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier
- identifier = characterEncoding.replace( "w", "w#" ),
-
- // Acceptable operators http://www.w3.org/TR/selectors/#attribute-selectors
- operators = "([*^$|!~]?=)",
- attributes = "\\[" + whitespace + "*(" + characterEncoding + ")" + whitespace +
- "*(?:" + operators + whitespace + "*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|(" + identifier + ")|)|)" + whitespace + "*\\]",
-
- // Prefer arguments not in parens/brackets,
- // then attribute selectors and non-pseudos (denoted by :),
- // then anything else
- // These preferences are here to reduce the number of selectors
-	// needing tokenization in the PSEUDO preFilter
- pseudos = ":(" + characterEncoding + ")(?:\\((?:(['\"])((?:\\\\.|[^\\\\])*?)\\2|([^()[\\]]*|(?:(?:" + attributes + ")|[^:]|\\\\.)*|.*))\\)|)",
-
- // For matchExpr.POS and matchExpr.needsContext
- pos = ":(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace +
- "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)",
-
- // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter
- rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ),
-
- rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ),
- rcombinators = new RegExp( "^" + whitespace + "*([\\x20\\t\\r\\n\\f>+~])" + whitespace + "*" ),
- rpseudo = new RegExp( pseudos ),
-
- // Easily-parseable/retrievable ID or TAG or CLASS selectors
- rquickExpr = /^(?:#([\w\-]+)|(\w+)|\.([\w\-]+))$/,
-
- rnot = /^:not/,
- rsibling = /[\x20\t\r\n\f]*[+~]/,
- rendsWithNot = /:not\($/,
-
- rheader = /h\d/i,
- rinputs = /input|select|textarea|button/i,
-
- rbackslash = /\\(?!\\)/g,
-
- matchExpr = {
- "ID": new RegExp( "^#(" + characterEncoding + ")" ),
- "CLASS": new RegExp( "^\\.(" + characterEncoding + ")" ),
- "NAME": new RegExp( "^\\[name=['\"]?(" + characterEncoding + ")['\"]?\\]" ),
- "TAG": new RegExp( "^(" + characterEncoding.replace( "w", "w*" ) + ")" ),
- "ATTR": new RegExp( "^" + attributes ),
- "PSEUDO": new RegExp( "^" + pseudos ),
- "POS": new RegExp( pos, "i" ),
- "CHILD": new RegExp( "^:(only|nth|first|last)-child(?:\\(" + whitespace +
- "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace +
- "*(\\d+)|))" + whitespace + "*\\)|)", "i" ),
- // For use in libraries implementing .is()
- "needsContext": new RegExp( "^" + whitespace + "*[>+~]|" + pos, "i" )
- },
-
- // Support
-
- // Used for testing something on an element
- assert = function( fn ) {
- var div = document.createElement("div");
-
- try {
- return fn( div );
- } catch (e) {
- return false;
- } finally {
- // release memory in IE
- div = null;
- }
- },
-
- // Check if getElementsByTagName("*") returns only elements
- assertTagNameNoComments = assert(function( div ) {
- div.appendChild( document.createComment("") );
- return !div.getElementsByTagName("*").length;
- }),
-
- // Check if getAttribute returns normalized href attributes
- assertHrefNotNormalized = assert(function( div ) {
- div.innerHTML = "<a href='#'></a>";
- return div.firstChild && typeof div.firstChild.getAttribute !== strundefined &&
- div.firstChild.getAttribute("href") === "#";
- }),
-
- // Check if attributes should be retrieved by attribute nodes
- assertAttributes = assert(function( div ) {
- div.innerHTML = "<select></select>";
- var type = typeof div.lastChild.getAttribute("multiple");
- // IE8 returns a string for some attributes even when not present
- return type !== "boolean" && type !== "string";
- }),
-
- // Check if getElementsByClassName can be trusted
- assertUsableClassName = assert(function( div ) {
- // Opera can't find a second classname (in 9.6)
- div.innerHTML = "<div class='hidden e'></div><div class='hidden'></div>";
- if ( !div.getElementsByClassName || !div.getElementsByClassName("e").length ) {
- return false;
- }
-
- // Safari 3.2 caches class attributes and doesn't catch changes
- div.lastChild.className = "e";
- return div.getElementsByClassName("e").length === 2;
- }),
-
- // Check if getElementById returns elements by name
- // Check if getElementsByName privileges form controls or returns elements by ID
- assertUsableName = assert(function( div ) {
- // Inject content
- div.id = expando + 0;
- div.innerHTML = "<a name='" + expando + "'></a><div name='" + expando + "'></div>";
- docElem.insertBefore( div, docElem.firstChild );
-
- // Test
- var pass = document.getElementsByName &&
- // buggy browsers will return fewer than the correct 2
- document.getElementsByName( expando ).length === 2 +
- // buggy browsers will return more than the correct 0
- document.getElementsByName( expando + 0 ).length;
- assertGetIdNotName = !document.getElementById( expando );
-
- // Cleanup
- docElem.removeChild( div );
-
- return pass;
- });
-
-// If slice is not available, provide a backup
-try {
- slice.call( docElem.childNodes, 0 )[0].nodeType;
-} catch ( e ) {
- slice = function( i ) {
- var elem,
- results = [];
- for ( ; (elem = this[i]); i++ ) {
- results.push( elem );
- }
- return results;
- };
-}
-
-function Sizzle( selector, context, results, seed ) {
- results = results || [];
- context = context || document;
- var match, elem, xml, m,
- nodeType = context.nodeType;
-
- if ( !selector || typeof selector !== "string" ) {
- return results;
- }
-
- if ( nodeType !== 1 && nodeType !== 9 ) {
- return [];
- }
-
- xml = isXML( context );
-
- if ( !xml && !seed ) {
- if ( (match = rquickExpr.exec( selector )) ) {
- // Speed-up: Sizzle("#ID")
- if ( (m = match[1]) ) {
- if ( nodeType === 9 ) {
- elem = context.getElementById( m );
- // Check parentNode to catch when Blackberry 4.6 returns
- // nodes that are no longer in the document #6963
- if ( elem && elem.parentNode ) {
- // Handle the case where IE, Opera, and Webkit return items
- // by name instead of ID
- if ( elem.id === m ) {
- results.push( elem );
- return results;
- }
- } else {
- return results;
- }
- } else {
- // Context is not a document
- if ( context.ownerDocument && (elem = context.ownerDocument.getElementById( m )) &&
- contains( context, elem ) && elem.id === m ) {
- results.push( elem );
- return results;
- }
- }
-
- // Speed-up: Sizzle("TAG")
- } else if ( match[2] ) {
- push.apply( results, slice.call(context.getElementsByTagName( selector ), 0) );
- return results;
-
- // Speed-up: Sizzle(".CLASS")
- } else if ( (m = match[3]) && assertUsableClassName && context.getElementsByClassName ) {
- push.apply( results, slice.call(context.getElementsByClassName( m ), 0) );
- return results;
- }
- }
- }
-
- // All others
- return select( selector.replace( rtrim, "$1" ), context, results, seed, xml );
-}
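-// The fast paths above keep simple selectors out of the full tokenize/compile
-// pipeline (illustrative summary):
-//
-//   Sizzle( "#main" )  ->  getElementById (with parentNode/name sanity checks)
-//   Sizzle( "div" )    ->  getElementsByTagName
-//   Sizzle( ".item" )  ->  getElementsByClassName, when it can be trusted
-//
-// Everything else falls through to select() further below.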
-
-Sizzle.matches = function( expr, elements ) {
- return Sizzle( expr, null, null, elements );
-};
-
-Sizzle.matchesSelector = function( elem, expr ) {
- return Sizzle( expr, null, null, [ elem ] ).length > 0;
-};
-
-// Returns a function to use in pseudos for input types
-function createInputPseudo( type ) {
- return function( elem ) {
- var name = elem.nodeName.toLowerCase();
- return name === "input" && elem.type === type;
- };
-}
-
-// Returns a function to use in pseudos for buttons
-function createButtonPseudo( type ) {
- return function( elem ) {
- var name = elem.nodeName.toLowerCase();
- return (name === "input" || name === "button") && elem.type === type;
- };
-}
-
-// Returns a function to use in pseudos for positionals
-function createPositionalPseudo( fn ) {
- return markFunction(function( argument ) {
- argument = +argument;
- return markFunction(function( seed, matches ) {
- var j,
- matchIndexes = fn( [], seed.length, argument ),
- i = matchIndexes.length;
-
- // Match elements found at the specified indexes
- while ( i-- ) {
- if ( seed[ (j = matchIndexes[i]) ] ) {
- seed[j] = !(matches[j] = seed[j]);
- }
- }
- });
- });
-}
-
-/**
- * Utility function for retrieving the text value of an array of DOM nodes
- * @param {Array|Element} elem
- */
-getText = Sizzle.getText = function( elem ) {
- var node,
- ret = "",
- i = 0,
- nodeType = elem.nodeType;
-
- if ( nodeType ) {
- if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) {
- // Use textContent for elements
- // innerText usage removed for consistency of new lines (see #11153)
- if ( typeof elem.textContent === "string" ) {
- return elem.textContent;
- } else {
- // Traverse its children
- for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
- ret += getText( elem );
- }
- }
- } else if ( nodeType === 3 || nodeType === 4 ) {
- return elem.nodeValue;
- }
- // Do not include comment or processing instruction nodes
- } else {
-
- // If no nodeType, this is expected to be an array
- for ( ; (node = elem[i]); i++ ) {
- // Do not traverse comment nodes
- ret += getText( node );
- }
- }
- return ret;
-};
-
-isXML = Sizzle.isXML = function( elem ) {
- // documentElement is verified for cases where it doesn't yet exist
- // (such as loading iframes in IE - #4833)
- var documentElement = elem && (elem.ownerDocument || elem).documentElement;
- return documentElement ? documentElement.nodeName !== "HTML" : false;
-};
-
-// Element contains another
-contains = Sizzle.contains = docElem.contains ?
- function( a, b ) {
- var adown = a.nodeType === 9 ? a.documentElement : a,
- bup = b && b.parentNode;
- return a === bup || !!( bup && bup.nodeType === 1 && adown.contains && adown.contains(bup) );
- } :
- docElem.compareDocumentPosition ?
- function( a, b ) {
- return b && !!( a.compareDocumentPosition( b ) & 16 );
- } :
- function( a, b ) {
- while ( (b = b.parentNode) ) {
- if ( b === a ) {
- return true;
- }
- }
- return false;
- };
-
-Sizzle.attr = function( elem, name ) {
- var val,
- xml = isXML( elem );
-
- if ( !xml ) {
- name = name.toLowerCase();
- }
- if ( (val = Expr.attrHandle[ name ]) ) {
- return val( elem );
- }
- if ( xml || assertAttributes ) {
- return elem.getAttribute( name );
- }
- val = elem.getAttributeNode( name );
- return val ?
- typeof elem[ name ] === "boolean" ?
- elem[ name ] ? name : null :
- val.specified ? val.value : null :
- null;
-};
-
-Expr = Sizzle.selectors = {
-
- // Can be adjusted by the user
- cacheLength: 50,
-
- createPseudo: markFunction,
-
- match: matchExpr,
-
- // IE6/7 return a modified href
- attrHandle: assertHrefNotNormalized ?
- {} :
- {
- "href": function( elem ) {
- return elem.getAttribute( "href", 2 );
- },
- "type": function( elem ) {
- return elem.getAttribute("type");
- }
- },
-
- find: {
- "ID": assertGetIdNotName ?
- function( id, context, xml ) {
- if ( typeof context.getElementById !== strundefined && !xml ) {
- var m = context.getElementById( id );
- // Check parentNode to catch when Blackberry 4.6 returns
- // nodes that are no longer in the document #6963
- return m && m.parentNode ? [m] : [];
- }
- } :
- function( id, context, xml ) {
- if ( typeof context.getElementById !== strundefined && !xml ) {
- var m = context.getElementById( id );
-
- return m ?
- m.id === id || typeof m.getAttributeNode !== strundefined && m.getAttributeNode("id").value === id ?
- [m] :
- undefined :
- [];
- }
- },
-
- "TAG": assertTagNameNoComments ?
- function( tag, context ) {
- if ( typeof context.getElementsByTagName !== strundefined ) {
- return context.getElementsByTagName( tag );
- }
- } :
- function( tag, context ) {
- var results = context.getElementsByTagName( tag );
-
- // Filter out possible comments
- if ( tag === "*" ) {
- var elem,
- tmp = [],
- i = 0;
-
- for ( ; (elem = results[i]); i++ ) {
- if ( elem.nodeType === 1 ) {
- tmp.push( elem );
- }
- }
-
- return tmp;
- }
- return results;
- },
-
- "NAME": assertUsableName && function( tag, context ) {
- if ( typeof context.getElementsByName !== strundefined ) {
-			return context.getElementsByName( tag );
- }
- },
-
- "CLASS": assertUsableClassName && function( className, context, xml ) {
- if ( typeof context.getElementsByClassName !== strundefined && !xml ) {
- return context.getElementsByClassName( className );
- }
- }
- },
-
- relative: {
- ">": { dir: "parentNode", first: true },
- " ": { dir: "parentNode" },
- "+": { dir: "previousSibling", first: true },
- "~": { dir: "previousSibling" }
- },
-
- preFilter: {
- "ATTR": function( match ) {
- match[1] = match[1].replace( rbackslash, "" );
-
- // Move the given value to match[3] whether quoted or unquoted
- match[3] = ( match[4] || match[5] || "" ).replace( rbackslash, "" );
-
- if ( match[2] === "~=" ) {
- match[3] = " " + match[3] + " ";
- }
-
- return match.slice( 0, 4 );
- },
-
- "CHILD": function( match ) {
- /* matches from matchExpr["CHILD"]
- 1 type (only|nth|...)
- 2 argument (even|odd|\d*|\d*n([+-]\d+)?|...)
- 3 xn-component of xn+y argument ([+-]?\d*n|)
- 4 sign of xn-component
- 5 x of xn-component
- 6 sign of y-component
- 7 y of y-component
- */
- match[1] = match[1].toLowerCase();
-
- if ( match[1] === "nth" ) {
- // nth-child requires argument
- if ( !match[2] ) {
- Sizzle.error( match[0] );
- }
-
- // numeric x and y parameters for Expr.filter.CHILD
- // remember that false/true cast respectively to 0/1
- match[3] = +( match[3] ? match[4] + (match[5] || 1) : 2 * ( match[2] === "even" || match[2] === "odd" ) );
- match[4] = +( ( match[6] + match[7] ) || match[2] === "odd" );
-
- // other types prohibit arguments
- } else if ( match[2] ) {
- Sizzle.error( match[0] );
- }
-
- return match;
- },
-
- "PSEUDO": function( match ) {
- var unquoted, excess;
- if ( matchExpr["CHILD"].test( match[0] ) ) {
- return null;
- }
-
- if ( match[3] ) {
- match[2] = match[3];
- } else if ( (unquoted = match[4]) ) {
- // Only check arguments that contain a pseudo
- if ( rpseudo.test(unquoted) &&
- // Get excess from tokenize (recursively)
- (excess = tokenize( unquoted, true )) &&
- // advance to the next closing parenthesis
- (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) {
-
- // excess is a negative index
- unquoted = unquoted.slice( 0, excess );
- match[0] = match[0].slice( 0, excess );
- }
- match[2] = unquoted;
- }
-
- // Return only captures needed by the pseudo filter method (type and argument)
- return match.slice( 0, 3 );
- }
- },
-
- filter: {
- "ID": assertGetIdNotName ?
- function( id ) {
- id = id.replace( rbackslash, "" );
- return function( elem ) {
- return elem.getAttribute("id") === id;
- };
- } :
- function( id ) {
- id = id.replace( rbackslash, "" );
- return function( elem ) {
- var node = typeof elem.getAttributeNode !== strundefined && elem.getAttributeNode("id");
- return node && node.value === id;
- };
- },
-
- "TAG": function( nodeName ) {
- if ( nodeName === "*" ) {
- return function() { return true; };
- }
- nodeName = nodeName.replace( rbackslash, "" ).toLowerCase();
-
- return function( elem ) {
- return elem.nodeName && elem.nodeName.toLowerCase() === nodeName;
- };
- },
-
- "CLASS": function( className ) {
- var pattern = classCache[ expando ][ className + " " ];
-
- return pattern ||
- (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) &&
- classCache( className, function( elem ) {
- return pattern.test( elem.className || (typeof elem.getAttribute !== strundefined && elem.getAttribute("class")) || "" );
- });
- },
-
- "ATTR": function( name, operator, check ) {
- return function( elem, context ) {
- var result = Sizzle.attr( elem, name );
-
- if ( result == null ) {
- return operator === "!=";
- }
- if ( !operator ) {
- return true;
- }
-
- result += "";
-
- return operator === "=" ? result === check :
- operator === "!=" ? result !== check :
- operator === "^=" ? check && result.indexOf( check ) === 0 :
- operator === "*=" ? check && result.indexOf( check ) > -1 :
- operator === "$=" ? check && result.substr( result.length - check.length ) === check :
- operator === "~=" ? ( " " + result + " " ).indexOf( check ) > -1 :
- operator === "|=" ? result === check || result.substr( 0, check.length + 1 ) === check + "-" :
- false;
- };
- },
-
- "CHILD": function( type, argument, first, last ) {
-
- if ( type === "nth" ) {
- return function( elem ) {
- var node, diff,
- parent = elem.parentNode;
-
- if ( first === 1 && last === 0 ) {
- return true;
- }
-
- if ( parent ) {
- diff = 0;
- for ( node = parent.firstChild; node; node = node.nextSibling ) {
- if ( node.nodeType === 1 ) {
- diff++;
- if ( elem === node ) {
- break;
- }
- }
- }
- }
-
- // Incorporate the offset (or cast to NaN), then check against cycle size
- diff -= last;
- return diff === first || ( diff % first === 0 && diff / first >= 0 );
- };
- }
-
- return function( elem ) {
- var node = elem;
-
- switch ( type ) {
- case "only":
- case "first":
- while ( (node = node.previousSibling) ) {
- if ( node.nodeType === 1 ) {
- return false;
- }
- }
-
- if ( type === "first" ) {
- return true;
- }
-
- node = elem;
-
- /* falls through */
- case "last":
- while ( (node = node.nextSibling) ) {
- if ( node.nodeType === 1 ) {
- return false;
- }
- }
-
- return true;
- }
- };
- },
-
- "PSEUDO": function( pseudo, argument ) {
- // pseudo-class names are case-insensitive
- // http://www.w3.org/TR/selectors/#pseudo-classes
- // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters
- // Remember that setFilters inherits from pseudos
- var args,
- fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] ||
- Sizzle.error( "unsupported pseudo: " + pseudo );
-
- // The user may use createPseudo to indicate that
- // arguments are needed to create the filter function
- // just as Sizzle does
- if ( fn[ expando ] ) {
- return fn( argument );
- }
-
- // But maintain support for old signatures
- if ( fn.length > 1 ) {
- args = [ pseudo, pseudo, "", argument ];
- return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ?
- markFunction(function( seed, matches ) {
- var idx,
- matched = fn( seed, argument ),
- i = matched.length;
- while ( i-- ) {
- idx = indexOf.call( seed, matched[i] );
- seed[ idx ] = !( matches[ idx ] = matched[i] );
- }
- }) :
- function( elem ) {
- return fn( elem, 0, args );
- };
- }
-
- return fn;
- }
- },
-
- pseudos: {
- "not": markFunction(function( selector ) {
- // Trim the selector passed to compile
- // to avoid treating leading and trailing
- // spaces as combinators
- var input = [],
- results = [],
- matcher = compile( selector.replace( rtrim, "$1" ) );
-
- return matcher[ expando ] ?
- markFunction(function( seed, matches, context, xml ) {
- var elem,
- unmatched = matcher( seed, null, xml, [] ),
- i = seed.length;
-
- // Match elements unmatched by `matcher`
- while ( i-- ) {
- if ( (elem = unmatched[i]) ) {
- seed[i] = !(matches[i] = elem);
- }
- }
- }) :
- function( elem, context, xml ) {
- input[0] = elem;
- matcher( input, null, xml, results );
- return !results.pop();
- };
- }),
-
- "has": markFunction(function( selector ) {
- return function( elem ) {
- return Sizzle( selector, elem ).length > 0;
- };
- }),
-
- "contains": markFunction(function( text ) {
- return function( elem ) {
- return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1;
- };
- }),
-
- "enabled": function( elem ) {
- return elem.disabled === false;
- },
-
- "disabled": function( elem ) {
- return elem.disabled === true;
- },
-
- "checked": function( elem ) {
- // In CSS3, :checked should return both checked and selected elements
- // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
- var nodeName = elem.nodeName.toLowerCase();
- return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected);
- },
-
- "selected": function( elem ) {
- // Accessing this property makes selected-by-default
- // options in Safari work properly
- if ( elem.parentNode ) {
- elem.parentNode.selectedIndex;
- }
-
- return elem.selected === true;
- },
-
- "parent": function( elem ) {
- return !Expr.pseudos["empty"]( elem );
- },
-
- "empty": function( elem ) {
- // http://www.w3.org/TR/selectors/#empty-pseudo
-		// :empty is only affected by element nodes and content nodes (including text (3), cdata (4)),
- // not comment, processing instructions, or others
- // Thanks to Diego Perini for the nodeName shortcut
- // Greater than "@" means alpha characters (specifically not starting with "#" or "?")
- var nodeType;
- elem = elem.firstChild;
- while ( elem ) {
- if ( elem.nodeName > "@" || (nodeType = elem.nodeType) === 3 || nodeType === 4 ) {
- return false;
- }
- elem = elem.nextSibling;
- }
- return true;
- },
-
- "header": function( elem ) {
- return rheader.test( elem.nodeName );
- },
-
- "text": function( elem ) {
- var type, attr;
- // IE6 and 7 will map elem.type to 'text' for new HTML5 types (search, etc)
- // use getAttribute instead to test this case
- return elem.nodeName.toLowerCase() === "input" &&
- (type = elem.type) === "text" &&
- ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === type );
- },
-
- // Input types
- "radio": createInputPseudo("radio"),
- "checkbox": createInputPseudo("checkbox"),
- "file": createInputPseudo("file"),
- "password": createInputPseudo("password"),
- "image": createInputPseudo("image"),
-
- "submit": createButtonPseudo("submit"),
- "reset": createButtonPseudo("reset"),
-
- "button": function( elem ) {
- var name = elem.nodeName.toLowerCase();
- return name === "input" && elem.type === "button" || name === "button";
- },
-
- "input": function( elem ) {
- return rinputs.test( elem.nodeName );
- },
-
- "focus": function( elem ) {
- var doc = elem.ownerDocument;
- return elem === doc.activeElement && (!doc.hasFocus || doc.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex);
- },
-
- "active": function( elem ) {
- return elem === elem.ownerDocument.activeElement;
- },
-
- // Positional types
- "first": createPositionalPseudo(function() {
- return [ 0 ];
- }),
-
- "last": createPositionalPseudo(function( matchIndexes, length ) {
- return [ length - 1 ];
- }),
-
- "eq": createPositionalPseudo(function( matchIndexes, length, argument ) {
- return [ argument < 0 ? argument + length : argument ];
- }),
-
- "even": createPositionalPseudo(function( matchIndexes, length ) {
- for ( var i = 0; i < length; i += 2 ) {
- matchIndexes.push( i );
- }
- return matchIndexes;
- }),
-
- "odd": createPositionalPseudo(function( matchIndexes, length ) {
- for ( var i = 1; i < length; i += 2 ) {
- matchIndexes.push( i );
- }
- return matchIndexes;
- }),
-
- "lt": createPositionalPseudo(function( matchIndexes, length, argument ) {
- for ( var i = argument < 0 ? argument + length : argument; --i >= 0; ) {
- matchIndexes.push( i );
- }
- return matchIndexes;
- }),
-
- "gt": createPositionalPseudo(function( matchIndexes, length, argument ) {
- for ( var i = argument < 0 ? argument + length : argument; ++i < length; ) {
- matchIndexes.push( i );
- }
- return matchIndexes;
- })
- }
-};
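-// Expr.createPseudo (markFunction) is the extension point for custom
-// pseudo-selectors; a minimal sketch, with ":data" as a hypothetical name:
-//
-//   jQuery.expr[":"].data = Expr.createPseudo(function( key ) {
-//       return function( elem ) {
-//           return !!jQuery.data( elem, key );
-//       };
-//   });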
-
-function siblingCheck( a, b, ret ) {
- if ( a === b ) {
- return ret;
- }
-
- var cur = a.nextSibling;
-
- while ( cur ) {
- if ( cur === b ) {
- return -1;
- }
-
- cur = cur.nextSibling;
- }
-
- return 1;
-}
-
-sortOrder = docElem.compareDocumentPosition ?
- function( a, b ) {
- if ( a === b ) {
- hasDuplicate = true;
- return 0;
- }
-
- return ( !a.compareDocumentPosition || !b.compareDocumentPosition ?
- a.compareDocumentPosition :
- a.compareDocumentPosition(b) & 4
- ) ? -1 : 1;
- } :
- function( a, b ) {
- // The nodes are identical, we can exit early
- if ( a === b ) {
- hasDuplicate = true;
- return 0;
-
- // Fallback to using sourceIndex (in IE) if it's available on both nodes
- } else if ( a.sourceIndex && b.sourceIndex ) {
- return a.sourceIndex - b.sourceIndex;
- }
-
- var al, bl,
- ap = [],
- bp = [],
- aup = a.parentNode,
- bup = b.parentNode,
- cur = aup;
-
- // If the nodes are siblings (or identical) we can do a quick check
- if ( aup === bup ) {
- return siblingCheck( a, b );
-
- // If no parents were found then the nodes are disconnected
- } else if ( !aup ) {
- return -1;
-
- } else if ( !bup ) {
- return 1;
- }
-
- // Otherwise they're somewhere else in the tree so we need
- // to build up a full list of the parentNodes for comparison
- while ( cur ) {
- ap.unshift( cur );
- cur = cur.parentNode;
- }
-
- cur = bup;
-
- while ( cur ) {
- bp.unshift( cur );
- cur = cur.parentNode;
- }
-
- al = ap.length;
- bl = bp.length;
-
- // Start walking down the tree looking for a discrepancy
- for ( var i = 0; i < al && i < bl; i++ ) {
- if ( ap[i] !== bp[i] ) {
- return siblingCheck( ap[i], bp[i] );
- }
- }
-
- // We ended someplace up the tree so do a sibling check
- return i === al ?
- siblingCheck( a, bp[i], -1 ) :
- siblingCheck( ap[i], b, 1 );
- };
-
-// Always assume the presence of duplicates if sort doesn't
-// pass them to our comparison function (as in Google Chrome).
-[0, 0].sort( sortOrder );
-baseHasDuplicate = !hasDuplicate;
-
-// Document sorting and removing duplicates
-Sizzle.uniqueSort = function( results ) {
- var elem,
- duplicates = [],
- i = 1,
- j = 0;
-
- hasDuplicate = baseHasDuplicate;
- results.sort( sortOrder );
-
- if ( hasDuplicate ) {
- for ( ; (elem = results[i]); i++ ) {
- if ( elem === results[ i - 1 ] ) {
- j = duplicates.push( i );
- }
- }
- while ( j-- ) {
- results.splice( duplicates[ j ], 1 );
- }
- }
-
- return results;
-};
-
-Sizzle.error = function( msg ) {
- throw new Error( "Syntax error, unrecognized expression: " + msg );
-};
-
-function tokenize( selector, parseOnly ) {
- var matched, match, tokens, type,
- soFar, groups, preFilters,
- cached = tokenCache[ expando ][ selector + " " ];
-
- if ( cached ) {
- return parseOnly ? 0 : cached.slice( 0 );
- }
-
- soFar = selector;
- groups = [];
- preFilters = Expr.preFilter;
-
- while ( soFar ) {
-
- // Comma and first run
- if ( !matched || (match = rcomma.exec( soFar )) ) {
- if ( match ) {
- // Don't consume trailing commas as valid
- soFar = soFar.slice( match[0].length ) || soFar;
- }
- groups.push( tokens = [] );
- }
-
- matched = false;
-
- // Combinators
- if ( (match = rcombinators.exec( soFar )) ) {
- tokens.push( matched = new Token( match.shift() ) );
- soFar = soFar.slice( matched.length );
-
- // Cast descendant combinators to space
- matched.type = match[0].replace( rtrim, " " );
- }
-
- // Filters
- for ( type in Expr.filter ) {
- if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] ||
- (match = preFilters[ type ]( match ))) ) {
-
- tokens.push( matched = new Token( match.shift() ) );
- soFar = soFar.slice( matched.length );
- matched.type = type;
- matched.matches = match;
- }
- }
-
- if ( !matched ) {
- break;
- }
- }
-
- // Return the length of the invalid excess
- // if we're just parsing
- // Otherwise, throw an error or return tokens
- return parseOnly ?
- soFar.length :
- soFar ?
- Sizzle.error( selector ) :
- // Cache the tokens
- tokenCache( selector, groups ).slice( 0 );
-}
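-// Rough shape of the tokenize() result (illustrative): "div > .item, a"
-// yields two groups of Token (String) objects carrying .type and .matches:
-//
-//   [ [ {TAG "div"}, {">"}, {CLASS ".item"} ],
-//     [ {TAG "a"} ] ]
-//
-// Combinator tokens use their punctuation as the type; a descendant
-// combinator is cast to a single space.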
-
-function addCombinator( matcher, combinator, base ) {
- var dir = combinator.dir,
- checkNonElements = base && combinator.dir === "parentNode",
- doneName = done++;
-
- return combinator.first ?
- // Check against closest ancestor/preceding element
- function( elem, context, xml ) {
- while ( (elem = elem[ dir ]) ) {
- if ( checkNonElements || elem.nodeType === 1 ) {
- return matcher( elem, context, xml );
- }
- }
- } :
-
- // Check against all ancestor/preceding elements
- function( elem, context, xml ) {
- // We can't set arbitrary data on XML nodes, so they don't benefit from dir caching
- if ( !xml ) {
- var cache,
- dirkey = dirruns + " " + doneName + " ",
- cachedkey = dirkey + cachedruns;
- while ( (elem = elem[ dir ]) ) {
- if ( checkNonElements || elem.nodeType === 1 ) {
- if ( (cache = elem[ expando ]) === cachedkey ) {
- return elem.sizset;
- } else if ( typeof cache === "string" && cache.indexOf(dirkey) === 0 ) {
- if ( elem.sizset ) {
- return elem;
- }
- } else {
- elem[ expando ] = cachedkey;
- if ( matcher( elem, context, xml ) ) {
- elem.sizset = true;
- return elem;
- }
- elem.sizset = false;
- }
- }
- }
- } else {
- while ( (elem = elem[ dir ]) ) {
- if ( checkNonElements || elem.nodeType === 1 ) {
- if ( matcher( elem, context, xml ) ) {
- return elem;
- }
- }
- }
- }
- };
-}
-
-function elementMatcher( matchers ) {
- return matchers.length > 1 ?
- function( elem, context, xml ) {
- var i = matchers.length;
- while ( i-- ) {
- if ( !matchers[i]( elem, context, xml ) ) {
- return false;
- }
- }
- return true;
- } :
- matchers[0];
-}
-
-function condense( unmatched, map, filter, context, xml ) {
- var elem,
- newUnmatched = [],
- i = 0,
- len = unmatched.length,
- mapped = map != null;
-
- for ( ; i < len; i++ ) {
- if ( (elem = unmatched[i]) ) {
- if ( !filter || filter( elem, context, xml ) ) {
- newUnmatched.push( elem );
- if ( mapped ) {
- map.push( i );
- }
- }
- }
- }
-
- return newUnmatched;
-}
-
-function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) {
- if ( postFilter && !postFilter[ expando ] ) {
- postFilter = setMatcher( postFilter );
- }
- if ( postFinder && !postFinder[ expando ] ) {
- postFinder = setMatcher( postFinder, postSelector );
- }
- return markFunction(function( seed, results, context, xml ) {
- var temp, i, elem,
- preMap = [],
- postMap = [],
- preexisting = results.length,
-
- // Get initial elements from seed or context
- elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ),
-
- // Prefilter to get matcher input, preserving a map for seed-results synchronization
- matcherIn = preFilter && ( seed || !selector ) ?
- condense( elems, preMap, preFilter, context, xml ) :
- elems,
-
- matcherOut = matcher ?
- // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results,
- postFinder || ( seed ? preFilter : preexisting || postFilter ) ?
-
- // ...intermediate processing is necessary
- [] :
-
- // ...otherwise use results directly
- results :
- matcherIn;
-
- // Find primary matches
- if ( matcher ) {
- matcher( matcherIn, matcherOut, context, xml );
- }
-
- // Apply postFilter
- if ( postFilter ) {
- temp = condense( matcherOut, postMap );
- postFilter( temp, [], context, xml );
-
- // Un-match failing elements by moving them back to matcherIn
- i = temp.length;
- while ( i-- ) {
- if ( (elem = temp[i]) ) {
- matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem);
- }
- }
- }
-
- if ( seed ) {
- if ( postFinder || preFilter ) {
- if ( postFinder ) {
- // Get the final matcherOut by condensing this intermediate into postFinder contexts
- temp = [];
- i = matcherOut.length;
- while ( i-- ) {
- if ( (elem = matcherOut[i]) ) {
- // Restore matcherIn since elem is not yet a final match
- temp.push( (matcherIn[i] = elem) );
- }
- }
- postFinder( null, (matcherOut = []), temp, xml );
- }
-
- // Move matched elements from seed to results to keep them synchronized
- i = matcherOut.length;
- while ( i-- ) {
- if ( (elem = matcherOut[i]) &&
- (temp = postFinder ? indexOf.call( seed, elem ) : preMap[i]) > -1 ) {
-
- seed[temp] = !(results[temp] = elem);
- }
- }
- }
-
- // Add elements to results, through postFinder if defined
- } else {
- matcherOut = condense(
- matcherOut === results ?
- matcherOut.splice( preexisting, matcherOut.length ) :
- matcherOut
- );
- if ( postFinder ) {
- postFinder( null, results, matcherOut, xml );
- } else {
- push.apply( results, matcherOut );
- }
- }
- });
-}
-
-function matcherFromTokens( tokens ) {
- var checkContext, matcher, j,
- len = tokens.length,
- leadingRelative = Expr.relative[ tokens[0].type ],
- implicitRelative = leadingRelative || Expr.relative[" "],
- i = leadingRelative ? 1 : 0,
-
- // The foundational matcher ensures that elements are reachable from top-level context(s)
- matchContext = addCombinator( function( elem ) {
- return elem === checkContext;
- }, implicitRelative, true ),
- matchAnyContext = addCombinator( function( elem ) {
- return indexOf.call( checkContext, elem ) > -1;
- }, implicitRelative, true ),
- matchers = [ function( elem, context, xml ) {
- return ( !leadingRelative && ( xml || context !== outermostContext ) ) || (
- (checkContext = context).nodeType ?
- matchContext( elem, context, xml ) :
- matchAnyContext( elem, context, xml ) );
- } ];
-
- for ( ; i < len; i++ ) {
- if ( (matcher = Expr.relative[ tokens[i].type ]) ) {
- matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ];
- } else {
- matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches );
-
- // Return special upon seeing a positional matcher
- if ( matcher[ expando ] ) {
- // Find the next relative operator (if any) for proper handling
- j = ++i;
- for ( ; j < len; j++ ) {
- if ( Expr.relative[ tokens[j].type ] ) {
- break;
- }
- }
- return setMatcher(
- i > 1 && elementMatcher( matchers ),
- i > 1 && tokens.slice( 0, i - 1 ).join("").replace( rtrim, "$1" ),
- matcher,
- i < j && matcherFromTokens( tokens.slice( i, j ) ),
- j < len && matcherFromTokens( (tokens = tokens.slice( j )) ),
- j < len && tokens.join("")
- );
- }
- matchers.push( matcher );
- }
- }
-
- return elementMatcher( matchers );
-}
-
-function matcherFromGroupMatchers( elementMatchers, setMatchers ) {
- var bySet = setMatchers.length > 0,
- byElement = elementMatchers.length > 0,
- superMatcher = function( seed, context, xml, results, expandContext ) {
- var elem, j, matcher,
- setMatched = [],
- matchedCount = 0,
- i = "0",
- unmatched = seed && [],
- outermost = expandContext != null,
- contextBackup = outermostContext,
- // We must always have either seed elements or context
- elems = seed || byElement && Expr.find["TAG"]( "*", expandContext && context.parentNode || context ),
- // Nested matchers should use non-integer dirruns
- dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.E);
-
- if ( outermost ) {
- outermostContext = context !== document && context;
- cachedruns = superMatcher.el;
- }
-
- // Add elements passing elementMatchers directly to results
- for ( ; (elem = elems[i]) != null; i++ ) {
- if ( byElement && elem ) {
- for ( j = 0; (matcher = elementMatchers[j]); j++ ) {
- if ( matcher( elem, context, xml ) ) {
- results.push( elem );
- break;
- }
- }
- if ( outermost ) {
- dirruns = dirrunsUnique;
- cachedruns = ++superMatcher.el;
- }
- }
-
- // Track unmatched elements for set filters
- if ( bySet ) {
- // They will have gone through all possible matchers
- if ( (elem = !matcher && elem) ) {
- matchedCount--;
- }
-
- // Lengthen the array for every element, matched or not
- if ( seed ) {
- unmatched.push( elem );
- }
- }
- }
-
- // Apply set filters to unmatched elements
- matchedCount += i;
- if ( bySet && i !== matchedCount ) {
- for ( j = 0; (matcher = setMatchers[j]); j++ ) {
- matcher( unmatched, setMatched, context, xml );
- }
-
- if ( seed ) {
- // Reintegrate element matches to eliminate the need for sorting
- if ( matchedCount > 0 ) {
- while ( i-- ) {
- if ( !(unmatched[i] || setMatched[i]) ) {
- setMatched[i] = pop.call( results );
- }
- }
- }
-
- // Discard index placeholder values to get only actual matches
- setMatched = condense( setMatched );
- }
-
- // Add matches to results
- push.apply( results, setMatched );
-
-			// Seedless set matches from more than one successful matcher may arrive out of order, so sort the results
- if ( outermost && !seed && setMatched.length > 0 &&
- ( matchedCount + setMatchers.length ) > 1 ) {
-
- Sizzle.uniqueSort( results );
- }
- }
-
- // Override manipulation of globals by nested matchers
- if ( outermost ) {
- dirruns = dirrunsUnique;
- outermostContext = contextBackup;
- }
-
- return unmatched;
- };
-
- superMatcher.el = 0;
- return bySet ?
- markFunction( superMatcher ) :
- superMatcher;
-}
-
-compile = Sizzle.compile = function( selector, group /* Internal Use Only */ ) {
- var i,
- setMatchers = [],
- elementMatchers = [],
- cached = compilerCache[ expando ][ selector + " " ];
-
- if ( !cached ) {
- // Generate a function of recursive functions that can be used to check each element
- if ( !group ) {
- group = tokenize( selector );
- }
- i = group.length;
- while ( i-- ) {
- cached = matcherFromTokens( group[i] );
- if ( cached[ expando ] ) {
- setMatchers.push( cached );
- } else {
- elementMatchers.push( cached );
- }
- }
-
- // Cache the compiled function
- cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) );
- }
- return cached;
-};
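-// A compiled matcher is cached and then invoked with the signature used by
-// select() below (illustrative):
-//
-//   var matcher = compile( "div.item" );
-//   matcher( seed, context, xml, results, expandContext );
-//
-// Element matchers run per candidate element; set matchers (from positional
-// pseudos) run once over the whole candidate set.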
-
-function multipleContexts( selector, contexts, results ) {
- var i = 0,
- len = contexts.length;
- for ( ; i < len; i++ ) {
- Sizzle( selector, contexts[i], results );
- }
- return results;
-}
-
-function select( selector, context, results, seed, xml ) {
- var i, tokens, token, type, find,
- match = tokenize( selector ),
- j = match.length;
-
- if ( !seed ) {
- // Try to minimize operations if there is only one group
- if ( match.length === 1 ) {
-
- // Take a shortcut and set the context if the root selector is an ID
- tokens = match[0] = match[0].slice( 0 );
- if ( tokens.length > 2 && (token = tokens[0]).type === "ID" &&
- context.nodeType === 9 && !xml &&
- Expr.relative[ tokens[1].type ] ) {
-
- context = Expr.find["ID"]( token.matches[0].replace( rbackslash, "" ), context, xml )[0];
- if ( !context ) {
- return results;
- }
-
- selector = selector.slice( tokens.shift().length );
- }
-
- // Fetch a seed set for right-to-left matching
- for ( i = matchExpr["POS"].test( selector ) ? -1 : tokens.length - 1; i >= 0; i-- ) {
- token = tokens[i];
-
- // Abort if we hit a combinator
- if ( Expr.relative[ (type = token.type) ] ) {
- break;
- }
- if ( (find = Expr.find[ type ]) ) {
- // Search, expanding context for leading sibling combinators
- if ( (seed = find(
- token.matches[0].replace( rbackslash, "" ),
- rsibling.test( tokens[0].type ) && context.parentNode || context,
- xml
- )) ) {
-
- // If seed is empty or no tokens remain, we can return early
- tokens.splice( i, 1 );
- selector = seed.length && tokens.join("");
- if ( !selector ) {
- push.apply( results, slice.call( seed, 0 ) );
- return results;
- }
-
- break;
- }
- }
- }
- }
- }
-
- // Compile and execute a filtering function
- // Provide `match` to avoid retokenization if we modified the selector above
- compile( selector, match )(
- seed,
- context,
- xml,
- results,
- rsibling.test( selector )
- );
- return results;
-}
-
-if ( document.querySelectorAll ) {
- (function() {
- var disconnectedMatch,
- oldSelect = select,
- rescape = /'|\\/g,
- rattributeQuotes = /\=[\x20\t\r\n\f]*([^'"\]]*)[\x20\t\r\n\f]*\]/g,
-
-		// qSA(:focus) reports false when true (Chrome 21), no need to also add to buggyMatches since matches checks buggyQSA
- // A support test would require too much code (would include document ready)
- rbuggyQSA = [ ":focus" ],
-
- // matchesSelector(:active) reports false when true (IE9/Opera 11.5)
- // A support test would require too much code (would include document ready)
- // just skip matchesSelector for :active
- rbuggyMatches = [ ":active" ],
- matches = docElem.matchesSelector ||
- docElem.mozMatchesSelector ||
- docElem.webkitMatchesSelector ||
- docElem.oMatchesSelector ||
- docElem.msMatchesSelector;
-
- // Build QSA regex
- // Regex strategy adopted from Diego Perini
- assert(function( div ) {
- // Select is set to empty string on purpose
-		// This is to test IE's treatment of not explicitly
- // setting a boolean content attribute,
- // since its presence should be enough
- // http://bugs.jquery.com/ticket/12359
- div.innerHTML = "<select><option selected=''></option></select>";
-
- // IE8 - Some boolean attributes are not treated correctly
- if ( !div.querySelectorAll("[selected]").length ) {
- rbuggyQSA.push( "\\[" + whitespace + "*(?:checked|disabled|ismap|multiple|readonly|selected|value)" );
- }
-
- // Webkit/Opera - :checked should return selected option elements
- // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
- // IE8 throws error here (do not put tests after this one)
- if ( !div.querySelectorAll(":checked").length ) {
- rbuggyQSA.push(":checked");
- }
- });
-
- assert(function( div ) {
-
- // Opera 10-12/IE9 - ^= $= *= and empty values
- // Should not select anything
- div.innerHTML = "<p test=''></p>";
- if ( div.querySelectorAll("[test^='']").length ) {
- rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:\"\"|'')" );
- }
-
- // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
- // IE8 throws error here (do not put tests after this one)
- div.innerHTML = "<input type='hidden'/>";
- if ( !div.querySelectorAll(":enabled").length ) {
- rbuggyQSA.push(":enabled", ":disabled");
- }
- });
-
- // rbuggyQSA always contains :focus, so no need for a length check
- rbuggyQSA = /* rbuggyQSA.length && */ new RegExp( rbuggyQSA.join("|") );
-
- select = function( selector, context, results, seed, xml ) {
- // Only use querySelectorAll when not filtering,
- // when this is not xml,
- // and when no QSA bugs apply
- if ( !seed && !xml && !rbuggyQSA.test( selector ) ) {
- var groups, i,
- old = true,
- nid = expando,
- newContext = context,
- newSelector = context.nodeType === 9 && selector;
-
- // qSA works strangely on Element-rooted queries
- // We can work around this by specifying an extra ID on the root
- // and working up from there (Thanks to Andrew Dupont for the technique)
- // IE 8 doesn't work on object elements
- if ( context.nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) {
- groups = tokenize( selector );
-
- if ( (old = context.getAttribute("id")) ) {
- nid = old.replace( rescape, "\\$&" );
- } else {
- context.setAttribute( "id", nid );
- }
- nid = "[id='" + nid + "'] ";
-
- i = groups.length;
- while ( i-- ) {
- groups[i] = nid + groups[i].join("");
- }
- newContext = rsibling.test( selector ) && context.parentNode || context;
- newSelector = groups.join(",");
- }
-
- if ( newSelector ) {
- try {
- push.apply( results, slice.call( newContext.querySelectorAll(
- newSelector
- ), 0 ) );
- return results;
- } catch(qsaError) {
- } finally {
- if ( !old ) {
- context.removeAttribute("id");
- }
- }
- }
- }
-
- return oldSelect( selector, context, results, seed, xml );
- };
-
- if ( matches ) {
- assert(function( div ) {
- // Check to see if it's possible to do matchesSelector
- // on a disconnected node (IE 9)
- disconnectedMatch = matches.call( div, "div" );
-
- // This should fail with an exception
- // Gecko does not error, returns false instead
- try {
- matches.call( div, "[test!='']:sizzle" );
- rbuggyMatches.push( "!=", pseudos );
- } catch ( e ) {}
- });
-
- // rbuggyMatches always contains :active and :focus, so no need for a length check
- rbuggyMatches = /* rbuggyMatches.length && */ new RegExp( rbuggyMatches.join("|") );
-
- Sizzle.matchesSelector = function( elem, expr ) {
- // Make sure that attribute selectors are quoted
- expr = expr.replace( rattributeQuotes, "='$1']" );
-
- // rbuggyMatches always contains :active, so no need for an existence check
- if ( !isXML( elem ) && !rbuggyMatches.test( expr ) && !rbuggyQSA.test( expr ) ) {
- try {
- var ret = matches.call( elem, expr );
-
- // IE 9's matchesSelector returns false on disconnected nodes
- if ( ret || disconnectedMatch ||
- // As well, disconnected nodes are said to be in a document
- // fragment in IE 9
- elem.document && elem.document.nodeType !== 11 ) {
- return ret;
- }
- } catch(e) {}
- }
-
- return Sizzle( expr, null, null, [ elem ] ).length > 0;
- };
- }
- })();
-}
-
-// Deprecated
-Expr.pseudos["nth"] = Expr.pseudos["eq"];
-
-// Back-compat
-function setFilters() {}
-Expr.filters = setFilters.prototype = Expr.pseudos;
-Expr.setFilters = new setFilters();
-
-// Override sizzle attribute retrieval
-Sizzle.attr = jQuery.attr;
-jQuery.find = Sizzle;
-jQuery.expr = Sizzle.selectors;
-jQuery.expr[":"] = jQuery.expr.pseudos;
-jQuery.unique = Sizzle.uniqueSort;
-jQuery.text = Sizzle.getText;
-jQuery.isXMLDoc = Sizzle.isXML;
-jQuery.contains = Sizzle.contains;
-
-
-})( window );
-var runtil = /Until$/,
- rparentsprev = /^(?:parents|prev(?:Until|All))/,
- isSimple = /^.[^:#\[\.,]*$/,
- rneedsContext = jQuery.expr.match.needsContext,
- // methods guaranteed to produce a unique set when starting from a unique set
- guaranteedUnique = {
- children: true,
- contents: true,
- next: true,
- prev: true
- };
-
-jQuery.fn.extend({
- find: function( selector ) {
- var i, l, length, n, r, ret,
- self = this;
-
- if ( typeof selector !== "string" ) {
- return jQuery( selector ).filter(function() {
- for ( i = 0, l = self.length; i < l; i++ ) {
- if ( jQuery.contains( self[ i ], this ) ) {
- return true;
- }
- }
- });
- }
-
- ret = this.pushStack( "", "find", selector );
-
- for ( i = 0, l = this.length; i < l; i++ ) {
- length = ret.length;
- jQuery.find( selector, this[i], ret );
-
- if ( i > 0 ) {
- // Make sure that the results are unique
- for ( n = length; n < ret.length; n++ ) {
- for ( r = 0; r < length; r++ ) {
- if ( ret[r] === ret[n] ) {
- ret.splice(n--, 1);
- break;
- }
- }
- }
- }
- }
-
- return ret;
- },
-
- has: function( target ) {
- var i,
- targets = jQuery( target, this ),
- len = targets.length;
-
- return this.filter(function() {
- for ( i = 0; i < len; i++ ) {
- if ( jQuery.contains( this, targets[i] ) ) {
- return true;
- }
- }
- });
- },
-
- not: function( selector ) {
- return this.pushStack( winnow(this, selector, false), "not", selector);
- },
-
- filter: function( selector ) {
- return this.pushStack( winnow(this, selector, true), "filter", selector );
- },
-
- is: function( selector ) {
- return !!selector && (
- typeof selector === "string" ?
- // If this is a positional/relative selector, check membership in the returned set
- // so $("p:first").is("p:last") won't return true for a doc with two "p".
- rneedsContext.test( selector ) ?
- jQuery( selector, this.context ).index( this[0] ) >= 0 :
- jQuery.filter( selector, this ).length > 0 :
- this.filter( selector ).length > 0 );
- },
-
- closest: function( selectors, context ) {
- var cur,
- i = 0,
- l = this.length,
- ret = [],
- pos = rneedsContext.test( selectors ) || typeof selectors !== "string" ?
- jQuery( selectors, context || this.context ) :
- 0;
-
- for ( ; i < l; i++ ) {
- cur = this[i];
-
- while ( cur && cur.ownerDocument && cur !== context && cur.nodeType !== 11 ) {
- if ( pos ? pos.index(cur) > -1 : jQuery.find.matchesSelector(cur, selectors) ) {
- ret.push( cur );
- break;
- }
- cur = cur.parentNode;
- }
- }
-
- ret = ret.length > 1 ? jQuery.unique( ret ) : ret;
-
- return this.pushStack( ret, "closest", selectors );
- },
-
- // Determine the position of an element within
- // the matched set of elements
- index: function( elem ) {
-
- // No argument, return index in parent
- if ( !elem ) {
- return ( this[0] && this[0].parentNode ) ? this.prevAll().length : -1;
- }
-
- // index in selector
- if ( typeof elem === "string" ) {
- return jQuery.inArray( this[0], jQuery( elem ) );
- }
-
- // Locate the position of the desired element
- return jQuery.inArray(
- // If it receives a jQuery object, the first element is used
- elem.jquery ? elem[0] : elem, this );
- },
-
- add: function( selector, context ) {
- var set = typeof selector === "string" ?
- jQuery( selector, context ) :
- jQuery.makeArray( selector && selector.nodeType ? [ selector ] : selector ),
- all = jQuery.merge( this.get(), set );
-
- return this.pushStack( isDisconnected( set[0] ) || isDisconnected( all[0] ) ?
- all :
- jQuery.unique( all ) );
- },
-
- addBack: function( selector ) {
- return this.add( selector == null ?
- this.prevObject : this.prevObject.filter(selector)
- );
- }
-});
-
-jQuery.fn.andSelf = jQuery.fn.addBack;
-
-// A painfully simple check to see if an element is disconnected
-// from a document (should be improved, where feasible).
-function isDisconnected( node ) {
- return !node || !node.parentNode || node.parentNode.nodeType === 11;
-}
-
-function sibling( cur, dir ) {
- do {
- cur = cur[ dir ];
- } while ( cur && cur.nodeType !== 1 );
-
- return cur;
-}
-
-jQuery.each({
- parent: function( elem ) {
- var parent = elem.parentNode;
- return parent && parent.nodeType !== 11 ? parent : null;
- },
- parents: function( elem ) {
- return jQuery.dir( elem, "parentNode" );
- },
- parentsUntil: function( elem, i, until ) {
- return jQuery.dir( elem, "parentNode", until );
- },
- next: function( elem ) {
- return sibling( elem, "nextSibling" );
- },
- prev: function( elem ) {
- return sibling( elem, "previousSibling" );
- },
- nextAll: function( elem ) {
- return jQuery.dir( elem, "nextSibling" );
- },
- prevAll: function( elem ) {
- return jQuery.dir( elem, "previousSibling" );
- },
- nextUntil: function( elem, i, until ) {
- return jQuery.dir( elem, "nextSibling", until );
- },
- prevUntil: function( elem, i, until ) {
- return jQuery.dir( elem, "previousSibling", until );
- },
- siblings: function( elem ) {
- return jQuery.sibling( ( elem.parentNode || {} ).firstChild, elem );
- },
- children: function( elem ) {
- return jQuery.sibling( elem.firstChild );
- },
- contents: function( elem ) {
- return jQuery.nodeName( elem, "iframe" ) ?
- elem.contentDocument || elem.contentWindow.document :
- jQuery.merge( [], elem.childNodes );
- }
-}, function( name, fn ) {
- jQuery.fn[ name ] = function( until, selector ) {
- var ret = jQuery.map( this, fn, until );
-
- if ( !runtil.test( name ) ) {
- selector = until;
- }
-
- if ( selector && typeof selector === "string" ) {
- ret = jQuery.filter( selector, ret );
- }
-
- ret = this.length > 1 && !guaranteedUnique[ name ] ? jQuery.unique( ret ) : ret;
-
- if ( this.length > 1 && rparentsprev.test( name ) ) {
- ret = ret.reverse();
- }
-
- return this.pushStack( ret, name, core_slice.call( arguments ).join(",") );
- };
-});
-
-jQuery.extend({
- filter: function( expr, elems, not ) {
- if ( not ) {
- expr = ":not(" + expr + ")";
- }
-
- return elems.length === 1 ?
- jQuery.find.matchesSelector(elems[0], expr) ? [ elems[0] ] : [] :
- jQuery.find.matches(expr, elems);
- },
-
- dir: function( elem, dir, until ) {
- var matched = [],
- cur = elem[ dir ];
-
- while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) {
- if ( cur.nodeType === 1 ) {
- matched.push( cur );
- }
- cur = cur[dir];
- }
- return matched;
- },
-
- sibling: function( n, elem ) {
- var r = [];
-
- for ( ; n; n = n.nextSibling ) {
- if ( n.nodeType === 1 && n !== elem ) {
- r.push( n );
- }
- }
-
- return r;
- }
-});
-
-// Implement the identical functionality for filter and not
-function winnow( elements, qualifier, keep ) {
-
- // Can't pass null or undefined to indexOf in Firefox 4
- // Set to 0 to skip string check
- qualifier = qualifier || 0;
-
- if ( jQuery.isFunction( qualifier ) ) {
- return jQuery.grep(elements, function( elem, i ) {
- var retVal = !!qualifier.call( elem, i, elem );
- return retVal === keep;
- });
-
- } else if ( qualifier.nodeType ) {
- return jQuery.grep(elements, function( elem, i ) {
- return ( elem === qualifier ) === keep;
- });
-
- } else if ( typeof qualifier === "string" ) {
- var filtered = jQuery.grep(elements, function( elem ) {
- return elem.nodeType === 1;
- });
-
- if ( isSimple.test( qualifier ) ) {
- return jQuery.filter(qualifier, filtered, !keep);
- } else {
- qualifier = jQuery.filter( qualifier, filtered );
- }
- }
-
- return jQuery.grep(elements, function( elem, i ) {
- return ( jQuery.inArray( elem, qualifier ) >= 0 ) === keep;
- });
-}
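-
-// A minimal usage sketch (illustrative, assumed): both public entry points
-// above route through winnow with opposite "keep" flags, e.g.
-//   jQuery( "li" ).filter( ".active" )  ->  winnow( elements, ".active", true )
-//   jQuery( "li" ).not( ".active" )     ->  winnow( elements, ".active", false )
-// so one qualifier implementation serves both filter() and not().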
-function createSafeFragment( document ) {
- var list = nodeNames.split( "|" ),
- safeFrag = document.createDocumentFragment();
-
- if ( safeFrag.createElement ) {
- while ( list.length ) {
- safeFrag.createElement(
- list.pop()
- );
- }
- }
- return safeFrag;
-}
-
-var nodeNames = "abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|" +
- "header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",
- rinlinejQuery = / jQuery\d+="(?:null|\d+)"/g,
- rleadingWhitespace = /^\s+/,
- rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,
- rtagName = /<([\w:]+)/,
- rtbody = /<tbody/i,
- rhtml = /<|&#?\w+;/,
- rnoInnerhtml = /<(?:script|style|link)/i,
- rnocache = /<(?:script|object|embed|option|style)/i,
- rnoshimcache = new RegExp("<(?:" + nodeNames + ")[\\s/>]", "i"),
- rcheckableType = /^(?:checkbox|radio)$/,
- // checked="checked" or checked
- rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
- rscriptType = /\/(java|ecma)script/i,
- rcleanScript = /^\s*<!(?:\[CDATA\[|\-\-)|[\]\-]{2}>\s*$/g,
- wrapMap = {
- option: [ 1, "<select multiple='multiple'>", "</select>" ],
- legend: [ 1, "<fieldset>", "</fieldset>" ],
- thead: [ 1, "<table>", "</table>" ],
- tr: [ 2, "<table><tbody>", "</tbody></table>" ],
- td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
- col: [ 2, "<table><tbody></tbody><colgroup>", "</colgroup></table>" ],
- area: [ 1, "<map>", "</map>" ],
- _default: [ 0, "", "" ]
- },
- safeFragment = createSafeFragment( document ),
- fragmentDiv = safeFragment.appendChild( document.createElement("div") );
-
-wrapMap.optgroup = wrapMap.option;
-wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
-wrapMap.th = wrapMap.td;
-
-// IE6-8 can't serialize link, script, style, or any html5 (NoScope) tags,
-// unless wrapped in a div with non-breaking characters in front of it.
-if ( !jQuery.support.htmlSerialize ) {
- wrapMap._default = [ 1, "X<div>", "</div>" ];
-}
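-
-// Parsing sketch (illustrative, assumed): wrapMap is what lets clean() parse
-// context-sensitive fragments such as "<td>x</td>". With
-//   wrapMap.td = [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ]
-// the string is rendered as the fully wrapped table markup and the div is then
-// walked 3 levels down (depth = wrap[0]) to peel the wrappers back off,
-// leaving only the bare <td> node.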
-
-jQuery.fn.extend({
- text: function( value ) {
- return jQuery.access( this, function( value ) {
- return value === undefined ?
- jQuery.text( this ) :
- this.empty().append( ( this[0] && this[0].ownerDocument || document ).createTextNode( value ) );
- }, null, value, arguments.length );
- },
-
- wrapAll: function( html ) {
- if ( jQuery.isFunction( html ) ) {
- return this.each(function(i) {
- jQuery(this).wrapAll( html.call(this, i) );
- });
- }
-
- if ( this[0] ) {
- // The elements to wrap the target around
- var wrap = jQuery( html, this[0].ownerDocument ).eq(0).clone(true);
-
- if ( this[0].parentNode ) {
- wrap.insertBefore( this[0] );
- }
-
- wrap.map(function() {
- var elem = this;
-
- while ( elem.firstChild && elem.firstChild.nodeType === 1 ) {
- elem = elem.firstChild;
- }
-
- return elem;
- }).append( this );
- }
-
- return this;
- },
-
- wrapInner: function( html ) {
- if ( jQuery.isFunction( html ) ) {
- return this.each(function(i) {
- jQuery(this).wrapInner( html.call(this, i) );
- });
- }
-
- return this.each(function() {
- var self = jQuery( this ),
- contents = self.contents();
-
- if ( contents.length ) {
- contents.wrapAll( html );
-
- } else {
- self.append( html );
- }
- });
- },
-
- wrap: function( html ) {
- var isFunction = jQuery.isFunction( html );
-
- return this.each(function(i) {
- jQuery( this ).wrapAll( isFunction ? html.call(this, i) : html );
- });
- },
-
- unwrap: function() {
- return this.parent().each(function() {
- if ( !jQuery.nodeName( this, "body" ) ) {
- jQuery( this ).replaceWith( this.childNodes );
- }
- }).end();
- },
-
- append: function() {
- return this.domManip(arguments, true, function( elem ) {
- if ( this.nodeType === 1 || this.nodeType === 11 ) {
- this.appendChild( elem );
- }
- });
- },
-
- prepend: function() {
- return this.domManip(arguments, true, function( elem ) {
- if ( this.nodeType === 1 || this.nodeType === 11 ) {
- this.insertBefore( elem, this.firstChild );
- }
- });
- },
-
- before: function() {
- if ( !isDisconnected( this[0] ) ) {
- return this.domManip(arguments, false, function( elem ) {
- this.parentNode.insertBefore( elem, this );
- });
- }
-
- if ( arguments.length ) {
- var set = jQuery.clean( arguments );
- return this.pushStack( jQuery.merge( set, this ), "before", this.selector );
- }
- },
-
- after: function() {
- if ( !isDisconnected( this[0] ) ) {
- return this.domManip(arguments, false, function( elem ) {
- this.parentNode.insertBefore( elem, this.nextSibling );
- });
- }
-
- if ( arguments.length ) {
- var set = jQuery.clean( arguments );
- return this.pushStack( jQuery.merge( this, set ), "after", this.selector );
- }
- },
-
- // keepData is for internal use only--do not document
- remove: function( selector, keepData ) {
- var elem,
- i = 0;
-
- for ( ; (elem = this[i]) != null; i++ ) {
- if ( !selector || jQuery.filter( selector, [ elem ] ).length ) {
- if ( !keepData && elem.nodeType === 1 ) {
- jQuery.cleanData( elem.getElementsByTagName("*") );
- jQuery.cleanData( [ elem ] );
- }
-
- if ( elem.parentNode ) {
- elem.parentNode.removeChild( elem );
- }
- }
- }
-
- return this;
- },
-
- empty: function() {
- var elem,
- i = 0;
-
- for ( ; (elem = this[i]) != null; i++ ) {
- // Remove element nodes and prevent memory leaks
- if ( elem.nodeType === 1 ) {
- jQuery.cleanData( elem.getElementsByTagName("*") );
- }
-
- // Remove any remaining nodes
- while ( elem.firstChild ) {
- elem.removeChild( elem.firstChild );
- }
- }
-
- return this;
- },
-
- clone: function( dataAndEvents, deepDataAndEvents ) {
- dataAndEvents = dataAndEvents == null ? false : dataAndEvents;
- deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents;
-
- return this.map( function () {
- return jQuery.clone( this, dataAndEvents, deepDataAndEvents );
- });
- },
-
- html: function( value ) {
- return jQuery.access( this, function( value ) {
- var elem = this[0] || {},
- i = 0,
- l = this.length;
-
- if ( value === undefined ) {
- return elem.nodeType === 1 ?
- elem.innerHTML.replace( rinlinejQuery, "" ) :
- undefined;
- }
-
- // See if we can take a shortcut and just use innerHTML
- if ( typeof value === "string" && !rnoInnerhtml.test( value ) &&
- ( jQuery.support.htmlSerialize || !rnoshimcache.test( value ) ) &&
- ( jQuery.support.leadingWhitespace || !rleadingWhitespace.test( value ) ) &&
- !wrapMap[ ( rtagName.exec( value ) || ["", ""] )[1].toLowerCase() ] ) {
-
- value = value.replace( rxhtmlTag, "<$1></$2>" );
-
- try {
- for (; i < l; i++ ) {
- // Remove element nodes and prevent memory leaks
- elem = this[i] || {};
- if ( elem.nodeType === 1 ) {
- jQuery.cleanData( elem.getElementsByTagName( "*" ) );
- elem.innerHTML = value;
- }
- }
-
- elem = 0;
-
- // If using innerHTML throws an exception, use the fallback method
- } catch(e) {}
- }
-
- if ( elem ) {
- this.empty().append( value );
- }
- }, null, value, arguments.length );
- },
-
- replaceWith: function( value ) {
- if ( !isDisconnected( this[0] ) ) {
- // Make sure that the elements are removed from the DOM before they are inserted
- // this can help fix replacing a parent with child elements
- if ( jQuery.isFunction( value ) ) {
- return this.each(function(i) {
- var self = jQuery(this), old = self.html();
- self.replaceWith( value.call( this, i, old ) );
- });
- }
-
- if ( typeof value !== "string" ) {
- value = jQuery( value ).detach();
- }
-
- return this.each(function() {
- var next = this.nextSibling,
- parent = this.parentNode;
-
- jQuery( this ).remove();
-
- if ( next ) {
- jQuery(next).before( value );
- } else {
- jQuery(parent).append( value );
- }
- });
- }
-
- return this.length ?
- this.pushStack( jQuery(jQuery.isFunction(value) ? value() : value), "replaceWith", value ) :
- this;
- },
-
- detach: function( selector ) {
- return this.remove( selector, true );
- },
-
- domManip: function( args, table, callback ) {
-
- // Flatten any nested arrays
- args = [].concat.apply( [], args );
-
- var results, first, fragment, iNoClone,
- i = 0,
- value = args[0],
- scripts = [],
- l = this.length;
-
- // In WebKit, we can't cloneNode fragments that contain checked inputs
- if ( !jQuery.support.checkClone && l > 1 && typeof value === "string" && rchecked.test( value ) ) {
- return this.each(function() {
- jQuery(this).domManip( args, table, callback );
- });
- }
-
- if ( jQuery.isFunction(value) ) {
- return this.each(function(i) {
- var self = jQuery(this);
- args[0] = value.call( this, i, table ? self.html() : undefined );
- self.domManip( args, table, callback );
- });
- }
-
- if ( this[0] ) {
- results = jQuery.buildFragment( args, this, scripts );
- fragment = results.fragment;
- first = fragment.firstChild;
-
- if ( fragment.childNodes.length === 1 ) {
- fragment = first;
- }
-
- if ( first ) {
- table = table && jQuery.nodeName( first, "tr" );
-
- // Use the original fragment for the last item instead of the first because it can end up
- // being emptied incorrectly in certain situations (#8070).
- // Fragments from the fragment cache must always be cloned and never used in place.
- for ( iNoClone = results.cacheable || l - 1; i < l; i++ ) {
- callback.call(
- table && jQuery.nodeName( this[i], "table" ) ?
- findOrAppend( this[i], "tbody" ) :
- this[i],
- i === iNoClone ?
- fragment :
- jQuery.clone( fragment, true, true )
- );
- }
- }
-
- // Fix #11809: Avoid leaking memory
- fragment = first = null;
-
- if ( scripts.length ) {
- jQuery.each( scripts, function( i, elem ) {
- if ( elem.src ) {
- if ( jQuery.ajax ) {
- jQuery.ajax({
- url: elem.src,
- type: "GET",
- dataType: "script",
- async: false,
- global: false,
- "throws": true
- });
- } else {
- jQuery.error("no ajax");
- }
- } else {
- jQuery.globalEval( ( elem.text || elem.textContent || elem.innerHTML || "" ).replace( rcleanScript, "" ) );
- }
-
- if ( elem.parentNode ) {
- elem.parentNode.removeChild( elem );
- }
- });
- }
- }
-
- return this;
- }
-});
-
-function findOrAppend( elem, tag ) {
- return elem.getElementsByTagName( tag )[0] || elem.appendChild( elem.ownerDocument.createElement( tag ) );
-}
-
-function cloneCopyEvent( src, dest ) {
-
- if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) {
- return;
- }
-
- var type, i, l,
- oldData = jQuery._data( src ),
- curData = jQuery._data( dest, oldData ),
- events = oldData.events;
-
- if ( events ) {
- delete curData.handle;
- curData.events = {};
-
- for ( type in events ) {
- for ( i = 0, l = events[ type ].length; i < l; i++ ) {
- jQuery.event.add( dest, type, events[ type ][ i ] );
- }
- }
- }
-
- // make the cloned public data object a copy of the original
- if ( curData.data ) {
- curData.data = jQuery.extend( {}, curData.data );
- }
-}
-
-function cloneFixAttributes( src, dest ) {
- var nodeName;
-
- // We do not need to do anything for non-Elements
- if ( dest.nodeType !== 1 ) {
- return;
- }
-
- // clearAttributes removes the attributes, which we don't want,
- // but also removes the attachEvent events, which we *do* want
- if ( dest.clearAttributes ) {
- dest.clearAttributes();
- }
-
- // mergeAttributes, in contrast, only merges back on the
- // original attributes, not the events
- if ( dest.mergeAttributes ) {
- dest.mergeAttributes( src );
- }
-
- nodeName = dest.nodeName.toLowerCase();
-
- if ( nodeName === "object" ) {
- // IE6-10 improperly clones children of object elements using classid.
- // IE10 throws NoModificationAllowedError if parent is null, #12132.
- if ( dest.parentNode ) {
- dest.outerHTML = src.outerHTML;
- }
-
- // This path appears unavoidable for IE9. When cloning an object
- // element in IE9, the outerHTML strategy above is not sufficient.
- // If the src has innerHTML and the destination does not,
- // copy the src.innerHTML into the dest.innerHTML. #10324
- if ( jQuery.support.html5Clone && (src.innerHTML && !jQuery.trim(dest.innerHTML)) ) {
- dest.innerHTML = src.innerHTML;
- }
-
- } else if ( nodeName === "input" && rcheckableType.test( src.type ) ) {
- // IE6-8 fails to persist the checked state of a cloned checkbox
- // or radio button. Worse, IE6-7 fail to give the cloned element
- // a checked appearance if the defaultChecked value isn't also set
-
- dest.defaultChecked = dest.checked = src.checked;
-
- // IE6-7 get confused and end up setting the value of a cloned
- // checkbox/radio button to an empty string instead of "on"
- if ( dest.value !== src.value ) {
- dest.value = src.value;
- }
-
- // IE6-8 fails to return the selected option to the default selected
- // state when cloning options
- } else if ( nodeName === "option" ) {
- dest.selected = src.defaultSelected;
-
- // IE6-8 fails to set the defaultValue to the correct value when
- // cloning other types of input fields
- } else if ( nodeName === "input" || nodeName === "textarea" ) {
- dest.defaultValue = src.defaultValue;
-
- // IE blanks contents when cloning scripts
- } else if ( nodeName === "script" && dest.text !== src.text ) {
- dest.text = src.text;
- }
-
- // Event data gets referenced instead of copied if the expando
- // gets copied too
- dest.removeAttribute( jQuery.expando );
-}
-
-jQuery.buildFragment = function( args, context, scripts ) {
- var fragment, cacheable, cachehit,
- first = args[ 0 ];
-
- // Set context from what may come in as undefined or a jQuery collection or a node
- // Updated to fix #12266 where accessing context[0] could throw an exception in IE9/10 &
- // also doubles as fix for #8950 where plain objects caused createDocumentFragment exception
- context = context || document;
- context = !context.nodeType && context[0] || context;
- context = context.ownerDocument || context;
-
- // Only cache "small" (1/2 KB) HTML strings that are associated with the main document
- // Cloning options loses the selected state, so don't cache them
- // IE 6 doesn't like it when you put <object> or <embed> elements in a fragment
- // Also, WebKit does not clone 'checked' attributes on cloneNode, so don't cache
- // Lastly, IE6,7,8 will not correctly reuse cached fragments that were created from unknown elems #10501
- if ( args.length === 1 && typeof first === "string" && first.length < 512 && context === document &&
- first.charAt(0) === "<" && !rnocache.test( first ) &&
- (jQuery.support.checkClone || !rchecked.test( first )) &&
- (jQuery.support.html5Clone || !rnoshimcache.test( first )) ) {
-
- // Mark cacheable and look for a hit
- cacheable = true;
- fragment = jQuery.fragments[ first ];
- cachehit = fragment !== undefined;
- }
-
- if ( !fragment ) {
- fragment = context.createDocumentFragment();
- jQuery.clean( args, context, fragment, scripts );
-
- // Update the cache, but only store false
- // unless this is a second parsing of the same content
- if ( cacheable ) {
- jQuery.fragments[ first ] = cachehit && fragment;
- }
- }
-
- return { fragment: fragment, cacheable: cacheable };
-};
-
-jQuery.fragments = {};
-
-jQuery.each({
- appendTo: "append",
- prependTo: "prepend",
- insertBefore: "before",
- insertAfter: "after",
- replaceAll: "replaceWith"
-}, function( name, original ) {
- jQuery.fn[ name ] = function( selector ) {
- var elems,
- i = 0,
- ret = [],
- insert = jQuery( selector ),
- l = insert.length,
- parent = this.length === 1 && this[0].parentNode;
-
- if ( (parent == null || parent && parent.nodeType === 11 && parent.childNodes.length === 1) && l === 1 ) {
- insert[ original ]( this[0] );
- return this;
- } else {
- for ( ; i < l; i++ ) {
- elems = ( i > 0 ? this.clone(true) : this ).get();
- jQuery( insert[i] )[ original ]( elems );
- ret = ret.concat( elems );
- }
-
- return this.pushStack( ret, name, insert.selector );
- }
- };
-});
-
-function getAll( elem ) {
- if ( typeof elem.getElementsByTagName !== "undefined" ) {
- return elem.getElementsByTagName( "*" );
-
- } else if ( typeof elem.querySelectorAll !== "undefined" ) {
- return elem.querySelectorAll( "*" );
-
- } else {
- return [];
- }
-}
-
-// Used in clean, fixes the defaultChecked property
-function fixDefaultChecked( elem ) {
- if ( rcheckableType.test( elem.type ) ) {
- elem.defaultChecked = elem.checked;
- }
-}
-
-jQuery.extend({
- clone: function( elem, dataAndEvents, deepDataAndEvents ) {
- var srcElements,
- destElements,
- i,
- clone;
-
- if ( jQuery.support.html5Clone || jQuery.isXMLDoc(elem) || !rnoshimcache.test( "<" + elem.nodeName + ">" ) ) {
- clone = elem.cloneNode( true );
-
- // IE<=8 does not properly clone detached, unknown element nodes
- } else {
- fragmentDiv.innerHTML = elem.outerHTML;
- fragmentDiv.removeChild( clone = fragmentDiv.firstChild );
- }
-
- if ( (!jQuery.support.noCloneEvent || !jQuery.support.noCloneChecked) &&
- (elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) {
- // IE copies events bound via attachEvent when using cloneNode.
- // Calling detachEvent on the clone will also remove the events
- // from the original. In order to get around this, we use some
- // proprietary methods to clear the events. Thanks to MooTools
- // guys for this hotness.
-
- cloneFixAttributes( elem, clone );
-
- // Using Sizzle here is crazy slow, so we use getElementsByTagName instead
- srcElements = getAll( elem );
- destElements = getAll( clone );
-
- // Weird iteration because IE will replace the length property
- // with an element if you are cloning the body and one of the
- // elements on the page has a name or id of "length"
- for ( i = 0; srcElements[i]; ++i ) {
- // Ensure that the destination node is not null; Fixes #9587
- if ( destElements[i] ) {
- cloneFixAttributes( srcElements[i], destElements[i] );
- }
- }
- }
-
- // Copy the events from the original to the clone
- if ( dataAndEvents ) {
- cloneCopyEvent( elem, clone );
-
- if ( deepDataAndEvents ) {
- srcElements = getAll( elem );
- destElements = getAll( clone );
-
- for ( i = 0; srcElements[i]; ++i ) {
- cloneCopyEvent( srcElements[i], destElements[i] );
- }
- }
- }
-
- srcElements = destElements = null;
-
- // Return the cloned set
- return clone;
- },
-
- clean: function( elems, context, fragment, scripts ) {
- var i, j, elem, tag, wrap, depth, div, hasBody, tbody, len, handleScript, jsTags,
- safe = context === document && safeFragment,
- ret = [];
-
- // Ensure that context is a document
- if ( !context || typeof context.createDocumentFragment === "undefined" ) {
- context = document;
- }
-
- // Use the already-created safe fragment if context permits
- for ( i = 0; (elem = elems[i]) != null; i++ ) {
- if ( typeof elem === "number" ) {
- elem += "";
- }
-
- if ( !elem ) {
- continue;
- }
-
- // Convert html string into DOM nodes
- if ( typeof elem === "string" ) {
- if ( !rhtml.test( elem ) ) {
- elem = context.createTextNode( elem );
- } else {
- // Ensure a safe container in which to render the html
- safe = safe || createSafeFragment( context );
- div = context.createElement("div");
- safe.appendChild( div );
-
- // Fix "XHTML"-style tags in all browsers
- elem = elem.replace(rxhtmlTag, "<$1></$2>");
-
- // Go to html and back, then peel off extra wrappers
- tag = ( rtagName.exec( elem ) || ["", ""] )[1].toLowerCase();
- wrap = wrapMap[ tag ] || wrapMap._default;
- depth = wrap[0];
- div.innerHTML = wrap[1] + elem + wrap[2];
-
- // Move to the right depth
- while ( depth-- ) {
- div = div.lastChild;
- }
-
- // Remove IE's autoinserted <tbody> from table fragments
- if ( !jQuery.support.tbody ) {
-
- // String was a <table>, *may* have spurious <tbody>
- hasBody = rtbody.test(elem);
- tbody = tag === "table" && !hasBody ?
- div.firstChild && div.firstChild.childNodes :
-
- // String was a bare <thead> or <tfoot>
- wrap[1] === "<table>" && !hasBody ?
- div.childNodes :
- [];
-
- for ( j = tbody.length - 1; j >= 0 ; --j ) {
- if ( jQuery.nodeName( tbody[ j ], "tbody" ) && !tbody[ j ].childNodes.length ) {
- tbody[ j ].parentNode.removeChild( tbody[ j ] );
- }
- }
- }
-
- // IE completely kills leading whitespace when innerHTML is used
- if ( !jQuery.support.leadingWhitespace && rleadingWhitespace.test( elem ) ) {
- div.insertBefore( context.createTextNode( rleadingWhitespace.exec(elem)[0] ), div.firstChild );
- }
-
- elem = div.childNodes;
-
- // Take out of fragment container (we need a fresh div each time)
- div.parentNode.removeChild( div );
- }
- }
-
- if ( elem.nodeType ) {
- ret.push( elem );
- } else {
- jQuery.merge( ret, elem );
- }
- }
-
- // Fix #11356: Clear elements from safeFragment
- if ( div ) {
- elem = div = safe = null;
- }
-
- // Reset defaultChecked for any radios and checkboxes
- // about to be appended to the DOM in IE 6/7 (#8060)
- if ( !jQuery.support.appendChecked ) {
- for ( i = 0; (elem = ret[i]) != null; i++ ) {
- if ( jQuery.nodeName( elem, "input" ) ) {
- fixDefaultChecked( elem );
- } else if ( typeof elem.getElementsByTagName !== "undefined" ) {
- jQuery.grep( elem.getElementsByTagName("input"), fixDefaultChecked );
- }
- }
- }
-
- // Append elements to a provided document fragment
- if ( fragment ) {
- // Special handling of each script element
- handleScript = function( elem ) {
- // Check if we consider it executable
- if ( !elem.type || rscriptType.test( elem.type ) ) {
- // Detach the script and store it in the scripts array (if provided) or the fragment
- // Return truthy to indicate that it has been handled
- return scripts ?
- scripts.push( elem.parentNode ? elem.parentNode.removeChild( elem ) : elem ) :
- fragment.appendChild( elem );
- }
- };
-
- for ( i = 0; (elem = ret[i]) != null; i++ ) {
- // Check if we're done after handling an executable script
- if ( !( jQuery.nodeName( elem, "script" ) && handleScript( elem ) ) ) {
- // Append to fragment and handle embedded scripts
- fragment.appendChild( elem );
- if ( typeof elem.getElementsByTagName !== "undefined" ) {
- // handleScript alters the DOM, so use jQuery.merge to ensure snapshot iteration
- jsTags = jQuery.grep( jQuery.merge( [], elem.getElementsByTagName("script") ), handleScript );
-
- // Splice the scripts into ret after their former ancestor and advance our index beyond them
- ret.splice.apply( ret, [i + 1, 0].concat( jsTags ) );
- i += jsTags.length;
- }
- }
- }
- }
-
- return ret;
- },
-
- cleanData: function( elems, /* internal */ acceptData ) {
- var data, id, elem, type,
- i = 0,
- internalKey = jQuery.expando,
- cache = jQuery.cache,
- deleteExpando = jQuery.support.deleteExpando,
- special = jQuery.event.special;
-
- for ( ; (elem = elems[i]) != null; i++ ) {
-
- if ( acceptData || jQuery.acceptData( elem ) ) {
-
- id = elem[ internalKey ];
- data = id && cache[ id ];
-
- if ( data ) {
- if ( data.events ) {
- for ( type in data.events ) {
- if ( special[ type ] ) {
- jQuery.event.remove( elem, type );
-
- // This is a shortcut to avoid jQuery.event.remove's overhead
- } else {
- jQuery.removeEvent( elem, type, data.handle );
- }
- }
- }
-
- // Remove cache only if it was not already removed by jQuery.event.remove
- if ( cache[ id ] ) {
-
- delete cache[ id ];
-
- // IE does not allow us to delete expando properties from nodes,
- // nor does it have a removeAttribute function on Document nodes;
- // we must handle all of these cases
- if ( deleteExpando ) {
- delete elem[ internalKey ];
-
- } else if ( elem.removeAttribute ) {
- elem.removeAttribute( internalKey );
-
- } else {
- elem[ internalKey ] = null;
- }
-
- jQuery.deletedIds.push( id );
- }
- }
- }
- }
- }
-});
-// Limit scope pollution from any deprecated API
-(function() {
-
-var matched, browser;
-
-// Use of jQuery.browser is frowned upon.
-// More details: http://api.jquery.com/jQuery.browser
-// jQuery.uaMatch maintained for back-compat
-jQuery.uaMatch = function( ua ) {
- ua = ua.toLowerCase();
-
- var match = /(chrome)[ \/]([\w.]+)/.exec( ua ) ||
- /(webkit)[ \/]([\w.]+)/.exec( ua ) ||
- /(opera)(?:.*version|)[ \/]([\w.]+)/.exec( ua ) ||
- /(msie) ([\w.]+)/.exec( ua ) ||
- ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec( ua ) ||
- [];
-
- return {
- browser: match[ 1 ] || "",
- version: match[ 2 ] || "0"
- };
-};
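-
-// Example (illustrative, assumed): for a desktop Chrome user agent such as
-//   "Mozilla/5.0 ... Chrome/23.0.1271.64 Safari/537.11"
-// the lowercased string matches /(chrome)[ \/]([\w.]+)/ first, so uaMatch
-// returns { browser: "chrome", version: "23.0.1271.64" }.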
-
-matched = jQuery.uaMatch( navigator.userAgent );
-browser = {};
-
-if ( matched.browser ) {
- browser[ matched.browser ] = true;
- browser.version = matched.version;
-}
-
-// Chrome is Webkit, but Webkit is also Safari.
-if ( browser.chrome ) {
- browser.webkit = true;
-} else if ( browser.webkit ) {
- browser.safari = true;
-}
-
-jQuery.browser = browser;
-
-jQuery.sub = function() {
- function jQuerySub( selector, context ) {
- return new jQuerySub.fn.init( selector, context );
- }
- jQuery.extend( true, jQuerySub, this );
- jQuerySub.superclass = this;
- jQuerySub.fn = jQuerySub.prototype = this();
- jQuerySub.fn.constructor = jQuerySub;
- jQuerySub.sub = this.sub;
- jQuerySub.fn.init = function init( selector, context ) {
- if ( context && context instanceof jQuery && !(context instanceof jQuerySub) ) {
- context = jQuerySub( context );
- }
-
- return jQuery.fn.init.call( this, selector, context, rootjQuerySub );
- };
- jQuerySub.fn.init.prototype = jQuerySub.fn;
- var rootjQuerySub = jQuerySub(document);
- return jQuerySub;
-};
-
-})();
-var curCSS, iframe, iframeDoc,
- ralpha = /alpha\([^)]*\)/i,
- ropacity = /opacity=([^)]*)/,
- rposition = /^(top|right|bottom|left)$/,
- // swappable if display is none or starts with table except "table", "table-cell", or "table-caption"
- // see here for display values: https://developer.mozilla.org/en-US/docs/CSS/display
- rdisplayswap = /^(none|table(?!-c[ea]).+)/,
- rmargin = /^margin/,
- rnumsplit = new RegExp( "^(" + core_pnum + ")(.*)$", "i" ),
- rnumnonpx = new RegExp( "^(" + core_pnum + ")(?!px)[a-z%]+$", "i" ),
- rrelNum = new RegExp( "^([-+])=(" + core_pnum + ")", "i" ),
- elemdisplay = { BODY: "block" },
-
- cssShow = { position: "absolute", visibility: "hidden", display: "block" },
- cssNormalTransform = {
- letterSpacing: 0,
- fontWeight: 400
- },
-
- cssExpand = [ "Top", "Right", "Bottom", "Left" ],
- cssPrefixes = [ "Webkit", "O", "Moz", "ms" ],
-
- eventsToggle = jQuery.fn.toggle;
-
-// return a css property mapped to a potentially vendor prefixed property
-function vendorPropName( style, name ) {
-
- // shortcut for names that are not vendor prefixed
- if ( name in style ) {
- return name;
- }
-
- // check for vendor prefixed names
- var capName = name.charAt(0).toUpperCase() + name.slice(1),
- origName = name,
- i = cssPrefixes.length;
-
- while ( i-- ) {
- name = cssPrefixes[ i ] + capName;
- if ( name in style ) {
- return name;
- }
- }
-
- return origName;
-}
-
-function isHidden( elem, el ) {
- elem = el || elem;
- return jQuery.css( elem, "display" ) === "none" || !jQuery.contains( elem.ownerDocument, elem );
-}
-
-function showHide( elements, show ) {
- var elem, display,
- values = [],
- index = 0,
- length = elements.length;
-
- for ( ; index < length; index++ ) {
- elem = elements[ index ];
- if ( !elem.style ) {
- continue;
- }
- values[ index ] = jQuery._data( elem, "olddisplay" );
- if ( show ) {
- // Reset the inline display of this element to learn if it is
- // being hidden by cascaded rules or not
- if ( !values[ index ] && elem.style.display === "none" ) {
- elem.style.display = "";
- }
-
- // Set elements which have been overridden with display: none
- // in a stylesheet to whatever the default browser style is
- // for such an element
- if ( elem.style.display === "" && isHidden( elem ) ) {
- values[ index ] = jQuery._data( elem, "olddisplay", css_defaultDisplay(elem.nodeName) );
- }
- } else {
- display = curCSS( elem, "display" );
-
- if ( !values[ index ] && display !== "none" ) {
- jQuery._data( elem, "olddisplay", display );
- }
- }
- }
-
- // Set the display of most of the elements in a second loop
- // to avoid the constant reflow
- for ( index = 0; index < length; index++ ) {
- elem = elements[ index ];
- if ( !elem.style ) {
- continue;
- }
- if ( !show || elem.style.display === "none" || elem.style.display === "" ) {
- elem.style.display = show ? values[ index ] || "" : "none";
- }
- }
-
- return elements;
-}
-
-jQuery.fn.extend({
- css: function( name, value ) {
- return jQuery.access( this, function( elem, name, value ) {
- return value !== undefined ?
- jQuery.style( elem, name, value ) :
- jQuery.css( elem, name );
- }, name, value, arguments.length > 1 );
- },
- show: function() {
- return showHide( this, true );
- },
- hide: function() {
- return showHide( this );
- },
- toggle: function( state, fn2 ) {
- var bool = typeof state === "boolean";
-
- if ( jQuery.isFunction( state ) && jQuery.isFunction( fn2 ) ) {
- return eventsToggle.apply( this, arguments );
- }
-
- return this.each(function() {
- if ( bool ? state : isHidden( this ) ) {
- jQuery( this ).show();
- } else {
- jQuery( this ).hide();
- }
- });
- }
-});
-
-jQuery.extend({
- // Add in style property hooks for overriding the default
- // behavior of getting and setting a style property
- cssHooks: {
- opacity: {
- get: function( elem, computed ) {
- if ( computed ) {
- // We should always get a number back from opacity
- var ret = curCSS( elem, "opacity" );
- return ret === "" ? "1" : ret;
-
- }
- }
- }
- },
-
- // Exclude the following css properties to add px
- cssNumber: {
- "fillOpacity": true,
- "fontWeight": true,
- "lineHeight": true,
- "opacity": true,
- "orphans": true,
- "widows": true,
- "zIndex": true,
- "zoom": true
- },
-
- // Add in properties whose names you wish to fix before
- // setting or getting the value
- cssProps: {
- // normalize float css property
- "float": jQuery.support.cssFloat ? "cssFloat" : "styleFloat"
- },
-
- // Get and set the style property on a DOM Node
- style: function( elem, name, value, extra ) {
- // Don't set styles on text and comment nodes
- if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) {
- return;
- }
-
- // Make sure that we're working with the right name
- var ret, type, hooks,
- origName = jQuery.camelCase( name ),
- style = elem.style;
-
- name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( style, origName ) );
-
- // gets hook for the prefixed version
- // followed by the unprefixed version
- hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];
-
- // Check if we're setting a value
- if ( value !== undefined ) {
- type = typeof value;
-
- // convert relative number strings (+= or -=) to relative numbers. #7345
- if ( type === "string" && (ret = rrelNum.exec( value )) ) {
- value = ( ret[1] + 1 ) * ret[2] + parseFloat( jQuery.css( elem, name ) );
- // Fixes bug #9237
- type = "number";
- }
-
- // Make sure that NaN and null values aren't set. See: #7116
- if ( value == null || type === "number" && isNaN( value ) ) {
- return;
- }
-
- // If a number was passed in, add 'px' to the value (except for certain CSS properties)
- if ( type === "number" && !jQuery.cssNumber[ origName ] ) {
- value += "px";
- }
-
- // If a hook was provided, use that value, otherwise just set the specified value
- if ( !hooks || !("set" in hooks) || (value = hooks.set( elem, value, extra )) !== undefined ) {
- // Wrapped to prevent IE from throwing errors when 'invalid' values are provided
- // Fixes bug #5509
- try {
- style[ name ] = value;
- } catch(e) {}
- }
-
- } else {
- // If a hook was provided get the non-computed value from there
- if ( hooks && "get" in hooks && (ret = hooks.get( elem, false, extra )) !== undefined ) {
- return ret;
- }
-
- // Otherwise just get the value from the style object
- return style[ name ];
- }
- },
-
- css: function( elem, name, numeric, extra ) {
- var val, num, hooks,
- origName = jQuery.camelCase( name );
-
- // Make sure that we're working with the right name
- name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( elem.style, origName ) );
-
- // gets hook for the prefixed version
- // followed by the unprefixed version
- hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];
-
- // If a hook was provided get the computed value from there
- if ( hooks && "get" in hooks ) {
- val = hooks.get( elem, true, extra );
- }
-
- // Otherwise, if a way to get the computed value exists, use that
- if ( val === undefined ) {
- val = curCSS( elem, name );
- }
-
- //convert "normal" to computed value
- if ( val === "normal" && name in cssNormalTransform ) {
- val = cssNormalTransform[ name ];
- }
-
- // Return, converting to number if forced or a qualifier was provided and val looks numeric
- if ( numeric || extra !== undefined ) {
- num = parseFloat( val );
- return numeric || jQuery.isNumeric( num ) ? num || 0 : val;
- }
- return val;
- },
-
- // A method for quickly swapping in/out CSS properties to get correct calculations
- swap: function( elem, options, callback ) {
- var ret, name,
- old = {};
-
- // Remember the old values, and insert the new ones
- for ( name in options ) {
- old[ name ] = elem.style[ name ];
- elem.style[ name ] = options[ name ];
- }
-
- ret = callback.call( elem );
-
- // Revert the old values
- for ( name in options ) {
- elem.style[ name ] = old[ name ];
- }
-
- return ret;
- }
-});
-
-// NOTE: To any future maintainer, we've included the window.getComputedStyle
-// check because jsdom on node.js will break without it.
-if ( window.getComputedStyle ) {
- curCSS = function( elem, name ) {
- var ret, width, minWidth, maxWidth,
- computed = window.getComputedStyle( elem, null ),
- style = elem.style;
-
- if ( computed ) {
-
- // getPropertyValue is only needed for .css('filter') in IE9, see #12537
- ret = computed.getPropertyValue( name ) || computed[ name ];
-
- if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) {
- ret = jQuery.style( elem, name );
- }
-
- // A tribute to the "awesome hack by Dean Edwards"
- // Chrome < 17 and Safari 5.0 uses "computed value" instead of "used value" for margin-right
- // Safari 5.1.7 (at least) returns percentage for a larger set of values, but width seems to be reliably pixels
- // this is against the CSSOM draft spec: http://dev.w3.org/csswg/cssom/#resolved-values
- if ( rnumnonpx.test( ret ) && rmargin.test( name ) ) {
- width = style.width;
- minWidth = style.minWidth;
- maxWidth = style.maxWidth;
-
- style.minWidth = style.maxWidth = style.width = ret;
- ret = computed.width;
-
- style.width = width;
- style.minWidth = minWidth;
- style.maxWidth = maxWidth;
- }
- }
-
- return ret;
- };
-} else if ( document.documentElement.currentStyle ) {
- curCSS = function( elem, name ) {
- var left, rsLeft,
- ret = elem.currentStyle && elem.currentStyle[ name ],
- style = elem.style;
-
- // Avoid setting ret to empty string here
- // so we don't default to auto
- if ( ret == null && style && style[ name ] ) {
- ret = style[ name ];
- }
-
- // From the awesome hack by Dean Edwards
- // http://erik.eae.net/archives/2007/07/27/18.54.15/#comment-102291
-
- // If we're not dealing with a regular pixel number
- // but a number that has a weird ending, we need to convert it to pixels
- // but not position css attributes, as those are proportional to the parent element instead
- // and we can't measure the parent instead because it might trigger a "stacking dolls" problem
- if ( rnumnonpx.test( ret ) && !rposition.test( name ) ) {
-
- // Remember the original values
- left = style.left;
- rsLeft = elem.runtimeStyle && elem.runtimeStyle.left;
-
- // Put in the new values to get a computed value out
- if ( rsLeft ) {
- elem.runtimeStyle.left = elem.currentStyle.left;
- }
- style.left = name === "fontSize" ? "1em" : ret;
- ret = style.pixelLeft + "px";
-
- // Revert the changed values
- style.left = left;
- if ( rsLeft ) {
- elem.runtimeStyle.left = rsLeft;
- }
- }
-
- return ret === "" ? "auto" : ret;
- };
-}
-
-function setPositiveNumber( elem, value, subtract ) {
- var matches = rnumsplit.exec( value );
- return matches ?
- Math.max( 0, matches[ 1 ] - ( subtract || 0 ) ) + ( matches[ 2 ] || "px" ) :
- value;
-}
-
-function augmentWidthOrHeight( elem, name, extra, isBorderBox ) {
- var i = extra === ( isBorderBox ? "border" : "content" ) ?
- // If we already have the right measurement, avoid augmentation
- 4 :
- // Otherwise initialize for horizontal or vertical properties
- name === "width" ? 1 : 0,
-
- val = 0;
-
- for ( ; i < 4; i += 2 ) {
- // both box models exclude margin, so add it if we want it
- if ( extra === "margin" ) {
- // we use jQuery.css instead of curCSS here
- // because of the reliableMarginRight CSS hook!
- val += jQuery.css( elem, extra + cssExpand[ i ], true );
- }
-
- // From this point on we use curCSS for maximum performance (relevant in animations)
- if ( isBorderBox ) {
- // border-box includes padding, so remove it if we want content
- if ( extra === "content" ) {
- val -= parseFloat( curCSS( elem, "padding" + cssExpand[ i ] ) ) || 0;
- }
-
- // at this point, extra isn't border nor margin, so remove border
- if ( extra !== "margin" ) {
- val -= parseFloat( curCSS( elem, "border" + cssExpand[ i ] + "Width" ) ) || 0;
- }
- } else {
- // at this point, extra isn't content, so add padding
- val += parseFloat( curCSS( elem, "padding" + cssExpand[ i ] ) ) || 0;
-
- // at this point, extra isn't content nor padding, so add border
- if ( extra !== "padding" ) {
- val += parseFloat( curCSS( elem, "border" + cssExpand[ i ] + "Width" ) ) || 0;
- }
- }
- }
-
- return val;
-}
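-
-// Worked example (assumed values, for illustration): asking for "width" with
-// extra === "content" on a 100px-wide border-box element with 10px padding
-// and a 2px border per side loops over the Right/Left edges and returns
-//   -( 10 + 10 ) - ( 2 + 2 ) = -24
-// so getWidthOrHeight() below reports 100 + ( -24 ) = "76px" of content.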
-
-function getWidthOrHeight( elem, name, extra ) {
-
- // Start with offset property, which is equivalent to the border-box value
- var val = name === "width" ? elem.offsetWidth : elem.offsetHeight,
- valueIsBorderBox = true,
- isBorderBox = jQuery.support.boxSizing && jQuery.css( elem, "boxSizing" ) === "border-box";
-
- // some non-html elements return undefined for offsetWidth, so check for null/undefined
- // svg - https://bugzilla.mozilla.org/show_bug.cgi?id=649285
- // MathML - https://bugzilla.mozilla.org/show_bug.cgi?id=491668
- if ( val <= 0 || val == null ) {
- // Fall back to computed then uncomputed css if necessary
- val = curCSS( elem, name );
- if ( val < 0 || val == null ) {
- val = elem.style[ name ];
- }
-
- // Computed unit is not pixels. Stop here and return.
- if ( rnumnonpx.test(val) ) {
- return val;
- }
-
- // we need the check for style in case a browser that returns unreliable values
- // for getComputedStyle silently falls back to the reliable elem.style
- valueIsBorderBox = isBorderBox && ( jQuery.support.boxSizingReliable || val === elem.style[ name ] );
-
- // Normalize "", auto, and prepare for extra
- val = parseFloat( val ) || 0;
- }
-
- // use the active box-sizing model to add/subtract irrelevant styles
- return ( val +
- augmentWidthOrHeight(
- elem,
- name,
- extra || ( isBorderBox ? "border" : "content" ),
- valueIsBorderBox
- )
- ) + "px";
-}
-
-
-// Try to determine the default display value of an element
-function css_defaultDisplay( nodeName ) {
- if ( elemdisplay[ nodeName ] ) {
- return elemdisplay[ nodeName ];
- }
-
- var elem = jQuery( "<" + nodeName + ">" ).appendTo( document.body ),
- display = elem.css("display");
- elem.remove();
-
- // If the simple way fails,
- // get element's real default display by attaching it to a temp iframe
- if ( display === "none" || display === "" ) {
- // Use the already-created iframe if possible
- iframe = document.body.appendChild(
- iframe || jQuery.extend( document.createElement("iframe"), {
- frameBorder: 0,
- width: 0,
- height: 0
- })
- );
-
- // Create a cacheable copy of the iframe document on first call.
- // IE and Opera will allow us to reuse the iframeDoc without re-writing the fake HTML
- // document to it; WebKit & Firefox won't allow reusing the iframe document.
- if ( !iframeDoc || !iframe.createElement ) {
- iframeDoc = ( iframe.contentWindow || iframe.contentDocument ).document;
- iframeDoc.write("<!doctype html><html><body>");
- iframeDoc.close();
- }
-
- elem = iframeDoc.body.appendChild( iframeDoc.createElement(nodeName) );
-
- display = curCSS( elem, "display" );
- document.body.removeChild( iframe );
- }
-
- // Store the correct default display
- elemdisplay[ nodeName ] = display;
-
- return display;
-}
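-
-// Example (illustrative, assumed): css_defaultDisplay( "DIV" ) returns "block"
-// and css_defaultDisplay( "SPAN" ) returns "inline" via the quick in-document
-// probe; the hidden-iframe fallback only runs when a stylesheet rule (e.g. a
-// page-wide "div { display: none }") hides the temporary element.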
-
-jQuery.each([ "height", "width" ], function( i, name ) {
- jQuery.cssHooks[ name ] = {
- get: function( elem, computed, extra ) {
- if ( computed ) {
- // certain elements can have dimension info if we invisibly show them
- // however, it must have a current display style that would benefit from this
- if ( elem.offsetWidth === 0 && rdisplayswap.test( curCSS( elem, "display" ) ) ) {
- return jQuery.swap( elem, cssShow, function() {
- return getWidthOrHeight( elem, name, extra );
- });
- } else {
- return getWidthOrHeight( elem, name, extra );
- }
- }
- },
-
- set: function( elem, value, extra ) {
- return setPositiveNumber( elem, value, extra ?
- augmentWidthOrHeight(
- elem,
- name,
- extra,
- jQuery.support.boxSizing && jQuery.css( elem, "boxSizing" ) === "border-box"
- ) : 0
- );
- }
- };
-});
-
-if ( !jQuery.support.opacity ) {
- jQuery.cssHooks.opacity = {
- get: function( elem, computed ) {
- // IE uses filters for opacity
- return ropacity.test( (computed && elem.currentStyle ? elem.currentStyle.filter : elem.style.filter) || "" ) ?
- ( 0.01 * parseFloat( RegExp.$1 ) ) + "" :
- computed ? "1" : "";
- },
-
- set: function( elem, value ) {
- var style = elem.style,
- currentStyle = elem.currentStyle,
- opacity = jQuery.isNumeric( value ) ? "alpha(opacity=" + value * 100 + ")" : "",
- filter = currentStyle && currentStyle.filter || style.filter || "";
-
- // IE has trouble with opacity if it does not have layout
- // Force it by setting the zoom level
- style.zoom = 1;
-
- // if setting opacity to 1, and no other filters exist - attempt to remove filter attribute #6652
- if ( value >= 1 && jQuery.trim( filter.replace( ralpha, "" ) ) === "" &&
- style.removeAttribute ) {
-
- // Setting style.filter to null, "" & " " still leaves "filter:" in the cssText
- // if "filter:" is present at all, clearType is disabled, we want to avoid this
- // style.removeAttribute is IE Only, but so apparently is this code path...
- style.removeAttribute( "filter" );
-
- // if there is no filter style applied in a css rule, we are done
- if ( currentStyle && !currentStyle.filter ) {
- return;
- }
- }
-
- // otherwise, set new filter values
- style.filter = ralpha.test( filter ) ?
- filter.replace( ralpha, opacity ) :
- filter + " " + opacity;
- }
- };
-}
-
-// These hooks cannot be added until DOM ready because the support test
-// for it is not run until after DOM ready
-jQuery(function() {
- if ( !jQuery.support.reliableMarginRight ) {
- jQuery.cssHooks.marginRight = {
- get: function( elem, computed ) {
- // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right
- // Work around by temporarily setting element display to inline-block
- return jQuery.swap( elem, { "display": "inline-block" }, function() {
- if ( computed ) {
- return curCSS( elem, "marginRight" );
- }
- });
- }
- };
- }
-
- // Webkit bug: https://bugs.webkit.org/show_bug.cgi?id=29084
- // getComputedStyle returns percent when specified for top/left/bottom/right
- // rather than make the css module depend on the offset module, we just check for it here
- if ( !jQuery.support.pixelPosition && jQuery.fn.position ) {
- jQuery.each( [ "top", "left" ], function( i, prop ) {
- jQuery.cssHooks[ prop ] = {
- get: function( elem, computed ) {
- if ( computed ) {
- var ret = curCSS( elem, prop );
- // if curCSS returns percentage, fallback to offset
- return rnumnonpx.test( ret ) ? jQuery( elem ).position()[ prop ] + "px" : ret;
- }
- }
- };
- });
- }
-
-});
-
-if ( jQuery.expr && jQuery.expr.filters ) {
- jQuery.expr.filters.hidden = function( elem ) {
- return ( elem.offsetWidth === 0 && elem.offsetHeight === 0 ) || (!jQuery.support.reliableHiddenOffsets && ((elem.style && elem.style.display) || curCSS( elem, "display" )) === "none");
- };
-
- jQuery.expr.filters.visible = function( elem ) {
- return !jQuery.expr.filters.hidden( elem );
- };
-}
-
-// These hooks are used by animate to expand properties
-jQuery.each({
- margin: "",
- padding: "",
- border: "Width"
-}, function( prefix, suffix ) {
- jQuery.cssHooks[ prefix + suffix ] = {
- expand: function( value ) {
- var i,
-
- // assumes a single number if not a string
- parts = typeof value === "string" ? value.split(" ") : [ value ],
- expanded = {};
-
- for ( i = 0; i < 4; i++ ) {
- expanded[ prefix + cssExpand[ i ] + suffix ] =
- parts[ i ] || parts[ i - 2 ] || parts[ 0 ];
- }
-
- return expanded;
- }
- };
-
- if ( !rmargin.test( prefix ) ) {
- jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber;
- }
-});
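-
-// Expansion sketch (illustrative, assumed): the expand hook applies the usual
-// CSS shorthand rules ( parts[ i ] || parts[ i - 2 ] || parts[ 0 ] ), e.g.
-//   jQuery.cssHooks.margin.expand( "10px 20px" )
-// yields { marginTop: "10px", marginRight: "20px",
-//          marginBottom: "10px", marginLeft: "20px" }.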
-var r20 = /%20/g,
- rbracket = /\[\]$/,
- rCRLF = /\r?\n/g,
- rinput = /^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,
- rselectTextarea = /^(?:select|textarea)/i;
-
-jQuery.fn.extend({
- serialize: function() {
- return jQuery.param( this.serializeArray() );
- },
- serializeArray: function() {
- return this.map(function(){
- return this.elements ? jQuery.makeArray( this.elements ) : this;
- })
- .filter(function(){
- return this.name && !this.disabled &&
- ( this.checked || rselectTextarea.test( this.nodeName ) ||
- rinput.test( this.type ) );
- })
- .map(function( i, elem ){
- var val = jQuery( this ).val();
-
- return val == null ?
- null :
- jQuery.isArray( val ) ?
- jQuery.map( val, function( val, i ){
- return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
- }) :
- { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
- }).get();
- }
-});
-
-// Serialize an array of form elements or a set of
-// key/values into a query string
-jQuery.param = function( a, traditional ) {
- var prefix,
- s = [],
- add = function( key, value ) {
- // If value is a function, invoke it and return its value
- value = jQuery.isFunction( value ) ? value() : ( value == null ? "" : value );
- s[ s.length ] = encodeURIComponent( key ) + "=" + encodeURIComponent( value );
- };
-
- // Set traditional to true for jQuery <= 1.3.2 behavior.
- if ( traditional === undefined ) {
- traditional = jQuery.ajaxSettings && jQuery.ajaxSettings.traditional;
- }
-
- // If an array was passed in, assume that it is an array of form elements.
- if ( jQuery.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) {
- // Serialize the form elements
- jQuery.each( a, function() {
- add( this.name, this.value );
- });
-
- } else {
- // If traditional, encode the "old" way (the way 1.3.2 or older
- // did it), otherwise encode params recursively.
- for ( prefix in a ) {
- buildParams( prefix, a[ prefix ], traditional, add );
- }
- }
-
- // Return the resulting serialization
- return s.join( "&" ).replace( r20, "+" );
-};
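-
-// Example output (illustrative, assumed):
-//   jQuery.param( { a: [ 1, 2 ], b: "x y" } )        // "a%5B%5D=1&a%5B%5D=2&b=x+y"
-//   jQuery.param( { a: [ 1, 2 ], b: "x y" }, true )  // "a=1&a=2&b=x+y"
-// i.e. nested keys get brackets unless "traditional" (jQuery <= 1.3.2) is set.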
-
-function buildParams( prefix, obj, traditional, add ) {
- var name;
-
- if ( jQuery.isArray( obj ) ) {
- // Serialize array item.
- jQuery.each( obj, function( i, v ) {
- if ( traditional || rbracket.test( prefix ) ) {
- // Treat each array item as a scalar.
- add( prefix, v );
-
- } else {
- // If array item is non-scalar (array or object), encode its
- // numeric index to resolve deserialization ambiguity issues.
- // Note that rack (as of 1.0.0) can't currently deserialize
- // nested arrays properly, and attempting to do so may cause
- // a server error. Possible fixes are to modify rack's
- // deserialization algorithm or to provide an option or flag
- // to force array serialization to be shallow.
- buildParams( prefix + "[" + ( typeof v === "object" ? i : "" ) + "]", v, traditional, add );
- }
- });
-
- } else if ( !traditional && jQuery.type( obj ) === "object" ) {
- // Serialize object item.
- for ( name in obj ) {
- buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add );
- }
-
- } else {
- // Serialize scalar item.
- add( prefix, obj );
- }
-}
-var
- // Document location
- ajaxLocParts,
- ajaxLocation,
-
- rhash = /#.*$/,
- rheaders = /^(.*?):[ \t]*([^\r\n]*)\r?$/mg, // IE leaves an \r character at EOL
- // #7653, #8125, #8152: local protocol detection
- rlocalProtocol = /^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/,
- rnoContent = /^(?:GET|HEAD)$/,
- rprotocol = /^\/\//,
- rquery = /\?/,
- rscript = /<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,
- rts = /([?&])_=[^&]*/,
- rurl = /^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+)|)|)/,
-
- // Keep a copy of the old load method
- _load = jQuery.fn.load,
-
- /* Prefilters
- * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example)
- * 2) These are called:
- * - BEFORE asking for a transport
- * - AFTER param serialization (s.data is a string if s.processData is true)
- * 3) key is the dataType
- * 4) the catchall symbol "*" can be used
- * 5) execution will start with transport dataType and THEN continue down to "*" if needed
- */
- prefilters = {},
-
- /* Transports bindings
- * 1) key is the dataType
- * 2) the catchall symbol "*" can be used
- * 3) selection will start with transport dataType and THEN go to "*" if needed
- */
- transports = {},
-
- // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression
- allTypes = ["*/"] + ["*"];
-
-// #8138, IE may throw an exception when accessing
-// a field from window.location if document.domain has been set
-try {
- ajaxLocation = location.href;
-} catch( e ) {
- // Use the href attribute of an A element
- // since IE will modify it given document.location
- ajaxLocation = document.createElement( "a" );
- ajaxLocation.href = "";
- ajaxLocation = ajaxLocation.href;
-}
-
-// Segment location into parts
-ajaxLocParts = rurl.exec( ajaxLocation.toLowerCase() ) || [];
-
-// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport
-function addToPrefiltersOrTransports( structure ) {
-
- // dataTypeExpression is optional and defaults to "*"
- return function( dataTypeExpression, func ) {
-
- if ( typeof dataTypeExpression !== "string" ) {
- func = dataTypeExpression;
- dataTypeExpression = "*";
- }
-
- var dataType, list, placeBefore,
- dataTypes = dataTypeExpression.toLowerCase().split( core_rspace ),
- i = 0,
- length = dataTypes.length;
-
- if ( jQuery.isFunction( func ) ) {
- // For each dataType in the dataTypeExpression
- for ( ; i < length; i++ ) {
- dataType = dataTypes[ i ];
-			// Check whether we're asked to add before
-			// any existing element
- placeBefore = /^\+/.test( dataType );
- if ( placeBefore ) {
- dataType = dataType.substr( 1 ) || "*";
- }
- list = structure[ dataType ] = structure[ dataType ] || [];
-			// then add to the structure accordingly
- list[ placeBefore ? "unshift" : "push" ]( func );
- }
- }
- };
-}
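
A sketch of how the registries built by this factory are consumed; the URL prefix check is invented for illustration:

jQuery.ajaxPrefilter( "json", function( options, originalOptions, jqXHR ) {
	// Runs after param serialization and before a transport is chosen.
	if ( /^legacy\//.test( options.url ) ) {
		return "jsonp"; // a string return value redirects to that dataType's handlers
	}
});

Prefixing the dataType with "+" (e.g. "+json") unshifts the handler so it runs before previously registered ones, per the placeBefore branch above.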
-
-// Base inspection function for prefilters and transports
-function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR,
- dataType /* internal */, inspected /* internal */ ) {
-
- dataType = dataType || options.dataTypes[ 0 ];
- inspected = inspected || {};
-
- inspected[ dataType ] = true;
-
- var selection,
- list = structure[ dataType ],
- i = 0,
- length = list ? list.length : 0,
- executeOnly = ( structure === prefilters );
-
- for ( ; i < length && ( executeOnly || !selection ); i++ ) {
- selection = list[ i ]( options, originalOptions, jqXHR );
-		// If we got redirected to another dataType,
-		// try it if we're only executing and haven't tried it already
- if ( typeof selection === "string" ) {
- if ( !executeOnly || inspected[ selection ] ) {
- selection = undefined;
- } else {
- options.dataTypes.unshift( selection );
- selection = inspectPrefiltersOrTransports(
- structure, options, originalOptions, jqXHR, selection, inspected );
- }
- }
- }
- // If we're only executing or nothing was selected
- // we try the catchall dataType if not done already
- if ( ( executeOnly || !selection ) && !inspected[ "*" ] ) {
- selection = inspectPrefiltersOrTransports(
- structure, options, originalOptions, jqXHR, "*", inspected );
- }
- // unnecessary when only executing (prefilters)
- // but it'll be ignored by the caller in that case
- return selection;
-}
-
-// A special extend for ajax options
-// that takes "flat" options (not to be deep extended)
-// Fixes #9887
-function ajaxExtend( target, src ) {
- var key, deep,
- flatOptions = jQuery.ajaxSettings.flatOptions || {};
- for ( key in src ) {
- if ( src[ key ] !== undefined ) {
- ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ];
- }
- }
- if ( deep ) {
- jQuery.extend( true, target, deep );
- }
-}
-
-jQuery.fn.load = function( url, params, callback ) {
- if ( typeof url !== "string" && _load ) {
- return _load.apply( this, arguments );
- }
-
- // Don't do a request if no elements are being requested
- if ( !this.length ) {
- return this;
- }
-
- var selector, type, response,
- self = this,
- off = url.indexOf(" ");
-
- if ( off >= 0 ) {
- selector = url.slice( off, url.length );
- url = url.slice( 0, off );
- }
-
- // If it's a function
- if ( jQuery.isFunction( params ) ) {
-
- // We assume that it's the callback
- callback = params;
- params = undefined;
-
- // Otherwise, build a param string
- } else if ( params && typeof params === "object" ) {
- type = "POST";
- }
-
- // Request the remote document
- jQuery.ajax({
- url: url,
-
-		// If the "type" variable is undefined, the "GET" method will be used
- type: type,
- dataType: "html",
- data: params,
- complete: function( jqXHR, status ) {
- if ( callback ) {
- self.each( callback, response || [ jqXHR.responseText, status, jqXHR ] );
- }
- }
- }).done(function( responseText ) {
-
- // Save response for use in complete callback
- response = arguments;
-
- // See if a selector was specified
- self.html( selector ?
-
- // Create a dummy div to hold the results
- jQuery("<div>")
-
- // inject the contents of the document in, removing the scripts
- // to avoid any 'Permission Denied' errors in IE
- .append( responseText.replace( rscript, "" ) )
-
- // Locate the specified elements
- .find( selector ) :
-
- // If not, just inject the full result
- responseText );
-
- });
-
- return this;
-};
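
A hedged usage sketch of the method above; the element id, URL and fragment selector are hypothetical:

// Fetch ajax/test.html, extract #container from the response, inject the fragment:
$( "#result" ).load( "ajax/test.html #container", function( responseText, status, jqXHR ) {
	// Invoked once per element in the set, after the HTML is in place.
});

Passing an object as params switches the request to POST, per the type = "POST" branch above.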
-
-// Attach a bunch of functions for handling common AJAX events
-jQuery.each( "ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split( " " ), function( i, o ){
- jQuery.fn[ o ] = function( f ){
- return this.on( o, f );
- };
-});
-
-jQuery.each( [ "get", "post" ], function( i, method ) {
- jQuery[ method ] = function( url, data, callback, type ) {
- // shift arguments if data argument was omitted
- if ( jQuery.isFunction( data ) ) {
- type = type || callback;
- callback = data;
- data = undefined;
- }
-
- return jQuery.ajax({
- type: method,
- url: url,
- data: data,
- success: callback,
- dataType: type
- });
- };
-});
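
The argument shifting above lets callers omit the data parameter; a sketch with an illustrative URL:

// These two calls are equivalent:
jQuery.get( "api/items", function( data ) { /* ... */ }, "json" );
jQuery.ajax({ type: "get", url: "api/items", success: function( data ) { /* ... */ }, dataType: "json" });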
-
-jQuery.extend({
-
- getScript: function( url, callback ) {
- return jQuery.get( url, undefined, callback, "script" );
- },
-
- getJSON: function( url, data, callback ) {
- return jQuery.get( url, data, callback, "json" );
- },
-
-	// Creates a full-fledged settings object into target
- // with both ajaxSettings and settings fields.
- // If target is omitted, writes into ajaxSettings.
- ajaxSetup: function( target, settings ) {
- if ( settings ) {
- // Building a settings object
- ajaxExtend( target, jQuery.ajaxSettings );
- } else {
- // Extending ajaxSettings
- settings = target;
- target = jQuery.ajaxSettings;
- }
- ajaxExtend( target, settings );
- return target;
- },
-
- ajaxSettings: {
- url: ajaxLocation,
- isLocal: rlocalProtocol.test( ajaxLocParts[ 1 ] ),
- global: true,
- type: "GET",
- contentType: "application/x-www-form-urlencoded; charset=UTF-8",
- processData: true,
- async: true,
- /*
- timeout: 0,
- data: null,
- dataType: null,
- username: null,
- password: null,
- cache: null,
- throws: false,
- traditional: false,
- headers: {},
- */
-
- accepts: {
- xml: "application/xml, text/xml",
- html: "text/html",
- text: "text/plain",
- json: "application/json, text/javascript",
- "*": allTypes
- },
-
- contents: {
- xml: /xml/,
- html: /html/,
- json: /json/
- },
-
- responseFields: {
- xml: "responseXML",
- text: "responseText"
- },
-
- // List of data converters
- // 1) key format is "source_type destination_type" (a single space in-between)
- // 2) the catchall symbol "*" can be used for source_type
- converters: {
-
- // Convert anything to text
- "* text": window.String,
-
- // Text to html (true = no transformation)
- "text html": true,
-
- // Evaluate text as a json expression
- "text json": jQuery.parseJSON,
-
- // Parse text as xml
- "text xml": jQuery.parseXML
- },
-
- // For options that shouldn't be deep extended:
- // you can add your own custom options here if
- // and when you create one that shouldn't be
- // deep extended (see ajaxExtend)
- flatOptions: {
- context: true,
- url: true
- }
- },
-
- ajaxPrefilter: addToPrefiltersOrTransports( prefilters ),
- ajaxTransport: addToPrefiltersOrTransports( transports ),
-
- // Main method
- ajax: function( url, options ) {
-
- // If url is an object, simulate pre-1.5 signature
- if ( typeof url === "object" ) {
- options = url;
- url = undefined;
- }
-
- // Force options to be an object
- options = options || {};
-
- var // ifModified key
- ifModifiedKey,
- // Response headers
- responseHeadersString,
- responseHeaders,
- // transport
- transport,
- // timeout handle
- timeoutTimer,
- // Cross-domain detection vars
- parts,
- // To know if global events are to be dispatched
- fireGlobals,
- // Loop variable
- i,
- // Create the final options object
- s = jQuery.ajaxSetup( {}, options ),
- // Callbacks context
- callbackContext = s.context || s,
- // Context for global events
- // It's the callbackContext if one was provided in the options
- // and if it's a DOM node or a jQuery collection
- globalEventContext = callbackContext !== s &&
- ( callbackContext.nodeType || callbackContext instanceof jQuery ) ?
- jQuery( callbackContext ) : jQuery.event,
- // Deferreds
- deferred = jQuery.Deferred(),
- completeDeferred = jQuery.Callbacks( "once memory" ),
- // Status-dependent callbacks
- statusCode = s.statusCode || {},
- // Headers (they are sent all at once)
- requestHeaders = {},
- requestHeadersNames = {},
- // The jqXHR state
- state = 0,
- // Default abort message
- strAbort = "canceled",
- // Fake xhr
- jqXHR = {
-
- readyState: 0,
-
- // Caches the header
- setRequestHeader: function( name, value ) {
- if ( !state ) {
- var lname = name.toLowerCase();
- name = requestHeadersNames[ lname ] = requestHeadersNames[ lname ] || name;
- requestHeaders[ name ] = value;
- }
- return this;
- },
-
- // Raw string
- getAllResponseHeaders: function() {
- return state === 2 ? responseHeadersString : null;
- },
-
- // Builds headers hashtable if needed
- getResponseHeader: function( key ) {
- var match;
- if ( state === 2 ) {
- if ( !responseHeaders ) {
- responseHeaders = {};
- while( ( match = rheaders.exec( responseHeadersString ) ) ) {
- responseHeaders[ match[1].toLowerCase() ] = match[ 2 ];
- }
- }
- match = responseHeaders[ key.toLowerCase() ];
- }
- return match === undefined ? null : match;
- },
-
- // Overrides response content-type header
- overrideMimeType: function( type ) {
- if ( !state ) {
- s.mimeType = type;
- }
- return this;
- },
-
- // Cancel the request
- abort: function( statusText ) {
- statusText = statusText || strAbort;
- if ( transport ) {
- transport.abort( statusText );
- }
- done( 0, statusText );
- return this;
- }
- };
-
- // Callback for when everything is done
- // It is defined here because jslint complains if it is declared
- // at the end of the function (which would be more logical and readable)
- function done( status, nativeStatusText, responses, headers ) {
- var isSuccess, success, error, response, modified,
- statusText = nativeStatusText;
-
- // Called once
- if ( state === 2 ) {
- return;
- }
-
- // State is "done" now
- state = 2;
-
- // Clear timeout if it exists
- if ( timeoutTimer ) {
- clearTimeout( timeoutTimer );
- }
-
- // Dereference transport for early garbage collection
- // (no matter how long the jqXHR object will be used)
- transport = undefined;
-
- // Cache response headers
- responseHeadersString = headers || "";
-
- // Set readyState
- jqXHR.readyState = status > 0 ? 4 : 0;
-
- // Get response data
- if ( responses ) {
- response = ajaxHandleResponses( s, jqXHR, responses );
- }
-
- // If successful, handle type chaining
- if ( status >= 200 && status < 300 || status === 304 ) {
-
- // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
- if ( s.ifModified ) {
-
- modified = jqXHR.getResponseHeader("Last-Modified");
- if ( modified ) {
- jQuery.lastModified[ ifModifiedKey ] = modified;
- }
- modified = jqXHR.getResponseHeader("Etag");
- if ( modified ) {
- jQuery.etag[ ifModifiedKey ] = modified;
- }
- }
-
- // If not modified
- if ( status === 304 ) {
-
- statusText = "notmodified";
- isSuccess = true;
-
- // If we have data
- } else {
-
- isSuccess = ajaxConvert( s, response );
- statusText = isSuccess.state;
- success = isSuccess.data;
- error = isSuccess.error;
- isSuccess = !error;
- }
- } else {
- // We extract error from statusText
- // then normalize statusText and status for non-aborts
- error = statusText;
- if ( !statusText || status ) {
- statusText = "error";
- if ( status < 0 ) {
- status = 0;
- }
- }
- }
-
- // Set data for the fake xhr object
- jqXHR.status = status;
- jqXHR.statusText = ( nativeStatusText || statusText ) + "";
-
- // Success/Error
- if ( isSuccess ) {
- deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] );
- } else {
- deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] );
- }
-
- // Status-dependent callbacks
- jqXHR.statusCode( statusCode );
- statusCode = undefined;
-
- if ( fireGlobals ) {
- globalEventContext.trigger( "ajax" + ( isSuccess ? "Success" : "Error" ),
- [ jqXHR, s, isSuccess ? success : error ] );
- }
-
- // Complete
- completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] );
-
- if ( fireGlobals ) {
- globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] );
- // Handle the global AJAX counter
- if ( !( --jQuery.active ) ) {
- jQuery.event.trigger( "ajaxStop" );
- }
- }
- }
-
- // Attach deferreds
- deferred.promise( jqXHR );
- jqXHR.success = jqXHR.done;
- jqXHR.error = jqXHR.fail;
- jqXHR.complete = completeDeferred.add;
-
- // Status-dependent callbacks
- jqXHR.statusCode = function( map ) {
- if ( map ) {
- var tmp;
- if ( state < 2 ) {
- for ( tmp in map ) {
- statusCode[ tmp ] = [ statusCode[tmp], map[tmp] ];
- }
- } else {
- tmp = map[ jqXHR.status ];
- jqXHR.always( tmp );
- }
- }
- return this;
- };
-
- // Remove hash character (#7531: and string promotion)
- // Add protocol if not provided (#5866: IE7 issue with protocol-less urls)
- // We also use the url parameter if available
- s.url = ( ( url || s.url ) + "" ).replace( rhash, "" ).replace( rprotocol, ajaxLocParts[ 1 ] + "//" );
-
- // Extract dataTypes list
- s.dataTypes = jQuery.trim( s.dataType || "*" ).toLowerCase().split( core_rspace );
-
- // A cross-domain request is in order when we have a protocol:host:port mismatch
- if ( s.crossDomain == null ) {
- parts = rurl.exec( s.url.toLowerCase() );
- s.crossDomain = !!( parts &&
- ( parts[ 1 ] !== ajaxLocParts[ 1 ] || parts[ 2 ] !== ajaxLocParts[ 2 ] ||
- ( parts[ 3 ] || ( parts[ 1 ] === "http:" ? 80 : 443 ) ) !=
- ( ajaxLocParts[ 3 ] || ( ajaxLocParts[ 1 ] === "http:" ? 80 : 443 ) ) )
- );
- }
-
- // Convert data if not already a string
- if ( s.data && s.processData && typeof s.data !== "string" ) {
- s.data = jQuery.param( s.data, s.traditional );
- }
-
- // Apply prefilters
- inspectPrefiltersOrTransports( prefilters, s, options, jqXHR );
-
- // If request was aborted inside a prefilter, stop there
- if ( state === 2 ) {
- return jqXHR;
- }
-
- // We can fire global events as of now if asked to
- fireGlobals = s.global;
-
- // Uppercase the type
- s.type = s.type.toUpperCase();
-
- // Determine if request has content
- s.hasContent = !rnoContent.test( s.type );
-
- // Watch for a new set of requests
- if ( fireGlobals && jQuery.active++ === 0 ) {
- jQuery.event.trigger( "ajaxStart" );
- }
-
- // More options handling for requests with no content
- if ( !s.hasContent ) {
-
- // If data is available, append data to url
- if ( s.data ) {
- s.url += ( rquery.test( s.url ) ? "&" : "?" ) + s.data;
- // #9682: remove data so that it's not used in an eventual retry
- delete s.data;
- }
-
- // Get ifModifiedKey before adding the anti-cache parameter
- ifModifiedKey = s.url;
-
- // Add anti-cache in url if needed
- if ( s.cache === false ) {
-
- var ts = jQuery.now(),
- // try replacing _= if it is there
- ret = s.url.replace( rts, "$1_=" + ts );
-
- // if nothing was replaced, add timestamp to the end
- s.url = ret + ( ( ret === s.url ) ? ( rquery.test( s.url ) ? "&" : "?" ) + "_=" + ts : "" );
- }
- }
-
- // Set the correct header, if data is being sent
- if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) {
- jqXHR.setRequestHeader( "Content-Type", s.contentType );
- }
-
- // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
- if ( s.ifModified ) {
- ifModifiedKey = ifModifiedKey || s.url;
- if ( jQuery.lastModified[ ifModifiedKey ] ) {
- jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ ifModifiedKey ] );
- }
- if ( jQuery.etag[ ifModifiedKey ] ) {
- jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ ifModifiedKey ] );
- }
- }
-
- // Set the Accepts header for the server, depending on the dataType
- jqXHR.setRequestHeader(
- "Accept",
- s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[0] ] ?
- s.accepts[ s.dataTypes[0] ] + ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) :
- s.accepts[ "*" ]
- );
-
- // Check for headers option
- for ( i in s.headers ) {
- jqXHR.setRequestHeader( i, s.headers[ i ] );
- }
-
- // Allow custom headers/mimetypes and early abort
- if ( s.beforeSend && ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || state === 2 ) ) {
- // Abort if not done already and return
- return jqXHR.abort();
-
- }
-
- // aborting is no longer a cancellation
- strAbort = "abort";
-
- // Install callbacks on deferreds
- for ( i in { success: 1, error: 1, complete: 1 } ) {
- jqXHR[ i ]( s[ i ] );
- }
-
- // Get transport
- transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR );
-
- // If no transport, we auto-abort
- if ( !transport ) {
- done( -1, "No Transport" );
- } else {
- jqXHR.readyState = 1;
- // Send global event
- if ( fireGlobals ) {
- globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] );
- }
- // Timeout
- if ( s.async && s.timeout > 0 ) {
- timeoutTimer = setTimeout( function(){
- jqXHR.abort( "timeout" );
- }, s.timeout );
- }
-
- try {
- state = 1;
- transport.send( requestHeaders, done );
- } catch (e) {
- // Propagate exception as error if not done
- if ( state < 2 ) {
- done( -1, e );
- // Simply rethrow otherwise
- } else {
- throw e;
- }
- }
- }
-
- return jqXHR;
- },
-
- // Counter for holding the number of active queries
- active: 0,
-
- // Last-Modified header cache for next request
- lastModified: {},
- etag: {}
-
-});
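
Building on the converters table in ajaxSettings above, a hedged sketch of wiring up a custom dataType; the "lines" name and converter are invented:

jQuery.ajaxSetup({
	converters: {
		// Convert raw response text into an array of lines for dataType "lines"
		"text lines": function( text ) {
			return text.split( "\n" );
		}
	}
});
// jQuery.ajax({ url: "...", dataType: "lines" }) then resolves with the array:
// ajaxHandleResponses picks "text", and ajaxConvert applies "text lines".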
-
-/* Handles responses to an ajax request:
- * - sets all responseXXX fields accordingly
- * - finds the right dataType (mediates between content-type and expected dataType)
- * - returns the corresponding response
- */
-function ajaxHandleResponses( s, jqXHR, responses ) {
-
- var ct, type, finalDataType, firstDataType,
- contents = s.contents,
- dataTypes = s.dataTypes,
- responseFields = s.responseFields;
-
- // Fill responseXXX fields
- for ( type in responseFields ) {
- if ( type in responses ) {
- jqXHR[ responseFields[type] ] = responses[ type ];
- }
- }
-
- // Remove auto dataType and get content-type in the process
- while( dataTypes[ 0 ] === "*" ) {
- dataTypes.shift();
- if ( ct === undefined ) {
- ct = s.mimeType || jqXHR.getResponseHeader( "content-type" );
- }
- }
-
- // Check if we're dealing with a known content-type
- if ( ct ) {
- for ( type in contents ) {
- if ( contents[ type ] && contents[ type ].test( ct ) ) {
- dataTypes.unshift( type );
- break;
- }
- }
- }
-
- // Check to see if we have a response for the expected dataType
- if ( dataTypes[ 0 ] in responses ) {
- finalDataType = dataTypes[ 0 ];
- } else {
- // Try convertible dataTypes
- for ( type in responses ) {
- if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[0] ] ) {
- finalDataType = type;
- break;
- }
- if ( !firstDataType ) {
- firstDataType = type;
- }
- }
- // Or just use first one
- finalDataType = finalDataType || firstDataType;
- }
-
-	// If we found a dataType,
-	// add it to the list if needed
-	// and return the corresponding response
- if ( finalDataType ) {
- if ( finalDataType !== dataTypes[ 0 ] ) {
- dataTypes.unshift( finalDataType );
- }
- return responses[ finalDataType ];
- }
-}
-
-// Chain conversions given the request and the original response
-function ajaxConvert( s, response ) {
-
- var conv, conv2, current, tmp,
- // Work with a copy of dataTypes in case we need to modify it for conversion
- dataTypes = s.dataTypes.slice(),
- prev = dataTypes[ 0 ],
- converters = {},
- i = 0;
-
- // Apply the dataFilter if provided
- if ( s.dataFilter ) {
- response = s.dataFilter( response, s.dataType );
- }
-
- // Create converters map with lowercased keys
- if ( dataTypes[ 1 ] ) {
- for ( conv in s.converters ) {
- converters[ conv.toLowerCase() ] = s.converters[ conv ];
- }
- }
-
- // Convert to each sequential dataType, tolerating list modification
- for ( ; (current = dataTypes[++i]); ) {
-
- // There's only work to do if current dataType is non-auto
- if ( current !== "*" ) {
-
- // Convert response if prev dataType is non-auto and differs from current
- if ( prev !== "*" && prev !== current ) {
-
- // Seek a direct converter
- conv = converters[ prev + " " + current ] || converters[ "* " + current ];
-
- // If none found, seek a pair
- if ( !conv ) {
- for ( conv2 in converters ) {
-
- // If conv2 outputs current
- tmp = conv2.split(" ");
- if ( tmp[ 1 ] === current ) {
-
- // If prev can be converted to accepted input
- conv = converters[ prev + " " + tmp[ 0 ] ] ||
- converters[ "* " + tmp[ 0 ] ];
- if ( conv ) {
- // Condense equivalence converters
- if ( conv === true ) {
- conv = converters[ conv2 ];
-
- // Otherwise, insert the intermediate dataType
- } else if ( converters[ conv2 ] !== true ) {
- current = tmp[ 0 ];
- dataTypes.splice( i--, 0, current );
- }
-
- break;
- }
- }
- }
- }
-
- // Apply converter (if not an equivalence)
- if ( conv !== true ) {
-
- // Unless errors are allowed to bubble, catch and return them
- if ( conv && s["throws"] ) {
- response = conv( response );
- } else {
- try {
- response = conv( response );
- } catch ( e ) {
- return { state: "parsererror", error: conv ? e : "No conversion from " + prev + " to " + current };
- }
- }
- }
- }
-
- // Update prev for next iteration
- prev = current;
- }
- }
-
- return { state: "success", data: response };
-}
-var oldCallbacks = [],
- rquestion = /\?/,
- rjsonp = /(=)\?(?=&|$)|\?\?/,
- nonce = jQuery.now();
-
-// Default jsonp settings
-jQuery.ajaxSetup({
- jsonp: "callback",
- jsonpCallback: function() {
- var callback = oldCallbacks.pop() || ( jQuery.expando + "_" + ( nonce++ ) );
- this[ callback ] = true;
- return callback;
- }
-});
-
-// Detect, normalize options and install callbacks for jsonp requests
-jQuery.ajaxPrefilter( "json jsonp", function( s, originalSettings, jqXHR ) {
-
- var callbackName, overwritten, responseContainer,
- data = s.data,
- url = s.url,
- hasCallback = s.jsonp !== false,
- replaceInUrl = hasCallback && rjsonp.test( url ),
- replaceInData = hasCallback && !replaceInUrl && typeof data === "string" &&
- !( s.contentType || "" ).indexOf("application/x-www-form-urlencoded") &&
- rjsonp.test( data );
-
- // Handle iff the expected data type is "jsonp" or we have a parameter to set
- if ( s.dataTypes[ 0 ] === "jsonp" || replaceInUrl || replaceInData ) {
-
- // Get callback name, remembering preexisting value associated with it
- callbackName = s.jsonpCallback = jQuery.isFunction( s.jsonpCallback ) ?
- s.jsonpCallback() :
- s.jsonpCallback;
- overwritten = window[ callbackName ];
-
- // Insert callback into url or form data
- if ( replaceInUrl ) {
- s.url = url.replace( rjsonp, "$1" + callbackName );
- } else if ( replaceInData ) {
- s.data = data.replace( rjsonp, "$1" + callbackName );
- } else if ( hasCallback ) {
- s.url += ( rquestion.test( url ) ? "&" : "?" ) + s.jsonp + "=" + callbackName;
- }
-
- // Use data converter to retrieve json after script execution
- s.converters["script json"] = function() {
- if ( !responseContainer ) {
- jQuery.error( callbackName + " was not called" );
- }
- return responseContainer[ 0 ];
- };
-
- // force json dataType
- s.dataTypes[ 0 ] = "json";
-
- // Install callback
- window[ callbackName ] = function() {
- responseContainer = arguments;
- };
-
- // Clean-up function (fires after converters)
- jqXHR.always(function() {
- // Restore preexisting value
- window[ callbackName ] = overwritten;
-
- // Save back as free
- if ( s[ callbackName ] ) {
-				// make sure that re-using the options doesn't screw things up
- s.jsonpCallback = originalSettings.jsonpCallback;
-
- // save the callback name for future use
- oldCallbacks.push( callbackName );
- }
-
- // Call if it was a function and we have a response
- if ( responseContainer && jQuery.isFunction( overwritten ) ) {
- overwritten( responseContainer[ 0 ] );
- }
-
- responseContainer = overwritten = undefined;
- });
-
- // Delegate to script
- return "script";
- }
-});
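
A request sketch that exercises the prefilter above; the endpoint is hypothetical:

jQuery.ajax({
	// "=?" matches rjsonp and is replaced with the generated callback name
	url: "http://example.com/api?callback=?",
	dataType: "jsonp",
	success: function( data ) {
		// data is the parsed JSON delivered through the installed callback
	}
});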
-// Install script dataType
-jQuery.ajaxSetup({
- accepts: {
- script: "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"
- },
- contents: {
- script: /javascript|ecmascript/
- },
- converters: {
- "text script": function( text ) {
- jQuery.globalEval( text );
- return text;
- }
- }
-});
-
-// Handle cache's special case and global
-jQuery.ajaxPrefilter( "script", function( s ) {
- if ( s.cache === undefined ) {
- s.cache = false;
- }
- if ( s.crossDomain ) {
- s.type = "GET";
- s.global = false;
- }
-});
-
-// Bind script tag hack transport
-jQuery.ajaxTransport( "script", function(s) {
-
- // This transport only deals with cross domain requests
- if ( s.crossDomain ) {
-
- var script,
- head = document.head || document.getElementsByTagName( "head" )[0] || document.documentElement;
-
- return {
-
- send: function( _, callback ) {
-
- script = document.createElement( "script" );
-
- script.async = "async";
-
- if ( s.scriptCharset ) {
- script.charset = s.scriptCharset;
- }
-
- script.src = s.url;
-
- // Attach handlers for all browsers
- script.onload = script.onreadystatechange = function( _, isAbort ) {
-
- if ( isAbort || !script.readyState || /loaded|complete/.test( script.readyState ) ) {
-
- // Handle memory leak in IE
- script.onload = script.onreadystatechange = null;
-
- // Remove the script
- if ( head && script.parentNode ) {
- head.removeChild( script );
- }
-
- // Dereference the script
- script = undefined;
-
- // Callback if not abort
- if ( !isAbort ) {
- callback( 200, "success" );
- }
- }
- };
- // Use insertBefore instead of appendChild to circumvent an IE6 bug.
- // This arises when a base node is used (#2709 and #4378).
- head.insertBefore( script, head.firstChild );
- },
-
- abort: function() {
- if ( script ) {
- script.onload( 0, 1 );
- }
- }
- };
- }
-});
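
This transport is what ultimately serves cross-domain script fetches such as the following; the URL is hypothetical:

jQuery.getScript( "http://cdn.example.com/widget.js" ).done(function() {
	// widget.js has been evaluated via the injected <script> tag
});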
-var xhrCallbacks,
- // #5280: Internet Explorer will keep connections alive if we don't abort on unload
- xhrOnUnloadAbort = window.ActiveXObject ? function() {
- // Abort all pending requests
- for ( var key in xhrCallbacks ) {
- xhrCallbacks[ key ]( 0, 1 );
- }
- } : false,
- xhrId = 0;
-
-// Functions to create xhrs
-function createStandardXHR() {
- try {
- return new window.XMLHttpRequest();
- } catch( e ) {}
-}
-
-function createActiveXHR() {
- try {
- return new window.ActiveXObject( "Microsoft.XMLHTTP" );
- } catch( e ) {}
-}
-
-// Create the request object
-// (This is still attached to ajaxSettings for backward compatibility)
-jQuery.ajaxSettings.xhr = window.ActiveXObject ?
- /* Microsoft failed to properly
- * implement the XMLHttpRequest in IE7 (can't request local files),
- * so we use the ActiveXObject when it is available
-	 * Additionally, XMLHttpRequest can be disabled in IE7/IE8, so
-	 * we need a fallback.
- */
- function() {
- return !this.isLocal && createStandardXHR() || createActiveXHR();
- } :
- // For all other browsers, use the standard XMLHttpRequest object
- createStandardXHR;
-
-// Determine support properties
-(function( xhr ) {
- jQuery.extend( jQuery.support, {
- ajax: !!xhr,
- cors: !!xhr && ( "withCredentials" in xhr )
- });
-})( jQuery.ajaxSettings.xhr() );
-
-// Create transport if the browser can provide an xhr
-if ( jQuery.support.ajax ) {
-
- jQuery.ajaxTransport(function( s ) {
- // Cross domain only allowed if supported through XMLHttpRequest
- if ( !s.crossDomain || jQuery.support.cors ) {
-
- var callback;
-
- return {
- send: function( headers, complete ) {
-
- // Get a new xhr
- var handle, i,
- xhr = s.xhr();
-
- // Open the socket
- // Passing null username, generates a login popup on Opera (#2865)
- if ( s.username ) {
- xhr.open( s.type, s.url, s.async, s.username, s.password );
- } else {
- xhr.open( s.type, s.url, s.async );
- }
-
- // Apply custom fields if provided
- if ( s.xhrFields ) {
- for ( i in s.xhrFields ) {
- xhr[ i ] = s.xhrFields[ i ];
- }
- }
-
- // Override mime type if needed
- if ( s.mimeType && xhr.overrideMimeType ) {
- xhr.overrideMimeType( s.mimeType );
- }
-
- // X-Requested-With header
- // For cross-domain requests, seeing as conditions for a preflight are
- // akin to a jigsaw puzzle, we simply never set it to be sure.
- // (it can always be set on a per-request basis or even using ajaxSetup)
- // For same-domain requests, won't change header if already provided.
- if ( !s.crossDomain && !headers["X-Requested-With"] ) {
- headers[ "X-Requested-With" ] = "XMLHttpRequest";
- }
-
- // Need an extra try/catch for cross domain requests in Firefox 3
- try {
- for ( i in headers ) {
- xhr.setRequestHeader( i, headers[ i ] );
- }
- } catch( _ ) {}
-
- // Do send the request
- // This may raise an exception which is actually
- // handled in jQuery.ajax (so no try/catch here)
- xhr.send( ( s.hasContent && s.data ) || null );
-
- // Listener
- callback = function( _, isAbort ) {
-
- var status,
- statusText,
- responseHeaders,
- responses,
- xml;
-
- // Firefox throws exceptions when accessing properties
- // of an xhr when a network error occurred
- // http://helpful.knobs-dials.com/index.php/Component_returned_failure_code:_0x80040111_(NS_ERROR_NOT_AVAILABLE)
- try {
-
- // Was never called and is aborted or complete
- if ( callback && ( isAbort || xhr.readyState === 4 ) ) {
-
- // Only called once
- callback = undefined;
-
- // Do not keep as active anymore
- if ( handle ) {
- xhr.onreadystatechange = jQuery.noop;
- if ( xhrOnUnloadAbort ) {
- delete xhrCallbacks[ handle ];
- }
- }
-
- // If it's an abort
- if ( isAbort ) {
- // Abort it manually if needed
- if ( xhr.readyState !== 4 ) {
- xhr.abort();
- }
- } else {
- status = xhr.status;
- responseHeaders = xhr.getAllResponseHeaders();
- responses = {};
- xml = xhr.responseXML;
-
- // Construct response list
- if ( xml && xml.documentElement /* #4958 */ ) {
- responses.xml = xml;
- }
-
- // When requesting binary data, IE6-9 will throw an exception
- // on any attempt to access responseText (#11426)
- try {
- responses.text = xhr.responseText;
- } catch( e ) {
- }
-
- // Firefox throws an exception when accessing
- // statusText for faulty cross-domain requests
- try {
- statusText = xhr.statusText;
- } catch( e ) {
-							// Normalize to WebKit's behavior of an empty statusText
- statusText = "";
- }
-
- // Filter status for non standard behaviors
-
- // If the request is local and we have data: assume a success
- // (success with no data won't get notified, that's the best we
- // can do given current implementations)
- if ( !status && s.isLocal && !s.crossDomain ) {
- status = responses.text ? 200 : 404;
- // IE - #1450: sometimes returns 1223 when it should be 204
- } else if ( status === 1223 ) {
- status = 204;
- }
- }
- }
- } catch( firefoxAccessException ) {
- if ( !isAbort ) {
- complete( -1, firefoxAccessException );
- }
- }
-
- // Call complete if needed
- if ( responses ) {
- complete( status, statusText, responses, responseHeaders );
- }
- };
-
- if ( !s.async ) {
- // if we're in sync mode we fire the callback
- callback();
- } else if ( xhr.readyState === 4 ) {
- // (IE6 & IE7) if it's in cache and has been
- // retrieved directly we need to fire the callback
- setTimeout( callback, 0 );
- } else {
- handle = ++xhrId;
- if ( xhrOnUnloadAbort ) {
- // Create the active xhrs callbacks list if needed
- // and attach the unload handler
- if ( !xhrCallbacks ) {
- xhrCallbacks = {};
- jQuery( window ).unload( xhrOnUnloadAbort );
- }
- // Add to list of active xhrs callbacks
- xhrCallbacks[ handle ] = callback;
- }
- xhr.onreadystatechange = callback;
- }
- },
-
- abort: function() {
- if ( callback ) {
- callback(0,1);
- }
- }
- };
- }
- });
-}
-var fxNow, timerId,
- rfxtypes = /^(?:toggle|show|hide)$/,
- rfxnum = new RegExp( "^(?:([-+])=|)(" + core_pnum + ")([a-z%]*)$", "i" ),
- rrun = /queueHooks$/,
- animationPrefilters = [ defaultPrefilter ],
- tweeners = {
- "*": [function( prop, value ) {
- var end, unit,
- tween = this.createTween( prop, value ),
- parts = rfxnum.exec( value ),
- target = tween.cur(),
- start = +target || 0,
- scale = 1,
- maxIterations = 20;
-
- if ( parts ) {
- end = +parts[2];
- unit = parts[3] || ( jQuery.cssNumber[ prop ] ? "" : "px" );
-
- // We need to compute starting value
- if ( unit !== "px" && start ) {
- // Iteratively approximate from a nonzero starting point
- // Prefer the current property, because this process will be trivial if it uses the same units
-				// Fall back to end or a simple constant
- start = jQuery.css( tween.elem, prop, true ) || end || 1;
-
- do {
- // If previous iteration zeroed out, double until we get *something*
- // Use a string for doubling factor so we don't accidentally see scale as unchanged below
- scale = scale || ".5";
-
- // Adjust and apply
- start = start / scale;
- jQuery.style( tween.elem, prop, start + unit );
-
- // Update scale, tolerating zero or NaN from tween.cur()
- // And breaking the loop if scale is unchanged or perfect, or if we've just had enough
- } while ( scale !== (scale = tween.cur() / target) && scale !== 1 && --maxIterations );
- }
-
- tween.unit = unit;
- tween.start = start;
- // If a +=/-= token was provided, we're doing a relative animation
- tween.end = parts[1] ? start + ( parts[1] + 1 ) * end : end;
- }
- return tween;
- }]
- };
-
-// Animations created synchronously will run synchronously
-function createFxNow() {
- setTimeout(function() {
- fxNow = undefined;
- }, 0 );
- return ( fxNow = jQuery.now() );
-}
-
-function createTweens( animation, props ) {
- jQuery.each( props, function( prop, value ) {
- var collection = ( tweeners[ prop ] || [] ).concat( tweeners[ "*" ] ),
- index = 0,
- length = collection.length;
- for ( ; index < length; index++ ) {
- if ( collection[ index ].call( animation, prop, value ) ) {
-
- // we're done with this property
- return;
- }
- }
- });
-}
-
-function Animation( elem, properties, options ) {
- var result,
- index = 0,
- tweenerIndex = 0,
- length = animationPrefilters.length,
- deferred = jQuery.Deferred().always( function() {
- // don't match elem in the :animated selector
- delete tick.elem;
- }),
- tick = function() {
- var currentTime = fxNow || createFxNow(),
- remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ),
- // archaic crash bug won't allow us to use 1 - ( 0.5 || 0 ) (#12497)
- temp = remaining / animation.duration || 0,
- percent = 1 - temp,
- index = 0,
- length = animation.tweens.length;
-
- for ( ; index < length ; index++ ) {
- animation.tweens[ index ].run( percent );
- }
-
- deferred.notifyWith( elem, [ animation, percent, remaining ]);
-
- if ( percent < 1 && length ) {
- return remaining;
- } else {
- deferred.resolveWith( elem, [ animation ] );
- return false;
- }
- },
- animation = deferred.promise({
- elem: elem,
- props: jQuery.extend( {}, properties ),
- opts: jQuery.extend( true, { specialEasing: {} }, options ),
- originalProperties: properties,
- originalOptions: options,
- startTime: fxNow || createFxNow(),
- duration: options.duration,
- tweens: [],
- createTween: function( prop, end, easing ) {
- var tween = jQuery.Tween( elem, animation.opts, prop, end,
- animation.opts.specialEasing[ prop ] || animation.opts.easing );
- animation.tweens.push( tween );
- return tween;
- },
- stop: function( gotoEnd ) {
- var index = 0,
- // if we are going to the end, we want to run all the tweens
- // otherwise we skip this part
- length = gotoEnd ? animation.tweens.length : 0;
-
- for ( ; index < length ; index++ ) {
- animation.tweens[ index ].run( 1 );
- }
-
- // resolve when we played the last frame
- // otherwise, reject
- if ( gotoEnd ) {
- deferred.resolveWith( elem, [ animation, gotoEnd ] );
- } else {
- deferred.rejectWith( elem, [ animation, gotoEnd ] );
- }
- return this;
- }
- }),
- props = animation.props;
-
- propFilter( props, animation.opts.specialEasing );
-
- for ( ; index < length ; index++ ) {
- result = animationPrefilters[ index ].call( animation, elem, props, animation.opts );
- if ( result ) {
- return result;
- }
- }
-
- createTweens( animation, props );
-
- if ( jQuery.isFunction( animation.opts.start ) ) {
- animation.opts.start.call( elem, animation );
- }
-
- jQuery.fx.timer(
- jQuery.extend( tick, {
- anim: animation,
- queue: animation.opts.queue,
- elem: elem
- })
- );
-
- // attach callbacks from options
- return animation.progress( animation.opts.progress )
- .done( animation.opts.done, animation.opts.complete )
- .fail( animation.opts.fail )
- .always( animation.opts.always );
-}
-
-function propFilter( props, specialEasing ) {
- var index, name, easing, value, hooks;
-
- // camelCase, specialEasing and expand cssHook pass
- for ( index in props ) {
- name = jQuery.camelCase( index );
- easing = specialEasing[ name ];
- value = props[ index ];
- if ( jQuery.isArray( value ) ) {
- easing = value[ 1 ];
- value = props[ index ] = value[ 0 ];
- }
-
- if ( index !== name ) {
- props[ name ] = value;
- delete props[ index ];
- }
-
- hooks = jQuery.cssHooks[ name ];
- if ( hooks && "expand" in hooks ) {
- value = hooks.expand( value );
- delete props[ name ];
-
-				// Not quite $.extend; this won't overwrite keys already present.
-				// Also reusing 'index' from above because we have the correct "name"
- for ( index in value ) {
- if ( !( index in props ) ) {
- props[ index ] = value[ index ];
- specialEasing[ index ] = easing;
- }
- }
- } else {
- specialEasing[ name ] = easing;
- }
- }
-}
-
-jQuery.Animation = jQuery.extend( Animation, {
-
- tweener: function( props, callback ) {
- if ( jQuery.isFunction( props ) ) {
- callback = props;
- props = [ "*" ];
- } else {
- props = props.split(" ");
- }
-
- var prop,
- index = 0,
- length = props.length;
-
- for ( ; index < length ; index++ ) {
- prop = props[ index ];
- tweeners[ prop ] = tweeners[ prop ] || [];
- tweeners[ prop ].unshift( callback );
- }
- },
-
- prefilter: function( callback, prepend ) {
- if ( prepend ) {
- animationPrefilters.unshift( callback );
- } else {
- animationPrefilters.push( callback );
- }
- }
-});
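
A hedged sketch of the two extension points just exposed; the property name is invented:

// Prepend a custom prefilter so it runs before defaultPrefilter:
jQuery.Animation.prefilter(function( elem, props, opts ) {
	// Returning a truthy result here would take over the whole animation.
}, true );

// Claim a property with a tweener so the generic "*" tweener is skipped:
jQuery.Animation.tweener( "borderSpacing", function( prop, value ) {
	return this.createTween( prop, value ); // truthy return marks it handled
});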
-
-function defaultPrefilter( elem, props, opts ) {
- var index, prop, value, length, dataShow, toggle, tween, hooks, oldfire,
- anim = this,
- style = elem.style,
- orig = {},
- handled = [],
- hidden = elem.nodeType && isHidden( elem );
-
- // handle queue: false promises
- if ( !opts.queue ) {
- hooks = jQuery._queueHooks( elem, "fx" );
- if ( hooks.unqueued == null ) {
- hooks.unqueued = 0;
- oldfire = hooks.empty.fire;
- hooks.empty.fire = function() {
- if ( !hooks.unqueued ) {
- oldfire();
- }
- };
- }
- hooks.unqueued++;
-
- anim.always(function() {
- // doing this makes sure that the complete handler will be called
- // before this completes
- anim.always(function() {
- hooks.unqueued--;
- if ( !jQuery.queue( elem, "fx" ).length ) {
- hooks.empty.fire();
- }
- });
- });
- }
-
- // height/width overflow pass
- if ( elem.nodeType === 1 && ( "height" in props || "width" in props ) ) {
- // Make sure that nothing sneaks out
- // Record all 3 overflow attributes because IE does not
- // change the overflow attribute when overflowX and
- // overflowY are set to the same value
- opts.overflow = [ style.overflow, style.overflowX, style.overflowY ];
-
- // Set display property to inline-block for height/width
- // animations on inline elements that are having width/height animated
- if ( jQuery.css( elem, "display" ) === "inline" &&
- jQuery.css( elem, "float" ) === "none" ) {
-
- // inline-level elements accept inline-block;
- // block-level elements need to be inline with layout
- if ( !jQuery.support.inlineBlockNeedsLayout || css_defaultDisplay( elem.nodeName ) === "inline" ) {
- style.display = "inline-block";
-
- } else {
- style.zoom = 1;
- }
- }
- }
-
- if ( opts.overflow ) {
- style.overflow = "hidden";
- if ( !jQuery.support.shrinkWrapBlocks ) {
- anim.done(function() {
- style.overflow = opts.overflow[ 0 ];
- style.overflowX = opts.overflow[ 1 ];
- style.overflowY = opts.overflow[ 2 ];
- });
- }
- }
-
-
- // show/hide pass
- for ( index in props ) {
- value = props[ index ];
- if ( rfxtypes.exec( value ) ) {
- delete props[ index ];
- toggle = toggle || value === "toggle";
- if ( value === ( hidden ? "hide" : "show" ) ) {
- continue;
- }
- handled.push( index );
- }
- }
-
- length = handled.length;
- if ( length ) {
- dataShow = jQuery._data( elem, "fxshow" ) || jQuery._data( elem, "fxshow", {} );
- if ( "hidden" in dataShow ) {
- hidden = dataShow.hidden;
- }
-
-		// Store state if it's a toggle - enables .stop().toggle() to "reverse"
- if ( toggle ) {
- dataShow.hidden = !hidden;
- }
- if ( hidden ) {
- jQuery( elem ).show();
- } else {
- anim.done(function() {
- jQuery( elem ).hide();
- });
- }
- anim.done(function() {
- var prop;
- jQuery.removeData( elem, "fxshow", true );
- for ( prop in orig ) {
- jQuery.style( elem, prop, orig[ prop ] );
- }
- });
- for ( index = 0 ; index < length ; index++ ) {
- prop = handled[ index ];
- tween = anim.createTween( prop, hidden ? dataShow[ prop ] : 0 );
- orig[ prop ] = dataShow[ prop ] || jQuery.style( elem, prop );
-
- if ( !( prop in dataShow ) ) {
- dataShow[ prop ] = tween.start;
- if ( hidden ) {
- tween.end = tween.start;
- tween.start = prop === "width" || prop === "height" ? 1 : 0;
- }
- }
- }
- }
-}
-
-function Tween( elem, options, prop, end, easing ) {
- return new Tween.prototype.init( elem, options, prop, end, easing );
-}
-jQuery.Tween = Tween;
-
-Tween.prototype = {
- constructor: Tween,
- init: function( elem, options, prop, end, easing, unit ) {
- this.elem = elem;
- this.prop = prop;
- this.easing = easing || "swing";
- this.options = options;
- this.start = this.now = this.cur();
- this.end = end;
- this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" );
- },
- cur: function() {
- var hooks = Tween.propHooks[ this.prop ];
-
- return hooks && hooks.get ?
- hooks.get( this ) :
- Tween.propHooks._default.get( this );
- },
- run: function( percent ) {
- var eased,
- hooks = Tween.propHooks[ this.prop ];
-
- if ( this.options.duration ) {
- this.pos = eased = jQuery.easing[ this.easing ](
- percent, this.options.duration * percent, 0, 1, this.options.duration
- );
- } else {
- this.pos = eased = percent;
- }
- this.now = ( this.end - this.start ) * eased + this.start;
-
- if ( this.options.step ) {
- this.options.step.call( this.elem, this.now, this );
- }
-
- if ( hooks && hooks.set ) {
- hooks.set( this );
- } else {
- Tween.propHooks._default.set( this );
- }
- return this;
- }
-};
-
-Tween.prototype.init.prototype = Tween.prototype;
-
-Tween.propHooks = {
- _default: {
- get: function( tween ) {
- var result;
-
- if ( tween.elem[ tween.prop ] != null &&
- (!tween.elem.style || tween.elem.style[ tween.prop ] == null) ) {
- return tween.elem[ tween.prop ];
- }
-
-			// Passing any value as a 4th parameter to .css will automatically
-			// attempt a parseFloat and fall back to a string if the parse fails;
-			// simple values such as "10px" are parsed to a float,
-			// while complex values such as "rotate(1rad)" are returned as-is.
- result = jQuery.css( tween.elem, tween.prop, false, "" );
- // Empty strings, null, undefined and "auto" are converted to 0.
- return !result || result === "auto" ? 0 : result;
- },
- set: function( tween ) {
-			// Use the step hook for back compat; use the cssHook if it's there;
-			// use .style if it's available; otherwise fall back to plain properties
- if ( jQuery.fx.step[ tween.prop ] ) {
- jQuery.fx.step[ tween.prop ]( tween );
- } else if ( tween.elem.style && ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || jQuery.cssHooks[ tween.prop ] ) ) {
- jQuery.style( tween.elem, tween.prop, tween.now + tween.unit );
- } else {
- tween.elem[ tween.prop ] = tween.now;
- }
- }
- }
-};
-
-// Remove in 2.0 - this supports IE8's panic based approach
-// to setting things on disconnected nodes
-
-Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = {
- set: function( tween ) {
- if ( tween.elem.nodeType && tween.elem.parentNode ) {
- tween.elem[ tween.prop ] = tween.now;
- }
- }
-};
-
-jQuery.each([ "toggle", "show", "hide" ], function( i, name ) {
- var cssFn = jQuery.fn[ name ];
- jQuery.fn[ name ] = function( speed, easing, callback ) {
- return speed == null || typeof speed === "boolean" ||
- // special check for .toggle( handler, handler, ... )
- ( !i && jQuery.isFunction( speed ) && jQuery.isFunction( easing ) ) ?
- cssFn.apply( this, arguments ) :
- this.animate( genFx( name, true ), speed, easing, callback );
- };
-});
-
-jQuery.fn.extend({
- fadeTo: function( speed, to, easing, callback ) {
-
- // show any hidden elements after setting opacity to 0
- return this.filter( isHidden ).css( "opacity", 0 ).show()
-
- // animate to the value specified
- .end().animate({ opacity: to }, speed, easing, callback );
- },
- animate: function( prop, speed, easing, callback ) {
- var empty = jQuery.isEmptyObject( prop ),
- optall = jQuery.speed( speed, easing, callback ),
- doAnimation = function() {
- // Operate on a copy of prop so per-property easing won't be lost
- var anim = Animation( this, jQuery.extend( {}, prop ), optall );
-
- // Empty animations resolve immediately
- if ( empty ) {
- anim.stop( true );
- }
- };
-
- return empty || optall.queue === false ?
- this.each( doAnimation ) :
- this.queue( optall.queue, doAnimation );
- },
- stop: function( type, clearQueue, gotoEnd ) {
- var stopQueue = function( hooks ) {
- var stop = hooks.stop;
- delete hooks.stop;
- stop( gotoEnd );
- };
-
- if ( typeof type !== "string" ) {
- gotoEnd = clearQueue;
- clearQueue = type;
- type = undefined;
- }
- if ( clearQueue && type !== false ) {
- this.queue( type || "fx", [] );
- }
-
- return this.each(function() {
- var dequeue = true,
- index = type != null && type + "queueHooks",
- timers = jQuery.timers,
- data = jQuery._data( this );
-
- if ( index ) {
- if ( data[ index ] && data[ index ].stop ) {
- stopQueue( data[ index ] );
- }
- } else {
- for ( index in data ) {
- if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) {
- stopQueue( data[ index ] );
- }
- }
- }
-
- for ( index = timers.length; index--; ) {
- if ( timers[ index ].elem === this && (type == null || timers[ index ].queue === type) ) {
- timers[ index ].anim.stop( gotoEnd );
- dequeue = false;
- timers.splice( index, 1 );
- }
- }
-
-			// Start the next in the queue if the last step wasn't forced;
-			// timers currently will call their complete callbacks, which will dequeue,
-			// but only if they were gotoEnd
- if ( dequeue || !gotoEnd ) {
- jQuery.dequeue( this, type );
- }
- });
- }
-});
-
-// Generate parameters to create a standard animation
-function genFx( type, includeWidth ) {
- var which,
- attrs = { height: type },
- i = 0;
-
- // if we include width, step value is 1 to do all cssExpand values,
- // if we don't include width, step value is 2 to skip over Left and Right
-	includeWidth = includeWidth ? 1 : 0;
- for( ; i < 4 ; i += 2 - includeWidth ) {
- which = cssExpand[ i ];
- attrs[ "margin" + which ] = attrs[ "padding" + which ] = type;
- }
-
- if ( includeWidth ) {
- attrs.opacity = attrs.width = type;
- }
-
- return attrs;
-}
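
For reference, a sketch of this helper's output (cssExpand is [ "Top", "Right", "Bottom", "Left" ] elsewhere in this file):

// genFx( "hide", false ) -> { height: "hide",
//     marginTop: "hide", paddingTop: "hide",
//     marginBottom: "hide", paddingBottom: "hide" }
// genFx( "hide", true ) additionally covers the Right/Left margins and
// paddings, plus opacity: "hide" and width: "hide".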
-
-// Generate shortcuts for custom animations
-jQuery.each({
- slideDown: genFx("show"),
- slideUp: genFx("hide"),
- slideToggle: genFx("toggle"),
- fadeIn: { opacity: "show" },
- fadeOut: { opacity: "hide" },
- fadeToggle: { opacity: "toggle" }
-}, function( name, props ) {
- jQuery.fn[ name ] = function( speed, easing, callback ) {
- return this.animate( props, speed, easing, callback );
- };
-});
-
-jQuery.speed = function( speed, easing, fn ) {
- var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : {
- complete: fn || !fn && easing ||
- jQuery.isFunction( speed ) && speed,
- duration: speed,
- easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing
- };
-
- opt.duration = jQuery.fx.off ? 0 : typeof opt.duration === "number" ? opt.duration :
- opt.duration in jQuery.fx.speeds ? jQuery.fx.speeds[ opt.duration ] : jQuery.fx.speeds._default;
-
- // normalize opt.queue - true/undefined/null -> "fx"
- if ( opt.queue == null || opt.queue === true ) {
- opt.queue = "fx";
- }
-
- // Queueing
- opt.old = opt.complete;
-
- opt.complete = function() {
- if ( jQuery.isFunction( opt.old ) ) {
- opt.old.call( this );
- }
-
- if ( opt.queue ) {
- jQuery.dequeue( this, opt.queue );
- }
- };
-
- return opt;
-};
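
Worked examples of the normalization above, using the jQuery.fx.speeds table defined below:

jQuery.speed( "slow" ).duration; // 600
jQuery.speed( 250 ).duration;    // 250
jQuery.speed().duration;         // 400 (_default), or 0 when jQuery.fx.off is set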
-
-jQuery.easing = {
- linear: function( p ) {
- return p;
- },
- swing: function( p ) {
- return 0.5 - Math.cos( p*Math.PI ) / 2;
- }
-};
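
Custom easings are plain additions to this table; a sketch with an invented name:

jQuery.easing.easeOutQuad = function( p ) {
	return 1 - ( 1 - p ) * ( 1 - p );
};
// $( "#box" ).animate( { left: 200 }, 400, "easeOutQuad" );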
-
-jQuery.timers = [];
-jQuery.fx = Tween.prototype.init;
-jQuery.fx.tick = function() {
- var timer,
- timers = jQuery.timers,
- i = 0;
-
- fxNow = jQuery.now();
-
- for ( ; i < timers.length; i++ ) {
- timer = timers[ i ];
- // Checks the timer has not already been removed
- if ( !timer() && timers[ i ] === timer ) {
- timers.splice( i--, 1 );
- }
- }
-
- if ( !timers.length ) {
- jQuery.fx.stop();
- }
- fxNow = undefined;
-};
-
-jQuery.fx.timer = function( timer ) {
- if ( timer() && jQuery.timers.push( timer ) && !timerId ) {
- timerId = setInterval( jQuery.fx.tick, jQuery.fx.interval );
- }
-};
-
-jQuery.fx.interval = 13;
-
-jQuery.fx.stop = function() {
- clearInterval( timerId );
- timerId = null;
-};
-
-jQuery.fx.speeds = {
- slow: 600,
- fast: 200,
- // Default speed
- _default: 400
-};
-
-// Back Compat <1.8 extension point
-jQuery.fx.step = {};
-
-if ( jQuery.expr && jQuery.expr.filters ) {
- jQuery.expr.filters.animated = function( elem ) {
- return jQuery.grep(jQuery.timers, function( fn ) {
- return elem === fn.elem;
- }).length;
- };
-}
-var rroot = /^(?:body|html)$/i;
-
-jQuery.fn.offset = function( options ) {
- if ( arguments.length ) {
- return options === undefined ?
- this :
- this.each(function( i ) {
- jQuery.offset.setOffset( this, options, i );
- });
- }
-
- var docElem, body, win, clientTop, clientLeft, scrollTop, scrollLeft,
- box = { top: 0, left: 0 },
- elem = this[ 0 ],
- doc = elem && elem.ownerDocument;
-
- if ( !doc ) {
- return;
- }
-
- if ( (body = doc.body) === elem ) {
- return jQuery.offset.bodyOffset( elem );
- }
-
- docElem = doc.documentElement;
-
- // Make sure it's not a disconnected DOM node
- if ( !jQuery.contains( docElem, elem ) ) {
- return box;
- }
-
- // If we don't have gBCR, just use 0,0 rather than error
- // BlackBerry 5, iOS 3 (original iPhone)
- if ( typeof elem.getBoundingClientRect !== "undefined" ) {
- box = elem.getBoundingClientRect();
- }
- win = getWindow( doc );
- clientTop = docElem.clientTop || body.clientTop || 0;
- clientLeft = docElem.clientLeft || body.clientLeft || 0;
- scrollTop = win.pageYOffset || docElem.scrollTop;
- scrollLeft = win.pageXOffset || docElem.scrollLeft;
- return {
- top: box.top + scrollTop - clientTop,
- left: box.left + scrollLeft - clientLeft
- };
-};
-
-jQuery.offset = {
-
- bodyOffset: function( body ) {
- var top = body.offsetTop,
- left = body.offsetLeft;
-
- if ( jQuery.support.doesNotIncludeMarginInBodyOffset ) {
- top += parseFloat( jQuery.css(body, "marginTop") ) || 0;
- left += parseFloat( jQuery.css(body, "marginLeft") ) || 0;
- }
-
- return { top: top, left: left };
- },
-
- setOffset: function( elem, options, i ) {
- var position = jQuery.css( elem, "position" );
-
-		// Set position first, in case top/left are set even on a static elem
- if ( position === "static" ) {
- elem.style.position = "relative";
- }
-
- var curElem = jQuery( elem ),
- curOffset = curElem.offset(),
- curCSSTop = jQuery.css( elem, "top" ),
- curCSSLeft = jQuery.css( elem, "left" ),
- calculatePosition = ( position === "absolute" || position === "fixed" ) && jQuery.inArray("auto", [curCSSTop, curCSSLeft]) > -1,
- props = {}, curPosition = {}, curTop, curLeft;
-
- // need to be able to calculate position if either top or left is auto and position is either absolute or fixed
- if ( calculatePosition ) {
- curPosition = curElem.position();
- curTop = curPosition.top;
- curLeft = curPosition.left;
- } else {
- curTop = parseFloat( curCSSTop ) || 0;
- curLeft = parseFloat( curCSSLeft ) || 0;
- }
-
- if ( jQuery.isFunction( options ) ) {
- options = options.call( elem, i, curOffset );
- }
-
- if ( options.top != null ) {
- props.top = ( options.top - curOffset.top ) + curTop;
- }
- if ( options.left != null ) {
- props.left = ( options.left - curOffset.left ) + curLeft;
- }
-
- if ( "using" in options ) {
- options.using.call( elem, props );
- } else {
- curElem.css( props );
- }
- }
-};
-
-
-jQuery.fn.extend({
-
- position: function() {
- if ( !this[0] ) {
- return;
- }
-
- var elem = this[0],
-
- // Get *real* offsetParent
- offsetParent = this.offsetParent(),
-
- // Get correct offsets
- offset = this.offset(),
- parentOffset = rroot.test(offsetParent[0].nodeName) ? { top: 0, left: 0 } : offsetParent.offset();
-
- // Subtract element margins
- // note: when an element has margin: auto the offsetLeft and marginLeft
- // are the same in Safari causing offset.left to incorrectly be 0
- offset.top -= parseFloat( jQuery.css(elem, "marginTop") ) || 0;
- offset.left -= parseFloat( jQuery.css(elem, "marginLeft") ) || 0;
-
- // Add offsetParent borders
- parentOffset.top += parseFloat( jQuery.css(offsetParent[0], "borderTopWidth") ) || 0;
- parentOffset.left += parseFloat( jQuery.css(offsetParent[0], "borderLeftWidth") ) || 0;
-
- // Subtract the two offsets
- return {
- top: offset.top - parentOffset.top,
- left: offset.left - parentOffset.left
- };
- },
-
- offsetParent: function() {
- return this.map(function() {
- var offsetParent = this.offsetParent || document.body;
- while ( offsetParent && (!rroot.test(offsetParent.nodeName) && jQuery.css(offsetParent, "position") === "static") ) {
- offsetParent = offsetParent.offsetParent;
- }
- return offsetParent || document.body;
- });
- }
-});
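
A quick contrast of the two accessors above; the element is hypothetical:

var off = $( "#child" ).offset();   // document-relative coordinates
var pos = $( "#child" ).position(); // relative to the offsetParent, margins subtracted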
-
-
-// Create scrollLeft and scrollTop methods
-jQuery.each( {scrollLeft: "pageXOffset", scrollTop: "pageYOffset"}, function( method, prop ) {
- var top = /Y/.test( prop );
-
- jQuery.fn[ method ] = function( val ) {
- return jQuery.access( this, function( elem, method, val ) {
- var win = getWindow( elem );
-
- if ( val === undefined ) {
- return win ? (prop in win) ? win[ prop ] :
- win.document.documentElement[ method ] :
- elem[ method ];
- }
-
- if ( win ) {
- win.scrollTo(
- !top ? val : jQuery( win ).scrollLeft(),
- top ? val : jQuery( win ).scrollTop()
- );
-
- } else {
- elem[ method ] = val;
- }
- }, method, val, arguments.length, null );
- };
-});
-
-function getWindow( elem ) {
- return jQuery.isWindow( elem ) ?
- elem :
- elem.nodeType === 9 ?
- elem.defaultView || elem.parentWindow :
- false;
-}
-// Create innerHeight, innerWidth, height, width, outerHeight and outerWidth methods
-jQuery.each( { Height: "height", Width: "width" }, function( name, type ) {
- jQuery.each( { padding: "inner" + name, content: type, "": "outer" + name }, function( defaultExtra, funcName ) {
- // margin is only for outerHeight, outerWidth
- jQuery.fn[ funcName ] = function( margin, value ) {
- var chainable = arguments.length && ( defaultExtra || typeof margin !== "boolean" ),
- extra = defaultExtra || ( margin === true || value === true ? "margin" : "border" );
-
- return jQuery.access( this, function( elem, type, value ) {
- var doc;
-
- if ( jQuery.isWindow( elem ) ) {
- // As of 5/8/2012 this will yield incorrect results for Mobile Safari, but there
- // isn't a whole lot we can do. See pull request at this URL for discussion:
- // https://github.com/jquery/jquery/pull/764
- return elem.document.documentElement[ "client" + name ];
- }
-
- // Get document width or height
- if ( elem.nodeType === 9 ) {
- doc = elem.documentElement;
-
- // Either scroll[Width/Height] or offset[Width/Height] or client[Width/Height], whichever is greatest
- // unfortunately, this causes bug #3838 in IE6/8 only, but there is currently no good, small way to fix it.
- return Math.max(
- elem.body[ "scroll" + name ], doc[ "scroll" + name ],
- elem.body[ "offset" + name ], doc[ "offset" + name ],
- doc[ "client" + name ]
- );
- }
-
- return value === undefined ?
- // Get width or height on the element, requesting but not forcing parseFloat
- jQuery.css( elem, type, value, extra ) :
-
- // Set width or height on the element
- jQuery.style( elem, type, value, extra );
- }, type, chainable ? margin : undefined, chainable, null );
- };
- });
-});
-// Expose jQuery to the global object
-window.jQuery = window.$ = jQuery;
-
-// Expose jQuery as an AMD module, but only for AMD loaders that
-// understand the issues with loading multiple versions of jQuery
-// in a page that all might call define(). The loader will indicate
-// they have special allowances for multiple jQuery versions by
-// specifying define.amd.jQuery = true. Register as a named module,
-// since jQuery can be concatenated with other files that may use define,
-// but not use a proper concatenation script that understands anonymous
-// AMD modules. A named AMD is safest and most robust way to register.
-// Lowercase jquery is used because AMD module names are derived from
-// file names, and jQuery is normally delivered in a lowercase file name.
-// Do this after creating the global so that if an AMD module wants to call
-// noConflict to hide this version of jQuery, it will work.
-if ( typeof define === "function" && define.amd && define.amd.jQuery ) {
- define( "jquery", [], function () { return jQuery; } );
-}
-
-})( window );
diff --git a/src/ceph/qa/workunits/erasure-code/plot.js b/src/ceph/qa/workunits/erasure-code/plot.js
deleted file mode 100644
index bd2bba5..0000000
--- a/src/ceph/qa/workunits/erasure-code/plot.js
+++ /dev/null
@@ -1,82 +0,0 @@
-$(function() {
- encode = [];
- if (typeof encode_vandermonde_isa != 'undefined') {
- encode.push({
- data: encode_vandermonde_isa,
- label: "ISA, Vandermonde",
- points: { show: true },
- lines: { show: true },
- });
- }
- if (typeof encode_vandermonde_jerasure != 'undefined') {
- encode.push({
- data: encode_vandermonde_jerasure,
- label: "Jerasure Generic, Vandermonde",
- points: { show: true },
- lines: { show: true },
- });
- }
- if (typeof encode_cauchy_isa != 'undefined') {
- encode.push({
- data: encode_cauchy_isa,
- label: "ISA, Cauchy",
- points: { show: true },
- lines: { show: true },
- });
- }
- if (typeof encode_cauchy_jerasure != 'undefined') {
- encode.push({
- data: encode_cauchy_jerasure,
- label: "Jerasure, Cauchy",
- points: { show: true },
- lines: { show: true },
- });
- }
- $.plot("#encode", encode, {
- xaxis: {
- mode: "categories",
- tickLength: 0
- },
- });
-
- decode = [];
- if (typeof decode_vandermonde_isa != 'undefined') {
- decode.push({
- data: decode_vandermonde_isa,
- label: "ISA, Vandermonde",
- points: { show: true },
- lines: { show: true },
- });
- }
- if (typeof decode_vandermonde_jerasure != 'undefined') {
- decode.push({
- data: decode_vandermonde_jerasure,
- label: "Jerasure Generic, Vandermonde",
- points: { show: true },
- lines: { show: true },
- });
- }
- if (typeof decode_cauchy_isa != 'undefined') {
- decode.push({
- data: decode_cauchy_isa,
- label: "ISA, Cauchy",
- points: { show: true },
- lines: { show: true },
- });
- }
- if (typeof decode_cauchy_jerasure != 'undefined') {
- decode.push({
- data: decode_cauchy_jerasure,
- label: "Jerasure, Cauchy",
- points: { show: true },
- lines: { show: true },
- });
- }
- $.plot("#decode", decode, {
- xaxis: {
- mode: "categories",
- tickLength: 0
- },
- });
-
-});
diff --git a/src/ceph/qa/workunits/false.sh b/src/ceph/qa/workunits/false.sh
deleted file mode 100644
index 8a961b3..0000000
--- a/src/ceph/qa/workunits/false.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh -ex
-
-false \ No newline at end of file
diff --git a/src/ceph/qa/workunits/fs/.gitignore b/src/ceph/qa/workunits/fs/.gitignore
deleted file mode 100644
index f7f7a06..0000000
--- a/src/ceph/qa/workunits/fs/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-test_o_trunc
diff --git a/src/ceph/qa/workunits/fs/Makefile b/src/ceph/qa/workunits/fs/Makefile
deleted file mode 100644
index c993425..0000000
--- a/src/ceph/qa/workunits/fs/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-CFLAGS = -Wall -Wextra -D_GNU_SOURCE
-
-TARGETS = test_o_trunc
-
-.c:
- $(CC) $(CFLAGS) $@.c -o $@
-
-all: $(TARGETS)
-
-clean:
- rm $(TARGETS)
diff --git a/src/ceph/qa/workunits/fs/misc/acl.sh b/src/ceph/qa/workunits/fs/misc/acl.sh
deleted file mode 100755
index 198b056..0000000
--- a/src/ceph/qa/workunits/fs/misc/acl.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/sh -x
-
-set -e
-mkdir -p testdir
-cd testdir
-
-set +e
-setfacl -d -m u:nobody:rw .
-if test $? != 0; then
- echo "Filesystem does not support ACL"
- exit 0
-fi
-
-expect_failure() {
- if "$@"; then return 1; else return 0; fi
-}
-
-set -e
-c=0
-while [ $c -lt 100 ]
-do
- c=`expr $c + 1`
- # inherited ACL from parent directory's default ACL
- mkdir d1
- c1=`getfacl d1 | grep -c "nobody:rw"`
- echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null
- c2=`getfacl d1 | grep -c "nobody:rw"`
- rmdir d1
- if [ $c1 -ne 2 ] || [ $c2 -ne 2 ]
- then
- echo "ERROR: incorrect ACLs"
- exit 1
- fi
-done
-
-mkdir d1
-
-# The ACL xattr only contains ACL header. ACL should be removed
-# in this case.
-setfattr -n system.posix_acl_access -v 0x02000000 d1
-setfattr -n system.posix_acl_default -v 0x02000000 .
-
-expect_failure getfattr -n system.posix_acl_access d1
-expect_failure getfattr -n system.posix_acl_default .
-
-
-rmdir d1
-cd ..
-rmdir testdir
-echo OK
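For context, the raw value 0x02000000 written above is just the 4-byte POSIX ACL xattr header (version 2, little-endian) with zero entries, which is why the filesystem is expected to drop the ACL. A minimal Python sketch of building that header-only value (an illustration, not part of the original workunit; assumes Linux and Python 3.3+ for os.setxattr):

    import os
    import struct

    ACL_XATTR_VERSION = 2  # POSIX_ACL_XATTR_VERSION in the kernel headers

    # b'\x02\x00\x00\x00' -- exactly the 0x02000000 passed to setfattr above
    header_only = struct.pack('<I', ACL_XATTR_VERSION)

    os.mkdir('d1')
    os.setxattr('d1', 'system.posix_acl_access', header_only)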
diff --git a/src/ceph/qa/workunits/fs/misc/chmod.sh b/src/ceph/qa/workunits/fs/misc/chmod.sh
deleted file mode 100755
index de66776..0000000
--- a/src/ceph/qa/workunits/fs/misc/chmod.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-check_perms() {
-
- file=$1
- r=$(ls -la ${file})
- if test $? != 0; then
- echo "ERROR: File listing/stat failed"
- exit 1
- fi
-
- perms=$2
- if test "${perms}" != $(echo ${r} | awk '{print $1}') && \
- test "${perms}." != $(echo ${r} | awk '{print $1}') && \
- test "${perms}+" != $(echo ${r} | awk '{print $1}'); then
- echo "ERROR: Permissions should be ${perms}"
- exit 1
- fi
-}
-
-file=test_chmod.$$
-
-echo "foo" > ${file}
-if test $? != 0; then
- echo "ERROR: Failed to create file ${file}"
- exit 1
-fi
-
-chmod 400 ${file}
-if test $? != 0; then
- echo "ERROR: Failed to change mode of ${file}"
- exit 1
-fi
-
-check_perms ${file} "-r--------"
-
-set +e
-echo "bar" >> ${file}
-if test $? = 0; then
- echo "ERROR: Write to read-only file should Fail"
- exit 1
-fi
-
-set -e
-chmod 600 ${file}
-echo "bar" >> ${file}
-if test $? != 0; then
- echo "ERROR: Write to writeable file failed"
- exit 1
-fi
-
-check_perms ${file} "-rw-------"
-
-echo "foo" >> ${file}
-if test $? != 0; then
- echo "ERROR: Failed to write to file"
- exit 1
-fi
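The check_perms helper above tolerates a trailing '.' or '+' because SELinux contexts and ACLs extend the mode string printed by ls. A Python sketch of the same check (illustrative only, not the workunit's code; stat.filemode needs Python 3.3+); it reads st_mode directly and so needs no suffix handling:

    import os
    import stat

    def check_perms(path, expected):
        # stat.filemode renders st_mode as the '-rw-------' style string
        actual = stat.filemode(os.stat(path).st_mode)
        if actual != expected:
            raise RuntimeError('permissions should be %s, got %s' % (expected, actual))

    check_perms('test_chmod.1234', '-rw-------')  # hypothetical file name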
diff --git a/src/ceph/qa/workunits/fs/misc/direct_io.py b/src/ceph/qa/workunits/fs/misc/direct_io.py
deleted file mode 100755
index b5c4226..0000000
--- a/src/ceph/qa/workunits/fs/misc/direct_io.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/python
-
-import json
-import mmap
-import os
-import subprocess
-
-
-def get_data_pool():
- cmd = ['ceph', 'fs', 'ls', '--format=json-pretty']
- proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- out = proc.communicate()[0]
- return json.loads(out)[0]['data_pools'][0]
-
-
-def main():
- fd = os.open("testfile", os.O_RDWR | os.O_CREAT | os.O_TRUNC | os.O_DIRECT, 0o644)
-
- ino = os.fstat(fd).st_ino
- obj_name = "{ino:x}.00000000".format(ino=ino)
- pool_name = get_data_pool()
-
- buf = mmap.mmap(-1, 1)
- buf.write('1')
- os.write(fd, buf)
-
- proc = subprocess.Popen(['rados', '-p', pool_name, 'get', obj_name, 'tmpfile'])
- proc.wait()
-
- with open('tmpfile', 'r') as tmpf:
- out = tmpf.read()
- if out != '1':
- raise RuntimeError("data were not written to object store directly")
-
- with open('tmpfile', 'w') as tmpf:
- tmpf.write('2')
-
- proc = subprocess.Popen(['rados', '-p', pool_name, 'put', obj_name, 'tmpfile'])
- proc.wait()
-
- os.lseek(fd, 0, os.SEEK_SET)
- out = os.read(fd, 1)
- if out != '2':
- raise RuntimeError("data were not directly read from object store")
-
- os.close(fd)
- print('ok')
-
-
-main()
diff --git a/src/ceph/qa/workunits/fs/misc/dirfrag.sh b/src/ceph/qa/workunits/fs/misc/dirfrag.sh
deleted file mode 100755
index c059f88..0000000
--- a/src/ceph/qa/workunits/fs/misc/dirfrag.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-
-set -e
-
-DEPTH=5
-COUNT=10000
-
-kill_jobs() {
- jobs -p | xargs kill
-}
-trap kill_jobs INT
-
-create_files() {
- for i in `seq 1 $COUNT`
- do
- touch file$i
- done
-}
-
-delete_files() {
- for i in `ls -f`
- do
- if [[ ${i}a = file*a ]]
- then
- rm -f $i
- fi
- done
-}
-
-rm -rf testdir
-mkdir testdir
-cd testdir
-
-echo "creating folder hierarchy"
-for i in `seq 1 $DEPTH`; do
- mkdir dir$i
- cd dir$i
- create_files &
-done
-wait
-
-echo "created hierarchy, now cleaning up"
-
-for i in `seq 1 $DEPTH`; do
- delete_files &
- cd ..
-done
-wait
-
-echo "cleaned up hierarchy"
-cd ..
-rm -rf testdir
diff --git a/src/ceph/qa/workunits/fs/misc/filelock_deadlock.py b/src/ceph/qa/workunits/fs/misc/filelock_deadlock.py
deleted file mode 100755
index 3ebc977..0000000
--- a/src/ceph/qa/workunits/fs/misc/filelock_deadlock.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/python
-
-import errno
-import fcntl
-import os
-import signal
-import struct
-import time
-
-
-def handler(signum, frame):
- pass
-
-
-def lock_two(f1, f2):
- lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 10, 0, 0)
- fcntl.fcntl(f1, fcntl.F_SETLKW, lockdata)
- time.sleep(10)
-
- # don't wait forever
- signal.signal(signal.SIGALRM, handler)
- signal.alarm(10)
- exitcode = 0
- try:
- fcntl.fcntl(f2, fcntl.F_SETLKW, lockdata)
- except IOError as e:
- if e.errno == errno.EDEADLK:
- exitcode = 1
- elif e.errno == errno.EINTR:
- exitcode = 2
- else:
- exitcode = 3
- os._exit(exitcode)
-
-
-def main():
- pid1 = os.fork()
- if pid1 == 0:
- f1 = open("testfile1", 'w')
- f2 = open("testfile2", 'w')
- lock_two(f1, f2)
-
- pid2 = os.fork()
- if pid2 == 0:
- f1 = open("testfile2", 'w')
- f2 = open("testfile3", 'w')
- lock_two(f1, f2)
-
- pid3 = os.fork()
- if pid3 == 0:
- f1 = open("testfile3", 'w')
- f2 = open("testfile1", 'w')
- lock_two(f1, f2)
-
- deadlk_count = 0
- i = 0
- while i < 3:
- pid, status = os.wait()
- exitcode = status >> 8
- if exitcode == 1:
- deadlk_count += 1
- elif exitcode != 0:
-            raise RuntimeError("unexpected exit code of child")
- i += 1
-
- if deadlk_count != 1:
-        raise RuntimeError("unexpected count of EDEADLK")
-
- print('ok')
-
-
-main()
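One note on the status handling above: os.wait() returns a raw 16-bit status, and `status >> 8` only yields the exit code when the child terminated normally. A sketch of the more explicit decoding with the standard helpers (an aside, not the workunit's code):

    import os

    def child_exit_code(status):
        # WIFEXITED distinguishes a normal exit from death by signal
        if os.WIFEXITED(status):
            return os.WEXITSTATUS(status)
        raise RuntimeError('child killed by signal %d' % os.WTERMSIG(status))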
diff --git a/src/ceph/qa/workunits/fs/misc/filelock_interrupt.py b/src/ceph/qa/workunits/fs/misc/filelock_interrupt.py
deleted file mode 100755
index 2a413a6..0000000
--- a/src/ceph/qa/workunits/fs/misc/filelock_interrupt.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/python
-
-import errno
-import fcntl
-import signal
-import struct
-
-"""
-OFD lock fcntl constants, introduced in Linux 3.15
-"""
-fcntl.F_OFD_GETLK = 36
-fcntl.F_OFD_SETLK = 37
-fcntl.F_OFD_SETLKW = 38
-
-
-def handler(signum, frame):
- pass
-
-
-def main():
- f1 = open("testfile", 'w')
- f2 = open("testfile", 'w')
-
- fcntl.flock(f1, fcntl.LOCK_SH | fcntl.LOCK_NB)
-
- """
-    is flock interruptible?
- """
- signal.signal(signal.SIGALRM, handler)
- signal.alarm(5)
- try:
- fcntl.flock(f2, fcntl.LOCK_EX)
- except IOError as e:
- if e.errno != errno.EINTR:
- raise
- else:
-        raise RuntimeError("expected flock to block")
-
- fcntl.flock(f1, fcntl.LOCK_UN)
-
- lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 10, 0, 0)
- try:
- fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata)
- except IOError as e:
- if e.errno != errno.EINVAL:
- raise
- else:
- print('kernel does not support fcntl.F_OFD_SETLK')
- return
-
- lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 10, 10, 0, 0)
- fcntl.fcntl(f2, fcntl.F_OFD_SETLK, lockdata)
-
- """
-    is posix lock interruptible?
- """
- signal.signal(signal.SIGALRM, handler)
- signal.alarm(5)
- try:
- lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
- fcntl.fcntl(f2, fcntl.F_OFD_SETLKW, lockdata)
- except IOError as e:
- if e.errno != errno.EINTR:
- raise
- else:
-        raise RuntimeError("expected posix lock to block")
-
- """
-    file handle 2 should still hold the lock on 10~10
- """
- try:
- lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 10, 10, 0, 0)
- fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata)
- except IOError as e:
- if e.errno == errno.EAGAIN:
- pass
- else:
-            raise RuntimeError("expected file handle 2 to hold the lock on 10~10")
-
- lockdata = struct.pack('hhllhh', fcntl.F_UNLCK, 0, 0, 0, 0, 0)
- fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata)
- fcntl.fcntl(f2, fcntl.F_OFD_SETLK, lockdata)
-
- print('ok')
-
-
-main()
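The test defines F_OFD_SETLK/F_OFD_SETLKW but never uses the companion F_OFD_GETLK (36). For reference, a sketch of probing whether a byte range is locked with it (illustrative; assumes Linux >= 3.15 and the same 'hhllhh' flock layout used above):

    import fcntl
    import struct

    F_OFD_GETLK = 36

    def range_is_locked(f, start, length):
        probe = struct.pack('hhllhh', fcntl.F_WRLCK, 0, start, length, 0, 0)
        result = fcntl.fcntl(f, F_OFD_GETLK, probe)
        # the kernel rewrites l_type in place: F_UNLCK means the range is free
        l_type = struct.unpack('hhllhh', result)[0]
        return l_type != fcntl.F_UNLCK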
diff --git a/src/ceph/qa/workunits/fs/misc/i_complete_vs_rename.sh b/src/ceph/qa/workunits/fs/misc/i_complete_vs_rename.sh
deleted file mode 100755
index a9b9827..0000000
--- a/src/ceph/qa/workunits/fs/misc/i_complete_vs_rename.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-
-set -e
-
-mkdir x
-cd x
-touch a
-touch b
-touch c
-touch d
-ls
-chmod 777 .
-stat e || true
-touch f
-touch g
-
-# over existing file
-echo attempting rename over existing file...
-touch ../xx
-mv ../xx f
-ls | grep f || false
-echo rename over existing file is okay
-
-# over negative dentry
-echo attempting rename over negative dentry...
-touch ../xx
-mv ../xx e
-ls | grep e || false
-echo rename over negative dentry is ok
-
-echo OK
diff --git a/src/ceph/qa/workunits/fs/misc/layout_vxattrs.sh b/src/ceph/qa/workunits/fs/misc/layout_vxattrs.sh
deleted file mode 100755
index 29ac407..0000000
--- a/src/ceph/qa/workunits/fs/misc/layout_vxattrs.sh
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/bin/bash -x
-
-set -e
-set -x
-
-# detect data pool
-datapool=
-dir=.
-while true ; do
- echo $dir
- datapool=$(getfattr -n ceph.dir.layout.pool $dir --only-values) && break
- dir=$dir/..
-done
-
-# file
-rm -f file file2
-touch file file2
-
-getfattr -n ceph.file.layout file
-getfattr -n ceph.file.layout file | grep -q object_size=
-getfattr -n ceph.file.layout file | grep -q stripe_count=
-getfattr -n ceph.file.layout file | grep -q stripe_unit=
-getfattr -n ceph.file.layout file | grep -q pool=
-getfattr -n ceph.file.layout.pool file
-getfattr -n ceph.file.layout.pool_namespace file
-getfattr -n ceph.file.layout.stripe_unit file
-getfattr -n ceph.file.layout.stripe_count file
-getfattr -n ceph.file.layout.object_size file
-
-getfattr -n ceph.file.layout.bogus file 2>&1 | grep -q 'No such attribute'
-getfattr -n ceph.dir.layout file 2>&1 | grep -q 'No such attribute'
-
-setfattr -n ceph.file.layout.stripe_unit -v 1048576 file2
-setfattr -n ceph.file.layout.stripe_count -v 8 file2
-setfattr -n ceph.file.layout.object_size -v 10485760 file2
-
-setfattr -n ceph.file.layout.pool -v $datapool file2
-getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
-setfattr -n ceph.file.layout.pool_namespace -v foons file2
-getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons
-setfattr -x ceph.file.layout.pool_namespace file2
-getfattr -n ceph.file.layout.pool_namespace file2 | grep -q -v foons
-
-getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 1048576
-getfattr -n ceph.file.layout.stripe_count file2 | grep -q 8
-getfattr -n ceph.file.layout.object_size file2 | grep -q 10485760
-
-setfattr -n ceph.file.layout -v "stripe_unit=4194304 stripe_count=16 object_size=41943040 pool=$datapool pool_namespace=foons" file2
-getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 4194304
-getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16
-getfattr -n ceph.file.layout.object_size file2 | grep -q 41943040
-getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
-getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons
-
-setfattr -n ceph.file.layout -v "stripe_unit=1048576" file2
-getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 1048576
-getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16
-getfattr -n ceph.file.layout.object_size file2 | grep -q 41943040
-getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
-getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons
-
-setfattr -n ceph.file.layout -v "stripe_unit=2097152 stripe_count=4 object_size=2097152 pool=$datapool pool_namespace=barns" file2
-getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 2097152
-getfattr -n ceph.file.layout.stripe_count file2 | grep -q 4
-getfattr -n ceph.file.layout.object_size file2 | grep -q 2097152
-getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
-getfattr -n ceph.file.layout.pool_namespace file2 | grep -q barns
-
-# dir
-rm -f dir/file || true
-rmdir dir || true
-mkdir -p dir
-
-getfattr -d -m - dir | grep -q ceph.dir.layout && exit 1 || true
-getfattr -d -m - dir | grep -q ceph.file.layout && exit 1 || true
-getfattr -n ceph.dir.layout dir && exit 1 || true
-
-setfattr -n ceph.dir.layout.stripe_unit -v 1048576 dir
-setfattr -n ceph.dir.layout.stripe_count -v 8 dir
-setfattr -n ceph.dir.layout.object_size -v 10485760 dir
-setfattr -n ceph.dir.layout.pool -v $datapool dir
-setfattr -n ceph.dir.layout.pool_namespace -v dirns dir
-
-getfattr -n ceph.dir.layout dir
-getfattr -n ceph.dir.layout dir | grep -q object_size=10485760
-getfattr -n ceph.dir.layout dir | grep -q stripe_count=8
-getfattr -n ceph.dir.layout dir | grep -q stripe_unit=1048576
-getfattr -n ceph.dir.layout dir | grep -q pool=$datapool
-getfattr -n ceph.dir.layout dir | grep -q pool_namespace=dirns
-getfattr -n ceph.dir.layout.pool dir | grep -q $datapool
-getfattr -n ceph.dir.layout.stripe_unit dir | grep -q 1048576
-getfattr -n ceph.dir.layout.stripe_count dir | grep -q 8
-getfattr -n ceph.dir.layout.object_size dir | grep -q 10485760
-getfattr -n ceph.dir.layout.pool_namespace dir | grep -q dirns
-
-
-setfattr -n ceph.file.layout -v "stripe_count=16" file2
-getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16
-setfattr -n ceph.file.layout -v "object_size=10485760 stripe_count=8 stripe_unit=1048576 pool=$datapool pool_namespace=dirns" file2
-getfattr -n ceph.file.layout.stripe_count file2 | grep -q 8
-
-touch dir/file
-getfattr -n ceph.file.layout.pool dir/file | grep -q $datapool
-getfattr -n ceph.file.layout.stripe_unit dir/file | grep -q 1048576
-getfattr -n ceph.file.layout.stripe_count dir/file | grep -q 8
-getfattr -n ceph.file.layout.object_size dir/file | grep -q 10485760
-getfattr -n ceph.file.layout.pool_namespace dir/file | grep -q dirns
-
-setfattr -x ceph.dir.layout.pool_namespace dir
-getfattr -n ceph.dir.layout dir | grep -q -v pool_namespace=dirns
-
-setfattr -x ceph.dir.layout dir
-getfattr -n ceph.dir.layout dir 2>&1 | grep -q 'No such attribute'
-
-echo OK
-
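The same layout vxattrs can be driven without setfattr/getfattr; a Python sketch (illustrative only; 'cephfs_data' is a placeholder pool name, detect the real one the way the script does):

    import os

    datapool = b'cephfs_data'  # placeholder; not from the original script
    os.setxattr('file2', 'ceph.file.layout.stripe_unit', b'1048576')
    os.setxattr('file2', 'ceph.file.layout.pool', datapool)
    print(os.getxattr('file2', 'ceph.file.layout').decode())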
diff --git a/src/ceph/qa/workunits/fs/misc/mkpool_layout_vxattrs.sh b/src/ceph/qa/workunits/fs/misc/mkpool_layout_vxattrs.sh
deleted file mode 100755
index 91d3166..0000000
--- a/src/ceph/qa/workunits/fs/misc/mkpool_layout_vxattrs.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-set -e
-
-touch foo.$$
-rados mkpool foo.$$
-ceph mds add_data_pool foo.$$
-setfattr -n ceph.file.layout.pool -v foo.$$ foo.$$
-
-# cleanup
-rm foo.$$
-ceph mds remove_data_pool foo.$$
-rados rmpool foo.$$ foo.$$ --yes-i-really-really-mean-it
-
-echo OK
diff --git a/src/ceph/qa/workunits/fs/misc/multiple_rsync.sh b/src/ceph/qa/workunits/fs/misc/multiple_rsync.sh
deleted file mode 100755
index 4397c1e..0000000
--- a/src/ceph/qa/workunits/fs/misc/multiple_rsync.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/sh -ex
-
-
-# Populate with some arbitrary files from the local system. Take
-# a copy to protect against false fails from system updates during test.
-export PAYLOAD=/tmp/multiple_rsync_payload.$$
-sudo cp -r /usr/lib/ $PAYLOAD
-
-set -e
-
-sudo rsync -av $PAYLOAD payload.1
-sudo rsync -av $PAYLOAD payload.2
-
-# this shouldn't transfer any additional files
-echo we should get 4 here if no additional files are transferred
-sudo rsync -auv $PAYLOAD payload.1 | tee /tmp/$$
-hexdump -C /tmp/$$
-wc -l /tmp/$$ | grep 4
-sudo rsync -auv $PAYLOAD payload.2 | tee /tmp/$$
-hexdump -C /tmp/$$
-wc -l /tmp/$$ | grep 4
-echo OK
-
-rm /tmp/$$
-sudo rm -rf $PAYLOAD
diff --git a/src/ceph/qa/workunits/fs/misc/trivial_sync.sh b/src/ceph/qa/workunits/fs/misc/trivial_sync.sh
deleted file mode 100755
index 68e4072..0000000
--- a/src/ceph/qa/workunits/fs/misc/trivial_sync.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-set -e
-
-mkdir foo
-echo foo > bar
-sync
diff --git a/src/ceph/qa/workunits/fs/misc/xattrs.sh b/src/ceph/qa/workunits/fs/misc/xattrs.sh
deleted file mode 100755
index fcd94d2..0000000
--- a/src/ceph/qa/workunits/fs/misc/xattrs.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-touch file
-
-setfattr -n user.foo -v foo file
-setfattr -n user.bar -v bar file
-setfattr -n user.empty file
-getfattr -d file | grep foo
-getfattr -d file | grep bar
-getfattr -d file | grep empty
-
-echo OK.
diff --git a/src/ceph/qa/workunits/fs/multiclient_sync_read_eof.py b/src/ceph/qa/workunits/fs/multiclient_sync_read_eof.py
deleted file mode 100755
index d3e0f8e..0000000
--- a/src/ceph/qa/workunits/fs/multiclient_sync_read_eof.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/python
-
-import argparse
-import os
-import sys
-import time
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument('mnt1')
- parser.add_argument('mnt2')
- parser.add_argument('fn')
- args = parser.parse_args()
-
- open(os.path.join(args.mnt1, args.fn), 'w')
- f1 = open(os.path.join(args.mnt1, args.fn), 'r+')
- f2 = open(os.path.join(args.mnt2, args.fn), 'r+')
-
- f1.write('foo')
- f1.flush()
- a = f2.read(3)
- print('got "%s"' % a)
- assert a == 'foo'
- f2.write('bar')
- f2.flush()
- a = f1.read(3)
- print('got "%s"' % a)
- assert a == 'bar'
-
- ## test short reads
- f1.write('short')
- f1.flush()
- a = f2.read(100)
- print('got "%s"' % a)
- assert a == 'short'
- f2.write('longer')
- f2.flush()
- a = f1.read(1000)
- print('got "%s"' % a)
- assert a == 'longer'
-
- print('ok')
-
-main()
diff --git a/src/ceph/qa/workunits/fs/norstats/kernel_untar_tar.sh b/src/ceph/qa/workunits/fs/norstats/kernel_untar_tar.sh
deleted file mode 100755
index 63f8c74..0000000
--- a/src/ceph/qa/workunits/fs/norstats/kernel_untar_tar.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-# check whether any files change while they are being archived
-
-set -e
-
-KERNEL=linux-4.0.5
-
-wget -q http://download.ceph.com/qa/$KERNEL.tar.xz
-
-mkdir untar_tar
-cd untar_tar
-
-tar Jxvf ../$KERNEL.tar.xz $KERNEL/Documentation/
-tar cf doc.tar $KERNEL
-
-tar xf doc.tar
-sync
-tar c $KERNEL >/dev/null
-
-rm -rf $KERNEL
-
-tar xf doc.tar
-sync
-tar c $KERNEL >/dev/null
-
-echo Ok
diff --git a/src/ceph/qa/workunits/fs/quota/quota.sh b/src/ceph/qa/workunits/fs/quota/quota.sh
deleted file mode 100755
index ff27a61..0000000
--- a/src/ceph/qa/workunits/fs/quota/quota.sh
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/bin/bash
-
-set -e
-set -x
-
-function expect_false()
-{
- set -x
- if "$@"; then return 1; else return 0; fi
-}
-
-function write_file()
-{
- set +x
- for ((i=1;i<=$2;i++))
- do
- dd if=/dev/zero of=$1 bs=1M count=1 conv=notrunc oflag=append 2>/dev/null >/dev/null
- if [ $? != 0 ]; then
- echo Try to write $(($i * 1048576))
- set -x
- return 1
- fi
- sleep 0.05
- done
- set -x
- return 0
-}
-
-mkdir quota-test
-cd quota-test
-
-# bytes
-setfattr . -n ceph.quota.max_bytes -v 100000000 # 100m
-expect_false write_file big 1000 # 1g
-expect_false write_file second 10
-setfattr . -n ceph.quota.max_bytes -v 0
-dd if=/dev/zero of=third bs=1M count=10
-dd if=/dev/zero of=big2 bs=1M count=100
-
-
-rm -rf *
-
-# files
-setfattr . -n ceph.quota.max_files -v 5
-mkdir ok
-touch ok/1
-touch ok/2
-touch 3
-expect_false touch shouldbefail # 5 files will include the "."
-expect_false touch ok/shouldbefail # 5 files will include the "."
-setfattr . -n ceph.quota.max_files -v 0
-touch shouldbecreated
-touch shouldbecreated2
-
-
-rm -rf *
-
-# mix
-mkdir bytes bytes/files
-
-setfattr bytes -n ceph.quota.max_bytes -v 10000000 #10m
-setfattr bytes/files -n ceph.quota.max_files -v 5
-dd if=/dev/zero of=bytes/files/1 bs=1M count=4
-dd if=/dev/zero of=bytes/files/2 bs=1M count=4
-expect_false write_file bytes/files/3 1000
-expect_false write_file bytes/files/4 1000
-expect_false write_file bytes/files/5 1000
-stat --printf="%n %s\n" bytes/files/1 #4M
-stat --printf="%n %s\n" bytes/files/2 #4M
-stat --printf="%n %s\n" bytes/files/3 #bigger than 2M
-stat --printf="%n %s\n" bytes/files/4 #should be zero
-expect_false stat bytes/files/5 #shouldn't exist
-
-
-
-
-rm -rf *
-
-#mv
-mkdir files limit
-truncate files/file -s 10G
-setfattr limit -n ceph.quota.max_bytes -v 1000000 #1m
-expect_false mv files limit/
-
-
-
-rm -rf *
-
-#limit by ancestor
-
-mkdir -p ancestor/p1/p2/parent/p3
-setfattr ancestor -n ceph.quota.max_bytes -v 1000000
-setfattr ancestor/p1/p2/parent -n ceph.quota.max_bytes -v 1000000000 #1g
-expect_false write_file ancestor/p1/p2/parent/p3/file1 900 #900m
-stat --printf="%n %s\n" ancestor/p1/p2/parent/p3/file1
-
-
-#get/set attribute
-
-setfattr -n ceph.quota.max_bytes -v 0 .
-setfattr -n ceph.quota.max_bytes -v 1 .
-setfattr -n ceph.quota.max_bytes -v 9223372036854775807 .
-expect_false setfattr -n ceph.quota.max_bytes -v 9223372036854775808 .
-expect_false setfattr -n ceph.quota.max_bytes -v -1 .
-expect_false setfattr -n ceph.quota.max_bytes -v -9223372036854775808 .
-expect_false setfattr -n ceph.quota.max_bytes -v -9223372036854775809 .
-
-setfattr -n ceph.quota.max_files -v 0 .
-setfattr -n ceph.quota.max_files -v 1 .
-setfattr -n ceph.quota.max_files -v 9223372036854775807 .
-expect_false setfattr -n ceph.quota.max_files -v 9223372036854775808 .
-expect_false setfattr -n ceph.quota.max_files -v -1 .
-expect_false setfattr -n ceph.quota.max_files -v -9223372036854775808 .
-expect_false setfattr -n ceph.quota.max_files -v -9223372036854775809 .
-
-setfattr -n ceph.quota -v "max_bytes=0 max_files=0" .
-setfattr -n ceph.quota -v "max_bytes=1 max_files=0" .
-setfattr -n ceph.quota -v "max_bytes=0 max_files=1" .
-setfattr -n ceph.quota -v "max_bytes=1 max_files=1" .
-expect_false setfattr -n ceph.quota -v "max_bytes=-1 max_files=0" .
-expect_false setfattr -n ceph.quota -v "max_bytes=0 max_files=-1" .
-expect_false setfattr -n ceph.quota -v "max_bytes=-1 max_files=-1" .
-
-#addme
-
-cd ..
-rm -rf quota-test
-
-echo OK
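For reference, the quota vxattrs exercised above can also be read back programmatically; a hedged Python sketch (not part of the workunit; a missing attribute is treated as "no quota", matching the value 0 used in the script):

    import os

    def get_quota(path):
        limits = {}
        for name in ('ceph.quota.max_bytes', 'ceph.quota.max_files'):
            try:
                limits[name] = int(os.getxattr(path, name))
            except OSError:
                limits[name] = 0  # attribute absent: no quota configured
        return limits

    print(get_quota('quota-test'))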
diff --git a/src/ceph/qa/workunits/fs/snaps/snap-rm-diff.sh b/src/ceph/qa/workunits/fs/snaps/snap-rm-diff.sh
deleted file mode 100755
index c1b6c24..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snap-rm-diff.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/sh -ex
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-wget -q http://download.ceph.com/qa/linux-2.6.33.tar.bz2
-mkdir foo
-cp linux* foo
-mkdir foo/.snap/barsnap
-rm foo/linux*
-diff -q foo/.snap/barsnap/linux* linux* && echo "passed: files are identical"
-rmdir foo/.snap/barsnap
-echo OK
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-0.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-0.sh
deleted file mode 100755
index b57763a..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-0.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh -x
-
-expect_failure() {
- if "$@"; then return 1; else return 0; fi
-}
-set -e
-
-ceph mds set allow_new_snaps false
-expect_failure mkdir .snap/foo
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-echo asdf > foo
-mkdir .snap/foo
-grep asdf .snap/foo/foo
-rmdir .snap/foo
-
-echo asdf > bar
-mkdir .snap/bar
-rm bar
-grep asdf .snap/bar/bar
-rmdir .snap/bar
-rm foo
-
-ceph mds set allow_new_snaps false
-expect_failure mkdir .snap/baz
-
-echo OK
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-1.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-1.sh
deleted file mode 100755
index f8fb614..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-1.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash -x
-
-set -e
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-echo 1 > file1
-echo 2 > file2
-echo 3 > file3
-[ -e file4 ] && rm file4
-mkdir .snap/snap1
-echo 4 > file4
-now=`ls`
-then=`ls .snap/snap1`
-rmdir .snap/snap1
-if [ "$now" = "$then" ]; then
- echo live and snap contents are identical?
- false
-fi
-
-# do it again
-echo 1 > file1
-echo 2 > file2
-echo 3 > file3
-mkdir .snap/snap1
-echo 4 > file4
-rmdir .snap/snap1
-
-rm file?
-
-echo OK \ No newline at end of file
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-2.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-2.sh
deleted file mode 100755
index b2458d9..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-2.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/bash
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-echo "Create dir 100 to 199 ..."
-for i in $(seq 100 199); do
- echo " create dir $i"
- mkdir "$i"
- for y in $(seq 10 20); do
- echo "This is a test file before any snapshot was taken." >"$i/$y"
- done
-done
-
-echo "Take first snapshot .snap/test1"
-mkdir .snap/test1
-
-echo "Create dir 200 to 299 ..."
-for i in $(seq 200 299); do
- echo " create dir $i"
- mkdir $i
- for y in $(seq 20 29); do
- echo "This is a test file. Created after .snap/test1" >"$i/$y"
- done
-done
-
-echo "Create a snapshot in every first level dir ..."
-for dir in $(ls); do
- echo " create $dir/.snap/snap-subdir-test"
- mkdir "$dir/.snap/snap-subdir-test"
- for y in $(seq 30 39); do
- echo " create $dir/$y file after the snapshot"
- echo "This is a test file. Created after $dir/.snap/snap-subdir-test" >"$dir/$y"
- done
-done
-
-echo "Take second snapshot .snap/test2"
-mkdir .snap/test2
-
-echo "Copy content of .snap/test1 to copyofsnap1 ..."
-mkdir copyofsnap1
-cp -Rv .snap/test1 copyofsnap1/
-
-
-echo "Take third snapshot .snap/test3"
-mkdir .snap/test3
-
-echo "Delete the snapshots..."
-
-find ./ -type d -print | \
- xargs -I% -n1 find %/.snap -mindepth 1 -maxdepth 1 \
- \( ! -name "_*" \) -print 2>/dev/null
-
-find ./ -type d -print | \
- xargs -I% -n1 find %/.snap -mindepth 1 -maxdepth 1 \
- \( ! -name "_*" \) -print 2>/dev/null | \
- xargs -n1 rmdir
-
-echo "Delete all the files and directories ..."
-rm -Rfv ./*
-
-echo OK
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-authwb.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-authwb.sh
deleted file mode 100755
index 9dd9845..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-authwb.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-touch foo
-chmod +x foo
-mkdir .snap/s
-find .snap/s/foo -executable | grep foo
-rmdir .snap/s
-rm foo
-
-echo OK \ No newline at end of file
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-capwb.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-capwb.sh
deleted file mode 100755
index 3b6a01a..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-capwb.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-mkdir foo
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-# make sure mds handles it when the client does not send flushsnap
-echo x > foo/x
-sync
-mkdir foo/.snap/ss
-ln foo/x foo/xx
-cat foo/.snap/ss/x
-rmdir foo/.snap/ss
-
-#
-echo a > foo/a
-echo b > foo/b
-mkdir foo/.snap/s
-r=`cat foo/.snap/s/a`
-[ -z "$r" ] && echo "a appears empty in snapshot" && false
-
-ln foo/b foo/b2
-cat foo/.snap/s/b
-
-echo "this used to hang:"
-echo more >> foo/b2
-echo "oh, it didn't hang! good job."
-cat foo/b
-rmdir foo/.snap/s
-
-rm -r foo
-
-echo OK \ No newline at end of file
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-dir-rename.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-dir-rename.sh
deleted file mode 100755
index b98358a..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-dir-rename.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-#
-# make sure we keep an existing dn's seq
-#
-
-mkdir a
-mkdir .snap/bar
-mkdir a/.snap/foo
-rmdir a/.snap/foo
-rmdir a
-stat .snap/bar/a
-rmdir .snap/bar
-
-echo OK \ No newline at end of file
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-double-null.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-double-null.sh
deleted file mode 100755
index b547213..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-double-null.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-# multiple intervening snapshots with no modifications, and thus no
-# snapflush client_caps messages. make sure the mds can handle this.
-
-for f in `seq 1 20` ; do
-
-mkdir a
-cat > a/foo &
-mkdir a/.snap/one
-mkdir a/.snap/two
-chmod 777 a/foo
-sync # this might crash the mds
-ps
-rmdir a/.snap/*
-rm a/foo
-rmdir a
-
-done
-
-echo OK
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-estale.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-estale.sh
deleted file mode 100755
index 1465a35..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-estale.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh -x
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-mkdir .snap/foo
-
-echo "We want ENOENT, not ESTALE, here."
-for f in `seq 1 100`
-do
- stat .snap/foo/$f 2>&1 | grep 'No such file'
-done
-
-rmdir .snap/foo
-
-echo "OK"
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-git-ceph.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-git-ceph.sh
deleted file mode 100755
index 1769fe8..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-git-ceph.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-git clone git://git.ceph.com/ceph.git
-cd ceph
-
-versions=`seq 1 21`
-
-for v in $versions
-do
- ver="v0.$v"
- echo $ver
- git reset --hard $ver
- mkdir .snap/$ver
-done
-
-for v in $versions
-do
- ver="v0.$v"
- echo checking $ver
- cd .snap/$ver
- git diff --exit-code
- cd ../..
-done
-
-for v in $versions
-do
- ver="v0.$v"
- rmdir .snap/$ver
-done
-
-echo OK
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-intodir.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-intodir.sh
deleted file mode 100755
index 729baa1..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-intodir.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/sh -ex
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-# this tests the fix for #1399
-mkdir foo
-mkdir foo/.snap/one
-touch bar
-mv bar foo
-sync
-# should not crash :)
-
-mkdir baz
-mkdir baz/.snap/two
-mv baz foo
-sync
-# should not crash :)
-
-# clean up.
-rmdir foo/baz/.snap/two
-rmdir foo/.snap/one
-rm -r foo
-
-echo OK \ No newline at end of file
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh
deleted file mode 100755
index bc58bac..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-echo asdf > a
-mkdir .snap/1
-chmod 777 a
-mkdir .snap/2
-echo qwer > a
-mkdir .snap/3
-chmod 666 a
-mkdir .snap/4
-echo zxcv > a
-mkdir .snap/5
-
-ls -al .snap/?/a
-
-grep asdf .snap/1/a
-stat .snap/1/a | grep 'Size: 5'
-
-grep asdf .snap/2/a
-stat .snap/2/a | grep 'Size: 5'
-stat .snap/2/a | grep -- '-rwxrwxrwx'
-
-grep qwer .snap/3/a
-stat .snap/3/a | grep 'Size: 5'
-stat .snap/3/a | grep -- '-rwxrwxrwx'
-
-grep qwer .snap/4/a
-stat .snap/4/a | grep 'Size: 5'
-stat .snap/4/a | grep -- '-rw-rw-rw-'
-
-grep zxcv .snap/5/a
-stat .snap/5/a | grep 'Size: 5'
-stat .snap/5/a | grep -- '-rw-rw-rw-'
-
-rmdir .snap/[12345]
-
-echo "OK"
-
-
-
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-parents.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-parents.sh
deleted file mode 100755
index 6b76fdb..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-parents.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-
-set -e
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-echo "making directory tree and files"
-mkdir -p 1/a/b/c/
-echo "i'm file1" > 1/a/file1
-echo "i'm file2" > 1/a/b/file2
-echo "i'm file3" > 1/a/b/c/file3
-echo "snapshotting"
-mkdir 1/.snap/foosnap1
-mkdir 2
-echo "moving tree"
-mv 1/a 2
-echo "checking snapshot contains tree..."
-dir1=`find 1/.snap/foosnap1 | wc -w`
-dir2=`find 2/ | wc -w`
-#diff $dir1 $dir2 && echo "Success!"
-test "$dir1" -eq "$dir2" && echo "Success!"
-echo "adding folder and file to tree..."
-mkdir 2/a/b/c/d
-echo "i'm file 4!" > 2/a/b/c/d/file4
-echo "snapshotting tree 2"
-mkdir 2/.snap/barsnap2
-echo "comparing snapshots"
-dir1=`find 1/.snap/foosnap1/ -maxdepth 2 | wc -w`
-dir2=`find 2/.snap/barsnap2/ -maxdepth 2 | wc -w`
-#diff $dir1 $dir2 && echo "Success!"
-test "$dir1" -eq "$dir2" && echo "Success!"
-echo "moving subtree to first folder"
-mv 2/a/b/c 1
-echo "comparing snapshots and new tree"
-dir1=`find 1/ | wc -w`
-dir2=`find 2/.snap/barsnap2/a/b/c | wc -w`
-#diff $dir1 $dir2 && echo "Success!"
-test "$dir1" -eq "$dir2" && echo "Success!"
-rmdir 1/.snap/*
-rmdir 2/.snap/*
-echo "OK"
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-snap-rename.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-snap-rename.sh
deleted file mode 100755
index e48b10b..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-snap-rename.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh -x
-
-expect_failure() {
- if "$@"; then return 1; else return 0; fi
-}
-set -e
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-mkdir -p d1/d2
-mkdir -p d1/d3
-mkdir d1/.snap/foo
-mkdir d1/d2/.snap/foo
-mkdir d1/d3/.snap/foo
-mkdir d1/d3/.snap/bar
-mv d1/d2/.snap/foo d1/d2/.snap/bar
-# snapshot name can't start with _
-expect_failure mv d1/d2/.snap/bar d1/d2/.snap/_bar
-# can't rename parent snapshot
-expect_failure mv d1/d2/.snap/_foo_* d1/d2/.snap/foo
-expect_failure mv d1/d2/.snap/_foo_* d1/d2/.snap/_foo_1
-# can't rename a snapshot to a different directory
-expect_failure mv d1/d2/.snap/bar d1/.snap/
-# can't overwrite existing snapshot
-expect_failure python -c "import os; os.rename('d1/d3/.snap/foo', 'd1/d3/.snap/bar')"
-# can't move a snapshot out of the snapdir
-expect_failure python -c "import os; os.rename('d1/.snap/foo', 'd1/foo')"
-
-rmdir d1/.snap/foo
-rmdir d1/d2/.snap/bar
-rmdir d1/d3/.snap/foo
-rmdir d1/d3/.snap/bar
-rm -rf d1
-
-echo OK
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh
deleted file mode 100755
index 8b1ca5b..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-file=linux-2.6.33.tar.bz2
-wget -q http://download.ceph.com/qa/$file
-
-real=`md5sum $file | awk '{print $1}'`
-
-for f in `seq 1 20`
-do
- echo $f
- cp $file a
- mkdir .snap/s
- rm a
- cp .snap/s/a /tmp/a
- cur=`md5sum /tmp/a | awk '{print $1}'`
- if [ "$cur" != "$real" ]; then
- echo "FAIL: bad match, /tmp/a $cur != real $real"
- false
- fi
- rmdir .snap/s
-done
-rm $file
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-upchildrealms.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-upchildrealms.sh
deleted file mode 100755
index 64a99ea..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-upchildrealms.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-#
-# verify that a snap update on a parent realm will induce
-# snap cap writeback for inodes child realms
-#
-
-mkdir a
-mkdir a/b
-mkdir a/.snap/a1
-mkdir a/b/.snap/b1
-echo asdf > a/b/foo
-mkdir a/.snap/a2
-# client _should_ have just queued a capsnap for writeback
-ln a/b/foo a/b/bar # make the server cow the inode
-
-echo "this should not hang..."
-cat a/b/.snap/_a2_*/foo
-echo "good, it did not hang."
-
-rmdir a/b/.snap/b1
-rmdir a/.snap/a1
-rmdir a/.snap/a2
-rm -r a
-
-echo "OK" \ No newline at end of file
diff --git a/src/ceph/qa/workunits/fs/snaps/snaptest-xattrwb.sh b/src/ceph/qa/workunits/fs/snaps/snaptest-xattrwb.sh
deleted file mode 100755
index af28b63..0000000
--- a/src/ceph/qa/workunits/fs/snaps/snaptest-xattrwb.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-echo "testing simple xattr wb"
-touch x
-setfattr -n user.foo x
-mkdir .snap/s1
-getfattr -n user.foo .snap/s1/x | grep user.foo
-rm x
-rmdir .snap/s1
-
-echo "testing wb with pre-wb server cow"
-mkdir a
-mkdir a/b
-mkdir a/b/c
-# b now has As but not Ax
-setfattr -n user.foo a/b
-mkdir a/.snap/s
-mkdir a/b/cc
-# b has now been cowed on the server, but we still have dirty xattr caps
-getfattr -n user.foo a/b # there they are...
-getfattr -n user.foo a/.snap/s/b | grep user.foo # should be there, too!
-
-# ok, clean up
-rmdir a/.snap/s
-rm -r a
-
-echo OK \ No newline at end of file
diff --git a/src/ceph/qa/workunits/fs/snaps/untar_snap_rm.sh b/src/ceph/qa/workunits/fs/snaps/untar_snap_rm.sh
deleted file mode 100755
index b337aea..0000000
--- a/src/ceph/qa/workunits/fs/snaps/untar_snap_rm.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-
-set -e
-
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
-
-do_tarball() {
- wget http://download.ceph.com/qa/$1
- tar xvf$2 $1
- mkdir .snap/k
- sync
- rm -rv $3
- cp -av .snap/k .
- rmdir .snap/k
- rm -rv k
- rm $1
-}
-
-do_tarball coreutils_8.5.orig.tar.gz z coreutils-8.5
-do_tarball linux-2.6.33.tar.bz2 j linux-2.6.33
diff --git a/src/ceph/qa/workunits/fs/test_o_trunc.c b/src/ceph/qa/workunits/fs/test_o_trunc.c
deleted file mode 100644
index 1ce19e4..0000000
--- a/src/ceph/qa/workunits/fs/test_o_trunc.c
+++ /dev/null
@@ -1,45 +0,0 @@
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <string.h>
-#include <stdlib.h>
-
-int main(int argc, char *argv[])
-{
- char obuf[32], ibuf[1024];
- int n, max = 0;
-
- if (argc > 2)
- max = atoi(argv[2]);
- if (!max)
- max = 600;
-
- memset(obuf, 0xff, sizeof(obuf));
-
- for (n = 1; n <= max; ++n) {
- int fd, ret;
- fd = open(argv[1], O_RDWR | O_CREAT | O_TRUNC, 0644);
- printf("%d/%d: open fd = %d\n", n, max, fd);
-
- ret = write(fd, obuf, sizeof(obuf));
- printf("write ret = %d\n", ret);
-
- sleep(1);
-
- ret = write(fd, obuf, sizeof(obuf));
- printf("write ret = %d\n", ret);
-
- ret = pread(fd, ibuf, sizeof(ibuf), 0);
- printf("pread ret = %d\n", ret);
-
- if (memcmp(obuf, ibuf, sizeof(obuf))) {
- printf("mismatch\n");
- close(fd);
- break;
- }
- close(fd);
- }
- return 0;
-}
diff --git a/src/ceph/qa/workunits/fs/test_o_trunc.sh b/src/ceph/qa/workunits/fs/test_o_trunc.sh
deleted file mode 100755
index 90a7260..0000000
--- a/src/ceph/qa/workunits/fs/test_o_trunc.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh -ex
-
-mydir=`dirname $0`
-$mydir/test_o_trunc trunc.foo 600
-
-echo OK
-
diff --git a/src/ceph/qa/workunits/fs/test_python.sh b/src/ceph/qa/workunits/fs/test_python.sh
deleted file mode 100755
index 656d89f..0000000
--- a/src/ceph/qa/workunits/fs/test_python.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh -ex
-
-# Running as root because the filesystem root directory will be
-# owned by uid 0, and that's where we're writing.
-sudo nosetests -v $(dirname $0)/../../../src/test/pybind/test_cephfs.py
-exit 0
diff --git a/src/ceph/qa/workunits/hadoop/repl.sh b/src/ceph/qa/workunits/hadoop/repl.sh
deleted file mode 100755
index f2e9fcc..0000000
--- a/src/ceph/qa/workunits/hadoop/repl.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-
-set -e
-set -x
-
-# bail if $TESTDIR is not set as this test will fail in that scenario
-[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; }
-
-# if HADOOP_PREFIX is not set, use default
-[ -z $HADOOP_PREFIX ] && { HADOOP_PREFIX=$TESTDIR/hadoop; }
-
-# create pools with different replication factors
-for repl in 2 3 7 8 9; do
- name=hadoop.$repl
- ceph osd pool create $name 8 8
- ceph osd pool set $name size $repl
-
- id=`ceph osd dump | sed -n "s/^pool \([0-9]*\) '$name'.*/\1/p"`
- ceph mds add_data_pool $id
-done
-
-# create a file in each of the pools
-for repl in 2 3 7 8 9; do
- name=hadoop.$repl
- $HADOOP_PREFIX/bin/hadoop fs -rm -f /$name.dat
- dd if=/dev/zero bs=1048576 count=1 | \
- $HADOOP_PREFIX/bin/hadoop fs -Dceph.data.pools="$name" \
- -put - /$name.dat
-done
-
-# check that hadoop reports replication matching
-# that of the pool the file was written into
-for repl in 2 3 7 8 9; do
- name=hadoop.$repl
- repl2=$($HADOOP_PREFIX/bin/hadoop fs -ls /$name.dat | awk '{print $2}')
- if [ $repl -ne $repl2 ]; then
- echo "replication factors didn't match!"
- exit 1
- fi
-done
-
-exit 0
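The sed scrape of `ceph osd dump` above is fragile; the JSON output is the stable interface for the same lookup. A sketch (illustrative, not the workunit's code):

    import json
    import subprocess

    def pool_id(name):
        out = subprocess.check_output(['ceph', 'osd', 'dump', '--format', 'json'])
        for pool in json.loads(out)['pools']:
            if pool['pool_name'] == name:
                return pool['pool']  # the numeric pool id
        raise KeyError(name)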
diff --git a/src/ceph/qa/workunits/hadoop/terasort.sh b/src/ceph/qa/workunits/hadoop/terasort.sh
deleted file mode 100755
index 7996aec..0000000
--- a/src/ceph/qa/workunits/hadoop/terasort.sh
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/bin/bash
-
-set -e
-set -x
-
-INPUT=/terasort-input
-OUTPUT=/terasort-output
-REPORT=/terasort-report
-
-num_records=100000
-[ ! -z $NUM_RECORDS ] && num_records=$NUM_RECORDS
-
-# bail if $TESTDIR is not set as this test will fail in that scenario
-[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; }
-
-# if HADOOP_PREFIX is not set, use default
-[ -z $HADOOP_PREFIX ] && { HADOOP_PREFIX=$TESTDIR/hadoop; }
-
-# Nuke hadoop directories
-$HADOOP_PREFIX/bin/hadoop fs -rm -r $INPUT $OUTPUT $REPORT || true
-
-# Generate terasort data
-#
-#-Ddfs.blocksize=512M \
-#-Dio.file.buffer.size=131072 \
-#-Dmapreduce.map.java.opts=-Xmx1536m \
-#-Dmapreduce.map.memory.mb=2048 \
-#-Dmapreduce.task.io.sort.mb=256 \
-#-Dyarn.app.mapreduce.am.resource.mb=1024 \
-#-Dmapred.map.tasks=64 \
-$HADOOP_PREFIX/bin/hadoop jar \
- $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
- teragen \
- -Dmapred.map.tasks=9 \
- $num_records \
- $INPUT
-
-# Run the sort job
-#
-#-Ddfs.blocksize=512M \
-#-Dio.file.buffer.size=131072 \
-#-Dmapreduce.map.java.opts=-Xmx1536m \
-#-Dmapreduce.map.memory.mb=2048 \
-#-Dmapreduce.map.output.compress=true \
-#-Dmapreduce.map.output.compress.codec=org.apache.hadoop.io.compress.Lz4Codec \
-#-Dmapreduce.reduce.java.opts=-Xmx1536m \
-#-Dmapreduce.reduce.memory.mb=2048 \
-#-Dmapreduce.task.io.sort.factor=100 \
-#-Dmapreduce.task.io.sort.mb=768 \
-#-Dyarn.app.mapreduce.am.resource.mb=1024 \
-#-Dmapred.reduce.tasks=100 \
-#-Dmapreduce.terasort.output.replication=1 \
-$HADOOP_PREFIX/bin/hadoop jar \
- $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
- terasort \
- -Dmapred.reduce.tasks=10 \
- $INPUT $OUTPUT
-
-# Validate the sorted data
-#
-#-Ddfs.blocksize=512M \
-#-Dio.file.buffer.size=131072 \
-#-Dmapreduce.map.java.opts=-Xmx1536m \
-#-Dmapreduce.map.memory.mb=2048 \
-#-Dmapreduce.reduce.java.opts=-Xmx1536m \
-#-Dmapreduce.reduce.memory.mb=2048 \
-#-Dmapreduce.task.io.sort.mb=256 \
-#-Dyarn.app.mapreduce.am.resource.mb=1024 \
-#-Dmapred.reduce.tasks=1 \
-$HADOOP_PREFIX/bin/hadoop jar \
- $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
- teravalidate \
- -Dmapred.reduce.tasks=1 \
- $OUTPUT $REPORT
-
-exit 0
diff --git a/src/ceph/qa/workunits/hadoop/wordcount.sh b/src/ceph/qa/workunits/hadoop/wordcount.sh
deleted file mode 100755
index 1ff057a..0000000
--- a/src/ceph/qa/workunits/hadoop/wordcount.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-set -e
-set -x
-
-WC_INPUT=/wc_input
-WC_OUTPUT=/wc_output
-DATA_INPUT=$(mktemp -d)
-
-echo "starting hadoop-wordcount test"
-
-# bail if $TESTDIR is not set as this test will fail in that scenario
-[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; }
-
-# if HADOOP_PREFIX is not set, use default
-[ -z $HADOOP_PREFIX ] && { HADOOP_PREFIX=$TESTDIR/hadoop; }
-
-# Nuke hadoop directories
-$HADOOP_PREFIX/bin/hadoop fs -rm -r $WC_INPUT $WC_OUTPUT || true
-
-# Fetch and import testing data set
-curl http://download.ceph.com/qa/hadoop_input_files.tar | tar xf - -C $DATA_INPUT
-$HADOOP_PREFIX/bin/hadoop fs -copyFromLocal $DATA_INPUT $WC_INPUT
-rm -rf $DATA_INPUT
-
-# Run the job
-$HADOOP_PREFIX/bin/hadoop jar \
- $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
- wordcount $WC_INPUT $WC_OUTPUT
-
-# Cleanup
-$HADOOP_PREFIX/bin/hadoop fs -rm -r $WC_INPUT $WC_OUTPUT || true
-
-echo "completed hadoop-wordcount test"
-exit 0
diff --git a/src/ceph/qa/workunits/kernel_untar_build.sh b/src/ceph/qa/workunits/kernel_untar_build.sh
deleted file mode 100755
index 93fee1f..0000000
--- a/src/ceph/qa/workunits/kernel_untar_build.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-set -e
-
-wget -q http://download.ceph.com/qa/linux-4.0.5.tar.xz
-
-mkdir t
-cd t
-tar Jxvf ../linux*.xz
-cd linux*
-make defconfig
-make -j`grep -c processor /proc/cpuinfo`
-cd ..
-if ! rm -rv linux* ; then
- echo "uh oh rm -r failed, it left behind:"
- find .
- exit 1
-fi
-cd ..
-rm -rv t linux*
diff --git a/src/ceph/qa/workunits/libcephfs-java/test.sh b/src/ceph/qa/workunits/libcephfs-java/test.sh
deleted file mode 100755
index f299e95..0000000
--- a/src/ceph/qa/workunits/libcephfs-java/test.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/sh -e
-
-echo "starting libcephfs-java tests"
-# configure CEPH_CONF and LD_LIBRARY_PATH if they're not already set
-conf="$CEPH_CONF"
-if [ -z "$conf" ] ; then
- echo "Setting conf to /etc/ceph/ceph.conf"
- conf="/etc/ceph/ceph.conf"
-else
- echo "conf is set to $conf"
-fi
-
-ld_lib_path="$LD_LIBRARY_PATH"
-if [ -z "$ld_lib_path" ] ; then
- echo "Setting ld_lib_path to /usr/lib/jni:/usr/lib64"
- ld_lib_path="/usr/lib/jni:/usr/lib64"
-else
- echo "ld_lib_path was set to $ld_lib_path"
-fi
-
-ceph_java="$CEPH_JAVA_PATH"
-if [ -z "$ceph_java" ] ; then
- echo "Setting ceph_java to /usr/share/java"
- ceph_java="/usr/share/java"
-else
- echo "ceph_java was set to $ceph_java"
-fi
-
-command="java -DCEPH_CONF_FILE=$conf -Djava.library.path=$ld_lib_path -cp /usr/share/java/junit4.jar:$ceph_java/libcephfs.jar:$ceph_java/libcephfs-test.jar org.junit.runner.JUnitCore com.ceph.fs.CephAllTests"
-
-echo "----------------------"
-echo $command
-echo "----------------------"
-
-$command
-
-echo "completed libcephfs-java tests"
-
-exit 0
diff --git a/src/ceph/qa/workunits/libcephfs/test.sh b/src/ceph/qa/workunits/libcephfs/test.sh
deleted file mode 100755
index 899fe40..0000000
--- a/src/ceph/qa/workunits/libcephfs/test.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh -e
-
-ceph_test_libcephfs
-ceph_test_libcephfs_access
-
-exit 0
diff --git a/src/ceph/qa/workunits/mgr/test_localpool.sh b/src/ceph/qa/workunits/mgr/test_localpool.sh
deleted file mode 100755
index c5a56a6..0000000
--- a/src/ceph/qa/workunits/mgr/test_localpool.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh -ex
-
-ceph config-key set mgr/localpool/subtree host
-ceph config-key set mgr/localpool/failure_domain osd
-ceph mgr module enable localpool
-
-while ! ceph osd pool ls | grep '^by-host-'
-do
- sleep 5
-done
-
-ceph mgr module disable localpool
-for p in `ceph osd pool ls | grep '^by-host-'`
-do
- ceph osd pool rm $p $p --yes-i-really-really-mean-it
-done
-
-ceph config-key rm mgr/localpool/subtree
-ceph config-key rm mgr/localpool/failure_domain
-
-echo OK
diff --git a/src/ceph/qa/workunits/mon/auth_caps.sh b/src/ceph/qa/workunits/mon/auth_caps.sh
deleted file mode 100755
index b8c1094..0000000
--- a/src/ceph/qa/workunits/mon/auth_caps.sh
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/bin/bash
-
-set -e
-set -x
-declare -A keymap
-
-combinations="r w x rw rx wx rwx"
-
-for i in ${combinations}; do
- k="foo_$i"
- k=`ceph auth get-or-create-key client.$i mon "allow $i"` || exit 1
- keymap["$i"]=$k
-done
-
-# add special caps
-keymap["all"]=`ceph auth get-or-create-key client.all mon 'allow *'` || exit 1
-
-tmp=`mktemp`
-ceph auth export > $tmp
-
-trap "rm $tmp" INT ERR EXIT QUIT 0
-
-expect() {
-
- set +e
-
- local expected_ret=$1
- local ret
-
- shift
- cmd=$@
-
- eval $cmd
- ret=$?
-
- set -e
-
- if [[ $ret -ne $expected_ret ]]; then
- echo "ERROR: running \'$cmd\': expected $expected_ret got $ret"
- return 1
- fi
-
- return 0
-}
-
-read_ops() {
- local caps=$1
- local has_read=1 has_exec=1
- local ret
- local args
-
- ( echo $caps | grep 'r' ) || has_read=0
- ( echo $caps | grep 'x' ) || has_exec=0
-
- if [[ "$caps" == "all" ]]; then
- has_read=1
- has_exec=1
- fi
-
- ret=13
- if [[ $has_read -gt 0 && $has_exec -gt 0 ]]; then
- ret=0
- fi
-
- args="--id $caps --key ${keymap[$caps]}"
-
- expect $ret ceph auth get client.admin $args
- expect $ret ceph auth get-key client.admin $args
- expect $ret ceph auth export $args
- expect $ret ceph auth export client.admin $args
- expect $ret ceph auth ls $args
- expect $ret ceph auth print-key client.admin $args
- expect $ret ceph auth print_key client.admin $args
-}
-
-write_ops() {
-
- local caps=$1
- local has_read=1 has_write=1 has_exec=1
- local ret
- local args
-
- ( echo $caps | grep 'r' ) || has_read=0
- ( echo $caps | grep 'w' ) || has_write=0
- ( echo $caps | grep 'x' ) || has_exec=0
-
- if [[ "$caps" == "all" ]]; then
- has_read=1
- has_write=1
- has_exec=1
- fi
-
- ret=13
- if [[ $has_read -gt 0 && $has_write -gt 0 && $has_exec -gt 0 ]]; then
- ret=0
- fi
-
- args="--id $caps --key ${keymap[$caps]}"
-
- expect $ret ceph auth add client.foo $args
- expect $ret "ceph auth caps client.foo mon 'allow *' $args"
- expect $ret ceph auth get-or-create client.admin $args
- expect $ret ceph auth get-or-create-key client.admin $args
- expect $ret ceph auth get-or-create-key client.baz $args
- expect $ret ceph auth del client.foo $args
- expect $ret ceph auth del client.baz $args
- expect $ret ceph auth import -i $tmp $args
-}
-
-echo "running combinations: ${!keymap[@]}"
-
-subcmd=$1
-
-for i in ${!keymap[@]}; do
- echo "caps: $i"
- if [[ -z "$subcmd" || "$subcmd" == "read" || "$subcmd" == "all" ]]; then
- read_ops $i
- fi
-
- if [[ -z "$subcmd" || "$subcmd" == "write" || "$subcmd" == "all" ]]; then
- write_ops $i
- fi
-done
-
-# cleanup
-for i in ${combinations} all; do
- ceph auth del client.$i || exit 1
-done
-
-echo "OK"
diff --git a/src/ceph/qa/workunits/mon/caps.py b/src/ceph/qa/workunits/mon/caps.py
deleted file mode 100644
index 65a6956..0000000
--- a/src/ceph/qa/workunits/mon/caps.py
+++ /dev/null
@@ -1,366 +0,0 @@
-#!/usr/bin/python
-
-import json
-import subprocess
-import shlex
-from StringIO import StringIO
-import errno
-import sys
-import os
-import io
-import re
-
-
-import rados
-from ceph_argparse import *
-
-keyring_base = '/tmp/cephtest-caps.keyring'
-
-class UnexpectedReturn(Exception):
- def __init__(self, cmd, ret, expected, msg):
- if isinstance(cmd, list):
- self.cmd = ' '.join(cmd)
- else:
- assert isinstance(cmd, str) or isinstance(cmd, unicode), \
- 'cmd needs to be either a list or a str'
- self.cmd = cmd
- self.cmd = str(self.cmd)
- self.ret = int(ret)
- self.expected = int(expected)
- self.msg = str(msg)
-
- def __str__(self):
- return repr('{c}: expected return {e}, got {r} ({o})'.format(
- c=self.cmd, e=self.expected, r=self.ret, o=self.msg))
-
-def call(cmd):
- if isinstance(cmd, list):
- args = cmd
- elif isinstance(cmd, str) or isinstance(cmd, unicode):
- args = shlex.split(cmd)
- else:
- assert False, 'cmd is not a string/unicode nor a list!'
-
- print 'call: {0}'.format(args)
- proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- ret = proc.wait()
-
- return (ret, proc)
-
-def expect(cmd, expected_ret):
-
- try:
- (r, p) = call(cmd)
- except ValueError as e:
- print >> sys.stderr, \
- 'unable to run {c}: {err}'.format(c=repr(cmd), err=e.message)
- return errno.EINVAL
-
- assert r == p.returncode, \
- 'wth? r was supposed to match returncode!'
-
- if r != expected_ret:
- raise UnexpectedReturn(repr(cmd), r, expected_ret, str(p.stderr.read()))
-
- return p
-
-def expect_to_file(cmd, expected_ret, out_file, mode='a'):
-
- # Let the exception be propagated to the caller
- p = expect(cmd, expected_ret)
- assert p.returncode == expected_ret, \
- 'expected result doesn\'t match and no exception was thrown!'
-
- with io.open(out_file, mode) as file:
- file.write(unicode(p.stdout.read()))
-
- return p
-
-class Command:
- def __init__(self, cid, j):
- self.cid = cid[3:]
- self.perms = j['perm']
- self.module = j['module']
-
- self.sig = ''
- self.args = []
- for s in j['sig']:
- if not isinstance(s, dict):
- assert isinstance(s, str) or isinstance(s,unicode), \
- 'malformatted signature cid {0}: {1}\n{2}'.format(cid,s,j)
- if len(self.sig) > 0:
- self.sig += ' '
- self.sig += s
- else:
- self.args.append(s)
-
- def __str__(self):
- return repr('command {0}: {1} (requires \'{2}\')'.format(self.cid,\
- self.sig, self.perms))
-
-
-def destroy_keyring(path):
- if not os.path.exists(path):
-        raise Exception('oops! cannot remove nonexistent keyring {0}'.format(path))
-
- # grab all client entities from the keyring
- entities = [m.group(1) for m in [re.match(r'\[client\.(.*)\]', l)
- for l in [str(line.strip())
- for line in io.open(path,'r')]] if m is not None]
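-    # e.g. a keyring line '[client.foo]' yields the entity name 'foo'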
-
- # clean up and make sure each entity is gone
- for e in entities:
- expect('ceph auth del client.{0}'.format(e), 0)
- expect('ceph auth get client.{0}'.format(e), errno.ENOENT)
-
- # remove keyring
- os.unlink(path)
-
- return True
-
-def test_basic_auth():
- # make sure we can successfully add/del entities, change their caps
- # and import/export keyrings.
-
- expect('ceph auth add client.basicauth', 0)
- expect('ceph auth caps client.basicauth mon \'allow *\'', 0)
- # entity exists and caps do not match
- expect('ceph auth add client.basicauth', errno.EINVAL)
- # this command attempts to change an existing state and will fail
- expect('ceph auth add client.basicauth mon \'allow w\'', errno.EINVAL)
- expect('ceph auth get-or-create client.basicauth', 0)
- expect('ceph auth get-key client.basicauth', 0)
- expect('ceph auth get-or-create client.basicauth2', 0)
- # cleanup
- expect('ceph auth del client.basicauth', 0)
- expect('ceph auth del client.basicauth2', 0)
-
- return True
-
-def gen_module_keyring(module):
- module_caps = [
- ('all', '{t} \'allow service {s} rwx\'', 0),
- ('none', '', errno.EACCES),
- ('wrong', '{t} \'allow service foobar rwx\'', errno.EACCES),
- ('right', '{t} \'allow service {s} {p}\'', 0),
- ('no-execute', '{t} \'allow service {s} x\'', errno.EACCES)
- ]
-
- keyring = '{0}.service-{1}'.format(keyring_base,module)
- for perms in 'r rw x'.split():
- for (n,p,r) in module_caps:
- c = p.format(t='mon', s=module, p=perms)
- expect_to_file(
- 'ceph auth get-or-create client.{cn}-{cp} {caps}'.format(
- cn=n,cp=perms,caps=c), 0, keyring)
-
- return keyring
-
-
-def test_all():
-
-
- perms = {
- 'good': {
- 'broad':[
- ('rwx', 'allow *'),
- ('r', 'allow r'),
- ('rw', 'allow rw'),
- ('x', 'allow x'),
- ],
- 'service':[
- ('rwx', 'allow service {s} rwx'),
- ('r', 'allow service {s} r'),
- ('rw', 'allow service {s} rw'),
- ('x', 'allow service {s} x'),
- ],
- 'command':[
- ('rwx', 'allow command "{c}"'),
- ],
- 'command-with':[
- ('rwx', 'allow command "{c}" with {kv}')
- ],
- 'command-with-prefix':[
- ('rwx', 'allow command "{c}" with {key} prefix {val}')
- ]
- },
- 'bad': {
- 'broad':[
- ('none', ''),
- ],
- 'service':[
- ('none1', 'allow service foo rwx'),
- ('none2', 'allow service foo r'),
- ('none3', 'allow service foo rw'),
- ('none4', 'allow service foo x'),
- ],
- 'command':[
- ('none', 'allow command foo'),
- ],
- 'command-with':[
- ('none', 'allow command "{c}" with foo=bar'),
- ],
- 'command-with-prefix':[
- ('none', 'allow command "{c}" with foo prefix bar'),
- ],
- }
- }
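-    # For example, with module 'osd' and command 'osd pause', the 'good'
-    # 'command' entry above expands to the cap: mon 'allow command "osd pause"'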
-
- cmds = {
- '':[
- {
- 'cmd':('status', '', 'r')
- },
- {
- 'pre':'heap start_profiler',
- 'cmd':('heap', 'heapcmd=stats', 'rw'),
- 'post':'heap stop_profiler'
- }
- ],
- 'auth':[
- {
- 'pre':'',
- 'cmd':('auth ls', '', 'r'),
- 'post':''
- },
- {
- 'pre':'auth get-or-create client.foo mon \'allow *\'',
- 'cmd':('auth caps', 'entity="client.foo"', 'rw'),
- 'post':'auth del client.foo'
- }
- ],
- 'pg':[
- {
- 'cmd':('pg getmap', '', 'r'),
- },
- ],
- 'mds':[
- {
- 'cmd':('mds getmap', '', 'r'),
- },
- {
- 'cmd':('mds cluster_down', '', 'rw'),
- 'post':'mds cluster_up'
- },
- ],
- 'mon':[
- {
- 'cmd':('mon getmap', '', 'r')
- },
- {
- 'cmd':('mon remove', 'name=a', 'rw')
- }
- ],
- 'osd':[
- {
- 'cmd':('osd getmap', '', 'r'),
- },
- {
- 'cmd':('osd pause', '', 'rw'),
- 'post':'osd unpause'
- },
- {
- 'cmd':('osd crush dump', '', 'r')
- },
- ],
- 'config-key':[
- {
- 'pre':'config-key set foo bar',
- 'cmd':('config-key get', 'key=foo', 'r')
- },
- {
- 'pre':'config-key set foo bar',
- 'cmd':('config-key del', 'key=foo', 'rw')
- }
- ]
- }
-
- for (module,cmd_lst) in cmds.iteritems():
- k = keyring_base + '.' + module
- for cmd in cmd_lst:
-
- (cmd_cmd, cmd_args, cmd_perm) = cmd['cmd']
- cmd_args_key = ''
- cmd_args_val = ''
- if len(cmd_args) > 0:
- (cmd_args_key, cmd_args_val) = cmd_args.split('=')
-
- print 'generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd)
- # gen keyring
- for (good_or_bad,kind_map) in perms.iteritems():
- for (kind,lst) in kind_map.iteritems():
- for (perm, cap) in lst:
- cap_formatted = cap.format(
- s=module,
- c=cmd_cmd,
- kv=cmd_args,
- key=cmd_args_key,
- val=cmd_args_val)
-
- if len(cap_formatted) == 0:
- run_cap = ''
- else:
- run_cap = 'mon \'{fc}\''.format(fc=cap_formatted)
-
- cname = 'client.{gb}-{kind}-{p}'.format(
- gb=good_or_bad,kind=kind,p=perm)
- expect_to_file(
- 'ceph auth get-or-create {n} {c}'.format(
- n=cname,c=run_cap), 0, k)
- # keyring generated
- print 'testing {m}/{c}'.format(m=module,c=cmd_cmd)
-
- # test
- for good_bad in perms.iterkeys():
- for (kind,lst) in perms[good_bad].iteritems():
- for (perm,_) in lst:
- cname = 'client.{gb}-{k}-{p}'.format(gb=good_bad,k=kind,p=perm)
-
- if good_bad == 'good':
- expect_ret = 0
- else:
- expect_ret = errno.EACCES
-
- if ( cmd_perm not in perm ):
- expect_ret = errno.EACCES
- if 'with' in kind and len(cmd_args) == 0:
- expect_ret = errno.EACCES
- if 'service' in kind and len(module) == 0:
- expect_ret = errno.EACCES
-
- if 'pre' in cmd and len(cmd['pre']) > 0:
- expect('ceph {0}'.format(cmd['pre']), 0)
- expect('ceph -n {cn} -k {k} {c} {arg_val}'.format(
- cn=cname,k=k,c=cmd_cmd,arg_val=cmd_args_val), expect_ret)
- if 'post' in cmd and len(cmd['post']) > 0:
- expect('ceph {0}'.format(cmd['post']), 0)
- # finish testing
- destroy_keyring(k)
-
-
- return True
-
-
-def test_misc():
-
- k = keyring_base + '.misc'
- expect_to_file(
- 'ceph auth get-or-create client.caps mon \'allow command "auth caps"' \
- ' with entity="client.caps"\'', 0, k)
- expect('ceph -n client.caps -k {kf} mon_status'.format(kf=k), errno.EACCES)
- expect('ceph -n client.caps -k {kf} auth caps client.caps mon \'allow *\''.format(kf=k), 0)
- expect('ceph -n client.caps -k {kf} mon_status'.format(kf=k), 0)
- destroy_keyring(k)
-
-def main():
-
- test_basic_auth()
- test_all()
- test_misc()
-
- print 'OK'
-
- return 0
-
-if __name__ == '__main__':
- main()
diff --git a/src/ceph/qa/workunits/mon/caps.sh b/src/ceph/qa/workunits/mon/caps.sh
deleted file mode 100755
index e00247d..0000000
--- a/src/ceph/qa/workunits/mon/caps.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-
-tmp=/tmp/cephtest-mon-caps-madness
-
-exit_on_error=1
-
-[[ -n $TEST_EXIT_ON_ERROR ]] && exit_on_error=$TEST_EXIT_ON_ERROR
-
-expect()
-{
- cmd=$1
- expected_ret=$2
-
- echo $cmd
- eval $cmd >&/dev/null
- ret=$?
-
- if [[ $ret -ne $expected_ret ]]; then
- echo "Error: Expected return $expected_ret, got $ret"
- [[ $exit_on_error -eq 1 ]] && exit 1
- return 1
- fi
-
- return 0
-}
-
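-# Expected return 13 is EACCES: the monitor denies commands that the
-# client's caps do not cover.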
-expect "ceph auth get-or-create client.bazar > $tmp.bazar.keyring" 0
-expect "ceph -k $tmp.bazar.keyring --user bazar mon_status" 13
-ceph auth del client.bazar
-
-c="'allow command \"auth ls\", allow command mon_status'"
-expect "ceph auth get-or-create client.foo mon $c > $tmp.foo.keyring" 0
-expect "ceph -k $tmp.foo.keyring --user foo mon_status" 0
-expect "ceph -k $tmp.foo.keyring --user foo auth ls" 0
-expect "ceph -k $tmp.foo.keyring --user foo auth export" 13
-expect "ceph -k $tmp.foo.keyring --user foo auth del client.bazar" 13
-expect "ceph -k $tmp.foo.keyring --user foo osd dump" 13
-expect "ceph -k $tmp.foo.keyring --user foo pg dump" 13
-expect "ceph -k $tmp.foo.keyring --user foo quorum_status" 13
-ceph auth del client.foo
-
-c="'allow command service with prefix=list, allow command mon_status'"
-expect "ceph auth get-or-create client.bar mon $c > $tmp.bar.keyring" 0
-expect "ceph -k $tmp.bar.keyring --user bar mon_status" 0
-expect "ceph -k $tmp.bar.keyring --user bar auth ls" 13
-expect "ceph -k $tmp.bar.keyring --user bar auth export" 13
-expect "ceph -k $tmp.bar.keyring --user bar auth del client.foo" 13
-expect "ceph -k $tmp.bar.keyring --user bar osd dump" 13
-expect "ceph -k $tmp.bar.keyring --user bar pg dump" 13
-expect "ceph -k $tmp.bar.keyring --user bar quorum_status" 13
-ceph auth del client.bar
-
-rm $tmp.bazar.keyring $tmp.foo.keyring $tmp.bar.keyring
-
-echo OK
diff --git a/src/ceph/qa/workunits/mon/crush_ops.sh b/src/ceph/qa/workunits/mon/crush_ops.sh
deleted file mode 100755
index 348811e..0000000
--- a/src/ceph/qa/workunits/mon/crush_ops.sh
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/bin/bash -x
-
-set -e
-
-function expect_false()
-{
- set -x
- if "$@"; then return 1; else return 0; fi
-}
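-# expect_false succeeds only if the given command fails.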
-
-ceph osd crush dump
-
-# rules
-ceph osd crush rule dump
-ceph osd crush rule ls
-ceph osd crush rule list
-
-ceph osd crush rule create-simple foo default host
-ceph osd crush rule create-simple foo default host
-ceph osd crush rule create-simple bar default host
-
-# make sure we're at luminous+ before using crush device classes
-ceph osd require-osd-release luminous
-ceph osd crush rm-device-class all
-ceph osd crush set-device-class ssd osd.0
-ceph osd crush set-device-class hdd osd.1
-ceph osd crush rule create-replicated foo-ssd default host ssd
-ceph osd crush rule create-replicated foo-hdd default host hdd
-ceph osd crush rule ls-by-class ssd | grep 'foo-ssd'
-ceph osd crush rule ls-by-class ssd | expect_false grep 'foo-hdd'
-ceph osd crush rule ls-by-class hdd | grep 'foo-hdd'
-ceph osd crush rule ls-by-class hdd | expect_false grep 'foo-ssd'
-
-ceph osd erasure-code-profile set ec-foo-ssd crush-device-class=ssd m=2 k=2
-ceph osd pool create ec-foo 2 erasure ec-foo-ssd
-ceph osd pool rm ec-foo ec-foo --yes-i-really-really-mean-it
-
-ceph osd crush rule ls | grep foo
-
-ceph osd crush rule rename foo foo-asdf
-ceph osd crush rule rename foo foo-asdf # idempotent
-ceph osd crush rule rename bar bar-asdf
-ceph osd crush rule ls | grep 'foo-asdf'
-ceph osd crush rule ls | grep 'bar-asdf'
-ceph osd crush rule rm foo 2>&1 | grep 'does not exist'
-ceph osd crush rule rm bar 2>&1 | grep 'does not exist'
-ceph osd crush rule rename foo-asdf foo
-ceph osd crush rule rename foo-asdf foo # idempotent
-ceph osd crush rule rename bar-asdf bar
-ceph osd crush rule ls | expect_false grep 'foo-asdf'
-ceph osd crush rule ls | expect_false grep 'bar-asdf'
-ceph osd crush rule rm foo
-ceph osd crush rule rm foo # idempotent
-ceph osd crush rule rm bar
-
-# can't delete in-use rules, though:
-ceph osd pool create pinning_pool 1
-expect_false ceph osd crush rule rm replicated_rule
-ceph osd pool rm pinning_pool pinning_pool --yes-i-really-really-mean-it
-
-# build a simple map
-expect_false ceph osd crush add-bucket foo osd
-ceph osd crush add-bucket foo root
-o1=`ceph osd create`
-o2=`ceph osd create`
-ceph osd crush add $o1 1 host=host1 root=foo
-ceph osd crush add $o1 1 host=host1 root=foo # idempotent
-ceph osd crush add $o2 1 host=host2 root=foo
-ceph osd crush add $o2 1 host=host2 root=foo # idempotent
-ceph osd crush add-bucket bar root
-ceph osd crush add-bucket bar root # idempotent
-ceph osd crush link host1 root=bar
-ceph osd crush link host1 root=bar # idempotent
-ceph osd crush link host2 root=bar
-ceph osd crush link host2 root=bar # idempotent
-
-ceph osd tree | grep -c osd.$o1 | grep -q 2
-ceph osd tree | grep -c host1 | grep -q 2
-ceph osd tree | grep -c osd.$o2 | grep -q 2
-ceph osd tree | grep -c host2 | grep -q 2
-expect_false ceph osd crush rm host1 foo # not empty
-ceph osd crush unlink host1 foo
-ceph osd crush unlink host1 foo # idempotent
-ceph osd tree | grep -c host1 | grep -q 1
-
-expect_false ceph osd crush rm foo # not empty
-expect_false ceph osd crush rm bar # not empty
-ceph osd crush unlink host1 bar
-ceph osd tree | grep -c host1 | grep -q 1 # now an orphan
-ceph osd crush rm osd.$o1 host1
-ceph osd crush rm host1
-ceph osd tree | grep -c host1 | grep -q 0
-
-expect_false ceph osd crush rm bar # not empty
-ceph osd crush unlink host2
-
-# reference foo and bar with a rule
-ceph osd crush rule create-simple foo-rule foo host firstn
-expect_false ceph osd crush rm foo
-ceph osd crush rule rm foo-rule
-
-ceph osd crush rm bar
-ceph osd crush rm foo
-ceph osd crush rm osd.$o2 host2
-ceph osd crush rm host2
-
-ceph osd crush add-bucket foo host
-ceph osd crush move foo root=default rack=localrack
-
-ceph osd crush create-or-move osd.$o1 1.0 root=default
-ceph osd crush move osd.$o1 host=foo
-ceph osd find osd.$o1 | grep host | grep foo
-
-ceph osd crush rm osd.$o1
-ceph osd crush rm osd.$o2
-
-ceph osd crush rm foo
-
-# test reweight
-o3=`ceph osd create`
-ceph osd crush add $o3 123 root=default
-ceph osd tree | grep osd.$o3 | grep 123
-ceph osd crush reweight osd.$o3 113
-expect_false ceph osd crush reweight osd.$o3 123456
-ceph osd tree | grep osd.$o3 | grep 113
-ceph osd crush rm osd.$o3
-ceph osd rm osd.$o3
-
-# test reweight-subtree
-o4=`ceph osd create`
-o5=`ceph osd create`
-ceph osd crush add $o4 123 root=default host=foobaz
-ceph osd crush add $o5 123 root=default host=foobaz
-ceph osd tree | grep osd.$o4 | grep 123
-ceph osd tree | grep osd.$o5 | grep 123
-ceph osd crush reweight-subtree foobaz 155
-expect_false ceph osd crush reweight-subtree foobaz 123456
-ceph osd tree | grep osd.$o4 | grep 155
-ceph osd tree | grep osd.$o5 | grep 155
-ceph osd crush rm osd.$o4
-ceph osd crush rm osd.$o5
-ceph osd rm osd.$o4
-ceph osd rm osd.$o5
-
-# weight sets
-# make sure we require luminous before testing weight-sets
-ceph osd set-require-min-compat-client luminous
-ceph osd crush weight-set dump
-ceph osd crush weight-set ls
-expect_false ceph osd crush weight-set reweight fooset osd.0 .9
-ceph osd pool create fooset 8
-ceph osd pool create barset 8
-ceph osd pool set barset size 3
-expect_false ceph osd crush weight-set reweight fooset osd.0 .9
-ceph osd crush weight-set create fooset flat
-ceph osd crush weight-set create barset positional
-ceph osd crush weight-set ls | grep fooset
-ceph osd crush weight-set ls | grep barset
-ceph osd crush weight-set dump
-ceph osd crush weight-set reweight fooset osd.0 .9
-expect_false ceph osd crush weight-set reweight fooset osd.0 .9 .9
-expect_false ceph osd crush weight-set reweight barset osd.0 .9
-ceph osd crush weight-set reweight barset osd.0 .9 .9 .9
-ceph osd crush weight-set ls | grep -c fooset | grep -q 1
-ceph osd crush weight-set rm fooset
-ceph osd crush weight-set ls | grep -c fooset | grep -q 0
-ceph osd crush weight-set ls | grep barset
-ceph osd crush weight-set rm barset
-ceph osd crush weight-set ls | grep -c barset | grep -q 0
-ceph osd crush weight-set create-compat
-ceph osd crush weight-set ls | grep '(compat)'
-ceph osd crush weight-set rm-compat
-
-# weight set vs device classes
-ceph osd pool create cool 2
-ceph osd pool create cold 2
-ceph osd pool set cold size 2
-ceph osd crush weight-set create-compat
-ceph osd crush weight-set create cool flat
-ceph osd crush weight-set create cold positional
-ceph osd crush rm-device-class osd.0
-ceph osd crush weight-set reweight-compat osd.0 10.5
-ceph osd crush weight-set reweight cool osd.0 11.5
-ceph osd crush weight-set reweight cold osd.0 12.5 12.4
-ceph osd crush set-device-class fish osd.0
-ceph osd crush tree --show-shadow | grep osd\\.0 | grep fish | grep 10\\.
-ceph osd crush tree --show-shadow | grep osd\\.0 | grep fish | grep 11\\.
-ceph osd crush tree --show-shadow | grep osd\\.0 | grep fish | grep 12\\.
-ceph osd crush rm-device-class osd.0
-ceph osd crush set-device-class globster osd.0
-ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 10\\.
-ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 11\\.
-ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 12\\.
-ceph osd crush weight-set reweight-compat osd.0 7.5
-ceph osd crush weight-set reweight cool osd.0 8.5
-ceph osd crush weight-set reweight cold osd.0 6.5 6.6
-ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 7\\.
-ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 8\\.
-ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 6\\.
-ceph osd crush rm-device-class osd.0
-ceph osd pool rm cool cool --yes-i-really-really-mean-it
-ceph osd pool rm cold cold --yes-i-really-really-mean-it
-ceph osd crush weight-set rm-compat
-
-echo OK
diff --git a/src/ceph/qa/workunits/mon/osd.sh b/src/ceph/qa/workunits/mon/osd.sh
deleted file mode 100755
index 75bf220..0000000
--- a/src/ceph/qa/workunits/mon/osd.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-ua=`uuidgen`
-ub=`uuidgen`
-
-# should get the same id for the same uuid
-na=`ceph osd create $ua`
-test $na -eq `ceph osd create $ua`
-
-nb=`ceph osd create $ub`
-test $nb -eq `ceph osd create $ub`
-test $nb -ne $na
-
-ceph osd rm $na
-ceph osd rm $na
-ceph osd rm $nb
-ceph osd rm 1000
-
-na2=`ceph osd create $ua`
-
-echo OK
-
diff --git a/src/ceph/qa/workunits/mon/ping.py b/src/ceph/qa/workunits/mon/ping.py
deleted file mode 100755
index 1773c73..0000000
--- a/src/ceph/qa/workunits/mon/ping.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/python
-
-import json
-import shlex
-import subprocess
-import sys
-
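-# Python 2/3 compatibility: alias the base string types used in isinstance checks.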
-if sys.version_info[0] == 2:
- string = basestring
- unicode = unicode
-elif sys.version_info[0] == 3:
- string = str
- unicode = str
-
-
-class UnexpectedReturn(Exception):
- def __init__(self, cmd, ret, expected, msg):
- if isinstance(cmd, list):
- self.cmd = ' '.join(cmd)
- else:
- assert isinstance(cmd, string) or isinstance(cmd, unicode), \
- 'cmd needs to be either a list or a str'
- self.cmd = cmd
- self.cmd = str(self.cmd)
- self.ret = int(ret)
- self.expected = int(expected)
- self.msg = str(msg)
-
- def __str__(self):
- return repr('{c}: expected return {e}, got {r} ({o})'.format(
- c=self.cmd, e=self.expected, r=self.ret, o=self.msg))
-
-
-def call(cmd):
- if isinstance(cmd, list):
- args = cmd
- elif isinstance(cmd, string) or isinstance(cmd, unicode):
- args = shlex.split(cmd)
- else:
- assert False, 'cmd is not a string/unicode nor a list!'
-
- print('call: {0}'.format(args))
- proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- procout, procerr = proc.communicate(None)
-
- return proc.returncode, procout, procerr
-
-
-def expect(cmd, expected_ret):
- try:
- (r, out, err) = call(cmd)
- except ValueError as e:
- assert False, \
- 'unable to run {c}: {err}'.format(c=repr(cmd), err=str(e))
-
- if r != expected_ret:
- raise UnexpectedReturn(repr(cmd), r, expected_ret, err)
-
- return out.decode() if isinstance(out, bytes) else out
-
-
-def get_quorum_status(timeout=300):
- cmd = 'ceph quorum_status'
- if timeout > 0:
- cmd += ' --connect-timeout {0}'.format(timeout)
-
- out = expect(cmd, 0)
- j = json.loads(out)
- return j
-
-
-def main():
- quorum_status = get_quorum_status()
- mon_names = [mon['name'] for mon in quorum_status['monmap']['mons']]
-
- print('ping all monitors')
- for m in mon_names:
- print('ping mon.{0}'.format(m))
- out = expect('ceph ping mon.{0}'.format(m), 0)
- reply = json.loads(out)
-
- assert reply['mon_status']['name'] == m, \
- 'reply obtained from mon.{0}, expected mon.{1}'.format(
- reply['mon_status']['name'], m)
-
- print('test out-of-quorum reply')
- for m in mon_names:
- print('testing mon.{0}'.format(m))
- expect('ceph daemon mon.{0} quorum exit'.format(m), 0)
-
- quorum_status = get_quorum_status()
- assert m not in quorum_status['quorum_names'], \
- 'mon.{0} was not supposed to be in quorum ({1})'.format(
- m, quorum_status['quorum_names'])
-
- out = expect('ceph ping mon.{0}'.format(m), 0)
- reply = json.loads(out)
- mon_status = reply['mon_status']
-
- assert mon_status['name'] == m, \
- 'reply obtained from mon.{0}, expected mon.{1}'.format(
- mon_status['name'], m)
-
- assert mon_status['state'] == 'electing', \
- 'mon.{0} is in state {1}, expected electing'.format(
- m, mon_status['state'])
-
- expect('ceph daemon mon.{0} quorum enter'.format(m), 0)
-
- print('OK')
-
-
-if __name__ == '__main__':
- main()
diff --git a/src/ceph/qa/workunits/mon/pool_ops.sh b/src/ceph/qa/workunits/mon/pool_ops.sh
deleted file mode 100755
index b19dbd1..0000000
--- a/src/ceph/qa/workunits/mon/pool_ops.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash -x
-
-set -e
-
-function expect_false()
-{
- set -x
- if "$@"; then return 1; else return 0; fi
-}
-
-# note: we need to pass the other args, or ceph_argparse.py will see that
-# 'invalid' is not replicated|erasure and assume it is the next argument,
-# which is a string.
-expect_false ceph osd pool create foo 123 123 invalid foo-profile foo-ruleset
-
-ceph osd pool create foo 123 123 replicated
-ceph osd pool create fooo 123 123 erasure default
-ceph osd pool create foooo 123
-
-ceph osd pool create foo 123 # idempotent
-
-ceph osd pool set foo size 1
-ceph osd pool set foo size 4
-ceph osd pool set foo size 10
-expect_false ceph osd pool set foo size 0
-expect_false ceph osd pool set foo size 20
-
-# should fail due to safety interlock
-expect_false ceph osd pool delete foo
-expect_false ceph osd pool delete foo foo
-expect_false ceph osd pool delete foo foo --force
-expect_false ceph osd pool delete foo fooo --yes-i-really-mean-it
-expect_false ceph osd pool delete foo --yes-i-really-mean-it foo
-
-ceph osd pool delete foooo foooo --yes-i-really-really-mean-it
-ceph osd pool delete fooo fooo --yes-i-really-really-mean-it
-ceph osd pool delete foo foo --yes-i-really-really-mean-it
-
-# idempotent
-ceph osd pool delete foo foo --yes-i-really-really-mean-it
-ceph osd pool delete fooo fooo --yes-i-really-really-mean-it
-ceph osd pool delete fooo fooo --yes-i-really-really-mean-it
-
-# non-existent pool
-ceph osd pool delete fuggg fuggg --yes-i-really-really-mean-it
-
-echo OK
-
-
diff --git a/src/ceph/qa/workunits/mon/rbd_snaps_ops.sh b/src/ceph/qa/workunits/mon/rbd_snaps_ops.sh
deleted file mode 100755
index 2bff335..0000000
--- a/src/ceph/qa/workunits/mon/rbd_snaps_ops.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/bash
-
-# attempt to trigger #6047
-
-
-cmd_no=0
-expect()
-{
- cmd_no=$(($cmd_no+1))
- cmd="$1"
- expected=$2
- echo "[$cmd_no] $cmd"
- eval $cmd
- ret=$?
- if [[ $ret -ne $expected ]]; then
- echo "[$cmd_no] unexpected return '$ret', expected '$expected'"
- exit 1
- fi
-}
-
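-# Expected returns below are errno values: 22 is EINVAL, 95 is EOPNOTSUPP.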
-ceph osd pool delete test test --yes-i-really-really-mean-it || true
-expect 'ceph osd pool create test 256 256' 0
-expect 'rbd --pool=test pool init' 0
-expect 'ceph osd pool mksnap test snapshot' 0
-expect 'ceph osd pool rmsnap test snapshot' 0
-
-expect 'rbd --pool=test --rbd_validate_pool=false create --size=102400 image' 0
-expect 'rbd --pool=test snap create image@snapshot' 22
-
-expect 'ceph osd pool delete test test --yes-i-really-really-mean-it' 0
-expect 'ceph osd pool create test 256 256' 0
-expect 'rbd --pool=test pool init' 0
-expect 'rbd --pool=test create --size=102400 image' 0
-expect 'rbd --pool=test snap create image@snapshot' 0
-expect 'rbd --pool=test snap ls image' 0
-expect 'rbd --pool=test snap rm image@snapshot' 0
-
-expect 'ceph osd pool mksnap test snapshot' 22
-
-expect 'ceph osd pool delete test test --yes-i-really-really-mean-it' 0
-
-# reproduce issue 7210 and expect it to be fixed:
-# create a scenario where we end up deleting what used to be an
-# unmanaged snapshot from a pool that is no longer unmanaged
-
-ceph osd pool delete test-foo test-foo --yes-i-really-really-mean-it || true
-expect 'rados mkpool test-foo' 0
-expect 'rbd pool init test-foo' 0
-expect 'rbd --pool test-foo create --size 1024 image' 0
-expect 'rbd --pool test-foo snap create image@snapshot' 0
-
-ceph osd pool delete test-bar test-bar --yes-i-really-really-mean-it || true
-expect 'rados mkpool test-bar' 0
-expect 'rbd pool init test-bar' 0
-expect 'rados cppool test-foo test-bar --yes-i-really-mean-it' 0
-expect 'rbd --pool test-bar snap rm image@snapshot' 95
-expect 'ceph osd pool delete test-foo test-foo --yes-i-really-really-mean-it' 0
-expect 'ceph osd pool delete test-bar test-bar --yes-i-really-really-mean-it' 0
-
-
-echo OK
diff --git a/src/ceph/qa/workunits/mon/test_mon_config_key.py b/src/ceph/qa/workunits/mon/test_mon_config_key.py
deleted file mode 100755
index 168f6db..0000000
--- a/src/ceph/qa/workunits/mon/test_mon_config_key.py
+++ /dev/null
@@ -1,481 +0,0 @@
-#!/usr/bin/python
-#
-# test_mon_config_key - Test 'ceph config-key' interface
-#
-# Copyright (C) 2013 Inktank
-#
-# This is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License version 2.1, as published by the Free Software
-# Foundation. See file COPYING.
-#
-import argparse
-import base64
-import errno
-import json
-import logging
-import os
-import random
-import string
-import subprocess
-import sys
-import time
-
-#
-# Accepted Environment variables:
-# CEPH_TEST_VERBOSE - be more verbose; '1' enables; '0' disables
-# CEPH_TEST_DURATION - test duration in seconds
-# CEPH_TEST_SEED - seed to be used during the test
-#
-# Accepted arguments and options (see --help):
-# -v, --verbose - be more verbose
-# -d, --duration SECS - test duration in seconds
-# -s, --seed SEED - seed to be used during the test
-#
-
-
-LOG = logging.getLogger(os.path.basename(sys.argv[0].replace('.py', '')))
-
-SIZES = [
- (0, 0),
- (10, 0),
- (25, 0),
- (50, 0),
- (100, 0),
- (1000, 0),
- (4096, 0),
- (4097, -errno.EFBIG),
- (8192, -errno.EFBIG)
-]
-
-# tests will be randomly selected from the keys here, and the test
-# suboperation will be randomly selected from the list in the values
-# here. i.e. 'exists/existing' would test that a key the test put into
-# the store earlier actually does still exist in the config store,
-# and that's a separate test case from 'exists/enoent', which tests
-# nonexistence of a key known to not be present.
-
-OPS = {
- 'put': ['existing', 'new'],
- 'del': ['existing', 'enoent'],
- 'exists': ['existing', 'enoent'],
- 'get': ['existing', 'enoent'],
- 'list': ['existing', 'enoent'],
- 'dump': ['existing', 'enoent'],
-}
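-# For example, a ('get', 'enoent') draw generates a key known to be absent
-# and expects 'config-key get' to fail with -ENOENT.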
-
-CONFIG_PUT = [] # list: keys
-CONFIG_DEL = [] # list: keys
-CONFIG_EXISTING = {} # map: key -> size
-
-
-def run_cmd(cmd, expects=0):
- full_cmd = ['ceph', 'config-key'] + cmd
-
- if expects < 0:
- expects = -expects
-
- cmdlog = LOG.getChild('run_cmd')
- cmdlog.debug('{fc}'.format(fc=' '.join(full_cmd)))
-
- proc = subprocess.Popen(full_cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
-
-    stdout = []
-    stderr = []
-    out, err = proc.communicate()
-    if out is not None:
-        stdout += out.decode().split('\n')
-        cmdlog.debug('stdout: {s}'.format(s=out))
-    if err is not None:
-        stderr += err.decode().split('\n')
-        cmdlog.debug('stderr: {s}'.format(s=err))
-    ret = proc.wait()
-
- if ret != expects:
- cmdlog.error('cmd > {cmd}'.format(cmd=full_cmd))
- cmdlog.error("expected return '{expected}' got '{got}'".format(
- expected=expects, got=ret))
- cmdlog.error('stdout')
- for i in stdout:
- cmdlog.error('{x}'.format(x=i))
- cmdlog.error('stderr')
- for i in stderr:
- cmdlog.error('{x}'.format(x=i))
-
-
-# end run_cmd
-
-def gen_data(size, rnd):
- chars = string.ascii_letters + string.digits
- return ''.join(rnd.choice(chars) for _ in range(size))
-
-
-def gen_key(rnd):
- return gen_data(20, rnd)
-
-
-def gen_tmp_file_path(rnd):
- file_name = gen_data(20, rnd)
- file_path = os.path.join('/tmp', 'ceph-test.' + file_name)
- return file_path
-
-
-def destroy_tmp_file(fpath):
- if os.path.exists(fpath) and os.path.isfile(fpath):
- os.unlink(fpath)
-
-
-def write_data_file(data, rnd):
- file_path = gen_tmp_file_path(rnd)
- data_file = open(file_path, 'a+')
- data_file.truncate()
- data_file.write(data)
- data_file.close()
- return file_path
-
-
-# end write_data_file
-
-def choose_random_op(rnd):
- op = rnd.choice(
- list(OPS.keys())
- )
- sop = rnd.choice(OPS[op])
- return op, sop
-
-
-def parse_args(args):
- parser = argparse.ArgumentParser(
- description="Test the monitor's 'config-key' API",
- )
- parser.add_argument(
- '-v', '--verbose',
- action='store_true',
- help='be more verbose',
- )
- parser.add_argument(
- '-s', '--seed',
- metavar='SEED',
- help='use SEED instead of generating it in run-time',
- )
- parser.add_argument(
- '-d', '--duration',
- metavar='SECS',
- help='run test for SECS seconds (default: 300)',
- )
- parser.set_defaults(
- seed=None,
- duration=300,
- verbose=False,
- )
- return parser.parse_args(args)
-
-
-def main():
- args = parse_args(sys.argv[1:])
-
- verbose = args.verbose
- if os.environ.get('CEPH_TEST_VERBOSE') is not None:
- verbose = (os.environ.get('CEPH_TEST_VERBOSE') == '1')
-
- duration = int(os.environ.get('CEPH_TEST_DURATION', args.duration))
- seed = os.environ.get('CEPH_TEST_SEED', args.seed)
- seed = int(time.time()) if seed is None else int(seed)
-
- rnd = random.Random()
- rnd.seed(seed)
-
- loglevel = logging.INFO
- if verbose:
- loglevel = logging.DEBUG
-
- logging.basicConfig(level=loglevel)
-
- LOG.info('seed: {s}'.format(s=seed))
-
- start = time.time()
-
- while (time.time() - start) < duration:
- (op, sop) = choose_random_op(rnd)
-
- LOG.info('{o}({s})'.format(o=op, s=sop))
- op_log = LOG.getChild('{o}({s})'.format(o=op, s=sop))
-
- if op == 'put':
- via_file = (rnd.uniform(0, 100) < 50.0)
-
- expected = 0
- cmd = ['put']
- key = None
-
- if sop == 'existing':
- if len(CONFIG_EXISTING) == 0:
- op_log.debug('no existing keys; continue')
- continue
- key = rnd.choice(CONFIG_PUT)
- assert key in CONFIG_EXISTING, \
- "key '{k_}' not in CONFIG_EXISTING".format(k_=key)
-
- expected = 0 # the store just overrides the value if the key exists
- # end if sop == 'existing'
- elif sop == 'new':
- for x in range(0, 10):
- key = gen_key(rnd)
- if key not in CONFIG_EXISTING:
- break
- key = None
- if key is None:
-                op_log.error('unable to generate a unique key -- try again later.')
- continue
-
- assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \
- 'key {k} was not supposed to exist!'.format(k=key)
-
- assert key is not None, \
- 'key must be != None'
-
- cmd += [key]
-
- (size, error) = rnd.choice(SIZES)
- if size > 25:
- via_file = True
-
- data = gen_data(size, rnd)
-
- if error == 0: # only add if we expect the put to be successful
- if sop == 'new':
- CONFIG_PUT.append(key)
- CONFIG_EXISTING[key] = size
- expected = error
-
- if via_file:
- data_file = write_data_file(data, rnd)
- cmd += ['-i', data_file]
- else:
- cmd += [data]
-
- op_log.debug('size: {sz}, via: {v}'.format(
- sz=size,
-            v='file: {f}'.format(f=data_file) if via_file else 'cli')
- )
- run_cmd(cmd, expects=expected)
- if via_file:
- destroy_tmp_file(data_file)
- continue
-
- elif op == 'del':
- expected = 0
- cmd = ['del']
- key = None
-
- if sop == 'existing':
- if len(CONFIG_EXISTING) == 0:
- op_log.debug('no existing keys; continue')
- continue
- key = rnd.choice(CONFIG_PUT)
- assert key in CONFIG_EXISTING, \
- "key '{k_}' not in CONFIG_EXISTING".format(k_=key)
-
- if sop == 'enoent':
- for x in range(0, 10):
- key = base64.b64encode(os.urandom(20)).decode()
- if key not in CONFIG_EXISTING:
- break
- key = None
- if key is None:
-                    op_log.error('unable to generate a unique key -- try again later.')
- continue
- assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \
- 'key {k} was not supposed to exist!'.format(k=key)
- expected = 0 # deleting a non-existent key succeeds
-
- assert key is not None, \
- 'key must be != None'
-
- cmd += [key]
- op_log.debug('key: {k}'.format(k=key))
- run_cmd(cmd, expects=expected)
- if sop == 'existing':
- CONFIG_DEL.append(key)
- CONFIG_PUT.remove(key)
- del CONFIG_EXISTING[key]
- continue
-
- elif op == 'exists':
- expected = 0
- cmd = ['exists']
- key = None
-
- if sop == 'existing':
- if len(CONFIG_EXISTING) == 0:
- op_log.debug('no existing keys; continue')
- continue
- key = rnd.choice(CONFIG_PUT)
- assert key in CONFIG_EXISTING, \
- "key '{k_}' not in CONFIG_EXISTING".format(k_=key)
-
- if sop == 'enoent':
- for x in range(0, 10):
- key = base64.b64encode(os.urandom(20)).decode()
- if key not in CONFIG_EXISTING:
- break
- key = None
- if key is None:
-                    op_log.error('unable to generate a unique key -- try again later.')
- continue
- assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \
- 'key {k} was not supposed to exist!'.format(k=key)
- expected = -errno.ENOENT
-
- assert key is not None, \
- 'key must be != None'
-
- cmd += [key]
- op_log.debug('key: {k}'.format(k=key))
- run_cmd(cmd, expects=expected)
- continue
-
- elif op == 'get':
- expected = 0
- cmd = ['get']
- key = None
-
- if sop == 'existing':
- if len(CONFIG_EXISTING) == 0:
- op_log.debug('no existing keys; continue')
- continue
- key = rnd.choice(CONFIG_PUT)
- assert key in CONFIG_EXISTING, \
- "key '{k_}' not in CONFIG_EXISTING".format(k_=key)
-
- if sop == 'enoent':
- for x in range(0, 10):
- key = base64.b64encode(os.urandom(20)).decode()
- if key not in CONFIG_EXISTING:
- break
- key = None
- if key is None:
-                    op_log.error('unable to generate a unique key -- try again later.')
- continue
- assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \
- 'key {k} was not supposed to exist!'.format(k=key)
- expected = -errno.ENOENT
-
- assert key is not None, \
- 'key must be != None'
-
- file_path = gen_tmp_file_path(rnd)
- cmd += [key, '-o', file_path]
- op_log.debug('key: {k}'.format(k=key))
- run_cmd(cmd, expects=expected)
- if sop == 'existing':
- try:
- temp_file = open(file_path, 'r+')
- except IOError as err:
- if err.errno == errno.ENOENT:
- assert CONFIG_EXISTING[key] == 0, \
- "error opening '{fp}': {e}".format(fp=file_path, e=err)
- continue
- else:
- assert False, \
- 'some error occurred: {e}'.format(e=err)
- cnt = 0
- while True:
- read_data = temp_file.read()
- if read_data == '':
- break
- cnt += len(read_data)
- assert cnt == CONFIG_EXISTING[key], \
- "wrong size from store for key '{k}': {sz}, expected {es}".format(
- k=key, sz=cnt, es=CONFIG_EXISTING[key])
- destroy_tmp_file(file_path)
- continue
-
- elif op == 'list' or op == 'dump':
- expected = 0
- cmd = [op]
- key = None
-
- if sop == 'existing':
- if len(CONFIG_EXISTING) == 0:
- op_log.debug('no existing keys; continue')
- continue
- key = rnd.choice(CONFIG_PUT)
- assert key in CONFIG_EXISTING, \
- "key '{k_}' not in CONFIG_EXISTING".format(k_=key)
-
- if sop == 'enoent':
- for x in range(0, 10):
- key = base64.b64encode(os.urandom(20)).decode()
- if key not in CONFIG_EXISTING:
- break
- key = None
- if key is None:
-                    op_log.error('unable to generate a unique key -- try again later.')
- continue
- assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \
- 'key {k} was not supposed to exist!'.format(k=key)
-
- assert key is not None, \
- 'key must be != None'
-
- file_path = gen_tmp_file_path(rnd)
- cmd += ['-o', file_path]
- op_log.debug('key: {k}'.format(k=key))
- run_cmd(cmd, expects=expected)
- try:
- temp_file = open(file_path, 'r+')
- except IOError as err:
- if err.errno == errno.ENOENT:
- assert CONFIG_EXISTING[key] == 0, \
- "error opening '{fp}': {e}".format(fp=file_path, e=err)
- continue
- else:
- assert False, \
- 'some error occurred: {e}'.format(e=err)
- cnt = 0
- try:
- read_data = json.load(temp_file)
- except ValueError:
- temp_file.seek(0)
-                    assert False, "{op} output was not valid JSON:\n{filedata}".format(op=op, filedata=temp_file.readlines())
-
- if sop == 'existing':
- assert key in read_data, "key '{k}' not found in list/dump output".format(k=key)
- if op == 'dump':
- cnt = len(read_data[key])
- assert cnt == CONFIG_EXISTING[key], \
- "wrong size from list for key '{k}': {sz}, expected {es}".format(
- k=key, sz=cnt, es=CONFIG_EXISTING[key])
- elif sop == 'enoent':
- assert key not in read_data, "key '{k}' found in list/dump output".format(k=key)
- destroy_tmp_file(file_path)
- continue
- else:
- assert False, 'unknown op {o}'.format(o=op)
-
-    # check that all keys in 'CONFIG_PUT' exist and
-    # that all keys in 'CONFIG_DEL' don't.
-    # but first, remove from CONFIG_DEL any keys that
-    # are also in CONFIG_PUT.
- config_put_set = set(CONFIG_PUT)
- config_del_set = set(CONFIG_DEL).difference(config_put_set)
-
- LOG.info('perform sanity checks on store')
-
- for k in config_put_set:
- LOG.getChild('check(puts)').debug('key: {k_}'.format(k_=k))
- run_cmd(['exists', k], expects=0)
- for k in config_del_set:
- LOG.getChild('check(dels)').debug('key: {k_}'.format(k_=k))
- run_cmd(['exists', k], expects=-errno.ENOENT)
-
-
-if __name__ == "__main__":
- main()
diff --git a/src/ceph/qa/workunits/objectstore/test_fuse.sh b/src/ceph/qa/workunits/objectstore/test_fuse.sh
deleted file mode 100755
index 9314ab4..0000000
--- a/src/ceph/qa/workunits/objectstore/test_fuse.sh
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/bin/sh -ex
-
-if ! id -u | grep -q '^0$'; then
- echo "not root, re-running self via sudo"
- sudo PATH=$PATH TYPE=$TYPE $0
- exit 0
-fi
-
-expect_false()
-{
- set -x
- if "$@"; then return 1; else return 0; fi
-}
-
-COT=ceph-objectstore-tool
-DATA=store_test_fuse_dir
-[ -z "$TYPE" ] && TYPE=bluestore
-MNT=store_test_fuse_mnt
-
-rm -rf $DATA
-mkdir -p $DATA
-
-test -d $MNT && fusermount -u $MNT || true
-rmdir $MNT || true
-mkdir $MNT
-
-export CEPH_ARGS=--enable_experimental_unrecoverable_data_corrupting_features=bluestore
-
-$COT --op mkfs --data-path $DATA --type $TYPE
-$COT --op fuse --data-path $DATA --mountpoint $MNT &
-
-while ! test -e $MNT/type ; do
- echo waiting for $MNT/type to appear
- sleep 1
-done
-
-umask 0
-
-grep $TYPE $MNT/type
-
-# create collection
-mkdir $MNT/meta
-test -e $MNT/meta/bitwise_hash_start
-test -d $MNT/meta/all
-test -d $MNT/meta/by_bitwise_hash
-
-# create object
-mkdir $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#
-test -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
-test -d $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr
-test -d $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap
-test -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/bitwise_hash
-test -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap_header
-
-# omap header
-echo omap header > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap_header
-grep -q omap $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap_header
-
-# omap
-echo value a > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya
-echo value b > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb
-ls $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap | grep -c key | grep -q 2
-grep 'value a' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya
-grep 'value b' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb
-rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya
-test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya
-rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb
-test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb
-
-# attr
-echo value a > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya
-echo value b > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb
-ls $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr | grep -c key | grep -q 2
-grep 'value a' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya
-grep 'value b' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb
-rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya
-test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya
-rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb
-test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb
-
-# data
-test ! -s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
-echo asdfasdfasdf > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
-test -s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
-grep -q asdfasdfasdf $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
-truncate --size 4 $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
-stat --format=%s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data | grep -q ^4$
-expect_false grep -q asdfasdfasdf $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
-rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
-test ! -s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
-
-
-# create pg collection
-mkdir --mode 0003 $MNT/0.0_head
-grep -q 00000000 $MNT/0.0_head/bitwise_hash_start
-if [ "$TYPE" = "bluestore" ]; then
- cat $MNT/0.0_head/bitwise_hash_bits
- grep -q 3 $MNT/0.0_head/bitwise_hash_bits
- grep -q 1fffffff $MNT/0.0_head/bitwise_hash_end
-fi
-test -d $MNT/0.0_head/all
-
-mkdir --mode 0003 $MNT/0.1_head
-grep -q 80000000 $MNT/0.1_head/bitwise_hash_start
-if [ "$TYPE" = "bluestore" ]; then
- grep -q 3 $MNT/0.1_head/bitwise_hash_bits
- grep -q 9fffffff $MNT/0.1_head/bitwise_hash_end
-fi
-
-# create pg object
-mkdir $MNT/0.0_head/all/#0:00000000::::head#/
-mkdir $MNT/0.0_head/all/#0:10000000:::foo:head#/
-
-# verify pg bounds check
-if [ "$TYPE" = "bluestore" ]; then
- expect_false mkdir $MNT/0.0_head/all/#0:20000000:::bar:head#/
-fi
-
-# remove a collection
-expect_false rmdir $MNT/0.0_head
-rmdir $MNT/0.0_head/all/#0:10000000:::foo:head#/
-rmdir $MNT/0.0_head/all/#0:00000000::::head#/
-rmdir $MNT/0.0_head
-rmdir $MNT/0.1_head
-
-fusermount -u $MNT
-wait
-
-echo OK
diff --git a/src/ceph/qa/workunits/osdc/stress_objectcacher.sh b/src/ceph/qa/workunits/osdc/stress_objectcacher.sh
deleted file mode 100755
index 67baadc..0000000
--- a/src/ceph/qa/workunits/osdc/stress_objectcacher.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh -ex
-
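-# Sweep the parameter space: 10 iterations x 2 delays x 2 op counts x
-# 3 object counts x 3 read ratios x 3 op sizes x 2 dirty limits = 2160 runs.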
-for i in $(seq 1 10)
-do
- for DELAY in 0 1000
- do
- for OPS in 1000 10000
- do
- for OBJECTS in 10 50 100
- do
- for READS in 0.90 0.50 0.10
- do
- for OP_SIZE in 4096 131072 1048576
- do
- for MAX_DIRTY in 0 25165824
- do
- ceph_test_objectcacher_stress --ops $OPS --percent-read $READS --delay-ns $DELAY --objects $OBJECTS --max-op-size $OP_SIZE --client-oc-max-dirty $MAX_DIRTY --stress-test > /dev/null 2>&1
- done
- done
- done
- done
- done
- done
-done
-
-ceph_test_objectcacher_stress --correctness-test > /dev/null 2>&1
-
-echo OK
diff --git a/src/ceph/qa/workunits/post-file.sh b/src/ceph/qa/workunits/post-file.sh
deleted file mode 100755
index 133e668..0000000
--- a/src/ceph/qa/workunits/post-file.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash -ex
-
-what="$1"
-[ -z "$what" ] && what=/etc/udev/rules.d
-sudo ceph-post-file -d ceph-test-workunit $what
-
-echo OK
diff --git a/src/ceph/qa/workunits/rados/clone.sh b/src/ceph/qa/workunits/rados/clone.sh
deleted file mode 100755
index 281e89f..0000000
--- a/src/ceph/qa/workunits/rados/clone.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-rados -p data rm foo || true
-rados -p data put foo.tmp /etc/passwd --object-locator foo
-rados -p data clonedata foo.tmp foo --object-locator foo
-rados -p data get foo /tmp/foo
-cmp /tmp/foo /etc/passwd
-rados -p data rm foo.tmp --object-locator foo
-rados -p data rm foo
-
-echo OK
\ No newline at end of file
diff --git a/src/ceph/qa/workunits/rados/load-gen-big.sh b/src/ceph/qa/workunits/rados/load-gen-big.sh
deleted file mode 100755
index 6715658..0000000
--- a/src/ceph/qa/workunits/rados/load-gen-big.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-rados -p rbd load-gen \
- --num-objects 10240 \
- --min-object-size 1048576 \
- --max-object-size 25600000 \
- --max-ops 1024 \
- --max-backlog 1024 \
- --read-percent 50 \
- --run-length 1200
diff --git a/src/ceph/qa/workunits/rados/load-gen-mix-small-long.sh b/src/ceph/qa/workunits/rados/load-gen-mix-small-long.sh
deleted file mode 100755
index 593bad5..0000000
--- a/src/ceph/qa/workunits/rados/load-gen-mix-small-long.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-rados -p rbd load-gen \
- --num-objects 1024 \
- --min-object-size 1 \
- --max-object-size 1048576 \
- --max-ops 128 \
- --max-backlog 128 \
- --read-percent 50 \
- --run-length 1800
diff --git a/src/ceph/qa/workunits/rados/load-gen-mix-small.sh b/src/ceph/qa/workunits/rados/load-gen-mix-small.sh
deleted file mode 100755
index 02db77b..0000000
--- a/src/ceph/qa/workunits/rados/load-gen-mix-small.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-rados -p rbd load-gen \
- --num-objects 1024 \
- --min-object-size 1 \
- --max-object-size 1048576 \
- --max-ops 128 \
- --max-backlog 128 \
- --read-percent 50 \
- --run-length 600
diff --git a/src/ceph/qa/workunits/rados/load-gen-mix.sh b/src/ceph/qa/workunits/rados/load-gen-mix.sh
deleted file mode 100755
index ad3b4be..0000000
--- a/src/ceph/qa/workunits/rados/load-gen-mix.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-rados -p rbd load-gen \
- --num-objects 10240 \
- --min-object-size 1 \
- --max-object-size 1048576 \
- --max-ops 128 \
- --max-backlog 128 \
- --read-percent 50 \
- --run-length 600
diff --git a/src/ceph/qa/workunits/rados/load-gen-mostlyread.sh b/src/ceph/qa/workunits/rados/load-gen-mostlyread.sh
deleted file mode 100755
index 236f82d..0000000
--- a/src/ceph/qa/workunits/rados/load-gen-mostlyread.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-rados -p rbd load-gen \
- --num-objects 51200 \
- --min-object-size 1 \
- --max-object-size 1048576 \
- --max-ops 128 \
- --max-backlog 128 \
- --read-percent 90 \
- --run-length 600
diff --git a/src/ceph/qa/workunits/rados/stress_watch.sh b/src/ceph/qa/workunits/rados/stress_watch.sh
deleted file mode 100755
index 49f144b..0000000
--- a/src/ceph/qa/workunits/rados/stress_watch.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh -e
-
-ceph_test_stress_watch
-ceph_multi_stress_watch rep reppool repobj
-ceph_multi_stress_watch ec ecpool ecobj
-
-exit 0
diff --git a/src/ceph/qa/workunits/rados/test.sh b/src/ceph/qa/workunits/rados/test.sh
deleted file mode 100755
index cbf398f..0000000
--- a/src/ceph/qa/workunits/rados/test.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash -ex
-
-parallel=1
-[ "$1" = "--serial" ] && parallel=0
-
-color=""
-[ -t 1 ] && color="--gtest_color=yes"
-
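-# On exit or interrupt, kill any backgrounded tests still running.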
-function cleanup() {
- pkill -P $$ || true
-}
-trap cleanup EXIT ERR HUP INT QUIT
-
-declare -A pids
-
-for f in \
- api_aio api_io api_list api_lock api_misc \
- api_tier api_pool api_snapshots api_stat api_watch_notify api_cmd \
- api_service \
- api_c_write_operations \
- api_c_read_operations \
- list_parallel \
- open_pools_parallel \
- delete_pools_parallel \
- watch_notify
-do
- if [ $parallel -eq 1 ]; then
- r=`printf '%25s' $f`
- bash -o pipefail -exc "ceph_test_rados_$f $color 2>&1 | tee ceph_test_rados_$f.log | sed \"s/^/$r: /\"" &
- pid=$!
- echo "test $f on pid $pid"
- pids[$f]=$pid
- else
- ceph_test_rados_$f
- fi
-done
-
-ret=0
-if [ $parallel -eq 1 ]; then
-for t in "${!pids[@]}"
-do
- pid=${pids[$t]}
- if ! wait $pid
- then
- echo "error in $t ($pid)"
- ret=1
- fi
-done
-fi
-
-exit $ret
diff --git a/src/ceph/qa/workunits/rados/test_alloc_hint.sh b/src/ceph/qa/workunits/rados/test_alloc_hint.sh
deleted file mode 100755
index 3e24694..0000000
--- a/src/ceph/qa/workunits/rados/test_alloc_hint.sh
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/bin/bash
-
-set -ex
-shopt -s nullglob # fns glob expansion in expect_alloc_hint_eq()
-
-#
-# Helpers
-#
-
-function get_xml_val() {
- local xml="$1"
- local tag="$2"
-
- local regex=".*<${tag}>(.*)</${tag}>.*"
- if [[ ! "${xml}" =~ ${regex} ]]; then
- echo "'${xml}' xml doesn't match '${tag}' tag regex" >&2
- return 2
- fi
-
- echo "${BASH_REMATCH[1]}"
-}
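-# e.g. get_xml_val "<pgid>25.6</pgid>" "pgid" prints "25.6"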
-
-function get_conf_val() {
- set -e
-
- local entity="$1"
- local option="$2"
-
- local val
- val="$(sudo ceph daemon "${entity}" config get --format=xml "${option}")"
- val="$(get_xml_val "${val}" "${option}")"
-
- echo "${val}"
-}
-
-function setup_osd_data() {
- for (( i = 0 ; i < "${NUM_OSDS}" ; i++ )); do
- OSD_DATA[i]="$(get_conf_val "osd.$i" "osd_data")"
- done
-}
-
-function setup_pgid() {
- local poolname="$1"
- local objname="$2"
-
- local pgid
- pgid="$(ceph osd map "${poolname}" "${objname}" --format=xml)"
- pgid="$(get_xml_val "${pgid}" "pgid")"
-
- PGID="${pgid}"
-}
-
-function expect_alloc_hint_eq() {
- local expected_extsize="$1"
-
- for (( i = 0 ; i < "${NUM_OSDS}" ; i++ )); do
- # Make sure that stuff is flushed from the journal to the store
- # by the time we get to it, as we prod the actual files and not
- # the journal.
- sudo ceph daemon "osd.${i}" "flush_journal"
-
- # e.g., .../25.6_head/foo__head_7FC1F406__19
- # .../26.bs1_head/bar__head_EFE6384B__1a_ffffffffffffffff_1
-        local fns=($(sudo sh -c "ls ${OSD_DATA[i]}/current/${PGID}*_head/${OBJ}_*"))
- local count="${#fns[@]}"
- if [ "${count}" -ne 1 ]; then
- echo "bad fns count: ${count}" >&2
- return 2
- fi
-
- local extsize
- extsize="$(sudo xfs_io -c extsize "${fns[0]}")"
- local extsize_regex="^\[(.*)\] ${fns[0]}$"
- if [[ ! "${extsize}" =~ ${extsize_regex} ]]; then
- echo "extsize doesn't match extsize_regex: ${extsize}" >&2
- return 2
- fi
- extsize="${BASH_REMATCH[1]}"
-
- if [ "${extsize}" -ne "${expected_extsize}" ]; then
- echo "FAIL: alloc_hint: actual ${extsize}, expected ${expected_extsize}" >&2
- return 1
- fi
- done
-}
-
-#
-# Global setup
-#
-
-EC_K="2"
-EC_M="1"
-NUM_OSDS="$((EC_K + EC_M))"
-
-NUM_PG="12"
-NUM_PGP="${NUM_PG}"
-
-LOW_CAP="$(get_conf_val "osd.0" "filestore_max_alloc_hint_size")"
-HIGH_CAP="$((LOW_CAP * 10))" # 10M, assuming 1M default cap
-SMALL_HINT="$((LOW_CAP / 4))" # 256K, assuming 1M default cap
-BIG_HINT="$((LOW_CAP * 6))" # 6M, assuming 1M default cap
-
-setup_osd_data
-
-#
-# ReplicatedBackend tests
-#
-
-POOL="alloc_hint-rep"
-ceph osd pool create "${POOL}" "${NUM_PG}"
-ceph osd pool set "${POOL}" size "${NUM_OSDS}"
-ceph osd pool application enable "${POOL}" rados
-
-OBJ="foo"
-setup_pgid "${POOL}" "${OBJ}"
-rados -p "${POOL}" create "${OBJ}"
-
-# Empty object, SMALL_HINT - expect SMALL_HINT
-rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
-expect_alloc_hint_eq "${SMALL_HINT}"
-
-# Try changing to BIG_HINT (1) - expect LOW_CAP (BIG_HINT > LOW_CAP)
-rados -p "${POOL}" set-alloc-hint "${OBJ}" "${BIG_HINT}" "${BIG_HINT}"
-expect_alloc_hint_eq "${LOW_CAP}"
-
-# Bump the cap to HIGH_CAP
-ceph tell 'osd.*' injectargs "--filestore_max_alloc_hint_size ${HIGH_CAP}"
-
-# Try changing to BIG_HINT (2) - expect BIG_HINT (BIG_HINT < HIGH_CAP)
-rados -p "${POOL}" set-alloc-hint "${OBJ}" "${BIG_HINT}" "${BIG_HINT}"
-expect_alloc_hint_eq "${BIG_HINT}"
-
-ceph tell 'osd.*' injectargs "--filestore_max_alloc_hint_size ${LOW_CAP}"
-
-# Populate object with some data
-rados -p "${POOL}" put "${OBJ}" /etc/passwd
-
-# Try changing back to SMALL_HINT - expect BIG_HINT (non-empty object)
-rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
-expect_alloc_hint_eq "${BIG_HINT}"
-
-OBJ="bar"
-setup_pgid "${POOL}" "${OBJ}"
-
-# Non-existent object, SMALL_HINT - expect SMALL_HINT (object creation)
-rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
-expect_alloc_hint_eq "${SMALL_HINT}"
-
-ceph osd pool delete "${POOL}" "${POOL}" --yes-i-really-really-mean-it
-
-#
-# ECBackend tests
-#
-
-PROFILE="alloc_hint-ecprofile"
-POOL="alloc_hint-ec"
-ceph osd erasure-code-profile set "${PROFILE}" k=2 m=1 crush-failure-domain=osd
-ceph osd erasure-code-profile get "${PROFILE}" # just so it's logged
-ceph osd pool create "${POOL}" "${NUM_PG}" "${NUM_PGP}" erasure "${PROFILE}"
-ceph osd pool application enable "${POOL}" rados
-
-OBJ="baz"
-setup_pgid "${POOL}" "${OBJ}"
-rados -p "${POOL}" create "${OBJ}"
-
-# Empty object, SMALL_HINT - expect scaled-down SMALL_HINT
-rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
-expect_alloc_hint_eq "$((SMALL_HINT / EC_K))"
-
-ceph osd pool delete "${POOL}" "${POOL}" --yes-i-really-really-mean-it
-
-#
-# Global teardown
-#
-
-echo "OK"
diff --git a/src/ceph/qa/workunits/rados/test_cache_pool.sh b/src/ceph/qa/workunits/rados/test_cache_pool.sh
deleted file mode 100755
index 5975893..0000000
--- a/src/ceph/qa/workunits/rados/test_cache_pool.sh
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/bin/bash -x
-
-set -e
-
-expect_false()
-{
- set -x
- if "$@"; then return 1; else return 0; fi
-}
-
-# create pools, set up tier relationship
-ceph osd pool create base_pool 2
-ceph osd pool application enable base_pool rados
-ceph osd pool create partial_wrong 2
-ceph osd pool create wrong_cache 2
-ceph osd tier add base_pool partial_wrong
-ceph osd tier add base_pool wrong_cache
-
-# populate base_pool with some data
-echo "foo" > foo.txt
-echo "bar" > bar.txt
-echo "baz" > baz.txt
-rados -p base_pool put fooobj foo.txt
-rados -p base_pool put barobj bar.txt
-# fill in wrong_cache backwards so we can tell we read from it
-rados -p wrong_cache put fooobj bar.txt
-rados -p wrong_cache put barobj foo.txt
-# partial_wrong gets barobj backwards so we can check promote and non-promote
-rados -p partial_wrong put barobj foo.txt
-
-# get the objects back before setting a caching pool
-rados -p base_pool get fooobj tmp.txt
-diff -q tmp.txt foo.txt
-rados -p base_pool get barobj tmp.txt
-diff -q tmp.txt bar.txt
-
-# set up redirect and make sure we get backwards results
-ceph osd tier set-overlay base_pool wrong_cache
-ceph osd tier cache-mode wrong_cache writeback
-rados -p base_pool get fooobj tmp.txt
-diff -q tmp.txt bar.txt
-rados -p base_pool get barobj tmp.txt
-diff -q tmp.txt foo.txt
-
-# switch cache pools and make sure we're doing promote
-ceph osd tier remove-overlay base_pool
-ceph osd tier set-overlay base_pool partial_wrong
-ceph osd tier cache-mode partial_wrong writeback
-rados -p base_pool get fooobj tmp.txt
-diff -q tmp.txt foo.txt # hurray, it promoted!
-rados -p base_pool get barobj tmp.txt
-diff -q tmp.txt foo.txt # yep, we read partial_wrong's local object!
-
-# try a nonexistent object and make sure we get an error
-expect_false rados -p base_pool get bazobj tmp.txt
-
-# drop the cache entirely and make sure contents are still the same
-ceph osd tier remove-overlay base_pool
-rados -p base_pool get fooobj tmp.txt
-diff -q tmp.txt foo.txt
-rados -p base_pool get barobj tmp.txt
-diff -q tmp.txt bar.txt
-
-# create an empty cache pool and make sure it has objects after reading
-ceph osd pool create empty_cache 2
-
-touch empty.txt
-rados -p empty_cache ls > tmp.txt
-diff -q tmp.txt empty.txt
-
-ceph osd tier add base_pool empty_cache
-ceph osd tier set-overlay base_pool empty_cache
-ceph osd tier cache-mode empty_cache writeback
-rados -p base_pool get fooobj tmp.txt
-rados -p base_pool get barobj tmp.txt
-expect_false rados -p base_pool get bazobj tmp.txt
-
-rados -p empty_cache ls > tmp.txt
-expect_false diff -q tmp.txt empty.txt
-
-# cleanup
-ceph osd tier remove-overlay base_pool
-ceph osd tier remove base_pool wrong_cache
-ceph osd tier remove base_pool partial_wrong
-ceph osd tier remove base_pool empty_cache
-ceph osd pool delete base_pool base_pool --yes-i-really-really-mean-it
-ceph osd pool delete empty_cache empty_cache --yes-i-really-really-mean-it
-ceph osd pool delete wrong_cache wrong_cache --yes-i-really-really-mean-it
-ceph osd pool delete partial_wrong partial_wrong --yes-i-really-really-mean-it
-
-## set of base, cache
-ceph osd pool create base 8
-ceph osd pool application enable base rados
-ceph osd pool create cache 8
-
-ceph osd tier add base cache
-ceph osd tier cache-mode cache writeback
-ceph osd tier set-overlay base cache
-
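-# flush/evict must target the cache pool, and evicting an object that has
-# not been flushed yet fails.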
-# cache-flush, cache-evict
-rados -p base put foo /etc/passwd
-expect_false rados -p base cache-evict foo
-expect_false rados -p base cache-flush foo
-expect_false rados -p cache cache-evict foo
-rados -p cache cache-flush foo
-rados -p cache cache-evict foo
-rados -p cache ls - | wc -l | grep 0
-
-# cache-try-flush, cache-evict
-rados -p base put foo /etc/passwd
-expect_false rados -p base cache-evict foo
-expect_false rados -p base cache-flush foo
-expect_false rados -p cache cache-evict foo
-rados -p cache cache-try-flush foo
-rados -p cache cache-evict foo
-rados -p cache ls - | wc -l | grep 0
-
-# cache-flush-evict-all
-rados -p base put bar /etc/passwd
-rados -p cache ls - | wc -l | grep 1
-expect_false rados -p base cache-flush-evict-all
-rados -p cache cache-flush-evict-all
-rados -p cache ls - | wc -l | grep 0
-
-# cache-try-flush-evict-all
-rados -p base put bar /etc/passwd
-rados -p cache ls - | wc -l | grep 1
-expect_false rados -p base cache-flush-evict-all
-rados -p cache cache-try-flush-evict-all
-rados -p cache ls - | wc -l | grep 0
-
-# cache flush/evit when clone objects exist
-rados -p base put testclone /etc/passwd
-rados -p cache ls - | wc -l | grep 1
-ceph osd pool mksnap base snap
-rados -p base put testclone /etc/hosts
-rados -p cache cache-flush-evict-all
-rados -p cache ls - | wc -l | grep 0
-
-ceph osd tier cache-mode cache forward --yes-i-really-mean-it
-rados -p base -s snap get testclone testclone.txt
-diff -q testclone.txt /etc/passwd
-rados -p base get testclone testclone.txt
-diff -q testclone.txt /etc/hosts
-
-# test --with-clones option
-ceph osd tier cache-mode cache writeback
-rados -p base put testclone2 /etc/passwd
-rados -p cache ls - | wc -l | grep 1
-ceph osd pool mksnap base snap1
-rados -p base put testclone2 /etc/hosts
-expect_false rados -p cache cache-flush testclone2
-rados -p cache cache-flush testclone2 --with-clones
-expect_false rados -p cache cache-evict testclone2
-rados -p cache cache-evict testclone2 --with-clones
-rados -p cache ls - | wc -l | grep 0
-
-rados -p base -s snap1 get testclone2 testclone2.txt
-diff -q testclone2.txt /etc/passwd
-rados -p base get testclone2 testclone2.txt
-diff -q testclone2.txt /etc/hosts
-
-# cleanup
-ceph osd tier remove-overlay base
-ceph osd tier remove base cache
-
-ceph osd pool delete cache cache --yes-i-really-really-mean-it
-ceph osd pool delete base base --yes-i-really-really-mean-it
-
-echo OK
diff --git a/src/ceph/qa/workunits/rados/test_envlibrados_for_rocksdb.sh b/src/ceph/qa/workunits/rados/test_envlibrados_for_rocksdb.sh
deleted file mode 100755
index 94580c2..0000000
--- a/src/ceph/qa/workunits/rados/test_envlibrados_for_rocksdb.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash -ex
-############################################
-# Helper functions
-############################################
-function install() {
- for package in "$@" ; do
- install_one $package
- done
- return 0
-}
-
-function install_one() {
- case $(lsb_release -si) in
- Ubuntu|Debian|Devuan)
- sudo apt-get install -y --force-yes "$@"
- ;;
- CentOS|Fedora|RedHatEnterpriseServer)
- sudo yum install -y "$@"
- ;;
- *SUSE*)
- sudo zypper --non-interactive install "$@"
- ;;
- *)
- echo "$(lsb_release -si) is unknown, $@ will have to be installed manually."
- ;;
- esac
-}
-############################################
-# Install required tools
-############################################
-echo "Install required tools"
-install git automake
-
-CURRENT_PATH=`pwd`
-
-############################################
-# Compile RocksDB
-############################################
-# install prerequisites
-# for rocksdb
-case $(lsb_release -si) in
- Ubuntu|Debian|Devuan)
- install g++-4.7 libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev librados-dev
- ;;
- CentOS|Fedora|RedHatEnterpriseServer)
- install gcc-c++.x86_64 gflags-devel snappy-devel zlib zlib-devel bzip2 bzip2-devel librados2-devel.x86_64
- ;;
- *)
-        echo "$(lsb_release -si) is unknown, the dependencies will have to be installed manually."
- ;;
-esac
-
-# # gflags
-# sudo yum install gflags-devel
-#
-# wget https://github.com/schuhschuh/gflags/archive/master.zip
-# unzip master.zip
-# cd gflags-master
-# mkdir build && cd build
-# export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1
-# make && make install
-
-echo "Compile rocksdb"
-if [ -e rocksdb ]; then
- rm -fr rocksdb
-fi
-git clone https://github.com/facebook/rocksdb.git --depth 1
-
-# compile code
-cd rocksdb
-make env_librados_test ROCKSDB_USE_LIBRADOS=1 -j8
-
-echo "Copy ceph.conf"
-# prepare ceph.conf
-mkdir -p ../ceph/src/
-if [ -f "/etc/ceph/ceph.conf" ]; then
- cp /etc/ceph/ceph.conf ../ceph/src/
-elif [ -f "/etc/ceph/ceph/ceph.conf" ]; then
- cp /etc/ceph/ceph/ceph.conf ../ceph/src/
-else
-    echo "neither /etc/ceph/ceph.conf nor /etc/ceph/ceph/ceph.conf exists"
-fi
-
-echo "Run EnvLibrados test"
-# run test
-if [ -f "../ceph/src/ceph.conf" ]; then
- cp env_librados_test ~/cephtest/archive
- ./env_librados_test
-else
- echo "../ceph/src/ceph.conf doesn't exist"
-fi
-cd ${CURRENT_PATH}
diff --git a/src/ceph/qa/workunits/rados/test_hang.sh b/src/ceph/qa/workunits/rados/test_hang.sh
deleted file mode 100755
index 724e0bb..0000000
--- a/src/ceph/qa/workunits/rados/test_hang.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh -ex
-
-# Hang forever for manual testing using the thrasher
-while true
-do
-    sleep 300
-done
-exit 0
diff --git a/src/ceph/qa/workunits/rados/test_health_warnings.sh b/src/ceph/qa/workunits/rados/test_health_warnings.sh
deleted file mode 100755
index a4a9c11..0000000
--- a/src/ceph/qa/workunits/rados/test_health_warnings.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/bash -ex
-
-set -u
-
-# number of osds = 10
-crushtool -o crushmap --build --num_osds 10 host straw 2 rack straw 2 row straw 2 root straw 0
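-# (layout sketch, assuming crushtool --build groups each layer by the given
-#  size: 10 osds -> 5 hosts -> 3 racks -> 2 rows under one root; the grep
-#  counts in the tests below follow from this tree)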
-ceph osd setcrushmap -i crushmap
-ceph osd tree
-ceph tell osd.* injectargs --osd_max_markdown_count 1024 --osd_max_markdown_period 1
-
-wait_for_healthy() {
- while ceph health | grep down
- do
- sleep 1
- done
-}
-
-test_mark_two_osds_same_host_down() {
- ceph osd set noup
- ceph osd down osd.0 osd.1
- ceph health detail
- ceph health | grep "1 host"
- ceph health | grep "2 osds"
- ceph health detail | grep "osd.0"
- ceph health detail | grep "osd.1"
- ceph osd unset noup
- wait_for_healthy
-}
-
-test_mark_two_osds_same_rack_down() {
- ceph osd set noup
- ceph osd down osd.8 osd.9
- ceph health detail
- ceph health | grep "1 host"
- ceph health | grep "1 rack"
- ceph health | grep "1 row"
- ceph health | grep "2 osds"
- ceph health detail | grep "osd.8"
- ceph health detail | grep "osd.9"
- ceph osd unset noup
- wait_for_healthy
-}
-
-test_mark_all_but_last_osds_down() {
- ceph osd set noup
- ceph osd down $(ceph osd ls | sed \$d)
- ceph health detail
- ceph health | grep "1 row"
- ceph health | grep "2 racks"
- ceph health | grep "4 hosts"
- ceph health | grep "9 osds"
- ceph osd unset noup
- wait_for_healthy
-}
-
-test_mark_two_osds_same_host_down_with_classes() {
- ceph osd set noup
- ceph osd crush set-device-class ssd osd.0 osd.2 osd.4 osd.6 osd.8
- ceph osd crush set-device-class hdd osd.1 osd.3 osd.5 osd.7 osd.9
- ceph osd down osd.0 osd.1
- ceph health detail
- ceph health | grep "1 host"
- ceph health | grep "2 osds"
- ceph health detail | grep "osd.0"
- ceph health detail | grep "osd.1"
- ceph osd unset noup
- wait_for_healthy
-}
-
-test_mark_two_osds_same_host_down
-test_mark_two_osds_same_rack_down
-test_mark_all_but_last_osds_down
-test_mark_two_osds_same_host_down_with_classes
-
-exit 0
diff --git a/src/ceph/qa/workunits/rados/test_pool_access.sh b/src/ceph/qa/workunits/rados/test_pool_access.sh
deleted file mode 100755
index 8597b71..0000000
--- a/src/ceph/qa/workunits/rados/test_pool_access.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash -x
-
-set -e
-
-expect_1()
-{
- set -x
- set +e
- "$@"
- if [ $? == 1 ]; then return 0; else return 1; fi
-}
-
-
-key=`ceph auth get-or-create-key client.poolaccess1 mon 'allow r' osd 'allow *'`
-rados --id poolaccess1 --key $key -p rbd ls
-
-key=`ceph auth get-or-create-key client.poolaccess2 mon 'allow r' osd 'allow * pool=nopool'`
-expect_1 rados --id poolaccess2 --key $key -p rbd ls
-
-key=`ceph auth get-or-create-key client.poolaccess3 mon 'allow r' osd 'allow rw pool=nopool'`
-expect_1 rados --id poolaccess3 --key $key -p rbd ls
-
-echo OK
diff --git a/src/ceph/qa/workunits/rados/test_pool_quota.sh b/src/ceph/qa/workunits/rados/test_pool_quota.sh
deleted file mode 100755
index 0eacefc..0000000
--- a/src/ceph/qa/workunits/rados/test_pool_quota.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/sh -ex
-
-p=`uuidgen`
-
-# objects
-ceph osd pool create $p 12
-ceph osd pool set-quota $p max_objects 10
-ceph osd pool application enable $p rados
-
-for f in `seq 1 10` ; do
- rados -p $p put obj$f /etc/passwd
-done
-
-sleep 30
-
-rados -p $p put onemore /etc/passwd &
-pid=$!
-
-ceph osd pool set-quota $p max_objects 100
-wait $pid
-[ $? -ne 0 ] && exit 1 || true
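-# (the put above blocks at the 10-object quota and should only complete once
-#  the quota is raised; a non-zero status from wait means the put failed)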
-
-rados -p $p put twomore /etc/passwd
-
-# bytes
-ceph osd pool set-quota $p max_bytes 100
-sleep 30
-
-rados -p $p put two /etc/passwd &
-pid=$!
-
-ceph osd pool set-quota $p max_bytes 0
-ceph osd pool set-quota $p max_objects 0
-wait $pid
-[ $? -ne 0 ] && exit 1 || true
-
-rados -p $p put three /etc/passwd
-
-
-# one pool being full does not block a different pool
-
-pp=`uuidgen`
-
-ceph osd pool create $pp 12
-ceph osd pool application enable $pp rados
-
-# set objects quota
-ceph osd pool set-quota $pp max_objects 10
-sleep 30
-
-for f in `seq 1 10` ; do
- rados -p $pp put obj$f /etc/passwd
-done
-
-sleep 30
-
-rados -p $p put threemore /etc/passwd
-
-ceph osd pool set-quota $p max_bytes 0
-ceph osd pool set-quota $p max_objects 0
-
-sleep 30
-# done
-ceph osd pool delete $p $p --yes-i-really-really-mean-it
-ceph osd pool delete $pp $pp --yes-i-really-really-mean-it
-
-echo OK
-
diff --git a/src/ceph/qa/workunits/rados/test_python.sh b/src/ceph/qa/workunits/rados/test_python.sh
deleted file mode 100755
index 80369c8..0000000
--- a/src/ceph/qa/workunits/rados/test_python.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh -ex
-
-${PYTHON:-python} -m nose -v $(dirname $0)/../../../src/test/pybind/test_rados.py
-exit 0
diff --git a/src/ceph/qa/workunits/rados/test_rados_timeouts.sh b/src/ceph/qa/workunits/rados/test_rados_timeouts.sh
deleted file mode 100755
index bb35d72..0000000
--- a/src/ceph/qa/workunits/rados/test_rados_timeouts.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash -x
-
-delay_mon() {
- MSGTYPE=$1
- shift
- $@ --rados-mon-op-timeout 1 --ms-inject-delay-type mon --ms-inject-delay-max 10000000 --ms-inject-delay-probability 1 --ms-inject-delay-msg-type $MSGTYPE
- if [ $? -eq 0 ]; then
- exit 1
- fi
-}
-
-delay_osd() {
- MSGTYPE=$1
- shift
- $@ --rados-osd-op-timeout 1 --ms-inject-delay-type osd --ms-inject-delay-max 10000000 --ms-inject-delay-probability 1 --ms-inject-delay-msg-type $MSGTYPE
- if [ $? -eq 0 ]; then
- exit 2
- fi
-}
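-
-# Example (added for clarity): "delay_mon omap rados lspools" runs
-# "rados lspools" with a 1-second mon op timeout while every mon 'omap'
-# message is delayed, so the command is expected to time out and fail.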
-
-# pool ops
-delay_mon omap rados lspools
-delay_mon poolopreply rados mkpool test
-delay_mon poolopreply rados mksnap -p test snap
-delay_mon poolopreply rados rmpool test test --yes-i-really-really-mean-it
-
-# other mon ops
-delay_mon getpoolstats rados df
-delay_mon mon_command ceph df
-delay_mon omap ceph osd dump
-delay_mon omap ceph -s
-
-# osd ops
-delay_osd osd_op_reply rados -p data put ls /bin/ls
-delay_osd osd_op_reply rados -p data get ls - >/dev/null
-delay_osd osd_op_reply rados -p data ls
-delay_osd command_reply ceph tell osd.0 bench 1 1
-
-# rbd commands, using more kinds of osd ops
-rbd create -s 1 test
-delay_osd osd_op_reply rbd watch test
-delay_osd osd_op_reply rbd info test
-delay_osd osd_op_reply rbd snap create test@snap
-delay_osd osd_op_reply rbd import /bin/ls ls
-rbd rm test
-
-echo OK
diff --git a/src/ceph/qa/workunits/rados/test_rados_tool.sh b/src/ceph/qa/workunits/rados/test_rados_tool.sh
deleted file mode 100755
index 87c86ee..0000000
--- a/src/ceph/qa/workunits/rados/test_rados_tool.sh
+++ /dev/null
@@ -1,575 +0,0 @@
-#!/bin/bash
-
-die() {
- echo "$@"
- exit 1
-}
-
-usage() {
- cat <<EOF
-test_rados_tool.sh: tests rados_tool
--c: RADOS configuration file to use [optional]
--k: keep temp files
--h: this help message
--p: set temporary pool to use [optional]
-EOF
-}
-
-do_run() {
- if [ "$1" == "--tee" ]; then
- shift
- tee_out="$1"
- shift
- "$@" | tee $tee_out
- else
- "$@"
- fi
-}
-
-run_expect_fail() {
- echo "RUN_EXPECT_FAIL: " "$@"
- do_run "$@"
- [ $? -eq 0 ] && die "expected failure, but got success! cmd: $@"
-}
-
-run_expect_succ() {
- echo "RUN_EXPECT_SUCC: " "$@"
- do_run "$@"
- [ $? -ne 0 ] && die "expected success, but got failure! cmd: $@"
-}
-
-run_expect_nosignal() {
- echo "RUN_EXPECT_NOSIGNAL: " "$@"
- do_run "$@"
-    [ $? -ge 128 ] && die "expected success or failure, but got a signal! cmd: $@"
-}
-
-run() {
- echo "RUN: " $@
- do_run "$@"
-}
-
-if [ -n "$CEPH_BIN" ] ; then
- # CMake env
- RADOS_TOOL="$CEPH_BIN/rados"
- CEPH_TOOL="$CEPH_BIN/ceph"
-else
- # executables should be installed by the QA env
- RADOS_TOOL=$(which rados)
- CEPH_TOOL=$(which ceph)
-fi
-
-KEEP_TEMP_FILES=0
-POOL=trs_pool
-POOL_CP_TARGET=trs_pool.2
-POOL_EC=trs_pool_ec
-
-[ -x "$RADOS_TOOL" ] || die "couldn't find $RADOS_TOOL binary to test"
-[ -x "$CEPH_TOOL" ] || die "couldn't find $CEPH_TOOL binary to test"
-
-while getopts "c:hkp:" flag; do
- case $flag in
- c) RADOS_TOOL="$RADOS_TOOL -c $OPTARG";;
- k) KEEP_TEMP_FILES=1;;
- h) usage; exit 0;;
- p) POOL=$OPTARG;;
- *) echo; usage; exit 1;;
- esac
-done
-
-TDIR=`mktemp -d -t test_rados_tool.XXXXXXXXXX` || die "mktemp failed"
-[ $KEEP_TEMP_FILES -eq 0 ] && trap "rm -rf ${TDIR}; exit" INT TERM EXIT
-
-# ensure rados doesn't segfault without --pool
-run_expect_nosignal "$RADOS_TOOL" --snap "asdf" ls
-run_expect_nosignal "$RADOS_TOOL" --snapid "0" ls
-run_expect_nosignal "$RADOS_TOOL" --object_locator "asdf" ls
-run_expect_nosignal "$RADOS_TOOL" --namespace "asdf" ls
-
-run_expect_succ "$RADOS_TOOL" mkpool "$POOL"
-run_expect_succ "$CEPH_TOOL" osd erasure-code-profile set myprofile k=2 m=1 stripe_unit=2K crush-failure-domain=osd --force
-run_expect_succ "$CEPH_TOOL" osd pool create "$POOL_EC" 100 100 erasure myprofile
-
-
-# expb happens to be the empty export for legacy reasons
-run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/expb"
-
-# expa has objects foo, foo2 and bar
-run_expect_succ "$RADOS_TOOL" -p "$POOL" put foo /etc/fstab
-run_expect_succ "$RADOS_TOOL" -p "$POOL" put foo2 /etc/fstab
-run_expect_succ "$RADOS_TOOL" -p "$POOL" put bar /etc/fstab
-run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/expa"
-
-# expc has foo and foo2 with some attributes and omaps set
-run_expect_succ "$RADOS_TOOL" -p "$POOL" rm bar
-run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo "rados.toothbrush" "toothbrush"
-run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo "rados.toothpaste" "crest"
-run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapval foo "rados.floss" "myfloss"
-run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo2 "rados.toothbrush" "green"
-run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapheader foo2 "foo2.header"
-run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/expc"
-
-# make sure that --create works
-run "$RADOS_TOOL" rmpool "$POOL" "$POOL" --yes-i-really-really-mean-it
-run_expect_succ "$RADOS_TOOL" -p "$POOL" --create import "$TDIR/expa"
-
-# make sure that lack of --create fails
-run_expect_succ "$RADOS_TOOL" rmpool "$POOL" "$POOL" --yes-i-really-really-mean-it
-run_expect_fail "$RADOS_TOOL" -p "$POOL" import "$TDIR/expa"
-
-run_expect_succ "$RADOS_TOOL" -p "$POOL" --create import "$TDIR/expa"
-
-# inaccessible import src should fail
-run_expect_fail "$RADOS_TOOL" -p "$POOL" import "$TDIR/dir_nonexistent"
-
-# export an empty pool to test purge
-run_expect_succ "$RADOS_TOOL" purge "$POOL" --yes-i-really-really-mean-it
-run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/empty"
-cmp -s "$TDIR/expb" "$TDIR/empty" \
- || die "failed to export the same stuff we imported!"
-rm -f "$TDIR/empty"
-
-# import some stuff with extended attributes on it
-run_expect_succ "$RADOS_TOOL" -p "$POOL" import "$TDIR/expc"
-VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
-[ ${VAL} = "toothbrush" ] || die "Invalid attribute after import"
-
-# the second time, the xattrs should match, so there should be nothing to do.
-run_expect_succ "$RADOS_TOOL" -p "$POOL" import "$TDIR/expc"
-VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
-[ "${VAL}" = "toothbrush" ] || die "Invalid attribute after second import"
-
-# Now try with --no-overwrite option after changing an attribute
-run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo "rados.toothbrush" "dentist"
-run_expect_succ "$RADOS_TOOL" -p "$POOL" import --no-overwrite "$TDIR/expc"
-VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
-[ "${VAL}" = "dentist" ] || die "Invalid attribute after second import"
-
-# now force it to copy everything
-run_expect_succ "$RADOS_TOOL" -p "$POOL" import "$TDIR/expc"
-VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
-[ "${VAL}" = "toothbrush" ] || die "Invalid attribute after second import"
-
-# test copy pool
-run "$RADOS_TOOL" rmpool "$POOL" "$POOL" --yes-i-really-really-mean-it
-run "$RADOS_TOOL" rmpool "$POOL_CP_TARGET" "$POOL_CP_TARGET" --yes-i-really-really-mean-it
-run_expect_succ "$RADOS_TOOL" mkpool "$POOL"
-run_expect_succ "$RADOS_TOOL" mkpool "$POOL_CP_TARGET"
-
-# create src files
-mkdir -p "$TDIR/dir_cp_src"
-for i in `seq 1 5`; do
- fname="$TDIR/dir_cp_src/f.$i"
- objname="f.$i"
- dd if=/dev/urandom of="$fname" bs=$((1024*1024)) count=$i
- run_expect_succ "$RADOS_TOOL" -p "$POOL" put $objname "$fname"
-
-# a few random attrs
- for j in `seq 1 4`; do
- rand_str=`dd if=/dev/urandom bs=4 count=1 | hexdump -x`
- run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr $objname attr.$j "$rand_str"
- run_expect_succ --tee "$fname.attr.$j" "$RADOS_TOOL" -p "$POOL" getxattr $objname attr.$j
- done
-
- rand_str=`dd if=/dev/urandom bs=4 count=1 | hexdump -x`
- run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapheader $objname "$rand_str"
- run_expect_succ --tee "$fname.omap.header" "$RADOS_TOOL" -p "$POOL" getomapheader $objname
-
-# a few random omap keys
- for j in `seq 1 4`; do
- rand_str=`dd if=/dev/urandom bs=4 count=1 | hexdump -x`
- run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapval $objname key.$j "$rand_str"
- done
- run_expect_succ --tee "$fname.omap.vals" "$RADOS_TOOL" -p "$POOL" listomapvals $objname
-done
-
-run_expect_succ "$RADOS_TOOL" cppool "$POOL" "$POOL_CP_TARGET"
-
-mkdir -p "$TDIR/dir_cp_dst"
-for i in `seq 1 5`; do
- fname="$TDIR/dir_cp_dst/f.$i"
- objname="f.$i"
- run_expect_succ "$RADOS_TOOL" -p "$POOL_CP_TARGET" get $objname "$fname"
-
-# a few random attrs
- for j in `seq 1 4`; do
- run_expect_succ --tee "$fname.attr.$j" "$RADOS_TOOL" -p "$POOL_CP_TARGET" getxattr $objname attr.$j
- done
-
- run_expect_succ --tee "$fname.omap.header" "$RADOS_TOOL" -p "$POOL_CP_TARGET" getomapheader $objname
- run_expect_succ --tee "$fname.omap.vals" "$RADOS_TOOL" -p "$POOL_CP_TARGET" listomapvals $objname
-done
-
-diff -q -r "$TDIR/dir_cp_src" "$TDIR/dir_cp_dst" \
- || die "copy pool validation failed!"
-
-for opt in \
- block-size \
- concurrent-ios \
- min-object-size \
- max-object-size \
- min-op-len \
- max-op-len \
- max-ops \
- max-backlog \
- target-throughput \
- read-percent \
- num-objects \
- run-length \
- ; do
- run_expect_succ "$RADOS_TOOL" --$opt 4 df
- run_expect_fail "$RADOS_TOOL" --$opt 4k df
-done
-
-run_expect_succ "$RADOS_TOOL" lock list f.1 --lock-duration 4 --pool "$POOL"
-echo # previous command doesn't output an end of line: issue #9735
-run_expect_fail "$RADOS_TOOL" lock list f.1 --lock-duration 4k --pool "$POOL"
-
-run_expect_succ "$RADOS_TOOL" mksnap snap1 --pool "$POOL"
-snapid=$("$RADOS_TOOL" lssnap --pool "$POOL" | grep snap1 | cut -f1)
-[ $? -ne 0 ] && die "expected success, but got failure! cmd: \"$RADOS_TOOL\" lssnap --pool \"$POOL\" | grep snap1 | cut -f1"
-run_expect_succ "$RADOS_TOOL" ls --pool "$POOL" --snapid="$snapid"
-run_expect_fail "$RADOS_TOOL" ls --pool "$POOL" --snapid="$snapid"k
-
-run_expect_succ "$RADOS_TOOL" chown 1 --pool "$POOL"
-run_expect_fail "$RADOS_TOOL" chown 1k --pool "$POOL"
-
-run_expect_succ "$RADOS_TOOL" truncate f.1 0 --pool "$POOL"
-run_expect_fail "$RADOS_TOOL" truncate f.1 0k --pool "$POOL"
-
-run "$RADOS_TOOL" rmpool delete_me_mkpool_test delete_me_mkpool_test --yes-i-really-really-mean-it
-run_expect_succ "$RADOS_TOOL" mkpool delete_me_mkpool_test 0 0
-run_expect_fail "$RADOS_TOOL" mkpool delete_me_mkpool_test2 0k 0
-run_expect_fail "$RADOS_TOOL" mkpool delete_me_mkpool_test3 0 0k
-
-run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 write
-run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 1k write
-run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 write --format json --output "$TDIR/bench.json"
-run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 1 write --output "$TDIR/bench.json"
-run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --format json --no-cleanup
-run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 rand --format json
-run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 seq --format json
-run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-omap
-run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-object
-run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr
-run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr --write-object
-run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr --write-omap
-run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-omap --write-object
-run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr --write-omap --write-object
-run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-omap
-run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-object
-run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr
-run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr --write-object
-run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr --write-omap
-run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-omap --write-object
-run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr --write-omap --write-object
-
-for i in $("$RADOS_TOOL" --pool "$POOL" ls | grep "benchmark_data"); do
- "$RADOS_TOOL" --pool "$POOL" truncate $i 0
-done
-
-run_expect_nosignal "$RADOS_TOOL" --pool "$POOL" bench 1 rand
-run_expect_nosignal "$RADOS_TOOL" --pool "$POOL" bench 1 seq
-
-set -e
-
-OBJ=test_rados_obj
-
-expect_false()
-{
- if "$@"; then return 1; else return 0; fi
-}
-
-cleanup() {
- $RADOS_TOOL -p $POOL rm $OBJ > /dev/null 2>&1 || true
- $RADOS_TOOL -p $POOL_EC rm $OBJ > /dev/null 2>&1 || true
-}
-
-test_omap() {
- cleanup
- for i in $(seq 1 1 10)
- do
- if [ $(($i % 2)) -eq 0 ]; then
- $RADOS_TOOL -p $POOL setomapval $OBJ $i $i
- else
- echo -n "$i" | $RADOS_TOOL -p $POOL setomapval $OBJ $i
- fi
- $RADOS_TOOL -p $POOL getomapval $OBJ $i | grep -q "|$i|\$"
- done
- $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 10
- for i in $(seq 1 1 5)
- do
- $RADOS_TOOL -p $POOL rmomapkey $OBJ $i
- done
- $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 5
- cleanup
-
- for i in $(seq 1 1 10)
- do
- dd if=/dev/urandom bs=128 count=1 > $TDIR/omap_key
- if [ $(($i % 2)) -eq 0 ]; then
- $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key setomapval $OBJ $i
- else
- echo -n "$i" | $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key setomapval $OBJ
- fi
- $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key getomapval $OBJ | grep -q "|$i|\$"
- $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key rmomapkey $OBJ
- $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 0
- done
- cleanup
-}
-
-test_xattr() {
- cleanup
- $RADOS_TOOL -p $POOL put $OBJ /etc/passwd
- V1=`mktemp fooattrXXXXXXX`
- V2=`mktemp fooattrXXXXXXX`
- echo -n fooval > $V1
- expect_false $RADOS_TOOL -p $POOL setxattr $OBJ 2>/dev/null
- expect_false $RADOS_TOOL -p $POOL setxattr $OBJ foo fooval extraarg 2>/dev/null
- $RADOS_TOOL -p $POOL setxattr $OBJ foo fooval
- $RADOS_TOOL -p $POOL getxattr $OBJ foo > $V2
- cmp $V1 $V2
- cat $V1 | $RADOS_TOOL -p $POOL setxattr $OBJ bar
- $RADOS_TOOL -p $POOL getxattr $OBJ bar > $V2
- cmp $V1 $V2
- $RADOS_TOOL -p $POOL listxattr $OBJ > $V1
- grep -q foo $V1
- grep -q bar $V1
- [ `cat $V1 | wc -l` -eq 2 ]
- rm $V1 $V2
- cleanup
-}
-test_rmobj() {
- p=`uuidgen`
- $CEPH_TOOL osd pool create $p 1
- $CEPH_TOOL osd pool set-quota $p max_objects 1
- V1=`mktemp fooattrXXXXXXX`
- $RADOS_TOOL put $OBJ $V1 -p $p
- while ! $CEPH_TOOL osd dump | grep 'full_no_quota max_objects'
- do
- sleep 2
- done
- $RADOS_TOOL -p $p rm $OBJ --force-full
- $RADOS_TOOL rmpool $p $p --yes-i-really-really-mean-it
- rm $V1
-}
-
-test_ls() {
- echo "Testing rados ls command"
- p=`uuidgen`
- $CEPH_TOOL osd pool create $p 1
- NS=10
- OBJS=20
- # Include default namespace (0) in the total
- TOTAL=$(expr $OBJS \* $(expr $NS + 1))
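-    # e.g. with NS=10 and OBJS=20 this is 20 * (10 + 1) = 220 objects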
-
- for nsnum in `seq 0 $NS`
- do
- for onum in `seq 1 $OBJS`
- do
- if [ "$nsnum" = "0" ];
- then
- "$RADOS_TOOL" -p $p put obj${onum} /etc/fstab 2> /dev/null
- else
- "$RADOS_TOOL" -p $p -N "NS${nsnum}" put obj${onum} /etc/fstab 2> /dev/null
- fi
- done
- done
- CHECK=$("$RADOS_TOOL" -p $p ls 2> /dev/null | wc -l)
- if [ "$OBJS" -ne "$CHECK" ];
- then
- die "Created $OBJS objects in default namespace but saw $CHECK"
- fi
- TESTNS=NS${NS}
- CHECK=$("$RADOS_TOOL" -p $p -N $TESTNS ls 2> /dev/null | wc -l)
- if [ "$OBJS" -ne "$CHECK" ];
- then
- die "Created $OBJS objects in $TESTNS namespace but saw $CHECK"
- fi
- CHECK=$("$RADOS_TOOL" -p $p --all ls 2> /dev/null | wc -l)
- if [ "$TOTAL" -ne "$CHECK" ];
- then
- die "Created $TOTAL objects but saw $CHECK"
- fi
-
- $RADOS_TOOL rmpool $p $p --yes-i-really-really-mean-it
-}
-
-test_cleanup() {
- echo "Testing rados cleanup command"
- p=`uuidgen`
- $CEPH_TOOL osd pool create $p 1
- NS=5
- OBJS=4
- # Include default namespace (0) in the total
- TOTAL=$(expr $OBJS \* $(expr $NS + 1))
-
- for nsnum in `seq 0 $NS`
- do
- for onum in `seq 1 $OBJS`
- do
- if [ "$nsnum" = "0" ];
- then
- "$RADOS_TOOL" -p $p put obj${onum} /etc/fstab 2> /dev/null
- else
- "$RADOS_TOOL" -p $p -N "NS${nsnum}" put obj${onum} /etc/fstab 2> /dev/null
- fi
- done
- done
-
- $RADOS_TOOL -p $p --all ls > $TDIR/before.ls.out 2> /dev/null
-
- $RADOS_TOOL -p $p bench 3 write --no-cleanup 2> /dev/null
- $RADOS_TOOL -p $p -N NS1 bench 3 write --no-cleanup 2> /dev/null
- $RADOS_TOOL -p $p -N NS2 bench 3 write --no-cleanup 2> /dev/null
- $RADOS_TOOL -p $p -N NS3 bench 3 write --no-cleanup 2> /dev/null
- # Leave dangling objects without a benchmark_last_metadata in NS4
- expect_false timeout 3 $RADOS_TOOL -p $p -N NS4 bench 30 write --no-cleanup 2> /dev/null
- $RADOS_TOOL -p $p -N NS5 bench 3 write --no-cleanup 2> /dev/null
-
- $RADOS_TOOL -p $p -N NS3 cleanup 2> /dev/null
- #echo "Check NS3 after specific cleanup"
- CHECK=$($RADOS_TOOL -p $p -N NS3 ls | wc -l)
- if [ "$OBJS" -ne "$CHECK" ] ;
- then
- die "Expected $OBJS objects in NS3 but saw $CHECK"
- fi
-
- #echo "Try to cleanup all"
- $RADOS_TOOL -p $p --all cleanup
- #echo "Check all namespaces"
- $RADOS_TOOL -p $p --all ls > $TDIR/after.ls.out 2> /dev/null
- CHECK=$(cat $TDIR/after.ls.out | wc -l)
- if [ "$TOTAL" -ne "$CHECK" ];
- then
- die "Expected $TOTAL objects but saw $CHECK"
- fi
- if ! diff $TDIR/before.ls.out $TDIR/after.ls.out
- then
- die "Different objects found after cleanup"
- fi
-
- set +e
- run_expect_fail $RADOS_TOOL -p $p cleanup --prefix illegal_prefix
- run_expect_succ $RADOS_TOOL -p $p cleanup --prefix benchmark_data_otherhost
- set -e
-
- $RADOS_TOOL rmpool $p $p --yes-i-really-really-mean-it
-}
-
-function test_append()
-{
- cleanup
-
- # create object
- touch ./rados_append_null
- $RADOS_TOOL -p $POOL append $OBJ ./rados_append_null
- $RADOS_TOOL -p $POOL get $OBJ ./rados_append_0_out
- cmp ./rados_append_null ./rados_append_0_out
-
- # append 4k, total size 4k
- dd if=/dev/zero of=./rados_append_4k bs=4k count=1
- $RADOS_TOOL -p $POOL append $OBJ ./rados_append_4k
- $RADOS_TOOL -p $POOL get $OBJ ./rados_append_4k_out
- cmp ./rados_append_4k ./rados_append_4k_out
-
- # append 4k, total size 8k
- $RADOS_TOOL -p $POOL append $OBJ ./rados_append_4k
- $RADOS_TOOL -p $POOL get $OBJ ./rados_append_4k_out
- read_size=`ls -l ./rados_append_4k_out | awk -F ' ' '{print $5}'`
- if [ 8192 -ne $read_size ];
- then
- die "Append failed expecting 8192 read $read_size"
- fi
-
- # append 10M, total size 10493952
- dd if=/dev/zero of=./rados_append_10m bs=10M count=1
- $RADOS_TOOL -p $POOL append $OBJ ./rados_append_10m
- $RADOS_TOOL -p $POOL get $OBJ ./rados_append_10m_out
- read_size=`ls -l ./rados_append_10m_out | awk -F ' ' '{print $5}'`
- if [ 10493952 -ne $read_size ];
- then
- die "Append failed expecting 10493952 read $read_size"
- fi
-
- # cleanup
- cleanup
-
- # create object
- $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_null
- $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_0_out
- cmp rados_append_null rados_append_0_out
-
- # append 4k, total size 4k
- $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_4k
- $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_4k_out
- cmp rados_append_4k rados_append_4k_out
-
- # append 4k, total size 8k
- $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_4k
- $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_4k_out
- read_size=`ls -l ./rados_append_4k_out | awk -F ' ' '{print $5}'`
- if [ 8192 -ne $read_size ];
- then
- die "Append failed expecting 8192 read $read_size"
- fi
-
- # append 10M, total size 10493952
- $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_10m
- $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_10m_out
- read_size=`ls -l ./rados_append_10m_out | awk -F ' ' '{print $5}'`
- if [ 10493952 -ne $read_size ];
- then
- die "Append failed expecting 10493952 read $read_size"
- fi
-
- cleanup
- rm -rf ./rados_append_null ./rados_append_0_out
- rm -rf ./rados_append_4k ./rados_append_4k_out ./rados_append_10m ./rados_append_10m_out
-}
-
-function test_put()
-{
- # rados put test:
- cleanup
-
- # create file in local fs
- dd if=/dev/urandom of=rados_object_10k bs=1K count=10
-
- # test put command
- $RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k
- $RADOS_TOOL -p $POOL get $OBJ ./rados_object_10k_out
- cmp ./rados_object_10k ./rados_object_10k_out
- cleanup
-
- # test put command with offset 0
- $RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k --offset 0
- $RADOS_TOOL -p $POOL get $OBJ ./rados_object_offset_0_out
- cmp ./rados_object_10k ./rados_object_offset_0_out
- cleanup
-
- # test put command with offset 1000
- $RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k --offset 1000
- $RADOS_TOOL -p $POOL get $OBJ ./rados_object_offset_1000_out
- cmp ./rados_object_10k ./rados_object_offset_1000_out 0 1000
- cleanup
-
- rm -rf ./rados_object_10k ./rados_object_10k_out ./rados_object_offset_0_out ./rados_object_offset_1000_out
-}
-
-test_xattr
-test_omap
-test_rmobj
-test_ls
-test_cleanup
-test_append
-test_put
-
-# clean up environment, delete pool
-$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
-$CEPH_TOOL osd pool delete $POOL_EC $POOL_EC --yes-i-really-really-mean-it
-$CEPH_TOOL osd pool delete $POOL_CP_TARGET $POOL_CP_TARGET --yes-i-really-really-mean-it
-
-echo "SUCCESS!"
-exit 0
diff --git a/src/ceph/qa/workunits/rados/test_tmap_to_omap.sh b/src/ceph/qa/workunits/rados/test_tmap_to_omap.sh
deleted file mode 100755
index 76656ad..0000000
--- a/src/ceph/qa/workunits/rados/test_tmap_to_omap.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh -ex
-
-expect_false()
-{
- set -x
- if "$@"; then return 1; else return 0; fi
-}
-
-pool="pool-$$"
-rados mkpool $pool
-
-rados -p $pool tmap set foo key1 value1
-rados -p $pool tmap set foo key2 value2
-rados -p $pool tmap set foo key2 value2
-rados -p $pool tmap dump foo | grep key1
-rados -p $pool tmap dump foo | grep key2
-rados -p $pool tmap-to-omap foo
-expect_false rados -p $pool tmap dump foo
-expect_false rados -p $pool tmap dump foo
-
-rados -p $pool listomapkeys foo | grep key1
-rados -p $pool listomapkeys foo | grep key2
-rados -p $pool getomapval foo key1 | grep value1
-rados -p $pool getomapval foo key2 | grep value2
-
-rados rmpool $pool $pool --yes-i-really-really-mean-it
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/cli_generic.sh b/src/ceph/qa/workunits/rbd/cli_generic.sh
deleted file mode 100755
index f958520..0000000
--- a/src/ceph/qa/workunits/rbd/cli_generic.sh
+++ /dev/null
@@ -1,470 +0,0 @@
-#!/bin/sh -ex
-
-# make sure rbd pool is EMPTY.. this is a test script!!
-rbd ls | wc -l | grep -v '^0$' && echo "nonempty rbd pool, aborting! run this script on an empty test cluster only." && exit 1
-
-IMGS="testimg1 testimg2 testimg3 testimg-diff1 testimg-diff2 testimg-diff3 foo foo2 bar bar2 test1 test2 test3 clone2"
-
-tiered=0
-if ceph osd dump | grep ^pool | grep "'rbd'" | grep tier; then
- tiered=1
-fi
-
-remove_images() {
- for img in $IMGS
- do
- (rbd snap purge $img || true) >/dev/null 2>&1
- (rbd rm $img || true) >/dev/null 2>&1
- done
-}
-
-test_others() {
- echo "testing import, export, resize, and snapshots..."
- TMP_FILES="/tmp/img1 /tmp/img1.new /tmp/img2 /tmp/img2.new /tmp/img3 /tmp/img3.new /tmp/img-diff1.new /tmp/img-diff2.new /tmp/img-diff3.new /tmp/img1.snap1 /tmp/img1.snap1 /tmp/img-diff1.snap1"
-
- remove_images
- rm -f $TMP_FILES
-
- # create an image
- dd if=/bin/sh of=/tmp/img1 bs=1k count=1 seek=10
- dd if=/bin/dd of=/tmp/img1 bs=1k count=10 seek=100
- dd if=/bin/rm of=/tmp/img1 bs=1k count=100 seek=1000
- dd if=/bin/ls of=/tmp/img1 bs=1k seek=10000
- dd if=/bin/ln of=/tmp/img1 bs=1k seek=100000
-
- # import, snapshot
- rbd import $RBD_CREATE_ARGS /tmp/img1 testimg1
- rbd resize testimg1 --size=256 --allow-shrink
- rbd export testimg1 /tmp/img2
- rbd snap create testimg1 --snap=snap1
- rbd resize testimg1 --size=128 && exit 1 || true # shrink should fail
- rbd resize testimg1 --size=128 --allow-shrink
- rbd export testimg1 /tmp/img3
-
- # info
- rbd info testimg1 | grep 'size 128 MB'
- rbd info --snap=snap1 testimg1 | grep 'size 256 MB'
-
- # export-diff
- rm -rf /tmp/diff-testimg1-1 /tmp/diff-testimg1-2
- rbd export-diff testimg1 --snap=snap1 /tmp/diff-testimg1-1
- rbd export-diff testimg1 --from-snap=snap1 /tmp/diff-testimg1-2
-
- # import-diff
- rbd create $RBD_CREATE_ARGS --size=1 testimg-diff1
- rbd import-diff --sparse-size 8K /tmp/diff-testimg1-1 testimg-diff1
- rbd import-diff --sparse-size 8K /tmp/diff-testimg1-2 testimg-diff1
-
- # info
- rbd info testimg1 | grep 'size 128 MB'
- rbd info --snap=snap1 testimg1 | grep 'size 256 MB'
- rbd info testimg-diff1 | grep 'size 128 MB'
- rbd info --snap=snap1 testimg-diff1 | grep 'size 256 MB'
-
- # make copies
- rbd copy testimg1 --snap=snap1 testimg2
- rbd copy testimg1 testimg3
- rbd copy testimg-diff1 --sparse-size 768K --snap=snap1 testimg-diff2
- rbd copy testimg-diff1 --sparse-size 768K testimg-diff3
-
- # verify the result
- rbd info testimg2 | grep 'size 256 MB'
- rbd info testimg3 | grep 'size 128 MB'
- rbd info testimg-diff2 | grep 'size 256 MB'
- rbd info testimg-diff3 | grep 'size 128 MB'
-
- rbd export testimg1 /tmp/img1.new
- rbd export testimg2 /tmp/img2.new
- rbd export testimg3 /tmp/img3.new
- rbd export testimg-diff1 /tmp/img-diff1.new
- rbd export testimg-diff2 /tmp/img-diff2.new
- rbd export testimg-diff3 /tmp/img-diff3.new
-
- cmp /tmp/img2 /tmp/img2.new
- cmp /tmp/img3 /tmp/img3.new
- cmp /tmp/img2 /tmp/img-diff2.new
- cmp /tmp/img3 /tmp/img-diff3.new
-
- # rollback
- rbd snap rollback --snap=snap1 testimg1
- rbd snap rollback --snap=snap1 testimg-diff1
- rbd info testimg1 | grep 'size 256 MB'
- rbd info testimg-diff1 | grep 'size 256 MB'
- rbd export testimg1 /tmp/img1.snap1
- rbd export testimg-diff1 /tmp/img-diff1.snap1
- cmp /tmp/img2 /tmp/img1.snap1
- cmp /tmp/img2 /tmp/img-diff1.snap1
-
- # test create, copy of zero-length images
- rbd rm testimg2
- rbd rm testimg3
- rbd create testimg2 -s 0
- rbd cp testimg2 testimg3
-
- # remove snapshots
- rbd snap rm --snap=snap1 testimg1
- rbd snap rm --snap=snap1 testimg-diff1
- rbd info --snap=snap1 testimg1 2>&1 | grep 'error setting snapshot context: (2) No such file or directory'
- rbd info --snap=snap1 testimg-diff1 2>&1 | grep 'error setting snapshot context: (2) No such file or directory'
-
- remove_images
- rm -f $TMP_FILES
-}
-
-test_rename() {
- echo "testing rename..."
- remove_images
-
- rbd create --image-format 1 -s 1 foo
- rbd create --image-format 2 -s 1 bar
- rbd rename foo foo2
- rbd rename foo2 bar 2>&1 | grep exists
- rbd rename bar bar2
- rbd rename bar2 foo2 2>&1 | grep exists
-
- rados mkpool rbd2
- rbd pool init rbd2
- rbd create -p rbd2 -s 1 foo
- rbd rename rbd2/foo rbd2/bar
- rbd -p rbd2 ls | grep bar
- rbd rename rbd2/bar foo
- rbd rename --pool rbd2 foo bar
- ! rbd rename rbd2/bar --dest-pool rbd foo
- rbd rename --pool rbd2 bar --dest-pool rbd2 foo
- rbd -p rbd2 ls | grep foo
- rados rmpool rbd2 rbd2 --yes-i-really-really-mean-it
-
- remove_images
-}
-
-test_ls() {
- echo "testing ls..."
- remove_images
-
- rbd create --image-format 1 -s 1 test1
- rbd create --image-format 1 -s 1 test2
- rbd ls | grep test1
- rbd ls | grep test2
- rbd ls | wc -l | grep 2
- # look for fields in output of ls -l without worrying about space
- rbd ls -l | grep 'test1.*1024k.*1'
- rbd ls -l | grep 'test2.*1024k.*1'
-
- rbd rm test1
- rbd rm test2
-
- rbd create --image-format 2 -s 1 test1
- rbd create --image-format 2 -s 1 test2
- rbd ls | grep test1
- rbd ls | grep test2
- rbd ls | wc -l | grep 2
- rbd ls -l | grep 'test1.*1024k.*2'
- rbd ls -l | grep 'test2.*1024k.*2'
-
- rbd rm test1
- rbd rm test2
-
- rbd create --image-format 2 -s 1 test1
- rbd create --image-format 1 -s 1 test2
- rbd ls | grep test1
- rbd ls | grep test2
- rbd ls | wc -l | grep 2
- rbd ls -l | grep 'test1.*1024k.*2'
- rbd ls -l | grep 'test2.*1024k.*1'
- remove_images
-
- # test that many images can be shown by ls
- for i in $(seq -w 00 99); do
- rbd create image.$i -s 1
- done
- rbd ls | wc -l | grep 100
- rbd ls -l | grep image | wc -l | grep 100
- for i in $(seq -w 00 99); do
- rbd rm image.$i
- done
-
- for i in $(seq -w 00 99); do
- rbd create image.$i --image-format 2 -s 1
- done
- rbd ls | wc -l | grep 100
- rbd ls -l | grep image | wc -l | grep 100
- for i in $(seq -w 00 99); do
- rbd rm image.$i
- done
-}
-
-test_remove() {
- echo "testing remove..."
- remove_images
-
- rbd remove "NOT_EXIST" && exit 1 || true # remove should fail
- rbd create --image-format 1 -s 1 test1
- rbd rm test1
- rbd ls | wc -l | grep "^0$"
-
- rbd create --image-format 2 -s 1 test2
- rbd rm test2
- rbd ls | wc -l | grep "^0$"
-
- # check that remove succeeds even if it's
- # interrupted partway through. simulate this
- # by removing some objects manually.
-
- # remove with header missing (old format)
- rbd create --image-format 1 -s 1 test1
- rados rm -p rbd test1.rbd
- rbd rm test1
- rbd ls | wc -l | grep "^0$"
-
- if [ $tiered -eq 0 ]; then
- # remove with header missing
- rbd create --image-format 2 -s 1 test2
- HEADER=$(rados -p rbd ls | grep '^rbd_header')
- rados -p rbd rm $HEADER
- rbd rm test2
- rbd ls | wc -l | grep "^0$"
-
- # remove with id missing
- rbd create --image-format 2 -s 1 test2
- rados -p rbd rm rbd_id.test2
- rbd rm test2
- rbd ls | wc -l | grep "^0$"
-
- # remove with header and id missing
- rbd create --image-format 2 -s 1 test2
- HEADER=$(rados -p rbd ls | grep '^rbd_header')
- rados -p rbd rm $HEADER
- rados -p rbd rm rbd_id.test2
- rbd rm test2
- rbd ls | wc -l | grep "^0$"
- fi
-
- # remove with rbd_children object missing (and, by extension,
- # with child not mentioned in rbd_children)
- rbd create --image-format 2 -s 1 test2
- rbd snap create test2@snap
- rbd snap protect test2@snap
- rbd clone test2@snap clone
-
- rados -p rbd rm rbd_children
- rbd rm clone
- rbd ls | grep clone | wc -l | grep '^0$'
-
- rbd snap unprotect test2@snap
- rbd snap rm test2@snap
- rbd rm test2
-}
-
-test_locking() {
- echo "testing locking..."
- remove_images
-
- rbd create -s 1 test1
- rbd lock list test1 | wc -l | grep '^0$'
- rbd lock add test1 id
- rbd lock list test1 | grep ' 1 '
- LOCKER=$(rbd lock list test1 | tail -n 1 | awk '{print $1;}')
- rbd lock remove test1 id $LOCKER
- rbd lock list test1 | wc -l | grep '^0$'
-
- rbd lock add test1 id --shared tag
- rbd lock list test1 | grep ' 1 '
- rbd lock add test1 id --shared tag
- rbd lock list test1 | grep ' 2 '
- rbd lock add test1 id2 --shared tag
- rbd lock list test1 | grep ' 3 '
- rbd lock list test1 | tail -n 1 | awk '{print $2, $1;}' | xargs rbd lock remove test1
- if rbd info test1 | grep -qE "features:.*exclusive"
- then
- # new locking functionality requires all locks to be released
- while [ -n "$(rbd lock list test1)" ]
- do
- rbd lock list test1 | tail -n 1 | awk '{print $2, $1;}' | xargs rbd lock remove test1
- done
- fi
- rbd rm test1
-}
-
-test_pool_image_args() {
- echo "testing pool and image args..."
- remove_images
-
- ceph osd pool delete test test --yes-i-really-really-mean-it || true
- ceph osd pool create test 100
- rbd pool init test
- truncate -s 1 /tmp/empty /tmp/empty@snap
-
- rbd ls | wc -l | grep 0
- rbd create -s 1 test1
- rbd ls | grep -q test1
- rbd import --image test2 /tmp/empty
- rbd ls | grep -q test2
- rbd --dest test3 import /tmp/empty
- rbd ls | grep -q test3
- rbd import /tmp/empty foo
- rbd ls | grep -q foo
-
- # should fail due to "destination snapname specified"
- rbd import --dest test/empty@snap /tmp/empty && exit 1 || true
- rbd import /tmp/empty test/empty@snap && exit 1 || true
- rbd import --image test/empty@snap /tmp/empty && exit 1 || true
- rbd import /tmp/empty@snap && exit 1 || true
-
- rbd ls test | wc -l | grep 0
- rbd import /tmp/empty test/test1
- rbd ls test | grep -q test1
- rbd -p test import /tmp/empty test2
- rbd ls test | grep -q test2
- rbd --image test3 -p test import /tmp/empty
- rbd ls test | grep -q test3
- rbd --image test4 -p test import /tmp/empty
- rbd ls test | grep -q test4
- rbd --dest test5 -p test import /tmp/empty
- rbd ls test | grep -q test5
- rbd --dest test6 --dest-pool test import /tmp/empty
- rbd ls test | grep -q test6
- rbd --image test7 --dest-pool test import /tmp/empty
- rbd ls test | grep -q test7
- rbd --image test/test8 import /tmp/empty
- rbd ls test | grep -q test8
- rbd --dest test/test9 import /tmp/empty
- rbd ls test | grep -q test9
- rbd import --pool test /tmp/empty
- rbd ls test | grep -q empty
-
- # copy with no explicit pool goes to pool rbd
- rbd copy test/test9 test10
- rbd ls test | grep -qv test10
- rbd ls | grep -q test10
- rbd copy test/test9 test/test10
- rbd ls test | grep -q test10
- rbd copy --pool test test10 --dest-pool test test11
- rbd ls test | grep -q test11
- rbd copy --dest-pool rbd --pool test test11 test12
- rbd ls | grep test12
- rbd ls test | grep -qv test12
-
- rm -f /tmp/empty /tmp/empty@snap
- ceph osd pool delete test test --yes-i-really-really-mean-it
-
- for f in foo test1 test10 test12 test2 test3 ; do
- rbd rm $f
- done
-}
-
-test_clone() {
- echo "testing clone..."
- remove_images
- rbd create test1 $RBD_CREATE_ARGS -s 1
- rbd snap create test1@s1
- rbd snap protect test1@s1
-
- rados mkpool rbd2
- rbd pool init rbd2
- rbd clone test1@s1 rbd2/clone
- rbd -p rbd2 ls | grep clone
- rbd -p rbd2 ls -l | grep clone | grep test1@s1
- rbd ls | grep -v clone
- rbd flatten rbd2/clone
- rbd snap create rbd2/clone@s1
- rbd snap protect rbd2/clone@s1
- rbd clone rbd2/clone@s1 clone2
- rbd ls | grep clone2
- rbd ls -l | grep clone2 | grep rbd2/clone@s1
- rbd -p rbd2 ls | grep -v clone2
-
- rbd rm clone2
- rbd snap unprotect rbd2/clone@s1
- rbd snap rm rbd2/clone@s1
- rbd rm rbd2/clone
- rbd snap unprotect test1@s1
- rbd snap rm test1@s1
- rbd rm test1
- rados rmpool rbd2 rbd2 --yes-i-really-really-mean-it
-}
-
-test_trash() {
- echo "testing trash..."
- remove_images
-
- rbd create --image-format 2 -s 1 test1
- rbd create --image-format 2 -s 1 test2
- rbd ls | grep test1
- rbd ls | grep test2
- rbd ls | wc -l | grep 2
- rbd ls -l | grep 'test1.*2.*'
- rbd ls -l | grep 'test2.*2.*'
-
- rbd trash mv test1
- rbd ls | grep test2
- rbd ls | wc -l | grep 1
- rbd ls -l | grep 'test2.*2.*'
-
- rbd trash ls | grep test1
- rbd trash ls | wc -l | grep 1
- rbd trash ls -l | grep 'test1.*USER.*'
- rbd trash ls -l | grep -v 'protected until'
-
- ID=`rbd trash ls | cut -d ' ' -f 1`
- rbd trash rm $ID
-
- rbd trash mv test2
- ID=`rbd trash ls | cut -d ' ' -f 1`
- rbd info --image-id $ID | grep "rbd image '$ID'"
-
- rbd trash restore $ID
- rbd ls | grep test2
- rbd ls | wc -l | grep 1
- rbd ls -l | grep 'test2.*2.*'
-
- rbd trash mv test2 --delay 3600
- rbd trash ls | grep test2
- rbd trash ls | wc -l | grep 1
- rbd trash ls -l | grep 'test2.*USER.*protected until'
-
- rbd trash rm $ID 2>&1 | grep 'Deferment time has not expired'
- rbd trash rm --image-id $ID --force
-
- rbd create --image-format 2 -s 1 test1
- rbd snap create test1@snap1
- rbd snap protect test1@snap1
- rbd trash mv test1
-
- rbd trash ls | grep test1
- rbd trash ls | wc -l | grep 1
- rbd trash ls -l | grep 'test1.*USER.*'
- rbd trash ls -l | grep -v 'protected until'
-
- ID=`rbd trash ls | cut -d ' ' -f 1`
- rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 1
- rbd snap ls --image-id $ID | grep '.*snap1.*'
-
- rbd snap unprotect --image-id $ID --snap snap1
- rbd snap rm --image-id $ID --snap snap1
- rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 0
-
- rbd trash restore $ID
- rbd snap create test1@snap1
- rbd snap create test1@snap2
- rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 2
- rbd snap purge --image-id $ID
- rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 0
-
- remove_images
-}
-
-
-test_pool_image_args
-test_rename
-test_ls
-test_remove
-RBD_CREATE_ARGS=""
-test_others
-test_locking
-RBD_CREATE_ARGS="--image-format 2"
-test_others
-test_locking
-test_clone
-test_trash
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/concurrent.sh b/src/ceph/qa/workunits/rbd/concurrent.sh
deleted file mode 100755
index e2fb797..0000000
--- a/src/ceph/qa/workunits/rbd/concurrent.sh
+++ /dev/null
@@ -1,375 +0,0 @@
-#!/bin/bash -e
-
-# Copyright (C) 2013 Inktank Storage, Inc.
-#
-# This is free software; see the source for copying conditions.
-# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
-# A PARTICULAR PURPOSE.
-#
-# This is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as
-# published by the Free Software Foundation version 2.
-
-# Alex Elder <elder@inktank.com>
-# January 29, 2013
-
-################################################################
-
-# The purpose of this test is to exercise paths through the rbd
-# code, making sure no bad pointer references or invalid reference
-# count operations occur in the face of concurrent activity.
-#
-# Each pass of the test creates an rbd image, maps it, and writes
-# some data into the image. It also reads some data from all of the
-# other images that exist at the time the pass executes. Finally,
-# the image is unmapped and removed. The image removal completes in
-# the background.
-#
-# An iteration of the test consists of performing some number of
-# passes, initiating each pass as a background job, and finally
-# sleeping for a variable delay. The delay is initially a specified
-# value, but each iteration shortens that proportionally, such that
-# the last iteration will not delay at all.
-#
-# The result exercises concurrent creates and deletes of rbd images,
-# writes to new images, reads from both written and unwritten image
-# data (including reads concurrent with writes), and attempts to
-# unmap images being read.
-
-# Usage: concurrent [-i <iter>] [-c <count>] [-d <delay>]
-#
-# Exit status:
-# 0: success
-# 1: usage error
-# 2: other runtime error
-# 99: argument count error (programming error)
-# 100: getopt error (internal error)
-
-################################################################
-
-set -x
-
-# Default flag values; RBD_CONCURRENT_ITER names are intended
-# to be used in yaml scripts to pass in alternate values, e.g.:
-# env:
-# RBD_CONCURRENT_ITER: 20
-# RBD_CONCURRENT_COUNT: 5
-# RBD_CONCURRENT_DELAY: 3
-ITER_DEFAULT=${RBD_CONCURRENT_ITER:-100}
-COUNT_DEFAULT=${RBD_CONCURRENT_COUNT:-5}
-DELAY_DEFAULT=${RBD_CONCURRENT_DELAY:-5} # seconds
-
-PROGNAME=$(basename "$0")
-
-CEPH_SECRET_FILE=${CEPH_SECRET_FILE:-}
-CEPH_ID=${CEPH_ID:-admin}
-SECRET_ARGS=""
-if [ "${CEPH_SECRET_FILE}" ]; then
- SECRET_ARGS="--secret $CEPH_SECRET_FILE"
-fi
-
-################################################################
-
-function setup() {
- ID_MAX_DIR=$(mktemp -d /tmp/image_max_id.XXXXX)
- ID_COUNT_DIR=$(mktemp -d /tmp/image_ids.XXXXXX)
- NAMES_DIR=$(mktemp -d /tmp/image_names.XXXXXX)
- SOURCE_DATA=$(mktemp /tmp/source_data.XXXXXX)
-
- # Use urandom to generate SOURCE_DATA
- dd if=/dev/urandom of=${SOURCE_DATA} bs=2048 count=66 \
- >/dev/null 2>&1
-
- # List of rbd id's *not* created by this script
- export INITIAL_RBD_IDS=$(ls /sys/bus/rbd/devices)
-
- # Set up some environment for normal teuthology test setup.
- # This really should not be necessary but I found it was.
-
- export CEPH_ARGS=" --name client.0"
-}
-
-function cleanup() {
- [ ! "${ID_MAX_DIR}" ] && return
- local id
- local image
-
- # Unmap mapped devices
- for id in $(rbd_ids); do
- image=$(cat "/sys/bus/rbd/devices/${id}/name")
- rbd_unmap_image "${id}"
- rbd_destroy_image "${image}"
- done
- # Get any leftover images
- for image in $(rbd ls 2>/dev/null); do
- rbd_destroy_image "${image}"
- done
- wait
- sync
- rm -f "${SOURCE_DATA}"
- [ -d "${NAMES_DIR}" ] && rmdir "${NAMES_DIR}"
- echo "Max concurrent rbd image count was $(get_max "${ID_COUNT_DIR}")"
- rm -rf "${ID_COUNT_DIR}"
- echo "Max rbd image id was $(get_max "${ID_MAX_DIR}")"
- rm -rf "${ID_MAX_DIR}"
-}
-
-function get_max() {
- [ $# -eq 1 ] || exit 99
- local dir="$1"
-
- ls -U "${dir}" | sort -n | tail -1
-}
-
-trap cleanup HUP INT QUIT
-
-# print a usage message and quit
-#
-# if a message is supplied, print that first, and then exit
-# with non-zero status
-function usage() {
- if [ $# -gt 0 ]; then
- echo "" >&2
- echo "$@" >&2
- fi
-
- echo "" >&2
-	echo "Usage: ${PROGNAME} <options>" >&2
- echo "" >&2
- echo " options:" >&2
- echo " -h or --help" >&2
- echo " show this message" >&2
- echo " -i or --iterations" >&2
- echo " iteration count (1 or more)" >&2
- echo " -c or --count" >&2
- echo " images created per iteration (1 or more)" >&2
- echo " -d or --delay" >&2
- echo " maximum delay between iterations" >&2
- echo "" >&2
- echo " defaults:" >&2
-	echo "        iterations: ${ITER_DEFAULT}" >&2
-	echo "        count: ${COUNT_DEFAULT}" >&2
-	echo "        delay: ${DELAY_DEFAULT} (seconds)" >&2
- echo "" >&2
-
- [ $# -gt 0 ] && exit 1
-
- exit 0 # This is used for a --help
-}
-
-# parse command line arguments
-function parseargs() {
- ITER="${ITER_DEFAULT}"
- COUNT="${COUNT_DEFAULT}"
- DELAY="${DELAY_DEFAULT}"
-
- # Short option flags
- SHORT_OPTS=""
- SHORT_OPTS="${SHORT_OPTS},h"
- SHORT_OPTS="${SHORT_OPTS},i:"
- SHORT_OPTS="${SHORT_OPTS},c:"
- SHORT_OPTS="${SHORT_OPTS},d:"
-
-	# Long option flags
- LONG_OPTS=""
- LONG_OPTS="${LONG_OPTS},help"
- LONG_OPTS="${LONG_OPTS},iterations:"
- LONG_OPTS="${LONG_OPTS},count:"
- LONG_OPTS="${LONG_OPTS},delay:"
-
- TEMP=$(getopt --name "${PROGNAME}" \
- --options "${SHORT_OPTS}" \
- --longoptions "${LONG_OPTS}" \
- -- "$@")
- eval set -- "$TEMP"
-
- while [ "$1" != "--" ]; do
- case "$1" in
- -h|--help)
- usage
- ;;
- -i|--iterations)
- ITER="$2"
- [ "${ITER}" -lt 1 ] &&
- usage "bad iterations value"
- shift
- ;;
- -c|--count)
- COUNT="$2"
- [ "${COUNT}" -lt 1 ] &&
- usage "bad count value"
- shift
- ;;
- -d|--delay)
- DELAY="$2"
- shift
- ;;
- *)
- exit 100 # Internal error
- ;;
- esac
- shift
- done
- shift
-}
-
-function rbd_ids() {
- [ $# -eq 0 ] || exit 99
- local ids
- local i
-
- [ -d /sys/bus/rbd ] || return
- ids=" $(echo $(ls /sys/bus/rbd/devices)) "
- for i in ${INITIAL_RBD_IDS}; do
- ids=${ids/ ${i} / }
- done
- echo ${ids}
-}
-
-function update_maxes() {
- local ids="$@"
- local last_id
- # These aren't 100% safe against concurrent updates but it
- # should be pretty close
- count=$(echo ${ids} | wc -w)
- touch "${ID_COUNT_DIR}/${count}"
- last_id=${ids% }
- last_id=${last_id##* }
- touch "${ID_MAX_DIR}/${last_id}"
-}
-
-function rbd_create_image() {
- [ $# -eq 0 ] || exit 99
- local image=$(basename $(mktemp "${NAMES_DIR}/image.XXXXXX"))
-
- rbd create "${image}" --size=1024
- echo "${image}"
-}
-
-function rbd_image_id() {
- [ $# -eq 1 ] || exit 99
- local image="$1"
-
- grep -l "${image}" /sys/bus/rbd/devices/*/name 2>/dev/null |
- cut -d / -f 6
-}
-
-function rbd_map_image() {
- [ $# -eq 1 ] || exit 99
- local image="$1"
- local id
-
- sudo rbd map "${image}" --user "${CEPH_ID}" ${SECRET_ARGS} \
- > /dev/null 2>&1
-
- id=$(rbd_image_id "${image}")
- echo "${id}"
-}
-
-function rbd_write_image() {
- [ $# -eq 1 ] || exit 99
- local id="$1"
-
- # Offset and size here are meant to ensure beginning and end
- # cross both (4K or 64K) page and (4MB) rbd object boundaries.
- # It assumes the SOURCE_DATA file has size 66 * 2048 bytes
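-	# Worked numbers (added for clarity): the write starts at byte
-	# 2015 * 2048 = 4,126,720 and spans 66 * 2048 = 135,168 bytes, ending
-	# at byte 4,261,888; it therefore straddles the 4 MiB (4,194,304 byte)
-	# object boundary and is page-unaligned at both ends.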
- dd if="${SOURCE_DATA}" of="/dev/rbd${id}" bs=2048 seek=2015 \
- > /dev/null 2>&1
-}
-
-# All starting and ending offsets here are selected so they are not
-# aligned on a (4 KB or 64 KB) page boundary
-function rbd_read_image() {
- [ $# -eq 1 ] || exit 99
- local id="$1"
-
- # First read starting and ending at an offset before any
- # written data. The osd zero-fills data read from an
- # existing rbd object, but before any previously-written
- # data.
- dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=3 \
- > /dev/null 2>&1
- # Next read starting at an offset before any written data,
- # but ending at an offset that includes data that's been
- # written. The osd zero-fills unwritten data at the
- # beginning of a read.
- dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=1983 \
- > /dev/null 2>&1
- # Read the data at offset 2015 * 2048 bytes (where it was
- # written) and make sure it matches the original data.
- cmp --quiet "${SOURCE_DATA}" "/dev/rbd${id}" 0 4126720 ||
- echo "MISMATCH!!!"
- # Now read starting within the pre-written data, but ending
- # beyond it. The rbd client zero-fills the unwritten
- # portion at the end of a read.
- dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=2079 \
- > /dev/null 2>&1
- # Now read starting from an unwritten range within a written
- # rbd object. The rbd client zero-fills this.
- dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=2115 \
- > /dev/null 2>&1
- # Finally read from an unwritten region which would reside
- # in a different (non-existent) osd object. The osd client
- # zero-fills unwritten data when the target object doesn't
- # exist.
- dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=4098 \
- > /dev/null 2>&1
-}
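-
-# For reference (added note): with 2048-byte blocks a 4 MiB rbd object spans
-# 2048 blocks, so object 0 covers blocks 0-2047 and object 1 blocks 2048-4095.
-# The write in rbd_write_image lands on blocks 2015-2080, which is why the
-# reads above at skips 3, 1983, 2079, 2115 and 4098 hit the cases described.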
-
-function rbd_unmap_image() {
- [ $# -eq 1 ] || exit 99
- local id="$1"
-
- sudo rbd unmap "/dev/rbd${id}"
-}
-
-function rbd_destroy_image() {
- [ $# -eq 1 ] || exit 99
- local image="$1"
-
- # Don't wait for it to complete, to increase concurrency
- rbd rm "${image}" >/dev/null 2>&1 &
- rm -f "${NAMES_DIR}/${image}"
-}
-
-function one_pass() {
- [ $# -eq 0 ] || exit 99
- local image
- local id
- local ids
- local i
-
- image=$(rbd_create_image)
- id=$(rbd_map_image "${image}")
- ids=$(rbd_ids)
- update_maxes "${ids}"
-	for i in ${ids}; do
- if [ "${i}" -eq "${id}" ]; then
- rbd_write_image "${i}"
- else
- rbd_read_image "${i}"
- fi
- done
- rbd_unmap_image "${id}"
- rbd_destroy_image "${image}"
-}
-
-################################################################
-
-parseargs "$@"
-
-setup
-
-for iter in $(seq 1 "${ITER}"); do
- for count in $(seq 1 "${COUNT}"); do
- one_pass &
- done
- # Sleep longer at first, overlap iterations more later.
- # Use awk to get sub-second granularity (see sleep(1)).
- sleep $(echo "${DELAY}" "${iter}" "${ITER}" |
- awk '{ printf("%.2f\n", $1 - $1 * $2 / $3);}')
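-    # (with the defaults DELAY=5 and ITER=100 this sleeps ~4.95s on the
-    # first iteration and 0s on the last, so later passes overlap more)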
-
-done
-wait
-
-cleanup
-
-exit 0
diff --git a/src/ceph/qa/workunits/rbd/diff.sh b/src/ceph/qa/workunits/rbd/diff.sh
deleted file mode 100755
index bab84e9..0000000
--- a/src/ceph/qa/workunits/rbd/diff.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash -ex
-
-function cleanup() {
- rbd snap purge foo || :
- rbd rm foo || :
- rbd snap purge foo.copy || :
- rbd rm foo.copy || :
- rbd snap purge foo.copy2 || :
- rbd rm foo.copy2 || :
- rm -f foo.diff foo.out
-}
-
-cleanup
-
-rbd create foo --size 1000
-rbd bench-write foo --io-size 4096 --io-threads 5 --io-total 4096000 --io-pattern rand
-
-#rbd cp foo foo.copy
-rbd create foo.copy --size 1000
-rbd export-diff foo - | rbd import-diff - foo.copy
-
-rbd snap create foo --snap=two
-rbd bench-write foo --io-size 4096 --io-threads 5 --io-total 4096000 --io-pattern rand
-rbd snap create foo --snap=three
-rbd snap create foo.copy --snap=two
-
-rbd export-diff foo@two --from-snap three foo.diff && exit 1 || true # wrong snap order
-rm -f foo.diff
-
-rbd export-diff foo@three --from-snap two foo.diff
-rbd import-diff foo.diff foo.copy
-rbd import-diff foo.diff foo.copy && exit 1 || true # this should fail with EEXIST on the end snap
-rbd snap ls foo.copy | grep three
-
-rbd create foo.copy2 --size 1000
-rbd import-diff foo.diff foo.copy2 && exit 1 || true # this should fail bc the start snap dne
-
-rbd export foo foo.out
-orig=`md5sum foo.out | awk '{print $1}'`
-rm foo.out
-rbd export foo.copy foo.out
-copy=`md5sum foo.out | awk '{print $1}'`
-
-if [ "$orig" != "$copy" ]; then
- echo does not match
- exit 1
-fi
-
-cleanup
-
-echo OK
-
diff --git a/src/ceph/qa/workunits/rbd/diff_continuous.sh b/src/ceph/qa/workunits/rbd/diff_continuous.sh
deleted file mode 100755
index 41e4412..0000000
--- a/src/ceph/qa/workunits/rbd/diff_continuous.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/bin/bash -ex
-
-max=20
-size=1500
-
-iosize=16384
-iototal=16384000
-iothreads=16
-
-parent=`uuidgen`"-parent"
-src=`uuidgen`"-src"
-dst=`uuidgen`"-dst"
-
-function cleanup() {
- rbd snap purge $src || :
- rbd rm $src || :
- rbd snap purge $dst || :
- rbd rm $dst || :
- rbd snap unprotect $parent --snap parent || :
- rbd snap purge $parent || :
- rbd rm $parent || :
-}
-trap cleanup EXIT
-
-# start from a clone
-rbd create $parent --size $size --image-format 2 --stripe-count 8 --stripe-unit 65536
-rbd bench-write $parent --io-size $iosize --io-threads $iothreads --io-total $iototal --io-pattern rand
-rbd snap create $parent --snap parent
-rbd snap protect $parent --snap parent
-rbd clone $parent@parent $src --stripe-count 4 --stripe-unit 262144
-rbd create $dst --size $size --image-format 2 --order 19
-
-# mirror for a while
-for s in `seq 1 $max`; do
- rbd snap create $src --snap=snap$s
- rbd export-diff $src@snap$s - $lastsnap | rbd import-diff - $dst &
- rbd bench-write $src --io-size $iosize --io-threads $iothreads --io-total $iototal --io-pattern rand &
- wait
- lastsnap="--from-snap snap$s"
-done
-
-#trap "" EXIT
-#exit 0
-
-# validate
-for s in `seq 1 $max`; do
- ssum=`rbd export $src@snap$s - | md5sum`
- dsum=`rbd export $dst@snap$s - | md5sum`
- if [ "$ssum" != "$dsum" ]; then
- echo different sum at snap$s
- exit 1
- fi
-done
-
-cleanup
-trap "" EXIT
-
-echo OK
-
diff --git a/src/ceph/qa/workunits/rbd/huge-tickets.sh b/src/ceph/qa/workunits/rbd/huge-tickets.sh
deleted file mode 100755
index 63a6384..0000000
--- a/src/ceph/qa/workunits/rbd/huge-tickets.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-
-# This is a test for http://tracker.ceph.com/issues/8979 and the fallout
-# from triaging it. #8979 itself was random crashes on corrupted memory
-# due to a buffer overflow (for tickets larger than 256 bytes); further
-# inspection showed that vmalloced tickets weren't handled correctly
-# either.
-#
-# What we are doing here is generating three huge keyrings and feeding
-# them to libceph (through 'rbd map' on a scratch image). Bad kernels
-# will crash reliably either on corrupted memory somewhere or a bad page
-# fault in scatterwalk_pagedone().
-
-set -ex
-
-function generate_keyring() {
- local user=$1
- local n=$2
-
- ceph-authtool -C -n client.$user --cap mon 'allow *' --gen-key /tmp/keyring-$user
-
- set +x # don't pollute trace with echoes
- echo -en "\tcaps osd = \"allow rwx pool=rbd" >>/tmp/keyring-$user
- for i in $(seq 1 $n); do
- echo -n ", allow rwx pool=pool$i" >>/tmp/keyring-$user
- done
- echo "\"" >>/tmp/keyring-$user
- set -x
-}
-
-generate_keyring foo 1000 # ~25K, kmalloc
-generate_keyring bar 20000 # ~500K, vmalloc
-generate_keyring baz 300000 # ~8M, vmalloc + sg chaining
-
-rbd create --size 1 test
-
-for user in {foo,bar,baz}; do
- ceph auth import -i /tmp/keyring-$user
- DEV=$(sudo rbd map -n client.$user --keyring /tmp/keyring-$user test)
- sudo rbd unmap $DEV
-done
diff --git a/src/ceph/qa/workunits/rbd/image_read.sh b/src/ceph/qa/workunits/rbd/image_read.sh
deleted file mode 100755
index 907ce86..0000000
--- a/src/ceph/qa/workunits/rbd/image_read.sh
+++ /dev/null
@@ -1,677 +0,0 @@
-#!/bin/bash -e
-
-# Copyright (C) 2013 Inktank Storage, Inc.
-#
-# This is free software; see the source for copying conditions.
-# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
-# A PARTICULAR PURPOSE.
-#
-# This is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as
-# published by the Free Software Foundation version 2.
-
-# Alex Elder <elder@inktank.com>
-# April 10, 2013
-
-################################################################
-
-# The purpose of this test is to validate that data read from a
-# mapped rbd image is what it's expected to be.
-#
-# By default it creates an image and fills it with some data. It
-# then reads back the data at a series of offsets known to cover
-# various situations (such as reading the beginning, end, or the
-# entirety of an object, or doing a read that spans multiple
-# objects), and stashes the results in a set of local files.
-#
-# It also creates and maps a snapshot of the original image after
-# it's been filled, and reads back the same ranges of data from the
-# snapshot. It then compares the data read back with what was read
-# back from the original image, verifying they match.
-#
-# Clone functionality is tested as well, in which case a clone is
-# made of the snapshot, and the same ranges of data are again read
-# and compared with the original. In addition, a snapshot of that
-# clone is created, and a clone of *that* snapshot is put through
-# the same set of tests. (Clone testing can be optionally skipped.)
-
-################################################################
-
-# Default parameter values. Environment variables, if set, will
-# supersede these defaults. Such variables have names that begin
-# with "IMAGE_READ_"; e.g. use IMAGE_READ_PAGE_SIZE=65536
-# to use 65536 as the page size.
-
-DEFAULT_VERBOSE=true
-DEFAULT_TEST_CLONES=true
-DEFAULT_LOCAL_FILES=false
-DEFAULT_FORMAT=2
-DEFAULT_DOUBLE_ORDER=true
-DEFAULT_HALF_ORDER=false
-DEFAULT_PAGE_SIZE=4096
-DEFAULT_OBJECT_ORDER=22
-MIN_OBJECT_ORDER=12 # technically 9, but the rbd CLI enforces 12
-MAX_OBJECT_ORDER=32
-
-PROGNAME=$(basename $0)
-
-ORIGINAL=original-$$
-SNAP1=snap1-$$
-CLONE1=clone1-$$
-SNAP2=snap2-$$
-CLONE2=clone2-$$
-
-function err() {
- if [ $# -gt 0 ]; then
- echo "${PROGNAME}: $@" >&2
- fi
- exit 2
-}
-
-function usage() {
- if [ $# -gt 0 ]; then
- echo "" >&2
- echo "${PROGNAME}: $@" >&2
- fi
- echo "" >&2
- echo "Usage: ${PROGNAME} [<options>]" >&2
- echo "" >&2
- echo "options are:" >&2
- echo " -o object_order" >&2
- echo " must be ${MIN_OBJECT_ORDER}..${MAX_OBJECT_ORDER}" >&2
- echo " -p page_size (in bytes)" >&2
- echo " note: there must be at least 4 pages per object" >&2
- echo " -1" >&2
- echo " test using format 1 rbd images" >&2
- echo " -2" >&2
- echo " test using format 2 rbd images (default)" >&2
- echo " -c" >&2
- echo " also test rbd clone images (implies format 2)" >&2
- echo " -d" >&2
- echo " make the clone object order double its parent's (format 2)" >&2
- echo " -h" >&2
- echo " make the clone object order half of its parent's (format 2)" >&2
- echo " -l" >&2
- echo " use local files rather than rbd images" >&2
- echo " -v" >&2
- echo " disable reporting of what's going on" >&2
- echo "" >&2
- exit 1
-}
-
-function verbose() {
- [ "${VERBOSE}" = true ] && echo "$@"
- true # Don't let the verbose test spoil our return value
-}
-
-function quiet() {
- "$@" 2> /dev/null
-}
-
-function boolean_toggle() {
- [ $# -eq 1 ] || exit 99
- test "$1" = "true" && echo false || echo true
-}
-
-function parseargs() {
- local opts="o:p:12cdhlv"
- local lopts="order:,page_size:,local,clone,verbose,double,half"
- local parsed
- local clone_order_msg
-
- # use values from environment if available
- VERBOSE="${IMAGE_READ_VERBOSE:-${DEFAULT_VERBOSE}}"
- TEST_CLONES="${IMAGE_READ_TEST_CLONES:-${DEFAULT_TEST_CLONES}}"
- LOCAL_FILES="${IMAGE_READ_LOCAL_FILES:-${DEFAULT_LOCAL_FILES}}"
- DOUBLE_ORDER="${IMAGE_READ_DOUBLE_ORDER:-${DEFAULT_DOUBLE_ORDER}}"
- HALF_ORDER="${IMAGE_READ_HALF_ORDER:-${DEFAULT_HALF_ORDER}}"
- FORMAT="${IMAGE_READ_FORMAT:-${DEFAULT_FORMAT}}"
- PAGE_SIZE="${IMAGE_READ_PAGE_SIZE:-${DEFAULT_PAGE_SIZE}}"
- OBJECT_ORDER="${IMAGE_READ_OBJECT_ORDER:-${DEFAULT_OBJECT_ORDER}}"
-
- parsed=$(getopt -o "${opts}" -l "${lopts}" -n "${PROGNAME}" -- "$@") ||
- usage
- eval set -- "${parsed}"
- while true; do
- case "$1" in
- -v|--verbose)
- VERBOSE=$(boolean_toggle "${VERBOSE}");;
- -c|--clone)
- TEST_CLONES=$(boolean_toggle "${TEST_CLONES}");;
- -d|--double)
- DOUBLE_ORDER=$(boolean_toggle "${DOUBLE_ORDER}");;
- -h|--half)
- HALF_ORDER=$(boolean_toggle "${HALF_ORDER}");;
- -l|--local)
- LOCAL_FILES=$(boolean_toggle "${LOCAL_FILES}");;
- -1|-2)
- FORMAT="${1:1}";;
- -p|--page_size)
- PAGE_SIZE="$2"; shift;;
- -o|--order)
- OBJECT_ORDER="$2"; shift;;
- --)
- shift; break;;
- *)
- err "getopt internal error"
- esac
- shift
- done
- [ $# -gt 0 ] && usage "excess arguments ($*)"
-
- if [ "${TEST_CLONES}" = true ]; then
- # If we're using different object orders for clones,
- # make sure the limits are updated accordingly. If
- # both "half" and "double" are specified, just
- # ignore them both.
- if [ "${DOUBLE_ORDER}" = true ]; then
- if [ "${HALF_ORDER}" = true ]; then
- DOUBLE_ORDER=false
- HALF_ORDER=false
- else
- ((MAX_OBJECT_ORDER -= 2))
- fi
- elif [ "${HALF_ORDER}" = true ]; then
- ((MIN_OBJECT_ORDER += 2))
- fi
- fi
-
- [ "${OBJECT_ORDER}" -lt "${MIN_OBJECT_ORDER}" ] &&
- usage "object order (${OBJECT_ORDER}) must be" \
- "at least ${MIN_OBJECT_ORDER}"
- [ "${OBJECT_ORDER}" -gt "${MAX_OBJECT_ORDER}" ] &&
- usage "object order (${OBJECT_ORDER}) must be" \
- "at most ${MAX_OBJECT_ORDER}"
-
- if [ "${TEST_CLONES}" = true ]; then
- if [ "${DOUBLE_ORDER}" = true ]; then
- ((CLONE1_ORDER = OBJECT_ORDER + 1))
- ((CLONE2_ORDER = OBJECT_ORDER + 2))
- clone_order_msg="double"
- elif [ "${HALF_ORDER}" = true ]; then
- ((CLONE1_ORDER = OBJECT_ORDER - 1))
- ((CLONE2_ORDER = OBJECT_ORDER - 2))
- clone_order_msg="half of"
- else
- CLONE1_ORDER="${OBJECT_ORDER}"
- CLONE2_ORDER="${OBJECT_ORDER}"
- clone_order_msg="the same as"
- fi
- fi
-
- [ "${TEST_CLONES}" != true ] || FORMAT=2
-
- OBJECT_SIZE=$(echo "2 ^ ${OBJECT_ORDER}" | bc)
- OBJECT_PAGES=$(echo "${OBJECT_SIZE} / ${PAGE_SIZE}" | bc)
- IMAGE_SIZE=$((2 * 16 * OBJECT_SIZE / (1024 * 1024)))
- [ "${IMAGE_SIZE}" -lt 1 ] && IMAGE_SIZE=1
- IMAGE_OBJECTS=$((IMAGE_SIZE * (1024 * 1024) / OBJECT_SIZE))
-
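- # With the defaults (object order 22, page size 4096) this works
- # out to: OBJECT_SIZE = 2^22 = 4194304 bytes, OBJECT_PAGES = 1024,
- # IMAGE_SIZE = 128 MB, IMAGE_OBJECTS = 32.
-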
- [ "${OBJECT_PAGES}" -lt 4 ] &&
- usage "object size (${OBJECT_SIZE}) must be" \
- "at least 4 * page size (${PAGE_SIZE})"
-
- echo "parameters for this run:"
- echo " format ${FORMAT} images will be tested"
- echo " object order is ${OBJECT_ORDER}, so" \
- "objects are ${OBJECT_SIZE} bytes"
- echo " page size is ${PAGE_SIZE} bytes, so" \
- "there are ${OBJECT_PAGES} pages in an object"
- echo " derived image size is ${IMAGE_SIZE} MB, so" \
- "there are ${IMAGE_OBJECTS} objects in an image"
- if [ "${TEST_CLONES}" = true ]; then
- echo " clone functionality will be tested"
- echo " object size for a clone will be ${clone_order_msg}"
- echo " the object size of its parent image"
- fi
-
- true # Don't let the clones test spoil our return value
-}
-
-function image_dev_path() {
- [ $# -eq 1 ] || exit 99
- local image_name="$1"
-
- if [ "${LOCAL_FILES}" = true ]; then
- echo "${TEMP}/${image_name}"
- return
- fi
-
- echo "/dev/rbd/rbd/${image_name}"
-}
-
-function out_data_dir() {
- [ $# -lt 2 ] || exit 99
- local out_data="${TEMP}/data"
- local image_name
-
- if [ $# -eq 1 ]; then
- image_name="$1"
- echo "${out_data}/${image_name}"
- else
- echo "${out_data}"
- fi
-}
-
-function setup() {
- verbose "===== setting up ====="
- TEMP=$(mktemp -d /tmp/rbd_image_read.XXXXX)
- mkdir -p $(out_data_dir)
-
- # create and fill the original image with some data
- create_image "${ORIGINAL}"
- map_image "${ORIGINAL}"
- fill_original
-
- # create a snapshot of the original
- create_image_snap "${ORIGINAL}" "${SNAP1}"
- map_image_snap "${ORIGINAL}" "${SNAP1}"
-
- if [ "${TEST_CLONES}" = true ]; then
- # create a clone of the original snapshot
- create_snap_clone "${ORIGINAL}" "${SNAP1}" \
- "${CLONE1}" "${CLONE1_ORDER}"
- map_image "${CLONE1}"
-
- # create a snapshot of that clone
- create_image_snap "${CLONE1}" "${SNAP2}"
- map_image_snap "${CLONE1}" "${SNAP2}"
-
- # create a clone of that clone's snapshot
- create_snap_clone "${CLONE1}" "${SNAP2}" \
- "${CLONE2}" "${CLONE2_ORDER}"
- map_image "${CLONE2}"
- fi
-}
-
-function teardown() {
- verbose "===== cleaning up ====="
- if [ "${TEST_CLONES}" = true ]; then
- unmap_image "${CLONE2}" || true
- destroy_snap_clone "${CLONE1}" "${SNAP2}" "${CLONE2}" || true
-
- unmap_image_snap "${CLONE1}" "${SNAP2}" || true
- destroy_image_snap "${CLONE1}" "${SNAP2}" || true
-
- unmap_image "${CLONE1}" || true
- destroy_snap_clone "${ORIGINAL}" "${SNAP1}" "${CLONE1}" || true
- fi
- unmap_image_snap "${ORIGINAL}" "${SNAP1}" || true
- destroy_image_snap "${ORIGINAL}" "${SNAP1}" || true
- unmap_image "${ORIGINAL}" || true
- destroy_image "${ORIGINAL}" || true
-
- rm -rf $(out_data_dir)
- rmdir "${TEMP}"
-}
-
-function create_image() {
- [ $# -eq 1 ] || exit 99
- local image_name="$1"
- local image_path
- local bytes
-
- verbose "creating image \"${image_name}\""
- if [ "${LOCAL_FILES}" = true ]; then
- image_path=$(image_dev_path "${image_name}")
- bytes=$(echo "${IMAGE_SIZE} * 1024 * 1024 - 1" | bc)
- quiet dd if=/dev/zero bs=1 count=1 seek="${bytes}" \
- of="${image_path}"
- return
- fi
-
- rbd create "${image_name}" --image-format "${FORMAT}" \
- --size "${IMAGE_SIZE}" --order "${OBJECT_ORDER}" \
- --image-shared
-}
-
-function destroy_image() {
- [ $# -eq 1 ] || exit 99
- local image_name="$1"
- local image_path
-
- verbose "destroying image \"${image_name}\""
- if [ "${LOCAL_FILES}" = true ]; then
- image_path=$(image_dev_path "${image_name}")
- rm -f "${image_path}"
- return
- fi
-
- rbd rm "${image_name}"
-}
-
-function map_image() {
- [ $# -eq 1 ] || exit 99
- local image_name="$1" # can be image@snap too
-
- if [ "${LOCAL_FILES}" = true ]; then
- return
- fi
-
- sudo rbd map "${image_name}"
-}
-
-function unmap_image() {
- [ $# -eq 1 ] || exit 99
- local image_name="$1" # can be image@snap too
- local image_path
-
- if [ "${LOCAL_FILES}" = true ]; then
- return
- fi
- image_path=$(image_dev_path "${image_name}")
-
- if [ -e "${image_path}" ]; then
- sudo rbd unmap "${image_path}"
- fi
-}
-
-function map_image_snap() {
- [ $# -eq 2 ] || exit 99
- local image_name="$1"
- local snap_name="$2"
- local image_snap
-
- if [ "${LOCAL_FILES}" = true ]; then
- return
- fi
-
- image_snap="${image_name}@${snap_name}"
- map_image "${image_snap}"
-}
-
-function unmap_image_snap() {
- [ $# -eq 2 ] || exit 99
- local image_name="$1"
- local snap_name="$2"
- local image_snap
-
- if [ "${LOCAL_FILES}" = true ]; then
- return
- fi
-
- image_snap="${image_name}@${snap_name}"
- unmap_image "${image_snap}"
-}
-
-function create_image_snap() {
- [ $# -eq 2 ] || exit 99
- local image_name="$1"
- local snap_name="$2"
- local image_snap="${image_name}@${snap_name}"
- local image_path
- local snap_path
-
- verbose "creating snapshot \"${snap_name}\"" \
- "of image \"${image_name}\""
- if [ "${LOCAL_FILES}" = true ]; then
- image_path=$(image_dev_path "${image_name}")
- snap_path=$(image_dev_path "${image_snap}")
-
- cp "${image_path}" "${snap_path}"
- return
- fi
-
- rbd snap create "${image_snap}"
-}
-
-function destroy_image_snap() {
- [ $# -eq 2 ] || exit 99
- local image_name="$1"
- local snap_name="$2"
- local image_snap="${image_name}@${snap_name}"
- local snap_path
-
- verbose "destroying snapshot \"${snap_name}\"" \
- "of image \"${image_name}\""
- if [ "${LOCAL_FILES}" = true ]; then
- snap_path=$(image_dev_path "${image_snap}")
- rm -rf "${snap_path}"
- return
- fi
-
- rbd snap rm "${image_snap}"
-}
-
-function create_snap_clone() {
- [ $# -eq 4 ] || exit 99
- local image_name="$1"
- local snap_name="$2"
- local clone_name="$3"
- local clone_order="$4"
- local image_snap="${image_name}@${snap_name}"
- local snap_path
- local clone_path
-
- verbose "creating clone image \"${clone_name}\"" \
- "of image snapshot \"${image_name}@${snap_name}\""
- if [ "${LOCAL_FILES}" = true ]; then
- snap_path=$(image_dev_path "${image_name}@${snap_name}")
- clone_path=$(image_dev_path "${clone_name}")
-
- cp "${snap_path}" "${clone_path}"
- return
- fi
-
- rbd snap protect "${image_snap}"
- rbd clone --order "${clone_order}" --image-shared \
- "${image_snap}" "${clone_name}"
-}
-
-function destroy_snap_clone() {
- [ $# -eq 3 ] || exit 99
- local image_name="$1"
- local snap_name="$2"
- local clone_name="$3"
- local image_snap="${image_name}@${snap_name}"
- local clone_path
-
- verbose "destroying clone image \"${clone_name}\""
- if [ "${LOCAL_FILES}" = true ]; then
- clone_path=$(image_dev_path "${clone_name}")
-
- rm -rf "${clone_path}"
- return
- fi
-
- rbd rm "${clone_name}"
- rbd snap unprotect "${image_snap}"
-}
-
-# function that produces "random" data with which to fill the image
-function source_data() {
- while quiet dd if=/bin/bash skip=$(($$ % 199)) bs="${PAGE_SIZE}"; do
- : # Just do the dd
- done
-}
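-
-# The while loop above keeps re-running the same dd, so the stream
-# repeats the bytes of /bin/bash (read from a PID-dependent offset)
-# until the consumer stops reading and the broken pipe makes dd fail:
-# cheap, nonzero, repeatable-within-a-run "random" data.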
-
-function fill_original() {
- local image_path=$(image_dev_path "${ORIGINAL}")
-
- verbose "filling original image"
- # Fill 16 objects worth of "random" data
- source_data |
- quiet dd bs="${PAGE_SIZE}" count=$((16 * OBJECT_PAGES)) \
- of="${image_path}"
-}
-
-function do_read() {
- [ $# -eq 3 -o $# -eq 4 ] || exit 99
- local image_name="$1"
- local offset="$2"
- local length="$3"
- [ "${length}" -gt 0 ] || err "do_read: length must be non-zero"
- local image_path=$(image_dev_path "${image_name}")
- local out_data=$(out_data_dir "${image_name}")
- local range=$(printf "%06u~%04u" "${offset}" "${length}")
- local out_file
-
- [ $# -eq 4 ] && offset=$((offset + 16 * OBJECT_PAGES))
-
- verbose "reading \"${image_name}\" pages ${range}"
-
- out_file="${out_data}/pages_${range}"
-
- quiet dd bs="${PAGE_SIZE}" skip="${offset}" count="${length}" \
- if="${image_path}" of="${out_file}"
-}
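-
-# A fourth argument to do_read shifts the read past the 16 objects that
-# fill_original wrote, so the "extended" pass exercises reads from
-# unwritten (zero-filled) space. The output file name encodes the range
-# read, e.g. pages_000000~1024 for offset 0 and length 1024.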
-
-function one_pass() {
- [ $# -eq 1 -o $# -eq 2 ] || exit 99
- local image_name="$1"
- local extended
- [ $# -eq 2 ] && extended="true"
- local offset
- local length
-
- offset=0
-
- # +-----------+-----------+---
- # |X:X:X...X:X| : : ... : | :
- # +-----------+-----------+---
- length="${OBJECT_PAGES}"
- do_read "${image_name}" "${offset}" "${length}" ${extended}
- offset=$((offset + length))
-
- # ---+-----------+---
- # : |X: : ... : | :
- # ---+-----------+---
- length=1
- do_read "${image_name}" "${offset}" "${length}" ${extended}
- offset=$((offset + length))
-
- # ---+-----------+---
- # : | :X: ... : | :
- # ---+-----------+---
- length=1
- do_read "${image_name}" "${offset}" "${length}" ${extended}
- offset=$((offset + length))
-
- # ---+-----------+---
- # : | : :X...X: | :
- # ---+-----------+---
- length=$((OBJECT_PAGES - 3))
- do_read "${image_name}" "${offset}" "${length}" ${extended}
- offset=$((offset + length))
-
- # ---+-----------+---
- # : | : : ... :X| :
- # ---+-----------+---
- length=1
- do_read "${image_name}" "${offset}" "${length}" ${extended}
- offset=$((offset + length))
-
- # ---+-----------+---
- # : |X:X:X...X:X| :
- # ---+-----------+---
- length="${OBJECT_PAGES}"
- do_read "${image_name}" "${offset}" "${length}" ${extended}
- offset=$((offset + length))
-
- offset=$((offset + 1)) # skip 1
-
- # ---+-----------+---
- # : | :X:X...X:X| :
- # ---+-----------+---
- length=$((OBJECT_PAGES - 1))
- do_read "${image_name}" "${offset}" "${length}" ${extended}
- offset=$((offset + length))
-
- # ---+-----------+-----------+---
- # : |X:X:X...X:X|X: : ... : | :
- # ---+-----------+-----------+---
- length=$((OBJECT_PAGES + 1))
- do_read "${image_name}" "${offset}" "${length}" ${extended}
- offset=$((offset + length))
-
- # ---+-----------+-----------+---
- # : | :X:X...X:X|X: : ... : | :
- # ---+-----------+-----------+---
- length="${OBJECT_PAGES}"
- do_read "${image_name}" "${offset}" "${length}" ${extended}
- offset=$((offset + length))
-
- # ---+-----------+-----------+---
- # : | :X:X...X:X|X:X: ... : | :
- # ---+-----------+-----------+---
- length=$((OBJECT_PAGES + 1))
- do_read "${image_name}" "${offset}" "${length}" ${extended}
- offset=$((offset + length))
-
- # ---+-----------+-----------+---
- # : | : :X...X:X|X:X:X...X:X| :
- # ---+-----------+-----------+---
- length=$((2 * OBJECT_PAGES + 2))
- do_read "${image_name}" "${offset}" "${length}" ${extended}
- offset=$((offset + length))
-
- offset=$((offset + 1)) # skip 1
-
- # ---+-----------+-----------+-----
- # : | :X:X...X:X|X:X:X...X:X|X: :
- # ---+-----------+-----------+-----
- length=$((2 * OBJECT_PAGES))
- do_read "${image_name}" "${offset}" "${length}" ${extended}
- offset=$((offset + length))
-
- # --+-----------+-----------+--------
- # : | :X:X...X:X|X:X:X...X:X|X:X: :
- # --+-----------+-----------+--------
- length=$((2 * OBJECT_PAGES + 1))
- do_read "${image_name}" "${offset}" "${length}" ${extended}
- # offset=$((offset + length))
-}
-
-function run_using() {
- [ $# -eq 1 ] || exit 99
- local image_name="$1"
- local out_data=$(out_data_dir "${image_name}")
-
- verbose "===== running using \"${image_name}\" ====="
- mkdir -p "${out_data}"
- one_pass "${image_name}"
- one_pass "${image_name}" extended
-}
-
-function compare() {
- [ $# -eq 1 ] || exit 99
- local image_name="$1"
- local out_data=$(out_data_dir "${image_name}")
- local original=$(out_data_dir "${ORIGINAL}")
-
- verbose "===== comparing \"${image_name}\" ====="
- for i in $(ls "${original}"); do
- verbose compare "\"${image_name}\" \"${i}\""
- cmp "${original}/${i}" "${out_data}/${i}"
- done
- [ "${image_name}" = "${ORIGINAL}" ] || rm -rf "${out_data}"
-}
-
-function doit() {
- [ $# -eq 1 ] || exit 99
- local image_name="$1"
-
- run_using "${image_name}"
- compare "${image_name}"
-}
-
-########## Start
-
-parseargs "$@"
-
-trap teardown EXIT HUP INT
-setup
-
-run_using "${ORIGINAL}"
-doit "${ORIGINAL}@${SNAP1}"
-if [ "${TEST_CLONES}" = true ]; then
- doit "${CLONE1}"
- doit "${CLONE1}@${SNAP2}"
- doit "${CLONE2}"
-fi
-rm -rf $(out_data_dir "${ORIGINAL}")
-
-echo "Success!"
-
-exit 0
diff --git a/src/ceph/qa/workunits/rbd/import_export.sh b/src/ceph/qa/workunits/rbd/import_export.sh
deleted file mode 100755
index c9ecb8b..0000000
--- a/src/ceph/qa/workunits/rbd/import_export.sh
+++ /dev/null
@@ -1,233 +0,0 @@
-#!/bin/sh -ex
-
-# returns data pool for a given image
-get_image_data_pool () {
- image=$1
- data_pool=$(rbd info $image | grep "data_pool: " | awk -F':' '{ print $NF }')
- if [ -z $data_pool ]; then
- data_pool='rbd'
- fi
-
- echo $data_pool
-}
-
-# return list of object numbers populated in image
-objects () {
- image=$1
- prefix=$(rbd info $image | grep block_name_prefix | awk '{print $NF;}')
-
- # strip the prefix and leading zeros from the object names; sorting
- # hex values this way isn't strictly meaningful, but it at least
- # makes the list repeatable and comparable
- objects=$(rados ls -p $(get_image_data_pool $image) | grep $prefix | \
- sed -e 's/'$prefix'\.//' -e 's/^0*\([0-9a-f]\)/\1/' | sort -u)
- echo $objects
-}
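-
-# e.g. an object named rbd_data.10076b8b4567.000000000000000a (the
-# prefix here is illustrative) is reported by objects() as "a" once the
-# prefix and leading zeros are stripped.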
-
-# return false if the files differ, or if their on-disk sizes
-# differ by more than a small tolerance
-
-compare_files_and_ondisk_sizes () {
- cmp -l $1 $2 || return 1
- origsize=$(stat $1 --format %b)
- exportsize=$(stat $2 --format %b)
- difference=$(($exportsize - $origsize))
- difference=${difference#-} # absolute value
- test $difference -ge 0 -a $difference -lt 4096
-}
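-
-# Note: stat's %b reports allocated blocks (normally 512 bytes each;
-# see %B), so the check above tolerates an on-disk allocation
-# difference of up to 4095 blocks, roughly 2 MB, between the two files.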
-
-TMPDIR=/tmp/rbd_import_export_$$
-rm -rf $TMPDIR
-mkdir $TMPDIR
-trap "rm -rf $TMPDIR" INT TERM EXIT
-
-# cannot import a dir
-mkdir foo.$$
-rbd import foo.$$ foo.dir && exit 1 || true # should fail
-rmdir foo.$$
-
-# create a sparse file
-dd if=/bin/sh of=${TMPDIR}/img bs=1k count=1 seek=10
-dd if=/bin/dd of=${TMPDIR}/img bs=1k count=10 seek=100
-dd if=/bin/rm of=${TMPDIR}/img bs=1k count=100 seek=1000
-dd if=/bin/ls of=${TMPDIR}/img bs=1k seek=10000
-dd if=/bin/ln of=${TMPDIR}/img bs=1k seek=100000
-dd if=/bin/grep of=${TMPDIR}/img bs=1k seek=1000000
-
-rbd rm testimg || true
-
-rbd import $RBD_CREATE_ARGS ${TMPDIR}/img testimg
-rbd export testimg ${TMPDIR}/img2
-rbd export testimg - > ${TMPDIR}/img3
-rbd rm testimg
-cmp ${TMPDIR}/img ${TMPDIR}/img2
-cmp ${TMPDIR}/img ${TMPDIR}/img3
-rm ${TMPDIR}/img2 ${TMPDIR}/img3
-
-# try again, importing from stdin
-rbd import $RBD_CREATE_ARGS - testimg < ${TMPDIR}/img
-rbd export testimg ${TMPDIR}/img2
-rbd export testimg - > ${TMPDIR}/img3
-rbd rm testimg
-cmp ${TMPDIR}/img ${TMPDIR}/img2
-cmp ${TMPDIR}/img ${TMPDIR}/img3
-
-rm ${TMPDIR}/img ${TMPDIR}/img2 ${TMPDIR}/img3
-
-if rbd help export | grep -q export-format; then
- # try with --export-format for snapshots
- dd if=/bin/dd of=${TMPDIR}/img bs=1k count=10 seek=100
- rbd import $RBD_CREATE_ARGS ${TMPDIR}/img testimg
- rbd snap create testimg@snap
- rbd export --export-format 2 testimg ${TMPDIR}/img_v2
- rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
- rbd info testimg_import
- rbd info testimg_import@snap
-
- # compare the contents between testimg and testimg_import
- rbd export testimg_import ${TMPDIR}/img_import
- compare_files_and_ondisk_sizes ${TMPDIR}/img ${TMPDIR}/img_import
-
- rbd export testimg@snap ${TMPDIR}/img_snap
- rbd export testimg_import@snap ${TMPDIR}/img_snap_import
- compare_files_and_ondisk_sizes ${TMPDIR}/img_snap ${TMPDIR}/img_snap_import
-
- rm ${TMPDIR}/img_v2
- rm ${TMPDIR}/img_import
- rm ${TMPDIR}/img_snap
- rm ${TMPDIR}/img_snap_import
-
- rbd snap rm testimg_import@snap
- rbd remove testimg_import
- rbd snap rm testimg@snap
- rbd rm testimg
-
- # order
- rbd import --order 20 ${TMPDIR}/img testimg
- rbd export --export-format 2 testimg ${TMPDIR}/img_v2
- rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
- rbd info testimg_import|grep order|awk '{print $2}'|grep 20
-
- rm ${TMPDIR}/img_v2
-
- rbd remove testimg_import
- rbd remove testimg
-
- # features
- rbd import --image-feature layering ${TMPDIR}/img testimg
- FEATURES_BEFORE=`rbd info testimg|grep features`
- rbd export --export-format 2 testimg ${TMPDIR}/img_v2
- rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
- FEATURES_AFTER=`rbd info testimg_import|grep features`
- if [ "$FEATURES_BEFORE" != "$FEATURES_AFTER" ]; then
- false
- fi
-
- rm ${TMPDIR}/img_v2
-
- rbd remove testimg_import
- rbd remove testimg
-
- # stripe
- rbd import --stripe-count 1000 --stripe-unit 4096 ${TMPDIR}/img testimg
- rbd export --export-format 2 testimg ${TMPDIR}/img_v2
- rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
- rbd info testimg_import|grep "stripe unit"|awk '{print $3}'|grep 4096
- rbd info testimg_import|grep "stripe count"|awk '{print $3}'|grep 1000
-
- rm ${TMPDIR}/img_v2
-
- rbd remove testimg_import
- rbd remove testimg
-fi
-
-tiered=0
-if ceph osd dump | grep ^pool | grep "'rbd'" | grep tier; then
- tiered=1
-fi
-
-# create specifically sparse files
-# 1 1M block of sparse, 1 1M block of random
-dd if=/dev/urandom bs=1M seek=1 count=1 of=${TMPDIR}/sparse1
-
-# 1 1M block of random, 1 1M block of sparse
-dd if=/dev/urandom bs=1M count=1 of=${TMPDIR}/sparse2; truncate ${TMPDIR}/sparse2 -s 2M
-
-# 1M-block images; validate resulting blocks
-
-# 1M sparse, 1M data
-rbd rm sparse1 || true
-rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse1
-rbd ls -l | grep sparse1 | grep -Ei '(2M|2048k)'
-[ $tiered -eq 1 -o "$(objects sparse1)" = '1' ]
-
-# export, compare contents and on-disk size
-rbd export sparse1 ${TMPDIR}/sparse1.out
-compare_files_and_ondisk_sizes ${TMPDIR}/sparse1 ${TMPDIR}/sparse1.out
-rm ${TMPDIR}/sparse1.out
-rbd rm sparse1
-
-# 1M data, 1M sparse
-rbd rm sparse2 || true
-rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse2
-rbd ls -l | grep sparse2 | grep -Ei '(2M|2048k)'
-[ $tiered -eq 1 -o "$(objects sparse2)" = '0' ]
-rbd export sparse2 ${TMPDIR}/sparse2.out
-compare_files_and_ondisk_sizes ${TMPDIR}/sparse2 ${TMPDIR}/sparse2.out
-rm ${TMPDIR}/sparse2.out
-rbd rm sparse2
-
-# extend sparse1 to 10 1M blocks, sparse at the end
-truncate ${TMPDIR}/sparse1 -s 10M
-# import from stdin just for fun, verify still sparse
-rbd import $RBD_CREATE_ARGS --order 20 - sparse1 < ${TMPDIR}/sparse1
-rbd ls -l | grep sparse1 | grep -Ei '(10M|10240k)'
-[ $tiered -eq 1 -o "$(objects sparse1)" = '1' ]
-rbd export sparse1 ${TMPDIR}/sparse1.out
-compare_files_and_ondisk_sizes ${TMPDIR}/sparse1 ${TMPDIR}/sparse1.out
-rm ${TMPDIR}/sparse1.out
-rbd rm sparse1
-
-# extend sparse2 to 4M total with two more nonsparse megs
-dd if=/dev/urandom bs=2M count=1 of=${TMPDIR}/sparse2 oflag=append conv=notrunc
-# again from stdin
-rbd import $RBD_CREATE_ARGS --order 20 - sparse2 < ${TMPDIR}/sparse2
-rbd ls -l | grep sparse2 | grep -Ei '(4M|4096k)'
-[ $tiered -eq 1 -o "$(objects sparse2)" = '0 2 3' ]
-rbd export sparse2 ${TMPDIR}/sparse2.out
-compare_files_and_ondisk_sizes ${TMPDIR}/sparse2 ${TMPDIR}/sparse2.out
-rm ${TMPDIR}/sparse2.out
-rbd rm sparse2
-
-# zeros import to a sparse image. Note: importing a file of all zeros
-# currently doesn't work due to the way we handle 'empty' fiemaps;
-# the image ends up zero-filled.
-
-echo "partially-sparse file imports to partially-sparse image"
-rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse1 sparse
-[ $tiered -eq 1 -o "$(objects sparse)" = '1' ]
-rbd rm sparse
-
-echo "zeros import through stdin to sparse image"
-# stdin
-dd if=/dev/zero bs=1M count=4 | rbd import $RBD_CREATE_ARGS - sparse
-[ $tiered -eq 1 -o "$(objects sparse)" = '' ]
-rbd rm sparse
-
-echo "zeros export to sparse file"
-# We must create the image "by hand"; import won't create an all-zero image
-rbd create $RBD_CREATE_ARGS sparse --size 4
-prefix=$(rbd info sparse | grep block_name_prefix | awk '{print $NF;}')
-# drop in 0 object directly
-dd if=/dev/zero bs=4M count=1 | rados -p $(get_image_data_pool sparse) \
- put ${prefix}.000000000000 -
-[ $tiered -eq 1 -o "$(objects sparse)" = '0' ]
-# 1 object full of zeros; export should still create 0-disk-usage file
-rm ${TMPDIR}/sparse || true
-rbd export sparse ${TMPDIR}/sparse
-[ $(stat ${TMPDIR}/sparse --format=%b) = '0' ]
-rbd rm sparse
-
-rm ${TMPDIR}/sparse ${TMPDIR}/sparse1 ${TMPDIR}/sparse2 ${TMPDIR}/sparse3 || true
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/issue-20295.sh b/src/ceph/qa/workunits/rbd/issue-20295.sh
deleted file mode 100755
index 3d617a0..0000000
--- a/src/ceph/qa/workunits/rbd/issue-20295.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/sh -ex
-
-TEST_POOL=ecpool
-TEST_IMAGE=test1
-PGS=12
-
-ceph osd pool create $TEST_POOL $PGS $PGS erasure
-ceph osd pool application enable $TEST_POOL rbd
-ceph osd pool set $TEST_POOL allow_ec_overwrites true
-rbd --data-pool $TEST_POOL create --size 1024G $TEST_IMAGE
-rbd bench \
- --io-type write \
- --io-size 4096 \
- --io-pattern=rand \
- --io-total 100M \
- $TEST_IMAGE
-
-echo "OK"
diff --git a/src/ceph/qa/workunits/rbd/journal.sh b/src/ceph/qa/workunits/rbd/journal.sh
deleted file mode 100755
index 60b5a41..0000000
--- a/src/ceph/qa/workunits/rbd/journal.sh
+++ /dev/null
@@ -1,310 +0,0 @@
-#!/bin/bash -e
-
-. $(dirname $0)/../../standalone/ceph-helpers.sh
-
-function list_tests()
-{
- echo "AVAILABLE TESTS"
- for i in $TESTS; do
- echo " $i"
- done
-}
-
-function usage()
-{
- echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...] [--no-sanity-check] [--no-cleanup]]"
-}
-
-function expect_false()
-{
- set -x
- if "$@"; then return 1; else return 0; fi
-}
-
-function save_commit_position()
-{
- local journal=$1
-
- rados -p rbd getomapval journal.${journal} client_ \
- $TMPDIR/${journal}.client_.omap
-}
-
-function restore_commit_position()
-{
- local journal=$1
-
- rados -p rbd setomapval journal.${journal} client_ \
- < $TMPDIR/${journal}.client_.omap
-}
-
-test_rbd_journal()
-{
- local image=testrbdjournal$$
-
- rbd create --image-feature exclusive-lock --image-feature journaling \
- --size 128 ${image}
- local journal=$(rbd info ${image} --format=xml 2>/dev/null |
- $XMLSTARLET sel -t -v "//image/journal")
- test -n "${journal}"
- rbd journal info ${journal}
- rbd journal info --journal ${journal}
- rbd journal info --image ${image}
-
- rbd feature disable ${image} journaling
-
- rbd info ${image} --format=xml 2>/dev/null |
- expect_false $XMLSTARLET sel -t -v "//image/journal"
- expect_false rbd journal info ${journal}
- expect_false rbd journal info --image ${image}
-
- rbd feature enable ${image} journaling
-
- local journal1=$(rbd info ${image} --format=xml 2>/dev/null |
- $XMLSTARLET sel -t -v "//image/journal")
- test "${journal}" = "${journal1}"
-
- rbd journal info ${journal}
-
- rbd journal status ${journal}
-
- local count=10
- save_commit_position ${journal}
- rbd bench-write ${image} --io-size 4096 --io-threads 1 \
- --io-total $((4096 * count)) --io-pattern seq
- rbd journal status --image ${image} | fgrep "tid=$((count - 1))"
- restore_commit_position ${journal}
- rbd journal status --image ${image} | fgrep "positions=[]"
- local count1=$(rbd journal inspect --verbose ${journal} |
- grep -c 'event_type.*AioWrite')
- test "${count}" -eq "${count1}"
-
- rbd journal export ${journal} $TMPDIR/journal.export
- local size=$(stat -c "%s" $TMPDIR/journal.export)
- test "${size}" -gt 0
-
- rbd export ${image} $TMPDIR/${image}.export
-
- local image1=${image}1
- rbd create --image-feature exclusive-lock --image-feature journaling \
- --size 128 ${image1}
- journal1=$(rbd info ${image1} --format=xml 2>/dev/null |
- $XMLSTARLET sel -t -v "//image/journal")
-
- save_commit_position ${journal1}
- rbd journal import --dest ${image1} $TMPDIR/journal.export
- rbd snap create ${image1}@test
- restore_commit_position ${journal1}
- # check that commit position is properly updated: the journal should contain
- # 12 entries (10 AioWrite + 1 SnapCreate + 1 OpFinish) and commit
- # position set to tid=11
- rbd journal inspect --image ${image1} --verbose | awk '
- /AioWrite/ {w++} # match: "event_type": "AioWrite",
- /SnapCreate/ {s++} # match: "event_type": "SnapCreate",
- /OpFinish/ {f++} # match: "event_type": "OpFinish",
- /entries inspected/ {t=$1; e=$4} # match: 12 entries inspected, 0 errors
- {print} # for diagnostic
- END {
- if (w != 10 || s != 1 || f != 1 || t != 12 || e != 0) exit(1)
- }
- '
-
- rbd export ${image1}@test $TMPDIR/${image1}.export
- cmp $TMPDIR/${image}.export $TMPDIR/${image1}.export
-
- rbd journal reset ${journal}
-
- rbd journal inspect --verbose ${journal} | expect_false grep 'event_type'
-
- rbd snap purge ${image1}
- rbd remove ${image1}
- rbd remove ${image}
-}
-
-
-rbd_assert_eq() {
- local image=$1
- local cmd=$2
- local param=$3
- local expected_val=$4
-
- local val=$(rbd --format xml ${cmd} --image ${image} |
- $XMLSTARLET sel -t -v "${param}")
- test "${val}" = "${expected_val}"
-}
-
-test_rbd_create()
-{
- local image=testrbdcreate$$
-
- rbd create --image-feature exclusive-lock --image-feature journaling \
- --journal-pool rbd \
- --journal-object-size 20M \
- --journal-splay-width 6 \
- --size 256 ${image}
-
- rbd_assert_eq ${image} 'journal info' '//journal/order' 25
- rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
- rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
-
- rbd remove ${image}
-}
-
-test_rbd_copy()
-{
- local src=testrbdcopys$$
- rbd create --size 256 ${src}
-
- local image=testrbdcopy$$
- rbd copy --image-feature exclusive-lock --image-feature journaling \
- --journal-pool rbd \
- --journal-object-size 20M \
- --journal-splay-width 6 \
- ${src} ${image}
-
- rbd remove ${src}
-
- rbd_assert_eq ${image} 'journal info' '//journal/order' 25
- rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
- rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
-
- rbd remove ${image}
-}
-
-test_rbd_clone()
-{
- local parent=testrbdclonep$$
- rbd create --image-feature layering --size 256 ${parent}
- rbd snap create ${parent}@snap
- rbd snap protect ${parent}@snap
-
- local image=testrbdclone$$
- rbd clone --image-feature layering --image-feature exclusive-lock --image-feature journaling \
- --journal-pool rbd \
- --journal-object-size 20M \
- --journal-splay-width 6 \
- ${parent}@snap ${image}
-
- rbd_assert_eq ${image} 'journal info' '//journal/order' 25
- rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
- rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
-
- rbd remove ${image}
- rbd snap unprotect ${parent}@snap
- rbd snap purge ${parent}
- rbd remove ${parent}
-}
-
-test_rbd_import()
-{
- local src=testrbdimports$$
- rbd create --size 256 ${src}
-
- rbd export ${src} $TMPDIR/${src}.export
- rbd remove ${src}
-
- local image=testrbdimport$$
- rbd import --image-feature exclusive-lock --image-feature journaling \
- --journal-pool rbd \
- --journal-object-size 20M \
- --journal-splay-width 6 \
- $TMPDIR/${src}.export ${image}
-
- rbd_assert_eq ${image} 'journal info' '//journal/order' 25
- rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
- rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
-
- rbd remove ${image}
-}
-
-test_rbd_feature()
-{
- local image=testrbdfeature$$
-
- rbd create --image-feature exclusive-lock --size 256 ${image}
-
- rbd feature enable ${image} journaling \
- --journal-pool rbd \
- --journal-object-size 20M \
- --journal-splay-width 6
-
- rbd_assert_eq ${image} 'journal info' '//journal/order' 25
- rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
- rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
-
- rbd remove ${image}
-}
-
-TESTS+=" rbd_journal"
-TESTS+=" rbd_create"
-TESTS+=" rbd_copy"
-TESTS+=" rbd_clone"
-TESTS+=" rbd_import"
-TESTS+=" rbd_feature"
-
-#
-# "main" follows
-#
-
-tests_to_run=()
-
-sanity_check=true
-cleanup=true
-
-while [[ $# -gt 0 ]]; do
- opt=$1
-
- case "$opt" in
- "-l" )
- do_list=1
- ;;
- "--no-sanity-check" )
- sanity_check=false
- ;;
- "--no-cleanup" )
- cleanup=false
- ;;
- "-t" )
- shift
- if [[ -z "$1" ]]; then
- echo "missing argument to '-t'"
- usage ;
- exit 1
- fi
- tests_to_run+=" $1"
- ;;
- "-h" )
- usage ;
- exit 0
- ;;
- esac
- shift
-done
-
-if [[ $do_list -eq 1 ]]; then
- list_tests ;
- exit 0
-fi
-
-TMPDIR=/tmp/rbd_journal$$
-mkdir $TMPDIR
-if $cleanup; then
- trap "rm -fr $TMPDIR" 0
-fi
-
-if test -z "$tests_to_run" ; then
- tests_to_run="$TESTS"
-fi
-
-for i in $tests_to_run; do
- if $sanity_check ; then
- wait_for_clean
- fi
- set -x
- test_${i}
- set +x
-done
-if $sanity_check ; then
- wait_for_clean
-fi
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/kernel.sh b/src/ceph/qa/workunits/rbd/kernel.sh
deleted file mode 100755
index 5fb6b93..0000000
--- a/src/ceph/qa/workunits/rbd/kernel.sh
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/bin/bash -ex
-
-CEPH_SECRET_FILE=${CEPH_SECRET_FILE:-}
-CEPH_ID=${CEPH_ID:-admin}
-SECRET_ARGS=''
-if [ ! -z $CEPH_SECRET_FILE ]; then
- SECRET_ARGS="--secret $CEPH_SECRET_FILE"
-fi
-
-TMP_FILES="/tmp/img1 /tmp/img1.small /tmp/img1.snap1 /tmp/img1.export /tmp/img1.trunc"
-
-function get_device_dir {
- local POOL=$1
- local IMAGE=$2
- local SNAP=$3
- rbd showmapped | tail -n +2 | egrep "\s+$POOL\s+$IMAGE\s+$SNAP\s+" | awk '{print $1;}'
-}
-
-function clean_up {
- [ -e /dev/rbd/rbd/testimg1@snap1 ] &&
- sudo rbd unmap /dev/rbd/rbd/testimg1@snap1
- if [ -e /dev/rbd/rbd/testimg1 ]; then
- sudo rbd unmap /dev/rbd/rbd/testimg1
- rbd snap purge testimg1 || true
- fi
- rbd ls | grep testimg1 > /dev/null && rbd rm testimg1 || true
- sudo rm -f $TMP_FILES
-}
-
-clean_up
-
-trap clean_up INT TERM EXIT
-
-# create an image
-dd if=/bin/sh of=/tmp/img1 bs=1k count=1 seek=10
-dd if=/bin/dd of=/tmp/img1 bs=1k count=10 seek=100
-dd if=/bin/rm of=/tmp/img1 bs=1k count=100 seek=1000
-dd if=/bin/ls of=/tmp/img1 bs=1k seek=10000
-dd if=/bin/ln of=/tmp/img1 bs=1k seek=100000
-dd if=/dev/zero of=/tmp/img1 count=0 seek=150000
-
-# import
-rbd import /tmp/img1 testimg1
-sudo rbd map testimg1 --user $CEPH_ID $SECRET_ARGS
-
-DEV_ID1=$(get_device_dir rbd testimg1 -)
-echo "dev_id1 = $DEV_ID1"
-cat /sys/bus/rbd/devices/$DEV_ID1/size
-cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 76800000
-
-sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.export
-cmp /tmp/img1 /tmp/img1.export
-
-# snapshot
-rbd snap create testimg1 --snap=snap1
-sudo rbd map --snap=snap1 testimg1 --user $CEPH_ID $SECRET_ARGS
-
-DEV_ID2=$(get_device_dir rbd testimg1 snap1)
-cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000
-
-sudo dd if=/dev/rbd/rbd/testimg1@snap1 of=/tmp/img1.snap1
-cmp /tmp/img1 /tmp/img1.snap1
-
-# resize
-rbd resize testimg1 --size=40 --allow-shrink
-cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 41943040
-cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000
-
-sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.small
-cp /tmp/img1 /tmp/img1.trunc
-truncate -s 41943040 /tmp/img1.trunc
-cmp /tmp/img1.trunc /tmp/img1.small
-
-# rollback and check data again
-rbd snap rollback --snap=snap1 testimg1
-cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 76800000
-cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000
-sudo rm -f /tmp/img1.snap1 /tmp/img1.export
-
-sudo dd if=/dev/rbd/rbd/testimg1@snap1 of=/tmp/img1.snap1
-cmp /tmp/img1 /tmp/img1.snap1
-sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.export
-cmp /tmp/img1 /tmp/img1.export
-
-# remove snapshot and detect error from mapped snapshot
-rbd snap rm --snap=snap1 testimg1
-sudo dd if=/dev/rbd/rbd/testimg1@snap1 of=/tmp/img1.snap1 2>&1 | grep 'Input/output error'
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/krbd_data_pool.sh b/src/ceph/qa/workunits/rbd/krbd_data_pool.sh
deleted file mode 100755
index 7d72882..0000000
--- a/src/ceph/qa/workunits/rbd/krbd_data_pool.sh
+++ /dev/null
@@ -1,203 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-function fill_image() {
- local spec=$1
-
- local dev
- dev=$(sudo rbd map $spec)
- xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 -W 0 $IMAGE_SIZE" $dev
- sudo rbd unmap $dev
-}
-
-function create_clones() {
- local spec=$1
-
- rbd snap create $spec@snap
- rbd snap protect $spec@snap
-
- local pool=${spec%/*} # pool/image is assumed
- local image=${spec#*/}
- local child_pool
- for child_pool in $pool clonesonly; do
- rbd clone $spec@snap $child_pool/$pool-$image-clone1
- rbd clone $spec@snap --data-pool repdata $child_pool/$pool-$image-clone2
- rbd clone $spec@snap --data-pool ecdata $child_pool/$pool-$image-clone3
- done
-}
-
-function trigger_copyup() {
- local spec=$1
-
- local dev
- dev=$(sudo rbd map $spec)
- local i
- {
- for ((i = 0; i < $NUM_OBJECTS; i++)); do
- echo pwrite -b $OBJECT_SIZE -S 0x59 $((i * OBJECT_SIZE + OBJECT_SIZE / 2)) $((OBJECT_SIZE / 2))
- done
- echo fsync
- echo quit
- } | xfs_io $dev
- sudo rbd unmap $dev
-}
-
-function compare() {
- local spec=$1
- local object=$2
-
- local dev
- dev=$(sudo rbd map $spec)
- local i
- for ((i = 0; i < $NUM_OBJECTS; i++)); do
- dd if=$dev bs=$OBJECT_SIZE count=1 skip=$i | cmp $object -
- done
- sudo rbd unmap $dev
-}
-
-function mkfs_and_mount() {
- local spec=$1
-
- local dev
- dev=$(sudo rbd map $spec)
- mkfs.ext4 -q -E discard $dev
- sudo mount $dev /mnt
- sudo umount /mnt
- sudo rbd unmap $dev
-}
-
-function list_HEADs() {
- local pool=$1
-
- rados -p $pool ls | while read obj; do
- if rados -p $pool stat $obj >/dev/null 2>&1; then
- echo $obj
- fi
- done
-}
-
-function count_data_objects() {
- local spec=$1
-
- local pool
- pool=$(rbd info $spec | grep 'data_pool: ' | awk '{ print $NF }')
- if [[ -z $pool ]]; then
- pool=${spec%/*} # pool/image is assumed
- fi
-
- local prefix
- prefix=$(rbd info $spec | grep 'block_name_prefix: ' | awk '{ print $NF }')
- rados -p $pool ls | grep -c $prefix
-}
-
-function get_num_clones() {
- local pool=$1
-
- rados -p $pool --format=json df |
- python -c 'import sys, json; print json.load(sys.stdin)["pools"][0]["num_object_clones"]'
-}
-
-ceph osd pool create repdata 24 24
-rbd pool init repdata
-ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
-ceph osd pool create ecdata 24 24 erasure teuthologyprofile
-rbd pool init ecdata
-ceph osd pool set ecdata allow_ec_overwrites true
-ceph osd pool create rbdnonzero 24 24
-rbd pool init rbdnonzero
-ceph osd pool create clonesonly 24 24
-rbd pool init clonesonly
-
-for pool in rbd rbdnonzero; do
- rbd create --size 200 --image-format 1 $pool/img0
- rbd create --size 200 $pool/img1
- rbd create --size 200 --data-pool repdata $pool/img2
- rbd create --size 200 --data-pool ecdata $pool/img3
-done
-
-IMAGE_SIZE=$(rbd info --format=json img1 | python -c 'import sys, json; print json.load(sys.stdin)["size"]')
-OBJECT_SIZE=$(rbd info --format=json img1 | python -c 'import sys, json; print json.load(sys.stdin)["object_size"]')
-NUM_OBJECTS=$((IMAGE_SIZE / OBJECT_SIZE))
-[[ $((IMAGE_SIZE % OBJECT_SIZE)) -eq 0 ]]
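-# e.g. with a 200 MB image and the default 4 MB objects, NUM_OBJECTS
-# works out to 50.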
-
-OBJECT_X=$(mktemp) # xxxx
-xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 0 $OBJECT_SIZE" $OBJECT_X
-
-OBJECT_XY=$(mktemp) # xxYY
-xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 0 $((OBJECT_SIZE / 2))" \
- -c "pwrite -b $OBJECT_SIZE -S 0x59 $((OBJECT_SIZE / 2)) $((OBJECT_SIZE / 2))" \
- $OBJECT_XY
-
-for pool in rbd rbdnonzero; do
- for i in {0..3}; do
- fill_image $pool/img$i
- if [[ $i -ne 0 ]]; then
- create_clones $pool/img$i
- for child_pool in $pool clonesonly; do
- for j in {1..3}; do
- trigger_copyup $child_pool/$pool-img$i-clone$j
- done
- done
- fi
- done
-done
-
-# rbd_directory, rbd_children, rbd_info + img0 header + ...
-NUM_META_RBDS=$((3 + 1 + 3 * (1*2 + 3*2)))
-# rbd_directory, rbd_children, rbd_info + ...
-NUM_META_CLONESONLY=$((3 + 2 * 3 * (3*2)))
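-# (Assuming each format 2 image contributes two metadata objects,
-# rbd_id.<name> and rbd_header.<id>: each of rbd/rbdnonzero holds
-# 3 pool-level objects + 1 format 1 header + 3 images * (2 + 3 clones
-# * 2) = 28 such objects, while clonesonly holds 3 + 2 pools * 3 images
-# * 3 clones * 2 = 39.)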
-
-[[ $(rados -p rbd ls | wc -l) -eq $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
-[[ $(rados -p repdata ls | wc -l) -eq $((1 + 14 * NUM_OBJECTS)) ]]
-[[ $(rados -p ecdata ls | wc -l) -eq $((1 + 14 * NUM_OBJECTS)) ]]
-[[ $(rados -p rbdnonzero ls | wc -l) -eq $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
-[[ $(rados -p clonesonly ls | wc -l) -eq $((NUM_META_CLONESONLY + 6 * NUM_OBJECTS)) ]]
-
-for pool in rbd rbdnonzero; do
- for i in {0..3}; do
- [[ $(count_data_objects $pool/img$i) -eq $NUM_OBJECTS ]]
- if [[ $i -ne 0 ]]; then
- for child_pool in $pool clonesonly; do
- for j in {1..3}; do
- [[ $(count_data_objects $child_pool/$pool-img$i-clone$j) -eq $NUM_OBJECTS ]]
- done
- done
- fi
- done
-done
-
-[[ $(get_num_clones rbd) -eq 0 ]]
-[[ $(get_num_clones repdata) -eq 0 ]]
-[[ $(get_num_clones ecdata) -eq 0 ]]
-[[ $(get_num_clones rbdnonzero) -eq 0 ]]
-[[ $(get_num_clones clonesonly) -eq 0 ]]
-
-for pool in rbd rbdnonzero; do
- for i in {0..3}; do
- compare $pool/img$i $OBJECT_X
- mkfs_and_mount $pool/img$i
- if [[ $i -ne 0 ]]; then
- for child_pool in $pool clonesonly; do
- for j in {1..3}; do
- compare $child_pool/$pool-img$i-clone$j $OBJECT_XY
- done
- done
- fi
- done
-done
-
-# mkfs should discard some objects everywhere but in clonesonly
-[[ $(list_HEADs rbd | wc -l) -lt $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
-[[ $(list_HEADs repdata | wc -l) -lt $((1 + 14 * NUM_OBJECTS)) ]]
-[[ $(list_HEADs ecdata | wc -l) -lt $((1 + 14 * NUM_OBJECTS)) ]]
-[[ $(list_HEADs rbdnonzero | wc -l) -lt $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
-[[ $(list_HEADs clonesonly | wc -l) -eq $((NUM_META_CLONESONLY + 6 * NUM_OBJECTS)) ]]
-
-[[ $(get_num_clones rbd) -eq $NUM_OBJECTS ]]
-[[ $(get_num_clones repdata) -eq $((2 * NUM_OBJECTS)) ]]
-[[ $(get_num_clones ecdata) -eq $((2 * NUM_OBJECTS)) ]]
-[[ $(get_num_clones rbdnonzero) -eq $NUM_OBJECTS ]]
-[[ $(get_num_clones clonesonly) -eq 0 ]]
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/krbd_exclusive_option.sh b/src/ceph/qa/workunits/rbd/krbd_exclusive_option.sh
deleted file mode 100755
index 958aecf..0000000
--- a/src/ceph/qa/workunits/rbd/krbd_exclusive_option.sh
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-function expect_false() {
- if "$@"; then return 1; else return 0; fi
-}
-
-function assert_locked() {
- local dev_id="${1#/dev/rbd}"
-
- local client_addr
- client_addr="$(< $SYSFS_DIR/$dev_id/client_addr)"
-
- local client_id
- client_id="$(< $SYSFS_DIR/$dev_id/client_id)"
- # client4324 -> client.4324
- client_id="client.${client_id#client}"
-
- local watch_cookie
- watch_cookie="$(rados -p rbd listwatchers rbd_header.$IMAGE_ID |
- grep $client_id | cut -d ' ' -f 3 | cut -d '=' -f 2)"
- [[ $(echo -n "$watch_cookie" | grep -c '^') -eq 1 ]]
-
- local actual
- actual="$(rados -p rbd --format=json lock info rbd_header.$IMAGE_ID rbd_lock |
- python -m json.tool)"
-
- local expected
- expected="$(cat <<EOF | python -m json.tool
-{
- "lockers": [
- {
- "addr": "$client_addr",
- "cookie": "auto $watch_cookie",
- "description": "",
- "expiration": "0.000000",
- "name": "$client_id"
- }
- ],
- "name": "rbd_lock",
- "tag": "internal",
- "type": "exclusive"
-}
-EOF
- )"
-
- [ "$actual" = "$expected" ]
-}
-
-function assert_unlocked() {
- rados -p rbd --format=json lock info rbd_header.$IMAGE_ID rbd_lock |
- grep '"lockers":\[\]'
-}
-
-SYSFS_DIR="/sys/bus/rbd/devices"
-IMAGE_NAME="exclusive-option-test"
-
-rbd create --size 1 --image-feature '' $IMAGE_NAME
-
-IMAGE_ID="$(rbd info --format=json $IMAGE_NAME |
- python -c "import sys, json; print json.load(sys.stdin)['block_name_prefix'].split('.')[1]")"
-
-DEV=$(sudo rbd map $IMAGE_NAME)
-assert_unlocked
-sudo rbd unmap $DEV
-assert_unlocked
-
-expect_false sudo rbd map -o exclusive $IMAGE_NAME
-assert_unlocked
-
-rbd feature enable $IMAGE_NAME exclusive-lock
-rbd snap create $IMAGE_NAME@snap
-
-DEV=$(sudo rbd map $IMAGE_NAME)
-assert_unlocked
-sudo rbd unmap $DEV
-assert_unlocked
-
-DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
-assert_locked $DEV
-[[ $(blockdev --getro $DEV) -eq 0 ]]
-sudo rbd unmap $DEV
-assert_unlocked
-
-DEV=$(sudo rbd map -o exclusive $IMAGE_NAME@snap)
-assert_locked $DEV
-[[ $(blockdev --getro $DEV) -eq 1 ]]
-sudo rbd unmap $DEV
-assert_unlocked
-
-DEV=$(sudo rbd map -o exclusive,ro $IMAGE_NAME)
-assert_locked $DEV
-[[ $(blockdev --getro $DEV) -eq 1 ]]
-sudo rbd unmap $DEV
-assert_unlocked
-
-# alternate syntax
-DEV=$(sudo rbd map --exclusive --read-only $IMAGE_NAME)
-assert_locked $DEV
-[[ $(blockdev --getro $DEV) -eq 1 ]]
-sudo rbd unmap $DEV
-assert_unlocked
-
-DEV=$(sudo rbd map $IMAGE_NAME)
-assert_unlocked
-dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
-assert_locked $DEV
-OTHER_DEV=$(sudo rbd map -o noshare,exclusive $IMAGE_NAME)
-assert_locked $OTHER_DEV
-sudo rbd unmap $DEV
-sudo rbd unmap $OTHER_DEV
-assert_unlocked
-
-DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
-assert_locked $DEV
-expect_false sudo rbd map -o noshare,exclusive $IMAGE_NAME
-assert_locked $DEV
-sudo rbd unmap $DEV
-assert_unlocked
-
-DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
-assert_locked $DEV
-OTHER_DEV=$(sudo rbd map -o noshare $IMAGE_NAME)
-dd if=/dev/urandom of=$OTHER_DEV bs=4k count=10 oflag=direct &
-PID=$!
-sleep 20
-assert_locked $DEV
-[ "$(ps -o stat= $PID)" = "D" ]
-sudo rbd unmap $DEV
-wait $PID
-assert_locked $OTHER_DEV
-sudo rbd unmap $OTHER_DEV
-assert_unlocked
-
-DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
-assert_locked $DEV
-sudo rbd map -o noshare,lock_on_read $IMAGE_NAME &
-SUDO_PID=$!
-sleep 20
-assert_locked $DEV
-PID="$(ps -o pid= --ppid $SUDO_PID)"
-[ "$(ps -o stat= $PID)" = "Dl" ]
-sudo rbd unmap $DEV
-wait $SUDO_PID
-assert_locked $OTHER_DEV
-sudo rbd unmap $OTHER_DEV
-assert_unlocked
-
-# induce a watch error after 30 seconds
-DEV=$(sudo rbd map -o exclusive,osdkeepalive=60 $IMAGE_NAME)
-assert_locked $DEV
-OLD_WATCHER="$(rados -p rbd listwatchers rbd_header.$IMAGE_ID)"
-sleep 40
-assert_locked $DEV
-NEW_WATCHER="$(rados -p rbd listwatchers rbd_header.$IMAGE_ID)"
-# same client_id, old cookie < new cookie
-[ "$(echo "$OLD_WATCHER" | cut -d ' ' -f 2)" = \
- "$(echo "$NEW_WATCHER" | cut -d ' ' -f 2)" ]
-[[ $(echo "$OLD_WATCHER" | cut -d ' ' -f 3 | cut -d '=' -f 2) -lt \
- $(echo "$NEW_WATCHER" | cut -d ' ' -f 3 | cut -d '=' -f 2) ]]
-sudo rbd unmap $DEV
-assert_unlocked
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/krbd_fallocate.sh b/src/ceph/qa/workunits/rbd/krbd_fallocate.sh
deleted file mode 100755
index 05fc8a9..0000000
--- a/src/ceph/qa/workunits/rbd/krbd_fallocate.sh
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/bin/bash
-
-# This documents the state of things as of 4.12-rc4.
-#
-# - fallocate -z deallocates because BLKDEV_ZERO_NOUNMAP hint is ignored by
-# krbd
-#
-# - unaligned fallocate -z/-p appear to not deallocate -- see caveat #2 in
-# linux.git commit 6ac56951dc10 ("rbd: implement REQ_OP_WRITE_ZEROES")
-
-set -ex
-
-# no blkdiscard(8) in trusty
-function py_blkdiscard() {
- local offset=$1
-
- python <<EOF
-import fcntl, struct
-BLKDISCARD = 0x1277
-with open('$DEV', 'w') as dev:
- fcntl.ioctl(dev, BLKDISCARD, struct.pack('QQ', $offset, $IMAGE_SIZE - $offset))
-EOF
-}
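-
-# The BLKDISCARD ioctl takes a (start, length) pair packed as two
-# native u64s, so the call above discards everything from $offset to
-# the end of the device.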
-
-# fallocate(1) in trusty doesn't support -z/-p
-function py_fallocate() {
- local mode=$1
- local offset=$2
-
- python <<EOF
-import os, ctypes, ctypes.util
-FALLOC_FL_KEEP_SIZE = 0x01
-FALLOC_FL_PUNCH_HOLE = 0x02
-FALLOC_FL_ZERO_RANGE = 0x10
-libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
-with open('$DEV', 'w') as dev:
- if libc.fallocate(dev.fileno(), ctypes.c_int($mode), ctypes.c_long($offset), ctypes.c_long($IMAGE_SIZE - $offset)):
- err = ctypes.get_errno()
- raise OSError(err, os.strerror(err))
-EOF
-}
-
-function allocate() {
- xfs_io -c "pwrite -b $OBJECT_SIZE -W 0 $IMAGE_SIZE" $DEV
- cmp <(od -xAx $DEV) - <<EOF
-000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd
-*
-$(printf %x $IMAGE_SIZE)
-EOF
- [[ $(rados -p rbd ls | grep -c rbd_data.$IMAGE_ID) -eq $NUM_OBJECTS ]]
-}
-
-function assert_deallocated() {
- cmp <(od -xAx $DEV) - <<EOF
-000000 0000 0000 0000 0000 0000 0000 0000 0000
-*
-$(printf %x $IMAGE_SIZE)
-EOF
- [[ $(rados -p rbd ls | grep -c rbd_data.$IMAGE_ID) -eq 0 ]]
-}
-
-function assert_deallocated_unaligned() {
- local num_objects_expected=$1
-
- cmp <(od -xAx $DEV) - <<EOF
-000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd
-*
-$(printf %x $((OBJECT_SIZE / 2))) 0000 0000 0000 0000 0000 0000 0000 0000
-*
-$(printf %x $IMAGE_SIZE)
-EOF
- [[ $(rados -p rbd ls | grep -c rbd_data.$IMAGE_ID) -eq $num_objects_expected ]]
- for ((i = 0; i < $num_objects_expected; i++)); do
- rados -p rbd stat rbd_data.$IMAGE_ID.$(printf %016x $i) | grep "size $((OBJECT_SIZE / 2))"
- done
-}
-
-IMAGE_NAME="fallocate-test"
-
-rbd create --size 200 $IMAGE_NAME
-
-IMAGE_SIZE=$(rbd info --format=json $IMAGE_NAME | python -c 'import sys, json; print json.load(sys.stdin)["size"]')
-OBJECT_SIZE=$(rbd info --format=json $IMAGE_NAME | python -c 'import sys, json; print json.load(sys.stdin)["object_size"]')
-NUM_OBJECTS=$((IMAGE_SIZE / OBJECT_SIZE))
-[[ $((IMAGE_SIZE % OBJECT_SIZE)) -eq 0 ]]
-
-IMAGE_ID="$(rbd info --format=json $IMAGE_NAME |
- python -c "import sys, json; print json.load(sys.stdin)['block_name_prefix'].split('.')[1]")"
-
-DEV=$(sudo rbd map $IMAGE_NAME)
-
-# blkdev_issue_discard
-allocate
-py_blkdiscard 0
-assert_deallocated
-
-# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOUNMAP
-allocate
-py_fallocate FALLOC_FL_ZERO_RANGE\|FALLOC_FL_KEEP_SIZE 0
-assert_deallocated
-
-# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOFALLBACK
-allocate
-py_fallocate FALLOC_FL_PUNCH_HOLE\|FALLOC_FL_KEEP_SIZE 0
-assert_deallocated
-
-# unaligned blkdev_issue_discard
-allocate
-py_blkdiscard $((OBJECT_SIZE / 2))
-assert_deallocated_unaligned 1
-
-# unaligned blkdev_issue_zeroout w/ BLKDEV_ZERO_NOUNMAP
-allocate
-py_fallocate FALLOC_FL_ZERO_RANGE\|FALLOC_FL_KEEP_SIZE $((OBJECT_SIZE / 2))
-assert_deallocated_unaligned $NUM_OBJECTS
-
-# unaligned blkdev_issue_zeroout w/ BLKDEV_ZERO_NOFALLBACK
-allocate
-py_fallocate FALLOC_FL_PUNCH_HOLE\|FALLOC_FL_KEEP_SIZE $((OBJECT_SIZE / 2))
-assert_deallocated_unaligned $NUM_OBJECTS
-
-sudo rbd unmap $DEV
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/krbd_stable_pages_required.sh b/src/ceph/qa/workunits/rbd/krbd_stable_pages_required.sh
deleted file mode 100755
index a7c44c8..0000000
--- a/src/ceph/qa/workunits/rbd/krbd_stable_pages_required.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-IMAGE_NAME="stable-pages-required-test"
-
-rbd create --size 1 $IMAGE_NAME
-DEV=$(sudo rbd map $IMAGE_NAME)
-[[ $(blockdev --getsize64 $DEV) -eq 1048576 ]]
-grep -q 1 /sys/block/${DEV#/dev/}/bdi/stable_pages_required
-
-rbd resize --size 2 $IMAGE_NAME
-[[ $(blockdev --getsize64 $DEV) -eq 2097152 ]]
-grep -q 1 /sys/block/${DEV#/dev/}/bdi/stable_pages_required
-sudo rbd unmap $DEV
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/map-snapshot-io.sh b/src/ceph/qa/workunits/rbd/map-snapshot-io.sh
deleted file mode 100755
index a69d848..0000000
--- a/src/ceph/qa/workunits/rbd/map-snapshot-io.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-
-# http://tracker.ceph.com/issues/3964
-
-set -ex
-
-rbd create image -s 100
-DEV=$(sudo rbd map image)
-dd if=/dev/zero of=$DEV oflag=direct count=10
-rbd snap create image@s1
-dd if=/dev/zero of=$DEV oflag=direct count=10 # used to fail
-rbd snap rm image@s1
-dd if=/dev/zero of=$DEV oflag=direct count=10
-sudo rbd unmap $DEV
-rbd rm image
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/map-unmap.sh b/src/ceph/qa/workunits/rbd/map-unmap.sh
deleted file mode 100755
index ce7d20f..0000000
--- a/src/ceph/qa/workunits/rbd/map-unmap.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash -ex
-
-RUN_TIME=300 # approximate duration of run (seconds)
-
-[ $# -eq 1 ] && RUN_TIME="$1"
-
-IMAGE_NAME="image-$$"
-IMAGE_SIZE="1024" # MB
-
-function get_time() {
- date '+%s'
-}
-
-function times_up() {
- local end_time="$1"
-
- test $(get_time) -ge "${end_time}"
-}
-
-function map_unmap() {
- [ $# -eq 1 ] || exit 99
- local image_name="$1"
-
- local dev
- dev="$(sudo rbd map "${image_name}")"
- sudo rbd unmap "${dev}"
-}
-
-#### Start
-
-rbd create "${IMAGE_NAME}" --size="${IMAGE_SIZE}"
-
-COUNT=0
-START_TIME=$(get_time)
-END_TIME=$(expr $(get_time) + ${RUN_TIME})
-while ! times_up "${END_TIME}"; do
- map_unmap "${IMAGE_NAME}"
- COUNT=$(expr $COUNT + 1)
-done
-ELAPSED=$(expr "$(get_time)" - "${START_TIME}")
-
-rbd rm "${IMAGE_NAME}"
-
-echo "${COUNT} iterations completed in ${ELAPSED} seconds"
diff --git a/src/ceph/qa/workunits/rbd/merge_diff.sh b/src/ceph/qa/workunits/rbd/merge_diff.sh
deleted file mode 100755
index 0b6643d..0000000
--- a/src/ceph/qa/workunits/rbd/merge_diff.sh
+++ /dev/null
@@ -1,474 +0,0 @@
-#!/bin/bash -ex
-
-pool=rbd
-gen=$pool/gen
-out=$pool/out
-testno=1
-
-mkdir -p merge_diff_test
-pushd merge_diff_test
-
-function expect_false()
-{
- if "$@"; then return 1; else return 0; fi
-}
-
-function clear_all()
-{
- fusermount -u mnt || true
-
- rbd snap purge --no-progress $gen || true
- rbd rm --no-progress $gen || true
- rbd snap purge --no-progress $out || true
- rbd rm --no-progress $out || true
-
- rm -rf diffs || true
-}
-
-function rebuild()
-{
- clear_all
- echo Starting test $testno
- ((testno++))
- if [[ "$2" -lt "$1" ]] && [[ "$3" -gt "1" ]]; then
- rbd create $gen --size 100 --object-size $1 --stripe-unit $2 --stripe-count $3 --image-format $4
- else
- rbd create $gen --size 100 --object-size $1 --image-format $4
- fi
- rbd create $out --size 1 --object-size 524288
- mkdir -p mnt diffs
- # lttng has atexit handlers that need to be fork/clone aware
- LD_PRELOAD=liblttng-ust-fork.so.0 rbd-fuse -p $pool mnt
-}
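-# rbd-fuse exposes each image in $pool as a file under mnt/, so the write()
-# helper below can dd straight into mnt/gen to mutate the test image.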
-
-function write()
-{
- dd if=/dev/urandom of=mnt/gen bs=1M conv=notrunc seek=$1 count=$2
-}
-
-function snap()
-{
- rbd snap create $gen@$1
-}
-
-function resize()
-{
- rbd resize --no-progress $gen --size $1 --allow-shrink
-}
-
-function export_diff()
-{
- if [ $2 == "head" ]; then
- target="$gen"
- else
- target="$gen@$2"
- fi
- if [ $1 == "null" ]; then
- rbd export-diff --no-progress $target diffs/$1.$2
- else
- rbd export-diff --no-progress $target --from-snap $1 diffs/$1.$2
- fi
-}
-
-function merge_diff()
-{
- rbd merge-diff diffs/$1.$2 diffs/$2.$3 diffs/$1.$3
-}
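-# Naming sketch for the helpers above: diff files are stored as
-# diffs/<from>.<to>, where "null" means "since image creation" and "head"
-# means "up to the current image head"; merge_diff combines diffs/a.b and
-# diffs/b.c into diffs/a.c.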
-
-function check()
-{
-  rbd import-diff --no-progress diffs/$1.$2 $out || return 1
- if [ "$2" == "head" ]; then
- sum1=`rbd export $gen - | md5sum`
- else
- sum1=`rbd export $gen@$2 - | md5sum`
- fi
- sum2=`rbd export $out - | md5sum`
- if [ "$sum1" != "$sum2" ]; then
-    exit 1
- fi
- if [ "$2" != "head" ]; then
-    rbd snap ls $out | awk '{print $2}' | grep "^$2\$" || return 1
- fi
-}
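-# Each test below follows the same pattern: rebuild the image, write data
-# and take snapshots, export the individual diffs, merge two adjacent diffs
-# with merge_diff, then import the merged diff into $out and compare md5
-# checksums against $gen.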
-
-#test f/t header
-rebuild 4194304 4194304 1 2
-write 0 1
-snap a
-write 1 1
-export_diff null a
-export_diff a head
-merge_diff null a head
-check null head
-
-rebuild 4194304 4194304 1 2
-write 0 1
-snap a
-write 1 1
-snap b
-write 2 1
-export_diff null a
-export_diff a b
-export_diff b head
-merge_diff null a b
-check null b
-
-rebuild 4194304 4194304 1 2
-write 0 1
-snap a
-write 1 1
-snap b
-write 2 1
-export_diff null a
-export_diff a b
-export_diff b head
-merge_diff a b head
-check null a
-check a head
-
-rebuild 4194304 4194304 1 2
-write 0 1
-snap a
-write 1 1
-snap b
-write 2 1
-export_diff null a
-export_diff a b
-export_diff b head
-rbd merge-diff diffs/null.a diffs/a.b - | rbd merge-diff - diffs/b.head - > diffs/null.head
-check null head
-
-#data test
-rebuild 4194304 4194304 1 2
-write 4 2
-snap s101
-write 0 3
-write 8 2
-snap s102
-export_diff null s101
-export_diff s101 s102
-merge_diff null s101 s102
-check null s102
-
-rebuild 4194304 4194304 1 2
-write 0 3
-write 2 5
-write 8 2
-snap s201
-write 0 2
-write 6 3
-snap s202
-export_diff null s201
-export_diff s201 s202
-merge_diff null s201 s202
-check null s202
-
-rebuild 4194304 4194304 1 2
-write 0 4
-write 12 6
-snap s301
-write 0 6
-write 10 5
-write 16 4
-snap s302
-export_diff null s301
-export_diff s301 s302
-merge_diff null s301 s302
-check null s302
-
-rebuild 4194304 4194304 1 2
-write 0 12
-write 14 2
-write 18 2
-snap s401
-write 1 2
-write 5 6
-write 13 3
-write 18 2
-snap s402
-export_diff null s401
-export_diff s401 s402
-merge_diff null s401 s402
-check null s402
-
-rebuild 4194304 4194304 1 2
-write 2 4
-write 10 12
-write 27 6
-write 36 4
-snap s501
-write 0 24
-write 28 4
-write 36 4
-snap s502
-export_diff null s501
-export_diff s501 s502
-merge_diff null s501 s502
-check null s502
-
-rebuild 4194304 4194304 1 2
-write 0 8
-resize 5
-snap r1
-resize 20
-write 12 8
-snap r2
-resize 8
-write 4 4
-snap r3
-export_diff null r1
-export_diff r1 r2
-export_diff r2 r3
-merge_diff null r1 r2
-merge_diff null r2 r3
-check null r3
-
-rebuild 4194304 4194304 1 2
-write 0 8
-resize 5
-snap r1
-resize 20
-write 12 8
-snap r2
-resize 8
-write 4 4
-snap r3
-resize 10
-snap r4
-export_diff null r1
-export_diff r1 r2
-export_diff r2 r3
-export_diff r3 r4
-merge_diff null r1 r2
-merge_diff null r2 r3
-merge_diff null r3 r4
-check null r4
-
-# merge diff doesn't yet support fancy striping
-# rebuild 4194304 65536 8 2
-# write 0 32
-# snap r1
-# write 16 32
-# snap r2
-# export_diff null r1
-# export_diff r1 r2
-# expect_false merge_diff null r1 r2
-
-rebuild 4194304 4194304 1 2
-write 0 1
-write 2 1
-write 4 1
-write 6 1
-snap s1
-write 1 1
-write 3 1
-write 5 1
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 1 1
-write 3 1
-write 5 1
-snap s1
-write 0 1
-write 2 1
-write 4 1
-write 6 1
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 0 3
-write 6 3
-write 12 3
-snap s1
-write 1 1
-write 7 1
-write 13 1
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 0 3
-write 6 3
-write 12 3
-snap s1
-write 0 1
-write 6 1
-write 12 1
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 0 3
-write 6 3
-write 12 3
-snap s1
-write 2 1
-write 8 1
-write 14 1
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 1 1
-write 7 1
-write 13 1
-snap s1
-write 0 3
-write 6 3
-write 12 3
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 0 1
-write 6 1
-write 12 1
-snap s1
-write 0 3
-write 6 3
-write 12 3
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 2 1
-write 8 1
-write 14 1
-snap s1
-write 0 3
-write 6 3
-write 12 3
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 0 3
-write 6 3
-write 12 3
-snap s1
-write 0 3
-write 6 3
-write 12 3
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 2 4
-write 8 4
-write 14 4
-snap s1
-write 0 3
-write 6 3
-write 12 3
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 0 4
-write 6 4
-write 12 4
-snap s1
-write 0 3
-write 6 3
-write 12 3
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 0 6
-write 6 6
-write 12 6
-snap s1
-write 0 3
-write 6 3
-write 12 3
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 3 6
-write 9 6
-write 15 6
-snap s1
-write 0 3
-write 6 3
-write 12 3
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 0 8
-snap s1
-resize 2
-resize 100
-snap s2
-export_diff null s1
-export_diff s1 s2
-merge_diff null s1 s2
-check null s2
-
-rebuild 4194304 4194304 1 2
-write 0 8
-snap s1
-resize 2
-resize 100
-snap s2
-write 20 2
-snap s3
-export_diff null s1
-export_diff s1 s2
-export_diff s2 s3
-merge_diff s1 s2 s3
-check null s1
-check s1 s3
-
-#addme
-
-clear_all
-popd
-rm -rf merge_diff_test
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/notify_master.sh b/src/ceph/qa/workunits/rbd/notify_master.sh
deleted file mode 100755
index 6ebea31..0000000
--- a/src/ceph/qa/workunits/rbd/notify_master.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh -ex
-
-relpath=$(dirname $0)/../../../src/test/librbd
-python $relpath/test_notify.py master
-exit 0
diff --git a/src/ceph/qa/workunits/rbd/notify_slave.sh b/src/ceph/qa/workunits/rbd/notify_slave.sh
deleted file mode 100755
index ea66161..0000000
--- a/src/ceph/qa/workunits/rbd/notify_slave.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh -ex
-
-relpath=$(dirname $0)/../../../src/test/librbd
-python $relpath/test_notify.py slave
-exit 0
diff --git a/src/ceph/qa/workunits/rbd/permissions.sh b/src/ceph/qa/workunits/rbd/permissions.sh
deleted file mode 100755
index a435a67..0000000
--- a/src/ceph/qa/workunits/rbd/permissions.sh
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/bin/bash -ex
-
-IMAGE_FEATURES="layering,exclusive-lock,object-map,fast-diff"
-
-create_pools() {
- ceph osd pool create images 100
- rbd pool init images
- ceph osd pool create volumes 100
- rbd pool init volumes
-}
-
-delete_pools() {
- (ceph osd pool delete images images --yes-i-really-really-mean-it || true) >/dev/null 2>&1
- (ceph osd pool delete volumes volumes --yes-i-really-really-mean-it || true) >/dev/null 2>&1
-}
-
-recreate_pools() {
- delete_pools
- create_pools
-}
-
-delete_users() {
- (ceph auth del client.volumes || true) >/dev/null 2>&1
- (ceph auth del client.images || true) >/dev/null 2>&1
-}
-
-create_users() {
- ceph auth get-or-create client.volumes mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow r class-read pool images, allow rwx pool volumes' >> $KEYRING
- ceph auth get-or-create client.images mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool images' >> $KEYRING
-}
-
-expect() {
-
- set +e
-
- local expected_ret=$1
- local ret
-
- shift
- cmd=$@
-
- eval $cmd
- ret=$?
-
- set -e
-
- if [[ $ret -ne $expected_ret ]]; then
-        echo "ERROR: running '$cmd': expected $expected_ret got $ret"
- return 1
- fi
-
- return 0
-}
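-# Note: the expected exit codes used below track errno values surfaced by
-# the rbd CLI: 16 = EBUSY (snapshot protected or has children), 39 =
-# ENOTEMPTY (image still has snapshots), 2 = ENOENT; plain permission
-# failures exit 1.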
-
-test_images_access() {
- rbd -k $KEYRING --id images create --image-format 2 --image-feature $IMAGE_FEATURES -s 1 images/foo
- rbd -k $KEYRING --id images snap create images/foo@snap
- rbd -k $KEYRING --id images snap protect images/foo@snap
- rbd -k $KEYRING --id images snap unprotect images/foo@snap
- rbd -k $KEYRING --id images snap protect images/foo@snap
- rbd -k $KEYRING --id images export images/foo@snap - >/dev/null
- expect 16 rbd -k $KEYRING --id images snap rm images/foo@snap
-
- rbd -k $KEYRING --id volumes clone --image-feature $IMAGE_FEATURES images/foo@snap volumes/child
- expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
- expect 1 rbd -k $KEYRING --id volumes snap unprotect images/foo@snap
- expect 1 rbd -k $KEYRING --id images flatten volumes/child
- rbd -k $KEYRING --id volumes flatten volumes/child
- expect 1 rbd -k $KEYRING --id volumes snap unprotect images/foo@snap
- rbd -k $KEYRING --id images snap unprotect images/foo@snap
-
- expect 39 rbd -k $KEYRING --id images rm images/foo
- rbd -k $KEYRING --id images snap rm images/foo@snap
- rbd -k $KEYRING --id images rm images/foo
- rbd -k $KEYRING --id volumes rm volumes/child
-}
-
-test_volumes_access() {
- rbd -k $KEYRING --id images create --image-format 2 --image-feature $IMAGE_FEATURES -s 1 images/foo
- rbd -k $KEYRING --id images snap create images/foo@snap
- rbd -k $KEYRING --id images snap protect images/foo@snap
-
- # commands that work with read-only access
- rbd -k $KEYRING --id volumes info images/foo@snap
- rbd -k $KEYRING --id volumes snap ls images/foo
- rbd -k $KEYRING --id volumes export images/foo - >/dev/null
- rbd -k $KEYRING --id volumes cp images/foo volumes/foo_copy
- rbd -k $KEYRING --id volumes rm volumes/foo_copy
- rbd -k $KEYRING --id volumes children images/foo@snap
- rbd -k $KEYRING --id volumes lock list images/foo
-
- # commands that fail with read-only access
- expect 1 rbd -k $KEYRING --id volumes resize -s 2 images/foo --allow-shrink
- expect 1 rbd -k $KEYRING --id volumes snap create images/foo@2
- expect 1 rbd -k $KEYRING --id volumes snap rollback images/foo@snap
- expect 1 rbd -k $KEYRING --id volumes snap remove images/foo@snap
- expect 1 rbd -k $KEYRING --id volumes snap purge images/foo
- expect 1 rbd -k $KEYRING --id volumes snap unprotect images/foo@snap
- expect 1 rbd -k $KEYRING --id volumes flatten images/foo
- expect 1 rbd -k $KEYRING --id volumes lock add images/foo test
- expect 1 rbd -k $KEYRING --id volumes lock remove images/foo test locker
- expect 1 rbd -k $KEYRING --id volumes ls rbd
-
- # create clone and snapshot
- rbd -k $KEYRING --id volumes clone --image-feature $IMAGE_FEATURES images/foo@snap volumes/child
- rbd -k $KEYRING --id volumes snap create volumes/child@snap1
- rbd -k $KEYRING --id volumes snap protect volumes/child@snap1
- rbd -k $KEYRING --id volumes snap create volumes/child@snap2
-
- # make sure original snapshot stays protected
- expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
- rbd -k $KEYRING --id volumes flatten volumes/child
- expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
- rbd -k $KEYRING --id volumes snap rm volumes/child@snap2
- expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
- expect 2 rbd -k $KEYRING --id volumes snap rm volumes/child@snap2
- rbd -k $KEYRING --id volumes snap unprotect volumes/child@snap1
- expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
-
- # clean up
- rbd -k $KEYRING --id volumes snap rm volumes/child@snap1
- rbd -k $KEYRING --id images snap unprotect images/foo@snap
- rbd -k $KEYRING --id images snap rm images/foo@snap
- rbd -k $KEYRING --id images rm images/foo
- rbd -k $KEYRING --id volumes rm volumes/child
-}
-
-cleanup() {
- rm -f $KEYRING
-}
-KEYRING=$(mktemp)
-trap cleanup EXIT ERR HUP INT QUIT
-
-delete_users
-create_users
-
-recreate_pools
-test_images_access
-
-recreate_pools
-test_volumes_access
-
-delete_pools
-delete_users
-
-echo OK
-exit 0
diff --git a/src/ceph/qa/workunits/rbd/qemu-iotests.sh b/src/ceph/qa/workunits/rbd/qemu-iotests.sh
deleted file mode 100755
index e775ade..0000000
--- a/src/ceph/qa/workunits/rbd/qemu-iotests.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/sh -ex
-
-# Run qemu-iotests against rbd. These are block-level tests that go
-# through qemu but do not involve running a full vm. Note that these
-# require the admin ceph user, as there's no way to pass the ceph user
-# to qemu-iotests currently.
-
-testlist='001 002 003 004 005 008 009 010 011 021 025 032 033 055'
-
-git clone https://github.com/qemu/qemu.git
-cd qemu
-if lsb_release -da | grep -iq xenial; then
- # Xenial requires a recent test harness
- git checkout v2.3.0
-else
-    # use v2.2.0-rc3 (last released version that handles all the tests)
-    git checkout 2528043f1f299e0e88cb026f1ca7c40bbb4e1f80
-fi
-
-cd tests/qemu-iotests
-mkdir bin
-# qemu-iotests expects a binary called just 'qemu' to be available
-if [ -x '/usr/bin/qemu-system-x86_64' ]
-then
- QEMU='/usr/bin/qemu-system-x86_64'
-else
- QEMU='/usr/libexec/qemu-kvm'
-
- # disable test 055 since qemu-kvm (RHEL/CentOS) doesn't support the
- # required QMP commands
- testlist=$(echo ${testlist} | sed "s/ 055//g")
-fi
-ln -s $QEMU bin/qemu
-
-# This is normally generated by configure, but has nothing but a python
-# binary definition, which we don't care about. For some reason it is
-# not present on trusty.
-touch common.env
-
-# TEST_DIR is the pool for rbd
-TEST_DIR=rbd PATH="$PATH:$PWD/bin" ./check -rbd $testlist
-
-cd ../../..
-rm -rf qemu
diff --git a/src/ceph/qa/workunits/rbd/qemu_dynamic_features.sh b/src/ceph/qa/workunits/rbd/qemu_dynamic_features.sh
deleted file mode 100755
index f237f66..0000000
--- a/src/ceph/qa/workunits/rbd/qemu_dynamic_features.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash -x
-
-if [[ -z "${IMAGE_NAME}" ]]; then
- echo image name must be provided
- exit 1
-fi
-
-is_qemu_running() {
- rbd status ${IMAGE_NAME} | grep -v "Watchers: none"
-}
-
-wait_for_qemu() {
- while ! is_qemu_running ; do
- echo "*** Waiting for QEMU"
- sleep 30
- done
-}
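-# `rbd status` reports "Watchers: none" while no client has the image open;
-# once QEMU opens it a watcher is registered, so the grep -v above produces
-# output and is_qemu_running succeeds.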
-
-wait_for_qemu
-rbd feature disable ${IMAGE_NAME} journaling
-rbd feature disable ${IMAGE_NAME} fast-diff
-rbd feature disable ${IMAGE_NAME} object-map
-rbd feature disable ${IMAGE_NAME} exclusive-lock
-
-while is_qemu_running ; do
- echo "*** Enabling all features"
- rbd feature enable ${IMAGE_NAME} exclusive-lock || break
- rbd feature enable ${IMAGE_NAME} journaling || break
- rbd feature enable ${IMAGE_NAME} object-map || break
- rbd feature enable ${IMAGE_NAME} fast-diff || break
- if is_qemu_running ; then
- sleep 60
- fi
-
- echo "*** Disabling all features"
- rbd feature disable ${IMAGE_NAME} journaling || break
- rbd feature disable ${IMAGE_NAME} fast-diff || break
- rbd feature disable ${IMAGE_NAME} object-map || break
- rbd feature disable ${IMAGE_NAME} exclusive-lock || break
- if is_qemu_running ; then
- sleep 60
- fi
-done
-
-if is_qemu_running ; then
- echo "RBD command failed on alive QEMU"
- exit 1
-fi
diff --git a/src/ceph/qa/workunits/rbd/qemu_rebuild_object_map.sh b/src/ceph/qa/workunits/rbd/qemu_rebuild_object_map.sh
deleted file mode 100755
index c064ee9..0000000
--- a/src/ceph/qa/workunits/rbd/qemu_rebuild_object_map.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash -ex
-
-if [[ -z "${IMAGE_NAME}" ]]; then
- echo image name must be provided
- exit 1
-fi
-
-is_qemu_running() {
- rbd status ${IMAGE_NAME} | grep -v "Watchers: none"
-}
-
-wait_for_qemu() {
- while ! is_qemu_running ; do
- echo "*** Waiting for QEMU"
- sleep 30
- done
-}
-
-wait_for_qemu
-rbd feature disable ${IMAGE_NAME} journaling || true
-rbd feature disable ${IMAGE_NAME} fast-diff || true
-rbd feature disable ${IMAGE_NAME} object-map || true
-rbd feature disable ${IMAGE_NAME} exclusive-lock || true
-
-rbd feature enable ${IMAGE_NAME} exclusive-lock
-rbd feature enable ${IMAGE_NAME} object-map
-
-while is_qemu_running ; do
- echo "*** Rebuilding object map"
- rbd object-map rebuild ${IMAGE_NAME}
-
- if is_qemu_running ; then
- sleep 60
- fi
-done
-
diff --git a/src/ceph/qa/workunits/rbd/rbd-ggate.sh b/src/ceph/qa/workunits/rbd/rbd-ggate.sh
deleted file mode 100755
index 536070a..0000000
--- a/src/ceph/qa/workunits/rbd/rbd-ggate.sh
+++ /dev/null
@@ -1,182 +0,0 @@
-#!/bin/sh -ex
-
-POOL=testrbdggate$$
-IMAGE=test
-SIZE=64
-DATA=
-DEV=
-
-if which xmlstarlet > /dev/null 2>&1; then
- XMLSTARLET=xmlstarlet
-elif which xml > /dev/null 2>&1; then
- XMLSTARLET=xml
-else
- echo "Missing xmlstarlet binary!"
- exit 1
-fi
-
-_sudo()
-{
- local cmd
-
- if [ `id -u` -eq 0 ]
- then
- "$@"
- return $?
- fi
-
-    # Look for the command in the user's PATH. If that fails, run it as is,
-    # assuming it is in sudo's PATH.
- cmd=`which $1 2>/dev/null` || cmd=$1
- shift
- sudo -nE "${cmd}" "$@"
-}
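-# Usage sketch (assumes passwordless sudo for the test user):
-#   DEV=$(_sudo rbd-ggate map ${POOL}/${IMAGE})
-# Resolving the command with `which` first lets a build-tree binary on PATH
-# take precedence over a system-installed one when escalating.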
-
-setup()
-{
- if [ -e CMakeCache.txt ]; then
- # running under cmake build dir
-
- CEPH_SRC=$(readlink -f $(dirname $0)/../../../src)
- CEPH_ROOT=${PWD}
- CEPH_BIN=${CEPH_ROOT}/bin
-
- export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
- export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind
- for x in ${CEPH_ROOT}/lib/cython_modules/lib* ; do
- PYTHONPATH="${PYTHONPATH}:${x}"
- done
- PATH=${CEPH_BIN}:${PATH}
- fi
-
- _sudo echo test sudo
-
- trap cleanup INT TERM EXIT
- TEMPDIR=`mktemp -d`
- DATA=${TEMPDIR}/data
- dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
- ceph osd pool create ${POOL} 64 64
- rbd --dest-pool ${POOL} --no-progress import ${DATA} ${IMAGE}
-}
-
-cleanup()
-{
- set +e
- rm -Rf ${TEMPDIR}
- if [ -n "${DEV}" ]
- then
- _sudo rbd-ggate unmap ${DEV}
- fi
- ceph osd pool delete ${POOL} ${POOL} --yes-i-really-really-mean-it
-}
-
-expect_false()
-{
- if "$@"; then return 1; else return 0; fi
-}
-
-#
-# main
-#
-
-setup
-
-# exit status test
-expect_false rbd-ggate
-expect_false rbd-ggate INVALIDCMD
-if [ `id -u` -ne 0 ]
-then
- expect_false rbd-ggate map ${IMAGE}
-fi
-expect_false _sudo rbd-ggate map INVALIDIMAGE
-
-# map test using the first unused device
-DEV=`_sudo rbd-ggate map ${POOL}/${IMAGE}`
-_sudo rbd-ggate list | grep "^${DEV}$"
-
-# map test specifying the device
-expect_false _sudo rbd-ggate --device ${DEV} map ${POOL}/${IMAGE}
-dev1=${DEV}
-_sudo rbd-ggate unmap ${DEV}
-_sudo rbd-ggate list | expect_false grep "^${DEV}$"
-DEV=
-# XXX: race possible when the device is reused by another process
-DEV=`_sudo rbd-ggate --device ${dev1} map ${POOL}/${IMAGE}`
-[ "${DEV}" = "${dev1}" ]
-_sudo rbd-ggate list | grep "^${DEV}$"
-
-# read test
-[ "`dd if=${DATA} bs=1M | md5`" = "`_sudo dd if=${DEV} bs=1M | md5`" ]
-
-# write test
-dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
-_sudo dd if=${DATA} of=${DEV} bs=1M
-_sudo sync
-[ "`dd if=${DATA} bs=1M | md5`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5`" ]
-
-# trim test
-provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
- $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
-used=`rbd -p ${POOL} --format xml du ${IMAGE} |
- $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
-[ "${used}" -eq "${provisioned}" ]
-_sudo newfs -E ${DEV}
-_sudo sync
-provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
- $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
-used=`rbd -p ${POOL} --format xml du ${IMAGE} |
- $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
-[ "${used}" -lt "${provisioned}" ]
-
-# resize test
-devname=$(basename ${DEV})
-size=$(geom gate list ${devname} | awk '$1 ~ /Mediasize:/ {print $2}')
-test -n "${size}"
-rbd resize ${POOL}/${IMAGE} --size $((SIZE * 2))M
-rbd info ${POOL}/${IMAGE}
-if [ -z "$RBD_GGATE_RESIZE_SUPPORTED" ]; then
-    # XXX: ggate device resize is not supported by the vanilla kernel.
- # rbd-ggate should terminate when detecting resize.
- _sudo rbd-ggate list | expect_false grep "^${DEV}$"
-else
- _sudo rbd-ggate list | grep "^${DEV}$"
- size2=$(geom gate list ${devname} | awk '$1 ~ /Mediasize:/ {print $2}')
- test -n "${size2}"
- test ${size2} -eq $((size * 2))
- dd if=/dev/urandom of=${DATA} bs=1M count=$((SIZE * 2))
- _sudo dd if=${DATA} of=${DEV} bs=1M
- _sudo sync
- [ "`dd if=${DATA} bs=1M | md5`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5`" ]
- rbd resize ${POOL}/${IMAGE} --allow-shrink --size ${SIZE}M
- rbd info ${POOL}/${IMAGE}
- size2=$(geom gate list ${devname} | awk '$1 ~ /Mediasize:/ {print $2}')
- test -n "${size2}"
- test ${size2} -eq ${size}
- truncate -s ${SIZE}M ${DATA}
- [ "`dd if=${DATA} bs=1M | md5`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5`" ]
- _sudo rbd-ggate unmap ${DEV}
-fi
-DEV=
-
-# read-only option test
-DEV=`_sudo rbd-ggate map --read-only ${POOL}/${IMAGE}`
-devname=$(basename ${DEV})
-_sudo rbd-ggate list | grep "^${DEV}$"
-access=$(geom gate list ${devname} | awk '$1 == "access:" {print $2}')
-test "${access}" = "read-only"
-_sudo dd if=${DEV} of=/dev/null bs=1M
-expect_false _sudo dd if=${DATA} of=${DEV} bs=1M
-_sudo rbd-ggate unmap ${DEV}
-
-# exclusive option test
-DEV=`_sudo rbd-ggate map --exclusive ${POOL}/${IMAGE}`
-_sudo rbd-ggate list | grep "^${DEV}$"
-_sudo dd if=${DATA} of=${DEV} bs=1M
-_sudo sync
-expect_false timeout 10 \
- rbd -p ${POOL} bench ${IMAGE} --io-type=write --io-size=1024 --io-total=1024
-_sudo rbd-ggate unmap ${DEV}
-DEV=
-rbd bench -p ${POOL} ${IMAGE} --io-type=write --io-size=1024 --io-total=1024
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/rbd-nbd.sh b/src/ceph/qa/workunits/rbd/rbd-nbd.sh
deleted file mode 100755
index 524f8bd..0000000
--- a/src/ceph/qa/workunits/rbd/rbd-nbd.sh
+++ /dev/null
@@ -1,189 +0,0 @@
-#!/bin/bash -ex
-
-. $(dirname $0)/../../standalone/ceph-helpers.sh
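-# ceph-helpers.sh is expected to provide $XMLSTARLET (used by the trim test
-# below), detecting the xmlstarlet/xml binary much like rbd-ggate.sh does
-# inline.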
-
-POOL=rbd
-IMAGE=testrbdnbd$$
-SIZE=64
-DATA=
-DEV=
-
-_sudo()
-{
- local cmd
-
- if [ `id -u` -eq 0 ]
- then
- "$@"
- return $?
- fi
-
-    # Look for the command in the user's PATH. If that fails, run it as is,
-    # assuming it is in sudo's PATH.
- cmd=`which $1 2>/dev/null` || cmd=$1
- shift
- sudo -nE "${cmd}" "$@"
-}
-
-setup()
-{
- if [ -e CMakeCache.txt ]; then
- # running under cmake build dir
-
- CEPH_SRC=$(readlink -f $(dirname $0)/../../../src)
- CEPH_ROOT=${PWD}
- CEPH_BIN=${CEPH_ROOT}/bin
-
- export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
- export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind
- for x in ${CEPH_ROOT}/lib/cython_modules/lib* ; do
- PYTHONPATH="${PYTHONPATH}:${x}"
- done
- PATH=${CEPH_BIN}:${PATH}
- fi
-
- _sudo echo test sudo
-
- trap cleanup INT TERM EXIT
- TEMPDIR=`mktemp -d`
- DATA=${TEMPDIR}/data
- dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
- rbd --dest-pool ${POOL} --no-progress import ${DATA} ${IMAGE}
-}
-
-function cleanup()
-{
- set +e
-    rm -Rf ${TEMPDIR}
- if [ -n "${DEV}" ]
- then
- _sudo rbd-nbd unmap ${DEV}
- fi
- if rbd -p ${POOL} status ${IMAGE} 2>/dev/null; then
- for s in 0.5 1 2 4 8 16 32; do
- sleep $s
- rbd -p ${POOL} status ${IMAGE} | grep 'Watchers: none' && break
- done
- rbd -p ${POOL} remove ${IMAGE}
- fi
-}
-
-function expect_false()
-{
- if "$@"; then return 1; else return 0; fi
-}
-
-#
-# main
-#
-
-setup
-
-# exit status test
-expect_false rbd-nbd
-expect_false rbd-nbd INVALIDCMD
-if [ `id -u` -ne 0 ]
-then
- expect_false rbd-nbd map ${IMAGE}
-fi
-expect_false _sudo rbd-nbd map INVALIDIMAGE
-expect_false _sudo rbd-nbd --device INVALIDDEV map ${IMAGE}
-
-# map test using the first unused device
-DEV=`_sudo rbd-nbd map ${POOL}/${IMAGE}`
-PID=$(rbd-nbd list-mapped | awk -v pool=${POOL} -v img=${IMAGE} -v dev=${DEV} \
- '$2 == pool && $3 == img && $5 == dev {print $1}')
-test -n "${PID}"
-ps -p ${PID} -o cmd | grep rbd-nbd
-# map test specifying the device
-expect_false _sudo rbd-nbd --device ${DEV} map ${POOL}/${IMAGE}
-dev1=${DEV}
-_sudo rbd-nbd unmap ${DEV}
-rbd-nbd list-mapped | expect_false grep "${DEV} $"
-DEV=
-# XXX: race possible when the device is reused by another process
-DEV=`_sudo rbd-nbd --device ${dev1} map ${POOL}/${IMAGE}`
-[ "${DEV}" = "${dev1}" ]
-rbd-nbd list-mapped | grep "${IMAGE}"
-PID=$(rbd-nbd list-mapped | awk -v pool=${POOL} -v img=${IMAGE} -v dev=${DEV} \
- '$2 == pool && $3 == img && $5 == dev {print $1}')
-test -n "${PID}"
-ps -p ${PID} -o cmd | grep rbd-nbd
-
-# read test
-[ "`dd if=${DATA} bs=1M | md5sum`" = "`_sudo dd if=${DEV} bs=1M | md5sum`" ]
-
-# write test
-dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
-_sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
-[ "`dd if=${DATA} bs=1M | md5sum`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5sum`" ]
-
-# trim test
-provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
- $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
-used=`rbd -p ${POOL} --format xml du ${IMAGE} |
- $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
-[ "${used}" -eq "${provisioned}" ]
-_sudo mkfs.ext4 -E discard ${DEV} # better idea?
-sync
-provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
- $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
-used=`rbd -p ${POOL} --format xml du ${IMAGE} |
- $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
-[ "${used}" -lt "${provisioned}" ]
-
-# resize test
-devname=$(basename ${DEV})
-blocks=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
-test -n "${blocks}"
-rbd resize ${POOL}/${IMAGE} --size $((SIZE * 2))M
-rbd info ${POOL}/${IMAGE}
-blocks2=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
-test -n "${blocks2}"
-test ${blocks2} -eq $((blocks * 2))
-rbd resize ${POOL}/${IMAGE} --allow-shrink --size ${SIZE}M
-blocks2=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
-test -n "${blocks2}"
-test ${blocks2} -eq ${blocks}
-
-# read-only option test
-_sudo rbd-nbd unmap ${DEV}
-DEV=`_sudo rbd-nbd map --read-only ${POOL}/${IMAGE}`
-PID=$(rbd-nbd list-mapped | awk -v pool=${POOL} -v img=${IMAGE} -v dev=${DEV} \
- '$2 == pool && $3 == img && $5 == dev {print $1}')
-test -n "${PID}"
-ps -p ${PID} -o cmd | grep rbd-nbd
-
-_sudo dd if=${DEV} of=/dev/null bs=1M
-expect_false _sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
-_sudo rbd-nbd unmap ${DEV}
-
-# exclusive option test
-DEV=`_sudo rbd-nbd map --exclusive ${POOL}/${IMAGE}`
-PID=$(rbd-nbd list-mapped | awk -v pool=${POOL} -v img=${IMAGE} -v dev=${DEV} \
- '$2 == pool && $3 == img && $5 == dev {print $1}')
-test -n "${PID}"
-ps -p ${PID} -o cmd | grep rbd-nbd
-
-_sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
-expect_false timeout 10 \
- rbd bench ${IMAGE} --io-type write --io-size=1024 --io-total=1024
-_sudo rbd-nbd unmap ${DEV}
-
-# auto unmap test
-DEV=`_sudo rbd-nbd map ${POOL}/${IMAGE}`
-PID=$(rbd-nbd list-mapped | awk -v pool=${POOL} -v img=${IMAGE} -v dev=${DEV} \
- '$2 == pool && $3 == img && $5 == dev {print $1}')
-test -n "${PID}"
-ps -p ${PID} -o cmd | grep rbd-nbd
-_sudo kill ${PID}
-for i in `seq 10`; do
- rbd-nbd list-mapped | expect_false grep "^${PID} *${POOL} *${IMAGE}" && break
- sleep 1
-done
-rbd-nbd list-mapped | expect_false grep "^${PID} *${POOL} *${IMAGE}"
-
-DEV=
-rbd bench ${IMAGE} --io-type write --io-size=1024 --io-total=1024
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/rbd_mirror.sh b/src/ceph/qa/workunits/rbd/rbd_mirror.sh
deleted file mode 100755
index 5195e6c..0000000
--- a/src/ceph/qa/workunits/rbd/rbd_mirror.sh
+++ /dev/null
@@ -1,433 +0,0 @@
-#!/bin/sh
-#
-# rbd_mirror.sh - test rbd-mirror daemon
-#
-# The script starts two ("local" and "remote") clusters using the mstart.sh
-# script, creates a temporary directory (used for cluster configs, daemon
-# logs, admin sockets, and temporary files), and launches the rbd-mirror
-# daemon.
-#
-
-. $(dirname $0)/rbd_mirror_helpers.sh
-
-testlog "TEST: add image and test replay"
-start_mirror ${CLUSTER1}
-image=test
-create_image ${CLUSTER2} ${POOL} ${image}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-write_image ${CLUSTER2} ${POOL} ${image} 100
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
-if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
- wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'down+unknown'
-fi
-compare_images ${POOL} ${image}
-
-testlog "TEST: stop mirror, add image, start mirror and test replay"
-stop_mirror ${CLUSTER1}
-image1=test1
-create_image ${CLUSTER2} ${POOL} ${image1}
-write_image ${CLUSTER2} ${POOL} ${image1} 100
-start_mirror ${CLUSTER1}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image1}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying' 'master_position'
-if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
- wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image1} 'down+unknown'
-fi
-compare_images ${POOL} ${image1}
-
-testlog "TEST: test the first image is replaying after restart"
-write_image ${CLUSTER2} ${POOL} ${image} 100
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
-compare_images ${POOL} ${image}
-
-testlog "TEST: stop/start/restart mirror via admin socket"
-admin_daemon ${CLUSTER1} rbd mirror stop
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
-
-admin_daemon ${CLUSTER1} rbd mirror start
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
-
-admin_daemon ${CLUSTER1} rbd mirror restart
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
-
-admin_daemon ${CLUSTER1} rbd mirror stop
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
-
-admin_daemon ${CLUSTER1} rbd mirror restart
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
-
-admin_daemon ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
-
-admin_daemon ${CLUSTER1} rbd mirror start ${POOL}/${image}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
-
-admin_daemon ${CLUSTER1} rbd mirror start ${POOL} ${CLUSTER2}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
-
-admin_daemon ${CLUSTER1} rbd mirror restart ${POOL}/${image}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
-
-admin_daemon ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
-
-admin_daemon ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
-
-admin_daemon ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
-
-admin_daemon ${CLUSTER1} rbd mirror flush
-admin_daemon ${CLUSTER1} rbd mirror status
-
-testlog "TEST: test image rename"
-new_name="${image}_RENAMED"
-rename_image ${CLUSTER2} ${POOL} ${image} ${new_name}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
-admin_daemon ${CLUSTER1} rbd mirror status ${POOL}/${new_name}
-admin_daemon ${CLUSTER1} rbd mirror restart ${POOL}/${new_name}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
-rename_image ${CLUSTER2} ${POOL} ${new_name} ${image}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-
-testlog "TEST: failover and failback"
-start_mirror ${CLUSTER2}
-
-# demote and promote same cluster
-demote_image ${CLUSTER2} ${POOL} ${image}
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
-wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
-promote_image ${CLUSTER2} ${POOL} ${image}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-write_image ${CLUSTER2} ${POOL} ${image} 100
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
-compare_images ${POOL} ${image}
-
-# failover (unmodified)
-demote_image ${CLUSTER2} ${POOL} ${image}
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
-wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
-promote_image ${CLUSTER1} ${POOL} ${image}
-wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
-
-# failback (unmodified)
-demote_image ${CLUSTER1} ${POOL} ${image}
-wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
-wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
-promote_image ${CLUSTER2} ${POOL} ${image}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
-wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
-compare_images ${POOL} ${image}
-
-# failover
-demote_image ${CLUSTER2} ${POOL} ${image}
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
-wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
-promote_image ${CLUSTER1} ${POOL} ${image}
-wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
-write_image ${CLUSTER1} ${POOL} ${image} 100
-wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
-wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying' 'master_position'
-compare_images ${POOL} ${image}
-
-# failback
-demote_image ${CLUSTER1} ${POOL} ${image}
-wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
-wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
-promote_image ${CLUSTER2} ${POOL} ${image}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-write_image ${CLUSTER2} ${POOL} ${image} 100
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
-wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
-compare_images ${POOL} ${image}
-
-# force promote
-force_promote_image=test_force_promote
-create_image ${CLUSTER2} ${POOL} ${force_promote_image}
-write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
-wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${force_promote_image}
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${force_promote_image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+replaying' 'master_position'
-wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
-promote_image ${CLUSTER1} ${POOL} ${force_promote_image} '--force'
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${force_promote_image}
-wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+stopped'
-wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
-write_image ${CLUSTER1} ${POOL} ${force_promote_image} 100
-write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
-
-testlog "TEST: cloned images"
-parent_image=test_parent
-parent_snap=snap
-create_image ${CLUSTER2} ${PARENT_POOL} ${parent_image}
-write_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} 100
-create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
-protect_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
-
-clone_image=test_clone
-clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}
-write_image ${CLUSTER2} ${POOL} ${clone_image} 100
-
-enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image}
-wait_for_image_replay_started ${CLUSTER1} ${PARENT_POOL} ${parent_image}
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${PARENT_POOL} ${parent_image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${PARENT_POOL} ${parent_image} 'up+replaying' 'master_position'
-compare_images ${PARENT_POOL} ${parent_image}
-
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${clone_image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${clone_image} 'up+replaying' 'master_position'
-compare_images ${POOL} ${clone_image}
-
-expect_failure "is non-primary" clone_image ${CLUSTER1} ${PARENT_POOL} \
- ${parent_image} ${parent_snap} ${POOL} ${clone_image}1
-
-testlog "TEST: data pool"
-dp_image=test_data_pool
-create_image ${CLUSTER2} ${POOL} ${dp_image} 128 --data-pool ${PARENT_POOL}
-data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL} ${dp_image})
-test "${data_pool}" = "${PARENT_POOL}"
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${dp_image}
-data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL} ${dp_image})
-test "${data_pool}" = "${PARENT_POOL}"
-create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap1'
-write_image ${CLUSTER2} ${POOL} ${dp_image} 100
-create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap2'
-write_image ${CLUSTER2} ${POOL} ${dp_image} 100
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${dp_image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${dp_image} 'up+replaying' 'master_position'
-compare_images ${POOL} ${dp_image}@snap1
-compare_images ${POOL} ${dp_image}@snap2
-compare_images ${POOL} ${dp_image}
-
-testlog "TEST: disable mirroring / delete non-primary image"
-image2=test2
-image3=test3
-image4=test4
-image5=test5
-for i in ${image2} ${image3} ${image4} ${image5}; do
- create_image ${CLUSTER2} ${POOL} ${i}
- write_image ${CLUSTER2} ${POOL} ${i} 100
- create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
- create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
- if [ "${i}" = "${image4}" ] || [ "${i}" = "${image5}" ]; then
- protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
- protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
- fi
- write_image ${CLUSTER2} ${POOL} ${i} 100
- wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
- wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
-done
-
-set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
-for i in ${image2} ${image4}; do
- disable_mirror ${CLUSTER2} ${POOL} ${i}
-done
-
-unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap1'
-unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap2'
-for i in ${image3} ${image5}; do
- remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
- remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
- remove_image_retry ${CLUSTER2} ${POOL} ${i}
-done
-
-for i in ${image2} ${image3} ${image4} ${image5}; do
- wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'deleted'
-done
-
-set_pool_mirror_mode ${CLUSTER2} ${POOL} 'pool'
-for i in ${image2} ${image4}; do
- wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
- wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
- wait_for_image_replay_started ${CLUSTER1} ${POOL} ${i}
- wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${i}
- compare_images ${POOL} ${i}
-done
-
-testlog "TEST: snapshot rename"
-snap_name='snap_rename'
-create_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_0"
-for i in `seq 1 20`; do
- rename_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_$(expr ${i} - 1)" "${snap_name}_${i}"
-done
-wait_for_snap_present ${CLUSTER1} ${POOL} ${image2} "${snap_name}_${i}"
-
-testlog "TEST: disable mirror while daemon is stopped"
-stop_mirror ${CLUSTER1}
-stop_mirror ${CLUSTER2}
-set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
-disable_mirror ${CLUSTER2} ${POOL} ${image}
-if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
- test_image_present ${CLUSTER1} ${POOL} ${image} 'present'
-fi
-start_mirror ${CLUSTER1}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
-set_pool_mirror_mode ${CLUSTER2} ${POOL} 'pool'
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-
-testlog "TEST: simple image resync"
-request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
-compare_images ${POOL} ${image}
-
-testlog "TEST: image resync while replayer is stopped"
-admin_daemon ${CLUSTER1} rbd mirror stop ${POOL}/${image}
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
-request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
-admin_daemon ${CLUSTER1} rbd mirror start ${POOL}/${image}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
-admin_daemon ${CLUSTER1} rbd mirror start ${POOL}/${image}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
-compare_images ${POOL} ${image}
-
-testlog "TEST: request image resync while daemon is offline"
-stop_mirror ${CLUSTER1}
-request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
-start_mirror ${CLUSTER1}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
-compare_images ${POOL} ${image}
-
-testlog "TEST: client disconnect"
-image=laggy
-create_image ${CLUSTER2} ${POOL} ${image} 128 --journal-object-size 64K
-write_image ${CLUSTER2} ${POOL} ${image} 10
-
-testlog " - replay stopped after disconnect"
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
-test -n "$(get_mirror_position ${CLUSTER2} ${POOL} ${image})"
-disconnect_image ${CLUSTER2} ${POOL} ${image}
-test -z "$(get_mirror_position ${CLUSTER2} ${POOL} ${image})"
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
-
-testlog " - replay started after resync requested"
-request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
-test -n "$(get_mirror_position ${CLUSTER2} ${POOL} ${image})"
-compare_images ${POOL} ${image}
-
-testlog " - disconnected after max_concurrent_object_sets reached"
-admin_daemon ${CLUSTER1} rbd mirror stop ${POOL}/${image}
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
-test -n "$(get_mirror_position ${CLUSTER2} ${POOL} ${image})"
-set_image_meta ${CLUSTER2} ${POOL} ${image} \
- conf_rbd_journal_max_concurrent_object_sets 1
-write_image ${CLUSTER2} ${POOL} ${image} 20 16384
-write_image ${CLUSTER2} ${POOL} ${image} 20 16384
-test -z "$(get_mirror_position ${CLUSTER2} ${POOL} ${image})"
-set_image_meta ${CLUSTER2} ${POOL} ${image} \
- conf_rbd_journal_max_concurrent_object_sets 0
-
-testlog " - replay is still stopped (disconnected) after restart"
-admin_daemon ${CLUSTER1} rbd mirror start ${POOL}/${image}
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
-
-testlog " - replay started after resync requested"
-request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
-test -n "$(get_mirror_position ${CLUSTER2} ${POOL} ${image})"
-compare_images ${POOL} ${image}
-
-testlog " - rbd_mirroring_resync_after_disconnect config option"
-set_image_meta ${CLUSTER2} ${POOL} ${image} \
- conf_rbd_mirroring_resync_after_disconnect true
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
-image_id=$(get_image_id ${CLUSTER1} ${POOL} ${image})
-disconnect_image ${CLUSTER2} ${POOL} ${image}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
-test -n "$(get_mirror_position ${CLUSTER2} ${POOL} ${image})"
-compare_images ${POOL} ${image}
-set_image_meta ${CLUSTER2} ${POOL} ${image} \
- conf_rbd_mirroring_resync_after_disconnect false
-wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
-disconnect_image ${CLUSTER2} ${POOL} ${image}
-test -z "$(get_mirror_position ${CLUSTER2} ${POOL} ${image})"
-wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
-
-testlog "TEST: split-brain"
-image=split-brain
-create_image ${CLUSTER2} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
-demote_image ${CLUSTER2} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
-promote_image ${CLUSTER1} ${POOL} ${image}
-write_image ${CLUSTER1} ${POOL} ${image} 10
-demote_image ${CLUSTER1} ${POOL} ${image}
-promote_image ${CLUSTER2} ${POOL} ${image}
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'split-brain'
-request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
-wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
-
-testlog "TEST: no blacklists"
-CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER1} osd blacklist ls 2>&1 | grep -q "listed 0 entries"
-CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER2} osd blacklist ls 2>&1 | grep -q "listed 0 entries"
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/rbd_mirror_ha.sh b/src/ceph/qa/workunits/rbd/rbd_mirror_ha.sh
deleted file mode 100755
index fcb8d76..0000000
--- a/src/ceph/qa/workunits/rbd/rbd_mirror_ha.sh
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/bin/sh
-#
-# rbd_mirror_ha.sh - test rbd-mirror daemons in HA mode
-#
-
-. $(dirname $0)/rbd_mirror_helpers.sh
-
-is_leader()
-{
- local instance=$1
- local pool=$2
-
- test -n "${pool}" || pool=${POOL}
-
- admin_daemon "${CLUSTER1}:${instance}" \
- rbd mirror status ${pool} ${CLUSTER2} |
- grep '"leader": true'
-}
-
-wait_for_leader()
-{
- local s instance
-
- for s in 1 1 2 4 4 4 4 4 8 8 8 8 16 16 32 64; do
- sleep $s
- for instance in `seq 0 9`; do
- is_leader ${instance} || continue
- LEADER=${instance}
- return 0
- done
- done
-
- LEADER=
- return 1
-}
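-# The loop above polls instances 0-9 between sleeps totalling roughly 180
-# seconds before giving up; on success the winning instance is recorded in
-# $LEADER.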
-
-release_leader()
-{
- local pool=$1
- local cmd="rbd mirror leader release"
-
- test -n "${pool}" && cmd="${cmd} ${pool} ${CLUSTER2}"
-
- admin_daemon "${CLUSTER1}:${LEADER}" ${cmd}
-}
-
-wait_for_leader_released()
-{
- local i
-
- test -n "${LEADER}"
- for i in `seq 10`; do
- is_leader ${LEADER} || return 0
- sleep 1
- done
-
- return 1
-}
-
-test_replay()
-{
- local image
-
- for image; do
- wait_for_image_replay_started ${CLUSTER1}:${LEADER} ${POOL} ${image}
- write_image ${CLUSTER2} ${POOL} ${image} 100
- wait_for_replay_complete ${CLUSTER1}:${LEADER} ${CLUSTER2} ${POOL} \
- ${image}
- wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' \
- 'master_position'
- if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
- wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} \
- 'down+unknown'
- fi
- compare_images ${POOL} ${image}
- done
-}
-
-testlog "TEST: start first daemon instance and test replay"
-start_mirror ${CLUSTER1}:0
-image1=test1
-create_image ${CLUSTER2} ${POOL} ${image1}
-LEADER=0
-test_replay ${image1}
-
-testlog "TEST: release leader and wait until it is reacquired"
-is_leader 0 ${POOL}
-is_leader 0 ${PARENT_POOL}
-release_leader ${POOL}
-wait_for_leader_released
-is_leader 0 ${PARENT_POOL}
-wait_for_leader
-release_leader
-wait_for_leader_released
-expect_failure "" is_leader 0 ${PARENT_POOL}
-wait_for_leader
-
-testlog "TEST: start second daemon instance and test replay"
-start_mirror ${CLUSTER1}:1
-image2=test2
-create_image ${CLUSTER2} ${POOL} ${image2}
-test_replay ${image1} ${image2}
-
-testlog "TEST: release leader and test it is acquired by a secondary"
-is_leader 0 ${POOL}
-is_leader 0 ${PARENT_POOL}
-release_leader ${POOL}
-wait_for_leader_released
-wait_for_leader
-test_replay ${image1} ${image2}
-release_leader
-wait_for_leader_released
-wait_for_leader
-test "${LEADER}" = 0
-
-testlog "TEST: stop first daemon instance and test replay"
-stop_mirror ${CLUSTER1}:0
-image3=test3
-create_image ${CLUSTER2} ${POOL} ${image3}
-LEADER=1
-test_replay ${image1} ${image2} ${image3}
-
-testlog "TEST: start first daemon instance and test replay"
-start_mirror ${CLUSTER1}:0
-image4=test4
-create_image ${CLUSTER2} ${POOL} ${image4}
-test_replay ${image3} ${image4}
-
-testlog "TEST: crash leader and test replay"
-stop_mirror ${CLUSTER1}:1 -KILL
-image5=test5
-create_image ${CLUSTER2} ${POOL} ${image5}
-LEADER=0
-test_replay ${image1} ${image4} ${image5}
-
-testlog "TEST: start crashed leader and test replay"
-start_mirror ${CLUSTER1}:1
-image6=test6
-create_image ${CLUSTER2} ${POOL} ${image6}
-test_replay ${image1} ${image6}
-
-testlog "TEST: start yet another daemon instance and test replay"
-start_mirror ${CLUSTER1}:2
-image7=test7
-create_image ${CLUSTER2} ${POOL} ${image7}
-test_replay ${image1} ${image7}
-
-testlog "TEST: release leader and test it is acquired by a secondary"
-is_leader 0
-release_leader
-wait_for_leader_released
-wait_for_leader
-test_replay ${image1} ${image2}
-
-testlog "TEST: stop leader and test replay"
-stop_mirror ${CLUSTER1}:${LEADER}
-image8=test8
-create_image ${CLUSTER2} ${POOL} ${image8}
-prev_leader=${LEADER}
-wait_for_leader
-test_replay ${image1} ${image8}
-
-testlog "TEST: start previous leader and test replay"
-start_mirror ${CLUSTER1}:${prev_leader}
-image9=test9
-create_image ${CLUSTER2} ${POOL} ${image9}
-test_replay ${image1} ${image9}
-
-testlog "TEST: crash leader and test replay"
-stop_mirror ${CLUSTER1}:${LEADER} -KILL
-image10=test10
-create_image ${CLUSTER2} ${POOL} ${image10}
-prev_leader=${LEADER}
-wait_for_leader
-test_replay ${image1} ${image10}
-
-testlog "TEST: start previous leader and test replay"
-start_mirror ${CLUSTER1}:${prev_leader}
-image11=test11
-create_image ${CLUSTER2} ${POOL} ${image11}
-test_replay ${image1} ${image11}
-
-testlog "TEST: start some more daemon instances and test replay"
-start_mirror ${CLUSTER1}:3
-start_mirror ${CLUSTER1}:4
-start_mirror ${CLUSTER1}:5
-start_mirror ${CLUSTER1}:6
-image13=test13
-create_image ${CLUSTER2} ${POOL} ${image13}
-test_replay ${image1} ${image13}
-
-testlog "TEST: release leader and test it is acquired by a secondary"
-release_leader
-wait_for_leader_released
-wait_for_leader
-test_replay ${image1} ${image2}
-
-testlog "TEST: in loop: stop leader and test replay"
-for i in 0 1 2 3 4 5; do
- stop_mirror ${CLUSTER1}:${LEADER}
- wait_for_leader
- test_replay ${image1}
-done
-
-stop_mirror ${CLUSTER1}:${LEADER}
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh b/src/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh
deleted file mode 100755
index 325353b..0000000
--- a/src/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh
+++ /dev/null
@@ -1,910 +0,0 @@
-#!/bin/sh
-#
-# rbd_mirror_helpers.sh - shared rbd-mirror daemon helper functions
-#
-# The test scripts start two ("local" and "remote") clusters using the
-# mstart.sh script, create a temporary directory (used for cluster configs,
-# daemon logs, admin sockets, and temporary files), and launch the
-# rbd-mirror daemon.
-#
-# There are several env variables useful when troubleshooting a test failure:
-#
-# RBD_MIRROR_NOCLEANUP - if not empty, don't run the cleanup (stop processes,
-# destroy the clusters and remove the temp directory)
-# on exit, so it is possible to check the test state
-# after failure.
-# RBD_MIRROR_TEMDIR - use this path when creating the temporary directory
-# (should not exist) instead of running mktemp(1).
-# RBD_MIRROR_ARGS - use this to pass additional arguments to started
-# rbd-mirror daemons.
-# RBD_MIRROR_VARGS - use this to pass additional arguments to vstart.sh
-# when starting clusters.
-#
-# The cleanup can be done as a separate step, running the script with
-# `cleanup ${RBD_MIRROR_TEMDIR}' arguments.
-#
-# Note: like other workunit tests, rbd_mirror.sh expects to find the ceph
-# binaries in PATH.
-#
-# Thus a typical troubleshooting session:
-#
-# From Ceph src dir (CEPH_SRC_PATH), start the test in NOCLEANUP mode and with
-# TEMPDIR pointing to a known location:
-#
-# cd $CEPH_SRC_PATH
-# PATH=$CEPH_SRC_PATH:$PATH
-# RBD_MIRROR_NOCLEANUP=1 RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
-# ../qa/workunits/rbd/rbd_mirror.sh
-#
-# After the test failure cd to TEMPDIR and check the current state:
-#
-# cd /tmp/tmp.rbd_mirror
-# ls
-# less rbd-mirror.cluster1_daemon.$pid.log
-# ceph --cluster cluster1 -s
-# ceph --cluster cluster2 -s
-# rbd --cluster cluster2 -p mirror ls
-# rbd --cluster cluster2 -p mirror journal status --image test
-# ceph --admin-daemon rbd-mirror.cluster1_daemon.cluster1.$pid.asok help
-# ...
-#
-# Also you can execute commands (functions) from the script:
-#
-# cd $CEPH_SRC_PATH
-# export RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror
-# ../qa/workunits/rbd/rbd_mirror.sh status
-# ../qa/workunits/rbd/rbd_mirror.sh stop_mirror cluster1
-# ../qa/workunits/rbd/rbd_mirror.sh start_mirror cluster2
-# ../qa/workunits/rbd/rbd_mirror.sh flush cluster2
-# ...
-#
-# Eventually, run the cleanup:
-#
-# cd $CEPH_SRC_PATH
-# RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
-# ../qa/workunits/rbd/rbd_mirror.sh cleanup
-#
-
-CLUSTER1=cluster1
-CLUSTER2=cluster2
-POOL=mirror
-PARENT_POOL=mirror_parent
-TEMPDIR=
-USER_ID=mirror
-export CEPH_ARGS="--id ${USER_ID}"
-
-CEPH_ROOT=$(readlink -f $(dirname $0)/../../../src)
-CEPH_BIN=.
-CEPH_SRC=.
-if [ -e CMakeCache.txt ]; then
- CEPH_SRC=${CEPH_ROOT}
- CEPH_ROOT=${PWD}
- CEPH_BIN=./bin
-
- # needed for ceph CLI under cmake
- export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
- export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind
- for x in ${CEPH_ROOT}/lib/cython_modules/lib* ; do
- export PYTHONPATH="${PYTHONPATH}:${x}"
- done
-fi
-
-# These vars facilitate running this script in an environment with
-# ceph installed from packages, like teuthology. These are not defined
-# by default.
-#
-# RBD_MIRROR_USE_EXISTING_CLUSTER - if set, do not start and stop ceph clusters
-# RBD_MIRROR_USE_RBD_MIRROR - if set, use an existing instance of rbd-mirror
-# running as ceph client $CEPH_ID. If empty,
-# this script will start and stop rbd-mirror
-
-#
-# Functions
-#
-
-# Parse a value in format cluster[:instance] and set cluster and instance vars.
-set_cluster_instance()
-{
- local val=$1
- local cluster_var_name=$2
- local instance_var_name=$3
-
- cluster=${val%:*}
- instance=${val##*:}
-
- if [ "${instance}" = "${val}" ]; then
- # instance was not specified, use default
- instance=0
- fi
-
- eval ${cluster_var_name}=${cluster}
- eval ${instance_var_name}=${instance}
-}
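-# A quick sketch of the parsing (hypothetical variable names c and i):
-#
-#   set_cluster_instance "cluster1:2" c i   # -> c=cluster1, i=2
-#   set_cluster_instance "cluster1" c i     # -> c=cluster1, i=0 (default)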
-
-daemon_asok_file()
-{
- local local_cluster=$1
- local cluster=$2
- local instance
-
- set_cluster_instance "${local_cluster}" local_cluster instance
-
- if [ -n "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
- echo $(ceph-conf --cluster $local_cluster --name "client.${CEPH_ID}" 'admin socket')
- else
- echo "${TEMPDIR}/rbd-mirror.${local_cluster}_daemon.${instance}.${cluster}.asok"
- fi
-}
-
-daemon_pid_file()
-{
- local cluster=$1
- local instance
-
- set_cluster_instance "${cluster}" cluster instance
-
- if [ -n "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
- echo $(ceph-conf --cluster $cluster --name "client.${CEPH_ID}" 'pid file')
- else
- echo "${TEMPDIR}/rbd-mirror.${cluster}_daemon.${instance}.pid"
- fi
-}
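-# A sketch of the resulting file names, assuming TEMPDIR=/tmp/tmp.rbd_mirror
-# and locally started daemons:
-#
-#   daemon_pid_file cluster1:2
-#     -> /tmp/tmp.rbd_mirror/rbd-mirror.cluster1_daemon.2.pid
-#   daemon_asok_file cluster1:2 cluster2
-#     -> /tmp/tmp.rbd_mirror/rbd-mirror.cluster1_daemon.2.cluster2.asok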
-
-testlog()
-{
- echo $(date '+%F %T') $@ | tee -a "${TEMPDIR}/rbd-mirror.test.log" >&2
-}
-
-expect_failure()
-{
- local expected="$1" ; shift
- local out=${TEMPDIR}/expect_failure.out
-
- if "$@" > ${out} 2>&1 ; then
- cat ${out} >&2
- return 1
- fi
-
- if [ -z "${expected}" ]; then
- return 0
- fi
-
- if ! grep -q "${expected}" ${out} ; then
- cat ${out} >&2
- return 1
- fi
-
- return 0
-}
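-# Usage sketch (hypothetical image name): the call succeeds only if the
-# command fails and its output matches the expected pattern:
-#
-#   expect_failure "No such file" rbd info ${POOL}/no_such_image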
-
-setup()
-{
- local c
- trap cleanup INT TERM EXIT
-
- if [ -n "${RBD_MIRROR_TEMDIR}" ]; then
- test -d "${RBD_MIRROR_TEMDIR}" ||
- mkdir "${RBD_MIRROR_TEMDIR}"
- TEMPDIR="${RBD_MIRROR_TEMDIR}"
- cd ${TEMPDIR}
- else
- TEMPDIR=`mktemp -d`
- fi
-
- if [ -z "${RBD_MIRROR_USE_EXISTING_CLUSTER}" ]; then
- cd ${CEPH_ROOT}
- CEPH_ARGS='' ${CEPH_SRC}/mstart.sh ${CLUSTER1} -n ${RBD_MIRROR_VARGS}
- CEPH_ARGS='' ${CEPH_SRC}/mstart.sh ${CLUSTER2} -n ${RBD_MIRROR_VARGS}
-
- CEPH_ARGS='' ceph --conf run/${CLUSTER1}/ceph.conf \
- auth get-or-create client.${USER_ID} mon 'profile rbd' osd 'profile rbd' >> \
- run/${CLUSTER1}/keyring
- CEPH_ARGS='' ceph --conf run/${CLUSTER2}/ceph.conf \
- auth get-or-create client.${USER_ID} mon 'profile rbd' osd 'profile rbd' >> \
- run/${CLUSTER2}/keyring
-
- rm -f ${TEMPDIR}/${CLUSTER1}.conf
- ln -s $(readlink -f run/${CLUSTER1}/ceph.conf) \
- ${TEMPDIR}/${CLUSTER1}.conf
- rm -f ${TEMPDIR}/${CLUSTER2}.conf
- ln -s $(readlink -f run/${CLUSTER2}/ceph.conf) \
- ${TEMPDIR}/${CLUSTER2}.conf
-
- cd ${TEMPDIR}
- fi
-
- CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool create ${POOL} 64 64
- CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool create ${PARENT_POOL} 64 64
- CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool create ${PARENT_POOL} 64 64
- CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool create ${POOL} 64 64
-
- CEPH_ARGS='' rbd --cluster ${CLUSTER1} pool init ${POOL}
- CEPH_ARGS='' rbd --cluster ${CLUSTER2} pool init ${POOL}
- CEPH_ARGS='' rbd --cluster ${CLUSTER1} pool init ${PARENT_POOL}
- CEPH_ARGS='' rbd --cluster ${CLUSTER2} pool init ${PARENT_POOL}
-
- rbd --cluster ${CLUSTER1} mirror pool enable ${POOL} pool
- rbd --cluster ${CLUSTER2} mirror pool enable ${POOL} pool
- rbd --cluster ${CLUSTER1} mirror pool enable ${PARENT_POOL} image
- rbd --cluster ${CLUSTER2} mirror pool enable ${PARENT_POOL} image
-
- rbd --cluster ${CLUSTER1} mirror pool peer add ${POOL} ${CLUSTER2}
- rbd --cluster ${CLUSTER2} mirror pool peer add ${POOL} ${CLUSTER1}
- rbd --cluster ${CLUSTER1} mirror pool peer add ${PARENT_POOL} ${CLUSTER2}
- rbd --cluster ${CLUSTER2} mirror pool peer add ${PARENT_POOL} ${CLUSTER1}
-}
-
-cleanup()
-{
- test -n "${RBD_MIRROR_NOCLEANUP}" && return
- local cluster instance
-
- set +e
-
- for cluster in "${CLUSTER1}" "${CLUSTER2}"; do
- for instance in `seq 0 9`; do
- stop_mirror "${cluster}:${instance}"
- done
- done
-
- if [ -z "${RBD_MIRROR_USE_EXISTING_CLUSTER}" ]; then
- cd ${CEPH_ROOT}
- CEPH_ARGS='' ${CEPH_SRC}/mstop.sh ${CLUSTER1}
- CEPH_ARGS='' ${CEPH_SRC}/mstop.sh ${CLUSTER2}
- else
- CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
- CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
- CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool rm ${PARENT_POOL} ${PARENT_POOL} --yes-i-really-really-mean-it
- CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool rm ${PARENT_POOL} ${PARENT_POOL} --yes-i-really-really-mean-it
- fi
- test "${RBD_MIRROR_TEMDIR}" = "${TEMPDIR}" ||
- rm -Rf ${TEMPDIR}
-}
-
-start_mirror()
-{
- local cluster=$1
- local instance
-
- set_cluster_instance "${cluster}" cluster instance
-
- test -n "${RBD_MIRROR_USE_RBD_MIRROR}" && return
-
- rbd-mirror \
- --cluster ${cluster} \
- --id mirror \
- --pid-file=$(daemon_pid_file "${cluster}:${instance}") \
- --log-file=${TEMPDIR}/rbd-mirror.${cluster}_daemon.${instance}.log \
- --admin-socket=${TEMPDIR}/rbd-mirror.${cluster}_daemon.${instance}.\$cluster.asok \
- --rbd-mirror-delete-retry-interval=5 \
- --rbd-mirror-image-state-check-interval=5 \
- --rbd-mirror-journal-poll-age=1 \
- --rbd-mirror-pool-replayers-refresh-interval=5 \
- --debug-rbd=30 --debug-journaler=30 \
- --debug-rbd_mirror=30 \
- --daemonize=true \
- ${RBD_MIRROR_ARGS}
-}
-
-stop_mirror()
-{
- local cluster=$1
- local sig=$2
-
- test -n "${RBD_MIRROR_USE_RBD_MIRROR}" && return
-
- local pid
- pid=$(cat $(daemon_pid_file "${cluster}") 2>/dev/null) || :
- if [ -n "${pid}" ]
- then
- kill ${sig} ${pid}
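-        # wait with exponential backoff (up to ~60s total) for the process
-        # to exit: the awk prints the surviving process and returns nonzero,
-        # so the loop breaks only once the pid is gone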
- for s in 1 2 4 8 16 32; do
- sleep $s
- ps auxww | awk -v pid=${pid} '$2 == pid {print; exit 1}' && break
- done
- ps auxww | awk -v pid=${pid} '$2 == pid {print; exit 1}'
- fi
- rm -f $(daemon_asok_file "${cluster}" "${CLUSTER1}")
- rm -f $(daemon_asok_file "${cluster}" "${CLUSTER2}")
- rm -f $(daemon_pid_file "${cluster}")
-}
-
-admin_daemon()
-{
- local cluster=$1 ; shift
- local instance
-
- set_cluster_instance "${cluster}" cluster instance
-
- local asok_file=$(daemon_asok_file "${cluster}:${instance}" "${cluster}")
- test -S "${asok_file}"
-
- ceph --admin-daemon ${asok_file} $@
-}
-
-status()
-{
-    local cluster image_pool image
-
- for cluster in ${CLUSTER1} ${CLUSTER2}
- do
- echo "${cluster} status"
- ceph --cluster ${cluster} -s
- echo
-
- for image_pool in ${POOL} ${PARENT_POOL}
- do
- echo "${cluster} ${image_pool} images"
- rbd --cluster ${cluster} -p ${image_pool} ls
- echo
-
- echo "${cluster} ${image_pool} mirror pool status"
- rbd --cluster ${cluster} -p ${image_pool} mirror pool status --verbose
- echo
-
- for image in `rbd --cluster ${cluster} -p ${image_pool} ls 2>/dev/null`
- do
- echo "image ${image} info"
- rbd --cluster ${cluster} -p ${image_pool} info ${image}
- echo
- echo "image ${image} journal status"
- rbd --cluster ${cluster} -p ${image_pool} journal status --image ${image}
- echo
- done
- done
- done
-
-    local ret=0
-
- for cluster in "${CLUSTER1}" "${CLUSTER2}"
- do
- local pid_file=$(daemon_pid_file ${cluster} )
- if [ ! -e ${pid_file} ]
- then
- echo "${cluster} rbd-mirror not running or unknown" \
-             "(${pid_file} does not exist)"
- continue
- fi
-
- local pid
- pid=$(cat ${pid_file} 2>/dev/null) || :
- if [ -z "${pid}" ]
- then
- echo "${cluster} rbd-mirror not running or unknown" \
- "(can't find pid using ${pid_file})"
- ret=1
- continue
- fi
-
-        echo "${cluster} rbd-mirror process in ps output:"
- if ps auxww |
- awk -v pid=${pid} 'NR == 1 {print} $2 == pid {print; exit 1}'
- then
- echo
- echo "${cluster} rbd-mirror not running" \
- "(can't find pid $pid in ps output)"
- ret=1
- continue
- fi
- echo
-
- local asok_file=$(daemon_asok_file ${cluster} ${cluster})
- if [ ! -S "${asok_file}" ]
- then
-            echo "${cluster} rbd-mirror asok is unknown (${asok_file} does not exist)"
- ret=1
- continue
- fi
-
- echo "${cluster} rbd-mirror status"
- ceph --admin-daemon ${asok_file} rbd mirror status
- echo
- done
-
- return ${ret}
-}
-
-flush()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local cmd="rbd mirror flush"
-
- if [ -n "${image}" ]
- then
- cmd="${cmd} ${pool}/${image}"
- fi
-
- admin_daemon "${cluster}" ${cmd}
-}
-
-test_image_replay_state()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local test_state=$4
- local current_state=stopped
-
- admin_daemon "${cluster}" help |
- fgrep "\"rbd mirror status ${pool}/${image}\"" &&
- admin_daemon "${cluster}" rbd mirror status ${pool}/${image} |
- grep -i 'state.*Replaying' &&
- current_state=started
-
- test "${test_state}" = "${current_state}"
-}
-
-wait_for_image_replay_state()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local state=$4
- local s
-
- # TODO: add a way to force rbd-mirror to update replayers
- for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
- sleep ${s}
- test_image_replay_state "${cluster}" "${pool}" "${image}" "${state}" && return 0
- done
- return 1
-}
-
-wait_for_image_replay_started()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-
- wait_for_image_replay_state "${cluster}" "${pool}" "${image}" started
-}
-
-wait_for_image_replay_stopped()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-
- wait_for_image_replay_state "${cluster}" "${pool}" "${image}" stopped
-}
-
-get_position()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local id_regexp=$4
-
-    # Parse a line like the one below, looking for the first position
- # [id=, commit_position=[positions=[[object_number=1, tag_tid=3, entry_tid=9], [object_number=0, tag_tid=3, entry_tid=8], [object_number=3, tag_tid=3, entry_tid=7], [object_number=2, tag_tid=3, entry_tid=6]]]]
-
- local status_log=${TEMPDIR}/${CLUSTER2}-${pool}-${image}.status
- rbd --cluster ${cluster} -p ${pool} journal status --image ${image} |
- tee ${status_log} >&2
- sed -nEe 's/^.*\[id='"${id_regexp}"',.*positions=\[\[([^]]*)\],.*state=connected.*$/\1/p' \
- ${status_log}
-}
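-# With id_regexp='' this matches the master client (empty id); assuming the
-# sample line above also carries state=connected, the sed would print the
-# first position:
-#
-#   object_number=1, tag_tid=3, entry_tid=9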
-
-get_master_position()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-
- get_position "${cluster}" "${pool}" "${image}" ''
-}
-
-get_mirror_position()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-
- get_position "${cluster}" "${pool}" "${image}" '..*'
-}
-
-wait_for_replay_complete()
-{
- local local_cluster=$1
- local cluster=$2
- local pool=$3
- local image=$4
- local s master_pos mirror_pos last_mirror_pos
- local master_tag master_entry mirror_tag mirror_entry
-
- while true; do
- for s in 0.2 0.4 0.8 1.6 2 2 4 4 8 8 16 16 32 32; do
- sleep ${s}
- flush "${local_cluster}" "${pool}" "${image}"
- master_pos=$(get_master_position "${cluster}" "${pool}" "${image}")
- mirror_pos=$(get_mirror_position "${cluster}" "${pool}" "${image}")
- test -n "${master_pos}" -a "${master_pos}" = "${mirror_pos}" && return 0
- test "${mirror_pos}" != "${last_mirror_pos}" && break
- done
-
- test "${mirror_pos}" = "${last_mirror_pos}" && return 1
- last_mirror_pos="${mirror_pos}"
-
- # handle the case where the mirror is ahead of the master
- master_tag=$(echo "${master_pos}" | grep -Eo "tag_tid=[0-9]*" | cut -d'=' -f 2)
- mirror_tag=$(echo "${mirror_pos}" | grep -Eo "tag_tid=[0-9]*" | cut -d'=' -f 2)
- master_entry=$(echo "${master_pos}" | grep -Eo "entry_tid=[0-9]*" | cut -d'=' -f 2)
- mirror_entry=$(echo "${mirror_pos}" | grep -Eo "entry_tid=[0-9]*" | cut -d'=' -f 2)
- test "${master_tag}" = "${mirror_tag}" -a ${master_entry} -le ${mirror_entry} && return 0
- done
- return 1
-}
-
-test_status_in_pool_dir()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local state_pattern=$4
- local description_pattern=$5
-
- local status_log=${TEMPDIR}/${cluster}-${image}.mirror_status
- rbd --cluster ${cluster} -p ${pool} mirror image status ${image} |
- tee ${status_log} >&2
- grep "state: .*${state_pattern}" ${status_log} || return 1
- grep "description: .*${description_pattern}" ${status_log} || return 1
-}
-
-wait_for_status_in_pool_dir()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local state_pattern=$4
- local description_pattern=$5
-
- for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
- sleep ${s}
- test_status_in_pool_dir ${cluster} ${pool} ${image} ${state_pattern} ${description_pattern} && return 0
- done
- return 1
-}
-
-create_image()
-{
- local cluster=$1 ; shift
- local pool=$1 ; shift
- local image=$1 ; shift
- local size=128
-
- if [ -n "$1" ]; then
- size=$1
- shift
- fi
-
- rbd --cluster ${cluster} -p ${pool} create --size ${size} \
- --image-feature layering,exclusive-lock,journaling $@ ${image}
-}
-
-set_image_meta()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local key=$4
- local val=$5
-
- rbd --cluster ${cluster} -p ${pool} image-meta set ${image} $key $val
-}
-
-rename_image()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local new_name=$4
-
- rbd --cluster=${cluster} -p ${pool} rename ${image} ${new_name}
-}
-
-remove_image()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-
- rbd --cluster=${cluster} -p ${pool} snap purge ${image}
- rbd --cluster=${cluster} -p ${pool} rm ${image}
-}
-
-remove_image_retry()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-
- for s in 1 2 4 8 16 32; do
- remove_image ${cluster} ${pool} ${image} && return 0
- sleep ${s}
- done
- return 1
-}
-
-clone_image()
-{
- local cluster=$1
- local parent_pool=$2
- local parent_image=$3
- local parent_snap=$4
- local clone_pool=$5
- local clone_image=$6
-
- rbd --cluster ${cluster} clone ${parent_pool}/${parent_image}@${parent_snap} \
- ${clone_pool}/${clone_image} --image-feature layering,exclusive-lock,journaling
-}
-
-disconnect_image()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-
- rbd --cluster ${cluster} -p ${pool} journal client disconnect \
- --image ${image}
-}
-
-create_snapshot()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local snap=$4
-
- rbd --cluster ${cluster} -p ${pool} snap create ${image}@${snap}
-}
-
-remove_snapshot()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local snap=$4
-
- rbd --cluster ${cluster} -p ${pool} snap rm ${image}@${snap}
-}
-
-rename_snapshot()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local snap=$4
- local new_snap=$5
-
- rbd --cluster ${cluster} -p ${pool} snap rename ${image}@${snap} ${image}@${new_snap}
-}
-
-purge_snapshots()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-
- rbd --cluster ${cluster} -p ${pool} snap purge ${image}
-}
-
-protect_snapshot()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local snap=$4
-
- rbd --cluster ${cluster} -p ${pool} snap protect ${image}@${snap}
-}
-
-unprotect_snapshot()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local snap=$4
-
- rbd --cluster ${cluster} -p ${pool} snap unprotect ${image}@${snap}
-}
-
-wait_for_snap_present()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local snap_name=$4
- local s
-
- for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
- sleep ${s}
- rbd --cluster ${cluster} -p ${pool} info ${image}@${snap_name} || continue
- return 0
- done
- return 1
-}
-
-write_image()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local count=$4
- local size=$5
-
- test -n "${size}" || size=4096
-
- rbd --cluster ${cluster} -p ${pool} bench ${image} --io-type write \
- --io-size ${size} --io-threads 1 --io-total $((size * count)) \
- --io-pattern rand
-}
-
-stress_write_image()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local duration=$(awk 'BEGIN {srand(); print int(10 * rand()) + 5}')
-
- timeout ${duration}s ceph_test_rbd_mirror_random_write \
- --cluster ${cluster} ${pool} ${image} \
- --debug-rbd=20 --debug-journaler=20 \
- 2> ${TEMPDIR}/rbd-mirror-random-write.log || true
-}
-
-compare_images()
-{
- local pool=$1
- local image=$2
-
- local rmt_export=${TEMPDIR}/${CLUSTER2}-${pool}-${image}.export
- local loc_export=${TEMPDIR}/${CLUSTER1}-${pool}-${image}.export
-
- rm -f ${rmt_export} ${loc_export}
- rbd --cluster ${CLUSTER2} -p ${pool} export ${image} ${rmt_export}
- rbd --cluster ${CLUSTER1} -p ${pool} export ${image} ${loc_export}
- cmp ${rmt_export} ${loc_export}
- rm -f ${rmt_export} ${loc_export}
-}
-
-demote_image()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-
- rbd --cluster=${cluster} mirror image demote ${pool}/${image}
-}
-
-promote_image()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local force=$4
-
- rbd --cluster=${cluster} mirror image promote ${pool}/${image} ${force}
-}
-
-set_pool_mirror_mode()
-{
- local cluster=$1
- local pool=$2
- local mode=$3
-
- rbd --cluster=${cluster} -p ${pool} mirror pool enable ${mode}
-}
-
-disable_mirror()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-
- rbd --cluster=${cluster} mirror image disable ${pool}/${image}
-}
-
-enable_mirror()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-
- rbd --cluster=${cluster} mirror image enable ${pool}/${image}
-}
-
-test_image_present()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local test_state=$4
- local image_id=$5
- local current_state=deleted
- local current_image_id
-
- current_image_id=$(get_image_id ${cluster} ${pool} ${image})
- test -n "${current_image_id}" &&
- test -z "${image_id}" -o "${image_id}" = "${current_image_id}" &&
- current_state=present
-
- test "${test_state}" = "${current_state}"
-}
-
-wait_for_image_present()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local state=$4
- local image_id=$5
- local s
-
- test -n "${image_id}" ||
- image_id=$(get_image_id ${cluster} ${pool} ${image})
-
- # TODO: add a way to force rbd-mirror to update replayers
- for s in 0.1 1 2 4 8 8 8 8 8 8 8 8 16 16 32 32; do
- sleep ${s}
- test_image_present \
- "${cluster}" "${pool}" "${image}" "${state}" "${image_id}" &&
- return 0
- done
- return 1
-}
-
-get_image_id()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-
- rbd --cluster=${cluster} -p ${pool} info ${image} |
- sed -ne 's/^.*block_name_prefix: rbd_data\.//p'
-}
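-# A sketch: given an "rbd info" line such as (hypothetical prefix)
-#
-#   block_name_prefix: rbd_data.1014516b8b4567
-#
-# the sed above prints "1014516b8b4567", which is used as the image id.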
-
-request_resync_image()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-    local image_id_var_name=$4
-
- eval "${image_id_var_name}='$(get_image_id ${cluster} ${pool} ${image})'"
- eval 'test -n "$'${image_id_var_name}'"'
-
- rbd --cluster=${cluster} -p ${pool} mirror image resync ${image}
-}
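-# Usage sketch: the caller passes a variable name that receives the
-# pre-resync image id, e.g. to later verify the image was recreated:
-#
-#   request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
-#   wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}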
-
-get_image_data_pool()
-{
- local cluster=$1
- local pool=$2
- local image=$3
-
- rbd --cluster ${cluster} -p ${pool} info ${image} |
- awk '$1 == "data_pool:" {print $2}'
-}
-
-#
-# Main
-#
-
-if [ "$#" -gt 0 ]
-then
- if [ -z "${RBD_MIRROR_TEMDIR}" ]
- then
- echo "RBD_MIRROR_TEMDIR is not set" >&2
- exit 1
- fi
-
- TEMPDIR="${RBD_MIRROR_TEMDIR}"
- cd ${TEMPDIR}
- $@
- exit $?
-fi
-
-set -xe
-
-setup
diff --git a/src/ceph/qa/workunits/rbd/rbd_mirror_stress.sh b/src/ceph/qa/workunits/rbd/rbd_mirror_stress.sh
deleted file mode 100755
index b07bf0e..0000000
--- a/src/ceph/qa/workunits/rbd/rbd_mirror_stress.sh
+++ /dev/null
@@ -1,186 +0,0 @@
-#!/bin/sh
-#
-# rbd_mirror_stress.sh - stress test rbd-mirror daemon
-#
-# The following additional environment variables affect the test:
-#
-# RBD_MIRROR_REDUCE_WRITES - if not empty, don't run the stress bench write
-#                            tool during the many-images test
-#
-
-IMAGE_COUNT=50
-export LOCKDEP=0
-
-. $(dirname $0)/rbd_mirror_helpers.sh
-
-create_snap()
-{
- local cluster=$1
- local pool=$2
- local image=$3
- local snap_name=$4
-
- rbd --cluster ${cluster} -p ${pool} snap create ${image}@${snap_name} \
- --debug-rbd=20 --debug-journaler=20 2> ${TEMPDIR}/rbd-snap-create.log
-}
-
-compare_image_snaps()
-{
- local pool=$1
- local image=$2
- local snap_name=$3
-
- local rmt_export=${TEMPDIR}/${CLUSTER2}-${pool}-${image}.export
- local loc_export=${TEMPDIR}/${CLUSTER1}-${pool}-${image}.export
-
- rm -f ${rmt_export} ${loc_export}
- rbd --cluster ${CLUSTER2} -p ${pool} export ${image}@${snap_name} ${rmt_export}
- rbd --cluster ${CLUSTER1} -p ${pool} export ${image}@${snap_name} ${loc_export}
- cmp ${rmt_export} ${loc_export}
- rm -f ${rmt_export} ${loc_export}
-}
-
-wait_for_pool_images()
-{
- local cluster=$1
- local pool=$2
- local image_count=$3
- local s
- local count
- local last_count=0
-
- while true; do
- for s in `seq 1 40`; do
- test $s -ne 1 && sleep 30
- count=$(rbd --cluster ${cluster} -p ${pool} mirror pool status | grep 'images: ' | cut -d' ' -f 2)
- test "${count}" = "${image_count}" && return 0
-
- # reset timeout if making forward progress
- test $count -ne $last_count && break
- done
-
- test $count -eq $last_count && break
- last_count=$count
- done
- rbd --cluster ${cluster} -p ${pool} mirror pool status --verbose >&2
- return 1
-}
-
-wait_for_pool_healthy()
-{
- local cluster=$1
- local pool=$2
- local s
- local state
-
- for s in `seq 1 40`; do
- test $s -ne 1 && sleep 30
- state=$(rbd --cluster ${cluster} -p ${pool} mirror pool status | grep 'health:' | cut -d' ' -f 2)
- test "${state}" = "ERROR" && break
- test "${state}" = "OK" && return 0
- done
- rbd --cluster ${cluster} -p ${pool} mirror pool status --verbose >&2
- return 1
-}
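-# Both wait helpers above poll "rbd mirror pool status", whose output looks
-# roughly like this (a sketch, values vary):
-#
-#   health: OK
-#   images: 50 total
-#       50 replaying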
-
-start_mirror ${CLUSTER1}
-start_mirror ${CLUSTER2}
-
-testlog "TEST: add image and test replay after client crashes"
-image=test
-create_image ${CLUSTER2} ${POOL} ${image} '512M'
-wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
-
-for i in `seq 1 10`
-do
- stress_write_image ${CLUSTER2} ${POOL} ${image}
-
- wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'master_position'
-
- snap_name="snap${i}"
- create_snap ${CLUSTER2} ${POOL} ${image} ${snap_name}
- wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
- wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
- wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${snap_name}
- compare_image_snaps ${POOL} ${image} ${snap_name}
-done
-
-for i in `seq 1 10`
-do
- snap_name="snap${i}"
- remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
-done
-
-remove_image_retry ${CLUSTER2} ${POOL} ${image}
-wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
-
-testlog "TEST: create many images"
-snap_name="snap"
-for i in `seq 1 ${IMAGE_COUNT}`
-do
- image="image_${i}"
- create_image ${CLUSTER2} ${POOL} ${image} '128M'
- if [ -n "${RBD_MIRROR_REDUCE_WRITES}" ]; then
- write_image ${CLUSTER2} ${POOL} ${image} 100
- else
- stress_write_image ${CLUSTER2} ${POOL} ${image}
- fi
-done
-
-wait_for_pool_images ${CLUSTER2} ${POOL} ${IMAGE_COUNT}
-wait_for_pool_healthy ${CLUSTER2} ${POOL}
-
-wait_for_pool_images ${CLUSTER1} ${POOL} ${IMAGE_COUNT}
-wait_for_pool_healthy ${CLUSTER1} ${POOL}
-
-testlog "TEST: compare many images"
-for i in `seq 1 ${IMAGE_COUNT}`
-do
- image="image_${i}"
- create_snap ${CLUSTER2} ${POOL} ${image} ${snap_name}
- wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
- wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
- wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${snap_name}
- compare_image_snaps ${POOL} ${image} ${snap_name}
-done
-
-testlog "TEST: delete many images"
-for i in `seq 1 ${IMAGE_COUNT}`
-do
- image="image_${i}"
- remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
- remove_image_retry ${CLUSTER2} ${POOL} ${image}
-done
-
-testlog "TEST: image deletions should propagate"
-wait_for_pool_images ${CLUSTER1} ${POOL} 0
-wait_for_pool_healthy ${CLUSTER1} ${POOL}
-for i in `seq 1 ${IMAGE_COUNT}`
-do
- image="image_${i}"
- wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
-done
-
-testlog "TEST: delete images during bootstrap"
-set_pool_mirror_mode ${CLUSTER1} ${POOL} 'image'
-set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
-
-start_mirror ${CLUSTER1}
-image=test
-
-for i in `seq 1 10`
-do
- image="image_${i}"
- create_image ${CLUSTER2} ${POOL} ${image} '512M'
- enable_mirror ${CLUSTER2} ${POOL} ${image}
-
- stress_write_image ${CLUSTER2} ${POOL} ${image}
- wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
-
- disable_mirror ${CLUSTER2} ${POOL} ${image}
- wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
- purge_snapshots ${CLUSTER2} ${POOL} ${image}
- remove_image_retry ${CLUSTER2} ${POOL} ${image}
-done
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/read-flags.sh b/src/ceph/qa/workunits/rbd/read-flags.sh
deleted file mode 100755
index 7c24fde..0000000
--- a/src/ceph/qa/workunits/rbd/read-flags.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/bash -ex
-
-# create a snapshot, then export it and check that setting read flags works
-# by looking at --debug-ms output
-
-function clean_up {
- rm -f test.log || true
- rbd snap remove test@snap || true
- rbd rm test || true
-}
-
-function test_read_flags {
- local IMAGE=$1
- local SET_BALANCED=$2
- local SET_LOCALIZED=$3
- local EXPECT_BALANCED=$4
- local EXPECT_LOCALIZED=$5
-
- local EXTRA_ARGS="--log-file test.log --debug-ms 1 --no-log-to-stderr"
- if [ "$SET_BALANCED" = 'y' ]; then
- EXTRA_ARGS="$EXTRA_ARGS --rbd-balance-snap-reads"
- elif [ "$SET_LOCALIZED" = 'y' ]; then
- EXTRA_ARGS="$EXTRA_ARGS --rbd-localize-snap-reads"
- fi
-
- rbd export $IMAGE - $EXTRA_ARGS > /dev/null
- if [ "$EXPECT_BALANCED" = 'y' ]; then
- grep -q balance_reads test.log
- else
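-        # grep -L prints the file name only when it does NOT contain the
-        # pattern, so this succeeds only if no balance_reads line was logged
-        # (the same idiom is used for localize_reads below)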
- grep -L balance_reads test.log | grep -q test.log
- fi
- if [ "$EXPECT_LOCALIZED" = 'y' ]; then
- grep -q localize_reads test.log
- else
- grep -L localize_reads test.log | grep -q test.log
- fi
- rm -f test.log
-
-
-clean_up
-
-trap clean_up INT TERM EXIT
-
-rbd create --image-feature layering -s 10 test
-rbd snap create test@snap
-
-# exporting from a non-snapshot, with or without the settings, should not set read flags
-test_read_flags test n n n n
-test_read_flags test y y n n
-
-# exporting from a snapshot should show the read flags in the log when they are set
-test_read_flags test@snap n n n n
-test_read_flags test@snap y n y n
-test_read_flags test@snap n y n y
-
-# balance_reads happens to take priority over localize_reads
-test_read_flags test@snap y y y n
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/run_devstack_tempest.sh b/src/ceph/qa/workunits/rbd/run_devstack_tempest.sh
deleted file mode 100755
index 8e627dd..0000000
--- a/src/ceph/qa/workunits/rbd/run_devstack_tempest.sh
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/bin/bash -ex
-
-STACK_BRANCH=stable/ocata
-
-STACK_USER=${STACK_USER:-stack}
-STACK_GROUP=${STACK_GROUP:-stack}
-TEMPEST_USER=${TEMPEST_USER:-tempest}
-
-STACK_HOME_PATH=${STACK_HOME_PATH:-/home/stack}
-STACK_OPT_PATH=${STACK_OPT_PATH:-/opt/stack}
-STACK_LOG_PATH=${STACK_LOG_PATH:-/mnt/log/stack}
-
-cleanup() {
- echo "**** cleanup"
-
- # ensure teuthology can clean up the logs
- [ -d ${STACK_LOG_PATH} ] && chmod -R a+rwx ${STACK_LOG_PATH}
-
- mkdir ${STACK_LOG_PATH}/etc
- cp -dpr /etc/cinder ${STACK_LOG_PATH}/etc || true
- cp -dpr /etc/glance ${STACK_LOG_PATH}/etc || true
- cp -dpr /etc/nova ${STACK_LOG_PATH}/etc || true
-
- # kill all OpenStack services
- if [ -d ${STACK_OPT_PATH}/devstack ]; then
- cd ${STACK_OPT_PATH}/devstack
- sudo -H -u ${STACK_USER} ./unstack.sh || true
- fi
-}
-
-trap cleanup INT TERM EXIT
-
-# devstack configuration adapted from upstream gate
-cat<<EOF > ${STACK_HOME_PATH}/local.conf
-[[local|localrc]]
-Q_USE_DEBUG_COMMAND=True
-NETWORK_GATEWAY=10.1.0.1
-USE_SCREEN=False
-DATA_DIR=${STACK_OPT_PATH}/data
-ACTIVE_TIMEOUT=90
-BOOT_TIMEOUT=90
-ASSOCIATE_TIMEOUT=60
-TERMINATE_TIMEOUT=60
-MYSQL_PASSWORD=secretmysql
-DATABASE_PASSWORD=secretdatabase
-RABBIT_PASSWORD=secretrabbit
-ADMIN_PASSWORD=secretadmin
-SERVICE_PASSWORD=secretservice
-SERVICE_TOKEN=111222333444
-SWIFT_HASH=1234123412341234
-ROOTSLEEP=0
-NOVNC_FROM_PACKAGE=True
-ENABLED_SERVICES=c-api,c-bak,c-sch,c-vol,ceilometer-acentral,ceilometer-acompute,ceilometer-alarm-evaluator,ceilometer-alarm-notifier,ceilometer-anotification,ceilometer-api,ceilometer-collector,cinder,dstat,g-api,g-reg,horizon,key,mysql,n-api,n-cauth,n-cond,n-cpu,n-novnc,n-obj,n-sch,peakmem_tracker,placement-api,q-agt,q-dhcp,q-l3,q-meta,q-metering,q-svc,rabbit,s-account,s-container,s-object,s-proxy,tempest
-SKIP_EXERCISES=boot_from_volume,bundle,client-env,euca
-SYSLOG=False
-SCREEN_LOGDIR=${STACK_LOG_PATH}/screen-logs
-LOGFILE=${STACK_LOG_PATH}/devstacklog.txt
-VERBOSE=True
-FIXED_RANGE=10.1.0.0/20
-IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/20
-FLOATING_RANGE=172.24.5.0/24
-PUBLIC_NETWORK_GATEWAY=172.24.5.1
-FIXED_NETWORK_SIZE=4096
-VIRT_DRIVER=libvirt
-SWIFT_REPLICAS=1
-LOG_COLOR=False
-UNDO_REQUIREMENTS=False
-CINDER_PERIODIC_INTERVAL=10
-
-export OS_NO_CACHE=True
-OS_NO_CACHE=True
-CEILOMETER_BACKEND=mysql
-LIBS_FROM_GIT=
-DATABASE_QUERY_LOGGING=True
-EBTABLES_RACE_FIX=True
-CINDER_SECURE_DELETE=False
-CINDER_VOLUME_CLEAR=none
-LIBVIRT_TYPE=kvm
-VOLUME_BACKING_FILE_SIZE=24G
-TEMPEST_HTTP_IMAGE=http://git.openstack.org/static/openstack.png
-FORCE_CONFIG_DRIVE=False
-
-CINDER_ENABLED_BACKENDS=ceph:ceph
-TEMPEST_STORAGE_PROTOCOL=ceph
-REMOTE_CEPH=True
-enable_plugin devstack-plugin-ceph git://git.openstack.org/openstack/devstack-plugin-ceph
-EOF
-
-cat<<EOF > ${STACK_HOME_PATH}/start.sh
-#!/bin/bash -ex
-cd ${STACK_OPT_PATH}
-git clone https://git.openstack.org/openstack-dev/devstack -b ${STACK_BRANCH}
-
-# TODO workaround for https://github.com/pypa/setuptools/issues/951
-git clone https://git.openstack.org/openstack/requirements.git -b ${STACK_BRANCH}
-sed -i 's/appdirs===1.4.0/appdirs===1.4.3/' requirements/upper-constraints.txt
-
-cd devstack
-cp ${STACK_HOME_PATH}/local.conf .
-
-export PYTHONUNBUFFERED=true
-export PROJECTS="openstack/devstack-plugin-ceph"
-
-./stack.sh
-EOF
-
-# execute devstack
-chmod 0755 ${STACK_HOME_PATH}/start.sh
-sudo -H -u ${STACK_USER} ${STACK_HOME_PATH}/start.sh
-
-# switch to rbd profile caps
-ceph auth caps client.cinder mon 'profile rbd' osd 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd pool=images'
-ceph auth caps client.cinder-bak mon 'profile rbd' osd 'profile rbd pool=backups, profile rbd pool=volumes'
-ceph auth caps client.glance mon 'profile rbd' osd 'profile rbd pool=images'
-
-# execute tempest
-chown -R ${TEMPEST_USER}:${STACK_GROUP} ${STACK_OPT_PATH}/tempest
-chown -R ${TEMPEST_USER}:${STACK_GROUP} ${STACK_OPT_PATH}/data/tempest
-chmod -R o+rx ${STACK_OPT_PATH}/devstack/files
-
-cd ${STACK_OPT_PATH}/tempest
-sudo -H -u ${TEMPEST_USER} tox -eall-plugin -- '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)|(^cinder\.tests.tempest))' --concurrency=3
diff --git a/src/ceph/qa/workunits/rbd/set_ro.py b/src/ceph/qa/workunits/rbd/set_ro.py
deleted file mode 100755
index 83c43bf..0000000
--- a/src/ceph/qa/workunits/rbd/set_ro.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python
-
-import logging
-import subprocess
-import sys
-
-logging.basicConfig(level=logging.DEBUG)
-log = logging.getLogger()
-
-def run_command(args, except_on_error=True):
- log.debug('running command "%s"', ' '.join(args))
- proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = proc.communicate()
- if out:
- log.debug('stdout: %s', out)
- if err:
- log.debug('stderr: %s', err)
- if proc.returncode:
- log.debug('ret: %d', proc.returncode)
- if except_on_error:
- raise subprocess.CalledProcessError(proc.returncode, ' '.join(args))
- return (proc.returncode, out, err)
-
-def setup(image_name):
- run_command(['rbd', 'create', '-s', '100', image_name])
- run_command(['rbd', 'snap', 'create', image_name + '@snap'])
- run_command(['rbd', 'map', image_name])
- run_command(['rbd', 'map', image_name + '@snap'])
-
-def teardown(image_name, fail_on_error=True):
- run_command(['rbd', 'unmap', '/dev/rbd/rbd/' + image_name + '@snap'], fail_on_error)
- run_command(['rbd', 'unmap', '/dev/rbd/rbd/' + image_name], fail_on_error)
- run_command(['rbd', 'snap', 'rm', image_name + '@snap'], fail_on_error)
- run_command(['rbd', 'rm', image_name], fail_on_error)
-
-def write(target, expect_fail=False):
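-    # open with buffering=0 (Python 2 semantics) so the write goes straight
-    # to the block device; on a read-only mapping it raises IOError as expected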
- try:
- with open(target, 'w', 0) as f:
- f.write('test')
- f.flush()
- assert not expect_fail, 'writing should have failed'
- except IOError:
- assert expect_fail, 'writing should not have failed'
-
-def test_ro(image_name):
- dev = '/dev/rbd/rbd/' + image_name
- snap_dev = dev + '@snap'
-
- log.info('basic device is readable')
- write(dev)
-
- log.info('basic snapshot is read-only')
- write(snap_dev, True)
-
- log.info('cannot set snapshot rw')
- ret, _, _ = run_command(['blockdev', '--setrw', snap_dev], False)
- assert ret != 0, 'snapshot was set read-write!'
- run_command(['udevadm', 'settle'])
- write(snap_dev, True)
-
- log.info('set device ro')
- run_command(['blockdev', '--setro', dev])
- run_command(['udevadm', 'settle'])
- write(dev, True)
-
- log.info('cannot set device rw when in-use')
- with open(dev, 'r') as f:
- ret, _, _ = run_command(['blockdev', '--setro', dev], False)
- assert ret != 0, 'in-use device was set read-only!'
- run_command(['udevadm', 'settle'])
-
- write(dev, True)
- run_command(['blockdev', '--setro', dev])
- run_command(['udevadm', 'settle'])
- write(dev, True)
-
- run_command(['blockdev', '--setrw', dev])
- run_command(['udevadm', 'settle'])
- write(dev)
- run_command(['udevadm', 'settle'])
- run_command(['blockdev', '--setrw', dev])
- run_command(['udevadm', 'settle'])
- write(dev)
-
- log.info('cannot set device ro when in-use')
- with open(dev, 'r') as f:
- ret, _, _ = run_command(['blockdev', '--setro', dev], False)
- assert ret != 0, 'in-use device was set read-only!'
- run_command(['udevadm', 'settle'])
-
- run_command(['rbd', 'unmap', '/dev/rbd/rbd/' + image_name])
- run_command(['rbd', 'map', '--read-only', image_name])
-
- log.info('cannot write to newly mapped ro device')
- write(dev, True)
-
- log.info('can set ro mapped device rw')
- run_command(['blockdev', '--setrw', dev])
- run_command(['udevadm', 'settle'])
- write(dev)
-
-def main():
- image_name = 'test1'
- # clean up any state from previous test runs
- teardown(image_name, False)
- setup(image_name)
-
- test_ro(image_name)
-
- teardown(image_name)
-
-if __name__ == '__main__':
- main()
diff --git a/src/ceph/qa/workunits/rbd/simple_big.sh b/src/ceph/qa/workunits/rbd/simple_big.sh
deleted file mode 100755
index 70aafda..0000000
--- a/src/ceph/qa/workunits/rbd/simple_big.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh -ex
-
-mb=100000
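-# $mb is the image size in MB; with bs=1M below, dd writes and then reads
-# the whole ~100 GB image once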
-
-rbd create foo --size $mb
-DEV=$(sudo rbd map foo)
-dd if=/dev/zero of=$DEV bs=1M count=$mb
-dd if=$DEV of=/dev/null bs=1M count=$mb
-sudo rbd unmap $DEV
-rbd rm foo
-
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/smalliobench.sh b/src/ceph/qa/workunits/rbd/smalliobench.sh
deleted file mode 100755
index f25fae4..0000000
--- a/src/ceph/qa/workunits/rbd/smalliobench.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/sh
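-#
-# usage: smalliobench.sh [NUM] [GAP] [DUR]
-# starts NUM ceph_smalliobenchrbd instances, GAP seconds apart, each
-# running for DUR seconds (defaults: 30, 5, 30)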
-
-NUM="$1"
-GAP="$2"
-DUR="$3"
-
-[ -z "$NUM" ] && NUM=30
-[ -z "$GAP" ] && GAP=5
-[ -z "$DUR" ] && DUR=30
-
-for n in `seq 1 $NUM`; do
- echo "Starting $n of $NUM ..."
- ceph_smalliobenchrbd --pool rbd --duration $DUR --disable-detailed-ops 1 &
- sleep $GAP
-done
-echo "Waiting..."
-wait
-echo "OK"
diff --git a/src/ceph/qa/workunits/rbd/test_admin_socket.sh b/src/ceph/qa/workunits/rbd/test_admin_socket.sh
deleted file mode 100755
index a7ecd83..0000000
--- a/src/ceph/qa/workunits/rbd/test_admin_socket.sh
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/bin/bash -ex
-
-TMPDIR=/tmp/rbd_test_admin_socket$$
-mkdir $TMPDIR
-trap "rm -fr $TMPDIR" 0
-
-. $(dirname $0)/../../standalone/ceph-helpers.sh
-
-function expect_false()
-{
- set -x
- if "$@"; then return 1; else return 0; fi
-}
-
-function rbd_watch_out_file()
-{
- echo ${TMPDIR}/rbd_watch_$1.out
-}
-
-function rbd_watch_pid_file()
-{
- echo ${TMPDIR}/rbd_watch_$1.pid
-}
-
-function rbd_watch_fifo()
-{
- echo ${TMPDIR}/rbd_watch_$1.fifo
-}
-
-function rbd_watch_asok()
-{
- echo ${TMPDIR}/rbd_watch_$1.asok
-}
-
-function rbd_get_perfcounter()
-{
- local image=$1
- local counter=$2
- local name
-
- name=$(ceph --format xml --admin-daemon $(rbd_watch_asok ${image}) \
- perf schema | $XMLSTARLET el -d3 |
- grep "/librbd-.*-${image}/${counter}\$")
- test -n "${name}" || return 1
-
- ceph --format xml --admin-daemon $(rbd_watch_asok ${image}) perf dump |
- $XMLSTARLET sel -t -m "${name}" -v .
-}
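-# Usage sketch: fetch a single perf counter value for the watched image, e.g.
-#
-#   val=$(rbd_get_perfcounter ${image} flush)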
-
-function rbd_check_perfcounter()
-{
- local image=$1
- local counter=$2
- local expected_val=$3
- local val=
-
- val=$(rbd_get_perfcounter ${image} ${counter})
-
- test "${val}" -eq "${expected_val}"
-}
-
-function rbd_watch_start()
-{
- local image=$1
- local asok=$(rbd_watch_asok ${image})
-
- mkfifo $(rbd_watch_fifo ${image})
- (cat $(rbd_watch_fifo ${image}) |
- rbd --admin-socket ${asok} watch ${image} \
- > $(rbd_watch_out_file ${image}) 2>&1)&
-
- # find pid of the started rbd watch process
- local pid
- for i in `seq 10`; do
- pid=$(ps auxww | awk "/[r]bd --admin.* watch ${image}/ {print \$2}")
- test -n "${pid}" && break
- sleep 0.1
- done
- test -n "${pid}"
- echo ${pid} > $(rbd_watch_pid_file ${image})
-
- # find watcher admin socket
- test -n "${asok}"
- for i in `seq 10`; do
- test -S "${asok}" && break
- sleep 0.1
- done
- test -S "${asok}"
-
- # configure debug level
- ceph --admin-daemon "${asok}" config set debug_rbd 20
-
- # check that watcher is registered
- rbd status ${image} | expect_false grep "Watchers: none"
-}
-
-function rbd_watch_end()
-{
- local image=$1
- local regexp=$2
-
- # send 'enter' to watch to exit
- echo > $(rbd_watch_fifo ${image})
- # just in case it is not terminated
- kill $(cat $(rbd_watch_pid_file ${image})) || :
-
- # output rbd watch out file for easier troubleshooting
- cat $(rbd_watch_out_file ${image})
-
- # cleanup
- rm -f $(rbd_watch_fifo ${image}) $(rbd_watch_pid_file ${image}) \
- $(rbd_watch_out_file ${image}) $(rbd_watch_asok ${image})
-}
-
-wait_for_clean
-
-pool="rbd"
-image=testimg$$
-ceph_admin="ceph --admin-daemon $(rbd_watch_asok ${image})"
-
-rbd create --size 128 ${pool}/${image}
-
-# check rbd cache commands are present in help output
-rbd_cache_flush="rbd cache flush ${pool}/${image}"
-rbd_cache_invalidate="rbd cache invalidate ${pool}/${image}"
-
-rbd_watch_start ${image}
-${ceph_admin} help | fgrep "${rbd_cache_flush}"
-${ceph_admin} help | fgrep "${rbd_cache_invalidate}"
-rbd_watch_end ${image}
-
-# test rbd cache commands with disabled and enabled cache
-for conf_rbd_cache in false true; do
-
- rbd image-meta set ${image} conf_rbd_cache ${conf_rbd_cache}
-
- rbd_watch_start ${image}
-
- rbd_check_perfcounter ${image} flush 0
- ${ceph_admin} ${rbd_cache_flush}
-    # the 'flush' counter should increase regardless of whether the cache is enabled
- rbd_check_perfcounter ${image} flush 1
-
- rbd_check_perfcounter ${image} invalidate_cache 0
- ${ceph_admin} ${rbd_cache_invalidate}
-    # the 'invalidate_cache' counter should increase regardless of whether the cache is enabled
- rbd_check_perfcounter ${image} invalidate_cache 1
-
- rbd_watch_end ${image}
-done
-
-rbd rm ${image}
diff --git a/src/ceph/qa/workunits/rbd/test_librbd.sh b/src/ceph/qa/workunits/rbd/test_librbd.sh
deleted file mode 100755
index 447306b..0000000
--- a/src/ceph/qa/workunits/rbd/test_librbd.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh -e
-
-if [ -n "${VALGRIND}" ]; then
- valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
- --error-exitcode=1 ceph_test_librbd
-else
- ceph_test_librbd
-fi
-exit 0
diff --git a/src/ceph/qa/workunits/rbd/test_librbd_api.sh b/src/ceph/qa/workunits/rbd/test_librbd_api.sh
deleted file mode 100755
index 975144b..0000000
--- a/src/ceph/qa/workunits/rbd/test_librbd_api.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh -e
-
-ceph_test_librbd_api
-exit 0
diff --git a/src/ceph/qa/workunits/rbd/test_librbd_python.sh b/src/ceph/qa/workunits/rbd/test_librbd_python.sh
deleted file mode 100755
index 656a5bd..0000000
--- a/src/ceph/qa/workunits/rbd/test_librbd_python.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh -ex
-
-relpath=$(dirname $0)/../../../src/test/pybind
-
-if [ -n "${VALGRIND}" ]; then
- valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
- --errors-for-leak-kinds=definite --error-exitcode=1 \
- nosetests -v $relpath/test_rbd.py
-else
- nosetests -v $relpath/test_rbd.py
-fi
-exit 0
diff --git a/src/ceph/qa/workunits/rbd/test_lock_fence.sh b/src/ceph/qa/workunits/rbd/test_lock_fence.sh
deleted file mode 100755
index 7ecafd4..0000000
--- a/src/ceph/qa/workunits/rbd/test_lock_fence.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash -x
-# can't use -e because of background process
-
-IMAGE=rbdrw-image
-LOCKID=rbdrw
-RELPATH=$(dirname $0)/../../../src/test/librbd
-RBDRW=$RELPATH/rbdrw.py
-
-rbd create $IMAGE --size 10 --image-format 2 --image-shared || exit 1
-
-# rbdrw loops doing I/O to $IMAGE after locking with lockid $LOCKID
-python $RBDRW $IMAGE $LOCKID &
-iochild=$!
-
-# give client time to lock and start reading/writing
-LOCKS='{}'
-while [ "$LOCKS" == "{}" ]
-do
- LOCKS=$(rbd lock list $IMAGE --format json)
- sleep 1
-done
-
-clientaddr=$(rbd lock list $IMAGE | tail -1 | awk '{print $NF;}')
-clientid=$(rbd lock list $IMAGE | tail -1 | awk '{print $1;}')
-echo "clientaddr: $clientaddr"
-echo "clientid: $clientid"
-
-ceph osd blacklist add $clientaddr || exit 1
-
-wait $iochild
-rbdrw_exitcode=$?
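-# 108 is ESHUTDOWN: rbdrw exits with it once the blacklist fences its session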
-if [ $rbdrw_exitcode != 108 ]
-then
- echo "wrong exitcode from rbdrw: $rbdrw_exitcode"
- exit 1
-else
- echo "rbdrw stopped with ESHUTDOWN"
-fi
-
-set -e
-ceph osd blacklist rm $clientaddr
-rbd lock remove $IMAGE $LOCKID "$clientid"
-# rbdrw will have exited with an existing watch, so, until #3527 is fixed,
-# hang out until the watch expires
-sleep 30
-rbd rm $IMAGE
-echo OK
diff --git a/src/ceph/qa/workunits/rbd/test_rbd_mirror.sh b/src/ceph/qa/workunits/rbd/test_rbd_mirror.sh
deleted file mode 100755
index e139dd7..0000000
--- a/src/ceph/qa/workunits/rbd/test_rbd_mirror.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh -e
-
-if [ -n "${VALGRIND}" ]; then
- valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
- --error-exitcode=1 ceph_test_rbd_mirror
-else
- ceph_test_rbd_mirror
-fi
-exit 0
diff --git a/src/ceph/qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh b/src/ceph/qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh
deleted file mode 100755
index e5377f4..0000000
--- a/src/ceph/qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-#
-# Regression test for http://tracker.ceph.com/issues/14984
-#
-# When the bug is present, starting the rbdmap service causes a bogus
-# message to be logged because the RBDMAPFILE environment variable is
-# not set.
-#
-# When the bug is not present, starting the rbdmap service will emit
-# no log messages, because /etc/ceph/rbdmap does not contain any lines
-# that require processing.
-#
-set -ex
-
-which ceph-detect-init >/dev/null || exit 1
-[ "$(ceph-detect-init)" = "systemd" ] || exit 0
-
-echo "TEST: save timestamp for use later with journalctl --since"
-TIMESTAMP=$(date +%Y-%m-%d\ %H:%M:%S)
-
-echo "TEST: assert that rbdmap has not logged anything since boot"
-journalctl -b 0 -t rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
-journalctl -b 0 -t init-rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
-
-echo "TEST: restart the rbdmap.service"
-sudo systemctl restart rbdmap.service
-
-echo "TEST: ensure that /usr/bin/rbdmap runs to completion"
-until sudo systemctl status rbdmap.service | grep 'active (exited)' ; do
- sleep 0.5
-done
-
-echo "TEST: assert that rbdmap has not logged anything since TIMESTAMP"
-journalctl --since "$TIMESTAMP" -t rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
-journalctl --since "$TIMESTAMP" -t init-rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
-
-exit 0
diff --git a/src/ceph/qa/workunits/rbd/verify_pool.sh b/src/ceph/qa/workunits/rbd/verify_pool.sh
deleted file mode 100755
index f008fb6..0000000
--- a/src/ceph/qa/workunits/rbd/verify_pool.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh -ex
-
-POOL_NAME=rbd_test_validate_pool
-PG_NUM=100
-
-tear_down () {
- ceph osd pool delete $POOL_NAME $POOL_NAME --yes-i-really-really-mean-it || true
-}
-
-set_up () {
- tear_down
- ceph osd pool create $POOL_NAME $PG_NUM
- ceph osd pool mksnap $POOL_NAME snap
- rbd pool init $POOL_NAME
-}
-
-trap tear_down EXIT HUP INT
-set_up
-
-# creating an image in a pool that has pool-level (mksnap-style) snapshots should fail
-rbd create --pool $POOL_NAME --size 1 foo && exit 1 || true
-
-# should succeed if images already exist in the pool
-rados --pool $POOL_NAME create rbd_directory
-rbd create --pool $POOL_NAME --size 1 foo
-
-echo OK
diff --git a/src/ceph/qa/workunits/rename/all.sh b/src/ceph/qa/workunits/rename/all.sh
deleted file mode 100755
index 8a493d0..0000000
--- a/src/ceph/qa/workunits/rename/all.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash -ex
-
-dir=`dirname $0`
-
-CEPH_TOOL='./ceph'
-$CEPH_TOOL || CEPH_TOOL='ceph'
-
-CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/prepare.sh
-
-CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_nul.sh
-rm ./?/* || true
-
-CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/rem_nul.sh
-rm ./?/* || true
-
-CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_pri.sh
-rm ./?/* || true
-
-CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/rem_pri.sh
-rm ./?/* || true
-
-CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/rem_rem.sh
-rm ./?/* || true
-
-CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_nul.sh
-rm -r ./?/* || true
-
-CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_pri.sh
-rm -r ./?/* || true
-
-CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/dir_pri_pri.sh
-rm -r ./?/* || true
-
-CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/dir_pri_nul.sh
-rm -r ./?/* || true
-
diff --git a/src/ceph/qa/workunits/rename/dir_pri_nul.sh b/src/ceph/qa/workunits/rename/dir_pri_nul.sh
deleted file mode 100755
index dd8106b..0000000
--- a/src/ceph/qa/workunits/rename/dir_pri_nul.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh -ex
-
-# dir: srcdn=destdn
-mkdir ./a/dir1
-mv ./a/dir1 ./a/dir1.renamed
-
-# dir: diff
-mkdir ./a/dir2
-mv ./a/dir2 ./b/dir2
-
-# dir: diff, child subtree on target
-mkdir -p ./a/dir3/child/foo
-$CEPH_TOOL mds tell 0 export_dir /a/dir3/child 1
-sleep 5
-mv ./a/dir3 ./b/dir3
-
-# dir: diff, child subtree on other
-mkdir -p ./a/dir4/child/foo
-$CEPH_TOOL mds tell 0 export_dir /a/dir4/child 2
-sleep 5
-mv ./a/dir4 ./b/dir4
-
-# dir: witness subtree adjustment
-mkdir -p ./a/dir5/1/2/3/4
-$CEPH_TOOL mds tell 0 export_dir /a/dir5/1/2/3 2
-sleep 5
-mv ./a/dir5 ./b
-
diff --git a/src/ceph/qa/workunits/rename/dir_pri_pri.sh b/src/ceph/qa/workunits/rename/dir_pri_pri.sh
deleted file mode 100755
index de235fc..0000000
--- a/src/ceph/qa/workunits/rename/dir_pri_pri.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/sh -ex
-
-# dir, srcdn=destdn
-mkdir ./a/dir1
-mkdir ./a/dir2
-mv -T ./a/dir1 ./a/dir2
-
-# dir, different
-mkdir ./a/dir3
-mkdir ./b/dir4
-mv -T ./a/dir3 ./b/dir4
diff --git a/src/ceph/qa/workunits/rename/plan.txt b/src/ceph/qa/workunits/rename/plan.txt
deleted file mode 100644
index b423b41..0000000
--- a/src/ceph/qa/workunits/rename/plan.txt
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/bin/sh
-
-# srcdn destdn targeti
-
-## pri auth null auth -
-## pri rep null auth -
-## rem auth null auth -
-## rem rep null auth -
-
-#/ pri auth null rep - dup of pr_na
-#/ pri rep null rep -
-#/ rem auth null rep - dup of rr_na
-#/ rem rep null rep -
-
-
-## pri auth pri auth -
-# pri rep pri auth -
-## rem auth pri auth -
-# rem rep pri auth -
-
-# pri auth pri rep -
-# pri rep pri rep -
-# rem auth pri rep -
-# rem rep pri rep -
-
-## pri auth rem auth auth
-# pri rep rem auth auth
-## rem auth rem auth auth
-# rem rep rem auth auth
-
-# pri auth rem rep auth
-# pri rep rem rep auth
-# rem auth rem rep auth
-# rem rep rem rep auth
-
-# pri auth rem auth rep
-# pri rep rem auth rep
-# rem auth rem auth rep
-# rem rep rem auth rep
-
-# pri auth rem rep rep
-# pri rep rem rep rep
-# rem auth rem rep rep
-# rem rep rem rep rep
-
-
-types of operations
-
-pri nul
- srcdn=destdn
- diff
-
-rem nul
- srci=srcdn=destdn
- srci=srcdn
- srcdn=destdn
- srci=destdn
- all different
-
-pri pri
- srcdn=destdn
- different
-
-rem pri
- srci=srcdn=destdn
- srci=srcdn
- srcdn=destdn
- srci=destdn
- all different
-
-pri rem
- srcdn=destdn=desti
- srcdn=destdn
- destdn=desti
- srcdn=desti
- all different
-
-rem rem
- srci=srcdn=destdn=desti
- srci=srcdn=destdn
- srci=srcdn=desti
- srci=destdn=desti
-    srcdn=destdn=desti
- srci=srcdn destdn=desti
- srci=destdn srcdn=desti
- srci=desti srcdn=destdn
- srci=srcdn
- srci=destdn
- srci=desti
- srcdn=destdn
- srcdn=desti
- destdn=desti
- all different
-
-
-
-
-
-
-
-
-
-p n same
-r n same
-p n diff
-r n diff
-
-p p same
-r p same
-
-p r
diff --git a/src/ceph/qa/workunits/rename/prepare.sh b/src/ceph/qa/workunits/rename/prepare.sh
deleted file mode 100755
index b5ba4ae..0000000
--- a/src/ceph/qa/workunits/rename/prepare.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh -ex
-
-$CEPH_TOOL mds tell 0 injectargs '--mds-bal-interval 0'
-$CEPH_TOOL mds tell 1 injectargs '--mds-bal-interval 0'
-$CEPH_TOOL mds tell 2 injectargs '--mds-bal-interval 0'
-$CEPH_TOOL mds tell 3 injectargs '--mds-bal-interval 0'
-#$CEPH_TOOL mds tell 4 injectargs '--mds-bal-interval 0'
-
-mkdir -p ./a/a
-mkdir -p ./b/b
-mkdir -p ./c/c
-mkdir -p ./d/d
-
-mount_dir=`df . | grep -o " /.*" | grep -o "/.*"`
-cur_dir=`pwd`
-ceph_dir=${cur_dir##$mount_dir}
-$CEPH_TOOL mds tell 0 export_dir $ceph_dir/b 1
-$CEPH_TOOL mds tell 0 export_dir $ceph_dir/c 2
-$CEPH_TOOL mds tell 0 export_dir $ceph_dir/d 3
-sleep 5
-
diff --git a/src/ceph/qa/workunits/rename/pri_nul.sh b/src/ceph/qa/workunits/rename/pri_nul.sh
deleted file mode 100755
index c40ec1d..0000000
--- a/src/ceph/qa/workunits/rename/pri_nul.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/sh -ex
-
-# srcdn=destdn
-touch ./a/file1
-mv ./a/file1 ./a/file1.renamed
-
-# different
-touch ./a/file2
-mv ./a/file2 ./b
-
-
diff --git a/src/ceph/qa/workunits/rename/pri_pri.sh b/src/ceph/qa/workunits/rename/pri_pri.sh
deleted file mode 100755
index b74985f..0000000
--- a/src/ceph/qa/workunits/rename/pri_pri.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh -ex
-
-# srcdn=destdn
-touch ./a/file1
-touch ./a/file2
-mv ./a/file1 ./a/file2
-
-# different (srcdn != destdn)
-touch ./a/file3
-touch ./b/file4
-mv ./a/file3 ./b/file4
-
diff --git a/src/ceph/qa/workunits/rename/pri_rem.sh b/src/ceph/qa/workunits/rename/pri_rem.sh
deleted file mode 100755
index a1cd03d..0000000
--- a/src/ceph/qa/workunits/rename/pri_rem.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh -ex
-
-dotest() {
- src=$1
- desti=$2
- destdn=$3
- n=$4
-
- touch ./$src/src$n
- touch ./$desti/desti$n
- ln ./$desti/desti$n ./$destdn/destdn$n
-
- mv ./$src/src$n ./$destdn/destdn$n
-}
-
-
-# srcdn=destdn=desti
-dotest 'a' 'a' 'a' 1
-
-# destdn=desti
-dotest 'b' 'a' 'a' 2
-
-# srcdn=destdn
-dotest 'a' 'b' 'a' 3
-
-# srcdn=desti
-dotest 'a' 'a' 'b' 4
-
-# all different
-dotest 'a' 'b' 'c' 5
-
diff --git a/src/ceph/qa/workunits/rename/rem_nul.sh b/src/ceph/qa/workunits/rename/rem_nul.sh
deleted file mode 100755
index a710331..0000000
--- a/src/ceph/qa/workunits/rename/rem_nul.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh -ex
-
-dotest() {
- srci=$1
- srcdn=$2
- dest=$3
- n=$4
-
- touch ./$srci/srci$n
- ln ./$srci/srci$n ./$srcdn/srcdn$n
-
- mv ./$srcdn/srcdn$n ./$dest/dest$n
-}
-
-# srci=srcdn=destdn
-dotest 'a' 'a' 'a' 1
-
-# srcdn=destdn
-dotest 'b' 'a' 'a' 2
-
-# srci=destdn
-dotest 'a' 'b' 'a' 3
-
-# srci=srcdn
-dotest 'a' 'a' 'b' 4
-
-# all different
-dotest 'a' 'b' 'c' 5
-
diff --git a/src/ceph/qa/workunits/rename/rem_pri.sh b/src/ceph/qa/workunits/rename/rem_pri.sh
deleted file mode 100755
index 501ac5e..0000000
--- a/src/ceph/qa/workunits/rename/rem_pri.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh -ex
-
-dotest() {
- srci=$1
- srcdn=$2
- dest=$3
- n=$4
-
- touch ./$srci/srci$n
- ln ./$srci/srci$n ./$srcdn/srcdn$n
- touch ./$dest/dest$n
-
- mv ./$srcdn/srcdn$n ./$dest/dest$n
-}
-
-# srci=srcdn=destdn
-dotest 'a' 'a' 'a' 1
-
-# srcdn=destdn
-dotest 'b' 'a' 'a' 2
-
-# srci=destdn
-dotest 'a' 'b' 'a' 3
-
-# srci=srcdn
-dotest 'a' 'a' 'b' 4
-
-# all different
-dotest 'a' 'b' 'c' 5
diff --git a/src/ceph/qa/workunits/rename/rem_rem.sh b/src/ceph/qa/workunits/rename/rem_rem.sh
deleted file mode 100755
index 80028c5..0000000
--- a/src/ceph/qa/workunits/rename/rem_rem.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/sh -ex
-
-dotest() {
- srci=$1
- srcdn=$2
- desti=$3
- destdn=$4
- n=$5
-
- touch ./$srci/srci$n
- ln ./$srci/srci$n ./$srcdn/srcdn$n
- touch ./$desti/desti$n
- ln ./$desti/desti$n ./$destdn/destdn$n
-
- mv ./$srcdn/srcdn$n ./$destdn/destdn$n
-}
-
-# srci=srcdn=destdn=desti
-dotest 'a' 'a' 'a' 'a' 1
-
-# srcdn=destdn=desti
-dotest 'b' 'a' 'a' 'a' 2
-
-# srci=destdn=desti
-dotest 'a' 'b' 'a' 'a' 3
-
-# srci=srcdn=destdn
-dotest 'a' 'a' 'b' 'a' 4
-
-# srci=srcdn=desti
-dotest 'a' 'a' 'a' 'b' 5
-
-# srci=srcdn destdn=desti
-dotest 'a' 'a' 'b' 'b' 6
-
-# srci=destdn srcdn=desti
-dotest 'a' 'b' 'b' 'a' 7
-
-# srci=desti srcdn=destdn
-dotest 'a' 'b' 'a' 'b' 8
-
-# srci=srcdn
-dotest 'a' 'a' 'b' 'c' 9
-
-# srci=desti
-dotest 'a' 'b' 'a' 'c' 10
-
-# srci=destdn
-dotest 'a' 'b' 'c' 'a' 11
-
-# srcdn=desti
-dotest 'a' 'b' 'b' 'c' 12
-
-# srcdn=destdn
-dotest 'a' 'b' 'c' 'b' 13
-
-# destdn=desti
-dotest 'a' 'b' 'c' 'c' 14
-
-# all different
-dotest 'a' 'b' 'c' 'd' 15
diff --git a/src/ceph/qa/workunits/rest/test-restful.sh b/src/ceph/qa/workunits/rest/test-restful.sh
deleted file mode 100755
index 34fb189..0000000
--- a/src/ceph/qa/workunits/rest/test-restful.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh -ex
-
-mydir=`dirname $0`
-
-secret=`ceph config-key get mgr/restful/keys/admin`
-active=`ceph mgr dump | jq -r .active_name`
-echo "active $active admin secret $secret"
-
-prefix="mgr/restful/$active"
-addr=`ceph config-key get $prefix/server_addr || echo 127.0.0.1`
-port=`ceph config-key get $prefix/server_port || echo 8003`
-url="https://$addr:$port"
-echo "prefix $prefix url $url"
-$mydir/test_mgr_rest_api.py $url $secret
-
-echo $0 OK
diff --git a/src/ceph/qa/workunits/rest/test.py b/src/ceph/qa/workunits/rest/test.py
deleted file mode 100755
index 8b55378..0000000
--- a/src/ceph/qa/workunits/rest/test.py
+++ /dev/null
@@ -1,424 +0,0 @@
-#!/usr/bin/python
-
-from __future__ import print_function
-
-import json
-import os
-import requests
-import subprocess
-import sys
-import time
-import uuid
-import xml.etree.ElementTree
-
-BASEURL = os.environ.get('BASEURL', 'http://localhost:5000/api/v0.1')
-
-
-def fail(r, msg):
- print('FAILURE: url ', r.url, file=sys.stderr)
- print(msg, file=sys.stderr)
- print('Response content: ', r.text, file=sys.stderr)
- print('Headers: ', r.headers, file=sys.stderr)
- sys.exit(1)
-
-
-def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None):
- failmsg, r = expect_nofail(url, method, respcode, contenttype, extra_hdrs,
- data)
- if failmsg:
- fail(r, failmsg)
- return r
-
-
-def expect_nofail(url, method, respcode, contenttype, extra_hdrs=None,
- data=None):
-
- fdict = {'get':requests.get, 'put':requests.put}
- f = fdict[method.lower()]
- r = f(BASEURL + '/' + url, headers=extra_hdrs, data=data)
-
- print('{0} {1}: {2} {3}'.format(method, url, contenttype, r.status_code))
-
- if r.status_code != respcode:
- return 'expected {0}, got {1}'.format(respcode, r.status_code), r
-
- r_contenttype = r.headers['content-type']
-
- if contenttype in ['json', 'xml']:
- contenttype = 'application/' + contenttype
- elif contenttype:
- contenttype = 'text/' + contenttype
-
- if contenttype and r_contenttype != contenttype:
- return 'expected {0}, got "{1}"'.format(contenttype, r_contenttype), r
-
- if contenttype.startswith('application'):
- if r_contenttype == 'application/json':
- try:
- # older requests.py doesn't create r.myjson; create it myself
- r.myjson = json.loads(r.text)
- assert(r.myjson is not None)
- except Exception as e:
- return 'Invalid JSON returned: "{0}"'.format(str(e)), r
-
- if r_contenttype == 'application/xml':
- try:
- # if it's there, squirrel it away for use in the caller
- r.tree = xml.etree.ElementTree.fromstring(r.text)
- except Exception as e:
- return 'Invalid XML returned: "{0}"'.format(str(e)), r
-
- return '', r
-
-
-JSONHDR={'accept':'application/json'}
-XMLHDR={'accept':'application/xml'}
-
-if __name__ == '__main__':
- expect('auth/export', 'GET', 200, 'plain')
- expect('auth/export.json', 'GET', 200, 'json')
- expect('auth/export.xml', 'GET', 200, 'xml')
- expect('auth/export', 'GET', 200, 'json', JSONHDR)
- expect('auth/export', 'GET', 200, 'xml', XMLHDR)
-
- expect('auth/add?entity=client.xx&'
- 'caps=mon&caps=allow&caps=osd&caps=allow+*', 'PUT', 200, 'json',
- JSONHDR)
-
- r = expect('auth/export?entity=client.xx', 'GET', 200, 'plain')
- # must use text/plain; default is application/x-www-form-urlencoded
- expect('auth/add?entity=client.xx', 'PUT', 200, 'plain',
- {'Content-Type':'text/plain'}, data=r.text)
-
- r = expect('auth/list', 'GET', 200, 'plain')
- assert('client.xx' in r.text)
-
- r = expect('auth/list.json', 'GET', 200, 'json')
- dictlist = r.myjson['output']['auth_dump']
- xxdict = [d for d in dictlist if d['entity'] == 'client.xx'][0]
- assert(xxdict)
- assert('caps' in xxdict)
- assert('mon' in xxdict['caps'])
- assert('osd' in xxdict['caps'])
-
- expect('auth/get-key?entity=client.xx', 'GET', 200, 'json', JSONHDR)
- expect('auth/print-key?entity=client.xx', 'GET', 200, 'json', JSONHDR)
- expect('auth/print_key?entity=client.xx', 'GET', 200, 'json', JSONHDR)
-
- expect('auth/caps?entity=client.xx&caps=osd&caps=allow+rw', 'PUT', 200,
- 'json', JSONHDR)
- r = expect('auth/list.json', 'GET', 200, 'json')
- dictlist = r.myjson['output']['auth_dump']
- xxdict = [d for d in dictlist if d['entity'] == 'client.xx'][0]
- assert(xxdict)
- assert('caps' in xxdict)
- assert(not 'mon' in xxdict['caps'])
- assert('osd' in xxdict['caps'])
- assert(xxdict['caps']['osd'] == 'allow rw')
-
- # export/import/export, compare
- r = expect('auth/export', 'GET', 200, 'plain')
- exp1 = r.text
- assert('client.xx' in exp1)
- r = expect('auth/import', 'PUT', 200, 'plain',
- {'Content-Type':'text/plain'}, data=r.text)
- r2 = expect('auth/export', 'GET', 200, 'plain')
- assert(exp1 == r2.text)
- expect('auth/del?entity=client.xx', 'PUT', 200, 'json', JSONHDR)
-
- r = expect('osd/dump', 'GET', 200, 'json', JSONHDR)
- assert('epoch' in r.myjson['output'])
-
- assert('GLOBAL' in expect('df', 'GET', 200, 'plain').text)
- assert('DIRTY' in expect('df?detail=detail', 'GET', 200, 'plain').text)
- # test param with no value (treated as param=param)
- assert('DIRTY' in expect('df?detail', 'GET', 200, 'plain').text)
-
- r = expect('df', 'GET', 200, 'json', JSONHDR)
- assert('total_used_bytes' in r.myjson['output']['stats'])
- r = expect('df', 'GET', 200, 'xml', XMLHDR)
- assert(r.tree.find('output/stats/stats/total_used_bytes') is not None)
-
- r = expect('df?detail', 'GET', 200, 'json', JSONHDR)
- assert('rd_bytes' in r.myjson['output']['pools'][0]['stats'])
- r = expect('df?detail', 'GET', 200, 'xml', XMLHDR)
- assert(r.tree.find('output/stats/pools/pool/stats/rd_bytes') is not None)
-
- expect('fsid', 'GET', 200, 'json', JSONHDR)
- expect('health', 'GET', 200, 'json', JSONHDR)
- expect('health?detail', 'GET', 200, 'json', JSONHDR)
- expect('health?detail', 'GET', 200, 'plain')
-
- # XXX no ceph -w equivalent yet
-
- expect('mds/cluster_down', 'PUT', 200, '')
- expect('mds/cluster_down', 'PUT', 200, '')
- expect('mds/cluster_up', 'PUT', 200, '')
- expect('mds/cluster_up', 'PUT', 200, '')
-
- expect('mds/compat/rm_incompat?feature=4', 'PUT', 200, '')
- expect('mds/compat/rm_incompat?feature=4', 'PUT', 200, '')
-
- r = expect('mds/compat/show', 'GET', 200, 'json', JSONHDR)
- assert('incompat' in r.myjson['output'])
- r = expect('mds/compat/show', 'GET', 200, 'xml', XMLHDR)
- assert(r.tree.find('output/mds_compat/incompat') is not None)
-
- # EEXIST from CLI
- expect('mds/deactivate?who=2', 'PUT', 400, '')
-
- r = expect('mds/dump.xml', 'GET', 200, 'xml')
- assert(r.tree.find('output/mdsmap/created') is not None)
-
- expect('fs/flag/set?flag_name=enable_multiple&val=true', 'PUT', 200, '')
- expect('osd/pool/create?pg_num=1&pool=my_cephfs_metadata', 'PUT', 200, '')
- expect('osd/pool/create?pg_num=1&pool=my_cephfs_data', 'PUT', 200, '')
- expect('fs/new?fs_name=mycephfs&metadata=my_cephfs_metadata&data=my_cephfs_data', 'PUT', 200, '')
- expect('osd/pool/create?pool=data2&pg_num=10', 'PUT', 200, '')
- r = expect('osd/dump', 'GET', 200, 'json', JSONHDR)
- pools = r.myjson['output']['pools']
- poolnum = None
- for p in pools:
- if p['pool_name'] == 'data2':
- poolnum = p['pool']
- assert(p['pg_num'] == 10)
- break
- assert(poolnum is not None)
- expect('mds/add_data_pool?pool={0}'.format(poolnum), 'PUT', 200, '')
- expect('mds/remove_data_pool?pool={0}'.format(poolnum), 'PUT', 200, '')
- expect('osd/pool/delete?pool=data2&pool2=data2'
- '&sure=--yes-i-really-really-mean-it', 'PUT', 200, '')
- expect('mds/set?var=allow_multimds&val=true&confirm=--yes-i-really-mean-it', 'PUT', 200, '')
- expect('mds/set_max_mds?maxmds=4', 'PUT', 200, '')
- expect('mds/set?var=max_mds&val=4', 'PUT', 200, '')
- expect('mds/set?var=max_file_size&val=1048576', 'PUT', 200, '')
- expect('mds/set?var=allow_new_snaps&val=true&confirm=--yes-i-really-mean-it', 'PUT', 200, '')
- expect('mds/set?var=allow_new_snaps&val=0', 'PUT', 200, '')
- expect('mds/set?var=inline_data&val=true&confirm=--yes-i-really-mean-it', 'PUT', 200, '')
- expect('mds/set?var=inline_data&val=0', 'PUT', 200, '')
- r = expect('mds/dump.json', 'GET', 200, 'json')
- assert(r.myjson['output']['max_mds'] == 4)
- expect('mds/set_max_mds?maxmds=3', 'PUT', 200, '')
- r = expect('mds/stat.json', 'GET', 200, 'json')
- expect('mds/set?var=max_mds&val=2', 'PUT', 200, '')
- r = expect('mds/stat.json', 'GET', 200, 'json')
- assert('epoch' in r.myjson['output']['fsmap'])
- r = expect('mds/stat.xml', 'GET', 200, 'xml')
- assert(r.tree.find('output/mds_stat/fsmap/epoch') is not None)
-
- # more content tests below, just check format here
- expect('mon/dump.json', 'GET', 200, 'json')
- expect('mon/dump.xml', 'GET', 200, 'xml')
-
- r = expect('mon/getmap', 'GET', 200, '')
- assert(len(r.text) != 0)
- r = expect('mon_status.json', 'GET', 200, 'json')
- assert('name' in r.myjson['output'])
- r = expect('mon_status.xml', 'GET', 200, 'xml')
- assert(r.tree.find('output/mon_status/name') is not None)
-
- bl = '192.168.0.1:0/1000'
- expect('osd/blacklist?blacklistop=add&addr=' + bl, 'PUT', 200, '')
- r = expect('osd/blacklist/ls.json', 'GET', 200, 'json')
- assert([b for b in r.myjson['output'] if b['addr'] == bl])
- expect('osd/blacklist?blacklistop=rm&addr=' + bl, 'PUT', 200, '')
- r = expect('osd/blacklist/ls.json', 'GET', 200, 'json')
- assert([b for b in r.myjson['output'] if b['addr'] == bl] == [])
-
- expect('osd/crush/tunables?profile=legacy', 'PUT', 200, '')
- expect('osd/crush/tunables?profile=bobtail', 'PUT', 200, '')
-
- expect('osd/scrub?who=0', 'PUT', 200, '')
- expect('osd/deep-scrub?who=0', 'PUT', 200, '')
- expect('osd/repair?who=0', 'PUT', 200, '')
-
- expect('osd/set?key=noup', 'PUT', 200, '')
-
- expect('osd/down?ids=0', 'PUT', 200, '')
- r = expect('osd/dump', 'GET', 200, 'json', JSONHDR)
- assert(r.myjson['output']['osds'][0]['osd'] == 0)
- assert(r.myjson['output']['osds'][0]['up'] == 0)
-
- expect('osd/unset?key=noup', 'PUT', 200, '')
-
- for i in range(0,100):
- r = expect('osd/dump', 'GET', 200, 'json', JSONHDR)
- assert(r.myjson['output']['osds'][0]['osd'] == 0)
- if r.myjson['output']['osds'][0]['up'] == 1:
- break
- else:
- print("waiting for osd.0 to come back up", file=sys.stderr)
- time.sleep(10)
-
- r = expect('osd/dump', 'GET', 200, 'json', JSONHDR)
- assert(r.myjson['output']['osds'][0]['osd'] == 0)
- assert(r.myjson['output']['osds'][0]['up'] == 1)
-
- r = expect('osd/find?id=1', 'GET', 200, 'json', JSONHDR)
- assert(r.myjson['output']['osd'] == 1)
-
- expect('osd/out?ids=1', 'PUT', 200, '')
- r = expect('osd/dump', 'GET', 200, 'json', JSONHDR)
- assert(r.myjson['output']['osds'][1]['osd'] == 1)
- assert(r.myjson['output']['osds'][1]['in'] == 0)
-
- expect('osd/in?ids=1', 'PUT', 200, '')
- r = expect('osd/dump', 'GET', 200, 'json', JSONHDR)
- assert(r.myjson['output']['osds'][1]['osd'] == 1)
- assert(r.myjson['output']['osds'][1]['in'] == 1)
-
- r = expect('osd/find?id=0', 'GET', 200, 'json', JSONHDR)
- assert(r.myjson['output']['osd'] == 0)
-
- r = expect('osd/getmaxosd', 'GET', 200, 'xml', XMLHDR)
- assert(r.tree.find('output/getmaxosd/max_osd') is not None)
- r = expect('osd/getmaxosd', 'GET', 200, 'json', JSONHDR)
- saved_maxosd = r.myjson['output']['max_osd']
- expect('osd/setmaxosd?newmax=10', 'PUT', 200, '')
- r = expect('osd/getmaxosd', 'GET', 200, 'json', JSONHDR)
- assert(r.myjson['output']['max_osd'] == 10)
- expect('osd/setmaxosd?newmax={0}'.format(saved_maxosd), 'PUT', 200, '')
- r = expect('osd/getmaxosd', 'GET', 200, 'json', JSONHDR)
- assert(r.myjson['output']['max_osd'] == saved_maxosd)
-
- osd_uuid=uuid.uuid1()
- r = expect('osd/create?uuid={0}'.format(osd_uuid), 'PUT', 200, 'json', JSONHDR)
- assert('osdid' in r.myjson['output'])
- osdid = r.myjson['output']['osdid']
- expect('osd/lost?id={0}'.format(osdid), 'PUT', 400, '')
- expect('osd/lost?id={0}&sure=--yes-i-really-mean-it'.format(osdid),
- 'PUT', 200, 'json', JSONHDR)
- expect('osd/rm?ids={0}'.format(osdid), 'PUT', 200, '')
- r = expect('osd/ls', 'GET', 200, 'json', JSONHDR)
- assert(isinstance(r.myjson['output'], list))
- r = expect('osd/ls', 'GET', 200, 'xml', XMLHDR)
- assert(r.tree.find('output/osds/osd') is not None)
-
- expect('osd/pause', 'PUT', 200, '')
- r = expect('osd/dump', 'GET', 200, 'json', JSONHDR)
- assert('pauserd,pausewr' in r.myjson['output']['flags'])
- expect('osd/unpause', 'PUT', 200, '')
- r = expect('osd/dump', 'GET', 200, 'json', JSONHDR)
- assert('pauserd,pausewr' not in r.myjson['output']['flags'])
-
- r = expect('osd/tree', 'GET', 200, 'json', JSONHDR)
- assert('nodes' in r.myjson['output'])
- r = expect('osd/tree', 'GET', 200, 'xml', XMLHDR)
- assert(r.tree.find('output/tree/nodes') is not None)
-
- expect('osd/pool/create?pool=data2&pg_num=10', 'PUT', 200, '')
- r = expect('osd/lspools', 'GET', 200, 'json', JSONHDR)
- assert([p for p in r.myjson['output'] if p['poolname'] == 'data2'])
- expect('osd/pool/rename?srcpool=data2&destpool=data3', 'PUT', 200, '')
- r = expect('osd/lspools', 'GET', 200, 'json', JSONHDR)
- assert([p for p in r.myjson['output'] if p['poolname'] == 'data3'])
- expect('osd/pool/mksnap?pool=data3&snap=datasnap', 'PUT', 200, '')
- r = subprocess.call('rados -p data3 lssnap | grep -q datasnap', shell=True)
- assert(r == 0)
- expect('osd/pool/rmsnap?pool=data3&snap=datasnap', 'PUT', 200, '')
- expect('osd/pool/delete?pool=data3', 'PUT', 400, '')
- expect('osd/pool/delete?pool=data3&pool2=data3&sure=--yes-i-really-really-mean-it', 'PUT', 200, '')
-
- r = expect('osd/stat', 'GET', 200, 'json', JSONHDR)
- assert('num_up_osds' in r.myjson['output'])
- r = expect('osd/stat', 'GET', 200, 'xml', XMLHDR)
- assert(r.tree.find('output/osdmap/num_up_osds') is not None)
-
- r = expect('osd/ls', 'GET', 200, 'json', JSONHDR)
- for osdid in r.myjson['output']:
- expect('tell/osd.{0}/version'.format(osdid), 'GET', 200, '')
-
- expect('pg/debug?debugop=unfound_objects_exist', 'GET', 200, '')
- expect('pg/debug?debugop=degraded_pgs_exist', 'GET', 200, '')
- expect('pg/deep-scrub?pgid=1.0', 'PUT', 200, '')
- r = expect('pg/dump', 'GET', 200, 'json', JSONHDR)
- assert('pg_stats_sum' in r.myjson['output'])
- r = expect('pg/dump', 'GET', 200, 'xml', XMLHDR)
- assert(r.tree.find('output/pg_map/pg_stats_sum') is not None)
-
- expect('pg/dump_json', 'GET', 200, 'json', JSONHDR)
- expect('pg/dump_pools_json', 'GET', 200, 'json', JSONHDR)
- expect('pg/dump_stuck?stuckops=inactive', 'GET', 200, '')
- expect('pg/dump_stuck?stuckops=unclean', 'GET', 200, '')
- expect('pg/dump_stuck?stuckops=stale', 'GET', 200, '')
-
- r = expect('pg/getmap', 'GET', 200, '')
- assert(len(r.text) != 0)
-
- r = expect('pg/map?pgid=1.0', 'GET', 200, 'json', JSONHDR)
- assert('acting' in r.myjson['output'])
- assert(r.myjson['output']['pgid'] == '1.0')
- r = expect('pg/map?pgid=1.0', 'GET', 200, 'xml', XMLHDR)
- assert(r.tree.find('output/pg_map/acting') is not None)
- assert(r.tree.find('output/pg_map/pgid').text == '1.0')
-
- expect('pg/repair?pgid=1.0', 'PUT', 200, '')
- expect('pg/scrub?pgid=1.0', 'PUT', 200, '')
-
- expect('osd/set-full-ratio?ratio=0.90', 'PUT', 200, '')
- r = expect('osd/dump', 'GET', 200, 'json', JSONHDR)
- assert(float(r.myjson['output']['full_ratio']) == 0.90)
- expect('osd/set-full-ratio?ratio=0.95', 'PUT', 200, '')
- expect('osd/set-backfillfull-ratio?ratio=0.88', 'PUT', 200, '')
- r = expect('osd/dump', 'GET', 200, 'json', JSONHDR)
- assert(float(r.myjson['output']['backfillfull_ratio']) == 0.88)
- expect('osd/set-backfillfull-ratio?ratio=0.90', 'PUT', 200, '')
- expect('osd/set-nearfull-ratio?ratio=0.90', 'PUT', 200, '')
- r = expect('osd/dump', 'GET', 200, 'json', JSONHDR)
- assert(float(r.myjson['output']['nearfull_ratio']) == 0.90)
- expect('osd/set-nearfull-ratio?ratio=0.85', 'PUT', 200, '')
-
- r = expect('pg/stat', 'GET', 200, 'json', JSONHDR)
- assert('num_pgs' in r.myjson['output'])
- r = expect('pg/stat', 'GET', 200, 'xml', XMLHDR)
- assert(r.tree.find('output/pg_summary/num_pgs') is not None)
-
- expect('tell/1.0/query', 'GET', 200, 'json', JSONHDR)
- expect('quorum?quorumcmd=enter', 'PUT', 200, 'json', JSONHDR)
- expect('quorum?quorumcmd=enter', 'PUT', 200, 'xml', XMLHDR)
- expect('quorum_status', 'GET', 200, 'json', JSONHDR)
- expect('quorum_status', 'GET', 200, 'xml', XMLHDR)
-
- # report's CRC needs to be handled
- # r = expect('report', 'GET', 200, 'json', JSONHDR)
- # assert('osd_stats' in r.myjson['output'])
- # r = expect('report', 'GET', 200, 'xml', XMLHDR)
- # assert(r.tree.find('output/report/osdmap') is not None)
-
- r = expect('status', 'GET', 200, 'json', JSONHDR)
- assert('osdmap' in r.myjson['output'])
- r = expect('status', 'GET', 200, 'xml', XMLHDR)
- assert(r.tree.find('output/status/osdmap') is not None)
-
- r = expect('tell/osd.0/version', 'GET', 200, '')
- assert('ceph version' in r.text)
- expect('tell/osd.999/version', 'GET', 400, '')
- expect('tell/osd.foo/version', 'GET', 400, '')
-
- r = expect('tell/osd.0/dump_pg_recovery_stats', 'GET', 200, '')
- assert('Started' in r.text)
-
- expect('osd/reweight?id=0&weight=0.9', 'PUT', 200, '')
- expect('osd/reweight?id=0&weight=-1', 'PUT', 400, '')
- expect('osd/reweight?id=0&weight=1', 'PUT', 200, '')
-
- for v in ['pg_num', 'pgp_num', 'size', 'min_size',
- 'crush_rule']:
- r = expect('osd/pool/get.json?pool=rbd&var=' + v, 'GET', 200, 'json')
- assert(v in r.myjson['output'])
-
- r = expect('osd/pool/get.json?pool=rbd&var=size', 'GET', 200, 'json')
- assert(r.myjson['output']['size'] >= 2)
-
- expect('osd/pool/set?pool=rbd&var=size&val=3', 'PUT', 200, 'plain')
- r = expect('osd/pool/get.json?pool=rbd&var=size', 'GET', 200, 'json')
- assert(r.myjson['output']['size'] == 3)
-
- expect('osd/pool/set?pool=rbd&var=size&val=2', 'PUT', 200, 'plain')
- r = expect('osd/pool/get.json?pool=rbd&var=size', 'GET', 200, 'json')
- assert(r.myjson['output']['size'] == 2)
-
- r = expect('osd/pool/get.json?pool=rbd&var=crush_rule', 'GET', 200, 'json')
- assert(r.myjson['output']['crush_rule'] == "replicated_rule")
-
- print('OK')
diff --git a/src/ceph/qa/workunits/rest/test_mgr_rest_api.py b/src/ceph/qa/workunits/rest/test_mgr_rest_api.py
deleted file mode 100755
index 7c4335c..0000000
--- a/src/ceph/qa/workunits/rest/test_mgr_rest_api.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#! /usr/bin/env python
-
-import requests
-import time
-import sys
-import json
-
-# Suppress the InsecureRequestWarning emitted when verify=False is used;
-# ignore failures because the warning class is missing on some distros.
-try:
- from requests.packages.urllib3.exceptions import InsecureRequestWarning
- requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
-except Exception:
- pass
-
-if len(sys.argv) < 3:
- print("Usage: %s <url> <admin_key>" % sys.argv[0])
- sys.exit(1)
-
-addr = sys.argv[1]
-auth = ('admin', sys.argv[2])
-headers = {'Content-type': 'application/json'}
-
-request = None
-
-# Create a pool and get its id
-request = requests.post(
- addr + '/pool?wait=yes',
- data=json.dumps({'name': 'supertestfriends', 'pg_num': 128}),
- headers=headers,
- verify=False,
- auth=auth)
-print(request.text)
-request = requests.get(addr + '/pool', verify=False, auth=auth)
-assert(request.json()[-1]['pool_name'] == 'supertestfriends')
-pool_id = request.json()[-1]['pool']
-
-# get a mon name
-request = requests.get(addr + '/mon', verify=False, auth=auth)
-firstmon = request.json()[0]['name']
-print('first mon is %s' % firstmon)
-
-# get a server name
-request = requests.get(addr + '/osd', verify=False, auth=auth)
-aserver = request.json()[0]['server']
-print('a server is %s' % aserver)
-
-
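-# Each screenplay entry is (HTTP method, endpoint, JSON body); a
-# ('sleep', <seconds>, {}) entry pauses instead of issuing a request.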
-screenplay = [
- ('get', '/', {}),
- ('get', '/config/cluster', {}),
- ('get', '/crush/rule', {}),
- ('get', '/doc', {}),
- ('get', '/mon', {}),
- ('get', '/mon/' + firstmon, {}),
- ('get', '/osd', {}),
- ('get', '/osd/0', {}),
- ('get', '/osd/0/command', {}),
- ('get', '/pool/1', {}),
- ('get', '/server', {}),
- ('get', '/server/' + aserver, {}),
- ('post', '/osd/0/command', {'command': 'scrub'}),
- ('post', '/pool?wait=1', {'name': 'supertestfriends', 'pg_num': 128}),
- ('patch', '/osd/0', {'in': False}),
- ('patch', '/config/osd', {'pause': True}),
- ('get', '/config/osd', {}),
- ('patch', '/pool/' + str(pool_id), {'size': 2}),
- ('patch', '/config/osd', {'pause': False}),
- ('patch', '/osd/0', {'in': True}),
- ('get', '/pool', {}),
- ('delete', '/pool/' + str(pool_id) + '?wait=1', {}),
- ('get', '/request?page=0', {}),
- ('delete', '/request', {}),
- ('get', '/request', {}),
-]
-
-for method, endpoint, args in screenplay:
- if method == 'sleep':
- time.sleep(endpoint)
- continue
- url = addr + endpoint
- print("URL = " + url)
- request = getattr(requests, method)(
- url,
- data=json.dumps(args),
- headers=headers,
- verify=False,
- auth=auth)
- print(request.text)
- if request.status_code != 200 or 'error' in request.json():
- print('ERROR: %s request for URL "%s" failed' % (method, url))
- sys.exit(1)
-
-print('OK')
diff --git a/src/ceph/qa/workunits/restart/test-backtraces.py b/src/ceph/qa/workunits/restart/test-backtraces.py
deleted file mode 100755
index 2fa67a2..0000000
--- a/src/ceph/qa/workunits/restart/test-backtraces.py
+++ /dev/null
@@ -1,262 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import print_function
-
-import subprocess
-import json
-import os
-import time
-import sys
-
-if sys.version_info[0] == 2:
- from cStringIO import StringIO
-
- range = xrange
-
-elif sys.version_info[0] == 3:
- from io import StringIO
-
- range = range
-
-import rados as rados
-import cephfs as cephfs
-
-prefix='testbt'
-
-def get_name(b, i, j):
- c = '{pre}.{pid}.{i}.{j}'.format(pre=prefix, pid=os.getpid(), i=i, j=j)
- return c, b + '/' + c
-
-def mkdir(ceph, d):
- print("mkdir {d}".format(d=d), file=sys.stderr)
- ceph.mkdir(d, 0o755)
- return ceph.stat(d)['st_ino']
-
-def create(ceph, f):
- print("creating {f}".format(f=f), file=sys.stderr)
- fd = ceph.open(f, os.O_CREAT | os.O_RDWR, 0o644)
- ceph.close(fd)
- return ceph.stat(f)['st_ino']
-
-def set_mds_config_param(ceph, param):
-    with open('/dev/null', 'wb') as devnull:
- confarg = ''
- if conf != '':
- confarg = '-c {c}'.format(c=conf)
- r = subprocess.call("ceph {ca} mds tell a injectargs '{p}'".format(ca=confarg, p=param), shell=True, stdout=devnull)
- if r != 0:
- raise Exception
-
-import ConfigParser
-import contextlib
-
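-# NOTE: ConfigParser/contextlib and the two helpers below are not referenced
-# anywhere else in this script; they appear to be leftovers from an earlier
-# revision.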
-class _TrimIndentFile(object):
- def __init__(self, fp):
- self.fp = fp
-
- def readline(self):
- line = self.fp.readline()
- return line.lstrip(' \t')
-
-def _optionxform(s):
- s = s.replace('_', ' ')
- s = '_'.join(s.split())
- return s
-
-def conf_set_kill_mds(location, killnum):
- print('setting mds kill config option for {l}.{k}'.format(l=location, k=killnum), file=sys.stderr)
- print("restart mds a mds_kill_{l}_at {k}".format(l=location, k=killnum))
- sys.stdout.flush()
-    # wait for the harness to tell us the mds has been restarted
-    for l in sys.stdin:
-        if l.strip() == 'restarted':
-            break
-
-def flush(ceph, testnum):
- print('flushing {t}'.format(t=testnum), file=sys.stderr)
- set_mds_config_param(ceph, '--mds_log_max_segments 1')
-
- for i in range(1, 500):
- f = '{p}.{pid}.{t}.{i}'.format(p=prefix, pid=os.getpid(), t=testnum, i=i)
- print('flushing with create {f}'.format(f=f), file=sys.stderr)
- fd = ceph.open(f, os.O_CREAT | os.O_RDWR, 0o644)
- ceph.close(fd)
- ceph.unlink(f)
-
- print('flush doing shutdown', file=sys.stderr)
- ceph.shutdown()
- print('flush reinitializing ceph', file=sys.stderr)
- ceph = cephfs.LibCephFS(conffile=conf)
- print('flush doing mount', file=sys.stderr)
- ceph.mount()
- return ceph
-
-def kill_mds(ceph, location, killnum):
- print('killing mds: {l}.{k}'.format(l=location, k=killnum), file=sys.stderr)
- set_mds_config_param(ceph, '--mds_kill_{l}_at {k}'.format(l=location, k=killnum))
-
-def wait_for_mds(ceph):
- # wait for restart
- while True:
- confarg = ''
- if conf != '':
- confarg = '-c {c}'.format(c=conf)
- r = subprocess.check_output("ceph {ca} mds stat".format(ca=confarg), shell=True).decode()
-        if 'a=up:active' in r:
- break
- time.sleep(1)
-
-def decode(value):
-
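-    # Round-trip the raw xattr through ceph-dencoder to get a JSON dump of
-    # the inode_backtrace_t structure.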
- tmpfile = '/tmp/{p}.{pid}'.format(p=prefix, pid=os.getpid())
-    with open(tmpfile, 'wb') as f:
- f.write(value)
-
- p = subprocess.Popen(
- [
- 'ceph-dencoder',
- 'import',
- tmpfile,
- 'type',
- 'inode_backtrace_t',
- 'decode',
- 'dump_json',
- ],
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- )
-    # communicate() feeds stdin and closes it for us
-    (stdout, _) = p.communicate(input=value)
- if p.returncode != 0:
- raise Exception
- os.remove(tmpfile)
- return json.loads(stdout)
-
-class VerifyFailure(Exception):
- pass
-
-def verify(rados_ioctx, ino, values, pool):
- print('getting parent attr for ino: %lx.00000000' % ino, file=sys.stderr)
- savede = None
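-    # the backtrace xattr is written out asynchronously; retry up to 20
-    # times, sleeping 10s after each miss, before giving up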
- for i in range(1, 20):
- try:
- savede = None
- binbt = rados_ioctx.get_xattr('%lx.00000000' % ino, 'parent')
- except rados.ObjectNotFound as e:
- # wait for a bit to let segments get flushed out
- savede = e
- time.sleep(10)
- if savede:
- raise savede
-
- bt = decode(binbt)
-
- if bt['ino'] != ino:
-        raise VerifyFailure('inode mismatch: {bi} != {ino}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
-            bi=bt['ino'], ino=ino, bt=bt, i=ino, v=values))
- ind = 0
- for (n, i) in values:
- if bt['ancestors'][ind]['dirino'] != i:
- raise VerifyFailure('ancestor dirino mismatch: {b} != {ind}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
- b=bt['ancestors'][ind]['dirino'], ind=i, bt=bt, i=ino, v=values))
- if bt['ancestors'][ind]['dname'] != n:
- raise VerifyFailure('ancestor dname mismatch: {b} != {n}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
- b=bt['ancestors'][ind]['dname'], n=n, bt=bt, i=ino, v=values))
- ind += 1
-
- if bt['pool'] != pool:
- raise VerifyFailure('pool mismatch: {btp} != {p}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
- btp=bt['pool'], p=pool, bt=bt, i=ino, v=values))
-
-def make_abc(ceph, rooti, i):
- expected_bt = []
- c, d = get_name("/", i, 0)
- expected_bt = [(c, rooti)] + expected_bt
- di = mkdir(ceph, d)
- c, d = get_name(d, i, 1)
- expected_bt = [(c, di)] + expected_bt
- di = mkdir(ceph, d)
- c, f = get_name(d, i, 2)
- fi = create(ceph, f)
- expected_bt = [(c, di)] + expected_bt
- return fi, expected_bt
-
-test = -1
-if len(sys.argv) > 1:
- test = int(sys.argv[1])
-
-conf = ''
-if len(sys.argv) > 2:
- conf = sys.argv[2]
-
-radosobj = rados.Rados(conffile=conf)
-radosobj.connect()
-ioctx = radosobj.open_ioctx('data')
-
-ceph = cephfs.LibCephFS(conffile=conf)
-ceph.mount()
-
-rooti = ceph.stat('/')['st_ino']
-
-# create /a/b/c
-# flush
-# verify
-
-i = 0
-if test < 0 or test == i:
- print('Running test %d: basic verify' % i, file=sys.stderr)
- ino, expected_bt = make_abc(ceph, rooti, i)
- ceph = flush(ceph, i)
- verify(ioctx, ino, expected_bt, 0)
-
-i += 1
-
-# kill-mds-at-openc-1
-# create /a/b/c
-# restart-mds
-# flush
-# verify
-
-if test < 0 or test == i:
- print('Running test %d: kill openc' % i, file=sys.stderr)
- print("restart mds a")
- sys.stdout.flush()
- kill_mds(ceph, 'openc', 1)
- ino, expected_bt = make_abc(ceph, rooti, i)
- ceph = flush(ceph, i)
- verify(ioctx, ino, expected_bt, 0)
-
-i += 1
-
-# kill-mds-at-openc-1
-# create /a/b/c
-# restart-mds with kill-mds-at-replay-1
-# restart-mds
-# flush
-# verify
-if test < 0 or test == i:
- print('Running test %d: kill openc/replay' % i, file=sys.stderr)
- # these are reversed because we want to prepare the config
- conf_set_kill_mds('journal_replay', 1)
- kill_mds(ceph, 'openc', 1)
- print("restart mds a")
- sys.stdout.flush()
- ino, expected_bt = make_abc(ceph, rooti, i)
- ceph = flush(ceph, i)
- verify(ioctx, ino, expected_bt, 0)
-
-i += 1
-
-ioctx.close()
-radosobj.shutdown()
-ceph.shutdown()
-
-print("done")
-sys.stdout.flush()
diff --git a/src/ceph/qa/workunits/rgw/run-s3tests.sh b/src/ceph/qa/workunits/rgw/run-s3tests.sh
deleted file mode 100755
index 31c091e..0000000
--- a/src/ceph/qa/workunits/rgw/run-s3tests.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash -ex
-
-# run s3-tests from current directory. assume working
-# ceph environment (radosgw-admin in path) and rgw on localhost:8000
-# (the vstart default).
-
-branch=${1:-master}
-port=${2:-8000}  # vstart's default
-
-##
-
-# root_path: directory containing this script (assumes a "build" dir may
-# sit next to the source checkout)
-root_path=`dirname $0`
-
-if [ -e CMakeCache.txt ]; then
- BIN_PATH=$PWD/bin
-elif [ -e $root_path/../build/CMakeCache.txt ]; then
- cd $root_path/../build
- BIN_PATH=$PWD/bin
-fi
-PATH=$PATH:$BIN_PATH
-
-dir=tmp.s3-tests.$$
-
-# clone and bootstrap
-mkdir $dir
-cd $dir
-git clone https://github.com/ceph/s3-tests
-cd s3-tests
-git checkout ceph-$branch
-VIRTUALENV_PYTHON=/usr/bin/python2 ./bootstrap
-cd ../..
-
-# users
-akey1=access1
-skey1=secret1
-radosgw-admin user create --uid=s3test1 --display-name='tester1' \
- --access-key=$akey1 --secret=$skey1 --email=tester1@ceph.com
-
-akey2=access2
-skey2=secret2
-radosgw-admin user create --uid=s3test2 --display-name='tester2' \
- --access-key=$akey2 --secret=$skey2 --email=tester2@ceph.com
-
-cat <<EOF > s3.conf
-[DEFAULT]
-## replace with e.g. "localhost" to run against local software
-host = 127.0.0.1
-## uncomment the port to use something other than 80
-port = $port
-## say "no" to disable TLS
-is_secure = no
-[fixtures]
-## all the buckets created will start with this prefix;
-## {random} will be filled with random characters to pad
-## the prefix to 30 characters long, and avoid collisions
-bucket prefix = s3testbucket-{random}-
-[s3 main]
-## the tests assume two accounts are defined, "main" and "alt".
-## user_id is a 64-character hexstring
-user_id = s3test1
-## display name typically looks more like a unix login, "jdoe" etc
-display_name = tester1
-## replace these with your access keys
-access_key = $akey1
-secret_key = $skey1
-email = tester1@ceph.com
-[s3 alt]
-## another user account, used for ACL-related tests
-user_id = s3test2
-display_name = tester2
-## the "alt" user needs to have email set, too
-email = tester2@ceph.com
-access_key = $akey2
-secret_key = $skey2
-EOF
-
-S3TEST_CONF=`pwd`/s3.conf $dir/s3-tests/virtualenv/bin/nosetests -a '!fails_on_rgw' -v
-
-rm -rf $dir
-
-echo OK.
-
diff --git a/src/ceph/qa/workunits/rgw/s3_bucket_quota.pl b/src/ceph/qa/workunits/rgw/s3_bucket_quota.pl
deleted file mode 100755
index 6a4a1a4..0000000
--- a/src/ceph/qa/workunits/rgw/s3_bucket_quota.pl
+++ /dev/null
@@ -1,393 +0,0 @@
-#! /usr/bin/perl
-
-=head1 NAME
-
-s3_bucket_quota.pl - Script to test the rgw bucket quota functionality using s3 interface.
-
-=head1 SYNOPSIS
-
-Use:
- perl s3_bucket_quota.pl [--help]
-
-Examples:
- perl s3_bucket_quota.pl
- or
- perl s3_bucket_quota.pl --help
-
-=head1 DESCRIPTION
-
-This script tests the rgw bucket quota functionality using the s3 interface
-and reports the test results.
-
-=head1 ARGUMENTS
-
-s3_bucket_quota.pl takes the following arguments:
- --help
- (optional) Displays the usage message.
-
-=cut
-
-use Amazon::S3;
-use Data::Dumper;
-#use strict;
-use IO::File;
-use Getopt::Long;
-use Digest::MD5;
-use Pod::Usage();
-use FindBin;
-use lib $FindBin::Bin;
-use s3_utilities;
-use Net::Domain qw(hostfqdn);
-
-my $help;
-
-Getopt::Long::GetOptions(
- 'help' => \$help
-);
-Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
-
-#== local variables ===
-our $mytestfilename;
-my $mytestfilename1;
-my $logmsg;
-my $kruft;
-my $s3;
-my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
-my $port = $ENV{RGW_PORT}||7280;
-our $hostname = "$hostdom:$port";
-our $testfileloc;
-my $rgw_user = "qa_user";
-
-# Function that deletes the user $rgw_user and writes to the logfile.
-sub delete_user
-{
- my $cmd = "$radosgw_admin user rm --uid=$rgw_user";
- my $cmd_op = get_command_output($cmd);
- if ($cmd_op !~ /aborting/){
- print "user $rgw_user deleted\n";
- } else {
- print "user $rgw_user NOT deleted\n";
- return 1;
- }
- return 0;
-}
-
-sub quota_set_max_size {
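-    # radosgw-admin prints nothing on success, so empty command output
-    # (!~ /./) is treated as success throughout these helpers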
- my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=1048576000`;
- if ($set_quota !~ /./){
- print "quota set for the bucket: $bucketname \n";
- } else {
- print "quota set failed for the bucket: $bucketname \n";
- exit 1;
- }
- return 0;
-}
-
-sub quota_set_max_size_zero {
- run_s3($rgw_user);
- my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=0`;
- if ($set_quota !~ /./){
- pass ("quota set for the bucket: $bucketname with max size as zero\n");
- } else {
- fail ("quota set with max size 0 failed for the bucket: $bucketname \n");
- }
- delete_bucket();
-}
-
-sub quota_set_max_objs_zero {
- run_s3($rgw_user);
- my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=0`;
- if ($set_quota !~ /./){
- pass ("quota set for the bucket: $bucketname with max objects as zero\n");
- } else {
- fail ("quota set with max objects 0 failed for the bucket: $bucketname \n");
- }
- delete_bucket();
-}
-
-sub quota_set_neg_size {
- run_s3($rgw_user);
- my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=-1`;
- if ($set_quota !~ /./){
- pass ("quota set for the bucket: $bucketname with max size -1\n");
- } else {
- fail ("quota set failed for the bucket: $bucketname with max size -1 \n");
- }
- delete_bucket();
-}
-
-sub quota_set_neg_objs {
- run_s3($rgw_user);
- my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=-1`;
- if ($set_quota !~ /./){
- pass ("quota set for the bucket: $bucketname max objects -1 \n");
- } else {
- fail ("quota set failed for the bucket: $bucketname \n with max objects -1");
- }
- delete_bucket();
-}
-
-sub quota_set_user_objs {
- my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=bucket`;
- my $set_quota1 = `$radosgw_admin quota set --bucket=$bucketname --max-objects=1`;
- if ($set_quota1 !~ /./){
- print "bucket quota max_objs set for the given user: $bucketname \n";
- } else {
- print "bucket quota max_objs set failed for the given user: $bucketname \n";
- exit 1;
- }
- return 0;
-}
-
-sub quota_set_user_size {
- my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=bucket`;
- my $set_quota1 = `$radosgw_admin quota set --bucket=$bucketname --max-size=1048576000`;
- if ($set_quota1 !~ /./){
- print "bucket quota max size set for the given user: $bucketname \n";
- } else {
- print "bucket quota max size set failed for the user: $bucketname \n";
- exit 1;
- }
- return 0;
-}
-
-sub quota_set_max_obj {
- # set max objects
- my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=1`;
- if ($set_quota !~ /./){
- print "quota set for the bucket: $bucketname \n";
- } else {
- print "quota set failed for the bucket: $bucketname \n";
- exit 1;
- }
- return 0;
-}
-
-sub quota_enable {
- my $en_quota = `$radosgw_admin quota enable --bucket=$bucketname`;
- if ($en_quota !~ /./){
- print "quota enabled for the bucket: $bucketname \n";
- } else {
- print "quota enable failed for the bucket: $bucketname \n";
- exit 1;
- }
- return 0;
-}
-
-sub quota_disable {
- my $dis_quota = `$radosgw_admin quota disable --bucket=$bucketname`;
- if ($dis_quota !~ /./){
- print "quota disabled for the bucket: $bucketname \n";
- } else {
- print "quota disable failed for the bucket: $bucketname \n";
- exit 1;
- }
- return 0;
-}
-
-# upload a file to the bucket
-sub upload_file {
- print "adding file to bucket: $mytestfilename\n";
- ($bucket->add_key_filename( $mytestfilename, $testfileloc,
- { content_type => 'text/plain', },
- ) and (print "upload file successful\n" ) and return 0 ) or (return 1);
-}
-
-# delete the bucket
-sub delete_bucket {
- #($bucket->delete_key($mytestfilename1) and print "delete keys on bucket succeeded second time\n" ) or die $s3->err . "delete keys on bucket failed second time\n" . $s3->errstr;
- ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
-}
-
-# set bucket quota with max_objects and verify
-sub test_max_objects {
- my $size = '10Mb';
- create_file($size);
- run_s3($rgw_user);
- quota_set_max_obj();
- quota_enable();
- my $ret_value = upload_file();
- if ($ret_value == 0){
- pass ( "Test max objects passed" );
- } else {
- fail ( "Test max objects failed" );
- }
- delete_user();
- delete_keys($mytestfilename);
- delete_bucket();
-}
-
-# Set bucket quota for specific user and ensure max objects set for the user is validated
-sub test_max_objects_per_user{
- my $size = '10Mb';
- create_file($size);
- run_s3($rgw_user);
- quota_set_user_objs();
- quota_enable();
- my $ret_value = upload_file();
- if ($ret_value == 0){
- pass ( "Test max objects for the given user passed" );
- } else {
- fail ( "Test max objects for the given user failed" );
- }
- delete_user();
- delete_keys($mytestfilename);
- delete_bucket();
-}
-
-# set bucket quota with max_objects and try to exceed the max_objects and verify
-sub test_beyond_max_objs {
- my $size = "10Mb";
- create_file($size);
- run_s3($rgw_user);
- quota_set_max_obj();
- quota_enable();
- upload_file();
- my $ret_value = readd_file();
- if ($ret_value == 1){
- pass ( "set max objects and test beyond max objects passed" );
- } else {
- fail ( "set max objects and test beyond max objects failed" );
- }
- delete_user();
- delete_keys($mytestfilename);
- delete_bucket();
-}
-
-# set bucket quota for a user with max_objects and try to exceed the max_objects and verify
-sub test_beyond_max_objs_user {
- my $size = "10Mb";
- create_file($size);
- run_s3($rgw_user);
- quota_set_user_objs();
- quota_enable();
- upload_file();
- my $ret_value = readd_file();
- if ($ret_value == 1){
- pass ( "set max objects for a given user and test beyond max objects passed" );
- } else {
- fail ( "set max objects for a given user and test beyond max objects failed" );
- }
- delete_user();
- delete_keys($mytestfilename);
- delete_bucket();
-}
-
-# set bucket quota for max size and ensure it is validated
-sub test_quota_size {
- my $size = "2Gb";
- create_file($size);
- run_s3($rgw_user);
- quota_set_max_size();
- quota_enable();
- my $ret_value = upload_file();
- if ($ret_value == 1) {
- pass ( "set max size and ensure that objects upload beyond max size is not entertained" );
- my $retdel = delete_keys($mytestfilename);
- if ($retdel == 0) {
- print "delete objects successful \n";
- my $size1 = "1Gb";
- create_file($size1);
- my $ret_val1 = upload_file();
- if ($ret_val1 == 0) {
- pass ( "set max size and ensure that the max size is in effect" );
- } else {
- fail ( "set max size and ensure the max size takes effect" );
- }
- }
- } else {
- fail ( "set max size and ensure that objects beyond max size is not allowed" );
- }
- delete_user();
- delete_keys($mytestfilename);
- delete_bucket();
-}
-
-# set bucket quota for max size for a given user and ensure it is validated
-sub test_quota_size_user {
- my $size = "2Gb";
- create_file($size);
- run_s3($rgw_user);
- quota_set_user_size();
- quota_enable();
- my $ret_value = upload_file();
- if ($ret_value == 1) {
- pass ( "set max size for a given user and ensure that objects upload beyond max size is not entertained" );
- my $retdel = delete_keys($mytestfilename);
- if ($retdel == 0) {
- print "delete objects successful \n";
- my $size1 = "1Gb";
- create_file($size1);
- my $ret_val1 = upload_file();
- if ($ret_val1 == 0) {
- pass ( "set max size for a given user and ensure that the max size is in effect" );
- } else {
- fail ( "set max size for a given user and ensure the max size takes effect" );
- }
- }
- } else {
- fail ( "set max size for a given user and ensure that objects beyond max size is not allowed" );
- }
- delete_user();
- delete_keys($mytestfilename);
- delete_bucket();
-}
-
-# set bucket quota size but disable quota and verify
-sub test_quota_size_disabled {
- my $size = "2Gb";
- create_file($size);
- run_s3($rgw_user);
- quota_set_max_size();
- quota_disable();
- my $ret_value = upload_file();
- if ($ret_value == 0) {
- pass ( "bucket quota size doesnt take effect when quota is disabled" );
- } else {
- fail ( "bucket quota size doesnt take effect when quota is disabled" );
- }
- delete_user();
- delete_keys($mytestfilename);
- delete_bucket();
-}
-
-# set bucket quota size for a given user but disable quota and verify
-sub test_quota_size_disabled_user {
- my $size = "2Gb";
- create_file($size);
- run_s3($rgw_user);
- quota_set_user_size();
- quota_disable();
- my $ret_value = upload_file();
- if ($ret_value == 0) {
- pass ( "bucket quota size for a given user doesnt take effect when quota is disabled" );
- } else {
- fail ( "bucket quota size for a given user doesnt take effect when quota is disabled" );
- }
- delete_user();
- delete_keys($mytestfilename);
- delete_bucket();
-}
-
-# set bucket quota for specified user and verify
-
-#== Main starts here===
-ceph_os_info();
-test_max_objects();
-test_max_objects_per_user();
-test_beyond_max_objs();
-test_beyond_max_objs_user();
-quota_set_max_size_zero();
-quota_set_max_objs_zero();
-quota_set_neg_objs();
-quota_set_neg_size();
-test_quota_size();
-test_quota_size_user();
-test_quota_size_disabled();
-test_quota_size_disabled_user();
-
-print "OK";
diff --git a/src/ceph/qa/workunits/rgw/s3_multipart_upload.pl b/src/ceph/qa/workunits/rgw/s3_multipart_upload.pl
deleted file mode 100755
index 5bf7af2..0000000
--- a/src/ceph/qa/workunits/rgw/s3_multipart_upload.pl
+++ /dev/null
@@ -1,151 +0,0 @@
-#! /usr/bin/perl
-
-=head1 NAME
-
-s3_multipart_upload.pl - Script to test rgw multipart upload using s3 interface.
-
-=head1 SYNOPSIS
-
-Use:
- perl s3_multipart_upload.pl [--help]
-
-Examples:
- perl s3_multipart_upload.pl
- or
- perl s3_multipart_upload.pl --help
-
-=head1 DESCRIPTION
-
-This script tests rgw multipart upload followed by a download, verifies the
-checksums using the s3 interface, and reports the test results.
-
-=head1 ARGUMENTS
-
-s3_multipart_upload.pl takes the following arguments:
- --help
- (optional) Displays the usage message.
-
-=cut
-
-use Amazon::S3;
-use Data::Dumper;
-use IO::File;
-use Getopt::Long;
-use Digest::MD5;
-use Pod::Usage();
-use FindBin;
-use lib $FindBin::Bin;
-use s3_utilities;
-use Net::Domain qw(hostfqdn);
-
-my $help;
-
-Getopt::Long::GetOptions(
- 'help' => \$help
-);
-Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
-
-#== local variables ===
-my $s3;
-my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
-my $port = $ENV{RGW_PORT}||7280;
-our $hostname = "$hostdom:$port";
-our $testfileloc;
-our $mytestfilename;
-
-# upload a file to the bucket
-sub upload_file {
- my ($fsize, $i) = @_;
- create_file($fsize, $i);
- print "adding file to bucket $bucketname: $mytestfilename\n";
- ($bucket->add_key_filename( $mytestfilename, $testfileloc,
- { content_type => 'text/plain', },
- ) and (print "upload file successful\n" ) and return 0 ) or (print "upload failed\n" and return 1);
-}
-
-# delete the bucket
-sub delete_bucket {
- ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
-}
-
-# Function to perform multipart upload of given file size to the user bucket via s3 interface
-sub multipart_upload
-{
- my ($size, $parts) = @_;
- # generate random user every time
- my $user = rand();
-    # Divide the upload into equal parts; the per-part file size is picked
-    # from the table below (only 10- and 100-part runs are supported)
-    my $fsize1;
- run_s3($user);
- if ($parts == 10){
- $fsize1 = '100Mb';
- } elsif ($parts == 100){
- $fsize1 = '10Mb';
- }
- foreach my $i(1..$parts){
- print "uploading file - part $i \n";
- upload_file($fsize1, $i);
- }
- fetch_file_from_bucket($fsize1, $parts);
- compare_cksum($fsize1, $parts);
- purge_data($user);
-}
-
-# Function to download the files from bucket to verify there is no data corruption
-sub fetch_file_from_bucket
-{
- # fetch file from the bucket
- my ($fsize, $parts) = @_;
- foreach my $i(1..$parts){
- my $src_file = "$fsize.$i";
- my $dest_file = "/tmp/downloadfile.$i";
- print
- "Downloading $src_file from bucket to $dest_file \n";
- $response =
- $bucket->get_key_filename( $src_file, GET,
- $dest_file )
- or die $s3->err . ": " . $s3->errstr;
- }
-}
-
-# Compare the source file with destination file and verify checksum to ensure
-# the files are not corrupted
-sub compare_cksum
-{
- my ($fsize, $parts)=@_;
- my $md5 = Digest::MD5->new;
- my $flag = 0;
- foreach my $i (1..$parts){
- my $src_file = "/tmp/"."$fsize".".$i";
- my $dest_file = "/tmp/downloadfile".".$i";
- open( FILE, $src_file )
- or die "Error: Could not open $src_file for MD5 checksum...";
- open( DLFILE, $dest_file )
- or die "Error: Could not open $dest_file for MD5 checksum.";
- binmode(FILE);
- binmode(DLFILE);
- my $md5sum = $md5->addfile(*FILE)->hexdigest;
- my $md5sumdl = $md5->addfile(*DLFILE)->hexdigest;
- close FILE;
- close DLFILE;
- # compare the checksums
- if ( $md5sum eq $md5sumdl ) {
- $flag++;
- }
- }
- if ($flag == $parts){
- pass("checksum verification for multipart upload passed" );
- }else{
- fail("checksum verification for multipart upload failed" );
- }
-}
-
-#== Main starts here===
-ceph_os_info();
-check();
-# The following test runs multi part upload of file size 1Gb in 10 parts
-multipart_upload('1048576000', 10);
-# The following test runs multipart upload of 1 Gb file in 100 parts
-multipart_upload('1048576000', 100);
-print "OK";
diff --git a/src/ceph/qa/workunits/rgw/s3_user_quota.pl b/src/ceph/qa/workunits/rgw/s3_user_quota.pl
deleted file mode 100755
index fbda89a..0000000
--- a/src/ceph/qa/workunits/rgw/s3_user_quota.pl
+++ /dev/null
@@ -1,191 +0,0 @@
-#! /usr/bin/perl
-
-=head1 NAME
-
-s3_user_quota.pl - Script to test the rgw user quota functionality using s3 interface.
-
-=head1 SYNOPSIS
-
-Use:
- perl s3_user_quota.pl [--help]
-
-Examples:
- perl s3_user_quota.pl
- or
- perl s3_user_quota.pl --help
-
-=head1 DESCRIPTION
-
-This script tests the rgw user quota functionality using the s3 interface
-and reports the test results.
-
-=head1 ARGUMENTS
-
-s3_user_quota.pl takes the following arguments:
- --help
- (optional) Displays the usage message.
-
-=cut
-
-use Amazon::S3;
-use Data::Dumper;
-use IO::File;
-use Getopt::Long;
-use Digest::MD5;
-use Pod::Usage();
-use FindBin;
-use lib $FindBin::Bin;
-use s3_utilities;
-use Net::Domain qw(hostfqdn);
-
-my $help;
-
-Getopt::Long::GetOptions(
- 'help' => \$help
-);
-Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
-
-#== local variables ===
-our $mytestfilename;
-my $mytestfilename1;
-my $logmsg;
-my $kruft;
-my $s3;
-my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
-my $port = $ENV{RGW_PORT}||7280;
-our $hostname = "$hostdom:$port";
-our $testfileloc;
-our $cnt;
-
-sub quota_set_max_size_per_user {
- my ($maxsize, $size1,$rgw_user) = @_;
- run_s3($rgw_user);
- my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-size=$maxsize`;
- if (($set_quota !~ /./)&&($maxsize == 0)){
- my $ret = test_max_objs($size1, $rgw_user);
- if ($ret == 1){
- pass("quota set for user: $rgw_user with max_size=$maxsize passed" );
- }else {
- fail("quota set for user: $rgw_user with max_size=$maxsize failed" );
- }
- } elsif (($set_quota !~ /./) && ($maxsize != 0)) {
- my $ret = test_max_objs($size1, $rgw_user);
- if ($ret == 0){
- pass("quota set for user: $rgw_user with max_size=$maxsize passed" );
- }else {
- fail("quota set for user: $rgw_user with max_size=$maxsize failed" );
- }
- }
- delete_keys($mytestfilename);
- purge_data($rgw_user);
- return 0;
-}
-
-sub max_size_per_user {
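-    # returns the cumulative count of successful uploads; $cnt is
-    # module-wide state declared with "our" above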
- my ($maxsize, $size1,$rgw_user) = @_;
- run_s3($rgw_user);
- my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-size=$maxsize`;
- if (($set_quota !~ /./) && ($maxsize != 0)) {
- my $ret = test_max_objs($size1, $rgw_user);
- if ($ret == 0){
- $cnt++;
- }
- }
- return $cnt;
-}
-
-sub quota_set_max_obj_per_user {
- # set max objects
- my ($maxobjs, $size1, $rgw_user) = @_;
- run_s3($rgw_user);
- my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-objects=$maxobjs`;
- if (($set_quota !~ /./) && ($maxobjs == 0)){
- my $ret = test_max_objs($size1, $rgw_user);
- if ($ret == 1){
- pass("quota set for user: $rgw_user with max_objects=$maxobjs passed" );
- }else {
- fail("quota set for user: $rgw_user with max_objects=$maxobjs failed" );
- }
- } elsif (($set_quota !~ /./) && ($maxobjs == 1)) {
- my $ret = test_max_objs($size1, $rgw_user);
- if ($ret == 0){
- pass("quota set for user: $rgw_user with max_objects=$maxobjs passed" );
- }else {
- fail("quota set for user: $rgw_user with max_objects=$maxobjs failed" );
- }
- }
- delete_keys($mytestfilename);
- purge_data($rgw_user);
-}
-
-sub quota_enable_user {
- my ($rgw_user) = @_;
- my $en_quota = `$radosgw_admin quota enable --uid=$rgw_user --quota-scope=user`;
- if ($en_quota !~ /./){
- print "quota enabled for the user $rgw_user \n";
- } else {
- print "quota enable failed for the user $rgw_user \n";
- exit 1;
- }
- return 0;
-}
-
-sub quota_disable_user {
-    my ($rgw_user) = @_;
-    my $dis_quota = `$radosgw_admin quota disable --uid=$rgw_user --quota-scope=user`;
- if ($dis_quota !~ /./){
- print "quota disabled for the user $rgw_user \n";
- } else {
- print "quota disable failed for the user $rgw_user \n";
- exit 1;
- }
- return 0;
-}
-
-# upload a file to the bucket
-sub upload_file {
- print "adding file to bucket $bucketname: $mytestfilename\n";
- ($bucket->add_key_filename( $mytestfilename, $testfileloc,
- { content_type => 'text/plain', },
- ) and (print "upload file successful\n" ) and return 0 ) or (return 1);
-}
-
-# delete the bucket
-sub delete_bucket {
- ($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
-}
-
-#Function to upload the given file size to bucket and verify
-sub test_max_objs {
- my ($size, $rgw_user) = @_;
- create_file($size);
- quota_enable_user($rgw_user);
- my $ret_value = upload_file();
- return $ret_value;
-}
-
-# set user quota and ensure it is validated
-sub test_user_quota_max_size{
-    my ($max_buckets,$size, $fsize) = @_;
-    my $usr = rand();
-    my $ret_value;
-    foreach my $i (1..$max_buckets){
-       $ret_value = max_size_per_user($size, $fsize, $usr );
-    }
- if ($ret_value == $max_buckets){
- fail( "user quota max size for $usr failed on $max_buckets buckets" );
- } else {
- pass( "user quota max size for $usr passed on $max_buckets buckets" );
- }
- delete_keys($mytestfilename);
- purge_data($usr);
-}
-
-#== Main starts here===
-ceph_os_info();
-check();
-quota_set_max_obj_per_user('0', '10Mb', 'usr1');
-quota_set_max_obj_per_user('1', '10Mb', 'usr2');
-quota_set_max_size_per_user(0, '10Mb', 'usr1');
-quota_set_max_size_per_user(1048576000, '1Gb', 'usr2');
-test_user_quota_max_size(3,1048576000,'100Mb');
-test_user_quota_max_size(2,1048576000, '1Gb');
-print "OK";
diff --git a/src/ceph/qa/workunits/rgw/s3_utilities.pm b/src/ceph/qa/workunits/rgw/s3_utilities.pm
deleted file mode 100644
index 8492dd3..0000000
--- a/src/ceph/qa/workunits/rgw/s3_utilities.pm
+++ /dev/null
@@ -1,220 +0,0 @@
-# Common subroutines shared by the s3 testing code
-my $sec;
-my $min;
-my $hour;
-my $mon;
-my $year;
-my $mday;
-my $wday;
-my $yday;
-my $isdst;
-my $PASS_CNT = 0;
-my $FAIL_CNT = 0;
-
-our $radosgw_admin = $ENV{RGW_ADMIN}||"sudo radosgw-admin";
-
-# function to get the current time stamp from the test set up
-sub get_timestamp {
-    ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
-    $mon = $mon + 1;    # localtime() months are 0-11
-    if ($mon < 10) { $mon = "0$mon"; }
-    if ($mday < 10) { $mday = "0$mday"; }
-    if ($hour < 10) { $hour = "0$hour"; }
-    if ($min < 10) { $min = "0$min"; }
-    if ($sec < 10) { $sec = "0$sec"; }
- $year=$year+1900;
- return $year . '_' . $mon . '_' . $mday . '_' . $hour . '_' . $min . '_' . $sec;
-}
-
-# Function to check if radosgw is already running
-sub get_status {
- my $service = "radosgw";
- my $cmd = "pgrep $service";
- my $status = get_cmd_op($cmd);
- if ($status =~ /\d+/ ){
- return 0;
- }
- return 1;
-}
-
-# function to execute the command and return output
-sub get_cmd_op
-{
- my $cmd = shift;
- my $excmd = `$cmd`;
- return $excmd;
-}
-
-#Function that executes the CLI commands and returns the output of the command
-sub get_command_output {
- my $cmd_output = shift;
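-    # $test_log is expected to be set by the calling script; if it is unset
-    # the open fails silently and only the command output is returned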
- open( FH, ">>$test_log" );
- print FH "\"$cmd_output\"\n";
- my $exec_cmd = `$cmd_output 2>&1`;
- print FH "$exec_cmd\n";
- close(FH);
- return $exec_cmd;
-}
-
-# Function to get the hostname
-sub get_hostname
-{
- my $cmd = "hostname";
- my $get_host = get_command_output($cmd);
- chomp($get_host);
- return($get_host);
-}
-
-sub pass {
- my ($comment) = @_;
- print "Comment required." unless length $comment;
- chomp $comment;
- print_border2();
- print "Test case: $TC_CNT PASSED - $comment \n";
- print_border2();
- $PASS_CNT++;
-}
-
-sub fail {
- my ($comment) = @_;
- print "Comment required." unless length $comment;
- chomp $comment;
- print_border2();
- print "Test case: $TC_CNT FAILED - $comment \n";
- print_border2();
- $FAIL_CNT++;
-}
-
-sub print_border2 {
- print "~" x 90 . "\n";
-}
-
-# Function to create the user "qa_user" and extract the user access_key and secret_key of the user
-sub get_user_info
-{
- my ($rgw_user) = @_;
- my $cmd = "$radosgw_admin user create --uid=$rgw_user --display-name=$rgw_user";
- my $cmd_op = get_command_output($cmd);
- if ($cmd_op !~ /keys/){
- return (0,0);
- }
- my @get_user = (split/\n/,$cmd_op);
- foreach (@get_user) {
- if ($_ =~ /access_key/ ){
- $get_acc_key = $_;
- } elsif ($_ =~ /secret_key/ ){
- $get_sec_key = $_;
- }
- }
- my $access_key = $get_acc_key;
- my $acc_key = (split /:/, $access_key)[1];
- $acc_key =~ s/\\//g;
- $acc_key =~ s/ //g;
- $acc_key =~ s/"//g;
- $acc_key =~ s/,//g;
- my $secret_key = $get_sec_key;
- my $sec_key = (split /:/, $secret_key)[1];
- $sec_key =~ s/\\//g;
- $sec_key =~ s/ //g;
- $sec_key =~ s/"//g;
- $sec_key =~ s/,//g;
- return ($acc_key, $sec_key);
-}
-
-# Function that deletes the given user and all associated user data
-sub purge_data
-{
- my ($rgw_user) = @_;
- my $cmd = "$radosgw_admin user rm --uid=$rgw_user --purge-data";
- my $cmd_op = get_command_output($cmd);
- if ($cmd_op !~ /./){
- print "user $rgw_user deleted\n";
- } else {
- print "user $rgw_user NOT deleted\n";
- return 1;
- }
- return 0;
-}
-
-# Function to get the Ceph and distro info
-sub ceph_os_info
-{
- my $ceph_v = get_command_output ( "ceph -v" );
- my @ceph_arr = split(" ",$ceph_v);
- $ceph_v = "Ceph Version: $ceph_arr[2]";
- my $os_distro = get_command_output ( "lsb_release -d" );
- my @os_arr = split(":",$os_distro);
- $os_distro = "Linux Flavor:$os_arr[1]";
- return ($ceph_v, $os_distro);
-}
-
-# Execute the test case based on the input to the script
-sub create_file {
- my ($file_size, $part) = @_;
- my $cnt;
- $mytestfilename = "$file_size.$part";
- $testfileloc = "/tmp/".$mytestfilename;
-    if ($file_size eq '10Mb'){
-        $cnt = 1;
-    } elsif ($file_size eq '100Mb'){
-        $cnt = 10;
-    } elsif ($file_size eq '500Mb'){
-        $cnt = 50;
-    } elsif ($file_size eq '1Gb'){
-        $cnt = 100;
-    } elsif ($file_size eq '2Gb'){
-        $cnt = 200;
-    }
- my $ret = system("dd if=/dev/zero of=$testfileloc bs=10485760 count=$cnt");
- if ($ret) { exit 1 };
- return 0;
-}
-
-sub run_s3
-{
-    # Set up the S3 connection using the given user's access and secret keys
- my ($user) = @_;
- our ( $access_key, $secret_key ) = get_user_info($user);
- if ( ($access_key) && ($secret_key) ) {
- $s3 = Amazon::S3->new(
- {
- aws_access_key_id => $access_key,
- aws_secret_access_key => $secret_key,
- host => $hostname,
- secure => 0,
- retry => 1,
- }
- );
- }
-
-    our $bucketname = 'buck_'.get_timestamp();
-    # create a new bucket (the test bucket)
-    our $bucket = $s3->add_bucket( { bucket => $bucketname } )
-        or die $s3->err. "bucket $bucketname create failed\n". $s3->errstr;
- print "Bucket Created: $bucketname \n";
- return 0;
-}
-
-# delete keys
-sub delete_keys {
- (($bucket->delete_key($_[0])) and return 0) or return 1;
-}
-
-# Readd the file back to bucket
-sub readd_file {
- system("dd if=/dev/zero of=/tmp/10MBfile1 bs=10485760 count=1");
- $mytestfilename1 = '10MBfile1';
- print "readding file to bucket: $mytestfilename1\n";
-    ((($bucket->add_key_filename( $mytestfilename1, "/tmp/$mytestfilename1",
- { content_type => 'text/plain', },
- )) and (print "readding file success\n") and return 0) or (return 1));
-}
-
-# check if rgw service is already running
-sub check
-{
- my $state = get_status();
- if ($state) {
- exit 1;
- }
-}
-1
diff --git a/src/ceph/qa/workunits/suites/blogbench.sh b/src/ceph/qa/workunits/suites/blogbench.sh
deleted file mode 100755
index 17c91c8..0000000
--- a/src/ceph/qa/workunits/suites/blogbench.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-set -e
-
-echo "getting blogbench"
-wget http://download.ceph.com/qa/blogbench-1.0.tar.bz2
-#cp /home/gregf/src/blogbench-1.0.tar.bz2 .
-tar -xvf blogbench-1.0.tar.bz2
-cd blogbench*
-echo "making blogbench"
-./configure
-make
-cd src
-mkdir blogtest_in
-echo "running blogbench"
-./blogbench -d blogtest_in
diff --git a/src/ceph/qa/workunits/suites/bonnie.sh b/src/ceph/qa/workunits/suites/bonnie.sh
deleted file mode 100755
index 698ba9c..0000000
--- a/src/ceph/qa/workunits/suites/bonnie.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# "which" may fail; don't let the bare assignment kill us under set -e
-bonnie_bin=`which bonnie++ || echo /usr/sbin/bonnie++`
-
-uid_flags=""
-[ "`id -u`" == "0" ] && uid_flags="-u root"
-
-$bonnie_bin $uid_flags -n 100
diff --git a/src/ceph/qa/workunits/suites/cephfs_journal_tool_smoke.sh b/src/ceph/qa/workunits/suites/cephfs_journal_tool_smoke.sh
deleted file mode 100755
index 60e9149..0000000
--- a/src/ceph/qa/workunits/suites/cephfs_journal_tool_smoke.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/bash
-
-set -e
-set -x
-
-export BIN="${BIN:-cephfs-journal-tool}"
-export JOURNAL_FILE=/tmp/journal.bin
-export JSON_OUTPUT=/tmp/json.tmp
-export BINARY_OUTPUT=/tmp/binary.tmp
-
-if [ -d $BINARY_OUTPUT ] ; then
- rm -rf $BINARY_OUTPUT
-fi
-
-# Check that the import/export stuff really works as expected
-# first because it's used as the reset method between
-# following checks.
-echo "Testing that export/import cycle preserves state"
-HEADER_STATE=`$BIN header get`
-EVENT_LIST=`$BIN event get list`
-$BIN journal export $JOURNAL_FILE
-$BIN journal import $JOURNAL_FILE
-NEW_HEADER_STATE=`$BIN header get`
-NEW_EVENT_LIST=`$BIN event get list`
-
-if [ ! "$HEADER_STATE" = "$NEW_HEADER_STATE" ] ; then
- echo "Import failed to preserve header state"
- echo $HEADER_STATE
- echo $NEW_HEADER_STATE
-    exit 1
-fi
-
-if [ ! "$EVENT_LIST" = "$NEW_EVENT_LIST" ] ; then
- echo "Import failed to preserve event state"
- echo $EVENT_LIST
- echo $NEW_EVENT_LIST
-    exit 1
-fi
-
-echo "Testing 'journal' commands..."
-
-# Simplest thing: print the vital statistics of the journal
-$BIN journal inspect
-$BIN header get
-
-# Make a copy of the journal in its original state
-$BIN journal export $JOURNAL_FILE
-if [ ! -s $JOURNAL_FILE ] ; then
- echo "Export to $JOURNAL_FILE failed"
-    exit 1
-fi
-
-# Can we execute a journal reset?
-$BIN journal reset
-$BIN journal inspect
-$BIN header get
-
-echo "Rolling back journal to original state..."
-$BIN journal import $JOURNAL_FILE
-
-echo "Testing 'header' commands..."
-$BIN header get
-$BIN header set write_pos 123
-$BIN header set expire_pos 123
-$BIN header set trimmed_pos 123
-
-echo "Rolling back journal to original state..."
-$BIN journal import $JOURNAL_FILE
-
-echo "Testing 'event' commands..."
-$BIN event get summary
-$BIN event get --type=UPDATE --path=/ --inode=0 --frag=0x100 summary
-$BIN event get json --path $JSON_OUTPUT
-if [ ! -s $JSON_OUTPUT ] ; then
- echo "Export to $JSON_OUTPUT failed"
-    exit 1
-fi
-$BIN event get binary --path $BINARY_OUTPUT
-if [ ! -s $BINARY_OUTPUT ] ; then
- echo "Export to $BINARY_OUTPUT failed"
- exit 1
-fi
-$BIN event recover_dentries summary
-$BIN event splice summary
-
-# Tests finish.
-# Metadata objects have been modified by the 'event recover_dentries' command.
-# The journal is no longer consistent with the metadata objects (especially
-# the inotable), so reset it to ensure the MDS can replay successfully.
-$BIN journal reset
-cephfs-table-tool all reset session
-
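The export/import cycle validated at the top of this script is its reset
primitive: every destructive check afterwards is bracketed by an import of the
pristine dump, and the final reset re-synchronizes journal and metadata. A
minimal sketch of that bracketing pattern, using only commands the script
itself relies on (the dump path is illustrative):

    # Snapshot, mutate, restore, then assert the round trip preserved state.
    before=$(cephfs-journal-tool header get)
    cephfs-journal-tool journal export /tmp/pristine.bin
    cephfs-journal-tool journal reset              # destructive step under test
    cephfs-journal-tool journal import /tmp/pristine.bin
    after=$(cephfs-journal-tool header get)
    [ "$before" = "$after" ] || { echo "round trip lost state"; exit 1; }
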
diff --git a/src/ceph/qa/workunits/suites/dbench-short.sh b/src/ceph/qa/workunits/suites/dbench-short.sh
deleted file mode 100755
index 7297d83..0000000
--- a/src/ceph/qa/workunits/suites/dbench-short.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-
-set -e
-
-dbench 1
diff --git a/src/ceph/qa/workunits/suites/dbench.sh b/src/ceph/qa/workunits/suites/dbench.sh
deleted file mode 100755
index ea2be1c..0000000
--- a/src/ceph/qa/workunits/suites/dbench.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-set -e
-
-dbench 1
-dbench 10
diff --git a/src/ceph/qa/workunits/suites/ffsb.sh b/src/ceph/qa/workunits/suites/ffsb.sh
deleted file mode 100755
index 9ed66ab..0000000
--- a/src/ceph/qa/workunits/suites/ffsb.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-set -e
-
-mydir=`dirname $0`
-
-wget http://download.ceph.com/qa/ffsb.tar.bz2
-tar jxvf ffsb.tar.bz2
-cd ffsb-*
-./configure
-make
-cd ..
-mkdir tmp
-cd tmp
-
-for f in $mydir/*.ffsb
-do
- ../ffsb-*/ffsb $f
-done
-cd ..
-rm -r tmp ffsb*
-
diff --git a/src/ceph/qa/workunits/suites/fio.sh b/src/ceph/qa/workunits/suites/fio.sh
deleted file mode 100755
index 04e0645..0000000
--- a/src/ceph/qa/workunits/suites/fio.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-
-set -x
-
-gen_fio_file() {
- iter=$1
- f=$2
- cat > randio-$$-${iter}.fio <<EOF
-[randio]
-blocksize_range=32m:128m
-blocksize_unaligned=1
-filesize=10G:20G
-readwrite=randrw
-runtime=300
-size=20G
-filename=${f}
-EOF
-}
-
-sudo apt-get -y install fio
-for i in $(seq 1 20); do
- fcount=$(ls donetestfile* 2>/dev/null | wc -l)
- donef="foo"
- fiof="bar"
- if test ${fcount} -gt 0; then
- # choose random file
- r=$(( RANDOM % fcount ))
- testfiles=( $(ls donetestfile*) )
- donef=${testfiles[${r}]}
- fiof=$(echo ${donef} | sed -e "s|done|fio|")
- gen_fio_file $i ${fiof}
- else
- fiof=fiotestfile.$$.$i
- donef=donetestfile.$$.$i
- gen_fio_file $i ${fiof}
- fi
-
- sudo rm -f ${donef}
- sudo fio randio-$$-$i.fio
- sudo ln ${fiof} ${donef}
- ls -la
-done
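The loop above implements a completion-marker scheme with hard links: when a
run finishes, its data file is linked to a `done*` name, and later iterations
pick a random marked file to rewrite so the working set ages across runs. The
idiom reduced to a sketch (file names here are illustrative):

    # Publish completion by hard-linking, then sample a random finished file.
    : > data.$$                              # stand-in for a finished test file
    ln data.$$ done.$$                       # marker: data.$$ is complete
    markers=( $(ls done.* 2>/dev/null) )
    if [ ${#markers[@]} -gt 0 ]; then
        pick=${markers[RANDOM % ${#markers[@]}]}
        target=${pick/done/data}             # recover the matching data file
        echo "would rewrite $target"
    fi
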
diff --git a/src/ceph/qa/workunits/suites/fsstress.sh b/src/ceph/qa/workunits/suites/fsstress.sh
deleted file mode 100755
index 92e123b..0000000
--- a/src/ceph/qa/workunits/suites/fsstress.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-BIN_PATH=${TESTDIR}/fsstress/ltp-full-20091231/testcases/kernel/fs/fsstress/fsstress
-
-path=`pwd`
-trap "rm -rf ${TESTDIR}/fsstress" EXIT
-mkdir -p ${TESTDIR}/fsstress
-cd ${TESTDIR}/fsstress
-wget -q -O ${TESTDIR}/fsstress/ltp-full.tgz http://download.ceph.com/qa/ltp-full-20091231.tgz
-tar xzf ${TESTDIR}/fsstress/ltp-full.tgz
-rm ${TESTDIR}/fsstress/ltp-full.tgz
-cd ${TESTDIR}/fsstress/ltp-full-20091231/testcases/kernel/fs/fsstress
-make
-cd $path
-
-command="${BIN_PATH} -d fsstress-`hostname`$$ -l 1 -n 1000 -p 10 -v"
-
-echo "Starting fsstress $command"
-# Pre-create the working directory passed to fsstress via -d above.
-mkdir fsstress-`hostname`$$
-$command
diff --git a/src/ceph/qa/workunits/suites/fsx.sh b/src/ceph/qa/workunits/suites/fsx.sh
deleted file mode 100755
index 8a34806..0000000
--- a/src/ceph/qa/workunits/suites/fsx.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-git clone git://git.ceph.com/xfstests.git
-cd xfstests
-git checkout b7fd3f05d6a7a320d13ff507eda2e5b183cae180
-make
-cd ..
-cp xfstests/ltp/fsx .
-
-OPTIONS="-z" # don't use zero range calls; not supported by cephfs
-
-./fsx $OPTIONS 1MB -N 50000 -p 10000 -l 1048576
-./fsx $OPTIONS 10MB -N 50000 -p 10000 -l 10485760
-./fsx $OPTIONS 100MB -N 50000 -p 10000 -l 104857600
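The `-z` flag is there because CephFS did not support zero-range fallocate
calls, which fsx would otherwise exercise. If one wanted to drop the flag only
where the filesystem cooperates, a hedged probe using util-linux `fallocate`
might look like this (the probe file name is illustrative):

    # Probe: keep -z unless zero-range fallocate succeeds on this mount.
    OPTIONS="-z"
    truncate -s 1M probe.bin
    if fallocate --zero-range --offset 0 --length 4096 probe.bin 2>/dev/null; then
        OPTIONS=""
    fi
    rm -f probe.bin
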
diff --git a/src/ceph/qa/workunits/suites/fsync-tester.sh b/src/ceph/qa/workunits/suites/fsync-tester.sh
deleted file mode 100755
index 345fbde..0000000
--- a/src/ceph/qa/workunits/suites/fsync-tester.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh -x
-
-set -e
-
-wget http://download.ceph.com/qa/fsync-tester.c
-gcc fsync-tester.c -o fsync-tester
-
-./fsync-tester
-
-echo $PATH
-whereis lsof
-lsof
diff --git a/src/ceph/qa/workunits/suites/iogen.sh b/src/ceph/qa/workunits/suites/iogen.sh
deleted file mode 100755
index d159bde..0000000
--- a/src/ceph/qa/workunits/suites/iogen.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-set -e
-
-echo "getting iogen"
-wget http://download.ceph.com/qa/iogen_3.1p0.tar
-tar -xvf iogen_3.1p0.tar
-cd iogen*
-echo "making iogen"
-make
-echo "running iogen"
-./iogen -n 5 -s 2g
-echo "sleep for 10 min"
-sleep 600
-echo "stopping iogen"
-./iogen -k
-
-echo "OK"
diff --git a/src/ceph/qa/workunits/suites/iozone-sync.sh b/src/ceph/qa/workunits/suites/iozone-sync.sh
deleted file mode 100755
index c094952..0000000
--- a/src/ceph/qa/workunits/suites/iozone-sync.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# basic tests of O_SYNC, O_DSYNC, O_RSYNC
-# test O_SYNC
-iozone -c -e -s 512M -r 1M -t 1 -F osync1 -i 0 -i 1 -o
-# test O_DSYNC
-iozone -c -e -s 512M -r 1M -t 1 -F odsync1 -i 0 -i 1 -+D
-# test O_RSYNC
-iozone -c -e -s 512M -r 1M -t 1 -F orsync1 -i 0 -i 1 -+r
-
-# test same file with O_SYNC in one process, buffered in the other
-# the sync test starts first, so the buffered test should blow
-# past it and finish first
-iozone -c -e -s 512M -r 1M -t 1 -F osync2 -i 0 -i 1 -o &
-sleep 1
-iozone -c -e -s 512M -r 256K -t 1 -F osync2 -i 0
-wait $!
-
-# test same file with O_SYNC from different threads
-iozone -c -e -s 512M -r 1M -t 2 -F osync3 -i 2 -o
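On the mixed O_SYNC/buffered test above: the sync writer is backgrounded
first, the buffered writer runs in the foreground, and `wait $!` then reaps
the background PID specifically. The shape of that arrangement, with
illustrative `dd` stand-ins for the two iozone invocations:

    # Race a background synchronous writer against a foreground buffered one.
    sync_writer()     { dd if=/dev/zero of=shared.bin bs=1M count=64 oflag=sync; }
    buffered_writer() { dd if=/dev/zero of=shared.bin bs=1M count=64; }
    sync_writer &
    pid=$!
    buffered_writer        # expected to finish while the sync job still writes
    wait "$pid"            # collect the background writer's exit status
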
diff --git a/src/ceph/qa/workunits/suites/iozone.sh b/src/ceph/qa/workunits/suites/iozone.sh
deleted file mode 100755
index 4fcf8f1..0000000
--- a/src/ceph/qa/workunits/suites/iozone.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-iozone -c -e -s 1024M -r 16K -t 1 -F f1 -i 0 -i 1
-iozone -c -e -s 1024M -r 1M -t 1 -F f2 -i 0 -i 1
-iozone -c -e -s 10240M -r 1M -t 1 -F f3 -i 0 -i 1
diff --git a/src/ceph/qa/workunits/suites/pjd.sh b/src/ceph/qa/workunits/suites/pjd.sh
deleted file mode 100755
index e6df309..0000000
--- a/src/ceph/qa/workunits/suites/pjd.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-set -e
-
-wget http://download.ceph.com/qa/pjd-fstest-20090130-RC-aclfixes.tgz
-tar zxvf pjd*.tgz
-cd pjd*
-make clean
-make
-cd ..
-mkdir tmp
-cd tmp
-# must be root!
-sudo prove -r -v --exec 'bash -x' ../pjd*/tests
-cd ..
-rm -rf tmp pjd*
-
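A note on the harness above: `prove` is the Perl TAP runner, `-r` recurses
into the test tree, and `--exec 'bash -x'` swaps its default interpreter so
the pjd `.t` scripts run under bash with command tracing. A hedged example of
rerunning a single test the same way (the chmod path is illustrative of the
pjd layout):

    # Re-run one pjd test file under the same harness settings.
    sudo prove -v --exec 'bash -x' ../pjd*/tests/chmod/00.t
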
diff --git a/src/ceph/qa/workunits/suites/random_write.32.ffsb b/src/ceph/qa/workunits/suites/random_write.32.ffsb
deleted file mode 100644
index ba83e47..0000000
--- a/src/ceph/qa/workunits/suites/random_write.32.ffsb
+++ /dev/null
@@ -1,48 +0,0 @@
-# Large file random writes.
-# 128 files, 100MB per file (matches num_files below).
-
-time=300 # 5 min
-alignio=1
-
-[filesystem0]
- location=.
- num_files=128
- min_filesize=104857600 # 100 MB
- max_filesize=104857600
- reuse=1
-[end0]
-
-[threadgroup0]
- num_threads=32
-
- write_random=1
- write_weight=1
-
- write_size=5242880 # 5 MB
- write_blocksize=4096
-
- [stats]
- enable_stats=1
- enable_range=1
-
- msec_range 0.00 0.01
- msec_range 0.01 0.02
- msec_range 0.02 0.05
- msec_range 0.05 0.10
- msec_range 0.10 0.20
- msec_range 0.20 0.50
- msec_range 0.50 1.00
- msec_range 1.00 2.00
- msec_range 2.00 5.00
- msec_range 5.00 10.00
- msec_range 10.00 20.00
- msec_range 20.00 50.00
- msec_range 50.00 100.00
- msec_range 100.00 200.00
- msec_range 200.00 500.00
- msec_range 500.00 1000.00
- msec_range 1000.00 2000.00
- msec_range 2000.00 5000.00
- msec_range 5000.00 10000.00
- [end]
-[end0]
diff --git a/src/ceph/qa/workunits/suites/wac.sh b/src/ceph/qa/workunits/suites/wac.sh
deleted file mode 100755
index 49b4f14..0000000
--- a/src/ceph/qa/workunits/suites/wac.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-wget http://download.ceph.com/qa/wac.c
-gcc -o wac wac.c
-set +e
-timeout 5m ./wac -l 65536 -n 64 -r wac-test
-RET=$?
-set -e
-[[ $RET -eq 124 ]]
-echo OK
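The `[[ $RET -eq 124 ]]` assertion leans on GNU timeout's contract: exit
status 124 means the time limit fired, so the test passes only if wac was
still running after five minutes rather than crashing or exiting early. The
guard in isolation (the workload name is illustrative):

    # Assert a workload survives until timeout kills it (exit status 124).
    set +e
    timeout 5m ./long_running_workload
    rc=$?
    set -e
    [ "$rc" -eq 124 ] || { echo "workload exited early (rc=$rc)"; exit 1; }
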
diff --git a/src/ceph/qa/workunits/true.sh b/src/ceph/qa/workunits/true.sh
deleted file mode 100755
index 296ef78..0000000
--- a/src/ceph/qa/workunits/true.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-
-true