From 7da45d65be36d36b880cc55c5036e96c24b53f00 Mon Sep 17 00:00:00 2001 From: Qiaowei Ren Date: Thu, 1 Mar 2018 14:38:11 +0800 Subject: remove ceph code This patch removes initial ceph code, due to license issue. Change-Id: I092d44f601cdf34aed92300fe13214925563081c Signed-off-by: Qiaowei Ren --- src/ceph/doc/man/8/CMakeLists.txt | 85 -- src/ceph/doc/man/8/ceph-authtool.rst | 204 ---- src/ceph/doc/man/8/ceph-bluestore-tool.rst | 123 --- src/ceph/doc/man/8/ceph-clsinfo.rst | 49 - src/ceph/doc/man/8/ceph-conf.rst | 129 --- src/ceph/doc/man/8/ceph-create-keys.rst | 67 -- src/ceph/doc/man/8/ceph-debugpack.rst | 50 - src/ceph/doc/man/8/ceph-dencoder.rst | 151 --- src/ceph/doc/man/8/ceph-deploy.rst | 609 ----------- src/ceph/doc/man/8/ceph-detect-init.rst | 65 -- src/ceph/doc/man/8/ceph-disk.rst | 97 -- src/ceph/doc/man/8/ceph-fuse.rst | 64 -- src/ceph/doc/man/8/ceph-kvstore-tool.rst | 85 -- src/ceph/doc/man/8/ceph-mds.rst | 87 -- src/ceph/doc/man/8/ceph-mon.rst | 94 -- src/ceph/doc/man/8/ceph-osd.rst | 134 --- src/ceph/doc/man/8/ceph-post-file.rst | 71 -- src/ceph/doc/man/8/ceph-rbdnamer.rst | 41 - src/ceph/doc/man/8/ceph-rest-api.rst | 150 --- src/ceph/doc/man/8/ceph-run.rst | 45 - src/ceph/doc/man/8/ceph-syn.rst | 99 -- src/ceph/doc/man/8/ceph-volume-systemd.rst | 56 - src/ceph/doc/man/8/ceph-volume.rst | 122 --- src/ceph/doc/man/8/ceph.rst | 1550 ---------------------------- src/ceph/doc/man/8/crushtool.rst | 284 ----- src/ceph/doc/man/8/librados-config.rst | 46 - src/ceph/doc/man/8/monmaptool.rst | 107 -- src/ceph/doc/man/8/mount.ceph.rst | 168 --- src/ceph/doc/man/8/osdmaptool.rst | 157 --- src/ceph/doc/man/8/rados.rst | 223 ---- src/ceph/doc/man/8/radosgw-admin.rst | 504 --------- src/ceph/doc/man/8/radosgw.rst | 256 ----- src/ceph/doc/man/8/rbd-fuse.rst | 56 - src/ceph/doc/man/8/rbd-ggate.rst | 79 -- src/ceph/doc/man/8/rbd-mirror.rst | 75 -- src/ceph/doc/man/8/rbd-nbd.rst | 67 -- src/ceph/doc/man/8/rbd-replay-many.rst | 73 -- src/ceph/doc/man/8/rbd-replay-prep.rst | 55 - src/ceph/doc/man/8/rbd-replay.rst | 78 -- src/ceph/doc/man/8/rbd.rst | 615 ----------- src/ceph/doc/man/8/rbdmap.rst | 123 --- src/ceph/doc/man/CMakeLists.txt | 15 - 42 files changed, 7208 deletions(-) delete mode 100644 src/ceph/doc/man/8/CMakeLists.txt delete mode 100644 src/ceph/doc/man/8/ceph-authtool.rst delete mode 100644 src/ceph/doc/man/8/ceph-bluestore-tool.rst delete mode 100644 src/ceph/doc/man/8/ceph-clsinfo.rst delete mode 100644 src/ceph/doc/man/8/ceph-conf.rst delete mode 100644 src/ceph/doc/man/8/ceph-create-keys.rst delete mode 100644 src/ceph/doc/man/8/ceph-debugpack.rst delete mode 100644 src/ceph/doc/man/8/ceph-dencoder.rst delete mode 100644 src/ceph/doc/man/8/ceph-deploy.rst delete mode 100644 src/ceph/doc/man/8/ceph-detect-init.rst delete mode 100644 src/ceph/doc/man/8/ceph-disk.rst delete mode 100644 src/ceph/doc/man/8/ceph-fuse.rst delete mode 100644 src/ceph/doc/man/8/ceph-kvstore-tool.rst delete mode 100644 src/ceph/doc/man/8/ceph-mds.rst delete mode 100644 src/ceph/doc/man/8/ceph-mon.rst delete mode 100644 src/ceph/doc/man/8/ceph-osd.rst delete mode 100644 src/ceph/doc/man/8/ceph-post-file.rst delete mode 100644 src/ceph/doc/man/8/ceph-rbdnamer.rst delete mode 100644 src/ceph/doc/man/8/ceph-rest-api.rst delete mode 100644 src/ceph/doc/man/8/ceph-run.rst delete mode 100644 src/ceph/doc/man/8/ceph-syn.rst delete mode 100644 src/ceph/doc/man/8/ceph-volume-systemd.rst delete mode 100644 src/ceph/doc/man/8/ceph-volume.rst delete mode 100644 src/ceph/doc/man/8/ceph.rst delete mode 100644 
src/ceph/doc/man/8/crushtool.rst delete mode 100644 src/ceph/doc/man/8/librados-config.rst delete mode 100644 src/ceph/doc/man/8/monmaptool.rst delete mode 100644 src/ceph/doc/man/8/mount.ceph.rst delete mode 100644 src/ceph/doc/man/8/osdmaptool.rst delete mode 100644 src/ceph/doc/man/8/rados.rst delete mode 100644 src/ceph/doc/man/8/radosgw-admin.rst delete mode 100644 src/ceph/doc/man/8/radosgw.rst delete mode 100644 src/ceph/doc/man/8/rbd-fuse.rst delete mode 100644 src/ceph/doc/man/8/rbd-ggate.rst delete mode 100644 src/ceph/doc/man/8/rbd-mirror.rst delete mode 100644 src/ceph/doc/man/8/rbd-nbd.rst delete mode 100644 src/ceph/doc/man/8/rbd-replay-many.rst delete mode 100644 src/ceph/doc/man/8/rbd-replay-prep.rst delete mode 100644 src/ceph/doc/man/8/rbd-replay.rst delete mode 100644 src/ceph/doc/man/8/rbd.rst delete mode 100644 src/ceph/doc/man/8/rbdmap.rst delete mode 100644 src/ceph/doc/man/CMakeLists.txt (limited to 'src/ceph/doc/man') diff --git a/src/ceph/doc/man/8/CMakeLists.txt b/src/ceph/doc/man/8/CMakeLists.txt deleted file mode 100644 index 84e7640..0000000 --- a/src/ceph/doc/man/8/CMakeLists.txt +++ /dev/null @@ -1,85 +0,0 @@ -set(client_srcs - ceph-syn.rst - ceph-conf.rst - ceph.rst - ceph-authtool.rst - ceph-kvstore-tool.rst - rados.rst - ceph-post-file.rst - ceph-dencoder.rst) - -set(server_srcs - ceph-deploy.rst - crushtool.rst - ceph-run.rst - mount.ceph.rst - ceph-create-keys.rst - ceph-rest-api.rst) -if(WITH_TESTS) -list(APPEND server_srcs - ceph-debugpack.rst) -endif(WITH_TESTS) - -set(osd_srcs - ceph-clsinfo.rst - ceph-detect-init.rst - ceph-disk.rst - ceph-volume.rst - ceph-volume-systemd.rst - ceph-osd.rst - osdmaptool.rst - ceph-bluestore-tool.rst) - -set(mon_srcs - ceph-mon.rst - monmaptool.rst) - -list(APPEND man_srcs - ${client_srcs} - ${server_srcs} - ${osd_srcs} - ${mon_srcs} - ceph-mds.rst - librados-config.rst) - -if(HAVE_LIBFUSE) - list(APPEND man_srcs - ceph-fuse.rst - rbd-fuse.rst) -endif() - -if(WITH_RADOSGW) - list(APPEND man_srcs - radosgw.rst - radosgw-admin.rst) -endif() - -if(WITH_RBD) - list(APPEND man_srcs - ceph-rbdnamer.rst - rbd-mirror.rst - rbd-replay-many.rst - rbd-replay-prep.rst - rbd-replay.rst - rbdmap.rst - rbd.rst) - if(LINUX) - list(APPEND man_srcs rbd-nbd.rst) - endif() - if(FREEBSD) - list(APPEND man_srcs rbd-ggate.rst) - endif() -endif() - -foreach(man ${man_srcs}) - list(APPEND sphinx_input ${CMAKE_CURRENT_SOURCE_DIR}/${man}) - # mount.ceph.rst => mount if we use - # get_filename_component(cmd ${man} NAME_WE) - string(REGEX REPLACE ".rst$" "" cmd ${man}) - list(APPEND sphinx_output ${sphinx_output_dir}/${cmd}.8) - install(FILES ${sphinx_output_dir}/${cmd}.8 - DESTINATION ${CEPH_MAN_DIR}/man8) -endforeach() - -set(sphinx_input ${sphinx_input} PARENT_SCOPE) -set(sphinx_output ${sphinx_output} PARENT_SCOPE) diff --git a/src/ceph/doc/man/8/ceph-authtool.rst b/src/ceph/doc/man/8/ceph-authtool.rst deleted file mode 100644 index f1ac152..0000000 --- a/src/ceph/doc/man/8/ceph-authtool.rst +++ /dev/null @@ -1,204 +0,0 @@ -:orphan: - -================================================= - ceph-authtool -- ceph keyring manipulation tool -================================================= - -.. 
program:: ceph-authtool - -Synopsis -======== - -| **ceph-authtool** *keyringfile* - [ -l | --list ] - [ -p | --print-key ] - [ -C | --create-keyring ] - [ -g | --gen-key ] - [ --gen-print-key ] - [ --import-keyring *otherkeyringfile* ] - [ -n | --name *entityname* ] - [ -u | --set-uid *auid* ] - [ -a | --add-key *base64_key* ] - [ --cap *subsystem* *capability* ] - [ --caps *capfile* ] - - -Description -=========== - -**ceph-authtool** is a utility to create, view, and modify a Ceph keyring -file. A keyring file stores one or more Ceph authentication keys and -possibly an associated capability specification. Each key is -associated with an entity name, of the form -``{client,mon,mds,osd}.name``. - -**WARNING** Ceph provides authentication and protection against -man-in-the-middle attacks once secret keys are in place. However, -data over the wire is not encrypted, which may include the messages -used to configure said keys. The system is primarily intended to be -used in trusted environments. - -Options -======= - -.. option:: -l, --list - - will list all keys and capabilities present in the keyring - -.. option:: -p, --print-key - - will print an encoded key for the specified entityname. This is - suitable for the ``mount -o secret=`` argument - -.. option:: -C, --create-keyring - - will create a new keyring, overwriting any existing keyringfile - -.. option:: -g, --gen-key - - will generate a new secret key for the specified entityname - -.. option:: --gen-print-key - - will generate a new secret key for the specified entityname, - without altering the keyringfile, printing the secret to stdout - -.. option:: --import-keyring *secondkeyringfile* - - will import the content of a given keyring to the keyringfile - -.. option:: -n, --name *name* - - specify entityname to operate on - -.. option:: -u, --set-uid *auid* - - sets the auid (authenticated user id) for the specified entityname - -.. option:: -a, --add-key *base64_key* - - will add an encoded key to the keyring - -.. option:: --cap *subsystem* *capability* - - will set the capability for given subsystem - -.. option:: --caps *capsfile* - - will set all of capabilities associated with a given key, for all subsystems - - -Capabilities -============ - -The subsystem is the name of a Ceph subsystem: ``mon``, ``mds``, or -``osd``. - -The capability is a string describing what the given user is allowed -to do. This takes the form of a comma separated list of allow -clauses with a permission specifier containing one or more of rwx for -read, write, and execute permission. The ``allow *`` grants full -superuser permissions for the given subsystem. - -For example:: - - # can read, write, and execute objects - osd = "allow rwx" - - # can access mds server - mds = "allow" - - # can modify cluster state (i.e., is a server daemon) - mon = "allow rwx" - -A librados user restricted to a single pool might look like:: - - mon = "allow r" - - osd = "allow rw pool foo" - -A client using rbd with read access to one pool and read/write access to another:: - - mon = "allow r" - - osd = "allow class-read object_prefix rbd_children, allow pool templates r class-read, allow pool vms rwx" - -A client mounting the file system with minimal permissions would need caps like:: - - mds = "allow" - - osd = "allow rw pool data" - - mon = "allow r" - - -OSD Capabilities -================ - -In general, an osd capability follows the grammar:: - - osdcap := grant[,grant...] 
- grant := allow (match capspec | capspec match)
- match := [pool[=]<poolname> | object_prefix <prefix>]
- capspec := * | [r][w][x] [class-read] [class-write]
-
-The capspec determines what kind of operations the entity can perform::
-
- r = read access to objects
- w = write access to objects
- x = can call any class method (same as class-read class-write)
- class-read = can call class methods that are reads
- class-write = can call class methods that are writes
- * = equivalent to rwx, plus the ability to run osd admin commands,
-     i.e. ceph osd tell ...
-
-The match criteria restrict a grant based on the pool being accessed.
-Grants are additive if the client fulfills the match condition. For
-example, if a client has the osd capabilities: "allow r object_prefix
-prefix, allow w pool foo, allow x pool bar", then it has rw access to
-pool foo, rx access to pool bar, and r access to objects whose
-names begin with 'prefix' in any pool.
-
-Caps file format
-================
-
-The caps file format consists of zero or more key/value pairs, one per
-line. The key and value are separated by an ``=``, and the value must
-be quoted (with ``'`` or ``"``) if it contains any whitespace. The key
-is the name of the Ceph subsystem (``osd``, ``mds``, ``mon``), and the
-value is the capability string (see above).
-
-
-Example
-=======
-
-To create a new keyring containing a key for client.foo::
-
-   ceph-authtool -C -n client.foo --gen-key keyring
-
-To associate some capabilities with the key (namely, the ability to
-mount a Ceph filesystem)::
-
-   ceph-authtool -n client.foo --cap mds 'allow' --cap osd 'allow rw pool=data' --cap mon 'allow r' keyring
-
-To display the contents of the keyring::
-
-   ceph-authtool -l keyring
-
-When mounting a Ceph file system, you can grab the appropriately encoded secret key with::
-
-   mount -t ceph serverhost:/ mountpoint -o name=foo,secret=`ceph-authtool -p -n client.foo keyring`
-
-
-Availability
-============
-
-**ceph-authtool** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-bluestore-tool.rst b/src/ceph/doc/man/8/ceph-bluestore-tool.rst
deleted file mode 100644
index 7a7b0ea..0000000
--- a/src/ceph/doc/man/8/ceph-bluestore-tool.rst
+++ /dev/null
@@ -1,123 +0,0 @@
-:orphan:
-
-======================================================
- ceph-bluestore-tool -- bluestore administrative tool
-======================================================
-
-.. program:: ceph-bluestore-tool
-
-Synopsis
-========
-
-| **ceph-bluestore-tool** *command*
-  [ --dev *device* ... ]
-  [ --path *osd path* ]
-  [ --out-dir *dir* ]
-  [ --log-file | -l *filename* ]
-  [ --deep ]
-| **ceph-bluestore-tool** fsck|repair --path *osd path* [ --deep ]
-| **ceph-bluestore-tool** show-label --dev *device* ...
-| **ceph-bluestore-tool** prime-osd-dir --dev *device* --path *osd path*
-| **ceph-bluestore-tool** bluefs-export --path *osd path* --out-dir *dir*
-| **ceph-bluestore-tool** bluefs-bdev-sizes --path *osd path*
-| **ceph-bluestore-tool** bluefs-bdev-expand --path *osd path*
-
-
-Description
-===========
-
-**ceph-bluestore-tool** is a utility to perform low-level administrative
-operations on a BlueStore instance.
-
-Commands
-========
-
-.. option:: help
-
-   show help
-
-.. option:: fsck
-
-   run consistency check on BlueStore metadata. If *--deep* is specified, also read all object data and verify checksums.
-
-
-.. option:: repair
-
-   Run a consistency check *and* repair any errors we can.
-
-.. option:: bluefs-export
-
-   Export the contents of BlueFS (i.e., rocksdb files) to an output directory.
-
-.. option:: bluefs-bdev-sizes --path *osd path*
-
-   Print the device sizes, as understood by BlueFS, to stdout.
-
-.. option:: bluefs-bdev-expand --path *osd path*
-
-   Instruct BlueFS to check the size of its block devices and, if they have expanded, make use of the additional space.
-
-.. option:: show-label --dev *device* [...]
-
-   Show device label(s).
-
-Options
-=======
-
-.. option:: --dev *device*
-
-   Add *device* to the list of devices to consider
-
-.. option:: --path *osd path*
-
-   Specify an osd path. In most cases, the device list is inferred from the symlinks present in *osd path*. This is usually simpler than explicitly specifying the device(s) with --dev.
-
-.. option:: --out-dir *dir*
-
-   Output directory for bluefs-export
-
-.. option:: -l, --log-file *log file*
-
-   file to log to
-
-.. option:: --log-level *num*
-
-   debug log level. Default is 30 (extremely verbose), 20 is very
-   verbose, 10 is verbose, and 1 is not very verbose.
-
-.. option:: --deep
-
-   deep scrub/repair (read and validate object data, not just metadata)
-
-Device labels
-=============
-
-Every BlueStore block device has a single block label at the beginning of the
-device. You can dump the contents of the label with::
-
-   ceph-bluestore-tool show-label --dev *device*
-
-The main device will have a lot of metadata, including information
-that used to be stored in small files in the OSD data directory. The
-auxiliary devices (db and wal) will only have the minimum required
-fields (OSD UUID, size, device type, birth time).
-
-OSD directory priming
-=====================
-
-You can generate the content for an OSD data directory that can start up a
-BlueStore OSD with the *prime-osd-dir* command::
-
-   ceph-bluestore-tool prime-osd-dir --dev *main device* --path /var/lib/ceph/osd/ceph-*id*
-
-
-Availability
-============
-
-**ceph-bluestore-tool** is part of Ceph, a massively scalable,
-open-source, distributed storage system. Please refer to the Ceph
-documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`ceph-osd <ceph-osd>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-clsinfo.rst b/src/ceph/doc/man/8/ceph-clsinfo.rst
deleted file mode 100644
index 0188ce1..0000000
--- a/src/ceph/doc/man/8/ceph-clsinfo.rst
+++ /dev/null
@@ -1,49 +0,0 @@
-:orphan:
-
-===============================================
- ceph-clsinfo -- show class object information
-===============================================
-
-.. program:: ceph-clsinfo
-
-Synopsis
-========
-
-| **ceph-clsinfo** [ *options* ] ... *filename*
-
-
-Description
-===========
-
-**ceph-clsinfo** can show name, version, and architecture information
-about a specific class object.
-
-
-Options
-=======
-
-.. option:: -n, --name
-
-   Shows the class name
-
-.. option:: -v, --version
-
-   Shows the class version
-
-.. option:: -a, --arch
-
-   Shows the class architecture
-
-
-Availability
-============
-
-**ceph-clsinfo** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-conf.rst b/src/ceph/doc/man/8/ceph-conf.rst
deleted file mode 100644
index 9782e38..0000000
--- a/src/ceph/doc/man/8/ceph-conf.rst
+++ /dev/null
@@ -1,129 +0,0 @@
-:orphan:
-
-==================================
- ceph-conf -- ceph conf file tool
-==================================
-
-.. program:: ceph-conf
-
-Synopsis
-========
-
-| **ceph-conf** -c *conffile* --list-all-sections
-| **ceph-conf** -c *conffile* -L
-| **ceph-conf** -c *conffile* -l *prefix*
-| **ceph-conf** *key* -s *section1* ...
-| **ceph-conf** [-s *section* ] [-r] --lookup *key*
-| **ceph-conf** [-s *section* ] *key*
-
-
-Description
-===========
-
-**ceph-conf** is a utility for getting information about a ceph
-configuration file. As with most Ceph programs, you can specify which
-Ceph configuration file to use with the ``-c`` flag.
-
-
-Actions
-=======
-
-**ceph-conf** performs one of the following actions:
-
-.. option:: -L, --list-all-sections
-
-   list all sections in the configuration file.
-
-.. option:: -l, --list-sections *prefix*
-
-   list the sections with the given *prefix*. For example, ``--list-sections mon``
-   would list all sections beginning with ``mon``.
-
-.. option:: --lookup *key*
-
-   search and print the specified configuration setting. Note: ``--lookup`` is
-   the default action. If no other actions are given on the command line, we will
-   default to doing a lookup.
-
-.. option:: -h, --help
-
-   print a summary of usage.
-
-
-Options
-=======
-
-.. option:: -c *conffile*
-
-   the Ceph configuration file.
-
-.. option:: --filter-key *key*
-
-   filter the section list to only include sections with the given *key* defined.
-
-.. option:: --filter-key-value *key* ``=`` *value*
-
-   filter the section list to only include sections with the given *key*/*value* pair.
-
-.. option:: --name *type.id*
-
-   the Ceph name in which the sections are searched (default 'client.admin').
-   For example, if we specify ``--name osd.0``, the following sections will be
-   searched: [osd.0], [osd], [global]
-
-.. option:: -r, --resolve-search
-
-   search for the first file that exists and can be opened in the resulting
-   comma-delimited search list.
-
-.. option:: -s, --section
-
-   additional sections to search. These additional sections will be searched
-   before the sections that would normally be searched. As always, the first
-   matching entry we find will be returned.
-
-
-Examples
-========
-
-To find out what value osd 0 will use for the "osd data" option::
-
-   ceph-conf -c foo.conf --name osd.0 --lookup "osd data"
-
-To find out what value mds.a will use for the "log file" option::
-
-   ceph-conf -c foo.conf --name mds.a "log file"
-
-To list all sections that begin with "osd"::
-
-   ceph-conf -c foo.conf -l osd
-
-To list all sections::
-
-   ceph-conf -c foo.conf -L
-
-To print the path of the "keyring" used by "client.0"::
-
-   ceph-conf --name client.0 -r -l keyring
-
-
-Files
-=====
-
-``/etc/ceph/$cluster.conf``, ``~/.ceph/$cluster.conf``, ``$cluster.conf``
-
-the Ceph configuration files to use if not specified.
-
-
-Availability
-============
-
-**ceph-conf** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer
-to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8),
diff --git a/src/ceph/doc/man/8/ceph-create-keys.rst b/src/ceph/doc/man/8/ceph-create-keys.rst
deleted file mode 100644
index 20b6560..0000000
--- a/src/ceph/doc/man/8/ceph-create-keys.rst
+++ /dev/null
@@ -1,67 +0,0 @@
-:orphan:
-
-===============================================
-ceph-create-keys -- ceph keyring generate tool
-===============================================
-
-.. program:: ceph-create-keys
-
-Synopsis
-========
-
-| **ceph-create-keys** [-h] [-v] [-t seconds] [--cluster *name*] --id *id*
-
-
-Description
-===========
-
-:program:`ceph-create-keys` is a utility to generate bootstrap keyrings using
-the given monitor when it is ready.
-
-It creates the following auth entities (or users):
-
-``client.admin``
-
-   and its key for your client host.
-
-``client.bootstrap-{osd, rgw, mds}``
-
-   and their keys for bootstrapping the corresponding services.
-
-To list all users in the cluster::
-
-   ceph auth ls
-
-
-Options
-=======
-
-.. option:: --cluster
-
-   name of the cluster (default 'ceph').
-
-.. option:: -t
-
-   time out after **seconds** (default: 600) waiting for a response from the monitor.
-
-.. option:: -i, --id
-
-   id of a ceph-mon that is coming up. **ceph-create-keys** will wait until it joins quorum.
-
-.. option:: -v, --verbose
-
-   be more verbose.
-
-
-Availability
-============
-
-**ceph-create-keys** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer
-to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-debugpack.rst b/src/ceph/doc/man/8/ceph-debugpack.rst
deleted file mode 100644
index 4f2c4f2..0000000
--- a/src/ceph/doc/man/8/ceph-debugpack.rst
+++ /dev/null
@@ -1,50 +0,0 @@
-:orphan:
-
-=============================================
- ceph-debugpack -- ceph debug packer utility
-=============================================
-
-.. program:: ceph-debugpack
-
-Synopsis
-========
-
-| **ceph-debugpack** [ *options* ] *filename.tar.gz*
-
-
-Description
-===========
-
-**ceph-debugpack** will build a tarball containing various items that are
-useful for debugging crashes. The resulting tarball can be shared with
-Ceph developers when debugging a problem.
-
-The tarball will include the binaries for ceph-mds, ceph-osd, ceph-mon, and
-radosgw, any log files, the ceph.conf configuration file, any core files we can
-find, and (if the system is running) dumps of the current cluster state
-as reported by 'ceph report'.
-
-
-Options
-=======
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
-   Use *ceph.conf* configuration file instead of the default
-   ``/etc/ceph/ceph.conf`` to determine monitor addresses during
-   startup.
-
-
-Availability
-============
-
-**ceph-debugpack** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
-:doc:`ceph-post-file <ceph-post-file>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-dencoder.rst b/src/ceph/doc/man/8/ceph-dencoder.rst
deleted file mode 100644
index cf2e429..0000000
--- a/src/ceph/doc/man/8/ceph-dencoder.rst
+++ /dev/null
@@ -1,151 +0,0 @@
-:orphan:
-
-==============================================
- ceph-dencoder -- ceph encoder/decoder utility
-==============================================
-
-.. program:: ceph-dencoder
-
-Synopsis
-========
-
-| **ceph-dencoder** [commands...]
-
-
-Description
-===========
-
-**ceph-dencoder** is a utility to encode, decode, and dump ceph data
-structures. It is used for debugging and for testing inter-version
-compatibility.
-
-**ceph-dencoder** takes a simple list of commands and performs them
-in order.
-
-Commands
-========
-
-.. option:: version
-
-   Print the version string for the **ceph-dencoder** binary.
-
-.. option:: import <file>
-
-   Read a binary blob of encoded data from the given file. It will be
-   placed in an in-memory buffer.
-
-.. option:: export <file>
-
-   Write the contents of the current in-memory buffer to the given
-   file.
-
-.. option:: list_types
-
-   List the data types known to this build of **ceph-dencoder**.
-
-.. option:: type <T>
-
-   Select the given type for future ``encode`` or ``decode`` operations.
-
-.. option:: skip <bytes>
-
-   Seek into the imported file before reading the data structure; use
-   this with objects that have a preamble/header before the object of interest.
-
-.. option:: decode
-
-   Decode the contents of the in-memory buffer into an instance of the
-   previously selected type. If there is an error, report it.
-
-.. option:: encode
-
-   Encode the contents of the in-memory instance of the previously
-   selected type to the in-memory buffer.
-
-.. option:: dump_json
-
-   Print a JSON-formatted description of the in-memory object.
-
-.. option:: count_tests
-
-   Print the number of built-in test instances of the previously
-   selected type that **ceph-dencoder** is able to generate.
-
-.. option:: select_test <n>
-
-   Select the given built-in test instance as the in-memory instance
-   of the type.
-
-.. option:: get_features
-
-   Print the decimal value of the feature set supported by this version
-   of **ceph-dencoder**. Each bit represents a feature. These correspond to
-   CEPH_FEATURE_* defines in src/include/ceph_features.h.
-
-.. option:: set_features <f>
-
-   Set the feature bits provided to ``encode`` to *f*. This allows
-   you to encode objects such that they can be understood by old
-   versions of the software (for those types that support it).
-
-Example
-=======
-
-Say you want to examine an attribute on an object stored by ``ceph-osd``. You can do this:
-
-::
-
-   $ cd /mnt/osd.12/current/2.b_head
-   $ attr -l foo_bar_head_EFE6384B
-   Attribute "ceph.snapset" has a 31 byte value for foo_bar_head_EFE6384B
-   Attribute "ceph._" has a 195 byte value for foo_bar_head_EFE6384B
-   $ attr foo_bar_head_EFE6384B -g ceph._ -q > /tmp/a
-   $ ceph-dencoder type object_info_t import /tmp/a decode dump_json
-   { "oid": { "oid": "foo",
-       "key": "bar",
-       "snapid": -2,
-       "hash": 4024842315,
-       "max": 0},
-     "locator": { "pool": 2,
-         "preferred": -1,
-         "key": "bar"},
-     "category": "",
-     "version": "9'1",
-     "prior_version": "0'0",
-     "last_reqid": "client.4116.0:1",
-     "size": 1681,
-     "mtime": "2012-02-21 08:58:23.666639",
-     "lost": 0,
-     "wrlock_by": "unknown.0.0:0",
-     "snaps": [],
-     "truncate_seq": 0,
-     "truncate_size": 0,
-     "watchers": {}}
-
-Alternatively, perhaps you wish to dump an internal CephFS metadata object; you might
-do that like this:
-
-::
-
-   $ rados -p metadata get mds_snaptable mds_snaptable.bin
-   $ ceph-dencoder type SnapServer skip 8 import mds_snaptable.bin decode dump_json
-   { "snapserver": { "last_snap": 1,
-       "pending_noop": [],
-       "snaps": [],
-       "need_to_purge": {},
-       "pending_create": [],
-       "pending_destroy": []}}
-
-
-Availability
-============
-
-**ceph-dencoder** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-deploy.rst b/src/ceph/doc/man/8/ceph-deploy.rst
deleted file mode 100644
index ff96574..0000000
--- a/src/ceph/doc/man/8/ceph-deploy.rst
+++ /dev/null
@@ -1,609 +0,0 @@
-:orphan:
-
-=====================================
- ceph-deploy -- Ceph deployment tool
-=====================================
-
-.. program:: ceph-deploy
-
-Synopsis
-========
-
-| **ceph-deploy** **new** [*initial-monitor-node(s)*]
-
-| **ceph-deploy** **install** [*ceph-node*] [*ceph-node*...]
-
-| **ceph-deploy** **mon** *create-initial*
-
-| **ceph-deploy** **osd** *prepare* [*ceph-node*]:[*dir-path*]
-
-| **ceph-deploy** **osd** *activate* [*ceph-node*]:[*dir-path*]
-
-| **ceph-deploy** **osd** *create* [*ceph-node*]:[*dir-path*]
-
-| **ceph-deploy** **admin** [*admin-node*][*ceph-node*...]
-
-| **ceph-deploy** **purgedata** [*ceph-node*][*ceph-node*...]
-
-| **ceph-deploy** **forgetkeys**
-
-Description
-===========
-
-:program:`ceph-deploy` is a tool which allows easy and quick deployment of a
-Ceph cluster without involving complex and detailed manual configuration. It
-uses ssh to gain access to other Ceph nodes from the admin node, sudo for
-administrator privileges on them, and the underlying Python scripts automate
-the manual process of Ceph installation on each node from the admin node itself.
-It can be easily run on a workstation and doesn't require servers, databases or
-any other automated tools. With :program:`ceph-deploy`, it is really easy to set
-up and take down a cluster. However, it is not a generic deployment tool. It is
-a specific tool which is designed for those who want to get Ceph up and running
-quickly with only the unavoidable initial configuration settings and without the
-overhead of installing other tools like ``Chef``, ``Puppet`` or ``Juju``. Those
-who want to customize security settings, partitions or directory locations and
-want to set up a cluster following detailed manual steps, should use other tools,
-e.g., ``Chef``, ``Puppet``, ``Juju`` or ``Crowbar``.
-
-With :program:`ceph-deploy`, you can install Ceph packages on remote nodes,
-create a cluster, add monitors, gather/forget keys, add OSDs and metadata
-servers, configure admin hosts or take down the cluster.
-
-Commands
-========
-
-new
----
-
-Start deploying a new cluster and write a configuration file and keyring for it.
-It tries to copy ssh keys from the admin node to gain passwordless ssh to the monitor
-node(s), validates the host IP, creates a cluster with a new initial monitor node or
-nodes for monitor quorum, a ceph configuration file, a monitor secret keyring and
-a log file for the new cluster. It populates the newly created Ceph configuration
-file with the ``fsid`` of the cluster, hostnames and IP addresses of initial monitor
-members under the ``[global]`` section.
-
-Usage::
-
-   ceph-deploy new [MON][MON...]
-
-Here, [MON] is the initial monitor hostname (short hostname, i.e., ``hostname -s``).
-
-Other options like :option:`--no-ssh-copykey`, :option:`--fsid`,
-:option:`--cluster-network` and :option:`--public-network` can also be used with
-this command.
-
-If more than one network interface is used, the ``public network`` setting has to be
-added under the ``[global]`` section of the Ceph configuration file. If the public subnet
-is given, the ``new`` command will choose the one IP from the remote host that exists
-within the subnet range. Public network can also be added at runtime using the
-:option:`--public-network` option with the command as mentioned above.
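
As an illustrative sketch (the hostname and subnet below are hypothetical), creating a new cluster with one initial monitor on a dedicated public network might look like::

   ceph-deploy new --public-network 10.0.0.0/24 mon1

Per the description above, this writes the ``ceph.conf``, a monitor secret keyring, and a log file for the new cluster into the current working directory.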
-
-
-install
--------
-
-Install Ceph packages on remote hosts. As a first step it installs
-``yum-plugin-priorities`` in admin and other nodes using passwordless ssh and sudo
-so that Ceph packages from the upstream repository get more priority. It then detects
-the platform and distribution for the hosts and installs Ceph normally by
-downloading distro-compatible packages if an adequate repo for Ceph has already been added.
-The ``--release`` flag is used to get the latest release for installation. During
-detection of platform and distribution before installation, if it finds the
-``distro.init`` to be ``sysvinit`` (Fedora, CentOS/RHEL, etc.), it doesn't allow
-installation with a custom cluster name and uses the default name ``ceph`` for the
-cluster.
-
-If the user explicitly specifies a custom repo url with :option:`--repo-url` for
-installation, anything detected from the configuration will be overridden and
-the custom repository location will be used for installation of Ceph packages.
-If required, valid custom repositories are also detected and installed. In case
-of installation from a custom repo a boolean is used to determine the logic
-needed to proceed with a custom repo installation. A custom repo install helper
-is used that goes through config checks to retrieve repos (and any extra repos
-defined) and installs them. ``cd_conf`` is the object built from ``argparse``
-that holds the flags and information needed to determine what metadata from the
-configuration is to be used.
-
-A user can also opt to install only the repository without installing Ceph and
-its dependencies by using the :option:`--repo` option.
-
-Usage::
-
-   ceph-deploy install [HOST][HOST...]
-
-Here, [HOST] is/are the host node(s) where Ceph is to be installed.
-
-An option ``--release`` is used to install a release known as CODENAME
-(default: firefly).
-
-Other options like :option:`--testing`, :option:`--dev`, :option:`--adjust-repos`,
-:option:`--no-adjust-repos`, :option:`--repo`, :option:`--local-mirror`,
-:option:`--repo-url` and :option:`--gpg-url` can also be used with this command.
-
-
-mds
----
-
-Deploy Ceph mds on remote hosts. A metadata server is needed to use CephFS and
-the ``mds`` command is used to create one on the desired host node. It uses the
-subcommand ``create`` to do so. ``create`` first gets the hostname and distro
-information of the desired mds host. It then tries to read the ``bootstrap-mds``
-key for the cluster and deploy it in the desired host. The key generally has a
-format of ``{cluster}.bootstrap-mds.keyring``. If it doesn't find a keyring,
-it runs ``gatherkeys`` to get the keyring. It then creates an mds on the desired
-host under the path ``/var/lib/ceph/mds/`` in ``/var/lib/ceph/mds/{cluster}-{name}``
-format and a bootstrap keyring under ``/var/lib/ceph/bootstrap-mds/`` in
-``/var/lib/ceph/bootstrap-mds/{cluster}.keyring`` format. It then runs appropriate
-commands based on ``distro.init`` to start the ``mds``.
-
-Usage::
-
-   ceph-deploy mds create [HOST[:DAEMON-NAME]] [HOST[:DAEMON-NAME]...]
-
-The [DAEMON-NAME] is optional.
-
-
-mon
----
-
-Deploy Ceph monitor on remote hosts. ``mon`` makes use of certain subcommands
-to deploy Ceph monitors on other nodes.
-
-Subcommand ``create-initial`` deploys monitors defined in
-``mon initial members`` under the ``[global]`` section of the Ceph configuration file,
-waits until they form quorum, and then gathers the keys, reporting the monitor status
-along the way. If monitors don't form quorum the command will eventually
-time out.
-
-Usage::
-
-   ceph-deploy mon create-initial
-
-Subcommand ``create`` is used to deploy Ceph monitors by explicitly specifying
-the hosts which are desired to be made monitors. If no hosts are specified it
-will default to using the ``mon initial members`` defined under the ``[global]``
-section of the Ceph configuration file. ``create`` first detects platform and distro
-for the desired hosts and checks if the hostname is compatible for deployment. It then
-uses the monitor keyring initially created using the ``new`` command and deploys the
-monitor on the desired host. If multiple hosts were specified during the ``new`` command,
-i.e., if there are multiple hosts in ``mon initial members`` and multiple keyrings
-were created, then a concatenated keyring is used for deployment of monitors. In
-this process a keyring parser is used which looks for ``[entity]`` sections in
-monitor keyrings and returns a list of those sections. A helper is then used to
-collect all keyrings into a single blob that will be used to inject it to monitors
-with :option:`--mkfs` on remote nodes. All keyring files are concatenated to be
-in a directory ending with ``.keyring``. During this process the helper uses the list
-of sections returned by the keyring parser to check if an entity is already present
-in a keyring and if not, adds it. The concatenated keyring is used for deployment
-of monitors to the desired multiple hosts.
-
-Usage::
-
-   ceph-deploy mon create [HOST] [HOST...]
-
-Here, [HOST] is the hostname of the desired monitor host(s).
-
-Subcommand ``add`` is used to add a monitor to an existing cluster. It first
-detects platform and distro for the desired host and checks if the hostname is compatible
-for deployment. It then uses the monitor keyring, ensures configuration for the new
-monitor host and adds the monitor to the cluster. If the section for the monitor
-exists and defines a mon addr, that will be used; otherwise it will fall back to
-resolving the hostname to an IP. If :option:`--address` is used it will override
-all other options. After adding the monitor to the cluster, it gives it some time
-to start. It then looks for any monitor errors and checks monitor status. Monitor
-errors arise if the monitor is not added in ``mon initial members``, if it doesn't
-exist in ``monmap`` and if neither ``public_addr`` nor ``public_network`` keys
-were defined for monitors. Under such conditions, monitors may not be able to
-form quorum. Monitor status tells if the monitor is up and running normally. The
-status is checked by running ``ceph daemon mon.hostname mon_status`` on the remote
-end which provides the output and returns a boolean status of what is going on.
-``False`` means the monitor is not healthy even if it is up and running, while
-``True`` means the monitor is up and running correctly.
-
-Usage::
-
-   ceph-deploy mon add [HOST]
-
-   ceph-deploy mon add [HOST] --address [IP]
-
-Here, [HOST] is the hostname and [IP] is the IP address of the desired monitor
-node. Please note, unlike other ``mon`` subcommands, only one node can be
-specified at a time.
-
-Subcommand ``destroy`` is used to completely remove monitors on remote hosts.
-It takes hostnames as arguments.
It stops the monitor, verifies that the ``ceph-mon``
-daemon really stopped, creates an archive directory ``mon-remove`` under
-``/var/lib/ceph/``, archives the old monitor directory in
-``{cluster}-{hostname}-{stamp}`` format in it and removes the monitor from the
-cluster by running the ``ceph mon remove ...`` command.
-
-Usage::
-
-   ceph-deploy mon destroy [HOST] [HOST...]
-
-Here, [HOST] is the hostname of the monitor that is to be removed.
-
-
-gatherkeys
-----------
-
-Gather authentication keys for provisioning new nodes. It takes hostnames as
-arguments. It checks for and fetches the ``client.admin`` keyring, monitor keyring
-and ``bootstrap-mds/bootstrap-osd`` keyring from the monitor host. These
-authentication keys are used when new ``monitors/OSDs/MDS`` are added to the
-cluster.
-
-Usage::
-
-   ceph-deploy gatherkeys [HOST] [HOST...]
-
-Here, [HOST] is the hostname of the monitor from where keys are to be pulled.
-
-
-disk
-----
-
-Manage disks on a remote host. It actually triggers the ``ceph-disk`` utility
-and its subcommands to manage disks.
-
-Subcommand ``list`` lists disk partitions and Ceph OSDs.
-
-Usage::
-
-   ceph-deploy disk list [HOST:[DISK]]
-
-Here, [HOST] is the hostname of the node and [DISK] is the disk name or path.
-
-Subcommand ``prepare`` prepares a directory, disk or drive for a Ceph OSD. It
-creates a GPT partition, marks the partition with the Ceph type uuid, creates a
-file system, marks the file system as ready for Ceph consumption, uses the entire
-partition and adds a new partition to the journal disk.
-
-Usage::
-
-   ceph-deploy disk prepare [HOST:[DISK]]
-
-Here, [HOST] is the hostname of the node and [DISK] is the disk name or path.
-
-Subcommand ``activate`` activates the Ceph OSD. It mounts the volume in a
-temporary location, allocates an OSD id (if needed), remounts in the correct
-location ``/var/lib/ceph/osd/$cluster-$id`` and starts ``ceph-osd``. It is
-triggered by ``udev`` when it sees the OSD GPT partition type or on ceph service
-start with ``ceph disk activate-all``.
-
-Usage::
-
-   ceph-deploy disk activate [HOST:[DISK]]
-
-Here, [HOST] is the hostname of the node and [DISK] is the disk name or path.
-
-Subcommand ``zap`` zaps/erases/destroys a device's partition table and contents.
-It actually uses ``sgdisk`` and its option ``--zap-all`` to destroy both GPT and
-MBR data structures so that the disk becomes suitable for repartitioning.
-``sgdisk`` then uses ``--mbrtogpt`` to convert the MBR or BSD disklabel disk to a
-GPT disk. The ``prepare`` subcommand can now be executed which will create a new
-GPT partition.
-
-Usage::
-
-   ceph-deploy disk zap [HOST:[DISK]]
-
-Here, [HOST] is the hostname of the node and [DISK] is the disk name or path.
-
-
-osd
----
-
-Manage OSDs by preparing a data disk on a remote host. ``osd`` makes use of certain
-subcommands for managing OSDs.
-
-Subcommand ``prepare`` prepares a directory, disk or drive for a Ceph OSD. It
-first checks against multiple OSDs getting created and warns about the
-possibility of more than the recommended number, which would cause issues with the max
-allowed PIDs in a system. It then reads the bootstrap-osd key for the cluster or
-writes the bootstrap key if not found. It then uses :program:`ceph-disk`
-utility's ``prepare`` subcommand to prepare the disk, journal and deploy the OSD
-on the desired host. Once prepared, it gives some time to the OSD to settle and
-checks for any possible errors and if found, reports to the user.
-
-Usage::
-
-   ceph-deploy osd prepare HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL]...]
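
For instance, a concrete sketch of the usage above (the host and device names are hypothetical): preparing data disk ``/dev/sdb`` on host ``osdnode1`` with its journal on ``/dev/sdc``::

   ceph-deploy osd prepare osdnode1:sdb:/dev/sdc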
-
-Subcommand ``activate`` activates the OSD prepared using the ``prepare`` subcommand.
-It actually uses :program:`ceph-disk` utility's ``activate`` subcommand with
-the appropriate init type based on distro to activate the OSD. Once activated, it
-gives some time to the OSD to start and checks for any possible errors and if
-found, reports to the user. It checks the status of the prepared OSD, checks the
-OSD tree and makes sure the OSDs are up and in.
-
-Usage::
-
-   ceph-deploy osd activate HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL]...]
-
-Subcommand ``create`` uses ``prepare`` and ``activate`` subcommands to create an
-OSD.
-
-Usage::
-
-   ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL]...]
-
-Subcommand ``list`` lists disk partitions, Ceph OSDs and prints OSD metadata.
-It gets the osd tree from a monitor host, uses the ``ceph-disk-list`` output
-and gets the mount point by matching the line where the partition mentions
-the OSD name, reads metadata from files, checks if a journal path exists,
-if the OSD is in an OSD tree and prints the OSD metadata.
-
-Usage::
-
-   ceph-deploy osd list HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL]...]
-
-
-admin
------
-
-Push configuration and ``client.admin`` key to a remote host. It takes
-the ``{cluster}.client.admin.keyring`` from the admin node and writes it under
-the ``/etc/ceph`` directory of the desired node.
-
-Usage::
-
-   ceph-deploy admin [HOST] [HOST...]
-
-Here, [HOST] is the desired host to be configured for Ceph administration.
-
-
-config
-------
-
-Push/pull a configuration file to/from a remote host. It uses the ``push`` subcommand
-to take the configuration file from the admin host and write it to the remote host under
-the ``/etc/ceph`` directory. It uses the ``pull`` subcommand to do the opposite, i.e., pull
-the configuration file under the ``/etc/ceph`` directory of the remote host to the admin node.
-
-Usage::
-
-   ceph-deploy config push [HOST] [HOST...]
-
-   ceph-deploy config pull [HOST] [HOST...]
-
-Here, [HOST] is the hostname of the node where the config file will be pushed to or
-pulled from.
-
-
-uninstall
----------
-
-Remove Ceph packages from remote hosts. It detects the platform and distro of
-the selected host and uninstalls Ceph packages from it. However, some dependencies
-like ``librbd1`` and ``librados2`` will not be removed because they can cause
-issues with ``qemu-kvm``.
-
-Usage::
-
-   ceph-deploy uninstall [HOST] [HOST...]
-
-Here, [HOST] is the hostname of the node from where Ceph will be uninstalled.
-
-
-purge
------
-
-Remove Ceph packages from remote hosts and purge all data. It detects the
-platform and distro of the selected host, uninstalls Ceph packages and purges all
-data. However, some dependencies like ``librbd1`` and ``librados2`` will not be
-removed because they can cause issues with ``qemu-kvm``.
-
-Usage::
-
-   ceph-deploy purge [HOST] [HOST...]
-
-Here, [HOST] is the hostname of the node from where Ceph will be purged.
-
-
-purgedata
----------
-
-Purge (delete, destroy, discard, shred) any Ceph data from ``/var/lib/ceph``.
-Once it detects the platform and distro of the desired host, it first checks if Ceph
-is still installed on the selected host and if installed, it won't purge data
-from it. If Ceph is already uninstalled from the host, it tries to remove the
-contents of ``/var/lib/ceph``. If it fails then probably the OSDs are still mounted
-and need to be unmounted to continue. It unmounts the OSDs and tries to remove
-the contents of ``/var/lib/ceph`` again and checks for errors. It also removes the
-contents of ``/etc/ceph``.
Once all steps are successfully completed, all the
-Ceph data from the selected host is removed.
-
-Usage::
-
-   ceph-deploy purgedata [HOST] [HOST...]
-
-Here, [HOST] is the hostname of the node from where Ceph data will be purged.
-
-
-forgetkeys
-----------
-
-Remove authentication keys from the local directory. It removes all the
-authentication keys, i.e., the monitor keyring, client.admin keyring, bootstrap-osd
-and bootstrap-mds keyrings, from the node.
-
-Usage::
-
-   ceph-deploy forgetkeys
-
-
-pkg
----
-
-Manage packages on remote hosts. It is used for installing or removing packages
-from remote hosts. The package names for installation or removal are to be
-specified after the command. Two options :option:`--install` and
-:option:`--remove` are used for this purpose.
-
-Usage::
-
-   ceph-deploy pkg --install [PKGs] [HOST] [HOST...]
-
-   ceph-deploy pkg --remove [PKGs] [HOST] [HOST...]
-
-Here, [PKGs] is a comma-separated list of package names and [HOST] is the hostname of the
-remote node where packages are to be installed or removed from.
-
-
-calamari
---------
-
-Install and configure Calamari nodes. It first checks if the distro is supported
-for Calamari installation by ceph-deploy. An argument ``connect`` is used for
-installation and configuration. It checks for the ``ceph-deploy`` configuration
-file (cd_conf) and the Calamari release repo or ``calamari-minion`` repo. It relies
-on defaults for repo installation as it doesn't install Ceph unless specified
-otherwise. An ``options`` dictionary is also defined because ``ceph-deploy``
-pops items internally which causes issues when those items are needed to be
-available for every host. If the distro is Debian/Ubuntu, it is ensured that
-the proxy is disabled for the ``calamari-minion`` repo. The ``calamari-minion`` package is
-then installed and custom repository files are added. The minion config is placed
-prior to installation so that it is present when the minion first starts. The
-config directory and calamari salt config are created, and the salt-minion package
-is installed. If the distro is Red Hat/CentOS, the salt-minion service needs to
-be started.
-
-Usage::
-
-   ceph-deploy calamari {connect} [HOST] [HOST...]
-
-Here, [HOST] is the hostname where Calamari is to be installed.
-
-An option ``--release`` can be used to use a given release from repositories
-defined in :program:`ceph-deploy`'s configuration. Defaults to ``calamari-minion``.
-
-Another option :option:`--master` can also be used with this command.
-
-Options
-=======
-
-.. option:: --address
-
-   IP address of the host node to be added to the cluster.
-
-.. option:: --adjust-repos
-
-   Install packages modifying source repos.
-
-.. option:: --ceph-conf
-
-   Use (or reuse) a given ``ceph.conf`` file.
-
-.. option:: --cluster
-
-   Name of the cluster.
-
-.. option:: --dev
-
-   Install a bleeding-edge build from a Git branch or tag (default: master).
-
-.. option:: --cluster-network
-
-   Specify the (internal) cluster network.
-
-.. option:: --dmcrypt
-
-   Encrypt [data-path] and/or journal devices with ``dm-crypt``.
-
-.. option:: --dmcrypt-key-dir
-
-   Directory where ``dm-crypt`` keys are stored.
-
-.. option:: --install
-
-   Comma-separated package(s) to install on remote hosts.
-
-.. option:: --fs-type
-
-   Filesystem to use to format disk ``(xfs, btrfs or ext4)``. Note that support for btrfs and ext4 is no longer tested or recommended; please use xfs.
-
-.. option:: --fsid
-
-   Provide an alternate FSID for ``ceph.conf`` generation.
-
-.. option:: --gpg-url
-
-   Specify a GPG key url to be used with custom repos (defaults to ceph.com).
-
-.. option:: --keyrings
-
-   Concatenate multiple keyrings to be seeded on new monitors.
-
-.. option:: --local-mirror
-
-   Fetch packages and push them to hosts for a local repo mirror.
-
-.. option:: --master
-
-   The domain for the Calamari master server.
-
-.. option:: --mkfs
-
-   Inject keys to MONs on remote nodes.
-
-.. option:: --no-adjust-repos
-
-   Install packages without modifying source repos.
-
-.. option:: --no-ssh-copykey
-
-   Do not attempt to copy ssh keys.
-
-.. option:: --overwrite-conf
-
-   Overwrite an existing conf file on remote host (if present).
-
-.. option:: --public-network
-
-   Specify the public network for a cluster.
-
-.. option:: --remove
-
-   Comma-separated package(s) to remove from remote hosts.
-
-.. option:: --repo
-
-   Install repo files only (skips package installation).
-
-.. option:: --repo-url
-
-   Specify a repo url that mirrors/contains Ceph packages.
-
-.. option:: --testing
-
-   Install the latest development release.
-
-.. option:: --username
-
-   The username to connect to the remote host.
-
-.. option:: --version
-
-   The current installed version of :program:`ceph-deploy`.
-
-.. option:: --zap-disk
-
-   Destroy the partition table and content of a disk.
-
-
-Availability
-============
-
-:program:`ceph-deploy` is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the documentation at http://ceph.com/ceph-deploy/docs for more information.
-
-
-See also
-========
-
-:doc:`ceph-mon <ceph-mon>`\(8),
-:doc:`ceph-osd <ceph-osd>`\(8),
-:doc:`ceph-disk <ceph-disk>`\(8),
-:doc:`ceph-mds <ceph-mds>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-detect-init.rst b/src/ceph/doc/man/8/ceph-detect-init.rst
deleted file mode 100644
index c409a94..0000000
--- a/src/ceph/doc/man/8/ceph-detect-init.rst
+++ /dev/null
@@ -1,65 +0,0 @@
-:orphan:
-
-============================================================
- ceph-detect-init -- display the init system Ceph should use
-============================================================
-
-.. program:: ceph-detect-init
-
-Synopsis
-========
-
-| **ceph-detect-init** [--verbose] [--use-rhceph] [--default *init*]
-
-Description
-===========
-
-:program:`ceph-detect-init` is a utility that prints the init system
-Ceph uses. It can be one of ``sysvinit``, ``upstart`` or ``systemd``.
-The init system Ceph uses may not be the default init system of the
-host operating system. For instance on Debian Jessie, Ceph may use
-``sysvinit`` although ``systemd`` is the default.
-
-If the init system of the host operating system is unknown, it returns
-an error, unless :option:`--default` is specified.
-
-Options
-=======
-
-.. option:: --use-rhceph
-
-   When an operating system identifies itself as Red Hat, it is
-   treated as if it was CentOS. With :option:`--use-rhceph` it is
-   treated as RHEL instead.
-
-.. option:: --default INIT
-
-   If the init system of the host operating system is unknown, return
-   the value of *INIT* instead of failing with an error.
-
-.. option:: --verbose
-
-   Display additional information for debugging.
-
-Bugs
-====
-
-:program:`ceph-detect-init` is used by :program:`ceph-disk` to figure out the init system to manage the mount directory of an OSD.
But only the following combinations are fully tested:
-
-- `upstart` on `Ubuntu 14.04`
-- `systemd` on `Ubuntu 15.04` and up
-- `systemd` on `Debian 8` and up
-- `systemd` on `RHEL/CentOS 7` and up
-- `systemd` on `Fedora 22` and up
-
-Availability
-============
-
-:program:`ceph-detect-init` is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-See also
-========
-
-:doc:`ceph-disk <ceph-disk>`\(8),
-:doc:`ceph-deploy <ceph-deploy>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-disk.rst b/src/ceph/doc/man/8/ceph-disk.rst
deleted file mode 100644
index 4635937..0000000
--- a/src/ceph/doc/man/8/ceph-disk.rst
+++ /dev/null
@@ -1,97 +0,0 @@
-:orphan:
-
-===================================================================
- ceph-disk -- Ceph disk utility for OSD
-===================================================================
-
-.. program:: ceph-disk
-
-Synopsis
-========
-
-| **ceph-disk** [-h] [-v] [--log-stdout] [--prepend-to-path PATH]
-|               [--statedir PATH] [--sysconfdir PATH]
-|               [--setuser USER] [--setgroup GROUP]
-|               ...
-
-optional arguments
-------------------
-
--h, --help            show this help message and exit
--v, --verbose         be more verbose
---log-stdout          log to stdout
---prepend-to-path PATH
-                      prepend PATH to $PATH for backward compatibility (default /usr/bin)
---statedir PATH       directory in which ceph state is preserved (default /var/lib/ceph)
---sysconfdir PATH     directory in which ceph configuration files are found (default /etc/ceph)
---setuser USER        use the given user for subprocesses, rather than ceph or root
---setgroup GROUP      use the given group for subprocesses, rather than ceph or root
-
-subcommands
------------
-
-prepare
-    Prepare a directory or disk for a Ceph OSD
-activate
-    Activate a Ceph OSD
-activate-lockbox
-    Activate a Ceph lockbox
-activate-block
-    Activate an OSD via its block device
-activate-journal
-    Activate an OSD via its journal device
-activate-all
-    Activate all tagged OSD partitions
-list
-    List disks, partitions, and Ceph OSDs
-suppress-activate
-    Suppress activate on a device (prefix)
-unsuppress-activate
-    Stop suppressing activate on a device (prefix)
-deactivate
-    Deactivate a Ceph OSD
-destroy
-    Destroy a Ceph OSD
-zap
-    Zap/erase/destroy a device's partition table (and contents)
-trigger
-    Trigger an event (called by udev)
-fix
-    Fix SELinux labels and/or file permissions
-
-Description
-===========
-
-:program:`ceph-disk` is a utility that can prepare and activate a disk, partition or
-directory as a Ceph OSD. It is run directly or triggered by :program:`ceph-deploy`
-or ``udev``. It can also be triggered by other deployment utilities like ``Chef``,
-``Juju``, ``Puppet`` etc.
-
-It actually automates the multiple steps involved in manual creation and start
-of an OSD into two steps of preparing and activating the OSD by using the
-subcommands ``prepare`` and ``activate``.
-
-:program:`ceph-disk` also automates the multiple steps involved to manually stop
-and destroy an OSD into two steps of deactivating and destroying the OSD by using
-the subcommands ``deactivate`` and ``destroy``.
-
-The documentation for each subcommand (prepare, activate, etc.) can be displayed
-with its ``--help`` option. For instance ``ceph-disk prepare --help``.
-
-Bugs
-====
-
-See also the ``Bugs`` section in :doc:`ceph-detect-init <ceph-detect-init>`\(8).
-
-Availability
-============
-
-:program:`ceph-disk` is part of Ceph, a massively scalable, open-source, distributed storage system.
Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-See also
-========
-
-:doc:`ceph-detect-init <ceph-detect-init>`\(8),
-:doc:`ceph-osd <ceph-osd>`\(8),
-:doc:`ceph-deploy <ceph-deploy>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-fuse.rst b/src/ceph/doc/man/8/ceph-fuse.rst
deleted file mode 100644
index cede60e..0000000
--- a/src/ceph/doc/man/8/ceph-fuse.rst
+++ /dev/null
@@ -1,64 +0,0 @@
-:orphan:
-
-=========================================
- ceph-fuse -- FUSE-based client for ceph
-=========================================
-
-.. program:: ceph-fuse
-
-Synopsis
-========
-
-| **ceph-fuse** [ -m *monaddr*:*port* ] *mountpoint* [ *fuse options* ]
-
-
-Description
-===========
-
-**ceph-fuse** is a FUSE (File system in USErspace) client for the Ceph
-distributed file system. It will mount a ceph file system (specified
-via the -m option or as described by ceph.conf; see below) at the
-specified mount point.
-
-The file system can be unmounted with::
-
-   fusermount -u mountpoint
-
-or by sending ``SIGINT`` to the ``ceph-fuse`` process.
-
-
-Options
-=======
-
-Any options not recognized by ceph-fuse will be passed on to libfuse.
-
-.. option:: -d
-
-   Detach from console and daemonize after startup.
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
-   Use *ceph.conf* configuration file instead of the default
-   ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
-
-.. option:: -m monaddress[:port]
-
-   Connect to specified monitor (instead of looking through ceph.conf).
-
-.. option:: -r root_directory
-
-   Use root_directory as the mounted root, rather than the full Ceph tree.
-
-
-Availability
-============
-
-**ceph-fuse** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-fusermount(8),
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-kvstore-tool.rst b/src/ceph/doc/man/8/ceph-kvstore-tool.rst
deleted file mode 100644
index 4baa492..0000000
--- a/src/ceph/doc/man/8/ceph-kvstore-tool.rst
+++ /dev/null
@@ -1,85 +0,0 @@
-:orphan:
-
-=====================================================
- ceph-kvstore-tool -- ceph kvstore manipulation tool
-=====================================================
-
-.. program:: ceph-kvstore-tool
-
-Synopsis
-========
-
-| **ceph-kvstore-tool** *command* [args...]
-
-
-Description
-===========
-
-:program:`ceph-kvstore-tool` is a kvstore manipulation tool. It allows users to manipulate
-leveldb/rocksdb's data (like OSD's omap) offline.
-
-Commands
-========
-
-:program:`ceph-kvstore-tool` utility uses many commands for debugging purposes,
-which are as follows:
-
-:command:`list [prefix]`
-    Print the key of all KV pairs stored with the URL encoded prefix.
-
-:command:`list-crc [prefix]`
-    Print the CRC of all KV pairs stored with the URL encoded prefix.
-
-:command:`exists <prefix> [<key>]`
-    Check if there is any KV pair stored with the URL encoded prefix. If key
-    is also specified, check for the key with the prefix instead.
-
-:command:`get <prefix> <key> [out <file>]`
-    Get the value of the KV pair stored with the URL encoded prefix and key.
-    If file is also specified, write the value to the file.
-
-:command:`crc <prefix> <key>`
-    Get the CRC of the KV pair stored with the URL encoded prefix and key.
-
-:command:`get-size [<prefix> <key>]`
-    Get the estimated store size or the size of the value specified by prefix and key.
-
-:command:`set <prefix> <key> [ver <N>|in <file>]`
-    Set the value of the KV pair stored with the URL encoded prefix and key.
-    The value could be *version_t* or text.
-
-:command:`rm <prefix> <key>`
-    Remove the KV pair stored with the URL encoded prefix and key.
-
-:command:`rm-prefix <prefix>`
-    Remove all KV pairs stored with the URL encoded prefix.
-
-:command:`store-copy <path> [num-keys-per-tx]`
-    Copy all KV pairs to another directory specified by ``path``.
-    [num-keys-per-tx] is the number of KV pairs copied for a transaction.
-
-:command:`store-crc <path>`
-    Store the CRC of all KV pairs to a file specified by ``path``.
-
-:command:`compact`
-    Subcommand ``compact`` is used to compact all data of kvstore. It will open
-    the database, and trigger a database's compaction. After compaction, some
-    disk space may be released.
-
-:command:`compact-prefix <prefix>`
-    Compact all entries specified by the URL encoded prefix.
-
-:command:`compact-range <prefix> <start> <end>`
-    Compact some entries specified by the URL encoded prefix and range.
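
As a hedged illustration of the commands above (the store path and prefix are hypothetical, and this assumes the invocation style in which the store type and path precede the command), listing the keys of an offline OSD omap might look like::

   ceph-kvstore-tool rocksdb /var/lib/ceph/osd/ceph-0/current/omap list _HOBJTOSEQ_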
- -:command:`rm ` - Remove the KV pair stored with the URL encoded prefix and key. - -:command:`rm-prefix ` - Remove all KV pairs stored with the URL encoded prefix. - -:command:`store-copy [num-keys-per-tx]` - Copy all KV pairs to another directory specified by ``path``. - [num-keys-per-tx] is the number of KV pairs copied for a transaction. - -:command:`store-crc ` - Store CRC of all KV pairs to a file specified by ``path``. - -:command:`compact` - Subcommand ``compact`` is used to compact all data of kvstore. It will open - the database, and trigger a database's compaction. After compaction, some - disk space may be released. - -:command:`compact-prefix ` - Compact all entries specified by the URL encoded prefix. - -:command:`compact-range ` - Compact some entries specified by the URL encoded prefix and range. - -Availability -============ - -**ceph-kvstore-tool** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to -the Ceph documentation at http://ceph.com/docs for more information. - - -See also -======== - -:doc:`ceph `\(8) diff --git a/src/ceph/doc/man/8/ceph-mds.rst b/src/ceph/doc/man/8/ceph-mds.rst deleted file mode 100644 index b17fd70..0000000 --- a/src/ceph/doc/man/8/ceph-mds.rst +++ /dev/null @@ -1,87 +0,0 @@ -:orphan: - -========================================= - ceph-mds -- ceph metadata server daemon -========================================= - -.. program:: ceph-mds - -Synopsis -======== - -| **ceph-mds** -i *name* [ --hot-standby [*rank*] ] - - -Description -=========== - -**ceph-mds** is the metadata server daemon for the Ceph distributed file -system. One or more instances of ceph-mds collectively manage the file -system namespace, coordinating access to the shared OSD cluster. - -Each ceph-mds daemon instance should have a unique name. The name is used -to identify daemon instances in the ceph.conf. - -Once the daemon has started, the monitor cluster will normally assign -it a logical rank, or put it in a standby pool to take over for -another daemon that crashes. Some of the specified options can cause -other behaviors. - -If you specify hot-standby, you must either specify the rank on the command -line, or specify one of the mds_standby_for_[rank|name] parameters in the -config. The command line specification overrides the config, and specifying -the rank overrides specifying the name. - - -Options -======= - -.. option:: -f, --foreground - - Foreground: do not daemonize after startup (run in foreground). Do - not generate a pid file. Useful when run via :doc:`ceph-run - `\(8). - -.. option:: -d - - Debug mode: like ``-f``, but also send all log output to stderr. - -.. option:: --setuser userorgid - - Set uid after starting. If a username is specified, the user - record is looked up to get a uid and a gid, and the gid is also set - as well, unless --setgroup is also specified. - -.. option:: --setgroup grouporgid - - Set gid after starting. If a group name is specified the group - record is looked up to get a gid. - -.. option:: -c ceph.conf, --conf=ceph.conf - - Use *ceph.conf* configuration file instead of the default - ``/etc/ceph/ceph.conf`` to determine monitor addresses during - startup. - -.. option:: -m monaddress[:port] - - Connect to specified monitor (instead of looking through - ``ceph.conf``). - -.. option:: --hot-standby - - Start as a hot standby for MDS . - -Availability -============ - -**ceph-mds** is part of Ceph, a massively scalable, open-source, distributed storage system. 
Please refer to the Ceph documentation at -http://ceph.com/docs for more information. - - -See also -======== - -:doc:`ceph `\(8), -:doc:`ceph-mon `\(8), -:doc:`ceph-osd `\(8) diff --git a/src/ceph/doc/man/8/ceph-mon.rst b/src/ceph/doc/man/8/ceph-mon.rst deleted file mode 100644 index 7a2cd03..0000000 --- a/src/ceph/doc/man/8/ceph-mon.rst +++ /dev/null @@ -1,94 +0,0 @@ -:orphan: - -================================= - ceph-mon -- ceph monitor daemon -================================= - -.. program:: ceph-mon - -Synopsis -======== - -| **ceph-mon** -i *monid* [ --mon-data *mondatapath* ] - - -Description -=========== - -**ceph-mon** is the cluster monitor daemon for the Ceph distributed -file system. One or more instances of **ceph-mon** form a Paxos -part-time parliament cluster that provides extremely reliable and -durable storage of cluster membership, configuration, and state. - -The *mondatapath* refers to a directory on a local file system storing -monitor data. It is normally specified via the ``mon data`` option in -the configuration file. - -Options -======= - -.. option:: -f, --foreground - - Foreground: do not daemonize after startup (run in foreground). Do - not generate a pid file. Useful when run via :doc:`ceph-run `\(8). - -.. option:: -d - - Debug mode: like ``-f``, but also send all log output to stderr. - -.. option:: --setuser userorgid - - Set uid after starting. If a username is specified, the user - record is looked up to get a uid and a gid, and the gid is also set - as well, unless --setgroup is also specified. - -.. option:: --setgroup grouporgid - - Set gid after starting. If a group name is specified the group - record is looked up to get a gid. - -.. option:: -c ceph.conf, --conf=ceph.conf - - Use *ceph.conf* configuration file instead of the default - ``/etc/ceph/ceph.conf`` to determine monitor addresses during - startup. - -.. option:: --mkfs - - Initialize the ``mon data`` directory with seed information to form - and initial ceph file system or to join an existing monitor - cluster. Three pieces of information must be provided: - - - The cluster fsid. This can come from a monmap (``--monmap ``) or - explicitly via ``--fsid ``. - - A list of monitors and their addresses. This list of monitors - can come from a monmap (``--monmap ``), the ``mon host`` - configuration value (in *ceph.conf* or via ``-m - host1,host2,...``), or ``mon addr`` lines in *ceph.conf*. If this - monitor is to be part of the initial monitor quorum for a new - Ceph cluster, then it must be included in the initial list, - matching either the name or address of a monitor in the list. - When matching by address, either the ``public addr`` or ``public - subnet`` options may be used. - - The monitor secret key ``mon.``. This must be included in the - keyring provided via ``--keyring ``. - -.. option:: --keyring - - Specify a keyring for use with ``--mkfs``. - - -Availability -============ - -**ceph-mon** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer -to the Ceph documentation at http://ceph.com/docs for more -information. - - -See also -======== - -:doc:`ceph `\(8), -:doc:`ceph-mds `\(8), -:doc:`ceph-osd `\(8) diff --git a/src/ceph/doc/man/8/ceph-osd.rst b/src/ceph/doc/man/8/ceph-osd.rst deleted file mode 100644 index 388e339..0000000 --- a/src/ceph/doc/man/8/ceph-osd.rst +++ /dev/null @@ -1,134 +0,0 @@ -:orphan: - -======================================== - ceph-osd -- ceph object storage daemon -======================================== - -.. 
program:: ceph-osd - -Synopsis -======== - -| **ceph-osd** -i *osdnum* [ --osd-data *datapath* ] [ --osd-journal - *journal* ] [ --mkfs ] [ --mkjournal ] [--flush-journal] [--check-allows-journal] [--check-wants-journal] [--check-needs-journal] [ --mkkey ] - - -Description -=========== - -**ceph-osd** is the object storage daemon for the Ceph distributed file -system. It is responsible for storing objects on a local file system -and providing access to them over the network. - -The datapath argument should be a directory on a xfs file system -where the object data resides. The journal is optional, and is only -useful performance-wise when it resides on a different disk than -datapath with low latency (ideally, an NVRAM device). - - -Options -======= - -.. option:: -f, --foreground - - Foreground: do not daemonize after startup (run in foreground). Do - not generate a pid file. Useful when run via :doc:`ceph-run `\(8). - -.. option:: -d - - Debug mode: like ``-f``, but also send all log output to stderr. - -.. option:: --setuser userorgid - - Set uid after starting. If a username is specified, the user - record is looked up to get a uid and a gid, and the gid is also set - as well, unless --setgroup is also specified. - -.. option:: --setgroup grouporgid - - Set gid after starting. If a group name is specified the group - record is looked up to get a gid. - -.. option:: --osd-data osddata - - Use object store at *osddata*. - -.. option:: --osd-journal journal - - Journal updates to *journal*. - -.. option:: --check-wants-journal - - Check whether a journal is desired. - -.. option:: --check-allows-journal - - Check whether a journal is allowed. - -.. option:: --check-needs-journal - - Check whether a journal is required. - -.. option:: --mkfs - - Create an empty object repository. This also initializes the journal - (if one is defined). - -.. option:: --mkkey - - Generate a new secret key. This is normally used in combination - with ``--mkfs`` as it is more convenient than generating a key by - hand with :doc:`ceph-authtool `\(8). - -.. option:: --mkjournal - - Create a new journal file to match an existing object repository. - This is useful if the journal device or file is wiped out due to a - disk or file system failure. - -.. option:: --flush-journal - - Flush the journal to permanent store. This runs in the foreground - so you know when it's completed. This can be useful if you want to - resize the journal or need to otherwise destroy it: this guarantees - you won't lose data. - -.. option:: --get-cluster-fsid - - Print the cluster fsid (uuid) and exit. - -.. option:: --get-osd-fsid - - Print the OSD's fsid and exit. The OSD's uuid is generated at - --mkfs time and is thus unique to a particular instantiation of - this OSD. - -.. option:: --get-journal-fsid - - Print the journal's uuid. The journal fsid is set to match the OSD - fsid at --mkfs time. - -.. option:: -c ceph.conf, --conf=ceph.conf - - Use *ceph.conf* configuration file instead of the default - ``/etc/ceph/ceph.conf`` for runtime configuration options. - -.. option:: -m monaddress[:port] - - Connect to specified monitor (instead of looking through - ``ceph.conf``). - - -Availability -============ - -**ceph-osd** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to -the Ceph documentation at http://ceph.com/docs for more information. 
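Example
=======

As a concrete sketch of the ``--mkfs`` and ``--mkkey`` options described
above, a new OSD might be initialized and started as follows. The OSD id
``0``, the keyring path and the ``/var/lib/ceph`` layout are illustrative
assumptions, not requirements::

    # create an empty object repository and generate a fresh secret key
    ceph-osd -i 0 --mkfs --mkkey

    # register the new key with the monitors (conventional default path)
    ceph auth add osd.0 osd 'allow *' mon 'allow rwx' \
        -i /var/lib/ceph/osd/ceph-0/keyring

    # run the daemon in the foreground, e.g. under ceph-run(8)
    ceph-osd -i 0 -f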
- -See also -======== - -:doc:`ceph `\(8), -:doc:`ceph-mds `\(8), -:doc:`ceph-mon `\(8), -:doc:`ceph-authtool `\(8) diff --git a/src/ceph/doc/man/8/ceph-post-file.rst b/src/ceph/doc/man/8/ceph-post-file.rst deleted file mode 100644 index 7e4899f..0000000 --- a/src/ceph/doc/man/8/ceph-post-file.rst +++ /dev/null @@ -1,71 +0,0 @@ -:orphan: - -================================================== - ceph-post-file -- post files for ceph developers -================================================== - -.. program:: ceph-post-file - -Synopsis -======== - -| **ceph-post-file** [-d *description] [-u *user*] *file or dir* ... - - -Description -=========== - -**ceph-post-file** will upload files or directories to ceph.com for -later analysis by Ceph developers. - -Each invocation uploads files or directories to a separate directory -with a unique tag. That tag can be passed to a developer or -referenced in a bug report (http://tracker.ceph.com/). Once the -upload completes, the directory is marked non-readable and -non-writeable to prevent access or modification by other users. - -Warning -======= - -Basic measures are taken to make posted data be visible only to -developers with access to ceph.com infrastructure. However, users -should think twice and/or take appropriate precautions before -posting potentially sensitive data (for example, logs or data -directories that contain Ceph secrets). - - -Options -======= - -.. option:: -d *description*, --description *description* - - Add a short description for the upload. This is a good opportunity - to reference a bug number. There is no default value. - -.. option:: -u *user* - - Set the user metadata for the upload. This defaults to `whoami`@`hostname -f`. - -Examples -======== - -To upload a single log:: - - ceph-post-file /var/log/ceph/ceph-mon.`hostname`.log - -To upload several directories:: - - ceph-post-file -d 'mon data directories' /var/log/ceph/mon/* - - -Availability -============ - -**ceph-post-file** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to -the Ceph documentation at http://ceph.com/docs for more information. - -See also -======== - -:doc:`ceph `\(8), -:doc:`ceph-debugpack `\(8), diff --git a/src/ceph/doc/man/8/ceph-rbdnamer.rst b/src/ceph/doc/man/8/ceph-rbdnamer.rst deleted file mode 100644 index 123c6e2..0000000 --- a/src/ceph/doc/man/8/ceph-rbdnamer.rst +++ /dev/null @@ -1,41 +0,0 @@ -:orphan: - -================================================== - ceph-rbdnamer -- udev helper to name RBD devices -================================================== - -.. program:: ceph-rbdnamer - - -Synopsis -======== - -| **ceph-rbdnamer** *num* - - -Description -=========== - -**ceph-rbdnamer** prints the pool and image name for the given RBD devices -to stdout. It is used by `udev` (using a rule like the one below) to -set up a device symlink. - - -:: - - KERNEL=="rbd[0-9]*", PROGRAM="/usr/bin/ceph-rbdnamer %n", SYMLINK+="rbd/%c{1}/%c{2}" - - -Availability -============ - -**ceph-rbdnamer** is part of Ceph, a massively scalable, open-source, distributed storage system. Please -refer to the Ceph documentation at http://ceph.com/docs for more -information. 
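Example
=======

To illustrate the udev rule above: for a hypothetical device ``rbd0`` backed
by image ``myimage`` in pool ``mypool``, udev invokes the helper, splits the
pool and image name from its output, and creates a stable symlink::

    $ /usr/bin/ceph-rbdnamer 0
    mypool myimage

    # udev then creates /dev/rbd/mypool/myimage -> /dev/rbd0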
- - -See also -======== - -:doc:`rbd `\(8), -:doc:`ceph `\(8) diff --git a/src/ceph/doc/man/8/ceph-rest-api.rst b/src/ceph/doc/man/8/ceph-rest-api.rst deleted file mode 100644 index 9864a9b..0000000 --- a/src/ceph/doc/man/8/ceph-rest-api.rst +++ /dev/null @@ -1,150 +0,0 @@ -:orphan: - -===================================================== - ceph-rest-api -- ceph RESTlike administration server -===================================================== - -.. program:: ceph-rest-api - -Synopsis -======== - -| **ceph-rest-api** [ -c *conffile* ] [--cluster *clustername* ] [ -n *name* ] [-i *id* ] - - -Description -=========== - -**ceph-rest-api** is a WSGI application that can run as a -standalone web service or run under a web server that supports -WSGI. It provides much of the functionality of the **ceph** -command-line tool through an HTTP-accessible interface. - -Options -======= - -.. option:: -c/--conf conffile - - names the ceph.conf file to use for configuration. If -c is not - specified, the default depends on the state of the --cluster option - (default 'ceph'; see below). The configuration file is searched - for in this order: - - * $CEPH_CONF - * /etc/ceph/${cluster}.conf - * ~/.ceph/${cluster}.conf - * ${cluster}.conf (in the current directory) - - so you can also pass this option in the environment as CEPH_CONF. - -.. option:: --cluster clustername - - set *clustername* for use in the $cluster metavariable, for - locating the ceph.conf file. The default is 'ceph'. - -.. option:: -n/--name name - - specifies the client 'name', which is used to find the - client-specific configuration options in the config file, and - also is the name used for authentication when connecting - to the cluster (the entity name appearing in 'ceph auth ls' output, - for example). The default is 'client.restapi'. - -.. option:: -i/--id id - - specifies the client 'id', which will form the clientname - as 'client.' if clientname is not set. If -n/-name is - set, that takes precedence. - - Also, global Ceph options are supported. - - -Configuration parameters -======================== - -Supported configuration parameters include: - -* **keyring** the keyring file holding the key for 'clientname' -* **public addr** ip:port to listen on (default 0.0.0.0:5000) -* **log file** (usual Ceph default) -* **restapi base url** the base URL to answer requests on (default /api/v0.1) -* **restapi log level** critical, error, warning, info, debug (default warning) - -Configuration parameters are searched in the standard order: -first in the section named '', then 'client', then 'global'. - - is either supplied by -n/--name, "client." where - is supplied by -i/--id, or 'client.restapi' if neither option -is present. - -A single-threaded server will run on **public addr** if the ceph-rest-api -executed directly; otherwise, configuration is specified by the enclosing -WSGI web server. - -Commands -======== - -Commands are submitted with HTTP GET requests (for commands that -primarily return data) or PUT (for commands that affect cluster state). -HEAD and OPTIONS are also supported. Standard HTTP status codes -are returned. - -For commands that return bulk data, the request can include -Accept: application/json or Accept: application/xml to select the -desired structured output, or you may use a .json or .xml addition -to the requested PATH. Parameters are supplied as query parameters -in the request; for parameters that take more than one value, repeat -the key=val construct. 
For instance, to remove OSDs 2 and 3, -send a PUT request to ``osd/rm?ids=2&ids=3``. - -Discovery -========= - -Human-readable discovery of supported commands and parameters, along -with a small description of each command, is provided when the requested -path is incomplete/partially matching. Requesting / will redirect to -the value of **restapi base url**, and that path will give a full list -of all known commands. -For example, requesting ``api/vX.X/mon`` will return the list of API calls for -monitors - ``api/vX.X/osd`` will return the list of API calls for OSD and so on. - -The command set is very similar to the commands -supported by the **ceph** tool. One notable exception is that the -``ceph pg `` style of commands is supported here -as ``tell//command?args``. - -Deployment as WSGI application -============================== - -When deploying as WSGI application (say, with Apache/mod_wsgi, -or nginx/uwsgi, or gunicorn, etc.), use the ``ceph_rest_api.py`` module -(``ceph-rest-api`` is a thin layer around this module). The standalone web -server is of course not used, so address/port configuration is done in -the WSGI server. Use a python .wsgi module or the equivalent to call -``app = generate_app(conf, cluster, clientname, clientid, args)`` where: - -* conf is as -c/--conf above -* cluster is as --cluster above -* clientname, -n/--name -* clientid, -i/--id, and -* args are any other generic Ceph arguments - -When app is returned, it will have attributes 'ceph_addr' and 'ceph_port' -set to what the address and port are in the Ceph configuration; -those may be used for the server, or ignored. - -Any errors reading configuration or connecting to the cluster cause an -exception to be raised; see your WSGI server documentation for how to -see those messages in case of problem. - -Availability -============ - -**ceph-rest-api** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to the Ceph documentation at -http://ceph.com/docs for more information. - - -See also -======== - -:doc:`ceph `\(8) diff --git a/src/ceph/doc/man/8/ceph-run.rst b/src/ceph/doc/man/8/ceph-run.rst deleted file mode 100644 index ed76c28..0000000 --- a/src/ceph/doc/man/8/ceph-run.rst +++ /dev/null @@ -1,45 +0,0 @@ -:orphan: - -========================================= - ceph-run -- restart daemon on core dump -========================================= - -.. program:: ceph-run - -Synopsis -======== - -| **ceph-run** *command* ... - - -Description -=========== - -**ceph-run** is a simple wrapper that will restart a daemon if it exits -with a signal indicating it crashed and possibly core dumped (that is, -signals 3, 4, 5, 6, 8, or 11). - -The command should run the daemon in the foreground. For Ceph daemons, -that means the ``-f`` option. - - -Options -======= - -None - - -Availability -============ - -**ceph-run** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to -the Ceph documentation at http://ceph.com/docs for more information. - - -See also -======== - -:doc:`ceph `\(8), -:doc:`ceph-mon `\(8), -:doc:`ceph-mds `\(8), -:doc:`ceph-osd `\(8) diff --git a/src/ceph/doc/man/8/ceph-syn.rst b/src/ceph/doc/man/8/ceph-syn.rst deleted file mode 100644 index a30c460..0000000 --- a/src/ceph/doc/man/8/ceph-syn.rst +++ /dev/null @@ -1,99 +0,0 @@ -:orphan: - -=============================================== - ceph-syn -- ceph synthetic workload generator -=============================================== - -.. 
program:: ceph-syn - -Synopsis -======== - -| **ceph-syn** [ -m *monaddr*:*port* ] --syn *command* *...* - - -Description -=========== - -**ceph-syn** is a simple synthetic workload generator for the Ceph -distributed file system. It uses the userspace client library to -generate simple workloads against a currently running file system. The -file system need not be mounted via ceph-fuse(8) or the kernel client. - -One or more ``--syn`` command arguments specify the particular -workload, as documented below. - - -Options -======= - -.. option:: -d - - Detach from console and daemonize after startup. - -.. option:: -c ceph.conf, --conf=ceph.conf - - Use *ceph.conf* configuration file instead of the default - ``/etc/ceph/ceph.conf`` to determine monitor addresses during - startup. - -.. option:: -m monaddress[:port] - - Connect to specified monitor (instead of looking through - ``ceph.conf``). - -.. option:: --num_client num - - Run num different clients, each in a separate thread. - -.. option:: --syn workloadspec - - Run the given workload. May be specified as many times as - needed. Workloads will normally run sequentially. - - -Workloads -========= - -Each workload should be preceded by ``--syn`` on the command -line. This is not a complete list. - -:command:`mksnap` *path* *snapname* - Create a snapshot called *snapname* on *path*. - -:command:`rmsnap` *path* *snapname* - Delete snapshot called *snapname* on *path*. - -:command:`rmfile` *path* - Delete/unlink *path*. - -:command:`writefile` *sizeinmb* *blocksize* - Create a file, named after our client id, that is *sizeinmb* MB by - writing *blocksize* chunks. - -:command:`readfile` *sizeinmb* *blocksize* - Read a file, named after our client id, that is *sizeinmb* MB by - reading *blocksize* chunks. - -:command:`rw` *sizeinmb* *blocksize* - Write file, then read it back, as above. - -:command:`makedirs` *numsubdirs* *numfiles* *depth* - Create a hierarchy of directories that is *depth* levels deep. Give - each directory *numsubdirs* subdirectories and *numfiles* files. - -:command:`walk` - Recursively walk the file system (like find). - - -Availability -============ - -**ceph-syn** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to -the Ceph documentation at http://ceph.com/docs for more information. - -See also -======== - -:doc:`ceph <ceph>`\(8), -:doc:`ceph-fuse <ceph-fuse>`\(8) diff --git a/src/ceph/doc/man/8/ceph-volume-systemd.rst b/src/ceph/doc/man/8/ceph-volume-systemd.rst deleted file mode 100644 index 1a7b481..0000000 --- a/src/ceph/doc/man/8/ceph-volume-systemd.rst +++ /dev/null @@ -1,56 +0,0 @@ -:orphan: - -======================================================= - ceph-volume-systemd -- systemd ceph-volume helper tool -======================================================= - -.. program:: ceph-volume-systemd - -Synopsis -======== - -| **ceph-volume-systemd** *systemd instance name* - - -Description -=========== -:program:`ceph-volume-systemd` is a systemd helper tool that receives input -from (dynamically created) systemd units so that activation of OSDs can -proceed. - -It translates the input into a ``ceph-volume`` call, for activation -purposes only.
- - -Examples -======== -Its input is the ``systemd instance name`` (represented by ``%i`` in a systemd -unit), and it should be in the following format:: - - <sub command>-<extra metadata> - -In the case of ``lvm`` a call could look like:: - - /usr/bin/ceph-volume-systemd lvm-0-8715BEB4-15C5-49DE-BA6F-401086EC7B41 - -Which in turn will call ``ceph-volume`` in the following way:: - - ceph-volume lvm trigger 0-8715BEB4-15C5-49DE-BA6F-401086EC7B41 - -Any other subcommand will need to implement a ``trigger`` command that -can consume the extra metadata in this format. - - -Availability -============ - -:program:`ceph-volume-systemd` is part of Ceph, a massively scalable, -open-source, distributed storage system. Please refer to the documentation at -http://docs.ceph.com/ for more information. - - -See also -======== - -:doc:`ceph-osd <ceph-osd>`\(8), -:doc:`ceph-disk <ceph-disk>`\(8), diff --git a/src/ceph/doc/man/8/ceph-volume.rst b/src/ceph/doc/man/8/ceph-volume.rst deleted file mode 100644 index 431e82c..0000000 --- a/src/ceph/doc/man/8/ceph-volume.rst +++ /dev/null @@ -1,122 +0,0 @@ -:orphan: - -======================================== - ceph-volume -- Ceph OSD deployment tool -======================================== - -.. program:: ceph-volume - -Synopsis -======== - -| **ceph-volume** [-h] [--cluster CLUSTER] [--log-level LOG_LEVEL] -| [--log-path LOG_PATH] - -| **ceph-volume** **lvm** [ *trigger* | *create* | *activate* | *prepare* ] - -Description -=========== - -:program:`ceph-volume` is a single purpose command line tool to deploy logical -volumes as OSDs, trying to maintain a similar API to ``ceph-disk`` when -preparing, activating, and creating OSDs. - -It deviates from ``ceph-disk`` by not interacting with or relying on the udev rules -that come installed with Ceph. These rules allow automatic detection of -previously setup devices that are in turn fed into ``ceph-disk`` to activate -them. - - -Commands -======== - -lvm ---- - -By making use of LVM tags, the ``lvm`` sub-command is able to store and later -re-discover and query devices associated with OSDs so that they can later be -activated. - -Subcommands: - -**activate** -Enables a systemd unit that persists the OSD ID and its UUID (also called -``fsid`` in Ceph CLI tools), so that at boot time it can understand what OSD is -enabled and needs to be mounted. - -Usage:: - - ceph-volume lvm activate --filestore <osd id> <osd fsid> - -Optional Arguments: - -* [-h, --help] show the help message and exit -* [--bluestore] bluestore objectstore (not yet implemented) -* [--filestore] filestore objectstore (current default) - - -**prepare** -Prepares a logical volume to be used as an OSD and journal using a ``filestore`` setup -(``bluestore`` support is planned). It will not create or modify the logical volumes -except for adding extra metadata.
- -Usage:: - - ceph-volume lvm prepare --filestore --data --journal - -Optional arguments: - -* [-h, --help] show the help message and exit -* [--journal JOURNAL] A logical group name, path to a logical volume, or path to a device -* [--journal-size GB] Size (in GB) A logical group name or a path to a logical volume -* [--bluestore] Use the bluestore objectstore (not currently supported) -* [--filestore] Use the filestore objectstore (currently the only supported object store) -* [--osd-id OSD_ID] Reuse an existing OSD id -* [--osd-fsid OSD_FSID] Reuse an existing OSD fsid - -Required arguments: - -* --data A logical group name or a path to a logical volume - -**create** -Wraps the two-step process to provision a new osd (calling ``prepare`` first -and then ``activate``) into a single one. The reason to prefer ``prepare`` and -then ``activate`` is to gradually introduce new OSDs into a cluster, and -avoiding large amounts of data being rebalanced. - -The single-call process unifies exactly what ``prepare`` and ``activate`` do, -with the convenience of doing it all at once. Flags and general usage are -equivalent to those of the ``prepare`` subcommand. - -**trigger** -This subcommand is not meant to be used directly, and it is used by systemd so -that it proxies input to ``ceph-volume lvm activate`` by parsing the -input from systemd, detecting the UUID and ID associated with an OSD. - -Usage:: - - ceph-volume lvm trigger - -The systemd "data" is expected to be in the format of:: - - - - -The lvs associated with the OSD need to have been prepared previously, -so that all needed tags and metadata exist. - -Positional arguments: - -* Data from a systemd unit containing ID and UUID of the OSD. - -Availability -============ - -:program:`ceph-volume` is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to -the documentation at http://docs.ceph.com/ for more information. - - -See also -======== - -:doc:`ceph-osd `\(8), -:doc:`ceph-disk `\(8), diff --git a/src/ceph/doc/man/8/ceph.rst b/src/ceph/doc/man/8/ceph.rst deleted file mode 100644 index 1a18000..0000000 --- a/src/ceph/doc/man/8/ceph.rst +++ /dev/null @@ -1,1550 +0,0 @@ -:orphan: - -================================== - ceph -- ceph administration tool -================================== - -.. program:: ceph - -Synopsis -======== - -| **ceph** **auth** [ *add* \| *caps* \| *del* \| *export* \| *get* \| *get-key* \| *get-or-create* \| *get-or-create-key* \| *import* \| *list* \| *print-key* \| *print_key* ] ... - -| **ceph** **compact** - -| **ceph** **config-key** [ *del* | *exists* | *get* | *list* | *dump* | *put* ] ... - -| **ceph** **daemon** ** \| ** ** ... - -| **ceph** **daemonperf** ** \| ** [ *interval* [ *count* ] ] - -| **ceph** **df** *{detail}* - -| **ceph** **fs** [ *ls* \| *new* \| *reset* \| *rm* ] ... - -| **ceph** **fsid** - -| **ceph** **health** *{detail}* - -| **ceph** **heap** [ *dump* \| *start_profiler* \| *stop_profiler* \| *release* \| *stats* ] ... - -| **ceph** **injectargs** ** [ **... ] - -| **ceph** **log** ** [ **... ] - -| **ceph** **mds** [ *compat* \| *deactivate* \| *fail* \| *rm* \| *rmfailed* \| *set_state* \| *stat* \| *tell* ] ... - -| **ceph** **mon** [ *add* \| *dump* \| *getmap* \| *remove* \| *stat* ] ... 
- -| **ceph** **mon_status** - -| **ceph** **osd** [ *blacklist* \| *blocked-by* \| *create* \| *new* \| *deep-scrub* \| *df* \| *down* \| *dump* \| *erasure-code-profile* \| *find* \| *getcrushmap* \| *getmap* \| *getmaxosd* \| *in* \| *lspools* \| *map* \| *metadata* \| *ok-to-stop* \| *out* \| *pause* \| *perf* \| *pg-temp* \| *force-create-pg* \| *primary-affinity* \| *primary-temp* \| *repair* \| *reweight* \| *reweight-by-pg* \| *rm* \| *destroy* \| *purge* \| *safe-to-destroy* \| *scrub* \| *set* \| *setcrushmap* \| *setmaxosd* \| *stat* \| *tree* \| *unpause* \| *unset* ] ... - -| **ceph** **osd** **crush** [ *add* \| *add-bucket* \| *create-or-move* \| *dump* \| *get-tunable* \| *link* \| *move* \| *remove* \| *rename-bucket* \| *reweight* \| *reweight-all* \| *reweight-subtree* \| *rm* \| *rule* \| *set* \| *set-tunable* \| *show-tunables* \| *tunables* \| *unlink* ] ... - -| **ceph** **osd** **pool** [ *create* \| *delete* \| *get* \| *get-quota* \| *ls* \| *mksnap* \| *rename* \| *rmsnap* \| *set* \| *set-quota* \| *stats* ] ... - -| **ceph** **osd** **tier** [ *add* \| *add-cache* \| *cache-mode* \| *remove* \| *remove-overlay* \| *set-overlay* ] ... - -| **ceph** **pg** [ *debug* \| *deep-scrub* \| *dump* \| *dump_json* \| *dump_pools_json* \| *dump_stuck* \| *force_create_pg* \| *getmap* \| *ls* \| *ls-by-osd* \| *ls-by-pool* \| *ls-by-primary* \| *map* \| *repair* \| *scrub* \| *set_full_ratio* \| *set_nearfull_ratio* \| *stat* ] ... - -| **ceph** **quorum** [ *enter* \| *exit* ] - -| **ceph** **quorum_status** - -| **ceph** **report** { ** [ *...* ] } - -| **ceph** **scrub** - -| **ceph** **status** - -| **ceph** **sync** **force** {--yes-i-really-mean-it} {--i-know-what-i-am-doing} - -| **ceph** **tell** * [...]* - -| **ceph** **version** - -Description -=========== - -:program:`ceph` is a control utility which is used for manual deployment and maintenance -of a Ceph cluster. It provides a diverse set of commands that allows deployment of -monitors, OSDs, placement groups, MDS and overall maintenance, administration -of the cluster. - -Commands -======== - -auth ----- - -Manage authentication keys. It is used for adding, removing, exporting -or updating of authentication keys for a particular entity such as a monitor or -OSD. It uses some additional subcommands. - -Subcommand ``add`` adds authentication info for a particular entity from input -file, or random key if no input is given and/or any caps specified in the command. - -Usage:: - - ceph auth add { [...]} - -Subcommand ``caps`` updates caps for **name** from caps specified in the command. - -Usage:: - - ceph auth caps [...] - -Subcommand ``del`` deletes all caps for ``name``. - -Usage:: - - ceph auth del - -Subcommand ``export`` writes keyring for requested entity, or master keyring if -none given. - -Usage:: - - ceph auth export {} - -Subcommand ``get`` writes keyring file with requested key. - -Usage:: - - ceph auth get - -Subcommand ``get-key`` displays requested key. - -Usage:: - - ceph auth get-key - -Subcommand ``get-or-create`` adds authentication info for a particular entity -from input file, or random key if no input given and/or any caps specified in the -command. - -Usage:: - - ceph auth get-or-create { [...]} - -Subcommand ``get-or-create-key`` gets or adds key for ``name`` from system/caps -pairs specified in the command. If key already exists, any given caps must match -the existing caps for that key. 
- -Usage:: - - ceph auth get-or-create-key { [...]} - -Subcommand ``import`` reads keyring from input file. - -Usage:: - - ceph auth import - -Subcommand ``ls`` lists authentication state. - -Usage:: - - ceph auth ls - -Subcommand ``print-key`` displays requested key. - -Usage:: - - ceph auth print-key - -Subcommand ``print_key`` displays requested key. - -Usage:: - - ceph auth print_key - - -compact -------- - -Causes compaction of monitor's leveldb storage. - -Usage:: - - ceph compact - - -config-key ----------- - -Manage configuration key. It uses some additional subcommands. - -Subcommand ``del`` deletes configuration key. - -Usage:: - - ceph config-key del - -Subcommand ``exists`` checks for configuration keys existence. - -Usage:: - - ceph config-key exists - -Subcommand ``get`` gets the configuration key. - -Usage:: - - ceph config-key get - -Subcommand ``list`` lists configuration keys. - -Usage:: - - ceph config-key ls - -Subcommand ``dump`` dumps configuration keys and values. - -Usage:: - - ceph config-key dump - -Subcommand ``set`` puts configuration key and value. - -Usage:: - - ceph config-key set {} - - -daemon ------- - -Submit admin-socket commands. - -Usage:: - - ceph daemon {daemon_name|socket_path} {command} ... - -Example:: - - ceph daemon osd.0 help - - -daemonperf ----------- - -Watch performance counters from a Ceph daemon. - -Usage:: - - ceph daemonperf {daemon_name|socket_path} [{interval} [{count}]] - - -df --- - -Show cluster's free space status. - -Usage:: - - ceph df {detail} - -.. _ceph features: - -features --------- - -Show the releases and features of all connected daemons and clients connected -to the cluster, along with the numbers of them in each bucket grouped by the -corresponding features/releases. Each release of Ceph supports a different set -of features, expressed by the features bitmask. New cluster features require -that clients support the feature, or else they are not allowed to connect to -these new features. As new features or capabilities are enabled after an -upgrade, older clients are prevented from connecting. - -Usage:: - - ceph features - -fs --- - -Manage cephfs filesystems. It uses some additional subcommands. - -Subcommand ``ls`` to list filesystems - -Usage:: - - ceph fs ls - -Subcommand ``new`` to make a new filesystem using named pools and - -Usage:: - - ceph fs new - -Subcommand ``reset`` is used for disaster recovery only: reset to a single-MDS map - -Usage:: - - ceph fs reset {--yes-i-really-mean-it} - -Subcommand ``rm`` to disable the named filesystem - -Usage:: - - ceph fs rm {--yes-i-really-mean-it} - - -fsid ----- - -Show cluster's FSID/UUID. - -Usage:: - - ceph fsid - - -health ------- - -Show cluster's health. - -Usage:: - - ceph health {detail} - - -heap ----- - -Show heap usage info (available only if compiled with tcmalloc) - -Usage:: - - ceph heap dump|start_profiler|stop_profiler|release|stats - - -injectargs ----------- - -Inject configuration arguments into monitor. - -Usage:: - - ceph injectargs [...] - - -log ---- - -Log supplied text to the monitor log. - -Usage:: - - ceph log [...] - - -mds ---- - -Manage metadata server configuration and administration. It uses some -additional subcommands. - -Subcommand ``compat`` manages compatible features. It uses some additional -subcommands. - -Subcommand ``rm_compat`` removes compatible feature. - -Usage:: - - ceph mds compat rm_compat - -Subcommand ``rm_incompat`` removes incompatible feature. 
- -Usage:: - - ceph mds compat rm_incompat - -Subcommand ``show`` shows mds compatibility settings. - -Usage:: - - ceph mds compat show - -Subcommand ``deactivate`` stops mds. - -Usage:: - - ceph mds deactivate - -Subcommand ``fail`` forces mds to status fail. - -Usage:: - - ceph mds fail - -Subcommand ``rm`` removes inactive mds. - -Usage:: - - ceph mds rm (type.id)> - -Subcommand ``rmfailed`` removes failed mds. - -Usage:: - - ceph mds rmfailed - -Subcommand ``set_state`` sets mds state of to . - -Usage:: - - ceph mds set_state - -Subcommand ``stat`` shows MDS status. - -Usage:: - - ceph mds stat - -Subcommand ``tell`` sends command to particular mds. - -Usage:: - - ceph mds tell [...] - -mon ---- - -Manage monitor configuration and administration. It uses some additional -subcommands. - -Subcommand ``add`` adds new monitor named at . - -Usage:: - - ceph mon add - -Subcommand ``dump`` dumps formatted monmap (optionally from epoch) - -Usage:: - - ceph mon dump {} - -Subcommand ``getmap`` gets monmap. - -Usage:: - - ceph mon getmap {} - -Subcommand ``remove`` removes monitor named . - -Usage:: - - ceph mon remove - -Subcommand ``stat`` summarizes monitor status. - -Usage:: - - ceph mon stat - -mon_status ----------- - -Reports status of monitors. - -Usage:: - - ceph mon_status - -mgr ---- - -Ceph manager daemon configuration and management. - -Subcommand ``dump`` dumps the latest MgrMap, which describes the active -and standby manager daemons. - -Usage:: - - ceph mgr dump - -Subcommand ``fail`` will mark a manager daemon as failed, removing it -from the manager map. If it is the active manager daemon a standby -will take its place. - -Usage:: - - ceph mgr fail - -Subcommand ``module ls`` will list currently enabled manager modules (plugins). - -Usage:: - - ceph mgr module ls - -Subcommand ``module enable`` will enable a manager module. Available modules are included in MgrMap and visible via ``mgr dump``. - -Usage:: - - ceph mgr module enable - -Subcommand ``module disable`` will disable an active manager module. - -Usage:: - - ceph mgr module disable - -Subcommand ``metadata`` will report metadata about all manager daemons or, if the name is specified, a single manager daemon. - -Usage:: - - ceph mgr metadata [name] - -Subcommand ``versions`` will report a count of running daemon versions. - -Usage:: - - ceph mgr versions - -Subcommand ``count-metadata`` will report a count of any daemon metadata field. - -Usage:: - - ceph mgr count-metadata - - -osd ---- - -Manage OSD configuration and administration. It uses some additional -subcommands. - -Subcommand ``blacklist`` manage blacklisted clients. It uses some additional -subcommands. - -Subcommand ``add`` add to blacklist (optionally until seconds -from now) - -Usage:: - - ceph osd blacklist add {} - -Subcommand ``ls`` show blacklisted clients - -Usage:: - - ceph osd blacklist ls - -Subcommand ``rm`` remove from blacklist - -Usage:: - - ceph osd blacklist rm - -Subcommand ``blocked-by`` prints a histogram of which OSDs are blocking their peers - -Usage:: - - ceph osd blocked-by - -Subcommand ``create`` creates new osd (with optional UUID and ID). - -This command is DEPRECATED as of the Luminous release, and will be removed in -a future release. - -Subcommand ``new`` should instead be used. - -Usage:: - - ceph osd create {} {} - -Subcommand ``new`` can be used to create a new OSD or to recreate a previously -destroyed OSD with a specific *id*. 
The new OSD will have the specified *uuid*, -and the command expects a JSON file containing the base64 cephx key for auth -entity *client.osd.<id>*, as well as an optional base64 cephx key for dm-crypt -lockbox access and a dm-crypt key. Specifying a dm-crypt key requires specifying -the accompanying lockbox cephx key. - -Usage:: - - ceph osd new {<uuid>} {<id>} -i {<secrets.json>} - -The secrets JSON file is optional but, if provided, is expected to follow -this format:: - - { - "cephx_secret": "AQBWtwhZdBO5ExAAIDyjK2Bh16ZXylmzgYYEjg==" - } - -Or:: - - { - "cephx_secret": "AQBWtwhZdBO5ExAAIDyjK2Bh16ZXylmzgYYEjg==", - "cephx_lockbox_secret": "AQDNCglZuaeVCRAAYr76PzR1Anh7A0jswkODIQ==", - "dmcrypt_key": "<dm-crypt key>" - } - - -Subcommand ``crush`` is used for CRUSH management. It uses some additional -subcommands. - -Subcommand ``add`` adds or updates crushmap position and weight for <name> with -<weight> and location <args>. - -Usage:: - - ceph osd crush add <osdname (id|osd.id)> <float[0.0-]> <args> [<args>...] - -Subcommand ``add-bucket`` adds no-parent (probably root) crush bucket <name> of -type <type>. - -Usage:: - - ceph osd crush add-bucket <name> <type> - -Subcommand ``create-or-move`` creates entry or moves existing entry for <name> -<weight> at/to location <args>. - -Usage:: - - ceph osd crush create-or-move <osdname (id|osd.id)> <float[0.0-]> - <args> [<args>...] - -Subcommand ``dump`` dumps crush map. - -Usage:: - - ceph osd crush dump - -Subcommand ``get-tunable`` gets crush tunable straw_calc_version. - -Usage:: - - ceph osd crush get-tunable straw_calc_version - -Subcommand ``link`` links existing entry for <name> under location <args>. - -Usage:: - - ceph osd crush link <name> <args> [<args>...] - -Subcommand ``move`` moves existing entry for <name> to location <args>. - -Usage:: - - ceph osd crush move <name> <args> [<args>...] - -Subcommand ``remove`` removes <name> from crush map (everywhere, or just at -<ancestor>). - -Usage:: - - ceph osd crush remove <name> {<ancestor>} - -Subcommand ``rename-bucket`` renames bucket <srcname> to <dstname>. - -Usage:: - - ceph osd crush rename-bucket <srcname> <dstname> - -Subcommand ``reweight`` changes <name>'s weight to <weight> in crush map. - -Usage:: - - ceph osd crush reweight <name> <float[0.0-]> - -Subcommand ``reweight-all`` recalculates the weights for the tree to -ensure they sum correctly. - -Usage:: - - ceph osd crush reweight-all - -Subcommand ``reweight-subtree`` changes all leaf items beneath <name> -to <weight> in crush map. - -Usage:: - - ceph osd crush reweight-subtree <name> <weight> - -Subcommand ``rm`` removes <name> from crush map (everywhere, or just at -<ancestor>). - -Usage:: - - ceph osd crush rm <name> {<ancestor>} - -Subcommand ``rule`` is used for creating crush rules. It uses some additional -subcommands. - -Subcommand ``create-erasure`` creates crush rule <name> for erasure coded pool -created with <profile> (default default). - -Usage:: - - ceph osd crush rule create-erasure <name> {<profile>} - -Subcommand ``create-simple`` creates crush rule <name> to start from <root>, -replicate across buckets of type <type>, using a choose mode of <firstn|indep> -(default firstn; indep best for erasure pools). - -Usage:: - - ceph osd crush rule create-simple <name> <root> <type> {firstn|indep} - -Subcommand ``dump`` dumps crush rule <name> (default all). - -Usage:: - - ceph osd crush rule dump {<name>} - -Subcommand ``ls`` lists crush rules. - -Usage:: - - ceph osd crush rule ls - -Subcommand ``rm`` removes crush rule <name>. - -Usage:: - - ceph osd crush rule rm <name> - -Subcommand ``set`` used alone, sets crush map from input file. - -Usage:: - - ceph osd crush set - -Subcommand ``set`` with osdname/osd.id updates crushmap position and weight -for <name> to <weight> with location <args>. - -Usage:: - - ceph osd crush set <osdname (id|osd.id)> <float[0.0-]> <args> [<args>...] - -Subcommand ``set-tunable`` sets crush tunable <tunable> to <value>. The only -tunable that can be set is straw_calc_version. - -Usage:: - - ceph osd crush set-tunable straw_calc_version <value> - -Subcommand ``show-tunables`` shows current crush tunables.
- -Usage:: - - ceph osd crush show-tunables - -Subcommand ``tree`` shows the crush buckets and items in a tree view. - -Usage:: - - ceph osd crush tree - -Subcommand ``tunables`` sets crush tunables values to . - -Usage:: - - ceph osd crush tunables legacy|argonaut|bobtail|firefly|hammer|optimal|default - -Subcommand ``unlink`` unlinks from crush map (everywhere, or just at -). - -Usage:: - - ceph osd crush unlink {} - -Subcommand ``df`` shows OSD utilization - -Usage:: - - ceph osd df {plain|tree} - -Subcommand ``deep-scrub`` initiates deep scrub on specified osd. - -Usage:: - - ceph osd deep-scrub - -Subcommand ``down`` sets osd(s) [...] down. - -Usage:: - - ceph osd down [...] - -Subcommand ``dump`` prints summary of OSD map. - -Usage:: - - ceph osd dump {} - -Subcommand ``erasure-code-profile`` is used for managing the erasure code -profiles. It uses some additional subcommands. - -Subcommand ``get`` gets erasure code profile . - -Usage:: - - ceph osd erasure-code-profile get - -Subcommand ``ls`` lists all erasure code profiles. - -Usage:: - - ceph osd erasure-code-profile ls - -Subcommand ``rm`` removes erasure code profile . - -Usage:: - - ceph osd erasure-code-profile rm - -Subcommand ``set`` creates erasure code profile with [ ...] -pairs. Add a --force at the end to override an existing profile (IT IS RISKY). - -Usage:: - - ceph osd erasure-code-profile set { [...]} - -Subcommand ``find`` find osd in the CRUSH map and shows its location. - -Usage:: - - ceph osd find - -Subcommand ``getcrushmap`` gets CRUSH map. - -Usage:: - - ceph osd getcrushmap {} - -Subcommand ``getmap`` gets OSD map. - -Usage:: - - ceph osd getmap {} - -Subcommand ``getmaxosd`` shows largest OSD id. - -Usage:: - - ceph osd getmaxosd - -Subcommand ``in`` sets osd(s) [...] in. - -Usage:: - - ceph osd in [...] - -Subcommand ``lost`` marks osd as permanently lost. THIS DESTROYS DATA IF NO -MORE REPLICAS EXIST, BE CAREFUL. - -Usage:: - - ceph osd lost {--yes-i-really-mean-it} - -Subcommand ``ls`` shows all OSD ids. - -Usage:: - - ceph osd ls {} - -Subcommand ``lspools`` lists pools. - -Usage:: - - ceph osd lspools {} - -Subcommand ``map`` finds pg for in . - -Usage:: - - ceph osd map - -Subcommand ``metadata`` fetches metadata for osd . - -Usage:: - - ceph osd metadata {int[0-]} (default all) - -Subcommand ``out`` sets osd(s) [...] out. - -Usage:: - - ceph osd out [...] - -Subcommand ``ok-to-stop`` checks whether the list of OSD(s) can be -stopped without immediately making data unavailable. That is, all -data should remain readable and writeable, although data redundancy -may be reduced as some PGs may end up in a degraded (but active) -state. It will return a success code if it is okay to stop the -OSD(s), or an error code and informative message if it is not or if no -conclusion can be drawn at the current time. - -Usage:: - - ceph osd ok-to-stop [...] - -Subcommand ``pause`` pauses osd. - -Usage:: - - ceph osd pause - -Subcommand ``perf`` prints dump of OSD perf summary stats. - -Usage:: - - ceph osd perf - -Subcommand ``pg-temp`` set pg_temp mapping pgid:[ [...]] (developers -only). - -Usage:: - - ceph osd pg-temp { [...]} - -Subcommand ``force-create-pg`` forces creation of pg . - -Usage:: - - ceph osd force-create-pg - - -Subcommand ``pool`` is used for managing data pools. It uses some additional -subcommands. - -Subcommand ``create`` creates pool. - -Usage:: - - ceph osd pool create {} {replicated|erasure} - {} {} {} - -Subcommand ``delete`` deletes pool. 
- -Usage:: - - ceph osd pool delete {} {--yes-i-really-really-mean-it} - -Subcommand ``get`` gets pool parameter . - -Usage:: - - ceph osd pool get size|min_size|crash_replay_interval|pg_num| - pgp_num|crush_ruleset|auid|write_fadvise_dontneed - -Only for tiered pools:: - - ceph osd pool get hit_set_type|hit_set_period|hit_set_count|hit_set_fpp| - target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio| - cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age| - min_read_recency_for_promote|hit_set_grade_decay_rate|hit_set_search_last_n - -Only for erasure coded pools:: - - ceph osd pool get erasure_code_profile - -Use ``all`` to get all pool parameters that apply to the pool's type:: - - ceph osd pool get all - -Subcommand ``get-quota`` obtains object or byte limits for pool. - -Usage:: - - ceph osd pool get-quota - -Subcommand ``ls`` list pools - -Usage:: - - ceph osd pool ls {detail} - -Subcommand ``mksnap`` makes snapshot in . - -Usage:: - - ceph osd pool mksnap - -Subcommand ``rename`` renames to . - -Usage:: - - ceph osd pool rename - -Subcommand ``rmsnap`` removes snapshot from . - -Usage:: - - ceph osd pool rmsnap - -Subcommand ``set`` sets pool parameter to . - -Usage:: - - ceph osd pool set size|min_size|crash_replay_interval|pg_num| - pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange| - hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|debug_fake_ec_pool| - target_max_bytes|target_max_objects|cache_target_dirty_ratio| - cache_target_dirty_high_ratio| - cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|auid| - min_read_recency_for_promote|write_fadvise_dontneed|hit_set_grade_decay_rate| - hit_set_search_last_n - {--yes-i-really-mean-it} - -Subcommand ``set-quota`` sets object or byte limit on pool. - -Usage:: - - ceph osd pool set-quota max_objects|max_bytes - -Subcommand ``stats`` obtain stats from all pools, or from specified pool. - -Usage:: - - ceph osd pool stats {} - -Subcommand ``primary-affinity`` adjust osd primary-affinity from 0.0 <= -<= 1.0 - -Usage:: - - ceph osd primary-affinity - -Subcommand ``primary-temp`` sets primary_temp mapping pgid:|-1 (developers -only). - -Usage:: - - ceph osd primary-temp - -Subcommand ``repair`` initiates repair on a specified osd. - -Usage:: - - ceph osd repair - -Subcommand ``reweight`` reweights osd to 0.0 < < 1.0. - -Usage:: - - osd reweight - -Subcommand ``reweight-by-pg`` reweight OSDs by PG distribution -[overload-percentage-for-consideration, default 120]. - -Usage:: - - ceph osd reweight-by-pg {} { [} - {--no-increasing} - -Subcommand ``rm`` removes osd(s) [...] from the OSD map. - - -Usage:: - - ceph osd rm [...] - -Subcommand ``destroy`` marks OSD *id* as *destroyed*, removing its cephx -entity's keys and all of its dm-crypt and daemon-private config key -entries. - -This command will not remove the OSD from crush, nor will it remove the -OSD from the OSD map. Instead, once the command successfully completes, -the OSD will show marked as *destroyed*. - -In order to mark an OSD as destroyed, the OSD must first be marked as -**lost**. - -Usage:: - - ceph osd destroy {--yes-i-really-mean-it} - - -Subcommand ``purge`` performs a combination of ``osd destroy``, -``osd rm`` and ``osd crush remove``. - -Usage:: - - ceph osd purge {--yes-i-really-mean-it} - -Subcommand ``safe-to-destroy`` checks whether it is safe to remove or -destroy an OSD without reducing overall data redundancy or durability. 
-It will return a success code if it is definitely safe, or an error -code and informative message if it is not or if no conclusion can be -drawn at the current time. - -Usage:: - - ceph osd safe-to-destroy [...] - -Subcommand ``scrub`` initiates scrub on specified osd. - -Usage:: - - ceph osd scrub - -Subcommand ``set`` sets . - -Usage:: - - ceph osd set full|pause|noup|nodown|noout|noin|nobackfill| - norebalance|norecover|noscrub|nodeep-scrub|notieragent - -Subcommand ``setcrushmap`` sets crush map from input file. - -Usage:: - - ceph osd setcrushmap - -Subcommand ``setmaxosd`` sets new maximum osd value. - -Usage:: - - ceph osd setmaxosd - -Subcommand ``set-require-min-compat-client`` enforces the cluster to be backward -compatible with the specified client version. This subcommand prevents you from -making any changes (e.g., crush tunables, or using new features) that -would violate the current setting. Please note, This subcommand will fail if -any connected daemon or client is not compatible with the features offered by -the given . To see the features and releases of all clients connected -to cluster, please see `ceph features`_. - -Usage:: - - ceph osd set-require-min-compat-client - -Subcommand ``stat`` prints summary of OSD map. - -Usage:: - - ceph osd stat - -Subcommand ``tier`` is used for managing tiers. It uses some additional -subcommands. - -Subcommand ``add`` adds the tier (the second one) to base pool -(the first one). - -Usage:: - - ceph osd tier add {--force-nonempty} - -Subcommand ``add-cache`` adds a cache (the second one) of size -to existing pool (the first one). - -Usage:: - - ceph osd tier add-cache - -Subcommand ``cache-mode`` specifies the caching mode for cache tier . - -Usage:: - - ceph osd tier cache-mode none|writeback|forward|readonly| - readforward|readproxy - -Subcommand ``remove`` removes the tier (the second one) from base pool - (the first one). - -Usage:: - - ceph osd tier remove - -Subcommand ``remove-overlay`` removes the overlay pool for base pool . - -Usage:: - - ceph osd tier remove-overlay - -Subcommand ``set-overlay`` set the overlay pool for base pool to be -. - -Usage:: - - ceph osd tier set-overlay - -Subcommand ``tree`` prints OSD tree. - -Usage:: - - ceph osd tree {} - -Subcommand ``unpause`` unpauses osd. - -Usage:: - - ceph osd unpause - -Subcommand ``unset`` unsets . - -Usage:: - - ceph osd unset full|pause|noup|nodown|noout|noin|nobackfill| - norebalance|norecover|noscrub|nodeep-scrub|notieragent - - -pg --- - -It is used for managing the placement groups in OSDs. It uses some -additional subcommands. - -Subcommand ``debug`` shows debug info about pgs. - -Usage:: - - ceph pg debug unfound_objects_exist|degraded_pgs_exist - -Subcommand ``deep-scrub`` starts deep-scrub on . - -Usage:: - - ceph pg deep-scrub - -Subcommand ``dump`` shows human-readable versions of pg map (only 'all' valid -with plain). - -Usage:: - - ceph pg dump {all|summary|sum|delta|pools|osds|pgs|pgs_brief} [{all|summary|sum|delta|pools|osds|pgs|pgs_brief...]} - -Subcommand ``dump_json`` shows human-readable version of pg map in json only. - -Usage:: - - ceph pg dump_json {all|summary|sum|delta|pools|osds|pgs|pgs_brief} [{all|summary|sum|delta|pools|osds|pgs|pgs_brief...]} - -Subcommand ``dump_pools_json`` shows pg pools info in json only. - -Usage:: - - ceph pg dump_pools_json - -Subcommand ``dump_stuck`` shows information about stuck pgs. 
- -Usage:: - - ceph pg dump_stuck {inactive|unclean|stale|undersized|degraded [inactive|unclean|stale|undersized|degraded...]} - {} - -Subcommand ``getmap`` gets binary pg map to -o/stdout. - -Usage:: - - ceph pg getmap - -Subcommand ``ls`` lists pg with specific pool, osd, state - -Usage:: - - ceph pg ls {} {active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| - recovery|backfill_wait|incomplete|stale| remapped| - deep_scrub|backfill|backfill_toofull|recovery_wait| - undersized [active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| - recovery|backfill_wait|incomplete|stale|remapped| - deep_scrub|backfill|backfill_toofull|recovery_wait| - undersized...]} - -Subcommand ``ls-by-osd`` lists pg on osd [osd] - -Usage:: - - ceph pg ls-by-osd {} - {active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| - recovery|backfill_wait|incomplete|stale| remapped| - deep_scrub|backfill|backfill_toofull|recovery_wait| - undersized [active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| - recovery|backfill_wait|incomplete|stale|remapped| - deep_scrub|backfill|backfill_toofull|recovery_wait| - undersized...]} - -Subcommand ``ls-by-pool`` lists pg with pool = [poolname] - -Usage:: - - ceph pg ls-by-pool {} {active| - clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| - recovery|backfill_wait|incomplete|stale| remapped| - deep_scrub|backfill|backfill_toofull|recovery_wait| - undersized [active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| - recovery|backfill_wait|incomplete|stale|remapped| - deep_scrub|backfill|backfill_toofull|recovery_wait| - undersized...]} - -Subcommand ``ls-by-primary`` lists pg with primary = [osd] - -Usage:: - - ceph pg ls-by-primary {} - {active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| - recovery|backfill_wait|incomplete|stale| remapped| - deep_scrub|backfill|backfill_toofull|recovery_wait| - undersized [active|clean|down|replay|splitting| - scrubbing|scrubq|degraded|inconsistent|peering|repair| - recovery|backfill_wait|incomplete|stale|remapped| - deep_scrub|backfill|backfill_toofull|recovery_wait| - undersized...]} - -Subcommand ``map`` shows mapping of pg to osds. - -Usage:: - - ceph pg map - -Subcommand ``repair`` starts repair on . - -Usage:: - - ceph pg repair - -Subcommand ``scrub`` starts scrub on . - -Usage:: - - ceph pg scrub - -Subcommand ``set_full_ratio`` sets ratio at which pgs are considered full. - -Usage:: - - ceph pg set_full_ratio - -Subcommand ``set_backfillfull_ratio`` sets ratio at which pgs are considered too full to backfill. - -Usage:: - - ceph pg set_backfillfull_ratio - -Subcommand ``set_nearfull_ratio`` sets ratio at which pgs are considered nearly -full. - -Usage:: - - ceph pg set_nearfull_ratio - -Subcommand ``stat`` shows placement group status. - -Usage:: - - ceph pg stat - - -quorum ------- - -Cause MON to enter or exit quorum. - -Usage:: - - ceph quorum enter|exit - -Note: this only works on the MON to which the ``ceph`` command is connected. -If you want a specific MON to enter or exit quorum, use this syntax:: - - ceph tell mon. quorum enter|exit - -quorum_status -------------- - -Reports status of monitor quorum. - -Usage:: - - ceph quorum_status - - -report ------- - -Reports full status of cluster, optional title tag strings. 
-
-scrub
------
-
-Scrubs the monitor stores.
-
-Usage::
-
-   ceph scrub
-
-
-status
-------
-
-Shows cluster status.
-
-Usage::
-
-   ceph status
-
-
-sync force
-----------
-
-Forces a sync of, and clears, the monitor store.
-
-Usage::
-
-   ceph sync force {--yes-i-really-mean-it} {--i-know-what-i-am-doing}
-
-
-tell
-----
-
-Sends a command to a specific daemon.
-
-Usage::
-
-   ceph tell [...]
-
-List all available commands.
-
-Usage::
-
-   ceph tell help
-
-version
--------
-
-Shows the mon daemon version.
-
-Usage::
-
-   ceph version
-
-Options
-=======
-
-.. option:: -i infile
-
-   will specify an input file to be passed along as a payload with the
-   command to the monitor cluster. This is only used for specific
-   monitor commands.
-
-.. option:: -o outfile
-
-   will write any payload returned by the monitor cluster with its
-   reply to outfile. Only specific monitor commands (e.g. osd getmap)
-   return a payload.
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
-   Use ceph.conf configuration file instead of the default
-   ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
-
-.. option:: --id CLIENT_ID, --user CLIENT_ID
-
-   Client id for authentication.
-
-.. option:: --name CLIENT_NAME, -n CLIENT_NAME
-
-   Client name for authentication.
-
-.. option:: --cluster CLUSTER
-
-   Name of the Ceph cluster.
-
-.. option:: --admin-daemon ADMIN_SOCKET, daemon DAEMON_NAME
-
-   Submit admin-socket commands via admin sockets in /var/run/ceph.
-
-.. option:: --admin-socket ADMIN_SOCKET_NOPE
-
-   You probably mean --admin-daemon
-
-.. option:: -s, --status
-
-   Show cluster status.
-
-.. option:: -w, --watch
-
-   Watch live cluster changes.
-
-.. option:: --watch-debug
-
-   Watch debug events.
-
-.. option:: --watch-info
-
-   Watch info events.
-
-.. option:: --watch-sec
-
-   Watch security events.
-
-.. option:: --watch-warn
-
-   Watch warning events.
-
-.. option:: --watch-error
-
-   Watch error events.
-
-.. option:: --version, -v
-
-   Display version.
-
-.. option:: --verbose
-
-   Make verbose.
-
-.. option:: --concise
-
-   Make less verbose.
-
-.. option:: -f {json,json-pretty,xml,xml-pretty,plain}, --format
-
-   Format of output.
-
-.. option:: --connect-timeout CLUSTER_TIMEOUT
-
-   Set a timeout for connecting to the cluster.
-
-.. option:: --no-increasing
-
-   ``--no-increasing`` is off by default, so increasing an OSD's weight
-   is allowed when using the ``reweight-by-utilization`` or
-   ``test-reweight-by-utilization`` commands. If this option is used
-   with these commands, the OSD weight will not be increased even if
-   the OSD is underutilized.
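-To illustrate a few of these options together, the following
-(hypothetical) invocation prints cluster status as pretty-printed JSON
-and gives up if no monitor answers within ten seconds::
-
-   ceph --connect-timeout 10 --format json-pretty status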
-
-
-Availability
-============
-
-:program:`ceph` is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`ceph-mon `\(8),
-:doc:`ceph-osd `\(8),
-:doc:`ceph-mds `\(8)
diff --git a/src/ceph/doc/man/8/crushtool.rst b/src/ceph/doc/man/8/crushtool.rst
deleted file mode 100644
index c7b88f8..0000000
--- a/src/ceph/doc/man/8/crushtool.rst
+++ /dev/null
@@ -1,284 +0,0 @@
-:orphan:
-
-==========================================
- crushtool -- CRUSH map manipulation tool
-==========================================
-
-.. program:: crushtool
-
-Synopsis
-========
-
-| **crushtool** ( -d *map* | -c *map.txt* | --build --num_osds *numosds*
-  *layer1* *...* | --test ) [ -o *outfile* ]
-
-
-Description
-===========
-
-**crushtool** is a utility that lets you create, compile, decompile
-and test CRUSH map files.
-
-CRUSH is a pseudo-random data distribution algorithm that efficiently
-maps input values (which, in the context of Ceph, correspond to Placement
-Groups) across a heterogeneous, hierarchically structured device map.
-The algorithm was originally described in detail in the following paper
-(although it has evolved somewhat since then)::
-
-   http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
-
-The tool has four modes of operation.
-
-.. option:: --compile|-c map.txt
-
-   will compile a plaintext map.txt into a binary map file.
-
-.. option:: --decompile|-d map
-
-   will take the compiled map and decompile it into a plaintext source
-   file, suitable for editing.
-
-.. option:: --build --num_osds {num-osds} layer1 ...
-
-   will create a map with the given layer structure. See below for a
-   detailed explanation.
-
-.. option:: --test
-
-   will perform a dry run of a CRUSH mapping for a range of input
-   values ``[--min-x,--max-x]`` (default ``[0,1023]``), which can be
-   thought of as simulated Placement Groups. See below for a more
-   detailed explanation.
-
-Unlike other Ceph tools, **crushtool** does not accept generic options
-such as **--debug-crush** from the command line. They can, however, be
-provided via the CEPH_ARGS environment variable. For instance, to
-silence all output from the CRUSH subsystem::
-
-   CEPH_ARGS="--debug-crush 0" crushtool ...
-
-
-Running tests with --test
-=========================
-
-The test mode will use the input crush map (as specified with **-i
-map**) and perform a dry run of CRUSH mapping or random placement
-(if **--simulate** is set). On completion, two kinds of reports can be
-created:
-
-1) The **--show-...** options output human-readable information
-   on stderr.
-2) The **--output-csv** option creates CSV files that are
-   documented by the **--help-output** option.
-
-Note: Each Placement Group (PG) has an integer ID which can be obtained
-from ``ceph pg dump`` (for example PG 2.2f means pool id 2, PG id
-0x2f = 47). The pool and PG IDs are combined by a function to get a
-value which is given to CRUSH to map it to OSDs. crushtool does not
-know about PGs or pools; it only runs simulations by mapping values in
-the range ``[--min-x,--max-x]``.
-
-
-.. option:: --show-statistics
-
-   Displays a summary of the distribution. For instance::
-
-     rule 1 (metadata) num_rep 5 result size == 5:  1024/1024
-
-   shows that rule **1**, which is named **metadata**, successfully
-   mapped **1024** values to **result size == 5** devices when trying
-   to map them to **num_rep 5** replicas. When it fails to provide the
-   required mapping, presumably because the number of **tries** must
-   be increased, a breakdown of the failures is displayed. For instance::
-
-     rule 1 (metadata) num_rep 10 result size == 8:   4/1024
-     rule 1 (metadata) num_rep 10 result size == 9:   93/1024
-     rule 1 (metadata) num_rep 10 result size == 10:  927/1024
-
-   shows that although **num_rep 10** replicas were required, **4**
-   out of **1024** values ( **4/1024** ) were mapped to **result size
-   == 8** devices only.
-
-.. option:: --show-mappings
-
-   Displays the mapping of each value in the range ``[--min-x,--max-x]``.
-   For instance::
-
-     CRUSH rule 1 x 24 [11,6]
-
-   shows that value **24** is mapped to devices **[11,6]** by rule
-   **1**.
-
-.. option:: --show-bad-mappings
-
-   Displays which value failed to be mapped to the required number of
-   devices. For instance::
-
-     bad mapping rule 1 x 781 num_rep 7 result [8,10,2,11,6,9]
-
-   shows that when rule **1** was required to map **7** devices, it
-   could map only six: **[8,10,2,11,6,9]**.
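-To reproduce this kind of report against a map of your own, a dry run
-such as the following can be used (the file name ``mymap`` is
-hypothetical, and ``--num-rep`` sets the number of replicas to test)::
-
-   crushtool -i mymap --test --num-rep 7 --show-bad-mappings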
-.. option:: --show-utilization
-
-   Displays the expected and actual utilization for each device, for
-   each number of replicas. For instance::
-
-     device 0: stored : 951 expected : 853.333
-     device 1: stored : 963 expected : 853.333
-     ...
-
-   shows that device **0** stored **951** values and was expected to store **853**.
-   Implies **--show-statistics**.
-
-.. option:: --show-utilization-all
-
-   Displays the same as **--show-utilization** but does not suppress
-   output when the weight of a device is zero.
-   Implies **--show-statistics**.
-
-.. option:: --show-choose-tries
-
-   Displays how many attempts were needed to find a device mapping.
-   For instance::
-
-     0:     95224
-     1:      3745
-     2:      2225
-     ..
-
-   shows that **95224** mappings succeeded without retries, **3745**
-   mappings succeeded with one attempt, etc. There are as many rows
-   as the value of the **--set-choose-total-tries** option.
-
-.. option:: --output-csv
-
-   Creates CSV files (in the current directory) containing information
-   documented by **--help-output**. The files are named after the rule
-   used when collecting the statistics. For instance, if the rule
-   'metadata' is used, the CSV files will be::
-
-     metadata-absolute_weights.csv
-     metadata-device_utilization.csv
-     ...
-
-   The first line of each file briefly explains the column layout. For
-   instance::
-
-     metadata-absolute_weights.csv
-     Device ID, Absolute Weight
-     0,1
-     ...
-
-.. option:: --output-name NAME
-
-   Prepend **NAME** to the file names generated when **--output-csv**
-   is specified. For instance **--output-name FOO** will create
-   files::
-
-     FOO-metadata-absolute_weights.csv
-     FOO-metadata-device_utilization.csv
-     ...
-
-The **--set-...** options can be used to modify the tunables of the
-input crush map. The input crush map is modified in
-memory. For example::
-
-   $ crushtool -i mymap --test --show-bad-mappings
-   bad mapping rule 1 x 781 num_rep 7 result [8,10,2,11,6,9]
-
-could be fixed by increasing the **choose-total-tries** as follows::
-
-   $ crushtool -i mymap --test \
-      --show-bad-mappings \
-      --set-choose-total-tries 500
-
-Building a map with --build
-===========================
-
-The build mode will generate hierarchical maps. The first argument
-specifies the number of devices (leaves) in the CRUSH hierarchy. Each
-layer describes how the layer (or devices) preceding it should be
-grouped.
-
-Each layer consists of::
-
-   bucket ( uniform | list | tree | straw ) size
-
-The **bucket** is the type of the buckets in the layer
-(e.g. "rack"). Each bucket name will be built by appending a unique
-number to the **bucket** string (e.g. "rack0", "rack1"...).
-
-The second component is the type of bucket: **straw** should be used
-most of the time.
-
-The third component is the maximum size of the bucket. A size of zero
-means a bucket of infinite capacity.
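-As a minimal sketch of this layer syntax, the following builds a toy
-hierarchy of eight devices grouped into hosts of four, under a single
-rack of unbounded size ("host" and "rack" are the bucket type strings;
-the output file name ``toymap`` is arbitrary)::
-
-   crushtool -o toymap --build --num_osds 8 \
-      host straw 4 rack straw 0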
-
-
-Example
-=======
-
-Suppose we have two rows with two racks each and 20 nodes per rack. Suppose
-each node contains 4 storage devices for Ceph OSD Daemons. This configuration
-allows us to deploy 320 Ceph OSD Daemons. Let's assume a 42U rack with 2U nodes,
-leaving an extra 2U for a rack switch.
-
-To reflect our hierarchy of devices, nodes, racks and rows, we would execute
-the following::
-
-   $ crushtool -o crushmap --build --num_osds 320 \
-      node straw 4 \
-      rack straw 20 \
-      row straw 2 \
-      root straw 0
-   # id    weight  type name      reweight
-   -87     320     root root
-   -85     160     row row0
-   -81     80      rack rack0
-   -1      4       node node0
-   0       1       osd.0   1
-   1       1       osd.1   1
-   2       1       osd.2   1
-   3       1       osd.3   1
-   -2      4       node node1
-   4       1       osd.4   1
-   5       1       osd.5   1
-   ...
-
-CRUSH rulesets are created so the generated crushmap can be
-tested. They are the same rulesets as the ones created by default when
-creating a new Ceph cluster. They can be further edited with::
-
-   # decompile
-   crushtool -d crushmap -o map.txt
-
-   # edit
-   emacs map.txt
-
-   # recompile
-   crushtool -c map.txt -o crushmap
-
-Example output from --test
-==========================
-
-See https://github.com/ceph/ceph/blob/master/src/test/cli/crushtool/set-choose.t
-for sample ``crushtool --test`` commands and output produced thereby.
-
-Availability
-============
-
-**crushtool** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph `\(8),
-:doc:`osdmaptool `\(8),
-
-Authors
-=======
-
-John Wilkins, Sage Weil, Loic Dachary
diff --git a/src/ceph/doc/man/8/librados-config.rst b/src/ceph/doc/man/8/librados-config.rst
deleted file mode 100644
index 940e8c2..0000000
--- a/src/ceph/doc/man/8/librados-config.rst
+++ /dev/null
@@ -1,46 +0,0 @@
-:orphan:
-
-=======================================================
- librados-config -- display information about librados
-=======================================================
-
-.. program:: librados-config
-
-Synopsis
-========
-
-| **librados-config** [ --version ] [ --vernum ]
-
-
-Description
-===========
-
-**librados-config** is a utility that displays information about the
-installed ``librados``.
-
-
-Options
-=======
-
-.. option:: --version
-
-   Display the ``librados`` version
-
-.. option:: --vernum
-
-   Display the ``librados`` version code
-
-
-Availability
-============
-
-**librados-config** is part of Ceph, a massively scalable, open-source, distributed storage system.
-Please refer to the Ceph documentation at http://ceph.com/docs for
-more information.
-
-
-See also
-========
-
-:doc:`ceph `\(8),
-:doc:`rados `\(8)
diff --git a/src/ceph/doc/man/8/monmaptool.rst b/src/ceph/doc/man/8/monmaptool.rst
deleted file mode 100644
index bed0c94..0000000
--- a/src/ceph/doc/man/8/monmaptool.rst
+++ /dev/null
@@ -1,107 +0,0 @@
-:orphan:
-
-==========================================================
- monmaptool -- ceph monitor cluster map manipulation tool
-==========================================================
-
-.. program:: monmaptool
-
-Synopsis
-========
-
-| **monmaptool** *mapfilename* [ --clobber ] [ --print ] [ --create ]
-  [ --add *name* *ip*:*port* *...* ] [ --rm *name* *...* ]
-
-
-Description
-===========
-
-**monmaptool** is a utility to create, view, and modify a monitor
-cluster map for the Ceph distributed storage system. The monitor map
-specifies the only fixed addresses in the Ceph distributed system.
-All other daemons bind to arbitrary addresses and register themselves
-with the monitors.
-
-When creating a map with --create, a new monitor map with a new,
-random UUID will be created. The --create flag should be followed by
-one or more monitor addresses.
-
-The default Ceph monitor port is 6789.
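-To inspect the monitor map of a running cluster with this tool, the map
-can first be extracted with ``ceph mon getmap`` (the file name below is
-arbitrary)::
-
-   ceph mon getmap -o monmap
-   monmaptool --print monmap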
-
-
-Options
-=======
-
-.. option:: --print
-
-   will print a plaintext dump of the map, after any modifications are
-   made.
-
-.. option:: --clobber
-
-   will allow monmaptool to overwrite mapfilename if changes are made.
-
-.. option:: --create
-
-   will create a new monitor map with a new UUID (and with it, a new,
-   empty Ceph file system).
-
-.. option:: --generate
-
-   generate a new monmap based on the values on the command line or specified
-   in the ceph configuration. This is, in order of preference,
-
-   #. ``--monmap filename`` to specify a monmap to load
-   #. ``--mon-host 'host1,ip2'`` to specify a list of hosts or ip addresses
-   #. ``[mon.foo]`` sections containing ``mon addr`` settings in the config
-
-.. option:: --filter-initial-members
-
-   filter the initial monmap by applying the ``mon initial members``
-   setting. Monitors not present in that list will be removed, and
-   initial members not present in the map will be added with dummy
-   addresses.
-
-.. option:: --add name ip:port
-
-   will add a monitor with the specified ip:port to the map.
-
-.. option:: --rm name
-
-   will remove the monitor with the specified name from the map.
-
-.. option:: --fsid uuid
-
-   will set the fsid to the given uuid. If not specified with --create,
-   a random fsid will be generated.
-
-
-Example
-=======
-
-To create a new map with three monitors (for a fresh Ceph file system)::
-
-   monmaptool --create --add mon.a 192.168.0.10:6789 --add mon.b 192.168.0.11:6789 \
-     --add mon.c 192.168.0.12:6789 --clobber monmap
-
-To display the contents of the map::
-
-   monmaptool --print monmap
-
-To replace one monitor::
-
-   monmaptool --rm mon.a --add mon.a 192.168.0.9:6789 --clobber monmap
-
-
-Availability
-============
-
-**monmaptool** is part of Ceph, a massively scalable, open-source, distributed
-storage system. Please refer to the Ceph documentation at http://ceph.com/docs
-for more information.
-
-
-See also
-========
-
-:doc:`ceph `\(8),
-:doc:`crushtool `\(8),
diff --git a/src/ceph/doc/man/8/mount.ceph.rst b/src/ceph/doc/man/8/mount.ceph.rst
deleted file mode 100644
index 56900a9..0000000
--- a/src/ceph/doc/man/8/mount.ceph.rst
+++ /dev/null
@@ -1,168 +0,0 @@
-:orphan:
-
-========================================
- mount.ceph -- mount a ceph file system
-========================================
-
-.. program:: mount.ceph
-
-Synopsis
-========
-
-| **mount.ceph** *monaddr1*\ [,\ *monaddr2*\ ,...]:/[*subdir*] *dir* [
-  -o *options* ]
-
-
-Description
-===========
-
-**mount.ceph** is a simple helper for mounting the Ceph file system on
-a Linux host. It serves to resolve monitor hostname(s) into IP
-addresses and read authentication keys from disk; the Linux kernel
-client component does most of the real work. In fact, it is possible
-to mount a non-authenticated Ceph file system without mount.ceph by
-specifying monitor address(es) by IP::
-
-   mount -t ceph 1.2.3.4:/ mountpoint
-
-Each monitor address monaddr takes the form host[:port]. If the port
-is not specified, the Ceph default of 6789 is assumed.
-
-Multiple monitor addresses can be separated by commas. Only one of the
-listed monitors needs to be responsive at mount time; the client will
-learn about all other monitors from any responsive monitor. However,
-it is a good idea to specify more than one in case one happens to be
-down at the time of mount.
-
-A subdirectory subdir may be specified if a subset of the file system
-is to be mounted.
-
-Mount helper application conventions dictate that the first two
-arguments are the device to be mounted and the destination path. All
-other options must be passed only after these fixed arguments.
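-For an authenticated (cephx) cluster, the ``name`` and ``secretfile``
-options described below are typically required; for example (the host
-name, user, and key path here are illustrative)::
-
-   mount -t ceph monhost:/ /mnt/ceph -o name=admin,secretfile=/etc/ceph/admin.secret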
-
-
-Options
-=======
-
-:command:`wsize`
-  int (bytes), max write size. Default: none (writeback uses smaller of wsize
-  and stripe unit)
-
-:command:`rsize`
-  int (bytes), max read size. Default: none
-
-:command:`rasize`
-  int (bytes), max readahead, multiple of 1024, Default: 8388608
-  (8192*1024)
-
-:command:`osdtimeout`
-  int (seconds), Default: 60
-
-:command:`osdkeepalive`
-  int, Default: 5
-
-:command:`mount_timeout`
-  int (seconds), Default: 60
-
-:command:`osd_idle_ttl`
-  int (seconds), Default: 60
-
-:command:`caps_wanted_delay_min`
-  int, cap release delay, Default: 5
-
-:command:`caps_wanted_delay_max`
-  int, cap release delay, Default: 60
-
-:command:`cap_release_safety`
-  int, Default: calculated
-
-:command:`readdir_max_entries`
-  int, Default: 1024
-
-:command:`readdir_max_bytes`
-  int, Default: 524288 (512*1024)
-
-:command:`write_congestion_kb`
-  int (kb), max writeback in flight. Scales with available
-  memory. Default: calculated from available memory
-
-:command:`snapdirname`
-  string, set the name of the hidden snapdir. Default: .snap
-
-:command:`name`
-  RADOS user to authenticate as when using cephx. Default: guest
-
-:command:`secret`
-  secret key for use with cephx. This option is insecure because it exposes
-  the secret on the command line. To avoid this, use the secretfile option.
-
-:command:`secretfile`
-  path to the file containing the secret key to use with cephx
-
-:command:`ip`
-  the local IP address the client should use
-
-:command:`noshare`
-  create a new client instance, instead of sharing an existing
-  instance of a client mounting the same cluster
-
-:command:`dirstat`
-  make ``cat dirname`` show directory stats. Default: off
-
-:command:`nodirstat`
-  do not make ``cat dirname`` show directory stats
-
-:command:`rbytes`
-  Report the recursive size of the directory contents for st_size on
-  directories. Default: on
-
-:command:`norbytes`
-  Do not report the recursive size of the directory contents for
-  st_size on directories.
-
-:command:`nocrc`
-  disable the data crc on writes
-
-:command:`noasyncreaddir`
-  do not use dcache readdir
-
-
-Examples
-========
-
-Mount the full file system::
-
-   mount.ceph monhost:/ /mnt/foo
-
-If there are multiple monitors::
-
-   mount.ceph monhost1,monhost2,monhost3:/ /mnt/foo
-
-If :doc:`ceph-mon `\(8) is running on a non-standard
-port::
-
-   mount.ceph monhost1:7000,monhost2:7000,monhost3:7000:/ /mnt/foo
-
-To mount only part of the namespace::
-
-   mount.ceph monhost1:/some/small/thing /mnt/thing
-
-Assuming mount.ceph(8) is installed properly, it should be
-automatically invoked by mount(8) like so::
-
-   mount -t ceph monhost:/ /mnt/foo
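-A persistent mount can be configured the same way in ``/etc/fstab``;
-this sketch assumes a cephx user ``admin`` whose key is stored in
-/etc/ceph/admin.secret::
-
-   # device        mountpoint  type  options                                                dump  pass
-   monhost:6789:/  /mnt/ceph   ceph  name=admin,secretfile=/etc/ceph/admin.secret,noatime  0     2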
-
-
-Availability
-============
-
-**mount.ceph** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-See also
-========
-
-:doc:`ceph-fuse `\(8),
-:doc:`ceph `\(8)
diff --git a/src/ceph/doc/man/8/osdmaptool.rst b/src/ceph/doc/man/8/osdmaptool.rst
deleted file mode 100644
index f58d29c..0000000
--- a/src/ceph/doc/man/8/osdmaptool.rst
+++ /dev/null
@@ -1,157 +0,0 @@
-:orphan:
-
-======================================================
- osdmaptool -- ceph osd cluster map manipulation tool
-======================================================
-
-.. program:: osdmaptool
-
-Synopsis
-========
-
-| **osdmaptool** *mapfilename* [--print] [--createsimple *numosd*
-  [--pgbits *bitsperosd* ] ] [--clobber]
-
-
-Description
-===========
-
-**osdmaptool** is a utility that lets you create, view, and manipulate
-OSD cluster maps from the Ceph distributed storage system. Notably, it
-lets you extract the embedded CRUSH map or import a new CRUSH map.
-
-
-Options
-=======
-
-.. option:: --print
-
-   will simply make the tool print a plaintext dump of the map, after
-   any modifications are made.
-
-.. option:: --clobber
-
-   will allow osdmaptool to overwrite mapfilename if changes are made.
-
-.. option:: --import-crush mapfile
-
-   will load the CRUSH map from mapfile and embed it in the OSD map.
-
-.. option:: --export-crush mapfile
-
-   will extract the CRUSH map from the OSD map and write it to
-   mapfile.
-
-.. option:: --createsimple numosd [--pgbits bitsperosd]
-
-   will create a relatively generic OSD map with the numosd devices.
-   If --pgbits is specified, the initial placement group counts will
-   be set with bitsperosd bits per OSD. That is, the pg_num map
-   attribute will be set to numosd shifted by bitsperosd.
-
-.. option:: --test-map-pgs [--pool poolid]
-
-   will print out the mappings from placement groups to OSDs.
-
-.. option:: --test-map-pgs-dump [--pool poolid]
-
-   will print out the summary of all placement groups and the mappings
-   from them to the mapped OSDs.
-
-
-Example
-=======
-
-To create a simple map with 16 devices::
-
-   osdmaptool --createsimple 16 osdmap --clobber
-
-To view the result::
-
-   osdmaptool --print osdmap
-
-To view the mappings of placement groups for pool 0::
-
-   osdmaptool --test-map-pgs-dump rbd --pool 0
-
-   pool 0 pg_num 8
-   0.0     [0,2,1] 0
-   0.1     [2,0,1] 2
-   0.2     [0,1,2] 0
-   0.3     [2,0,1] 2
-   0.4     [0,2,1] 0
-   0.5     [0,2,1] 0
-   0.6     [0,1,2] 0
-   0.7     [1,0,2] 1
-   #osd    count   first   primary c wt    wt
-   osd.0   8       5       5       1       1
-   osd.1   8       1       1       1       1
-   osd.2   8       2       2       1       1
-   in 3
-   avg 8 stddev 0 (0x) (expected 2.3094 0.288675x))
-   min osd.0 8
-   max osd.0 8
-   size 0  0
-   size 1  0
-   size 2  0
-   size 3  8
-
-In this output:
-
-#. pool 0 has 8 placement groups, and two tables follow.
-#. The first table lists the placement groups, with columns:
-
-   * placement group id,
-   * acting set, and
-   * primary OSD.
-#. The second table lists all OSDs, with columns:
-
-   * count of placement groups mapped to this OSD,
-   * count of placement groups where this OSD is the first in the acting set,
-   * count of placement groups where this OSD is the primary,
-   * the CRUSH weight of this OSD, and
-   * the weight of this OSD.
-#. Summary statistics for the number of placement groups held by the 3 OSDs:
-
-   * average, stddev, stddev/average, expected stddev, expected stddev / average,
-   * min and max.
-#. The number of placement groups mapping to n OSDs. In this case, all 8
-   placement groups are mapping to 3 different OSDs.
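-Note that the map analyzed this way does not have to be synthetic: the
-current map of a live cluster can be extracted and inspected the same
-way (the file name below is arbitrary)::
-
-   ceph osd getmap -o osdmap
-   osdmaptool --print osdmap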
-
-In a less-balanced cluster, we could get the following output for the
-statistics of placement group distribution, whose standard deviation is
-1.41421::
-
-   #osd    count   first   primary c wt    wt
-   osd.0   33      9       9       0.0145874       1
-   osd.1   34      14      14      0.0145874       1
-   osd.2   31      7       7       0.0145874       1
-   osd.3   31      13      13      0.0145874       1
-   osd.4   30      14      14      0.0145874       1
-   osd.5   33      7       7       0.0145874       1
-   in 6
-   avg 32 stddev 1.41421 (0.0441942x) (expected 5.16398 0.161374x))
-   min osd.4 30
-   max osd.1 34
-   size 0  0
-   size 1  0
-   size 2  0
-   size 3  64
-
-
-Availability
-============
-
-**osdmaptool** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph `\(8),
-:doc:`crushtool `\(8),
diff --git a/src/ceph/doc/man/8/rados.rst b/src/ceph/doc/man/8/rados.rst
deleted file mode 100644
index 9490105..0000000
--- a/src/ceph/doc/man/8/rados.rst
+++ /dev/null
@@ -1,223 +0,0 @@
-:orphan:
-
-=======================================
- rados -- rados object storage utility
-=======================================
-
-.. program:: rados
-
-Synopsis
-========
-
-| **rados** [ -m *monaddr* ] [ mkpool | rmpool *foo* ] [ -p | --pool
-  *pool* ] [ -s | --snap *snap* ] [ -i *infile* ] [ -o *outfile* ]
-  *command* ...
-
-
-Description
-===========
-
-**rados** is a utility for interacting with a Ceph object storage
-cluster (RADOS), part of the Ceph distributed storage system.
-
-
-Options
-=======
-
-.. option:: -p pool, --pool pool
-
-   Interact with the given pool. Required by most commands.
-
-.. option:: -s snap, --snap snap
-
-   Read from the given pool snapshot. Valid for all pool-specific read operations.
-
-.. option:: -i infile
-
-   will specify an input file to be passed along as a payload with the
-   command to the monitor cluster. This is only used for specific
-   monitor commands.
-
-.. option:: -o outfile
-
-   will write any payload returned by the monitor cluster with its
-   reply to outfile. Only specific monitor commands (e.g. osd getmap)
-   return a payload.
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
-   Use ceph.conf configuration file instead of the default
-   /etc/ceph/ceph.conf to determine monitor addresses during startup.
-
-.. option:: -m monaddress[:port]
-
-   Connect to specified monitor (instead of looking through ceph.conf).
-
-.. option:: -b block_size
-
-   Set the block size for put/get/append ops and for write benchmarking.
-
-.. option:: --striper
-
-   Use the striping API of rados rather than the default one.
-   Available for stat, get, put, append, truncate, rm, ls and all
-   xattr-related operations.
-
-
-Global commands
-===============
-
-:command:`lspools`
-  List object pools
-
-:command:`df`
-  Show utilization statistics, including disk usage (bytes) and object
-  counts, over the entire system and broken down by pool.
-
-:command:`mkpool` *foo*
-  Create a pool with name foo.
-
-:command:`rmpool` *foo* [ *foo* --yes-i-really-really-mean-it ]
-  Delete the pool foo (and all its data).
-
-:command:`list-inconsistent-pg` *pool*
-  List inconsistent PGs in the given pool.
-
-:command:`list-inconsistent-obj` *pgid*
-  List inconsistent objects in the given PG.
-
-:command:`list-inconsistent-snapset` *pgid*
-  List inconsistent snapsets in the given PG.
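-For instance, a quick view of the existing pools and their usage
-(output omitted here)::
-
-   rados lspools
-   rados df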
-
-Pool specific commands
-======================
-
-:command:`get` *name* *outfile*
-  Read object name from the cluster and write it to outfile.
-
-:command:`put` *name* *infile* [--offset offset]
-  Write object name with start offset (default: 0) to the cluster with contents from infile.
-
-:command:`append` *name* *infile*
-  Append object name to the cluster with contents from infile.
-
-:command:`rm` *name*
-  Remove object name.
-
-:command:`listwatchers` *name*
-  List the watchers of object name.
-
-:command:`ls` *outfile*
-  List objects in the given pool and write them to outfile.
-
-:command:`lssnap`
-  List snapshots for the given pool.
-
-:command:`clonedata` *srcname* *dstname* --object-locator *key*
-  Clone object byte data from *srcname* to *dstname*. Both objects must be stored with the locator key *key* (usually either *srcname* or *dstname*). Object attributes and omap keys are not copied or cloned.
-
-:command:`mksnap` *foo*
-  Create pool snapshot named *foo*.
-
-:command:`rmsnap` *foo*
-  Remove pool snapshot named *foo*.
-
-:command:`bench` *seconds* *mode* [ -b *objsize* ] [ -t *threads* ]
-  Benchmark for *seconds*. The mode can be *write*, *seq*, or
-  *rand*. *seq* and *rand* are read benchmarks, either
-  sequential or random. Before running one of the reading benchmarks,
-  run a write benchmark with the *--no-cleanup* option. The default
-  object size is 4 MB, and the default number of simulated threads
-  (parallel writes) is 16. The *--run-name