path: root/src/ceph/doc/man
Diffstat (limited to 'src/ceph/doc/man')
-rw-r--r--  src/ceph/doc/man/8/CMakeLists.txt  85
-rw-r--r--  src/ceph/doc/man/8/ceph-authtool.rst  204
-rw-r--r--  src/ceph/doc/man/8/ceph-bluestore-tool.rst  123
-rw-r--r--  src/ceph/doc/man/8/ceph-clsinfo.rst  49
-rw-r--r--  src/ceph/doc/man/8/ceph-conf.rst  129
-rw-r--r--  src/ceph/doc/man/8/ceph-create-keys.rst  67
-rw-r--r--  src/ceph/doc/man/8/ceph-debugpack.rst  50
-rw-r--r--  src/ceph/doc/man/8/ceph-dencoder.rst  151
-rw-r--r--  src/ceph/doc/man/8/ceph-deploy.rst  609
-rw-r--r--  src/ceph/doc/man/8/ceph-detect-init.rst  65
-rw-r--r--  src/ceph/doc/man/8/ceph-disk.rst  97
-rw-r--r--  src/ceph/doc/man/8/ceph-fuse.rst  64
-rw-r--r--  src/ceph/doc/man/8/ceph-kvstore-tool.rst  85
-rw-r--r--  src/ceph/doc/man/8/ceph-mds.rst  87
-rw-r--r--  src/ceph/doc/man/8/ceph-mon.rst  94
-rw-r--r--  src/ceph/doc/man/8/ceph-osd.rst  134
-rw-r--r--  src/ceph/doc/man/8/ceph-post-file.rst  71
-rw-r--r--  src/ceph/doc/man/8/ceph-rbdnamer.rst  41
-rw-r--r--  src/ceph/doc/man/8/ceph-rest-api.rst  150
-rw-r--r--  src/ceph/doc/man/8/ceph-run.rst  45
-rw-r--r--  src/ceph/doc/man/8/ceph-syn.rst  99
-rw-r--r--  src/ceph/doc/man/8/ceph-volume-systemd.rst  56
-rw-r--r--  src/ceph/doc/man/8/ceph-volume.rst  122
-rw-r--r--  src/ceph/doc/man/8/ceph.rst  1550
-rw-r--r--  src/ceph/doc/man/8/crushtool.rst  284
-rw-r--r--  src/ceph/doc/man/8/librados-config.rst  46
-rw-r--r--  src/ceph/doc/man/8/monmaptool.rst  107
-rw-r--r--  src/ceph/doc/man/8/mount.ceph.rst  168
-rw-r--r--  src/ceph/doc/man/8/osdmaptool.rst  157
-rw-r--r--  src/ceph/doc/man/8/rados.rst  223
-rw-r--r--  src/ceph/doc/man/8/radosgw-admin.rst  504
-rw-r--r--  src/ceph/doc/man/8/radosgw.rst  256
-rw-r--r--  src/ceph/doc/man/8/rbd-fuse.rst  56
-rw-r--r--  src/ceph/doc/man/8/rbd-ggate.rst  79
-rw-r--r--  src/ceph/doc/man/8/rbd-mirror.rst  75
-rw-r--r--  src/ceph/doc/man/8/rbd-nbd.rst  67
-rw-r--r--  src/ceph/doc/man/8/rbd-replay-many.rst  73
-rw-r--r--  src/ceph/doc/man/8/rbd-replay-prep.rst  55
-rw-r--r--  src/ceph/doc/man/8/rbd-replay.rst  78
-rw-r--r--  src/ceph/doc/man/8/rbd.rst  615
-rw-r--r--  src/ceph/doc/man/8/rbdmap.rst  123
-rw-r--r--  src/ceph/doc/man/CMakeLists.txt  15
42 files changed, 0 insertions, 7208 deletions
diff --git a/src/ceph/doc/man/8/CMakeLists.txt b/src/ceph/doc/man/8/CMakeLists.txt
deleted file mode 100644
index 84e7640..0000000
--- a/src/ceph/doc/man/8/CMakeLists.txt
+++ /dev/null
@@ -1,85 +0,0 @@
-set(client_srcs
- ceph-syn.rst
- ceph-conf.rst
- ceph.rst
- ceph-authtool.rst
- ceph-kvstore-tool.rst
- rados.rst
- ceph-post-file.rst
- ceph-dencoder.rst)
-
-set(server_srcs
- ceph-deploy.rst
- crushtool.rst
- ceph-run.rst
- mount.ceph.rst
- ceph-create-keys.rst
- ceph-rest-api.rst)
-if(WITH_TESTS)
-list(APPEND server_srcs
- ceph-debugpack.rst)
-endif(WITH_TESTS)
-
-set(osd_srcs
- ceph-clsinfo.rst
- ceph-detect-init.rst
- ceph-disk.rst
- ceph-volume.rst
- ceph-volume-systemd.rst
- ceph-osd.rst
- osdmaptool.rst
- ceph-bluestore-tool.rst)
-
-set(mon_srcs
- ceph-mon.rst
- monmaptool.rst)
-
-list(APPEND man_srcs
- ${client_srcs}
- ${server_srcs}
- ${osd_srcs}
- ${mon_srcs}
- ceph-mds.rst
- librados-config.rst)
-
-if(HAVE_LIBFUSE)
- list(APPEND man_srcs
- ceph-fuse.rst
- rbd-fuse.rst)
-endif()
-
-if(WITH_RADOSGW)
- list(APPEND man_srcs
- radosgw.rst
- radosgw-admin.rst)
-endif()
-
-if(WITH_RBD)
- list(APPEND man_srcs
- ceph-rbdnamer.rst
- rbd-mirror.rst
- rbd-replay-many.rst
- rbd-replay-prep.rst
- rbd-replay.rst
- rbdmap.rst
- rbd.rst)
- if(LINUX)
- list(APPEND man_srcs rbd-nbd.rst)
- endif()
- if(FREEBSD)
- list(APPEND man_srcs rbd-ggate.rst)
- endif()
-endif()
-
-foreach(man ${man_srcs})
- list(APPEND sphinx_input ${CMAKE_CURRENT_SOURCE_DIR}/${man})
- # strip only the trailing .rst suffix; get_filename_component(cmd ${man} NAME_WE)
- # would truncate mount.ceph.rst to "mount" rather than "mount.ceph"
- string(REGEX REPLACE ".rst$" "" cmd ${man})
- list(APPEND sphinx_output ${sphinx_output_dir}/${cmd}.8)
- install(FILES ${sphinx_output_dir}/${cmd}.8
- DESTINATION ${CEPH_MAN_DIR}/man8)
-endforeach()
-
-set(sphinx_input ${sphinx_input} PARENT_SCOPE)
-set(sphinx_output ${sphinx_output} PARENT_SCOPE)
diff --git a/src/ceph/doc/man/8/ceph-authtool.rst b/src/ceph/doc/man/8/ceph-authtool.rst
deleted file mode 100644
index f1ac152..0000000
--- a/src/ceph/doc/man/8/ceph-authtool.rst
+++ /dev/null
@@ -1,204 +0,0 @@
-:orphan:
-
-=================================================
- ceph-authtool -- ceph keyring manipulation tool
-=================================================
-
-.. program:: ceph-authtool
-
-Synopsis
-========
-
-| **ceph-authtool** *keyringfile*
- [ -l | --list ]
- [ -p | --print-key ]
- [ -C | --create-keyring ]
- [ -g | --gen-key ]
- [ --gen-print-key ]
- [ --import-keyring *otherkeyringfile* ]
- [ -n | --name *entityname* ]
- [ -u | --set-uid *auid* ]
- [ -a | --add-key *base64_key* ]
- [ --cap *subsystem* *capability* ]
- [ --caps *capfile* ]
-
-
-Description
-===========
-
-**ceph-authtool** is a utility to create, view, and modify a Ceph keyring
-file. A keyring file stores one or more Ceph authentication keys and
-possibly an associated capability specification. Each key is
-associated with an entity name, of the form
-``{client,mon,mds,osd}.name``.
-
-**WARNING** Ceph provides authentication and protection against
-man-in-the-middle attacks once secret keys are in place. However,
-data over the wire is not encrypted, which may include the messages
-used to configure said keys. The system is primarily intended to be
-used in trusted environments.
-
-Options
-=======
-
-.. option:: -l, --list
-
- will list all keys and capabilities present in the keyring
-
-.. option:: -p, --print-key
-
- will print an encoded key for the specified entityname. This is
- suitable for the ``mount -o secret=`` argument
-
-.. option:: -C, --create-keyring
-
- will create a new keyring, overwriting any existing keyringfile
-
-.. option:: -g, --gen-key
-
- will generate a new secret key for the specified entityname
-
-.. option:: --gen-print-key
-
- will generate a new secret key for the specified entityname,
- without altering the keyringfile, printing the secret to stdout
-
-.. option:: --import-keyring *secondkeyringfile*
-
- will import the content of a given keyring to the keyringfile
-
-.. option:: -n, --name *name*
-
- specify entityname to operate on
-
-.. option:: -u, --set-uid *auid*
-
- sets the auid (authenticated user id) for the specified entityname
-
-.. option:: -a, --add-key *base64_key*
-
- will add an encoded key to the keyring
-
-.. option:: --cap *subsystem* *capability*
-
- will set the capability for given subsystem
-
-.. option:: --caps *capsfile*
-
- will set all of capabilities associated with a given key, for all subsystems
-
-
-Capabilities
-============
-
-The subsystem is the name of a Ceph subsystem: ``mon``, ``mds``, or
-``osd``.
-
-The capability is a string describing what the given user is allowed
-to do. This takes the form of a comma-separated list of allow
-clauses with a permission specifier containing one or more of rwx for
-read, write, and execute permission. ``allow *`` grants full
-superuser permissions for the given subsystem.
-
-For example::
-
- # can read, write, and execute objects
- osd = "allow rwx"
-
- # can access mds server
- mds = "allow"
-
- # can modify cluster state (i.e., is a server daemon)
- mon = "allow rwx"
-
-A librados user restricted to a single pool might look like::
-
- mon = "allow r"
-
- osd = "allow rw pool foo"
-
-A client using rbd with read access to one pool and read/write access to another::
-
- mon = "allow r"
-
- osd = "allow class-read object_prefix rbd_children, allow pool templates r class-read, allow pool vms rwx"
-
-A client mounting the file system with minimal permissions would need caps like::
-
- mds = "allow"
-
- osd = "allow rw pool data"
-
- mon = "allow r"
-
-
-OSD Capabilities
-================
-
-In general, an osd capability follows the grammar::
-
- osdcap := grant[,grant...]
- grant := allow (match capspec | capspec match)
- match := [pool[=]<poolname> | object_prefix <prefix>]
- capspec := * | [r][w][x] [class-read] [class-write]
-
-The capspec determines what kind of operations the entity can perform::
-
- r = read access to objects
- w = write access to objects
- x = can call any class method (same as class-read class-write)
- class-read = can call class methods that are reads
- class-write = can call class methods that are writes
- * = equivalent to rwx, plus the ability to run osd admin commands,
- i.e. ceph osd tell ...
-
-The match criteria restrict a grant based on the pool being accessed.
-Grants are additive if the client fulfills the match condition. For
-example, if a client has the osd capabilities: "allow r object_prefix
-prefix, allow w pool foo, allow x pool bar", then it has rw access to
-pool foo, rx access to pool bar, and r access to objects whose
-names begin with 'prefix' in any pool.
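-
-For example, that cap set could be attached to a key with ``ceph-authtool``
-(a sketch; the entity and pool names here are illustrative)::
-
- ceph-authtool keyring -n client.foo --cap osd "allow r object_prefix prefix, allow w pool foo, allow x pool bar"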
-
-Caps file format
-================
-
-The caps file format consists of zero or more key/value pairs, one per
-line. The key and value are separated by an ``=``, and the value must
-be quoted (with ``'`` or ``"``) if it contains any whitespace. The key
-is the name of the Ceph subsystem (``osd``, ``mds``, ``mon``), and the
-value is the capability string (see above).
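-
-For instance, a caps file for the pool-restricted librados user above might
-look like this sketch::
-
- mon = "allow r"
- osd = "allow rw pool foo"
-
-and could be applied with ``ceph-authtool keyring -n client.foo --caps capsfile``.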
-
-
-Example
-=======
-
-To create a new keyring containing a key for client.foo::
-
- ceph-authtool -C -n client.foo --gen-key keyring
-
-To associate some capabilities with the key (namely, the ability to
-mount a Ceph filesystem)::
-
- ceph-authtool -n client.foo --cap mds 'allow' --cap osd 'allow rw pool=data' --cap mon 'allow r' keyring
-
-To display the contents of the keyring::
-
- ceph-authtool -l keyring
-
-When mounting a Ceph file system, you can grab the appropriately encoded secret key with::
-
- mount -t ceph serverhost:/ mountpoint -o name=foo,secret=`ceph-authtool -p -n client.foo keyring`
-
-
-Availability
-============
-
-**ceph-authtool** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-bluestore-tool.rst b/src/ceph/doc/man/8/ceph-bluestore-tool.rst
deleted file mode 100644
index 7a7b0ea..0000000
--- a/src/ceph/doc/man/8/ceph-bluestore-tool.rst
+++ /dev/null
@@ -1,123 +0,0 @@
-:orphan:
-
-======================================================
- ceph-bluestore-tool -- bluestore administrative tool
-======================================================
-
-.. program:: ceph-bluestore-tool
-
-Synopsis
-========
-
-| **ceph-bluestore-tool** *command*
- [ --dev *device* ... ]
- [ --path *osd path* ]
- [ --out-dir *dir* ]
- [ --log-file | -l *filename* ]
- [ --deep ]
-| **ceph-bluestore-tool** fsck|repair --path *osd path* [ --deep ]
-| **ceph-bluestore-tool** show-label --dev *device* ...
-| **ceph-bluestore-tool** prime-osd-dir --dev *device* --path *osd path*
-| **ceph-bluestore-tool** bluefs-export --path *osd path* --out-dir *dir*
-| **ceph-bluestore-tool** bluefs-bdev-sizes --path *osd path*
-| **ceph-bluestore-tool** bluefs-bdev-expand --path *osd path*
-
-
-Description
-===========
-
-**ceph-bluestore-tool** is a utility to perform low-level administrative
-operations on a BlueStore instance.
-
-Commands
-========
-
-.. option:: help
-
- show help
-
-.. option:: fsck
-
- run consistency check on BlueStore metadata. If *--deep* is specified, also read all object data and verify checksums.
-
-.. option:: repair
-
- Run a consistency check *and* repair any errors we can.
-
-.. option:: bluefs-export
-
- Export the contents of BlueFS (i.e., rocksdb files) to an output directory.
-
-.. option:: bluefs-bdev-sizes --path *osd path*
-
- Print the device sizes, as understood by BlueFS, to stdout.
-
-.. option:: bluefs-bdev-expand --path *osd path*
-
- Instruct BlueFS to check the size of its block devices and, if they have expanded, make use of the additional space.
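-
- For example, assuming the default OSD data path::
-
- ceph-bluestore-tool bluefs-bdev-expand --path /var/lib/ceph/osd/ceph-0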
-
-.. option:: show-label --dev *device* [...]
-
- Show device label(s).
-
-Options
-=======
-
-.. option:: --dev *device*
-
- Add *device* to the list of devices to consider
-
-.. option:: --path *osd path*
-
- Specify an osd path. In most cases, the device list is inferred from the symlinks present in *osd path*. This is usually simpler than explicitly specifying the device(s) with --dev.
-
-.. option:: --out-dir *dir*
-
- Output directory for bluefs-export
-
-.. option:: -l, --log-file *log file*
-
- file to log to
-
-.. option:: --log-level *num*
-
- debug log level. Default is 30 (extremely verbose), 20 is very
- verbose, 10 is verbose, and 1 is not very verbose.
-
-.. option:: --deep
-
- deep scrub/repair (read and validate object data, not just metadata)
-
-Device labels
-=============
-
-Every BlueStore block device has a single block label at the beginning of the
-device. You can dump the contents of the label with::
-
- ceph-bluestore-tool show-label --dev *device*
-
-The main device will have a lot of metadata, including information
-that used to be stored in small files in the OSD data directory. The
-auxiliary devices (db and wal) will only have the minimum required
-fields (OSD UUID, size, device type, birth time).
-
-OSD directory priming
-=====================
-
-You can generate the content for an OSD data directory that can start up a
-BlueStore OSD with the *prime-osd-dir* command::
-
- ceph-bluestore-tool prime-osd-dir --dev *main device* --path /var/lib/ceph/osd/ceph-*id*
-
-
-Availability
-============
-
-**ceph-bluestore-tool** is part of Ceph, a massively scalable,
-open-source, distributed storage system. Please refer to the Ceph
-documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`ceph-osd <ceph-osd>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-clsinfo.rst b/src/ceph/doc/man/8/ceph-clsinfo.rst
deleted file mode 100644
index 0188ce1..0000000
--- a/src/ceph/doc/man/8/ceph-clsinfo.rst
+++ /dev/null
@@ -1,49 +0,0 @@
-:orphan:
-
-===============================================
- ceph-clsinfo -- show class object information
-===============================================
-
-.. program:: ceph-clsinfo
-
-Synopsis
-========
-
-| **ceph-clsinfo** [ *options* ] ... *filename*
-
-
-Description
-===========
-
-**ceph-clsinfo** can show name, version, and architecture information
-about a specific class object.
-
-
-Options
-=======
-
-.. option:: -n, --name
-
- Shows the class name
-
-.. option:: -v, --version
-
- Shows the class version
-
-.. option:: -a, --arch
-
- Shows the class architecture
-
-
-Availability
-============
-
-**ceph-clsinfo** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-conf.rst b/src/ceph/doc/man/8/ceph-conf.rst
deleted file mode 100644
index 9782e38..0000000
--- a/src/ceph/doc/man/8/ceph-conf.rst
+++ /dev/null
@@ -1,129 +0,0 @@
-:orphan:
-
-==================================
- ceph-conf -- ceph conf file tool
-==================================
-
-.. program:: ceph-conf
-
-Synopsis
-========
-
-| **ceph-conf** -c *conffile* --list-all-sections
-| **ceph-conf** -c *conffile* -L
-| **ceph-conf** -c *conffile* -l *prefix*
-| **ceph-conf** *key* -s *section1* ...
-| **ceph-conf** [-s *section* ] [-r] --lookup *key*
-| **ceph-conf** [-s *section* ] *key*
-
-
-Description
-===========
-
-**ceph-conf** is a utility for getting information about a ceph
-configuration file. As with most Ceph programs, you can specify which
-Ceph configuration file to use with the ``-c`` flag.
-
-
-Actions
-=======
-
-**ceph-conf** performs one of the following actions:
-
-.. option:: -L, --list-all-sections
-
- list all sections in the configuration file.
-
-.. option:: -l, --list-sections *prefix*
-
- list the sections with the given *prefix*. For example, ``--list-sections mon``
- would list all sections beginning with ``mon``.
-
-.. option:: --lookup *key*
-
- search and print the specified configuration setting. Note: ``--lookup`` is
- the default action. If no other actions are given on the command line, we will
- default to doing a lookup.
-
-.. option:: -h, --help
-
- print a summary of usage.
-
-
-Options
-=======
-
-.. option:: -c *conffile*
-
- the Ceph configuration file.
-
-.. option:: --filter-key *key*
-
- filter section list to only include sections with given *key* defined.
-
-.. option:: --filter-key-value *key* ``=`` *value*
-
- filter section list to only include sections with given *key*/*value* pair.
-
-.. option:: --name *type.id*
-
- the Ceph name for which the sections are searched (default 'client.admin').
- For example, if we specify ``--name osd.0``, the following sections will be
- searched: [osd.0], [osd], [global]
-
-.. option:: -r, --resolve-search
-
- search for the first file that exists and can be opened in the resulting
- comma-delimited search list.
-
-.. option:: -s, --section
-
- additional sections to search. These additional sections will be searched
- before the sections that would normally be searched. As always, the first
- matching entry we find will be returned.
-
-
-Examples
-========
-
-To find out what value osd 0 will use for the "osd data" option::
-
- ceph-conf -c foo.conf --name osd.0 --lookup "osd data"
-
-To find out what value mds.a will use for the "log file" option::
-
- ceph-conf -c foo.conf --name mds.a "log file"
-
-To list all sections that begin with "osd"::
-
- ceph-conf -c foo.conf -l osd
-
-To list all sections::
-
- ceph-conf -c foo.conf -L
-
-To print the path of the "keyring" used by "client.0"::
-
- ceph-conf --name client.0 -r --lookup keyring
-
-
-Files
-=====
-
-``/etc/ceph/$cluster.conf``, ``~/.ceph/$cluster.conf``, ``$cluster.conf``
-
-the Ceph configuration files to use if not specified.
-
-
-Availability
-============
-
-**ceph-conf** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer
-to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-create-keys.rst b/src/ceph/doc/man/8/ceph-create-keys.rst
deleted file mode 100644
index 20b6560..0000000
--- a/src/ceph/doc/man/8/ceph-create-keys.rst
+++ /dev/null
@@ -1,67 +0,0 @@
-:orphan:
-
-===============================================
-ceph-create-keys -- ceph keyring generate tool
-===============================================
-
-.. program:: ceph-create-keys
-
-Synopsis
-========
-
-| **ceph-create-keys** [-h] [-v] [-t seconds] [--cluster *name*] --id *id*
-
-
-Description
-===========
-
-:program:`ceph-create-keys` is a utility to generate bootstrap keyrings using
-the given monitor when it is ready.
-
-It creates the following auth entities (or users):
-
-``client.admin``
-
- and its key for your client host.
-
-``client.bootstrap-{osd, rgw, mds}``
-
- and their keys for bootstrapping the corresponding services.
-
-To list all users in the cluster::
-
- ceph auth ls
-
-
-Options
-=======
-
-.. option:: --cluster
-
- name of the cluster (default 'ceph').
-
-.. option:: -t
-
- time out after **seconds** (default: 600) waiting for a response from the monitor
-
-.. option:: -i, --id
-
- id of a ceph-mon that is coming up. **ceph-create-keys** will wait until it joins quorum.
-
-.. option:: -v, --verbose
-
- be more verbose.
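-
-For example, to generate the bootstrap keyrings once ``mon.a`` has joined
-quorum (the monitor id here is illustrative)::
-
- ceph-create-keys --cluster ceph --id a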
-
-
-Availability
-============
-
-**ceph-create-keys** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer
-to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-debugpack.rst b/src/ceph/doc/man/8/ceph-debugpack.rst
deleted file mode 100644
index 4f2c4f2..0000000
--- a/src/ceph/doc/man/8/ceph-debugpack.rst
+++ /dev/null
@@ -1,50 +0,0 @@
-:orphan:
-
-=============================================
- ceph-debugpack -- ceph debug packer utility
-=============================================
-
-.. program:: ceph-debugpack
-
-Synopsis
-========
-
-| **ceph-debugpack** [ *options* ] *filename.tar.gz*
-
-
-Description
-===========
-
-**ceph-debugpack** will build a tarball containing various items that are
-useful for debugging crashes. The resulting tarball can be shared with
-Ceph developers when debugging a problem.
-
-The tarball will include the binaries for ceph-mds, ceph-osd, ceph-mon, and
-radosgw, any log files, the ceph.conf configuration file, any core files we can
-find, and (if the system is running) dumps of the current cluster state
-as reported by 'ceph report'.
-
-
-Options
-=======
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
- Use *ceph.conf* configuration file instead of the default
- ``/etc/ceph/ceph.conf`` to determine monitor addresses during
- startup.
-
-
-Availability
-============
-
-**ceph-debugpack** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
-:doc:`ceph-post-file <ceph-post-file>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-dencoder.rst b/src/ceph/doc/man/8/ceph-dencoder.rst
deleted file mode 100644
index cf2e429..0000000
--- a/src/ceph/doc/man/8/ceph-dencoder.rst
+++ /dev/null
@@ -1,151 +0,0 @@
-:orphan:
-
-==============================================
- ceph-dencoder -- ceph encoder/decoder utility
-==============================================
-
-.. program:: ceph-dencoder
-
-Synopsis
-========
-
-| **ceph-dencoder** [commands...]
-
-
-Description
-===========
-
-**ceph-dencoder** is a utility to encode, decode, and dump ceph data
-structures. It is used for debugging and for testing inter-version
-compatibility.
-
-**ceph-dencoder** takes a simple list of commands and performs them
-in order.
-
-Commands
-========
-
-.. option:: version
-
- Print the version string for the **ceph-dencoder** binary.
-
-.. option:: import <file>
-
- Read a binary blob of encoded data from the given file. It will be
- placed in an in-memory buffer.
-
-.. option:: export <file>
-
- Write the contents of the current in-memory buffer to the given
- file.
-
-.. option:: list_types
-
- List the data types known to this build of **ceph-dencoder**.
-
-.. option:: type <name>
-
- Select the given type for future ``encode`` or ``decode`` operations.
-
-.. option:: skip <bytes>
-
- Seek <bytes> into the imported file before reading the data structure; use
- this with objects that have a preamble or header before the object of interest.
-
-.. option:: decode
-
- Decode the contents of the in-memory buffer into an instance of the
- previously selected type. If there is an error, report it.
-
-.. option:: encode
-
- Encode the contents of the in-memory instance of the previously
- selected type to the in-memory buffer.
-
-.. option:: dump_json
-
- Print a JSON-formatted description of the in-memory object.
-
-.. option:: count_tests
-
- Print the number of built-in test instances of the previously
- selected type that **ceph-dencoder** is able to generate.
-
-.. option:: select_test <n>
-
- Select the given built-in test instance as the in-memory instance
- of the type.
-
-.. option:: get_features
-
- Print the decimal value of the feature set supported by this version
- of **ceph-dencoder**. Each bit represents a feature. These correspond to
- CEPH_FEATURE_* defines in src/include/ceph_features.h.
-
-.. option:: set_features <f>
-
- Set the feature bits provided to ``encode`` to *f*. This allows
- you to encode objects such that they can be understood by old
- versions of the software (for those types that support it).
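-
-For example, a sketch of encoding a built-in test instance with a reduced
-feature set (the type, test index and feature value here are illustrative)::
-
- ceph-dencoder type object_info_t select_test 1 set_features 1 encode export /tmp/obj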
-
-Example
-=======
-
-Say you want to examine an attribute on an object stored by ``ceph-osd``. You can do this:
-
-::
-
- $ cd /mnt/osd.12/current/2.b_head
- $ attr -l foo_bar_head_EFE6384B
- Attribute "ceph.snapset" has a 31 byte value for foo_bar_head_EFE6384B
- Attribute "ceph._" has a 195 byte value for foo_bar_head_EFE6384B
- $ attr foo_bar_head_EFE6384B -g ceph._ -q > /tmp/a
- $ ceph-dencoder type object_info_t import /tmp/a decode dump_json
- { "oid": { "oid": "foo",
- "key": "bar",
- "snapid": -2,
- "hash": 4024842315,
- "max": 0},
- "locator": { "pool": 2,
- "preferred": -1,
- "key": "bar"},
- "category": "",
- "version": "9'1",
- "prior_version": "0'0",
- "last_reqid": "client.4116.0:1",
- "size": 1681,
- "mtime": "2012-02-21 08:58:23.666639",
- "lost": 0,
- "wrlock_by": "unknown.0.0:0",
- "snaps": [],
- "truncate_seq": 0,
- "truncate_size": 0,
- "watchers": {}}
-
-Alternatively, perhaps you wish to dump an internal CephFS metadata object; you
-might do that like this:
-
-::
-
- $ rados -p metadata get mds_snaptable mds_snaptable.bin
- $ ceph-dencoder type SnapServer skip 8 import mds_snaptable.bin decode dump_json
- { "snapserver": { "last_snap": 1,
- "pending_noop": [],
- "snaps": [],
- "need_to_purge": {},
- "pending_create": [],
- "pending_destroy": []}}
-
-
-Availability
-============
-
-**ceph-dencoder** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-deploy.rst b/src/ceph/doc/man/8/ceph-deploy.rst
deleted file mode 100644
index ff96574..0000000
--- a/src/ceph/doc/man/8/ceph-deploy.rst
+++ /dev/null
@@ -1,609 +0,0 @@
-:orphan:
-
-=====================================
- ceph-deploy -- Ceph deployment tool
-=====================================
-
-.. program:: ceph-deploy
-
-Synopsis
-========
-
-| **ceph-deploy** **new** [*initial-monitor-node(s)*]
-
-| **ceph-deploy** **install** [*ceph-node*] [*ceph-node*...]
-
-| **ceph-deploy** **mon** *create-initial*
-
-| **ceph-deploy** **osd** *prepare* [*ceph-node*]:[*dir-path*]
-
-| **ceph-deploy** **osd** *activate* [*ceph-node*]:[*dir-path*]
-
-| **ceph-deploy** **osd** *create* [*ceph-node*]:[*dir-path*]
-
-| **ceph-deploy** **admin** [*admin-node*][*ceph-node*...]
-
-| **ceph-deploy** **purgedata** [*ceph-node*][*ceph-node*...]
-
-| **ceph-deploy** **forgetkeys**
-
-Description
-===========
-
-:program:`ceph-deploy` is a tool which allows easy and quick deployment of a
-Ceph cluster without involving complex and detailed manual configuration. It
-uses ssh to gain access to other Ceph nodes from the admin node and sudo for
-administrator privileges on them, and its underlying Python scripts automate
-the manual process of Ceph installation on each node from the admin node itself.
-It can easily be run on a workstation and doesn't require servers, databases or
-any other automated tools. With :program:`ceph-deploy`, it is really easy to set
-up and take down a cluster. However, it is not a generic deployment tool. It is
-a specific tool which is designed for those who want to get Ceph up and running
-quickly with only the unavoidable initial configuration settings and without the
-overhead of installing other tools like ``Chef``, ``Puppet`` or ``Juju``. Those
-who want to customize security settings, partitions or directory locations and
-want to set up a cluster following detailed manual steps should use other tools,
-e.g., ``Chef``, ``Puppet``, ``Juju`` or ``Crowbar``.
-
-With :program:`ceph-deploy`, you can install Ceph packages on remote nodes,
-create a cluster, add monitors, gather/forget keys, add OSDs and metadata
-servers, configure admin hosts or take down the cluster.
-
-Commands
-========
-
-new
----
-
-Start deploying a new cluster and write a configuration file and keyring for it.
-It tries to copy ssh keys from the admin node to gain passwordless ssh to the
-monitor node(s), validates the host IP, creates a cluster with a new initial
-monitor node or nodes for monitor quorum, a ceph configuration file, a monitor
-secret keyring and a log file for the new cluster. It populates the newly
-created Ceph configuration file with the ``fsid`` of the cluster and the
-hostnames and IP addresses of the initial monitor members under the
-``[global]`` section.
-
-Usage::
-
- ceph-deploy new [MON][MON...]
-
-Here, [MON] is the initial monitor hostname (the short hostname, i.e., ``hostname -s``).
-
-Other options like :option:`--no-ssh-copykey`, :option:`--fsid`,
-:option:`--cluster-network` and :option:`--public-network` can also be used with
-this command.
-
-If more than one network interface is used, the ``public network`` setting has
-to be added under the ``[global]`` section of the Ceph configuration file. If
-the public subnet is given, the ``new`` command will choose the one IP from the
-remote host that exists within the subnet range. The public network can also be
-added at runtime using the :option:`--public-network` option with the command
-as mentioned above.
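-
-For example (the subnet and hostname are illustrative)::
-
- ceph-deploy new --public-network 192.0.2.0/24 mon1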
-
-
-install
--------
-
-Install Ceph packages on remote hosts. As a first step it installs
-``yum-plugin-priorities`` on the admin and other nodes using passwordless ssh
-and sudo so that Ceph packages from the upstream repository get higher
-priority. It then detects the platform and distribution for the hosts and
-installs Ceph normally by downloading distro-compatible packages if an adequate
-repo for Ceph has already been added. The ``--release`` flag is used to get the
-latest release for installation. During detection of platform and distribution
-before installation, if it finds the ``distro.init`` to be ``sysvinit``
-(Fedora, CentOS/RHEL etc), it doesn't allow installation with a custom cluster
-name and uses the default name ``ceph`` for the cluster.
-
-If the user explicitly specifies a custom repo url with :option:`--repo-url` for
-installation, anything detected from the configuration will be overridden and
-the custom repository location will be used for installation of Ceph packages.
-If required, valid custom repositories are also detected and installed. In case
-of installation from a custom repo a boolean is used to determine the logic
-needed to proceed with a custom repo installation. A custom repo install helper
-is used that goes through config checks to retrieve repos (and any extra repos
-defined) and installs them. ``cd_conf`` is the object built from ``argparse``
-that holds the flags and information needed to determine what metadata from the
-configuration is to be used.
-
-A user can also opt to install only the repository without installing Ceph and
-its dependencies by using the :option:`--repo` option.
-
-Usage::
-
- ceph-deploy install [HOST][HOST...]
-
-Here, [HOST] is/are the host node(s) where Ceph is to be installed.
-
-An option ``--release`` is used to install a release known as CODENAME
-(default: firefly).
-
-Other options like :option:`--testing`, :option:`--dev`, :option:`--adjust-repos`,
-:option:`--no-adjust-repos`, :option:`--repo`, :option:`--local-mirror`,
-:option:`--repo-url` and :option:`--gpg-url` can also be used with this command.
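-
-For example, to install a specific release on two nodes (the release and
-hostnames are illustrative)::
-
- ceph-deploy install --release firefly node1 node2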
-
-
-mds
----
-
-Deploy Ceph mds on remote hosts. A metadata server is needed to use CephFS and
-the ``mds`` command is used to create one on the desired host node. It uses the
-subcommand ``create`` to do so. ``create`` first gets the hostname and distro
-information of the desired mds host. It then tries to read the ``bootstrap-mds``
-key for the cluster and deploy it in the desired host. The key generally has a
-format of ``{cluster}.bootstrap-mds.keyring``. If it doesn't find a keyring,
-it runs ``gatherkeys`` to get the keyring. It then creates an mds on the desired
-host under the path ``/var/lib/ceph/mds/`` in ``/var/lib/ceph/mds/{cluster}-{name}``
-format and a bootstrap keyring under ``/var/lib/ceph/bootstrap-mds/`` in
-``/var/lib/ceph/bootstrap-mds/{cluster}.keyring`` format. It then runs appropriate
-commands based on ``distro.init`` to start the ``mds``.
-
-Usage::
-
- ceph-deploy mds create [HOST[:DAEMON-NAME]] [HOST[:DAEMON-NAME]...]
-
-The [DAEMON-NAME] is optional.
-
-
-mon
----
-
-Deploy Ceph monitor on remote hosts. ``mon`` makes use of certain subcommands
-to deploy Ceph monitors on other nodes.
-
-Subcommand ``create-initial`` deploys monitors defined in
-``mon initial members`` under the ``[global]`` section of the Ceph configuration
-file, waits until they form quorum and then gathers keys, reporting the monitor
-status along the way. If the monitors don't form quorum the command will
-eventually time out.
-
-Usage::
-
- ceph-deploy mon create-initial
-
-Subcommand ``create`` is used to deploy Ceph monitors by explicitly specifying
-the hosts which are desired to be made monitors. If no hosts are specified it
-will default to use the ``mon initial members`` defined under ``[global]``
-section of Ceph configuration file. ``create`` first detects platform and distro
-for desired hosts and checks if hostname is compatible for deployment. It then
-uses the monitor keyring initially created using the ``new`` command and deploys
-the monitor on the desired host. If multiple hosts were specified during the
-``new`` command, i.e., if there are multiple hosts in ``mon initial members``
-and multiple keyrings were created, then a concatenated keyring is used for
-deployment of monitors. In this process a keyring parser is used which looks
-for ``[entity]`` sections in monitor keyrings and returns a list of those
-sections. A helper then collects all keyrings into a single blob that is
-injected into the monitors with :option:`--mkfs` on remote nodes. All keyring
-files are concatenated in a directory whose name ends with ``.keyring``. During
-this process the helper uses the list of sections returned by the keyring
-parser to check whether an entity is already present in a keyring and, if not,
-adds it. The concatenated keyring is used for deployment of monitors to the
-desired hosts.
-
-Usage::
-
- ceph-deploy mon create [HOST] [HOST...]
-
-Here, [HOST] is hostname of desired monitor host(s).
-
-Subcommand ``add`` is used to add a monitor to an existing cluster. It first
-detects platform and distro for desired host and checks if hostname is compatible
-for deployment. It then uses the monitor keyring, ensures configuration for new
-monitor host and adds the monitor to the cluster. If the section for the monitor
-exists and defines a ``mon addr``, that will be used; otherwise it falls back to
-resolving the hostname to an IP. If :option:`--address` is used it will override
-all other options. After adding the monitor to the cluster, it gives it some time
-to start. It then looks for any monitor errors and checks monitor status. Monitor
-errors arise if the monitor is not added in ``mon initial members``, if it doesn't
-exist in ``monmap`` and if neither ``public_addr`` nor ``public_network`` keys
-were defined for monitors. Under such conditions, monitors may not be able to
-form quorum. Monitor status tells if the monitor is up and running normally. The
-status is checked by running ``ceph daemon mon.hostname mon_status`` on remote
-end which provides the output and returns a boolean status of what is going on.
-``False`` means the monitor is not healthy even if it is up and running, while
-``True`` means the monitor is up and running correctly.
-
-Usage::
-
- ceph-deploy mon add [HOST]
-
- ceph-deploy mon add [HOST] --address [IP]
-
-Here, [HOST] is the hostname and [IP] is the IP address of the desired monitor
-node. Please note, unlike other ``mon`` subcommands, only one node can be
-specified at a time.
-
-Subcommand ``destroy`` is used to completely remove monitors on remote hosts.
-It takes hostnames as arguments. It stops the monitor, verifies if ``ceph-mon``
-daemon really stopped, creates an archive directory ``mon-remove`` under
-``/var/lib/ceph/``, archives old monitor directory in
-``{cluster}-{hostname}-{stamp}`` format in it and removes the monitor from
-cluster by running ``ceph remove...`` command.
-
-Usage::
-
- ceph-deploy mon destroy [HOST] [HOST...]
-
-Here, [HOST] is hostname of monitor that is to be removed.
-
-
-gatherkeys
-----------
-
-Gather authentication keys for provisioning new nodes. It takes hostnames as
-arguments. It checks for and fetches ``client.admin`` keyring, monitor keyring
-and ``bootstrap-mds/bootstrap-osd`` keyring from monitor host. These
-authentication keys are used when new ``monitors/OSDs/MDS`` are added to the
-cluster.
-
-Usage::
-
- ceph-deploy gatherkeys [HOST] [HOST...]
-
-Here, [HOST] is hostname of the monitor from where keys are to be pulled.
-
-
-disk
-----
-
-Manage disks on a remote host. It actually triggers the ``ceph-disk`` utility
-and its subcommands to manage disks.
-
-Subcommand ``list`` lists disk partitions and Ceph OSDs.
-
-Usage::
-
- ceph-deploy disk list [HOST:[DISK]]
-
-Here, [HOST] is hostname of the node and [DISK] is disk name or path.
-
-Subcommand ``prepare`` prepares a directory, disk or drive for a Ceph OSD. It
-creates a GPT partition, marks the partition with the Ceph type uuid, creates a
-file system, marks the file system as ready for Ceph consumption, uses the
-entire partition and adds a new partition to the journal disk.
-
-Usage::
-
- ceph-deploy disk prepare [HOST:[DISK]]
-
-Here, [HOST] is hostname of the node and [DISK] is disk name or path.
-
-Subcommand ``activate`` activates the Ceph OSD. It mounts the volume in a
-temporary location, allocates an OSD id (if needed), remounts in the correct
-location ``/var/lib/ceph/osd/$cluster-$id`` and starts ``ceph-osd``. It is
-triggered by ``udev`` when it sees the OSD GPT partition type or on ceph service
-start with ``ceph disk activate-all``.
-
-Usage::
-
- ceph-deploy disk activate [HOST:[DISK]]
-
-Here, [HOST] is hostname of the node and [DISK] is disk name or path.
-
-Subcommand ``zap`` zaps/erases/destroys a device's partition table and contents.
-It actually uses ``sgdisk`` and its option ``--zap-all`` to destroy both GPT and
-MBR data structures so that the disk becomes suitable for repartitioning.
-``sgdisk`` then uses ``--mbrtogpt`` to convert the MBR or BSD disklabel disk to a
-GPT disk. The ``prepare`` subcommand can now be executed which will create a new
-GPT partition.
-
-Usage::
-
- ceph-deploy disk zap [HOST:[DISK]]
-
-Here, [HOST] is hostname of the node and [DISK] is disk name or path.
-
-
-osd
----
-
-Manage OSDs by preparing data disk on remote host. ``osd`` makes use of certain
-subcommands for managing OSDs.
-
-Subcommand ``prepare`` prepares a directory, disk or drive for a Ceph OSD. It
-first checks against multiple OSDs getting created and warns about the
-possibility of creating more than the recommended number, which would cause
-issues with the max allowed PIDs in a system. It then reads the bootstrap-osd
-key for the cluster or
-writes the bootstrap key if not found. It then uses :program:`ceph-disk`
-utility's ``prepare`` subcommand to prepare the disk, journal and deploy the OSD
-on the desired host. Once prepared, it gives some time to the OSD to settle and
-checks for any possible errors and if found, reports to the user.
-
-Usage::
-
- ceph-deploy osd prepare HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL]...]
-
-Subcommand ``activate`` activates the OSD prepared using ``prepare`` subcommand.
-It actually uses :program:`ceph-disk` utility's ``activate`` subcommand with
-appropriate init type based on distro to activate the OSD. Once activated, it
-gives some time to the OSD to start and checks for any possible errors and if
-found, reports to the user. It checks the status of the prepared OSD, checks the
-OSD tree and makes sure the OSDs are up and in.
-
-Usage::
-
- ceph-deploy osd activate HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL]...]
-
-Subcommand ``create`` uses ``prepare`` and ``activate`` subcommands to create an
-OSD.
-
-Usage::
-
- ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL]...]
-
-Subcommand ``list`` lists disk partitions, Ceph OSDs and prints OSD metadata.
-It gets the osd tree from a monitor host, uses the ``ceph-disk list`` output
-and gets the mount point by matching the line where the partition mentions
-the OSD name, reads metadata from files, checks if a journal path exists,
-checks if the OSD is in the OSD tree, and prints the OSD metadata.
-
-Usage::
-
- ceph-deploy osd list HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL]...]
-
-
-admin
------
-
-Push configuration and ``client.admin`` key to a remote host. It takes
-the ``{cluster}.client.admin.keyring`` from admin node and writes it under
-``/etc/ceph`` directory of desired node.
-
-Usage::
-
- ceph-deploy admin [HOST] [HOST...]
-
-Here, [HOST] is desired host to be configured for Ceph administration.
-
-
-config
-------
-
-Push/pull a configuration file to/from a remote host. The ``push`` subcommand
-takes the configuration file from the admin host and writes it to the remote
-host under the ``/etc/ceph`` directory. The ``pull`` subcommand does the
-opposite, i.e., pulls the configuration file under the ``/etc/ceph`` directory
-of the remote host to the admin node.
-
-Usage::
-
- ceph-deploy config push [HOST] [HOST...]
-
- ceph-deploy config pull [HOST] [HOST...]
-
-Here, [HOST] is the hostname of the node where config file will be pushed to or
-pulled from.
-
-
-uninstall
----------
-
-Remove Ceph packages from remote hosts. It detects the platform and distro of
-selected host and uninstalls Ceph packages from it. However, some dependencies
-like ``librbd1`` and ``librados2`` will not be removed because they can cause
-issues with ``qemu-kvm``.
-
-Usage::
-
- ceph-deploy uninstall [HOST] [HOST...]
-
-Here, [HOST] is hostname of the node from where Ceph will be uninstalled.
-
-
-purge
------
-
-Remove Ceph packages from remote hosts and purge all data. It detects the
-platform and distro of selected host, uninstalls Ceph packages and purges all
-data. However, some dependencies like ``librbd1`` and ``librados2`` will not be
-removed because they can cause issues with ``qemu-kvm``.
-
-Usage::
-
- ceph-deploy purge [HOST] [HOST...]
-
-Here, [HOST] is hostname of the node from where Ceph will be purged.
-
-
-purgedata
----------
-
-Purge (delete, destroy, discard, shred) any Ceph data from ``/var/lib/ceph``.
-Once it detects the platform and distro of desired host, it first checks if Ceph
-is still installed on the selected host and if installed, it won't purge data
-from it. If Ceph is already uninstalled from the host, it tries to remove the
-contents of ``/var/lib/ceph``. If that fails, OSDs are probably still mounted
-and need to be unmounted to continue. It unmounts the OSDs and tries to remove
-the contents of ``/var/lib/ceph`` again and checks for errors. It also removes
-contents of ``/etc/ceph``. Once all steps are successfully completed, all the
-Ceph data from the selected host are removed.
-
-Usage::
-
- ceph-deploy purgedata [HOST] [HOST...]
-
-Here, [HOST] is hostname of the node from where Ceph data will be purged.
-
-
-forgetkeys
-----------
-
-Remove authentication keys from the local directory. It removes all the
-authentication keys, i.e., the monitor keyring, client.admin keyring, and
-bootstrap-osd and bootstrap-mds keyrings, from the node.
-
-Usage::
-
- ceph-deploy forgetkeys
-
-
-pkg
----
-
-Manage packages on remote hosts. It is used for installing or removing packages
-from remote hosts. The package names for installation or removal are to be
-specified after the command. Two options :option:`--install` and
-:option:`--remove` are used for this purpose.
-
-Usage::
-
- ceph-deploy pkg --install [PKGs] [HOST] [HOST...]
-
- ceph-deploy pkg --remove [PKGs] [HOST] [HOST...]
-
-Here, [PKGs] is a comma-separated list of package names and [HOST] is the
-hostname of the remote node where packages are to be installed or removed.
-
-
-calamari
---------
-
-Install and configure Calamari nodes. It first checks if distro is supported
-for Calamari installation by ceph-deploy. An argument ``connect`` is used for
-installation and configuration. It checks for ``ceph-deploy`` configuration
-file (cd_conf) and Calamari release repo or ``calamari-minion`` repo. It relies
-on default for repo installation as it doesn't install Ceph unless specified
-otherwise. An ``options`` dictionary is also defined because ``ceph-deploy``
-pops items internally, which causes issues when those items need to be
-available for every host. If the distro is Debian/Ubuntu, it is ensured that
-the proxy is disabled for the ``calamari-minion`` repo. The ``calamari-minion``
-package is then installed and custom repository files are added. The minion
-config is placed prior to installation so that it is present when the minion
-first starts. The config directory and calamari salt config are created, and
-the salt-minion package is installed. If the distro is RedHat/CentOS, the
-salt-minion service is started.
-
-Usage::
-
- ceph-deploy calamari {connect} [HOST] [HOST...]
-
-Here, [HOST] is the hostname where Calamari is to be installed.
-
-An option ``--release`` can be used to use a given release from repositories
-defined in :program:`ceph-deploy`'s configuration. Defaults to ``calamari-minion``.
-
-Another option :option:`--master` can also be used with this command.
-
-Options
-=======
-
-.. option:: --address
-
- IP address of the host node to be added to the cluster.
-
-.. option:: --adjust-repos
-
- Install packages modifying source repos.
-
-.. option:: --ceph-conf
-
- Use (or reuse) a given ``ceph.conf`` file.
-
-.. option:: --cluster
-
- Name of the cluster.
-
-.. option:: --dev
-
- Install a bleeding edge built from Git branch or tag (default: master).
-
-.. option:: --cluster-network
-
- Specify the (internal) cluster network.
-
-.. option:: --dmcrypt
-
- Encrypt [data-path] and/or journal devices with ``dm-crypt``.
-
-.. option:: --dmcrypt-key-dir
-
- Directory where ``dm-crypt`` keys are stored.
-
-.. option:: --install
-
- Comma-separated package(s) to install on remote hosts.
-
-.. option:: --fs-type
-
- Filesystem to use to format disk ``(xfs, btrfs or ext4)``. Note that support for btrfs and ext4 is no longer tested or recommended; please use xfs.
-
-.. option:: --fsid
-
- Provide an alternate FSID for ``ceph.conf`` generation.
-
-.. option:: --gpg-url
-
- Specify a GPG key url to be used with custom repos (defaults to ceph.com).
-
-.. option:: --keyrings
-
- Concatenate multiple keyrings to be seeded on new monitors.
-
-.. option:: --local-mirror
-
- Fetch packages and push them to hosts for a local repo mirror.
-
-.. option:: --master
-
- The domain for the Calamari master server.
-
-.. option:: --mkfs
-
- Inject keys to MONs on remote nodes.
-
-.. option:: --no-adjust-repos
-
- Install packages without modifying source repos.
-
-.. option:: --no-ssh-copykey
-
- Do not attempt to copy ssh keys.
-
-.. option:: --overwrite-conf
-
- Overwrite an existing conf file on remote host (if present).
-
-.. option:: --public-network
-
- Specify the public network for a cluster.
-
-.. option:: --remove
-
- Comma-separated package(s) to remove from remote hosts.
-
-.. option:: --repo
-
- Install repo files only (skips package installation).
-
-.. option:: --repo-url
-
- Specify a repo url that mirrors/contains Ceph packages.
-
-.. option:: --testing
-
- Install the latest development release.
-
-.. option:: --username
-
- The username to connect to the remote host.
-
-.. option:: --version
-
- The current installed version of :program:`ceph-deploy`.
-
-.. option:: --zap-disk
-
- Destroy the partition table and content of a disk.
-
-
-Availability
-============
-
-:program:`ceph-deploy` is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the documentation at http://ceph.com/ceph-deploy/docs for more information.
-
-
-See also
-========
-
-:doc:`ceph-mon <ceph-mon>`\(8),
-:doc:`ceph-osd <ceph-osd>`\(8),
-:doc:`ceph-disk <ceph-disk>`\(8),
-:doc:`ceph-mds <ceph-mds>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-detect-init.rst b/src/ceph/doc/man/8/ceph-detect-init.rst
deleted file mode 100644
index c409a94..0000000
--- a/src/ceph/doc/man/8/ceph-detect-init.rst
+++ /dev/null
@@ -1,65 +0,0 @@
-:orphan:
-
-============================================================
- ceph-detect-init -- display the init system Ceph should use
-============================================================
-
-.. program:: ceph-detect-init
-
-Synopsis
-========
-
-| **ceph-detect-init** [--verbose] [--use-rhceph] [--default *init*]
-
-Description
-===========
-
-:program:`ceph-detect-init` is a utility that prints the init system
-Ceph uses. It can be one of ``sysvinit``, ``upstart`` or ``systemd``.
-The init system Ceph uses may not be the default init system of the
-host operating system. For instance on Debian Jessie, Ceph may use
-``sysvinit`` although ``systemd`` is the default.
-
-If the init system of the host operating system is unknown, it exits with an
-error, unless :option:`--default` is specified.
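-
-For example, on a systemd-based host this might print::
-
- $ ceph-detect-init --default sysvinit
- systemd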
-
-Options
-=======
-
-.. option:: --use-rhceph
-
- When an operating system identifies itself as Red Hat, it is
- treated as if it was CentOS. With :option:`--use-rhceph` it is
- treated as RHEL instead.
-
-.. option:: --default INIT
-
- If the init system of the host operating system is unknown, return
- the value of *INIT* instead of failing with an error.
-
-.. option:: --verbose
-
- Display additional information for debugging.
-
-Bugs
-====
-
-:program:`ceph-detect-init` is used by :program:`ceph-disk` to figure out the init system to manage the mount directory of an OSD. But only the following combinations are fully tested:
-
-- `upstart` on `Ubuntu 14.04`
-- `systemd` on `Ubuntu 15.04` and up
-- `systemd` on `Debian 8` and up
-- `systemd` on `RHEL/CentOS 7` and up
-- `systemd` on `Fedora 22` and up
-
-Availability
-============
-
-:program:`ceph-detect-init` is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-See also
-========
-
-:doc:`ceph-disk <ceph-disk>`\(8),
-:doc:`ceph-deploy <ceph-deploy>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-disk.rst b/src/ceph/doc/man/8/ceph-disk.rst
deleted file mode 100644
index 4635937..0000000
--- a/src/ceph/doc/man/8/ceph-disk.rst
+++ /dev/null
@@ -1,97 +0,0 @@
-:orphan:
-
-===================================================================
- ceph-disk -- Ceph disk utility for OSD
-===================================================================
-
-.. program:: ceph-disk
-
-Synopsis
-========
-
-| **ceph-disk** [-h] [-v] [--log-stdout] [--prepend-to-path PATH]
-| [--statedir PATH] [--sysconfdir PATH]
-| [--setuser USER] [--setgroup GROUP]
-| ...
-
-optional arguments
-------------------
-
--h, --help show this help message and exit
--v, --verbose be more verbose
---log-stdout log to stdout
---prepend-to-path PATH
- prepend PATH to $PATH for backward compatibility (default /usr/bin)
---statedir PATH directory in which ceph state is preserved (default /var/lib/ceph)
---sysconfdir PATH directory in which ceph configuration files are found (default /etc/ceph)
---setuser USER use the given user for subprocesses, rather than ceph or root
---setgroup GROUP use the given group for subprocesses, rather than ceph or root
-
-subcommands
------------
-
-prepare
- Prepare a directory or disk for a Ceph OSD
-activate
- Activate a Ceph OSD
-activate-lockbox
- Activate a Ceph lockbox
-activate-block
- Activate an OSD via its block device
-activate-journal
- Activate an OSD via its journal device
-activate-all
- Activate all tagged OSD partitions
-list
- List disks, partitions, and Ceph OSDs
-suppress-activate
- Suppress activate on a device (prefix)
-unsuppress-activate
- Stop suppressing activate on a device (prefix)
-deactivate
- Deactivate a Ceph OSD
-destroy
- Destroy a Ceph OSD
-zap
- Zap/erase/destroy a device's partition table (and contents)
-trigger
- Trigger an event (called by udev)
-fix
- Fix SELinux labels and/or file permissions
-
-Description
-===========
-
-:program:`ceph-disk` is a utility that can prepare and activate a disk, partition or
-directory as a Ceph OSD. It is run directly or triggered by :program:`ceph-deploy`
-or ``udev``. It can also be triggered by other deployment utilities like ``Chef``,
-``Juju``, ``Puppet`` etc.
-
-It actually automates the multiple steps involved in manual creation and start
-of an OSD into two steps of preparing and activating the OSD by using the
-subcommands ``prepare`` and ``activate``.
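-
-For example, a minimal two-step flow might look like this (the device names
-are illustrative)::
-
- ceph-disk prepare /dev/sdb
- ceph-disk activate /dev/sdb1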
-
-:program:`ceph-disk` also automates the multiple steps involved to manually stop
-and destroy an OSD into two steps of deactivating and destroying the OSD by using
-the subcommands ``deactivate`` and ``destroy``.
-
-The documentation for each subcommand (prepare, activate, etc.) can be displayed
-with its ``--help`` option. For instance ``ceph-disk prepare --help``.
-
-Bugs
-====
-
-See also the ``Bugs`` section in :doc:`ceph-detect-init <ceph-detect-init>`\(8).
-
-Availability
-============
-
-:program:`ceph-disk` is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-See also
-========
-
-:doc:`ceph-detect-init <ceph-detect-init>`\(8)
-:doc:`ceph-osd <ceph-osd>`\(8),
-:doc:`ceph-deploy <ceph-deploy>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-fuse.rst b/src/ceph/doc/man/8/ceph-fuse.rst
deleted file mode 100644
index cede60e..0000000
--- a/src/ceph/doc/man/8/ceph-fuse.rst
+++ /dev/null
@@ -1,64 +0,0 @@
-:orphan:
-
-=========================================
- ceph-fuse -- FUSE-based client for ceph
-=========================================
-
-.. program:: ceph-fuse
-
-Synopsis
-========
-
-| **ceph-fuse** [ -m *monaddr*:*port* ] *mountpoint* [ *fuse options* ]
-
-
-Description
-===========
-
-**ceph-fuse** is a FUSE (Filesystem in Userspace) client for the Ceph
-distributed file system. It will mount a ceph file system (specified
-via the -m option or as described by ceph.conf; see below) at the
-specified mount point.
-
-The file system can be unmounted with::
-
- fusermount -u mountpoint
-
-or by sending ``SIGINT`` to the ``ceph-fuse`` process.
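-
-For example, to mount the file system via a specific monitor (the monitor
-address and mountpoint are illustrative)::
-
- ceph-fuse -m 192.168.0.1:6789 /mnt/cephfs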
-
-
-Options
-=======
-
-Any options not recognized by ceph-fuse will be passed on to libfuse.
-
-.. option:: -d
-
- Detach from console and daemonize after startup.
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
- Use *ceph.conf* configuration file instead of the default
- ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
-
-.. option:: -m monaddress[:port]
-
- Connect to specified monitor (instead of looking through ceph.conf).
-
-.. option:: -r root_directory
-
- Use root_directory as the mounted root, rather than the full Ceph tree.
-
-
-Availability
-============
-
-**ceph-fuse** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-fusermount(8),
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-kvstore-tool.rst b/src/ceph/doc/man/8/ceph-kvstore-tool.rst
deleted file mode 100644
index 4baa492..0000000
--- a/src/ceph/doc/man/8/ceph-kvstore-tool.rst
+++ /dev/null
@@ -1,85 +0,0 @@
-:orphan:
-
-=====================================================
- ceph-kvstore-tool -- ceph kvstore manipulation tool
-=====================================================
-
-.. program:: ceph-kvstore-tool
-
-Synopsis
-========
-
-| **ceph-kvstore-tool** <leveldb|rocksdb|bluestore-kv> <store path> *command* [args...]
-
-
-Description
-===========
-
-:program:`ceph-kvstore-tool` is a kvstore manipulation tool. It allows users to manipulate
-leveldb/rocksdb's data (like an OSD's omap) offline.
-
-Commands
-========
-
-:program:`ceph-kvstore-tool` utility uses many commands for debugging purpose
-which are as follows:
-
-:command:`list [prefix]`
- Print key of all KV pairs stored with the URL encoded prefix.
-
-:command:`list-crc [prefix]`
- Print CRC of all KV pairs stored with the URL encoded prefix.
-
-:command:`exists <prefix> [key]`
- Check if there is any KV pair stored with the URL encoded prefix. If key
- is also specified, check for the key with the prefix instead.
-
-:command:`get <prefix> <key> [out <file>]`
- Get the value of the KV pair stored with the URL encoded prefix and key.
- If file is also specified, write the value to the file.
-
-:command:`crc <prefix> <key>`
- Get the CRC of the KV pair stored with the URL encoded prefix and key.
-
-:command:`get-size [<prefix> <key>]`
- Get estimated store size or size of value specified by prefix and key.
-
-:command:`set <prefix> <key> [ver <N>|in <file>]`
- Set the value of the KV pair stored with the URL encoded prefix and key.
- The value could be *version_t* or text.
-
-:command:`rm <prefix> <key>`
- Remove the KV pair stored with the URL encoded prefix and key.
-
-:command:`rm-prefix <prefix>`
- Remove all KV pairs stored with the URL encoded prefix.
-
-:command:`store-copy <path> [num-keys-per-tx]`
- Copy all KV pairs to another directory specified by ``path``.
- [num-keys-per-tx] is the number of KV pairs copied for a transaction.
-
-:command:`store-crc <path>`
- Store CRC of all KV pairs to a file specified by ``path``.
-
-:command:`compact`
- Subcommand ``compact`` is used to compact all data of kvstore. It will open
- the database, and trigger a database's compaction. After compaction, some
- disk space may be released.
-
-:command:`compact-prefix <prefix>`
- Compact all entries specified by the URL encoded prefix.
-
-:command:`compact-range <prefix> <start> <end>`
- Compact some entries specified by the URL encoded prefix and range.
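-
-For example, to list the keys stored in an OSD's omap (the store path is
-illustrative)::
-
- ceph-kvstore-tool rocksdb /var/lib/ceph/osd/ceph-0/current/omap list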
-
-Availability
-============
-
-**ceph-kvstore-tool** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-mds.rst b/src/ceph/doc/man/8/ceph-mds.rst
deleted file mode 100644
index b17fd70..0000000
--- a/src/ceph/doc/man/8/ceph-mds.rst
+++ /dev/null
@@ -1,87 +0,0 @@
-:orphan:
-
-=========================================
- ceph-mds -- ceph metadata server daemon
-=========================================
-
-.. program:: ceph-mds
-
-Synopsis
-========
-
-| **ceph-mds** -i *name* [ --hot-standby [*rank*] ]
-
-
-Description
-===========
-
-**ceph-mds** is the metadata server daemon for the Ceph distributed file
-system. One or more instances of ceph-mds collectively manage the file
-system namespace, coordinating access to the shared OSD cluster.
-
-Each ceph-mds daemon instance should have a unique name. The name is used
-to identify daemon instances in ``ceph.conf``.
-
-Once the daemon has started, the monitor cluster will normally assign
-it a logical rank, or put it in a standby pool to take over for
-another daemon that crashes. Some of the specified options can cause
-other behaviors.
-
-If you specify hot-standby, you must either specify the rank on the command
-line, or specify one of the mds_standby_for_[rank|name] parameters in the
-config. The command line specification overrides the config, and specifying
-the rank overrides specifying the name.
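-
-For example, a daemon might be started as a hot standby for rank 0 like so
-(the daemon name ``b`` is illustrative)::
-
- ceph-mds -i b --hot-standby 0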
-
-
-Options
-=======
-
-.. option:: -f, --foreground
-
- Foreground: do not daemonize after startup (run in foreground). Do
- not generate a pid file. Useful when run via :doc:`ceph-run
- <ceph-run>`\(8).
-
-.. option:: -d
-
- Debug mode: like ``-f``, but also send all log output to stderr.
-
-.. option:: --setuser userorgid
-
- Set uid after starting. If a username is specified, the user
- record is looked up to get a uid and a gid, and the gid is set
- as well, unless --setgroup is also specified.
-
-.. option:: --setgroup grouporgid
-
- Set gid after starting. If a group name is specified the group
- record is looked up to get a gid.
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
- Use *ceph.conf* configuration file instead of the default
- ``/etc/ceph/ceph.conf`` to determine monitor addresses during
- startup.
-
-.. option:: -m monaddress[:port]
-
- Connect to specified monitor (instead of looking through
- ``ceph.conf``).
-
-.. option:: --hot-standby <rank>
-
- Start as a hot standby for MDS <rank>.
-
-Availability
-============
-
-**ceph-mds** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to the Ceph documentation at
-http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8),
-:doc:`ceph-mon <ceph-mon>`\(8),
-:doc:`ceph-osd <ceph-osd>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-mon.rst b/src/ceph/doc/man/8/ceph-mon.rst
deleted file mode 100644
index 7a2cd03..0000000
--- a/src/ceph/doc/man/8/ceph-mon.rst
+++ /dev/null
@@ -1,94 +0,0 @@
-:orphan:
-
-=================================
- ceph-mon -- ceph monitor daemon
-=================================
-
-.. program:: ceph-mon
-
-Synopsis
-========
-
-| **ceph-mon** -i *monid* [ --mon-data *mondatapath* ]
-
-
-Description
-===========
-
-**ceph-mon** is the cluster monitor daemon for the Ceph distributed
-file system. One or more instances of **ceph-mon** form a Paxos
-part-time parliament cluster that provides extremely reliable and
-durable storage of cluster membership, configuration, and state.
-
-The *mondatapath* refers to a directory on a local file system storing
-monitor data. It is normally specified via the ``mon data`` option in
-the configuration file.
-
-Options
-=======
-
-.. option:: -f, --foreground
-
- Foreground: do not daemonize after startup (run in foreground). Do
- not generate a pid file. Useful when run via :doc:`ceph-run <ceph-run>`\(8).
-
-.. option:: -d
-
- Debug mode: like ``-f``, but also send all log output to stderr.
-
-.. option:: --setuser userorgid
-
- Set uid after starting. If a username is specified, the user
- record is looked up to get a uid and a gid, and the gid is set
- as well, unless --setgroup is also specified.
-
-.. option:: --setgroup grouporgid
-
- Set gid after starting. If a group name is specified the group
- record is looked up to get a gid.
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
- Use *ceph.conf* configuration file instead of the default
- ``/etc/ceph/ceph.conf`` to determine monitor addresses during
- startup.
-
-.. option:: --mkfs
-
- Initialize the ``mon data`` directory with seed information to form
- an initial ceph file system or to join an existing monitor
- cluster. Three pieces of information must be provided:
-
- - The cluster fsid. This can come from a monmap (``--monmap <path>``) or
- explicitly via ``--fsid <uuid>``.
- - A list of monitors and their addresses. This list of monitors
- can come from a monmap (``--monmap <path>``), the ``mon host``
- configuration value (in *ceph.conf* or via ``-m
- host1,host2,...``), or ``mon addr`` lines in *ceph.conf*. If this
- monitor is to be part of the initial monitor quorum for a new
- Ceph cluster, then it must be included in the initial list,
- matching either the name or address of a monitor in the list.
- When matching by address, either the ``public addr`` or ``public
- subnet`` options may be used.
- - The monitor secret key ``mon.``. This must be included in the
- keyring provided via ``--keyring <path>``.
-
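- For example, a new monitor might be initialized like so (the monitor
- name and paths are illustrative)::
-
-   ceph-mon -i mymon --mkfs --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
-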
-.. option:: --keyring
-
- Specify a keyring for use with ``--mkfs``.
-
-
-Availability
-============
-
-**ceph-mon** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer
-to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8),
-:doc:`ceph-mds <ceph-mds>`\(8),
-:doc:`ceph-osd <ceph-osd>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-osd.rst b/src/ceph/doc/man/8/ceph-osd.rst
deleted file mode 100644
index 388e339..0000000
--- a/src/ceph/doc/man/8/ceph-osd.rst
+++ /dev/null
@@ -1,134 +0,0 @@
-:orphan:
-
-========================================
- ceph-osd -- ceph object storage daemon
-========================================
-
-.. program:: ceph-osd
-
-Synopsis
-========
-
-| **ceph-osd** -i *osdnum* [ --osd-data *datapath* ] [ --osd-journal
- *journal* ] [ --mkfs ] [ --mkjournal ] [--flush-journal] [--check-allows-journal] [--check-wants-journal] [--check-needs-journal] [ --mkkey ]
-
-
-Description
-===========
-
-**ceph-osd** is the object storage daemon for the Ceph distributed file
-system. It is responsible for storing objects on a local file system
-and providing access to them over the network.
-
-The datapath argument should be a directory on an xfs file system
-where the object data resides. The journal is optional, and is only
-useful performance-wise when it resides on a disk with lower latency
-than the datapath (ideally, an NVRAM device).
-
-
-Options
-=======
-
-.. option:: -f, --foreground
-
- Foreground: do not daemonize after startup (run in foreground). Do
- not generate a pid file. Useful when run via :doc:`ceph-run <ceph-run>`\(8).
-
-.. option:: -d
-
- Debug mode: like ``-f``, but also send all log output to stderr.
-
-.. option:: --setuser userorgid
-
- Set uid after starting. If a username is specified, the user
- record is looked up to get a uid and a gid, and the gid is set
- as well, unless --setgroup is also specified.
-
-.. option:: --setgroup grouporgid
-
- Set gid after starting. If a group name is specified the group
- record is looked up to get a gid.
-
-.. option:: --osd-data osddata
-
- Use object store at *osddata*.
-
-.. option:: --osd-journal journal
-
- Journal updates to *journal*.
-
-.. option:: --check-wants-journal
-
- Check whether a journal is desired.
-
-.. option:: --check-allows-journal
-
- Check whether a journal is allowed.
-
-.. option:: --check-needs-journal
-
- Check whether a journal is required.
-
-.. option:: --mkfs
-
- Create an empty object repository. This also initializes the journal
- (if one is defined).
-
-.. option:: --mkkey
-
- Generate a new secret key. This is normally used in combination
- with ``--mkfs`` as it is more convenient than generating a key by
- hand with :doc:`ceph-authtool <ceph-authtool>`\(8).
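-
- For example, the data directory for OSD 0 can be initialized and a new
- secret key generated in one step (assuming default data paths)::
-
-   ceph-osd -i 0 --mkfs --mkkey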
-
-.. option:: --mkjournal
-
- Create a new journal file to match an existing object repository.
- This is useful if the journal device or file is wiped out due to a
- disk or file system failure.
-
-.. option:: --flush-journal
-
- Flush the journal to permanent store. This runs in the foreground
- so you know when it's completed. This can be useful if you want to
- resize the journal or need to otherwise destroy it: this guarantees
- you won't lose data.
-
-.. option:: --get-cluster-fsid
-
- Print the cluster fsid (uuid) and exit.
-
-.. option:: --get-osd-fsid
-
- Print the OSD's fsid and exit. The OSD's uuid is generated at
- --mkfs time and is thus unique to a particular instantiation of
- this OSD.
-
-.. option:: --get-journal-fsid
-
- Print the journal's uuid. The journal fsid is set to match the OSD
- fsid at --mkfs time.
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
- Use *ceph.conf* configuration file instead of the default
- ``/etc/ceph/ceph.conf`` for runtime configuration options.
-
-.. option:: -m monaddress[:port]
-
- Connect to specified monitor (instead of looking through
- ``ceph.conf``).
-
-
-Availability
-============
-
-**ceph-osd** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8),
-:doc:`ceph-mds <ceph-mds>`\(8),
-:doc:`ceph-mon <ceph-mon>`\(8),
-:doc:`ceph-authtool <ceph-authtool>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-post-file.rst b/src/ceph/doc/man/8/ceph-post-file.rst
deleted file mode 100644
index 7e4899f..0000000
--- a/src/ceph/doc/man/8/ceph-post-file.rst
+++ /dev/null
@@ -1,71 +0,0 @@
-:orphan:
-
-==================================================
- ceph-post-file -- post files for ceph developers
-==================================================
-
-.. program:: ceph-post-file
-
-Synopsis
-========
-
-| **ceph-post-file** [-d *description*] [-u *user*] *file or dir* ...
-
-
-Description
-===========
-
-**ceph-post-file** will upload files or directories to ceph.com for
-later analysis by Ceph developers.
-
-Each invocation uploads files or directories to a separate directory
-with a unique tag. That tag can be passed to a developer or
-referenced in a bug report (http://tracker.ceph.com/). Once the
-upload completes, the directory is marked non-readable and
-non-writeable to prevent access or modification by other users.
-
-Warning
-=======
-
-Basic measures are taken to make posted data visible only to
-developers with access to ceph.com infrastructure. However, users
-should think twice and/or take appropriate precautions before
-posting potentially sensitive data (for example, logs or data
-directories that contain Ceph secrets).
-
-
-Options
-=======
-
-.. option:: -d *description*, --description *description*
-
- Add a short description for the upload. This is a good opportunity
- to reference a bug number. There is no default value.
-
-.. option:: -u *user*
-
- Set the user metadata for the upload. This defaults to `whoami`@`hostname -f`.
-
-Examples
-========
-
-To upload a single log::
-
- ceph-post-file /var/log/ceph/ceph-mon.`hostname`.log
-
-To upload several directories::
-
- ceph-post-file -d 'mon data directories' /var/log/ceph/mon/*
-
-
-Availability
-============
-
-**ceph-post-file** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8),
-:doc:`ceph-debugpack <ceph-debugpack>`\(8),
diff --git a/src/ceph/doc/man/8/ceph-rbdnamer.rst b/src/ceph/doc/man/8/ceph-rbdnamer.rst
deleted file mode 100644
index 123c6e2..0000000
--- a/src/ceph/doc/man/8/ceph-rbdnamer.rst
+++ /dev/null
@@ -1,41 +0,0 @@
-:orphan:
-
-==================================================
- ceph-rbdnamer -- udev helper to name RBD devices
-==================================================
-
-.. program:: ceph-rbdnamer
-
-
-Synopsis
-========
-
-| **ceph-rbdnamer** *num*
-
-
-Description
-===========
-
-**ceph-rbdnamer** prints the pool and image name for the given RBD device
-to stdout. It is used by `udev` (via a rule like the one below) to
-set up a device symlink.
-
-
-::
-
- KERNEL=="rbd[0-9]*", PROGRAM="/usr/bin/ceph-rbdnamer %n", SYMLINK+="rbd/%c{1}/%c{2}"
-
-
-Availability
-============
-
-**ceph-rbdnamer** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`rbd <rbd>`\(8),
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-rest-api.rst b/src/ceph/doc/man/8/ceph-rest-api.rst
deleted file mode 100644
index 9864a9b..0000000
--- a/src/ceph/doc/man/8/ceph-rest-api.rst
+++ /dev/null
@@ -1,150 +0,0 @@
-:orphan:
-
-=====================================================
- ceph-rest-api -- ceph RESTlike administration server
-=====================================================
-
-.. program:: ceph-rest-api
-
-Synopsis
-========
-
-| **ceph-rest-api** [ -c *conffile* ] [--cluster *clustername* ] [ -n *name* ] [-i *id* ]
-
-
-Description
-===========
-
-**ceph-rest-api** is a WSGI application that can run as a
-standalone web service or run under a web server that supports
-WSGI. It provides much of the functionality of the **ceph**
-command-line tool through an HTTP-accessible interface.
-
-Options
-=======
-
-.. option:: -c/--conf conffile
-
- names the ceph.conf file to use for configuration. If -c is not
- specified, the default depends on the state of the --cluster option
- (default 'ceph'; see below). The configuration file is searched
- for in this order:
-
- * $CEPH_CONF
- * /etc/ceph/${cluster}.conf
- * ~/.ceph/${cluster}.conf
- * ${cluster}.conf (in the current directory)
-
- so you can also pass this option in the environment as CEPH_CONF.
-
-.. option:: --cluster clustername
-
- set *clustername* for use in the $cluster metavariable, for
- locating the ceph.conf file. The default is 'ceph'.
-
-.. option:: -n/--name name
-
- specifies the client 'name', which is used to find the
- client-specific configuration options in the config file, and
- also is the name used for authentication when connecting
- to the cluster (the entity name appearing in 'ceph auth ls' output,
- for example). The default is 'client.restapi'.
-
-.. option:: -i/--id id
-
- specifies the client 'id', which will form the clientname
- as 'client.<id>' if clientname is not set. If -n/--name is
- set, that takes precedence.
-
- Also, global Ceph options are supported.
-
-
-Configuration parameters
-========================
-
-Supported configuration parameters include:
-
-* **keyring** the keyring file holding the key for 'clientname'
-* **public addr** ip:port to listen on (default 0.0.0.0:5000)
-* **log file** (usual Ceph default)
-* **restapi base url** the base URL to answer requests on (default /api/v0.1)
-* **restapi log level** critical, error, warning, info, debug (default warning)
-
-Configuration parameters are searched in the standard order:
-first in the section named '<clientname>', then 'client', then 'global'.
-
-<clientname> is either supplied by -n/--name, "client.<id>" where
-<id> is supplied by -i/--id, or 'client.restapi' if neither option
-is present.
-
-A single-threaded server will run on **public addr** if ceph-rest-api is
-executed directly; otherwise, configuration is specified by the enclosing
-WSGI web server.
-
-Commands
-========
-
-Commands are submitted with HTTP GET requests (for commands that
-primarily return data) or PUT (for commands that affect cluster state).
-HEAD and OPTIONS are also supported. Standard HTTP status codes
-are returned.
-
-For commands that return bulk data, the request can include
-Accept: application/json or Accept: application/xml to select the
-desired structured output, or you may use a .json or .xml addition
-to the requested PATH. Parameters are supplied as query parameters
-in the request; for parameters that take more than one value, repeat
-the key=val construct. For instance, to remove OSDs 2 and 3,
-send a PUT request to ``osd/rm?ids=2&ids=3``.
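-
-For example, that request might be issued with ``curl`` (host and port are
-illustrative, matching the default **public addr**)::
-
- curl -X PUT 'http://localhost:5000/api/v0.1/osd/rm?ids=2&ids=3'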
-
-Discovery
-=========
-
-Human-readable discovery of supported commands and parameters, along
-with a small description of each command, is provided when the requested
-path is incomplete/partially matching. Requesting / will redirect to
-the value of **restapi base url**, and that path will give a full list
-of all known commands.
-For example, requesting ``api/vX.X/mon`` will return the list of API calls for
-monitors, ``api/vX.X/osd`` the list of API calls for OSDs, and so on.
-
-The command set is very similar to the commands
-supported by the **ceph** tool. One notable exception is that the
-``ceph pg <pgid> <command>`` style of commands is supported here
-as ``tell/<pgid>/command?args``.
-
-Deployment as WSGI application
-==============================
-
-When deploying as a WSGI application (say, with Apache/mod_wsgi,
-or nginx/uwsgi, or gunicorn, etc.), use the ``ceph_rest_api.py`` module
-(``ceph-rest-api`` is a thin layer around this module). The standalone web
-server is of course not used, so address/port configuration is done in
-the WSGI server. Use a python .wsgi module or the equivalent to call
-``app = generate_app(conf, cluster, clientname, clientid, args)`` where:
-
-* conf is as -c/--conf above
-* cluster is as --cluster above
-* clientname, -n/--name
-* clientid, -i/--id, and
-* args are any other generic Ceph arguments
-
-When app is returned, it will have attributes 'ceph_addr' and 'ceph_port'
-set to what the address and port are in the Ceph configuration;
-those may be used for the server, or ignored.
-
-Any errors reading configuration or connecting to the cluster cause an
-exception to be raised; see your WSGI server documentation for how to
-see those messages in case of problems.
-
-Availability
-============
-
-**ceph-rest-api** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to the Ceph documentation at
-http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-run.rst b/src/ceph/doc/man/8/ceph-run.rst
deleted file mode 100644
index ed76c28..0000000
--- a/src/ceph/doc/man/8/ceph-run.rst
+++ /dev/null
@@ -1,45 +0,0 @@
-:orphan:
-
-=========================================
- ceph-run -- restart daemon on core dump
-=========================================
-
-.. program:: ceph-run
-
-Synopsis
-========
-
-| **ceph-run** *command* ...
-
-
-Description
-===========
-
-**ceph-run** is a simple wrapper that will restart a daemon if it exits
-with a signal indicating it crashed and possibly core dumped (that is,
-signals 3, 4, 5, 6, 8, or 11).
-
-The command should run the daemon in the foreground. For Ceph daemons,
-that means the ``-f`` option.
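-
-For example, to supervise an OSD daemon and restart it if it crashes (the OSD
-id is illustrative)::
-
- ceph-run ceph-osd -i 0 -f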
-
-
-Options
-=======
-
-None
-
-
-Availability
-============
-
-**ceph-run** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8),
-:doc:`ceph-mon <ceph-mon>`\(8),
-:doc:`ceph-mds <ceph-mds>`\(8),
-:doc:`ceph-osd <ceph-osd>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-syn.rst b/src/ceph/doc/man/8/ceph-syn.rst
deleted file mode 100644
index a30c460..0000000
--- a/src/ceph/doc/man/8/ceph-syn.rst
+++ /dev/null
@@ -1,99 +0,0 @@
-:orphan:
-
-===============================================
- ceph-syn -- ceph synthetic workload generator
-===============================================
-
-.. program:: ceph-syn
-
-Synopsis
-========
-
-| **ceph-syn** [ -m *monaddr*:*port* ] --syn *command* *...*
-
-
-Description
-===========
-
-**ceph-syn** is a simple synthetic workload generator for the Ceph
-distributed file system. It uses the userspace client library to
-generate simple workloads against a currently running file system. The
-file system need not be mounted via ceph-fuse(8) or the kernel client.
-
-One or more ``--syn`` command arguments specify the particular
-workload, as documented below.
-
-
-Options
-=======
-
-.. option:: -d
-
- Detach from console and daemonize after startup.
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
- Use *ceph.conf* configuration file instead of the default
- ``/etc/ceph/ceph.conf`` to determine monitor addresses during
- startup.
-
-.. option:: -m monaddress[:port]
-
- Connect to specified monitor (instead of looking through
- ``ceph.conf``).
-
-.. option:: --num_client num
-
- Run num different clients, each in a separate thread.
-
-.. option:: --syn workloadspec
-
- Run the given workload. May be specified as many times as
- needed. Workloads will normally run sequentially.
-
-
-Workloads
-=========
-
-Each workload should be preceded by ``--syn`` on the command
-line. This is not a complete list.
-
-:command:`mknap` *path* *snapname*
- Create a snapshot called *snapname* on *path*.
-
-:command:`rmsnap` *path* *snapname*
- Delete snapshot called *snapname* on *path*.
-
-:command:`rmfile` *path*
- Delete/unlink *path*.
-
-:command:`writefile` *sizeinmb* *blocksize*
- Create a file, named after our client id, that is *sizeinmb* MB by
- writing *blocksize* chunks.
-
-:command:`readfile` *sizeinmb* *blocksize*
- Read a file, named after our client id, that is *sizeinmb* MB, in
- *blocksize* chunks.
-
-:command:`rw` *sizeinmb* *blocksize*
- Write file, then read it back, as above.
-
-:command:`makedirs` *numsubdirs* *numfiles* *depth*
- Create a hierarchy of directories that is *depth* levels deep. Give
- each directory *numsubdirs* subdirectories and *numfiles* files.
-
-:command:`walk`
- Recursively walk the file system (like find).
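-
-For example, to write a 1024 MB file in 4096-byte chunks and read it back
-using the ``rw`` workload (sizes are illustrative)::
-
- ceph-syn -m 192.168.0.1:6789 --syn rw 1024 4096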
-
-
-Availability
-============
-
-**ceph-syn** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8),
-:doc:`ceph-fuse <ceph-fuse>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-volume-systemd.rst b/src/ceph/doc/man/8/ceph-volume-systemd.rst
deleted file mode 100644
index 1a7b481..0000000
--- a/src/ceph/doc/man/8/ceph-volume-systemd.rst
+++ /dev/null
@@ -1,56 +0,0 @@
-:orphan:
-
-=======================================================
- ceph-volume-systemd -- systemd ceph-volume helper tool
-=======================================================
-
-.. program:: ceph-volume-systemd
-
-Synopsis
-========
-
-| **ceph-volume-systemd** *systemd instance name*
-
-
-Description
-===========
-:program:`ceph-volume-systemd` is a systemd helper tool that receives input
-from (dynamically created) systemd units so that activation of OSDs can
-proceed.
-
-It translates the input into a system call to ceph-volume for activation
-purposes only.
-
-
-Examples
-========
-Its input is the ``systemd instance name`` (represented by ``%i`` in a systemd
-unit), and it should be in the following format::
-
- <ceph-volume subcommand>-<extra metadata>
-
-In the case of ``lvm`` a call could look like::
-
- /usr/bin/ceph-volume-systemd lvm-0-8715BEB4-15C5-49DE-BA6F-401086EC7B41
-
-Which in turn will call ``ceph-volume`` in the following way::
-
- ceph-volume lvm trigger 0-8715BEB4-15C5-49DE-BA6F-401086EC7B41
-
-Any other subcommand will need to have implemented a ``trigger`` command that
-can consume the extra metadata in this format.
-
-
-Availability
-============
-
-:program:`ceph-volume-systemd` is part of Ceph, a massively scalable,
-open-source, distributed storage system. Please refer to the documentation at
-http://docs.ceph.com/ for more information.
-
-
-See also
-========
-
-:doc:`ceph-osd <ceph-osd>`\(8),
-:doc:`ceph-volume <ceph-volume>`\(8)
diff --git a/src/ceph/doc/man/8/ceph-volume.rst b/src/ceph/doc/man/8/ceph-volume.rst
deleted file mode 100644
index 431e82c..0000000
--- a/src/ceph/doc/man/8/ceph-volume.rst
+++ /dev/null
@@ -1,122 +0,0 @@
-:orphan:
-
-========================================
- ceph-volume -- Ceph OSD deployment tool
-========================================
-
-.. program:: ceph-volume
-
-Synopsis
-========
-
-| **ceph-volume** [-h] [--cluster CLUSTER] [--log-level LOG_LEVEL]
-| [--log-path LOG_PATH]
-
-| **ceph-volume** **lvm** [ *trigger* | *create* | *activate* | *prepare* ]
-
-Description
-===========
-
-:program:`ceph-volume` is a single-purpose command line tool to deploy logical
-volumes as OSDs, trying to maintain a similar API to ``ceph-disk`` when
-preparing, activating, and creating OSDs.
-
-It deviates from ``ceph-disk`` by not interacting with or relying on the udev
-rules that come installed for Ceph. These rules allow automatic detection of
-previously set up devices, which are in turn fed into ``ceph-disk`` to activate
-them.
-
-
-Commands
-========
-
-lvm
----
-
-By making use of LVM tags, the ``lvm`` sub-command is able to store and later
-re-discover and query devices associated with OSDs so that they can later be
-activated.
-
-Subcommands:
-
-**activate**
-Enables a systemd unit that persists the OSD ID and its UUID (also called
-``fsid`` in Ceph CLI tools), so that at boot time it can understand what OSD is
-enabled and needs to be mounted.
-
-Usage::
-
- ceph-volume lvm activate --filestore <osd id> <osd fsid>
-
-Optional Arguments:
-
-* [-h, --help] show the help message and exit
-* [--bluestore] bluestore objectstore (not yet implemented)
-* [--filestore] filestore objectstore (current default)
-
-
-**prepare**
-Prepares a logical volume to be used as an OSD and journal using a ``filestore`` setup
-(``bluestore`` support is planned). It will not create or modify the logical volumes
-except for adding extra metadata.
-
-Usage::
-
- ceph-volume lvm prepare --filestore --data <data lv> --journal <journal device>
-
-Optional arguments:
-
-* [-h, --help] show the help message and exit
-* [--journal JOURNAL] A logical group name, path to a logical volume, or path to a device
-* [--journal-size GB] Size (in GB) to use when creating the journal
-* [--bluestore] Use the bluestore objectstore (not currently supported)
-* [--filestore] Use the filestore objectstore (currently the only supported object store)
-* [--osd-id OSD_ID] Reuse an existing OSD id
-* [--osd-fsid OSD_FSID] Reuse an existing OSD fsid
-
-Required arguments:
-
-* --data A logical group name or a path to a logical volume
-
-**create**
-Wraps the two-step process to provision a new osd (calling ``prepare`` first
-and then ``activate``) into a single one. The reason to prefer ``prepare`` and
-then ``activate`` is to gradually introduce new OSDs into a cluster, and to
-avoid large amounts of data being rebalanced.
-
-The single-call process unifies exactly what ``prepare`` and ``activate`` do,
-with the convenience of doing it all at once. Flags and general usage are
-equivalent to those of the ``prepare`` subcommand.
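-
-Usage mirrors that of ``prepare``::
-
- ceph-volume lvm create --filestore --data <data lv> --journal <journal device>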
-
-**trigger**
-This subcommand is not meant to be used directly. It is called by systemd to
-proxy input to ``ceph-volume lvm activate``, parsing the input from systemd
-and detecting the UUID and ID associated with an OSD.
-
-Usage::
-
- ceph-volume lvm trigger <SYSTEMD-DATA>
-
-The systemd "data" is expected to be in the format of::
-
- <OSD ID>-<OSD UUID>
-
-The lvs associated with the OSD need to have been prepared previously,
-so that all needed tags and metadata exist.
-
-Positional arguments:
-
-* <SYSTEMD_DATA> Data from a systemd unit containing ID and UUID of the OSD.
-
-Availability
-============
-
-:program:`ceph-volume` is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the documentation at http://docs.ceph.com/ for more information.
-
-
-See also
-========
-
-:doc:`ceph-osd <ceph-osd>`\(8),
-:doc:`ceph-disk <ceph-disk>`\(8),
diff --git a/src/ceph/doc/man/8/ceph.rst b/src/ceph/doc/man/8/ceph.rst
deleted file mode 100644
index 1a18000..0000000
--- a/src/ceph/doc/man/8/ceph.rst
+++ /dev/null
@@ -1,1550 +0,0 @@
-:orphan:
-
-==================================
- ceph -- ceph administration tool
-==================================
-
-.. program:: ceph
-
-Synopsis
-========
-
-| **ceph** **auth** [ *add* \| *caps* \| *del* \| *export* \| *get* \| *get-key* \| *get-or-create* \| *get-or-create-key* \| *import* \| *list* \| *print-key* \| *print_key* ] ...
-
-| **ceph** **compact**
-
-| **ceph** **config-key** [ *del* | *exists* | *get* | *ls* | *dump* | *set* ] ...
-
-| **ceph** **daemon** *<name>* \| *<path>* *<command>* ...
-
-| **ceph** **daemonperf** *<name>* \| *<path>* [ *interval* [ *count* ] ]
-
-| **ceph** **df** *{detail}*
-
-| **ceph** **fs** [ *ls* \| *new* \| *reset* \| *rm* ] ...
-
-| **ceph** **fsid**
-
-| **ceph** **health** *{detail}*
-
-| **ceph** **heap** [ *dump* \| *start_profiler* \| *stop_profiler* \| *release* \| *stats* ] ...
-
-| **ceph** **injectargs** *<injectedargs>* [ *<injectedargs>*... ]
-
-| **ceph** **log** *<logtext>* [ *<logtext>*... ]
-
-| **ceph** **mds** [ *compat* \| *deactivate* \| *fail* \| *rm* \| *rmfailed* \| *set_state* \| *stat* \| *tell* ] ...
-
-| **ceph** **mon** [ *add* \| *dump* \| *getmap* \| *remove* \| *stat* ] ...
-
-| **ceph** **mon_status**
-
-| **ceph** **osd** [ *blacklist* \| *blocked-by* \| *create* \| *new* \| *deep-scrub* \| *df* \| *down* \| *dump* \| *erasure-code-profile* \| *find* \| *getcrushmap* \| *getmap* \| *getmaxosd* \| *in* \| *lspools* \| *map* \| *metadata* \| *ok-to-stop* \| *out* \| *pause* \| *perf* \| *pg-temp* \| *force-create-pg* \| *primary-affinity* \| *primary-temp* \| *repair* \| *reweight* \| *reweight-by-pg* \| *rm* \| *destroy* \| *purge* \| *safe-to-destroy* \| *scrub* \| *set* \| *setcrushmap* \| *setmaxosd* \| *stat* \| *tree* \| *unpause* \| *unset* ] ...
-
-| **ceph** **osd** **crush** [ *add* \| *add-bucket* \| *create-or-move* \| *dump* \| *get-tunable* \| *link* \| *move* \| *remove* \| *rename-bucket* \| *reweight* \| *reweight-all* \| *reweight-subtree* \| *rm* \| *rule* \| *set* \| *set-tunable* \| *show-tunables* \| *tunables* \| *unlink* ] ...
-
-| **ceph** **osd** **pool** [ *create* \| *delete* \| *get* \| *get-quota* \| *ls* \| *mksnap* \| *rename* \| *rmsnap* \| *set* \| *set-quota* \| *stats* ] ...
-
-| **ceph** **osd** **tier** [ *add* \| *add-cache* \| *cache-mode* \| *remove* \| *remove-overlay* \| *set-overlay* ] ...
-
-| **ceph** **pg** [ *debug* \| *deep-scrub* \| *dump* \| *dump_json* \| *dump_pools_json* \| *dump_stuck* \| *force_create_pg* \| *getmap* \| *ls* \| *ls-by-osd* \| *ls-by-pool* \| *ls-by-primary* \| *map* \| *repair* \| *scrub* \| *set_full_ratio* \| *set_nearfull_ratio* \| *stat* ] ...
-
-| **ceph** **quorum** [ *enter* \| *exit* ]
-
-| **ceph** **quorum_status**
-
-| **ceph** **report** { *<tags>* [ *<tags>...* ] }
-
-| **ceph** **scrub**
-
-| **ceph** **status**
-
-| **ceph** **sync** **force** {--yes-i-really-mean-it} {--i-know-what-i-am-doing}
-
-| **ceph** **tell** *<name (type.id)> <args> [<args>...]*
-
-| **ceph** **version**
-
-Description
-===========
-
-:program:`ceph` is a control utility which is used for manual deployment and maintenance
-of a Ceph cluster. It provides a diverse set of commands that allows deployment of
-monitors, OSDs, placement groups, and MDS daemons, as well as overall maintenance
-and administration of the cluster.
-
-Commands
-========
-
-auth
-----
-
-Manage authentication keys. It is used for adding, removing, exporting
-or updating authentication keys for a particular entity such as a monitor or
-OSD. It uses some additional subcommands.
-
-Subcommand ``add`` adds authentication info for a particular entity from an
-input file, or generates a random key if no input is given, along with any
-caps specified in the command.
-
-Usage::
-
- ceph auth add <entity> {<caps> [<caps>...]}
-
-Subcommand ``caps`` updates caps for **name** from caps specified in the command.
-
-Usage::
-
- ceph auth caps <entity> <caps> [<caps>...]
-
-Subcommand ``del`` deletes all caps for ``name``.
-
-Usage::
-
- ceph auth del <entity>
-
-Subcommand ``export`` writes keyring for requested entity, or master keyring if
-none given.
-
-Usage::
-
- ceph auth export {<entity>}
-
-Subcommand ``get`` writes keyring file with requested key.
-
-Usage::
-
- ceph auth get <entity>
-
-Subcommand ``get-key`` displays requested key.
-
-Usage::
-
- ceph auth get-key <entity>
-
-Subcommand ``get-or-create`` adds authentication info for a particular entity
-from an input file, or generates a random key if no input is given, along with
-any caps specified in the command.
-
-Usage::
-
- ceph auth get-or-create <entity> {<caps> [<caps>...]}
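-
-For example, to create a key for a hypothetical ``client.foo`` with read
-access to the monitors and read/write access to an illustrative pool ``bar``::
-
- ceph auth get-or-create client.foo mon 'allow r' osd 'allow rw pool=bar'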
-
-Subcommand ``get-or-create-key`` gets or adds key for ``name`` from system/caps
-pairs specified in the command. If key already exists, any given caps must match
-the existing caps for that key.
-
-Usage::
-
- ceph auth get-or-create-key <entity> {<caps> [<caps>...]}
-
-Subcommand ``import`` reads keyring from input file.
-
-Usage::
-
- ceph auth import
-
-Subcommand ``ls`` lists authentication state.
-
-Usage::
-
- ceph auth ls
-
-Subcommand ``print-key`` displays requested key.
-
-Usage::
-
- ceph auth print-key <entity>
-
-Subcommand ``print_key`` displays requested key.
-
-Usage::
-
- ceph auth print_key <entity>
-
-
-compact
--------
-
-Causes compaction of monitor's leveldb storage.
-
-Usage::
-
- ceph compact
-
-
-config-key
-----------
-
-Manage configuration keys. It uses some additional subcommands.
-
-Subcommand ``del`` deletes configuration key.
-
-Usage::
-
- ceph config-key del <key>
-
-Subcommand ``exists`` checks for a configuration key's existence.
-
-Usage::
-
- ceph config-key exists <key>
-
-Subcommand ``get`` gets the configuration key.
-
-Usage::
-
- ceph config-key get <key>
-
-Subcommand ``ls`` lists configuration keys.
-
-Usage::
-
- ceph config-key ls
-
-Subcommand ``dump`` dumps configuration keys and values.
-
-Usage::
-
- ceph config-key dump
-
-Subcommand ``set`` sets a configuration key to a value.
-
-Usage::
-
- ceph config-key set <key> {<val>}
-
-
-daemon
-------
-
-Submit admin-socket commands.
-
-Usage::
-
- ceph daemon {daemon_name|socket_path} {command} ...
-
-Example::
-
- ceph daemon osd.0 help
-
-
-daemonperf
-----------
-
-Watch performance counters from a Ceph daemon.
-
-Usage::
-
- ceph daemonperf {daemon_name|socket_path} [{interval} [{count}]]
-
-
-df
---
-
-Show cluster's free space status.
-
-Usage::
-
- ceph df {detail}
-
-.. _ceph features:
-
-features
---------
-
-Show the releases and features of all daemons and clients connected to the
-cluster, along with counts of each, grouped by the corresponding
-features/releases. Each release of Ceph supports a different set of features,
-expressed by the features bitmask. New cluster features require that clients
-support the feature, or else they are not allowed to connect to the cluster.
-As new features or capabilities are enabled after an upgrade, older clients
-are prevented from connecting.
-
-Usage::
-
- ceph features
-
-fs
---
-
-Manage cephfs filesystems. It uses some additional subcommands.
-
-Subcommand ``ls`` lists filesystems.
-
-Usage::
-
- ceph fs ls
-
-Subcommand ``new`` makes a new filesystem using named pools <metadata> and <data>.
-
-Usage::
-
- ceph fs new <fs_name> <metadata> <data>
-
-Subcommand ``reset`` is used for disaster recovery only: reset to a single-MDS map
-
-Usage::
-
- ceph fs reset <fs_name> {--yes-i-really-mean-it}
-
-Subcommand ``rm`` disables the named filesystem.
-
-Usage::
-
- ceph fs rm <fs_name> {--yes-i-really-mean-it}
-
-
-fsid
-----
-
-Show cluster's FSID/UUID.
-
-Usage::
-
- ceph fsid
-
-
-health
-------
-
-Show cluster's health.
-
-Usage::
-
- ceph health {detail}
-
-
-heap
-----
-
-Show heap usage info (available only if compiled with tcmalloc)
-
-Usage::
-
- ceph heap dump|start_profiler|stop_profiler|release|stats
-
-
-injectargs
-----------
-
-Inject configuration arguments into monitor.
-
-Usage::
-
- ceph injectargs <injected_args> [<injected_args>...]
-
-
-log
----
-
-Log supplied text to the monitor log.
-
-Usage::
-
- ceph log <logtext> [<logtext>...]
-
-
-mds
----
-
-Manage metadata server configuration and administration. It uses some
-additional subcommands.
-
-Subcommand ``compat`` manages compatible features. It uses some additional
-subcommands.
-
-Subcommand ``rm_compat`` removes compatible feature.
-
-Usage::
-
- ceph mds compat rm_compat <int[0-]>
-
-Subcommand ``rm_incompat`` removes incompatible feature.
-
-Usage::
-
- ceph mds compat rm_incompat <int[0-]>
-
-Subcommand ``show`` shows mds compatibility settings.
-
-Usage::
-
- ceph mds compat show
-
-Subcommand ``deactivate`` stops mds.
-
-Usage::
-
- ceph mds deactivate <who>
-
-Subcommand ``fail`` forces an mds to the failed state.
-
-Usage::
-
- ceph mds fail <who>
-
-Subcommand ``rm`` removes inactive mds.
-
-Usage::
-
- ceph mds rm <int[0-]> <name (type.id)>
-
-Subcommand ``rmfailed`` removes failed mds.
-
-Usage::
-
- ceph mds rmfailed <int[0-]>
-
-Subcommand ``set_state`` sets mds state of <gid> to <numeric-state>.
-
-Usage::
-
- ceph mds set_state <int[0-]> <int[0-20]>
-
-Subcommand ``stat`` shows MDS status.
-
-Usage::
-
- ceph mds stat
-
-Subcommand ``tell`` sends command to particular mds.
-
-Usage::
-
- ceph mds tell <who> <args> [<args>...]
-
-mon
----
-
-Manage monitor configuration and administration. It uses some additional
-subcommands.
-
-Subcommand ``add`` adds new monitor named <name> at <addr>.
-
-Usage::
-
- ceph mon add <name> <IPaddr[:port]>
-
-Subcommand ``dump`` dumps formatted monmap (optionally from epoch)
-
-Usage::
-
- ceph mon dump {<int[0-]>}
-
-Subcommand ``getmap`` gets monmap.
-
-Usage::
-
- ceph mon getmap {<int[0-]>}
-
-Subcommand ``remove`` removes monitor named <name>.
-
-Usage::
-
- ceph mon remove <name>
-
-Subcommand ``stat`` summarizes monitor status.
-
-Usage::
-
- ceph mon stat
-
-mon_status
-----------
-
-Reports status of monitors.
-
-Usage::
-
- ceph mon_status
-
-mgr
----
-
-Ceph manager daemon configuration and management.
-
-Subcommand ``dump`` dumps the latest MgrMap, which describes the active
-and standby manager daemons.
-
-Usage::
-
- ceph mgr dump
-
-Subcommand ``fail`` will mark a manager daemon as failed, removing it
-from the manager map. If it is the active manager daemon, a standby
-will take its place.
-
-Usage::
-
- ceph mgr fail <name>
-
-Subcommand ``module ls`` will list currently enabled manager modules (plugins).
-
-Usage::
-
- ceph mgr module ls
-
-Subcommand ``module enable`` will enable a manager module. Available modules are included in MgrMap and visible via ``mgr dump``.
-
-Usage::
-
- ceph mgr module enable <module>
-
-Subcommand ``module disable`` will disable an active manager module.
-
-Usage::
-
- ceph mgr module disable <module>
-
-Subcommand ``metadata`` will report metadata about all manager daemons or, if the name is specified, a single manager daemon.
-
-Usage::
-
- ceph mgr metadata [name]
-
-Subcommand ``versions`` will report a count of running daemon versions.
-
-Usage::
-
- ceph mgr versions
-
-Subcommand ``count-metadata`` will report a count of any daemon metadata field.
-
-Usage::
-
- ceph mgr count-metadata <field>
-
-
-osd
----
-
-Manage OSD configuration and administration. It uses some additional
-subcommands.
-
-Subcommand ``blacklist`` manages blacklisted clients. It uses some additional
-subcommands.
-
-Subcommand ``add`` adds <addr> to the blacklist (optionally until <expire> seconds
-from now).
-
-Usage::
-
- ceph osd blacklist add <EntityAddr> {<float[0.0-]>}
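-
-For example, to blacklist a client address (illustrative) for one hour::
-
- ceph osd blacklist add 192.168.0.10:0/3214 3600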
-
-Subcommand ``ls`` shows blacklisted clients.
-
-Usage::
-
- ceph osd blacklist ls
-
-Subcommand ``rm`` removes <addr> from the blacklist.
-
-Usage::
-
- ceph osd blacklist rm <EntityAddr>
-
-Subcommand ``blocked-by`` prints a histogram of which OSDs are blocking their peers
-
-Usage::
-
- ceph osd blocked-by
-
-Subcommand ``create`` creates new osd (with optional UUID and ID).
-
-This command is DEPRECATED as of the Luminous release, and will be removed in
-a future release.
-
-Subcommand ``new`` should instead be used.
-
-Usage::
-
- ceph osd create {<uuid>} {<id>}
-
-Subcommand ``new`` can be used to create a new OSD or to recreate a previously
-destroyed OSD with a specific *id*. The new OSD will have the specified *uuid*,
-and the command expects a JSON file containing the base64 cephx key for auth
-entity *client.osd.<id>*, as well as an optional base64 cephx key for dm-crypt
-lockbox access and a dm-crypt key. Specifying a dm-crypt key requires specifying
-the accompanying lockbox cephx key.
-
-Usage::
-
- ceph osd new {<uuid>} {<id>} -i {<secrets.json>}
-
-The secrets JSON file is optional but, if provided, is expected to conform
-to the following format::
-
- {
- "cephx_secret": "AQBWtwhZdBO5ExAAIDyjK2Bh16ZXylmzgYYEjg=="
- }
-
-Or::
-
- {
- "cephx_secret": "AQBWtwhZdBO5ExAAIDyjK2Bh16ZXylmzgYYEjg==",
- "cephx_lockbox_secret": "AQDNCglZuaeVCRAAYr76PzR1Anh7A0jswkODIQ==",
- "dmcrypt_key": "<dm-crypt key>"
- }
-
-
-Subcommand ``crush`` is used for CRUSH management. It uses some additional
-subcommands.
-
-Subcommand ``add`` adds or updates crushmap position and weight for <name> with
-<weight> and location <args>.
-
-Usage::
-
- ceph osd crush add <osdname (id|osd.id)> <float[0.0-]> <args> [<args>...]
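-
-For example, to add ``osd.5`` with weight 1.0 under a hypothetical host bucket
-``node1``::
-
- ceph osd crush add osd.5 1.0 host=node1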
-
-Subcommand ``add-bucket`` adds no-parent (probably root) crush bucket <name> of
-type <type>.
-
-Usage::
-
- ceph osd crush add-bucket <name> <type>
-
-Subcommand ``create-or-move`` creates entry or moves existing entry for <name>
-<weight> at/to location <args>.
-
-Usage::
-
- ceph osd crush create-or-move <osdname (id|osd.id)> <float[0.0-]> <args>
- [<args>...]
-
-Subcommand ``dump`` dumps crush map.
-
-Usage::
-
- ceph osd crush dump
-
-Subcommand ``get-tunable`` gets the crush tunable straw_calc_version.
-
-Usage::
-
- ceph osd crush get-tunable straw_calc_version
-
-Subcommand ``link`` links existing entry for <name> under location <args>.
-
-Usage::
-
- ceph osd crush link <name> <args> [<args>...]
-
-Subcommand ``move`` moves existing entry for <name> to location <args>.
-
-Usage::
-
- ceph osd crush move <name> <args> [<args>...]
-
-Subcommand ``remove`` removes <name> from crush map (everywhere, or just at
-<ancestor>).
-
-Usage::
-
- ceph osd crush remove <name> {<ancestor>}
-
-Subcommand ``rename-bucket`` renames bucket <srcname> to <dstname>.
-
-Usage::
-
- ceph osd crush rename-bucket <srcname> <dstname>
-
-Subcommand ``reweight`` changes <name>'s weight to <weight> in the crush map.
-
-Usage::
-
- ceph osd crush reweight <name> <float[0.0-]>
-
-Subcommand ``reweight-all`` recalculates the weights for the tree to
-ensure they sum correctly.
-
-Usage::
-
- ceph osd crush reweight-all
-
-Subcommand ``reweight-subtree`` changes all leaf items beneath <name>
-to <weight> in the crush map.
-
-Usage::
-
- ceph osd crush reweight-subtree <name> <weight>
-
-Subcommand ``rm`` removes <name> from crush map (everywhere, or just at
-<ancestor>).
-
-Usage::
-
- ceph osd crush rm <name> {<ancestor>}
-
-Subcommand ``rule`` is used for creating crush rules. It uses some additional
-subcommands.
-
-Subcommand ``create-erasure`` creates crush rule <name> for erasure coded pool
-created with <profile> (default default).
-
-Usage::
-
- ceph osd crush rule create-erasure <name> {<profile>}
-
-Subcommand ``create-simple`` creates crush rule <name> to start from <root>,
-replicate across buckets of type <type>, using a choose mode of <firstn|indep>
-(default firstn; indep best for erasure pools).
-
-Usage::
-
- ceph osd crush rule create-simple <name> <root> <type> {firstn|indep}
-
-Subcommand ``dump`` dumps crush rule <name> (default all).
-
-Usage::
-
- ceph osd crush rule dump {<name>}
-
-Subcommand ``ls`` lists crush rules.
-
-Usage::
-
- ceph osd crush rule ls
-
-Subcommand ``rm`` removes crush rule <name>.
-
-Usage::
-
- ceph osd crush rule rm <name>
-
-Subcommand ``set``, used alone, sets the crush map from the input file.
-
-Usage::
-
- ceph osd crush set
-
-Subcommand ``set`` with an osdname/osd.id updates the crushmap position and
-weight for <name> to <weight> with location <args>.
-
-Usage::
-
- ceph osd crush set <osdname (id|osd.id)> <float[0.0-]> <args> [<args>...]
-
-Subcommand ``set-tunable`` sets crush tunable <tunable> to <value>. The only
-tunable that can be set is straw_calc_version.
-
-Usage::
-
- ceph osd crush set-tunable straw_calc_version <value>
-
-Subcommand ``show-tunables`` shows current crush tunables.
-
-Usage::
-
- ceph osd crush show-tunables
-
-Subcommand ``tree`` shows the crush buckets and items in a tree view.
-
-Usage::
-
- ceph osd crush tree
-
-Subcommand ``tunables`` sets crush tunables values to <profile>.
-
-Usage::
-
- ceph osd crush tunables legacy|argonaut|bobtail|firefly|hammer|optimal|default
-
-Subcommand ``unlink`` unlinks <name> from crush map (everywhere, or just at
-<ancestor>).
-
-Usage::
-
- ceph osd crush unlink <name> {<ancestor>}
-
-Subcommand ``df`` shows OSD utilization
-
-Usage::
-
- ceph osd df {plain|tree}
-
-Subcommand ``deep-scrub`` initiates deep scrub on specified osd.
-
-Usage::
-
- ceph osd deep-scrub <who>
-
-Subcommand ``down`` sets osd(s) <id> [<id>...] down.
-
-Usage::
-
- ceph osd down <ids> [<ids>...]
-
-Subcommand ``dump`` prints summary of OSD map.
-
-Usage::
-
- ceph osd dump {<int[0-]>}
-
-Subcommand ``erasure-code-profile`` is used for managing the erasure code
-profiles. It uses some additional subcommands.
-
-Subcommand ``get`` gets erasure code profile <name>.
-
-Usage::
-
- ceph osd erasure-code-profile get <name>
-
-Subcommand ``ls`` lists all erasure code profiles.
-
-Usage::
-
- ceph osd erasure-code-profile ls
-
-Subcommand ``rm`` removes erasure code profile <name>.
-
-Usage::
-
- ceph osd erasure-code-profile rm <name>
-
-Subcommand ``set`` creates erasure code profile <name> with [<key[=value]> ...]
-pairs. Add --force at the end to override an existing profile (this is risky).
-
-Usage::
-
- ceph osd erasure-code-profile set <name> {<profile> [<profile>...]}
-
-Subcommand ``find`` finds osd <id> in the CRUSH map and shows its location.
-
-Usage::
-
- ceph osd find <int[0-]>
-
-Subcommand ``getcrushmap`` gets CRUSH map.
-
-Usage::
-
- ceph osd getcrushmap {<int[0-]>}
-
-Subcommand ``getmap`` gets OSD map.
-
-Usage::
-
- ceph osd getmap {<int[0-]>}
-
-Subcommand ``getmaxosd`` shows largest OSD id.
-
-Usage::
-
- ceph osd getmaxosd
-
-Subcommand ``in`` sets osd(s) <id> [<id>...] in.
-
-Usage::
-
- ceph osd in <ids> [<ids>...]
-
-Subcommand ``lost`` marks osd as permanently lost. THIS DESTROYS DATA IF NO
-MORE REPLICAS EXIST, BE CAREFUL.
-
-Usage::
-
- ceph osd lost <int[0-]> {--yes-i-really-mean-it}
-
-Subcommand ``ls`` shows all OSD ids.
-
-Usage::
-
- ceph osd ls {<int[0-]>}
-
-Subcommand ``lspools`` lists pools.
-
-Usage::
-
- ceph osd lspools {<int>}
-
-Subcommand ``map`` finds pg for <object> in <pool>.
-
-Usage::
-
- ceph osd map <poolname> <objectname>
-
-Subcommand ``metadata`` fetches metadata for osd <id>.
-
-Usage::
-
- ceph osd metadata {int[0-]} (default all)
-
-Subcommand ``out`` sets osd(s) <id> [<id>...] out.
-
-Usage::
-
- ceph osd out <ids> [<ids>...]
-
-Subcommand ``ok-to-stop`` checks whether the list of OSD(s) can be
-stopped without immediately making data unavailable. That is, all
-data should remain readable and writeable, although data redundancy
-may be reduced as some PGs may end up in a degraded (but active)
-state. It will return a success code if it is okay to stop the
-OSD(s), or an error code and informative message if it is not or if no
-conclusion can be drawn at the current time.
-
-Usage::
-
- ceph osd ok-to-stop <id> [<ids>...]
-
-Subcommand ``pause`` pauses osd.
-
-Usage::
-
- ceph osd pause
-
-Subcommand ``perf`` prints dump of OSD perf summary stats.
-
-Usage::
-
- ceph osd perf
-
-Subcommand ``pg-temp`` sets the pg_temp mapping pgid:[<id> [<id>...]] (developers
-only).
-
-Usage::
-
- ceph osd pg-temp <pgid> {<id> [<id>...]}
-
-Subcommand ``force-create-pg`` forces creation of pg <pgid>.
-
-Usage::
-
- ceph osd force-create-pg <pgid>
-
-
-Subcommand ``pool`` is used for managing data pools. It uses some additional
-subcommands.
-
-Subcommand ``create`` creates pool.
-
-Usage::
-
- ceph osd pool create <poolname> <int[0-]> {<int[0-]>} {replicated|erasure}
- {<erasure_code_profile>} {<ruleset>} {<int>}
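-
-For example, to create a replicated pool named ``mypool`` with 64 placement
-groups (the name and numbers are illustrative)::
-
- ceph osd pool create mypool 64 64 replicated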
-
-Subcommand ``delete`` deletes pool.
-
-Usage::
-
- ceph osd pool delete <poolname> {<poolname>} {--yes-i-really-really-mean-it}
-
-Subcommand ``get`` gets pool parameter <var>.
-
-Usage::
-
- ceph osd pool get <poolname> size|min_size|crash_replay_interval|pg_num|
- pgp_num|crush_ruleset|auid|write_fadvise_dontneed
-
-Only for tiered pools::
-
- ceph osd pool get <poolname> hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|
- target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio|
- cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|
- min_read_recency_for_promote|hit_set_grade_decay_rate|hit_set_search_last_n
-
-Only for erasure coded pools::
-
- ceph osd pool get <poolname> erasure_code_profile
-
-Use ``all`` to get all pool parameters that apply to the pool's type::
-
- ceph osd pool get <poolname> all
-
-Subcommand ``get-quota`` obtains object or byte limits for pool.
-
-Usage::
-
- ceph osd pool get-quota <poolname>
-
-Subcommand ``ls`` lists pools.
-
-Usage::
-
- ceph osd pool ls {detail}
-
-Subcommand ``mksnap`` makes snapshot <snap> in <pool>.
-
-Usage::
-
- ceph osd pool mksnap <poolname> <snap>
-
-Subcommand ``rename`` renames <srcpool> to <destpool>.
-
-Usage::
-
- ceph osd pool rename <poolname> <poolname>
-
-Subcommand ``rmsnap`` removes snapshot <snap> from <pool>.
-
-Usage::
-
- ceph osd pool rmsnap <poolname> <snap>
-
-Subcommand ``set`` sets pool parameter <var> to <val>.
-
-Usage::
-
- ceph osd pool set <poolname> size|min_size|crash_replay_interval|pg_num|
- pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|
- hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|debug_fake_ec_pool|
- target_max_bytes|target_max_objects|cache_target_dirty_ratio|
- cache_target_dirty_high_ratio|
- cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|auid|
- min_read_recency_for_promote|write_fadvise_dontneed|hit_set_grade_decay_rate|
- hit_set_search_last_n
- <val> {--yes-i-really-mean-it}
-
-Subcommand ``set-quota`` sets object or byte limit on pool.
-
-Usage::
-
- ceph osd pool set-quota <poolname> max_objects|max_bytes <val>
-
-Subcommand ``stats`` obtains stats from all pools, or from the specified pool.
-
-Usage::
-
- ceph osd pool stats {<name>}
-
-Subcommand ``primary-affinity`` adjusts osd primary-affinity from 0.0 <= <weight>
-<= 1.0.
-
-Usage::
-
- ceph osd primary-affinity <osdname (id|osd.id)> <float[0.0-1.0]>
-
-Subcommand ``primary-temp`` sets primary_temp mapping pgid:<id>|-1 (developers
-only).
-
-Usage::
-
- ceph osd primary-temp <pgid> <id>
-
-Subcommand ``repair`` initiates repair on a specified osd.
-
-Usage::
-
- ceph osd repair <who>
-
-Subcommand ``reweight`` reweights osd to 0.0 < <weight> < 1.0.
-
-Usage::
-
- ceph osd reweight <int[0-]> <float[0.0-1.0]>
-
-Subcommand ``reweight-by-pg`` reweights OSDs by PG distribution
-[overload-percentage-for-consideration, default 120].
-
-Usage::
-
- ceph osd reweight-by-pg {<int[100-]>} {<poolname> [<poolname...]}
- {--no-increasing}
-
-Subcommand ``reweight-by-utilization`` reweights OSDs by utilization
-[overload-percentage-for-consideration, default 120].
-
-Usage::
-
- ceph osd reweight-by-utilization {<int[100-]>}
- {--no-increasing}
-
-Subcommand ``rm`` removes osd(s) <id> [<id>...] from the OSD map.
-
-Usage::
-
- ceph osd rm <ids> [<ids>...]
-
-Subcommand ``destroy`` marks OSD *id* as *destroyed*, removing its cephx
-entity's keys and all of its dm-crypt and daemon-private config key
-entries.
-
-This command will not remove the OSD from crush, nor will it remove the
-OSD from the OSD map. Instead, once the command successfully completes,
-the OSD will be marked as *destroyed*.
-
-In order to mark an OSD as destroyed, the OSD must first be marked as
-**lost**.
-
-Usage::
-
- ceph osd destroy <id> {--yes-i-really-mean-it}
-
-
-Subcommand ``purge`` performs a combination of ``osd destroy``,
-``osd rm`` and ``osd crush remove``.
-
-Usage::
-
- ceph osd purge <id> {--yes-i-really-mean-it}
-
-Subcommand ``safe-to-destroy`` checks whether it is safe to remove or
-destroy an OSD without reducing overall data redundancy or durability.
-It will return a success code if it is definitely safe, or an error
-code and informative message if it is not or if no conclusion can be
-drawn at the current time.
-
-Usage::
-
- ceph osd safe-to-destroy <id> [<ids>...]
-
-Subcommand ``scrub`` initiates scrub on specified osd.
-
-Usage::
-
- ceph osd scrub <who>
-
-Subcommand ``set`` sets <key>.
-
-Usage::
-
- ceph osd set full|pause|noup|nodown|noout|noin|nobackfill|
- norebalance|norecover|noscrub|nodeep-scrub|notieragent
-
-Subcommand ``setcrushmap`` sets crush map from input file.
-
-Usage::
-
- ceph osd setcrushmap
-
-Subcommand ``setmaxosd`` sets new maximum osd value.
-
-Usage::
-
- ceph osd setmaxosd <int[0-]>
-
-Subcommand ``set-require-min-compat-client`` enforces that the cluster be
-backward compatible with the specified client version. This subcommand prevents
-you from making any changes (e.g., crush tunables, or using new features) that
-would violate the current setting. Please note, this subcommand will fail if
-any connected daemon or client is not compatible with the features offered by
-the given <version>. To see the features and releases of all clients connected
-to the cluster, please see `ceph features`_.
-
-Usage::
-
- ceph osd set-require-min-compat-client <version>
-
-Subcommand ``stat`` prints summary of OSD map.
-
-Usage::
-
- ceph osd stat
-
-Subcommand ``tier`` is used for managing tiers. It uses some additional
-subcommands.
-
-Subcommand ``add`` adds the tier <tierpool> (the second one) to base pool <pool>
-(the first one).
-
-Usage::
-
- ceph osd tier add <poolname> <poolname> {--force-nonempty}
-
-Subcommand ``add-cache`` adds a cache <tierpool> (the second one) of size <size>
-to existing pool <pool> (the first one).
-
-Usage::
-
- ceph osd tier add-cache <poolname> <poolname> <int[0-]>
-
-Subcommand ``cache-mode`` specifies the caching mode for cache tier <pool>.
-
-Usage::
-
- ceph osd tier cache-mode <poolname> none|writeback|forward|readonly|
- readforward|readproxy
-
-Subcommand ``remove`` removes the tier <tierpool> (the second one) from base pool
-<pool> (the first one).
-
-Usage::
-
- ceph osd tier remove <poolname> <poolname>
-
-Subcommand ``remove-overlay`` removes the overlay pool for base pool <pool>.
-
-Usage::
-
- ceph osd tier remove-overlay <poolname>
-
-Subcommand ``set-overlay`` set the overlay pool for base pool <pool> to be
-<overlaypool>.
-
-Usage::
-
- ceph osd tier set-overlay <poolname> <poolname>
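-
-For example, a writeback cache tier might be attached to a base pool with the
-following sequence (pool names are illustrative)::
-
- ceph osd tier add cold-storage hot-cache
- ceph osd tier cache-mode hot-cache writeback
- ceph osd tier set-overlay cold-storage hot-cache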
-
-Subcommand ``tree`` prints OSD tree.
-
-Usage::
-
- ceph osd tree {<int[0-]>}
-
-Subcommand ``unpause`` unpauses osd.
-
-Usage::
-
- ceph osd unpause
-
-Subcommand ``unset`` unsets <key>.
-
-Usage::
-
- ceph osd unset full|pause|noup|nodown|noout|noin|nobackfill|
- norebalance|norecover|noscrub|nodeep-scrub|notieragent
-
-
-pg
---
-
-It is used for managing the placement groups in OSDs. It uses some
-additional subcommands.
-
-Subcommand ``debug`` shows debug info about pgs.
-
-Usage::
-
- ceph pg debug unfound_objects_exist|degraded_pgs_exist
-
-Subcommand ``deep-scrub`` starts deep-scrub on <pgid>.
-
-Usage::
-
- ceph pg deep-scrub <pgid>
-
-Subcommand ``dump`` shows human-readable versions of pg map (only 'all' valid
-with plain).
-
-Usage::
-
- ceph pg dump {all|summary|sum|delta|pools|osds|pgs|pgs_brief} [{all|summary|sum|delta|pools|osds|pgs|pgs_brief...]}
-
-Subcommand ``dump_json`` shows human-readable version of pg map in json only.
-
-Usage::
-
- ceph pg dump_json {all|summary|sum|delta|pools|osds|pgs|pgs_brief} [{all|summary|sum|delta|pools|osds|pgs|pgs_brief...]}
-
-Subcommand ``dump_pools_json`` shows pg pools info in json only.
-
-Usage::
-
- ceph pg dump_pools_json
-
-Subcommand ``dump_stuck`` shows information about stuck pgs.
-
-Usage::
-
- ceph pg dump_stuck {inactive|unclean|stale|undersized|degraded [inactive|unclean|stale|undersized|degraded...]}
- {<int>}
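-
-For example, to list pgs that have been stuck inactive for more than 300
-seconds (the trailing integer is assumed to be the stuck threshold in
-seconds)::
-
- ceph pg dump_stuck inactive 300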
-
-Subcommand ``getmap`` gets binary pg map to -o/stdout.
-
-Usage::
-
- ceph pg getmap
-
-Subcommand ``ls`` lists pgs, optionally filtered by pool, OSD, or state.
-
-Usage::
-
- ceph pg ls {<int>} {active|clean|down|replay|splitting|
- scrubbing|scrubq|degraded|inconsistent|peering|repair|
- recovery|backfill_wait|incomplete|stale|remapped|
- deep_scrub|backfill|backfill_toofull|recovery_wait|
- undersized [active|clean|down|replay|splitting|
- scrubbing|scrubq|degraded|inconsistent|peering|repair|
- recovery|backfill_wait|incomplete|stale|remapped|
- deep_scrub|backfill|backfill_toofull|recovery_wait|
- undersized...]}
-
-Subcommand ``ls-by-osd`` lists pgs on the specified OSD.
-
-Usage::
-
- ceph pg ls-by-osd <osdname (id|osd.id)> {<int>}
- {active|clean|down|replay|splitting|
- scrubbing|scrubq|degraded|inconsistent|peering|repair|
- recovery|backfill_wait|incomplete|stale|remapped|
- deep_scrub|backfill|backfill_toofull|recovery_wait|
- undersized [active|clean|down|replay|splitting|
- scrubbing|scrubq|degraded|inconsistent|peering|repair|
- recovery|backfill_wait|incomplete|stale|remapped|
- deep_scrub|backfill|backfill_toofull|recovery_wait|
- undersized...]}
-
-Subcommand ``ls-by-pool`` lists pgs in the specified pool.
-
-Usage::
-
- ceph pg ls-by-pool <poolstr> {<int>} {active|
- clean|down|replay|splitting|
- scrubbing|scrubq|degraded|inconsistent|peering|repair|
- recovery|backfill_wait|incomplete|stale|remapped|
- deep_scrub|backfill|backfill_toofull|recovery_wait|
- undersized [active|clean|down|replay|splitting|
- scrubbing|scrubq|degraded|inconsistent|peering|repair|
- recovery|backfill_wait|incomplete|stale|remapped|
- deep_scrub|backfill|backfill_toofull|recovery_wait|
- undersized...]}
-
-Subcommand ``ls-by-primary`` lists pgs whose primary is the specified OSD.
-
-Usage::
-
- ceph pg ls-by-primary <osdname (id|osd.id)> {<int>}
- {active|clean|down|replay|splitting|
- scrubbing|scrubq|degraded|inconsistent|peering|repair|
- recovery|backfill_wait|incomplete|stale|remapped|
- deep_scrub|backfill|backfill_toofull|recovery_wait|
- undersized [active|clean|down|replay|splitting|
- scrubbing|scrubq|degraded|inconsistent|peering|repair|
- recovery|backfill_wait|incomplete|stale|remapped|
- deep_scrub|backfill|backfill_toofull|recovery_wait|
- undersized...]}
-
-Subcommand ``map`` shows mapping of pg to osds.
-
-Usage::
-
- ceph pg map <pgid>
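-
-For example (the pg id and output are illustrative)::
-
- ceph pg map 1.6c
- osdmap e13 pg 1.6c (1.6c) -> up [1,0] acting [1,0]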
-
-Subcommand ``repair`` starts repair on <pgid>.
-
-Usage::
-
- ceph pg repair <pgid>
-
-Subcommand ``scrub`` starts scrub on <pgid>.
-
-Usage::
-
- ceph pg scrub <pgid>
-
-Subcommand ``set_full_ratio`` sets ratio at which pgs are considered full.
-
-Usage::
-
- ceph pg set_full_ratio <float[0.0-1.0]>
-
-Subcommand ``set_backfillfull_ratio`` sets ratio at which pgs are considered too full to backfill.
-
-Usage::
-
- ceph pg set_backfillfull_ratio <float[0.0-1.0]>
-
-Subcommand ``set_nearfull_ratio`` sets ratio at which pgs are considered nearly
-full.
-
-Usage::
-
- ceph pg set_nearfull_ratio <float[0.0-1.0]>
-
-Subcommand ``stat`` shows placement group status.
-
-Usage::
-
- ceph pg stat
-
-
-quorum
-------
-
-Cause MON to enter or exit quorum.
-
-Usage::
-
- ceph quorum enter|exit
-
-Note: this only works on the MON to which the ``ceph`` command is connected.
-If you want a specific MON to enter or exit quorum, use this syntax::
-
- ceph tell mon.<id> quorum enter|exit
-
-quorum_status
--------------
-
-Reports status of monitor quorum.
-
-Usage::
-
- ceph quorum_status
-
-
-report
-------
-
-Reports the full status of the cluster, with optional title tag strings.
-
-Usage::
-
- ceph report {<tags> [<tags>...]}
-
-
-scrub
------
-
-Scrubs the monitor stores.
-
-Usage::
-
- ceph scrub
-
-
-status
-------
-
-Shows cluster status.
-
-Usage::
-
- ceph status
-
-
-sync force
-----------
-
-Forces a sync of the monitor store, clearing it first.
-
-Usage::
-
- ceph sync force {--yes-i-really-mean-it} {--i-know-what-i-am-doing}
-
-
-tell
-----
-
-Sends a command to a specific daemon.
-
-Usage::
-
- ceph tell <name (type.id)> <args> [<args>...]
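-
-For example, a common use of ``tell`` is to adjust a daemon's debug level
-at runtime (the ``injectargs`` argument shown here is one such daemon
-command)::
-
- ceph tell osd.0 injectargs '--debug-osd 20'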
-
-
-List all available commands.
-
-Usage::
-
- ceph tell <name (type.id)> help
-
-version
--------
-
-Shows the mon daemon version.
-
-Usage::
-
- ceph version
-
-Options
-=======
-
-.. option:: -i infile
-
- will specify an input file to be passed along as a payload with the
- command to the monitor cluster. This is only used for specific
- monitor commands.
-
-.. option:: -o outfile
-
- will write any payload returned by the monitor cluster with its
- reply to outfile. Only specific monitor commands (e.g. osd getmap)
- return a payload.
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
- Use ceph.conf configuration file instead of the default
- ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
-
-.. option:: --id CLIENT_ID, --user CLIENT_ID
-
- Client id for authentication.
-
-.. option:: --name CLIENT_NAME, -n CLIENT_NAME
-
- Client name for authentication.
-
-.. option:: --cluster CLUSTER
-
- Name of the Ceph cluster.
-
-.. option:: --admin-daemon ADMIN_SOCKET, daemon DAEMON_NAME
-
- Submit admin-socket commands via admin sockets in /var/run/ceph.
-
-.. option:: --admin-socket ADMIN_SOCKET_NOPE
-
- You probably mean --admin-daemon
-
-.. option:: -s, --status
-
- Show cluster status.
-
-.. option:: -w, --watch
-
- Watch live cluster changes.
-
-.. option:: --watch-debug
-
- Watch debug events.
-
-.. option:: --watch-info
-
- Watch info events.
-
-.. option:: --watch-sec
-
- Watch security events.
-
-.. option:: --watch-warn
-
- Watch warning events.
-
-.. option:: --watch-error
-
- Watch error events.
-
-.. option:: --version, -v
-
- Display version.
-
-.. option:: --verbose
-
- Make verbose.
-
-.. option:: --concise
-
- Make less verbose.
-
-.. option:: -f {json,json-pretty,xml,xml-pretty,plain}, --format
-
- Format of output.
-
-.. option:: --connect-timeout CLUSTER_TIMEOUT
-
- Set a timeout for connecting to the cluster.
-
-.. option:: --no-increasing
-
- ``--no-increasing`` is off by default, so increasing an OSD's weight is
- allowed when using the ``reweight-by-utilization`` or
- ``test-reweight-by-utilization`` commands. If this option is passed to
- those commands, OSD weights will not be increased, even if an OSD is
- underutilized.
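-
- For example (the ``110`` oversubscription threshold is illustrative)::
-
-  ceph osd reweight-by-utilization 110 --no-increasing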
-
-
-Availability
-============
-
-:program:`ceph` is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`ceph-mon <ceph-mon>`\(8),
-:doc:`ceph-osd <ceph-osd>`\(8),
-:doc:`ceph-mds <ceph-mds>`\(8)
diff --git a/src/ceph/doc/man/8/crushtool.rst b/src/ceph/doc/man/8/crushtool.rst
deleted file mode 100644
index c7b88f8..0000000
--- a/src/ceph/doc/man/8/crushtool.rst
+++ /dev/null
@@ -1,284 +0,0 @@
-:orphan:
-
-==========================================
- crushtool -- CRUSH map manipulation tool
-==========================================
-
-.. program:: crushtool
-
-Synopsis
-========
-
-| **crushtool** ( -d *map* | -c *map.txt* | --build --num_osds *numosds*
- *layer1* *...* | --test ) [ -o *outfile* ]
-
-
-Description
-===========
-
-**crushtool** is a utility that lets you create, compile, decompile
-and test CRUSH map files.
-
-CRUSH is a pseudo-random data distribution algorithm that efficiently
-maps input values (which, in the context of Ceph, correspond to Placement
-Groups) across a heterogeneous, hierarchically structured device map.
-The algorithm was originally described in detail in the following paper
-(although it has evolved some since then)::
-
- http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
-
-The tool has four modes of operation.
-
-.. option:: --compile|-c map.txt
-
- will compile a plaintext map.txt into a binary map file.
-
-.. option:: --decompile|-d map
-
- will take the compiled map and decompile it into a plaintext source
- file, suitable for editing.
-
-.. option:: --build --num_osds {num-osds} layer1 ...
-
- will create map with the given layer structure. See below for a
- detailed explanation.
-
-.. option:: --test
-
- will perform a dry run of a CRUSH mapping for a range of input
- values ``[--min-x,--max-x]`` (default ``[0,1023]``) which can be
- thought of as simulated Placement Groups. See below for a more
- detailed explanation.
-
-Unlike other Ceph tools, **crushtool** does not accept generic options
-such as **--debug-crush** from the command line. They can, however, be
-provided via the CEPH_ARGS environment variable. For instance, to
-silence all output from the CRUSH subsystem::
-
- CEPH_ARGS="--debug-crush 0" crushtool ...
-
-
-Running tests with --test
-=========================
-
-The test mode will use the input crush map (as specified with **-i
-map**) and perform a dry run of CRUSH mapping or random placement
-(if **--simulate** is set). On completion, two kinds of reports can be
-created:
-
-#. The **--show-...** options output human readable information on stderr.
-#. The **--output-csv** option creates CSV files that are documented by
-   the **--help-output** option.
-
-Note: Each Placement Group (PG) has an integer ID which can be obtained
-from ``ceph pg dump`` (for example PG 2.2f means pool id 2, PG id 0x2f, i.e. 47).
-The pool and PG IDs are combined by a function to get a value which is
-given to CRUSH to map it to OSDs. crushtool does not know about PGs or
-pools; it only runs simulations by mapping values in the range
-``[--min-x,--max-x]``.
-
-
-.. option:: --show-statistics
-
- Displays a summary of the distribution. For instance::
-
- rule 1 (metadata) num_rep 5 result size == 5: 1024/1024
-
- shows that rule **1** which is named **metadata** successfully
- mapped **1024** values to **result size == 5** devices when trying
- to map them to **num_rep 5** replicas. When it fails to provide the
- required mapping, presumably because the number of **tries** must
- be increased, a breakdown of the failures is displayed. For instance::
-
- rule 1 (metadata) num_rep 10 result size == 8: 4/1024
- rule 1 (metadata) num_rep 10 result size == 9: 93/1024
- rule 1 (metadata) num_rep 10 result size == 10: 927/1024
-
- shows that although **num_rep 10** replicas were required, **4**
- out of **1024** values (**4/1024**) were mapped to **result size
- == 8** devices only.
-
-.. option:: --show-mappings
-
- Displays the mapping of each value in the range ``[--min-x,--max-x]``.
- For instance::
-
- CRUSH rule 1 x 24 [11,6]
-
- shows that value **24** is mapped to devices **[11,6]** by rule
- **1**.
-
-.. option:: --show-bad-mappings
-
- Displays which value failed to be mapped to the required number of
- devices. For instance::
-
- bad mapping rule 1 x 781 num_rep 7 result [8,10,2,11,6,9]
-
- shows that when rule **1** was required to map **7** devices, it
- could map only six: **[8,10,2,11,6,9]**.
-
-.. option:: --show-utilization
-
- Displays the expected and actual utilization for each device, for
- each number of replicas. For instance::
-
- device 0: stored : 951 expected : 853.333
- device 1: stored : 963 expected : 853.333
- ...
-
- shows that device **0** stored **951** values and was expected to store **853**.
- Implies **--show-statistics**.
-
-.. option:: --show-utilization-all
-
- Displays the same as **--show-utilization** but does not suppress
- output when the weight of a device is zero.
- Implies **--show-statistics**.
-
-.. option:: --show-choose-tries
-
- Displays how many attempts were needed to find a device mapping.
- For instance::
-
- 0: 95224
- 1: 3745
- 2: 2225
- ..
-
- shows that **95224** mappings succeeded without retries, **3745**
- mappings succeeded with one attempt, etc. There are as many rows
- as the value of the **--set-choose-total-tries** option.
-
-.. option:: --output-csv
-
- Creates CSV files (in the current directory) containing information
- documented by **--help-output**. The files are named after the rule
- used when collecting the statistics. For instance, if the rule
- 'metadata' is used, the CSV files will be::
-
- metadata-absolute_weights.csv
- metadata-device_utilization.csv
- ...
-
- The first line of the file briefly explains the column layout. For
- instance::
-
- metadata-absolute_weights.csv
- Device ID, Absolute Weight
- 0,1
- ...
-
-.. option:: --output-name NAME
-
- Prepend **NAME** to the file names generated when **--output-csv**
- is specified. For instance **--output-name FOO** will create
- files::
-
- FOO-metadata-absolute_weights.csv
- FOO-metadata-device_utilization.csv
- ...
-
-The **--set-...** options can be used to modify the tunables of the
-input crush map. The input crush map is modified in
-memory. For example::
-
- $ crushtool -i mymap --test --show-bad-mappings
- bad mapping rule 1 x 781 num_rep 7 result [8,10,2,11,6,9]
-
-could be fixed by increasing the **choose-total-tries** as follows::
-
- $ crushtool -i mymap --test \
- --show-bad-mappings \
- --set-choose-total-tries 500
-
-Building a map with --build
-===========================
-
-The build mode will generate hierarchical maps. The first argument
-specifies the number of devices (leaves) in the CRUSH hierarchy. Each
-layer describes how the layer (or devices) preceding it should be
-grouped.
-
-Each layer consists of::
-
- bucket ( uniform | list | tree | straw ) size
-
-The **bucket** is the type of the buckets in the layer
-(e.g. "rack"). Each bucket name will be built by appending a unique
-number to the **bucket** string (e.g. "rack0", "rack1"...).
-
-The second component is the type of bucket: **straw** should be used
-most of the time.
-
-The third component is the maximum size of the bucket. A size of zero
-means a bucket of infinite capacity.
-
-
-Example
-=======
-
-Suppose we have two rows with two racks each and 20 nodes per rack. Suppose
-each node contains 4 storage devices for Ceph OSD Daemons. This configuration
-allows us to deploy 320 Ceph OSD Daemons. Let's assume a 42U rack with 2U nodes,
-leaving an extra 2U for a rack switch.
-
-To reflect our hierarchy of devices, nodes, racks and rows, we would execute
-the following::
-
- $ crushtool -o crushmap --build --num_osds 320 \
- node straw 4 \
- rack straw 20 \
- row straw 2 \
- root straw 0
- # id weight type name reweight
- -87 320 root root
- -85 160 row row0
- -81 80 rack rack0
- -1 4 node node0
- 0 1 osd.0 1
- 1 1 osd.1 1
- 2 1 osd.2 1
- 3 1 osd.3 1
- -2 4 node node1
- 4 1 osd.4 1
- 5 1 osd.5 1
- ...
-
-CRUSH rulesets are created so the generated crushmap can be
-tested. They are the same rulesets as those created by default when
-creating a new Ceph cluster. The map can be further edited with::
-
- # decompile
- crushtool -d crushmap -o map.txt
-
- # edit
- emacs map.txt
-
- # recompile
- crushtool -c map.txt -o crushmap
-
-Example output from --test
-==========================
-
-See https://github.com/ceph/ceph/blob/master/src/test/cli/crushtool/set-choose.t
-for sample ``crushtool --test`` commands and output produced thereby.
-
-Availability
-============
-
-**crushtool** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8),
-:doc:`osdmaptool <osdmaptool>`\(8),
-
-Authors
-=======
-
-John Wilkins, Sage Weil, Loic Dachary
diff --git a/src/ceph/doc/man/8/librados-config.rst b/src/ceph/doc/man/8/librados-config.rst
deleted file mode 100644
index 940e8c2..0000000
--- a/src/ceph/doc/man/8/librados-config.rst
+++ /dev/null
@@ -1,46 +0,0 @@
-:orphan:
-
-=======================================================
- librados-config -- display information about librados
-=======================================================
-
-.. program:: librados-config
-
-Synopsis
-========
-
-| **librados-config** [ --version ] [ --vernum ]
-
-
-Description
-===========
-
-**librados-config** is a utility that displays information about the
-installed ``librados``.
-
-
-Options
-=======
-
-.. option:: --version
-
- Display ``librados`` version
-
-.. option:: --vernum
-
- Display the ``librados`` version code
-
-
-Availability
-============
-
-**librados-config** is part of Ceph, a massively scalable, open-source, distributed storage system.
-Please refer to the Ceph documentation at http://ceph.com/docs for
-more information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8),
-:doc:`rados <rados>`\(8)
diff --git a/src/ceph/doc/man/8/monmaptool.rst b/src/ceph/doc/man/8/monmaptool.rst
deleted file mode 100644
index bed0c94..0000000
--- a/src/ceph/doc/man/8/monmaptool.rst
+++ /dev/null
@@ -1,107 +0,0 @@
-:orphan:
-
-==========================================================
- monmaptool -- ceph monitor cluster map manipulation tool
-==========================================================
-
-.. program:: monmaptool
-
-Synopsis
-========
-
-| **monmaptool** *mapfilename* [ --clobber ] [ --print ] [ --create ]
- [ --add *ip*:*port* *...* ] [ --rm *ip*:*port* *...* ]
-
-
-Description
-===========
-
-**monmaptool** is a utility to create, view, and modify a monitor
-cluster map for the Ceph distributed storage system. The monitor map
-specifies the only fixed addresses in the Ceph distributed system.
-All other daemons bind to arbitrary addresses and register themselves
-with the monitors.
-
-When creating a map with --create, a new monitor map with a new,
-random UUID will be created. The --create option should be combined
-with one or more --add options to supply the monitor addresses.
-
-The default Ceph monitor port is 6789.
-
-
-Options
-=======
-
-.. option:: --print
-
- will print a plaintext dump of the map, after any modifications are
- made.
-
-.. option:: --clobber
-
- will allow monmaptool to overwrite mapfilename if changes are made.
-
-.. option:: --create
-
- will create a new monitor map with a new UUID (and with it, a new,
- empty Ceph file system).
-
-.. option:: --generate
-
- generate a new monmap based on the values on the command line or specified
- in the ceph configuration. This is, in order of preference,
-
- #. ``--monmap filename`` to specify a monmap to load
- #. ``--mon-host 'host1,ip2'`` to specify a list of hosts or ip addresses
- #. ``[mon.foo]`` sections containing ``mon addr`` settings in the config
-
-.. option:: --filter-initial-members
-
- filter the initial monmap by applying the ``mon initial members``
- setting. Monitors not present in that list will be removed, and
- initial members not present in the map will be added with dummy
- addresses.
-
-.. option:: --add name ip:port
-
- will add a monitor with the specified ip:port to the map.
-
-.. option:: --rm name
-
- will remove the monitor with the specified name from the map.
-
-.. option:: --fsid uuid
-
- will set the fsid to the given uuid. If not specified with --create, a random fsid will be generated.
-
-
-Example
-=======
-
-To create a new map with three monitors (for a fresh Ceph file system)::
-
- monmaptool --create --add mon.a 192.168.0.10:6789 --add mon.b 192.168.0.11:6789 \
- --add mon.c 192.168.0.12:6789 --clobber monmap
-
-To display the contents of the map::
-
- monmaptool --print monmap
-
-To replace one monitor::
-
- monmaptool --rm mon.a --add mon.a 192.168.0.9:6789 --clobber monmap
-
-
-Availability
-============
-
-**monmaptool** is part of Ceph, a massively scalable, open-source, distributed
-storage system. Please refer to the Ceph documentation at http://ceph.com/docs
-for more information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8),
-:doc:`crushtool <crushtool>`\(8),
diff --git a/src/ceph/doc/man/8/mount.ceph.rst b/src/ceph/doc/man/8/mount.ceph.rst
deleted file mode 100644
index 56900a9..0000000
--- a/src/ceph/doc/man/8/mount.ceph.rst
+++ /dev/null
@@ -1,168 +0,0 @@
-:orphan:
-
-========================================
- mount.ceph -- mount a ceph file system
-========================================
-
-.. program:: mount.ceph
-
-Synopsis
-========
-
-| **mount.ceph** *monaddr1*\ [,\ *monaddr2*\ ,...]:/[*subdir*] *dir* [
- -o *options* ]
-
-
-Description
-===========
-
-**mount.ceph** is a simple helper for mounting the Ceph file system on
-a Linux host. It serves to resolve monitor hostname(s) into IP
-addresses and read authentication keys from disk; the Linux kernel
-client component does most of the real work. In fact, it is possible
-to mount a non-authenticated Ceph file system without mount.ceph by
-specifying monitor address(es) by IP::
-
- mount -t ceph 1.2.3.4:/ mountpoint
-
-Each monitor address monaddr takes the form host[:port]. If the port
-is not specified, the Ceph default of 6789 is assumed.
-
-Multiple monitor addresses can be separated by commas. Only one
-responsive monitor is needed to successfully mount; the client will
-learn about all monitors from any responsive monitor. However, it is a
-good idea to specify more than one in case one happens to be down at
-the time of mount.
-
-A subdirectory subdir may be specified if a subset of the file system
-is to be mounted.
-
-Mount helper application conventions dictate that the first two
-arguments are the device to be mounted and the destination path. Options
-must be passed only after these fixed arguments.
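-
-For example, to authenticate as user ``admin`` with a secret stored in a
-file (the paths are illustrative; see the ``name`` and ``secretfile``
-options below)::
-
- mount.ceph monhost:/ /mnt/foo -o name=admin,secretfile=/etc/ceph/admin.secret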
-
-
-Options
-=======
-
-:command:`wsize`
- int (bytes), max write size. Default: none (writeback uses smaller of wsize
- and stripe unit)
-
-:command:`rsize`
- int (bytes), max read size. Default: none
-
-:command:`rasize`
- int (bytes), max readahead, multiple of 1024, Default: 8388608
- (8192*1024)
-
-:command:`osdtimeout`
- int (seconds), Default: 60
-
-:command:`osdkeepalive`
- int, Default: 5
-
-:command:`mount_timeout`
- int (seconds), Default: 60
-
-:command:`osd_idle_ttl`
- int (seconds), Default: 60
-
-:command:`caps_wanted_delay_min`
- int, cap release delay, Default: 5
-
-:command:`caps_wanted_delay_max`
- int, cap release delay, Default: 60
-
-:command:`cap_release_safety`
- int, Default: calculated
-
-:command:`readdir_max_entries`
- int, Default: 1024
-
-:command:`readdir_max_bytes`
- int, Default: 524288 (512*1024)
-
-:command:`write_congestion_kb`
- int (kb), max writeback in flight. scale with available
- memory. Default: calculated from available memory
-
-:command:`snapdirname`
- string, set the name of the hidden snapdir. Default: .snap
-
-:command:`name`
- RADOS user to authenticate as when using cephx. Default: guest
-
-:command:`secret`
- secret key for use with cephx. This option is insecure because it exposes
- the secret on the command line. To avoid this, use the secretfile option.
-
-:command:`secretfile`
- path to file containing the secret key to use with cephx
-
-:command:`ip`
- the IP address for the client to bind to (i.e., the client's own IP)
-
-:command:`noshare`
- create a new client instance, instead of sharing an existing
- instance of a client mounting the same cluster
-
-:command:`dirstat`
- report directory stats when reading a directory (`cat dirname`). Default: off
-
-:command:`nodirstat`
- do not report directory stats when reading a directory
-
-:command:`rbytes`
- Report the recursive size of the directory contents for st_size on
- directories. Default: on
-
-:command:`norbytes`
- Do not report the recursive size of the directory contents for
- st_size on directories.
-
-:command:`nocrc`
- no data crc on writes
-
-:command:`noasyncreaddir`
- no dcache readdir
-
-
-Examples
-========
-
-Mount the full file system::
-
- mount.ceph monhost:/ /mnt/foo
-
-If there are multiple monitors::
-
- mount.ceph monhost1,monhost2,monhost3:/ /mnt/foo
-
-If :doc:`ceph-mon <ceph-mon>`\(8) is running on a non-standard
-port::
-
- mount.ceph monhost1:7000,monhost2:7000,monhost3:7000:/ /mnt/foo
-
-To mount only part of the namespace::
-
- mount.ceph monhost1:/some/small/thing /mnt/thing
-
-Assuming mount.ceph(8) is installed properly, it should be
-automatically invoked by mount(8) like so::
-
- mount -t ceph monhost:/ /mnt/foo
-
-
-Availability
-============
-
-**mount.ceph** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-See also
-========
-
-:doc:`ceph-fuse <ceph-fuse>`\(8),
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/osdmaptool.rst b/src/ceph/doc/man/8/osdmaptool.rst
deleted file mode 100644
index f58d29c..0000000
--- a/src/ceph/doc/man/8/osdmaptool.rst
+++ /dev/null
@@ -1,157 +0,0 @@
-:orphan:
-
-======================================================
- osdmaptool -- ceph osd cluster map manipulation tool
-======================================================
-
-.. program:: osdmaptool
-
-Synopsis
-========
-
-| **osdmaptool** *mapfilename* [--print] [--createsimple *numosd*
- [--pgbits *bitsperosd* ] ] [--clobber]
-
-
-Description
-===========
-
-**osdmaptool** is a utility that lets you create, view, and manipulate
-OSD cluster maps from the Ceph distributed storage system. Notably, it
-lets you extract the embedded CRUSH map or import a new CRUSH map.
-
-
-Options
-=======
-
-.. option:: --print
-
- will simply make the tool print a plaintext dump of the map, after
- any modifications are made.
-
-.. option:: --clobber
-
- will allow osdmaptool to overwrite mapfilename if changes are made.
-
-.. option:: --import-crush mapfile
-
- will load the CRUSH map from mapfile and embed it in the OSD map.
-
-.. option:: --export-crush mapfile
-
- will extract the CRUSH map from the OSD map and write it to
- mapfile.
-
-.. option:: --createsimple numosd [--pgbits bitsperosd]
-
- will create a relatively generic OSD map with the numosd devices.
- If --pgbits is specified, the initial placement group counts will
- be set with bitsperosd bits per OSD. That is, the pg_num map
- attribute will be set to numosd shifted by bitsperosd.
-
-.. option:: --test-map-pgs [--pool poolid]
-
- will print out the mappings from placement groups to OSDs.
-
-.. option:: --test-map-pgs-dump [--pool poolid]
-
- will print out the summary of all placement groups and the mappings
- from them to the mapped OSDs.
-
-
-Example
-=======
-
-To create a simple map with 16 devices::
-
- osdmaptool --createsimple 16 osdmap --clobber
-
-To view the result::
-
- osdmaptool --print osdmap
-
-To view the mappings of placement groups for pool 0::
-
- osdmaptool osdmap --test-map-pgs-dump --pool 0
-
- pool 0 pg_num 8
- 0.0 [0,2,1] 0
- 0.1 [2,0,1] 2
- 0.2 [0,1,2] 0
- 0.3 [2,0,1] 2
- 0.4 [0,2,1] 0
- 0.5 [0,2,1] 0
- 0.6 [0,1,2] 0
- 0.7 [1,0,2] 1
- #osd count first primary c wt wt
- osd.0 8 5 5 1 1
- osd.1 8 1 1 1 1
- osd.2 8 2 2 1 1
- in 3
- avg 8 stddev 0 (0x) (expected 2.3094 0.288675x))
- min osd.0 8
- max osd.0 8
- size 0 0
- size 1 0
- size 2 0
- size 3 8
-
-In the above output:
- #. pool 0 has 8 placement groups, and two tables follow.
- #. A table of placement groups. Each row represents a placement group, with columns of:
-
-    * placement group id,
-    * acting set, and
-    * primary OSD.
- #. A table of all OSDs. Each row represents an OSD, with columns of:
-
-    * count of placement groups mapped to this OSD,
-    * count of placement groups where this OSD is the first in their acting sets,
-    * count of placement groups where this OSD is the primary,
-    * the CRUSH weight of this OSD, and
-    * the weight of this OSD.
- #. Statistics on the number of placement groups held by the 3 OSDs:
-
-    * average, stddev, stddev/average, expected stddev, expected stddev/average
-    * min and max
- #. The number of placement groups mapping to n OSDs. In this case, all 8 placement
-    groups map to 3 different OSDs.
-
-In a less-balanced cluster, we could have the following output for the statistics of
-placement group distribution, whose standard deviation is 1.41421::
-
- #osd count first primary c wt wt
- osd.0 33 9 9 0.0145874 1
- osd.1 34 14 14 0.0145874 1
- osd.2 31 7 7 0.0145874 1
- osd.3 31 13 13 0.0145874 1
- osd.4 30 14 14 0.0145874 1
- osd.5 33 7 7 0.0145874 1
- in 6
- avg 32 stddev 1.41421 (0.0441942x) (expected 5.16398 0.161374x))
- min osd.4 30
- max osd.1 34
- size 0 0
- size 1 0
- size 2 0
- size 3 64
-
-
-Availability
-============
-
-**osdmaptool** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
-refer to the Ceph documentation at http://ceph.com/docs for more
-information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8),
-:doc:`crushtool <crushtool>`\(8),
diff --git a/src/ceph/doc/man/8/rados.rst b/src/ceph/doc/man/8/rados.rst
deleted file mode 100644
index 9490105..0000000
--- a/src/ceph/doc/man/8/rados.rst
+++ /dev/null
@@ -1,223 +0,0 @@
-:orphan:
-
-=======================================
- rados -- rados object storage utility
-=======================================
-
-.. program:: rados
-
-Synopsis
-========
-
-| **rados** [ -m *monaddr* ] [ mkpool | rmpool *foo* ] [ -p | --pool
- *pool* ] [ -s | --snap *snap* ] [ -i *infile* ] [ -o *outfile* ]
- *command* ...
-
-
-Description
-===========
-
-**rados** is a utility for interacting with a Ceph object storage
-cluster (RADOS), part of the Ceph distributed storage system.
-
-
-Options
-=======
-
-.. option:: -p pool, --pool pool
-
- Interact with the given pool. Required by most commands.
-
-.. option:: -s snap, --snap snap
-
- Read from the given pool snapshot. Valid for all pool-specific read operations.
-
-.. option:: -i infile
-
- will specify an input file to be passed along as a payload with the
- command to the monitor cluster. This is only used for specific
- monitor commands.
-
-.. option:: -o outfile
-
- will write any payload returned by the monitor cluster with its
- reply to outfile. Only specific monitor commands (e.g. osd getmap)
- return a payload.
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
- Use ceph.conf configuration file instead of the default
- /etc/ceph/ceph.conf to determine monitor addresses during startup.
-
-.. option:: -m monaddress[:port]
-
- Connect to specified monitor (instead of looking through ceph.conf).
-
-.. option:: -b block_size
-
- Set the block size for put/get/append ops and for write benchmarking.
-
-.. option:: --striper
-
- Uses the striping API of rados rather than the default one.
- Available for stat, get, put, append, truncate, rm, ls and all xattr related operations.
-
-
-Global commands
-===============
-
-:command:`lspools`
- List object pools
-
-:command:`df`
- Show utilization statistics, including disk usage (bytes) and object
- counts, over the entire system and broken down by pool.
-
-:command:`mkpool` *foo*
- Create a pool with name foo.
-
-:command:`rmpool` *foo* [ *foo* --yes-i-really-really-mean-it ]
- Delete the pool foo (and all its data).
-
-:command:`list-inconsistent-pg` *pool*
- List inconsistent PGs in given pool.
-
-:command:`list-inconsistent-obj` *pgid*
- List inconsistent objects in given PG.
-
-:command:`list-inconsistent-snapset` *pgid*
- List inconsistent snapsets in given PG.
-
-Pool specific commands
-======================
-
-:command:`get` *name* *outfile*
- Read object name from the cluster and write it to outfile.
-
-:command:`put` *name* *infile* [--offset offset]
- Write object name with start offset (default: 0) to the cluster with contents from infile.
-
-:command:`append` *name* *infile*
- Append object name to the cluster with contents from infile.
-
-:command:`rm` *name*
- Remove object name.
-
-:command:`listwatchers` *name*
- List the watchers of object name.
-
-:command:`ls` *outfile*
- List objects in given pool and write to outfile.
-
-:command:`lssnap`
- List snapshots for given pool.
-
-:command:`clonedata` *srcname* *dstname* --object-locator *key*
- Clone object byte data from *srcname* to *dstname*. Both objects must be stored with the locator key *key* (usually either *srcname* or *dstname*). Object attributes and omap keys are not copied or cloned.
-
-:command:`mksnap` *foo*
- Create pool snapshot named *foo*.
-
-:command:`rmsnap` *foo*
- Remove pool snapshot named *foo*.
-
-:command:`bench` *seconds* *mode* [ -b *objsize* ] [ -t *threads* ]
- Benchmark for *seconds*. The mode can be *write*, *seq*, or
- *rand*. *seq* and *rand* are read benchmarks, either
- sequential or random. Before running one of the reading benchmarks,
- run a write benchmark with the *--no-cleanup* option. The default
- object size is 4 MB, and the default number of simulated threads
- (parallel writes) is 16. The *--run-name <label>* option is useful
- for benchmarking a workload test from multiple clients. The *<label>*
- is an arbitrary object name. It is "benchmark_last_metadata" by
- default, and is used as the underlying object name for "read" and
- "write" ops.
- Note: -b *objsize* option is valid only in *write* mode.
- Note: *write* and *seq* must be run on the same host otherwise the
- objects created by *write* will have names that will fail *seq*.
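-
- For example, a write benchmark followed by a sequential read of the
- same objects, then cleanup (the pool name is illustrative)::
-
-  rados -p foo bench 60 write --no-cleanup
-  rados -p foo bench 60 seq
-  rados -p foo cleanup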
-
-:command:`cleanup` [ --run-name *run_name* ] [ --prefix *prefix* ]
- Clean up a previous benchmark operation.
- Note: the default run-name is "benchmark_last_metadata"
-
-:command:`listxattr` *name*
- List all extended attributes of an object.
-
-:command:`getxattr` *name* *attr*
- Dump the extended attribute value of *attr* of an object.
-
-:command:`setxattr` *name* *attr* *value*
- Set the value of *attr* in the extended attributes of an object.
-
-:command:`rmxattr` *name* *attr*
- Remove *attr* from the extended attributes of an object.
-
-:command:`listomapkeys` *name*
- List all the keys stored in the object map of object name.
-
-:command:`listomapvals` *name*
- List all key/value pairs stored in the object map of object name.
- The values are dumped in hexadecimal.
-
-:command:`getomapval` [ --omap-key-file *file* ] *name* *key* [ *out-file* ]
- Dump the hexadecimal value of key in the object map of object name.
- If the optional *out-file* argument is not provided, the value will be
- written to standard output.
-
-:command:`setomapval` [ --omap-key-file *file* ] *name* *key* [ *value* ]
- Set the value of key in the object map of object name. If the optional
- *value* argument is not provided, the value will be read from standard
- input.
-
-:command:`rmomapkey` [ --omap-key-file *file* ] *name* *key*
- Remove key from the object map of object name.
-
-:command:`getomapheader` *name*
- Dump the hexadecimal value of the object map header of object name.
-
-:command:`setomapheader` *name* *value*
- Set the value of the object map header of object name.
-
-Examples
-========
-
-To view cluster utilization::
-
- rados df
-
-To list the objects in pool foo, sent to stdout::
-
- rados -p foo ls -
-
-To write an object::
-
- rados -p foo put myobject blah.txt
-
-To create a snapshot::
-
- rados -p foo mksnap mysnap
-
-To delete the object::
-
- rados -p foo rm myobject
-
-To read a previously snapshotted version of an object::
-
- rados -p foo -s mysnap get myobject blah.txt.old
-
-To list inconsistent objects in PG 0.6::
-
- rados list-inconsistent-obj 0.6 --format=json-pretty
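-
-To set and then read back a key in an object's object map (the names are
-illustrative)::
-
- rados -p foo setomapval myobject mykey myvalue
- rados -p foo getomapval myobject mykey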
-
-
-Availability
-============
-
-**rados** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/radosgw-admin.rst b/src/ceph/doc/man/8/radosgw-admin.rst
deleted file mode 100644
index ab33f7e..0000000
--- a/src/ceph/doc/man/8/radosgw-admin.rst
+++ /dev/null
@@ -1,504 +0,0 @@
-:orphan:
-
-=================================================================
- radosgw-admin -- rados REST gateway user administration utility
-=================================================================
-
-.. program:: radosgw-admin
-
-Synopsis
-========
-
-| **radosgw-admin** *command* [ *options* *...* ]
-
-
-Description
-===========
-
-:program:`radosgw-admin` is a RADOS gateway user administration utility. It
-allows creating and modifying users.
-
-
-Commands
-========
-
-The :program:`radosgw-admin` utility exposes many commands for
-administration purposes, which are as follows:
-
-:command:`user create`
- Create a new user.
-
-:command:`user modify`
- Modify a user.
-
-:command:`user info`
- Display information of a user, and any potentially available
- subusers and keys.
-
-:command:`user rm`
- Remove a user.
-
-:command:`user suspend`
- Suspend a user.
-
-:command:`user enable`
- Re-enable user after suspension.
-
-:command:`user check`
- Check user info.
-
-:command:`user stats`
- Show user stats as accounted by quota subsystem.
-
-:command:`caps add`
- Add user capabilities.
-
-:command:`caps rm`
- Remove user capabilities.
-
-:command:`subuser create`
- Create a new subuser (primarily useful for clients using the Swift API).
-
-:command:`subuser modify`
- Modify a subuser.
-
-:command:`subuser rm`
- Remove a subuser.
-
-:command:`key create`
- Create access key.
-
-:command:`key rm`
- Remove access key.
-
-:command:`bucket list`
- List all buckets.
-
-:command:`bucket link`
- Link bucket to specified user.
-
-:command:`bucket unlink`
- Unlink bucket from specified user.
-
-:command:`bucket stats`
- Returns bucket statistics.
-
-:command:`bucket rm`
- Remove a bucket.
-
-:command:`bucket check`
- Check bucket index.
-
-:command:`object rm`
- Remove an object.
-
-:command:`object unlink`
- Unlink object from bucket index.
-
-:command:`quota set`
- Set quota params.
-
-:command:`quota enable`
- Enable quota.
-
-:command:`quota disable`
- Disable quota.
-
-:command:`region get`
- Show region info.
-
-:command:`region list`
- List all regions set on this cluster.
-
-:command:`region set`
- Set region info (requires infile).
-
-:command:`region default`
- Set default region.
-
-:command:`region-map get`
- Show region-map.
-
-:command:`region-map set`
- Set region-map (requires infile).
-
-:command:`zone get`
- Show zone cluster params.
-
-:command:`zone set`
- Set zone cluster params (requires infile).
-
-:command:`zone list`
- List all zones set on this cluster.
-
-:command:`pool add`
- Add an existing pool for data placement.
-
-:command:`pool rm`
- Remove an existing pool from data placement set.
-
-:command:`pools list`
- List placement active set.
-
-:command:`policy`
- Display bucket/object policy.
-
-:command:`log list`
- List log objects.
-
-:command:`log show`
- Dump a log from a specific object or (bucket + date + bucket-id).
- (NOTE: the date must be specified in the format "YYYY-MM-DD-hh".)
-
-:command:`log rm`
- Remove log object.
-
-:command:`usage show`
- Show the usage information (with optional user and date range).
-
-:command:`usage trim`
- Trim usage information (with optional user and date range).
-
-:command:`gc list`
- Dump expired garbage collection objects (specify --include-all to list all
- entries, including unexpired).
-
-:command:`gc process`
- Manually process garbage.
-
-:command:`metadata get`
- Get metadata info.
-
-:command:`metadata put`
- Put metadata info.
-
-:command:`metadata rm`
- Remove metadata info.
-
-:command:`metadata list`
- List metadata info.
-
-:command:`mdlog list`
- List metadata log.
-
-:command:`mdlog trim`
- Trim metadata log.
-
-:command:`bilog list`
- List bucket index log.
-
-:command:`bilog trim`
- Trim bucket index log (use start-marker, end-marker).
-
-:command:`datalog list`
- List data log.
-
-:command:`datalog trim`
- Trim data log.
-
-:command:`opstate list`
- List stateful operations entries (use client_id, op_id, object).
-
-:command:`opstate set`
- Set state on an entry (use client_id, op_id, object, state).
-
-:command:`opstate renew`
- Renew state on an entry (use client_id, op_id, object).
-
-:command:`opstate rm`
- Remove entry (use client_id, op_id, object).
-
-:command:`replicalog get`
- Get replica metadata log entry.
-
-:command:`replicalog delete`
- Delete replica metadata log entry.
-
-:command:`orphans find`
- Initialize and run a search for leaked rados objects.
-
-:command:`orphans finish`
- Clean up a search for leaked rados objects.
-
-
-Options
-=======
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
- Use ``ceph.conf`` configuration file instead of the default
- ``/etc/ceph/ceph.conf`` to determine monitor addresses during
- startup.
-
-.. option:: -m monaddress[:port]
-
- Connect to specified monitor (instead of looking through ceph.conf).
-
-.. option:: --uid=uid
-
- The radosgw user ID.
-
-.. option:: --subuser=<name>
-
- Name of the subuser.
-
-.. option:: --email=email
-
- The e-mail address of the user.
-
-.. option:: --display-name=name
-
- Configure the display name of the user.
-
-.. option:: --access-key=<key>
-
- S3 access key.
-
-.. option:: --gen-access-key
-
- Generate random access key (for S3).
-
-.. option:: --secret=secret
-
- The secret associated with a given key.
-
-.. option:: --gen-secret
-
- Generate random secret key.
-
-.. option:: --key-type=<type>
-
- Key type; options are: swift, s3.
-
-.. option:: --temp-url-key[-2]=<key>
-
- Temporary url key.
-
-.. option:: --system
-
- Set the system flag on the user.
-
-.. option:: --bucket=bucket
-
- Specify the bucket name.
-
-.. option:: --object=object
-
- Specify the object name.
-
-.. option:: --date=yyyy-mm-dd
-
- The date needed for some commands.
-
-.. option:: --start-date=yyyy-mm-dd
-
- The start date needed for some commands.
-
-.. option:: --end-date=yyyy-mm-dd
-
- The end date needed for some commands.
-
-.. option:: --shard-id=<shard-id>
-
- Optional for mdlog list. Required for ``mdlog trim``,
- ``replica mdlog get/delete``, ``replica datalog get/delete``.
-
-.. option:: --auth-uid=auid
-
- The librados auid.
-
-.. option:: --purge-data
-
- Remove user data before user removal.
-
-.. option:: --purge-keys
-
- When specified, subuser removal will also purge all the subuser keys.
-
-.. option:: --purge-objects
-
- Remove all objects before bucket removal.
-
-.. option:: --metadata-key=<key>
-
- Key to retrieve metadata from with ``metadata get``.
-
-.. option:: --rgw-region=<region>
-
- Region in which radosgw is running.
-
-.. option:: --rgw-zone=<zone>
-
- Zone in which radosgw is running.
-
-.. option:: --fix
-
- Besides checking bucket index, will also fix it.
-
-.. option:: --check-objects
-
- For ``bucket check``: rebuilds the bucket index according to the actual object state.
-
-.. option:: --format=<format>
-
- Specify output format for certain operations: xml, json.
-
-.. option:: --sync-stats
-
- Option to 'user stats', update user stats with current stats reported by
- user's buckets indexes.
-
-.. option:: --show-log-entries=<flag>
-
- Enable/disable dump of log entries on log show.
-
-.. option:: --show-log-sum=<flag>
-
- Enable/disable dump of log summation on log show.
-
-.. option:: --skip-zero-entries
-
- With ``log show``, dump only entries that have a nonzero value in at
- least one numeric field.
-
-.. option:: --infile
-
- Specify a file to read in when setting data.
-
-.. option:: --state=<state string>
-
- Specify a state for the opstate set command.
-
-.. option:: --replica-log-type
-
- Replica log type (metadata, data, bucket), required for replica log
- operations.
-
-.. option:: --categories=<list>
-
- Comma separated list of categories, used in usage show.
-
-.. option:: --caps=<caps>
-
- List of caps (e.g., "usage=read, write; user=read").
-
-.. option:: --compression=<compression-algorithm>
-
- Placement target compression algorithm (lz4|snappy|zlib|zstd)
-
-.. option:: --yes-i-really-mean-it
-
- Required for certain operations.
-
-
-Quota Options
-=============
-
-.. option:: --max-objects
-
- Specify max objects (negative value to disable).
-
-.. option:: --max-size
-
- Specify max size (in bytes, negative value to disable).
-
-.. option:: --quota-scope
-
- Scope of quota (bucket, user).
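-
-For example, a per-user quota of at most 1024 objects (the values are
-illustrative) could be set and enabled with::
-
- radosgw-admin quota set --quota-scope=user --uid=johnny --max-objects=1024
- radosgw-admin quota enable --quota-scope=user --uid=johnny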
-
-
-Orphans Search Options
-======================
-
-.. option:: --pool
-
- Data pool to scan for leaked rados objects
-
-.. option:: --num-shards
-
- Number of shards to use for keeping the temporary scan info
-
-.. option:: --orphan-stale-secs
-
- Number of seconds to wait before declaring an object to be an orphan.
- Default is 86400 (24 hours).
-
-.. option:: --job-id
-
- Set the job id (for orphans find)
-
-.. option:: --max-concurrent-ios
-
- Maximum concurrent ios for orphans find.
- Default is 32.
-
-
-Examples
-========
-
-Generate a new user::
-
- $ radosgw-admin user create --display-name="johnny rotten" --uid=johnny
- { "user_id": "johnny",
- "rados_uid": 0,
- "display_name": "johnny rotten",
- "email": "",
- "suspended": 0,
- "subusers": [],
- "keys": [
- { "user": "johnny",
- "access_key": "TCICW53D9BQ2VGC46I44",
- "secret_key": "tfm9aHMI8X76L3UdgE+ZQaJag1vJQmE6HDb5Lbrz"}],
- "swift_keys": []}
-
-Remove a user::
-
- $ radosgw-admin user rm --uid=johnny
-
-Remove a user and all associated buckets with their contents::
-
- $ radosgw-admin user rm --uid=johnny --purge-data
-
-Remove a bucket::
-
- $ radosgw-admin bucket rm --bucket=foo
-
-Link bucket to specified user::
-
- $ radosgw-admin bucket link --bucket=foo --bucket_id=<bucket id> --uid=johnny
-
-Unlink bucket from specified user::
-
- $ radosgw-admin bucket unlink --bucket=foo --uid=johnny
-
-Show the logs of a bucket from April 1st, 2012::
-
- $ radosgw-admin log show --bucket=foo --date=2012-04-01-01 --bucket-id=default.14193.1
-
-Show usage information for user from March 1st to (but not including) April 1st, 2012::
-
- $ radosgw-admin usage show --uid=johnny \
- --start-date=2012-03-01 --end-date=2012-04-01
-
-Show only summary of usage information for all users::
-
- $ radosgw-admin usage show --show-log-entries=false
-
-Trim usage information for user until March 1st, 2012::
-
- $ radosgw-admin usage trim --uid=johnny --end-date=2012-04-01
-
-
-Availability
-============
-
-:program:`radosgw-admin` is part of Ceph, a massively scalable, open-source,
-distributed storage system. Please refer to the Ceph documentation at
-http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
-:doc:`radosgw <radosgw>`\(8)
diff --git a/src/ceph/doc/man/8/radosgw.rst b/src/ceph/doc/man/8/radosgw.rst
deleted file mode 100644
index f57b346..0000000
--- a/src/ceph/doc/man/8/radosgw.rst
+++ /dev/null
@@ -1,256 +0,0 @@
-:orphan:
-
-===============================
- radosgw -- rados REST gateway
-===============================
-
-.. program:: radosgw
-
-Synopsis
-========
-
-| **radosgw**
-
-
-Description
-===========
-
-:program:`radosgw` is an HTTP REST gateway for the RADOS object store, a part
-of the Ceph distributed storage system. It is implemented as a FastCGI
-module using libfcgi, and can be used in conjunction with any FastCGI
-capable web server.
-
-
-Options
-=======
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
- Use ``ceph.conf`` configuration file instead of the default
- ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
-
-.. option:: -m monaddress[:port]
-
- Connect to specified monitor (instead of looking through ``ceph.conf``).
-
-.. option:: -i ID, --id ID
-
- Set the ID portion of name for radosgw
-
-.. option:: -n TYPE.ID, --name TYPE.ID
-
- Set the rados user name for the gateway (e.g. client.radosgw.gateway)
-
-.. option:: --cluster NAME
-
- Set the cluster name (default: ceph)
-
-.. option:: -d
-
- Run in foreground, log to stderr
-
-.. option:: -f
-
- Run in foreground, log to usual location
-
-.. option:: --rgw-socket-path=path
-
- Specify a unix domain socket path.
-
-.. option:: --rgw-region=region
-
- The region where radosgw runs
-
-.. option:: --rgw-zone=zone
-
- The zone where radosgw runs
-
-
-Configuration
-=============
-
-Earlier, RADOS Gateway had to be configured with ``Apache`` and ``mod_fastcgi``.
-Now, the ``mod_proxy_fcgi`` module is used instead of ``mod_fastcgi``.
-``mod_proxy_fcgi`` works differently than a traditional FastCGI module: it
-requires the services of ``mod_proxy``, which provides support for the
-FastCGI protocol. So, to be able to handle the FastCGI protocol, both
-``mod_proxy`` and ``mod_proxy_fcgi`` have to be present on the server.
-Unlike ``mod_fastcgi``, ``mod_proxy_fcgi`` cannot start the application
-process; some platforms provide ``fcgistarter`` for that purpose. External
-launching or process management may also be available in the FastCGI
-application framework in use.
-
-``Apache`` can be configured so that ``mod_proxy_fcgi`` is used either with
-localhost TCP or through a Unix domain socket. Versions of ``mod_proxy_fcgi``
-that do not support Unix domain sockets, such as those in Apache 2.2 and
-early versions of Apache 2.4, need to be configured for use with localhost
-TCP. Apache 2.4.9 and later support Unix domain sockets, and thus allow
-configuration with a Unix domain socket instead of localhost TCP.
-
-The following steps show the configuration in Ceph's configuration file,
-i.e., ``/etc/ceph/ceph.conf``, and the gateway configuration file, i.e.,
-``/etc/httpd/conf.d/rgw.conf`` (RPM-based distros) or
-``/etc/apache2/conf-available/rgw.conf`` (Debian-based distros), with
-localhost TCP and through a Unix domain socket:
-
-#. For distros with Apache 2.2 and early versions of Apache 2.4 that use
- localhost TCP and do not support Unix Domain Socket, append the following
- contents to ``/etc/ceph/ceph.conf``::
-
- [client.radosgw.gateway]
- host = {hostname}
- keyring = /etc/ceph/ceph.client.radosgw.keyring
- rgw socket path = ""
- log file = /var/log/ceph/client.radosgw.gateway.log
- rgw frontends = fastcgi socket_port=9000 socket_host=0.0.0.0
- rgw print continue = false
-
-#. Add the following content in the gateway configuration file:
-
- For Debian/Ubuntu add in ``/etc/apache2/conf-available/rgw.conf``::
-
- <VirtualHost *:80>
- ServerName localhost
- DocumentRoot /var/www/html
-
- ErrorLog /var/log/apache2/rgw_error.log
- CustomLog /var/log/apache2/rgw_access.log combined
-
- # LogLevel debug
-
- RewriteEngine On
-
- RewriteRule .* - [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
-
- SetEnv proxy-nokeepalive 1
-
- ProxyPass / fcgi://localhost:9000/
-
- </VirtualHost>
-
- For CentOS/RHEL add in ``/etc/httpd/conf.d/rgw.conf``::
-
- <VirtualHost *:80>
- ServerName localhost
- DocumentRoot /var/www/html
-
- ErrorLog /var/log/httpd/rgw_error.log
- CustomLog /var/log/httpd/rgw_access.log combined
-
- # LogLevel debug
-
- RewriteEngine On
-
- RewriteRule .* - [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
-
- SetEnv proxy-nokeepalive 1
-
- ProxyPass / fcgi://localhost:9000/
-
- </VirtualHost>
-
-#. For distros with Apache 2.4.9 or later that support Unix Domain Socket,
- append the following configuration to ``/etc/ceph/ceph.conf``::
-
- [client.radosgw.gateway]
- host = {hostname}
- keyring = /etc/ceph/ceph.client.radosgw.keyring
- rgw socket path = /var/run/ceph/ceph.radosgw.gateway.fastcgi.sock
- log file = /var/log/ceph/client.radosgw.gateway.log
- rgw print continue = false
-
-#. Add the following content in the gateway configuration file:
-
- For CentOS/RHEL add in ``/etc/httpd/conf.d/rgw.conf``::
-
- <VirtualHost *:80>
- ServerName localhost
- DocumentRoot /var/www/html
-
- ErrorLog /var/log/httpd/rgw_error.log
- CustomLog /var/log/httpd/rgw_access.log combined
-
- # LogLevel debug
-
- RewriteEngine On
-
- RewriteRule .* - [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
-
- SetEnv proxy-nokeepalive 1
-
- ProxyPass / unix:///var/run/ceph/ceph.radosgw.gateway.fastcgi.sock|fcgi://localhost:9000/
-
- </VirtualHost>
-
- Ubuntu 14.04 ships with ``Apache 2.4.7``, which does not have Unix Domain
- Socket support, and as such has to be configured with localhost TCP. Unix
- Domain Socket support is available in ``Apache 2.4.9`` and later versions.
- A bug has been filed to backport the UDS support to ``Apache 2.4.7`` for
- ``Ubuntu 14.04``.
- See: https://bugs.launchpad.net/ubuntu/+source/apache2/+bug/1411030
-
-#. Generate a key for radosgw to use for authentication with the cluster. ::
-
- ceph-authtool -C -n client.radosgw.gateway --gen-key /etc/ceph/keyring.radosgw.gateway
- ceph-authtool -n client.radosgw.gateway --cap mon 'allow rw' --cap osd 'allow rwx' /etc/ceph/keyring.radosgw.gateway
-
-#. Add the key to the auth entries. ::
-
- ceph auth add client.radosgw.gateway --in-file=keyring.radosgw.gateway
-
-#. Start Apache and radosgw.
-
- Debian/Ubuntu::
-
- sudo /etc/init.d/apache2 start
- sudo /etc/init.d/radosgw start
-
- CentOS/RHEL::
-
- sudo apachectl start
- sudo /etc/init.d/ceph-radosgw start
-
-Usage Logging
-=============
-
-:program:`radosgw` maintains an asynchronous usage log. It accumulates
-statistics about user operations and flushes it periodically. The
-logs can be accessed and managed through :program:`radosgw-admin`.
-
-The information that is being logged contains total data transfer,
-total operations, and total successful operations. The data is being
-accounted in an hourly resolution under the bucket owner, unless the
-operation was done on the service (e.g., when listing a bucket) in
-which case it is accounted under the operating user.
-
-The following is an example configuration::
-
- [client.radosgw.gateway]
- rgw enable usage log = true
- rgw usage log tick interval = 30
- rgw usage log flush threshold = 1024
- rgw usage max shards = 32
- rgw usage max user shards = 1
-
-
-The total number of shards determines how many total objects hold the
-usage log information. The per-user number of shards specifies how many
-objects hold usage information for a single user. The tick interval
-configures the number of seconds between log flushes, and the flush
-threshold specifies how many entries can be kept before resorting to a
-synchronous flush.
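-
-Once flushed, the accumulated usage log can be inspected or trimmed with
-:program:`radosgw-admin`, for example::
-
- radosgw-admin usage show --uid=johnny --start-date=2012-03-01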
-
-
-Availability
-============
-
-:program:`radosgw` is part of Ceph, a massively scalable, open-source, distributed
-storage system. Please refer to the Ceph documentation at http://ceph.com/docs for
-more information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8)
-:doc:`radosgw-admin <radosgw-admin>`\(8)
diff --git a/src/ceph/doc/man/8/rbd-fuse.rst b/src/ceph/doc/man/8/rbd-fuse.rst
deleted file mode 100644
index 394bdba..0000000
--- a/src/ceph/doc/man/8/rbd-fuse.rst
+++ /dev/null
@@ -1,56 +0,0 @@
-:orphan:
-
-=======================================
- rbd-fuse -- expose rbd images as files
-=======================================
-
-.. program:: rbd-fuse
-
-Synopsis
-========
-
-| **rbd-fuse** [ -p pool ] [-c conffile] *mountpoint* [ *fuse options* ]
-
-
-Description
-===========
-
-**rbd-fuse** is a FUSE (File system in USErspace) client for RADOS
-block device (rbd) images. Given a pool containing rbd images,
-it will mount a userspace filesystem allowing access to those images
-as regular files at **mountpoint**.
-
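-For example, assuming a pool named ``mypool`` (an illustrative name), the
-images it contains could be exposed under ``/mnt/rbd`` with::
-
- rbd-fuse -p mypool /mnt/rbd
-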
-The file system can be unmounted with::
-
- fusermount -u mountpoint
-
-or by sending ``SIGINT`` to the ``rbd-fuse`` process.
-
-
-Options
-=======
-
-Any options not recognized by rbd-fuse will be passed on to libfuse.
-
-.. option:: -c ceph.conf
-
- Use *ceph.conf* configuration file instead of the default
- ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
-
-.. option:: -p pool
-
- Use *pool* as the pool to search for rbd images. Default is ``rbd``.
-
-
-Availability
-============
-
-**rbd-fuse** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-fusermount(8),
-:doc:`rbd <rbd>`\(8)
diff --git a/src/ceph/doc/man/8/rbd-ggate.rst b/src/ceph/doc/man/8/rbd-ggate.rst
deleted file mode 100644
index 67d0c81..0000000
--- a/src/ceph/doc/man/8/rbd-ggate.rst
+++ /dev/null
@@ -1,79 +0,0 @@
-:orphan:
-
-==================================================
- rbd-ggate -- map rbd images via FreeBSD GEOM Gate
-==================================================
-
-.. program:: rbd-ggate
-
-Synopsis
-========
-
-| **rbd-ggate** [--read-only] [--exclusive] [--device *ggate device*] map *image-spec* | *snap-spec*
-| **rbd-ggate** unmap *ggate device*
-| **rbd-ggate** list
-
-Description
-===========
-
-**rbd-ggate** is a client for RADOS block device (rbd) images. It will
-map an rbd image to a ggate (FreeBSD GEOM Gate class) device, allowing
-access to it as a regular local block device.
-
-Commands
-========
-
-map
----
-
-Spawn a process responsible for the creation of ggate device and
-forwarding I/O requests between the GEOM Gate kernel subsystem and
-RADOS.
-
-unmap
------
-
-Destroy ggate device and terminate the process responsible for it.
-
-list
-----
-
-List mapped ggate devices.
-
-Options
-=======
-
-.. option:: --device *ggate device*
-
- Specify ggate device path.
-
-.. option:: --read-only
-
- Map read-only.
-
-.. option:: --exclusive
-
- Forbid writes by other clients.
-
-Image and snap specs
-====================
-
-| *image-spec* is [*pool-name*]/*image-name*
-| *snap-spec* is [*pool-name*]/*image-name*\ @\ *snap-name*
-
-The default for *pool-name* is "rbd". If an image name contains a slash
-character ('/'), *pool-name* is required.
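-
-Examples
-========
-
-To map an image, list mappings, and unmap again (the image name and
-resulting device path are illustrative)::
-
- rbd-ggate map mypool/myimage
- rbd-ggate list
- rbd-ggate unmap /dev/ggate0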
-
-Availability
-============
-
-**rbd-ggate** is part of Ceph, a massively scalable, open-source,
-distributed storage system. Please refer to the Ceph documentation at
-http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`rbd <rbd>`\(8)
-:doc:`ceph <ceph>`\(8)
diff --git a/src/ceph/doc/man/8/rbd-mirror.rst b/src/ceph/doc/man/8/rbd-mirror.rst
deleted file mode 100644
index b35787f..0000000
--- a/src/ceph/doc/man/8/rbd-mirror.rst
+++ /dev/null
@@ -1,75 +0,0 @@
-:orphan:
-
-===================================================
- rbd-mirror -- Ceph daemon for mirroring RBD images
-===================================================
-
-.. program:: rbd-mirror
-
-Synopsis
-========
-
-| **rbd-mirror**
-
-
-Description
-===========
-
-:program:`rbd-mirror` is a daemon for asynchronous mirroring of RADOS
-block device (rbd) images among Ceph clusters. It replays changes made
-to images in remote clusters onto images in a local cluster, for
-disaster recovery.
-
-It connects to remote clusters via the RADOS protocol, relying on
-default search paths to find ceph.conf files, monitor addresses and
-authentication information for them, i.e. ``/etc/ceph/$cluster.conf``,
-``/etc/ceph/$cluster.keyring``, and
-``/etc/ceph/$cluster.$name.keyring``, where ``$cluster`` is the
-human-friendly name of the cluster, and ``$name`` is the rados user to
-connect as, e.g. ``client.rbd-mirror``.
-
-
-Options
-=======
-
-.. option:: -c ceph.conf, --conf=ceph.conf
-
- Use ``ceph.conf`` configuration file instead of the default
- ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
-
-.. option:: -m monaddress[:port]
-
- Connect to specified monitor (instead of looking through ``ceph.conf``).
-
-.. option:: -i ID, --id ID
-
- Set the ID portion of the name for rbd-mirror.
-
-.. option:: -n TYPE.ID, --name TYPE.ID
-
- Set the rados user name for the daemon (e.g. client.rbd-mirror).
-
-.. option:: --cluster NAME
-
- Set the cluster name (default: ceph)
-
-.. option:: -d
-
- Run in foreground, log to stderr
-
-.. option:: -f
-
- Run in foreground, log to the usual location.
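-
-
-Examples
-========
-
-To run the daemon in the foreground for testing, assuming a rados user
-``client.rbd-mirror`` has been created::
-
- rbd-mirror -f --id rbd-mirror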
-
-
-Availability
-============
-
-:program:`rbd-mirror` is part of Ceph, a massively scalable, open-source, distributed
-storage system. Please refer to the Ceph documentation at http://ceph.com/docs for
-more information.
-
-
-See also
-========
-
-:doc:`rbd <rbd>`\(8)
diff --git a/src/ceph/doc/man/8/rbd-nbd.rst b/src/ceph/doc/man/8/rbd-nbd.rst
deleted file mode 100644
index bb20a4d..0000000
--- a/src/ceph/doc/man/8/rbd-nbd.rst
+++ /dev/null
@@ -1,67 +0,0 @@
-:orphan:
-
-=========================================
- rbd-nbd -- map rbd images to nbd device
-=========================================
-
-.. program:: rbd-nbd
-
-Synopsis
-========
-
-| **rbd-nbd** [-c conf] [--read-only] [--device *nbd device*] [--nbds_max *limit*] [--max_part *limit*] [--exclusive] map *image-spec* | *snap-spec*
-| **rbd-nbd** unmap *nbd device*
-| **rbd-nbd** list-mapped
-
-Description
-===========
-
-**rbd-nbd** is a client for RADOS block device (rbd) images, similar to the
-rbd kernel module. It will map an rbd image to an nbd (Network Block Device)
-device, allowing it to be accessed as a regular local block device.
-
-Options
-=======
-
-.. option:: -c ceph.conf
-
- Use *ceph.conf* configuration file instead of the default
- ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
-
-.. option:: --read-only
-
- Map read-only.
-
-.. option:: --nbds_max *limit*
-
- Override the ``nbds_max`` parameter of the NBD kernel module at modprobe
- time; used to limit the number of nbd devices.
-
-.. option:: --max_part *limit*
-
- Override the ``max_part`` parameter of the NBD kernel module, which
- limits the number of partitions per nbd device.
-
-.. option:: --exclusive
-
- Forbid writes by other clients.
-
-Image and snap specs
-====================
-
-| *image-spec* is [*pool-name*]/*image-name*
-| *snap-spec* is [*pool-name*]/*image-name*\ @\ *snap-name*
-
-The default for *pool-name* is "rbd". If an image name contains a slash
-character ('/'), *pool-name* is required.
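-
-Examples
-========
-
-To map an image, list mappings, and unmap the resulting device (the
-device path is illustrative)::
-
- rbd-nbd map mypool/myimage
- rbd-nbd list-mapped
- rbd-nbd unmap /dev/nbd0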
-
-Availability
-============
-
-**rbd-nbd** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`rbd <rbd>`\(8)
diff --git a/src/ceph/doc/man/8/rbd-replay-many.rst b/src/ceph/doc/man/8/rbd-replay-many.rst
deleted file mode 100644
index 5fb9349..0000000
--- a/src/ceph/doc/man/8/rbd-replay-many.rst
+++ /dev/null
@@ -1,73 +0,0 @@
-:orphan:
-
-==================================================================================
- rbd-replay-many -- replay a rados block device (RBD) workload on several clients
-==================================================================================
-
-.. program:: rbd-replay-many
-
-Synopsis
-========
-
-| **rbd-replay-many** [ *options* ] --original-image *name* *host1* [ *host2* [ ... ] ] -- *rbd_replay_args*
-
-
-Description
-===========
-
-**rbd-replay-many** is a utility for replaying a rados block device (RBD) workload on several clients.
-Although all clients use the same workload, they replay against separate images.
-This matches normal use of librbd, where each original client is a VM with its own image.
-
-Configuration and replay files are not automatically copied to clients.
-Replay images must already exist.
-
-
-Options
-=======
-
-.. option:: --original-image name
-
- Specifies the name (and snap) of the originally traced image.
- Necessary for correct name mapping.
-
-.. option:: --image-prefix prefix
-
- Prefix of image names to replay against.
- Specifying --image-prefix=foo results in clients replaying against foo-0, foo-1, etc.
- Defaults to the original image name.
-
-.. option:: --exec program
-
- Path to the rbd-replay executable.
-
-.. option:: --delay seconds
-
- Delay between starting each client. Defaults to 0.
-
-
-Examples
-========
-
-Typical usage::
-
- rbd-replay-many host-0 host-1 --original-image=image -- -c ceph.conf replay.bin
-
-This results in the following commands being executed::
-
- ssh host-0 'rbd-replay' --map-image 'image=image-0' -c ceph.conf replay.bin
- ssh host-1 'rbd-replay' --map-image 'image=image-1' -c ceph.conf replay.bin
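-
-To stagger client start-up, the --delay option can be added; for
-example, to start each client five seconds apart::
-
- rbd-replay-many --delay 5 host-0 host-1 --original-image=image -- -c ceph.conf replay.bin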
-
-
-Availability
-============
-
-**rbd-replay-many** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`rbd-replay <rbd-replay>`\(8),
-:doc:`rbd <rbd>`\(8)
diff --git a/src/ceph/doc/man/8/rbd-replay-prep.rst b/src/ceph/doc/man/8/rbd-replay-prep.rst
deleted file mode 100644
index abb08de..0000000
--- a/src/ceph/doc/man/8/rbd-replay-prep.rst
+++ /dev/null
@@ -1,55 +0,0 @@
-:orphan:
-
-====================================================================================
- rbd-replay-prep -- prepare captured rados block device (RBD) workloads for replay
-====================================================================================
-
-.. program:: rbd-replay-prep
-
-Synopsis
-========
-
-| **rbd-replay-prep** [ --window *seconds* ] [ --anonymize ] *trace_dir* *replay_file*
-
-
-Description
-===========
-
-**rbd-replay-prep** processes raw rados block device (RBD) traces to prepare them for **rbd-replay**.
-
-
-Options
-=======
-
-.. option:: --window seconds
-
- Requests further apart than 'seconds' seconds are assumed to be independent.
-
-.. option:: --anonymize
-
- Anonymizes image and snap names.
-
-.. option:: --verbose
-
- Print all processed events to the console.
-
-Examples
-========
-
-To prepare workload1-trace for replay::
-
- rbd-replay-prep workload1-trace/ust/uid/1000/64-bit workload1
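-
-To prepare the same trace while anonymizing image and snap names::
-
- rbd-replay-prep --anonymize workload1-trace/ust/uid/1000/64-bit workload1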
-
-
-Availability
-============
-
-**rbd-replay-prep** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`rbd-replay <rbd-replay>`\(8),
-:doc:`rbd <rbd>`\(8)
diff --git a/src/ceph/doc/man/8/rbd-replay.rst b/src/ceph/doc/man/8/rbd-replay.rst
deleted file mode 100644
index 74b8018..0000000
--- a/src/ceph/doc/man/8/rbd-replay.rst
+++ /dev/null
@@ -1,78 +0,0 @@
-:orphan:
-
-=========================================================
- rbd-replay -- replay rados block device (RBD) workloads
-=========================================================
-
-.. program:: rbd-replay
-
-Synopsis
-========
-
-| **rbd-replay** [ *options* ] *replay_file*
-
-
-Description
-===========
-
-**rbd-replay** is a utility for replaying rados block device (RBD) workloads.
-
-
-Options
-=======
-
-.. option:: -c ceph.conf, --conf ceph.conf
-
- Use ceph.conf configuration file instead of the default /etc/ceph/ceph.conf to
- determine monitor addresses during startup.
-
-.. option:: -p pool, --pool pool
-
- Interact with the given pool. Defaults to 'rbd'.
-
-.. option:: --latency-multiplier
-
- Multiplies inter-request latencies. Default: 1.
-
-.. option:: --read-only
-
- Only replay non-destructive requests.
-
-.. option:: --map-image rule
-
- Add a rule to map image names in the trace to image names in the replay cluster.
- A rule of image1@snap1=image2@snap2 would map snap1 of image1 to snap2 of image2.
-
-.. option:: --dump-perf-counters
-
- **Experimental**
- Dump performance counters to standard output before an image is closed.
- Performance counters may be dumped multiple times if multiple images are closed,
- or if the same image is opened and closed multiple times.
- Performance counters and their meaning may change between versions.
-
-
-Examples
-========
-
-To replay workload1 as fast as possible::
-
- rbd-replay --latency-multiplier=0 workload1
-
-To replay workload1 but use test_image instead of prod_image::
-
- rbd-replay --map-image=prod_image=test_image workload1
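-
-To replay workload1 while remapping a traced snapshot, using the rule
-syntax described above::
-
- rbd-replay --map-image=prod_image@snap1=test_image@snap2 workload1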
-
-
-Availability
-============
-
-**rbd-replay** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`rbd-replay-prep <rbd-replay-prep>`\(8),
-:doc:`rbd <rbd>`\(8)
diff --git a/src/ceph/doc/man/8/rbd.rst b/src/ceph/doc/man/8/rbd.rst
deleted file mode 100644
index c37ca44..0000000
--- a/src/ceph/doc/man/8/rbd.rst
+++ /dev/null
@@ -1,615 +0,0 @@
-:orphan:
-
-===============================================
- rbd -- manage rados block device (RBD) images
-===============================================
-
-.. program:: rbd
-
-Synopsis
-========
-
-| **rbd** [ -c *ceph.conf* ] [ -m *monaddr* ] [--cluster *cluster-name*]
- [ -p | --pool *pool* ] [ *command* ... ]
-
-
-Description
-===========
-
-**rbd** is a utility for manipulating rados block device (RBD) images,
-used by the Linux rbd driver and the rbd storage driver for QEMU/KVM.
-RBD images are simple block devices that are striped over objects and
-stored in a RADOS object store. The size of the objects the image is
-striped over must be a power of two.
-
-
-Options
-=======
-
-.. option:: -c ceph.conf, --conf ceph.conf
-
- Use ceph.conf configuration file instead of the default /etc/ceph/ceph.conf to
- determine monitor addresses during startup.
-
-.. option:: -m monaddress[:port]
-
- Connect to specified monitor (instead of looking through ceph.conf).
-
-.. option:: --cluster cluster-name
-
- Use a cluster name different from the default cluster name *ceph*.
-
-.. option:: -p pool-name, --pool pool-name
-
- Interact with the given pool. Required by most commands.
-
-.. option:: --no-progress
-
- Do not output progress information (goes to standard error by
- default for some commands).
-
-
-Parameters
-==========
-
-.. option:: --image-format format-id
-
- Specifies which object layout to use. The default is 2.
-
- * format 1 - (deprecated) Use the original format for a new rbd image. This
- format is understood by all versions of librbd and the kernel rbd module,
- but does not support newer features like cloning.
-
- * format 2 - Use the second rbd format, which is supported by
- librbd and kernel since version 3.11 (except for striping). This adds
- support for cloning and is more easily extensible to allow more
- features in the future.
-
-.. option:: -s size-in-M/G/T, --size size-in-M/G/T
-
- Specifies the size of the new rbd image or the new size of the existing rbd
- image in M/G/T. If no suffix is given, unit M is assumed.
-
-.. option:: --object-size size-in-B/K/M
-
- Specifies the object size in B/K/M. The object size will be rounded up
- to the nearest power of two; if no suffix is given, unit B is assumed.
- The default object size is 4M, the smallest is 4K and the maximum is 32M.
-
-.. option:: --stripe-unit size-in-B/K/M
-
- Specifies the stripe unit size in B/K/M. If no suffix is given, unit B is
- assumed. See striping section (below) for more details.
-
-.. option:: --stripe-count num
-
- Specifies the number of objects to stripe over before looping back
- to the first object. See striping section (below) for more details.
-
-.. option:: --snap snap
-
- Specifies the snapshot name for the specific operation.
-
-.. option:: --id username
-
- Specifies the username (without the ``client.`` prefix) to use with the map command.
-
-.. option:: --keyring filename
-
- Specifies a keyring file containing a secret for the specified user
- to use with the map command. If not specified, the default keyring
- locations will be searched.
-
-.. option:: --keyfile filename
-
- Specifies a file containing the secret key of ``--id user`` to use with the map command.
- This option is overridden by ``--keyring`` if the latter is also specified.
-
-.. option:: --shared lock-tag
-
- Option for `lock add` that allows multiple clients to lock the
- same image if they use the same tag. The tag is an arbitrary
- string. This is useful for situations where an image must
- be open from more than one client at once, like during
- live migration of a virtual machine, or for use underneath
- a clustered filesystem.
-
-.. option:: --format format
-
- Specifies the output format: plain (the default), json, or xml.
-
-.. option:: --pretty-format
-
- Make json or xml formatted output more human-readable.
-
-.. option:: -o krbd-options, --options krbd-options
-
- Specifies which options to use when mapping or unmapping an image via the
- rbd kernel driver. krbd-options is a comma-separated list of options
- (similar to mount(8) mount options). See kernel rbd (krbd) options section
- below for more details.
-
-.. option:: --read-only
-
- Map the image read-only. Equivalent to -o ro.
-
-.. option:: --image-feature feature-name
-
- Specifies which RBD format 2 feature should be enabled when creating
- an image. Multiple features can be enabled by repeating this option
- multiple times. The following features are supported:
-
- * layering: layering support
- * striping: striping v2 support
- * exclusive-lock: exclusive locking support
- * object-map: object map support (requires exclusive-lock)
- * fast-diff: fast diff calculations (requires object-map)
- * deep-flatten: snapshot flatten support
- * journaling: journaled IO support (requires exclusive-lock)
-
-.. option:: --image-shared
-
- Specifies that the image will be used concurrently by multiple clients.
- This will disable features that are dependent upon exclusive ownership
- of the image.
-
-.. option:: --whole-object
-
- Specifies that the diff should be limited to the extents of a full object
- instead of showing intra-object deltas. When the object map feature is
- enabled on an image, limiting the diff to the object extents will
- dramatically improve performance since the differences can be computed
- by examining the in-memory object map instead of querying RADOS for each
- object within the image.
-
-.. option:: --limit
-
- Specifies the limit for the number of snapshots permitted.
-
-Commands
-========
-
-.. TODO rst "option" directive seems to require --foo style options, parsing breaks on subcommands.. the args show up as bold too
-
-:command:`ls` [-l | --long] [*pool-name*]
- Will list all rbd images recorded in the rbd_directory object. With
- -l, also show snapshots, and use longer-format output including
- size, parent (if clone), format, etc.
-
-:command:`du` [-p | --pool *pool-name*] [*image-spec* | *snap-spec*]
- Will calculate the provisioned and actual disk usage of all images and
- associated snapshots within the specified pool. It can also be used against
- individual images and snapshots.
-
- If the RBD fast-diff feature is not enabled on images, this operation will
- require querying the OSDs for every potential object within the image.
-
-:command:`info` *image-spec* | *snap-spec*
- Will dump information (such as size and object size) about a specific rbd image.
- If image is a clone, information about its parent is also displayed.
- If a snapshot is specified, whether it is protected is shown as well.
-
-:command:`create` (-s | --size *size-in-M/G/T*) [--image-format *format-id*] [--object-size *size-in-B/K/M*] [--stripe-unit *size-in-B/K/M* --stripe-count *num*] [--image-feature *feature-name*]... [--image-shared] *image-spec*
- Will create a new rbd image. You must also specify the size via --size. The
- --stripe-unit and --stripe-count arguments are optional, but must be used together.
-
-:command:`clone` [--object-size *size-in-B/K/M*] [--stripe-unit *size-in-B/K/M* --stripe-count *num*] [--image-feature *feature-name*] [--image-shared] *parent-snap-spec* *child-image-spec*
- Will create a clone (copy-on-write child) of the parent snapshot.
- Object size will be identical to that of the parent image unless
- specified. Size will be the same as the parent snapshot. The --stripe-unit
- and --stripe-count arguments are optional, but must be used together.
-
- The parent snapshot must be protected (see `rbd snap protect`).
- This requires image format 2.
-
-:command:`flatten` *image-spec*
- If image is a clone, copy all shared blocks from the parent snapshot and
- make the child independent of the parent, severing the link between
- parent snap and child. The parent snapshot can be unprotected and
- deleted if it has no further dependent clones.
-
- This requires image format 2.
-
-:command:`children` *snap-spec*
- List the clones of the image at the given snapshot. This checks
- every pool, and outputs the resulting poolname/imagename.
-
- This requires image format 2.
-
-:command:`resize` (-s | --size *size-in-M/G/T*) [--allow-shrink] *image-spec*
- Resize rbd image. The size parameter also needs to be specified.
- The --allow-shrink option lets the size be reduced.
-
-:command:`rm` *image-spec*
- Delete an rbd image (including all data blocks). If the image has
- snapshots, this fails and nothing is deleted.
-
-:command:`export` [--export-format *format (1 or 2)*] (*image-spec* | *snap-spec*) [*dest-path*]
- Export image to dest path (use - for stdout).
- The --export-format accepts '1' or '2' currently. Format 2 exports not
- only the content of the image, but also its snapshots and other
- properties, such as image_order and features.
-
-:command:`import` [--export-format *format (1 or 2)*] [--image-format *format-id*] [--object-size *size-in-B/K/M*] [--stripe-unit *size-in-B/K/M* --stripe-count *num*] [--image-feature *feature-name*]... [--image-shared] *src-path* [*image-spec*]
- Create a new image and imports its data from path (use - for
- stdin). The import operation will try to create sparse rbd images
- if possible. For import from stdin, the sparsification unit is
- the data block size of the destination image (object size).
-
- The --stripe-unit and --stripe-count arguments are optional, but must be
- used together.
-
- The --export-format accepts '1' or '2' currently. Format 2 imports not
- only the content of the image, but also its snapshots and other
- properties, such as image_order and features.
-
-:command:`export-diff` [--from-snap *snap-name*] [--whole-object] (*image-spec* | *snap-spec*) *dest-path*
- Export an incremental diff for an image to dest path (use - for stdout). If
- an initial snapshot is specified, only changes since that snapshot are included; otherwise,
- any regions of the image that contain data are included. The end snapshot is specified
- using the standard --snap option or @snap syntax (see below). The image diff format includes
- metadata about image size changes, and the start and end snapshots. It efficiently represents
- discarded or 'zero' regions of the image.
-
-:command:`merge-diff` *first-diff-path* *second-diff-path* *merged-diff-path*
- Merge two continuous incremental diffs of an image into a single diff. The
- first diff's end snapshot must match the second diff's start snapshot.
- The first diff may be - for stdin, and the merged diff may be - for stdout,
- which enables multiple diff files to be merged using something like
- 'rbd merge-diff first second - | rbd merge-diff - third result'. Note that
- this command currently only supports source incremental diffs with
- stripe_count == 1.
-
-:command:`import-diff` *src-path* *image-spec*
- Import an incremental diff of an image and apply it to the current image. If the diff
- was generated relative to a start snapshot, we verify that snapshot already exists before
- continuing. If there was an end snapshot we verify it does not already exist before
- applying the changes, and create the snapshot when we are done.
-
-:command:`diff` [--from-snap *snap-name*] [--whole-object] *image-spec* | *snap-spec*
- Dump a list of byte extents in the image that have changed since the specified start
- snapshot, or since the image was created. Each output line includes the starting offset
- (in bytes), the length of the region (in bytes), and either 'zero' or 'data' to indicate
- whether the region is known to be zeros or may contain other data.
-
-:command:`cp` (*src-image-spec* | *src-snap-spec*) *dest-image-spec*
- Copy the content of a src-image into the newly created dest-image.
- dest-image will have the same size, object size, and image format as src-image.
-
-:command:`mv` *src-image-spec* *dest-image-spec*
- Rename an image. Note: rename across pools is not supported.
-
-:command:`image-meta list` *image-spec*
- Show metadata held on the image. The first column is the key
- and the second column is the value.
-
-:command:`image-meta get` *image-spec* *key*
- Get metadata value with the key.
-
-:command:`image-meta set` *image-spec* *key* *value*
- Set a metadata key to the given value. The pair will be displayed in
- `image-meta list`.
-
-:command:`image-meta remove` *image-spec* *key*
- Remove a metadata key along with its value.
-
-:command:`object-map rebuild` *image-spec* | *snap-spec*
- Rebuild an invalid object map for the specified image. An image snapshot can be
- specified to rebuild an invalid object map for a snapshot.
-
-:command:`snap ls` *image-spec*
- Dump the list of snapshots inside a specific image.
-
-:command:`snap create` *snap-spec*
- Create a new snapshot. The snapshot name must be specified.
-
-:command:`snap rollback` *snap-spec*
- Roll back image content to the snapshot. This will iterate through the
- entire set of blocks and update the data head content to the snapshotted
- version.
-
-:command:`snap rm` [--force] *snap-spec*
- Remove the specified snapshot.
-
-:command:`snap purge` *image-spec*
- Remove all snapshots from an image.
-
-:command:`snap protect` *snap-spec*
- Protect a snapshot from deletion, so that clones can be made of it
- (see `rbd clone`). Snapshots must be protected before clones are made;
- protection implies that there exist dependent cloned children that
- refer to this snapshot. `rbd clone` will fail on a nonprotected
- snapshot.
-
- This requires image format 2.
-
-:command:`snap unprotect` *snap-spec*
- Unprotect a snapshot from deletion (undo `snap protect`). If cloned
- children remain, `snap unprotect` fails. (Note that clones may exist
- in different pools than the parent snapshot.)
-
- This requires image format 2.
-
-:command:`snap limit set` [--limit] *limit* *image-spec*
- Set a limit for the number of snapshots allowed on an image.
-
-:command:`snap limit clear` *image-spec*
- Remove any previously set limit on the number of snapshots allowed on
- an image.
-
-:command:`map` [-o | --options *krbd-options* ] [--read-only] *image-spec* | *snap-spec*
- Map the specified image to a block device via the rbd kernel module.
-
-:command:`unmap` [-o | --options *krbd-options* ] *image-spec* | *snap-spec* | *device-path*
- Unmap the block device that was mapped via the rbd kernel module.
-
-:command:`showmapped`
- Show the rbd images that are mapped via the rbd kernel module.
-
-:command:`nbd map` [--device *device-path*] [--read-only] *image-spec* | *snap-spec*
- Map the specified image to a block device via the rbd-nbd tool.
-
-:command:`nbd unmap` *device-path*
- Unmap the block device that was mapped via the rbd-nbd tool.
-
-:command:`nbd list`
- Show the list of used nbd devices via the rbd-nbd tool.
-
-:command:`status` *image-spec*
- Show the status of the image, including which clients have it open.
-
-:command:`feature disable` *image-spec* *feature-name*...
- Disable the specified feature on the specified image. Multiple features can
- be specified.
-
-:command:`feature enable` *image-spec* *feature-name*...
- Enable the specified feature on the specified image. Multiple features can
- be specified.
-
-:command:`lock list` *image-spec*
- Show locks held on the image. The first column is the locker
- to use with the `lock remove` command.
-
-:command:`lock add` [--shared *lock-tag*] *image-spec* *lock-id*
- Lock an image. The lock-id is an arbitrary name for the user's
- convenience. By default, this is an exclusive lock, meaning it
- will fail if the image is already locked. The --shared option
- changes this behavior. Note that locking does not affect
- any operation other than adding a lock. It does not
- protect an image from being deleted.
-
-:command:`lock remove` *image-spec* *lock-id* *locker*
- Release a lock on an image. The lock id and locker are
- as output by lock ls.
-
-:command:`bench` --io-type <read | write> [--io-size *size-in-B/K/M/G/T*] [--io-threads *num-ios-in-flight*] [--io-total *size-in-B/K/M/G/T*] [--io-pattern seq | rand] *image-spec*
- Generate a series of IOs to the image and measure the IO throughput and
- latency. If no suffix is given, unit B is assumed for both --io-size and
- --io-total. Defaults are: --io-size 4096, --io-threads 16, --io-total 1G,
- --io-pattern seq.
-
-:command:`trash ls` [*pool-name*]
- List all entries from trash.
-
-:command:`trash mv` *image-spec*
- Move an image to the trash. Images, even ones actively in use by
- clones, can be moved to the trash and deleted at a later time.
-
-:command:`trash rm` *image-id*
- Delete an image from the trash. If the image's deferment time has not
- expired, it cannot be removed unless --force is used. An image that is
- actively in use by clones, or that has snapshots, cannot be removed.
-
-:command:`trash restore` *image-id*
- Restore an image from trash.
-
-Image and snap specs
-====================
-
-| *image-spec* is [*pool-name*]/*image-name*
-| *snap-spec* is [*pool-name*]/*image-name*\ @\ *snap-name*
-
-The default for *pool-name* is "rbd". If an image name contains a slash
-character ('/'), *pool-name* is required.
-
-You may specify each name individually, using --pool, --image and --snap
-options, but this is discouraged in favor of the above spec syntax.
-
-Striping
-========
-
-RBD images are striped over many objects, which are then stored by the
-Ceph distributed object store (RADOS). As a result, read and write
-requests for the image are distributed across many nodes in the
-cluster, generally preventing any single node from becoming a
-bottleneck when individual images get large or busy.
-
-The striping is controlled by three parameters:
-
-.. option:: object-size
-
- The size of the objects we stripe over is a power of two. It will be
- rounded up to the nearest power of two. The default object size is 4 MB,
- the smallest is 4K and the maximum is 32M.
-
-.. option:: stripe_unit
-
- Each [*stripe_unit*] contiguous bytes are stored adjacently in the same object, before we move on
- to the next object.
-
-.. option:: stripe_count
-
- After we write [*stripe_unit*] bytes to [*stripe_count*] objects, we loop back to the initial object
- and write another stripe, until the object reaches its maximum size. At that point,
- we move on to the next [*stripe_count*] objects.
-
-By default, [*stripe_unit*] is the same as the object size and [*stripe_count*] is 1. Specifying a different
-[*stripe_unit*] requires that the STRIPINGV2 feature be supported (added in Ceph v0.53) and format 2 images be
-used.
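-
-As a worked example, an image created with ``--object-size 4M
---stripe-unit 65536B --stripe-count 16`` writes 64 KB to each of 16
-objects in turn; after 16 * 64 KB = 1 MB the pattern loops back to the
-first object, until each object reaches 4 MB, at which point the next
-set of 16 objects is used.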
-
-
-Kernel rbd (krbd) options
-=========================
-
-Most of these options are useful mainly for debugging and benchmarking. The
-default values are set in the kernel and may therefore depend on the version of
-the running kernel.
-
-Per client instance `rbd map` options:
-
-* fsid=aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee - FSID that should be assumed by
- the client.
-
-* ip=a.b.c.d[:p] - IP and, optionally, port the client should use.
-
-* share - Enable sharing of client instances with other mappings (default).
-
-* noshare - Disable sharing of client instances with other mappings.
-
-* crc - Enable CRC32C checksumming for data writes (default).
-
-* nocrc - Disable CRC32C checksumming for data writes.
-
-* cephx_require_signatures - Require cephx message signing (since 3.19,
- default).
-
-* nocephx_require_signatures - Don't require cephx message signing (since
- 3.19).
-
-* tcp_nodelay - Disable Nagle's algorithm on client sockets (since 4.0,
- default).
-
-* notcp_nodelay - Enable Nagle's algorithm on client sockets (since 4.0).
-
-* cephx_sign_messages - Enable message signing (since 4.4, default).
-
-* nocephx_sign_messages - Disable message signing (since 4.4).
-
-* mount_timeout=x - A timeout on various steps in `rbd map` and `rbd unmap`
- sequences (default is 60 seconds). In particular, since 4.2 this can be used
- to ensure that `rbd unmap` eventually times out when there is no network
- connection to a cluster.
-
-* osdkeepalive=x - OSD keepalive timeout (default is 5 seconds).
-
-* osd_idle_ttl=x - OSD idle TTL (default is 60 seconds).
-
-Per mapping (block device) `rbd map` options:
-
-* rw - Map the image read-write (default).
-
-* ro - Map the image read-only. Equivalent to --read-only.
-
-* queue_depth=x - queue depth (since 4.2, default is 128 requests).
-
-* lock_on_read - Acquire exclusive lock on reads, in addition to writes and
- discards (since 4.9).
-
-* exclusive - Disable automatic exclusive lock transitions (since 4.12).
-
-`rbd unmap` options:
-
-* force - Force the unmapping of a block device that is open (since 4.9). The
- driver will wait for running requests to complete and then unmap; requests
- sent to the driver after initiating the unmap will be failed.
-
-
-Examples
-========
-
-To create a new rbd image that is 100 GB::
-
- rbd create mypool/myimage --size 102400
-
-To use a non-default object size (8 MB)::
-
- rbd create mypool/myimage --size 102400 --object-size 8M
-
-To delete an rbd image (be careful!)::
-
- rbd rm mypool/myimage
-
-To create a new snapshot::
-
- rbd snap create mypool/myimage@mysnap
-
-To create a copy-on-write clone of a protected snapshot::
-
- rbd clone mypool/myimage@mysnap otherpool/cloneimage
-
-To see which clones of a snapshot exist::
-
- rbd children mypool/myimage@mysnap
-
-To delete a snapshot::
-
- rbd snap rm mypool/myimage@mysnap
-
-To map an image via the kernel with cephx enabled::
-
- rbd map mypool/myimage --id admin --keyfile secretfile
-
-To map an image via the kernel using a cluster name other than the default *ceph*::
-
- rbd map mypool/myimage --cluster cluster-name
-
-To unmap an image::
-
- rbd unmap /dev/rbd0
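-
-To measure write throughput on an image with the bench command (using
-its default 4096-byte sequential IOs)::
-
- rbd bench --io-type write mypool/myimage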
-
-To create an image and a clone from it::
-
- rbd import --image-format 2 image mypool/parent
- rbd snap create mypool/parent@snap
- rbd snap protect mypool/parent@snap
- rbd clone mypool/parent@snap otherpool/child
-
-To create an image with a smaller stripe_unit (to better distribute small writes in some workloads)::
-
- rbd create mypool/myimage --size 102400 --stripe-unit 65536B --stripe-count 16
-
-To change an image from one image format to another, export it and then
-import it as the desired image format::
-
- rbd export mypool/myimage@snap /tmp/img
- rbd import --image-format 2 /tmp/img mypool/myimage2
-
-To lock an image for exclusive use::
-
- rbd lock add mypool/myimage mylockid
-
-To release a lock::
-
- rbd lock remove mypool/myimage mylockid client.2485
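-
-To attach a piece of metadata to an image and read it back::
-
- rbd image-meta set mypool/myimage mykey myvalue
- rbd image-meta get mypool/myimage mykey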
-
-To list images from trash::
-
- rbd trash ls mypool
-
-To move an image to the trash for deferred deletion (use *--delay* to set the delay time; default is 0)::
-
- rbd trash mv mypool/myimage
-
-To delete an image from trash (be careful!)::
-
- rbd trash rm mypool/myimage-id
-
-To force delete an image from trash (be careful!)::
-
- rbd trash rm mypool/myimage-id --force
-
-To restore an image from trash::
-
- rbd trash restore mypool/myimage-id
-
-To restore an image from trash and rename it::
-
- rbd trash restore mypool/myimage-id --image mynewimage
-
-
-Availability
-============
-
-**rbd** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
-the Ceph documentation at http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`ceph <ceph>`\(8),
-:doc:`rados <rados>`\(8)
diff --git a/src/ceph/doc/man/8/rbdmap.rst b/src/ceph/doc/man/8/rbdmap.rst
deleted file mode 100644
index ba8001f..0000000
--- a/src/ceph/doc/man/8/rbdmap.rst
+++ /dev/null
@@ -1,123 +0,0 @@
-:orphan:
-
-=========================================
- rbdmap -- map RBD devices at boot time
-=========================================
-
-.. program:: rbdmap
-
-Synopsis
-========
-
-| **rbdmap map**
-| **rbdmap unmap**
-
-
-Description
-===========
-
-**rbdmap** is a shell script that automates ``rbd map`` and ``rbd unmap``
-operations on one or more RBD (RADOS Block Device) images. While the script can be
-run manually by the system administrator at any time, the principal use case is
-automatic mapping/mounting of RBD images at boot time (and unmounting/unmapping
-at shutdown), as triggered by the init system (a systemd unit file,
-``rbdmap.service`` is included with the ceph-common package for this purpose).
-
-The script takes a single argument, which can be "map", "unmap", or
-"unmap-all".
-In either case, the script parses a configuration file (defaults to ``/etc/ceph/rbdmap``,
-but can be overridden via an environment variable ``RBDMAPFILE``). Each line
-of the configuration file corresponds to an RBD image which is to be mapped, or
-unmapped.
-
-The configuration file format is::
-
- IMAGESPEC RBDOPTS
-
-where ``IMAGESPEC`` should be specified as ``POOLNAME/IMAGENAME`` (the pool
-name, a forward slash, and the image name), or merely ``IMAGENAME``, in which
-case the ``POOLNAME`` defaults to "rbd". ``RBDOPTS`` is an optional list of
-parameters to be passed to the underlying ``rbd map`` command. These parameters
-and their values should be specified as a comma-separated string::
-
- PARAM1=VAL1,PARAM2=VAL2,...,PARAMN=VALN
-
-This will cause the script to issue an ``rbd map`` command like the following::
-
- rbd map POOLNAME/IMAGENAME --PARAM1 VAL1 --PARAM2 VAL2
-
-(See the ``rbd`` manpage for a full list of possible options.)
-
-When run as ``rbdmap map``, the script parses the configuration file, and for
-each RBD image specified attempts to first map the image (using the ``rbd map``
-command) and, second, to mount the image.
-
-When run as ``rbdmap unmap``, images listed in the configuration file will
-be unmounted and unmapped.
-
-``rbdmap unmap-all`` attempts to unmount and subsequently unmap all currently
-mapped RBD images, regardless of whether or not they are listed in the
-configuration file.
-
-If successful, the ``rbd map`` operation maps the image to a ``/dev/rbdX``
-device, at which point a udev rule is triggered to create a friendly device
-name symlink, ``/dev/rbd/POOLNAME/IMAGENAME``, pointing to the real mapped
-device.
-
-In order for mounting/unmounting to succeed, the friendly device name must
-have a corresponding entry in ``/etc/fstab``.
-
-When writing ``/etc/fstab`` entries for RBD images, it's a good idea to specify
-the "noauto" (or "nofail") mount option. This prevents the init system from
-trying to mount the device too early, before the device in question even
-exists. (Since ``rbdmap.service``
-executes a shell script, it is typically triggered quite late in the boot
-sequence.)
-
-
-Examples
-========
-
-Example ``/etc/ceph/rbdmap`` for two RBD images called "bar1" and "bar2", both
-in pool "foopool"::
-
- foopool/bar1 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
- foopool/bar2 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
-
-Each line in the file contains two strings: the image spec and the options to
-be passed to ``rbd map``. These two lines get transformed into the following
-commands::
-
- rbd map foopool/bar1 --id admin --keyring /etc/ceph/ceph.client.admin.keyring
- rbd map foopool/bar2 --id admin --keyring /etc/ceph/ceph.client.admin.keyring
-
-If the images had XFS filesystems on them, the corresponding ``/etc/fstab``
-entries might look like this::
-
- /dev/rbd/foopool/bar1 /mnt/bar1 xfs noauto 0 0
- /dev/rbd/foopool/bar2 /mnt/bar2 xfs noauto 0 0
-
-After creating the images and populating the ``/etc/ceph/rbdmap`` file, making
-the images get automatically mapped and mounted at boot is just a matter of
-enabling that unit::
-
- systemctl enable rbdmap.service
-
-
-Options
-=======
-
-None
-
-
-Availability
-============
-
-**rbdmap** is part of Ceph, a massively scalable, open-source, distributed
-storage system. Please refer to the Ceph documentation at
-http://ceph.com/docs for more information.
-
-
-See also
-========
-
-:doc:`rbd <rbd>`\(8),
diff --git a/src/ceph/doc/man/CMakeLists.txt b/src/ceph/doc/man/CMakeLists.txt
deleted file mode 100644
index e81631b..0000000
--- a/src/ceph/doc/man/CMakeLists.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-set(sphinx_input)
-set(sphinx_output)
-set(sphinx_output_dir ${CMAKE_BINARY_DIR}/doc/man)
-
-add_subdirectory(8)
-
-add_custom_command(
- OUTPUT ${sphinx_output}
- COMMAND ${SPHINX_BUILD} -b man -t man -d ${CMAKE_BINARY_DIR}/doc/doctrees -c ${CMAKE_SOURCE_DIR}/man ${CMAKE_CURRENT_SOURCE_DIR} ${sphinx_output_dir}
- DEPENDS ${sphinx_input})
-
-add_custom_target(
- manpages ALL
- DEPENDS ${sphinx_output}
- COMMENT "manpages building")