author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/nvdimm/dimm_devs.c
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. The kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page. During the rebase, one patch collided: "Force tick interrupt and get rid of softirq magic" (I70131fb85). The colliding hunks were dropped because their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/nvdimm/dimm_devs.c')
-rw-r--r--  kernel/drivers/nvdimm/dimm_devs.c  548
1 file changed, 548 insertions(+), 0 deletions(-)
diff --git a/kernel/drivers/nvdimm/dimm_devs.c b/kernel/drivers/nvdimm/dimm_devs.c
new file mode 100644
index 000000000..651b8d19d
--- /dev/null
+++ b/kernel/drivers/nvdimm/dimm_devs.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/vmalloc.h>
+#include <linux/device.h>
+#include <linux/ndctl.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include "nd-core.h"
+#include "label.h"
+#include "nd.h"
+
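+/* ida used to allocate the unique id in each dimm's "nmem%d" device name */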
+static DEFINE_IDA(dimm_ida);
+
+/*
+ * Retrieve the dimm handle and check whether this dimm supports
+ * get_config_data commands
+ */
+static int __validate_dimm(struct nvdimm_drvdata *ndd)
+{
+ struct nvdimm *nvdimm;
+
+ if (!ndd)
+ return -EINVAL;
+
+ nvdimm = to_nvdimm(ndd->dev);
+
+ if (!nvdimm->dsm_mask)
+ return -ENXIO;
+ if (!test_bit(ND_CMD_GET_CONFIG_DATA, nvdimm->dsm_mask))
+ return -ENXIO;
+
+ return 0;
+}
+
+static int validate_dimm(struct nvdimm_drvdata *ndd)
+{
+ int rc = __validate_dimm(ndd);
+
+ if (rc && ndd)
+ dev_dbg(ndd->dev, "%pf: %s error: %d\n",
+ __builtin_return_address(0), __func__, rc);
+ return rc;
+}
+
+/**
+ * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
+ * @ndd: dimm driver-data to initialize
+ */
+int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
+{
+ struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+ struct nvdimm_bus_descriptor *nd_desc;
+ int rc = validate_dimm(ndd);
+
+ if (rc)
+ return rc;
+
+ if (cmd->config_size)
+ return 0; /* already valid */
+
+ memset(cmd, 0, sizeof(*cmd));
+ nd_desc = nvdimm_bus->nd_desc;
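+ /* ask the bus provider to fill in config_size and max_xfer */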
+ return nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
+ ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd));
+}
+
+int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
+{
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+ struct nd_cmd_get_config_data_hdr *cmd;
+ struct nvdimm_bus_descriptor *nd_desc;
+ int rc = validate_dimm(ndd);
+ u32 max_cmd_size, config_size;
+ size_t offset;
+
+ if (rc)
+ return rc;
+
+ if (ndd->data)
+ return 0;
+
+ if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
+ || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
+ dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
+ ndd->nsarea.max_xfer, ndd->nsarea.config_size);
+ return -ENXIO;
+ }
+
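+ /*
+ * Prefer a physically contiguous buffer, but fall back to vmalloc()
+ * for large label areas; the release path frees with kvfree(), which
+ * handles either allocation.
+ */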
+ ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL);
+ if (!ndd->data)
+ ndd->data = vmalloc(ndd->nsarea.config_size);
+
+ if (!ndd->data)
+ return -ENOMEM;
+
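+ /* bound each transfer by PAGE_SIZE and the dimm's reported max_xfer */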
+ max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
+ cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ nd_desc = nvdimm_bus->nd_desc;
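+ /*
+ * The label area may exceed a single command payload, so read it in
+ * max_cmd_size chunks until config_size bytes have been copied.
+ */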
+ for (config_size = ndd->nsarea.config_size, offset = 0;
+ config_size; config_size -= cmd->in_length,
+ offset += cmd->in_length) {
+ cmd->in_length = min(config_size, max_cmd_size);
+ cmd->in_offset = offset;
+ rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
+ ND_CMD_GET_CONFIG_DATA, cmd,
+ cmd->in_length + sizeof(*cmd));
+ if (rc || cmd->status) {
+ rc = -ENXIO;
+ break;
+ }
+ memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
+ }
+ dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc);
+ kfree(cmd);
+
+ return rc;
+}
+
+int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
+ void *buf, size_t len)
+{
+ int rc = validate_dimm(ndd);
+ size_t max_cmd_size, buf_offset;
+ struct nd_cmd_set_config_hdr *cmd;
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+
+ if (rc)
+ return rc;
+
+ if (!ndd->data)
+ return -ENXIO;
+
+ if (offset + len > ndd->nsarea.config_size)
+ return -ENXIO;
+
+ max_cmd_size = min_t(u32, PAGE_SIZE, len);
+ max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
+ cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
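+ /*
+ * Mirror the read path: push the buffer out in chunks of at most
+ * max_cmd_size bytes, checking the trailing status word after each
+ * command.
+ */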
+ for (buf_offset = 0; len; len -= cmd->in_length,
+ buf_offset += cmd->in_length) {
+ size_t cmd_size;
+ u32 *status;
+
+ cmd->in_offset = offset + buf_offset;
+ cmd->in_length = min(max_cmd_size, len);
+ memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);
+
+ /* status is output in the last 4-bytes of the command buffer */
+ cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
+ status = ((void *) cmd) + cmd_size - sizeof(u32);
+
+ rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
+ ND_CMD_SET_CONFIG_DATA, cmd, cmd_size);
+ if (rc || *status) {
+ rc = rc ? rc : -ENXIO;
+ break;
+ }
+ }
+ kfree(cmd);
+
+ return rc;
+}
+
+static void nvdimm_release(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ ida_simple_remove(&dimm_ida, nvdimm->id);
+ kfree(nvdimm);
+}
+
+static struct device_type nvdimm_device_type = {
+ .name = "nvdimm",
+ .release = nvdimm_release,
+};
+
+bool is_nvdimm(struct device *dev)
+{
+ return dev->type == &nvdimm_device_type;
+}
+
+struct nvdimm *to_nvdimm(struct device *dev)
+{
+ struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);
+
+ WARN_ON(!is_nvdimm(dev));
+ return nvdimm;
+}
+EXPORT_SYMBOL_GPL(to_nvdimm);
+
+struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
+{
+ struct nd_region *nd_region = &ndbr->nd_region;
+ struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+
+ return nd_mapping->nvdimm;
+}
+EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);
+
+struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
+{
+ struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+ WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));
+
+ return dev_get_drvdata(&nvdimm->dev);
+}
+EXPORT_SYMBOL(to_ndd);
+
+void nvdimm_drvdata_release(struct kref *kref)
+{
+ struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
+ struct device *dev = ndd->dev;
+ struct resource *res, *_r;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ nvdimm_bus_lock(dev);
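+ /* drop all dpa reservations that were made on behalf of this dimm */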
+ for_each_dpa_resource_safe(ndd, res, _r)
+ nvdimm_free_dpa(ndd, res);
+ nvdimm_bus_unlock(dev);
+
+ kvfree(ndd->data);
+ kfree(ndd);
+ put_device(dev);
+}
+
+void get_ndd(struct nvdimm_drvdata *ndd)
+{
+ kref_get(&ndd->kref);
+}
+
+void put_ndd(struct nvdimm_drvdata *ndd)
+{
+ if (ndd)
+ kref_put(&ndd->kref, nvdimm_drvdata_release);
+}
+
+const char *nvdimm_name(struct nvdimm *nvdimm)
+{
+ return dev_name(&nvdimm->dev);
+}
+EXPORT_SYMBOL_GPL(nvdimm_name);
+
+void *nvdimm_provider_data(struct nvdimm *nvdimm)
+{
+ if (nvdimm)
+ return nvdimm->provider_data;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(nvdimm_provider_data);
+
+static ssize_t commands_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ int cmd, len = 0;
+
+ if (!nvdimm->dsm_mask)
+ return sprintf(buf, "\n");
+
+ for_each_set_bit(cmd, nvdimm->dsm_mask, BITS_PER_LONG)
+ len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
+ len += sprintf(buf + len, "\n");
+ return len;
+}
+static DEVICE_ATTR_RO(commands);
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ /*
+ * The state may be in the process of changing, userspace should
+ * quiesce probing if it wants a static answer
+ */
+ nvdimm_bus_lock(dev);
+ nvdimm_bus_unlock(dev);
+ return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
+ ? "active" : "idle");
+}
+static DEVICE_ATTR_RO(state);
+
+static ssize_t available_slots_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
+ ssize_t rc;
+ u32 nfree;
+
+ if (!ndd)
+ return -ENXIO;
+
+ nvdimm_bus_lock(dev);
+ nfree = nd_label_nfree(ndd);
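+ /*
+ * nfree is unsigned, so "nfree - 1 > nfree" only holds when nfree is
+ * zero and the decrement would wrap; otherwise one slot is held back
+ * from the reported count.
+ */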
+ if (nfree - 1 > nfree) {
+ dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
+ nfree = 0;
+ } else
+ nfree--;
+ rc = sprintf(buf, "%d\n", nfree);
+ nvdimm_bus_unlock(dev);
+ return rc;
+}
+static DEVICE_ATTR_RO(available_slots);
+
+static struct attribute *nvdimm_attributes[] = {
+ &dev_attr_state.attr,
+ &dev_attr_commands.attr,
+ &dev_attr_available_slots.attr,
+ NULL,
+};
+
+struct attribute_group nvdimm_attribute_group = {
+ .attrs = nvdimm_attributes,
+};
+EXPORT_SYMBOL_GPL(nvdimm_attribute_group);
+
+struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
+ const struct attribute_group **groups, unsigned long flags,
+ unsigned long *dsm_mask)
+{
+ struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
+ struct device *dev;
+
+ if (!nvdimm)
+ return NULL;
+
+ nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
+ if (nvdimm->id < 0) {
+ kfree(nvdimm);
+ return NULL;
+ }
+ nvdimm->provider_data = provider_data;
+ nvdimm->flags = flags;
+ nvdimm->dsm_mask = dsm_mask;
+ atomic_set(&nvdimm->busy, 0);
+ dev = &nvdimm->dev;
+ dev_set_name(dev, "nmem%d", nvdimm->id);
+ dev->parent = &nvdimm_bus->dev;
+ dev->type = &nvdimm_device_type;
+ dev->devt = MKDEV(nvdimm_major, nvdimm->id);
+ dev->groups = groups;
+ nd_device_register(dev);
+
+ return nvdimm;
+}
+EXPORT_SYMBOL_GPL(nvdimm_create);
+
+/**
+ * nd_blk_available_dpa - account the unused dpa of a BLK region
+ * @nd_mapping: container of dpa-resource-root + labels
+ *
+ * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges.
+ */
+resource_size_t nd_blk_available_dpa(struct nd_mapping *nd_mapping)
+{
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ resource_size_t map_end, busy = 0, available;
+ struct resource *res;
+
+ if (!ndd)
+ return 0;
+
+ map_end = nd_mapping->start + nd_mapping->size - 1;
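+ /*
+ * Sum the portion of each dpa resource that intersects this mapping;
+ * whatever is not busy remains available for BLK allocation.
+ */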
+ for_each_dpa_resource(ndd, res)
+ if (res->start >= nd_mapping->start && res->start < map_end) {
+ resource_size_t end = min(map_end, res->end);
+
+ busy += end - res->start + 1;
+ } else if (res->end >= nd_mapping->start
+ && res->end <= map_end) {
+ busy += res->end - nd_mapping->start + 1;
+ } else if (nd_mapping->start > res->start
+ && nd_mapping->start < res->end) {
+ /* total eclipse of the BLK region mapping */
+ busy += nd_mapping->size;
+ }
+
+ available = map_end - nd_mapping->start + 1;
+ if (busy < available)
+ return available - busy;
+ return 0;
+}
+
+/**
+ * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
+ * @nd_mapping: container of dpa-resource-root + labels
+ * @nd_region: constrain available space check to this reference region
+ * @overlap: calculate available space assuming this level of overlap
+ *
+ * Validate that a PMEM label, if present, aligns with the start of an
+ * interleave set and truncate the available size at the lowest BLK
+ * overlap point.
+ *
+ * The expectation is that this routine is called multiple times as it
+ * probes for the largest BLK encroachment for any single member DIMM of
+ * the interleave set. Once that value is determined the PMEM-limit for
+ * the set can be established.
+ */
+resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
+ struct nd_mapping *nd_mapping, resource_size_t *overlap)
+{
+ resource_size_t map_start, map_end, busy = 0, available, blk_start;
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct resource *res;
+ const char *reason;
+
+ if (!ndd)
+ return 0;
+
+ map_start = nd_mapping->start;
+ map_end = map_start + nd_mapping->size - 1;
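+ /*
+ * Assume the BLK encroachment, if any, starts *overlap bytes before
+ * the end of the mapping; the walk below can only move blk_start
+ * lower (toward map_start).
+ */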
+ blk_start = max(map_start, map_end + 1 - *overlap);
+ for_each_dpa_resource(ndd, res)
+ if (res->start >= map_start && res->start < map_end) {
+ if (strncmp(res->name, "blk", 3) == 0)
+ blk_start = min(blk_start, res->start);
+ else if (res->start != map_start) {
+ reason = "misaligned to iset";
+ goto err;
+ } else {
+ if (busy) {
+ reason = "duplicate overlapping PMEM reservations?";
+ goto err;
+ }
+ busy += resource_size(res);
+ continue;
+ }
+ } else if (res->end >= map_start && res->end <= map_end) {
+ if (strncmp(res->name, "blk", 3) == 0) {
+ /*
+ * If a BLK allocation overlaps the start of
+ * PMEM the entire interleave set may now only
+ * be used for BLK.
+ */
+ blk_start = map_start;
+ } else {
+ reason = "misaligned to iset";
+ goto err;
+ }
+ } else if (map_start > res->start && map_start < res->end) {
+ /* total eclipse of the mapping */
+ busy += nd_mapping->size;
+ blk_start = map_start;
+ }
+
+ *overlap = map_end + 1 - blk_start;
+ available = blk_start - map_start;
+ if (busy < available)
+ return available - busy;
+ return 0;
+
+ err:
+ /*
+ * Something is wrong, PMEM must align with the start of the
+ * interleave set, and there can only be one allocation per set.
+ */
+ nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
+ return 0;
+}
+
+void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
+{
+ WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
+ kfree(res->name);
+ __release_region(&ndd->dpa, res->start, resource_size(res));
+}
+
+struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
+ struct nd_label_id *label_id, resource_size_t start,
+ resource_size_t n)
+{
+ char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
+ struct resource *res;
+
+ if (!name)
+ return NULL;
+
+ WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
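+ /* the duplicated label_id doubles as the resource name for later lookups */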
+ res = __request_region(&ndd->dpa, start, n, name, 0);
+ if (!res)
+ kfree(name);
+ return res;
+}
+
+/**
+ * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
+ * @ndd: container of dpa-resource-root + labels
+ * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
+ */
+resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
+ struct nd_label_id *label_id)
+{
+ resource_size_t allocated = 0;
+ struct resource *res;
+
+ for_each_dpa_resource(ndd, res)
+ if (strcmp(res->name, label_id->id) == 0)
+ allocated += resource_size(res);
+
+ return allocated;
+}
+
+static int count_dimms(struct device *dev, void *c)
+{
+ int *count = c;
+
+ if (is_nvdimm(dev))
+ (*count)++;
+ return 0;
+}
+
+int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
+{
+ int count = 0;
+ /* Flush any possible dimm registration failures */
+ nd_synchronize();
+
+ device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
+ dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count);
+ if (count != dimm_count)
+ return -ENXIO;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);