Diffstat (limited to 'kernel/drivers/base')
-rw-r--r--  kernel/drivers/base/Makefile                        1
-rw-r--r--  kernel/drivers/base/base.h                          4
-rw-r--r--  kernel/drivers/base/bus.c                          31
-rw-r--r--  kernel/drivers/base/cacheinfo.c                    14
-rw-r--r--  kernel/drivers/base/class.c                         2
-rw-r--r--  kernel/drivers/base/core.c                        106
-rw-r--r--  kernel/drivers/base/cpu.c                          31
-rw-r--r--  kernel/drivers/base/dd.c                          193
-rw-r--r--  kernel/drivers/base/devres.c                       19
-rw-r--r--  kernel/drivers/base/dma-contiguous.c                4
-rw-r--r--  kernel/drivers/base/firmware_class.c               63
-rw-r--r--  kernel/drivers/base/memory.c                        4
-rw-r--r--  kernel/drivers/base/node.c                          6
-rw-r--r--  kernel/drivers/base/pinctrl.c                      15
-rw-r--r--  kernel/drivers/base/platform-msi.c                270
-rw-r--r--  kernel/drivers/base/platform.c                    102
-rw-r--r--  kernel/drivers/base/power/Makefile                  4
-rw-r--r--  kernel/drivers/base/power/clock_ops.c              53
-rw-r--r--  kernel/drivers/base/power/domain.c                830
-rw-r--r--  kernel/drivers/base/power/domain_governor.c        23
-rw-r--r--  kernel/drivers/base/power/generic_ops.c            23
-rw-r--r--  kernel/drivers/base/power/main.c                   16
-rw-r--r--  kernel/drivers/base/power/opp/Makefile              2
-rw-r--r--  kernel/drivers/base/power/opp/core.c (renamed from kernel/drivers/base/power/opp.c)  941
-rw-r--r--  kernel/drivers/base/power/opp/cpu.c               270
-rw-r--r--  kernel/drivers/base/power/opp/opp.h               146
-rw-r--r--  kernel/drivers/base/power/power.h                  50
-rw-r--r--  kernel/drivers/base/power/qos.c                    37
-rw-r--r--  kernel/drivers/base/power/runtime.c                 6
-rw-r--r--  kernel/drivers/base/power/sysfs.c                  11
-rw-r--r--  kernel/drivers/base/power/wakeirq.c               277
-rw-r--r--  kernel/drivers/base/power/wakeup.c                151
-rw-r--r--  kernel/drivers/base/property.c                    239
-rw-r--r--  kernel/drivers/base/regmap/internal.h              24
-rw-r--r--  kernel/drivers/base/regmap/regcache-lzo.c           4
-rw-r--r--  kernel/drivers/base/regmap/regcache.c              71
-rw-r--r--  kernel/drivers/base/regmap/regmap-ac97.c           41
-rw-r--r--  kernel/drivers/base/regmap/regmap-debugfs.c       127
-rw-r--r--  kernel/drivers/base/regmap/regmap-i2c.c            90
-rw-r--r--  kernel/drivers/base/regmap/regmap-irq.c            58
-rw-r--r--  kernel/drivers/base/regmap/regmap-mmio.c           52
-rw-r--r--  kernel/drivers/base/regmap/regmap-spi.c            41
-rw-r--r--  kernel/drivers/base/regmap/regmap-spmi.c           78
-rw-r--r--  kernel/drivers/base/regmap/regmap.c               434
-rw-r--r--  kernel/drivers/base/soc.c                          21
-rw-r--r--  kernel/drivers/base/topology.c                      2
46 files changed, 3584 insertions, 1403 deletions
diff --git a/kernel/drivers/base/Makefile b/kernel/drivers/base/Makefile
index 527d29170..6b2a84e7f 100644
--- a/kernel/drivers/base/Makefile
+++ b/kernel/drivers/base/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_REGMAP) += regmap/
obj-$(CONFIG_SOC_BUS) += soc.o
obj-$(CONFIG_PINCTRL) += pinctrl.o
obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
+obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/kernel/drivers/base/base.h b/kernel/drivers/base/base.h
index 251c5d30f..1782f3aa3 100644
--- a/kernel/drivers/base/base.h
+++ b/kernel/drivers/base/base.h
@@ -63,7 +63,7 @@ struct driver_private {
* binding of drivers which were unable to get all the resources needed by
* the device; typically because it depends on another driver getting
* probed first.
- * @device - pointer back to the struct class that this structure is
+ * @device - pointer back to the struct device that this structure is
* associated with.
*
* Nothing outside of the driver core should ever touch these fields.
@@ -116,6 +116,7 @@ static inline int driver_match_device(struct device_driver *drv,
{
return drv->bus->match ? drv->bus->match(dev, drv) : 1;
}
+extern bool driver_allows_async_probing(struct device_driver *drv);
extern int driver_add_groups(struct device_driver *drv,
const struct attribute_group **groups);
@@ -133,6 +134,7 @@ extern int devres_release_all(struct device *dev);
/* /sys/devices directory */
extern struct kset *devices_kset;
+extern void devices_kset_move_last(struct device *dev);
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
extern void module_add_driver(struct module *mod, struct device_driver *drv);
diff --git a/kernel/drivers/base/bus.c b/kernel/drivers/base/bus.c
index 79bc203f5..500592486 100644
--- a/kernel/drivers/base/bus.c
+++ b/kernel/drivers/base/bus.c
@@ -10,6 +10,7 @@
*
*/
+#include <linux/async.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -549,15 +550,12 @@ void bus_probe_device(struct device *dev)
{
struct bus_type *bus = dev->bus;
struct subsys_interface *sif;
- int ret;
if (!bus)
return;
- if (bus->p->drivers_autoprobe) {
- ret = device_attach(dev);
- WARN_ON(ret < 0);
- }
+ if (bus->p->drivers_autoprobe)
+ device_initial_probe(dev);
mutex_lock(&bus->p->mutex);
list_for_each_entry(sif, &bus->p->interfaces, node)
@@ -659,6 +657,17 @@ static ssize_t uevent_store(struct device_driver *drv, const char *buf,
}
static DRIVER_ATTR_WO(uevent);
+static void driver_attach_async(void *_drv, async_cookie_t cookie)
+{
+ struct device_driver *drv = _drv;
+ int ret;
+
+ ret = driver_attach(drv);
+
+ pr_debug("bus: '%s': driver %s async attach completed: %d\n",
+ drv->bus->name, drv->name, ret);
+}
+
/**
* bus_add_driver - Add a driver to the bus.
* @drv: driver.
@@ -691,9 +700,15 @@ int bus_add_driver(struct device_driver *drv)
klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
if (drv->bus->p->drivers_autoprobe) {
- error = driver_attach(drv);
- if (error)
- goto out_unregister;
+ if (driver_allows_async_probing(drv)) {
+ pr_debug("bus: '%s': probing driver %s asynchronously\n",
+ drv->bus->name, drv->name);
+ async_schedule(driver_attach_async, drv);
+ } else {
+ error = driver_attach(drv);
+ if (error)
+ goto out_unregister;
+ }
}
module_add_driver(drv->owner, drv);
diff --git a/kernel/drivers/base/cacheinfo.c b/kernel/drivers/base/cacheinfo.c
index df0c66cb7..e9fd32e91 100644
--- a/kernel/drivers/base/cacheinfo.c
+++ b/kernel/drivers/base/cacheinfo.c
@@ -148,7 +148,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
if (sibling == cpu) /* skip itself */
continue;
+
sib_cpu_ci = get_cpu_cacheinfo(sibling);
+ if (!sib_cpu_ci->info_list)
+ continue;
+
sib_leaf = sib_cpu_ci->info_list + index;
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
@@ -159,6 +163,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
static void free_cache_attributes(unsigned int cpu)
{
+ if (!per_cpu_cacheinfo(cpu))
+ return;
+
cache_shared_cpu_map_remove(cpu);
kfree(per_cpu_cacheinfo(cpu));
@@ -191,12 +198,12 @@ static int detect_cache_attributes(unsigned int cpu)
if (ret)
goto free_ci;
/*
- * For systems using DT for cache hierarcy, of_node and shared_cpu_map
+ * For systems using DT for cache hierarchy, of_node and shared_cpu_map
* will be set up here only if they are not populated already
*/
ret = cache_shared_cpu_map_setup(cpu);
if (ret) {
- pr_warn("Unable to detect cache hierarcy from DT for CPU %d\n",
+ pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
cpu);
goto free_ci;
}
@@ -514,8 +521,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DEAD:
cache_remove_dev(cpu);
- if (per_cpu_cacheinfo(cpu))
- free_cache_attributes(cpu);
+ free_cache_attributes(cpu);
break;
}
return notifier_from_errno(rc);
diff --git a/kernel/drivers/base/class.c b/kernel/drivers/base/class.c
index 6e810881e..71059e32b 100644
--- a/kernel/drivers/base/class.c
+++ b/kernel/drivers/base/class.c
@@ -406,7 +406,7 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
*
* Note, you will need to drop the reference with put_device() after use.
*
- * @fn is allowed to do anything including calling back into class
+ * @match is allowed to do anything including calling back into class
* code. There's no locking restriction.
*/
struct device *class_find_device(struct class *class, struct device *start,
diff --git a/kernel/drivers/base/core.c b/kernel/drivers/base/core.c
index 21d130385..b7d56c5ea 100644
--- a/kernel/drivers/base/core.c
+++ b/kernel/drivers/base/core.c
@@ -534,6 +534,52 @@ static DEVICE_ATTR_RO(dev);
struct kset *devices_kset;
/**
+ * devices_kset_move_before - Move device in the devices_kset's list.
+ * @deva: Device to move.
+ * @devb: Device @deva should come before.
+ */
+static void devices_kset_move_before(struct device *deva, struct device *devb)
+{
+ if (!devices_kset)
+ return;
+ pr_debug("devices_kset: Moving %s before %s\n",
+ dev_name(deva), dev_name(devb));
+ spin_lock(&devices_kset->list_lock);
+ list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
+ spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * devices_kset_move_after - Move device in the devices_kset's list.
+ * @deva: Device to move
+ * @devb: Device @deva should come after.
+ */
+static void devices_kset_move_after(struct device *deva, struct device *devb)
+{
+ if (!devices_kset)
+ return;
+ pr_debug("devices_kset: Moving %s after %s\n",
+ dev_name(deva), dev_name(devb));
+ spin_lock(&devices_kset->list_lock);
+ list_move(&deva->kobj.entry, &devb->kobj.entry);
+ spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * devices_kset_move_last - move the device to the end of devices_kset's list.
+ * @dev: device to move
+ */
+void devices_kset_move_last(struct device *dev)
+{
+ if (!devices_kset)
+ return;
+ pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
+ spin_lock(&devices_kset->list_lock);
+ list_move_tail(&dev->kobj.entry, &devices_kset->list);
+ spin_unlock(&devices_kset->list_lock);
+}
+
+/**
* device_create_file - create sysfs attribute file for device.
* @dev: device.
* @attr: device attribute descriptor.
@@ -662,6 +708,9 @@ void device_initialize(struct device *dev)
INIT_LIST_HEAD(&dev->devres_head);
device_pm_init(dev);
set_dev_node(dev, -1);
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ INIT_LIST_HEAD(&dev->msi_list);
+#endif
}
EXPORT_SYMBOL_GPL(device_initialize);
@@ -1017,7 +1066,7 @@ int device_add(struct device *dev)
dev->kobj.parent = kobj;
/* use parent numa_node */
- if (parent)
+ if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
set_dev_node(dev, dev_to_node(parent));
/* first, register with generic layer. */
@@ -1252,6 +1301,19 @@ void device_unregister(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_unregister);
+static struct device *prev_device(struct klist_iter *i)
+{
+ struct klist_node *n = klist_prev(i);
+ struct device *dev = NULL;
+ struct device_private *p;
+
+ if (n) {
+ p = to_device_private_parent(n);
+ dev = p->device;
+ }
+ return dev;
+}
+
static struct device *next_device(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
@@ -1303,12 +1365,11 @@ const char *device_get_devnode(struct device *dev,
return dev_name(dev);
/* replace '!' in the name with '/' */
- *tmp = kstrdup(dev_name(dev), GFP_KERNEL);
- if (!*tmp)
+ s = kstrdup(dev_name(dev), GFP_KERNEL);
+ if (!s)
return NULL;
- while ((s = strchr(*tmp, '!')))
- s[0] = '/';
- return *tmp;
+ strreplace(s, '!', '/');
+ return *tmp = s;
}
/**
@@ -1342,6 +1403,36 @@ int device_for_each_child(struct device *parent, void *data,
EXPORT_SYMBOL_GPL(device_for_each_child);
/**
+ * device_for_each_child_reverse - device child iterator in reversed order.
+ * @parent: parent struct device.
+ * @fn: function to be called for each device.
+ * @data: data for the callback.
+ *
+ * Iterate over @parent's child devices, and call @fn for each,
+ * passing it @data.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ */
+int device_for_each_child_reverse(struct device *parent, void *data,
+ int (*fn)(struct device *dev, void *data))
+{
+ struct klist_iter i;
+ struct device *child;
+ int error = 0;
+
+ if (!parent->p)
+ return 0;
+
+ klist_iter_init(&parent->p->klist_children, &i);
+ while ((child = prev_device(&i)) && !error)
+ error = fn(child, data);
+ klist_iter_exit(&i);
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
+
+/**
* device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
* @match: Callback function to check device
@@ -1924,12 +2015,15 @@ int device_move(struct device *dev, struct device *new_parent,
break;
case DPM_ORDER_DEV_AFTER_PARENT:
device_pm_move_after(dev, new_parent);
+ devices_kset_move_after(dev, new_parent);
break;
case DPM_ORDER_PARENT_BEFORE_DEV:
device_pm_move_before(new_parent, dev);
+ devices_kset_move_before(new_parent, dev);
break;
case DPM_ORDER_DEV_LAST:
device_pm_move_last(dev);
+ devices_kset_move_last(dev);
break;
}
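
[Editorial aside, not part of the patch] The new device_for_each_child_reverse() added above walks a parent's children from the most recently registered to the oldest, which suits teardown paths that should mirror probe order. A minimal sketch with a hypothetical callback name, using only the device core API:

#include <linux/device.h>

static int example_unbind_child(struct device *dev, void *data)
{
	/* children registered last are visited first */
	device_release_driver(dev);
	return 0;
}

static void example_unbind_children(struct device *parent)
{
	device_for_each_child_reverse(parent, NULL, example_unbind_child);
}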
diff --git a/kernel/drivers/base/cpu.c b/kernel/drivers/base/cpu.c
index f160ea44a..91bbb1959 100644
--- a/kernel/drivers/base/cpu.c
+++ b/kernel/drivers/base/cpu.c
@@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/cpufeature.h>
+#include <linux/tick.h>
#include "base.h"
@@ -40,7 +41,7 @@ static void change_cpu_under_node(struct cpu *cpu,
cpu->node_id = to_nid;
}
-static int __ref cpu_subsys_online(struct device *dev)
+static int cpu_subsys_online(struct device *dev)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
int cpuid = dev->id;
@@ -265,6 +266,30 @@ static ssize_t print_cpus_offline(struct device *dev,
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
+static ssize_t print_cpus_isolated(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int n = 0, len = PAGE_SIZE-2;
+
+ n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(cpu_isolated_map));
+
+ return n;
+}
+static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
+
+#ifdef CONFIG_NO_HZ_FULL
+static ssize_t print_cpus_nohz_full(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int n = 0, len = PAGE_SIZE-2;
+
+ n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
+
+ return n;
+}
+static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
+#endif
+
static void cpu_device_release(struct device *dev)
{
/*
@@ -431,6 +456,10 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[2].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
+ &dev_attr_isolated.attr,
+#ifdef CONFIG_NO_HZ_FULL
+ &dev_attr_nohz_full.attr,
+#endif
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
&dev_attr_modalias.attr,
#endif
diff --git a/kernel/drivers/base/dd.c b/kernel/drivers/base/dd.c
index e843fdbe4..a641cf3cc 100644
--- a/kernel/drivers/base/dd.c
+++ b/kernel/drivers/base/dd.c
@@ -141,7 +141,7 @@ static bool driver_deferred_probe_enable = false;
* more than one device is probing at the same time, it is possible for one
* probe to complete successfully while another is about to defer. If the second
* depends on the first, then it will get put on the pending list after the
- * trigger event has already occured and will be stuck there.
+ * trigger event has already occurred and will be stuck there.
*
* The atomic 'deferred_trigger_count' is used to determine if a successful
* trigger has occurred in the midst of probing a driver. If the trigger count
@@ -304,6 +304,14 @@ static int really_probe(struct device *dev, struct device_driver *drv)
goto probe_failed;
}
+ /*
+ * Ensure devices are listed in devices_kset in the correct order.
+ * It's important to move the device to the end of devices_kset
+ * before calling .probe, because probing can recurse and a parent
+ * device should always come first.
+ */
+ devices_kset_move_last(dev);
+
if (dev->bus->probe) {
ret = dev->bus->probe(dev);
if (ret)
@@ -314,6 +322,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
goto probe_failed;
}
+ pinctrl_init_done(dev);
+
if (dev->pm_domain && dev->pm_domain->sync)
dev->pm_domain->sync(dev);
@@ -399,6 +409,8 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe);
*
* This function must be called with @dev lock held. When called for a
* USB interface, @dev->parent lock must be held as well.
+ *
+ * If the device has a parent, runtime-resume the parent before driver probing.
*/
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
@@ -410,38 +422,126 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+
pm_runtime_barrier(dev);
ret = really_probe(dev, drv);
pm_request_idle(dev);
+ if (dev->parent)
+ pm_runtime_put(dev->parent);
+
return ret;
}
-static int __device_attach(struct device_driver *drv, void *data)
+bool driver_allows_async_probing(struct device_driver *drv)
{
- struct device *dev = data;
+ switch (drv->probe_type) {
+ case PROBE_PREFER_ASYNCHRONOUS:
+ return true;
+
+ case PROBE_FORCE_SYNCHRONOUS:
+ return false;
+
+ default:
+ if (module_requested_async_probing(drv->owner))
+ return true;
+
+ return false;
+ }
+}
+
+struct device_attach_data {
+ struct device *dev;
+
+ /*
+ * Indicates whether we are considering asynchronous probing or
+ * not. Only initial binding after device or driver registration
+ * (including deferral processing) may be done asynchronously, the
+ * rest is always synchronous, as we expect it is being done by
+ * request from userspace.
+ */
+ bool check_async;
+
+ /*
+ * Indicates if we are binding synchronous or asynchronous drivers.
+ * When asynchronous probing is enabled we'll execute 2 passes
+ * over drivers: first pass doing synchronous probing and second
+ * doing asynchronous probing (if synchronous did not succeed -
+ * most likely because there was no driver requiring synchronous
+ * probing - and we found asynchronous driver during first pass).
+ * The 2 passes are done because we can't shoot asynchronous
+ * probe for given device and driver from bus_for_each_drv() since
+ * driver pointer is not guaranteed to stay valid once
+ * bus_for_each_drv() iterates to the next driver on the bus.
+ */
+ bool want_async;
+
+ /*
+ * We'll set have_async to 'true' if, while scanning for matching
+ * driver, we'll encounter one that requests asynchronous probing.
+ */
+ bool have_async;
+};
+
+static int __device_attach_driver(struct device_driver *drv, void *_data)
+{
+ struct device_attach_data *data = _data;
+ struct device *dev = data->dev;
+ bool async_allowed;
+
+ /*
+ * Check if device has already been claimed. This may
+ * happen with driver loading, device discovery/registration,
+ * and deferred probe processing happens all at once with
+ * multiple threads.
+ */
+ if (dev->driver)
+ return -EBUSY;
if (!driver_match_device(drv, dev))
return 0;
+ async_allowed = driver_allows_async_probing(drv);
+
+ if (async_allowed)
+ data->have_async = true;
+
+ if (data->check_async && async_allowed != data->want_async)
+ return 0;
+
return driver_probe_device(drv, dev);
}
-/**
- * device_attach - try to attach device to a driver.
- * @dev: device.
- *
- * Walk the list of drivers that the bus has and call
- * driver_probe_device() for each pair. If a compatible
- * pair is found, break out and return.
- *
- * Returns 1 if the device was bound to a driver;
- * 0 if no matching driver was found;
- * -ENODEV if the device is not registered.
- *
- * When called for a USB interface, @dev->parent lock must be held.
- */
-int device_attach(struct device *dev)
+static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
+{
+ struct device *dev = _dev;
+ struct device_attach_data data = {
+ .dev = dev,
+ .check_async = true,
+ .want_async = true,
+ };
+
+ device_lock(dev);
+
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+
+ bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
+ dev_dbg(dev, "async probe completed\n");
+
+ pm_request_idle(dev);
+
+ if (dev->parent)
+ pm_runtime_put(dev->parent);
+
+ device_unlock(dev);
+
+ put_device(dev);
+}
+
+static int __device_attach(struct device *dev, bool allow_async)
{
int ret = 0;
@@ -459,15 +559,65 @@ int device_attach(struct device *dev)
ret = 0;
}
} else {
- ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
- pm_request_idle(dev);
+ struct device_attach_data data = {
+ .dev = dev,
+ .check_async = allow_async,
+ .want_async = false,
+ };
+
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+
+ ret = bus_for_each_drv(dev->bus, NULL, &data,
+ __device_attach_driver);
+ if (!ret && allow_async && data.have_async) {
+ /*
+ * If we could not find appropriate driver
+ * synchronously and we are allowed to do
+ * async probes and there are drivers that
+ * want to probe asynchronously, we'll
+ * try them.
+ */
+ dev_dbg(dev, "scheduling asynchronous probe\n");
+ get_device(dev);
+ async_schedule(__device_attach_async_helper, dev);
+ } else {
+ pm_request_idle(dev);
+ }
+
+ if (dev->parent)
+ pm_runtime_put(dev->parent);
}
out_unlock:
device_unlock(dev);
return ret;
}
+
+/**
+ * device_attach - try to attach device to a driver.
+ * @dev: device.
+ *
+ * Walk the list of drivers that the bus has and call
+ * driver_probe_device() for each pair. If a compatible
+ * pair is found, break out and return.
+ *
+ * Returns 1 if the device was bound to a driver;
+ * 0 if no matching driver was found;
+ * -ENODEV if the device is not registered.
+ *
+ * When called for a USB interface, @dev->parent lock must be held.
+ */
+int device_attach(struct device *dev)
+{
+ return __device_attach(dev, false);
+}
EXPORT_SYMBOL_GPL(device_attach);
+void device_initial_probe(struct device *dev)
+{
+ __device_attach(dev, true);
+}
+
static int __driver_attach(struct device *dev, void *data)
{
struct device_driver *drv = data;
@@ -522,6 +672,9 @@ static void __device_release_driver(struct device *dev)
drv = dev->driver;
if (drv) {
+ if (driver_allows_async_probing(drv))
+ async_synchronize_full();
+
pm_runtime_get_sync(dev);
driver_sysfs_remove(dev);
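
[Editorial aside, not part of the patch] driver_allows_async_probing() above keys off drv->probe_type, falling back to a module-level request. A driver that tolerates being probed outside of registration order can opt in as in this hypothetical sketch (driver name and probe body are made up):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* ordinary probe work; may now run from the async domain */
	return 0;
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
module_platform_driver(example_driver);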
diff --git a/kernel/drivers/base/devres.c b/kernel/drivers/base/devres.c
index 875464690..8fc654f08 100644
--- a/kernel/drivers/base/devres.c
+++ b/kernel/drivers/base/devres.c
@@ -82,12 +82,12 @@ static struct devres_group * node_to_group(struct devres_node *node)
}
static __always_inline struct devres * alloc_dr(dr_release_t release,
- size_t size, gfp_t gfp)
+ size_t size, gfp_t gfp, int nid)
{
size_t tot_size = sizeof(struct devres) + size;
struct devres *dr;
- dr = kmalloc_track_caller(tot_size, gfp);
+ dr = kmalloc_node_track_caller(tot_size, gfp, nid);
if (unlikely(!dr))
return NULL;
@@ -106,24 +106,25 @@ static void add_dr(struct device *dev, struct devres_node *node)
}
#ifdef CONFIG_DEBUG_DEVRES
-void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
+void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
const char *name)
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp | __GFP_ZERO);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
if (unlikely(!dr))
return NULL;
set_node_dbginfo(&dr->node, name, size);
return dr->data;
}
-EXPORT_SYMBOL_GPL(__devres_alloc);
+EXPORT_SYMBOL_GPL(__devres_alloc_node);
#else
/**
* devres_alloc - Allocate device resource data
* @release: Release function devres will be associated with
* @size: Allocation size
* @gfp: Allocation flags
+ * @nid: NUMA node
*
* Allocate devres of @size bytes. The allocated area is zeroed, then
* associated with @release. The returned pointer can be passed to
@@ -132,16 +133,16 @@ EXPORT_SYMBOL_GPL(__devres_alloc);
* RETURNS:
* Pointer to allocated devres on success, NULL on failure.
*/
-void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
+void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp | __GFP_ZERO);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
if (unlikely(!dr))
return NULL;
return dr->data;
}
-EXPORT_SYMBOL_GPL(devres_alloc);
+EXPORT_SYMBOL_GPL(devres_alloc_node);
#endif
/**
@@ -776,7 +777,7 @@ void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
struct devres *dr;
/* use raw alloc_dr for kmalloc caller tracing */
- dr = alloc_dr(devm_kmalloc_release, size, gfp);
+ dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
if (unlikely(!dr))
return NULL;
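
[Editorial aside, not part of the patch] A minimal sketch of the NUMA-aware devres entry point introduced above; the state structure and release callback are hypothetical, and the allocation is placed on the device's node via dev_to_node():

#include <linux/device.h>

struct example_state {
	int value;
};

static void example_release(struct device *dev, void *res)
{
	/* release any hardware state tracked by 'res' */
}

static struct example_state *example_alloc_state(struct device *dev)
{
	struct example_state *st;

	st = devres_alloc_node(example_release, sizeof(*st), GFP_KERNEL,
			       dev_to_node(dev));
	if (!st)
		return NULL;

	devres_add(dev, st);
	return st;
}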
diff --git a/kernel/drivers/base/dma-contiguous.c b/kernel/drivers/base/dma-contiguous.c
index 950fff9ce..e167a1e1b 100644
--- a/kernel/drivers/base/dma-contiguous.c
+++ b/kernel/drivers/base/dma-contiguous.c
@@ -46,7 +46,7 @@ struct cma *dma_contiguous_default_area;
* Users, who want to set the size of global CMA area for their system
* should use cma= kernel parameter.
*/
-static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;
@@ -187,7 +187,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
* global one. Requires architecture specific dev_get_cma_area() helper
* function.
*/
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
unsigned int align)
{
if (align > CONFIG_CMA_ALIGNMENT)
diff --git a/kernel/drivers/base/firmware_class.c b/kernel/drivers/base/firmware_class.c
index 4d1d9de4f..8524450e7 100644
--- a/kernel/drivers/base/firmware_class.c
+++ b/kernel/drivers/base/firmware_class.c
@@ -150,17 +150,17 @@ struct firmware_buf {
int page_array_size;
struct list_head pending_list;
#endif
- char fw_id[];
+ const char *fw_id;
};
struct fw_cache_entry {
struct list_head list;
- char name[];
+ const char *name;
};
struct fw_name_devm {
unsigned long magic;
- char name[];
+ const char *name;
};
#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
@@ -181,13 +181,17 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
{
struct firmware_buf *buf;
- buf = kzalloc(sizeof(*buf) + strlen(fw_name) + 1, GFP_ATOMIC);
-
+ buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
if (!buf)
- return buf;
+ return NULL;
+
+ buf->fw_id = kstrdup_const(fw_name, GFP_ATOMIC);
+ if (!buf->fw_id) {
+ kfree(buf);
+ return NULL;
+ }
kref_init(&buf->ref);
- strcpy(buf->fw_id, fw_name);
buf->fwc = fwc;
init_completion(&buf->completion);
#ifdef CONFIG_FW_LOADER_USER_HELPER
@@ -257,6 +261,7 @@ static void __fw_free_buf(struct kref *ref)
} else
#endif
vfree(buf->data);
+ kfree_const(buf->fw_id);
kfree(buf);
}
@@ -320,9 +325,13 @@ fail:
static int fw_get_filesystem_firmware(struct device *device,
struct firmware_buf *buf)
{
- int i;
+ int i, len;
int rc = -ENOENT;
- char *path = __getname();
+ char *path;
+
+ path = __getname();
+ if (!path)
+ return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
struct file *file;
@@ -331,7 +340,12 @@ static int fw_get_filesystem_firmware(struct device *device,
if (!fw_path[i][0])
continue;
- snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);
+ len = snprintf(path, PATH_MAX, "%s/%s",
+ fw_path[i], buf->fw_id);
+ if (len >= PATH_MAX) {
+ rc = -ENAMETOOLONG;
+ break;
+ }
file = filp_open(path, O_RDONLY, 0);
if (IS_ERR(file))
@@ -392,6 +406,7 @@ static void fw_name_devm_release(struct device *dev, void *res)
if (fwn->magic == (unsigned long)&fw_cache)
pr_debug("%s: fw_name-%s devm-%p released\n",
__func__, fwn->name, res);
+ kfree_const(fwn->name);
}
static int fw_devm_match(struct device *dev, void *res,
@@ -422,13 +437,17 @@ static int fw_add_devm_name(struct device *dev, const char *name)
if (fwn)
return 1;
- fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
- strlen(name) + 1, GFP_KERNEL);
+ fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
+ GFP_KERNEL);
if (!fwn)
return -ENOMEM;
+ fwn->name = kstrdup_const(name, GFP_KERNEL);
+ if (!fwn->name) {
+ devres_free(fwn);
+ return -ENOMEM;
+ }
fwn->magic = (unsigned long)&fw_cache;
- strcpy(fwn->name, name);
devres_add(dev, fwn);
return 0;
@@ -1257,6 +1276,7 @@ static void request_firmware_work_func(struct work_struct *work)
put_device(fw_work->device); /* taken in request_firmware_nowait() */
module_put(fw_work->module);
+ kfree_const(fw_work->name);
kfree(fw_work);
}
@@ -1296,7 +1316,11 @@ request_firmware_nowait(
return -ENOMEM;
fw_work->module = module;
- fw_work->name = name;
+ fw_work->name = kstrdup_const(name, gfp);
+ if (!fw_work->name) {
+ kfree(fw_work);
+ return -ENOMEM;
+ }
fw_work->device = device;
fw_work->context = context;
fw_work->cont = cont;
@@ -1304,6 +1328,7 @@ request_firmware_nowait(
(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
if (!try_module_get(module)) {
+ kfree_const(fw_work->name);
kfree(fw_work);
return -EFAULT;
}
@@ -1394,11 +1419,16 @@ static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
struct fw_cache_entry *fce;
- fce = kzalloc(sizeof(*fce) + strlen(name) + 1, GFP_ATOMIC);
+ fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
if (!fce)
goto exit;
- strcpy(fce->name, name);
+ fce->name = kstrdup_const(name, GFP_ATOMIC);
+ if (!fce->name) {
+ kfree(fce);
+ fce = NULL;
+ goto exit;
+ }
exit:
return fce;
}
@@ -1438,6 +1468,7 @@ found:
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
+ kfree_const(fce->name);
kfree(fce);
}
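
[Editorial aside, not part of the patch] The string handling above follows a common pattern: replace an embedded char[] with a const char * duplicated by kstrdup_const(), which shares strings living in .rodata instead of copying them, and release it with kfree_const(). A stand-alone sketch of that pattern with a made-up entry type:

#include <linux/slab.h>
#include <linux/string.h>

struct example_entry {
	const char *name;
};

static struct example_entry *example_entry_alloc(const char *name)
{
	struct example_entry *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return NULL;

	e->name = kstrdup_const(name, GFP_KERNEL);
	if (!e->name) {
		kfree(e);
		return NULL;
	}
	return e;
}

static void example_entry_free(struct example_entry *e)
{
	kfree_const(e->name);
	kfree(e);
}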
diff --git a/kernel/drivers/base/memory.c b/kernel/drivers/base/memory.c
index 2804aed3f..25425d3f2 100644
--- a/kernel/drivers/base/memory.c
+++ b/kernel/drivers/base/memory.c
@@ -303,6 +303,10 @@ static int memory_subsys_offline(struct device *dev)
if (mem->state == MEM_OFFLINE)
return 0;
+ /* Can't offline block with non-present sections */
+ if (mem->section_count != sections_per_block)
+ return -EINVAL;
+
return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
diff --git a/kernel/drivers/base/node.c b/kernel/drivers/base/node.c
index b10479c87..560751bad 100644
--- a/kernel/drivers/base/node.c
+++ b/kernel/drivers/base/node.c
@@ -359,12 +359,16 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#define page_initialized(page) (page->lru.next)
-static int get_nid_for_pfn(unsigned long pfn)
+static int __init_refok get_nid_for_pfn(unsigned long pfn)
{
struct page *page;
if (!pfn_valid_within(pfn))
return -1;
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+ if (system_state == SYSTEM_BOOTING)
+ return early_pfn_to_nid(pfn);
+#endif
page = pfn_to_page(pfn);
if (!page_initialized(page))
return -1;
diff --git a/kernel/drivers/base/pinctrl.c b/kernel/drivers/base/pinctrl.c
index 5fb74b438..076297592 100644
--- a/kernel/drivers/base/pinctrl.c
+++ b/kernel/drivers/base/pinctrl.c
@@ -42,9 +42,20 @@ int pinctrl_bind_pins(struct device *dev)
goto cleanup_get;
}
- ret = pinctrl_select_state(dev->pins->p, dev->pins->default_state);
+ dev->pins->init_state = pinctrl_lookup_state(dev->pins->p,
+ PINCTRL_STATE_INIT);
+ if (IS_ERR(dev->pins->init_state)) {
+ /* Not supplying this state is perfectly legal */
+ dev_dbg(dev, "no init pinctrl state\n");
+
+ ret = pinctrl_select_state(dev->pins->p,
+ dev->pins->default_state);
+ } else {
+ ret = pinctrl_select_state(dev->pins->p, dev->pins->init_state);
+ }
+
if (ret) {
- dev_dbg(dev, "failed to activate default pinctrl state\n");
+ dev_dbg(dev, "failed to activate initial pinctrl state\n");
goto cleanup_get;
}
diff --git a/kernel/drivers/base/platform-msi.c b/kernel/drivers/base/platform-msi.c
new file mode 100644
index 000000000..5df4575b5
--- /dev/null
+++ b/kernel/drivers/base/platform-msi.c
@@ -0,0 +1,270 @@
+/*
+ * MSI framework for platform devices
+ *
+ * Copyright (C) 2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/slab.h>
+
+#define DEV_ID_SHIFT 24
+
+/*
+ * Internal data structure containing a (made up, but unique) devid
+ * and the callback to write the MSI message.
+ */
+struct platform_msi_priv_data {
+ irq_write_msi_msg_t write_msg;
+ int devid;
+};
+
+/* The devid allocator */
+static DEFINE_IDA(platform_msi_devid_ida);
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+/*
+ * Convert an msi_desc to a globaly unique identifier (per-device
+ * devid + msi_desc position in the msi_list).
+ */
+static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
+{
+ u32 devid;
+
+ devid = desc->platform.msi_priv_data->devid;
+
+ return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
+}
+
+static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = platform_msi_calc_hwirq(desc);
+}
+
+static int platform_msi_init(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ unsigned int virq, irq_hw_number_t hwirq,
+ msi_alloc_info_t *arg)
+{
+ return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ info->chip, info->chip_data);
+}
+#else
+#define platform_msi_set_desc NULL
+#define platform_msi_init NULL
+#endif
+
+static void platform_msi_update_dom_ops(struct msi_domain_info *info)
+{
+ struct msi_domain_ops *ops = info->ops;
+
+ BUG_ON(!ops);
+
+ if (ops->msi_init == NULL)
+ ops->msi_init = platform_msi_init;
+ if (ops->set_desc == NULL)
+ ops->set_desc = platform_msi_set_desc;
+}
+
+static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
+ struct platform_msi_priv_data *priv_data;
+
+ priv_data = desc->platform.msi_priv_data;
+
+ priv_data->write_msg(desc, msg);
+}
+
+static void platform_msi_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ BUG_ON(!chip);
+ if (!chip->irq_mask)
+ chip->irq_mask = irq_chip_mask_parent;
+ if (!chip->irq_unmask)
+ chip->irq_unmask = irq_chip_unmask_parent;
+ if (!chip->irq_eoi)
+ chip->irq_eoi = irq_chip_eoi_parent;
+ if (!chip->irq_set_affinity)
+ chip->irq_set_affinity = msi_domain_set_affinity;
+ if (!chip->irq_write_msi_msg)
+ chip->irq_write_msi_msg = platform_msi_write_msg;
+}
+
+static void platform_msi_free_descs(struct device *dev)
+{
+ struct msi_desc *desc, *tmp;
+
+ list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
+ list_del(&desc->list);
+ free_msi_entry(desc);
+ }
+}
+
+static int platform_msi_alloc_descs(struct device *dev, int nvec,
+ struct platform_msi_priv_data *data)
+
+{
+ int i;
+
+ for (i = 0; i < nvec; i++) {
+ struct msi_desc *desc;
+
+ desc = alloc_msi_entry(dev);
+ if (!desc)
+ break;
+
+ desc->platform.msi_priv_data = data;
+ desc->platform.msi_index = i;
+ desc->nvec_used = 1;
+
+ list_add_tail(&desc->list, dev_to_msi_list(dev));
+ }
+
+ if (i != nvec) {
+ /* Clean up the mess */
+ platform_msi_free_descs(dev);
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
+ * @fwnode: Optional fwnode of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a platform MSI
+ * interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+ platform_msi_update_dom_ops(info);
+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+ platform_msi_update_chip_ops(info);
+
+ domain = msi_create_irq_domain(fwnode, info, parent);
+ if (domain)
+ domain->bus_token = DOMAIN_BUS_PLATFORM_MSI;
+
+ return domain;
+}
+
+/**
+ * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
+ * @dev: The device for which to allocate interrupts
+ * @nvec: The number of interrupts to allocate
+ * @write_msi_msg: Callback to write an interrupt message for @dev
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
+{
+ struct platform_msi_priv_data *priv_data;
+ int err;
+
+ /*
+ * Limit the number of interrupts to 256 per device. Should we
+ * need to bump this up, DEV_ID_SHIFT should be adjusted
+ * accordingly (which would impact the max number of MSI
+ * capable devices).
+ */
+ if (!dev->msi_domain || !write_msi_msg || !nvec ||
+ nvec > (1 << (32 - DEV_ID_SHIFT)))
+ return -EINVAL;
+
+ if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
+ dev_err(dev, "Incompatible msi_domain, giving up\n");
+ return -EINVAL;
+ }
+
+ /* Already had a helping of MSI? Greed... */
+ if (!list_empty(dev_to_msi_list(dev)))
+ return -EBUSY;
+
+ priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data)
+ return -ENOMEM;
+
+ priv_data->devid = ida_simple_get(&platform_msi_devid_ida,
+ 0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
+ if (priv_data->devid < 0) {
+ err = priv_data->devid;
+ goto out_free_data;
+ }
+
+ priv_data->write_msg = write_msi_msg;
+
+ err = platform_msi_alloc_descs(dev, nvec, priv_data);
+ if (err)
+ goto out_free_id;
+
+ err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
+ if (err)
+ goto out_free_desc;
+
+ return 0;
+
+out_free_desc:
+ platform_msi_free_descs(dev);
+out_free_id:
+ ida_simple_remove(&platform_msi_devid_ida, priv_data->devid);
+out_free_data:
+ kfree(priv_data);
+
+ return err;
+}
+
+/**
+ * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
+ * @dev: The device for which to free interrupts
+ */
+void platform_msi_domain_free_irqs(struct device *dev)
+{
+ struct msi_desc *desc;
+
+ desc = first_msi_entry(dev);
+ if (desc) {
+ struct platform_msi_priv_data *data;
+
+ data = desc->platform.msi_priv_data;
+
+ ida_simple_remove(&platform_msi_devid_ida, data->devid);
+ kfree(data);
+ }
+
+ msi_domain_free_irqs(dev->msi_domain, dev);
+ platform_msi_free_descs(dev);
+}
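
[Editorial aside, not part of the patch] A hypothetical consumer of the new platform MSI layer might look roughly like the sketch below: the driver supplies a write_msi_msg callback that programs the doorbell address and data into its hardware, then allocates nvec interrupts from dev->msi_domain. Everything except the platform_msi_* calls is made up.

#include <linux/device.h>
#include <linux/msi.h>

static void example_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* program msg->address_hi/address_lo and msg->data into the device */
}

static int example_setup_msis(struct device *dev, unsigned int nvec)
{
	int ret;

	ret = platform_msi_domain_alloc_irqs(dev, nvec, example_write_msi_msg);
	if (ret)
		return ret;

	/* request_irq() can now be called on each descriptor's Linux IRQ */
	return 0;
}

static void example_teardown_msis(struct device *dev)
{
	platform_msi_domain_free_irqs(dev);
}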
diff --git a/kernel/drivers/base/platform.c b/kernel/drivers/base/platform.c
index 7403de948..176b59f5b 100644
--- a/kernel/drivers/base/platform.c
+++ b/kernel/drivers/base/platform.c
@@ -514,9 +514,14 @@ static int platform_drv_probe(struct device *_dev)
ret = dev_pm_domain_attach(_dev, true);
if (ret != -EPROBE_DEFER) {
- ret = drv->probe(dev);
- if (ret)
- dev_pm_domain_detach(_dev, true);
+ if (drv->probe) {
+ ret = drv->probe(dev);
+ if (ret)
+ dev_pm_domain_detach(_dev, true);
+ } else {
+ /* don't fail if just dev_pm_domain_attach failed */
+ ret = 0;
+ }
}
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
@@ -536,9 +541,10 @@ static int platform_drv_remove(struct device *_dev)
{
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
- int ret;
+ int ret = 0;
- ret = drv->remove(dev);
+ if (drv->remove)
+ ret = drv->remove(dev);
dev_pm_domain_detach(_dev, true);
return ret;
@@ -549,7 +555,8 @@ static void platform_drv_shutdown(struct device *_dev)
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
- drv->shutdown(dev);
+ if (drv->shutdown)
+ drv->shutdown(dev);
dev_pm_domain_detach(_dev, true);
}
@@ -563,12 +570,9 @@ int __platform_driver_register(struct platform_driver *drv,
{
drv->driver.owner = owner;
drv->driver.bus = &platform_bus_type;
- if (drv->probe)
- drv->driver.probe = platform_drv_probe;
- if (drv->remove)
- drv->driver.remove = platform_drv_remove;
- if (drv->shutdown)
- drv->driver.shutdown = platform_drv_shutdown;
+ drv->driver.probe = platform_drv_probe;
+ drv->driver.remove = platform_drv_remove;
+ drv->driver.shutdown = platform_drv_shutdown;
return driver_register(&drv->driver);
}
@@ -609,6 +613,19 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv,
{
int retval, code;
+ if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
+ pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
+ drv->driver.name, __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * We have to run our probes synchronously because we check if
+ * we find any devices to bind to and exit with error if there
+ * are any.
+ */
+ drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
+
/*
* Prevent driver from requesting probe deferral to avoid further
* futile probe attempts.
@@ -698,6 +715,67 @@ err_out:
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);
+/**
+ * __platform_register_drivers - register an array of platform drivers
+ * @drivers: an array of drivers to register
+ * @count: the number of drivers to register
+ * @owner: module owning the drivers
+ *
+ * Registers platform drivers specified by an array. On failure to register a
+ * driver, all previously registered drivers will be unregistered. Callers of
+ * this API should use platform_unregister_drivers() to unregister drivers in
+ * the reverse order.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int __platform_register_drivers(struct platform_driver * const *drivers,
+ unsigned int count, struct module *owner)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < count; i++) {
+ pr_debug("registering platform driver %ps\n", drivers[i]);
+
+ err = __platform_driver_register(drivers[i], owner);
+ if (err < 0) {
+ pr_err("failed to register platform driver %ps: %d\n",
+ drivers[i], err);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ while (i--) {
+ pr_debug("unregistering platform driver %ps\n", drivers[i]);
+ platform_driver_unregister(drivers[i]);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(__platform_register_drivers);
+
+/**
+ * platform_unregister_drivers - unregister an array of platform drivers
+ * @drivers: an array of drivers to unregister
+ * @count: the number of drivers to unregister
+ *
+ * Unregisters platform drivers specified by an array. This is typically used
+ * to complement an earlier call to platform_register_drivers(). Drivers are
+ * unregistered in the reverse order in which they were registered.
+ */
+void platform_unregister_drivers(struct platform_driver * const *drivers,
+ unsigned int count)
+{
+ while (count--) {
+ pr_debug("unregistering platform driver %ps\n", drivers[count]);
+ platform_driver_unregister(drivers[count]);
+ }
+}
+EXPORT_SYMBOL_GPL(platform_unregister_drivers);
+
/* modalias support enables more hands-off userspace setup:
* (a) environment variable lets new-style hotplug events work once system is
* fully running: "modprobe $MODALIAS"
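
[Editorial aside, not part of the patch] Usage of the new bulk registration helpers is straightforward; a hedged sketch with made-up drivers, assuming platform_register_drivers() is the THIS_MODULE wrapper around __platform_register_drivers():

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver example_foo_driver = {
	.driver = { .name = "example-foo" },
};

static struct platform_driver example_bar_driver = {
	.driver = { .name = "example-bar" },
};

static struct platform_driver * const example_drivers[] = {
	&example_foo_driver,
	&example_bar_driver,
};

static int __init example_init(void)
{
	return platform_register_drivers(example_drivers,
					 ARRAY_SIZE(example_drivers));
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_unregister_drivers(example_drivers,
				    ARRAY_SIZE(example_drivers));
}
module_exit(example_exit);

On failure, __platform_register_drivers() unwinds the drivers it already registered, so callers only need the matching unregister call in the exit path.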
diff --git a/kernel/drivers/base/power/Makefile b/kernel/drivers/base/power/Makefile
index 1cb854459..5998c5328 100644
--- a/kernel/drivers/base/power/Makefile
+++ b/kernel/drivers/base/power/Makefile
@@ -1,7 +1,7 @@
-obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o
+obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
-obj-$(CONFIG_PM_OPP) += opp.o
+obj-$(CONFIG_PM_OPP) += opp/
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
diff --git a/kernel/drivers/base/power/clock_ops.c b/kernel/drivers/base/power/clock_ops.c
index ac3c07db9..60ee5591e 100644
--- a/kernel/drivers/base/power/clock_ops.c
+++ b/kernel/drivers/base/power/clock_ops.c
@@ -15,8 +15,9 @@
#include <linux/clkdev.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/pm_runtime.h>
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_CLK
enum pce_status {
PCE_STATUS_NONE = 0,
@@ -65,7 +66,8 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
} else {
clk_prepare(ce->clk);
ce->status = PCE_STATUS_ACQUIRED;
- dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+ dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
+ ce->clk, ce->con_id);
}
}
@@ -91,7 +93,7 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
return -ENOMEM;
}
} else {
- if (IS_ERR(clk) || !__clk_get(clk)) {
+ if (IS_ERR(clk)) {
kfree(ce);
return -ENOENT;
}
@@ -125,7 +127,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
* @clk: Clock pointer
*
* Add the clock to the list of clocks used for the power management of @dev.
- * It will increment refcount on clock pointer, use clk_put() on it when done.
+ * The power-management code will take control of the clock reference, so
+ * callers should not call clk_put() on @clk after this function successfully
+ * returned.
*/
int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
@@ -365,7 +369,44 @@ static int pm_clk_notify(struct notifier_block *nb,
return 0;
}
-#else /* !CONFIG_PM */
+int pm_clk_runtime_suspend(struct device *dev)
+{
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ ret = pm_generic_runtime_suspend(dev);
+ if (ret) {
+ dev_err(dev, "failed to suspend device\n");
+ return ret;
+ }
+
+ ret = pm_clk_suspend(dev);
+ if (ret) {
+ dev_err(dev, "failed to suspend clock\n");
+ pm_generic_runtime_resume(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+int pm_clk_runtime_resume(struct device *dev)
+{
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ ret = pm_clk_resume(dev);
+ if (ret) {
+ dev_err(dev, "failed to resume clock\n");
+ return ret;
+ }
+
+ return pm_generic_runtime_resume(dev);
+}
+
+#else /* !CONFIG_PM_CLK */
/**
* enable_clock - Enable a device clock.
@@ -445,7 +486,7 @@ static int pm_clk_notify(struct notifier_block *nb,
return 0;
}
-#endif /* !CONFIG_PM */
+#endif /* !CONFIG_PM_CLK */
/**
* pm_clk_add_notifier - Add bus type notifier for power management clocks.
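
[Editorial aside, not part of the patch] The new pm_clk_runtime_suspend()/pm_clk_runtime_resume() helpers bundle the generic runtime callbacks with the PM-clock suspend and resume, so a bus or PM domain that manages its devices purely through clocks can use them directly. A minimal sketch, assuming CONFIG_PM_CLK and a hypothetical domain whose devices have already been set up with pm_clk_create()/pm_clk_add():

#include <linux/pm.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>

static struct dev_pm_domain example_clk_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(pm_clk_runtime_suspend,
				   pm_clk_runtime_resume, NULL)
		/* system sleep ops omitted for brevity */
	},
};

Devices attached to such a domain (dev->pm_domain = &example_clk_pm_domain) then have their clocks gated and ungated automatically across runtime PM transitions.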
diff --git a/kernel/drivers/base/power/domain.c b/kernel/drivers/base/power/domain.c
index 2327613d4..65f50eccd 100644
--- a/kernel/drivers/base/power/domain.c
+++ b/kernel/drivers/base/power/domain.c
@@ -6,6 +6,7 @@
* This file is released under the GPLv2.
*/
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
@@ -19,6 +20,8 @@
#include <linux/suspend.h>
#include <linux/export.h>
+#define GENPD_RETRY_MAX_MS 250 /* Approximate */
+
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
({ \
type (*__routine)(struct device *__d); \
@@ -31,43 +34,9 @@
__ret; \
})
-#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \
-({ \
- ktime_t __start = ktime_get(); \
- type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
- s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
- struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \
- if (!__retval && __elapsed > __td->field) { \
- __td->field = __elapsed; \
- dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \
- __elapsed); \
- genpd->max_off_time_changed = true; \
- __td->constraint_changed = true; \
- } \
- __retval; \
-})
-
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
-static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
-{
- struct generic_pm_domain *genpd = NULL, *gpd;
-
- if (IS_ERR_OR_NULL(domain_name))
- return NULL;
-
- mutex_lock(&gpd_list_lock);
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
- if (!strcmp(gpd->name, domain_name)) {
- genpd = gpd;
- break;
- }
- }
- mutex_unlock(&gpd_list_lock);
- return genpd;
-}
-
/*
* Get the generic PM domain for a particular struct device.
* This validates the struct device pointer, the PM domain pointer,
@@ -107,14 +76,12 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
- stop_latency_ns, "stop");
+ return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}
static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
- start_latency_ns, "start");
+ return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
@@ -133,55 +100,7 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
smp_mb__after_atomic();
}
-static void genpd_acquire_lock(struct generic_pm_domain *genpd)
-{
- DEFINE_WAIT(wait);
-
- mutex_lock(&genpd->lock);
- /*
- * Wait for the domain to transition into either the active,
- * or the power off state.
- */
- for (;;) {
- prepare_to_wait(&genpd->status_wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- if (genpd->status == GPD_STATE_ACTIVE
- || genpd->status == GPD_STATE_POWER_OFF)
- break;
- mutex_unlock(&genpd->lock);
-
- schedule();
-
- mutex_lock(&genpd->lock);
- }
- finish_wait(&genpd->status_wait_queue, &wait);
-}
-
-static void genpd_release_lock(struct generic_pm_domain *genpd)
-{
- mutex_unlock(&genpd->lock);
-}
-
-static void genpd_set_active(struct generic_pm_domain *genpd)
-{
- if (genpd->resume_count == 0)
- genpd->status = GPD_STATE_ACTIVE;
-}
-
-static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
-{
- s64 usecs64;
-
- if (!genpd->cpuidle_data)
- return;
-
- usecs64 = genpd->power_on_latency_ns;
- do_div(usecs64, NSEC_PER_USEC);
- usecs64 += genpd->cpuidle_data->saved_exit_latency;
- genpd->cpuidle_data->idle_state->exit_latency = usecs64;
-}
-
-static int genpd_power_on(struct generic_pm_domain *genpd)
+static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
ktime_t time_start;
s64 elapsed_ns;
@@ -190,6 +109,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd)
if (!genpd->power_on)
return 0;
+ if (!timed)
+ return genpd->power_on(genpd);
+
time_start = ktime_get();
ret = genpd->power_on(genpd);
if (ret)
@@ -201,14 +123,13 @@ static int genpd_power_on(struct generic_pm_domain *genpd)
genpd->power_on_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
- genpd_recalc_cpu_exit_latency(genpd);
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "on", elapsed_ns);
return ret;
}
-static int genpd_power_off(struct generic_pm_domain *genpd)
+static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
ktime_t time_start;
s64 elapsed_ns;
@@ -217,6 +138,9 @@ static int genpd_power_off(struct generic_pm_domain *genpd)
if (!genpd->power_off)
return 0;
+ if (!timed)
+ return genpd->power_off(genpd);
+
time_start = ktime_get();
ret = genpd->power_off(genpd);
if (ret == -EBUSY)
@@ -235,49 +159,35 @@ static int genpd_power_off(struct generic_pm_domain *genpd)
}
/**
- * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
+ * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of genpd_poweroff() unless it's already been done
+ * before.
+ */
+static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+ queue_work(pm_wq, &genpd->power_off_work);
+}
+
+static int genpd_poweron(struct generic_pm_domain *genpd);
+
+/**
+ * __genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
*
* Restore power to @genpd and all of its masters so that it is possible to
* resume a device belonging to it.
*/
-static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
- __releases(&genpd->lock) __acquires(&genpd->lock)
+static int __genpd_poweron(struct generic_pm_domain *genpd)
{
struct gpd_link *link;
- DEFINE_WAIT(wait);
int ret = 0;
- /* If the domain's master is being waited for, we have to wait too. */
- for (;;) {
- prepare_to_wait(&genpd->status_wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- if (genpd->status != GPD_STATE_WAIT_MASTER)
- break;
- mutex_unlock(&genpd->lock);
-
- schedule();
-
- mutex_lock(&genpd->lock);
- }
- finish_wait(&genpd->status_wait_queue, &wait);
-
if (genpd->status == GPD_STATE_ACTIVE
|| (genpd->prepared_count > 0 && genpd->suspend_power_off))
return 0;
- if (genpd->status != GPD_STATE_POWER_OFF) {
- genpd_set_active(genpd);
- return 0;
- }
-
- if (genpd->cpuidle_data) {
- cpuidle_pause_and_lock();
- genpd->cpuidle_data->idle_state->disabled = true;
- cpuidle_resume_and_unlock();
- goto out;
- }
-
/*
* The list is guaranteed not to change while the loop below is being
* executed, unless one of the masters' .power_on() callbacks fiddles
@@ -285,85 +195,55 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
*/
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_inc(link->master);
- genpd->status = GPD_STATE_WAIT_MASTER;
- mutex_unlock(&genpd->lock);
-
- ret = pm_genpd_poweron(link->master);
-
- mutex_lock(&genpd->lock);
-
- /*
- * The "wait for parent" status is guaranteed not to change
- * while the master is powering on.
- */
- genpd->status = GPD_STATE_POWER_OFF;
- wake_up_all(&genpd->status_wait_queue);
+ ret = genpd_poweron(link->master);
if (ret) {
genpd_sd_counter_dec(link->master);
goto err;
}
}
- ret = genpd_power_on(genpd);
+ ret = genpd_power_on(genpd, true);
if (ret)
goto err;
- out:
- genpd_set_active(genpd);
-
+ genpd->status = GPD_STATE_ACTIVE;
return 0;
err:
- list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+ list_for_each_entry_continue_reverse(link,
+ &genpd->slave_links,
+ slave_node) {
genpd_sd_counter_dec(link->master);
+ genpd_queue_power_off_work(link->master);
+ }
return ret;
}
/**
- * pm_genpd_poweron - Restore power to a given PM domain and its masters.
+ * genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
*/
-int pm_genpd_poweron(struct generic_pm_domain *genpd)
+static int genpd_poweron(struct generic_pm_domain *genpd)
{
int ret;
mutex_lock(&genpd->lock);
- ret = __pm_genpd_poweron(genpd);
+ ret = __genpd_poweron(genpd);
mutex_unlock(&genpd->lock);
return ret;
}
-/**
- * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
- * @domain_name: Name of the PM domain to power up.
- */
-int pm_genpd_name_poweron(const char *domain_name)
-{
- struct generic_pm_domain *genpd;
-
- genpd = pm_genpd_lookup_name(domain_name);
- return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
-}
-
-static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
- struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, start, dev);
-}
-
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
- save_state_latency_ns, "state save");
+ return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
}
-static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+static int genpd_restore_dev(struct generic_pm_domain *genpd,
+ struct device *dev)
{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
- restore_state_latency_ns,
- "state restore");
+ return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
}
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
@@ -407,133 +287,31 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
}
/**
- * __pm_genpd_save_device - Save the pre-suspend state of a device.
- * @pdd: Domain data of the device to save the state of.
- * @genpd: PM domain the device belongs to.
- */
-static int __pm_genpd_save_device(struct pm_domain_data *pdd,
- struct generic_pm_domain *genpd)
- __releases(&genpd->lock) __acquires(&genpd->lock)
-{
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
- struct device *dev = pdd->dev;
- int ret = 0;
-
- if (gpd_data->need_restore > 0)
- return 0;
-
- /*
- * If the value of the need_restore flag is still unknown at this point,
- * we trust that pm_genpd_poweroff() has verified that the device is
- * already runtime PM suspended.
- */
- if (gpd_data->need_restore < 0) {
- gpd_data->need_restore = 1;
- return 0;
- }
-
- mutex_unlock(&genpd->lock);
-
- genpd_start_dev(genpd, dev);
- ret = genpd_save_dev(genpd, dev);
- genpd_stop_dev(genpd, dev);
-
- mutex_lock(&genpd->lock);
-
- if (!ret)
- gpd_data->need_restore = 1;
-
- return ret;
-}
-
-/**
- * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
- * @pdd: Domain data of the device to restore the state of.
- * @genpd: PM domain the device belongs to.
- */
-static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
- struct generic_pm_domain *genpd)
- __releases(&genpd->lock) __acquires(&genpd->lock)
-{
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
- struct device *dev = pdd->dev;
- int need_restore = gpd_data->need_restore;
-
- gpd_data->need_restore = 0;
- mutex_unlock(&genpd->lock);
-
- genpd_start_dev(genpd, dev);
-
- /*
- * Call genpd_restore_dev() for recently added devices too (need_restore
- * is negative then).
- */
- if (need_restore)
- genpd_restore_dev(genpd, dev);
-
- mutex_lock(&genpd->lock);
-}
-
-/**
- * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
- * @genpd: PM domain to check.
- *
- * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
- * a "power off" operation, which means that a "power on" has occured in the
- * meantime, or if its resume_count field is different from zero, which means
- * that one of its devices has been resumed in the meantime.
- */
-static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
-{
- return genpd->status == GPD_STATE_WAIT_MASTER
- || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
-}
-
-/**
- * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
- * @genpd: PM domait to power off.
- *
- * Queue up the execution of pm_genpd_poweroff() unless it's already been done
- * before.
- */
-static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
-{
- queue_work(pm_wq, &genpd->power_off_work);
-}
-
-/**
- * pm_genpd_poweroff - Remove power from a given PM domain.
+ * genpd_poweroff - Remove power from a given PM domain.
* @genpd: PM domain to power down.
+ * @is_async: PM domain is powered down from a scheduled work item
*
* If all of the @genpd's devices have been suspended and all of its subdomains
- * have been powered down, run the runtime suspend callbacks provided by all of
- * the @genpd's devices' drivers and remove power from @genpd.
+ * have been powered down, remove power from @genpd.
*/
-static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
- __releases(&genpd->lock) __acquires(&genpd->lock)
+static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
{
struct pm_domain_data *pdd;
struct gpd_link *link;
- unsigned int not_suspended;
- int ret = 0;
+ unsigned int not_suspended = 0;
- start:
/*
* Do not try to power off the domain in the following situations:
* (1) The domain is already in the "power off" state.
- * (2) The domain is waiting for its master to power up.
- * (3) One of the domain's devices is being resumed right now.
- * (4) System suspend is in progress.
+ * (2) System suspend is in progress.
*/
if (genpd->status == GPD_STATE_POWER_OFF
- || genpd->status == GPD_STATE_WAIT_MASTER
- || genpd->resume_count > 0 || genpd->prepared_count > 0)
+ || genpd->prepared_count > 0)
return 0;
if (atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
- not_suspended = 0;
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
enum pm_qos_flags_status stat;
@@ -543,83 +321,35 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
if (stat > PM_QOS_FLAGS_NONE)
return -EBUSY;
- if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
- || pdd->dev->power.irq_safe))
+ if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
not_suspended++;
}
- if (not_suspended > genpd->in_progress)
+ if (not_suspended > 1 || (not_suspended == 1 && is_async))
return -EBUSY;
- if (genpd->poweroff_task) {
- /*
- * Another instance of pm_genpd_poweroff() is executing
- * callbacks, so tell it to start over and return.
- */
- genpd->status = GPD_STATE_REPEAT;
- return 0;
- }
-
if (genpd->gov && genpd->gov->power_down_ok) {
if (!genpd->gov->power_down_ok(&genpd->domain))
return -EAGAIN;
}
- genpd->status = GPD_STATE_BUSY;
- genpd->poweroff_task = current;
-
- list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
- ret = atomic_read(&genpd->sd_count) == 0 ?
- __pm_genpd_save_device(pdd, genpd) : -EBUSY;
-
- if (genpd_abort_poweroff(genpd))
- goto out;
-
- if (ret) {
- genpd_set_active(genpd);
- goto out;
- }
-
- if (genpd->status == GPD_STATE_REPEAT) {
- genpd->poweroff_task = NULL;
- goto start;
- }
- }
-
- if (genpd->cpuidle_data) {
- /*
- * If cpuidle_data is set, cpuidle should turn the domain off
- * when the CPU in it is idle. In that case we don't decrement
- * the subdomain counts of the master domains, so that power is
- * not removed from the current domain prematurely as a result
- * of cutting off the masters' power.
- */
- genpd->status = GPD_STATE_POWER_OFF;
- cpuidle_pause_and_lock();
- genpd->cpuidle_data->idle_state->disabled = false;
- cpuidle_resume_and_unlock();
- goto out;
- }
-
if (genpd->power_off) {
- if (atomic_read(&genpd->sd_count) > 0) {
- ret = -EBUSY;
- goto out;
- }
+ int ret;
+
+ if (atomic_read(&genpd->sd_count) > 0)
+ return -EBUSY;
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
- * managed to call pm_genpd_poweron() for the master yet after
- * incrementing it. In that case pm_genpd_poweron() will wait
+ * managed to call genpd_poweron() for the master yet after
+ * incrementing it. In that case genpd_poweron() will wait
* for us to drop the lock, so we can call .power_off() and let
- * the pm_genpd_poweron() restore power for us (this shouldn't
+ * the genpd_poweron() restore power for us (this shouldn't
* happen very often).
*/
- ret = genpd_power_off(genpd);
- if (ret == -EBUSY) {
- genpd_set_active(genpd);
- goto out;
- }
+ ret = genpd_power_off(genpd, true);
+ if (ret)
+ return ret;
}
genpd->status = GPD_STATE_POWER_OFF;
@@ -629,10 +359,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
genpd_queue_power_off_work(link->master);
}
- out:
- genpd->poweroff_task = NULL;
- wake_up_all(&genpd->status_wait_queue);
- return ret;
+ return 0;
}
/**
@@ -645,9 +372,9 @@ static void genpd_power_off_work_fn(struct work_struct *work)
genpd = container_of(work, struct generic_pm_domain, power_off_work);
- genpd_acquire_lock(genpd);
- pm_genpd_poweroff(genpd);
- genpd_release_lock(genpd);
+ mutex_lock(&genpd->lock);
+ genpd_poweroff(genpd, true);
+ mutex_unlock(&genpd->lock);
}
/**
@@ -661,8 +388,11 @@ static void genpd_power_off_work_fn(struct work_struct *work)
static int pm_genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
- struct generic_pm_domain_data *gpd_data;
bool (*stop_ok)(struct device *__dev);
+ struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ bool runtime_pm = pm_runtime_enabled(dev);
+ ktime_t time_start;
+ s64 elapsed_ns;
int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -671,14 +401,42 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
+ /*
+ * A runtime PM centric subsystem/driver may re-use the runtime PM
+ * callbacks for purposes other than runtime PM. In those scenarios
+ * runtime PM is disabled, so we skip validating/measuring the PM QoS
+ * latency under these circumstances.
+ */
stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
- if (stop_ok && !stop_ok(dev))
+ if (runtime_pm && stop_ok && !stop_ok(dev))
return -EBUSY;
- ret = genpd_stop_dev(genpd, dev);
+ /* Measure suspend latency. */
+ if (runtime_pm)
+ time_start = ktime_get();
+
+ ret = genpd_save_dev(genpd, dev);
if (ret)
return ret;
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret) {
+ genpd_restore_dev(genpd, dev);
+ return ret;
+ }
+
+ /* Update suspend latency value if the measured time exceeds it. */
+ if (runtime_pm) {
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > td->suspend_latency_ns) {
+ td->suspend_latency_ns = elapsed_ns;
+ dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
+ elapsed_ns);
+ genpd->max_off_time_changed = true;
+ td->constraint_changed = true;
+ }
+ }
+
/*
* If power.irq_safe is set, this routine will be run with interrupts
* off, so it can't use mutexes.
@@ -687,19 +445,7 @@ static int pm_genpd_runtime_suspend(struct device *dev)
return 0;
mutex_lock(&genpd->lock);
-
- /*
- * If we have an unknown state of the need_restore flag, it means none
- * of the runtime PM callbacks has been invoked yet. Let's update the
- * flag to reflect that the current state is active.
- */
- gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
- if (gpd_data->need_restore < 0)
- gpd_data->need_restore = 0;
-
- genpd->in_progress++;
- pm_genpd_poweroff(genpd);
- genpd->in_progress--;
+ genpd_poweroff(genpd, false);
mutex_unlock(&genpd->lock);
return 0;
@@ -716,8 +462,12 @@ static int pm_genpd_runtime_suspend(struct device *dev)
static int pm_genpd_runtime_resume(struct device *dev)
{
struct generic_pm_domain *genpd;
- DEFINE_WAIT(wait);
+ struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ bool runtime_pm = pm_runtime_enabled(dev);
+ ktime_t time_start;
+ s64 elapsed_ns;
int ret;
+ bool timed = true;
dev_dbg(dev, "%s()\n", __func__);
@@ -726,39 +476,37 @@ static int pm_genpd_runtime_resume(struct device *dev)
return -EINVAL;
/* If power.irq_safe, the PM domain is never powered off. */
- if (dev->power.irq_safe)
- return genpd_start_dev_no_timing(genpd, dev);
+ if (dev->power.irq_safe) {
+ timed = false;
+ goto out;
+ }
mutex_lock(&genpd->lock);
- ret = __pm_genpd_poweron(genpd);
- if (ret) {
- mutex_unlock(&genpd->lock);
+ ret = __genpd_poweron(genpd);
+ mutex_unlock(&genpd->lock);
+
+ if (ret)
return ret;
- }
- genpd->status = GPD_STATE_BUSY;
- genpd->resume_count++;
- for (;;) {
- prepare_to_wait(&genpd->status_wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- /*
- * If current is the powering off task, we have been called
- * reentrantly from one of the device callbacks, so we should
- * not wait.
- */
- if (!genpd->poweroff_task || genpd->poweroff_task == current)
- break;
- mutex_unlock(&genpd->lock);
- schedule();
+ out:
+ /* Measure resume latency. */
+ if (timed && runtime_pm)
+ time_start = ktime_get();
- mutex_lock(&genpd->lock);
+ genpd_start_dev(genpd, dev);
+ genpd_restore_dev(genpd, dev);
+
+ /* Update resume latency value if the measured time exceeds it. */
+ if (timed && runtime_pm) {
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > td->resume_latency_ns) {
+ td->resume_latency_ns = elapsed_ns;
+ dev_dbg(dev, "resume latency exceeded, %lld ns\n",
+ elapsed_ns);
+ genpd->max_off_time_changed = true;
+ td->constraint_changed = true;
+ }
}
- finish_wait(&genpd->status_wait_queue, &wait);
- __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
- genpd->resume_count--;
- genpd_set_active(genpd);
- wake_up_all(&genpd->status_wait_queue);
- mutex_unlock(&genpd->lock);
return 0;
}
@@ -772,15 +520,15 @@ static int __init pd_ignore_unused_setup(char *__unused)
__setup("pd_ignore_unused", pd_ignore_unused_setup);
/**
- * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ * genpd_poweroff_unused - Power off all PM domains with no devices in use.
*/
-void pm_genpd_poweroff_unused(void)
+static int __init genpd_poweroff_unused(void)
{
struct generic_pm_domain *genpd;
if (pd_ignore_unused) {
pr_warn("genpd: Not disabling unused power domains\n");
- return;
+ return 0;
}
mutex_lock(&gpd_list_lock);
@@ -789,11 +537,7 @@ void pm_genpd_poweroff_unused(void)
genpd_queue_power_off_work(genpd);
mutex_unlock(&gpd_list_lock);
-}
-static int __init genpd_poweroff_unused(void)
-{
- pm_genpd_poweroff_unused();
return 0;
}
late_initcall(genpd_poweroff_unused);
@@ -827,6 +571,7 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
/**
* pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
+ * @timed: True if latency measurements are allowed.
*
* Check if the given PM domain can be powered off (during system suspend or
* hibernation) and do that if so. Also, in that case propagate to its masters.
@@ -836,7 +581,8 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
* executed sequentially, so it is guaranteed that it will never run twice in
* parallel).
*/
-static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
+ bool timed)
{
struct gpd_link *link;
@@ -847,38 +593,40 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
|| atomic_read(&genpd->sd_count) > 0)
return;
- genpd_power_off(genpd);
+ genpd_power_off(genpd, timed);
genpd->status = GPD_STATE_POWER_OFF;
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
- pm_genpd_sync_poweroff(link->master);
+ pm_genpd_sync_poweroff(link->master, timed);
}
}
/**
* pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
* @genpd: PM domain to power on.
+ * @timed: True if latency measurements are allowed.
*
* This function is only called in "noirq" and "syscore" stages of system power
* transitions, so it need not acquire locks (all of the "noirq" callbacks are
* executed sequentially, so it is guaranteed that it will never run twice in
* parallel).
*/
-static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
+static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
+ bool timed)
{
struct gpd_link *link;
- if (genpd->status != GPD_STATE_POWER_OFF)
+ if (genpd->status == GPD_STATE_ACTIVE)
return;
list_for_each_entry(link, &genpd->slave_links, slave_node) {
- pm_genpd_sync_poweron(link->master);
+ pm_genpd_sync_poweron(link->master, timed);
genpd_sd_counter_inc(link->master);
}
- genpd_power_on(genpd);
+ genpd_power_on(genpd, timed);
genpd->status = GPD_STATE_ACTIVE;
}
@@ -947,14 +695,14 @@ static int pm_genpd_prepare(struct device *dev)
if (resume_needed(dev, genpd))
pm_runtime_resume(dev);
- genpd_acquire_lock(genpd);
+ mutex_lock(&genpd->lock);
if (genpd->prepared_count++ == 0) {
genpd->suspended_count = 0;
genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
}
- genpd_release_lock(genpd);
+ mutex_unlock(&genpd->lock);
if (genpd->suspend_power_off) {
pm_runtime_put_noidle(dev);
@@ -963,7 +711,7 @@ static int pm_genpd_prepare(struct device *dev)
/*
* The PM domain must be in the GPD_STATE_ACTIVE state at this point,
- * so pm_genpd_poweron() will return immediately, but if the device
+ * so genpd_poweron() will return immediately, but if the device
* is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
* to make it operational.
*/
@@ -1056,7 +804,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
* the same PM domain, so it is not necessary to use locking here.
*/
genpd->suspended_count++;
- pm_genpd_sync_poweroff(genpd);
+ pm_genpd_sync_poweroff(genpd, true);
return 0;
}
@@ -1086,7 +834,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
* guaranteed that this function will never run twice in parallel for
* the same PM domain, so it is not necessary to use locking here.
*/
- pm_genpd_sync_poweron(genpd);
+ pm_genpd_sync_poweron(genpd, true);
genpd->suspended_count--;
return genpd_start_dev(genpd, dev);
@@ -1217,7 +965,8 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
+ return genpd->suspend_power_off ?
+ 0 : genpd_start_dev(genpd, dev);
}
/**
@@ -1300,7 +1049,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
* If the domain was off before the hibernation, make
* sure it will be off going forward.
*/
- genpd_power_off(genpd);
+ genpd_power_off(genpd, true);
return 0;
}
@@ -1309,7 +1058,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- pm_genpd_sync_poweron(genpd);
+ pm_genpd_sync_poweron(genpd, true);
return genpd_start_dev(genpd, dev);
}
@@ -1367,9 +1116,9 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
if (suspend) {
genpd->suspended_count++;
- pm_genpd_sync_poweroff(genpd);
+ pm_genpd_sync_poweroff(genpd, false);
} else {
- pm_genpd_sync_poweron(genpd);
+ pm_genpd_sync_poweron(genpd, false);
genpd->suspended_count--;
}
}
@@ -1427,7 +1176,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
gpd_data->td = *td;
gpd_data->base.dev = dev;
- gpd_data->need_restore = -1;
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = -1;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
@@ -1489,7 +1237,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
- genpd_acquire_lock(genpd);
+ mutex_lock(&genpd->lock);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1506,7 +1254,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
out:
- genpd_release_lock(genpd);
+ mutex_unlock(&genpd->lock);
if (ret)
genpd_free_dev_data(dev, gpd_data);
@@ -1517,18 +1265,6 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
}
/**
- * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
- * @domain_name: Name of the PM domain to add the device to.
- * @dev: Device to be added.
- * @td: Set of PM QoS timing parameters to attach to the device.
- */
-int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
- struct gpd_timing_data *td)
-{
- return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
-}
-
-/**
* pm_genpd_remove_device - Remove a device from an I/O PM domain.
* @genpd: PM domain to remove the device from.
* @dev: Device to be removed.
@@ -1550,7 +1286,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
gpd_data = to_gpd_data(pdd);
dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
- genpd_acquire_lock(genpd);
+ mutex_lock(&genpd->lock);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1565,14 +1301,14 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
list_del_init(&pdd->list_node);
- genpd_release_lock(genpd);
+ mutex_unlock(&genpd->lock);
genpd_free_dev_data(dev, gpd_data);
return 0;
out:
- genpd_release_lock(genpd);
+ mutex_unlock(&genpd->lock);
dev_pm_qos_add_notifier(dev, &gpd_data->nb);
return ret;
@@ -1586,23 +1322,19 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *subdomain)
{
- struct gpd_link *link;
+ struct gpd_link *link, *itr;
int ret = 0;
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
|| genpd == subdomain)
return -EINVAL;
- start:
- genpd_acquire_lock(genpd);
- mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
- if (subdomain->status != GPD_STATE_POWER_OFF
- && subdomain->status != GPD_STATE_ACTIVE) {
- mutex_unlock(&subdomain->lock);
- genpd_release_lock(genpd);
- goto start;
- }
+ mutex_lock(&genpd->lock);
+ mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
if (genpd->status == GPD_STATE_POWER_OFF
&& subdomain->status != GPD_STATE_POWER_OFF) {
@@ -1610,18 +1342,13 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
goto out;
}
- list_for_each_entry(link, &genpd->master_links, master_node) {
- if (link->slave == subdomain && link->master == genpd) {
+ list_for_each_entry(itr, &genpd->master_links, master_node) {
+ if (itr->slave == subdomain && itr->master == genpd) {
ret = -EINVAL;
goto out;
}
}
- link = kzalloc(sizeof(*link), GFP_KERNEL);
- if (!link) {
- ret = -ENOMEM;
- goto out;
- }
link->master = genpd;
list_add_tail(&link->master_node, &genpd->master_links);
link->slave = subdomain;
@@ -1631,39 +1358,12 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
out:
mutex_unlock(&subdomain->lock);
- genpd_release_lock(genpd);
-
+ mutex_unlock(&genpd->lock);
+ if (ret)
+ kfree(link);
return ret;
}
-
-/**
- * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
- * @master_name: Name of the master PM domain to add the subdomain to.
- * @subdomain_name: Name of the subdomain to be added.
- */
-int pm_genpd_add_subdomain_names(const char *master_name,
- const char *subdomain_name)
-{
- struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
-
- if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
- return -EINVAL;
-
- mutex_lock(&gpd_list_lock);
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
- if (!master && !strcmp(gpd->name, master_name))
- master = gpd;
-
- if (!subdomain && !strcmp(gpd->name, subdomain_name))
- subdomain = gpd;
-
- if (master && subdomain)
- break;
- }
- mutex_unlock(&gpd_list_lock);
-
- return pm_genpd_add_subdomain(master, subdomain);
-}
+EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
/**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
@@ -1679,8 +1379,14 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
return -EINVAL;
- start:
- genpd_acquire_lock(genpd);
+ mutex_lock(&genpd->lock);
+
+ if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
+ pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
+ subdomain->name);
+ ret = -EBUSY;
+ goto out;
+ }
list_for_each_entry(link, &genpd->master_links, master_node) {
if (link->slave != subdomain)
@@ -1688,13 +1394,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
- if (subdomain->status != GPD_STATE_POWER_OFF
- && subdomain->status != GPD_STATE_ACTIVE) {
- mutex_unlock(&subdomain->lock);
- genpd_release_lock(genpd);
- goto start;
- }
-
list_del(&link->master_node);
list_del(&link->slave_node);
kfree(link);
@@ -1707,128 +1406,12 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
break;
}
- genpd_release_lock(genpd);
-
- return ret;
-}
-
-/**
- * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
- * @genpd: PM domain to be connected with cpuidle.
- * @state: cpuidle state this domain can disable/enable.
- *
- * Make a PM domain behave as though it contained a CPU core, that is, instead
- * of calling its power down routine it will enable the given cpuidle state so
- * that the cpuidle subsystem can power it down (if possible and desirable).
- */
-int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
-{
- struct cpuidle_driver *cpuidle_drv;
- struct gpd_cpuidle_data *cpuidle_data;
- struct cpuidle_state *idle_state;
- int ret = 0;
-
- if (IS_ERR_OR_NULL(genpd) || state < 0)
- return -EINVAL;
-
- genpd_acquire_lock(genpd);
-
- if (genpd->cpuidle_data) {
- ret = -EEXIST;
- goto out;
- }
- cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
- if (!cpuidle_data) {
- ret = -ENOMEM;
- goto out;
- }
- cpuidle_drv = cpuidle_driver_ref();
- if (!cpuidle_drv) {
- ret = -ENODEV;
- goto err_drv;
- }
- if (cpuidle_drv->state_count <= state) {
- ret = -EINVAL;
- goto err;
- }
- idle_state = &cpuidle_drv->states[state];
- if (!idle_state->disabled) {
- ret = -EAGAIN;
- goto err;
- }
- cpuidle_data->idle_state = idle_state;
- cpuidle_data->saved_exit_latency = idle_state->exit_latency;
- genpd->cpuidle_data = cpuidle_data;
- genpd_recalc_cpu_exit_latency(genpd);
-
- out:
- genpd_release_lock(genpd);
- return ret;
-
- err:
- cpuidle_driver_unref();
-
- err_drv:
- kfree(cpuidle_data);
- goto out;
-}
-
-/**
- * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
- * @name: Name of the domain to connect to cpuidle.
- * @state: cpuidle state this domain can manipulate.
- */
-int pm_genpd_name_attach_cpuidle(const char *name, int state)
-{
- return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
-}
-
-/**
- * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
- * @genpd: PM domain to remove the cpuidle connection from.
- *
- * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
- * given PM domain.
- */
-int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
-{
- struct gpd_cpuidle_data *cpuidle_data;
- struct cpuidle_state *idle_state;
- int ret = 0;
-
- if (IS_ERR_OR_NULL(genpd))
- return -EINVAL;
-
- genpd_acquire_lock(genpd);
-
- cpuidle_data = genpd->cpuidle_data;
- if (!cpuidle_data) {
- ret = -ENODEV;
- goto out;
- }
- idle_state = cpuidle_data->idle_state;
- if (!idle_state->disabled) {
- ret = -EAGAIN;
- goto out;
- }
- idle_state->exit_latency = cpuidle_data->saved_exit_latency;
- cpuidle_driver_unref();
- genpd->cpuidle_data = NULL;
- kfree(cpuidle_data);
+out:
+ mutex_unlock(&genpd->lock);
- out:
- genpd_release_lock(genpd);
return ret;
}
-
-/**
- * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
- * @name: Name of the domain to disconnect cpuidle from.
- */
-int pm_genpd_name_detach_cpuidle(const char *name)
-{
- return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
-}
+EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
/* Default device callbacks for generic PM domains. */
@@ -1896,12 +1479,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
mutex_init(&genpd->lock);
genpd->gov = gov;
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
- genpd->in_progress = 0;
atomic_set(&genpd->sd_count, 0);
genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
- init_waitqueue_head(&genpd->status_wait_queue);
- genpd->poweroff_task = NULL;
- genpd->resume_count = 0;
genpd->device_count = 0;
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = true;
@@ -1939,6 +1518,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
}
+EXPORT_SYMBOL_GPL(pm_genpd_init);
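With pm_genpd_init() and pm_genpd_add_subdomain() now exported, modular platform code can register a small domain hierarchy on its own. A minimal sketch, assuming invented names (foo_pd_*, foo_top_power_on/off) and with error handling trimmed:

#include <linux/init.h>
#include <linux/pm_domain.h>

static int foo_top_power_on(struct generic_pm_domain *genpd)
{
	/* enable the supply backing the "top" domain (illustrative) */
	return 0;
}

static int foo_top_power_off(struct generic_pm_domain *genpd)
{
	/* gate the supply backing the "top" domain (illustrative) */
	return 0;
}

static struct generic_pm_domain foo_pd_top = {
	.name		= "top",
	.power_on	= foo_top_power_on,
	.power_off	= foo_top_power_off,
};

static struct generic_pm_domain foo_pd_gpu = {
	.name		= "gpu",
};

static int __init foo_pd_setup(void)
{
	pm_genpd_init(&foo_pd_top, NULL, false);	/* starts powered on */
	pm_genpd_init(&foo_pd_gpu, NULL, true);		/* starts powered off */

	/* "gpu" may only be powered while "top" is powered */
	return pm_genpd_add_subdomain(&foo_pd_top, &foo_pd_gpu);
}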
#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
@@ -2112,7 +1692,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
/**
* genpd_dev_pm_detach - Detach a device from its PM domain.
- * @dev: Device to attach.
+ * @dev: Device to detach.
* @power_off: Currently not used
*
* Try to locate a corresponding generic PM domain, which the device was
@@ -2121,6 +1701,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
struct generic_pm_domain *pd;
+ unsigned int i;
int ret = 0;
pd = pm_genpd_lookup_dev(dev);
@@ -2129,10 +1710,12 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off)
dev_dbg(dev, "removing from PM domain %s\n", pd->name);
- while (1) {
+ for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
ret = pm_genpd_remove_device(pd, dev);
if (ret != -EAGAIN)
break;
+
+ mdelay(i);
cond_resched();
}
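The loop above gives the remove operation an exponential backoff rather than spinning forever: each -EAGAIN is followed by an mdelay() of 1, 2, 4, ... milliseconds, and the loop gives up once the next delay would reach GENPD_RETRY_MAX_MS (defined earlier in this file, outside the hunks shown here).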
@@ -2167,12 +1750,16 @@ static void genpd_dev_pm_sync(struct device *dev)
* Both generic and legacy Samsung-specific DT bindings are supported to keep
* backwards compatibility with existing DTBs.
*
- * Returns 0 on successfully attached PM domain or negative error code.
+ * Returns 0 when a PM domain has been attached successfully, or a negative
+ * error code otherwise. Note that if a power-domain exists for the device,
+ * but it cannot be found or turned on, -EPROBE_DEFER is returned so that the
+ * device is not probed and the attach is retried later.
*/
int genpd_dev_pm_attach(struct device *dev)
{
struct of_phandle_args pd_args;
struct generic_pm_domain *pd;
+ unsigned int i;
int ret;
if (!dev->of_node)
@@ -2199,34 +1786,36 @@ int genpd_dev_pm_attach(struct device *dev)
}
pd = of_genpd_get_from_provider(&pd_args);
+ of_node_put(pd_args.np);
if (IS_ERR(pd)) {
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
- of_node_put(dev->of_node);
- return PTR_ERR(pd);
+ return -EPROBE_DEFER;
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
- while (1) {
+ for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
ret = pm_genpd_add_device(pd, dev);
if (ret != -EAGAIN)
break;
+
+ mdelay(i);
cond_resched();
}
if (ret < 0) {
dev_err(dev, "failed to add to PM domain %s: %d",
pd->name, ret);
- of_node_put(dev->of_node);
- return ret;
+ goto out;
}
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
- pm_genpd_poweron(pd);
+ ret = genpd_poweron(pd);
- return 0;
+out:
+ return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
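As a hedged illustration of the new return convention, here is how a caller (in practice bus code such as the platform bus, reached via dev_pm_domain_attach()) might propagate the deferral; foo_bus_probe() is invented:

#include <linux/errno.h>
#include <linux/pm_domain.h>

static int foo_bus_probe(struct device *dev)
{
	int ret;

	ret = genpd_dev_pm_attach(dev);
	if (ret == -EPROBE_DEFER)
		return ret;	/* a domain exists but is not usable yet: probe again later */

	/* any other error means "no usable PM domain", which is not fatal here */
	return 0;
}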
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
@@ -2274,9 +1863,6 @@ static int pm_genpd_summary_one(struct seq_file *s,
{
static const char * const status_lookup[] = {
[GPD_STATE_ACTIVE] = "on",
- [GPD_STATE_WAIT_MASTER] = "wait-master",
- [GPD_STATE_BUSY] = "busy",
- [GPD_STATE_REPEAT] = "off-in-progress",
[GPD_STATE_POWER_OFF] = "off"
};
struct pm_domain_data *pm_data;
@@ -2290,7 +1876,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
goto exit;
- seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
+ seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
/*
* Modifications on the list require holding locks on both
@@ -2325,8 +1911,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
struct generic_pm_domain *genpd;
int ret = 0;
- seq_puts(s, " domain status slaves\n");
- seq_puts(s, " /device runtime status\n");
+ seq_puts(s, "domain status slaves\n");
+ seq_puts(s, " /device runtime status\n");
seq_puts(s, "----------------------------------------------------------------------\n");
ret = mutex_lock_interruptible(&gpd_list_lock);
diff --git a/kernel/drivers/base/power/domain_governor.c b/kernel/drivers/base/power/domain_governor.c
index 2a4154a09..1e937ac5f 100644
--- a/kernel/drivers/base/power/domain_governor.c
+++ b/kernel/drivers/base/power/domain_governor.c
@@ -77,13 +77,14 @@ static bool default_stop_ok(struct device *dev)
dev_update_qos_constraint);
if (constraint_ns > 0) {
- constraint_ns -= td->start_latency_ns;
+ constraint_ns -= td->suspend_latency_ns +
+ td->resume_latency_ns;
if (constraint_ns == 0)
return false;
}
td->effective_constraint_ns = constraint_ns;
- td->cached_stop_ok = constraint_ns > td->stop_latency_ns ||
- constraint_ns == 0;
+ td->cached_stop_ok = constraint_ns >= 0;
+
/*
* The children have been suspended already, so we don't need to take
* their stop latencies into account here.
@@ -126,18 +127,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
off_on_time_ns = genpd->power_off_latency_ns +
genpd->power_on_latency_ns;
- /*
- * It doesn't make sense to remove power from the domain if saving
- * the state of all devices in it and the power off/power on operations
- * take too much time.
- *
- * All devices in this domain have been stopped already at this point.
- */
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- if (pdd->dev->driver)
- off_on_time_ns +=
- to_gpd_data(pdd)->td.save_state_latency_ns;
- }
min_off_time_ns = -1;
/*
@@ -171,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
struct gpd_timing_data *td;
s64 constraint_ns;
- if (!pdd->dev->driver)
- continue;
-
/*
* Check if the device is allowed to be off long enough for the
* domain to turn off and on (that's how much time it will
@@ -193,7 +179,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
* constraint_ns cannot be negative here, because the device has
* been suspended.
*/
- constraint_ns -= td->restore_state_latency_ns;
if (constraint_ns <= off_on_time_ns)
return false;
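To make the reworked default_stop_ok() arithmetic at the top of this governor diff concrete, a worked example with arbitrary figures: given an effective PM QoS constraint of 500000 ns and measured worst-case latencies of 120000 ns (suspend) and 180000 ns (resume), the constraint becomes 500000 - (120000 + 180000) = 200000 ns; that is >= 0, so cached_stop_ok is true. Had the two latencies summed to more than the constraint, the result would be negative and stopping the device would be vetoed.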
diff --git a/kernel/drivers/base/power/generic_ops.c b/kernel/drivers/base/power/generic_ops.c
index 96a92db83..07c3c4a95 100644
--- a/kernel/drivers/base/power/generic_ops.c
+++ b/kernel/drivers/base/power/generic_ops.c
@@ -9,6 +9,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
+#include <linux/suspend.h>
#ifdef CONFIG_PM
/**
@@ -296,11 +297,27 @@ void pm_generic_complete(struct device *dev)
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
+}
+/**
+ * pm_complete_with_resume_check - Complete a device power transition.
+ * @dev: Device to handle.
+ *
+ * Complete a device power transition during a system-wide power transition and
+ * optionally schedule a runtime resume of the device if the system resume in
+ * progress has been initiated by the platform firmware and the device had its
+ * power.direct_complete flag set.
+ */
+void pm_complete_with_resume_check(struct device *dev)
+{
+ pm_generic_complete(dev);
/*
- * Let runtime PM try to suspend devices that haven't been in use before
- * going into the system-wide sleep state we're resuming from.
+ * If the device had been runtime-suspended before the system went into the
+ * sleep state it is now leaving and it has not been resumed since, resume
+ * it in case the firmware powered it up.
*/
- pm_request_idle(dev);
+ if (dev->power.direct_complete && pm_resume_via_firmware())
+ pm_request_resume(dev);
}
+EXPORT_SYMBOL_GPL(pm_complete_with_resume_check);
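A minimal sketch of a driver adopting the new helper as its .complete callback; foo_suspend(), foo_resume() and foo_pm_ops are placeholders:

#include <linux/pm.h>

static int foo_suspend(struct device *dev) { return 0; }	/* illustrative */
static int foo_resume(struct device *dev) { return 0; }	/* illustrative */

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	/* resume the device after a firmware-initiated system resume if needed */
	.complete = pm_complete_with_resume_check,
};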
#endif /* CONFIG_PM_SLEEP */
diff --git a/kernel/drivers/base/power/main.c b/kernel/drivers/base/power/main.c
index 3d874eca7..1710c26ba 100644
--- a/kernel/drivers/base/power/main.c
+++ b/kernel/drivers/base/power/main.c
@@ -24,6 +24,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
+#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
@@ -587,6 +588,7 @@ void dpm_resume_noirq(pm_message_t state)
async_synchronize_full();
dpm_show_time(starttime, state, "noirq");
resume_device_irqs();
+ device_wakeup_disarm_wake_irqs();
cpuidle_resume();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
@@ -920,9 +922,7 @@ static void device_complete(struct device *dev, pm_message_t state)
if (callback) {
pm_dev_dbg(dev, state, info);
- trace_device_pm_callback_start(dev, info, state.event);
callback(dev);
- trace_device_pm_callback_end(dev, 0);
}
device_unlock(dev);
@@ -954,7 +954,9 @@ void dpm_complete(pm_message_t state)
list_move(&dev->power.entry, &list);
mutex_unlock(&dpm_list_mtx);
+ trace_device_pm_callback_start(dev, "", state.event);
device_complete(dev, state);
+ trace_device_pm_callback_end(dev, 0);
mutex_lock(&dpm_list_mtx);
put_device(dev);
@@ -1104,6 +1106,7 @@ int dpm_suspend_noirq(pm_message_t state)
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
cpuidle_pause();
+ device_wakeup_arm_wake_irqs();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
pm_transition = state;
@@ -1374,7 +1377,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
- if (pm_runtime_suspended_if_enabled(dev))
+ if (pm_runtime_status_suspended(dev))
goto Complete;
pm_runtime_enable(dev);
@@ -1585,11 +1588,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
callback = dev->driver->pm->prepare;
}
- if (callback) {
- trace_device_pm_callback_start(dev, info, state.event);
+ if (callback)
ret = callback(dev);
- trace_device_pm_callback_end(dev, ret);
- }
device_unlock(dev);
@@ -1631,7 +1631,9 @@ int dpm_prepare(pm_message_t state)
get_device(dev);
mutex_unlock(&dpm_list_mtx);
+ trace_device_pm_callback_start(dev, "", state.event);
error = device_prepare(dev, state);
+ trace_device_pm_callback_end(dev, error);
mutex_lock(&dpm_list_mtx);
if (error) {
diff --git a/kernel/drivers/base/power/opp/Makefile b/kernel/drivers/base/power/opp/Makefile
new file mode 100644
index 000000000..33c1e18c4
--- /dev/null
+++ b/kernel/drivers/base/power/opp/Makefile
@@ -0,0 +1,2 @@
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
+obj-y += core.o cpu.o
diff --git a/kernel/drivers/base/power/opp.c b/kernel/drivers/base/power/opp/core.c
index 677fb2843..b8e76f750 100644
--- a/kernel/drivers/base/power/opp.c
+++ b/kernel/drivers/base/power/opp/core.c
@@ -11,93 +11,16 @@
* published by the Free Software Foundation.
*/
-#include <linux/kernel.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/pm_opp.h>
#include <linux/of.h>
#include <linux/export.h>
-/*
- * Internal data structure organization with the OPP layer library is as
- * follows:
- * dev_opp_list (root)
- * |- device 1 (represents voltage domain 1)
- * | |- opp 1 (availability, freq, voltage)
- * | |- opp 2 ..
- * ... ...
- * | `- opp n ..
- * |- device 2 (represents the next voltage domain)
- * ...
- * `- device m (represents mth voltage domain)
- * device 1, 2.. are represented by dev_opp structure while each opp
- * is represented by the opp structure.
- */
-
-/**
- * struct dev_pm_opp - Generic OPP description structure
- * @node: opp list node. The nodes are maintained throughout the lifetime
- * of boot. It is expected only an optimal set of OPPs are
- * added to the library by the SoC framework.
- * RCU usage: opp list is traversed with RCU locks. node
- * modification is possible realtime, hence the modifications
- * are protected by the dev_opp_list_lock for integrity.
- * IMPORTANT: the opp nodes should be maintained in increasing
- * order.
- * @dynamic: not-created from static DT entries.
- * @available: true/false - marks if this OPP as available or not
- * @rate: Frequency in hertz
- * @u_volt: Nominal voltage in microvolts corresponding to this OPP
- * @dev_opp: points back to the device_opp struct this opp belongs to
- * @rcu_head: RCU callback head used for deferred freeing
- *
- * This structure stores the OPP information for a given device.
- */
-struct dev_pm_opp {
- struct list_head node;
-
- bool available;
- bool dynamic;
- unsigned long rate;
- unsigned long u_volt;
-
- struct device_opp *dev_opp;
- struct rcu_head rcu_head;
-};
-
-/**
- * struct device_opp - Device opp structure
- * @node: list node - contains the devices with OPPs that
- * have been registered. Nodes once added are not modified in this
- * list.
- * RCU usage: nodes are not modified in the list of device_opp,
- * however addition is possible and is secured by dev_opp_list_lock
- * @dev: device pointer
- * @srcu_head: notifier head to notify the OPP availability changes.
- * @rcu_head: RCU callback head used for deferred freeing
- * @opp_list: list of opps
- *
- * This is an internal data structure maintaining the link to opps attached to
- * a device. This structure is not meant to be shared to users as it is
- * meant for book keeping and private to OPP library.
- *
- * Because the opp structures can be used from both rcu and srcu readers, we
- * need to wait for the grace period of both of them before freeing any
- * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
- */
-struct device_opp {
- struct list_head node;
-
- struct device *dev;
- struct srcu_notifier_head srcu_head;
- struct rcu_head rcu_head;
- struct list_head opp_list;
-};
+#include "opp.h"
/*
* The root of the list of all devices. All device_opp structures branch off
@@ -106,16 +29,48 @@ struct device_opp {
*/
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
-static DEFINE_MUTEX(dev_opp_list_lock);
+DEFINE_MUTEX(dev_opp_list_lock);
#define opp_rcu_lockdep_assert() \
do { \
- rcu_lockdep_assert(rcu_read_lock_held() || \
- lockdep_is_held(&dev_opp_list_lock), \
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+ !lockdep_is_held(&dev_opp_list_lock), \
"Missing rcu_read_lock() or " \
"dev_opp_list_lock protection"); \
} while (0)
+static struct device_list_opp *_find_list_dev(const struct device *dev,
+ struct device_opp *dev_opp)
+{
+ struct device_list_opp *list_dev;
+
+ list_for_each_entry(list_dev, &dev_opp->dev_list, node)
+ if (list_dev->dev == dev)
+ return list_dev;
+
+ return NULL;
+}
+
+static struct device_opp *_managed_opp(const struct device_node *np)
+{
+ struct device_opp *dev_opp;
+
+ list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
+ if (dev_opp->np == np) {
+ /*
+ * Multiple devices can point to the same OPP table and
+ * so will have the same node pointer, np.
+ *
+ * But the OPPs will be considered shared only if the
+ * OPP table contains an "opp-shared" property.
+ */
+ return dev_opp->shared_opp ? dev_opp : NULL;
+ }
+ }
+
+ return NULL;
+}
+
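For reference, the "same node pointer" situation handled by _managed_opp() typically comes from device tree source along the following lines; this fragment is purely illustrative (node names and values invented) and is shown as a comment only:

/*
 *	cpu0: cpu@0 {
 *		operating-points-v2 = <&cpu_opp_table>;
 *	};
 *
 *	cpu1: cpu@1 {
 *		operating-points-v2 = <&cpu_opp_table>;
 *	};
 *
 *	cpu_opp_table: opp_table0 {
 *		compatible = "operating-points-v2";
 *		opp-shared;	<-- the OPPs below are shared by cpu0 and cpu1
 *		opp@1000000000 {
 *			opp-hz = /bits/ 64 <1000000000>;
 *			opp-microvolt = <975000>;
 *		};
 *	};
 */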
/**
* _find_device_opp() - find device_opp struct using device pointer
* @dev: device pointer used to lookup device OPPs
@@ -126,31 +81,32 @@ do { \
* Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
* -EINVAL based on type of error.
*
- * Locking: This function must be called under rcu_read_lock(). device_opp
- * is a RCU protected pointer. This means that device_opp is valid as long
- * as we are under RCU lock.
+ * Locking: For readers, this function must be called under rcu_read_lock().
+ * device_opp is an RCU-protected pointer, which means that device_opp is valid
+ * as long as we are under the RCU lock.
+ *
+ * For writers, this function must be called with dev_opp_list_lock held.
*/
-static struct device_opp *_find_device_opp(struct device *dev)
+struct device_opp *_find_device_opp(struct device *dev)
{
- struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+ struct device_opp *dev_opp;
+
+ opp_rcu_lockdep_assert();
- if (unlikely(IS_ERR_OR_NULL(dev))) {
+ if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
- list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
- if (tmp_dev_opp->dev == dev) {
- dev_opp = tmp_dev_opp;
- break;
- }
- }
+ list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
+ if (_find_list_dev(dev, dev_opp))
+ return dev_opp;
- return dev_opp;
+ return ERR_PTR(-ENODEV);
}
/**
- * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
* @opp: opp for which voltage has to be returned for
*
* Return: voltage in micro volt corresponding to the opp, else
@@ -172,7 +128,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
opp_rcu_lockdep_assert();
tmp_opp = rcu_dereference(opp);
- if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+ if (IS_ERR_OR_NULL(tmp_opp))
pr_err("%s: Invalid parameters\n", __func__);
else
v = tmp_opp->u_volt;
@@ -204,7 +160,7 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
opp_rcu_lockdep_assert();
tmp_opp = rcu_dereference(opp);
- if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+ if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
pr_err("%s: Invalid parameters\n", __func__);
else
f = tmp_opp->rate;
@@ -214,6 +170,94 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
/**
+ * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
+ * @opp: opp for which turbo mode is being verified
+ *
+ * Turbo OPPs are not for normal use, and can be enabled (under certain
+ * conditions) for short periods of time to finish high-throughput work
+ * quickly. Running on them for longer times may overheat the chip.
+ *
+ * Return: true if opp is turbo opp, else false.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. This means that opp which could have been fetched by
+ * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
+ * under RCU lock. The pointer returned by the opp_find_freq family must be
+ * used in the same section as the usage of this function with the pointer
+ * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
+ * pointer.
+ */
+bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
+{
+ struct dev_pm_opp *tmp_opp;
+
+ opp_rcu_lockdep_assert();
+
+ tmp_opp = rcu_dereference(opp);
+ if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return false;
+ }
+
+ return tmp_opp->turbo;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
+
+/**
+ * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max clock latency in nanoseconds.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
+{
+ struct device_opp *dev_opp;
+ unsigned long clock_latency_ns;
+
+ rcu_read_lock();
+
+ dev_opp = _find_device_opp(dev);
+ if (IS_ERR(dev_opp))
+ clock_latency_ns = 0;
+ else
+ clock_latency_ns = dev_opp->clock_latency_ns_max;
+
+ rcu_read_unlock();
+ return clock_latency_ns;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
+
+/**
+ * dev_pm_opp_get_suspend_opp() - Get suspend opp
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns pointer to the suspend opp if it is
+ * defined and available, otherwise it returns NULL.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. This is because the opp pointer that is returned will
+ * remain valid for use with opp_get_{voltage, freq} only while inside the
+ * locked region. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+{
+ struct device_opp *dev_opp;
+
+ opp_rcu_lockdep_assert();
+
+ dev_opp = _find_device_opp(dev);
+ if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
+ !dev_opp->suspend_opp->available)
+ return NULL;
+
+ return dev_opp->suspend_opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
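A hedged sketch of a caller honouring the locking rule described above; foo_get_suspend_freq() is invented and error handling is minimal:

#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static unsigned long foo_get_suspend_freq(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned long freq = 0;

	rcu_read_lock();
	opp = dev_pm_opp_get_suspend_opp(dev);
	if (opp)
		freq = dev_pm_opp_get_freq(opp);
	rcu_read_unlock();	/* 'opp' must not be dereferenced past this point */

	return freq;
}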
+
+/**
* dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
* @dev: device for which we do this operation
*
@@ -407,18 +451,57 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
+/* List-dev Helpers */
+static void _kfree_list_dev_rcu(struct rcu_head *head)
+{
+ struct device_list_opp *list_dev;
+
+ list_dev = container_of(head, struct device_list_opp, rcu_head);
+ kfree_rcu(list_dev, rcu_head);
+}
+
+static void _remove_list_dev(struct device_list_opp *list_dev,
+ struct device_opp *dev_opp)
+{
+ list_del(&list_dev->node);
+ call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
+ _kfree_list_dev_rcu);
+}
+
+struct device_list_opp *_add_list_dev(const struct device *dev,
+ struct device_opp *dev_opp)
+{
+ struct device_list_opp *list_dev;
+
+ list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
+ if (!list_dev)
+ return NULL;
+
+ /* Initialize list-dev */
+ list_dev->dev = dev;
+ list_add_rcu(&list_dev->node, &dev_opp->dev_list);
+
+ return list_dev;
+}
+
/**
- * _add_device_opp() - Allocate a new device OPP table
+ * _add_device_opp() - Find device OPP table or allocate a new one
* @dev: device for which we do this operation
*
- * New device node which uses OPPs - used when multiple devices with OPP tables
- * are maintained.
+ * It tries to find an existing table first; if it can't find one, it
+ * allocates a new OPP table and returns that.
*
* Return: valid device_opp pointer if success, else NULL.
*/
static struct device_opp *_add_device_opp(struct device *dev)
{
struct device_opp *dev_opp;
+ struct device_list_opp *list_dev;
+
+ /* Check for existing list for 'dev' first */
+ dev_opp = _find_device_opp(dev);
+ if (!IS_ERR(dev_opp))
+ return dev_opp;
/*
* Allocate a new device OPP table. In the infrequent case where a new
@@ -428,7 +511,14 @@ static struct device_opp *_add_device_opp(struct device *dev)
if (!dev_opp)
return NULL;
- dev_opp->dev = dev;
+ INIT_LIST_HEAD(&dev_opp->dev_list);
+
+ list_dev = _add_list_dev(dev, dev_opp);
+ if (!list_dev) {
+ kfree(dev_opp);
+ return NULL;
+ }
+
srcu_init_notifier_head(&dev_opp->srcu_head);
INIT_LIST_HEAD(&dev_opp->opp_list);
@@ -438,117 +528,197 @@ static struct device_opp *_add_device_opp(struct device *dev)
}
/**
- * _opp_add_dynamic() - Allocate a dynamic OPP.
- * @dev: device for which we do this operation
- * @freq: Frequency in Hz for this OPP
- * @u_volt: Voltage in uVolts for this OPP
- * @dynamic: Dynamically added OPPs.
+ * _kfree_device_rcu() - Free device_opp RCU handler
+ * @head: RCU head
+ */
+static void _kfree_device_rcu(struct rcu_head *head)
+{
+ struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
+
+ kfree_rcu(device_opp, rcu_head);
+}
+
+/**
+ * _remove_device_opp() - Removes a device OPP table
+ * @dev_opp: device OPP table to be removed.
*
- * This function adds an opp definition to the opp list and returns status.
- * The opp is made available by default and it can be controlled using
- * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
+ * Removes/frees the device OPP table if it doesn't contain any OPPs.
+ */
+static void _remove_device_opp(struct device_opp *dev_opp)
+{
+ struct device_list_opp *list_dev;
+
+ if (!list_empty(&dev_opp->opp_list))
+ return;
+
+ list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
+ node);
+
+ _remove_list_dev(list_dev, dev_opp);
+
+ /* dev_list must be empty now */
+ WARN_ON(!list_empty(&dev_opp->dev_list));
+
+ list_del_rcu(&dev_opp->node);
+ call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
+ _kfree_device_rcu);
+}
+
+/**
+ * _kfree_opp_rcu() - Free OPP RCU handler
+ * @head: RCU head
+ */
+static void _kfree_opp_rcu(struct rcu_head *head)
+{
+ struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
+
+ kfree_rcu(opp, rcu_head);
+}
+
+/**
+ * _opp_remove() - Remove an OPP from a table definition
+ * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @opp: pointer to the OPP to remove
+ * @notify: OPP_EVENT_REMOVE notification should be sent or not
+ *
+ * This function removes an opp definition from the opp list.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * It is assumed that the caller holds required mutex for an RCU updater
+ * strategy.
+ */
+static void _opp_remove(struct device_opp *dev_opp,
+ struct dev_pm_opp *opp, bool notify)
+{
+ /*
+ * Notify the changes in the availability of the operable
+ * frequency/voltage list.
+ */
+ if (notify)
+ srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
+ list_del_rcu(&opp->node);
+ call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+
+ _remove_device_opp(dev_opp);
+}
+
+/**
+ * dev_pm_opp_remove() - Remove an OPP from OPP list
+ * @dev: device for which we do this operation
+ * @freq: OPP to remove with matching 'freq'
*
- * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
- * freed by of_free_opp_table.
+ * This function removes an opp from the opp list.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
- *
- * Return:
- * 0 On success OR
- * Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
- * Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
*/
-static int _opp_add_dynamic(struct device *dev, unsigned long freq,
- long u_volt, bool dynamic)
+void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
- struct device_opp *dev_opp = NULL;
- struct dev_pm_opp *opp, *new_opp;
- struct list_head *head;
- int ret;
-
- /* allocate new OPP node */
- new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
- if (!new_opp)
- return -ENOMEM;
+ struct dev_pm_opp *opp;
+ struct device_opp *dev_opp;
+ bool found = false;
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);
- /* populate the opp table */
- new_opp->rate = freq;
- new_opp->u_volt = u_volt;
- new_opp->available = true;
- new_opp->dynamic = dynamic;
-
- /* Check for existing list for 'dev' */
dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- dev_opp = _add_device_opp(dev);
- if (!dev_opp) {
- ret = -ENOMEM;
- goto free_opp;
+ if (IS_ERR(dev_opp))
+ goto unlock;
+
+ list_for_each_entry(opp, &dev_opp->opp_list, node) {
+ if (opp->rate == freq) {
+ found = true;
+ break;
}
+ }
- head = &dev_opp->opp_list;
- goto list_add;
+ if (!found) {
+ dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
+ __func__, freq);
+ goto unlock;
}
+ _opp_remove(dev_opp, opp, true);
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+
+static struct dev_pm_opp *_allocate_opp(struct device *dev,
+ struct device_opp **dev_opp)
+{
+ struct dev_pm_opp *opp;
+
+ /* allocate new OPP node */
+ opp = kzalloc(sizeof(*opp), GFP_KERNEL);
+ if (!opp)
+ return NULL;
+
+ INIT_LIST_HEAD(&opp->node);
+
+ *dev_opp = _add_device_opp(dev);
+ if (!*dev_opp) {
+ kfree(opp);
+ return NULL;
+ }
+
+ return opp;
+}
+
+static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
+ struct device_opp *dev_opp)
+{
+ struct dev_pm_opp *opp;
+ struct list_head *head = &dev_opp->opp_list;
+
/*
- * Insert new OPP in order of increasing frequency
- * and discard if already present
+ * Insert new OPP in order of increasing frequency and discard if
+ * already present.
+ *
+ * Need to use &dev_opp->opp_list in the condition part of the 'for'
+ * loop; don't replace it with head, otherwise it will become an
+ * infinite loop.
*/
- head = &dev_opp->opp_list;
list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
- if (new_opp->rate <= opp->rate)
- break;
- else
+ if (new_opp->rate > opp->rate) {
head = &opp->node;
- }
+ continue;
+ }
- /* Duplicate OPPs ? */
- if (new_opp->rate == opp->rate) {
- ret = opp->available && new_opp->u_volt == opp->u_volt ?
- 0 : -EEXIST;
+ if (new_opp->rate < opp->rate)
+ break;
+ /* Duplicate OPPs */
dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
__func__, opp->rate, opp->u_volt, opp->available,
new_opp->rate, new_opp->u_volt, new_opp->available);
- goto free_opp;
+
+ return opp->available && new_opp->u_volt == opp->u_volt ?
+ 0 : -EEXIST;
}
-list_add:
new_opp->dev_opp = dev_opp;
list_add_rcu(&new_opp->node, head);
- mutex_unlock(&dev_opp_list_lock);
- /*
- * Notify the changes in the availability of the operable
- * frequency/voltage list.
- */
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
return 0;
-
-free_opp:
- mutex_unlock(&dev_opp_list_lock);
- kfree(new_opp);
- return ret;
}
/**
- * dev_pm_opp_add() - Add an OPP table from a table definitions
+ * _opp_add_v1() - Allocate an OPP based on v1 bindings.
* @dev: device for which we do this operation
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
+ * @dynamic: Dynamically added OPPs.
*
* This function adds an opp definition to the opp list and returns status.
* The opp is made available by default and it can be controlled using
- * dev_pm_opp_enable/disable functions.
+ * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
+ *
+ * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
+ * and freed by dev_pm_opp_of_remove_table.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
@@ -563,107 +733,222 @@ free_opp:
* Duplicate OPPs (both freq and volt are same) and !opp->available
* -ENOMEM Memory allocation failure
*/
-int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
+ bool dynamic)
{
- return _opp_add_dynamic(dev, freq, u_volt, true);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_add);
+ struct device_opp *dev_opp;
+ struct dev_pm_opp *new_opp;
+ int ret;
-/**
- * _kfree_opp_rcu() - Free OPP RCU handler
- * @head: RCU head
- */
-static void _kfree_opp_rcu(struct rcu_head *head)
-{
- struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
+ /* Hold our list modification lock here */
+ mutex_lock(&dev_opp_list_lock);
- kfree_rcu(opp, rcu_head);
-}
+ new_opp = _allocate_opp(dev, &dev_opp);
+ if (!new_opp) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
-/**
- * _kfree_device_rcu() - Free device_opp RCU handler
- * @head: RCU head
- */
-static void _kfree_device_rcu(struct rcu_head *head)
-{
- struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
+ /* populate the opp table */
+ new_opp->rate = freq;
+ new_opp->u_volt = u_volt;
+ new_opp->available = true;
+ new_opp->dynamic = dynamic;
- kfree_rcu(device_opp, rcu_head);
-}
+ ret = _opp_add(dev, new_opp, dev_opp);
+ if (ret)
+ goto free_opp;
+
+ mutex_unlock(&dev_opp_list_lock);
-/**
- * _opp_remove() - Remove an OPP from a table definition
- * @dev_opp: points back to the device_opp struct this opp belongs to
- * @opp: pointer to the OPP to remove
- *
- * This function removes an opp definition from the opp list.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * It is assumed that the caller holds required mutex for an RCU updater
- * strategy.
- */
-static void _opp_remove(struct device_opp *dev_opp,
- struct dev_pm_opp *opp)
-{
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
- list_del_rcu(&opp->node);
- call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+ srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+ return 0;
+
+free_opp:
+ _opp_remove(dev_opp, new_opp, false);
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+ return ret;
+}
+
+/* TODO: Support multiple regulators */
+static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
+{
+ u32 microvolt[3] = {0};
+ u32 val;
+ int count, ret;
+
+ /* Missing property isn't a problem, but an invalid entry is */
+ if (!of_find_property(opp->np, "opp-microvolt", NULL))
+ return 0;
- if (list_empty(&dev_opp->opp_list)) {
- list_del_rcu(&dev_opp->node);
- call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
- _kfree_device_rcu);
+ count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+ if (count < 0) {
+ dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
+ __func__, count);
+ return count;
}
+
+ /* There can be one or three elements here */
+ if (count != 1 && count != 3) {
+ dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
+ __func__, count);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
+ count);
+ if (ret) {
+ dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
+ ret);
+ return -EINVAL;
+ }
+
+ opp->u_volt = microvolt[0];
+ opp->u_volt_min = microvolt[1];
+ opp->u_volt_max = microvolt[2];
+
+ if (!of_property_read_u32(opp->np, "opp-microamp", &val))
+ opp->u_amp = val;
+
+ return 0;
}
/**
- * dev_pm_opp_remove() - Remove an OPP from OPP list
+ * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
* @dev: device for which we do this operation
- * @freq: OPP to remove with matching 'freq'
+ * @np: device node
*
- * This function removes an opp from the opp list.
+ * This function adds an opp definition to the opp list and returns status. The
+ * opp can be controlled using dev_pm_opp_enable/disable functions and may be
+ * removed by dev_pm_opp_remove.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq is the same but volt is different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ * -EINVAL Failed parsing the OPP node
*/
-void dev_pm_opp_remove(struct device *dev, unsigned long freq)
+static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
- struct dev_pm_opp *opp;
struct device_opp *dev_opp;
- bool found = false;
+ struct dev_pm_opp *new_opp;
+ u64 rate;
+ u32 val;
+ int ret;
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
+ new_opp = _allocate_opp(dev, &dev_opp);
+ if (!new_opp) {
+ ret = -ENOMEM;
goto unlock;
+ }
- list_for_each_entry(opp, &dev_opp->opp_list, node) {
- if (opp->rate == freq) {
- found = true;
- break;
- }
+ ret = of_property_read_u64(np, "opp-hz", &rate);
+ if (ret < 0) {
+ dev_err(dev, "%s: opp-hz not found\n", __func__);
+ goto free_opp;
}
- if (!found) {
- dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
- __func__, freq);
- goto unlock;
+ /*
+ * Rate is defined as an unsigned long in clk API, and so casting
+ * explicitly to its type. Must be fixed once rate is 64 bit
+ * guaranteed in clk API.
+ */
+ new_opp->rate = (unsigned long)rate;
+ new_opp->turbo = of_property_read_bool(np, "turbo-mode");
+
+ new_opp->np = np;
+ new_opp->dynamic = false;
+ new_opp->available = true;
+
+ if (!of_property_read_u32(np, "clock-latency-ns", &val))
+ new_opp->clock_latency_ns = val;
+
+ ret = opp_parse_supplies(new_opp, dev);
+ if (ret)
+ goto free_opp;
+
+ ret = _opp_add(dev, new_opp, dev_opp);
+ if (ret)
+ goto free_opp;
+
+ /* OPP to select on device suspend */
+ if (of_property_read_bool(np, "opp-suspend")) {
+ if (dev_opp->suspend_opp)
+ dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
+ __func__, dev_opp->suspend_opp->rate,
+ new_opp->rate);
+ else
+ dev_opp->suspend_opp = new_opp;
}
- _opp_remove(dev_opp, opp);
+ if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
+ dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
+
+ mutex_unlock(&dev_opp_list_lock);
+
+ pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
+ __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
+ new_opp->u_volt_min, new_opp->u_volt_max,
+ new_opp->clock_latency_ns);
+
+ /*
+ * Notify the changes in the availability of the operable
+ * frequency/voltage list.
+ */
+ srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+ return 0;
+
+free_opp:
+ _opp_remove(dev_opp, new_opp, false);
unlock:
mutex_unlock(&dev_opp_list_lock);
+ return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+
+/**
+ * dev_pm_opp_add() - Add an OPP table entry from the given definitions
+ * @dev: device for which we do this operation
+ * @freq: Frequency in Hz for this OPP
+ * @u_volt: Voltage in uVolts for this OPP
+ *
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * dev_pm_opp_enable/disable functions.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq is the same but volt is different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ */
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+{
+ return _opp_add_v1(dev, freq, u_volt, true);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
/**
* _opp_set_availability() - helper to set the availability of an opp
@@ -675,7 +960,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
* share a common logic which is isolated here.
*
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
* successful.
*
* Locking: The internal device_opp and opp structures are RCU protected.
@@ -763,7 +1048,7 @@ unlock:
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
* successful.
*/
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
@@ -789,7 +1074,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
* successful.
*/
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
@@ -825,28 +1110,127 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
#ifdef CONFIG_OF
/**
- * of_init_opp_table() - Initialize opp table from device tree
+ * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
+ * entries
* @dev: device pointer used to lookup device OPPs.
*
- * Register the initial OPP table with the OPP library for given device.
+ * Free OPPs created using static entries present in DT.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
- *
- * Return:
- * 0 On success OR
- * Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
- * Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- * -ENODEV when 'operating-points' property is not found or is invalid data
- * in device node.
- * -ENODATA when empty 'operating-points' property is found
*/
-int of_init_opp_table(struct device *dev)
+void dev_pm_opp_of_remove_table(struct device *dev)
+{
+ struct device_opp *dev_opp;
+ struct dev_pm_opp *opp, *tmp;
+
+ /* Hold our list modification lock here */
+ mutex_lock(&dev_opp_list_lock);
+
+ /* Check for existing list for 'dev' */
+ dev_opp = _find_device_opp(dev);
+ if (IS_ERR(dev_opp)) {
+ int error = PTR_ERR(dev_opp);
+
+ if (error != -ENODEV)
+ WARN(1, "%s: dev_opp: %d\n",
+ IS_ERR_OR_NULL(dev) ?
+ "Invalid device" : dev_name(dev),
+ error);
+ goto unlock;
+ }
+
+ /* Find if dev_opp manages a single device */
+ if (list_is_singular(&dev_opp->dev_list)) {
+ /* Free static OPPs */
+ list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
+ if (!opp->dynamic)
+ _opp_remove(dev_opp, opp, true);
+ }
+ } else {
+ _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
+ }
+
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
+
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *_of_get_opp_desc_node(struct device *dev)
+{
+ /*
+ * TODO: Support for multiple OPP tables.
+ *
+ * There should be only ONE phandle present in "operating-points-v2"
+ * property.
+ */
+
+ return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
+}
+
+/* Initializes OPP tables based on new bindings */
+static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
+{
+ struct device_node *np;
+ struct device_opp *dev_opp;
+ int ret = 0, count = 0;
+
+ mutex_lock(&dev_opp_list_lock);
+
+ dev_opp = _managed_opp(opp_np);
+ if (dev_opp) {
+ /* OPPs are already managed */
+ if (!_add_list_dev(dev, dev_opp))
+ ret = -ENOMEM;
+ mutex_unlock(&dev_opp_list_lock);
+ return ret;
+ }
+ mutex_unlock(&dev_opp_list_lock);
+
+ /* We have opp-list node now, iterate over it and add OPPs */
+ for_each_available_child_of_node(opp_np, np) {
+ count++;
+
+ ret = _opp_add_static_v2(dev, np);
+ if (ret) {
+ dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+ ret);
+ goto free_table;
+ }
+ }
+
+ /* There should be one or more OPPs defined */
+ if (WARN_ON(!count))
+ return -ENOENT;
+
+ mutex_lock(&dev_opp_list_lock);
+
+ dev_opp = _find_device_opp(dev);
+ if (WARN_ON(IS_ERR(dev_opp))) {
+ ret = PTR_ERR(dev_opp);
+ mutex_unlock(&dev_opp_list_lock);
+ goto free_table;
+ }
+
+ dev_opp->np = opp_np;
+ dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+
+ mutex_unlock(&dev_opp_list_lock);
+
+ return 0;
+
+free_table:
+ dev_pm_opp_of_remove_table(dev);
+
+ return ret;
+}
+
+/* Initializes OPP tables based on old-deprecated bindings */
+static int _of_add_opp_table_v1(struct device *dev)
{
const struct property *prop;
const __be32 *val;
@@ -873,7 +1257,7 @@ int of_init_opp_table(struct device *dev)
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
- if (_opp_add_dynamic(dev, freq, volt, false))
+ if (_opp_add_v1(dev, freq, volt, false))
dev_warn(dev, "%s: Failed to add OPP %ld\n",
__func__, freq);
nr -= 2;
@@ -881,47 +1265,52 @@ int of_init_opp_table(struct device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(of_init_opp_table);
/**
- * of_free_opp_table() - Free OPP table entries created from static DT entries
+ * dev_pm_opp_of_add_table() - Initialize opp table from device tree
* @dev: device pointer used to lookup device OPPs.
*
- * Free OPPs created using static entries present in DT.
+ * Register the initial OPP table with the OPP library for given device.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq is the same but volt is different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ * -ENODEV when 'operating-points' property is not found or is invalid data
+ * in device node.
+ * -ENODATA when empty 'operating-points' property is found
+ * -EINVAL when invalid entries are found in opp-v2 table
*/
-void of_free_opp_table(struct device *dev)
+int dev_pm_opp_of_add_table(struct device *dev)
{
- struct device_opp *dev_opp;
- struct dev_pm_opp *opp, *tmp;
+ struct device_node *opp_np;
+ int ret;
- /* Check for existing list for 'dev' */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int error = PTR_ERR(dev_opp);
- if (error != -ENODEV)
- WARN(1, "%s: dev_opp: %d\n",
- IS_ERR_OR_NULL(dev) ?
- "Invalid device" : dev_name(dev),
- error);
- return;
+ /*
+ * OPPs have two versions of bindings now. The older one is deprecated,
+ * try for the new binding first.
+ */
+ opp_np = _of_get_opp_desc_node(dev);
+ if (!opp_np) {
+ /*
+ * Try old-deprecated bindings for backward compatibility with
+ * older dtbs.
+ */
+ return _of_add_opp_table_v1(dev);
}
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
-
- /* Free static OPPs */
- list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
- if (!opp->dynamic)
- _opp_remove(dev_opp, opp);
- }
+ ret = _of_add_opp_table_v2(dev, opp_np);
+ of_node_put(opp_np);
- mutex_unlock(&dev_opp_list_lock);
+ return ret;
}
-EXPORT_SYMBOL_GPL(of_free_opp_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
#endif
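
For context, a minimal usage sketch of the OPP registration interfaces above from a hypothetical consumer driver; the probe function name and the frequency/voltage pairs are illustrative assumptions, not part of this patch.

#include <linux/pm_opp.h>
#include <linux/platform_device.h>

static int example_opp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	/* Prefer static OPPs from DT: v2 "operating-points-v2" first, then v1 */
	ret = dev_pm_opp_of_add_table(dev);
	if (!ret)
		return 0;

	/* No usable DT table: register a couple of OPPs dynamically */
	ret = dev_pm_opp_add(dev, 800000000, 1000000);		/* 800 MHz @ 1.00 V */
	if (ret)
		return ret;

	return dev_pm_opp_add(dev, 1000000000, 1150000);	/* 1 GHz @ 1.15 V */
}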
diff --git a/kernel/drivers/base/power/opp/cpu.c b/kernel/drivers/base/power/opp/cpu.c
new file mode 100644
index 000000000..7b445e88a
--- /dev/null
+++ b/kernel/drivers/base/power/opp/cpu.c
@@ -0,0 +1,270 @@
+/*
+ * Generic OPP helper interface for CPU device
+ *
+ * Copyright (C) 2009-2014 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "opp.h"
+
+#ifdef CONFIG_CPU_FREQ
+
+/**
+ * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
+ * @dev: device for which we do this operation
+ * @table: Cpufreq table returned back to caller
+ *
+ * Generate a cpufreq table for a provided device; this assumes that the
+ * opp list is already initialized and ready for usage.
+ *
+ * This function allocates required memory for the cpufreq table. It is
+ * expected that the caller does the required maintenance such as freeing
+ * the table as required.
+ *
+ * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
+ * if no memory available for the operation (table is not populated), returns 0
+ * if successful and table is populated.
+ *
+ * WARNING: It is important for the callers to ensure refreshing their copy of
+ * the table if any of the mentioned functions have been invoked in the interim.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Since we just use the regular accessor functions to access the internal data
+ * structures, we use RCU read lock inside this function. As a result, users of
+ * this function DO NOT need to use explicit locks when invoking it.
+ */
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ struct dev_pm_opp *opp;
+ struct cpufreq_frequency_table *freq_table = NULL;
+ int i, max_opps, ret = 0;
+ unsigned long rate;
+
+ rcu_read_lock();
+
+ max_opps = dev_pm_opp_get_opp_count(dev);
+ if (max_opps <= 0) {
+ ret = max_opps ? max_opps : -ENODATA;
+ goto out;
+ }
+
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
+ if (!freq_table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0, rate = 0; i < max_opps; i++, rate++) {
+ /* find next rate */
+ opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto out;
+ }
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = rate / 1000;
+
+ /* Is this a boost/turbo OPP? */
+ if (dev_pm_opp_is_turbo(opp))
+ freq_table[i].flags = CPUFREQ_BOOST_FREQ;
+ }
+
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ *table = &freq_table[0];
+
+out:
+ rcu_read_unlock();
+ if (ret)
+ kfree(freq_table);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
+
+/**
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table
+ */
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
+#endif /* CONFIG_CPU_FREQ */
+
+/* Required only for V1 bindings, as v2 can manage it from DT itself */
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ struct device_list_opp *list_dev;
+ struct device_opp *dev_opp;
+ struct device *dev;
+ int cpu, ret = 0;
+
+ mutex_lock(&dev_opp_list_lock);
+
+ dev_opp = _find_device_opp(cpu_dev);
+ if (IS_ERR(dev_opp)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ for_each_cpu(cpu, cpumask) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ __func__, cpu);
+ continue;
+ }
+
+ list_dev = _add_list_dev(dev, dev_opp);
+ if (!list_dev) {
+ dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+ __func__, cpu);
+ continue;
+ }
+ }
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
+
+#ifdef CONFIG_OF
+void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
+{
+ struct device *cpu_dev;
+ int cpu;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ dev_pm_opp_of_remove_table(cpu_dev);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
+
+int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
+{
+ struct device *cpu_dev;
+ int cpu, ret = 0;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (ret) {
+ pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+ __func__, cpu, ret);
+
+ /* Free all other OPPs */
+ dev_pm_opp_of_cpumask_remove_table(cpumask);
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
+
+/*
+ * Works only for OPP v2 bindings.
+ *
+ * cpumask should be already set to mask of cpu_dev->id.
+ * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ */
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ struct device_node *np, *tmp_np;
+ struct device *tcpu_dev;
+ int cpu, ret = 0;
+
+ /* Get OPP descriptor node */
+ np = _of_get_opp_desc_node(cpu_dev);
+ if (!np) {
+ dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
+ return -ENOENT;
+ }
+
+ /* Are the OPPs shared? */
+ if (!of_property_read_bool(np, "opp-shared"))
+ goto put_cpu_node;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ tcpu_dev = get_cpu_device(cpu);
+ if (!tcpu_dev) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ __func__, cpu);
+ ret = -ENODEV;
+ goto put_cpu_node;
+ }
+
+ /* Get OPP descriptor node */
+ tmp_np = _of_get_opp_desc_node(tcpu_dev);
+ if (!tmp_np) {
+ dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
+ __func__);
+ ret = -ENOENT;
+ goto put_cpu_node;
+ }
+
+ /* CPUs are sharing opp node */
+ if (np == tmp_np)
+ cpumask_set_cpu(cpu, cpumask);
+
+ of_node_put(tmp_np);
+ }
+
+put_cpu_node:
+ of_node_put(np);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
+#endif
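
A hedged sketch of how the CPU helpers above could be combined by a cpufreq-style driver; the function name, the error handling, and the assumption that the caller later frees the table with dev_pm_opp_free_cpufreq_table() are illustrative.

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>

static int example_init_cpu_opps(unsigned int cpu,
				 struct cpufreq_frequency_table **table)
{
	struct device *cpu_dev = get_cpu_device(cpu);
	cpumask_var_t shared;
	int ret;

	if (!cpu_dev)
		return -ENODEV;
	if (!zalloc_cpumask_var(&shared, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu, shared);

	/* Extend the mask with CPUs sharing the table (v2 "opp-shared") */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, shared);
	if (ret && ret != -ENOENT)
		goto out;

	/* Parse the static OPPs for every CPU in the mask */
	ret = dev_pm_opp_of_cpumask_add_table(shared);
	if (ret)
		goto out;

	/* Build the cpufreq table; caller frees it with dev_pm_opp_free_cpufreq_table() */
	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, table);
out:
	free_cpumask_var(shared);
	return ret;
}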
diff --git a/kernel/drivers/base/power/opp/opp.h b/kernel/drivers/base/power/opp/opp.h
new file mode 100644
index 000000000..7366b2aa8
--- /dev/null
+++ b/kernel/drivers/base/power/opp/opp.h
@@ -0,0 +1,146 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DRIVER_OPP_H__
+#define __DRIVER_OPP_H__
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pm_opp.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+
+/* Lock to allow exclusive modification to the device and opp lists */
+extern struct mutex dev_opp_list_lock;
+
+/*
+ * Internal data structure organization with the OPP layer library is as
+ * follows:
+ * dev_opp_list (root)
+ * |- device 1 (represents voltage domain 1)
+ * | |- opp 1 (availability, freq, voltage)
+ * | |- opp 2 ..
+ * ... ...
+ * | `- opp n ..
+ * |- device 2 (represents the next voltage domain)
+ * ...
+ * `- device m (represents mth voltage domain)
+ * device 1, 2.. are represented by dev_opp structure while each opp
+ * is represented by the opp structure.
+ */
+
+/**
+ * struct dev_pm_opp - Generic OPP description structure
+ * @node: opp list node. The nodes are maintained throughout the lifetime
+ * of boot. It is expected only an optimal set of OPPs are
+ * added to the library by the SoC framework.
+ * RCU usage: opp list is traversed with RCU locks. node
+ * modification is possible realtime, hence the modifications
+ * are protected by the dev_opp_list_lock for integrity.
+ * IMPORTANT: the opp nodes should be maintained in increasing
+ * order.
+ * @dynamic: true if the OPP was not created from static DT entries, i.e. added at runtime.
+ * @available: true/false - marks whether this OPP is available or not
+ * @turbo: true if turbo (boost) OPP
+ * @rate: Frequency in hertz
+ * @u_volt: Target voltage in microvolts corresponding to this OPP
+ * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
+ * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
+ * @u_amp: Maximum current drawn by the device in microamperes
+ * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
+ * frequency from any other OPP's frequency.
+ * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @rcu_head: RCU callback head used for deferred freeing
+ * @np: OPP's device node.
+ *
+ * This structure stores the OPP information for a given device.
+ */
+struct dev_pm_opp {
+ struct list_head node;
+
+ bool available;
+ bool dynamic;
+ bool turbo;
+ unsigned long rate;
+
+ unsigned long u_volt;
+ unsigned long u_volt_min;
+ unsigned long u_volt_max;
+ unsigned long u_amp;
+ unsigned long clock_latency_ns;
+
+ struct device_opp *dev_opp;
+ struct rcu_head rcu_head;
+
+ struct device_node *np;
+};
+
+/**
+ * struct device_list_opp - devices managed by 'struct device_opp'
+ * @node: list node
+ * @dev: device to which the struct object belongs
+ * @rcu_head: RCU callback head used for deferred freeing
+ *
+ * This is an internal data structure maintaining the list of devices that are
+ * managed by 'struct device_opp'.
+ */
+struct device_list_opp {
+ struct list_head node;
+ const struct device *dev;
+ struct rcu_head rcu_head;
+};
+
+/**
+ * struct device_opp - Device opp structure
+ * @node: list node - contains the devices with OPPs that
+ * have been registered. Nodes once added are not modified in this
+ * list.
+ * RCU usage: nodes are not modified in the list of device_opp,
+ * however addition is possible and is secured by dev_opp_list_lock
+ * @srcu_head: notifier head to notify the OPP availability changes.
+ * @rcu_head: RCU callback head used for deferred freeing
+ * @dev_list: list of devices that share these OPPs
+ * @opp_list: list of opps
+ * @np: struct device_node pointer for opp's DT node.
+ * @shared_opp: OPP is shared between multiple devices.
+ *
+ * This is an internal data structure maintaining the link to opps attached to
+ * a device. This structure is not meant to be shared with users as it is
+ * meant for bookkeeping and is private to the OPP library.
+ *
+ * Because the opp structures can be used from both rcu and srcu readers, we
+ * need to wait for the grace period of both of them before freeing any
+ * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
+ */
+struct device_opp {
+ struct list_head node;
+
+ struct srcu_notifier_head srcu_head;
+ struct rcu_head rcu_head;
+ struct list_head dev_list;
+ struct list_head opp_list;
+
+ struct device_node *np;
+ unsigned long clock_latency_ns_max;
+ bool shared_opp;
+ struct dev_pm_opp *suspend_opp;
+};
+
+/* Routines internal to opp core */
+struct device_opp *_find_device_opp(struct device *dev);
+struct device_list_opp *_add_list_dev(const struct device *dev,
+ struct device_opp *dev_opp);
+struct device_node *_of_get_opp_desc_node(struct device *dev);
+
+#endif /* __DRIVER_OPP_H__ */
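
As a hedged illustration of the srcu_head notifier carried in struct device_opp, a consumer could watch for OPP list changes roughly along the following lines; the callback name and the logging are assumptions, and the locking pattern mirrors what existing users of dev_pm_opp_get_notifier() do.

#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static int example_opp_notify(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	if (event == OPP_EVENT_ADD || event == OPP_EVENT_REMOVE)
		pr_info("OPP list changed (event %lu)\n", event);
	return NOTIFY_OK;
}

static int example_register_opp_notifier(struct device *dev,
					 struct notifier_block *nb)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	nb->notifier_call = example_opp_notify;

	/* dev_pm_opp_get_notifier() must be called under rcu_read_lock() */
	rcu_read_lock();
	nh = dev_pm_opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (ret)
		return ret;

	return srcu_notifier_chain_register(nh, nb);
}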
diff --git a/kernel/drivers/base/power/power.h b/kernel/drivers/base/power/power.h
index b6b8a273c..998fa6b23 100644
--- a/kernel/drivers/base/power/power.h
+++ b/kernel/drivers/base/power/power.h
@@ -20,6 +20,46 @@ static inline void pm_runtime_early_init(struct device *dev)
extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
+struct wake_irq {
+ struct device *dev;
+ int irq;
+ bool dedicated_irq:1;
+};
+
+extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
+extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
+
+#ifdef CONFIG_PM_SLEEP
+
+extern int device_wakeup_attach_irq(struct device *dev,
+ struct wake_irq *wakeirq);
+extern void device_wakeup_detach_irq(struct device *dev);
+extern void device_wakeup_arm_wake_irqs(void);
+extern void device_wakeup_disarm_wake_irqs(void);
+
+#else
+
+static inline int
+device_wakeup_attach_irq(struct device *dev,
+ struct wake_irq *wakeirq)
+{
+ return 0;
+}
+
+static inline void device_wakeup_detach_irq(struct device *dev)
+{
+}
+
+static inline void device_wakeup_arm_wake_irqs(void)
+{
+}
+
+static inline void device_wakeup_disarm_wake_irqs(void)
+{
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
/*
* sysfs.c
*/
@@ -33,6 +73,8 @@ extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
extern int pm_qos_sysfs_add_flags(struct device *dev);
extern void pm_qos_sysfs_remove_flags(struct device *dev);
+extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
+extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
#else /* CONFIG_PM */
@@ -52,6 +94,14 @@ static inline void wakeup_sysfs_remove(struct device *dev) {}
static inline int pm_qos_sysfs_add(struct device *dev) { return 0; }
static inline void pm_qos_sysfs_remove(struct device *dev) {}
+static inline void dev_pm_arm_wake_irq(struct wake_irq *wirq)
+{
+}
+
+static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
+{
+}
+
#endif
#ifdef CONFIG_PM_SLEEP
diff --git a/kernel/drivers/base/power/qos.c b/kernel/drivers/base/power/qos.c
index e56d538d0..7f3646e45 100644
--- a/kernel/drivers/base/power/qos.c
+++ b/kernel/drivers/base/power/qos.c
@@ -883,3 +883,40 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
+
+/**
+ * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
+ * @dev: Device whose latency tolerance to expose
+ */
+int dev_pm_qos_expose_latency_tolerance(struct device *dev)
+{
+ int ret;
+
+ if (!dev->power.set_latency_tolerance)
+ return -EINVAL;
+
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+ ret = pm_qos_sysfs_add_latency_tolerance(dev);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
+
+/**
+ * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
+ * @dev: Device whose latency tolerance to hide
+ */
+void dev_pm_qos_hide_latency_tolerance(struct device *dev)
+{
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+ pm_qos_sysfs_remove_latency_tolerance(dev);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+ /* Remove the request from user space now */
+ pm_runtime_get_sync(dev);
+ dev_pm_qos_update_user_latency_tolerance(dev,
+ PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
+ pm_runtime_put(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
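
A brief sketch of how a driver with a programmable latency tolerance might use the two new helpers; the callback and function names are hypothetical, and the actual hardware programming is left out.

#include <linux/pm_qos.h>

/* Hypothetical callback invoked when user space writes a new tolerance */
static void example_set_latency_tolerance(struct device *dev, s32 val)
{
	/* val is in microseconds; PM_QOS_LATENCY_ANY means "no requirement" */
	/* ... program the device/firmware accordingly ... */
}

static int example_expose_latency(struct device *dev)
{
	dev->power.set_latency_tolerance = example_set_latency_tolerance;
	return dev_pm_qos_expose_latency_tolerance(dev);
}

static void example_hide_latency(struct device *dev)
{
	dev_pm_qos_hide_latency_tolerance(dev);
	dev->power.set_latency_tolerance = NULL;
}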
diff --git a/kernel/drivers/base/power/runtime.c b/kernel/drivers/base/power/runtime.c
index 5070c4fe8..e1a10a03d 100644
--- a/kernel/drivers/base/power/runtime.c
+++ b/kernel/drivers/base/power/runtime.c
@@ -10,6 +10,7 @@
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
#include "power.h"
@@ -514,6 +515,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+ dev_pm_enable_wake_irq(dev);
retval = rpm_callback(callback, dev);
if (retval)
goto fail;
@@ -552,6 +554,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
return retval;
fail:
+ dev_pm_disable_wake_irq(dev);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@@ -734,13 +737,16 @@ static int rpm_resume(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
+ dev_pm_disable_wake_irq(dev);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_cancel_pending(dev);
+ dev_pm_enable_wake_irq(dev);
} else {
no_callback:
__update_runtime_status(dev, RPM_ACTIVE);
+ pm_runtime_mark_last_busy(dev);
if (parent)
atomic_inc(&parent->power.child_count);
}
diff --git a/kernel/drivers/base/power/sysfs.c b/kernel/drivers/base/power/sysfs.c
index d2be3f9c2..a7b46798c 100644
--- a/kernel/drivers/base/power/sysfs.c
+++ b/kernel/drivers/base/power/sysfs.c
@@ -738,6 +738,17 @@ void pm_qos_sysfs_remove_flags(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}
+int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
+{
+ return sysfs_merge_group(&dev->kobj,
+ &pm_qos_latency_tolerance_attr_group);
+}
+
+void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
+}
+
void rpm_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
diff --git a/kernel/drivers/base/power/wakeirq.c b/kernel/drivers/base/power/wakeirq.c
new file mode 100644
index 000000000..0d77cd6fd
--- /dev/null
+++ b/kernel/drivers/base/power/wakeirq.c
@@ -0,0 +1,277 @@
+/*
+ * wakeirq.c - Device wakeirq helper functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+
+#include "power.h"
+
+/**
+ * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
+ * @dev: Device entry
+ * @irq: Device wake-up capable interrupt
+ * @wirq: Wake irq specific data
+ *
+ * Internal function to attach either a device IO interrupt or a
+ * dedicated wake-up interrupt as a wake IRQ.
+ */
+static int dev_pm_attach_wake_irq(struct device *dev, int irq,
+ struct wake_irq *wirq)
+{
+ unsigned long flags;
+ int err;
+
+ if (!dev || !wirq)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ if (dev_WARN_ONCE(dev, dev->power.wakeirq,
+ "wake irq already initialized\n")) {
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return -EEXIST;
+ }
+
+ err = device_wakeup_attach_irq(dev, wirq);
+ if (!err)
+ dev->power.wakeirq = wirq;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return err;
+}
+
+/**
+ * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
+ * automatically configured for wake-up from suspend based
+ * on the device specific sysfs wakeup entry. Typically called
+ * during driver probe after calling device_init_wakeup().
+ */
+int dev_pm_set_wake_irq(struct device *dev, int irq)
+{
+ struct wake_irq *wirq;
+ int err;
+
+ if (irq < 0)
+ return -EINVAL;
+
+ wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
+ if (!wirq)
+ return -ENOMEM;
+
+ wirq->dev = dev;
+ wirq->irq = irq;
+
+ err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ if (err)
+ kfree(wirq);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
+
+/**
+ * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
+ * @dev: Device entry
+ *
+ * Detach a device wake IRQ and free resources.
+ *
+ * Note that it's OK for drivers to call this without calling
+ * dev_pm_set_wake_irq() as all the driver instances may not have
+ * a wake IRQ configured. This avoids adding wake IRQ specific
+ * checks into the drivers.
+ */
+void dev_pm_clear_wake_irq(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+ unsigned long flags;
+
+ if (!wirq)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ device_wakeup_detach_irq(dev);
+ dev->power.wakeirq = NULL;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ if (wirq->dedicated_irq)
+ free_irq(wirq->irq, wirq);
+ kfree(wirq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
+
+/**
+ * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
+ * @irq: Device specific dedicated wake-up interrupt
+ * @_wirq: Wake IRQ data
+ *
+ * Some devices have a separate wake-up interrupt in addition to the
+ * device IO interrupt. The wake-up interrupt signals that a device
+ * should be woken up from its idle state. This handler uses device
+ * specific pm_runtime functions to wake the device, and then it's
+ * up to the device to do whatever it needs to. Note that as the
+ * device may need to restore context and start up regulators, we
+ * use a threaded IRQ.
+ *
+ * Also note that we are not resending the lost device interrupts.
+ * We assume that the wake-up interrupt just needs to wake up the
+ * device, and then the device's pm_runtime_resume() can deal with the
+ * situation.
+ */
+static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
+{
+ struct wake_irq *wirq = _wirq;
+ int res;
+
+ /* We don't want RPM_ASYNC or RPM_NOWAIT here */
+ res = pm_runtime_resume(wirq->dev);
+ if (res < 0)
+ dev_warn(wirq->dev,
+ "wake IRQ with no resume: %i\n", res);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has
+ * a dedicated wake-up interrupt in addition to the device IO
+ * interrupt.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+{
+ struct wake_irq *wirq;
+ int err;
+
+ if (irq < 0)
+ return -EINVAL;
+
+ wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
+ if (!wirq)
+ return -ENOMEM;
+
+ wirq->dev = dev;
+ wirq->irq = irq;
+ wirq->dedicated_irq = true;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ /*
+ * Consumer device may need to power up and restore state
+ * so we use a threaded irq.
+ */
+ err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
+ IRQF_ONESHOT, dev_name(dev), wirq);
+ if (err)
+ goto err_free;
+
+ err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ if (err)
+ goto err_free_irq;
+
+ return err;
+
+err_free_irq:
+ free_irq(irq, wirq);
+err_free:
+ kfree(wirq);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
+
+/**
+ * dev_pm_enable_wake_irq - Enable device wake-up interrupt
+ * @dev: Device
+ *
+ * Called from the bus code or the device driver for
+ * runtime_suspend() to enable the wake-up interrupt while
+ * the device is running.
+ *
+ * Note that for runtime_suspend() the wake-up interrupts
+ * should be unconditionally enabled unlike for suspend()
+ * that is conditional.
+ */
+void dev_pm_enable_wake_irq(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (wirq && wirq->dedicated_irq)
+ enable_irq(wirq->irq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
+
+/**
+ * dev_pm_disable_wake_irq - Disable device wake-up interrupt
+ * @dev: Device
+ *
+ * Called from the bus code or the device driver for
+ * runtime_resume() to disable the wake-up interrupt while
+ * the device is running.
+ */
+void dev_pm_disable_wake_irq(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (wirq && wirq->dedicated_irq)
+ disable_irq_nosync(wirq->irq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
+
+/**
+ * dev_pm_arm_wake_irq - Arm device wake-up
+ * @wirq: Device wake-up interrupt
+ *
+ * Sets up the wake-up event conditionally based on
+ * device_may_wakeup().
+ */
+void dev_pm_arm_wake_irq(struct wake_irq *wirq)
+{
+ if (!wirq)
+ return;
+
+ if (device_may_wakeup(wirq->dev))
+ enable_irq_wake(wirq->irq);
+}
+
+/**
+ * dev_pm_disarm_wake_irq - Disarm device wake-up
+ * @wirq: Device wake-up interrupt
+ *
+ * Clears the wake-up event conditionally based on
+ * device_may_wakeup().
+ */
+void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
+{
+ if (!wirq)
+ return;
+
+ if (device_may_wakeup(wirq->dev))
+ disable_irq_wake(wirq->irq);
+}
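
A minimal sketch of how a driver would hook these helpers up during probe and remove; the platform device, the IRQ indices and the function names are assumptions.

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>

static int example_wakeirq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int irq, wakeirq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	device_init_wakeup(dev, true);

	/* Use a dedicated wake-up line if the hardware provides one */
	wakeirq = platform_get_irq(pdev, 1);
	if (wakeirq > 0)
		ret = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
	else
		ret = dev_pm_set_wake_irq(dev, irq);

	if (ret)
		device_init_wakeup(dev, false);
	return ret;
}

static int example_wakeirq_remove(struct platform_device *pdev)
{
	dev_pm_clear_wake_irq(&pdev->dev);
	device_init_wakeup(&pdev->dev, false);
	return 0;
}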
diff --git a/kernel/drivers/base/power/wakeup.c b/kernel/drivers/base/power/wakeup.c
index 77262009f..a1e0b9ab8 100644
--- a/kernel/drivers/base/power/wakeup.c
+++ b/kernel/drivers/base/power/wakeup.c
@@ -14,6 +14,7 @@
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/pm_wakeirq.h>
#include <trace/events/power.h>
#include "power.h"
@@ -24,6 +25,9 @@
*/
bool events_check_enabled __read_mostly;
+/* First wakeup IRQ seen by the kernel in the last cycle. */
+unsigned int pm_wakeup_irq __read_mostly;
+
/* If set and the system is suspending, terminate the suspend. */
static bool pm_abort_suspend __read_mostly;
@@ -56,6 +60,11 @@ static LIST_HEAD(wakeup_sources);
static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
+static struct wakeup_source deleted_ws = {
+ .name = "deleted",
+ .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
+};
+
/**
* wakeup_source_prepare - Prepare a new wakeup source for initialization.
* @ws: Wakeup source to prepare.
@@ -85,7 +94,7 @@ struct wakeup_source *wakeup_source_create(const char *name)
if (!ws)
return NULL;
- wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
+ wakeup_source_prepare(ws, name ? kstrdup_const(name, GFP_KERNEL) : NULL);
return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);
@@ -107,6 +116,34 @@ void wakeup_source_drop(struct wakeup_source *ws)
}
EXPORT_SYMBOL_GPL(wakeup_source_drop);
+/*
+ * Record the statistics of a wakeup_source being deleted into a dummy wakeup_source.
+ */
+static void wakeup_source_record(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&deleted_ws.lock, flags);
+
+ if (ws->event_count) {
+ deleted_ws.total_time =
+ ktime_add(deleted_ws.total_time, ws->total_time);
+ deleted_ws.prevent_sleep_time =
+ ktime_add(deleted_ws.prevent_sleep_time,
+ ws->prevent_sleep_time);
+ deleted_ws.max_time =
+ ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
+ deleted_ws.max_time : ws->max_time;
+ deleted_ws.event_count += ws->event_count;
+ deleted_ws.active_count += ws->active_count;
+ deleted_ws.relax_count += ws->relax_count;
+ deleted_ws.expire_count += ws->expire_count;
+ deleted_ws.wakeup_count += ws->wakeup_count;
+ }
+
+ spin_unlock_irqrestore(&deleted_ws.lock, flags);
+}
+
/**
* wakeup_source_destroy - Destroy a struct wakeup_source object.
* @ws: Wakeup source to destroy.
@@ -119,7 +156,8 @@ void wakeup_source_destroy(struct wakeup_source *ws)
return;
wakeup_source_drop(ws);
- kfree(ws->name);
+ wakeup_source_record(ws);
+ kfree_const(ws->name);
kfree(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);
@@ -239,6 +277,86 @@ int device_wakeup_enable(struct device *dev)
EXPORT_SYMBOL_GPL(device_wakeup_enable);
/**
+ * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
+ * @dev: Device to handle
+ * @wakeirq: Device specific wakeirq entry
+ *
+ * Attach a device wakeirq to the wakeup source so the device
+ * wake IRQ can be configured automatically for suspend and
+ * resume.
+ *
+ * Call under the device's power.lock lock.
+ */
+int device_wakeup_attach_irq(struct device *dev,
+ struct wake_irq *wakeirq)
+{
+ struct wakeup_source *ws;
+
+ ws = dev->power.wakeup;
+ if (!ws) {
+ dev_err(dev, "forgot to call call device_init_wakeup?\n");
+ return -EINVAL;
+ }
+
+ if (ws->wakeirq)
+ return -EEXIST;
+
+ ws->wakeirq = wakeirq;
+ return 0;
+}
+
+/**
+ * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
+ * @dev: Device to handle
+ *
+ * Removes a device wakeirq from the wakeup source.
+ *
+ * Call under the device's power.lock lock.
+ */
+void device_wakeup_detach_irq(struct device *dev)
+{
+ struct wakeup_source *ws;
+
+ ws = dev->power.wakeup;
+ if (ws)
+ ws->wakeirq = NULL;
+}
+
+/**
+ * device_wakeup_arm_wake_irqs(void)
+ *
+ * Iterates over the list of device wakeirqs to arm them.
+ */
+void device_wakeup_arm_wake_irqs(void)
+{
+ struct wakeup_source *ws;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ if (ws->wakeirq)
+ dev_pm_arm_wake_irq(ws->wakeirq);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * device_wakeup_disarm_wake_irqs(void)
+ *
+ * Iterates over the list of device wakeirqs to disarm them.
+ */
+void device_wakeup_disarm_wake_irqs(void)
+{
+ struct wakeup_source *ws;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ if (ws->wakeirq)
+ dev_pm_disarm_wake_irq(ws->wakeirq);
+ }
+ rcu_read_unlock();
+}
+
+/**
* device_wakeup_detach - Detach a device's wakeup source object from it.
* @dev: Device to detach the wakeup source object from.
*
@@ -351,6 +469,20 @@ int device_set_wakeup_enable(struct device *dev, bool enable)
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
+/**
+ * wakeup_source_not_registered - validate the given wakeup source.
+ * @ws: Wakeup source to be validated.
+ */
+static bool wakeup_source_not_registered(struct wakeup_source *ws)
+{
+ /*
+ * Use timer struct to check if the given source is initialized
+ * by wakeup_source_add.
+ */
+ return ws->timer.function != pm_wakeup_timer_fn ||
+ ws->timer.data != (unsigned long)ws;
+}
+
/*
* The functions below use the observation that each wakeup event starts a
* period in which the system should not be suspended. The moment this period
@@ -391,6 +523,10 @@ static void wakeup_source_activate(struct wakeup_source *ws)
{
unsigned int cec;
+ if (WARN_ONCE(wakeup_source_not_registered(ws),
+ "unregistered wakeup source\n"))
+ return;
+
/*
* active wakeup source should bring the system
* out of PM_SUSPEND_FREEZE state
@@ -735,6 +871,15 @@ EXPORT_SYMBOL_GPL(pm_system_wakeup);
void pm_wakeup_clear(void)
{
pm_abort_suspend = false;
+ pm_wakeup_irq = 0;
+}
+
+void pm_system_irq_wakeup(unsigned int irq_number)
+{
+ if (pm_wakeup_irq == 0) {
+ pm_wakeup_irq = irq_number;
+ pm_system_wakeup();
+ }
}
/**
@@ -894,6 +1039,8 @@ static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
print_wakeup_source_stats(m, ws);
rcu_read_unlock();
+ print_wakeup_source_stats(m, &deleted_ws);
+
return 0;
}
diff --git a/kernel/drivers/base/property.c b/kernel/drivers/base/property.c
index 1d0b116ca..1325ff225 100644
--- a/kernel/drivers/base/property.c
+++ b/kernel/drivers/base/property.c
@@ -14,7 +14,10 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/property.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
/**
* device_add_property_set - Add a collection of properties to a device object.
@@ -26,9 +29,10 @@
*/
void device_add_property_set(struct device *dev, struct property_set *pset)
{
- if (pset)
- pset->fwnode.type = FWNODE_PDATA;
+ if (!pset)
+ return;
+ pset->fwnode.type = FWNODE_PDATA;
set_secondary_fwnode(dev, &pset->fwnode);
}
EXPORT_SYMBOL_GPL(device_add_property_set);
@@ -128,9 +132,9 @@ EXPORT_SYMBOL_GPL(device_property_present);
bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
{
if (is_of_node(fwnode))
- return of_property_read_bool(of_node(fwnode), propname);
+ return of_property_read_bool(to_of_node(fwnode), propname);
else if (is_acpi_node(fwnode))
- return !acpi_dev_prop_get(acpi_node(fwnode), propname, NULL);
+ return !acpi_node_prop_get(fwnode, propname, NULL);
return !!pset_prop_get(to_pset(fwnode), propname);
}
@@ -152,6 +156,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_present);
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u8_array(struct device *dev, const char *propname,
u8 *val, size_t nval)
@@ -176,6 +181,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u8_array);
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u16_array(struct device *dev, const char *propname,
u16 *val, size_t nval)
@@ -200,6 +206,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u16_array);
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u32_array(struct device *dev, const char *propname,
u32 *val, size_t nval)
@@ -224,6 +231,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u32_array);
* %-ENODATA if the property does not have a value,
* %-EPROTO if the property is not an array of numbers,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_u64_array(struct device *dev, const char *propname,
u64 *val, size_t nval)
@@ -248,6 +256,7 @@ EXPORT_SYMBOL_GPL(device_property_read_u64_array);
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property is not an array of strings,
* %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_string_array(struct device *dev, const char *propname,
const char **val, size_t nval)
@@ -269,6 +278,7 @@ EXPORT_SYMBOL_GPL(device_property_read_string_array);
* %-EINVAL if given arguments are not valid,
* %-ENODATA if the property does not have a value,
* %-EPROTO or %-EILSEQ if the property type is not a string.
+ * %-ENXIO if no suitable firmware interface is present.
*/
int device_property_read_string(struct device *dev, const char *propname,
const char **val)
@@ -277,6 +287,28 @@ int device_property_read_string(struct device *dev, const char *propname,
}
EXPORT_SYMBOL_GPL(device_property_read_string);
+/**
+ * device_property_match_string - find a string in an array and return index
+ * @dev: Device to get the property of
+ * @propname: Name of the property holding the array
+ * @string: String to look for
+ *
+ * Find a given string in a string array and, if it is found, return its
+ * index.
+ *
+ * Return: %0 if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of strings,
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_match_string(struct device *dev, const char *propname,
+ const char *string)
+{
+ return fwnode_property_match_string(dev_fwnode(dev), propname, string);
+}
+EXPORT_SYMBOL_GPL(device_property_match_string);
+
#define OF_DEV_PROP_READ_ARRAY(node, propname, type, val, nval) \
(val) ? of_property_read_##type##_array((node), (propname), (val), (nval)) \
: of_property_count_elems_of_size((node), (propname), sizeof(type))
@@ -285,14 +317,16 @@ EXPORT_SYMBOL_GPL(device_property_read_string);
({ \
int _ret_; \
if (is_of_node(_fwnode_)) \
- _ret_ = OF_DEV_PROP_READ_ARRAY(of_node(_fwnode_), _propname_, \
+ _ret_ = OF_DEV_PROP_READ_ARRAY(to_of_node(_fwnode_), _propname_, \
_type_, _val_, _nval_); \
else if (is_acpi_node(_fwnode_)) \
- _ret_ = acpi_dev_prop_read(acpi_node(_fwnode_), _propname_, \
- _proptype_, _val_, _nval_); \
- else \
+ _ret_ = acpi_node_prop_read(_fwnode_, _propname_, _proptype_, \
+ _val_, _nval_); \
+ else if (is_pset(_fwnode_)) \
_ret_ = pset_prop_read_array(to_pset(_fwnode_), _propname_, \
_proptype_, _val_, _nval_); \
+ else \
+ _ret_ = -ENXIO; \
_ret_; \
})
@@ -424,15 +458,16 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
{
if (is_of_node(fwnode))
return val ?
- of_property_read_string_array(of_node(fwnode), propname,
- val, nval) :
- of_property_count_strings(of_node(fwnode), propname);
+ of_property_read_string_array(to_of_node(fwnode),
+ propname, val, nval) :
+ of_property_count_strings(to_of_node(fwnode), propname);
else if (is_acpi_node(fwnode))
- return acpi_dev_prop_read(acpi_node(fwnode), propname,
- DEV_PROP_STRING, val, nval);
-
- return pset_prop_read_array(to_pset(fwnode), propname,
- DEV_PROP_STRING, val, nval);
+ return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING,
+ val, nval);
+ else if (is_pset(fwnode))
+ return pset_prop_read_array(to_pset(fwnode), propname,
+ DEV_PROP_STRING, val, nval);
+ return -ENXIO;
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
@@ -455,16 +490,63 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
const char *propname, const char **val)
{
if (is_of_node(fwnode))
- return of_property_read_string(of_node(fwnode), propname, val);
+ return of_property_read_string(to_of_node(fwnode), propname, val);
else if (is_acpi_node(fwnode))
- return acpi_dev_prop_read(acpi_node(fwnode), propname,
- DEV_PROP_STRING, val, 1);
+ return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING,
+ val, 1);
- return -ENXIO;
+ return pset_prop_read_array(to_pset(fwnode), propname,
+ DEV_PROP_STRING, val, 1);
}
EXPORT_SYMBOL_GPL(fwnode_property_read_string);
/**
+ * fwnode_property_match_string - find a string in an array and return index
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property holding the array
+ * @string: String to look for
+ *
+ * Find a given string in a string array and, if it is found, return its
+ * index.
+ *
+ * Return: %0 if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of strings,
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_match_string(struct fwnode_handle *fwnode,
+ const char *propname, const char *string)
+{
+ const char **values;
+ int nval, ret, i;
+
+ nval = fwnode_property_read_string_array(fwnode, propname, NULL, 0);
+ if (nval < 0)
+ return nval;
+
+ values = kcalloc(nval, sizeof(*values), GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ ret = fwnode_property_read_string_array(fwnode, propname, values, nval);
+ if (ret < 0)
+ goto out;
+
+ ret = -ENODATA;
+ for (i = 0; i < nval; i++) {
+ if (!strcmp(values[i], string)) {
+ ret = i;
+ break;
+ }
+ }
+out:
+ kfree(values);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fwnode_property_match_string);
+
+/**
* device_get_next_child_node - Return the next child node handle for a device
* @dev: Device to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
@@ -475,15 +557,11 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
struct device_node *node;
- node = of_get_next_available_child(dev->of_node, of_node(child));
+ node = of_get_next_available_child(dev->of_node, to_of_node(child));
if (node)
return &node->fwnode;
} else if (IS_ENABLED(CONFIG_ACPI)) {
- struct acpi_device *node;
-
- node = acpi_get_next_child(dev, acpi_node(child));
- if (node)
- return acpi_fwnode_handle(node);
+ return acpi_get_next_subnode(dev, child);
}
return NULL;
}
@@ -500,7 +578,7 @@ EXPORT_SYMBOL_GPL(device_get_next_child_node);
void fwnode_handle_put(struct fwnode_handle *fwnode)
{
if (is_of_node(fwnode))
- of_node_put(of_node(fwnode));
+ of_node_put(to_of_node(fwnode));
}
EXPORT_SYMBOL_GPL(fwnode_handle_put);
@@ -519,3 +597,110 @@ unsigned int device_get_child_node_count(struct device *dev)
return count;
}
EXPORT_SYMBOL_GPL(device_get_child_node_count);
+
+bool device_dma_supported(struct device *dev)
+{
+ /* For DT, this is always supported.
+ * For ACPI, this depends on CCA, which
+ * is determined by the acpi_dma_supported().
+ */
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node)
+ return true;
+
+ return acpi_dma_supported(ACPI_COMPANION(dev));
+}
+EXPORT_SYMBOL_GPL(device_dma_supported);
+
+enum dev_dma_attr device_get_dma_attr(struct device *dev)
+{
+ enum dev_dma_attr attr = DEV_DMA_NOT_SUPPORTED;
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ if (of_dma_is_coherent(dev->of_node))
+ attr = DEV_DMA_COHERENT;
+ else
+ attr = DEV_DMA_NON_COHERENT;
+ } else
+ attr = acpi_get_dma_attr(ACPI_COMPANION(dev));
+
+ return attr;
+}
+EXPORT_SYMBOL_GPL(device_get_dma_attr);
+
+/**
+ * device_get_phy_mode - Get phy mode for given device
+ * @dev: Pointer to the given device
+ *
+ * The function gets the phy interface string from the 'phy-mode' or
+ * 'phy-connection-type' property and returns its index in the phy_modes
+ * table, or a negative errno on error.
+ */
+int device_get_phy_mode(struct device *dev)
+{
+ const char *pm;
+ int err, i;
+
+ err = device_property_read_string(dev, "phy-mode", &pm);
+ if (err < 0)
+ err = device_property_read_string(dev,
+ "phy-connection-type", &pm);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+ if (!strcasecmp(pm, phy_modes(i)))
+ return i;
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(device_get_phy_mode);
+
+static void *device_get_mac_addr(struct device *dev,
+ const char *name, char *addr,
+ int alen)
+{
+ int ret = device_property_read_u8_array(dev, name, addr, alen);
+
+ if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
+ return addr;
+ return NULL;
+}
+
+/**
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev: Pointer to the device
+ * @addr: Address of buffer to store the MAC in
+ * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
+ *
+ * Search the firmware node for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain the "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address. If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the firmware tables, but were not updated by the firmware. For
+ * example, the DTS could define 'mac-address' and 'local-mac-address', with
+ * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'.
+ * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
+ * exists but is all zeros.
+ */
+void *device_get_mac_address(struct device *dev, char *addr, int alen)
+{
+ char *res;
+
+ res = device_get_mac_addr(dev, "mac-address", addr, alen);
+ if (res)
+ return res;
+
+ res = device_get_mac_addr(dev, "local-mac-address", addr, alen);
+ if (res)
+ return res;
+
+ return device_get_mac_addr(dev, "address", addr, alen);
+}
+EXPORT_SYMBOL(device_get_mac_address);
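
A minimal sketch, assuming a driver with a struct net_device to fill in (names hypothetical), of how the lookup order described above is consumed:

#include <linux/etherdevice.h>
#include <linux/property.h>

static void example_set_mac(struct device *dev, struct net_device *ndev)
{
	char addr[ETH_ALEN];

	if (device_get_mac_address(dev, addr, ETH_ALEN))
		ether_addr_copy(ndev->dev_addr, addr);	/* firmware-provided MAC */
	else
		eth_hw_addr_random(ndev);		/* no usable property found */
}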
diff --git a/kernel/drivers/base/regmap/internal.h b/kernel/drivers/base/regmap/internal.h
index a13587b5c..3df977054 100644
--- a/kernel/drivers/base/regmap/internal.h
+++ b/kernel/drivers/base/regmap/internal.h
@@ -59,6 +59,7 @@ struct regmap {
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
+ gfp_t alloc_flags;
struct device *dev; /* Device we do I/O on */
void *work_buf; /* Scratch buffer used to format I/O */
@@ -98,6 +99,8 @@ struct regmap {
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+ int (*reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
bool defer_caching;
@@ -122,25 +125,34 @@ struct regmap {
unsigned int num_reg_defaults_raw;
/* if set, only the cache is modified not the HW */
- u32 cache_only;
+ bool cache_only;
/* if set, only the HW is modified not the cache */
- u32 cache_bypass;
+ bool cache_bypass;
/* if set, remember to free reg_defaults_raw */
bool cache_free;
struct reg_default *reg_defaults;
const void *reg_defaults_raw;
void *cache;
- u32 cache_dirty;
+ /* if set, the cache contains newer data than the HW */
+ bool cache_dirty;
+ /* if set, the HW registers are known to match map->reg_defaults */
+ bool no_sync_defaults;
- struct reg_default *patch;
+ struct reg_sequence *patch;
int patch_regs;
- /* if set, converts bulk rw to single rw */
- bool use_single_rw;
+ /* if set, converts bulk read to single read */
+ bool use_single_read;
+	/* if set, converts bulk write to single write */
+ bool use_single_write;
/* if set, the device supports multi write mode */
bool can_multi_write;
+ /* if set, raw reads/writes are limited to this size */
+ size_t max_raw_read;
+ size_t max_raw_write;
+
struct rb_root range_tree;
void *selector_work_buf; /* Scratch buffer used for selector */
};
diff --git a/kernel/drivers/base/regmap/regcache-lzo.c b/kernel/drivers/base/regmap/regcache-lzo.c
index 2d53f6f13..736e0d378 100644
--- a/kernel/drivers/base/regmap/regcache-lzo.c
+++ b/kernel/drivers/base/regmap/regcache-lzo.c
@@ -355,9 +355,9 @@ static int regcache_lzo_sync(struct regmap *map, unsigned int min,
if (ret > 0 && val == map->reg_defaults[ret].def)
continue;
- map->cache_bypass = 1;
+ map->cache_bypass = true;
ret = _regmap_write(map, i, val);
- map->cache_bypass = 0;
+ map->cache_bypass = false;
if (ret)
return ret;
dev_dbg(map->dev, "Synced register %#x, value %#x\n",
diff --git a/kernel/drivers/base/regmap/regcache.c b/kernel/drivers/base/regmap/regcache.c
index 7eb7b3b98..4c0780298 100644
--- a/kernel/drivers/base/regmap/regcache.c
+++ b/kernel/drivers/base/regmap/regcache.c
@@ -54,11 +54,11 @@ static int regcache_hw_init(struct regmap *map)
return -ENOMEM;
if (!map->reg_defaults_raw) {
- u32 cache_bypass = map->cache_bypass;
+ bool cache_bypass = map->cache_bypass;
dev_warn(map->dev, "No cache defaults, reading back from HW\n");
 	/* Bypass the cache access till data read from HW */
- map->cache_bypass = 1;
+ map->cache_bypass = true;
tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
if (!tmp_buf) {
ret = -ENOMEM;
@@ -249,6 +249,22 @@ int regcache_write(struct regmap *map,
return 0;
}
+static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+
+ /* If we don't know the chip just got reset, then sync everything. */
+ if (!map->no_sync_defaults)
+ return true;
+
+ /* Is this the hardware default? If so skip. */
+ ret = regcache_lookup_reg(map, reg);
+ if (ret >= 0 && val == map->reg_defaults[ret].def)
+ return false;
+ return true;
+}
+
static int regcache_default_sync(struct regmap *map, unsigned int min,
unsigned int max)
{
@@ -266,14 +282,12 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
if (ret)
return ret;
- /* Is this the hardware default? If so skip. */
- ret = regcache_lookup_reg(map, reg);
- if (ret >= 0 && val == map->reg_defaults[ret].def)
+ if (!regcache_reg_needs_sync(map, reg, val))
continue;
- map->cache_bypass = 1;
+ map->cache_bypass = true;
ret = _regmap_write(map, reg, val);
- map->cache_bypass = 0;
+ map->cache_bypass = false;
if (ret) {
dev_err(map->dev, "Unable to sync register %#x. %d\n",
reg, ret);
@@ -301,7 +315,7 @@ int regcache_sync(struct regmap *map)
int ret = 0;
unsigned int i;
const char *name;
- unsigned int bypass;
+ bool bypass;
BUG_ON(!map->cache_ops);
@@ -319,7 +333,7 @@ int regcache_sync(struct regmap *map)
map->async = true;
/* Apply any patch first */
- map->cache_bypass = 1;
+ map->cache_bypass = true;
for (i = 0; i < map->patch_regs; i++) {
ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
if (ret != 0) {
@@ -328,7 +342,7 @@ int regcache_sync(struct regmap *map)
goto out;
}
}
- map->cache_bypass = 0;
+ map->cache_bypass = false;
if (map->cache_ops->sync)
ret = map->cache_ops->sync(map, 0, map->max_register);
@@ -342,6 +356,7 @@ out:
/* Restore the bypass state */
map->async = false;
map->cache_bypass = bypass;
+ map->no_sync_defaults = false;
map->unlock(map->lock_arg);
regmap_async_complete(map);
@@ -369,7 +384,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
{
int ret = 0;
const char *name;
- unsigned int bypass;
+ bool bypass;
BUG_ON(!map->cache_ops);
@@ -397,6 +412,7 @@ out:
/* Restore the bypass state */
map->cache_bypass = bypass;
map->async = false;
+ map->no_sync_defaults = false;
map->unlock(map->lock_arg);
regmap_async_complete(map);
@@ -461,18 +477,23 @@ void regcache_cache_only(struct regmap *map, bool enable)
EXPORT_SYMBOL_GPL(regcache_cache_only);
/**
- * regcache_mark_dirty: Mark the register cache as dirty
+	if (map->max_raw_write && map->max_raw_write < val_len)
*
* @map: map to mark
*
- * Mark the register cache as dirty, for example due to the device
- * having been powered down for suspend. If the cache is not marked
- * as dirty then the cache sync will be suppressed.
+ * Inform regcache that the device has been powered down or reset, so that
+ * on resume, regcache_sync() knows to write out all non-default values
+ * stored in the cache.
+ *
+ * If this function is not called, regcache_sync() will assume that
+ * the hardware state still matches the cache state, modulo any writes that
+ * happened when cache_only was true.
*/
void regcache_mark_dirty(struct regmap *map)
{
map->lock(map->lock_arg);
map->cache_dirty = true;
+ map->no_sync_defaults = true;
map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
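
The intended call pattern around a power cycle, as a sketch with hypothetical driver private data:

#include <linux/device.h>
#include <linux/regmap.h>

struct example_priv {
	struct regmap *regmap;
};

static int example_runtime_suspend(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	regcache_cache_only(priv->regmap, true);
	regcache_mark_dirty(priv->regmap);	/* HW will come back at defaults */
	/* ... power off the device ... */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	/* ... power the device back on ... */
	regcache_cache_only(priv->regmap, false);
	return regcache_sync(priv->regmap);	/* writes only non-default values */
}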
@@ -613,17 +634,14 @@ static int regcache_sync_block_single(struct regmap *map, void *block,
continue;
val = regcache_get_val(map, block, i);
-
- /* Is this the hardware default? If so skip. */
- ret = regcache_lookup_reg(map, regtmp);
- if (ret >= 0 && val == map->reg_defaults[ret].def)
+ if (!regcache_reg_needs_sync(map, regtmp, val))
continue;
- map->cache_bypass = 1;
+ map->cache_bypass = true;
ret = _regmap_write(map, regtmp, val);
- map->cache_bypass = 0;
+ map->cache_bypass = false;
if (ret != 0) {
dev_err(map->dev, "Unable to sync register %#x. %d\n",
regtmp, ret);
@@ -650,14 +668,14 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
count * val_bytes, count, base, cur - map->reg_stride);
- map->cache_bypass = 1;
+ map->cache_bypass = true;
ret = _regmap_raw_write(map, base, *data, count * val_bytes);
if (ret)
dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
base, cur - map->reg_stride, ret);
- map->cache_bypass = 0;
+ map->cache_bypass = false;
*data = NULL;
@@ -688,10 +706,7 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
}
val = regcache_get_val(map, block, i);
-
- /* Is this the hardware default? If so skip. */
- ret = regcache_lookup_reg(map, regtmp);
- if (ret >= 0 && val == map->reg_defaults[ret].def) {
+ if (!regcache_reg_needs_sync(map, regtmp, val)) {
ret = regcache_sync_block_raw_flush(map, &data,
base, regtmp);
if (ret != 0)
@@ -714,7 +729,7 @@ int regcache_sync_block(struct regmap *map, void *block,
unsigned int block_base, unsigned int start,
unsigned int end)
{
- if (regmap_can_raw_write(map) && !map->use_single_rw)
+ if (regmap_can_raw_write(map) && !map->use_single_write)
return regcache_sync_block_raw(map, block, cache_present,
block_base, start, end);
else
diff --git a/kernel/drivers/base/regmap/regmap-ac97.c b/kernel/drivers/base/regmap/regmap-ac97.c
index 8d304e2a9..c03ebfd4c 100644
--- a/kernel/drivers/base/regmap/regmap-ac97.c
+++ b/kernel/drivers/base/regmap/regmap-ac97.c
@@ -78,37 +78,24 @@ static const struct regmap_bus ac97_regmap_bus = {
.reg_read = regmap_ac97_reg_read,
};
-/**
- * regmap_init_ac97(): Initialise AC'97 register map
- *
- * @ac97: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
- const struct regmap_config *config)
+struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
+ return __regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_ac97);
+EXPORT_SYMBOL_GPL(__regmap_init_ac97);
-/**
- * devm_regmap_init_ac97(): Initialise AC'97 register map
- *
- * @ac97: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
+ return __devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_ac97);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_ac97);
MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/base/regmap/regmap-debugfs.c b/kernel/drivers/base/regmap/regmap-debugfs.c
index 5799a0b9e..3f0a7e262 100644
--- a/kernel/drivers/base/regmap/regmap-debugfs.c
+++ b/kernel/drivers/base/regmap/regmap-debugfs.c
@@ -30,10 +30,9 @@ static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);
/* Calculate the length of a fixed format */
-static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
+static size_t regmap_calc_reg_len(int max_val)
{
- snprintf(buf, buf_size, "%x", max_val);
- return strlen(buf);
+ return snprintf(NULL, 0, "%x", max_val);
}
static ssize_t regmap_name_read_file(struct file *file,
@@ -174,8 +173,7 @@ static inline void regmap_calc_tot_len(struct regmap *map,
{
/* Calculate the length of a fixed format */
if (!map->debugfs_tot_len) {
- map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
- buf, count);
+		map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
map->debugfs_val_len = 2 * map->format.val_bytes;
map->debugfs_tot_len = map->debugfs_reg_len +
map->debugfs_val_len + 3; /* : \n */
@@ -339,6 +337,7 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
char *buf;
char *entry;
int ret;
+ unsigned entry_len;
if (*ppos < 0 || !count)
return -EINVAL;
@@ -366,18 +365,15 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
p = 0;
mutex_lock(&map->cache_lock);
list_for_each_entry(c, &map->debugfs_off_cache, list) {
- snprintf(entry, PAGE_SIZE, "%x-%x",
- c->base_reg, c->max_reg);
+ entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
+ c->base_reg, c->max_reg);
if (p >= *ppos) {
- if (buf_pos + 1 + strlen(entry) > count)
+ if (buf_pos + entry_len > count)
break;
- snprintf(buf + buf_pos, count - buf_pos,
- "%s", entry);
- buf_pos += strlen(entry);
- buf[buf_pos] = '\n';
- buf_pos++;
+ memcpy(buf + buf_pos, entry, entry_len);
+ buf_pos += entry_len;
}
- p += strlen(entry) + 1;
+ p += entry_len;
}
mutex_unlock(&map->cache_lock);
@@ -421,7 +417,7 @@ static ssize_t regmap_access_read_file(struct file *file,
return -ENOMEM;
/* Calculate the length of a fixed format */
- reg_len = regmap_calc_reg_len(map->max_register, buf, count);
+ reg_len = regmap_calc_reg_len(map->max_register);
tot_len = reg_len + 10; /* ': R W V P\n' */
for (i = 0; i <= map->max_register; i += map->reg_stride) {
@@ -432,7 +428,7 @@ static ssize_t regmap_access_read_file(struct file *file,
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
- if (buf_pos >= count - 1 - tot_len)
+ if (buf_pos + tot_len + 1 >= count)
break;
/* Format the register */
@@ -469,6 +465,87 @@ static const struct file_operations regmap_access_fops = {
.llseek = default_llseek,
};
+static ssize_t regmap_cache_only_write_file(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap *map = container_of(file->private_data,
+ struct regmap, cache_only);
+ ssize_t result;
+ bool was_enabled, require_sync = false;
+ int err;
+
+ map->lock(map->lock_arg);
+
+ was_enabled = map->cache_only;
+
+ result = debugfs_write_file_bool(file, user_buf, count, ppos);
+ if (result < 0) {
+ map->unlock(map->lock_arg);
+ return result;
+ }
+
+ if (map->cache_only && !was_enabled) {
+ dev_warn(map->dev, "debugfs cache_only=Y forced\n");
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ } else if (!map->cache_only && was_enabled) {
+ dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
+ require_sync = true;
+ }
+
+ map->unlock(map->lock_arg);
+
+ if (require_sync) {
+ err = regcache_sync(map);
+ if (err)
+ dev_err(map->dev, "Failed to sync cache %d\n", err);
+ }
+
+ return result;
+}
+
+static const struct file_operations regmap_cache_only_fops = {
+ .open = simple_open,
+ .read = debugfs_read_file_bool,
+ .write = regmap_cache_only_write_file,
+};
+
+static ssize_t regmap_cache_bypass_write_file(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap *map = container_of(file->private_data,
+ struct regmap, cache_bypass);
+ ssize_t result;
+ bool was_enabled;
+
+ map->lock(map->lock_arg);
+
+ was_enabled = map->cache_bypass;
+
+ result = debugfs_write_file_bool(file, user_buf, count, ppos);
+ if (result < 0)
+ goto out;
+
+ if (map->cache_bypass && !was_enabled) {
+ dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ } else if (!map->cache_bypass && was_enabled) {
+ dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
+ }
+
+out:
+ map->unlock(map->lock_arg);
+
+ return result;
+}
+
+static const struct file_operations regmap_cache_bypass_fops = {
+ .open = simple_open,
+ .read = debugfs_read_file_bool,
+ .write = regmap_cache_bypass_write_file,
+};
+
void regmap_debugfs_init(struct regmap *map, const char *name)
{
struct rb_node *next;
@@ -518,10 +595,11 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
if (map->max_register || regmap_readable(map, 0)) {
umode_t registers_mode;
- if (IS_ENABLED(REGMAP_ALLOW_WRITE_DEBUGFS))
- registers_mode = 0600;
- else
- registers_mode = 0400;
+#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
+ registers_mode = 0600;
+#else
+ registers_mode = 0400;
+#endif
debugfs_create_file("registers", registers_mode, map->debugfs,
map, &regmap_map_fops);
@@ -530,12 +608,13 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
}
if (map->cache_type) {
- debugfs_create_bool("cache_only", 0400, map->debugfs,
- &map->cache_only);
+ debugfs_create_file("cache_only", 0600, map->debugfs,
+ &map->cache_only, &regmap_cache_only_fops);
debugfs_create_bool("cache_dirty", 0400, map->debugfs,
&map->cache_dirty);
- debugfs_create_bool("cache_bypass", 0400, map->debugfs,
- &map->cache_bypass);
+ debugfs_create_file("cache_bypass", 0600, map->debugfs,
+ &map->cache_bypass,
+ &regmap_cache_bypass_fops);
}
next = rb_first(&map->range_tree);
diff --git a/kernel/drivers/base/regmap/regmap-i2c.c b/kernel/drivers/base/regmap/regmap-i2c.c
index 4b76e3311..1a8ec3b2b 100644
--- a/kernel/drivers/base/regmap/regmap-i2c.c
+++ b/kernel/drivers/base/regmap/regmap-i2c.c
@@ -209,11 +209,60 @@ static struct regmap_bus regmap_i2c = {
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
+static int regmap_i2c_smbus_i2c_write(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (count < 1)
+ return -EINVAL;
+ if (count >= I2C_SMBUS_BLOCK_MAX)
+ return -E2BIG;
+
+ --count;
+ return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
+ ((u8 *)data + 1));
+}
+
+static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
+ size_t reg_size, void *val,
+ size_t val_size)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ if (reg_size != 1 || val_size < 1)
+ return -EINVAL;
+ if (val_size >= I2C_SMBUS_BLOCK_MAX)
+ return -E2BIG;
+
+ ret = i2c_smbus_read_i2c_block_data(i2c, ((u8 *)reg)[0], val_size, val);
+ if (ret == val_size)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+ .write = regmap_i2c_smbus_i2c_write,
+ .read = regmap_i2c_smbus_i2c_read,
+ .max_raw_read = I2C_SMBUS_BLOCK_MAX,
+ .max_raw_write = I2C_SMBUS_BLOCK_MAX,
+};
+
static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
const struct regmap_config *config)
{
if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C))
return &regmap_i2c;
+ else if (config->reg_bits == 8 &&
+ i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return &regmap_i2c_smbus_i2c_block;
else if (config->val_bits == 16 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_WORD_DATA))
@@ -233,47 +282,34 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
return ERR_PTR(-ENOTSUPP);
}
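
A configuration that would select the new SMBus I2C-block fallback on adapters lacking I2C_FUNC_I2C, sketched with a hypothetical device (8-bit registers are required for this path):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config example_config = {
	.reg_bits = 8,	/* required for the I2C_FUNC_SMBUS_I2C_BLOCK fallback */
	.val_bits = 8,
	.max_register = 0xff,
};

/* Hypothetical probe helper. */
static int example_setup_regmap(struct i2c_client *i2c)
{
	struct regmap *map = devm_regmap_init_i2c(i2c, &example_config);

	return PTR_ERR_OR_ZERO(map);
}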
-/**
- * regmap_init_i2c(): Initialise register map
- *
- * @i2c: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config)
+struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
- return regmap_init(&i2c->dev, bus, &i2c->dev, config);
+ return __regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_i2c);
+EXPORT_SYMBOL_GPL(__regmap_init_i2c);
-/**
- * devm_regmap_init_i2c(): Initialise managed register map
- *
- * @i2c: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
if (IS_ERR(bus))
return ERR_CAST(bus);
- return devm_regmap_init(&i2c->dev, bus, &i2c->dev, config);
+ return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_i2c);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_i2c);
MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/base/regmap/regmap-irq.c b/kernel/drivers/base/regmap/regmap-irq.c
index a6c3f75b4..8d16db533 100644
--- a/kernel/drivers/base/regmap/regmap-irq.c
+++ b/kernel/drivers/base/regmap/regmap-irq.c
@@ -63,6 +63,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
struct regmap *map = d->map;
int i, ret;
u32 reg;
+ u32 unmask_offset;
if (d->chip->runtime_pm) {
ret = pm_runtime_get_sync(map->dev);
@@ -79,12 +80,28 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
for (i = 0; i < d->chip->num_regs; i++) {
reg = d->chip->mask_base +
(i * map->reg_stride * d->irq_reg_stride);
- if (d->chip->mask_invert)
+ if (d->chip->mask_invert) {
ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i], ~d->mask_buf[i]);
- else
+ } else if (d->chip->unmask_base) {
+ /* set mask with mask_base register */
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i], ~d->mask_buf[i]);
+ if (ret < 0)
+ dev_err(d->map->dev,
+ "Failed to sync unmasks in %x\n",
+ reg);
+ unmask_offset = d->chip->unmask_base -
+ d->chip->mask_base;
+ /* clear mask with unmask_base register */
+ ret = regmap_update_bits(d->map,
+ reg + unmask_offset,
+ d->mask_buf_def[i],
+ d->mask_buf[i]);
+ } else {
ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i], d->mask_buf[i]);
+ }
if (ret != 0)
dev_err(d->map->dev, "Failed to sync masks in %x\n",
reg);
@@ -109,14 +126,18 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
if (!d->chip->init_ack_masked)
continue;
/*
- * Ack all the masked interrupts uncondictionly,
+ * Ack all the masked interrupts unconditionally,
 	 * OR if there is a masked interrupt which hasn't been acked,
 	 * it'll be ignored in the irq handler and may introduce an irq storm
*/
if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
reg = d->chip->ack_base +
(i * map->reg_stride * d->irq_reg_stride);
- ret = regmap_write(map, reg, d->mask_buf[i]);
+			/* some chips ack by writing 0 */
+ if (d->chip->ack_invert)
+ ret = regmap_write(map, reg, ~d->mask_buf[i]);
+ else
+ ret = regmap_write(map, reg, d->mask_buf[i]);
if (ret != 0)
dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
@@ -209,7 +230,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
* Read in the statuses, using a single bulk read if possible
* in order to reduce the I/O overheads.
*/
- if (!map->use_single_rw && map->reg_stride == 1 &&
+ if (!map->use_single_read && map->reg_stride == 1 &&
data->irq_reg_stride == 1) {
u8 *buf8 = data->status_reg_buf;
u16 *buf16 = data->status_reg_buf;
@@ -306,19 +327,12 @@ static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, data);
irq_set_chip(virq, &data->irq_chip);
irq_set_nested_thread(virq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
irq_set_noprobe(virq);
-#endif
return 0;
}
-static struct irq_domain_ops regmap_domain_ops = {
+static const struct irq_domain_ops regmap_domain_ops = {
.map = regmap_irq_map,
.xlate = irq_domain_xlate_twocell,
};
@@ -346,6 +360,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
int i;
int ret = -ENOMEM;
u32 reg;
+ u32 unmask_offset;
if (chip->num_regs <= 0)
return -EINVAL;
@@ -405,7 +420,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
else
d->irq_reg_stride = 1;
- if (!map->use_single_rw && map->reg_stride == 1 &&
+ if (!map->use_single_read && map->reg_stride == 1 &&
d->irq_reg_stride == 1) {
d->status_reg_buf = kmalloc(map->format.val_bytes *
chip->num_regs, GFP_KERNEL);
@@ -427,7 +442,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (chip->mask_invert)
ret = regmap_update_bits(map, reg,
d->mask_buf[i], ~d->mask_buf[i]);
- else
+ else if (d->chip->unmask_base) {
+ unmask_offset = d->chip->unmask_base -
+ d->chip->mask_base;
+ ret = regmap_update_bits(d->map,
+ reg + unmask_offset,
+ d->mask_buf[i],
+ d->mask_buf[i]);
+ } else
ret = regmap_update_bits(map, reg,
d->mask_buf[i], d->mask_buf[i]);
if (ret != 0) {
@@ -452,7 +474,11 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
reg = chip->ack_base +
(i * map->reg_stride * d->irq_reg_stride);
- ret = regmap_write(map, reg,
+ if (chip->ack_invert)
+ ret = regmap_write(map, reg,
+ ~(d->status_buf[i] & d->mask_buf[i]));
+ else
+ ret = regmap_write(map, reg,
d->status_buf[i] & d->mask_buf[i]);
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
diff --git a/kernel/drivers/base/regmap/regmap-mmio.c b/kernel/drivers/base/regmap/regmap-mmio.c
index 04a329a37..426a57e41 100644
--- a/kernel/drivers/base/regmap/regmap-mmio.c
+++ b/kernel/drivers/base/regmap/regmap-mmio.c
@@ -296,20 +296,11 @@ err_free:
return ERR_PTR(ret);
}
-/**
- * regmap_init_mmio_clk(): Initialise register map with register clock
- *
- * @dev: Device that will be interacted with
- * @clk_id: register clock consumer ID
- * @regs: Pointer to memory-mapped IO region
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
- void __iomem *regs,
- const struct regmap_config *config)
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
struct regmap_mmio_context *ctx;
@@ -317,25 +308,17 @@ struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
if (IS_ERR(ctx))
return ERR_CAST(ctx);
- return regmap_init(dev, &regmap_mmio, ctx, config);
+ return __regmap_init(dev, &regmap_mmio, ctx, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_mmio_clk);
-
-/**
- * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
- *
- * @dev: Device that will be interacted with
- * @clk_id: register clock consumer ID
- * @regs: Pointer to memory-mapped IO region
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
- void __iomem *regs,
- const struct regmap_config *config)
+EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk);
+
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
struct regmap_mmio_context *ctx;
@@ -343,8 +326,9 @@ struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
if (IS_ERR(ctx))
return ERR_CAST(ctx);
- return devm_regmap_init(dev, &regmap_mmio, ctx, config);
+ return __devm_regmap_init(dev, &regmap_mmio, ctx, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_mmio_clk);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);
MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/base/regmap/regmap-spi.c b/kernel/drivers/base/regmap/regmap-spi.c
index 53d1148e8..edd9a839d 100644
--- a/kernel/drivers/base/regmap/regmap-spi.c
+++ b/kernel/drivers/base/regmap/regmap-spi.c
@@ -113,37 +113,24 @@ static struct regmap_bus regmap_spi = {
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
-/**
- * regmap_init_spi(): Initialise register map
- *
- * @spi: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_spi(struct spi_device *spi,
- const struct regmap_config *config)
+struct regmap *__regmap_init_spi(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return regmap_init(&spi->dev, &regmap_spi, &spi->dev, config);
+ return __regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_spi);
+EXPORT_SYMBOL_GPL(__regmap_init_spi);
-/**
- * devm_regmap_init_spi(): Initialise register map
- *
- * @spi: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The map will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_spi(struct spi_device *spi,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config);
+ return __devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_spi);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/base/regmap/regmap-spmi.c b/kernel/drivers/base/regmap/regmap-spmi.c
index d7026dc33..7e58f6560 100644
--- a/kernel/drivers/base/regmap/regmap-spmi.c
+++ b/kernel/drivers/base/regmap/regmap-spmi.c
@@ -91,36 +91,25 @@ static struct regmap_bus regmap_spmi_base = {
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
-/**
- * regmap_init_spmi_base(): Create regmap for the Base register space
- * @sdev: SPMI device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_spmi_base(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *__regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+ return __regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_spmi_base);
+EXPORT_SYMBOL_GPL(__regmap_init_spmi_base);
-/**
- * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
- * @sdev: SPMI device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_spmi_base(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+ return __devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_base);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_base);
static int regmap_spmi_ext_read(void *context,
const void *reg, size_t reg_size,
@@ -222,35 +211,24 @@ static struct regmap_bus regmap_spmi_ext = {
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
-/**
- * regmap_init_spmi_ext(): Create regmap for Ext register space
- * @sdev: Device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
- */
-struct regmap *regmap_init_spmi_ext(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *__regmap_init_spmi_ext(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
+ return __regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(regmap_init_spmi_ext);
+EXPORT_SYMBOL_GPL(__regmap_init_spmi_ext);
-/**
- * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
- * @sdev: SPMI device that will be interacted with
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. The regmap will be automatically freed by the
- * device management code.
- */
-struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *sdev,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
- return devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
+ return __devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
+ lock_key, lock_name);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_ext);
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_ext);
MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/base/regmap/regmap.c b/kernel/drivers/base/regmap/regmap.c
index 1c76dcb50..4ac63c0e5 100644
--- a/kernel/drivers/base/regmap/regmap.c
+++ b/kernel/drivers/base/regmap/regmap.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
+#include <linux/delay.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -34,7 +35,7 @@
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
- bool *change);
+ bool *change, bool force_write);
static int _regmap_bus_reg_read(void *context, unsigned int reg,
unsigned int *val);
@@ -93,6 +94,9 @@ bool regmap_writeable(struct regmap *map, unsigned int reg)
bool regmap_readable(struct regmap *map, unsigned int reg)
{
+ if (!map->reg_read)
+ return false;
+
if (map->max_register && reg > map->max_register)
return false;
@@ -515,22 +519,12 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
-/**
- * regmap_init(): Initialise register map
- *
- * @dev: Device that will be interacted with
- * @bus: Bus-specific callbacks to use with device
- * @bus_context: Data passed to bus-specific callbacks
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap. This function should generally not be called
- * directly, it should be called by bus-specific init functions.
- */
-struct regmap *regmap_init(struct device *dev,
- const struct regmap_bus *bus,
- void *bus_context,
- const struct regmap_config *config)
+struct regmap *__regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
struct regmap *map;
int ret = -EINVAL;
@@ -556,13 +550,27 @@ struct regmap *regmap_init(struct device *dev,
spin_lock_init(&map->spinlock);
map->lock = regmap_lock_spinlock;
map->unlock = regmap_unlock_spinlock;
+ lockdep_set_class_and_name(&map->spinlock,
+ lock_key, lock_name);
} else {
mutex_init(&map->mutex);
map->lock = regmap_lock_mutex;
map->unlock = regmap_unlock_mutex;
+ lockdep_set_class_and_name(&map->mutex,
+ lock_key, lock_name);
}
map->lock_arg = map;
}
+
+ /*
+	 * When we write in fast paths with regmap_bulk_write(), don't allocate
+ * scratch buffers with sleeping allocations.
+ */
+ if ((bus && bus->fast_io) || config->fast_io)
+ map->alloc_flags = GFP_ATOMIC;
+ else
+ map->alloc_flags = GFP_KERNEL;
+
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
@@ -573,8 +581,13 @@ struct regmap *regmap_init(struct device *dev,
map->reg_stride = config->reg_stride;
else
map->reg_stride = 1;
- map->use_single_rw = config->use_single_rw;
- map->can_multi_write = config->can_multi_write;
+ map->use_single_read = config->use_single_rw || !bus || !bus->read;
+ map->use_single_write = config->use_single_rw || !bus || !bus->write;
+ map->can_multi_write = config->can_multi_write && bus && bus->write;
+ if (bus) {
+ map->max_raw_read = bus->max_raw_read;
+ map->max_raw_write = bus->max_raw_write;
+ }
map->dev = dev;
map->bus = bus;
map->bus_context = bus_context;
@@ -616,6 +629,7 @@ struct regmap *regmap_init(struct device *dev,
goto skip_format_initialization;
} else {
map->reg_read = _regmap_bus_read;
+ map->reg_update_bits = bus->reg_update_bits;
}
reg_endian = regmap_get_reg_endian(bus, config);
@@ -763,7 +777,7 @@ struct regmap *regmap_init(struct device *dev,
if ((reg_endian != REGMAP_ENDIAN_BIG) ||
(val_endian != REGMAP_ENDIAN_BIG))
goto err_map;
- map->use_single_rw = true;
+ map->use_single_write = true;
}
if (!map->format.format_write &&
@@ -899,30 +913,19 @@ err_map:
err:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(regmap_init);
+EXPORT_SYMBOL_GPL(__regmap_init);
static void devm_regmap_release(struct device *dev, void *res)
{
regmap_exit(*(struct regmap **)res);
}
-/**
- * devm_regmap_init(): Initialise managed register map
- *
- * @dev: Device that will be interacted with
- * @bus: Bus-specific callbacks to use with device
- * @bus_context: Data passed to bus-specific callbacks
- * @config: Configuration for register map
- *
- * The return value will be an ERR_PTR() on error or a valid pointer
- * to a struct regmap. This function should generally not be called
- * directly, it should be called by bus-specific init functions. The
- * map will be automatically freed by the device management code.
- */
-struct regmap *devm_regmap_init(struct device *dev,
- const struct regmap_bus *bus,
- void *bus_context,
- const struct regmap_config *config)
+struct regmap *__devm_regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
{
struct regmap **ptr, *regmap;
@@ -930,7 +933,8 @@ struct regmap *devm_regmap_init(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
- regmap = regmap_init(dev, bus, bus_context, config);
+ regmap = __regmap_init(dev, bus, bus_context, config,
+ lock_key, lock_name);
if (!IS_ERR(regmap)) {
*ptr = regmap;
devres_add(dev, ptr);
@@ -940,7 +944,7 @@ struct regmap *devm_regmap_init(struct device *dev,
return regmap;
}
-EXPORT_SYMBOL_GPL(devm_regmap_init);
+EXPORT_SYMBOL_GPL(__devm_regmap_init);
static void regmap_field_init(struct regmap_field *rm_field,
struct regmap *regmap, struct reg_field reg_field)
@@ -1178,7 +1182,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
ret = _regmap_update_bits(map, range->selector_reg,
range->selector_mask,
win_page << range->selector_shift,
- &page_chg);
+ &page_chg, false);
map->work_buf = orig_work_buf;
@@ -1382,10 +1386,33 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
*/
bool regmap_can_raw_write(struct regmap *map)
{
- return map->bus && map->format.format_val && map->format.format_reg;
+ return map->bus && map->bus->write && map->format.format_val &&
+ map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);
+/**
+ * regmap_get_raw_read_max - Get the maximum size we can read
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_read_max(struct regmap *map)
+{
+ return map->max_raw_read;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
+
+/**
+ * regmap_get_raw_write_max - Get the maximum size we can write
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_write_max(struct regmap *map)
+{
+ return map->max_raw_write;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
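
A sketch of how a caller might use the new introspection helper to stay under the bus limit; it assumes byte-sized registers with a stride of one, which is an assumption of the example, not of the API:

#include <linux/kernel.h>
#include <linux/regmap.h>

static int example_read_blob(struct regmap *map, unsigned int reg,
			     void *buf, size_t len)
{
	size_t max = regmap_get_raw_read_max(map);	/* 0 means no limit */
	size_t chunk, done = 0;
	int ret;

	while (done < len) {
		chunk = len - done;
		if (max)
			chunk = min(chunk, max);
		/* reg + done is only valid for 8-bit registers, stride 1 */
		ret = regmap_raw_read(map, reg + done, buf + done, chunk);
		if (ret)
			return ret;
		done += chunk;
	}
	return 0;
}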
+
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
unsigned int val)
{
@@ -1555,6 +1582,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
+ if (map->max_raw_write && map->max_raw_write > val_len)
+ return -E2BIG;
map->lock(map->lock_arg);
@@ -1624,6 +1653,18 @@ int regmap_fields_write(struct regmap_field *field, unsigned int id,
}
EXPORT_SYMBOL_GPL(regmap_fields_write);
+int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
+ unsigned int val)
+{
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ return regmap_write_bits(field->regmap,
+ field->reg + (field->id_offset * id),
+ field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_force_write);
+
/**
* regmap_fields_update_bits(): Perform a read/modify/write cycle
* on the register field
@@ -1669,6 +1710,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
{
int ret = 0, i;
size_t val_bytes = map->format.val_bytes;
+ size_t total_size = val_bytes * val_count;
if (map->bus && !map->format.parse_inplace)
return -EINVAL;
@@ -1677,9 +1719,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
/*
* Some devices don't support bulk write, for
- * them we have a series of single write operations.
+ * them we have a series of single write operations in the first two if
+ * blocks.
+ *
+ * The first if block is used for memory mapped io. It does not allow
+ * val_bytes of 3 for example.
+ * The second one is used for busses which do not have this limitation
+ * and can write arbitrary value lengths.
*/
- if (!map->bus || map->use_single_rw) {
+ if (!map->bus) {
map->lock(map->lock_arg);
for (i = 0; i < val_count; i++) {
unsigned int ival;
@@ -1711,13 +1759,45 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
}
out:
map->unlock(map->lock_arg);
+ } else if (map->use_single_write ||
+ (map->max_raw_write && map->max_raw_write < total_size)) {
+ int chunk_stride = map->reg_stride;
+ size_t chunk_size = val_bytes;
+ size_t chunk_count = val_count;
+
+ if (!map->use_single_write) {
+ chunk_size = map->max_raw_write;
+ if (chunk_size % val_bytes)
+ chunk_size -= chunk_size % val_bytes;
+ chunk_count = total_size / chunk_size;
+ chunk_stride *= chunk_size / val_bytes;
+ }
+
+ map->lock(map->lock_arg);
+ /* Write as many bytes as possible with chunk_size */
+ for (i = 0; i < chunk_count; i++) {
+ ret = _regmap_raw_write(map,
+ reg + (i * chunk_stride),
+ val + (i * chunk_size),
+ chunk_size);
+ if (ret)
+ break;
+ }
+
+ /* Write remaining bytes */
+ if (!ret && chunk_size * i < total_size) {
+ ret = _regmap_raw_write(map, reg + (i * chunk_stride),
+ val + (i * chunk_size),
+ total_size - i * chunk_size);
+ }
+ map->unlock(map->lock_arg);
} else {
void *wval;
if (!val_count)
return -EINVAL;
- wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
+ wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
if (!wval) {
dev_err(map->dev, "Error in memory allocation\n");
return -ENOMEM;
@@ -1740,10 +1820,10 @@ EXPORT_SYMBOL_GPL(regmap_bulk_write);
*
* the (register,newvalue) pairs in regs have not been formatted, but
* they are all in the same page and have been changed to being page
- * relative. The page register has been written if that was neccessary.
+ * relative. The page register has been written if that was necessary.
*/
static int _regmap_raw_multi_reg_write(struct regmap *map,
- const struct reg_default *regs,
+ const struct reg_sequence *regs,
size_t num_regs)
{
int ret;
@@ -1768,8 +1848,8 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
u8 = buf;
for (i = 0; i < num_regs; i++) {
- int reg = regs[i].reg;
- int val = regs[i].def;
+ unsigned int reg = regs[i].reg;
+ unsigned int val = regs[i].def;
trace_regmap_hw_write_start(map, reg, 1);
map->format.format_reg(u8, reg, map->reg_shift);
u8 += reg_bytes + pad_bytes;
@@ -1800,17 +1880,19 @@ static unsigned int _regmap_register_page(struct regmap *map,
}
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
- struct reg_default *regs,
+ struct reg_sequence *regs,
size_t num_regs)
{
int ret;
int i, n;
- struct reg_default *base;
+ struct reg_sequence *base;
unsigned int this_page = 0;
+ unsigned int page_change = 0;
/*
 	 * the set of registers is not necessarily in order, but
* since the order of write must be preserved this algorithm
- * chops the set each time the page changes
+ * chops the set each time the page changes. This also applies
+ * if there is a delay required at any point in the sequence.
*/
base = regs;
for (i = 0, n = 0; i < num_regs; i++, n++) {
@@ -1826,16 +1908,48 @@ static int _regmap_range_multi_paged_reg_write(struct regmap *map,
this_page = win_page;
if (win_page != this_page) {
this_page = win_page;
+ page_change = 1;
+ }
+ }
+
+ /* If we have both a page change and a delay make sure to
+ * write the regs and apply the delay before we change the
+ * page.
+ */
+
+ if (page_change || regs[i].delay_us) {
+
+			/* For situations where the first write requires
+			 * a delay, we need to make sure we don't call
+			 * raw_multi_reg_write with n = 0. This can't
+			 * occur with page breaks, as we never write on
+			 * the first iteration.
+			 */
+ if (regs[i].delay_us && i == 0)
+ n = 1;
+
ret = _regmap_raw_multi_reg_write(map, base, n);
if (ret != 0)
return ret;
+
+ if (regs[i].delay_us)
+ udelay(regs[i].delay_us);
+
base += n;
n = 0;
- }
- ret = _regmap_select_page(map, &base[n].reg, range, 1);
- if (ret != 0)
- return ret;
+
+ if (page_change) {
+ ret = _regmap_select_page(map,
+ &base[n].reg,
+ range, 1);
+ if (ret != 0)
+ return ret;
+
+ page_change = 0;
+ }
+
}
+
}
if (n > 0)
return _regmap_raw_multi_reg_write(map, base, n);
@@ -1843,7 +1957,7 @@ static int _regmap_range_multi_paged_reg_write(struct regmap *map,
}
static int _regmap_multi_reg_write(struct regmap *map,
- const struct reg_default *regs,
+ const struct reg_sequence *regs,
size_t num_regs)
{
int i;
@@ -1854,6 +1968,9 @@ static int _regmap_multi_reg_write(struct regmap *map,
ret = _regmap_write(map, regs[i].reg, regs[i].def);
if (ret != 0)
return ret;
+
+ if (regs[i].delay_us)
+ udelay(regs[i].delay_us);
}
return 0;
}
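
The new delay_us field lets a write sequence carry its own settling times; a hypothetical init sequence might look like this:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_sequence example_init_seq[] = {
	{ .reg = 0x00, .def = 0x01 },
	{ .reg = 0x01, .def = 0x80, .delay_us = 100 },	/* wait after this write */
	{ .reg = 0x02, .def = 0x3c },
};

static int example_apply_init(struct regmap *map)
{
	return regmap_multi_reg_write(map, example_init_seq,
				      ARRAY_SIZE(example_init_seq));
}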
@@ -1893,10 +2010,14 @@ static int _regmap_multi_reg_write(struct regmap *map,
for (i = 0; i < num_regs; i++) {
unsigned int reg = regs[i].reg;
struct regmap_range_node *range;
+
+ /* Coalesce all the writes between a page break or a delay
+ * in a sequence
+ */
range = _regmap_range_lookup(map, reg);
- if (range) {
- size_t len = sizeof(struct reg_default)*num_regs;
- struct reg_default *base = kmemdup(regs, len,
+ if (range || regs[i].delay_us) {
+ size_t len = sizeof(struct reg_sequence)*num_regs;
+ struct reg_sequence *base = kmemdup(regs, len,
GFP_KERNEL);
if (!base)
return -ENOMEM;
@@ -1929,7 +2050,7 @@ static int _regmap_multi_reg_write(struct regmap *map,
* A value of zero will be returned on success, a negative errno will be
* returned in error cases.
*/
-int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
int num_regs)
{
int ret;
@@ -1962,7 +2083,7 @@ EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
* be returned in error cases.
*/
int regmap_multi_reg_write_bypassed(struct regmap *map,
- const struct reg_default *regs,
+ const struct reg_sequence *regs,
int num_regs)
{
int ret;
@@ -2050,7 +2171,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
/*
* Some buses or devices flag reads by setting the high bits in the
- * register addresss; since it's always the high bits for all
+ * register address; since it's always the high bits for all
* current formats we can do this here rather than in
* formatting. This may break if we get interesting formats.
*/
@@ -2097,8 +2218,6 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
int ret;
void *context = _regmap_map_get_context(map);
- WARN_ON(!map->reg_read);
-
if (!map->cache_bypass) {
ret = regcache_read(map, reg, val);
if (ret == 0)
@@ -2179,11 +2298,22 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
+ if (val_count == 0)
+ return -EINVAL;
map->lock(map->lock_arg);
if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
map->cache_type == REGCACHE_NONE) {
+ if (!map->bus->read) {
+ ret = -ENOTSUPP;
+ goto out;
+ }
+ if (map->max_raw_read && map->max_raw_read < val_len) {
+ ret = -E2BIG;
+ goto out;
+ }
+
/* Physical block read if there's no cache involved */
ret = _regmap_raw_read(map, reg, val, val_len);
@@ -2292,20 +2422,51 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 	 * Some devices do not support bulk read, for
* them we have a series of single read operations.
*/
- if (map->use_single_rw) {
- for (i = 0; i < val_count; i++) {
- ret = regmap_raw_read(map,
- reg + (i * map->reg_stride),
- val + (i * val_bytes),
- val_bytes);
- if (ret != 0)
- return ret;
- }
- } else {
+ size_t total_size = val_bytes * val_count;
+
+ if (!map->use_single_read &&
+ (!map->max_raw_read || map->max_raw_read > total_size)) {
ret = regmap_raw_read(map, reg, val,
val_bytes * val_count);
if (ret != 0)
return ret;
+ } else {
+ /*
+ * Some devices do not support bulk read or do not
+			 * support large bulk reads; for them we have a series
+ * of read operations.
+ */
+ int chunk_stride = map->reg_stride;
+ size_t chunk_size = val_bytes;
+ size_t chunk_count = val_count;
+
+ if (!map->use_single_read) {
+ chunk_size = map->max_raw_read;
+ if (chunk_size % val_bytes)
+ chunk_size -= chunk_size % val_bytes;
+ chunk_count = total_size / chunk_size;
+ chunk_stride *= chunk_size / val_bytes;
+ }
+
+ /* Read bytes that fit into a multiple of chunk_size */
+ for (i = 0; i < chunk_count; i++) {
+ ret = regmap_raw_read(map,
+ reg + (i * chunk_stride),
+ val + (i * chunk_size),
+ chunk_size);
+ if (ret != 0)
+ return ret;
+ }
+
+ /* Read remaining bytes */
+ if (chunk_size * i < total_size) {
+ ret = regmap_raw_read(map,
+ reg + (i * chunk_stride),
+ val + (i * chunk_size),
+ total_size - i * chunk_size);
+ if (ret != 0)
+ return ret;
+ }
}
for (i = 0; i < val_count * val_bytes; i += val_bytes)
@@ -2317,7 +2478,34 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
&ival);
if (ret != 0)
return ret;
- map->format.format_val(val + (i * val_bytes), ival, 0);
+
+ if (map->format.format_val) {
+ map->format.format_val(val + (i * val_bytes), ival, 0);
+ } else {
+ /* Devices providing read and write
+ * operations can use the bulk I/O
+			 * functions if they define val_bytes; we
+ * we assume that the values are native
+ * endian.
+ */
+ u32 *u32 = val;
+ u16 *u16 = val;
+ u8 *u8 = val;
+
+ switch (map->format.val_bytes) {
+ case 4:
+ u32[i] = ival;
+ break;
+ case 2:
+ u16[i] = ival;
+ break;
+ case 1:
+ u8[i] = ival;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
}
}
@@ -2327,25 +2515,31 @@ EXPORT_SYMBOL_GPL(regmap_bulk_read);
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
- bool *change)
+ bool *change, bool force_write)
{
int ret;
unsigned int tmp, orig;
- ret = _regmap_read(map, reg, &orig);
- if (ret != 0)
- return ret;
+ if (change)
+ *change = false;
- tmp = orig & ~mask;
- tmp |= val & mask;
-
- if (tmp != orig) {
- ret = _regmap_write(map, reg, tmp);
- if (change)
+ if (regmap_volatile(map, reg) && map->reg_update_bits) {
+ ret = map->reg_update_bits(map->bus_context, reg, mask, val);
+ if (ret == 0 && change)
*change = true;
} else {
- if (change)
- *change = false;
+ ret = _regmap_read(map, reg, &orig);
+ if (ret != 0)
+ return ret;
+
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+
+ if (force_write || (tmp != orig)) {
+ ret = _regmap_write(map, reg, tmp);
+ if (ret == 0 && change)
+ *change = true;
+ }
}
return ret;
@@ -2367,7 +2561,7 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
int ret;
map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, NULL);
+ ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
map->unlock(map->lock_arg);
return ret;
@@ -2375,6 +2569,29 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
EXPORT_SYMBOL_GPL(regmap_update_bits);
/**
+ * regmap_write_bits: Perform a read/modify/write cycle on the register map
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int ret;
+
+ map->lock(map->lock_arg);
+ ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_bits);
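
regmap_write_bits() differs from regmap_update_bits() in that it always issues the write, even when the read-modify-write result equals the current value; a typical use is a self-clearing trigger bit (the register and bit below are hypothetical):

#include <linux/bitops.h>
#include <linux/regmap.h>

static int example_start_conversion(struct regmap *map)
{
	/* regmap_update_bits() could skip this write if the cache already
	 * shows the bit set; the forced variant always writes it. */
	return regmap_write_bits(map, 0x10, BIT(0), BIT(0));
}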
+
+/**
* regmap_update_bits_async: Perform a read/modify/write cycle on the register
* map asynchronously
*
@@ -2398,7 +2615,7 @@ int regmap_update_bits_async(struct regmap *map, unsigned int reg,
map->async = true;
- ret = _regmap_update_bits(map, reg, mask, val, NULL);
+ ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
map->async = false;
@@ -2427,7 +2644,7 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
int ret;
map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, change);
+ ret = _regmap_update_bits(map, reg, mask, val, change, false);
map->unlock(map->lock_arg);
return ret;
}
@@ -2460,7 +2677,7 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
map->async = true;
- ret = _regmap_update_bits(map, reg, mask, val, change);
+ ret = _regmap_update_bits(map, reg, mask, val, change, false);
map->async = false;
@@ -2552,10 +2769,10 @@ EXPORT_SYMBOL_GPL(regmap_async_complete);
* The caller must ensure that this function cannot be called
* concurrently with either itself or regcache_sync().
*/
-int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
int num_regs)
{
- struct reg_default *p;
+ struct reg_sequence *p;
int ret;
bool bypass;
@@ -2564,7 +2781,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
return 0;
p = krealloc(map->patch,
- sizeof(struct reg_default) * (map->patch_regs + num_regs),
+ sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
GFP_KERNEL);
if (p) {
memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
@@ -2582,10 +2799,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
map->async = true;
ret = _regmap_multi_reg_write(map, regs, num_regs);
- if (ret != 0)
- goto out;
-out:
map->async = false;
map->cache_bypass = bypass;
@@ -2612,6 +2826,30 @@ int regmap_get_val_bytes(struct regmap *map)
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
+/**
+ * regmap_get_max_register(): Report the max register value
+ *
+ * Report the max register value, mainly intended for use by
+ * generic infrastructure built on top of regmap.
+ */
+int regmap_get_max_register(struct regmap *map)
+{
+ return map->max_register ? map->max_register : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regmap_get_max_register);
+
+/**
+ * regmap_get_reg_stride(): Report the register address stride
+ *
+ * Report the register address stride, mainly intended for use by
+ * generic infrastructure built on top of regmap.
+ */
+int regmap_get_reg_stride(struct regmap *map)
+{
+ return map->reg_stride;
+}
+EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
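
Generic code built on top of regmap can combine the two getters to walk the register space; a hypothetical debug dump could look like this:

#include <linux/printk.h>
#include <linux/regmap.h>

static void example_dump(struct regmap *map)
{
	int max = regmap_get_max_register(map);
	int stride = regmap_get_reg_stride(map);
	unsigned int reg, val;

	if (max < 0)
		return;		/* map does not report a maximum register */

	for (reg = 0; reg <= max; reg += stride)
		if (!regmap_read(map, reg, &val))
			pr_info("%#x: %#x\n", reg, val);
}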
+
int regmap_parse_val(struct regmap *map, const void *buf,
unsigned int *val)
{
diff --git a/kernel/drivers/base/soc.c b/kernel/drivers/base/soc.c
index 39fca01c8..75b98aad6 100644
--- a/kernel/drivers/base/soc.c
+++ b/kernel/drivers/base/soc.c
@@ -16,7 +16,6 @@
#include <linux/err.h>
static DEFINE_IDA(soc_ida);
-static DEFINE_SPINLOCK(soc_lock);
static ssize_t soc_info_get(struct device *dev,
struct device_attribute *attr,
@@ -122,20 +121,10 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
}
/* Fetch a unique (reclaimable) SOC ID. */
- do {
- if (!ida_pre_get(&soc_ida, GFP_KERNEL)) {
- ret = -ENOMEM;
- goto out2;
- }
-
- spin_lock(&soc_lock);
- ret = ida_get_new(&soc_ida, &soc_dev->soc_dev_num);
- spin_unlock(&soc_lock);
-
- } while (ret == -EAGAIN);
-
- if (ret)
+ ret = ida_simple_get(&soc_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0)
goto out2;
+ soc_dev->soc_dev_num = ret;
soc_dev->attr = soc_dev_attr;
soc_dev->dev.bus = &soc_bus_type;
@@ -151,7 +140,7 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
return soc_dev;
out3:
- ida_remove(&soc_ida, soc_dev->soc_dev_num);
+ ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
out2:
kfree(soc_dev);
out1:
@@ -161,7 +150,7 @@ out1:
/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
void soc_device_unregister(struct soc_device *soc_dev)
{
- ida_remove(&soc_ida, soc_dev->soc_dev_num);
+ ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
device_unregister(&soc_dev->dev);
}
diff --git a/kernel/drivers/base/topology.c b/kernel/drivers/base/topology.c
index 6491f4520..8b7d7f8e5 100644
--- a/kernel/drivers/base/topology.c
+++ b/kernel/drivers/base/topology.c
@@ -61,7 +61,7 @@ static DEVICE_ATTR_RO(physical_package_id);
define_id_show_func(core_id);
static DEVICE_ATTR_RO(core_id);
-define_siblings_show_func(thread_siblings, thread_cpumask);
+define_siblings_show_func(thread_siblings, sibling_cpumask);
static DEVICE_ATTR_RO(thread_siblings);
static DEVICE_ATTR_RO(thread_siblings_list);