Diffstat (limited to 'kernel/drivers/base/power')
-rw-r--r-- | kernel/drivers/base/power/Makefile | 4
-rw-r--r-- | kernel/drivers/base/power/clock_ops.c | 53
-rw-r--r-- | kernel/drivers/base/power/domain.c | 830
-rw-r--r-- | kernel/drivers/base/power/domain_governor.c | 23
-rw-r--r-- | kernel/drivers/base/power/generic_ops.c | 23
-rw-r--r-- | kernel/drivers/base/power/main.c | 16
-rw-r--r-- | kernel/drivers/base/power/opp/Makefile | 2
-rw-r--r-- | kernel/drivers/base/power/opp/core.c (renamed from kernel/drivers/base/power/opp.c) | 941
-rw-r--r-- | kernel/drivers/base/power/opp/cpu.c | 270
-rw-r--r-- | kernel/drivers/base/power/opp/opp.h | 146
-rw-r--r-- | kernel/drivers/base/power/power.h | 50
-rw-r--r-- | kernel/drivers/base/power/qos.c | 37
-rw-r--r-- | kernel/drivers/base/power/runtime.c | 6
-rw-r--r-- | kernel/drivers/base/power/sysfs.c | 11
-rw-r--r-- | kernel/drivers/base/power/wakeirq.c | 277
-rw-r--r-- | kernel/drivers/base/power/wakeup.c | 151
16 files changed, 1903 insertions, 937 deletions
diff --git a/kernel/drivers/base/power/Makefile b/kernel/drivers/base/power/Makefile index 1cb854459..5998c5328 100644 --- a/kernel/drivers/base/power/Makefile +++ b/kernel/drivers/base/power/Makefile @@ -1,7 +1,7 @@ -obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o +obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o -obj-$(CONFIG_PM_OPP) += opp.o +obj-$(CONFIG_PM_OPP) += opp/ obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o obj-$(CONFIG_HAVE_CLK) += clock_ops.o diff --git a/kernel/drivers/base/power/clock_ops.c b/kernel/drivers/base/power/clock_ops.c index ac3c07db9..60ee5591e 100644 --- a/kernel/drivers/base/power/clock_ops.c +++ b/kernel/drivers/base/power/clock_ops.c @@ -15,8 +15,9 @@ #include <linux/clkdev.h> #include <linux/slab.h> #include <linux/err.h> +#include <linux/pm_runtime.h> -#ifdef CONFIG_PM +#ifdef CONFIG_PM_CLK enum pce_status { PCE_STATUS_NONE = 0, @@ -65,7 +66,8 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) } else { clk_prepare(ce->clk); ce->status = PCE_STATUS_ACQUIRED; - dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); + dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n", + ce->clk, ce->con_id); } } @@ -91,7 +93,7 @@ static int __pm_clk_add(struct device *dev, const char *con_id, return -ENOMEM; } } else { - if (IS_ERR(clk) || !__clk_get(clk)) { + if (IS_ERR(clk)) { kfree(ce); return -ENOENT; } @@ -125,7 +127,9 @@ int pm_clk_add(struct device *dev, const char *con_id) * @clk: Clock pointer * * Add the clock to the list of clocks used for the power management of @dev. - * It will increment refcount on clock pointer, use clk_put() on it when done. + * The power-management code will take control of the clock reference, so + * callers should not call clk_put() on @clk after this function sucessfully + * returned. */ int pm_clk_add_clk(struct device *dev, struct clk *clk) { @@ -365,7 +369,44 @@ static int pm_clk_notify(struct notifier_block *nb, return 0; } -#else /* !CONFIG_PM */ +int pm_clk_runtime_suspend(struct device *dev) +{ + int ret; + + dev_dbg(dev, "%s\n", __func__); + + ret = pm_generic_runtime_suspend(dev); + if (ret) { + dev_err(dev, "failed to suspend device\n"); + return ret; + } + + ret = pm_clk_suspend(dev); + if (ret) { + dev_err(dev, "failed to suspend clock\n"); + pm_generic_runtime_resume(dev); + return ret; + } + + return 0; +} + +int pm_clk_runtime_resume(struct device *dev) +{ + int ret; + + dev_dbg(dev, "%s\n", __func__); + + ret = pm_clk_resume(dev); + if (ret) { + dev_err(dev, "failed to resume clock\n"); + return ret; + } + + return pm_generic_runtime_resume(dev); +} + +#else /* !CONFIG_PM_CLK */ /** * enable_clock - Enable a device clock. @@ -445,7 +486,7 @@ static int pm_clk_notify(struct notifier_block *nb, return 0; } -#endif /* !CONFIG_PM */ +#endif /* !CONFIG_PM_CLK */ /** * pm_clk_add_notifier - Add bus type notifier for power management clocks. diff --git a/kernel/drivers/base/power/domain.c b/kernel/drivers/base/power/domain.c index 2327613d4..65f50eccd 100644 --- a/kernel/drivers/base/power/domain.c +++ b/kernel/drivers/base/power/domain.c @@ -6,6 +6,7 @@ * This file is released under the GPLv2. 
*/ +#include <linux/delay.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/platform_device.h> @@ -19,6 +20,8 @@ #include <linux/suspend.h> #include <linux/export.h> +#define GENPD_RETRY_MAX_MS 250 /* Approximate */ + #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ ({ \ type (*__routine)(struct device *__d); \ @@ -31,43 +34,9 @@ __ret; \ }) -#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \ -({ \ - ktime_t __start = ktime_get(); \ - type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \ - s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \ - struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \ - if (!__retval && __elapsed > __td->field) { \ - __td->field = __elapsed; \ - dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \ - __elapsed); \ - genpd->max_off_time_changed = true; \ - __td->constraint_changed = true; \ - } \ - __retval; \ -}) - static LIST_HEAD(gpd_list); static DEFINE_MUTEX(gpd_list_lock); -static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name) -{ - struct generic_pm_domain *genpd = NULL, *gpd; - - if (IS_ERR_OR_NULL(domain_name)) - return NULL; - - mutex_lock(&gpd_list_lock); - list_for_each_entry(gpd, &gpd_list, gpd_list_node) { - if (!strcmp(gpd->name, domain_name)) { - genpd = gpd; - break; - } - } - mutex_unlock(&gpd_list_lock); - return genpd; -} - /* * Get the generic PM domain for a particular struct device. * This validates the struct device pointer, the PM domain pointer, @@ -107,14 +76,12 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev) static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) { - return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev, - stop_latency_ns, "stop"); + return GENPD_DEV_CALLBACK(genpd, int, stop, dev); } static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) { - return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, - start_latency_ns, "start"); + return GENPD_DEV_CALLBACK(genpd, int, start, dev); } static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) @@ -133,55 +100,7 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) smp_mb__after_atomic(); } -static void genpd_acquire_lock(struct generic_pm_domain *genpd) -{ - DEFINE_WAIT(wait); - - mutex_lock(&genpd->lock); - /* - * Wait for the domain to transition into either the active, - * or the power off state. 
- */ - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - if (genpd->status == GPD_STATE_ACTIVE - || genpd->status == GPD_STATE_POWER_OFF) - break; - mutex_unlock(&genpd->lock); - - schedule(); - - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); -} - -static void genpd_release_lock(struct generic_pm_domain *genpd) -{ - mutex_unlock(&genpd->lock); -} - -static void genpd_set_active(struct generic_pm_domain *genpd) -{ - if (genpd->resume_count == 0) - genpd->status = GPD_STATE_ACTIVE; -} - -static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) -{ - s64 usecs64; - - if (!genpd->cpuidle_data) - return; - - usecs64 = genpd->power_on_latency_ns; - do_div(usecs64, NSEC_PER_USEC); - usecs64 += genpd->cpuidle_data->saved_exit_latency; - genpd->cpuidle_data->idle_state->exit_latency = usecs64; -} - -static int genpd_power_on(struct generic_pm_domain *genpd) +static int genpd_power_on(struct generic_pm_domain *genpd, bool timed) { ktime_t time_start; s64 elapsed_ns; @@ -190,6 +109,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd) if (!genpd->power_on) return 0; + if (!timed) + return genpd->power_on(genpd); + time_start = ktime_get(); ret = genpd->power_on(genpd); if (ret) @@ -201,14 +123,13 @@ static int genpd_power_on(struct generic_pm_domain *genpd) genpd->power_on_latency_ns = elapsed_ns; genpd->max_off_time_changed = true; - genpd_recalc_cpu_exit_latency(genpd); pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", genpd->name, "on", elapsed_ns); return ret; } -static int genpd_power_off(struct generic_pm_domain *genpd) +static int genpd_power_off(struct generic_pm_domain *genpd, bool timed) { ktime_t time_start; s64 elapsed_ns; @@ -217,6 +138,9 @@ static int genpd_power_off(struct generic_pm_domain *genpd) if (!genpd->power_off) return 0; + if (!timed) + return genpd->power_off(genpd); + time_start = ktime_get(); ret = genpd->power_off(genpd); if (ret == -EBUSY) @@ -235,49 +159,35 @@ static int genpd_power_off(struct generic_pm_domain *genpd) } /** - * __pm_genpd_poweron - Restore power to a given PM domain and its masters. + * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff(). + * @genpd: PM domait to power off. + * + * Queue up the execution of genpd_poweroff() unless it's already been done + * before. + */ +static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) +{ + queue_work(pm_wq, &genpd->power_off_work); +} + +static int genpd_poweron(struct generic_pm_domain *genpd); + +/** + * __genpd_poweron - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. * * Restore power to @genpd and all of its masters so that it is possible to * resume a device belonging to it. */ -static int __pm_genpd_poweron(struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) +static int __genpd_poweron(struct generic_pm_domain *genpd) { struct gpd_link *link; - DEFINE_WAIT(wait); int ret = 0; - /* If the domain's master is being waited for, we have to wait too. 
*/ - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - if (genpd->status != GPD_STATE_WAIT_MASTER) - break; - mutex_unlock(&genpd->lock); - - schedule(); - - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); - if (genpd->status == GPD_STATE_ACTIVE || (genpd->prepared_count > 0 && genpd->suspend_power_off)) return 0; - if (genpd->status != GPD_STATE_POWER_OFF) { - genpd_set_active(genpd); - return 0; - } - - if (genpd->cpuidle_data) { - cpuidle_pause_and_lock(); - genpd->cpuidle_data->idle_state->disabled = true; - cpuidle_resume_and_unlock(); - goto out; - } - /* * The list is guaranteed not to change while the loop below is being * executed, unless one of the masters' .power_on() callbacks fiddles @@ -285,85 +195,55 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) */ list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_inc(link->master); - genpd->status = GPD_STATE_WAIT_MASTER; - mutex_unlock(&genpd->lock); - - ret = pm_genpd_poweron(link->master); - - mutex_lock(&genpd->lock); - - /* - * The "wait for parent" status is guaranteed not to change - * while the master is powering on. - */ - genpd->status = GPD_STATE_POWER_OFF; - wake_up_all(&genpd->status_wait_queue); + ret = genpd_poweron(link->master); if (ret) { genpd_sd_counter_dec(link->master); goto err; } } - ret = genpd_power_on(genpd); + ret = genpd_power_on(genpd, true); if (ret) goto err; - out: - genpd_set_active(genpd); - + genpd->status = GPD_STATE_ACTIVE; return 0; err: - list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node) + list_for_each_entry_continue_reverse(link, + &genpd->slave_links, + slave_node) { genpd_sd_counter_dec(link->master); + genpd_queue_power_off_work(link->master); + } return ret; } /** - * pm_genpd_poweron - Restore power to a given PM domain and its masters. + * genpd_poweron - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. */ -int pm_genpd_poweron(struct generic_pm_domain *genpd) +static int genpd_poweron(struct generic_pm_domain *genpd) { int ret; mutex_lock(&genpd->lock); - ret = __pm_genpd_poweron(genpd); + ret = __genpd_poweron(genpd); mutex_unlock(&genpd->lock); return ret; } -/** - * pm_genpd_name_poweron - Restore power to a given PM domain and its masters. - * @domain_name: Name of the PM domain to power up. - */ -int pm_genpd_name_poweron(const char *domain_name) -{ - struct generic_pm_domain *genpd; - - genpd = pm_genpd_lookup_name(domain_name); - return genpd ? 
pm_genpd_poweron(genpd) : -EINVAL; -} - -static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, - struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, start, dev); -} - static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) { - return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, - save_state_latency_ns, "state save"); + return GENPD_DEV_CALLBACK(genpd, int, save_state, dev); } -static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) +static int genpd_restore_dev(struct generic_pm_domain *genpd, + struct device *dev) { - return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, - restore_state_latency_ns, - "state restore"); + return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev); } static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, @@ -407,133 +287,31 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, } /** - * __pm_genpd_save_device - Save the pre-suspend state of a device. - * @pdd: Domain data of the device to save the state of. - * @genpd: PM domain the device belongs to. - */ -static int __pm_genpd_save_device(struct pm_domain_data *pdd, - struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) -{ - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - struct device *dev = pdd->dev; - int ret = 0; - - if (gpd_data->need_restore > 0) - return 0; - - /* - * If the value of the need_restore flag is still unknown at this point, - * we trust that pm_genpd_poweroff() has verified that the device is - * already runtime PM suspended. - */ - if (gpd_data->need_restore < 0) { - gpd_data->need_restore = 1; - return 0; - } - - mutex_unlock(&genpd->lock); - - genpd_start_dev(genpd, dev); - ret = genpd_save_dev(genpd, dev); - genpd_stop_dev(genpd, dev); - - mutex_lock(&genpd->lock); - - if (!ret) - gpd_data->need_restore = 1; - - return ret; -} - -/** - * __pm_genpd_restore_device - Restore the pre-suspend state of a device. - * @pdd: Domain data of the device to restore the state of. - * @genpd: PM domain the device belongs to. - */ -static void __pm_genpd_restore_device(struct pm_domain_data *pdd, - struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) -{ - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - struct device *dev = pdd->dev; - int need_restore = gpd_data->need_restore; - - gpd_data->need_restore = 0; - mutex_unlock(&genpd->lock); - - genpd_start_dev(genpd, dev); - - /* - * Call genpd_restore_dev() for recently added devices too (need_restore - * is negative then). - */ - if (need_restore) - genpd_restore_dev(genpd, dev); - - mutex_lock(&genpd->lock); -} - -/** - * genpd_abort_poweroff - Check if a PM domain power off should be aborted. - * @genpd: PM domain to check. - * - * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during - * a "power off" operation, which means that a "power on" has occured in the - * meantime, or if its resume_count field is different from zero, which means - * that one of its devices has been resumed in the meantime. - */ -static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) -{ - return genpd->status == GPD_STATE_WAIT_MASTER - || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; -} - -/** - * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). - * @genpd: PM domait to power off. - * - * Queue up the execution of pm_genpd_poweroff() unless it's already been done - * before. 
- */ -static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) -{ - queue_work(pm_wq, &genpd->power_off_work); -} - -/** - * pm_genpd_poweroff - Remove power from a given PM domain. + * genpd_poweroff - Remove power from a given PM domain. * @genpd: PM domain to power down. + * @is_async: PM domain is powered down from a scheduled work * * If all of the @genpd's devices have been suspended and all of its subdomains - * have been powered down, run the runtime suspend callbacks provided by all of - * the @genpd's devices' drivers and remove power from @genpd. + * have been powered down, remove power from @genpd. */ -static int pm_genpd_poweroff(struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) +static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async) { struct pm_domain_data *pdd; struct gpd_link *link; - unsigned int not_suspended; - int ret = 0; + unsigned int not_suspended = 0; - start: /* * Do not try to power off the domain in the following situations: * (1) The domain is already in the "power off" state. - * (2) The domain is waiting for its master to power up. - * (3) One of the domain's devices is being resumed right now. - * (4) System suspend is in progress. + * (2) System suspend is in progress. */ if (genpd->status == GPD_STATE_POWER_OFF - || genpd->status == GPD_STATE_WAIT_MASTER - || genpd->resume_count > 0 || genpd->prepared_count > 0) + || genpd->prepared_count > 0) return 0; if (atomic_read(&genpd->sd_count) > 0) return -EBUSY; - not_suspended = 0; list_for_each_entry(pdd, &genpd->dev_list, list_node) { enum pm_qos_flags_status stat; @@ -543,83 +321,35 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) if (stat > PM_QOS_FLAGS_NONE) return -EBUSY; - if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) - || pdd->dev->power.irq_safe)) + if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe) not_suspended++; } - if (not_suspended > genpd->in_progress) + if (not_suspended > 1 || (not_suspended == 1 && is_async)) return -EBUSY; - if (genpd->poweroff_task) { - /* - * Another instance of pm_genpd_poweroff() is executing - * callbacks, so tell it to start over and return. - */ - genpd->status = GPD_STATE_REPEAT; - return 0; - } - if (genpd->gov && genpd->gov->power_down_ok) { if (!genpd->gov->power_down_ok(&genpd->domain)) return -EAGAIN; } - genpd->status = GPD_STATE_BUSY; - genpd->poweroff_task = current; - - list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { - ret = atomic_read(&genpd->sd_count) == 0 ? - __pm_genpd_save_device(pdd, genpd) : -EBUSY; - - if (genpd_abort_poweroff(genpd)) - goto out; - - if (ret) { - genpd_set_active(genpd); - goto out; - } - - if (genpd->status == GPD_STATE_REPEAT) { - genpd->poweroff_task = NULL; - goto start; - } - } - - if (genpd->cpuidle_data) { - /* - * If cpuidle_data is set, cpuidle should turn the domain off - * when the CPU in it is idle. In that case we don't decrement - * the subdomain counts of the master domains, so that power is - * not removed from the current domain prematurely as a result - * of cutting off the masters' power. 
- */ - genpd->status = GPD_STATE_POWER_OFF; - cpuidle_pause_and_lock(); - genpd->cpuidle_data->idle_state->disabled = false; - cpuidle_resume_and_unlock(); - goto out; - } - if (genpd->power_off) { - if (atomic_read(&genpd->sd_count) > 0) { - ret = -EBUSY; - goto out; - } + int ret; + + if (atomic_read(&genpd->sd_count) > 0) + return -EBUSY; /* * If sd_count > 0 at this point, one of the subdomains hasn't - * managed to call pm_genpd_poweron() for the master yet after - * incrementing it. In that case pm_genpd_poweron() will wait + * managed to call genpd_poweron() for the master yet after + * incrementing it. In that case genpd_poweron() will wait * for us to drop the lock, so we can call .power_off() and let - * the pm_genpd_poweron() restore power for us (this shouldn't + * the genpd_poweron() restore power for us (this shouldn't * happen very often). */ - ret = genpd_power_off(genpd); - if (ret == -EBUSY) { - genpd_set_active(genpd); - goto out; - } + ret = genpd_power_off(genpd, true); + if (ret) + return ret; } genpd->status = GPD_STATE_POWER_OFF; @@ -629,10 +359,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) genpd_queue_power_off_work(link->master); } - out: - genpd->poweroff_task = NULL; - wake_up_all(&genpd->status_wait_queue); - return ret; + return 0; } /** @@ -645,9 +372,9 @@ static void genpd_power_off_work_fn(struct work_struct *work) genpd = container_of(work, struct generic_pm_domain, power_off_work); - genpd_acquire_lock(genpd); - pm_genpd_poweroff(genpd); - genpd_release_lock(genpd); + mutex_lock(&genpd->lock); + genpd_poweroff(genpd, true); + mutex_unlock(&genpd->lock); } /** @@ -661,8 +388,11 @@ static void genpd_power_off_work_fn(struct work_struct *work) static int pm_genpd_runtime_suspend(struct device *dev) { struct generic_pm_domain *genpd; - struct generic_pm_domain_data *gpd_data; bool (*stop_ok)(struct device *__dev); + struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + bool runtime_pm = pm_runtime_enabled(dev); + ktime_t time_start; + s64 elapsed_ns; int ret; dev_dbg(dev, "%s()\n", __func__); @@ -671,14 +401,42 @@ static int pm_genpd_runtime_suspend(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; + /* + * A runtime PM centric subsystem/driver may re-use the runtime PM + * callbacks for other purposes than runtime PM. In those scenarios + * runtime PM is disabled. Under these circumstances, we shall skip + * validating/measuring the PM QoS latency. + */ stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; - if (stop_ok && !stop_ok(dev)) + if (runtime_pm && stop_ok && !stop_ok(dev)) return -EBUSY; - ret = genpd_stop_dev(genpd, dev); + /* Measure suspend latency. */ + if (runtime_pm) + time_start = ktime_get(); + + ret = genpd_save_dev(genpd, dev); if (ret) return ret; + ret = genpd_stop_dev(genpd, dev); + if (ret) { + genpd_restore_dev(genpd, dev); + return ret; + } + + /* Update suspend latency value if the measured time exceeds it. */ + if (runtime_pm) { + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > td->suspend_latency_ns) { + td->suspend_latency_ns = elapsed_ns; + dev_dbg(dev, "suspend latency exceeded, %lld ns\n", + elapsed_ns); + genpd->max_off_time_changed = true; + td->constraint_changed = true; + } + } + /* * If power.irq_safe is set, this routine will be run with interrupts * off, so it can't use mutexes. 
@@ -687,19 +445,7 @@ static int pm_genpd_runtime_suspend(struct device *dev) return 0; mutex_lock(&genpd->lock); - - /* - * If we have an unknown state of the need_restore flag, it means none - * of the runtime PM callbacks has been invoked yet. Let's update the - * flag to reflect that the current state is active. - */ - gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); - if (gpd_data->need_restore < 0) - gpd_data->need_restore = 0; - - genpd->in_progress++; - pm_genpd_poweroff(genpd); - genpd->in_progress--; + genpd_poweroff(genpd, false); mutex_unlock(&genpd->lock); return 0; @@ -716,8 +462,12 @@ static int pm_genpd_runtime_suspend(struct device *dev) static int pm_genpd_runtime_resume(struct device *dev) { struct generic_pm_domain *genpd; - DEFINE_WAIT(wait); + struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + bool runtime_pm = pm_runtime_enabled(dev); + ktime_t time_start; + s64 elapsed_ns; int ret; + bool timed = true; dev_dbg(dev, "%s()\n", __func__); @@ -726,39 +476,37 @@ static int pm_genpd_runtime_resume(struct device *dev) return -EINVAL; /* If power.irq_safe, the PM domain is never powered off. */ - if (dev->power.irq_safe) - return genpd_start_dev_no_timing(genpd, dev); + if (dev->power.irq_safe) { + timed = false; + goto out; + } mutex_lock(&genpd->lock); - ret = __pm_genpd_poweron(genpd); - if (ret) { - mutex_unlock(&genpd->lock); + ret = __genpd_poweron(genpd); + mutex_unlock(&genpd->lock); + + if (ret) return ret; - } - genpd->status = GPD_STATE_BUSY; - genpd->resume_count++; - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - /* - * If current is the powering off task, we have been called - * reentrantly from one of the device callbacks, so we should - * not wait. - */ - if (!genpd->poweroff_task || genpd->poweroff_task == current) - break; - mutex_unlock(&genpd->lock); - schedule(); + out: + /* Measure resume latency. */ + if (timed && runtime_pm) + time_start = ktime_get(); - mutex_lock(&genpd->lock); + genpd_start_dev(genpd, dev); + genpd_restore_dev(genpd, dev); + + /* Update resume latency value if the measured time exceeds it. */ + if (timed && runtime_pm) { + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > td->resume_latency_ns) { + td->resume_latency_ns = elapsed_ns; + dev_dbg(dev, "resume latency exceeded, %lld ns\n", + elapsed_ns); + genpd->max_off_time_changed = true; + td->constraint_changed = true; + } } - finish_wait(&genpd->status_wait_queue, &wait); - __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); - genpd->resume_count--; - genpd_set_active(genpd); - wake_up_all(&genpd->status_wait_queue); - mutex_unlock(&genpd->lock); return 0; } @@ -772,15 +520,15 @@ static int __init pd_ignore_unused_setup(char *__unused) __setup("pd_ignore_unused", pd_ignore_unused_setup); /** - * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. + * genpd_poweroff_unused - Power off all PM domains with no devices in use. 
*/ -void pm_genpd_poweroff_unused(void) +static int __init genpd_poweroff_unused(void) { struct generic_pm_domain *genpd; if (pd_ignore_unused) { pr_warn("genpd: Not disabling unused power domains\n"); - return; + return 0; } mutex_lock(&gpd_list_lock); @@ -789,11 +537,7 @@ void pm_genpd_poweroff_unused(void) genpd_queue_power_off_work(genpd); mutex_unlock(&gpd_list_lock); -} -static int __init genpd_poweroff_unused(void) -{ - pm_genpd_poweroff_unused(); return 0; } late_initcall(genpd_poweroff_unused); @@ -827,6 +571,7 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, /** * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. * @genpd: PM domain to power off, if possible. + * @timed: True if latency measurements are allowed. * * Check if the given PM domain can be powered off (during system suspend or * hibernation) and do that if so. Also, in that case propagate to its masters. @@ -836,7 +581,8 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, * executed sequentially, so it is guaranteed that it will never run twice in * parallel). */ -static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) +static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd, + bool timed) { struct gpd_link *link; @@ -847,38 +593,40 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) || atomic_read(&genpd->sd_count) > 0) return; - genpd_power_off(genpd); + genpd_power_off(genpd, timed); genpd->status = GPD_STATE_POWER_OFF; list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); - pm_genpd_sync_poweroff(link->master); + pm_genpd_sync_poweroff(link->master, timed); } } /** * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters. * @genpd: PM domain to power on. + * @timed: True if latency measurements are allowed. * * This function is only called in "noirq" and "syscore" stages of system power * transitions, so it need not acquire locks (all of the "noirq" callbacks are * executed sequentially, so it is guaranteed that it will never run twice in * parallel). */ -static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd) +static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd, + bool timed) { struct gpd_link *link; - if (genpd->status != GPD_STATE_POWER_OFF) + if (genpd->status == GPD_STATE_ACTIVE) return; list_for_each_entry(link, &genpd->slave_links, slave_node) { - pm_genpd_sync_poweron(link->master); + pm_genpd_sync_poweron(link->master, timed); genpd_sd_counter_inc(link->master); } - genpd_power_on(genpd); + genpd_power_on(genpd, timed); genpd->status = GPD_STATE_ACTIVE; } @@ -947,14 +695,14 @@ static int pm_genpd_prepare(struct device *dev) if (resume_needed(dev, genpd)) pm_runtime_resume(dev); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count++ == 0) { genpd->suspended_count = 0; genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; } - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); if (genpd->suspend_power_off) { pm_runtime_put_noidle(dev); @@ -963,7 +711,7 @@ static int pm_genpd_prepare(struct device *dev) /* * The PM domain must be in the GPD_STATE_ACTIVE state at this point, - * so pm_genpd_poweron() will return immediately, but if the device + * so genpd_poweron() will return immediately, but if the device * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need * to make it operational. 
*/ @@ -1056,7 +804,7 @@ static int pm_genpd_suspend_noirq(struct device *dev) * the same PM domain, so it is not necessary to use locking here. */ genpd->suspended_count++; - pm_genpd_sync_poweroff(genpd); + pm_genpd_sync_poweroff(genpd, true); return 0; } @@ -1086,7 +834,7 @@ static int pm_genpd_resume_noirq(struct device *dev) * guaranteed that this function will never run twice in parallel for * the same PM domain, so it is not necessary to use locking here. */ - pm_genpd_sync_poweron(genpd); + pm_genpd_sync_poweron(genpd, true); genpd->suspended_count--; return genpd_start_dev(genpd, dev); @@ -1217,7 +965,8 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev); + return genpd->suspend_power_off ? + 0 : genpd_start_dev(genpd, dev); } /** @@ -1300,7 +1049,7 @@ static int pm_genpd_restore_noirq(struct device *dev) * If the domain was off before the hibernation, make * sure it will be off going forward. */ - genpd_power_off(genpd); + genpd_power_off(genpd, true); return 0; } @@ -1309,7 +1058,7 @@ static int pm_genpd_restore_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - pm_genpd_sync_poweron(genpd); + pm_genpd_sync_poweron(genpd, true); return genpd_start_dev(genpd, dev); } @@ -1367,9 +1116,9 @@ static void genpd_syscore_switch(struct device *dev, bool suspend) if (suspend) { genpd->suspended_count++; - pm_genpd_sync_poweroff(genpd); + pm_genpd_sync_poweroff(genpd, false); } else { - pm_genpd_sync_poweron(genpd); + pm_genpd_sync_poweron(genpd, false); genpd->suspended_count--; } } @@ -1427,7 +1176,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, gpd_data->td = *td; gpd_data->base.dev = dev; - gpd_data->need_restore = -1; gpd_data->td.constraint_changed = true; gpd_data->td.effective_constraint_ns = -1; gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; @@ -1489,7 +1237,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (IS_ERR(gpd_data)) return PTR_ERR(gpd_data); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count > 0) { ret = -EAGAIN; @@ -1506,7 +1254,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); if (ret) genpd_free_dev_data(dev, gpd_data); @@ -1517,18 +1265,6 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, } /** - * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. - * @domain_name: Name of the PM domain to add the device to. - * @dev: Device to be added. - * @td: Set of PM QoS timing parameters to attach to the device. - */ -int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, - struct gpd_timing_data *td) -{ - return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td); -} - -/** * pm_genpd_remove_device - Remove a device from an I/O PM domain. * @genpd: PM domain to remove the device from. * @dev: Device to be removed. 
@@ -1550,7 +1286,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, gpd_data = to_gpd_data(pdd); dev_pm_qos_remove_notifier(dev, &gpd_data->nb); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count > 0) { ret = -EAGAIN; @@ -1565,14 +1301,14 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, list_del_init(&pdd->list_node); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); genpd_free_dev_data(dev, gpd_data); return 0; out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); dev_pm_qos_add_notifier(dev, &gpd_data->nb); return ret; @@ -1586,23 +1322,19 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *subdomain) { - struct gpd_link *link; + struct gpd_link *link, *itr; int ret = 0; if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) || genpd == subdomain) return -EINVAL; - start: - genpd_acquire_lock(genpd); - mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) + return -ENOMEM; - if (subdomain->status != GPD_STATE_POWER_OFF - && subdomain->status != GPD_STATE_ACTIVE) { - mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); - goto start; - } + mutex_lock(&genpd->lock); + mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); if (genpd->status == GPD_STATE_POWER_OFF && subdomain->status != GPD_STATE_POWER_OFF) { @@ -1610,18 +1342,13 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, goto out; } - list_for_each_entry(link, &genpd->master_links, master_node) { - if (link->slave == subdomain && link->master == genpd) { + list_for_each_entry(itr, &genpd->master_links, master_node) { + if (itr->slave == subdomain && itr->master == genpd) { ret = -EINVAL; goto out; } } - link = kzalloc(sizeof(*link), GFP_KERNEL); - if (!link) { - ret = -ENOMEM; - goto out; - } link->master = genpd; list_add_tail(&link->master_node, &genpd->master_links); link->slave = subdomain; @@ -1631,39 +1358,12 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, out: mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); - + mutex_unlock(&genpd->lock); + if (ret) + kfree(link); return ret; } - -/** - * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain. - * @master_name: Name of the master PM domain to add the subdomain to. - * @subdomain_name: Name of the subdomain to be added. - */ -int pm_genpd_add_subdomain_names(const char *master_name, - const char *subdomain_name) -{ - struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd; - - if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name)) - return -EINVAL; - - mutex_lock(&gpd_list_lock); - list_for_each_entry(gpd, &gpd_list, gpd_list_node) { - if (!master && !strcmp(gpd->name, master_name)) - master = gpd; - - if (!subdomain && !strcmp(gpd->name, subdomain_name)) - subdomain = gpd; - - if (master && subdomain) - break; - } - mutex_unlock(&gpd_list_lock); - - return pm_genpd_add_subdomain(master, subdomain); -} +EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); /** * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 
@@ -1679,8 +1379,14 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) return -EINVAL; - start: - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); + + if (!list_empty(&subdomain->slave_links) || subdomain->device_count) { + pr_warn("%s: unable to remove subdomain %s\n", genpd->name, + subdomain->name); + ret = -EBUSY; + goto out; + } list_for_each_entry(link, &genpd->master_links, master_node) { if (link->slave != subdomain) @@ -1688,13 +1394,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); - if (subdomain->status != GPD_STATE_POWER_OFF - && subdomain->status != GPD_STATE_ACTIVE) { - mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); - goto start; - } - list_del(&link->master_node); list_del(&link->slave_node); kfree(link); @@ -1707,128 +1406,12 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, break; } - genpd_release_lock(genpd); - - return ret; -} - -/** - * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. - * @genpd: PM domain to be connected with cpuidle. - * @state: cpuidle state this domain can disable/enable. - * - * Make a PM domain behave as though it contained a CPU core, that is, instead - * of calling its power down routine it will enable the given cpuidle state so - * that the cpuidle subsystem can power it down (if possible and desirable). - */ -int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) -{ - struct cpuidle_driver *cpuidle_drv; - struct gpd_cpuidle_data *cpuidle_data; - struct cpuidle_state *idle_state; - int ret = 0; - - if (IS_ERR_OR_NULL(genpd) || state < 0) - return -EINVAL; - - genpd_acquire_lock(genpd); - - if (genpd->cpuidle_data) { - ret = -EEXIST; - goto out; - } - cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL); - if (!cpuidle_data) { - ret = -ENOMEM; - goto out; - } - cpuidle_drv = cpuidle_driver_ref(); - if (!cpuidle_drv) { - ret = -ENODEV; - goto err_drv; - } - if (cpuidle_drv->state_count <= state) { - ret = -EINVAL; - goto err; - } - idle_state = &cpuidle_drv->states[state]; - if (!idle_state->disabled) { - ret = -EAGAIN; - goto err; - } - cpuidle_data->idle_state = idle_state; - cpuidle_data->saved_exit_latency = idle_state->exit_latency; - genpd->cpuidle_data = cpuidle_data; - genpd_recalc_cpu_exit_latency(genpd); - - out: - genpd_release_lock(genpd); - return ret; - - err: - cpuidle_driver_unref(); - - err_drv: - kfree(cpuidle_data); - goto out; -} - -/** - * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it. - * @name: Name of the domain to connect to cpuidle. - * @state: cpuidle state this domain can manipulate. - */ -int pm_genpd_name_attach_cpuidle(const char *name, int state) -{ - return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state); -} - -/** - * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain. - * @genpd: PM domain to remove the cpuidle connection from. - * - * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the - * given PM domain. 
- */ -int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) -{ - struct gpd_cpuidle_data *cpuidle_data; - struct cpuidle_state *idle_state; - int ret = 0; - - if (IS_ERR_OR_NULL(genpd)) - return -EINVAL; - - genpd_acquire_lock(genpd); - - cpuidle_data = genpd->cpuidle_data; - if (!cpuidle_data) { - ret = -ENODEV; - goto out; - } - idle_state = cpuidle_data->idle_state; - if (!idle_state->disabled) { - ret = -EAGAIN; - goto out; - } - idle_state->exit_latency = cpuidle_data->saved_exit_latency; - cpuidle_driver_unref(); - genpd->cpuidle_data = NULL; - kfree(cpuidle_data); +out: + mutex_unlock(&genpd->lock); - out: - genpd_release_lock(genpd); return ret; } - -/** - * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it. - * @name: Name of the domain to disconnect cpuidle from. - */ -int pm_genpd_name_detach_cpuidle(const char *name) -{ - return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name)); -} +EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); /* Default device callbacks for generic PM domains. */ @@ -1896,12 +1479,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd, mutex_init(&genpd->lock); genpd->gov = gov; INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); - genpd->in_progress = 0; atomic_set(&genpd->sd_count, 0); genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; - init_waitqueue_head(&genpd->status_wait_queue); - genpd->poweroff_task = NULL; - genpd->resume_count = 0; genpd->device_count = 0; genpd->max_off_time_ns = -1; genpd->max_off_time_changed = true; @@ -1939,6 +1518,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd, list_add(&genpd->gpd_list_node, &gpd_list); mutex_unlock(&gpd_list_lock); } +EXPORT_SYMBOL_GPL(pm_genpd_init); #ifdef CONFIG_PM_GENERIC_DOMAINS_OF /* @@ -2112,7 +1692,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider); /** * genpd_dev_pm_detach - Detach a device from its PM domain. - * @dev: Device to attach. + * @dev: Device to detach. * @power_off: Currently not used * * Try to locate a corresponding generic PM domain, which the device was @@ -2121,6 +1701,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider); static void genpd_dev_pm_detach(struct device *dev, bool power_off) { struct generic_pm_domain *pd; + unsigned int i; int ret = 0; pd = pm_genpd_lookup_dev(dev); @@ -2129,10 +1710,12 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off) dev_dbg(dev, "removing from PM domain %s\n", pd->name); - while (1) { + for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { ret = pm_genpd_remove_device(pd, dev); if (ret != -EAGAIN) break; + + mdelay(i); cond_resched(); } @@ -2167,12 +1750,16 @@ static void genpd_dev_pm_sync(struct device *dev) * Both generic and legacy Samsung-specific DT bindings are supported to keep * backwards compatibility with existing DTBs. * - * Returns 0 on successfully attached PM domain or negative error code. + * Returns 0 on successfully attached PM domain or negative error code. Note + * that if a power-domain exists for the device, but it cannot be found or + * turned on, then return -EPROBE_DEFER to ensure that the device is not + * probed and to re-try again later. 
*/ int genpd_dev_pm_attach(struct device *dev) { struct of_phandle_args pd_args; struct generic_pm_domain *pd; + unsigned int i; int ret; if (!dev->of_node) @@ -2199,34 +1786,36 @@ int genpd_dev_pm_attach(struct device *dev) } pd = of_genpd_get_from_provider(&pd_args); + of_node_put(pd_args.np); if (IS_ERR(pd)) { dev_dbg(dev, "%s() failed to find PM domain: %ld\n", __func__, PTR_ERR(pd)); - of_node_put(dev->of_node); - return PTR_ERR(pd); + return -EPROBE_DEFER; } dev_dbg(dev, "adding to PM domain %s\n", pd->name); - while (1) { + for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { ret = pm_genpd_add_device(pd, dev); if (ret != -EAGAIN) break; + + mdelay(i); cond_resched(); } if (ret < 0) { dev_err(dev, "failed to add to PM domain %s: %d", pd->name, ret); - of_node_put(dev->of_node); - return ret; + goto out; } dev->pm_domain->detach = genpd_dev_pm_detach; dev->pm_domain->sync = genpd_dev_pm_sync; - pm_genpd_poweron(pd); + ret = genpd_poweron(pd); - return 0; +out: + return ret ? -EPROBE_DEFER : 0; } EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ @@ -2274,9 +1863,6 @@ static int pm_genpd_summary_one(struct seq_file *s, { static const char * const status_lookup[] = { [GPD_STATE_ACTIVE] = "on", - [GPD_STATE_WAIT_MASTER] = "wait-master", - [GPD_STATE_BUSY] = "busy", - [GPD_STATE_REPEAT] = "off-in-progress", [GPD_STATE_POWER_OFF] = "off" }; struct pm_domain_data *pm_data; @@ -2290,7 +1876,7 @@ static int pm_genpd_summary_one(struct seq_file *s, if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) goto exit; - seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]); + seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]); /* * Modifications on the list require holding locks on both @@ -2325,8 +1911,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data) struct generic_pm_domain *genpd; int ret = 0; - seq_puts(s, " domain status slaves\n"); - seq_puts(s, " /device runtime status\n"); + seq_puts(s, "domain status slaves\n"); + seq_puts(s, " /device runtime status\n"); seq_puts(s, "----------------------------------------------------------------------\n"); ret = mutex_lock_interruptible(&gpd_list_lock); diff --git a/kernel/drivers/base/power/domain_governor.c b/kernel/drivers/base/power/domain_governor.c index 2a4154a09..1e937ac5f 100644 --- a/kernel/drivers/base/power/domain_governor.c +++ b/kernel/drivers/base/power/domain_governor.c @@ -77,13 +77,14 @@ static bool default_stop_ok(struct device *dev) dev_update_qos_constraint); if (constraint_ns > 0) { - constraint_ns -= td->start_latency_ns; + constraint_ns -= td->suspend_latency_ns + + td->resume_latency_ns; if (constraint_ns == 0) return false; } td->effective_constraint_ns = constraint_ns; - td->cached_stop_ok = constraint_ns > td->stop_latency_ns || - constraint_ns == 0; + td->cached_stop_ok = constraint_ns >= 0; + /* * The children have been suspended already, so we don't need to take * their stop latencies into account here. @@ -126,18 +127,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) off_on_time_ns = genpd->power_off_latency_ns + genpd->power_on_latency_ns; - /* - * It doesn't make sense to remove power from the domain if saving - * the state of all devices in it and the power off/power on operations - * take too much time. - * - * All devices in this domain have been stopped already at this point. 
- */ - list_for_each_entry(pdd, &genpd->dev_list, list_node) { - if (pdd->dev->driver) - off_on_time_ns += - to_gpd_data(pdd)->td.save_state_latency_ns; - } min_off_time_ns = -1; /* @@ -171,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) struct gpd_timing_data *td; s64 constraint_ns; - if (!pdd->dev->driver) - continue; - /* * Check if the device is allowed to be off long enough for the * domain to turn off and on (that's how much time it will @@ -193,7 +179,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) * constraint_ns cannot be negative here, because the device has * been suspended. */ - constraint_ns -= td->restore_state_latency_ns; if (constraint_ns <= off_on_time_ns) return false; diff --git a/kernel/drivers/base/power/generic_ops.c b/kernel/drivers/base/power/generic_ops.c index 96a92db83..07c3c4a95 100644 --- a/kernel/drivers/base/power/generic_ops.c +++ b/kernel/drivers/base/power/generic_ops.c @@ -9,6 +9,7 @@ #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/export.h> +#include <linux/suspend.h> #ifdef CONFIG_PM /** @@ -296,11 +297,27 @@ void pm_generic_complete(struct device *dev) if (drv && drv->pm && drv->pm->complete) drv->pm->complete(dev); +} +/** + * pm_complete_with_resume_check - Complete a device power transition. + * @dev: Device to handle. + * + * Complete a device power transition during a system-wide power transition and + * optionally schedule a runtime resume of the device if the system resume in + * progress has been initated by the platform firmware and the device had its + * power.direct_complete flag set. + */ +void pm_complete_with_resume_check(struct device *dev) +{ + pm_generic_complete(dev); /* - * Let runtime PM try to suspend devices that haven't been in use before - * going into the system-wide sleep state we're resuming from. + * If the device had been runtime-suspended before the system went into + * the sleep state it is going out of and it has never been resumed till + * now, resume it in case the firmware powered it up. 
*/ - pm_request_idle(dev); + if (dev->power.direct_complete && pm_resume_via_firmware()) + pm_request_resume(dev); } +EXPORT_SYMBOL_GPL(pm_complete_with_resume_check); #endif /* CONFIG_PM_SLEEP */ diff --git a/kernel/drivers/base/power/main.c b/kernel/drivers/base/power/main.c index 3d874eca7..1710c26ba 100644 --- a/kernel/drivers/base/power/main.c +++ b/kernel/drivers/base/power/main.c @@ -24,6 +24,7 @@ #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/pm-trace.h> +#include <linux/pm_wakeirq.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/async.h> @@ -587,6 +588,7 @@ void dpm_resume_noirq(pm_message_t state) async_synchronize_full(); dpm_show_time(starttime, state, "noirq"); resume_device_irqs(); + device_wakeup_disarm_wake_irqs(); cpuidle_resume(); trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); } @@ -920,9 +922,7 @@ static void device_complete(struct device *dev, pm_message_t state) if (callback) { pm_dev_dbg(dev, state, info); - trace_device_pm_callback_start(dev, info, state.event); callback(dev); - trace_device_pm_callback_end(dev, 0); } device_unlock(dev); @@ -954,7 +954,9 @@ void dpm_complete(pm_message_t state) list_move(&dev->power.entry, &list); mutex_unlock(&dpm_list_mtx); + trace_device_pm_callback_start(dev, "", state.event); device_complete(dev, state); + trace_device_pm_callback_end(dev, 0); mutex_lock(&dpm_list_mtx); put_device(dev); @@ -1104,6 +1106,7 @@ int dpm_suspend_noirq(pm_message_t state) trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); cpuidle_pause(); + device_wakeup_arm_wake_irqs(); suspend_device_irqs(); mutex_lock(&dpm_list_mtx); pm_transition = state; @@ -1374,7 +1377,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.direct_complete) { if (pm_runtime_status_suspended(dev)) { pm_runtime_disable(dev); - if (pm_runtime_suspended_if_enabled(dev)) + if (pm_runtime_status_suspended(dev)) goto Complete; pm_runtime_enable(dev); @@ -1585,11 +1588,8 @@ static int device_prepare(struct device *dev, pm_message_t state) callback = dev->driver->pm->prepare; } - if (callback) { - trace_device_pm_callback_start(dev, info, state.event); + if (callback) ret = callback(dev); - trace_device_pm_callback_end(dev, ret); - } device_unlock(dev); @@ -1631,7 +1631,9 @@ int dpm_prepare(pm_message_t state) get_device(dev); mutex_unlock(&dpm_list_mtx); + trace_device_pm_callback_start(dev, "", state.event); error = device_prepare(dev, state); + trace_device_pm_callback_end(dev, error); mutex_lock(&dpm_list_mtx); if (error) { diff --git a/kernel/drivers/base/power/opp/Makefile b/kernel/drivers/base/power/opp/Makefile new file mode 100644 index 000000000..33c1e18c4 --- /dev/null +++ b/kernel/drivers/base/power/opp/Makefile @@ -0,0 +1,2 @@ +ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG +obj-y += core.o cpu.o diff --git a/kernel/drivers/base/power/opp.c b/kernel/drivers/base/power/opp/core.c index 677fb2843..b8e76f750 100644 --- a/kernel/drivers/base/power/opp.c +++ b/kernel/drivers/base/power/opp/core.c @@ -11,93 +11,16 @@ * published by the Free Software Foundation. 
*/ -#include <linux/kernel.h> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/errno.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/device.h> -#include <linux/list.h> -#include <linux/rculist.h> -#include <linux/rcupdate.h> -#include <linux/pm_opp.h> #include <linux/of.h> #include <linux/export.h> -/* - * Internal data structure organization with the OPP layer library is as - * follows: - * dev_opp_list (root) - * |- device 1 (represents voltage domain 1) - * | |- opp 1 (availability, freq, voltage) - * | |- opp 2 .. - * ... ... - * | `- opp n .. - * |- device 2 (represents the next voltage domain) - * ... - * `- device m (represents mth voltage domain) - * device 1, 2.. are represented by dev_opp structure while each opp - * is represented by the opp structure. - */ - -/** - * struct dev_pm_opp - Generic OPP description structure - * @node: opp list node. The nodes are maintained throughout the lifetime - * of boot. It is expected only an optimal set of OPPs are - * added to the library by the SoC framework. - * RCU usage: opp list is traversed with RCU locks. node - * modification is possible realtime, hence the modifications - * are protected by the dev_opp_list_lock for integrity. - * IMPORTANT: the opp nodes should be maintained in increasing - * order. - * @dynamic: not-created from static DT entries. - * @available: true/false - marks if this OPP as available or not - * @rate: Frequency in hertz - * @u_volt: Nominal voltage in microvolts corresponding to this OPP - * @dev_opp: points back to the device_opp struct this opp belongs to - * @rcu_head: RCU callback head used for deferred freeing - * - * This structure stores the OPP information for a given device. - */ -struct dev_pm_opp { - struct list_head node; - - bool available; - bool dynamic; - unsigned long rate; - unsigned long u_volt; - - struct device_opp *dev_opp; - struct rcu_head rcu_head; -}; - -/** - * struct device_opp - Device opp structure - * @node: list node - contains the devices with OPPs that - * have been registered. Nodes once added are not modified in this - * list. - * RCU usage: nodes are not modified in the list of device_opp, - * however addition is possible and is secured by dev_opp_list_lock - * @dev: device pointer - * @srcu_head: notifier head to notify the OPP availability changes. - * @rcu_head: RCU callback head used for deferred freeing - * @opp_list: list of opps - * - * This is an internal data structure maintaining the link to opps attached to - * a device. This structure is not meant to be shared to users as it is - * meant for book keeping and private to OPP library. - * - * Because the opp structures can be used from both rcu and srcu readers, we - * need to wait for the grace period of both of them before freeing any - * resources. And so we have used kfree_rcu() from within call_srcu() handlers. - */ -struct device_opp { - struct list_head node; - - struct device *dev; - struct srcu_notifier_head srcu_head; - struct rcu_head rcu_head; - struct list_head opp_list; -}; +#include "opp.h" /* * The root of the list of all devices. 
All device_opp structures branch off @@ -106,16 +29,48 @@ struct device_opp { */ static LIST_HEAD(dev_opp_list); /* Lock to allow exclusive modification to the device and opp lists */ -static DEFINE_MUTEX(dev_opp_list_lock); +DEFINE_MUTEX(dev_opp_list_lock); #define opp_rcu_lockdep_assert() \ do { \ - rcu_lockdep_assert(rcu_read_lock_held() || \ - lockdep_is_held(&dev_opp_list_lock), \ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !lockdep_is_held(&dev_opp_list_lock), \ "Missing rcu_read_lock() or " \ "dev_opp_list_lock protection"); \ } while (0) +static struct device_list_opp *_find_list_dev(const struct device *dev, + struct device_opp *dev_opp) +{ + struct device_list_opp *list_dev; + + list_for_each_entry(list_dev, &dev_opp->dev_list, node) + if (list_dev->dev == dev) + return list_dev; + + return NULL; +} + +static struct device_opp *_managed_opp(const struct device_node *np) +{ + struct device_opp *dev_opp; + + list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) { + if (dev_opp->np == np) { + /* + * Multiple devices can point to the same OPP table and + * so will have same node-pointer, np. + * + * But the OPPs will be considered as shared only if the + * OPP table contains a "opp-shared" property. + */ + return dev_opp->shared_opp ? dev_opp : NULL; + } + } + + return NULL; +} + /** * _find_device_opp() - find device_opp struct using device pointer * @dev: device pointer used to lookup device OPPs @@ -126,31 +81,32 @@ do { \ * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or * -EINVAL based on type of error. * - * Locking: This function must be called under rcu_read_lock(). device_opp - * is a RCU protected pointer. This means that device_opp is valid as long - * as we are under RCU lock. + * Locking: For readers, this function must be called under rcu_read_lock(). + * device_opp is a RCU protected pointer, which means that device_opp is valid + * as long as we are under RCU lock. + * + * For Writers, this function must be called with dev_opp_list_lock held. 
*/ -static struct device_opp *_find_device_opp(struct device *dev) +struct device_opp *_find_device_opp(struct device *dev) { - struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); + struct device_opp *dev_opp; + + opp_rcu_lockdep_assert(); - if (unlikely(IS_ERR_OR_NULL(dev))) { + if (IS_ERR_OR_NULL(dev)) { pr_err("%s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } - list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) { - if (tmp_dev_opp->dev == dev) { - dev_opp = tmp_dev_opp; - break; - } - } + list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) + if (_find_list_dev(dev, dev_opp)) + return dev_opp; - return dev_opp; + return ERR_PTR(-ENODEV); } /** - * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp + * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp * @opp: opp for which voltage has to be returned for * * Return: voltage in micro volt corresponding to the opp, else @@ -172,7 +128,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) opp_rcu_lockdep_assert(); tmp_opp = rcu_dereference(opp); - if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) + if (IS_ERR_OR_NULL(tmp_opp)) pr_err("%s: Invalid parameters\n", __func__); else v = tmp_opp->u_volt; @@ -204,7 +160,7 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) opp_rcu_lockdep_assert(); tmp_opp = rcu_dereference(opp); - if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) + if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) pr_err("%s: Invalid parameters\n", __func__); else f = tmp_opp->rate; @@ -214,6 +170,94 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); /** + * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not + * @opp: opp for which turbo mode is being verified + * + * Turbo OPPs are not for normal use, and can be enabled (under certain + * conditions) for short duration of times to finish high throughput work + * quickly. Running on them for longer times may overheat the chip. + * + * Return: true if opp is turbo opp, else false. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. This means that opp which could have been fetched by + * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are + * under RCU lock. The pointer returned by the opp_find_freq family must be + * used in the same section as the usage of this function with the pointer + * prior to unlocking with rcu_read_unlock() to maintain the integrity of the + * pointer. + */ +bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) +{ + struct dev_pm_opp *tmp_opp; + + opp_rcu_lockdep_assert(); + + tmp_opp = rcu_dereference(opp); + if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) { + pr_err("%s: Invalid parameters\n", __func__); + return false; + } + + return tmp_opp->turbo; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); + +/** + * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds + * @dev: device for which we do this operation + * + * Return: This function returns the max clock latency in nanoseconds. + * + * Locking: This function takes rcu_read_lock(). 
+ */ +unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) +{ + struct device_opp *dev_opp; + unsigned long clock_latency_ns; + + rcu_read_lock(); + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) + clock_latency_ns = 0; + else + clock_latency_ns = dev_opp->clock_latency_ns_max; + + rcu_read_unlock(); + return clock_latency_ns; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); + +/** + * dev_pm_opp_get_suspend_opp() - Get suspend opp + * @dev: device for which we do this operation + * + * Return: This function returns pointer to the suspend opp if it is + * defined and available, otherwise it returns NULL. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) +{ + struct device_opp *dev_opp; + + opp_rcu_lockdep_assert(); + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp) || !dev_opp->suspend_opp || + !dev_opp->suspend_opp->available) + return NULL; + + return dev_opp->suspend_opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp); + +/** * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list * @dev: device for which we do this operation * @@ -407,18 +451,57 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); +/* List-dev Helpers */ +static void _kfree_list_dev_rcu(struct rcu_head *head) +{ + struct device_list_opp *list_dev; + + list_dev = container_of(head, struct device_list_opp, rcu_head); + kfree_rcu(list_dev, rcu_head); +} + +static void _remove_list_dev(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + list_del(&list_dev->node); + call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head, + _kfree_list_dev_rcu); +} + +struct device_list_opp *_add_list_dev(const struct device *dev, + struct device_opp *dev_opp) +{ + struct device_list_opp *list_dev; + + list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL); + if (!list_dev) + return NULL; + + /* Initialize list-dev */ + list_dev->dev = dev; + list_add_rcu(&list_dev->node, &dev_opp->dev_list); + + return list_dev; +} + /** - * _add_device_opp() - Allocate a new device OPP table + * _add_device_opp() - Find device OPP table or allocate a new one * @dev: device for which we do this operation * - * New device node which uses OPPs - used when multiple devices with OPP tables - * are maintained. + * It tries to find an existing table first, if it couldn't find one, it + * allocates a new OPP table and returns that. * * Return: valid device_opp pointer if success, else NULL. */ static struct device_opp *_add_device_opp(struct device *dev) { struct device_opp *dev_opp; + struct device_list_opp *list_dev; + + /* Check for existing list for 'dev' first */ + dev_opp = _find_device_opp(dev); + if (!IS_ERR(dev_opp)) + return dev_opp; /* * Allocate a new device OPP table. 
In the infrequent case where a new @@ -428,7 +511,14 @@ static struct device_opp *_add_device_opp(struct device *dev)
 if (!dev_opp)
 return NULL;
- dev_opp->dev = dev;
+ INIT_LIST_HEAD(&dev_opp->dev_list);
+
+ list_dev = _add_list_dev(dev, dev_opp);
+ if (!list_dev) {
+ kfree(dev_opp);
+ return NULL;
+ }
+
 srcu_init_notifier_head(&dev_opp->srcu_head);
 INIT_LIST_HEAD(&dev_opp->opp_list);
@@ -438,117 +528,197 @@ static struct device_opp *_add_device_opp(struct device *dev)
 }
 /**
- * _opp_add_dynamic() - Allocate a dynamic OPP.
- * @dev: device for which we do this operation
- * @freq: Frequency in Hz for this OPP
- * @u_volt: Voltage in uVolts for this OPP
- * @dynamic: Dynamically added OPPs.
+ * _kfree_device_rcu() - Free device_opp RCU handler
+ * @head: RCU head
+ */
+static void _kfree_device_rcu(struct rcu_head *head)
+{
+ struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
+
+ kfree_rcu(device_opp, rcu_head);
+}
+
+/**
+ * _remove_device_opp() - Removes a device OPP table
+ * @dev_opp: device OPP table to be removed.
 *
- * This function adds an opp definition to the opp list and returns status.
- * The opp is made available by default and it can be controlled using
- * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
+ * Removes/frees the device OPP table if it doesn't contain any OPPs.
+ */
+static void _remove_device_opp(struct device_opp *dev_opp)
+{
+ struct device_list_opp *list_dev;
+
+ if (!list_empty(&dev_opp->opp_list))
+ return;
+
+ list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
+ node);
+
+ _remove_list_dev(list_dev, dev_opp);
+
+ /* dev_list must be empty now */
+ WARN_ON(!list_empty(&dev_opp->dev_list));
+
+ list_del_rcu(&dev_opp->node);
+ call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
+ _kfree_device_rcu);
+}
+
+/**
+ * _kfree_opp_rcu() - Free OPP RCU handler
+ * @head: RCU head
+ */
+static void _kfree_opp_rcu(struct rcu_head *head)
+{
+ struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
+
+ kfree_rcu(opp, rcu_head);
+}
+
+/**
+ * _opp_remove() - Remove an OPP from a table definition
+ * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @opp: pointer to the OPP to remove
+ * @notify: whether to send an OPP_EVENT_REMOVE notification
+ *
+ * This function removes an opp definition from the opp list.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * It is assumed that the caller holds required mutex for an RCU updater
+ * strategy.
+ */
+static void _opp_remove(struct device_opp *dev_opp,
+ struct dev_pm_opp *opp, bool notify)
+{
+ /*
+ * Notify the changes in the availability of the operable
+ * frequency/voltage list.
+ */
+ if (notify)
+ srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
+ list_del_rcu(&opp->node);
+ call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+
+ _remove_device_opp(dev_opp);
+}
+
+/**
+ * dev_pm_opp_remove() - Remove an OPP from OPP list
+ * @dev: device for which we do this operation
+ * @freq: OPP to remove with matching 'freq'
 *
- * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
- * freed by of_free_opp_table.
+ * This function removes an opp from the opp list.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures.
Callers should ensure * that this function is *NOT* called under RCU protection or in contexts where * mutex cannot be locked. - * - * Return: - * 0 On success OR - * Duplicate OPPs (both freq and volt are same) and opp->available - * -EEXIST Freq are same and volt are different OR - * Duplicate OPPs (both freq and volt are same) and !opp->available - * -ENOMEM Memory allocation failure */ -static int _opp_add_dynamic(struct device *dev, unsigned long freq, - long u_volt, bool dynamic) +void dev_pm_opp_remove(struct device *dev, unsigned long freq) { - struct device_opp *dev_opp = NULL; - struct dev_pm_opp *opp, *new_opp; - struct list_head *head; - int ret; - - /* allocate new OPP node */ - new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL); - if (!new_opp) - return -ENOMEM; + struct dev_pm_opp *opp; + struct device_opp *dev_opp; + bool found = false; /* Hold our list modification lock here */ mutex_lock(&dev_opp_list_lock); - /* populate the opp table */ - new_opp->rate = freq; - new_opp->u_volt = u_volt; - new_opp->available = true; - new_opp->dynamic = dynamic; - - /* Check for existing list for 'dev' */ dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) { - dev_opp = _add_device_opp(dev); - if (!dev_opp) { - ret = -ENOMEM; - goto free_opp; + if (IS_ERR(dev_opp)) + goto unlock; + + list_for_each_entry(opp, &dev_opp->opp_list, node) { + if (opp->rate == freq) { + found = true; + break; } + } - head = &dev_opp->opp_list; - goto list_add; + if (!found) { + dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", + __func__, freq); + goto unlock; } + _opp_remove(dev_opp, opp, true); +unlock: + mutex_unlock(&dev_opp_list_lock); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_remove); + +static struct dev_pm_opp *_allocate_opp(struct device *dev, + struct device_opp **dev_opp) +{ + struct dev_pm_opp *opp; + + /* allocate new OPP node */ + opp = kzalloc(sizeof(*opp), GFP_KERNEL); + if (!opp) + return NULL; + + INIT_LIST_HEAD(&opp->node); + + *dev_opp = _add_device_opp(dev); + if (!*dev_opp) { + kfree(opp); + return NULL; + } + + return opp; +} + +static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, + struct device_opp *dev_opp) +{ + struct dev_pm_opp *opp; + struct list_head *head = &dev_opp->opp_list; + /* - * Insert new OPP in order of increasing frequency - * and discard if already present + * Insert new OPP in order of increasing frequency and discard if + * already present. + * + * Need to use &dev_opp->opp_list in the condition part of the 'for' + * loop, don't replace it with head otherwise it will become an infinite + * loop. */ - head = &dev_opp->opp_list; list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { - if (new_opp->rate <= opp->rate) - break; - else + if (new_opp->rate > opp->rate) { head = &opp->node; - } + continue; + } - /* Duplicate OPPs ? */ - if (new_opp->rate == opp->rate) { - ret = opp->available && new_opp->u_volt == opp->u_volt ? - 0 : -EEXIST; + if (new_opp->rate < opp->rate) + break; + /* Duplicate OPPs */ dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", __func__, opp->rate, opp->u_volt, opp->available, new_opp->rate, new_opp->u_volt, new_opp->available); - goto free_opp; + + return opp->available && new_opp->u_volt == opp->u_volt ? + 0 : -EEXIST; } -list_add: new_opp->dev_opp = dev_opp; list_add_rcu(&new_opp->node, head); - mutex_unlock(&dev_opp_list_lock); - /* - * Notify the changes in the availability of the operable - * frequency/voltage list. 
- */
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
 return 0;
-
-free_opp:
- mutex_unlock(&dev_opp_list_lock);
- kfree(new_opp);
- return ret;
}
/**
- * dev_pm_opp_add() - Add an OPP table from a table definitions
+ * _opp_add_v1() - Allocate an OPP based on v1 bindings.
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
+ * @dynamic: Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
- * dev_pm_opp_enable/disable functions.
+ * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
+ *
+ * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
+ * and freed by dev_pm_opp_of_remove_table.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
@@ -563,107 +733,222 @@ free_opp:
 * Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM Memory allocation failure
 */
-int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
+ bool dynamic)
{
- return _opp_add_dynamic(dev, freq, u_volt, true);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_add);
+ struct device_opp *dev_opp;
+ struct dev_pm_opp *new_opp;
+ int ret;
-/**
- * _kfree_opp_rcu() - Free OPP RCU handler
- * @head: RCU head
- */
-static void _kfree_opp_rcu(struct rcu_head *head)
-{
- struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
+ /* Hold our list modification lock here */
+ mutex_lock(&dev_opp_list_lock);
- kfree_rcu(opp, rcu_head);
-}
+ new_opp = _allocate_opp(dev, &dev_opp);
+ if (!new_opp) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
-/**
- * _kfree_device_rcu() - Free device_opp RCU handler
- * @head: RCU head
- */
-static void _kfree_device_rcu(struct rcu_head *head)
-{
- struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
+ /* populate the opp table */
+ new_opp->rate = freq;
+ new_opp->u_volt = u_volt;
+ new_opp->available = true;
+ new_opp->dynamic = dynamic;
- kfree_rcu(device_opp, rcu_head);
-}
+ ret = _opp_add(dev, new_opp, dev_opp);
+ if (ret)
+ goto free_opp;
+
+ mutex_unlock(&dev_opp_list_lock);
-/**
- * _opp_remove() - Remove an OPP from a table definition
- * @dev_opp: points back to the device_opp struct this opp belongs to
- * @opp: pointer to the OPP to remove
- *
- * This function removes an opp definition from the opp list.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * It is assumed that the caller holds required mutex for an RCU updater
- * strategy.
- */
-static void _opp_remove(struct device_opp *dev_opp,
- struct dev_pm_opp *opp)
-{
 /*
 * Notify the changes in the availability of the operable
 * frequency/voltage list.
*/ - srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); - list_del_rcu(&opp->node); - call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); + srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp); + return 0; + +free_opp: + _opp_remove(dev_opp, new_opp, false); +unlock: + mutex_unlock(&dev_opp_list_lock); + return ret; +} + +/* TODO: Support multiple regulators */ +static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev) +{ + u32 microvolt[3] = {0}; + u32 val; + int count, ret; + + /* Missing property isn't a problem, but an invalid entry is */ + if (!of_find_property(opp->np, "opp-microvolt", NULL)) + return 0; - if (list_empty(&dev_opp->opp_list)) { - list_del_rcu(&dev_opp->node); - call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, - _kfree_device_rcu); + count = of_property_count_u32_elems(opp->np, "opp-microvolt"); + if (count < 0) { + dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", + __func__, count); + return count; } + + /* There can be one or three elements here */ + if (count != 1 && count != 3) { + dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", + __func__, count); + return -EINVAL; + } + + ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt, + count); + if (ret) { + dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__, + ret); + return -EINVAL; + } + + opp->u_volt = microvolt[0]; + opp->u_volt_min = microvolt[1]; + opp->u_volt_max = microvolt[2]; + + if (!of_property_read_u32(opp->np, "opp-microamp", &val)) + opp->u_amp = val; + + return 0; } /** - * dev_pm_opp_remove() - Remove an OPP from OPP list + * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) * @dev: device for which we do this operation - * @freq: OPP to remove with matching 'freq' + * @np: device node * - * This function removes an opp from the opp list. + * This function adds an opp definition to the opp list and returns status. The + * opp can be controlled using dev_pm_opp_enable/disable functions and may be + * removed by dev_pm_opp_remove. * * Locking: The internal device_opp and opp structures are RCU protected. * Hence this function internally uses RCU updater strategy with mutex locks * to keep the integrity of the internal data structures. Callers should ensure * that this function is *NOT* called under RCU protection or in contexts where * mutex cannot be locked. 
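+ *
+ * For reference, an illustrative v2 OPP node carrying the properties
+ * parsed here (editor's sketch, values hypothetical):
+ *
+ *	opp@1000000000 {
+ *		opp-hz = /bits/ 64 <1000000000>;
+ *		opp-microvolt = <975000 970000 985000>;
+ *		opp-microamp = <70000>;
+ *		clock-latency-ns = <300000>;
+ *		opp-suspend;
+ *	};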
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq are same and volt are different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ * -EINVAL Failed parsing the OPP node
 */
-void dev_pm_opp_remove(struct device *dev, unsigned long freq)
+static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
- struct dev_pm_opp *opp;
 struct device_opp *dev_opp;
- bool found = false;
+ struct dev_pm_opp *new_opp;
+ u64 rate;
+ u32 val;
+ int ret;
 /* Hold our list modification lock here */
 mutex_lock(&dev_opp_list_lock);
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
+ new_opp = _allocate_opp(dev, &dev_opp);
+ if (!new_opp) {
+ ret = -ENOMEM;
 goto unlock;
+ }
- list_for_each_entry(opp, &dev_opp->opp_list, node) {
- if (opp->rate == freq) {
- found = true;
- break;
- }
+ ret = of_property_read_u64(np, "opp-hz", &rate);
+ if (ret < 0) {
+ dev_err(dev, "%s: opp-hz not found\n", __func__);
+ goto free_opp;
 }
- if (!found) {
- dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
- __func__, freq);
- goto unlock;
+ /*
+ * Rate is defined as an unsigned long in the clk API, so cast
+ * explicitly to its type. Must be fixed once a 64 bit rate is
+ * guaranteed in the clk API.
+ */
+ new_opp->rate = (unsigned long)rate;
+ new_opp->turbo = of_property_read_bool(np, "turbo-mode");
+
+ new_opp->np = np;
+ new_opp->dynamic = false;
+ new_opp->available = true;
+
+ if (!of_property_read_u32(np, "clock-latency-ns", &val))
+ new_opp->clock_latency_ns = val;
+
+ ret = opp_parse_supplies(new_opp, dev);
+ if (ret)
+ goto free_opp;
+
+ ret = _opp_add(dev, new_opp, dev_opp);
+ if (ret)
+ goto free_opp;
+
+ /* OPP to select on device suspend */
+ if (of_property_read_bool(np, "opp-suspend")) {
+ if (dev_opp->suspend_opp)
+ dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
+ __func__, dev_opp->suspend_opp->rate,
+ new_opp->rate);
+ else
+ dev_opp->suspend_opp = new_opp;
 }
- _opp_remove(dev_opp, opp);
+ if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
+ dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
+
+ mutex_unlock(&dev_opp_list_lock);
+
+ pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
+ __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
+ new_opp->u_volt_min, new_opp->u_volt_max,
+ new_opp->clock_latency_ns);
+
+ /*
+ * Notify the changes in the availability of the operable
+ * frequency/voltage list.
+ */
+ srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+ return 0;
+
+free_opp:
+ _opp_remove(dev_opp, new_opp, false);
unlock:
 mutex_unlock(&dev_opp_list_lock);
+ return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+
+/**
+ * dev_pm_opp_add() - Add an OPP table from a table definition
+ * @dev: device for which we do this operation
+ * @freq: Frequency in Hz for this OPP
+ * @u_volt: Voltage in uVolts for this OPP
+ *
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * dev_pm_opp_enable/disable functions.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
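+ *
+ * A hedged usage sketch (editor's illustration; the frequency and voltage
+ * values are made up):
+ *
+ *	ret = dev_pm_opp_add(dev, 1000000000, 975000);
+ *	if (ret)
+ *		dev_err(dev, "failed to add OPP: %d\n", ret);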
+ * + * Return: + * 0 On success OR + * Duplicate OPPs (both freq and volt are same) and opp->available + * -EEXIST Freq are same and volt are different OR + * Duplicate OPPs (both freq and volt are same) and !opp->available + * -ENOMEM Memory allocation failure + */ +int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) +{ + return _opp_add_v1(dev, freq, u_volt, true); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_add); /** * _opp_set_availability() - helper to set the availability of an opp @@ -675,7 +960,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove); * share a common logic which is isolated here. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. * * Locking: The internal device_opp and opp structures are RCU protected. @@ -763,7 +1048,7 @@ unlock: * mutex locking or synchronize_rcu() blocking calls cannot be used. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. */ int dev_pm_opp_enable(struct device *dev, unsigned long freq) @@ -789,7 +1074,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable); * mutex locking or synchronize_rcu() blocking calls cannot be used. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. */ int dev_pm_opp_disable(struct device *dev, unsigned long freq) @@ -825,28 +1110,127 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); #ifdef CONFIG_OF /** - * of_init_opp_table() - Initialize opp table from device tree + * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT + * entries * @dev: device pointer used to lookup device OPPs. * - * Register the initial OPP table with the OPP library for given device. + * Free OPPs created using static entries present in DT. * * Locking: The internal device_opp and opp structures are RCU protected. * Hence this function indirectly uses RCU updater strategy with mutex locks * to keep the integrity of the internal data structures. Callers should ensure * that this function is *NOT* called under RCU protection or in contexts where * mutex cannot be locked. - * - * Return: - * 0 On success OR - * Duplicate OPPs (both freq and volt are same) and opp->available - * -EEXIST Freq are same and volt are different OR - * Duplicate OPPs (both freq and volt are same) and !opp->available - * -ENOMEM Memory allocation failure - * -ENODEV when 'operating-points' property is not found or is invalid data - * in device node. - * -ENODATA when empty 'operating-points' property is found */ -int of_init_opp_table(struct device *dev) +void dev_pm_opp_of_remove_table(struct device *dev) +{ + struct device_opp *dev_opp; + struct dev_pm_opp *opp, *tmp; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + /* Check for existing list for 'dev' */ + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) { + int error = PTR_ERR(dev_opp); + + if (error != -ENODEV) + WARN(1, "%s: dev_opp: %d\n", + IS_ERR_OR_NULL(dev) ? 
+ "Invalid device" : dev_name(dev),
+ error);
+ goto unlock;
+ }
+
+ /* Find if dev_opp manages a single device */
+ if (list_is_singular(&dev_opp->dev_list)) {
+ /* Free static OPPs */
+ list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
+ if (!opp->dynamic)
+ _opp_remove(dev_opp, opp, true);
+ }
+ } else {
+ _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
+ }
+
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
+
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *_of_get_opp_desc_node(struct device *dev)
+{
+ /*
+ * TODO: Support for multiple OPP tables.
+ *
+ * There should be only ONE phandle present in "operating-points-v2"
+ * property.
+ */
+
+ return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
+}
+
+/* Initializes OPP tables based on new bindings */
+static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
+{
+ struct device_node *np;
+ struct device_opp *dev_opp;
+ int ret = 0, count = 0;
+
+ mutex_lock(&dev_opp_list_lock);
+
+ dev_opp = _managed_opp(opp_np);
+ if (dev_opp) {
+ /* OPPs are already managed */
+ if (!_add_list_dev(dev, dev_opp))
+ ret = -ENOMEM;
+ mutex_unlock(&dev_opp_list_lock);
+ return ret;
+ }
+ mutex_unlock(&dev_opp_list_lock);
+
+ /* We now have the opp-list node; iterate over it and add OPPs */
+ for_each_available_child_of_node(opp_np, np) {
+ count++;
+
+ ret = _opp_add_static_v2(dev, np);
+ if (ret) {
+ dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+ ret);
+ goto free_table;
+ }
+ }
+
+ /* There should be one or more OPPs defined */
+ if (WARN_ON(!count))
+ return -ENOENT;
+
+ mutex_lock(&dev_opp_list_lock);
+
+ dev_opp = _find_device_opp(dev);
+ if (WARN_ON(IS_ERR(dev_opp))) {
+ ret = PTR_ERR(dev_opp);
+ mutex_unlock(&dev_opp_list_lock);
+ goto free_table;
+ }
+
+ dev_opp->np = opp_np;
+ dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+
+ mutex_unlock(&dev_opp_list_lock);
+
+ return 0;
+
+free_table:
+ dev_pm_opp_of_remove_table(dev);
+
+ return ret;
+}
+
+/* Initializes OPP tables based on the old, deprecated bindings */
+static int _of_add_opp_table_v1(struct device *dev)
{
 const struct property *prop;
 const __be32 *val;
@@ -873,7 +1257,7 @@ int of_init_opp_table(struct device *dev)
 unsigned long freq = be32_to_cpup(val++) * 1000;
 unsigned long volt = be32_to_cpup(val++);
- if (_opp_add_dynamic(dev, freq, volt, false))
+ if (_opp_add_v1(dev, freq, volt, false))
 dev_warn(dev, "%s: Failed to add OPP %ld\n",
 __func__, freq);
 nr -= 2;
@@ -881,47 +1265,52 @@ int of_init_opp_table(struct device *dev)
 return 0;
}
-EXPORT_SYMBOL_GPL(of_init_opp_table);
/**
- * of_free_opp_table() - Free OPP table entries created from static DT entries
+ * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev: device pointer used to lookup device OPPs.
 *
- * Free OPPs created using static entries present in DT.
+ * Register the initial OPP table with the OPP library for given device.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
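+ *
+ * A typical probe-time sequence might look like this (editor's sketch,
+ * not part of this patch; error handling trimmed):
+ *
+ *	ret = dev_pm_opp_of_add_table(dev);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	dev_pm_opp_of_remove_table(dev);	(cleanup on remove/error)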
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq are same and volt are different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ * -ENODEV when 'operating-points' property is not found or is invalid data
+ * in device node.
+ * -ENODATA when empty 'operating-points' property is found
+ * -EINVAL when invalid entries are found in opp-v2 table
 */
-void of_free_opp_table(struct device *dev)
+int dev_pm_opp_of_add_table(struct device *dev)
{
- struct device_opp *dev_opp;
- struct dev_pm_opp *opp, *tmp;
+ struct device_node *opp_np;
+ int ret;
- /* Check for existing list for 'dev' */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int error = PTR_ERR(dev_opp);
- if (error != -ENODEV)
- WARN(1, "%s: dev_opp: %d\n",
- IS_ERR_OR_NULL(dev) ?
- "Invalid device" : dev_name(dev),
- error);
- return;
+ /*
+ * OPPs have two versions of bindings now. The older one is deprecated;
+ * try the new binding first.
+ */
+ opp_np = _of_get_opp_desc_node(dev);
+ if (!opp_np) {
+ /*
+ * Try the old, deprecated bindings for backward compatibility
+ * with older dtbs.
+ */
+ return _of_add_opp_table_v1(dev);
 }
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
-
- /* Free static OPPs */
- list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
- if (!opp->dynamic)
- _opp_remove(dev_opp, opp);
- }
+ ret = _of_add_opp_table_v2(dev, opp_np);
+ of_node_put(opp_np);
- mutex_unlock(&dev_opp_list_lock);
+ return ret;
}
-EXPORT_SYMBOL_GPL(of_free_opp_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
#endif
diff --git a/kernel/drivers/base/power/opp/cpu.c b/kernel/drivers/base/power/opp/cpu.c
new file mode 100644
index 000000000..7b445e88a
--- /dev/null
+++ b/kernel/drivers/base/power/opp/cpu.c
@@ -0,0 +1,270 @@
+/*
+ * Generic OPP helper interface for CPU device
+ *
+ * Copyright (C) 2009-2014 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "opp.h"
+
+#ifdef CONFIG_CPU_FREQ
+
+/**
+ * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
+ * @dev: device for which we do this operation
+ * @table: Cpufreq table returned back to caller
+ *
+ * Generate a cpufreq table for a provided device; this assumes that the
+ * opp list is already initialized and ready for usage.
+ *
+ * This function allocates required memory for the cpufreq table. It is
+ * expected that the caller does the required maintenance such as freeing
+ * the table as required.
+ *
+ * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
+ * if no memory is available for the operation (table is not populated), returns 0
+ * if successful and table is populated.
+ *
+ * WARNING: It is important for the callers to ensure refreshing their copy of
+ * the table if any of the mentioned functions have been invoked in the interim.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Since we just use the regular accessor functions to access the internal data
+ * structures, we use RCU read lock inside this function. As a result, users of
+ * this function DO NOT need to use explicit locks when invoking it.
+ */
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ struct dev_pm_opp *opp;
+ struct cpufreq_frequency_table *freq_table = NULL;
+ int i, max_opps, ret = 0;
+ unsigned long rate;
+
+ rcu_read_lock();
+
+ max_opps = dev_pm_opp_get_opp_count(dev);
+ if (max_opps <= 0) {
+ ret = max_opps ? max_opps : -ENODATA;
+ goto out;
+ }
+
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
+ if (!freq_table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0, rate = 0; i < max_opps; i++, rate++) {
+ /* find next rate */
+ opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto out;
+ }
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = rate / 1000;
+
+ /* Is this a boost/turbo OPP? */
+ if (dev_pm_opp_is_turbo(opp))
+ freq_table[i].flags = CPUFREQ_BOOST_FREQ;
+ }
+
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ *table = &freq_table[0];
+
+out:
+ rcu_read_unlock();
+ if (ret)
+ kfree(freq_table);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
+
+/**
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table
+ */
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
+#endif /* CONFIG_CPU_FREQ */
+
+/* Required only for V1 bindings, as v2 can manage it from DT itself */
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ struct device_list_opp *list_dev;
+ struct device_opp *dev_opp;
+ struct device *dev;
+ int cpu, ret = 0;
+
+ mutex_lock(&dev_opp_list_lock);
+
+ dev_opp = _find_device_opp(cpu_dev);
+ if (IS_ERR(dev_opp)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ for_each_cpu(cpu, cpumask) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ __func__, cpu);
+ continue;
+ }
+
+ list_dev = _add_list_dev(dev, dev_opp);
+ if (!list_dev) {
+ dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+ __func__, cpu);
+ continue;
+ }
+ }
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
+
+#ifdef CONFIG_OF
+void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
+{
+ struct device *cpu_dev;
+ int cpu;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ dev_pm_opp_of_remove_table(cpu_dev);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
+
+int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
+{
+ struct device *cpu_dev;
+ int cpu, ret = 0;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (ret) {
+ pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+ 
__func__, cpu, ret); + + /* Free all other OPPs */ + dev_pm_opp_of_cpumask_remove_table(cpumask); + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); + +/* + * Works only for OPP v2 bindings. + * + * cpumask should be already set to mask of cpu_dev->id. + * Returns -ENOENT if operating-points-v2 bindings aren't supported. + */ +int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) +{ + struct device_node *np, *tmp_np; + struct device *tcpu_dev; + int cpu, ret = 0; + + /* Get OPP descriptor node */ + np = _of_get_opp_desc_node(cpu_dev); + if (!np) { + dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__); + return -ENOENT; + } + + /* OPPs are shared ? */ + if (!of_property_read_bool(np, "opp-shared")) + goto put_cpu_node; + + for_each_possible_cpu(cpu) { + if (cpu == cpu_dev->id) + continue; + + tcpu_dev = get_cpu_device(cpu); + if (!tcpu_dev) { + dev_err(cpu_dev, "%s: failed to get cpu%d device\n", + __func__, cpu); + ret = -ENODEV; + goto put_cpu_node; + } + + /* Get OPP descriptor node */ + tmp_np = _of_get_opp_desc_node(tcpu_dev); + if (!tmp_np) { + dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n", + __func__); + ret = -ENOENT; + goto put_cpu_node; + } + + /* CPUs are sharing opp node */ + if (np == tmp_np) + cpumask_set_cpu(cpu, cpumask); + + of_node_put(tmp_np); + } + +put_cpu_node: + of_node_put(np); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); +#endif diff --git a/kernel/drivers/base/power/opp/opp.h b/kernel/drivers/base/power/opp/opp.h new file mode 100644 index 000000000..7366b2aa8 --- /dev/null +++ b/kernel/drivers/base/power/opp/opp.h @@ -0,0 +1,146 @@ +/* + * Generic OPP Interface + * + * Copyright (C) 2009-2010 Texas Instruments Incorporated. + * Nishanth Menon + * Romit Dasgupta + * Kevin Hilman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __DRIVER_OPP_H__ +#define __DRIVER_OPP_H__ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/pm_opp.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> + +/* Lock to allow exclusive modification to the device and opp lists */ +extern struct mutex dev_opp_list_lock; + +/* + * Internal data structure organization with the OPP layer library is as + * follows: + * dev_opp_list (root) + * |- device 1 (represents voltage domain 1) + * | |- opp 1 (availability, freq, voltage) + * | |- opp 2 .. + * ... ... + * | `- opp n .. + * |- device 2 (represents the next voltage domain) + * ... + * `- device m (represents mth voltage domain) + * device 1, 2.. are represented by dev_opp structure while each opp + * is represented by the opp structure. + */ + +/** + * struct dev_pm_opp - Generic OPP description structure + * @node: opp list node. The nodes are maintained throughout the lifetime + * of boot. It is expected only an optimal set of OPPs are + * added to the library by the SoC framework. + * RCU usage: opp list is traversed with RCU locks. node + * modification is possible realtime, hence the modifications + * are protected by the dev_opp_list_lock for integrity. + * IMPORTANT: the opp nodes should be maintained in increasing + * order. + * @dynamic: not-created from static DT entries. 
+ * @available: true/false - marks whether this OPP is available or not
+ * @turbo: true if turbo (boost) OPP
+ * @rate: Frequency in hertz
+ * @u_volt: Target voltage in microvolts corresponding to this OPP
+ * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
+ * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
+ * @u_amp: Maximum current drawn by the device in microamperes
+ * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
+ * frequency from any other OPP's frequency.
+ * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @rcu_head: RCU callback head used for deferred freeing
+ * @np: OPP's device node.
+ *
+ * This structure stores the OPP information for a given device.
+ */
+struct dev_pm_opp {
+ struct list_head node;
+
+ bool available;
+ bool dynamic;
+ bool turbo;
+ unsigned long rate;
+
+ unsigned long u_volt;
+ unsigned long u_volt_min;
+ unsigned long u_volt_max;
+ unsigned long u_amp;
+ unsigned long clock_latency_ns;
+
+ struct device_opp *dev_opp;
+ struct rcu_head rcu_head;
+
+ struct device_node *np;
+};
+
+/**
+ * struct device_list_opp - devices managed by 'struct device_opp'
+ * @node: list node
+ * @dev: device to which the struct object belongs
+ * @rcu_head: RCU callback head used for deferred freeing
+ *
+ * This is an internal data structure maintaining the list of devices that are
+ * managed by 'struct device_opp'.
+ */
+struct device_list_opp {
+ struct list_head node;
+ const struct device *dev;
+ struct rcu_head rcu_head;
+};
+
+/**
+ * struct device_opp - Device opp structure
+ * @node: list node - contains the devices with OPPs that
+ * have been registered. Nodes once added are not modified in this
+ * list.
+ * RCU usage: nodes are not modified in the list of device_opp,
+ * however addition is possible and is secured by dev_opp_list_lock
+ * @srcu_head: notifier head to notify the OPP availability changes.
+ * @rcu_head: RCU callback head used for deferred freeing
+ * @dev_list: list of devices that share these OPPs
+ * @opp_list: list of opps
+ * @np: struct device_node pointer for opp's DT node.
+ * @shared_opp: OPP is shared between multiple devices.
+ *
+ * This is an internal data structure maintaining the link to opps attached to
+ * a device. This structure is not meant to be shared with users as it is
+ * meant for bookkeeping and is private to the OPP library.
+ *
+ * Because the opp structures can be used from both rcu and srcu readers, we
+ * need to wait for the grace period of both of them before freeing any
+ * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
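+ *
+ * A condensed sketch of that freeing pattern (editor's illustration,
+ * mirroring _opp_remove()/_kfree_opp_rcu() in core.c):
+ *
+ *	list_del_rcu(&opp->node);
+ *	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+ *
+ * where the SRCU callback itself ends in kfree_rcu(), so both SRCU and
+ * RCU readers have drained before the memory is returned.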
+ */ +struct device_opp { + struct list_head node; + + struct srcu_notifier_head srcu_head; + struct rcu_head rcu_head; + struct list_head dev_list; + struct list_head opp_list; + + struct device_node *np; + unsigned long clock_latency_ns_max; + bool shared_opp; + struct dev_pm_opp *suspend_opp; +}; + +/* Routines internal to opp core */ +struct device_opp *_find_device_opp(struct device *dev); +struct device_list_opp *_add_list_dev(const struct device *dev, + struct device_opp *dev_opp); +struct device_node *_of_get_opp_desc_node(struct device *dev); + +#endif /* __DRIVER_OPP_H__ */ diff --git a/kernel/drivers/base/power/power.h b/kernel/drivers/base/power/power.h index b6b8a273c..998fa6b23 100644 --- a/kernel/drivers/base/power/power.h +++ b/kernel/drivers/base/power/power.h @@ -20,6 +20,46 @@ static inline void pm_runtime_early_init(struct device *dev) extern void pm_runtime_init(struct device *dev); extern void pm_runtime_remove(struct device *dev); +struct wake_irq { + struct device *dev; + int irq; + bool dedicated_irq:1; +}; + +extern void dev_pm_arm_wake_irq(struct wake_irq *wirq); +extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq); + +#ifdef CONFIG_PM_SLEEP + +extern int device_wakeup_attach_irq(struct device *dev, + struct wake_irq *wakeirq); +extern void device_wakeup_detach_irq(struct device *dev); +extern void device_wakeup_arm_wake_irqs(void); +extern void device_wakeup_disarm_wake_irqs(void); + +#else + +static inline int +device_wakeup_attach_irq(struct device *dev, + struct wake_irq *wakeirq) +{ + return 0; +} + +static inline void device_wakeup_detach_irq(struct device *dev) +{ +} + +static inline void device_wakeup_arm_wake_irqs(void) +{ +} + +static inline void device_wakeup_disarm_wake_irqs(void) +{ +} + +#endif /* CONFIG_PM_SLEEP */ + /* * sysfs.c */ @@ -33,6 +73,8 @@ extern int pm_qos_sysfs_add_resume_latency(struct device *dev); extern void pm_qos_sysfs_remove_resume_latency(struct device *dev); extern int pm_qos_sysfs_add_flags(struct device *dev); extern void pm_qos_sysfs_remove_flags(struct device *dev); +extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev); +extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev); #else /* CONFIG_PM */ @@ -52,6 +94,14 @@ static inline void wakeup_sysfs_remove(struct device *dev) {} static inline int pm_qos_sysfs_add(struct device *dev) { return 0; } static inline void pm_qos_sysfs_remove(struct device *dev) {} +static inline void dev_pm_arm_wake_irq(struct wake_irq *wirq) +{ +} + +static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq) +{ +} + #endif #ifdef CONFIG_PM_SLEEP diff --git a/kernel/drivers/base/power/qos.c b/kernel/drivers/base/power/qos.c index e56d538d0..7f3646e45 100644 --- a/kernel/drivers/base/power/qos.c +++ b/kernel/drivers/base/power/qos.c @@ -883,3 +883,40 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) mutex_unlock(&dev_pm_qos_mtx); return ret; } + +/** + * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace + * @dev: Device whose latency tolerance to expose + */ +int dev_pm_qos_expose_latency_tolerance(struct device *dev) +{ + int ret; + + if (!dev->power.set_latency_tolerance) + return -EINVAL; + + mutex_lock(&dev_pm_qos_sysfs_mtx); + ret = pm_qos_sysfs_add_latency_tolerance(dev); + mutex_unlock(&dev_pm_qos_sysfs_mtx); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance); + +/** + * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace + * @dev: Device whose latency 
tolerance to hide + */ +void dev_pm_qos_hide_latency_tolerance(struct device *dev) +{ + mutex_lock(&dev_pm_qos_sysfs_mtx); + pm_qos_sysfs_remove_latency_tolerance(dev); + mutex_unlock(&dev_pm_qos_sysfs_mtx); + + /* Remove the request from user space now */ + pm_runtime_get_sync(dev); + dev_pm_qos_update_user_latency_tolerance(dev, + PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT); + pm_runtime_put(dev); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance); diff --git a/kernel/drivers/base/power/runtime.c b/kernel/drivers/base/power/runtime.c index 5070c4fe8..e1a10a03d 100644 --- a/kernel/drivers/base/power/runtime.c +++ b/kernel/drivers/base/power/runtime.c @@ -10,6 +10,7 @@ #include <linux/sched.h> #include <linux/export.h> #include <linux/pm_runtime.h> +#include <linux/pm_wakeirq.h> #include <trace/events/rpm.h> #include "power.h" @@ -514,6 +515,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) callback = RPM_GET_CALLBACK(dev, runtime_suspend); + dev_pm_enable_wake_irq(dev); retval = rpm_callback(callback, dev); if (retval) goto fail; @@ -552,6 +554,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) return retval; fail: + dev_pm_disable_wake_irq(dev); __update_runtime_status(dev, RPM_ACTIVE); dev->power.deferred_resume = false; wake_up_all(&dev->power.wait_queue); @@ -734,13 +737,16 @@ static int rpm_resume(struct device *dev, int rpmflags) callback = RPM_GET_CALLBACK(dev, runtime_resume); + dev_pm_disable_wake_irq(dev); retval = rpm_callback(callback, dev); if (retval) { __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_cancel_pending(dev); + dev_pm_enable_wake_irq(dev); } else { no_callback: __update_runtime_status(dev, RPM_ACTIVE); + pm_runtime_mark_last_busy(dev); if (parent) atomic_inc(&parent->power.child_count); } diff --git a/kernel/drivers/base/power/sysfs.c b/kernel/drivers/base/power/sysfs.c index d2be3f9c2..a7b46798c 100644 --- a/kernel/drivers/base/power/sysfs.c +++ b/kernel/drivers/base/power/sysfs.c @@ -738,6 +738,17 @@ void pm_qos_sysfs_remove_flags(struct device *dev) sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); } +int pm_qos_sysfs_add_latency_tolerance(struct device *dev) +{ + return sysfs_merge_group(&dev->kobj, + &pm_qos_latency_tolerance_attr_group); +} + +void pm_qos_sysfs_remove_latency_tolerance(struct device *dev) +{ + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); +} + void rpm_sysfs_remove(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); diff --git a/kernel/drivers/base/power/wakeirq.c b/kernel/drivers/base/power/wakeirq.c new file mode 100644 index 000000000..0d77cd6fd --- /dev/null +++ b/kernel/drivers/base/power/wakeirq.c @@ -0,0 +1,277 @@ +/* + * wakeirq.c - Device wakeirq helper functions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+
+#include "power.h"
+
+/**
+ * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
+ * @dev: Device entry
+ * @irq: Device wake-up capable interrupt
+ * @wirq: Wake irq specific data
+ *
+ * Internal function to attach either a device IO interrupt or a
+ * dedicated wake-up interrupt as a wake IRQ.
+ */
+static int dev_pm_attach_wake_irq(struct device *dev, int irq,
+ struct wake_irq *wirq)
+{
+ unsigned long flags;
+ int err;
+
+ if (!dev || !wirq)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ if (dev_WARN_ONCE(dev, dev->power.wakeirq,
+ "wake irq already initialized\n")) {
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return -EEXIST;
+ }
+
+ err = device_wakeup_attach_irq(dev, wirq);
+ if (!err)
+ dev->power.wakeirq = wirq;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return err;
+}
+
+/**
+ * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
+ * automatically configured for wake-up from suspend based
+ * on the device specific sysfs wakeup entry. Typically called
+ * during driver probe after calling device_init_wakeup().
+ */
+int dev_pm_set_wake_irq(struct device *dev, int irq)
+{
+ struct wake_irq *wirq;
+ int err;
+
+ if (irq < 0)
+ return -EINVAL;
+
+ wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
+ if (!wirq)
+ return -ENOMEM;
+
+ wirq->dev = dev;
+ wirq->irq = irq;
+
+ err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ if (err)
+ kfree(wirq);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
+
+/**
+ * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
+ * @dev: Device entry
+ *
+ * Detach a device wake IRQ and free resources.
+ *
+ * Note that it's OK for drivers to call this without calling
+ * dev_pm_set_wake_irq() as all the driver instances may not have
+ * a wake IRQ configured. This avoids adding wake IRQ specific
+ * checks into the drivers.
+ */
+void dev_pm_clear_wake_irq(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+ unsigned long flags;
+
+ if (!wirq)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ device_wakeup_detach_irq(dev);
+ dev->power.wakeirq = NULL;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ if (wirq->dedicated_irq)
+ free_irq(wirq->irq, wirq);
+ kfree(wirq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
+
+/**
+ * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
+ * @irq: Device specific dedicated wake-up interrupt
+ * @_wirq: Wake IRQ data
+ *
+ * Some devices have a separate wake-up interrupt in addition to the
+ * device IO interrupt. The wake-up interrupt signals that a device
+ * should be woken up from its idle state. This handler uses device
+ * specific pm_runtime functions to wake the device, and then it's
+ * up to the device to do whatever it needs to. Note that as the
+ * device may need to restore context and start up regulators, we
+ * use a threaded IRQ.
+ *
+ * Also note that we are not resending the lost device interrupts.
+ * We assume that the wake-up interrupt just needs to wake up the
+ * device, and then the device's pm_runtime_resume() can deal with the
+ * situation.
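+ *
+ * A hedged probe-time sketch of wiring up such a dedicated wake-up
+ * interrupt (editor's illustration, not part of this patch; wakeup_irq
+ * is a platform-specific interrupt number):
+ *
+ *	device_init_wakeup(dev, true);
+ *	err = dev_pm_set_dedicated_wake_irq(dev, wakeup_irq);
+ *	if (err)
+ *		dev_warn(dev, "no wake irq: %d\n", err);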
+ */
+static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
+{
+ struct wake_irq *wirq = _wirq;
+ int res;
+
+ /* We don't want RPM_ASYNC or RPM_NOWAIT here */
+ res = pm_runtime_resume(wirq->dev);
+ if (res < 0)
+ dev_warn(wirq->dev,
+ "wake IRQ with no resume: %i\n", res);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has
+ * a dedicated wake-up interrupt in addition to the device IO
+ * interrupt.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+{
+ struct wake_irq *wirq;
+ int err;
+
+ if (irq < 0)
+ return -EINVAL;
+
+ wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
+ if (!wirq)
+ return -ENOMEM;
+
+ wirq->dev = dev;
+ wirq->irq = irq;
+ wirq->dedicated_irq = true;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ /*
+ * Consumer device may need to power up and restore state
+ * so we use a threaded irq.
+ */
+ err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
+ IRQF_ONESHOT, dev_name(dev), wirq);
+ if (err)
+ goto err_free;
+
+ err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ if (err)
+ goto err_free_irq;
+
+ return err;
+
+err_free_irq:
+ free_irq(irq, wirq);
+err_free:
+ kfree(wirq);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
+
+/**
+ * dev_pm_enable_wake_irq - Enable device wake-up interrupt
+ * @dev: Device
+ *
+ * Called from the bus code or the device driver for
+ * runtime_suspend() to enable the wake-up interrupt while
+ * the device is running.
+ *
+ * Note that for runtime_suspend() the wake-up interrupts
+ * should be unconditionally enabled, unlike for suspend(),
+ * where it is conditional.
+ */
+void dev_pm_enable_wake_irq(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (wirq && wirq->dedicated_irq)
+ enable_irq(wirq->irq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
+
+/**
+ * dev_pm_disable_wake_irq - Disable device wake-up interrupt
+ * @dev: Device
+ *
+ * Called from the bus code or the device driver for
+ * runtime_resume() to disable the wake-up interrupt while
+ * the device is running.
+ */
+void dev_pm_disable_wake_irq(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (wirq && wirq->dedicated_irq)
+ disable_irq_nosync(wirq->irq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
+
+/**
+ * dev_pm_arm_wake_irq - Arm device wake-up
+ * @wirq: Device wake-up interrupt
+ *
+ * Sets up the wake-up event conditionally based on
+ * device_may_wakeup().
+ */
+void dev_pm_arm_wake_irq(struct wake_irq *wirq)
+{
+ if (!wirq)
+ return;
+
+ if (device_may_wakeup(wirq->dev))
+ enable_irq_wake(wirq->irq);
+}
+
+/**
+ * dev_pm_disarm_wake_irq - Disarm device wake-up
+ * @wirq: Device wake-up interrupt
+ *
+ * Clears the wake-up event conditionally based on
+ * device_may_wakeup().
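+ *
+ * Arming and disarming are driven from the system suspend path via
+ * device_wakeup_arm_wake_irqs() and device_wakeup_disarm_wake_irqs()
+ * (see wakeup.c below); a condensed sketch of the disarm side
+ * (editor's illustration, mirroring the wakeup.c hunk):
+ *
+ *	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ *		if (ws->wakeirq)
+ *			dev_pm_disarm_wake_irq(ws->wakeirq);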
+ */ +void dev_pm_disarm_wake_irq(struct wake_irq *wirq) +{ + if (!wirq) + return; + + if (device_may_wakeup(wirq->dev)) + disable_irq_wake(wirq->irq); +} diff --git a/kernel/drivers/base/power/wakeup.c b/kernel/drivers/base/power/wakeup.c index 77262009f..a1e0b9ab8 100644 --- a/kernel/drivers/base/power/wakeup.c +++ b/kernel/drivers/base/power/wakeup.c @@ -14,6 +14,7 @@ #include <linux/suspend.h> #include <linux/seq_file.h> #include <linux/debugfs.h> +#include <linux/pm_wakeirq.h> #include <trace/events/power.h> #include "power.h" @@ -24,6 +25,9 @@ */ bool events_check_enabled __read_mostly; +/* First wakeup IRQ seen by the kernel in the last cycle. */ +unsigned int pm_wakeup_irq __read_mostly; + /* If set and the system is suspending, terminate the suspend. */ static bool pm_abort_suspend __read_mostly; @@ -56,6 +60,11 @@ static LIST_HEAD(wakeup_sources); static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); +static struct wakeup_source deleted_ws = { + .name = "deleted", + .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock), +}; + /** * wakeup_source_prepare - Prepare a new wakeup source for initialization. * @ws: Wakeup source to prepare. @@ -85,7 +94,7 @@ struct wakeup_source *wakeup_source_create(const char *name) if (!ws) return NULL; - wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL); + wakeup_source_prepare(ws, name ? kstrdup_const(name, GFP_KERNEL) : NULL); return ws; } EXPORT_SYMBOL_GPL(wakeup_source_create); @@ -107,6 +116,34 @@ void wakeup_source_drop(struct wakeup_source *ws) } EXPORT_SYMBOL_GPL(wakeup_source_drop); +/* + * Record wakeup_source statistics being deleted into a dummy wakeup_source. + */ +static void wakeup_source_record(struct wakeup_source *ws) +{ + unsigned long flags; + + spin_lock_irqsave(&deleted_ws.lock, flags); + + if (ws->event_count) { + deleted_ws.total_time = + ktime_add(deleted_ws.total_time, ws->total_time); + deleted_ws.prevent_sleep_time = + ktime_add(deleted_ws.prevent_sleep_time, + ws->prevent_sleep_time); + deleted_ws.max_time = + ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ? + deleted_ws.max_time : ws->max_time; + deleted_ws.event_count += ws->event_count; + deleted_ws.active_count += ws->active_count; + deleted_ws.relax_count += ws->relax_count; + deleted_ws.expire_count += ws->expire_count; + deleted_ws.wakeup_count += ws->wakeup_count; + } + + spin_unlock_irqrestore(&deleted_ws.lock, flags); +} + /** * wakeup_source_destroy - Destroy a struct wakeup_source object. * @ws: Wakeup source to destroy. @@ -119,7 +156,8 @@ void wakeup_source_destroy(struct wakeup_source *ws) return; wakeup_source_drop(ws); - kfree(ws->name); + wakeup_source_record(ws); + kfree_const(ws->name); kfree(ws); } EXPORT_SYMBOL_GPL(wakeup_source_destroy); @@ -239,6 +277,86 @@ int device_wakeup_enable(struct device *dev) EXPORT_SYMBOL_GPL(device_wakeup_enable); /** + * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source + * @dev: Device to handle + * @wakeirq: Device specific wakeirq entry + * + * Attach a device wakeirq to the wakeup source so the device + * wake IRQ can be configured automatically for suspend and + * resume. + * + * Call under the device's power.lock lock. 
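+ *
+ * A condensed caller sketch (editor's illustration, mirroring
+ * dev_pm_attach_wake_irq() in wakeirq.c above):
+ *
+ *	spin_lock_irqsave(&dev->power.lock, flags);
+ *	err = device_wakeup_attach_irq(dev, wirq);
+ *	spin_unlock_irqrestore(&dev->power.lock, flags);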
+ */
+int device_wakeup_attach_irq(struct device *dev,
+ struct wake_irq *wakeirq)
+{
+ struct wakeup_source *ws;
+
+ ws = dev->power.wakeup;
+ if (!ws) {
+ dev_err(dev, "forgot to call device_init_wakeup?\n");
+ return -EINVAL;
+ }
+
+ if (ws->wakeirq)
+ return -EEXIST;
+
+ ws->wakeirq = wakeirq;
+ return 0;
+}
+
+/**
+ * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
+ * @dev: Device to handle
+ *
+ * Removes a device wakeirq from the wakeup source.
+ *
+ * Call under the device's power.lock lock.
+ */
+void device_wakeup_detach_irq(struct device *dev)
+{
+ struct wakeup_source *ws;
+
+ ws = dev->power.wakeup;
+ if (ws)
+ ws->wakeirq = NULL;
+}
+
+/**
+ * device_wakeup_arm_wake_irqs(void)
+ *
+ * Iterates over the list of device wakeirqs to arm them.
+ */
+void device_wakeup_arm_wake_irqs(void)
+{
+ struct wakeup_source *ws;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ if (ws->wakeirq)
+ dev_pm_arm_wake_irq(ws->wakeirq);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * device_wakeup_disarm_wake_irqs(void)
+ *
+ * Iterates over the list of device wakeirqs to disarm them.
+ */
+void device_wakeup_disarm_wake_irqs(void)
+{
+ struct wakeup_source *ws;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ if (ws->wakeirq)
+ dev_pm_disarm_wake_irq(ws->wakeirq);
+ }
+ rcu_read_unlock();
+}
+
+/**
 * device_wakeup_detach - Detach a device's wakeup source object from it.
 * @dev: Device to detach the wakeup source object from.
 *
@@ -351,6 +469,20 @@ int device_set_wakeup_enable(struct device *dev, bool enable)
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
+/**
+ * wakeup_source_not_registered - validate the given wakeup source.
+ * @ws: Wakeup source to be validated.
+ */
+static bool wakeup_source_not_registered(struct wakeup_source *ws)
+{
+ /*
+ * Use timer struct to check if the given source is initialized
+ * by wakeup_source_add.
+ */
+ return ws->timer.function != pm_wakeup_timer_fn ||
+ ws->timer.data != (unsigned long)ws;
+}
+
/*
 * The functions below use the observation that each wakeup event starts a
 * period in which the system should not be suspended. The moment this period
@@ -391,6 +523,10 @@ static void wakeup_source_activate(struct wakeup_source *ws)
{
 unsigned int cec;
+ if (WARN_ONCE(wakeup_source_not_registered(ws),
+ "unregistered wakeup source\n"))
+ return;
+
 /*
 * active wakeup source should bring the system
 * out of PM_SUSPEND_FREEZE state
@@ -735,6 +871,15 @@ EXPORT_SYMBOL_GPL(pm_system_wakeup);
void pm_wakeup_clear(void)
{
 pm_abort_suspend = false;
+ pm_wakeup_irq = 0;
+}
+
+void pm_system_irq_wakeup(unsigned int irq_number)
+{
+ if (pm_wakeup_irq == 0) {
+ pm_wakeup_irq = irq_number;
+ pm_system_wakeup();
+ }
}
/**
@@ -894,6 +1039,8 @@ static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
 print_wakeup_source_stats(m, ws);
 rcu_read_unlock();
+ print_wakeup_source_stats(m, &deleted_ws);
+
 return 0;
}
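Editor's note: the wakeup.c hunk above folds the statistics of destroyed
wakeup sources into the static "deleted" source and records the first
wakeup interrupt of a cycle in pm_wakeup_irq. A hedged lifetime sketch
(editor's illustration; "my_source" is a hypothetical name, and
wakeup_source_register()/unregister() are the usual wrappers around the
create/add and drop/destroy steps shown above):

	ws = wakeup_source_register("my_source");
	__pm_stay_awake(ws);		/* opens a wakeup event, counted in stats */
	__pm_relax(ws);			/* closes it */
	wakeup_source_unregister(ws);	/* stats now fold into "deleted" */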