path: root/kernel/kernel/sched/work-simple.c
author     Yunhong Jiang <yunhong.jiang@linux.intel.com>   2017-03-08 23:13:28 -0800
committer  Yunhong Jiang <yunhong.jiang@linux.intel.com>   2017-03-08 23:36:15 -0800
commit     52f993b8e89487ec9ee15a7fb4979e0f09a45b27 (patch)
tree       d65304486afe0bea4a311c783c0d72791c8c0aa2 /kernel/kernel/sched/work-simple.c
parent     c189ccac5702322ed843fe17057035b7222a59b6 (diff)
Upgrade to 4.4.50-rt62
The current kernel is based on rt kernel v4.4.6-rt14. We will upgrade it to 4.4.50-rt62. The commands to achieve this are:

a) Clone a git repo from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git

b) Get the diff between these two changesets:

   git diff 640eca2901f3435e616157b11379d3223a44b391 705619beeea1b0b48219a683fd1a901a86fdaf5e

   where the two commits are:

   [yjiang5@jnakajim-build linux-stable-rt]$ git show --oneline --name-only 640eca2901f3435e616157b11379d3223a44b391
   640eca2901f3 v4.4.6-rt14
   localversion-rt

   [yjiang5@jnakajim-build linux-stable-rt]$ git show --oneline --name-only 705619beeea1b0b48219a683fd1a901a86fdaf5e
   705619beeea1 Linux 4.4.50-rt62
   localversion-rt

c) One patch has already been backported, so revert it before applying the diff:

   filterdiff -p1 -x scripts/package/Makefile ~/tmp/v4.4.6-rt14-4.4.50-rt62.diff | patch -p1 --dry-run

Upstream status: backport

Change-Id: I244d57a32f6066e5a5b9915f9fbf99e7bbca6e01
Signed-off-by: Yunhong Jiang <yunhong.jiang@linux.intel.com>
Diffstat (limited to 'kernel/kernel/sched/work-simple.c')
-rw-r--r--   kernel/kernel/sched/work-simple.c   173
1 file changed, 0 insertions, 173 deletions
diff --git a/kernel/kernel/sched/work-simple.c b/kernel/kernel/sched/work-simple.c
deleted file mode 100644
index 9ffe40543..000000000
--- a/kernel/kernel/sched/work-simple.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
- *
- * Provides a PREEMPT_RT_FULL-safe framework for enqueuing callbacks from
- * irq context. The callbacks are executed in kthread context.
- */
-
-#include <linux/swait.h>
-#include <linux/work-simple.h>
-#include <linux/kthread.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-
-#define SWORK_EVENT_PENDING (1 << 0)
-
-static DEFINE_MUTEX(worker_mutex);
-static struct sworker *glob_worker;
-
-struct sworker {
- struct list_head events;
- struct swait_queue_head wq;
-
- raw_spinlock_t lock;
-
- struct task_struct *task;
- int refs;
-};
-
-static bool swork_readable(struct sworker *worker)
-{
- bool r;
-
- if (kthread_should_stop())
- return true;
-
- raw_spin_lock_irq(&worker->lock);
- r = !list_empty(&worker->events);
- raw_spin_unlock_irq(&worker->lock);
-
- return r;
-}
-
-static int swork_kthread(void *arg)
-{
- struct sworker *worker = arg;
-
- for (;;) {
- swait_event_interruptible(worker->wq,
- swork_readable(worker));
- if (kthread_should_stop())
- break;
-
- raw_spin_lock_irq(&worker->lock);
- while (!list_empty(&worker->events)) {
- struct swork_event *sev;
-
- sev = list_first_entry(&worker->events,
- struct swork_event, item);
- list_del(&sev->item);
- raw_spin_unlock_irq(&worker->lock);
-
- WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
- &sev->flags));
- sev->func(sev);
- raw_spin_lock_irq(&worker->lock);
- }
- raw_spin_unlock_irq(&worker->lock);
- }
- return 0;
-}
-
-static struct sworker *swork_create(void)
-{
- struct sworker *worker;
-
- worker = kzalloc(sizeof(*worker), GFP_KERNEL);
- if (!worker)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&worker->events);
- raw_spin_lock_init(&worker->lock);
- init_swait_queue_head(&worker->wq);
-
- worker->task = kthread_run(swork_kthread, worker, "kswork");
- if (IS_ERR(worker->task)) {
- kfree(worker);
- return ERR_PTR(-ENOMEM);
- }
-
- return worker;
-}
-
-static void swork_destroy(struct sworker *worker)
-{
- kthread_stop(worker->task);
-
- WARN_ON(!list_empty(&worker->events));
- kfree(worker);
-}
-
-/**
- * swork_queue - queue swork
- *
- * Returns %false if @sev was already on a queue, %true otherwise.
- *
- * The work is queued and processed on a random CPU.
- */
-bool swork_queue(struct swork_event *sev)
-{
- unsigned long flags;
-
- if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
- return false;
-
- raw_spin_lock_irqsave(&glob_worker->lock, flags);
- list_add_tail(&sev->item, &glob_worker->events);
- raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
-
- swake_up(&glob_worker->wq);
- return true;
-}
-EXPORT_SYMBOL_GPL(swork_queue);
-
-/**
- * swork_get - get an instance of the sworker
- *
- * Returns a negative error code if the initialization of the worker did not
- * work, %0 otherwise.
- *
- */
-int swork_get(void)
-{
- struct sworker *worker;
-
- mutex_lock(&worker_mutex);
- if (!glob_worker) {
- worker = swork_create();
- if (IS_ERR(worker)) {
- mutex_unlock(&worker_mutex);
- return -ENOMEM;
- }
-
- glob_worker = worker;
- }
-
- glob_worker->refs++;
- mutex_unlock(&worker_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(swork_get);
-
-/**
- * swork_put - puts an instance of the sworker
- *
- * Will destroy the sworker thread. This function must not be called until all
- * queued events have been completed.
- */
-void swork_put(void)
-{
- mutex_lock(&worker_mutex);
-
- glob_worker->refs--;
- if (glob_worker->refs > 0)
- goto out;
-
- swork_destroy(glob_worker);
- glob_worker = NULL;
-out:
- mutex_unlock(&worker_mutex);
-}
-EXPORT_SYMBOL_GPL(swork_put);
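
For context, below is a minimal sketch of how a driver would have consumed the simple-work API that this commit removes. It assumes the companion header <linux/work-simple.h> from the -rt patch set declares struct swork_event, INIT_SWORK(), swork_get(), swork_queue() and swork_put() as used by the file above; the identifiers my_event, my_irq_handler and my_swork_cb are hypothetical.

/*
 * Hypothetical consumer of the simple-work API removed by this commit.
 * Sketch only: relies on <linux/work-simple.h> from the -rt patch set.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/work-simple.h>

static struct swork_event my_event;	/* hypothetical deferred-work object */

/* Runs in kthread context (the "kswork" thread), so it may sleep. */
static void my_swork_cb(struct swork_event *sev)
{
	pr_info("deferred work running in kthread context\n");
}

/* Hard-irq handler: defer the part that may sleep to kswork. */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/*
	 * Safe from irq context even under PREEMPT_RT_FULL: swork_queue()
	 * only sets a pending bit and takes a raw spinlock.
	 */
	swork_queue(&my_event);
	return IRQ_HANDLED;
}

static int __init my_init(void)
{
	int ret;

	/* Take a reference on the global worker; creates "kswork" on first use. */
	ret = swork_get();
	if (ret)
		return ret;

	INIT_SWORK(&my_event, my_swork_cb);
	/* request_irq(..., my_irq_handler, ...) would be set up here. */
	return 0;
}

static void __exit my_exit(void)
{
	/* All queued events must have completed before dropping the reference. */
	swork_put();
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

The design of the removed file mirrors a stripped-down workqueue: a single global kthread, a raw-spinlock-protected event list, and a per-event pending bit that makes queueing idempotent.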