path: root/kernel/drivers/mailbox
author     Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 12:17:53 -0700
committer  Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 15:44:42 -0700
commit     9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree       1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/drivers/mailbox
parent     98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

        Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way and it's not good. We should apply
another opnfv project repo in future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/drivers/mailbox')
-rw-r--r--  kernel/drivers/mailbox/Kconfig            63
-rw-r--r--  kernel/drivers/mailbox/Makefile           13
-rw-r--r--  kernel/drivers/mailbox/arm_mhu.c         195
-rw-r--r--  kernel/drivers/mailbox/mailbox-altera.c  388
-rw-r--r--  kernel/drivers/mailbox/mailbox.c         468
-rw-r--r--  kernel/drivers/mailbox/mailbox.h          14
-rw-r--r--  kernel/drivers/mailbox/omap-mailbox.c    898
-rw-r--r--  kernel/drivers/mailbox/pcc.c             355
-rw-r--r--  kernel/drivers/mailbox/pl320-ipc.c       198
9 files changed, 2592 insertions(+), 0 deletions(-)
diff --git a/kernel/drivers/mailbox/Kconfig b/kernel/drivers/mailbox/Kconfig
new file mode 100644
index 000000000..84b0a2d74
--- /dev/null
+++ b/kernel/drivers/mailbox/Kconfig
@@ -0,0 +1,63 @@
+menuconfig MAILBOX
+ bool "Mailbox Hardware Support"
+ help
+ Mailbox is a framework to control hardware communication between
+ on-chip processors through queued messages and interrupt driven
+ signals. Say Y if your platform supports hardware mailboxes.
+
+if MAILBOX
+
+config ARM_MHU
+ tristate "ARM MHU Mailbox"
+ depends on ARM_AMBA
+ help
+ Say Y here if you want to build the ARM MHU controller driver.
+ The controller has 3 mailbox channels, the last of which can be
+ used in Secure mode only.
+
+config PL320_MBOX
+ bool "ARM PL320 Mailbox"
+ depends on ARM_AMBA
+ help
+ An implementation of the ARM PL320 Interprocessor Communication
+ Mailbox (IPCM), tailored for the Calxeda Highbank. It is used to
+ send short messages between Highbank's A9 cores and the EnergyCore
+ Management Engine, primarily for cpufreq. Say Y here if you want
+ to use the PL320 IPCM support.
+
+config OMAP2PLUS_MBOX
+ tristate "OMAP2+ Mailbox framework support"
+ depends on ARCH_OMAP2PLUS
+ help
+ Mailbox implementation for OMAP family chips with hardware for
+ interprocessor communication involving DSP, IVA1.0 and IVA2 in
+ OMAP2/3; or IPU, IVA HD and DSP in OMAP4/5. Say Y here if you
+ want to use OMAP2+ Mailbox framework support.
+
+config OMAP_MBOX_KFIFO_SIZE
+ int "Mailbox kfifo default buffer size (bytes)"
+ depends on OMAP2PLUS_MBOX
+ default 256
+ help
+ Specify the default size of mailbox's kfifo buffers (bytes).
+ This can also be changed at runtime (via the mbox_kfifo_size
+ module parameter).
+
+config PCC
+ bool "Platform Communication Channel Driver"
+ depends on ACPI
+ help
+ ACPI 5.0+ spec defines a generic mode of communication
+ between the OS and a platform such as the BMC. This medium
+ (PCC) is typically used by CPPC (ACPI CPU Performance management),
+ RAS (ACPI reliability protocol) and MPST (ACPI Memory power
+ states). Select this driver if your platform implements the
+ PCC clients mentioned above.
+
+config ALTERA_MBOX
+ tristate "Altera Mailbox"
+ help
+ An implementation of the Altera Mailbox soft core. It is used
+ to send messages between processors. Say Y here if you want to use the
+ Altera mailbox support.
+endif
diff --git a/kernel/drivers/mailbox/Makefile b/kernel/drivers/mailbox/Makefile
new file mode 100644
index 000000000..b18201e97
--- /dev/null
+++ b/kernel/drivers/mailbox/Makefile
@@ -0,0 +1,13 @@
+# Generic MAILBOX API
+
+obj-$(CONFIG_MAILBOX) += mailbox.o
+
+obj-$(CONFIG_ARM_MHU) += arm_mhu.o
+
+obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
+
+obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
+
+obj-$(CONFIG_PCC) += pcc.o
+
+obj-$(CONFIG_ALTERA_MBOX) += mailbox-altera.o
diff --git a/kernel/drivers/mailbox/arm_mhu.c b/kernel/drivers/mailbox/arm_mhu.c
new file mode 100644
index 000000000..ac693c635
--- /dev/null
+++ b/kernel/drivers/mailbox/arm_mhu.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Jassi Brar <jaswinder.singh@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/amba/bus.h>
+#include <linux/mailbox_controller.h>
+
+#define INTR_STAT_OFS 0x0
+#define INTR_SET_OFS 0x8
+#define INTR_CLR_OFS 0x10
+
+#define MHU_LP_OFFSET 0x0
+#define MHU_HP_OFFSET 0x20
+#define MHU_SEC_OFFSET 0x200
+#define TX_REG_OFFSET 0x100
+
+#define MHU_CHANS 3
+
+struct mhu_link {
+ unsigned irq;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+};
+
+struct arm_mhu {
+ void __iomem *base;
+ struct mhu_link mlink[MHU_CHANS];
+ struct mbox_chan chan[MHU_CHANS];
+ struct mbox_controller mbox;
+};
+
+static irqreturn_t mhu_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val;
+
+ val = readl_relaxed(mlink->rx_reg + INTR_STAT_OFS);
+ if (!val)
+ return IRQ_NONE;
+
+ mbox_chan_received_data(chan, (void *)&val);
+
+ writel_relaxed(val, mlink->rx_reg + INTR_CLR_OFS);
+
+ return IRQ_HANDLED;
+}
+
+static bool mhu_last_tx_done(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+
+ return (val == 0);
+}
+
+static int mhu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 *arg = data;
+
+ writel_relaxed(*arg, mlink->tx_reg + INTR_SET_OFS);
+
+ return 0;
+}
+
+static int mhu_startup(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+ writel_relaxed(val, mlink->tx_reg + INTR_CLR_OFS);
+
+ ret = request_irq(mlink->irq, mhu_rx_interrupt,
+ IRQF_SHARED, "mhu_link", chan);
+ if (ret) {
+ dev_err(chan->mbox->dev,
+ "Unable to aquire IRQ %d\n", mlink->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mhu_shutdown(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+
+ free_irq(mlink->irq, chan);
+}
+
+static struct mbox_chan_ops mhu_ops = {
+ .send_data = mhu_send_data,
+ .startup = mhu_startup,
+ .shutdown = mhu_shutdown,
+ .last_tx_done = mhu_last_tx_done,
+};
+
+static int mhu_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int i, err;
+ struct arm_mhu *mhu;
+ struct device *dev = &adev->dev;
+ int mhu_reg[MHU_CHANS] = {MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET};
+
+ /* Allocate memory for device */
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ mhu->base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(mhu->base)) {
+ dev_err(dev, "ioremap failed\n");
+ return PTR_ERR(mhu->base);
+ }
+
+ for (i = 0; i < MHU_CHANS; i++) {
+ mhu->chan[i].con_priv = &mhu->mlink[i];
+ mhu->mlink[i].irq = adev->irq[i];
+ mhu->mlink[i].rx_reg = mhu->base + mhu_reg[i];
+ mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
+ }
+
+ mhu->mbox.dev = dev;
+ mhu->mbox.chans = &mhu->chan[0];
+ mhu->mbox.num_chans = MHU_CHANS;
+ mhu->mbox.ops = &mhu_ops;
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 10;
+
+ amba_set_drvdata(adev, mhu);
+
+ err = mbox_controller_register(&mhu->mbox);
+ if (err) {
+ dev_err(dev, "Failed to register mailboxes %d\n", err);
+ return err;
+ }
+
+ dev_info(dev, "ARM MHU Mailbox registered\n");
+ return 0;
+}
+
+static int mhu_remove(struct amba_device *adev)
+{
+ struct arm_mhu *mhu = amba_get_drvdata(adev);
+
+ mbox_controller_unregister(&mhu->mbox);
+
+ return 0;
+}
+
+static struct amba_id mhu_ids[] = {
+ {
+ .id = 0x1bb098,
+ .mask = 0xffffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, mhu_ids);
+
+static struct amba_driver arm_mhu_driver = {
+ .drv = {
+ .name = "mhu",
+ },
+ .id_table = mhu_ids,
+ .probe = mhu_probe,
+ .remove = mhu_remove,
+};
+module_amba_driver(arm_mhu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ARM MHU Driver");
+MODULE_AUTHOR("Jassi Brar <jassisinghbrar@gmail.com>");
diff --git a/kernel/drivers/mailbox/mailbox-altera.c b/kernel/drivers/mailbox/mailbox-altera.c
new file mode 100644
index 000000000..a26626567
--- /dev/null
+++ b/kernel/drivers/mailbox/mailbox-altera.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright Altera Corporation (C) 2013-2014. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_NAME "altera-mailbox"
+
+#define MAILBOX_CMD_REG 0x00
+#define MAILBOX_PTR_REG 0x04
+#define MAILBOX_STS_REG 0x08
+#define MAILBOX_INTMASK_REG 0x0C
+
+#define INT_PENDING_MSK 0x1
+#define INT_SPACE_MSK 0x2
+
+#define STS_PENDING_MSK 0x1
+#define STS_FULL_MSK 0x2
+#define STS_FULL_OFT 0x1
+
+#define MBOX_PENDING(status) (((status) & STS_PENDING_MSK))
+#define MBOX_FULL(status) (((status) & STS_FULL_MSK) >> STS_FULL_OFT)
+
+enum altera_mbox_msg {
+ MBOX_CMD = 0,
+ MBOX_PTR,
+};
+
+#define MBOX_POLLING_MS 5 /* polling interval 5ms */
+
+struct altera_mbox {
+ bool is_sender; /* 1-sender, 0-receiver */
+ bool intr_mode;
+ int irq;
+ void __iomem *mbox_base;
+ struct device *dev;
+ struct mbox_controller controller;
+
+ /* If the controller supports only RX polling mode */
+ struct timer_list rxpoll_timer;
+};
+
+static struct altera_mbox *mbox_chan_to_altera_mbox(struct mbox_chan *chan)
+{
+ if (!chan || !chan->con_priv)
+ return NULL;
+
+ return (struct altera_mbox *)chan->con_priv;
+}
+
+static inline int altera_mbox_full(struct altera_mbox *mbox)
+{
+ u32 status;
+
+ status = readl_relaxed(mbox->mbox_base + MAILBOX_STS_REG);
+ return MBOX_FULL(status);
+}
+
+static inline int altera_mbox_pending(struct altera_mbox *mbox)
+{
+ u32 status;
+
+ status = readl_relaxed(mbox->mbox_base + MAILBOX_STS_REG);
+ return MBOX_PENDING(status);
+}
+
+static void altera_mbox_rx_intmask(struct altera_mbox *mbox, bool enable)
+{
+ u32 mask;
+
+ mask = readl_relaxed(mbox->mbox_base + MAILBOX_INTMASK_REG);
+ if (enable)
+ mask |= INT_PENDING_MSK;
+ else
+ mask &= ~INT_PENDING_MSK;
+ writel_relaxed(mask, mbox->mbox_base + MAILBOX_INTMASK_REG);
+}
+
+static void altera_mbox_tx_intmask(struct altera_mbox *mbox, bool enable)
+{
+ u32 mask;
+
+ mask = readl_relaxed(mbox->mbox_base + MAILBOX_INTMASK_REG);
+ if (enable)
+ mask |= INT_SPACE_MSK;
+ else
+ mask &= ~INT_SPACE_MSK;
+ writel_relaxed(mask, mbox->mbox_base + MAILBOX_INTMASK_REG);
+}
+
+static bool altera_mbox_is_sender(struct altera_mbox *mbox)
+{
+ u32 reg;
+ /* Write a magic number to PTR register and read back this register.
+ * This register is read-write if it is a sender.
+ */
+ #define MBOX_MAGIC 0xA5A5AA55
+ writel_relaxed(MBOX_MAGIC, mbox->mbox_base + MAILBOX_PTR_REG);
+ reg = readl_relaxed(mbox->mbox_base + MAILBOX_PTR_REG);
+ if (reg == MBOX_MAGIC) {
+ /* Clear to 0 */
+ writel_relaxed(0, mbox->mbox_base + MAILBOX_PTR_REG);
+ return true;
+ }
+ return false;
+}
+
+static void altera_mbox_rx_data(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+ u32 data[2];
+
+ if (altera_mbox_pending(mbox)) {
+ data[MBOX_PTR] =
+ readl_relaxed(mbox->mbox_base + MAILBOX_PTR_REG);
+ data[MBOX_CMD] =
+ readl_relaxed(mbox->mbox_base + MAILBOX_CMD_REG);
+ mbox_chan_received_data(chan, (void *)data);
+ }
+}
+
+static void altera_mbox_poll_rx(unsigned long data)
+{
+ struct mbox_chan *chan = (struct mbox_chan *)data;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ altera_mbox_rx_data(chan);
+
+ mod_timer(&mbox->rxpoll_timer,
+ jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
+}
+
+static irqreturn_t altera_mbox_tx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = (struct mbox_chan *)p;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ altera_mbox_tx_intmask(mbox, false);
+ mbox_chan_txdone(chan, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t altera_mbox_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = (struct mbox_chan *)p;
+
+ altera_mbox_rx_data(chan);
+ return IRQ_HANDLED;
+}
+
+static int altera_mbox_startup_sender(struct mbox_chan *chan)
+{
+ int ret;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ if (mbox->intr_mode) {
+ ret = request_irq(mbox->irq, altera_mbox_tx_interrupt, 0,
+ DRIVER_NAME, chan);
+ if (unlikely(ret)) {
+ dev_err(mbox->dev,
+ "failed to register mailbox interrupt:%d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int altera_mbox_startup_receiver(struct mbox_chan *chan)
+{
+ int ret;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ if (mbox->intr_mode) {
+ ret = request_irq(mbox->irq, altera_mbox_rx_interrupt, 0,
+ DRIVER_NAME, chan);
+ if (unlikely(ret)) {
+ mbox->intr_mode = false;
+ goto polling; /* use polling if failed */
+ }
+
+ altera_mbox_rx_intmask(mbox, true);
+ return 0;
+ }
+
+polling:
+ /* Setup polling timer */
+ setup_timer(&mbox->rxpoll_timer, altera_mbox_poll_rx,
+ (unsigned long)chan);
+ mod_timer(&mbox->rxpoll_timer,
+ jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
+
+ return 0;
+}
+
+static int altera_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+ u32 *udata = (u32 *)data;
+
+ if (!mbox || !data)
+ return -EINVAL;
+ if (!mbox->is_sender) {
+ dev_warn(mbox->dev,
+ "failed to send. This is receiver mailbox.\n");
+ return -EINVAL;
+ }
+
+ if (altera_mbox_full(mbox))
+ return -EBUSY;
+
+ /* Enable interrupt before send */
+ if (mbox->intr_mode)
+ altera_mbox_tx_intmask(mbox, true);
+
+ /* Pointer register must be written before the command register */
+ writel_relaxed(udata[MBOX_PTR], mbox->mbox_base + MAILBOX_PTR_REG);
+ writel_relaxed(udata[MBOX_CMD], mbox->mbox_base + MAILBOX_CMD_REG);
+
+ return 0;
+}
+
+static bool altera_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ /* Return false if mailbox is full */
+ return altera_mbox_full(mbox) ? false : true;
+}
+
+static bool altera_mbox_peek_data(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ return altera_mbox_pending(mbox) ? true : false;
+}
+
+static int altera_mbox_startup(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+ int ret = 0;
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (mbox->is_sender)
+ ret = altera_mbox_startup_sender(chan);
+ else
+ ret = altera_mbox_startup_receiver(chan);
+
+ return ret;
+}
+
+static void altera_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ if (mbox->intr_mode) {
+ /* Unmask all interrupt masks */
+ writel_relaxed(~0, mbox->mbox_base + MAILBOX_INTMASK_REG);
+ free_irq(mbox->irq, chan);
+ } else if (!mbox->is_sender) {
+ del_timer_sync(&mbox->rxpoll_timer);
+ }
+}
+
+static struct mbox_chan_ops altera_mbox_ops = {
+ .send_data = altera_mbox_send_data,
+ .startup = altera_mbox_startup,
+ .shutdown = altera_mbox_shutdown,
+ .last_tx_done = altera_mbox_last_tx_done,
+ .peek_data = altera_mbox_peek_data,
+};
+
+static int altera_mbox_probe(struct platform_device *pdev)
+{
+ struct altera_mbox *mbox;
+ struct resource *regs;
+ struct mbox_chan *chans;
+ int ret;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox),
+ GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /* Allocate one channel */
+ chans = devm_kzalloc(&pdev->dev, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ mbox->mbox_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(mbox->mbox_base))
+ return PTR_ERR(mbox->mbox_base);
+
+ /* Check whether it is a sender or a receiver */
+ mbox->is_sender = altera_mbox_is_sender(mbox);
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq >= 0)
+ mbox->intr_mode = true;
+
+ mbox->dev = &pdev->dev;
+
+ /* Hardware supports only one channel. */
+ chans[0].con_priv = mbox;
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = chans;
+ mbox->controller.ops = &altera_mbox_ops;
+
+ if (mbox->is_sender) {
+ if (mbox->intr_mode) {
+ mbox->controller.txdone_irq = true;
+ } else {
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = MBOX_POLLING_MS;
+ }
+ }
+
+ ret = mbox_controller_register(&mbox->controller);
+ if (ret) {
+ dev_err(&pdev->dev, "Register mailbox failed\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+err:
+ return ret;
+}
+
+static int altera_mbox_remove(struct platform_device *pdev)
+{
+ struct altera_mbox *mbox = platform_get_drvdata(pdev);
+
+ if (!mbox)
+ return -EINVAL;
+
+ mbox_controller_unregister(&mbox->controller);
+
+ return 0;
+}
+
+static const struct of_device_id altera_mbox_match[] = {
+ { .compatible = "altr,mailbox-1.0" },
+ { /* Sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, altera_mbox_match);
+
+static struct platform_driver altera_mbox_driver = {
+ .probe = altera_mbox_probe,
+ .remove = altera_mbox_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = altera_mbox_match,
+ },
+};
+
+module_platform_driver(altera_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Altera mailbox specific functions");
+MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
+MODULE_ALIAS("platform:altera-mailbox");
diff --git a/kernel/drivers/mailbox/mailbox.c b/kernel/drivers/mailbox/mailbox.c
new file mode 100644
index 000000000..19b491d29
--- /dev/null
+++ b/kernel/drivers/mailbox/mailbox.c
@@ -0,0 +1,468 @@
+/*
+ * Mailbox: Common code for Mailbox controllers and users
+ *
+ * Copyright (C) 2013-2014 Linaro Ltd.
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+
+#include "mailbox.h"
+
+static LIST_HEAD(mbox_cons);
+static DEFINE_MUTEX(con_mutex);
+
+static void poll_txdone(unsigned long data);
+
+static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
+{
+ int idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* See if there is any space left */
+ if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -ENOBUFS;
+ }
+
+ idx = chan->msg_free;
+ chan->msg_data[idx] = mssg;
+ chan->msg_count++;
+
+ if (idx == MBOX_TX_QUEUE_LEN - 1)
+ chan->msg_free = 0;
+ else
+ chan->msg_free++;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return idx;
+}
+
+static void msg_submit(struct mbox_chan *chan)
+{
+ unsigned count, idx;
+ unsigned long flags;
+ void *data;
+ int err = -EBUSY;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (!chan->msg_count || chan->active_req)
+ goto exit;
+
+ count = chan->msg_count;
+ idx = chan->msg_free;
+ if (idx >= count)
+ idx -= count;
+ else
+ idx += MBOX_TX_QUEUE_LEN - count;
+
+ data = chan->msg_data[idx];
+
+ if (chan->cl->tx_prepare)
+ chan->cl->tx_prepare(chan->cl, data);
+ /* Try to submit a message to the MBOX controller */
+ err = chan->mbox->ops->send_data(chan, data);
+ if (!err) {
+ chan->active_req = data;
+ chan->msg_count--;
+ }
+exit:
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ if (!err && (chan->txdone_method & TXDONE_BY_POLL))
+ poll_txdone((unsigned long)chan->mbox);
+}
+
+static void tx_tick(struct mbox_chan *chan, int r)
+{
+ unsigned long flags;
+ void *mssg;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ mssg = chan->active_req;
+ chan->active_req = NULL;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Submit next message */
+ msg_submit(chan);
+
+ /* Notify the client */
+ if (mssg && chan->cl->tx_done)
+ chan->cl->tx_done(chan->cl, mssg, r);
+
+ if (chan->cl->tx_block)
+ complete(&chan->tx_complete);
+}
+
+static void poll_txdone(unsigned long data)
+{
+ struct mbox_controller *mbox = (struct mbox_controller *)data;
+ bool txdone, resched = false;
+ int i;
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ struct mbox_chan *chan = &mbox->chans[i];
+
+ if (chan->active_req && chan->cl) {
+ txdone = chan->mbox->ops->last_tx_done(chan);
+ if (txdone)
+ tx_tick(chan, 0);
+ else
+ resched = true;
+ }
+ }
+
+ if (resched)
+ mod_timer(&mbox->poll, jiffies +
+ msecs_to_jiffies(mbox->txpoll_period));
+}
+
+/**
+ * mbox_chan_received_data - A way for controller driver to push data
+ * received from remote to the upper layer.
+ * @chan: Pointer to the mailbox channel on which RX happened.
+ * @mssg: Client specific message typecasted as void *
+ *
+ * After startup and before shutdown any data received on the chan
+ * is passed on to the API via atomic mbox_chan_received_data().
+ * The controller should ACK the RX only after this call returns.
+ */
+void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
+{
+ /* No buffering of the received data */
+ if (chan->cl->rx_callback)
+ chan->cl->rx_callback(chan->cl, mssg);
+}
+EXPORT_SYMBOL_GPL(mbox_chan_received_data);
+
+/**
+ * mbox_chan_txdone - A way for controller driver to notify the
+ * framework that the last TX has completed.
+ * @chan: Pointer to the mailbox chan on which TX happened.
+ * @r: Status of last TX - OK or ERROR
+ *
+ * The controller that has IRQ for TX ACK calls this atomic API
+ * to tick the TX state machine. It works only if txdone_irq
+ * is set by the controller.
+ */
+void mbox_chan_txdone(struct mbox_chan *chan, int r)
+{
+ if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
+ dev_err(chan->mbox->dev,
+ "Controller can't run the TX ticker\n");
+ return;
+ }
+
+ tx_tick(chan, r);
+}
+EXPORT_SYMBOL_GPL(mbox_chan_txdone);
+
+/**
+ * mbox_client_txdone - The way for a client to run the TX state machine.
+ * @chan: Mailbox channel assigned to this client.
+ * @r: Success status of last transmission.
+ *
+ * The client/protocol had received some 'ACK' packet and it notifies
+ * the API that the last packet was sent successfully. This only works
+ * if the controller can't sense TX-Done.
+ */
+void mbox_client_txdone(struct mbox_chan *chan, int r)
+{
+ if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
+ dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
+ return;
+ }
+
+ tx_tick(chan, r);
+}
+EXPORT_SYMBOL_GPL(mbox_client_txdone);
+
+/**
+ * mbox_client_peek_data - A way for client driver to pull data
+ * received from remote by the controller.
+ * @chan: Mailbox channel assigned to this client.
+ *
+ * A poke to controller driver for any received data.
+ * The data is actually passed onto client via the
+ * mbox_chan_received_data()
+ * The call can be made from atomic context, so the controller's
+ * implementation of peek_data() must not sleep.
+ *
+ * Return: True, if controller has, and is going to push after this,
+ * some data.
+ * False, if controller doesn't have any data to be read.
+ */
+bool mbox_client_peek_data(struct mbox_chan *chan)
+{
+ if (chan->mbox->ops->peek_data)
+ return chan->mbox->ops->peek_data(chan);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(mbox_client_peek_data);
+
+/**
+ * mbox_send_message - For client to submit a message to be
+ * sent to the remote.
+ * @chan: Mailbox channel assigned to this client.
+ * @mssg: Client specific message typecasted.
+ *
+ * For client to submit data to the controller destined for a remote
+ * processor. If the client had set 'tx_block', the call will return
+ * either when the remote receives the data or when 'tx_tout' millisecs
+ * run out.
+ * In non-blocking mode, the requests are buffered by the API and a
+ * non-negative token is returned for each queued request. If the request
+ * is not queued, a negative token is returned. Upon failure or successful
+ * TX, the API calls 'tx_done' from atomic context, from which the client
+ * could submit yet another request.
+ * The pointer to message should be preserved until it is sent
+ * over the chan, i.e., tx_done() is made.
+ * This function could be called from atomic context as it simply
+ * queues the data and returns a token against the request.
+ *
+ * Return: Non-negative integer for successful submission (non-blocking mode)
+ * or transmission over chan (blocking mode).
+ * Negative value denotes failure.
+ */
+int mbox_send_message(struct mbox_chan *chan, void *mssg)
+{
+ int t;
+
+ if (!chan || !chan->cl)
+ return -EINVAL;
+
+ t = add_to_rbuf(chan, mssg);
+ if (t < 0) {
+ dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
+ return t;
+ }
+
+ msg_submit(chan);
+
+ if (chan->cl->tx_block && chan->active_req) {
+ unsigned long wait;
+ int ret;
+
+ if (!chan->cl->tx_tout) /* wait forever */
+ wait = msecs_to_jiffies(3600000);
+ else
+ wait = msecs_to_jiffies(chan->cl->tx_tout);
+
+ ret = wait_for_completion_timeout(&chan->tx_complete, wait);
+ if (ret == 0) {
+ t = -EIO;
+ tx_tick(chan, -EIO);
+ }
+ }
+
+ return t;
+}
+EXPORT_SYMBOL_GPL(mbox_send_message);
+
+/**
+ * mbox_request_channel - Request a mailbox channel.
+ * @cl: Identity of the client requesting the channel.
+ * @index: Index of mailbox specifier in 'mboxes' property.
+ *
+ * The Client specifies its requirements and capabilities while asking for
+ * a mailbox channel. It can't be called from atomic context.
+ * The channel is exclusively allocated and can't be used by another
+ * client before the owner calls mbox_free_channel.
+ * After assignment, any packet received on this channel will be
+ * handed over to the client via the 'rx_callback'.
+ * The framework holds reference to the client, so the mbox_client
+ * structure shouldn't be modified until the mbox_free_channel returns.
+ *
+ * Return: Pointer to the channel assigned to the client if successful.
+ * ERR_PTR for request failure.
+ */
+struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
+{
+ struct device *dev = cl->dev;
+ struct mbox_controller *mbox;
+ struct of_phandle_args spec;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ int ret;
+
+ if (!dev || !dev->of_node) {
+ pr_debug("%s: No owner device node\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&con_mutex);
+
+ if (of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", index, &spec)) {
+ dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ mutex_unlock(&con_mutex);
+ return ERR_PTR(-ENODEV);
+ }
+
+ chan = NULL;
+ list_for_each_entry(mbox, &mbox_cons, node)
+ if (mbox->dev->of_node == spec.np) {
+ chan = mbox->of_xlate(mbox, &spec);
+ break;
+ }
+
+ of_node_put(spec.np);
+
+ if (!chan || chan->cl || !try_module_get(mbox->dev->driver->owner)) {
+ dev_dbg(dev, "%s: mailbox not free\n", __func__);
+ mutex_unlock(&con_mutex);
+ return ERR_PTR(-EBUSY);
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
+
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method |= TXDONE_BY_ACK;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ ret = chan->mbox->ops->startup(chan);
+ if (ret) {
+ dev_err(dev, "Unable to startup the chan (%d)\n", ret);
+ mbox_free_channel(chan);
+ chan = ERR_PTR(ret);
+ }
+
+ mutex_unlock(&con_mutex);
+ return chan;
+}
+EXPORT_SYMBOL_GPL(mbox_request_channel);
+
+/**
+ * mbox_free_channel - The client relinquishes control of a mailbox
+ * channel by this call.
+ * @chan: The mailbox channel to be freed.
+ */
+void mbox_free_channel(struct mbox_chan *chan)
+{
+ unsigned long flags;
+
+ if (!chan || !chan->cl)
+ return;
+
+ chan->mbox->ops->shutdown(chan);
+
+ /* The queued TX requests are simply aborted, no callbacks are made */
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
+ chan->txdone_method = TXDONE_BY_POLL;
+
+ module_put(chan->mbox->dev->driver->owner);
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+EXPORT_SYMBOL_GPL(mbox_free_channel);
+
+static struct mbox_chan *
+of_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ int ind = sp->args[0];
+
+ if (ind >= mbox->num_chans)
+ return NULL;
+
+ return &mbox->chans[ind];
+}
+
+/**
+ * mbox_controller_register - Register the mailbox controller
+ * @mbox: Pointer to the mailbox controller.
+ *
+ * The controller driver registers its communication channels
+ */
+int mbox_controller_register(struct mbox_controller *mbox)
+{
+ int i, txdone;
+
+ /* Sanity check */
+ if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
+ return -EINVAL;
+
+ if (mbox->txdone_irq)
+ txdone = TXDONE_BY_IRQ;
+ else if (mbox->txdone_poll)
+ txdone = TXDONE_BY_POLL;
+ else /* It has to be ACK then */
+ txdone = TXDONE_BY_ACK;
+
+ if (txdone == TXDONE_BY_POLL) {
+ mbox->poll.function = &poll_txdone;
+ mbox->poll.data = (unsigned long)mbox;
+ init_timer(&mbox->poll);
+ }
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ struct mbox_chan *chan = &mbox->chans[i];
+
+ chan->cl = NULL;
+ chan->mbox = mbox;
+ chan->txdone_method = txdone;
+ spin_lock_init(&chan->lock);
+ }
+
+ if (!mbox->of_xlate)
+ mbox->of_xlate = of_mbox_index_xlate;
+
+ mutex_lock(&con_mutex);
+ list_add_tail(&mbox->node, &mbox_cons);
+ mutex_unlock(&con_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mbox_controller_register);
+
+/**
+ * mbox_controller_unregister - Unregister the mailbox controller
+ * @mbox: Pointer to the mailbox controller.
+ */
+void mbox_controller_unregister(struct mbox_controller *mbox)
+{
+ int i;
+
+ if (!mbox)
+ return;
+
+ mutex_lock(&con_mutex);
+
+ list_del(&mbox->node);
+
+ for (i = 0; i < mbox->num_chans; i++)
+ mbox_free_channel(&mbox->chans[i]);
+
+ if (mbox->txdone_poll)
+ del_timer_sync(&mbox->poll);
+
+ mutex_unlock(&con_mutex);
+}
+EXPORT_SYMBOL_GPL(mbox_controller_unregister);
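The kernel-doc comments in mailbox.c above describe the client-side API (mbox_request_channel(), mbox_send_message(), mbox_client_txdone(), mbox_free_channel()), but the patch itself contains no client. The following is a minimal sketch of how a client driver might drive that API; the demo_* names, the use of "mboxes" index 0 and the single-u32 message format (matching, for example, the ARM MHU controller above) are assumptions for illustration, not part of the patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mailbox_client.h>

struct demo_client {
	struct mbox_client cl;
	struct mbox_chan *chan;
};

/* Called in atomic context via mbox_chan_received_data() */
static void demo_rx_callback(struct mbox_client *cl, void *mssg)
{
	dev_info(cl->dev, "remote replied: 0x%x\n", *(u32 *)mssg);
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_client *dc;
	u32 cmd = 0x1;			/* message format is controller specific */
	int ret;

	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->cl.dev = &pdev->dev;
	dc->cl.rx_callback = demo_rx_callback;
	dc->cl.tx_block = true;		/* block in mbox_send_message()... */
	dc->cl.tx_tout = 500;		/* ...for at most 500 ms */
	/*
	 * If the controller cannot sense TX completion, a client would
	 * instead set dc->cl.knows_txdone = true and call
	 * mbox_client_txdone() once its protocol-level ACK arrives.
	 */

	/* Bind to index 0 of this device's "mboxes" DT property */
	dc->chan = mbox_request_channel(&dc->cl, 0);
	if (IS_ERR(dc->chan))
		return PTR_ERR(dc->chan);

	/* The message must stay valid until TX is done; tx_block covers that */
	ret = mbox_send_message(dc->chan, &cmd);
	if (ret < 0)
		dev_err(&pdev->dev, "message not sent: %d\n", ret);

	platform_set_drvdata(pdev, dc);
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	struct demo_client *dc = platform_get_drvdata(pdev);

	mbox_free_channel(dc->chan);
	return 0;
}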
diff --git a/kernel/drivers/mailbox/mailbox.h b/kernel/drivers/mailbox/mailbox.h
new file mode 100644
index 000000000..456ba6851
--- /dev/null
+++ b/kernel/drivers/mailbox/mailbox.h
@@ -0,0 +1,14 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAILBOX_H
+#define __MAILBOX_H
+
+#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */
+#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
+#define TXDONE_BY_ACK BIT(2) /* S/W ACK received by Client ticks the TX */
+
+#endif /* __MAILBOX_H */
diff --git a/kernel/drivers/mailbox/omap-mailbox.c b/kernel/drivers/mailbox/omap-mailbox.c
new file mode 100644
index 000000000..0f332c178
--- /dev/null
+++ b/kernel/drivers/mailbox/omap-mailbox.c
@@ -0,0 +1,898 @@
+/*
+ * OMAP mailbox driver
+ *
+ * Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2013-2014 Texas Instruments Inc.
+ *
+ * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ * Suman Anna <s-anna@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/mailbox-omap.h>
+#include <linux/omap-mailbox.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+
+#define MAILBOX_REVISION 0x000
+#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
+#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
+#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
+
+#define OMAP2_MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u))
+#define OMAP2_MAILBOX_IRQENABLE(u) (0x104 + 8 * (u))
+
+#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u))
+#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u))
+#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u))
+
+#define MAILBOX_IRQSTATUS(type, u) (type ? OMAP4_MAILBOX_IRQSTATUS(u) : \
+ OMAP2_MAILBOX_IRQSTATUS(u))
+#define MAILBOX_IRQENABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE(u) : \
+ OMAP2_MAILBOX_IRQENABLE(u))
+#define MAILBOX_IRQDISABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE_CLR(u) \
+ : OMAP2_MAILBOX_IRQENABLE(u))
+
+#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
+#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
+
+#define MBOX_REG_SIZE 0x120
+
+#define OMAP4_MBOX_REG_SIZE 0x130
+
+#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32))
+#define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32))
+
+struct omap_mbox_fifo {
+ unsigned long msg;
+ unsigned long fifo_stat;
+ unsigned long msg_stat;
+ unsigned long irqenable;
+ unsigned long irqstatus;
+ unsigned long irqdisable;
+ u32 intr_bit;
+};
+
+struct omap_mbox_queue {
+ spinlock_t lock;
+ struct kfifo fifo;
+ struct work_struct work;
+ struct omap_mbox *mbox;
+ bool full;
+};
+
+struct omap_mbox_device {
+ struct device *dev;
+ struct mutex cfg_lock;
+ void __iomem *mbox_base;
+ u32 num_users;
+ u32 num_fifos;
+ struct omap_mbox **mboxes;
+ struct mbox_controller controller;
+ struct list_head elem;
+};
+
+struct omap_mbox_fifo_info {
+ int tx_id;
+ int tx_usr;
+ int tx_irq;
+
+ int rx_id;
+ int rx_usr;
+ int rx_irq;
+
+ const char *name;
+};
+
+struct omap_mbox {
+ const char *name;
+ int irq;
+ struct omap_mbox_queue *rxq;
+ struct device *dev;
+ struct omap_mbox_device *parent;
+ struct omap_mbox_fifo tx_fifo;
+ struct omap_mbox_fifo rx_fifo;
+ u32 ctx[OMAP4_MBOX_NR_REGS];
+ u32 intr_type;
+ struct mbox_chan *chan;
+};
+
+/* global variables for the mailbox devices */
+static DEFINE_MUTEX(omap_mbox_devices_lock);
+static LIST_HEAD(omap_mbox_devices);
+
+static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
+module_param(mbox_kfifo_size, uint, S_IRUGO);
+MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
+
+static struct omap_mbox *mbox_chan_to_omap_mbox(struct mbox_chan *chan)
+{
+ if (!chan || !chan->con_priv)
+ return NULL;
+
+ return (struct omap_mbox *)chan->con_priv;
+}
+
+static inline
+unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
+{
+ return __raw_readl(mdev->mbox_base + ofs);
+}
+
+static inline
+void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
+{
+ __raw_writel(val, mdev->mbox_base + ofs);
+}
+
+/* Mailbox FIFO handle functions */
+static mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox)
+{
+ struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
+ return (mbox_msg_t) mbox_read_reg(mbox->parent, fifo->msg);
+}
+
+static void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
+{
+ struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
+ mbox_write_reg(mbox->parent, msg, fifo->msg);
+}
+
+static int mbox_fifo_empty(struct omap_mbox *mbox)
+{
+ struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
+ return (mbox_read_reg(mbox->parent, fifo->msg_stat) == 0);
+}
+
+static int mbox_fifo_full(struct omap_mbox *mbox)
+{
+ struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
+ return mbox_read_reg(mbox->parent, fifo->fifo_stat);
+}
+
+/* Mailbox IRQ handle functions */
+static void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+{
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqstatus = fifo->irqstatus;
+
+ mbox_write_reg(mbox->parent, bit, irqstatus);
+
+ /* Flush posted write for irq status to avoid spurious interrupts */
+ mbox_read_reg(mbox->parent, irqstatus);
+}
+
+static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+{
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqenable = fifo->irqenable;
+ u32 irqstatus = fifo->irqstatus;
+
+ u32 enable = mbox_read_reg(mbox->parent, irqenable);
+ u32 status = mbox_read_reg(mbox->parent, irqstatus);
+
+ return (int)(enable & status & bit);
+}
+
+void omap_mbox_save_ctx(struct mbox_chan *chan)
+{
+ int i;
+ int nr_regs;
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+
+ if (WARN_ON(!mbox))
+ return;
+
+ if (mbox->intr_type)
+ nr_regs = OMAP4_MBOX_NR_REGS;
+ else
+ nr_regs = MBOX_NR_REGS;
+ for (i = 0; i < nr_regs; i++) {
+ mbox->ctx[i] = mbox_read_reg(mbox->parent, i * sizeof(u32));
+
+ dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
+ i, mbox->ctx[i]);
+ }
+}
+EXPORT_SYMBOL(omap_mbox_save_ctx);
+
+void omap_mbox_restore_ctx(struct mbox_chan *chan)
+{
+ int i;
+ int nr_regs;
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+
+ if (WARN_ON(!mbox))
+ return;
+
+ if (mbox->intr_type)
+ nr_regs = OMAP4_MBOX_NR_REGS;
+ else
+ nr_regs = MBOX_NR_REGS;
+ for (i = 0; i < nr_regs; i++) {
+ mbox_write_reg(mbox->parent, mbox->ctx[i], i * sizeof(u32));
+ dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
+ i, mbox->ctx[i]);
+ }
+}
+EXPORT_SYMBOL(omap_mbox_restore_ctx);
+
+static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+{
+ u32 l;
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqenable = fifo->irqenable;
+
+ l = mbox_read_reg(mbox->parent, irqenable);
+ l |= bit;
+ mbox_write_reg(mbox->parent, l, irqenable);
+}
+
+static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+{
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqdisable = fifo->irqdisable;
+
+ /*
+ * Read and update the interrupt configuration register for pre-OMAP4.
+ * OMAP4 and later SoCs have a dedicated interrupt disabling register.
+ */
+ if (!mbox->intr_type)
+ bit = mbox_read_reg(mbox->parent, irqdisable) & ~bit;
+
+ mbox_write_reg(mbox->parent, bit, irqdisable);
+}
+
+void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+
+ if (WARN_ON(!mbox))
+ return;
+
+ _omap_mbox_enable_irq(mbox, irq);
+}
+EXPORT_SYMBOL(omap_mbox_enable_irq);
+
+void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+
+ if (WARN_ON(!mbox))
+ return;
+
+ _omap_mbox_disable_irq(mbox, irq);
+}
+EXPORT_SYMBOL(omap_mbox_disable_irq);
+
+/*
+ * Message receiver(workqueue)
+ */
+static void mbox_rx_work(struct work_struct *work)
+{
+ struct omap_mbox_queue *mq =
+ container_of(work, struct omap_mbox_queue, work);
+ mbox_msg_t msg;
+ int len;
+
+ while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
+ len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
+ WARN_ON(len != sizeof(msg));
+
+ mbox_chan_received_data(mq->mbox->chan, (void *)msg);
+ spin_lock_irq(&mq->lock);
+ if (mq->full) {
+ mq->full = false;
+ _omap_mbox_enable_irq(mq->mbox, IRQ_RX);
+ }
+ spin_unlock_irq(&mq->lock);
+ }
+}
+
+/*
+ * Mailbox interrupt handler
+ */
+static void __mbox_tx_interrupt(struct omap_mbox *mbox)
+{
+ _omap_mbox_disable_irq(mbox, IRQ_TX);
+ ack_mbox_irq(mbox, IRQ_TX);
+ mbox_chan_txdone(mbox->chan, 0);
+}
+
+static void __mbox_rx_interrupt(struct omap_mbox *mbox)
+{
+ struct omap_mbox_queue *mq = mbox->rxq;
+ mbox_msg_t msg;
+ int len;
+
+ while (!mbox_fifo_empty(mbox)) {
+ if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
+ _omap_mbox_disable_irq(mbox, IRQ_RX);
+ mq->full = true;
+ goto nomem;
+ }
+
+ msg = mbox_fifo_read(mbox);
+
+ len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
+ WARN_ON(len != sizeof(msg));
+ }
+
+ /* no more messages in the fifo. clear IRQ source. */
+ ack_mbox_irq(mbox, IRQ_RX);
+nomem:
+ schedule_work(&mbox->rxq->work);
+}
+
+static irqreturn_t mbox_interrupt(int irq, void *p)
+{
+ struct omap_mbox *mbox = p;
+
+ if (is_mbox_irq(mbox, IRQ_TX))
+ __mbox_tx_interrupt(mbox);
+
+ if (is_mbox_irq(mbox, IRQ_RX))
+ __mbox_rx_interrupt(mbox);
+
+ return IRQ_HANDLED;
+}
+
+static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
+ void (*work)(struct work_struct *))
+{
+ struct omap_mbox_queue *mq;
+
+ if (!work)
+ return NULL;
+
+ mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
+ if (!mq)
+ return NULL;
+
+ spin_lock_init(&mq->lock);
+
+ if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
+ goto error;
+
+ INIT_WORK(&mq->work, work);
+ return mq;
+
+error:
+ kfree(mq);
+ return NULL;
+}
+
+static void mbox_queue_free(struct omap_mbox_queue *q)
+{
+ kfifo_free(&q->fifo);
+ kfree(q);
+}
+
+static int omap_mbox_startup(struct omap_mbox *mbox)
+{
+ int ret = 0;
+ struct omap_mbox_queue *mq;
+
+ mq = mbox_queue_alloc(mbox, mbox_rx_work);
+ if (!mq)
+ return -ENOMEM;
+ mbox->rxq = mq;
+ mq->mbox = mbox;
+
+ ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
+ mbox->name, mbox);
+ if (unlikely(ret)) {
+ pr_err("failed to register mailbox interrupt:%d\n", ret);
+ goto fail_request_irq;
+ }
+
+ _omap_mbox_enable_irq(mbox, IRQ_RX);
+
+ return 0;
+
+fail_request_irq:
+ mbox_queue_free(mbox->rxq);
+ return ret;
+}
+
+static void omap_mbox_fini(struct omap_mbox *mbox)
+{
+ _omap_mbox_disable_irq(mbox, IRQ_RX);
+ free_irq(mbox->irq, mbox);
+ flush_work(&mbox->rxq->work);
+ mbox_queue_free(mbox->rxq);
+}
+
+static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
+ const char *mbox_name)
+{
+ struct omap_mbox *_mbox, *mbox = NULL;
+ struct omap_mbox **mboxes = mdev->mboxes;
+ int i;
+
+ if (!mboxes)
+ return NULL;
+
+ for (i = 0; (_mbox = mboxes[i]); i++) {
+ if (!strcmp(_mbox->name, mbox_name)) {
+ mbox = _mbox;
+ break;
+ }
+ }
+ return mbox;
+}
+
+struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
+ const char *chan_name)
+{
+ struct device *dev = cl->dev;
+ struct omap_mbox *mbox = NULL;
+ struct omap_mbox_device *mdev;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ int ret;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ if (dev->of_node) {
+ pr_err("%s: please use mbox_request_channel(), this API is supported only for OMAP non-DT usage\n",
+ __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&omap_mbox_devices_lock);
+ list_for_each_entry(mdev, &omap_mbox_devices, elem) {
+ mbox = omap_mbox_device_find(mdev, chan_name);
+ if (mbox)
+ break;
+ }
+ mutex_unlock(&omap_mbox_devices_lock);
+
+ if (!mbox || !mbox->chan)
+ return ERR_PTR(-ENOENT);
+
+ chan = mbox->chan;
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ ret = chan->mbox->ops->startup(chan);
+ if (ret) {
+ pr_err("Unable to startup the chan (%d)\n", ret);
+ mbox_free_channel(chan);
+ chan = ERR_PTR(ret);
+ }
+
+ return chan;
+}
+EXPORT_SYMBOL(omap_mbox_request_channel);
+
+static struct class omap_mbox_class = { .name = "mbox", };
+
+static int omap_mbox_register(struct omap_mbox_device *mdev)
+{
+ int ret;
+ int i;
+ struct omap_mbox **mboxes;
+
+ if (!mdev || !mdev->mboxes)
+ return -EINVAL;
+
+ mboxes = mdev->mboxes;
+ for (i = 0; mboxes[i]; i++) {
+ struct omap_mbox *mbox = mboxes[i];
+ mbox->dev = device_create(&omap_mbox_class, mdev->dev,
+ 0, mbox, "%s", mbox->name);
+ if (IS_ERR(mbox->dev)) {
+ ret = PTR_ERR(mbox->dev);
+ goto err_out;
+ }
+ }
+
+ mutex_lock(&omap_mbox_devices_lock);
+ list_add(&mdev->elem, &omap_mbox_devices);
+ mutex_unlock(&omap_mbox_devices_lock);
+
+ ret = mbox_controller_register(&mdev->controller);
+
+err_out:
+ if (ret) {
+ while (i--)
+ device_unregister(mboxes[i]->dev);
+ }
+ return ret;
+}
+
+static int omap_mbox_unregister(struct omap_mbox_device *mdev)
+{
+ int i;
+ struct omap_mbox **mboxes;
+
+ if (!mdev || !mdev->mboxes)
+ return -EINVAL;
+
+ mutex_lock(&omap_mbox_devices_lock);
+ list_del(&mdev->elem);
+ mutex_unlock(&omap_mbox_devices_lock);
+
+ mbox_controller_unregister(&mdev->controller);
+
+ mboxes = mdev->mboxes;
+ for (i = 0; mboxes[i]; i++)
+ device_unregister(mboxes[i]->dev);
+ return 0;
+}
+
+static int omap_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox_device *mdev = mbox->parent;
+ int ret = 0;
+
+ mutex_lock(&mdev->cfg_lock);
+ pm_runtime_get_sync(mdev->dev);
+ ret = omap_mbox_startup(mbox);
+ if (ret)
+ pm_runtime_put_sync(mdev->dev);
+ mutex_unlock(&mdev->cfg_lock);
+ return ret;
+}
+
+static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox_device *mdev = mbox->parent;
+
+ mutex_lock(&mdev->cfg_lock);
+ omap_mbox_fini(mbox);
+ pm_runtime_put_sync(mdev->dev);
+ mutex_unlock(&mdev->cfg_lock);
+}
+
+static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ int ret = -EBUSY;
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (!mbox_fifo_full(mbox)) {
+ mbox_fifo_write(mbox, (mbox_msg_t)data);
+ ret = 0;
+ }
+
+ /* always enable the interrupt */
+ _omap_mbox_enable_irq(mbox, IRQ_TX);
+ return ret;
+}
+
+static struct mbox_chan_ops omap_mbox_chan_ops = {
+ .startup = omap_mbox_chan_startup,
+ .send_data = omap_mbox_chan_send_data,
+ .shutdown = omap_mbox_chan_shutdown,
+};
+
+static const struct of_device_id omap_mailbox_of_match[] = {
+ {
+ .compatible = "ti,omap2-mailbox",
+ .data = (void *)MBOX_INTR_CFG_TYPE1,
+ },
+ {
+ .compatible = "ti,omap3-mailbox",
+ .data = (void *)MBOX_INTR_CFG_TYPE1,
+ },
+ {
+ .compatible = "ti,omap4-mailbox",
+ .data = (void *)MBOX_INTR_CFG_TYPE2,
+ },
+ {
+ /* end */
+ },
+};
+MODULE_DEVICE_TABLE(of, omap_mailbox_of_match);
+
+static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *sp)
+{
+ phandle phandle = sp->args[0];
+ struct device_node *node;
+ struct omap_mbox_device *mdev;
+ struct omap_mbox *mbox;
+
+ mdev = container_of(controller, struct omap_mbox_device, controller);
+ if (WARN_ON(!mdev))
+ return NULL;
+
+ node = of_find_node_by_phandle(phandle);
+ if (!node) {
+ pr_err("%s: could not find node phandle 0x%x\n",
+ __func__, phandle);
+ return NULL;
+ }
+
+ mbox = omap_mbox_device_find(mdev, node->name);
+ of_node_put(node);
+ return mbox ? mbox->chan : NULL;
+}
+
+static int omap_mbox_probe(struct platform_device *pdev)
+{
+ struct resource *mem;
+ int ret;
+ struct mbox_chan *chnls;
+ struct omap_mbox **list, *mbox, *mboxblk;
+ struct omap_mbox_pdata *pdata = pdev->dev.platform_data;
+ struct omap_mbox_dev_info *info = NULL;
+ struct omap_mbox_fifo_info *finfo, *finfoblk;
+ struct omap_mbox_device *mdev;
+ struct omap_mbox_fifo *fifo;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *child;
+ const struct of_device_id *match;
+ u32 intr_type, info_count;
+ u32 num_users, num_fifos;
+ u32 tmp[3];
+ u32 l;
+ int i;
+
+ if (!node && (!pdata || !pdata->info_cnt || !pdata->info)) {
+ pr_err("%s: platform not supported\n", __func__);
+ return -ENODEV;
+ }
+
+ if (node) {
+ match = of_match_device(omap_mailbox_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+ intr_type = (u32)match->data;
+
+ if (of_property_read_u32(node, "ti,mbox-num-users",
+ &num_users))
+ return -ENODEV;
+
+ if (of_property_read_u32(node, "ti,mbox-num-fifos",
+ &num_fifos))
+ return -ENODEV;
+
+ info_count = of_get_available_child_count(node);
+ if (!info_count) {
+ dev_err(&pdev->dev, "no available mbox devices found\n");
+ return -ENODEV;
+ }
+ } else { /* non-DT device creation */
+ info_count = pdata->info_cnt;
+ info = pdata->info;
+ intr_type = pdata->intr_type;
+ num_users = pdata->num_users;
+ num_fifos = pdata->num_fifos;
+ }
+
+ finfoblk = devm_kzalloc(&pdev->dev, info_count * sizeof(*finfoblk),
+ GFP_KERNEL);
+ if (!finfoblk)
+ return -ENOMEM;
+
+ finfo = finfoblk;
+ child = NULL;
+ for (i = 0; i < info_count; i++, finfo++) {
+ if (node) {
+ child = of_get_next_available_child(node, child);
+ ret = of_property_read_u32_array(child, "ti,mbox-tx",
+ tmp, ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ finfo->tx_id = tmp[0];
+ finfo->tx_irq = tmp[1];
+ finfo->tx_usr = tmp[2];
+
+ ret = of_property_read_u32_array(child, "ti,mbox-rx",
+ tmp, ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ finfo->rx_id = tmp[0];
+ finfo->rx_irq = tmp[1];
+ finfo->rx_usr = tmp[2];
+
+ finfo->name = child->name;
+ } else {
+ finfo->tx_id = info->tx_id;
+ finfo->rx_id = info->rx_id;
+ finfo->tx_usr = info->usr_id;
+ finfo->tx_irq = info->irq_id;
+ finfo->rx_usr = info->usr_id;
+ finfo->rx_irq = info->irq_id;
+ finfo->name = info->name;
+ info++;
+ }
+ if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
+ finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
+ return -EINVAL;
+ }
+
+ mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(mdev->mbox_base))
+ return PTR_ERR(mdev->mbox_base);
+
+ /* allocate one extra for marking end of list */
+ list = devm_kzalloc(&pdev->dev, (info_count + 1) * sizeof(*list),
+ GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ chnls = devm_kzalloc(&pdev->dev, (info_count + 1) * sizeof(*chnls),
+ GFP_KERNEL);
+ if (!chnls)
+ return -ENOMEM;
+
+ mboxblk = devm_kzalloc(&pdev->dev, info_count * sizeof(*mbox),
+ GFP_KERNEL);
+ if (!mboxblk)
+ return -ENOMEM;
+
+ mbox = mboxblk;
+ finfo = finfoblk;
+ for (i = 0; i < info_count; i++, finfo++) {
+ fifo = &mbox->tx_fifo;
+ fifo->msg = MAILBOX_MESSAGE(finfo->tx_id);
+ fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr);
+
+ fifo = &mbox->rx_fifo;
+ fifo->msg = MAILBOX_MESSAGE(finfo->rx_id);
+ fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr);
+
+ mbox->intr_type = intr_type;
+
+ mbox->parent = mdev;
+ mbox->name = finfo->name;
+ mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
+ if (mbox->irq < 0)
+ return mbox->irq;
+ mbox->chan = &chnls[i];
+ chnls[i].con_priv = mbox;
+ list[i] = mbox++;
+ }
+
+ mutex_init(&mdev->cfg_lock);
+ mdev->dev = &pdev->dev;
+ mdev->num_users = num_users;
+ mdev->num_fifos = num_fifos;
+ mdev->mboxes = list;
+
+ /* OMAP does not have a Tx-Done IRQ, but rather a Tx-Ready IRQ */
+ mdev->controller.txdone_irq = true;
+ mdev->controller.dev = mdev->dev;
+ mdev->controller.ops = &omap_mbox_chan_ops;
+ mdev->controller.chans = chnls;
+ mdev->controller.num_chans = info_count;
+ mdev->controller.of_xlate = omap_mbox_of_xlate;
+ ret = omap_mbox_register(mdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mdev);
+ pm_runtime_enable(mdev->dev);
+
+ ret = pm_runtime_get_sync(mdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(mdev->dev);
+ goto unregister;
+ }
+
+ /*
+ * just print the raw revision register, the format is not
+ * uniform across all SoCs
+ */
+ l = mbox_read_reg(mdev, MAILBOX_REVISION);
+ dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);
+
+ ret = pm_runtime_put_sync(mdev->dev);
+ if (ret < 0)
+ goto unregister;
+
+ devm_kfree(&pdev->dev, finfoblk);
+ return 0;
+
+unregister:
+ pm_runtime_disable(mdev->dev);
+ omap_mbox_unregister(mdev);
+ return ret;
+}
+
+static int omap_mbox_remove(struct platform_device *pdev)
+{
+ struct omap_mbox_device *mdev = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(mdev->dev);
+ omap_mbox_unregister(mdev);
+
+ return 0;
+}
+
+static struct platform_driver omap_mbox_driver = {
+ .probe = omap_mbox_probe,
+ .remove = omap_mbox_remove,
+ .driver = {
+ .name = "omap-mailbox",
+ .of_match_table = of_match_ptr(omap_mailbox_of_match),
+ },
+};
+
+static int __init omap_mbox_init(void)
+{
+ int err;
+
+ err = class_register(&omap_mbox_class);
+ if (err)
+ return err;
+
+ /* kfifo size sanity check: alignment and minimal size */
+ mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t));
+ mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
+ sizeof(mbox_msg_t));
+
+ return platform_driver_register(&omap_mbox_driver);
+}
+subsys_initcall(omap_mbox_init);
+
+static void __exit omap_mbox_exit(void)
+{
+ platform_driver_unregister(&omap_mbox_driver);
+ class_unregister(&omap_mbox_class);
+}
+module_exit(omap_mbox_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging");
+MODULE_AUTHOR("Toshihiro Kobayashi");
+MODULE_AUTHOR("Hiroshi DOYU");
diff --git a/kernel/drivers/mailbox/pcc.c b/kernel/drivers/mailbox/pcc.c
new file mode 100644
index 000000000..7e91d68a3
--- /dev/null
+++ b/kernel/drivers/mailbox/pcc.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * PCC (Platform Communication Channel) is defined in the ACPI 5.0+
+ * specification. It is a mailbox like mechanism to allow clients
+ * such as CPPC (Collaborative Processor Performance Control), RAS
+ * (Reliability, Availability and Serviceability) and MPST (Memory
+ * Node Power State Table) to talk to the platform (e.g. BMC) through
+ * shared memory regions as defined in the PCC table entries. The PCC
+ * specification supports a Doorbell mechanism for the PCC clients
+ * to notify the platform about new data. This Doorbell information
+ * is also specified in each PCC table entry.
+ *
+ * Typical high level flow of operation is:
+ *
+ * PCC Reads:
+ * * Client tries to acquire a channel lock.
+ * * After it is acquired it writes READ cmd in communication region cmd
+ * address.
+ * * Client issues mbox_send_message() which rings the PCC doorbell
+ * for its PCC channel.
+ * * If the command completes, then the client has control over the
+ *   channel and it can proceed with its reads.
+ * * Client releases lock.
+ *
+ * PCC Writes:
+ * * Client tries to acquire channel lock.
+ * * Client writes to its communication region after it acquires a
+ * channel lock.
+ * * Client writes WRITE cmd in communication region cmd address.
+ * * Client issues mbox_send_message() which rings the PCC doorbell
+ * for its PCC channel.
+ * * If the command completes, then the writes have succeeded and the
+ *   client can release the channel lock.
+ *
+ * There is a Nominal latency defined for each channel which indicates
+ * how long to wait until a command completes. If the command does not
+ * complete within that time, the client needs to retry or assume failure.
+ *
+ * For more details about PCC, please see the ACPI specification from
+ * http://www.uefi.org/ACPIv5.1 Section 14.
+ *
+ * This file implements PCC as a Mailbox controller and allows for PCC
+ * clients to be implemented as its Mailbox Client Channels.
+ */
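The read flow above maps onto pcc_mbox_request_channel()/pcc_mbox_free_channel(), exported further down in this file, plus the generic mbox_send_message() call. Below is a minimal sketch of a hypothetical client issuing a READ; the my_pcc_read() name, the command-field offset and the MY_PCC_CMD_READ encoding are illustrative assumptions rather than definitions from this driver, and the shared-memory channel-lock handling described above is elided.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>

/* Exported later in this file; repeated as extern for a self-contained sketch. */
extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
						  int subspace_id);
extern void pcc_mbox_free_channel(struct mbox_chan *chan);

#define MY_PCC_CMD_READ		0x00	/* assumed command encoding */
#define MY_PCC_CMD_OFFSET	0x04	/* assumed offset of the cmd field */

static int my_pcc_read(struct device *dev, int my_subspace_id,
		       void __iomem *comm_base)
{
	struct mbox_client cl = {
		.dev = dev,
		.knows_txdone = true,	/* client polls completion itself */
	};
	struct mbox_chan *chan;
	int ret;

	chan = pcc_mbox_request_channel(&cl, my_subspace_id);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Write the READ command into the communication region ... */
	writew(MY_PCC_CMD_READ, comm_base + MY_PCC_CMD_OFFSET);

	/* ... and ring the doorbell for this subspace. */
	ret = mbox_send_message(chan, NULL);
	if (ret >= 0)
		ret = 0;

	/*
	 * The client would now poll its status bits in the shared region
	 * (within the Nominal latency) before reading the returned data.
	 */
	pcc_mbox_free_channel(chan);
	return ret;
}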
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+
+#include "mailbox.h"
+
+#define MAX_PCC_SUBSPACES 256
+
+static struct mbox_chan *pcc_mbox_channels;
+
+static struct mbox_controller pcc_mbox_ctrl = {};
+
+/**
+ * get_pcc_channel - Given a PCC subspace idx, get
+ * the respective mbox_channel.
+ * @id: PCC subspace index.
+ *
+ * Return: ERR_PTR(errno) if error, else pointer
+ * to mbox channel.
+ */
+static struct mbox_chan *get_pcc_channel(int id)
+{
+	if (id < 0 || id >= pcc_mbox_ctrl.num_chans)
+		return ERR_PTR(-ENOENT);
+
+	return &pcc_mbox_channels[id];
+}
+
+/**
+ * pcc_mbox_request_channel - PCC clients call this function to
+ * request a pointer to their PCC subspace, from which they
+ * can get the details of communicating with the remote.
+ * @cl: Pointer to Mailbox client, so we know where to bind the
+ * Channel.
+ * @subspace_id: The PCC Subspace index as parsed in the PCC client
+ * ACPI package. This is used to lookup the array of PCC
+ * subspaces as parsed by the PCC Mailbox controller.
+ *
+ * Return: Pointer to the Mailbox Channel if successful or
+ * ERR_PTR.
+ */
+struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
+ int subspace_id)
+{
+ struct device *dev = pcc_mbox_ctrl.dev;
+ struct mbox_chan *chan;
+ unsigned long flags;
+
+ /*
+ * Each PCC Subspace is a Mailbox Channel.
+ * The PCC Clients get their PCC Subspace ID
+ * from their own tables and pass it here.
+ * This returns a pointer to the PCC subspace
+ * for the Client to operate on.
+ */
+ chan = get_pcc_channel(subspace_id);
+
+	if (IS_ERR(chan) || chan->cl) {
+ dev_err(dev, "Channel not found for idx: %d\n", subspace_id);
+ return ERR_PTR(-EBUSY);
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
+
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method |= TXDONE_BY_ACK;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
+
+/**
+ * pcc_mbox_free_channel - Clients call this to free their Channel.
+ *
+ * @chan: Pointer to the mailbox channel as returned by
+ * pcc_mbox_request_channel()
+ */
+void pcc_mbox_free_channel(struct mbox_chan *chan)
+{
+ unsigned long flags;
+
+ if (!chan || !chan->cl)
+ return;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
+ chan->txdone_method = TXDONE_BY_POLL;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
+
+/**
+ * pcc_send_data - Called from Mailbox Controller code. Used
+ * here only to ring the channel doorbell. The PCC client
+ * specific read/write is done in the client driver in
+ * order to maintain atomicity over the PCC channel once the
+ * OS has control over it. See above for the flow of operations.
+ * @chan: Pointer to Mailbox channel over which to send data.
+ * @data: Client specific data written over channel. Used here
+ * only for debug after PCC transaction completes.
+ *
+ * Return: Err if something failed else 0 for success.
+ */
+static int pcc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
+ struct acpi_generic_address doorbell;
+ u64 doorbell_preserve;
+ u64 doorbell_val;
+ u64 doorbell_write;
+
+ doorbell = pcct_ss->doorbell_register;
+ doorbell_preserve = pcct_ss->preserve_mask;
+ doorbell_write = pcct_ss->write_mask;
+
+ /* Sync notification from OS to Platform. */
+ acpi_read(&doorbell_val, &doorbell);
+ acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
+ &doorbell);
+
+ return 0;
+}
+
+static struct mbox_chan_ops pcc_chan_ops = {
+ .send_data = pcc_send_data,
+};
+
+/**
+ * parse_pcc_subspace - Parse the PCC table and verify PCC subspace
+ * entries. There should be one entry per PCC client.
+ * @header: Pointer to the ACPI subtable header under the PCCT.
+ * @end: End of subtable entry.
+ *
+ * Return: 0 for Success, else errno.
+ *
+ * This gets called for each entry in the PCC table.
+ */
+static int parse_pcc_subspace(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_pcct_hw_reduced *pcct_ss;
+
+ if (pcc_mbox_ctrl.num_chans <= MAX_PCC_SUBSPACES) {
+ pcct_ss = (struct acpi_pcct_hw_reduced *) header;
+
+ if (pcct_ss->header.type !=
+ ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE) {
+ pr_err("Incorrect PCC Subspace type detected\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * acpi_pcc_probe - Parse the ACPI tree for the PCCT.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int __init acpi_pcc_probe(void)
+{
+ acpi_size pcct_tbl_header_size;
+ struct acpi_table_header *pcct_tbl;
+ struct acpi_subtable_header *pcct_entry;
+ int count, i;
+ acpi_status status = AE_OK;
+
+ /* Search for PCCT */
+ status = acpi_get_table_with_size(ACPI_SIG_PCCT, 0,
+ &pcct_tbl,
+ &pcct_tbl_header_size);
+
+ if (ACPI_FAILURE(status) || !pcct_tbl) {
+ pr_warn("PCCT header not found.\n");
+ return -ENODEV;
+ }
+
+ count = acpi_table_parse_entries(ACPI_SIG_PCCT,
+ sizeof(struct acpi_table_pcct),
+ ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE,
+ parse_pcc_subspace, MAX_PCC_SUBSPACES);
+
+ if (count <= 0) {
+ pr_err("Error parsing PCC subspaces from PCCT\n");
+ return -EINVAL;
+ }
+
+	pcc_mbox_channels = kcalloc(count, sizeof(struct mbox_chan),
+				    GFP_KERNEL);
+
+ if (!pcc_mbox_channels) {
+ pr_err("Could not allocate space for PCC mbox channels\n");
+ return -ENOMEM;
+ }
+
+ /* Point to the first PCC subspace entry */
+ pcct_entry = (struct acpi_subtable_header *) (
+ (unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct));
+
+ for (i = 0; i < count; i++) {
+ pcc_mbox_channels[i].con_priv = pcct_entry;
+ pcct_entry = (struct acpi_subtable_header *)
+ ((unsigned long) pcct_entry + pcct_entry->length);
+ }
+
+ pcc_mbox_ctrl.num_chans = count;
+
+ pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans);
+
+ return 0;
+}
+
+/**
+ * pcc_mbox_probe - Called when we find a match for the
+ * PCCT platform device. This is purely used to represent
+ * the PCCT as a virtual device for registering with the
+ * generic Mailbox framework.
+ *
+ * @pdev: Pointer to platform device returned when a match
+ * is found.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int pcc_mbox_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ pcc_mbox_ctrl.chans = pcc_mbox_channels;
+ pcc_mbox_ctrl.ops = &pcc_chan_ops;
+ pcc_mbox_ctrl.dev = &pdev->dev;
+
+ pr_info("Registering PCC driver as Mailbox controller\n");
+ ret = mbox_controller_register(&pcc_mbox_ctrl);
+
+ if (ret) {
+ pr_err("Err registering PCC as Mailbox controller: %d\n", ret);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+struct platform_driver pcc_mbox_driver = {
+ .probe = pcc_mbox_probe,
+ .driver = {
+ .name = "PCCT",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pcc_init(void)
+{
+ int ret;
+ struct platform_device *pcc_pdev;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ /* Check if PCC support is available. */
+ ret = acpi_pcc_probe();
+
+ if (ret) {
+ pr_debug("ACPI PCC probe failed.\n");
+ return -ENODEV;
+ }
+
+ pcc_pdev = platform_create_bundle(&pcc_mbox_driver,
+ pcc_mbox_probe, NULL, 0, NULL, 0);
+
+ if (IS_ERR(pcc_pdev)) {
+ pr_debug("Err creating PCC platform bundle\n");
+ return PTR_ERR(pcc_pdev);
+ }
+
+ return 0;
+}
+device_initcall(pcc_init);
diff --git a/kernel/drivers/mailbox/pl320-ipc.c b/kernel/drivers/mailbox/pl320-ipc.c
new file mode 100644
index 000000000..f3755e0aa
--- /dev/null
+++ b/kernel/drivers/mailbox/pl320-ipc.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+
+#include <linux/pl320-ipc.h>
+
+#define IPCMxSOURCE(m) ((m) * 0x40)
+#define IPCMxDSET(m) (((m) * 0x40) + 0x004)
+#define IPCMxDCLEAR(m) (((m) * 0x40) + 0x008)
+#define IPCMxDSTATUS(m) (((m) * 0x40) + 0x00C)
+#define IPCMxMODE(m) (((m) * 0x40) + 0x010)
+#define IPCMxMSET(m) (((m) * 0x40) + 0x014)
+#define IPCMxMCLEAR(m) (((m) * 0x40) + 0x018)
+#define IPCMxMSTATUS(m) (((m) * 0x40) + 0x01C)
+#define IPCMxSEND(m) (((m) * 0x40) + 0x020)
+#define IPCMxDR(m, dr) (((m) * 0x40) + ((dr) * 4) + 0x024)
+
+#define IPCMMIS(irq) (((irq) * 8) + 0x800)
+#define IPCMRIS(irq) (((irq) * 8) + 0x804)
+
+#define MBOX_MASK(n) (1 << (n))
+#define IPC_TX_MBOX 1
+#define IPC_RX_MBOX 2
+
+#define CHAN_MASK(n) (1 << (n))
+#define A9_SOURCE 1
+#define M3_SOURCE 0
+
+static void __iomem *ipc_base;
+static int ipc_irq;
+static DEFINE_MUTEX(ipc_m1_lock);
+static DECLARE_COMPLETION(ipc_completion);
+static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
+
+static inline void set_destination(int source, int mbox)
+{
+ __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
+ __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
+}
+
+static inline void clear_destination(int source, int mbox)
+{
+ __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
+ __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
+}
+
+static void __ipc_send(int mbox, u32 *data)
+{
+ int i;
+ for (i = 0; i < 7; i++)
+ __raw_writel(data[i], ipc_base + IPCMxDR(mbox, i));
+ __raw_writel(0x1, ipc_base + IPCMxSEND(mbox));
+}
+
+static u32 __ipc_rcv(int mbox, u32 *data)
+{
+ int i;
+ for (i = 0; i < 7; i++)
+ data[i] = __raw_readl(ipc_base + IPCMxDR(mbox, i));
+ return data[1];
+}
+
+/* blocking implementation from the A9 side, not usable in interrupts! */
+int pl320_ipc_transmit(u32 *data)
+{
+ int ret;
+
+ mutex_lock(&ipc_m1_lock);
+
+ init_completion(&ipc_completion);
+ __ipc_send(IPC_TX_MBOX, data);
+ ret = wait_for_completion_timeout(&ipc_completion,
+ msecs_to_jiffies(1000));
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ ret = __ipc_rcv(IPC_TX_MBOX, data);
+out:
+ mutex_unlock(&ipc_m1_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_transmit);
+
+static irqreturn_t ipc_handler(int irq, void *dev)
+{
+ u32 irq_stat;
+ u32 data[7];
+
+ irq_stat = __raw_readl(ipc_base + IPCMMIS(1));
+ if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) {
+ __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+ complete(&ipc_completion);
+ }
+ if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) {
+ __ipc_rcv(IPC_RX_MBOX, data);
+ atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1);
+ __raw_writel(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
+ }
+
+ return IRQ_HANDLED;
+}
+
+int pl320_ipc_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ipc_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_register_notifier);
+
+int pl320_ipc_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ipc_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
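A hypothetical consumer of the notifier/transmit API above could be as small as the module sketch below; the my_* names and the empty seven-word message are illustrative assumptions, not part of the Highbank code.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/pl320-ipc.h>

/* Runs from the atomic notifier chain when an unsolicited message arrives. */
static int my_notify(struct notifier_block *nb, unsigned long type, void *data)
{
	pr_info("pl320 client: async message type %lu\n", type);
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_notify,
};

static int __init my_client_init(void)
{
	u32 msg[7] = { 0 };	/* request payload filled in by the client */
	int ret;

	pl320_ipc_register_notifier(&my_nb);

	/* Blocking request/response; must be called from process context. */
	ret = pl320_ipc_transmit(msg);
	if (ret < 0)
		pr_warn("pl320 client: transmit failed: %d\n", ret);

	return 0;
}
module_init(my_client_init);

static void __exit my_client_exit(void)
{
	pl320_ipc_unregister_notifier(&my_nb);
}
module_exit(my_client_exit);

MODULE_LICENSE("GPL");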
+
+static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret;
+
+ ipc_base = ioremap(adev->res.start, resource_size(&adev->res));
+ if (ipc_base == NULL)
+ return -ENOMEM;
+
+ __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+
+ ipc_irq = adev->irq[0];
+ ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL);
+ if (ret < 0)
+ goto err;
+
+ /* Init slow mailbox */
+ __raw_writel(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
+ __raw_writel(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxDSET(IPC_TX_MBOX));
+ __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_TX_MBOX));
+
+ /* Init receive mailbox */
+ __raw_writel(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
+ __raw_writel(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxDSET(IPC_RX_MBOX));
+ __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_RX_MBOX));
+
+ return 0;
+err:
+ iounmap(ipc_base);
+ return ret;
+}
+
+static struct amba_id pl320_ids[] = {
+ {
+ .id = 0x00041320,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl320_driver = {
+ .drv = {
+ .name = "pl320",
+ },
+ .id_table = pl320_ids,
+ .probe = pl320_probe,
+};
+
+static int __init ipc_init(void)
+{
+ return amba_driver_register(&pl320_driver);
+}
+module_init(ipc_init);