Diffstat (limited to 'kernel/drivers/soc')
-rw-r--r--  kernel/drivers/soc/Kconfig | 3
-rw-r--r--  kernel/drivers/soc/Makefile | 4
-rw-r--r--  kernel/drivers/soc/brcmstb/Kconfig | 9
-rw-r--r--  kernel/drivers/soc/brcmstb/Makefile | 1
-rw-r--r--  kernel/drivers/soc/brcmstb/biuctrl.c | 116
-rw-r--r--  kernel/drivers/soc/brcmstb/common.c | 33
-rw-r--r--  kernel/drivers/soc/dove/Makefile | 1
-rw-r--r--  kernel/drivers/soc/dove/pmu.c | 411
-rw-r--r--  kernel/drivers/soc/mediatek/Kconfig | 20
-rw-r--r--  kernel/drivers/soc/mediatek/Makefile | 2
-rw-r--r--  kernel/drivers/soc/mediatek/mtk-infracfg.c | 91
-rw-r--r--  kernel/drivers/soc/mediatek/mtk-pmic-wrap.c | 11
-rw-r--r--  kernel/drivers/soc/mediatek/mtk-scpsys.c | 525
-rw-r--r--  kernel/drivers/soc/qcom/Kconfig | 39
-rw-r--r--  kernel/drivers/soc/qcom/Makefile | 4
-rw-r--r--  kernel/drivers/soc/qcom/smd-rpm.c | 252
-rw-r--r--  kernel/drivers/soc/qcom/smd.c | 1391
-rw-r--r--  kernel/drivers/soc/qcom/smem.c | 795
-rw-r--r--  kernel/drivers/soc/qcom/spm.c | 385
-rw-r--r--  kernel/drivers/soc/rockchip/Kconfig | 18
-rw-r--r--  kernel/drivers/soc/rockchip/Makefile | 4
-rw-r--r--  kernel/drivers/soc/rockchip/pm_domains.c | 490
-rw-r--r--  kernel/drivers/soc/sunxi/Kconfig | 10
-rw-r--r--  kernel/drivers/soc/sunxi/Makefile | 1
-rw-r--r--  kernel/drivers/soc/sunxi/sunxi_sram.c | 284
-rw-r--r--  kernel/drivers/soc/tegra/Makefile | 6
-rw-r--r--  kernel/drivers/soc/tegra/common.c | 2
-rw-r--r--  kernel/drivers/soc/tegra/fuse/Makefile | 2
-rw-r--r--  kernel/drivers/soc/tegra/fuse/fuse-tegra.c | 257
-rw-r--r--  kernel/drivers/soc/tegra/fuse/fuse-tegra20.c | 179
-rw-r--r--  kernel/drivers/soc/tegra/fuse/fuse-tegra30.c | 232
-rw-r--r--  kernel/drivers/soc/tegra/fuse/fuse.h | 95
-rw-r--r--  kernel/drivers/soc/tegra/fuse/speedo-tegra114.c | 22
-rw-r--r--  kernel/drivers/soc/tegra/fuse/speedo-tegra124.c | 26
-rw-r--r--  kernel/drivers/soc/tegra/fuse/speedo-tegra20.c | 28
-rw-r--r--  kernel/drivers/soc/tegra/fuse/speedo-tegra210.c | 184
-rw-r--r--  kernel/drivers/soc/tegra/fuse/speedo-tegra30.c | 48
-rw-r--r--  kernel/drivers/soc/tegra/fuse/tegra-apbmisc.c | 97
-rw-r--r--  kernel/drivers/soc/tegra/pmc.c | 146
-rw-r--r--  kernel/drivers/soc/ti/knav_qmss.h | 3
-rw-r--r--  kernel/drivers/soc/ti/knav_qmss_acc.c | 14
-rw-r--r--  kernel/drivers/soc/ti/knav_qmss_queue.c | 69
-rw-r--r--  kernel/drivers/soc/versatile/soc-realview.c | 2
43 files changed, 5824 insertions, 488 deletions
diff --git a/kernel/drivers/soc/Kconfig b/kernel/drivers/soc/Kconfig
index d8bde82f0..4e853ed2c 100644
--- a/kernel/drivers/soc/Kconfig
+++ b/kernel/drivers/soc/Kconfig
@@ -1,7 +1,10 @@
menu "SOC (System On Chip) specific Drivers"
+source "drivers/soc/brcmstb/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
+source "drivers/soc/rockchip/Kconfig"
+source "drivers/soc/sunxi/Kconfig"
source "drivers/soc/ti/Kconfig"
source "drivers/soc/versatile/Kconfig"
diff --git a/kernel/drivers/soc/Makefile b/kernel/drivers/soc/Makefile
index 70042b259..f2ba2e932 100644
--- a/kernel/drivers/soc/Makefile
+++ b/kernel/drivers/soc/Makefile
@@ -2,8 +2,12 @@
# Makefile for the Linux Kernel SOC specific device drivers.
#
+obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
+obj-$(CONFIG_MACH_DOVE) += dove/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
obj-$(CONFIG_ARCH_QCOM) += qcom/
+obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
+obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_SOC_TI) += ti/
obj-$(CONFIG_PLAT_VERSATILE) += versatile/
diff --git a/kernel/drivers/soc/brcmstb/Kconfig b/kernel/drivers/soc/brcmstb/Kconfig
new file mode 100644
index 000000000..39cab3bd5
--- /dev/null
+++ b/kernel/drivers/soc/brcmstb/Kconfig
@@ -0,0 +1,9 @@
+menuconfig SOC_BRCMSTB
+ bool "Broadcom STB SoC drivers"
+ depends on ARM
+ help
+ Enables drivers for the Broadcom Set-Top Box (STB) series of chips.
+ This option alone enables only some support code, while the drivers
+ can be enabled individually within this menu.
+
+ If unsure, say N.
diff --git a/kernel/drivers/soc/brcmstb/Makefile b/kernel/drivers/soc/brcmstb/Makefile
new file mode 100644
index 000000000..9120b2715
--- /dev/null
+++ b/kernel/drivers/soc/brcmstb/Makefile
@@ -0,0 +1 @@
+obj-y += common.o biuctrl.o
diff --git a/kernel/drivers/soc/brcmstb/biuctrl.c b/kernel/drivers/soc/brcmstb/biuctrl.c
new file mode 100644
index 000000000..9049c076f
--- /dev/null
+++ b/kernel/drivers/soc/brcmstb/biuctrl.c
@@ -0,0 +1,116 @@
+/*
+ * Broadcom STB SoCs Bus Unit Interface controls
+ *
+ * Copyright (C) 2015, Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "brcmstb: " KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+
+#define CPU_CREDIT_REG_OFFSET 0x184
+#define CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK 0x70000000
+
+static void __iomem *cpubiuctrl_base;
+static bool mcp_wr_pairing_en;
+
+static int __init mcp_write_pairing_set(void)
+{
+ u32 creds = 0;
+
+ if (!cpubiuctrl_base)
+ return -1;
+
+ creds = readl_relaxed(cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+ if (mcp_wr_pairing_en) {
+ pr_info("MCP: Enabling write pairing\n");
+ writel_relaxed(creds | CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
+ cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+ } else if (creds & CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK) {
+ pr_info("MCP: Disabling write pairing\n");
+ writel_relaxed(creds & ~CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
+ cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+ } else {
+ pr_info("MCP: Write pairing already disabled\n");
+ }
+
+ return 0;
+}
+
+static int __init setup_hifcpubiuctrl_regs(void)
+{
+ struct device_node *np;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
+ if (!np) {
+ pr_err("missing BIU control node\n");
+ return -ENODEV;
+ }
+
+ cpubiuctrl_base = of_iomap(np, 0);
+ if (!cpubiuctrl_base) {
+ pr_err("failed to remap BIU control base\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mcp_wr_pairing_en = of_property_read_bool(np, "brcm,write-pairing");
+out:
+ of_node_put(np);
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static u32 cpu_credit_reg_dump; /* for save/restore */
+
+static int brcmstb_cpu_credit_reg_suspend(void)
+{
+ if (cpubiuctrl_base)
+ cpu_credit_reg_dump =
+ readl_relaxed(cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+ return 0;
+}
+
+static void brcmstb_cpu_credit_reg_resume(void)
+{
+ if (cpubiuctrl_base)
+ writel_relaxed(cpu_credit_reg_dump,
+ cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+}
+
+static struct syscore_ops brcmstb_cpu_credit_syscore_ops = {
+ .suspend = brcmstb_cpu_credit_reg_suspend,
+ .resume = brcmstb_cpu_credit_reg_resume,
+};
+#endif
+
+
+void __init brcmstb_biuctrl_init(void)
+{
+ int ret;
+
+ setup_hifcpubiuctrl_regs();
+
+ ret = mcp_write_pairing_set();
+ if (ret) {
+ pr_err("MCP: Unable to disable write pairing!\n");
+ return;
+ }
+
+#ifdef CONFIG_PM_SLEEP
+ register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
+#endif
+}
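
The MCP write-pairing toggle above is a single read-modify-write of the CPU credit register. A minimal sketch of the same decision logic, simulating the MMIO register with a plain variable (the mask is the one from the driver):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WR_PAIRING_EN_MASK 0x70000000u /* CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK */

static uint32_t cpu_credit_reg = 0x70000001u; /* stands in for the MMIO register */

static void mcp_write_pairing_set(bool enable)
{
	uint32_t creds = cpu_credit_reg; /* readl_relaxed() in the driver */

	if (enable)
		cpu_credit_reg = creds | WR_PAIRING_EN_MASK;
	else if (creds & WR_PAIRING_EN_MASK)
		cpu_credit_reg = creds & ~WR_PAIRING_EN_MASK;
	/* else: pairing already disabled, no write needed */
}

int main(void)
{
	mcp_write_pairing_set(false);
	printf("credit reg after disable: 0x%08x\n", (unsigned)cpu_credit_reg);
	return 0;
}
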
diff --git a/kernel/drivers/soc/brcmstb/common.c b/kernel/drivers/soc/brcmstb/common.c
new file mode 100644
index 000000000..c262c029b
--- /dev/null
+++ b/kernel/drivers/soc/brcmstb/common.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright © 2014 NVIDIA Corporation
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+
+#include <soc/brcmstb/common.h>
+
+static const struct of_device_id brcmstb_machine_match[] = {
+ { .compatible = "brcm,brcmstb", },
+ { }
+};
+
+bool soc_is_brcmstb(void)
+{
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (!root)
+ return false;
+
+ return of_match_node(brcmstb_machine_match, root) != NULL;
+}
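
soc_is_brcmstb() above just matches the device-tree root node against a one-entry compatible table. A userspace analogue of that match, assuming plain strings in place of device-tree nodes:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same compatible list as brcmstb_machine_match in the driver. */
static const char *const machine_match[] = { "brcm,brcmstb", NULL };

static bool soc_is_brcmstb(const char *root_compatible)
{
	for (int i = 0; machine_match[i]; i++)
		if (!strcmp(root_compatible, machine_match[i]))
			return true;
	return false;
}

int main(void)
{
	printf("%d\n", soc_is_brcmstb("brcm,brcmstb")); /* 1 */
	printf("%d\n", soc_is_brcmstb("fsl,imx6q"));    /* 0 */
	return 0;
}
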
diff --git a/kernel/drivers/soc/dove/Makefile b/kernel/drivers/soc/dove/Makefile
new file mode 100644
index 000000000..2db8e6551
--- /dev/null
+++ b/kernel/drivers/soc/dove/Makefile
@@ -0,0 +1 @@
+obj-y += pmu.o
diff --git a/kernel/drivers/soc/dove/pmu.c b/kernel/drivers/soc/dove/pmu.c
new file mode 100644
index 000000000..abd087917
--- /dev/null
+++ b/kernel/drivers/soc/dove/pmu.c
@@ -0,0 +1,411 @@
+/*
+ * Marvell Dove PMU support
+ */
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset.h>
+#include <linux/reset-controller.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/soc/dove/pmu.h>
+#include <linux/spinlock.h>
+
+#define NR_PMU_IRQS 7
+
+#define PMC_SW_RST 0x30
+#define PMC_IRQ_CAUSE 0x50
+#define PMC_IRQ_MASK 0x54
+
+#define PMU_PWR 0x10
+#define PMU_ISO 0x58
+
+struct pmu_data {
+ spinlock_t lock;
+ struct device_node *of_node;
+ void __iomem *pmc_base;
+ void __iomem *pmu_base;
+ struct irq_chip_generic *irq_gc;
+ struct irq_domain *irq_domain;
+#ifdef CONFIG_RESET_CONTROLLER
+ struct reset_controller_dev reset;
+#endif
+};
+
+/*
+ * The PMU contains a register to reset various subsystems within the
+ * SoC. Export this as a reset controller.
+ */
+#ifdef CONFIG_RESET_CONTROLLER
+#define rcdev_to_pmu(rcdev) container_of(rcdev, struct pmu_data, reset)
+
+static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct pmu_data *pmu = rcdev_to_pmu(rc);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct pmu_data *pmu = rcdev_to_pmu(rc);
+ unsigned long flags;
+ u32 val = ~BIT(id);
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct pmu_data *pmu = rcdev_to_pmu(rc);
+ unsigned long flags;
+ u32 val = BIT(id);
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static struct reset_control_ops pmu_reset_ops = {
+ .reset = pmu_reset_reset,
+ .assert = pmu_reset_assert,
+ .deassert = pmu_reset_deassert,
+};
+
+static struct reset_controller_dev pmu_reset __initdata = {
+ .ops = &pmu_reset_ops,
+ .owner = THIS_MODULE,
+ .nr_resets = 32,
+};
+
+static void __init pmu_reset_init(struct pmu_data *pmu)
+{
+ int ret;
+
+ pmu->reset = pmu_reset;
+ pmu->reset.of_node = pmu->of_node;
+
+ ret = reset_controller_register(&pmu->reset);
+ if (ret)
+ pr_err("pmu: %s failed: %d\n", "reset_controller_register", ret);
+}
+#else
+static void __init pmu_reset_init(struct pmu_data *pmu)
+{
+}
+#endif
+
+struct pmu_domain {
+ struct pmu_data *pmu;
+ u32 pwr_mask;
+ u32 rst_mask;
+ u32 iso_mask;
+ struct generic_pm_domain base;
+};
+
+#define to_pmu_domain(dom) container_of(dom, struct pmu_domain, base)
+
+/*
+ * This deals with the "old" Marvell sequence of bringing a power domain
+ * down/up, which is: apply power, release reset, disable isolators.
+ *
+ * Later devices apparently use a different sequence: power up, disable
+ * isolators, assert repair signal, enable SRMA clock, enable AXI clock,
+ * enable module clock, deassert reset.
+ *
+ * Note: reading the assembly, it seems that the IO accessors have an
+ * unfortunate side-effect - they cause memory already read into registers
+ * for the if () to be re-read for the bit-set or bit-clear operation.
+ * The code is written to avoid this.
+ */
+static int pmu_domain_power_off(struct generic_pm_domain *domain)
+{
+ struct pmu_domain *pmu_dom = to_pmu_domain(domain);
+ struct pmu_data *pmu = pmu_dom->pmu;
+ unsigned long flags;
+ unsigned int val;
+ void __iomem *pmu_base = pmu->pmu_base;
+ void __iomem *pmc_base = pmu->pmc_base;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ /* Enable isolators */
+ if (pmu_dom->iso_mask) {
+ val = ~pmu_dom->iso_mask;
+ val &= readl_relaxed(pmu_base + PMU_ISO);
+ writel_relaxed(val, pmu_base + PMU_ISO);
+ }
+
+ /* Reset unit */
+ if (pmu_dom->rst_mask) {
+ val = ~pmu_dom->rst_mask;
+ val &= readl_relaxed(pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmc_base + PMC_SW_RST);
+ }
+
+ /* Power down */
+ val = readl_relaxed(pmu_base + PMU_PWR) | pmu_dom->pwr_mask;
+ writel_relaxed(val, pmu_base + PMU_PWR);
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static int pmu_domain_power_on(struct generic_pm_domain *domain)
+{
+ struct pmu_domain *pmu_dom = to_pmu_domain(domain);
+ struct pmu_data *pmu = pmu_dom->pmu;
+ unsigned long flags;
+ unsigned int val;
+ void __iomem *pmu_base = pmu->pmu_base;
+ void __iomem *pmc_base = pmu->pmc_base;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ /* Power on */
+ val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
+ writel_relaxed(val, pmu_base + PMU_PWR);
+
+ /* Release reset */
+ if (pmu_dom->rst_mask) {
+ val = pmu_dom->rst_mask;
+ val |= readl_relaxed(pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmc_base + PMC_SW_RST);
+ }
+
+ /* Disable isolators */
+ if (pmu_dom->iso_mask) {
+ val = pmu_dom->iso_mask;
+ val |= readl_relaxed(pmu_base + PMU_ISO);
+ writel_relaxed(val, pmu_base + PMU_ISO);
+ }
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static void __pmu_domain_register(struct pmu_domain *domain,
+ struct device_node *np)
+{
+ unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);
+
+ domain->base.power_off = pmu_domain_power_off;
+ domain->base.power_on = pmu_domain_power_on;
+
+ pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask));
+
+ if (np)
+ of_genpd_add_provider_simple(np, &domain->base);
+}
+
+/* PMU IRQ controller */
+static void pmu_irq_handler(struct irq_desc *desc)
+{
+ struct pmu_data *pmu = irq_desc_get_handler_data(desc);
+ struct irq_chip_generic *gc = pmu->irq_gc;
+ struct irq_domain *domain = pmu->irq_domain;
+ void __iomem *base = gc->reg_base;
+ u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;
+ u32 done = ~0;
+
+ if (stat == 0) {
+ handle_bad_irq(desc);
+ return;
+ }
+
+ while (stat) {
+ u32 hwirq = fls(stat) - 1;
+
+ stat &= ~(1 << hwirq);
+ done &= ~(1 << hwirq);
+
+ generic_handle_irq(irq_find_mapping(domain, hwirq));
+ }
+
+ /*
+ * The PMU mask register is not RW0C: it is RW. This means that
+ * the bits take whatever value is written to them; if you write
+ * a '1', you will set the interrupt.
+ *
+ * Unfortunately this means there is NO race free way to clear
+ * these interrupts.
+ *
+ * So, let's structure the code so that the window is as small as
+ * possible.
+ */
+ irq_gc_lock(gc);
+ done &= readl_relaxed(base + PMC_IRQ_CAUSE);
+ writel_relaxed(done, base + PMC_IRQ_CAUSE);
+ irq_gc_unlock(gc);
+}
+
+static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
+{
+ const char *name = "pmu_irq";
+ struct irq_chip_generic *gc;
+ struct irq_domain *domain;
+ int ret;
+
+ /* mask and clear all interrupts */
+ writel(0, pmu->pmc_base + PMC_IRQ_MASK);
+ writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);
+
+ domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("%s: unable to add irq domain\n", name);
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name,
+ handle_level_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret);
+ irq_domain_remove(domain);
+ return ret;
+ }
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->reg_base = pmu->pmc_base;
+ gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+
+ pmu->irq_domain = domain;
+ pmu->irq_gc = gc;
+
+ irq_set_handler_data(irq, pmu);
+ irq_set_chained_handler(irq, pmu_irq_handler);
+
+ return 0;
+}
+
+/*
+ * pmu: power-manager@d0000 {
+ * compatible = "marvell,dove-pmu";
+ * reg = <0xd0000 0x8000> <0xd8000 0x8000>;
+ * interrupts = <33>;
+ * interrupt-controller;
+ * #reset-cells = 1;
+ * vpu_domain: vpu-domain {
+ * #power-domain-cells = <0>;
+ * marvell,pmu_pwr_mask = <0x00000008>;
+ * marvell,pmu_iso_mask = <0x00000001>;
+ * resets = <&pmu 16>;
+ * };
+ * gpu_domain: gpu-domain {
+ * #power-domain-cells = <0>;
+ * marvell,pmu_pwr_mask = <0x00000004>;
+ * marvell,pmu_iso_mask = <0x00000002>;
+ * resets = <&pmu 18>;
+ * };
+ * };
+ */
+int __init dove_init_pmu(void)
+{
+ struct device_node *np_pmu, *domains_node, *np;
+ struct pmu_data *pmu;
+ int ret, parent_irq;
+
+ /* Lookup the PMU node */
+ np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
+ if (!np_pmu)
+ return 0;
+
+ domains_node = of_get_child_by_name(np_pmu, "domains");
+ if (!domains_node) {
+ pr_err("%s: failed to find domains sub-node\n", np_pmu->name);
+ return 0;
+ }
+
+ pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ spin_lock_init(&pmu->lock);
+ pmu->of_node = np_pmu;
+ pmu->pmc_base = of_iomap(pmu->of_node, 0);
+ pmu->pmu_base = of_iomap(pmu->of_node, 1);
+ if (!pmu->pmc_base || !pmu->pmu_base) {
+ pr_err("%s: failed to map PMU\n", np_pmu->name);
+ iounmap(pmu->pmu_base);
+ iounmap(pmu->pmc_base);
+ kfree(pmu);
+ return -ENOMEM;
+ }
+
+ pmu_reset_init(pmu);
+
+ for_each_available_child_of_node(domains_node, np) {
+ struct of_phandle_args args;
+ struct pmu_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ break;
+
+ domain->pmu = pmu;
+ domain->base.name = kstrdup(np->name, GFP_KERNEL);
+ if (!domain->base.name) {
+ kfree(domain);
+ break;
+ }
+
+ of_property_read_u32(np, "marvell,pmu_pwr_mask",
+ &domain->pwr_mask);
+ of_property_read_u32(np, "marvell,pmu_iso_mask",
+ &domain->iso_mask);
+
+ /*
+ * We parse the reset controller property directly here
+ * to ensure that we can operate when the reset controller
+ * support is not configured into the kernel.
+ */
+ ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
+ 0, &args);
+ if (ret == 0) {
+ if (args.np == pmu->of_node)
+ domain->rst_mask = BIT(args.args[0]);
+ of_node_put(args.np);
+ }
+
+ __pmu_domain_register(domain, np);
+ }
+
+ /* Loss of the interrupt controller is not a fatal error. */
+ parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
+ if (!parent_irq) {
+ pr_err("%s: no interrupt specified\n", np_pmu->name);
+ } else {
+ ret = dove_init_pmu_irq(pmu, parent_irq);
+ if (ret)
+ pr_err("dove_init_pmu_irq() failed: %d\n", ret);
+ }
+
+ return 0;
+}
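
The comment in pmu_domain_power_on()/pmu_domain_power_off() describes the "old" Marvell ordering: apply power, release reset, disable isolators. A sketch of the power-up ordering with plain variables standing in for the PMU_PWR, PMC_SW_RST and PMU_ISO registers; the three masks below are hypothetical example values, not real domain masks:

#include <stdio.h>

/* Stand-ins for the three registers; masks are made-up examples. */
static unsigned pmu_pwr = 0x8, pmc_sw_rst = 0x0, pmu_iso = 0x0;

#define PWR_MASK 0x8     /* set in PMU_PWR = domain powered down */
#define RST_MASK 0x10000 /* set in PMC_SW_RST = reset released */
#define ISO_MASK 0x1     /* set in PMU_ISO = isolators disabled */

static void domain_power_on(void)
{
	pmu_pwr &= ~PWR_MASK;   /* 1. apply power (bit is active-low) */
	pmc_sw_rst |= RST_MASK; /* 2. release reset */
	pmu_iso |= ISO_MASK;    /* 3. disable isolators */
}

int main(void)
{
	domain_power_on();
	printf("pwr=%#x rst=%#x iso=%#x\n", pmu_pwr, pmc_sw_rst, pmu_iso);
	return 0;
}

Power-down runs the same steps in reverse (isolate, assert reset, remove power), as pmu_domain_power_off() does.
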
diff --git a/kernel/drivers/soc/mediatek/Kconfig b/kernel/drivers/soc/mediatek/Kconfig
index 3c1850332..0a4ea809a 100644
--- a/kernel/drivers/soc/mediatek/Kconfig
+++ b/kernel/drivers/soc/mediatek/Kconfig
@@ -1,6 +1,15 @@
#
# MediaTek SoC drivers
#
+config MTK_INFRACFG
+ bool "MediaTek INFRACFG Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP
+ help
+ Say yes here to add support for the MediaTek INFRACFG controller. The
+ INFRACFG controller contains various infrastructure registers not
+ directly associated with any device.
+
config MTK_PMIC_WRAP
tristate "MediaTek PMIC Wrapper Support"
depends on ARCH_MEDIATEK
@@ -10,3 +19,14 @@ config MTK_PMIC_WRAP
Say yes here to add support for MediaTek PMIC Wrapper found
on different MediaTek SoCs. The PMIC wrapper is a proprietary
hardware to connect the PMIC.
+
+config MTK_SCPSYS
+ bool "MediaTek SCPSYS Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select REGMAP
+ select MTK_INFRACFG
+ select PM_GENERIC_DOMAINS if PM
+ help
+ Say yes here to add support for the MediaTek SCPSYS power domain
+ driver.
diff --git a/kernel/drivers/soc/mediatek/Makefile b/kernel/drivers/soc/mediatek/Makefile
index ecaf4defd..12998b088 100644
--- a/kernel/drivers/soc/mediatek/Makefile
+++ b/kernel/drivers/soc/mediatek/Makefile
@@ -1 +1,3 @@
+obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
+obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
diff --git a/kernel/drivers/soc/mediatek/mtk-infracfg.c b/kernel/drivers/soc/mediatek/mtk-infracfg.c
new file mode 100644
index 000000000..dba3055a9
--- /dev/null
+++ b/kernel/drivers/soc/mediatek/mtk-infracfg.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2015 Pengutronix, Sascha Hauer <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/infracfg.h>
+#include <asm/processor.h>
+
+#define INFRA_TOPAXI_PROTECTEN 0x0220
+#define INFRA_TOPAXI_PROTECTSTA1 0x0228
+
+/**
+ * mtk_infracfg_set_bus_protection - enable bus protection
+ * @regmap: The infracfg regmap
+ * @mask: The mask containing the protection bits to be enabled.
+ *
+ * This function enables the bus protection bits for disabled power
+ * domains so that the system does not hang when some unit accesses the
+ * bus while in power down.
+ */
+int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask)
+{
+ unsigned long expired;
+ u32 val;
+ int ret;
+
+ regmap_update_bits(infracfg, INFRA_TOPAXI_PROTECTEN, mask, mask);
+
+ expired = jiffies + HZ;
+
+ while (1) {
+ ret = regmap_read(infracfg, INFRA_TOPAXI_PROTECTSTA1, &val);
+ if (ret)
+ return ret;
+
+ if ((val & mask) == mask)
+ break;
+
+ cpu_relax();
+ if (time_after(jiffies, expired))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * mtk_infracfg_clear_bus_protection - disable bus protection
+ * @regmap: The infracfg regmap
+ * @mask: The mask containing the protection bits to be disabled.
+ *
+ * This function disables the bus protection bits previously enabled with
+ * mtk_infracfg_set_bus_protection.
+ */
+int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask)
+{
+ unsigned long expired;
+ int ret;
+
+ regmap_update_bits(infracfg, INFRA_TOPAXI_PROTECTEN, mask, 0);
+
+ expired = jiffies + HZ;
+
+ while (1) {
+ u32 val;
+
+ ret = regmap_read(infracfg, INFRA_TOPAXI_PROTECTSTA1, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & mask))
+ break;
+
+ cpu_relax();
+ if (time_after(jiffies, expired))
+ return -EIO;
+ }
+
+ return 0;
+}
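
Both helpers poll INFRA_TOPAXI_PROTECTSTA1 with a one-second deadline (jiffies + HZ checked via time_after()). A userspace analogue of the acknowledge loop, with the register read stubbed out:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stub for the INFRA_TOPAXI_PROTECTSTA1 readback. */
static uint32_t read_protect_sta(void)
{
	return 0xffffffffu; /* pretend all protection bits acked */
}

/* Spin until every bit in 'mask' is set or one second elapses,
 * mirroring the driver's jiffies + HZ / time_after() loop. */
static int wait_bus_protect_ack(uint32_t mask)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if ((read_protect_sta() & mask) == mask)
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec - start.tv_sec >= 1)
			return -EIO; /* same error code the driver uses */
	}
}

int main(void)
{
	printf("ack: %d\n", wait_bus_protect_ack(0x3));
	return 0;
}
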
diff --git a/kernel/drivers/soc/mediatek/mtk-pmic-wrap.c b/kernel/drivers/soc/mediatek/mtk-pmic-wrap.c
index f432291fe..105597a88 100644
--- a/kernel/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/kernel/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -725,10 +725,6 @@ static int pwrap_init(struct pmic_wrapper *wrp)
pwrap_writel(wrp, 0x1, PWRAP_WACS2_EN);
pwrap_writel(wrp, 0x5, PWRAP_STAUPD_PRD);
pwrap_writel(wrp, 0xff, PWRAP_STAUPD_GRPEN);
- pwrap_writel(wrp, 0xf, PWRAP_WDT_UNIT);
- pwrap_writel(wrp, 0xffffffff, PWRAP_WDT_SRC_EN);
- pwrap_writel(wrp, 0x1, PWRAP_TIMER_EN);
- pwrap_writel(wrp, ~((1 << 31) | (1 << 1)), PWRAP_INT_EN);
if (pwrap_is_mt8135(wrp)) {
/* enable pwrap events and pwrap bridge in AP side */
@@ -896,6 +892,12 @@ static int pwrap_probe(struct platform_device *pdev)
return -ENODEV;
}
+ /* Initialize watchdog, may not be done by the bootloader */
+ pwrap_writel(wrp, 0xf, PWRAP_WDT_UNIT);
+ pwrap_writel(wrp, 0xffffffff, PWRAP_WDT_SRC_EN);
+ pwrap_writel(wrp, 0x1, PWRAP_TIMER_EN);
+ pwrap_writel(wrp, ~((1 << 31) | (1 << 1)), PWRAP_INT_EN);
+
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt, IRQF_TRIGGER_HIGH,
"mt-pmic-pwrap", wrp);
@@ -926,7 +928,6 @@ err_out1:
static struct platform_driver pwrap_drv = {
.driver = {
.name = "mt-pmic-pwrap",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_pwrap_match_tbl),
},
.probe = pwrap_probe,
diff --git a/kernel/drivers/soc/mediatek/mtk-scpsys.c b/kernel/drivers/soc/mediatek/mtk-scpsys.c
new file mode 100644
index 000000000..4d4203c89
--- /dev/null
+++ b/kernel/drivers/soc/mediatek/mtk-scpsys.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (c) 2015 Pengutronix, Sascha Hauer <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/infracfg.h>
+#include <dt-bindings/power/mt8173-power.h>
+
+#define SPM_VDE_PWR_CON 0x0210
+#define SPM_MFG_PWR_CON 0x0214
+#define SPM_VEN_PWR_CON 0x0230
+#define SPM_ISP_PWR_CON 0x0238
+#define SPM_DIS_PWR_CON 0x023c
+#define SPM_VEN2_PWR_CON 0x0298
+#define SPM_AUDIO_PWR_CON 0x029c
+#define SPM_MFG_2D_PWR_CON 0x02c0
+#define SPM_MFG_ASYNC_PWR_CON 0x02c4
+#define SPM_USB_PWR_CON 0x02cc
+#define SPM_PWR_STATUS 0x060c
+#define SPM_PWR_STATUS_2ND 0x0610
+
+#define PWR_RST_B_BIT BIT(0)
+#define PWR_ISO_BIT BIT(1)
+#define PWR_ON_BIT BIT(2)
+#define PWR_ON_2ND_BIT BIT(3)
+#define PWR_CLK_DIS_BIT BIT(4)
+
+#define PWR_STATUS_DISP BIT(3)
+#define PWR_STATUS_MFG BIT(4)
+#define PWR_STATUS_ISP BIT(5)
+#define PWR_STATUS_VDEC BIT(7)
+#define PWR_STATUS_VENC_LT BIT(20)
+#define PWR_STATUS_VENC BIT(21)
+#define PWR_STATUS_MFG_2D BIT(22)
+#define PWR_STATUS_MFG_ASYNC BIT(23)
+#define PWR_STATUS_AUDIO BIT(24)
+#define PWR_STATUS_USB BIT(25)
+
+enum clk_id {
+ MT8173_CLK_NONE,
+ MT8173_CLK_MM,
+ MT8173_CLK_MFG,
+ MT8173_CLK_VENC,
+ MT8173_CLK_VENC_LT,
+ MT8173_CLK_MAX,
+};
+
+#define MAX_CLKS 2
+
+struct scp_domain_data {
+ const char *name;
+ u32 sta_mask;
+ int ctl_offs;
+ u32 sram_pdn_bits;
+ u32 sram_pdn_ack_bits;
+ u32 bus_prot_mask;
+ enum clk_id clk_id[MAX_CLKS];
+ bool active_wakeup;
+};
+
+static const struct scp_domain_data scp_domain_data[] __initconst = {
+ [MT8173_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {MT8173_CLK_MM},
+ },
+ [MT8173_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = SPM_VEN_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC},
+ },
+ [MT8173_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {MT8173_CLK_MM},
+ },
+ [MT8173_POWER_DOMAIN_MM] = {
+ .name = "mm",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {MT8173_CLK_MM},
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
+ MT8173_TOP_AXI_PROT_EN_MM_M1,
+ },
+ [MT8173_POWER_DOMAIN_VENC_LT] = {
+ .name = "venc_lt",
+ .sta_mask = PWR_STATUS_VENC_LT,
+ .ctl_offs = SPM_VEN2_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC_LT},
+ },
+ [MT8173_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = SPM_AUDIO_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {MT8173_CLK_NONE},
+ },
+ [MT8173_POWER_DOMAIN_USB] = {
+ .name = "usb",
+ .sta_mask = PWR_STATUS_USB,
+ .ctl_offs = SPM_USB_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {MT8173_CLK_NONE},
+ .active_wakeup = true,
+ },
+ [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
+ .name = "mfg_async",
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = 0,
+ .clk_id = {MT8173_CLK_MFG},
+ },
+ [MT8173_POWER_DOMAIN_MFG_2D] = {
+ .name = "mfg_2d",
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = SPM_MFG_2D_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {MT8173_CLK_NONE},
+ },
+ [MT8173_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(13, 8),
+ .sram_pdn_ack_bits = GENMASK(21, 16),
+ .clk_id = {MT8173_CLK_NONE},
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
+ MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+ MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+ MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
+ },
+};
+
+#define NUM_DOMAINS ARRAY_SIZE(scp_domain_data)
+
+struct scp;
+
+struct scp_domain {
+ struct generic_pm_domain genpd;
+ struct scp *scp;
+ struct clk *clk[MAX_CLKS];
+ u32 sta_mask;
+ void __iomem *ctl_addr;
+ u32 sram_pdn_bits;
+ u32 sram_pdn_ack_bits;
+ u32 bus_prot_mask;
+ bool active_wakeup;
+};
+
+struct scp {
+ struct scp_domain domains[NUM_DOMAINS];
+ struct genpd_onecell_data pd_data;
+ struct device *dev;
+ void __iomem *base;
+ struct regmap *infracfg;
+};
+
+static int scpsys_domain_is_on(struct scp_domain *scpd)
+{
+ struct scp *scp = scpd->scp;
+
+ u32 status = readl(scp->base + SPM_PWR_STATUS) & scpd->sta_mask;
+ u32 status2 = readl(scp->base + SPM_PWR_STATUS_2ND) & scpd->sta_mask;
+
+ /*
+ * A domain is on when both status bits are set. If only one is set
+ * return an error. This happens while powering up a domain.
+ */
+
+ if (status && status2)
+ return true;
+ if (!status && !status2)
+ return false;
+
+ return -EINVAL;
+}
+
+static int scpsys_power_on(struct generic_pm_domain *genpd)
+{
+ struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
+ struct scp *scp = scpd->scp;
+ unsigned long timeout;
+ bool expired;
+ void __iomem *ctl_addr = scpd->ctl_addr;
+ u32 sram_pdn_ack = scpd->sram_pdn_ack_bits;
+ u32 val;
+ int ret;
+ int i;
+
+ for (i = 0; i < MAX_CLKS && scpd->clk[i]; i++) {
+ ret = clk_prepare_enable(scpd->clk[i]);
+ if (ret) {
+ for (--i; i >= 0; i--)
+ clk_disable_unprepare(scpd->clk[i]);
+
+ goto err_clk;
+ }
+ }
+
+ val = readl(ctl_addr);
+ val |= PWR_ON_BIT;
+ writel(val, ctl_addr);
+ val |= PWR_ON_2ND_BIT;
+ writel(val, ctl_addr);
+
+ /* wait until PWR_ACK = 1 */
+ timeout = jiffies + HZ;
+ expired = false;
+ while (1) {
+ ret = scpsys_domain_is_on(scpd);
+ if (ret > 0)
+ break;
+
+ if (expired) {
+ ret = -ETIMEDOUT;
+ goto err_pwr_ack;
+ }
+
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ expired = true;
+ }
+
+ val &= ~PWR_CLK_DIS_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~PWR_ISO_BIT;
+ writel(val, ctl_addr);
+
+ val |= PWR_RST_B_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~scpd->sram_pdn_bits;
+ writel(val, ctl_addr);
+
+ /* wait until SRAM_PDN_ACK all 0 */
+ timeout = jiffies + HZ;
+ expired = false;
+ while (sram_pdn_ack && (readl(ctl_addr) & sram_pdn_ack)) {
+
+ if (expired) {
+ ret = -ETIMEDOUT;
+ goto err_pwr_ack;
+ }
+
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ expired = true;
+ }
+
+ if (scpd->bus_prot_mask) {
+ ret = mtk_infracfg_clear_bus_protection(scp->infracfg,
+ scpd->bus_prot_mask);
+ if (ret)
+ goto err_pwr_ack;
+ }
+
+ return 0;
+
+err_pwr_ack:
+ for (i = MAX_CLKS - 1; i >= 0; i--) {
+ if (scpd->clk[i])
+ clk_disable_unprepare(scpd->clk[i]);
+ }
+err_clk:
+ dev_err(scp->dev, "Failed to power on domain %s\n", genpd->name);
+
+ return ret;
+}
+
+static int scpsys_power_off(struct generic_pm_domain *genpd)
+{
+ struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
+ struct scp *scp = scpd->scp;
+ unsigned long timeout;
+ bool expired;
+ void __iomem *ctl_addr = scpd->ctl_addr;
+ u32 pdn_ack = scpd->sram_pdn_ack_bits;
+ u32 val;
+ int ret;
+ int i;
+
+ if (scpd->bus_prot_mask) {
+ ret = mtk_infracfg_set_bus_protection(scp->infracfg,
+ scpd->bus_prot_mask);
+ if (ret)
+ goto out;
+ }
+
+ val = readl(ctl_addr);
+ val |= scpd->sram_pdn_bits;
+ writel(val, ctl_addr);
+
+ /* wait until SRAM_PDN_ACK all 1 */
+ timeout = jiffies + HZ;
+ expired = false;
+ while (pdn_ack && (readl(ctl_addr) & pdn_ack) != pdn_ack) {
+ if (expired) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ expired = true;
+ }
+
+ val |= PWR_ISO_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~PWR_RST_B_BIT;
+ writel(val, ctl_addr);
+
+ val |= PWR_CLK_DIS_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~PWR_ON_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~PWR_ON_2ND_BIT;
+ writel(val, ctl_addr);
+
+ /* wait until PWR_ACK = 0 */
+ timeout = jiffies + HZ;
+ expired = false;
+ while (1) {
+ ret = scpsys_domain_is_on(scpd);
+ if (ret == 0)
+ break;
+
+ if (expired) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ expired = true;
+ }
+
+ for (i = 0; i < MAX_CLKS && scpd->clk[i]; i++)
+ clk_disable_unprepare(scpd->clk[i]);
+
+ return 0;
+
+out:
+ dev_err(scp->dev, "Failed to power off domain %s\n", genpd->name);
+
+ return ret;
+}
+
+static bool scpsys_active_wakeup(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ struct scp_domain *scpd;
+
+ genpd = pd_to_genpd(dev->pm_domain);
+ scpd = container_of(genpd, struct scp_domain, genpd);
+
+ return scpd->active_wakeup;
+}
+
+static int __init scpsys_probe(struct platform_device *pdev)
+{
+ struct genpd_onecell_data *pd_data;
+ struct resource *res;
+ int i, j, ret;
+ struct scp *scp;
+ struct clk *clk[MT8173_CLK_MAX];
+
+ scp = devm_kzalloc(&pdev->dev, sizeof(*scp), GFP_KERNEL);
+ if (!scp)
+ return -ENOMEM;
+
+ scp->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ scp->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(scp->base))
+ return PTR_ERR(scp->base);
+
+ pd_data = &scp->pd_data;
+
+ pd_data->domains = devm_kzalloc(&pdev->dev,
+ sizeof(*pd_data->domains) * NUM_DOMAINS, GFP_KERNEL);
+ if (!pd_data->domains)
+ return -ENOMEM;
+
+ clk[MT8173_CLK_MM] = devm_clk_get(&pdev->dev, "mm");
+ if (IS_ERR(clk[MT8173_CLK_MM]))
+ return PTR_ERR(clk[MT8173_CLK_MM]);
+
+ clk[MT8173_CLK_MFG] = devm_clk_get(&pdev->dev, "mfg");
+ if (IS_ERR(clk[MT8173_CLK_MFG]))
+ return PTR_ERR(clk[MT8173_CLK_MFG]);
+
+ clk[MT8173_CLK_VENC] = devm_clk_get(&pdev->dev, "venc");
+ if (IS_ERR(clk[MT8173_CLK_VENC]))
+ return PTR_ERR(clk[MT8173_CLK_VENC]);
+
+ clk[MT8173_CLK_VENC_LT] = devm_clk_get(&pdev->dev, "venc_lt");
+ if (IS_ERR(clk[MT8173_CLK_VENC_LT]))
+ return PTR_ERR(clk[MT8173_CLK_VENC_LT]);
+
+ scp->infracfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "infracfg");
+ if (IS_ERR(scp->infracfg)) {
+ dev_err(&pdev->dev, "Cannot find infracfg controller: %ld\n",
+ PTR_ERR(scp->infracfg));
+ return PTR_ERR(scp->infracfg);
+ }
+
+ pd_data->num_domains = NUM_DOMAINS;
+
+ for (i = 0; i < NUM_DOMAINS; i++) {
+ struct scp_domain *scpd = &scp->domains[i];
+ struct generic_pm_domain *genpd = &scpd->genpd;
+ const struct scp_domain_data *data = &scp_domain_data[i];
+
+ pd_data->domains[i] = genpd;
+ scpd->scp = scp;
+
+ scpd->sta_mask = data->sta_mask;
+ scpd->ctl_addr = scp->base + data->ctl_offs;
+ scpd->sram_pdn_bits = data->sram_pdn_bits;
+ scpd->sram_pdn_ack_bits = data->sram_pdn_ack_bits;
+ scpd->bus_prot_mask = data->bus_prot_mask;
+ scpd->active_wakeup = data->active_wakeup;
+ for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++)
+ scpd->clk[j] = clk[data->clk_id[j]];
+
+ genpd->name = data->name;
+ genpd->power_off = scpsys_power_off;
+ genpd->power_on = scpsys_power_on;
+ genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
+
+ /*
+ * Initially turn on all domains to make the domains usable
+ * with !CONFIG_PM and to get the hardware in sync with the
+ * software. The unused domains will be switched off during
+ * late_init time.
+ */
+ genpd->power_on(genpd);
+
+ pm_genpd_init(genpd, NULL, false);
+ }
+
+ /*
+ * We are not allowed to fail here since there is no way to unregister
+ * a power domain. Once registered above we have to keep the domains
+ * valid.
+ */
+
+ ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_ASYNC],
+ pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D]);
+ if (ret && IS_ENABLED(CONFIG_PM))
+ dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
+
+ ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D],
+ pd_data->domains[MT8173_POWER_DOMAIN_MFG]);
+ if (ret && IS_ENABLED(CONFIG_PM))
+ dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
+
+ ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
+
+ return 0;
+}
+
+static const struct of_device_id of_scpsys_match_tbl[] = {
+ {
+ .compatible = "mediatek,mt8173-scpsys",
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver scpsys_drv = {
+ .driver = {
+ .name = "mtk-scpsys",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_scpsys_match_tbl),
+ },
+};
+
+module_platform_driver_probe(scpsys_drv, scpsys_probe);
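
Worth isolating: scpsys_domain_is_on() reads two status registers and treats "both set" as on, "both clear" as off, and a mismatch as a domain still in transition, which is why the power_on/power_off loops keep polling until the value settles. A minimal sketch of that decode:

#include <stdint.h>
#include <stdio.h>

/* 1 = on, 0 = off, -1 = mid-transition, as in scpsys_domain_is_on(). */
static int domain_is_on(uint32_t status, uint32_t status2, uint32_t mask)
{
	int a = !!(status & mask);
	int b = !!(status2 & mask);

	if (a && b)
		return 1;
	if (!a && !b)
		return 0;
	return -1; /* only one bit set: power state still changing */
}

int main(void)
{
	printf("%d %d %d\n",
	       domain_is_on(0x8, 0x8, 0x8),  /* fully on */
	       domain_is_on(0x0, 0x0, 0x8),  /* fully off */
	       domain_is_on(0x8, 0x0, 0x8)); /* transitioning */
	return 0;
}
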
diff --git a/kernel/drivers/soc/qcom/Kconfig b/kernel/drivers/soc/qcom/Kconfig
index 460b2dba1..eec76141d 100644
--- a/kernel/drivers/soc/qcom/Kconfig
+++ b/kernel/drivers/soc/qcom/Kconfig
@@ -10,3 +10,42 @@ config QCOM_GSBI
functions for connecting the underlying serial UART, SPI, and I2C
devices to the output pins.
+config QCOM_PM
+ bool "Qualcomm Power Management"
+ depends on ARCH_QCOM && !ARM64
+ select QCOM_SCM
+ help
+ QCOM Platform specific power driver to manage cores and L2 low power
+ modes. It interface with various system drivers to put the cores in
+ low power modes.
+
+config QCOM_SMEM
+ tristate "Qualcomm Shared Memory Manager (SMEM)"
+ depends on ARCH_QCOM
+ depends on HWSPINLOCK
+ help
+ Say y here to enable support for the Qualcomm Shared Memory Manager.
+ The driver provides an interface to items in a heap shared among all
+ processors in a Qualcomm platform.
+
+config QCOM_SMD
+ tristate "Qualcomm Shared Memory Driver (SMD)"
+ depends on QCOM_SMEM
+ help
+ Say y here to enable support for the Qualcomm Shared Memory Driver
+ providing communication channels to remote processors in Qualcomm
+ platforms.
+
+config QCOM_SMD_RPM
+ tristate "Qualcomm Resource Power Manager (RPM) over SMD"
+ depends on QCOM_SMD && OF
+ help
+ If you say yes to this option, support will be included for the
+ Resource Power Manager system found in the Qualcomm 8974 based
+ devices.
+
+ This is required to access many regulators, clocks and bus
+ frequencies controlled by the RPM on these devices.
+
+ Say M here if you want to include support for the Qualcomm RPM as a
+ module. This will build a module called "qcom-smd-rpm".
diff --git a/kernel/drivers/soc/qcom/Makefile b/kernel/drivers/soc/qcom/Makefile
index 438901257..10a93d168 100644
--- a/kernel/drivers/soc/qcom/Makefile
+++ b/kernel/drivers/soc/qcom/Makefile
@@ -1 +1,5 @@
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
+obj-$(CONFIG_QCOM_PM) += spm.o
+obj-$(CONFIG_QCOM_SMD) += smd.o
+obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
+obj-$(CONFIG_QCOM_SMEM) += smem.o
diff --git a/kernel/drivers/soc/qcom/smd-rpm.c b/kernel/drivers/soc/qcom/smd-rpm.c
new file mode 100644
index 000000000..2969321e1
--- /dev/null
+++ b/kernel/drivers/soc/qcom/smd-rpm.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include <linux/soc/qcom/smd.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#define RPM_REQUEST_TIMEOUT (5 * HZ)
+
+/**
+ * struct qcom_smd_rpm - state of the rpm device driver
+ * @rpm_channel: reference to the smd channel
+ * @ack: completion for acks
+ * @lock: mutual exclusion around the send/complete pair
+ * @ack_status: result of the rpm request
+ */
+struct qcom_smd_rpm {
+ struct qcom_smd_channel *rpm_channel;
+
+ struct completion ack;
+ struct mutex lock;
+ int ack_status;
+};
+
+/**
+ * struct qcom_rpm_header - header for all rpm requests and responses
+ * @service_type: identifier of the service
+ * @length: length of the payload
+ */
+struct qcom_rpm_header {
+ __le32 service_type;
+ __le32 length;
+};
+
+/**
+ * struct qcom_rpm_request - request message to the rpm
+ * @msg_id: identifier of the outgoing message
+ * @flags: active/sleep state flags
+ * @type: resource type
+ * @id: resource id
+ * @data_len: length of the payload following this header
+ */
+struct qcom_rpm_request {
+ __le32 msg_id;
+ __le32 flags;
+ __le32 type;
+ __le32 id;
+ __le32 data_len;
+};
+
+/**
+ * struct qcom_rpm_message - response message from the rpm
+ * @msg_type: indicator of the type of message
+ * @length: the size of this message, including the message header
+ * @msg_id: message id
+ * @message: textual message from the rpm
+ *
+ * Multiple of these messages can be stacked in an rpm message.
+ */
+struct qcom_rpm_message {
+ __le32 msg_type;
+ __le32 length;
+ union {
+ __le32 msg_id;
+ u8 message[0];
+ };
+};
+
+#define RPM_SERVICE_TYPE_REQUEST 0x00716572 /* "req\0" */
+
+#define RPM_MSG_TYPE_ERR 0x00727265 /* "err\0" */
+#define RPM_MSG_TYPE_MSG_ID 0x2367736d /* "msg#" */
+
+/**
+ * qcom_rpm_smd_write - write @buf to @type:@id
+ * @rpm: rpm handle
+ * @type: resource type
+ * @id: resource identifier
+ * @buf: the data to be written
+ * @count: number of bytes in @buf
+ */
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+ int state,
+ u32 type, u32 id,
+ void *buf,
+ size_t count)
+{
+ static unsigned msg_id = 1;
+ int left;
+ int ret;
+ struct {
+ struct qcom_rpm_header hdr;
+ struct qcom_rpm_request req;
+ u8 payload[];
+ } *pkt;
+ size_t size = sizeof(*pkt) + count;
+
+ /* SMD packets to the RPM may not exceed 256 bytes */
+ if (WARN_ON(size >= 256))
+ return -EINVAL;
+
+ pkt = kmalloc(size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ mutex_lock(&rpm->lock);
+
+ pkt->hdr.service_type = cpu_to_le32(RPM_SERVICE_TYPE_REQUEST);
+ pkt->hdr.length = cpu_to_le32(sizeof(struct qcom_rpm_request) + count);
+
+ pkt->req.msg_id = cpu_to_le32(msg_id++);
+ pkt->req.flags = cpu_to_le32(state);
+ pkt->req.type = cpu_to_le32(type);
+ pkt->req.id = cpu_to_le32(id);
+ pkt->req.data_len = cpu_to_le32(count);
+ memcpy(pkt->payload, buf, count);
+
+ ret = qcom_smd_send(rpm->rpm_channel, pkt, size);
+ if (ret)
+ goto out;
+
+ left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT);
+ if (!left)
+ ret = -ETIMEDOUT;
+ else
+ ret = rpm->ack_status;
+
+out:
+ kfree(pkt);
+ mutex_unlock(&rpm->lock);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_smd_write);
+
+static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
+ const void *data,
+ size_t count)
+{
+ const struct qcom_rpm_header *hdr = data;
+ size_t hdr_length = le32_to_cpu(hdr->length);
+ const struct qcom_rpm_message *msg;
+ struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev);
+ const u8 *buf = data + sizeof(struct qcom_rpm_header);
+ const u8 *end = buf + hdr_length;
+ char msgbuf[32];
+ int status = 0;
+ u32 len, msg_length;
+
+ if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST ||
+ hdr_length < sizeof(struct qcom_rpm_message)) {
+ dev_err(&qsdev->dev, "invalid request\n");
+ return 0;
+ }
+
+ while (buf < end) {
+ msg = (struct qcom_rpm_message *)buf;
+ msg_length = le32_to_cpu(msg->length);
+ switch (le32_to_cpu(msg->msg_type)) {
+ case RPM_MSG_TYPE_MSG_ID:
+ break;
+ case RPM_MSG_TYPE_ERR:
+ len = min_t(u32, ALIGN(msg_length, 4), sizeof(msgbuf));
+ memcpy_fromio(msgbuf, msg->message, len);
+ msgbuf[len - 1] = 0;
+
+ if (!strcmp(msgbuf, "resource does not exist"))
+ status = -ENXIO;
+ else
+ status = -EINVAL;
+ break;
+ }
+
+ buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg_length, 4);
+ }
+
+ rpm->ack_status = status;
+ complete(&rpm->ack);
+ return 0;
+}
+
+static int qcom_smd_rpm_probe(struct qcom_smd_device *sdev)
+{
+ struct qcom_smd_rpm *rpm;
+
+ rpm = devm_kzalloc(&sdev->dev, sizeof(*rpm), GFP_KERNEL);
+ if (!rpm)
+ return -ENOMEM;
+
+ mutex_init(&rpm->lock);
+ init_completion(&rpm->ack);
+
+ rpm->rpm_channel = sdev->channel;
+
+ dev_set_drvdata(&sdev->dev, rpm);
+
+ return of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev);
+}
+
+static void qcom_smd_rpm_remove(struct qcom_smd_device *sdev)
+{
+ of_platform_depopulate(&sdev->dev);
+}
+
+static const struct of_device_id qcom_smd_rpm_of_match[] = {
+ { .compatible = "qcom,rpm-msm8974" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
+
+static struct qcom_smd_driver qcom_smd_rpm_driver = {
+ .probe = qcom_smd_rpm_probe,
+ .remove = qcom_smd_rpm_remove,
+ .callback = qcom_smd_rpm_callback,
+ .driver = {
+ .name = "qcom_smd_rpm",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_smd_rpm_of_match,
+ },
+};
+
+static int __init qcom_smd_rpm_init(void)
+{
+ return qcom_smd_driver_register(&qcom_smd_rpm_driver);
+}
+arch_initcall(qcom_smd_rpm_init);
+
+static void __exit qcom_smd_rpm_exit(void)
+{
+ qcom_smd_driver_unregister(&qcom_smd_rpm_driver);
+}
+module_exit(qcom_smd_rpm_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm SMD backed RPM driver");
+MODULE_LICENSE("GPL v2");
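
The request path in qcom_rpm_smd_write() packs [qcom_rpm_header][qcom_rpm_request][payload] into one buffer, all fields little-endian, and rejects packets of 256 bytes or more. A sketch of that packing, assuming a little-endian host (so the cpu_to_le32() conversions are elided) and made-up values for the resource type and id:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same layout as the driver's qcom_rpm_header / qcom_rpm_request. */
struct rpm_header  { uint32_t service_type, length; };
struct rpm_request { uint32_t msg_id, flags, type, id, data_len; };

#define RPM_SERVICE_TYPE_REQUEST 0x00716572u /* "req\0" */

int main(void)
{
	uint8_t payload[4] = { 1, 0, 0, 0 }; /* example 4-byte value */
	uint8_t pkt[sizeof(struct rpm_header) +
		    sizeof(struct rpm_request) + sizeof(payload)];
	struct rpm_header hdr = {
		.service_type = RPM_SERVICE_TYPE_REQUEST,
		.length = sizeof(struct rpm_request) + sizeof(payload),
	};
	struct rpm_request req = {
		.msg_id = 1,
		.flags = 0,         /* active-state request */
		.type = 0x12345678, /* made-up resource type */
		.id = 0,            /* made-up resource id */
		.data_len = sizeof(payload),
	};

	memcpy(pkt, &hdr, sizeof(hdr));
	memcpy(pkt + sizeof(hdr), &req, sizeof(req));
	memcpy(pkt + sizeof(hdr) + sizeof(req), payload, sizeof(payload));

	/* The driver WARNs and rejects sizes >= 256 bytes. */
	printf("packet size: %zu bytes\n", sizeof(pkt));
	return 0;
}
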
diff --git a/kernel/drivers/soc/qcom/smd.c b/kernel/drivers/soc/qcom/smd.c
new file mode 100644
index 000000000..86b598cff
--- /dev/null
+++ b/kernel/drivers/soc/qcom/smd.c
@@ -0,0 +1,1391 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smd.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/wait.h>
+
+/*
+ * The Qualcomm Shared Memory communication solution provides point-to-point
+ * channels for clients to send and receive streaming or packet based data.
+ *
+ * Each channel consists of a control item (channel info) and a ring buffer
+ * pair. The channel info carries information related to channel state, flow
+ * control and the offsets within the ring buffer.
+ *
+ * All allocated channels are listed in an allocation table, identifying the
+ * pair of items by name, type and remote processor.
+ *
+ * Upon creating a new channel the remote processor allocates channel info and
+ * ring buffer items from the smem heap and populates the allocation table. An
+ * interrupt is sent to the other end of the channel and a scan for new
+ * channels should be done. A channel never goes away; it will only change
+ * state.
+ *
+ * The remote processor signals its intent to bring up the communication
+ * channel by setting the state of its end of the channel to "opening" and
+ * sends out an interrupt. We detect this change and register a smd device to
+ * consume the channel. Upon finding a consumer we finish the handshake and the
+ * channel is up.
+ *
+ * Upon closing a channel, the remote processor will update the state of its
+ * end of the channel and signal us; we will then unregister any attached
+ * device and close our end of the channel.
+ *
+ * Devices attached to a channel can use the qcom_smd_send function to push
+ * data to the channel, this is done by copying the data into the tx ring
+ * buffer, updating the pointers in the channel info and signaling the remote
+ * processor.
+ *
+ * The remote processor does the equivalent when it transfers data, and upon
+ * receiving the interrupt we check the channel info for new data and deliver
+ * this to the attached device. If the device is not ready to receive the data
+ * we leave it in the ring buffer for now.
+ */
+
+struct smd_channel_info;
+struct smd_channel_info_pair;
+struct smd_channel_info_word;
+struct smd_channel_info_word_pair;
+
+#define SMD_ALLOC_TBL_COUNT 2
+#define SMD_ALLOC_TBL_SIZE 64
+
+/*
+ * This lists the various smem heap items relevant for the allocation table and
+ * smd channel entries.
+ */
+static const struct {
+ unsigned alloc_tbl_id;
+ unsigned info_base_id;
+ unsigned fifo_base_id;
+} smem_items[SMD_ALLOC_TBL_COUNT] = {
+ {
+ .alloc_tbl_id = 13,
+ .info_base_id = 14,
+ .fifo_base_id = 338
+ },
+ {
+ .alloc_tbl_id = 266,
+ .info_base_id = 138,
+ .fifo_base_id = 202,
+ },
+};
+
+/**
+ * struct qcom_smd_edge - representing a remote processor
+ * @smd: handle to qcom_smd
+ * @of_node: of_node handle for information related to this edge
+ * @edge_id: identifier of this edge
+ * @remote_pid: identifier of remote processor
+ * @irq: interrupt for signals on this edge
+ * @ipc_regmap: regmap handle holding the outgoing ipc register
+ * @ipc_offset: offset within @ipc_regmap of the register for ipc
+ * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap
+ * @channels: list of all channels detected on this edge
+ * @channels_lock: guard for modifications of @channels
+ * @allocated: array of bitmaps representing already allocated channels
+ * @need_rescan: flag that the @work needs to scan smem for new channels
+ * @smem_available: last available amount of smem triggering a channel scan
+ * @work: work item for edge housekeeping
+ */
+struct qcom_smd_edge {
+ struct qcom_smd *smd;
+ struct device_node *of_node;
+ unsigned edge_id;
+ unsigned remote_pid;
+
+ int irq;
+
+ struct regmap *ipc_regmap;
+ int ipc_offset;
+ int ipc_bit;
+
+ struct list_head channels;
+ spinlock_t channels_lock;
+
+ DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);
+
+ bool need_rescan;
+ unsigned smem_available;
+
+ struct work_struct work;
+};
+
+/*
+ * SMD channel states.
+ */
+enum smd_channel_state {
+ SMD_CHANNEL_CLOSED,
+ SMD_CHANNEL_OPENING,
+ SMD_CHANNEL_OPENED,
+ SMD_CHANNEL_FLUSHING,
+ SMD_CHANNEL_CLOSING,
+ SMD_CHANNEL_RESET,
+ SMD_CHANNEL_RESET_OPENING
+};
+
+/**
+ * struct qcom_smd_channel - smd channel struct
+ * @edge: qcom_smd_edge this channel is living on
+ * @qsdev: reference to an associated smd client device
+ * @name: name of the channel
+ * @state: local state of the channel
+ * @remote_state: remote state of the channel
+ * @info: byte aligned outgoing/incoming channel info
+ * @info_word: word aligned outgoing/incoming channel info
+ * @tx_lock: lock to make writes to the channel mutually exclusive
+ * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR
+ * @tx_fifo: pointer to the outgoing ring buffer
+ * @rx_fifo: pointer to the incoming ring buffer
+ * @fifo_size: size of each ring buffer
+ * @bounce_buffer: bounce buffer for reading wrapped packets
+ * @cb: callback function registered for this channel
+ * @recv_lock: guard for rx info modifications and cb pointer
+ * @pkt_size: size of the currently handled packet
+ * @list: list entry for @channels in qcom_smd_edge
+ */
+struct qcom_smd_channel {
+ struct qcom_smd_edge *edge;
+
+ struct qcom_smd_device *qsdev;
+
+ char *name;
+ enum smd_channel_state state;
+ enum smd_channel_state remote_state;
+
+ struct smd_channel_info_pair *info;
+ struct smd_channel_info_word_pair *info_word;
+
+ struct mutex tx_lock;
+ wait_queue_head_t fblockread_event;
+
+ void *tx_fifo;
+ void *rx_fifo;
+ int fifo_size;
+
+ void *bounce_buffer;
+ int (*cb)(struct qcom_smd_device *, const void *, size_t);
+
+ spinlock_t recv_lock;
+
+ int pkt_size;
+
+ struct list_head list;
+};
+
+/**
+ * struct qcom_smd - smd struct
+ * @dev: device struct
+ * @num_edges: number of entries in @edges
+ * @edges: array of edges to be handled
+ */
+struct qcom_smd {
+ struct device *dev;
+
+ unsigned num_edges;
+ struct qcom_smd_edge edges[0];
+};
+
+/*
+ * Format of the smd_info smem items, for byte aligned channels.
+ */
+struct smd_channel_info {
+ __le32 state;
+ u8 fDSR;
+ u8 fCTS;
+ u8 fCD;
+ u8 fRI;
+ u8 fHEAD;
+ u8 fTAIL;
+ u8 fSTATE;
+ u8 fBLOCKREADINTR;
+ __le32 tail;
+ __le32 head;
+};
+
+struct smd_channel_info_pair {
+ struct smd_channel_info tx;
+ struct smd_channel_info rx;
+};
+
+/*
+ * Format of the smd_info smem items, for word aligned channels.
+ */
+struct smd_channel_info_word {
+ __le32 state;
+ __le32 fDSR;
+ __le32 fCTS;
+ __le32 fCD;
+ __le32 fRI;
+ __le32 fHEAD;
+ __le32 fTAIL;
+ __le32 fSTATE;
+ __le32 fBLOCKREADINTR;
+ __le32 tail;
+ __le32 head;
+};
+
+struct smd_channel_info_word_pair {
+ struct smd_channel_info_word tx;
+ struct smd_channel_info_word rx;
+};
+
+#define GET_RX_CHANNEL_FLAG(channel, param) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
+ channel->info_word ? \
+ le32_to_cpu(channel->info_word->rx.param) : \
+ channel->info->rx.param; \
+ })
+
+#define GET_RX_CHANNEL_INFO(channel, param) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
+ le32_to_cpu(channel->info_word ? \
+ channel->info_word->rx.param : \
+ channel->info->rx.param); \
+ })
+
+#define SET_RX_CHANNEL_FLAG(channel, param, value) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
+ if (channel->info_word) \
+ channel->info_word->rx.param = cpu_to_le32(value); \
+ else \
+ channel->info->rx.param = value; \
+ })
+
+#define SET_RX_CHANNEL_INFO(channel, param, value) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
+ if (channel->info_word) \
+ channel->info_word->rx.param = cpu_to_le32(value); \
+ else \
+ channel->info->rx.param = cpu_to_le32(value); \
+ })
+
+#define GET_TX_CHANNEL_FLAG(channel, param) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
+ channel->info_word ? \
+ le32_to_cpu(channel->info_word->tx.param) : \
+ channel->info->tx.param; \
+ })
+
+#define GET_TX_CHANNEL_INFO(channel, param) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
+ le32_to_cpu(channel->info_word ? \
+ channel->info_word->tx.param : \
+ channel->info->tx.param); \
+ })
+
+#define SET_TX_CHANNEL_FLAG(channel, param, value) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
+ if (channel->info_word) \
+ channel->info_word->tx.param = cpu_to_le32(value); \
+ else \
+ channel->info->tx.param = value; \
+ })
+
+#define SET_TX_CHANNEL_INFO(channel, param, value) \
+ ({ \
+ BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
+ if (channel->info_word) \
+ channel->info_word->tx.param = cpu_to_le32(value); \
+ else \
+ channel->info->tx.param = cpu_to_le32(value); \
+ })
+
+/**
+ * struct qcom_smd_alloc_entry - channel allocation entry
+ * @name: channel name
+ * @cid: channel index
+ * @flags: channel flags and edge id
+ * @ref_count: reference count of the channel
+ */
+struct qcom_smd_alloc_entry {
+ u8 name[20];
+ __le32 cid;
+ __le32 flags;
+ __le32 ref_count;
+} __packed;
+
+#define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff
+#define SMD_CHANNEL_FLAGS_STREAM BIT(8)
+#define SMD_CHANNEL_FLAGS_PACKET BIT(9)
+
+/*
+ * Each smd packet contains a 20 byte header, with the first 4 being the length
+ * of the packet.
+ */
+#define SMD_PACKET_HEADER_LEN 20
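A minimal, runnable sketch of this framing for reference: a 20-byte header whose first 32-bit word is the little-endian payload length, remaining header bytes zeroed (qcom_smd_send() below builds exactly this shape as __le32 hdr[5]). The helper name and buffer sizes are illustrative only.

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define PKT_HEADER_LEN 20   /* mirrors SMD_PACKET_HEADER_LEN above */

    /* Frame a payload: 20-byte header whose first 32-bit word is the
     * little-endian payload length; the rest of the header stays zero. */
    static size_t smd_frame(uint8_t *out, const void *payload, uint32_t len)
    {
        memset(out, 0, PKT_HEADER_LEN);
        out[0] = len & 0xff;
        out[1] = (len >> 8) & 0xff;
        out[2] = (len >> 16) & 0xff;
        out[3] = (len >> 24) & 0xff;
        memcpy(out + PKT_HEADER_LEN, payload, len);
        return PKT_HEADER_LEN + len;
    }

    int main(void)
    {
        uint8_t buf[64];

        assert(smd_frame(buf, "abcd", 4) == 24 && buf[0] == 4);
        return 0;
    }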
+
+/*
+ * Signal the remote processor associated with 'channel'.
+ */
+static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
+{
+ struct qcom_smd_edge *edge = channel->edge;
+
+ regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
+}
+
+/*
+ * Initialize the tx channel info
+ */
+static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
+{
+ SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
+ SET_TX_CHANNEL_FLAG(channel, fDSR, 0);
+ SET_TX_CHANNEL_FLAG(channel, fCTS, 0);
+ SET_TX_CHANNEL_FLAG(channel, fCD, 0);
+ SET_TX_CHANNEL_FLAG(channel, fRI, 0);
+ SET_TX_CHANNEL_FLAG(channel, fHEAD, 0);
+ SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
+ SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
+ SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
+ SET_TX_CHANNEL_INFO(channel, head, 0);
+ SET_TX_CHANNEL_INFO(channel, tail, 0);
+
+ qcom_smd_signal_channel(channel);
+
+ channel->state = SMD_CHANNEL_CLOSED;
+ channel->pkt_size = 0;
+}
+
+/*
+ * Calculate the amount of data available in the rx fifo
+ */
+static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
+{
+ unsigned head;
+ unsigned tail;
+
+ head = GET_RX_CHANNEL_INFO(channel, head);
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+ return (head - tail) & (channel->fifo_size - 1);
+}
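The head/tail arithmetic assumes the fifo size is a power of two, so the unsigned subtraction wraps correctly even when head has wrapped around past tail. A standalone sketch of the same computation:

    #include <assert.h>

    static unsigned rx_avail(unsigned head, unsigned tail, unsigned fifo_size)
    {
        /* fifo_size must be a power of two */
        return (head - tail) & (fifo_size - 1);
    }

    int main(void)
    {
        assert(rx_avail(100, 40, 1024) == 60); /* no wrap */
        assert(rx_avail(8, 1000, 1024) == 32); /* head wrapped around */
        return 0;
    }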
+
+/*
+ * Set tx channel state and inform the remote processor
+ */
+static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
+ int state)
+{
+ struct qcom_smd_edge *edge = channel->edge;
+ bool is_open = state == SMD_CHANNEL_OPENED;
+
+ if (channel->state == state)
+ return;
+
+ dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);
+
+ SET_TX_CHANNEL_FLAG(channel, fDSR, is_open);
+ SET_TX_CHANNEL_FLAG(channel, fCTS, is_open);
+ SET_TX_CHANNEL_FLAG(channel, fCD, is_open);
+
+ SET_TX_CHANNEL_INFO(channel, state, state);
+ SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
+
+ channel->state = state;
+ qcom_smd_signal_channel(channel);
+}
+
+/*
+ * Copy count bytes of data using 32bit accesses, if that's required.
+ */
+static void smd_copy_to_fifo(void __iomem *dst,
+ const void *src,
+ size_t count,
+ bool word_aligned)
+{
+ if (word_aligned) {
+ __iowrite32_copy(dst, src, count / sizeof(u32));
+ } else {
+ memcpy_toio(dst, src, count);
+ }
+}
+
+/*
+ * Copy count bytes of data using 32bit accesses, if that is required.
+ */
+static void smd_copy_from_fifo(void *_dst,
+ const void __iomem *_src,
+ size_t count,
+ bool word_aligned)
+{
+ u32 *dst = (u32 *)_dst;
+ u32 *src = (u32 *)_src;
+
+ if (word_aligned) {
+ count /= sizeof(u32);
+ while (count--)
+ *dst++ = __raw_readl(src++);
+ } else {
+ memcpy_fromio(_dst, _src, count);
+ }
+}
+
+/*
+ * Read count bytes of data from the rx fifo into buf, but don't advance the
+ * tail.
+ */
+static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
+ void *buf, size_t count)
+{
+ bool word_aligned;
+ unsigned tail;
+ size_t len;
+
+ word_aligned = channel->info_word;
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+ len = min_t(size_t, count, channel->fifo_size - tail);
+ if (len) {
+ smd_copy_from_fifo(buf,
+ channel->rx_fifo + tail,
+ len,
+ word_aligned);
+ }
+
+ if (len != count) {
+ smd_copy_from_fifo(buf + len,
+ channel->rx_fifo,
+ count - len,
+ word_aligned);
+ }
+
+ return count;
+}
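This is the standard two-segment ring-buffer read: take what fits up to the end of the fifo, then wrap to the start for the remainder. A plain-memory sketch of the same split, leaving out the iomem accessors and word-aligned mode used above:

    #include <string.h>

    /* Peek `count` bytes starting at `tail`, wrapping at `fifo_size`.
     * Mirrors the split in qcom_smd_channel_peek() above. */
    static void fifo_peek(void *buf, const unsigned char *fifo,
                          size_t fifo_size, size_t tail, size_t count)
    {
        size_t len = count < fifo_size - tail ? count : fifo_size - tail;

        memcpy(buf, fifo + tail, len);
        memcpy((unsigned char *)buf + len, fifo, count - len);
    }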
+
+/*
+ * Advance the rx tail by count bytes.
+ */
+static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
+ size_t count)
+{
+ unsigned tail;
+
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+ tail += count;
+ tail &= (channel->fifo_size - 1);
+ SET_RX_CHANNEL_INFO(channel, tail, tail);
+}
+
+/*
+ * Read out a single packet from the rx fifo and deliver it to the device
+ */
+static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
+{
+ struct qcom_smd_device *qsdev = channel->qsdev;
+ unsigned tail;
+ size_t len;
+ void *ptr;
+ int ret;
+
+ if (!channel->cb)
+ return 0;
+
+ tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+ /* Use bounce buffer if the data wraps */
+ if (tail + channel->pkt_size >= channel->fifo_size) {
+ ptr = channel->bounce_buffer;
+ len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
+ } else {
+ ptr = channel->rx_fifo + tail;
+ len = channel->pkt_size;
+ }
+
+ ret = channel->cb(qsdev, ptr, len);
+ if (ret < 0)
+ return ret;
+
+ /* Only forward the tail if the client consumed the data */
+ qcom_smd_channel_advance(channel, len);
+
+ channel->pkt_size = 0;
+
+ return 0;
+}
+
+/*
+ * Per channel interrupt handling
+ */
+static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
+{
+ bool need_state_scan = false;
+ int remote_state;
+ __le32 pktlen;
+ int avail;
+ int ret;
+
+ /* Handle state changes */
+ remote_state = GET_RX_CHANNEL_INFO(channel, state);
+ if (remote_state != channel->remote_state) {
+ channel->remote_state = remote_state;
+ need_state_scan = true;
+ }
+ /* Indicate that we have seen any state change */
+ SET_RX_CHANNEL_FLAG(channel, fSTATE, 0);
+
+ /* Signal waiting qcom_smd_send() about the interrupt */
+ if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR))
+ wake_up_interruptible(&channel->fblockread_event);
+
+ /* Don't consume any data until we've opened the channel */
+ if (channel->state != SMD_CHANNEL_OPENED)
+ goto out;
+
+ /* Indicate that we've seen the new data */
+ SET_RX_CHANNEL_FLAG(channel, fHEAD, 0);
+
+ /* Consume data */
+ for (;;) {
+ avail = qcom_smd_channel_get_rx_avail(channel);
+
+ if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
+ qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
+ qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
+ channel->pkt_size = le32_to_cpu(pktlen);
+ } else if (channel->pkt_size && avail >= channel->pkt_size) {
+ ret = qcom_smd_channel_recv_single(channel);
+ if (ret)
+ break;
+ } else {
+ break;
+ }
+ }
+
+ /* Indicate that we have seen and updated tail */
+ SET_RX_CHANNEL_FLAG(channel, fTAIL, 1);
+
+ /* Signal the remote that we've consumed the data (if requested) */
+ if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) {
+ /* Ensure ordering of channel info updates */
+ wmb();
+
+ qcom_smd_signal_channel(channel);
+ }
+
+out:
+ return need_state_scan;
+}
+
+/*
+ * The edge interrupts are triggered by the remote processor on state changes,
+ * channel info updates or when new channels are created.
+ */
+static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
+{
+ struct qcom_smd_edge *edge = data;
+ struct qcom_smd_channel *channel;
+ unsigned available;
+ bool kick_worker = false;
+
+ /*
+ * Handle state changes or data on each of the channels on this edge
+ */
+ spin_lock(&edge->channels_lock);
+ list_for_each_entry(channel, &edge->channels, list) {
+ spin_lock(&channel->recv_lock);
+ kick_worker |= qcom_smd_channel_intr(channel);
+ spin_unlock(&channel->recv_lock);
+ }
+ spin_unlock(&edge->channels_lock);
+
+ /*
+ * Creating a new channel requires allocating an smem entry, so we only
+ * have to scan if the amount of available space in smem has changed
+ * since the last scan.
+ */
+ available = qcom_smem_get_free_space(edge->remote_pid);
+ if (available != edge->smem_available) {
+ edge->smem_available = available;
+ edge->need_rescan = true;
+ kick_worker = true;
+ }
+
+ if (kick_worker)
+ schedule_work(&edge->work);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Delivers any outstanding packets in the rx fifo; used after probing a
+ * client to deliver any packets that weren't delivered before the client
+ * was set up.
+ */
+static void qcom_smd_channel_resume(struct qcom_smd_channel *channel)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->recv_lock, flags);
+ qcom_smd_channel_intr(channel);
+ spin_unlock_irqrestore(&channel->recv_lock, flags);
+}
+
+/*
+ * Calculate how much space is available in the tx fifo.
+ */
+static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
+{
+ unsigned head;
+ unsigned tail;
+ unsigned mask = channel->fifo_size - 1;
+
+ head = GET_TX_CHANNEL_INFO(channel, head);
+ tail = GET_TX_CHANNEL_INFO(channel, tail);
+
+ return mask - ((head - tail) & mask);
+}
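Note that this returns mask - used rather than fifo_size - used: one byte of the fifo is deliberately held back so that head == tail always means "empty" and never "full". A quick standalone check of the arithmetic:

    #include <assert.h>

    static unsigned tx_avail(unsigned head, unsigned tail, unsigned fifo_size)
    {
        unsigned mask = fifo_size - 1;

        return mask - ((head - tail) & mask);
    }

    int main(void)
    {
        assert(tx_avail(0, 0, 1024) == 1023);  /* empty: one byte reserved */
        assert(tx_avail(512, 0, 1024) == 511); /* half full */
        return 0;
    }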
+
+/*
+ * Write count bytes of data into channel, possibly wrapping in the ring buffer
+ */
+static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
+ const void *data,
+ size_t count)
+{
+ bool word_aligned;
+ unsigned head;
+ size_t len;
+
+ word_aligned = channel->info_word;
+ head = GET_TX_CHANNEL_INFO(channel, head);
+
+ len = min_t(size_t, count, channel->fifo_size - head);
+ if (len) {
+ smd_copy_to_fifo(channel->tx_fifo + head,
+ data,
+ len,
+ word_aligned);
+ }
+
+ if (len != count) {
+ smd_copy_to_fifo(channel->tx_fifo,
+ data + len,
+ count - len,
+ word_aligned);
+ }
+
+ head += count;
+ head &= (channel->fifo_size - 1);
+ SET_TX_CHANNEL_INFO(channel, head, head);
+
+ return count;
+}
+
+/**
+ * qcom_smd_send - write data to smd channel
+ * @channel: channel handle
+ * @data: buffer of data to write
+ * @len: number of bytes to write
+ *
+ * This is a blocking write of len bytes into the channel's tx ring buffer
+ * that then signals the remote end. It will sleep until there is enough
+ * space available in the tx buffer, utilizing the fBLOCKREADINTR signaling
+ * mechanism to avoid polling.
+ */
+int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
+{
+ __le32 hdr[5] = { cpu_to_le32(len), };
+ int tlen = sizeof(hdr) + len;
+ int ret;
+
+ /* Word aligned channels only accept word size aligned data */
+ if (channel->info_word && len % 4)
+ return -EINVAL;
+
+ /* Reject packets that are too big */
+ if (tlen >= channel->fifo_size)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&channel->tx_lock);
+ if (ret)
+ return ret;
+
+ while (qcom_smd_get_tx_avail(channel) < tlen) {
+ if (channel->state != SMD_CHANNEL_OPENED) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0);
+
+ ret = wait_event_interruptible(channel->fblockread_event,
+ qcom_smd_get_tx_avail(channel) >= tlen ||
+ channel->state != SMD_CHANNEL_OPENED);
+ if (ret)
+ goto out;
+
+ SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
+ }
+
+ SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
+
+ qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
+ qcom_smd_write_fifo(channel, data, len);
+
+ SET_TX_CHANNEL_FLAG(channel, fHEAD, 1);
+
+ /* Ensure ordering of channel info updates */
+ wmb();
+
+ qcom_smd_signal_channel(channel);
+
+out:
+ mutex_unlock(&channel->tx_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smd_send);
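As a usage illustration, a hypothetical client might wrap the call as in the sketch below; the request struct, field names, and helper are invented for the example, and only qcom_smd_send() itself is part of this patch:

    struct my_req {
        __le32 cmd;
        __le32 arg;
    };

    static int my_client_send(struct qcom_smd_channel *channel, u32 cmd, u32 arg)
    {
        struct my_req req = {
            .cmd = cpu_to_le32(cmd),
            .arg = cpu_to_le32(arg),
        };

        /* May sleep until fifo space is available; fails with -EPIPE if
         * the channel is closed while waiting. */
        return qcom_smd_send(channel, &req, sizeof(req));
    }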
+
+static struct qcom_smd_device *to_smd_device(struct device *dev)
+{
+ return container_of(dev, struct qcom_smd_device, dev);
+}
+
+static struct qcom_smd_driver *to_smd_driver(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+
+ return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver);
+}
+
+static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+ struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver);
+ const struct qcom_smd_id *match = qsdrv->smd_match_table;
+ const char *name = qsdev->channel->name;
+
+ if (match) {
+ while (match->name[0]) {
+ if (!strcmp(match->name, name))
+ return 1;
+ match++;
+ }
+ }
+
+ return of_driver_match_device(dev, drv);
+}
+
+/*
+ * Probe the smd client.
+ *
+ * The remote side has indicated that it wants the channel to be opened, so
+ * complete the state handshake and probe our client driver.
+ */
+static int qcom_smd_dev_probe(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+ struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
+ struct qcom_smd_channel *channel = qsdev->channel;
+ size_t bb_size;
+ int ret;
+
+ /*
+ * Packets are at most 4kB; use a smaller bounce buffer if the fifo is smaller
+ */
+ bb_size = min(channel->fifo_size, SZ_4K);
+ channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
+ if (!channel->bounce_buffer)
+ return -ENOMEM;
+
+ channel->cb = qsdrv->callback;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);
+
+ ret = qsdrv->probe(qsdev);
+ if (ret)
+ goto err;
+
+ qcom_smd_channel_resume(channel);
+
+ return 0;
+
+err:
+ dev_err(&qsdev->dev, "probe failed\n");
+
+ channel->cb = NULL;
+ kfree(channel->bounce_buffer);
+ channel->bounce_buffer = NULL;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+ return ret;
+}
+
+/*
+ * Remove the smd client.
+ *
+ * The channel is going away, for some reason, so remove the smd client and
+ * reset the channel state.
+ */
+static int qcom_smd_dev_remove(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+ struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
+ struct qcom_smd_channel *channel = qsdev->channel;
+ unsigned long flags;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
+
+ /*
+ * Make sure we don't race with the code receiving data.
+ */
+ spin_lock_irqsave(&channel->recv_lock, flags);
+ channel->cb = NULL;
+ spin_unlock_irqrestore(&channel->recv_lock, flags);
+
+ /* Wake up any sleepers in qcom_smd_send() */
+ wake_up_interruptible(&channel->fblockread_event);
+
+ /*
+ * We expect that the client might block in remove() waiting for any
+ * outstanding calls to qcom_smd_send() to wake up and finish.
+ */
+ if (qsdrv->remove)
+ qsdrv->remove(qsdev);
+
+ /*
+ * The client is now gone, cleanup and reset the channel state.
+ */
+ channel->qsdev = NULL;
+ kfree(channel->bounce_buffer);
+ channel->bounce_buffer = NULL;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+
+ qcom_smd_channel_reset(channel);
+
+ return 0;
+}
+
+static struct bus_type qcom_smd_bus = {
+ .name = "qcom_smd",
+ .match = qcom_smd_dev_match,
+ .probe = qcom_smd_dev_probe,
+ .remove = qcom_smd_dev_remove,
+};
+
+/*
+ * Release function for the qcom_smd_device object.
+ */
+static void qcom_smd_release_device(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+
+ kfree(qsdev);
+}
+
+/*
+ * Finds the device_node for the smd child interested in this channel.
+ */
+static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
+ const char *channel)
+{
+ struct device_node *child;
+ const char *name;
+ const char *key;
+ int ret;
+
+ for_each_available_child_of_node(edge_node, child) {
+ key = "qcom,smd-channels";
+ ret = of_property_read_string(child, key, &name);
+ if (ret)
+ continue;
+
+ if (strcmp(name, channel) == 0)
+ return child;
+ }
+
+ return NULL;
+}
+
+/*
+ * Create a smd client device for a channel that is being opened.
+ */
+static int qcom_smd_create_device(struct qcom_smd_channel *channel)
+{
+ struct qcom_smd_device *qsdev;
+ struct qcom_smd_edge *edge = channel->edge;
+ struct device_node *node;
+ struct qcom_smd *smd = edge->smd;
+ int ret;
+
+ if (channel->qsdev)
+ return -EEXIST;
+
+ dev_dbg(smd->dev, "registering '%s'\n", channel->name);
+
+ qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
+ if (!qsdev)
+ return -ENOMEM;
+
+ node = qcom_smd_match_channel(edge->of_node, channel->name);
+ dev_set_name(&qsdev->dev, "%s.%s",
+ edge->of_node->name,
+ node ? node->name : channel->name);
+
+ qsdev->dev.parent = smd->dev;
+ qsdev->dev.bus = &qcom_smd_bus;
+ qsdev->dev.release = qcom_smd_release_device;
+ qsdev->dev.of_node = node;
+
+ qsdev->channel = channel;
+
+ channel->qsdev = qsdev;
+
+ ret = device_register(&qsdev->dev);
+ if (ret) {
+ dev_err(smd->dev, "device_register failed: %d\n", ret);
+ put_device(&qsdev->dev);
+ }
+
+ return ret;
+}
+
+/*
+ * Destroy a smd client device for a channel that's going away.
+ */
+static void qcom_smd_destroy_device(struct qcom_smd_channel *channel)
+{
+ struct device *dev;
+
+ BUG_ON(!channel->qsdev);
+
+ dev = &channel->qsdev->dev;
+
+ device_unregister(dev);
+ of_node_put(dev->of_node);
+ put_device(dev);
+}
+
+/**
+ * qcom_smd_driver_register - register a smd driver
+ * @qsdrv: qcom_smd_driver struct
+ */
+int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv)
+{
+ qsdrv->driver.bus = &qcom_smd_bus;
+ return driver_register(&qsdrv->driver);
+}
+EXPORT_SYMBOL(qcom_smd_driver_register);
+
+/**
+ * qcom_smd_driver_unregister - unregister a smd driver
+ * @qsdrv: qcom_smd_driver struct
+ */
+void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
+{
+ driver_unregister(&qsdrv->driver);
+}
+EXPORT_SYMBOL(qcom_smd_driver_unregister);
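Tying the pieces together, a hypothetical client driver registering against this bus could look like the following sketch. The channel and driver names are illustrative; the callback runs under the channel's recv_lock (see qcom_smd_channel_intr() above), so it must not sleep, and returning a negative value leaves the packet in the fifo:

    static int my_smd_callback(struct qcom_smd_device *sdev,
                               const void *data, size_t len)
    {
        /* Inspect the packet; non-sleeping context. */
        return 0;
    }

    static int my_smd_probe(struct qcom_smd_device *sdev)
    {
        /* The channel is opened by the bus before probe is called. */
        return 0;
    }

    static const struct qcom_smd_id my_smd_match[] = {
        { .name = "my_channel" },   /* illustrative channel name */
        {}
    };

    static struct qcom_smd_driver my_smd_driver = {
        .probe = my_smd_probe,
        .callback = my_smd_callback,
        .smd_match_table = my_smd_match,
        .driver = {
            .name = "my_smd_client",
        },
    };

    /* typically from module_init(): */
    /*     qcom_smd_driver_register(&my_smd_driver); */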
+
+/*
+ * Allocate the qcom_smd_channel object for a newly found smd channel,
+ * retrieving and validating the smem items involved.
+ */
+static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
+ unsigned smem_info_item,
+ unsigned smem_fifo_item,
+ char *name)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd *smd = edge->smd;
+ size_t fifo_size;
+ size_t info_size;
+ void *fifo_base;
+ void *info;
+ int ret;
+
+ channel = devm_kzalloc(smd->dev, sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return ERR_PTR(-ENOMEM);
+
+ channel->edge = edge;
+ channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
+ if (!channel->name)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&channel->tx_lock);
+ spin_lock_init(&channel->recv_lock);
+ init_waitqueue_head(&channel->fblockread_event);
+
+ info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto free_name_and_channel;
+ }
+
+ /*
+ * Use the size of the item to figure out which channel info struct to
+ * use.
+ */
+ if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
+ channel->info_word = info;
+ } else if (info_size == 2 * sizeof(struct smd_channel_info)) {
+ channel->info = info;
+ } else {
+ dev_err(smd->dev,
+ "channel info of size %zu not supported\n", info_size);
+ ret = -EINVAL;
+ goto free_name_and_channel;
+ }
+
+ fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size);
+ if (IS_ERR(fifo_base)) {
+ ret = PTR_ERR(fifo_base);
+ goto free_name_and_channel;
+ }
+
+ /* The channel consists of rx and tx fifos of equal size */
+ fifo_size /= 2;
+
+ dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
+ name, info_size, fifo_size);
+
+ channel->tx_fifo = fifo_base;
+ channel->rx_fifo = fifo_base + fifo_size;
+ channel->fifo_size = fifo_size;
+
+ qcom_smd_channel_reset(channel);
+
+ return channel;
+
+free_name_and_channel:
+ devm_kfree(smd->dev, channel->name);
+ devm_kfree(smd->dev, channel);
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * Scans the allocation table for any newly allocated channels, calls
+ * qcom_smd_create_channel() to create representations of these and add
+ * them to the edge's list of channels.
+ */
+static void qcom_discover_channels(struct qcom_smd_edge *edge)
+{
+ struct qcom_smd_alloc_entry *alloc_tbl;
+ struct qcom_smd_alloc_entry *entry;
+ struct qcom_smd_channel *channel;
+ struct qcom_smd *smd = edge->smd;
+ unsigned long flags;
+ unsigned fifo_id;
+ unsigned info_id;
+ int tbl;
+ int i;
+ u32 eflags, cid;
+
+ for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
+ alloc_tbl = qcom_smem_get(edge->remote_pid,
+ smem_items[tbl].alloc_tbl_id, NULL);
+ if (IS_ERR(alloc_tbl))
+ continue;
+
+ for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
+ entry = &alloc_tbl[i];
+ eflags = le32_to_cpu(entry->flags);
+ if (test_bit(i, edge->allocated[tbl]))
+ continue;
+
+ if (entry->ref_count == 0)
+ continue;
+
+ if (!entry->name[0])
+ continue;
+
+ if (!(eflags & SMD_CHANNEL_FLAGS_PACKET))
+ continue;
+
+ if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
+ continue;
+
+ cid = le32_to_cpu(entry->cid);
+ info_id = smem_items[tbl].info_base_id + cid;
+ fifo_id = smem_items[tbl].fifo_base_id + cid;
+
+ channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
+ if (IS_ERR(channel))
+ continue;
+
+ spin_lock_irqsave(&edge->channels_lock, flags);
+ list_add(&channel->list, &edge->channels);
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
+
+ dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
+ set_bit(i, edge->allocated[tbl]);
+ }
+ }
+
+ schedule_work(&edge->work);
+}
+
+/*
+ * This per-edge worker scans smem for any new channels and registers them. It
+ * then scans all registered channels for state changes that should be handled
+ * by creating or destroying smd client devices for the registered channels.
+ *
+ * LOCKING: edge->channels_lock does not need to be held during the traversal
+ * of the channels list, as it's done synchronously with the only writer.
+ */
+static void qcom_channel_state_worker(struct work_struct *work)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_edge *edge = container_of(work,
+ struct qcom_smd_edge,
+ work);
+ unsigned remote_state;
+
+ /*
+ * Rescan smem if we have reason to believe that there are new channels.
+ */
+ if (edge->need_rescan) {
+ edge->need_rescan = false;
+ qcom_discover_channels(edge);
+ }
+
+ /*
+ * Register a device for any closed channel where the remote processor
+ * is showing interest in opening the channel.
+ */
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (channel->state != SMD_CHANNEL_CLOSED)
+ continue;
+
+ remote_state = GET_RX_CHANNEL_INFO(channel, state);
+ if (remote_state != SMD_CHANNEL_OPENING &&
+ remote_state != SMD_CHANNEL_OPENED)
+ continue;
+
+ qcom_smd_create_device(channel);
+ }
+
+ /*
+ * Unregister the device for any channel that is opened where the
+ * remote processor is closing the channel.
+ */
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (channel->state != SMD_CHANNEL_OPENING &&
+ channel->state != SMD_CHANNEL_OPENED)
+ continue;
+
+ remote_state = GET_RX_CHANNEL_INFO(channel, state);
+ if (remote_state == SMD_CHANNEL_OPENING ||
+ remote_state == SMD_CHANNEL_OPENED)
+ continue;
+
+ qcom_smd_destroy_device(channel);
+ }
+}
+
+/*
+ * Parses an of_node describing an edge.
+ */
+static int qcom_smd_parse_edge(struct device *dev,
+ struct device_node *node,
+ struct qcom_smd_edge *edge)
+{
+ struct device_node *syscon_np;
+ const char *key;
+ int irq;
+ int ret;
+
+ INIT_LIST_HEAD(&edge->channels);
+ spin_lock_init(&edge->channels_lock);
+
+ INIT_WORK(&edge->work, qcom_channel_state_worker);
+
+ edge->of_node = of_node_get(node);
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq < 0) {
+ dev_err(dev, "required smd interrupt missing\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(dev, irq,
+ qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
+ node->name, edge);
+ if (ret) {
+ dev_err(dev, "failed to request smd irq\n");
+ return ret;
+ }
+
+ edge->irq = irq;
+
+ key = "qcom,smd-edge";
+ ret = of_property_read_u32(node, key, &edge->edge_id);
+ if (ret) {
+ dev_err(dev, "edge missing %s property\n", key);
+ return -EINVAL;
+ }
+
+ edge->remote_pid = QCOM_SMEM_HOST_ANY;
+ key = "qcom,remote-pid";
+ of_property_read_u32(node, key, &edge->remote_pid);
+
+ syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
+ if (!syscon_np) {
+ dev_err(dev, "no qcom,ipc node\n");
+ return -ENODEV;
+ }
+
+ edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
+ if (IS_ERR(edge->ipc_regmap))
+ return PTR_ERR(edge->ipc_regmap);
+
+ key = "qcom,ipc";
+ ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
+ if (ret < 0) {
+ dev_err(dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
+ if (ret < 0) {
+ dev_err(dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qcom_smd_probe(struct platform_device *pdev)
+{
+ struct qcom_smd_edge *edge;
+ struct device_node *node;
+ struct qcom_smd *smd;
+ size_t array_size;
+ int num_edges;
+ int ret;
+ int i = 0;
+ void *p;
+
+ /* Wait for smem */
+ p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL);
+ if (PTR_ERR(p) == -EPROBE_DEFER)
+ return PTR_ERR(p);
+
+ num_edges = of_get_available_child_count(pdev->dev.of_node);
+ array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
+ smd = devm_kzalloc(&pdev->dev, array_size, GFP_KERNEL);
+ if (!smd)
+ return -ENOMEM;
+ smd->dev = &pdev->dev;
+
+ smd->num_edges = num_edges;
+ for_each_available_child_of_node(pdev->dev.of_node, node) {
+ edge = &smd->edges[i++];
+ edge->smd = smd;
+
+ ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
+ if (ret)
+ continue;
+
+ edge->need_rescan = true;
+ schedule_work(&edge->work);
+ }
+
+ platform_set_drvdata(pdev, smd);
+
+ return 0;
+}
+
+/*
+ * Shut down all smd clients by making sure that each edge stops processing
+ * events and scanning for new channels, then call destroy on the devices.
+ */
+static int qcom_smd_remove(struct platform_device *pdev)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_edge *edge;
+ struct qcom_smd *smd = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < smd->num_edges; i++) {
+ edge = &smd->edges[i];
+
+ disable_irq(edge->irq);
+ cancel_work_sync(&edge->work);
+
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (!channel->qsdev)
+ continue;
+
+ qcom_smd_destroy_device(channel);
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smd_of_match[] = {
+ { .compatible = "qcom,smd" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_of_match);
+
+static struct platform_driver qcom_smd_driver = {
+ .probe = qcom_smd_probe,
+ .remove = qcom_smd_remove,
+ .driver = {
+ .name = "qcom-smd",
+ .of_match_table = qcom_smd_of_match,
+ },
+};
+
+static int __init qcom_smd_init(void)
+{
+ int ret;
+
+ ret = bus_register(&qcom_smd_bus);
+ if (ret) {
+ pr_err("failed to register smd bus: %d\n", ret);
+ return ret;
+ }
+
+ return platform_driver_register(&qcom_smd_driver);
+}
+postcore_initcall(qcom_smd_init);
+
+static void __exit qcom_smd_exit(void)
+{
+ platform_driver_unregister(&qcom_smd_driver);
+ bus_unregister(&qcom_smd_bus);
+}
+module_exit(qcom_smd_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/soc/qcom/smem.c b/kernel/drivers/soc/qcom/smem.c
new file mode 100644
index 000000000..19019aa09
--- /dev/null
+++ b/kernel/drivers/soc/qcom/smem.c
@@ -0,0 +1,795 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+
+/*
+ * The Qualcomm shared memory system is an allocate-only heap structure that
+ * consists of one or more memory areas that can be accessed by the processors
+ * in the SoC.
+ *
+ * All systems contain a global heap, accessible by all processors in the SoC,
+ * with a table of contents data structure (@smem_header) at the beginning of
+ * the main shared memory block.
+ *
+ * The global header contains meta data for allocations as well as a fixed list
+ * of 512 entries (@smem_global_entry) that can be initialized to reference
+ * parts of the shared memory space.
+ *
+ *
+ * In addition to this global heap a set of "private" heaps can be set up at
+ * boot time with access restrictions so that only certain processor pairs can
+ * access the data.
+ *
+ * These partitions are referenced from an optional partition table
+ * (@smem_ptable), that is found 4kB from the end of the main smem region. The
+ * partition table entries (@smem_ptable_entry) lists the involved processors
+ * (or hosts) and their location in the main shared memory region.
+ *
+ * Each partition starts with a header (@smem_partition_header) that identifies
+ * the partition and holds properties for the two internal memory regions. The
+ * two regions are cached and non-cached memory respectively. Each region
+ * contain a link list of allocation headers (@smem_private_entry) followed by
+ * their data.
+ *
+ * Items in the non-cached region are allocated from the start of the partition
+ * while items in the cached region are allocated from the end. The free area
+ * is hence the region between the cached and non-cached offsets.
+ *
+ *
+ * To synchronize allocations in the shared memory heaps a remote spinlock must
+ * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
+ * platforms.
+ *
+ */
+
+/*
+ * Item 3 of the global heap contains an array of versions for the various
+ * software components in the SoC. We verify that the boot loader version
+ * matches the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
+ */
+#define SMEM_ITEM_VERSION 3
+#define SMEM_MASTER_SBL_VERSION_INDEX 7
+#define SMEM_EXPECTED_VERSION 11
+
+/*
+ * The first 8 items are only to be allocated by the boot loader while
+ * initializing the heap.
+ */
+#define SMEM_ITEM_LAST_FIXED 8
+
+/* Highest accepted item number, for both global and private heaps */
+#define SMEM_ITEM_COUNT 512
+
+/* Processor/host identifier for the application processor */
+#define SMEM_HOST_APPS 0
+
+/* Max number of processors/hosts in a system */
+#define SMEM_HOST_COUNT 9
+
+/**
+ * struct smem_proc_comm - proc_comm communication struct (legacy)
+ * @command: current command to be executed
+ * @status: status of the currently requested command
+ * @params: parameters to the command
+ */
+struct smem_proc_comm {
+ __le32 command;
+ __le32 status;
+ __le32 params[2];
+};
+
+/**
+ * struct smem_global_entry - entry to reference smem items on the heap
+ * @allocated: boolean to indicate if this entry is used
+ * @offset: offset to the allocated space
+ * @size: size of the allocated space, 8 byte aligned
+ * @aux_base: base address for the memory region used by this unit, or 0 for
+ * the default region. bits 0,1 are reserved
+ */
+struct smem_global_entry {
+ __le32 allocated;
+ __le32 offset;
+ __le32 size;
+ __le32 aux_base; /* bits 1:0 reserved */
+};
+#define AUX_BASE_MASK 0xfffffffc
+
+/**
+ * struct smem_header - header found in beginning of primary smem region
+ * @proc_comm: proc_comm communication interface (legacy)
+ * @version: array of versions for the various subsystems
+ * @initialized: boolean to indicate that smem is initialized
+ * @free_offset: index of the first unallocated byte in smem
+ * @available: number of bytes available for allocation
+ * @reserved: reserved field, must be 0
+ * @toc: array of references to items
+ */
+struct smem_header {
+ struct smem_proc_comm proc_comm[4];
+ __le32 version[32];
+ __le32 initialized;
+ __le32 free_offset;
+ __le32 available;
+ __le32 reserved;
+ struct smem_global_entry toc[SMEM_ITEM_COUNT];
+};
+
+/**
+ * struct smem_ptable_entry - one entry in the @smem_ptable list
+ * @offset: offset, within the main shared memory region, of the partition
+ * @size: size of the partition
+ * @flags: flags for the partition (currently unused)
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @reserved: reserved entries for later use
+ */
+struct smem_ptable_entry {
+ __le32 offset;
+ __le32 size;
+ __le32 flags;
+ __le16 host0;
+ __le16 host1;
+ __le32 reserved[8];
+};
+
+/**
+ * struct smem_ptable - partition table for the private partitions
+ * @magic: magic number, must be SMEM_PTABLE_MAGIC
+ * @version: version of the partition table
+ * @num_entries: number of partitions in the table
+ * @reserved: for now reserved entries
+ * @entry: list of @smem_ptable_entry for the @num_entries partitions
+ */
+struct smem_ptable {
+ u8 magic[4];
+ __le32 version;
+ __le32 num_entries;
+ __le32 reserved[5];
+ struct smem_ptable_entry entry[];
+};
+
+static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
+
+/**
+ * struct smem_partition_header - header of the partitions
+ * @magic: magic number, must be SMEM_PART_MAGIC
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @size: size of the partition
+ * @offset_free_uncached: offset to the first free byte of uncached memory in
+ * this partition
+ * @offset_free_cached: offset to the first free byte of cached memory in this
+ * partition
+ * @reserved: for now reserved entries
+ */
+struct smem_partition_header {
+ u8 magic[4];
+ __le16 host0;
+ __le16 host1;
+ __le32 size;
+ __le32 offset_free_uncached;
+ __le32 offset_free_cached;
+ __le32 reserved[3];
+};
+
+static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
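The two free offsets imply the following partition layout, sketched from the description in the file-header comment above (uncached items are allocated upward from the header, cached items downward from the end):

    +---------------------------+ <- start of partition
    | smem_partition_header     |
    +---------------------------+
    | uncached items            |
    +---------------------------+ <- offset_free_uncached
    | free space                |
    +---------------------------+ <- offset_free_cached
    | cached items              |
    +---------------------------+ <- size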
+
+/**
+ * struct smem_private_entry - header of each item in the private partition
+ * @canary: magic number, must be SMEM_PRIVATE_CANARY
+ * @item: identifying number of the smem item
+ * @size: size of the data, including padding bytes
+ * @padding_data: number of bytes of padding of data
+ * @padding_hdr: number of bytes of padding between the header and the data
+ * @reserved: for now reserved entry
+ */
+struct smem_private_entry {
+ u16 canary; /* bytes are the same so no swapping needed */
+ __le16 item;
+ __le32 size; /* includes padding bytes */
+ __le16 padding_data;
+ __le16 padding_hdr;
+ __le32 reserved;
+};
+#define SMEM_PRIVATE_CANARY 0xa5a5
+
+/**
+ * struct smem_region - representation of a chunk of memory used for smem
+ * @aux_base: identifier of aux_mem base
+ * @virt_base: virtual base address of memory with this aux_mem identifier
+ * @size: size of the memory region
+ */
+struct smem_region {
+ u32 aux_base;
+ void __iomem *virt_base;
+ size_t size;
+};
+
+/**
+ * struct qcom_smem - device data for the smem device
+ * @dev: device pointer
+ * @hwlock: reference to a hwspinlock
+ * @partitions: list of pointers to partitions affecting the current
+ * processor/host
+ * @num_regions: number of @regions
+ * @regions: list of the memory regions defining the shared memory
+ */
+struct qcom_smem {
+ struct device *dev;
+
+ struct hwspinlock *hwlock;
+
+ struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+
+ unsigned num_regions;
+ struct smem_region regions[0];
+};
+
+static struct smem_private_entry *
+phdr_to_last_private_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + le32_to_cpu(phdr->offset_free_uncached);
+}
+
+static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + le32_to_cpu(phdr->offset_free_cached);
+}
+
+static struct smem_private_entry *
+phdr_to_first_private_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + sizeof(*phdr);
+}
+
+static struct smem_private_entry *
+private_entry_next(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
+ le32_to_cpu(e->size);
+}
+
+static void *entry_to_item(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
+}
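These helpers encode the on-heap layout of a single private item; given the field sizes in struct smem_private_entry the header is 16 bytes, so an item looks like:

    e                                <- struct smem_private_entry *
    +-----------------------------+
    | header (16 bytes)           |
    +-----------------------------+
    | padding_hdr bytes           |
    +-----------------------------+ <- entry_to_item(e)
    | data                        |
    | (size includes the trailing |
    |  padding_data bytes)        |
    +-----------------------------+ <- private_entry_next(e)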
+
+/* Pointer to the one and only smem handle */
+static struct qcom_smem *__smem;
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT 1000
+
+static int qcom_smem_alloc_private(struct qcom_smem *smem,
+ unsigned host,
+ unsigned item,
+ size_t size)
+{
+ struct smem_partition_header *phdr;
+ struct smem_private_entry *hdr, *end;
+ size_t alloc_size;
+ void *cached;
+
+ phdr = smem->partitions[host];
+ hdr = phdr_to_first_private_entry(phdr);
+ end = phdr_to_last_private_entry(phdr);
+ cached = phdr_to_first_cached_entry(phdr);
+
+ while (hdr < end) {
+ if (hdr->canary != SMEM_PRIVATE_CANARY) {
+ dev_err(smem->dev,
+ "Found invalid canary in host %d partition\n",
+ host);
+ return -EINVAL;
+ }
+
+ if (le16_to_cpu(hdr->item) == item)
+ return -EEXIST;
+
+ hdr = private_entry_next(hdr);
+ }
+
+ /* Check that we don't grow into the cached region */
+ alloc_size = sizeof(*hdr) + ALIGN(size, 8);
+ if ((void *)hdr + alloc_size >= cached) {
+ dev_err(smem->dev, "Out of memory\n");
+ return -ENOSPC;
+ }
+
+ hdr->canary = SMEM_PRIVATE_CANARY;
+ hdr->item = cpu_to_le16(item);
+ hdr->size = cpu_to_le32(ALIGN(size, 8));
+ hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
+ hdr->padding_hdr = 0;
+
+ /*
+ * Ensure the header is written before we advance the free offset, so
+ * that remote processors that do not take the remote spinlock still
+ * get a consistent view of the linked list.
+ */
+ wmb();
+ le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
+
+ return 0;
+}
+
+static int qcom_smem_alloc_global(struct qcom_smem *smem,
+ unsigned item,
+ size_t size)
+{
+ struct smem_header *header;
+ struct smem_global_entry *entry;
+
+ if (WARN_ON(item >= SMEM_ITEM_COUNT))
+ return -EINVAL;
+
+ header = smem->regions[0].virt_base;
+ entry = &header->toc[item];
+ if (entry->allocated)
+ return -EEXIST;
+
+ size = ALIGN(size, 8);
+ if (WARN_ON(size > le32_to_cpu(header->available)))
+ return -ENOMEM;
+
+ entry->offset = header->free_offset;
+ entry->size = cpu_to_le32(size);
+
+ /*
+ * Ensure the header is consistent before we mark the item allocated,
+ * so that remote processors will get a consistent view of the item
+ * even though they do not take the spinlock on read.
+ */
+ wmb();
+ entry->allocated = cpu_to_le32(1);
+
+ le32_add_cpu(&header->free_offset, size);
+ le32_add_cpu(&header->available, -size);
+
+ return 0;
+}
+
+/**
+ * qcom_smem_alloc() - allocate space for a smem item
+ * @host: remote processor id, or -1
+ * @item: smem item handle
+ * @size: number of bytes to be allocated
+ *
+ * Allocate space for a given smem item of size @size, given that the item is
+ * not yet allocated.
+ */
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
+{
+ unsigned long flags;
+ int ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ if (item < SMEM_ITEM_LAST_FIXED) {
+ dev_err(__smem->dev,
+ "Rejecting allocation of static entry %d\n", item);
+ return -EINVAL;
+ }
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret)
+ return ret;
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host])
+ ret = qcom_smem_alloc_private(__smem, host, item, size);
+ else
+ ret = qcom_smem_alloc_global(__smem, item, size);
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smem_alloc);
+
+static void *qcom_smem_get_global(struct qcom_smem *smem,
+ unsigned item,
+ size_t *size)
+{
+ struct smem_header *header;
+ struct smem_region *area;
+ struct smem_global_entry *entry;
+ u32 aux_base;
+ unsigned i;
+
+ if (WARN_ON(item >= SMEM_ITEM_COUNT))
+ return ERR_PTR(-EINVAL);
+
+ header = smem->regions[0].virt_base;
+ entry = &header->toc[item];
+ if (!entry->allocated)
+ return ERR_PTR(-ENXIO);
+
+ aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
+
+ for (i = 0; i < smem->num_regions; i++) {
+ area = &smem->regions[i];
+
+ if (area->aux_base == aux_base || !aux_base) {
+ if (size != NULL)
+ *size = le32_to_cpu(entry->size);
+ return area->virt_base + le32_to_cpu(entry->offset);
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static void *qcom_smem_get_private(struct qcom_smem *smem,
+ unsigned host,
+ unsigned item,
+ size_t *size)
+{
+ struct smem_partition_header *phdr;
+ struct smem_private_entry *e, *end;
+
+ phdr = smem->partitions[host];
+ e = phdr_to_first_private_entry(phdr);
+ end = phdr_to_last_private_entry(phdr);
+
+ while (e < end) {
+ if (e->canary != SMEM_PRIVATE_CANARY) {
+ dev_err(smem->dev,
+ "Found invalid canary in host %d partition\n",
+ host);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (le16_to_cpu(e->item) == item) {
+ if (size != NULL)
+ *size = le32_to_cpu(e->size) -
+ le16_to_cpu(e->padding_data);
+
+ return entry_to_item(e);
+ }
+
+ e = private_entry_next(e);
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+/**
+ * qcom_smem_get() - resolve pointer and size of a smem item
+ * @host: the remote processor, or -1
+ * @item: smem item handle
+ * @size: pointer to be filled out with size of the item
+ *
+ * Looks up the smem item and returns a pointer to it. The size of the smem
+ * item is returned in @size.
+ */
+void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
+{
+ unsigned long flags;
+ int ret;
+ void *ptr = ERR_PTR(-EPROBE_DEFER);
+
+ if (!__smem)
+ return ptr;
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host])
+ ptr = qcom_smem_get_private(__smem, host, item, size);
+ else
+ ptr = qcom_smem_get_global(__smem, item, size);
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+ return ptr;
+}
+EXPORT_SYMBOL(qcom_smem_get);
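A hypothetical consumer of this pair of calls might look like the sketch below; the host and item numbers are purely illustrative (items below SMEM_ITEM_LAST_FIXED are rejected, and -EEXIST from the alloc just means another party got there first):

    static int my_setup_shared_item(void)
    {
        size_t size;
        void *ptr;
        int ret;

        /* Allocate 32 bytes as item 100 in the partition shared with
         * host 1; both calls return -EPROBE_DEFER until smem is up. */
        ret = qcom_smem_alloc(1, 100, 32);
        if (ret < 0 && ret != -EEXIST)
            return ret;

        ptr = qcom_smem_get(1, 100, &size);
        if (IS_ERR(ptr))
            return PTR_ERR(ptr);

        /* ptr now references size bytes of shared memory */
        return 0;
    }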
+
+/**
+ * qcom_smem_get_free_space() - retrieve amount of free space in a partition
+ * @host: the remote processor identifying a partition, or -1
+ *
+ * To be used by smem clients as a quick way to determine if any new
+ * allocations have been made.
+ */
+int qcom_smem_get_free_space(unsigned host)
+{
+ struct smem_partition_header *phdr;
+ struct smem_header *header;
+ unsigned ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+ phdr = __smem->partitions[host];
+ ret = le32_to_cpu(phdr->offset_free_cached) -
+ le32_to_cpu(phdr->offset_free_uncached);
+ } else {
+ header = __smem->regions[0].virt_base;
+ ret = le32_to_cpu(header->available);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smem_get_free_space);
+
+static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
+{
+ __le32 *versions;
+ size_t size;
+
+ versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
+ if (IS_ERR(versions)) {
+ dev_err(smem->dev, "Unable to read the version item\n");
+ return -ENOENT;
+ }
+
+ if (size < sizeof(unsigned) * SMEM_MASTER_SBL_VERSION_INDEX) {
+ dev_err(smem->dev, "Version item is too small\n");
+ return -EINVAL;
+ }
+
+ return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
+}
+
+static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
+ unsigned local_host)
+{
+ struct smem_partition_header *header;
+ struct smem_ptable_entry *entry;
+ struct smem_ptable *ptable;
+ unsigned remote_host;
+ u32 version, host0, host1;
+ int i;
+
+ ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
+ if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
+ return 0;
+
+ version = le32_to_cpu(ptable->version);
+ if (version != 1) {
+ dev_err(smem->dev,
+ "Unsupported partition header version %d\n", version);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+ entry = &ptable->entry[i];
+ host0 = le16_to_cpu(entry->host0);
+ host1 = le16_to_cpu(entry->host1);
+
+ if (host0 != local_host && host1 != local_host)
+ continue;
+
+ if (!le32_to_cpu(entry->offset))
+ continue;
+
+ if (!le32_to_cpu(entry->size))
+ continue;
+
+ if (host0 == local_host)
+ remote_host = host1;
+ else
+ remote_host = host0;
+
+ if (remote_host >= SMEM_HOST_COUNT) {
+ dev_err(smem->dev,
+ "Invalid remote host %d\n",
+ remote_host);
+ return -EINVAL;
+ }
+
+ if (smem->partitions[remote_host]) {
+ dev_err(smem->dev,
+ "Already found a partition for host %d\n",
+ remote_host);
+ return -EINVAL;
+ }
+
+ header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+ host0 = le16_to_cpu(header->host0);
+ host1 = le16_to_cpu(header->host1);
+
+ if (memcmp(header->magic, SMEM_PART_MAGIC,
+ sizeof(header->magic))) {
+ dev_err(smem->dev,
+ "Partition %d has invalid magic\n", i);
+ return -EINVAL;
+ }
+
+ if (host0 != local_host && host1 != local_host) {
+ dev_err(smem->dev,
+ "Partition %d hosts are invalid\n", i);
+ return -EINVAL;
+ }
+
+ if (host0 != remote_host && host1 != remote_host) {
+ dev_err(smem->dev,
+ "Partition %d hosts are invalid\n", i);
+ return -EINVAL;
+ }
+
+ if (header->size != entry->size) {
+ dev_err(smem->dev,
+ "Partition %d has invalid size\n", i);
+ return -EINVAL;
+ }
+
+ if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
+ dev_err(smem->dev,
+ "Partition %d has invalid free pointer\n", i);
+ return -EINVAL;
+ }
+
+ smem->partitions[remote_host] = header;
+ }
+
+ return 0;
+}
+
+static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
+ const char *name, int i)
+{
+ struct device_node *np;
+ struct resource r;
+ int ret;
+
+ np = of_parse_phandle(dev->of_node, name, 0);
+ if (!np) {
+ dev_err(dev, "No %s specified\n", name);
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ if (ret)
+ return ret;
+
+ smem->regions[i].aux_base = (u32)r.start;
+ smem->regions[i].size = resource_size(&r);
+ smem->regions[i].virt_base = devm_ioremap_nocache(dev, r.start,
+ resource_size(&r));
+ if (!smem->regions[i].virt_base)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qcom_smem_probe(struct platform_device *pdev)
+{
+ struct smem_header *header;
+ struct qcom_smem *smem;
+ size_t array_size;
+ int num_regions;
+ int hwlock_id;
+ u32 version;
+ int ret;
+
+ num_regions = 1;
+ if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
+ num_regions++;
+
+ array_size = num_regions * sizeof(struct smem_region);
+ smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
+ if (!smem)
+ return -ENOMEM;
+
+ smem->dev = &pdev->dev;
+ smem->num_regions = num_regions;
+
+ ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
+ if (ret)
+ return ret;
+
+ if (num_regions > 1) {
+ ret = qcom_smem_map_memory(smem, &pdev->dev,
+ "qcom,rpm-msg-ram", 1);
+ if (ret)
+ return ret;
+ }
+
+ header = smem->regions[0].virt_base;
+ if (le32_to_cpu(header->initialized) != 1 ||
+ le32_to_cpu(header->reserved)) {
+ dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
+ return -EINVAL;
+ }
+
+ version = qcom_smem_get_sbl_version(smem);
+ if (version >> 16 != SMEM_EXPECTED_VERSION) {
+ dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
+ return -EINVAL;
+ }
+
+ ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
+ if (ret < 0)
+ return ret;
+
+ hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
+ if (hwlock_id < 0) {
+ dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+ return hwlock_id;
+ }
+
+ smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+ if (!smem->hwlock)
+ return -ENXIO;
+
+ __smem = smem;
+
+ return 0;
+}
+
+static int qcom_smem_remove(struct platform_device *pdev)
+{
+ hwspin_lock_free(__smem->hwlock);
+ __smem = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smem_of_match[] = {
+ { .compatible = "qcom,smem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
+
+static struct platform_driver qcom_smem_driver = {
+ .probe = qcom_smem_probe,
+ .remove = qcom_smem_remove,
+ .driver = {
+ .name = "qcom-smem",
+ .of_match_table = qcom_smem_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init qcom_smem_init(void)
+{
+ return platform_driver_register(&qcom_smem_driver);
+}
+arch_initcall(qcom_smem_init);
+
+static void __exit qcom_smem_exit(void)
+{
+ platform_driver_unregister(&qcom_smem_driver);
+}
+module_exit(qcom_smem_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/soc/qcom/spm.c b/kernel/drivers/soc/qcom/spm.c
new file mode 100644
index 000000000..b04b05a09
--- /dev/null
+++ b/kernel/drivers/soc/qcom/spm.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2015, Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/qcom_scm.h>
+
+#include <asm/cpuidle.h>
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+
+#define MAX_PMIC_DATA 2
+#define MAX_SEQ_DATA 64
+#define SPM_CTL_INDEX 0x7f
+#define SPM_CTL_INDEX_SHIFT 4
+#define SPM_CTL_EN BIT(0)
+
+enum pm_sleep_mode {
+ PM_SLEEP_MODE_STBY,
+ PM_SLEEP_MODE_RET,
+ PM_SLEEP_MODE_SPC,
+ PM_SLEEP_MODE_PC,
+ PM_SLEEP_MODE_NR,
+};
+
+enum spm_reg {
+ SPM_REG_CFG,
+ SPM_REG_SPM_CTL,
+ SPM_REG_DLY,
+ SPM_REG_PMIC_DLY,
+ SPM_REG_PMIC_DATA_0,
+ SPM_REG_PMIC_DATA_1,
+ SPM_REG_VCTL,
+ SPM_REG_SEQ_ENTRY,
+ SPM_REG_SPM_STS,
+ SPM_REG_PMIC_STS,
+ SPM_REG_NR,
+};
+
+struct spm_reg_data {
+ const u8 *reg_offset;
+ u32 spm_cfg;
+ u32 spm_dly;
+ u32 pmic_dly;
+ u32 pmic_data[MAX_PMIC_DATA];
+ u8 seq[MAX_SEQ_DATA];
+ u8 start_index[PM_SLEEP_MODE_NR];
+};
+
+struct spm_driver_data {
+ void __iomem *reg_base;
+ const struct spm_reg_data *reg_data;
+};
+
+static const u8 spm_reg_offset_v2_1[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x30,
+ [SPM_REG_DLY] = 0x34,
+ [SPM_REG_SEQ_ENTRY] = 0x80,
+};
+
+/* SPM register data for 8974, 8084 */
+static const struct spm_reg_data spm_reg_8974_8084_cpu = {
+ .reg_offset = spm_reg_offset_v2_1,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03,
+ 0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30,
+ 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 3,
+};
+
+static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x20,
+ [SPM_REG_PMIC_DLY] = 0x24,
+ [SPM_REG_PMIC_DATA_0] = 0x28,
+ [SPM_REG_PMIC_DATA_1] = 0x2C,
+ [SPM_REG_SEQ_ENTRY] = 0x80,
+};
+
+/* SPM register data for 8064 */
+static const struct spm_reg_data spm_reg_8064_cpu = {
+ .reg_offset = spm_reg_offset_v1_1,
+ .spm_cfg = 0x1F,
+ .pmic_dly = 0x02020004,
+ .pmic_data[0] = 0x0084009C,
+ .pmic_data[1] = 0x00A4001C,
+ .seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01,
+ 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 2,
+};
+
+static DEFINE_PER_CPU(struct spm_driver_data *, cpu_spm_drv);
+
+typedef int (*idle_fn)(int);
+static DEFINE_PER_CPU(idle_fn*, qcom_idle_ops);
+
+static inline void spm_register_write(struct spm_driver_data *drv,
+ enum spm_reg reg, u32 val)
+{
+ if (drv->reg_data->reg_offset[reg])
+ writel_relaxed(val, drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+}
+
+/* Ensure the write has taken effect before returning */
+static inline void spm_register_write_sync(struct spm_driver_data *drv,
+ enum spm_reg reg, u32 val)
+{
+ u32 ret;
+
+ if (!drv->reg_data->reg_offset[reg])
+ return;
+
+ do {
+ writel_relaxed(val, drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+ ret = readl_relaxed(drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+ if (ret == val)
+ break;
+ cpu_relax();
+ } while (1);
+}
+
+static inline u32 spm_register_read(struct spm_driver_data *drv,
+ enum spm_reg reg)
+{
+ return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]);
+}
+
+static void spm_set_low_power_mode(struct spm_driver_data *drv,
+ enum pm_sleep_mode mode)
+{
+ u32 start_index;
+ u32 ctl_val;
+
+ start_index = drv->reg_data->start_index[mode];
+
+ ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL);
+ ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
+ ctl_val |= start_index << SPM_CTL_INDEX_SHIFT;
+ ctl_val |= SPM_CTL_EN;
+ spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
+}
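The control-register composition is a plain field insert: bits 10:4 hold the sequence start index (mask 0x7f) and bit 0 enables the state machine. A standalone sketch, using the 8974/8084 SPC start index from the table above:

    #include <assert.h>
    #include <stdint.h>

    #define SPM_CTL_INDEX       0x7f
    #define SPM_CTL_INDEX_SHIFT 4
    #define SPM_CTL_EN          0x1

    static uint32_t spm_ctl(uint32_t old, uint32_t start_index)
    {
        old &= ~(uint32_t)(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
        old |= start_index << SPM_CTL_INDEX_SHIFT;
        return old | SPM_CTL_EN;
    }

    int main(void)
    {
        /* start index 3: the SPC entry point for 8974/8084 above */
        assert(spm_ctl(0, 3) == 0x31);
        return 0;
    }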
+
+static int qcom_pm_collapse(unsigned long int unused)
+{
+ qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
+
+ /*
+ * Returns here only if there was a pending interrupt and we did not
+ * power down as a result.
+ */
+ return -1;
+}
+
+static int qcom_cpu_spc(int cpu)
+{
+ int ret;
+ struct spm_driver_data *drv = per_cpu(cpu_spm_drv, cpu);
+
+ spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC);
+ ret = cpu_suspend(0, qcom_pm_collapse);
+ /*
+ * ARM common code executes WFI without calling into our driver and
+ * if the SPM mode is not reset, then we may accidentally power down the
+ * cpu when we intended only to gate the cpu clock.
+ * Ensure the state is set to standby before returning.
+ */
+ spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+
+ return ret;
+}
+
+static int qcom_idle_enter(int cpu, unsigned long index)
+{
+ return per_cpu(qcom_idle_ops, cpu)[index](cpu);
+}
+
+static const struct of_device_id qcom_idle_state_match[] __initconst = {
+ { .compatible = "qcom,idle-state-spc", .data = qcom_cpu_spc },
+ { },
+};
+
+static int __init qcom_cpuidle_init(struct device_node *cpu_node, int cpu)
+{
+ const struct of_device_id *match_id;
+ struct device_node *state_node;
+ int i;
+ int state_count = 1;
+ idle_fn idle_fns[CPUIDLE_STATE_MAX];
+ idle_fn *fns;
+ cpumask_t mask;
+ bool use_scm_power_down = false;
+
+ for (i = 0; ; i++) {
+ state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+ if (!state_node)
+ break;
+
+ if (!of_device_is_available(state_node))
+ continue;
+
+ if (i == CPUIDLE_STATE_MAX) {
+ pr_warn("%s: cpuidle states reached max possible\n",
+ __func__);
+ break;
+ }
+
+ match_id = of_match_node(qcom_idle_state_match, state_node);
+ if (!match_id)
+ return -ENODEV;
+
+ idle_fns[state_count] = match_id->data;
+
+ /* Check if any of the states allow power down */
+ if (match_id->data == qcom_cpu_spc)
+ use_scm_power_down = true;
+
+ state_count++;
+ }
+
+ if (state_count == 1)
+ goto check_spm;
+
+ fns = devm_kcalloc(get_cpu_device(cpu), state_count, sizeof(*fns),
+ GFP_KERNEL);
+ if (!fns)
+ return -ENOMEM;
+
+ for (i = 1; i < state_count; i++)
+ fns[i] = idle_fns[i];
+
+ if (use_scm_power_down) {
+ /* We have at least one power down mode */
+ cpumask_clear(&mask);
+ cpumask_set_cpu(cpu, &mask);
+ qcom_scm_set_warm_boot_addr(cpu_resume_arm, &mask);
+ }
+
+ per_cpu(qcom_idle_ops, cpu) = fns;
+
+ /*
+ * SPM probe for the cpu should have happened by now; if the
+ * SPM device does not exist, return -ENXIO to indicate that the
+ * cpu does not support idle states.
+ */
+check_spm:
+ return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
+}
+
+static struct cpuidle_ops qcom_cpuidle_ops __initdata = {
+ .suspend = qcom_idle_enter,
+ .init = qcom_cpuidle_init,
+};
+
+CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v1, "qcom,kpss-acc-v1", &qcom_cpuidle_ops);
+CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v2, "qcom,kpss-acc-v2", &qcom_cpuidle_ops);
+
+static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
+ int *spm_cpu)
+{
+ struct spm_driver_data *drv = NULL;
+ struct device_node *cpu_node, *saw_node;
+ int cpu;
+ bool found;
+
+ for_each_possible_cpu(cpu) {
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ continue;
+ saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ found = (saw_node == pdev->dev.of_node);
+ of_node_put(saw_node);
+ of_node_put(cpu_node);
+ if (found)
+ break;
+ }
+
+ if (found) {
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+ if (drv)
+ *spm_cpu = cpu;
+ }
+
+ return drv;
+}
+
+static const struct of_device_id spm_match_table[] = {
+ { .compatible = "qcom,msm8974-saw2-v2.1-cpu",
+ .data = &spm_reg_8974_8084_cpu },
+ { .compatible = "qcom,apq8084-saw2-v2.1-cpu",
+ .data = &spm_reg_8974_8084_cpu },
+ { .compatible = "qcom,apq8064-saw2-v1.1-cpu",
+ .data = &spm_reg_8064_cpu },
+ { },
+};
+
+static int spm_dev_probe(struct platform_device *pdev)
+{
+ struct spm_driver_data *drv;
+ struct resource *res;
+ const struct of_device_id *match_id;
+ void __iomem *addr;
+ int cpu;
+
+ drv = spm_get_drv(pdev, &cpu);
+ if (!drv)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drv->reg_base))
+ return PTR_ERR(drv->reg_base);
+
+ match_id = of_match_node(spm_match_table, pdev->dev.of_node);
+ if (!match_id)
+ return -ENODEV;
+
+ drv->reg_data = match_id->data;
+
+ /* Write the SPM sequences first.. */
+ addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
+ __iowrite32_copy(addr, drv->reg_data->seq,
+ ARRAY_SIZE(drv->reg_data->seq) / 4);
+
+ /*
+ * ..and then the control registers.
+	 * On some SoCs, if the control registers are written first while the
+	 * CPU is held in reset, the reset signal could trigger the SPM state
+	 * machine before the sequences are completely written.
+ */
+ spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
+ spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
+ spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_0,
+ drv->reg_data->pmic_data[0]);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_1,
+ drv->reg_data->pmic_data[1]);
+
+ /* Set up Standby as the default low power mode */
+ spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+
+ per_cpu(cpu_spm_drv, cpu) = drv;
+
+ return 0;
+}
+
+static struct platform_driver spm_driver = {
+ .probe = spm_dev_probe,
+ .driver = {
+ .name = "saw",
+ .of_match_table = spm_match_table,
+ },
+};
+module_platform_driver(spm_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SAW power controller driver");
+MODULE_ALIAS("platform:saw");
diff --git a/kernel/drivers/soc/rockchip/Kconfig b/kernel/drivers/soc/rockchip/Kconfig
new file mode 100644
index 000000000..7140ff825
--- /dev/null
+++ b/kernel/drivers/soc/rockchip/Kconfig
@@ -0,0 +1,18 @@
+if ARCH_ROCKCHIP || COMPILE_TEST
+
+#
+# Rockchip SoC drivers
+#
+config ROCKCHIP_PM_DOMAINS
+ bool "Rockchip generic power domain"
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ help
+ Say y here to enable power domain support.
+	  In order to meet high performance and low power requirements, a
+	  power management unit is designed for saving power when the RK3288
+	  is in low power mode. The RK3288 PMU is dedicated to managing the
+	  power of the whole chip.
+
+ If unsure, say N.
+
+endif
diff --git a/kernel/drivers/soc/rockchip/Makefile b/kernel/drivers/soc/rockchip/Makefile
new file mode 100644
index 000000000..3d73d0672
--- /dev/null
+++ b/kernel/drivers/soc/rockchip/Makefile
@@ -0,0 +1,4 @@
+#
+# Rockchip SoC drivers
+#
+obj-$(CONFIG_ROCKCHIP_PM_DOMAINS) += pm_domains.o
diff --git a/kernel/drivers/soc/rockchip/pm_domains.c b/kernel/drivers/soc/rockchip/pm_domains.c
new file mode 100644
index 000000000..534c58937
--- /dev/null
+++ b/kernel/drivers/soc/rockchip/pm_domains.c
@@ -0,0 +1,490 @@
+/*
+ * Rockchip Generic power domain support.
+ *
+ * Copyright (c) 2015 ROCKCHIP, Co. Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/power/rk3288-power.h>
+
+struct rockchip_domain_info {
+ int pwr_mask;
+ int status_mask;
+ int req_mask;
+ int idle_mask;
+ int ack_mask;
+};
+
+struct rockchip_pmu_info {
+ u32 pwr_offset;
+ u32 status_offset;
+ u32 req_offset;
+ u32 idle_offset;
+ u32 ack_offset;
+
+ u32 core_pwrcnt_offset;
+ u32 gpu_pwrcnt_offset;
+
+ unsigned int core_power_transition_time;
+ unsigned int gpu_power_transition_time;
+
+ int num_domains;
+ const struct rockchip_domain_info *domain_info;
+};
+
+struct rockchip_pm_domain {
+ struct generic_pm_domain genpd;
+ const struct rockchip_domain_info *info;
+ struct rockchip_pmu *pmu;
+ int num_clks;
+ struct clk *clks[];
+};
+
+struct rockchip_pmu {
+ struct device *dev;
+ struct regmap *regmap;
+ const struct rockchip_pmu_info *info;
+ struct mutex mutex; /* mutex lock for pmu */
+ struct genpd_onecell_data genpd_data;
+ struct generic_pm_domain *domains[];
+};
+
+#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)
+
+#define DOMAIN(pwr, status, req, idle, ack) \
+{ \
+ .pwr_mask = BIT(pwr), \
+ .status_mask = BIT(status), \
+ .req_mask = BIT(req), \
+ .idle_mask = BIT(idle), \
+ .ack_mask = BIT(ack), \
+}
+
+#define DOMAIN_RK3288(pwr, status, req) \
+ DOMAIN(pwr, status, req, req, (req) + 16)
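+
+/*
+ * Worked example (editorial note, not in the original source): the GPU
+ * entry used later in this file, DOMAIN_RK3288(9, 9, 2), expands to
+ * pwr_mask = BIT(9), status_mask = BIT(9), req_mask = idle_mask = BIT(2)
+ * and ack_mask = BIT(18), since the RK3288 PMU places each ack bit
+ * 16 bits above its request bit.
+ */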
+
+static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+ const struct rockchip_domain_info *pd_info = pd->info;
+ unsigned int val;
+
+ regmap_read(pmu->regmap, pmu->info->idle_offset, &val);
+ return (val & pd_info->idle_mask) == pd_info->idle_mask;
+}
+
+static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
+ bool idle)
+{
+ const struct rockchip_domain_info *pd_info = pd->info;
+ struct rockchip_pmu *pmu = pd->pmu;
+ unsigned int val;
+
+ regmap_update_bits(pmu->regmap, pmu->info->req_offset,
+ pd_info->req_mask, idle ? -1U : 0);
+
+ dsb(sy);
+
+ do {
+ regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
+ } while ((val & pd_info->ack_mask) != (idle ? pd_info->ack_mask : 0));
+
+ while (rockchip_pmu_domain_is_idle(pd) != idle)
+ cpu_relax();
+
+ return 0;
+}
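+
+/*
+ * Editorial note: the function above implements a request/acknowledge
+ * handshake; the PMU first mirrors the request bits in the ack register
+ * and the bus interface then reports idle in the status register, which
+ * is why both are polled in turn.
+ */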
+
+static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+ unsigned int val;
+
+ regmap_read(pmu->regmap, pmu->info->status_offset, &val);
+
+ /* 1'b0: power on, 1'b1: power off */
+ return !(val & pd->info->status_mask);
+}
+
+static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
+ bool on)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+
+ regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
+ pd->info->pwr_mask, on ? 0 : -1U);
+
+ dsb(sy);
+
+ while (rockchip_pmu_domain_is_on(pd) != on)
+ cpu_relax();
+}
+
+static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
+{
+ int i;
+
+ mutex_lock(&pd->pmu->mutex);
+
+ if (rockchip_pmu_domain_is_on(pd) != power_on) {
+ for (i = 0; i < pd->num_clks; i++)
+ clk_enable(pd->clks[i]);
+
+ if (!power_on) {
+ /* FIXME: add code to save AXI_QOS */
+
+ /* if powering down, idle request to NIU first */
+ rockchip_pmu_set_idle_request(pd, true);
+ }
+
+ rockchip_do_pmu_set_power_domain(pd, power_on);
+
+ if (power_on) {
+ /* if powering up, leave idle mode */
+ rockchip_pmu_set_idle_request(pd, false);
+
+ /* FIXME: add code to restore AXI_QOS */
+ }
+
+ for (i = pd->num_clks - 1; i >= 0; i--)
+ clk_disable(pd->clks[i]);
+ }
+
+ mutex_unlock(&pd->pmu->mutex);
+ return 0;
+}
+
+static int rockchip_pd_power_on(struct generic_pm_domain *domain)
+{
+ struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
+
+ return rockchip_pd_power(pd, true);
+}
+
+static int rockchip_pd_power_off(struct generic_pm_domain *domain)
+{
+ struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
+
+ return rockchip_pd_power(pd, false);
+}
+
+static int rockchip_pd_attach_dev(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ struct clk *clk;
+ int i;
+ int error;
+
+ dev_dbg(dev, "attaching to power domain '%s'\n", genpd->name);
+
+ error = pm_clk_create(dev);
+ if (error) {
+ dev_err(dev, "pm_clk_create failed %d\n", error);
+ return error;
+ }
+
+ i = 0;
+ while ((clk = of_clk_get(dev->of_node, i++)) && !IS_ERR(clk)) {
+ dev_dbg(dev, "adding clock '%pC' to list of PM clocks\n", clk);
+ error = pm_clk_add_clk(dev, clk);
+ if (error) {
+ dev_err(dev, "pm_clk_add_clk failed %d\n", error);
+ clk_put(clk);
+ pm_clk_destroy(dev);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ dev_dbg(dev, "detaching from power domain '%s'\n", genpd->name);
+
+ pm_clk_destroy(dev);
+}
+
+static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
+ struct device_node *node)
+{
+ const struct rockchip_domain_info *pd_info;
+ struct rockchip_pm_domain *pd;
+ struct clk *clk;
+ int clk_cnt;
+ int i;
+ u32 id;
+ int error;
+
+ error = of_property_read_u32(node, "reg", &id);
+ if (error) {
+ dev_err(pmu->dev,
+ "%s: failed to retrieve domain id (reg): %d\n",
+ node->name, error);
+ return -EINVAL;
+ }
+
+ if (id >= pmu->info->num_domains) {
+ dev_err(pmu->dev, "%s: invalid domain id %d\n",
+ node->name, id);
+ return -EINVAL;
+ }
+
+ pd_info = &pmu->info->domain_info[id];
+ if (!pd_info) {
+ dev_err(pmu->dev, "%s: undefined domain id %d\n",
+ node->name, id);
+ return -EINVAL;
+ }
+
+ clk_cnt = of_count_phandle_with_args(node, "clocks", "#clock-cells");
+ pd = devm_kzalloc(pmu->dev,
+ sizeof(*pd) + clk_cnt * sizeof(pd->clks[0]),
+ GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->info = pd_info;
+ pd->pmu = pmu;
+
+ for (i = 0; i < clk_cnt; i++) {
+ clk = of_clk_get(node, i);
+ if (IS_ERR(clk)) {
+ error = PTR_ERR(clk);
+ dev_err(pmu->dev,
+ "%s: failed to get clk at index %d: %d\n",
+ node->name, i, error);
+ goto err_out;
+ }
+
+ error = clk_prepare(clk);
+ if (error) {
+ dev_err(pmu->dev,
+ "%s: failed to prepare clk %pC (index %d): %d\n",
+ node->name, clk, i, error);
+ clk_put(clk);
+ goto err_out;
+ }
+
+ pd->clks[pd->num_clks++] = clk;
+
+ dev_dbg(pmu->dev, "added clock '%pC' to domain '%s'\n",
+ clk, node->name);
+ }
+
+ error = rockchip_pd_power(pd, true);
+ if (error) {
+ dev_err(pmu->dev,
+ "failed to power on domain '%s': %d\n",
+ node->name, error);
+ goto err_out;
+ }
+
+ pd->genpd.name = node->name;
+ pd->genpd.power_off = rockchip_pd_power_off;
+ pd->genpd.power_on = rockchip_pd_power_on;
+ pd->genpd.attach_dev = rockchip_pd_attach_dev;
+ pd->genpd.detach_dev = rockchip_pd_detach_dev;
+ pd->genpd.flags = GENPD_FLAG_PM_CLK;
+ pm_genpd_init(&pd->genpd, NULL, false);
+
+ pmu->genpd_data.domains[id] = &pd->genpd;
+ return 0;
+
+err_out:
+ while (--i >= 0) {
+ clk_unprepare(pd->clks[i]);
+ clk_put(pd->clks[i]);
+ }
+ return error;
+}
+
+static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
+{
+ int i;
+
+ for (i = 0; i < pd->num_clks; i++) {
+ clk_unprepare(pd->clks[i]);
+ clk_put(pd->clks[i]);
+ }
+
+	/* protect the zeroing of pd->num_clks */
+ mutex_lock(&pd->pmu->mutex);
+ pd->num_clks = 0;
+ mutex_unlock(&pd->pmu->mutex);
+
+ /* devm will free our memory */
+}
+
+static void rockchip_pm_domain_cleanup(struct rockchip_pmu *pmu)
+{
+ struct generic_pm_domain *genpd;
+ struct rockchip_pm_domain *pd;
+ int i;
+
+ for (i = 0; i < pmu->genpd_data.num_domains; i++) {
+ genpd = pmu->genpd_data.domains[i];
+ if (genpd) {
+ pd = to_rockchip_pd(genpd);
+ rockchip_pm_remove_one_domain(pd);
+ }
+ }
+
+ /* devm will free our memory */
+}
+
+static void rockchip_configure_pd_cnt(struct rockchip_pmu *pmu,
+ u32 domain_reg_offset,
+ unsigned int count)
+{
+ /* First configure domain power down transition count ... */
+ regmap_write(pmu->regmap, domain_reg_offset, count);
+ /* ... and then power up count. */
+ regmap_write(pmu->regmap, domain_reg_offset + 4, count);
+}
+
+static int rockchip_pm_domain_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *node;
+ struct device *parent;
+ struct rockchip_pmu *pmu;
+ const struct of_device_id *match;
+ const struct rockchip_pmu_info *pmu_info;
+ int error;
+
+ if (!np) {
+ dev_err(dev, "device tree node not found\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!match || !match->data) {
+ dev_err(dev, "missing pmu data\n");
+ return -EINVAL;
+ }
+
+ pmu_info = match->data;
+
+ pmu = devm_kzalloc(dev,
+ sizeof(*pmu) +
+ pmu_info->num_domains * sizeof(pmu->domains[0]),
+ GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ pmu->dev = &pdev->dev;
+ mutex_init(&pmu->mutex);
+
+ pmu->info = pmu_info;
+
+ pmu->genpd_data.domains = pmu->domains;
+ pmu->genpd_data.num_domains = pmu_info->num_domains;
+
+ parent = dev->parent;
+ if (!parent) {
+ dev_err(dev, "no parent for syscon devices\n");
+ return -ENODEV;
+ }
+
+ pmu->regmap = syscon_node_to_regmap(parent->of_node);
+
+ /*
+ * Configure power up and down transition delays for CORE
+ * and GPU domains.
+ */
+ rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
+ pmu_info->core_power_transition_time);
+ rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
+ pmu_info->gpu_power_transition_time);
+
+ error = -ENODEV;
+
+ for_each_available_child_of_node(np, node) {
+ error = rockchip_pm_add_one_domain(pmu, node);
+ if (error) {
+ dev_err(dev, "failed to handle node %s: %d\n",
+ node->name, error);
+ goto err_out;
+ }
+ }
+
+ if (error) {
+ dev_dbg(dev, "no power domains defined\n");
+ goto err_out;
+ }
+
+ of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+
+ return 0;
+
+err_out:
+ rockchip_pm_domain_cleanup(pmu);
+ return error;
+}
+
+static const struct rockchip_domain_info rk3288_pm_domains[] = {
+ [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4),
+ [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9),
+ [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3),
+ [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2),
+};
+
+static const struct rockchip_pmu_info rk3288_pmu = {
+ .pwr_offset = 0x08,
+ .status_offset = 0x0c,
+ .req_offset = 0x10,
+ .idle_offset = 0x14,
+ .ack_offset = 0x14,
+
+ .core_pwrcnt_offset = 0x34,
+ .gpu_pwrcnt_offset = 0x3c,
+
+ .core_power_transition_time = 24, /* 1us */
+ .gpu_power_transition_time = 24, /* 1us */
+
+ .num_domains = ARRAY_SIZE(rk3288_pm_domains),
+ .domain_info = rk3288_pm_domains,
+};
+
+static const struct of_device_id rockchip_pm_domain_dt_match[] = {
+ {
+ .compatible = "rockchip,rk3288-power-controller",
+ .data = (void *)&rk3288_pmu,
+ },
+ { /* sentinel */ },
+};
+
+static struct platform_driver rockchip_pm_domain_driver = {
+ .probe = rockchip_pm_domain_probe,
+ .driver = {
+ .name = "rockchip-pm-domain",
+ .of_match_table = rockchip_pm_domain_dt_match,
+ /*
+ * We can't forcibly eject devices form power domain,
+		 * We can't forcibly eject devices from a power domain,
+		 * so we can't really remove power domains once they
+		 * have been added.
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rockchip_pm_domain_drv_register(void)
+{
+ return platform_driver_register(&rockchip_pm_domain_driver);
+}
+postcore_initcall(rockchip_pm_domain_drv_register);
diff --git a/kernel/drivers/soc/sunxi/Kconfig b/kernel/drivers/soc/sunxi/Kconfig
new file mode 100644
index 000000000..353b07e40
--- /dev/null
+++ b/kernel/drivers/soc/sunxi/Kconfig
@@ -0,0 +1,10 @@
+#
+# Allwinner sunXi SoC drivers
+#
+config SUNXI_SRAM
+ bool
+ default ARCH_SUNXI
+ help
+	  Say y here to enable SRAM controller support. This
+	  device is responsible for mapping the SRAM in the sunXi SoCs,
+	  either to the CPU/DMA or to the devices.
diff --git a/kernel/drivers/soc/sunxi/Makefile b/kernel/drivers/soc/sunxi/Makefile
new file mode 100644
index 000000000..4cf9dbdf3
--- /dev/null
+++ b/kernel/drivers/soc/sunxi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SUNXI_SRAM) += sunxi_sram.o
diff --git a/kernel/drivers/soc/sunxi/sunxi_sram.c b/kernel/drivers/soc/sunxi/sunxi_sram.c
new file mode 100644
index 000000000..bc52670c8
--- /dev/null
+++ b/kernel/drivers/soc/sunxi/sunxi_sram.c
@@ -0,0 +1,284 @@
+/*
+ * Allwinner SoCs SRAM Controller Driver
+ *
+ * Copyright (C) 2015 Maxime Ripard
+ *
+ * Author: Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <linux/soc/sunxi/sunxi_sram.h>
+
+struct sunxi_sram_func {
+ char *func;
+ u8 val;
+};
+
+struct sunxi_sram_data {
+ char *name;
+ u8 reg;
+ u8 offset;
+ u8 width;
+ struct sunxi_sram_func *func;
+ struct list_head list;
+};
+
+struct sunxi_sram_desc {
+ struct sunxi_sram_data data;
+ bool claimed;
+};
+
+#define SUNXI_SRAM_MAP(_val, _func) \
+ { \
+ .func = _func, \
+ .val = _val, \
+ }
+
+#define SUNXI_SRAM_DATA(_name, _reg, _off, _width, ...) \
+ { \
+ .name = _name, \
+ .reg = _reg, \
+ .offset = _off, \
+ .width = _width, \
+ .func = (struct sunxi_sram_func[]){ \
+ __VA_ARGS__, { } }, \
+ }
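+
+/*
+ * Worked example (editorial note, not in the original source): the
+ * "A3-A4" entry below describes a 2-bit field at bits [5:4] of register
+ * 0x4, where value 0 maps the section to the CPU and value 1 to the
+ * EMAC.
+ */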
+
+static struct sunxi_sram_desc sun4i_a10_sram_a3_a4 = {
+ .data = SUNXI_SRAM_DATA("A3-A4", 0x4, 0x4, 2,
+ SUNXI_SRAM_MAP(0, "cpu"),
+ SUNXI_SRAM_MAP(1, "emac")),
+};
+
+static struct sunxi_sram_desc sun4i_a10_sram_d = {
+ .data = SUNXI_SRAM_DATA("D", 0x4, 0x0, 1,
+ SUNXI_SRAM_MAP(0, "cpu"),
+ SUNXI_SRAM_MAP(1, "usb-otg")),
+};
+
+static const struct of_device_id sunxi_sram_dt_ids[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-sram-a3-a4",
+ .data = &sun4i_a10_sram_a3_a4.data,
+ },
+ {
+ .compatible = "allwinner,sun4i-a10-sram-d",
+ .data = &sun4i_a10_sram_d.data,
+ },
+ {}
+};
+
+static struct device *sram_dev;
+static LIST_HEAD(claimed_sram);
+static DEFINE_SPINLOCK(sram_lock);
+static void __iomem *base;
+
+static int sunxi_sram_show(struct seq_file *s, void *data)
+{
+ struct device_node *sram_node, *section_node;
+ const struct sunxi_sram_data *sram_data;
+ const struct of_device_id *match;
+ struct sunxi_sram_func *func;
+ const __be32 *sram_addr_p, *section_addr_p;
+ u32 val;
+
+ seq_puts(s, "Allwinner sunXi SRAM\n");
+ seq_puts(s, "--------------------\n\n");
+
+ for_each_child_of_node(sram_dev->of_node, sram_node) {
+ sram_addr_p = of_get_address(sram_node, 0, NULL, NULL);
+
+ seq_printf(s, "sram@%08x\n",
+ be32_to_cpu(*sram_addr_p));
+
+ for_each_child_of_node(sram_node, section_node) {
+ match = of_match_node(sunxi_sram_dt_ids, section_node);
+ if (!match)
+ continue;
+ sram_data = match->data;
+
+ section_addr_p = of_get_address(section_node, 0,
+ NULL, NULL);
+
+ seq_printf(s, "\tsection@%04x\t(%s)\n",
+ be32_to_cpu(*section_addr_p),
+ sram_data->name);
+
+ val = readl(base + sram_data->reg);
+ val >>= sram_data->offset;
+			val &= GENMASK(sram_data->width - 1, 0);
+
+ for (func = sram_data->func; func->func; func++) {
+ seq_printf(s, "\t\t%s%c\n", func->func,
+ func->val == val ? '*' : ' ');
+ }
+ }
+
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+
+static int sunxi_sram_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sunxi_sram_show, inode->i_private);
+}
+
+static const struct file_operations sunxi_sram_fops = {
+ .open = sunxi_sram_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static inline struct sunxi_sram_desc *to_sram_desc(const struct sunxi_sram_data *data)
+{
+ return container_of(data, struct sunxi_sram_desc, data);
+}
+
+static const struct sunxi_sram_data *sunxi_sram_of_parse(struct device_node *node,
+ unsigned int *value)
+{
+ const struct of_device_id *match;
+ struct of_phandle_args args;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(node, "allwinner,sram", 1, 0,
+ &args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!of_device_is_available(args.np)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ if (value)
+ *value = args.args[0];
+
+ match = of_match_node(sunxi_sram_dt_ids, args.np);
+ if (!match) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ of_node_put(args.np);
+ return match->data;
+
+err:
+ of_node_put(args.np);
+ return ERR_PTR(ret);
+}
+
+int sunxi_sram_claim(struct device *dev)
+{
+ const struct sunxi_sram_data *sram_data;
+ struct sunxi_sram_desc *sram_desc;
+ unsigned int device;
+ u32 val, mask;
+
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	if (!base)
+		return -EPROBE_DEFER;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ sram_data = sunxi_sram_of_parse(dev->of_node, &device);
+ if (IS_ERR(sram_data))
+ return PTR_ERR(sram_data);
+
+ sram_desc = to_sram_desc(sram_data);
+
+ spin_lock(&sram_lock);
+
+ if (sram_desc->claimed) {
+ spin_unlock(&sram_lock);
+ return -EBUSY;
+ }
+
+	mask = GENMASK(sram_data->offset + sram_data->width - 1,
+		       sram_data->offset);
+ val = readl(base + sram_data->reg);
+ val &= ~mask;
+ writel(val | ((device << sram_data->offset) & mask),
+ base + sram_data->reg);
+
+ spin_unlock(&sram_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(sunxi_sram_claim);
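+
+/*
+ * Editorial usage sketch (not part of the original patch): a client
+ * driver referencing this controller through an "allwinner,sram"
+ * phandle claims its section in probe, getting -EPROBE_DEFER until
+ * this controller has probed:
+ *
+ *	err = sunxi_sram_claim(&pdev->dev);
+ *	if (err)
+ *		return err;
+ *
+ * and calls sunxi_sram_release(&pdev->dev) on the remove path.
+ */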
+
+int sunxi_sram_release(struct device *dev)
+{
+ const struct sunxi_sram_data *sram_data;
+ struct sunxi_sram_desc *sram_desc;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ sram_data = sunxi_sram_of_parse(dev->of_node, NULL);
+ if (IS_ERR(sram_data))
+ return -EINVAL;
+
+ sram_desc = to_sram_desc(sram_data);
+
+ spin_lock(&sram_lock);
+ sram_desc->claimed = false;
+ spin_unlock(&sram_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(sunxi_sram_release);
+
+static int sunxi_sram_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct dentry *d;
+
+ sram_dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+ d = debugfs_create_file("sram", S_IRUGO, NULL, NULL,
+ &sunxi_sram_fops);
+ if (!d)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static const struct of_device_id sunxi_sram_dt_match[] = {
+ { .compatible = "allwinner,sun4i-a10-sram-controller" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sunxi_sram_dt_match);
+
+static struct platform_driver sunxi_sram_driver = {
+ .driver = {
+ .name = "sunxi-sram",
+ .of_match_table = sunxi_sram_dt_match,
+ },
+ .probe = sunxi_sram_probe,
+};
+module_platform_driver(sunxi_sram_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner sunXi SRAM Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/soc/tegra/Makefile b/kernel/drivers/soc/tegra/Makefile
index cdaad9d53..ae857ff7d 100644
--- a/kernel/drivers/soc/tegra/Makefile
+++ b/kernel/drivers/soc/tegra/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_ARCH_TEGRA) += fuse/
+obj-y += fuse/
-obj-$(CONFIG_ARCH_TEGRA) += common.o
-obj-$(CONFIG_ARCH_TEGRA) += pmc.o
+obj-y += common.o
+obj-y += pmc.o
diff --git a/kernel/drivers/soc/tegra/common.c b/kernel/drivers/soc/tegra/common.c
index a71cb74f3..cd8f41351 100644
--- a/kernel/drivers/soc/tegra/common.c
+++ b/kernel/drivers/soc/tegra/common.c
@@ -15,6 +15,8 @@ static const struct of_device_id tegra_machine_match[] = {
{ .compatible = "nvidia,tegra30", },
{ .compatible = "nvidia,tegra114", },
{ .compatible = "nvidia,tegra124", },
+ { .compatible = "nvidia,tegra132", },
+ { .compatible = "nvidia,tegra210", },
{ }
};
diff --git a/kernel/drivers/soc/tegra/fuse/Makefile b/kernel/drivers/soc/tegra/fuse/Makefile
index 3af357da9..21bc27580 100644
--- a/kernel/drivers/soc/tegra/fuse/Makefile
+++ b/kernel/drivers/soc/tegra/fuse/Makefile
@@ -6,3 +6,5 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += speedo-tegra20.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += speedo-tegra30.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += speedo-tegra114.o
obj-$(CONFIG_ARCH_TEGRA_124_SOC) += speedo-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_132_SOC) += speedo-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_210_SOC) += speedo-tegra210.o
diff --git a/kernel/drivers/soc/tegra/fuse/fuse-tegra.c b/kernel/drivers/soc/tegra/fuse/fuse-tegra.c
index c0d660f1a..de2c1bfe2 100644
--- a/kernel/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/kernel/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -15,9 +15,10 @@
*
*/
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kobject.h>
-#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -28,8 +29,6 @@
#include "fuse.h"
-static u32 (*fuse_readl)(const unsigned int offset);
-static int fuse_size;
struct tegra_sku_info tegra_sku_info;
EXPORT_SYMBOL(tegra_sku_info);
@@ -42,11 +41,11 @@ static const char *tegra_revision_name[TEGRA_REVISION_MAX] = {
[TEGRA_REVISION_A04] = "A04",
};
-static u8 fuse_readb(const unsigned int offset)
+static u8 fuse_readb(struct tegra_fuse *fuse, unsigned int offset)
{
u32 val;
- val = fuse_readl(round_down(offset, 4));
+ val = fuse->read(fuse, round_down(offset, 4));
val >>= (offset % 4) * 8;
val &= 0xff;
@@ -54,19 +53,21 @@ static u8 fuse_readb(const unsigned int offset)
}
static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t pos, size_t size)
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t size)
{
+ struct device *dev = kobj_to_dev(kobj);
+ struct tegra_fuse *fuse = dev_get_drvdata(dev);
int i;
- if (pos < 0 || pos >= fuse_size)
+ if (pos < 0 || pos >= attr->size)
return 0;
- if (size > fuse_size - pos)
- size = fuse_size - pos;
+ if (size > attr->size - pos)
+ size = attr->size - pos;
for (i = 0; i < size; i++)
- buf[i] = fuse_readb(pos + i);
+ buf[i] = fuse_readb(fuse, pos + i);
return i;
}
@@ -76,89 +77,239 @@ static struct bin_attribute fuse_bin_attr = {
.read = fuse_read,
};
+static int tegra_fuse_create_sysfs(struct device *dev, unsigned int size,
+ const struct tegra_fuse_info *info)
+{
+ fuse_bin_attr.size = size;
+
+ return device_create_bin_file(dev, &fuse_bin_attr);
+}
+
static const struct of_device_id car_match[] __initconst = {
{ .compatible = "nvidia,tegra20-car", },
{ .compatible = "nvidia,tegra30-car", },
{ .compatible = "nvidia,tegra114-car", },
{ .compatible = "nvidia,tegra124-car", },
{ .compatible = "nvidia,tegra132-car", },
+ { .compatible = "nvidia,tegra210-car", },
{},
};
-static void tegra_enable_fuse_clk(void __iomem *base)
+static struct tegra_fuse *fuse = &(struct tegra_fuse) {
+ .base = NULL,
+ .soc = NULL,
+};
+
+static const struct of_device_id tegra_fuse_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+ { .compatible = "nvidia,tegra210-efuse", .data = &tegra210_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+ { .compatible = "nvidia,tegra132-efuse", .data = &tegra124_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+ { .compatible = "nvidia,tegra124-efuse", .data = &tegra124_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+ { .compatible = "nvidia,tegra114-efuse", .data = &tegra114_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ { .compatible = "nvidia,tegra30-efuse", .data = &tegra30_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ { .compatible = "nvidia,tegra20-efuse", .data = &tegra20_fuse_soc },
+#endif
+ { /* sentinel */ }
+};
+
+static int tegra_fuse_probe(struct platform_device *pdev)
{
- u32 reg;
+ void __iomem *base = fuse->base;
+ struct resource *res;
+ int err;
+
+ /* take over the memory region from the early initialization */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fuse->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fuse->base))
+ return PTR_ERR(fuse->base);
+
+ fuse->clk = devm_clk_get(&pdev->dev, "fuse");
+ if (IS_ERR(fuse->clk)) {
+		dev_err(&pdev->dev, "failed to get FUSE clock: %ld\n",
+ PTR_ERR(fuse->clk));
+ return PTR_ERR(fuse->clk);
+ }
- reg = readl_relaxed(base + 0x48);
- reg |= 1 << 28;
- writel(reg, base + 0x48);
+ platform_set_drvdata(pdev, fuse);
+ fuse->dev = &pdev->dev;
- /*
- * Enable FUSE clock. This needs to be hardcoded because the clock
- * subsystem is not active during early boot.
- */
- reg = readl(base + 0x14);
- reg |= 1 << 7;
- writel(reg, base + 0x14);
+ if (fuse->soc->probe) {
+ err = fuse->soc->probe(fuse);
+ if (err < 0)
+ return err;
+ }
+
+ if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
+ fuse->soc->info))
+ return -ENODEV;
+
+ /* release the early I/O memory mapping */
+ iounmap(base);
+
+ return 0;
+}
+
+static struct platform_driver tegra_fuse_driver = {
+ .driver = {
+ .name = "tegra-fuse",
+ .of_match_table = tegra_fuse_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra_fuse_probe,
+};
+module_platform_driver(tegra_fuse_driver);
+
+bool __init tegra_fuse_read_spare(unsigned int spare)
+{
+ unsigned int offset = fuse->soc->info->spare + spare * 4;
+
+ return fuse->read_early(fuse, offset) & 1;
+}
+
+u32 __init tegra_fuse_read_early(unsigned int offset)
+{
+ return fuse->read_early(fuse, offset);
}
int tegra_fuse_readl(unsigned long offset, u32 *value)
{
- if (!fuse_readl)
+ if (!fuse->read)
return -EPROBE_DEFER;
- *value = fuse_readl(offset);
+ *value = fuse->read(fuse, offset);
return 0;
}
EXPORT_SYMBOL(tegra_fuse_readl);
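+
+/*
+ * Editorial usage sketch (not part of the original patch): a consumer
+ * polls this helper and defers probing until the FUSE driver has set
+ * fuse->read; MY_FUSE_OFFSET below is a hypothetical offset used only
+ * for illustration:
+ *
+ *	u32 value;
+ *	int err = tegra_fuse_readl(MY_FUSE_OFFSET, &value);
+ *	if (err == -EPROBE_DEFER)
+ *		return err;
+ */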
-int tegra_fuse_create_sysfs(struct device *dev, int size,
- u32 (*readl)(const unsigned int offset))
+static void tegra_enable_fuse_clk(void __iomem *base)
{
- if (fuse_size)
- return -ENODEV;
-
- fuse_bin_attr.size = size;
- fuse_bin_attr.read = fuse_read;
+ u32 reg;
- fuse_size = size;
- fuse_readl = readl;
+ reg = readl_relaxed(base + 0x48);
+ reg |= 1 << 28;
+ writel(reg, base + 0x48);
- return device_create_bin_file(dev, &fuse_bin_attr);
+ /*
+ * Enable FUSE clock. This needs to be hardcoded because the clock
+ * subsystem is not active during early boot.
+ */
+ reg = readl(base + 0x14);
+ reg |= 1 << 7;
+ writel(reg, base + 0x14);
}
static int __init tegra_init_fuse(void)
{
+ const struct of_device_id *match;
struct device_node *np;
- void __iomem *car_base;
-
- if (!soc_is_tegra())
- return 0;
+ struct resource regs;
tegra_init_apbmisc();
- np = of_find_matching_node(NULL, car_match);
- car_base = of_iomap(np, 0);
- if (car_base) {
- tegra_enable_fuse_clk(car_base);
- iounmap(car_base);
+ np = of_find_matching_node_and_match(NULL, tegra_fuse_match, &match);
+ if (!np) {
+ /*
+ * Fall back to legacy initialization for 32-bit ARM only. All
+ * 64-bit ARM device tree files for Tegra are required to have
+ * a FUSE node.
+ *
+ * This is for backwards-compatibility with old device trees
+ * that didn't contain a FUSE node.
+ */
+ if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+ u8 chip = tegra_get_chip_id();
+
+ regs.start = 0x7000f800;
+ regs.end = 0x7000fbff;
+ regs.flags = IORESOURCE_MEM;
+
+ switch (chip) {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ case TEGRA20:
+ fuse->soc = &tegra20_fuse_soc;
+ break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ case TEGRA30:
+ fuse->soc = &tegra30_fuse_soc;
+ break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+ case TEGRA114:
+ fuse->soc = &tegra114_fuse_soc;
+ break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+ case TEGRA124:
+ fuse->soc = &tegra124_fuse_soc;
+ break;
+#endif
+
+ default:
+ pr_warn("Unsupported SoC: %02x\n", chip);
+ break;
+ }
+ } else {
+ /*
+ * At this point we're not running on Tegra, so play
+ * nice with multi-platform kernels.
+ */
+ return 0;
+ }
} else {
- pr_err("Could not enable fuse clk. ioremap tegra car failed.\n");
+ /*
+ * Extract information from the device tree if we've found a
+ * matching node.
+ */
+ if (of_address_to_resource(np, 0, &regs) < 0) {
+ pr_err("failed to get FUSE register\n");
+ return -ENXIO;
+ }
+
+ fuse->soc = match->data;
+ }
+
+ np = of_find_matching_node(NULL, car_match);
+ if (np) {
+ void __iomem *base = of_iomap(np, 0);
+ if (base) {
+ tegra_enable_fuse_clk(base);
+ iounmap(base);
+ } else {
+ pr_err("failed to map clock registers\n");
+ return -ENXIO;
+ }
+ }
+
+ fuse->base = ioremap_nocache(regs.start, resource_size(&regs));
+ if (!fuse->base) {
+ pr_err("failed to map FUSE registers\n");
return -ENXIO;
}
- if (tegra_get_chip_id() == TEGRA20)
- tegra20_init_fuse_early();
- else
- tegra30_init_fuse_early();
+ fuse->soc->init(fuse);
- pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n",
+ pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n",
tegra_revision_name[tegra_sku_info.revision],
tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id,
- tegra_sku_info.core_process_id);
- pr_debug("Tegra CPU Speedo ID %d, Soc Speedo ID %d\n",
- tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
+ tegra_sku_info.soc_process_id);
+ pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n",
+ tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
return 0;
}
diff --git a/kernel/drivers/soc/tegra/fuse/fuse-tegra20.c b/kernel/drivers/soc/tegra/fuse/fuse-tegra20.c
index 5eff6f097..294413a96 100644
--- a/kernel/drivers/soc/tegra/fuse/fuse-tegra20.c
+++ b/kernel/drivers/soc/tegra/fuse/fuse-tegra20.c
@@ -34,157 +34,107 @@
#include "fuse.h"
#define FUSE_BEGIN 0x100
-#define FUSE_SIZE 0x1f8
#define FUSE_UID_LOW 0x08
#define FUSE_UID_HIGH 0x0c
-static phys_addr_t fuse_phys;
-static struct clk *fuse_clk;
-static void __iomem __initdata *fuse_base;
-
-static DEFINE_MUTEX(apb_dma_lock);
-static DECLARE_COMPLETION(apb_dma_wait);
-static struct dma_chan *apb_dma_chan;
-static struct dma_slave_config dma_sconfig;
-static u32 *apb_buffer;
-static dma_addr_t apb_buffer_phys;
+static u32 tegra20_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
+{
+ return readl_relaxed(fuse->base + FUSE_BEGIN + offset);
+}
static void apb_dma_complete(void *args)
{
- complete(&apb_dma_wait);
+ struct tegra_fuse *fuse = args;
+
+ complete(&fuse->apbdma.wait);
}
-static u32 tegra20_fuse_readl(const unsigned int offset)
+static u32 tegra20_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
{
- int ret;
- u32 val = 0;
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
struct dma_async_tx_descriptor *dma_desc;
+ unsigned long time_left;
+ u32 value = 0;
+ int err;
+
+ mutex_lock(&fuse->apbdma.lock);
- mutex_lock(&apb_dma_lock);
+ fuse->apbdma.config.src_addr = fuse->apbdma.phys + FUSE_BEGIN + offset;
- dma_sconfig.src_addr = fuse_phys + FUSE_BEGIN + offset;
- ret = dmaengine_slave_config(apb_dma_chan, &dma_sconfig);
- if (ret)
+ err = dmaengine_slave_config(fuse->apbdma.chan, &fuse->apbdma.config);
+ if (err)
goto out;
- dma_desc = dmaengine_prep_slave_single(apb_dma_chan, apb_buffer_phys,
- sizeof(u32), DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ dma_desc = dmaengine_prep_slave_single(fuse->apbdma.chan,
+ fuse->apbdma.phys,
+ sizeof(u32), DMA_DEV_TO_MEM,
+ flags);
if (!dma_desc)
goto out;
dma_desc->callback = apb_dma_complete;
- dma_desc->callback_param = NULL;
+ dma_desc->callback_param = fuse;
- reinit_completion(&apb_dma_wait);
+ reinit_completion(&fuse->apbdma.wait);
- clk_prepare_enable(fuse_clk);
+ clk_prepare_enable(fuse->clk);
dmaengine_submit(dma_desc);
- dma_async_issue_pending(apb_dma_chan);
- ret = wait_for_completion_timeout(&apb_dma_wait, msecs_to_jiffies(50));
+ dma_async_issue_pending(fuse->apbdma.chan);
+ time_left = wait_for_completion_timeout(&fuse->apbdma.wait,
+ msecs_to_jiffies(50));
- if (WARN(ret == 0, "apb read dma timed out"))
- dmaengine_terminate_all(apb_dma_chan);
+ if (WARN(time_left == 0, "apb read dma timed out"))
+ dmaengine_terminate_all(fuse->apbdma.chan);
else
- val = *apb_buffer;
+ value = *fuse->apbdma.virt;
- clk_disable_unprepare(fuse_clk);
-out:
- mutex_unlock(&apb_dma_lock);
+ clk_disable_unprepare(fuse->clk);
- return val;
+out:
+ mutex_unlock(&fuse->apbdma.lock);
+ return value;
}
-static const struct of_device_id tegra20_fuse_of_match[] = {
- { .compatible = "nvidia,tegra20-efuse" },
- {},
-};
-
-static int apb_dma_init(void)
+static int tegra20_fuse_probe(struct tegra_fuse *fuse)
{
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- apb_dma_chan = dma_request_channel(mask, NULL, NULL);
- if (!apb_dma_chan)
+
+ fuse->apbdma.chan = dma_request_channel(mask, NULL, NULL);
+ if (!fuse->apbdma.chan)
return -EPROBE_DEFER;
- apb_buffer = dma_alloc_coherent(NULL, sizeof(u32), &apb_buffer_phys,
- GFP_KERNEL);
- if (!apb_buffer) {
- dma_release_channel(apb_dma_chan);
+ fuse->apbdma.virt = dma_alloc_coherent(fuse->dev, sizeof(u32),
+ &fuse->apbdma.phys,
+ GFP_KERNEL);
+ if (!fuse->apbdma.virt) {
+ dma_release_channel(fuse->apbdma.chan);
return -ENOMEM;
}
- dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.src_maxburst = 1;
- dma_sconfig.dst_maxburst = 1;
+ fuse->apbdma.config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ fuse->apbdma.config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ fuse->apbdma.config.src_maxburst = 1;
+ fuse->apbdma.config.dst_maxburst = 1;
- return 0;
-}
-
-static int tegra20_fuse_probe(struct platform_device *pdev)
-{
- struct resource *res;
- int err;
-
- fuse_clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(fuse_clk)) {
- dev_err(&pdev->dev, "missing clock");
- return PTR_ERR(fuse_clk);
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
- fuse_phys = res->start;
-
- err = apb_dma_init();
- if (err)
- return err;
-
- if (tegra_fuse_create_sysfs(&pdev->dev, FUSE_SIZE, tegra20_fuse_readl))
- return -ENODEV;
-
- dev_dbg(&pdev->dev, "loaded\n");
+ init_completion(&fuse->apbdma.wait);
+ mutex_init(&fuse->apbdma.lock);
+ fuse->read = tegra20_fuse_read;
return 0;
}
-static struct platform_driver tegra20_fuse_driver = {
- .probe = tegra20_fuse_probe,
- .driver = {
- .name = "tegra20_fuse",
- .of_match_table = tegra20_fuse_of_match,
- }
+static const struct tegra_fuse_info tegra20_fuse_info = {
+ .read = tegra20_fuse_read,
+ .size = 0x1f8,
+ .spare = 0x100,
};
-static int __init tegra20_fuse_init(void)
-{
- return platform_driver_register(&tegra20_fuse_driver);
-}
-postcore_initcall(tegra20_fuse_init);
-
/* Early boot code. This code is called before the devices are created */
-u32 __init tegra20_fuse_early(const unsigned int offset)
-{
- return readl_relaxed(fuse_base + FUSE_BEGIN + offset);
-}
-
-bool __init tegra20_spare_fuse_early(int spare_bit)
-{
- u32 offset = spare_bit * 4;
- bool value;
-
- value = tegra20_fuse_early(offset + 0x100);
-
- return value;
-}
-
static void __init tegra20_fuse_add_randomness(void)
{
u32 randomness[7];
@@ -193,22 +143,27 @@ static void __init tegra20_fuse_add_randomness(void)
randomness[1] = tegra_read_straps();
randomness[2] = tegra_read_chipid();
randomness[3] = tegra_sku_info.cpu_process_id << 16;
- randomness[3] |= tegra_sku_info.core_process_id;
+ randomness[3] |= tegra_sku_info.soc_process_id;
randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
randomness[4] |= tegra_sku_info.soc_speedo_id;
- randomness[5] = tegra20_fuse_early(FUSE_UID_LOW);
- randomness[6] = tegra20_fuse_early(FUSE_UID_HIGH);
+ randomness[5] = tegra_fuse_read_early(FUSE_UID_LOW);
+ randomness[6] = tegra_fuse_read_early(FUSE_UID_HIGH);
add_device_randomness(randomness, sizeof(randomness));
}
-void __init tegra20_init_fuse_early(void)
+static void __init tegra20_fuse_init(struct tegra_fuse *fuse)
{
- fuse_base = ioremap(TEGRA_FUSE_BASE, TEGRA_FUSE_SIZE);
+ fuse->read_early = tegra20_fuse_read_early;
tegra_init_revision();
- tegra20_init_speedo_data(&tegra_sku_info);
+ fuse->soc->speedo_init(&tegra_sku_info);
tegra20_fuse_add_randomness();
-
- iounmap(fuse_base);
}
+
+const struct tegra_fuse_soc tegra20_fuse_soc = {
+ .init = tegra20_fuse_init,
+ .speedo_init = tegra20_init_speedo_data,
+ .probe = tegra20_fuse_probe,
+ .info = &tegra20_fuse_info,
+};
diff --git a/kernel/drivers/soc/tegra/fuse/fuse-tegra30.c b/kernel/drivers/soc/tegra/fuse/fuse-tegra30.c
index 4d2f71bf6..882607bca 100644
--- a/kernel/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/kernel/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -42,113 +42,33 @@
#define FUSE_HAS_REVISION_INFO BIT(0)
-enum speedo_idx {
- SPEEDO_TEGRA30 = 0,
- SPEEDO_TEGRA114,
- SPEEDO_TEGRA124,
-};
-
-struct tegra_fuse_info {
- int size;
- int spare_bit;
- enum speedo_idx speedo_idx;
-};
-
-static void __iomem *fuse_base;
-static struct clk *fuse_clk;
-static const struct tegra_fuse_info *fuse_info;
-
-u32 tegra30_fuse_readl(const unsigned int offset)
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_132_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_210_SOC)
+static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
{
- u32 val;
-
- /*
- * early in the boot, the fuse clock will be enabled by
- * tegra_init_fuse()
- */
-
- if (fuse_clk)
- clk_prepare_enable(fuse_clk);
-
- val = readl_relaxed(fuse_base + FUSE_BEGIN + offset);
-
- if (fuse_clk)
- clk_disable_unprepare(fuse_clk);
-
- return val;
+ return readl_relaxed(fuse->base + FUSE_BEGIN + offset);
}
-static const struct tegra_fuse_info tegra30_info = {
- .size = 0x2a4,
- .spare_bit = 0x144,
- .speedo_idx = SPEEDO_TEGRA30,
-};
-
-static const struct tegra_fuse_info tegra114_info = {
- .size = 0x2a0,
- .speedo_idx = SPEEDO_TEGRA114,
-};
-
-static const struct tegra_fuse_info tegra124_info = {
- .size = 0x300,
- .speedo_idx = SPEEDO_TEGRA124,
-};
-
-static const struct of_device_id tegra30_fuse_of_match[] = {
- { .compatible = "nvidia,tegra30-efuse", .data = &tegra30_info },
- { .compatible = "nvidia,tegra114-efuse", .data = &tegra114_info },
- { .compatible = "nvidia,tegra124-efuse", .data = &tegra124_info },
- {},
-};
-
-static int tegra30_fuse_probe(struct platform_device *pdev)
+static u32 tegra30_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
{
- const struct of_device_id *of_dev_id;
-
- of_dev_id = of_match_device(tegra30_fuse_of_match, &pdev->dev);
- if (!of_dev_id)
- return -ENODEV;
+ u32 value;
+ int err;
- fuse_clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(fuse_clk)) {
- dev_err(&pdev->dev, "missing clock");
- return PTR_ERR(fuse_clk);
+ err = clk_prepare_enable(fuse->clk);
+ if (err < 0) {
+ dev_err(fuse->dev, "failed to enable FUSE clock: %d\n", err);
+ return 0;
}
- platform_set_drvdata(pdev, NULL);
-
- if (tegra_fuse_create_sysfs(&pdev->dev, fuse_info->size,
- tegra30_fuse_readl))
- return -ENODEV;
+ value = readl_relaxed(fuse->base + FUSE_BEGIN + offset);
- dev_dbg(&pdev->dev, "loaded\n");
+ clk_disable_unprepare(fuse->clk);
- return 0;
-}
-
-static struct platform_driver tegra30_fuse_driver = {
- .probe = tegra30_fuse_probe,
- .driver = {
- .name = "tegra_fuse",
- .of_match_table = tegra30_fuse_of_match,
- }
-};
-
-static int __init tegra30_fuse_init(void)
-{
- return platform_driver_register(&tegra30_fuse_driver);
+ return value;
}
-postcore_initcall(tegra30_fuse_init);
-
-/* Early boot code. This code is called before the devices are created */
-
-typedef void (*speedo_f)(struct tegra_sku_info *sku_info);
-
-static speedo_f __initdata speedo_tbl[] = {
- [SPEEDO_TEGRA30] = tegra30_init_speedo_data,
- [SPEEDO_TEGRA114] = tegra114_init_speedo_data,
- [SPEEDO_TEGRA124] = tegra124_init_speedo_data,
-};
static void __init tegra30_fuse_add_randomness(void)
{
@@ -158,67 +78,83 @@ static void __init tegra30_fuse_add_randomness(void)
randomness[1] = tegra_read_straps();
randomness[2] = tegra_read_chipid();
randomness[3] = tegra_sku_info.cpu_process_id << 16;
- randomness[3] |= tegra_sku_info.core_process_id;
+ randomness[3] |= tegra_sku_info.soc_process_id;
randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
randomness[4] |= tegra_sku_info.soc_speedo_id;
- randomness[5] = tegra30_fuse_readl(FUSE_VENDOR_CODE);
- randomness[6] = tegra30_fuse_readl(FUSE_FAB_CODE);
- randomness[7] = tegra30_fuse_readl(FUSE_LOT_CODE_0);
- randomness[8] = tegra30_fuse_readl(FUSE_LOT_CODE_1);
- randomness[9] = tegra30_fuse_readl(FUSE_WAFER_ID);
- randomness[10] = tegra30_fuse_readl(FUSE_X_COORDINATE);
- randomness[11] = tegra30_fuse_readl(FUSE_Y_COORDINATE);
+ randomness[5] = tegra_fuse_read_early(FUSE_VENDOR_CODE);
+ randomness[6] = tegra_fuse_read_early(FUSE_FAB_CODE);
+ randomness[7] = tegra_fuse_read_early(FUSE_LOT_CODE_0);
+ randomness[8] = tegra_fuse_read_early(FUSE_LOT_CODE_1);
+ randomness[9] = tegra_fuse_read_early(FUSE_WAFER_ID);
+ randomness[10] = tegra_fuse_read_early(FUSE_X_COORDINATE);
+ randomness[11] = tegra_fuse_read_early(FUSE_Y_COORDINATE);
add_device_randomness(randomness, sizeof(randomness));
}
-static void __init legacy_fuse_init(void)
+static void __init tegra30_fuse_init(struct tegra_fuse *fuse)
{
- switch (tegra_get_chip_id()) {
- case TEGRA30:
- fuse_info = &tegra30_info;
- break;
- case TEGRA114:
- fuse_info = &tegra114_info;
- break;
- case TEGRA124:
- case TEGRA132:
- fuse_info = &tegra124_info;
- break;
- default:
- return;
- }
+ fuse->read_early = tegra30_fuse_read_early;
+ fuse->read = tegra30_fuse_read;
- fuse_base = ioremap(TEGRA_FUSE_BASE, TEGRA_FUSE_SIZE);
+ tegra_init_revision();
+ fuse->soc->speedo_init(&tegra_sku_info);
+ tegra30_fuse_add_randomness();
}
+#endif
-bool __init tegra30_spare_fuse(int spare_bit)
-{
- u32 offset = fuse_info->spare_bit + spare_bit * 4;
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static const struct tegra_fuse_info tegra30_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x2a4,
+ .spare = 0x144,
+};
- return tegra30_fuse_readl(offset) & 1;
-}
+const struct tegra_fuse_soc tegra30_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra30_init_speedo_data,
+ .info = &tegra30_fuse_info,
+};
+#endif
-void __init tegra30_init_fuse_early(void)
-{
- struct device_node *np;
- const struct of_device_id *of_match;
-
- np = of_find_matching_node_and_match(NULL, tegra30_fuse_of_match,
- &of_match);
- if (np) {
- fuse_base = of_iomap(np, 0);
- fuse_info = (struct tegra_fuse_info *)of_match->data;
- } else
- legacy_fuse_init();
-
- if (!fuse_base) {
- pr_warn("fuse DT node missing and unknown chip id: 0x%02x\n",
- tegra_get_chip_id());
- return;
- }
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+static const struct tegra_fuse_info tegra114_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x2a0,
+ .spare = 0x180,
+};
- tegra_init_revision();
- speedo_tbl[fuse_info->speedo_idx](&tegra_sku_info);
- tegra30_fuse_add_randomness();
-}
+const struct tegra_fuse_soc tegra114_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra114_init_speedo_data,
+ .info = &tegra114_fuse_info,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+static const struct tegra_fuse_info tegra124_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x300,
+ .spare = 0x200,
+};
+
+const struct tegra_fuse_soc tegra124_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra124_init_speedo_data,
+ .info = &tegra124_fuse_info,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct tegra_fuse_info tegra210_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x300,
+ .spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra210_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra210_init_speedo_data,
+ .info = &tegra210_fuse_info,
+};
+#endif
diff --git a/kernel/drivers/soc/tegra/fuse/fuse.h b/kernel/drivers/soc/tegra/fuse/fuse.h
index 3a398bf35..10c2076d5 100644
--- a/kernel/drivers/soc/tegra/fuse/fuse.h
+++ b/kernel/drivers/soc/tegra/fuse/fuse.h
@@ -19,53 +19,90 @@
#ifndef __DRIVERS_MISC_TEGRA_FUSE_H
#define __DRIVERS_MISC_TEGRA_FUSE_H
-#define TEGRA_FUSE_BASE 0x7000f800
-#define TEGRA_FUSE_SIZE 0x400
+#include <linux/dmaengine.h>
+#include <linux/types.h>
-int tegra_fuse_create_sysfs(struct device *dev, int size,
- u32 (*readl)(const unsigned int offset));
+struct tegra_fuse;
+
+struct tegra_fuse_info {
+ u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);
+ unsigned int size;
+ unsigned int spare;
+};
+
+struct tegra_fuse_soc {
+ void (*init)(struct tegra_fuse *fuse);
+ void (*speedo_init)(struct tegra_sku_info *info);
+ int (*probe)(struct tegra_fuse *fuse);
+
+ const struct tegra_fuse_info *info;
+};
+
+struct tegra_fuse {
+ struct device *dev;
+ void __iomem *base;
+ phys_addr_t phys;
+ struct clk *clk;
+
+ u32 (*read_early)(struct tegra_fuse *fuse, unsigned int offset);
+ u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);
+ const struct tegra_fuse_soc *soc;
+
+ /* APBDMA on Tegra20 */
+ struct {
+ struct mutex lock;
+ struct completion wait;
+ struct dma_chan *chan;
+ struct dma_slave_config config;
+ dma_addr_t phys;
+ u32 *virt;
+ } apbdma;
+};
-bool tegra30_spare_fuse(int bit);
-u32 tegra30_fuse_readl(const unsigned int offset);
-void tegra30_init_fuse_early(void);
void tegra_init_revision(void);
void tegra_init_apbmisc(void);
+bool __init tegra_fuse_read_spare(unsigned int spare);
+u32 __init tegra_fuse_read_early(unsigned int offset);
+
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
void tegra20_init_speedo_data(struct tegra_sku_info *sku_info);
-bool tegra20_spare_fuse_early(int spare_bit);
-void tegra20_init_fuse_early(void);
-u32 tegra20_fuse_early(const unsigned int offset);
-#else
-static inline void tegra20_init_speedo_data(struct tegra_sku_info *sku_info) {}
-static inline bool tegra20_spare_fuse_early(int spare_bit)
-{
- return false;
-}
-static inline void tegra20_init_fuse_early(void) {}
-static inline u32 tegra20_fuse_early(const unsigned int offset)
-{
- return 0;
-}
#endif
-
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
void tegra30_init_speedo_data(struct tegra_sku_info *sku_info);
-#else
-static inline void tegra30_init_speedo_data(struct tegra_sku_info *sku_info) {}
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
void tegra114_init_speedo_data(struct tegra_sku_info *sku_info);
-#else
-static inline void tegra114_init_speedo_data(struct tegra_sku_info *sku_info) {}
#endif
-#ifdef CONFIG_ARCH_TEGRA_124_SOC
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
void tegra124_init_speedo_data(struct tegra_sku_info *sku_info);
-#else
-static inline void tegra124_init_speedo_data(struct tegra_sku_info *sku_info) {}
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+void tegra210_init_speedo_data(struct tegra_sku_info *sku_info);
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+extern const struct tegra_fuse_soc tegra20_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+extern const struct tegra_fuse_soc tegra30_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+extern const struct tegra_fuse_soc tegra114_fuse_soc;
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+extern const struct tegra_fuse_soc tegra124_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+extern const struct tegra_fuse_soc tegra210_fuse_soc;
#endif
#endif
diff --git a/kernel/drivers/soc/tegra/fuse/speedo-tegra114.c b/kernel/drivers/soc/tegra/fuse/speedo-tegra114.c
index 2a6ca036f..1ba41ebbb 100644
--- a/kernel/drivers/soc/tegra/fuse/speedo-tegra114.c
+++ b/kernel/drivers/soc/tegra/fuse/speedo-tegra114.c
@@ -22,7 +22,7 @@
#include "fuse.h"
-#define CORE_PROCESS_CORNERS 2
+#define SOC_PROCESS_CORNERS 2
#define CPU_PROCESS_CORNERS 2
enum {
@@ -31,7 +31,7 @@ enum {
THRESHOLD_INDEX_COUNT,
};
-static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = {
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
{1123, UINT_MAX},
{0, UINT_MAX},
};
@@ -74,8 +74,8 @@ static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
}
if (rev == TEGRA_REVISION_A01) {
- tmp = tegra30_fuse_readl(0x270) << 1;
- tmp |= tegra30_fuse_readl(0x26c);
+ tmp = tegra_fuse_read_early(0x270) << 1;
+ tmp |= tegra_fuse_read_early(0x26c);
if (!tmp)
sku_info->cpu_speedo_id = 0;
}
@@ -84,27 +84,27 @@ static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
void __init tegra114_init_speedo_data(struct tegra_sku_info *sku_info)
{
u32 cpu_speedo_val;
- u32 core_speedo_val;
+ u32 soc_speedo_val;
int threshold;
int i;
BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
THRESHOLD_INDEX_COUNT);
- BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) !=
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
THRESHOLD_INDEX_COUNT);
rev_sku_to_speedo_ids(sku_info, &threshold);
- cpu_speedo_val = tegra30_fuse_readl(0x12c) + 1024;
- core_speedo_val = tegra30_fuse_readl(0x134);
+ cpu_speedo_val = tegra_fuse_read_early(0x12c) + 1024;
+ soc_speedo_val = tegra_fuse_read_early(0x134);
for (i = 0; i < CPU_PROCESS_CORNERS; i++)
if (cpu_speedo_val < cpu_process_speedos[threshold][i])
break;
sku_info->cpu_process_id = i;
- for (i = 0; i < CORE_PROCESS_CORNERS; i++)
- if (core_speedo_val < core_process_speedos[threshold][i])
+ for (i = 0; i < SOC_PROCESS_CORNERS; i++)
+ if (soc_speedo_val < soc_process_speedos[threshold][i])
break;
- sku_info->core_process_id = i;
+ sku_info->soc_process_id = i;
}
diff --git a/kernel/drivers/soc/tegra/fuse/speedo-tegra124.c b/kernel/drivers/soc/tegra/fuse/speedo-tegra124.c
index 46362387d..a63a13410 100644
--- a/kernel/drivers/soc/tegra/fuse/speedo-tegra124.c
+++ b/kernel/drivers/soc/tegra/fuse/speedo-tegra124.c
@@ -24,7 +24,7 @@
#define CPU_PROCESS_CORNERS 2
#define GPU_PROCESS_CORNERS 2
-#define CORE_PROCESS_CORNERS 2
+#define SOC_PROCESS_CORNERS 2
#define FUSE_CPU_SPEEDO_0 0x14
#define FUSE_CPU_SPEEDO_1 0x2c
@@ -53,7 +53,7 @@ static const u32 __initconst gpu_process_speedos[][GPU_PROCESS_CORNERS] = {
{0, UINT_MAX},
};
-static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = {
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
{2101, UINT_MAX},
{0, UINT_MAX},
};
@@ -119,19 +119,19 @@ void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
THRESHOLD_INDEX_COUNT);
BUILD_BUG_ON(ARRAY_SIZE(gpu_process_speedos) !=
THRESHOLD_INDEX_COUNT);
- BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) !=
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
THRESHOLD_INDEX_COUNT);
- cpu_speedo_0_value = tegra30_fuse_readl(FUSE_CPU_SPEEDO_0);
+ cpu_speedo_0_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
/* GPU Speedo is stored in CPU_SPEEDO_2 */
- sku_info->gpu_speedo_value = tegra30_fuse_readl(FUSE_CPU_SPEEDO_2);
+ sku_info->gpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
- soc_speedo_0_value = tegra30_fuse_readl(FUSE_SOC_SPEEDO_0);
+ soc_speedo_0_value = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
- cpu_iddq_value = tegra30_fuse_readl(FUSE_CPU_IDDQ);
- soc_iddq_value = tegra30_fuse_readl(FUSE_SOC_IDDQ);
- gpu_iddq_value = tegra30_fuse_readl(FUSE_GPU_IDDQ);
+ cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
+ soc_iddq_value = tegra_fuse_read_early(FUSE_SOC_IDDQ);
+ gpu_iddq_value = tegra_fuse_read_early(FUSE_GPU_IDDQ);
sku_info->cpu_speedo_value = cpu_speedo_0_value;
@@ -143,7 +143,7 @@ void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
rev_sku_to_speedo_ids(sku_info, &threshold);
- sku_info->cpu_iddq_value = tegra30_fuse_readl(FUSE_CPU_IDDQ);
+ sku_info->cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
for (i = 0; i < GPU_PROCESS_CORNERS; i++)
if (sku_info->gpu_speedo_value <
@@ -157,11 +157,11 @@ void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
break;
sku_info->cpu_process_id = i;
- for (i = 0; i < CORE_PROCESS_CORNERS; i++)
+ for (i = 0; i < SOC_PROCESS_CORNERS; i++)
if (soc_speedo_0_value <
- core_process_speedos[threshold][i])
+ soc_process_speedos[threshold][i])
break;
- sku_info->core_process_id = i;
+ sku_info->soc_process_id = i;
pr_debug("Tegra GPU Speedo ID=%d, Speedo Value=%d\n",
sku_info->gpu_speedo_id, sku_info->gpu_speedo_value);
diff --git a/kernel/drivers/soc/tegra/fuse/speedo-tegra20.c b/kernel/drivers/soc/tegra/fuse/speedo-tegra20.c
index eff1b63f3..5f7818bf6 100644
--- a/kernel/drivers/soc/tegra/fuse/speedo-tegra20.c
+++ b/kernel/drivers/soc/tegra/fuse/speedo-tegra20.c
@@ -28,11 +28,11 @@
#define CPU_SPEEDO_REDUND_MSBIT 39
#define CPU_SPEEDO_REDUND_OFFS (CPU_SPEEDO_REDUND_MSBIT - CPU_SPEEDO_MSBIT)
-#define CORE_SPEEDO_LSBIT 40
-#define CORE_SPEEDO_MSBIT 47
-#define CORE_SPEEDO_REDUND_LSBIT 48
-#define CORE_SPEEDO_REDUND_MSBIT 55
-#define CORE_SPEEDO_REDUND_OFFS (CORE_SPEEDO_REDUND_MSBIT - CORE_SPEEDO_MSBIT)
+#define SOC_SPEEDO_LSBIT 40
+#define SOC_SPEEDO_MSBIT 47
+#define SOC_SPEEDO_REDUND_LSBIT 48
+#define SOC_SPEEDO_REDUND_MSBIT 55
+#define SOC_SPEEDO_REDUND_OFFS (SOC_SPEEDO_REDUND_MSBIT - SOC_SPEEDO_MSBIT)
#define SPEEDO_MULT 4
@@ -56,7 +56,7 @@ static const u32 __initconst cpu_process_speedos[][PROCESS_CORNERS_NUM] = {
{316, 331, 383, UINT_MAX},
};
-static const u32 __initconst core_process_speedos[][PROCESS_CORNERS_NUM] = {
+static const u32 __initconst soc_process_speedos[][PROCESS_CORNERS_NUM] = {
{165, 195, 224, UINT_MAX},
{165, 195, 224, UINT_MAX},
{165, 195, 224, UINT_MAX},
@@ -69,7 +69,7 @@ void __init tegra20_init_speedo_data(struct tegra_sku_info *sku_info)
int i;
BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) != SPEEDO_ID_COUNT);
- BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) != SPEEDO_ID_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) != SPEEDO_ID_COUNT);
if (SPEEDO_ID_SELECT_0(sku_info->revision))
sku_info->soc_speedo_id = SPEEDO_ID_0;
@@ -80,8 +80,8 @@ void __init tegra20_init_speedo_data(struct tegra_sku_info *sku_info)
val = 0;
for (i = CPU_SPEEDO_MSBIT; i >= CPU_SPEEDO_LSBIT; i--) {
- reg = tegra20_spare_fuse_early(i) |
- tegra20_spare_fuse_early(i + CPU_SPEEDO_REDUND_OFFS);
+ reg = tegra_fuse_read_spare(i) |
+ tegra_fuse_read_spare(i + CPU_SPEEDO_REDUND_OFFS);
val = (val << 1) | (reg & 0x1);
}
val = val * SPEEDO_MULT;
@@ -94,17 +94,17 @@ void __init tegra20_init_speedo_data(struct tegra_sku_info *sku_info)
sku_info->cpu_process_id = i;
val = 0;
- for (i = CORE_SPEEDO_MSBIT; i >= CORE_SPEEDO_LSBIT; i--) {
- reg = tegra20_spare_fuse_early(i) |
- tegra20_spare_fuse_early(i + CORE_SPEEDO_REDUND_OFFS);
+ for (i = SOC_SPEEDO_MSBIT; i >= SOC_SPEEDO_LSBIT; i--) {
+ reg = tegra_fuse_read_spare(i) |
+ tegra_fuse_read_spare(i + SOC_SPEEDO_REDUND_OFFS);
val = (val << 1) | (reg & 0x1);
}
val = val * SPEEDO_MULT;
pr_debug("Core speedo value %u\n", val);
for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) {
- if (val <= core_process_speedos[sku_info->soc_speedo_id][i])
+ if (val <= soc_process_speedos[sku_info->soc_speedo_id][i])
break;
}
- sku_info->core_process_id = i;
+ sku_info->soc_process_id = i;
}
diff --git a/kernel/drivers/soc/tegra/fuse/speedo-tegra210.c b/kernel/drivers/soc/tegra/fuse/speedo-tegra210.c
new file mode 100644
index 000000000..5373f4c16
--- /dev/null
+++ b/kernel/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2013-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define CPU_PROCESS_CORNERS 2
+#define GPU_PROCESS_CORNERS 2
+#define SOC_PROCESS_CORNERS 3
+
+#define FUSE_CPU_SPEEDO_0 0x014
+#define FUSE_CPU_SPEEDO_1 0x02c
+#define FUSE_CPU_SPEEDO_2 0x030
+#define FUSE_SOC_SPEEDO_0 0x034
+#define FUSE_SOC_SPEEDO_1 0x038
+#define FUSE_SOC_SPEEDO_2 0x03c
+#define FUSE_CPU_IDDQ 0x018
+#define FUSE_SOC_IDDQ 0x040
+#define FUSE_GPU_IDDQ 0x128
+#define FUSE_FT_REV 0x028
+
+enum {
+ THRESHOLD_INDEX_0,
+ THRESHOLD_INDEX_1,
+ THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+ { 2119, UINT_MAX },
+ { 2119, UINT_MAX },
+};
+
+static const u32 __initconst gpu_process_speedos[][GPU_PROCESS_CORNERS] = {
+ { UINT_MAX, UINT_MAX },
+ { UINT_MAX, UINT_MAX },
+};
+
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
+ { 1950, 2100, UINT_MAX },
+ { 1950, 2100, UINT_MAX },
+};
+
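+/*
+ * The speedo fusing revision is stored in spare fuse bits 2..4, with bit 4
+ * as the most significant bit; reassemble it from the individual spare bits.
+ */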
+static u8 __init get_speedo_revision(void)
+{
+ return tegra_fuse_read_spare(4) << 2 |
+ tegra_fuse_read_spare(3) << 1 |
+ tegra_fuse_read_spare(2) << 0;
+}
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
+ u8 speedo_rev, int *threshold)
+{
+ int sku = sku_info->sku_id;
+
+ /* Assign defaults */
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ sku_info->gpu_speedo_id = 0;
+ *threshold = THRESHOLD_INDEX_0;
+
+ switch (sku) {
+ case 0x00: /* Engineering SKU */
+ case 0x01: /* Engineering SKU */
+ case 0x07:
+ case 0x17:
+ case 0x27:
+ if (speedo_rev >= 2)
+ sku_info->gpu_speedo_id = 1;
+ break;
+
+ case 0x13:
+ if (speedo_rev >= 2)
+ sku_info->gpu_speedo_id = 1;
+
+ sku_info->cpu_speedo_id = 1;
+ break;
+
+ default:
+ pr_err("Tegra210: unknown SKU %#04x\n", sku);
+ /* Using the default for the error case */
+ break;
+ }
+}
+
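+/*
+ * Map a speedo value to a process corner: return the index of the first
+ * threshold in the table that the value falls below.
+ */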
+static int get_process_id(int value, const u32 *speedos, unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ if (value < speedos[i])
+ return i;
+
+ return -EINVAL;
+}
+
+void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ int cpu_speedo[3], soc_speedo[3], cpu_iddq, gpu_iddq, soc_iddq;
+ int index;
+ u8 speedo_revision;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(gpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+
+ /* Read speedo/IDDQ fuses */
+ cpu_speedo[0] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
+ cpu_speedo[1] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_1);
+ cpu_speedo[2] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+
+ soc_speedo[0] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
+ soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1);
+ soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2);
+
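+ /* scale the IDDQ fuse values: CPU and SoC by a factor of 4, GPU by 5 */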
+ cpu_iddq = tegra_fuse_read_early(FUSE_CPU_IDDQ) * 4;
+ soc_iddq = tegra_fuse_read_early(FUSE_SOC_IDDQ) * 4;
+ gpu_iddq = tegra_fuse_read_early(FUSE_GPU_IDDQ) * 5;
+
+ /*
+ * Determine CPU, GPU and SoC speedo values depending on speedo fusing
+ * revision. Note that GPU speedo value is fused in CPU_SPEEDO_2.
+ */
+ speedo_revision = get_speedo_revision();
+ pr_info("Speedo Revision %u\n", speedo_revision);
+
+ if (speedo_revision >= 3) {
+ sku_info->cpu_speedo_value = cpu_speedo[0];
+ sku_info->gpu_speedo_value = cpu_speedo[2];
+ sku_info->soc_speedo_value = soc_speedo[0];
+ } else if (speedo_revision == 2) {
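+ /* rev 2 fuse values need linear correction: (offset + gain * raw / 100) / 10 */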
+ sku_info->cpu_speedo_value = (-1938 + (1095 * cpu_speedo[0] / 100)) / 10;
+ sku_info->gpu_speedo_value = (-1662 + (1082 * cpu_speedo[2] / 100)) / 10;
+ sku_info->soc_speedo_value = ( -705 + (1037 * soc_speedo[0] / 100)) / 10;
+ } else {
+ sku_info->cpu_speedo_value = 2100;
+ sku_info->gpu_speedo_value = cpu_speedo[2] - 75;
+ sku_info->soc_speedo_value = 1900;
+ }
+
+ if ((sku_info->cpu_speedo_value <= 0) ||
+ (sku_info->gpu_speedo_value <= 0) ||
+ (sku_info->soc_speedo_value <= 0)) {
+ WARN(1, "speedo value not fused\n");
+ return;
+ }
+
+ rev_sku_to_speedo_ids(sku_info, speedo_revision, &index);
+
+ sku_info->gpu_process_id = get_process_id(sku_info->gpu_speedo_value,
+ gpu_process_speedos[index],
+ GPU_PROCESS_CORNERS);
+
+ sku_info->cpu_process_id = get_process_id(sku_info->cpu_speedo_value,
+ cpu_process_speedos[index],
+ CPU_PROCESS_CORNERS);
+
+ sku_info->soc_process_id = get_process_id(sku_info->soc_speedo_value,
+ soc_process_speedos[index],
+ SOC_PROCESS_CORNERS);
+
+ pr_debug("Tegra GPU Speedo ID=%d, Speedo Value=%d\n",
+ sku_info->gpu_speedo_id, sku_info->gpu_speedo_value);
+}
diff --git a/kernel/drivers/soc/tegra/fuse/speedo-tegra30.c b/kernel/drivers/soc/tegra/fuse/speedo-tegra30.c
index b17f0dcdf..9b010b3ef 100644
--- a/kernel/drivers/soc/tegra/fuse/speedo-tegra30.c
+++ b/kernel/drivers/soc/tegra/fuse/speedo-tegra30.c
@@ -22,7 +22,7 @@
#include "fuse.h"
-#define CORE_PROCESS_CORNERS 1
+#define SOC_PROCESS_CORNERS 1
#define CPU_PROCESS_CORNERS 6
#define FUSE_SPEEDO_CALIB_0 0x14
@@ -54,7 +54,7 @@ enum {
THRESHOLD_INDEX_COUNT,
};
-static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = {
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
{180},
{170},
{195},
@@ -93,25 +93,25 @@ static void __init fuse_speedo_calib(u32 *speedo_g, u32 *speedo_lp)
int bit_minus1;
int bit_minus2;
- reg = tegra30_fuse_readl(FUSE_SPEEDO_CALIB_0);
+ reg = tegra_fuse_read_early(FUSE_SPEEDO_CALIB_0);
*speedo_lp = (reg & 0xFFFF) * 4;
*speedo_g = ((reg >> 16) & 0xFFFF) * 4;
- ate_ver = tegra30_fuse_readl(FUSE_TEST_PROG_VER);
+ ate_ver = tegra_fuse_read_early(FUSE_TEST_PROG_VER);
pr_debug("Tegra ATE prog ver %d.%d\n", ate_ver/10, ate_ver%10);
if (ate_ver >= 26) {
- bit_minus1 = tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS1);
- bit_minus1 |= tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS1_R);
- bit_minus2 = tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS2);
- bit_minus2 |= tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS2_R);
+ bit_minus1 = tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS1);
+ bit_minus1 |= tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS1_R);
+ bit_minus2 = tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS2);
+ bit_minus2 |= tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS2_R);
*speedo_lp |= (bit_minus1 << 1) | bit_minus2;
- bit_minus1 = tegra30_spare_fuse(G_SPEEDO_BIT_MINUS1);
- bit_minus1 |= tegra30_spare_fuse(G_SPEEDO_BIT_MINUS1_R);
- bit_minus2 = tegra30_spare_fuse(G_SPEEDO_BIT_MINUS2);
- bit_minus2 |= tegra30_spare_fuse(G_SPEEDO_BIT_MINUS2_R);
+ bit_minus1 = tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS1);
+ bit_minus1 |= tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS1_R);
+ bit_minus2 = tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS2);
+ bit_minus2 |= tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS2_R);
*speedo_g |= (bit_minus1 << 1) | bit_minus2;
} else {
*speedo_lp |= 0x3;
@@ -121,7 +121,7 @@ static void __init fuse_speedo_calib(u32 *speedo_g, u32 *speedo_lp)
static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info)
{
- int package_id = tegra30_fuse_readl(FUSE_PACKAGE_INFO) & 0x0F;
+ int package_id = tegra_fuse_read_early(FUSE_PACKAGE_INFO) & 0x0F;
switch (sku_info->revision) {
case TEGRA_REVISION_A01:
@@ -246,19 +246,19 @@ static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info)
void __init tegra30_init_speedo_data(struct tegra_sku_info *sku_info)
{
u32 cpu_speedo_val;
- u32 core_speedo_val;
+ u32 soc_speedo_val;
int i;
BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
THRESHOLD_INDEX_COUNT);
- BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) !=
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
THRESHOLD_INDEX_COUNT);
rev_sku_to_speedo_ids(sku_info);
- fuse_speedo_calib(&cpu_speedo_val, &core_speedo_val);
+ fuse_speedo_calib(&cpu_speedo_val, &soc_speedo_val);
pr_debug("Tegra CPU speedo value %u\n", cpu_speedo_val);
- pr_debug("Tegra Core speedo value %u\n", core_speedo_val);
+ pr_debug("Tegra Core speedo value %u\n", soc_speedo_val);
for (i = 0; i < CPU_PROCESS_CORNERS; i++) {
if (cpu_speedo_val < cpu_process_speedos[threshold_index][i])
@@ -273,16 +273,16 @@ void __init tegra30_init_speedo_data(struct tegra_sku_info *sku_info)
sku_info->cpu_speedo_id = 1;
}
- for (i = 0; i < CORE_PROCESS_CORNERS; i++) {
- if (core_speedo_val < core_process_speedos[threshold_index][i])
+ for (i = 0; i < SOC_PROCESS_CORNERS; i++) {
+ if (soc_speedo_val < soc_process_speedos[threshold_index][i])
break;
}
- sku_info->core_process_id = i - 1;
+ sku_info->soc_process_id = i - 1;
- if (sku_info->core_process_id == -1) {
- pr_warn("Tegra CORE speedo value %3d out of range",
- core_speedo_val);
- sku_info->core_process_id = 0;
+ if (sku_info->soc_process_id == -1) {
+ pr_warn("Tegra SoC speedo value %3d out of range",
+ soc_speedo_val);
+ sku_info->soc_process_id = 0;
sku_info->soc_speedo_id = 1;
}
}
diff --git a/kernel/drivers/soc/tegra/fuse/tegra-apbmisc.c b/kernel/drivers/soc/tegra/fuse/tegra-apbmisc.c
index 3bf5aba4c..5b18f6ffa 100644
--- a/kernel/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/kernel/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -21,15 +21,21 @@
#include <linux/io.h>
#include <soc/tegra/fuse.h>
+#include <soc/tegra/common.h>
#include "fuse.h"
-#define APBMISC_BASE 0x70000800
-#define APBMISC_SIZE 0x64
#define FUSE_SKU_INFO 0x10
+#define PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT 4
+#define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_LONG \
+ (0xf << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT)
+#define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT \
+ (0x3 << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT)
+
static void __iomem *apbmisc_base;
static void __iomem *strapping_base;
+static bool long_ram_code;
u32 tegra_read_chipid(void)
{
@@ -54,6 +60,18 @@ u32 tegra_read_straps(void)
return 0;
}
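+/*
+ * The RAM code is read from the strapping options register; its width (two
+ * or four bits) is selected by the "nvidia,long-ram-code" DT property.
+ */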
+u32 tegra_read_ram_code(void)
+{
+ u32 straps = tegra_read_straps();
+
+ if (long_ram_code)
+ straps &= PMC_STRAPPING_OPT_A_RAM_CODE_MASK_LONG;
+ else
+ straps &= PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT;
+
+ return straps >> PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT;
+}
+
static const struct of_device_id apbmisc_match[] __initconst = {
{ .compatible = "nvidia,tegra20-apbmisc", },
{},
@@ -76,8 +94,8 @@ void __init tegra_init_revision(void)
rev = TEGRA_REVISION_A02;
break;
case 3:
- if (chip_id == TEGRA20 && (tegra20_spare_fuse_early(18) ||
- tegra20_spare_fuse_early(19)))
+ if (chip_id == TEGRA20 && (tegra_fuse_read_spare(18) ||
+ tegra_fuse_read_spare(19)))
rev = TEGRA_REVISION_A03p;
else
rev = TEGRA_REVISION_A03;
@@ -91,25 +109,74 @@ void __init tegra_init_revision(void)
tegra_sku_info.revision = rev;
- if (chip_id == TEGRA20)
- tegra_sku_info.sku_id = tegra20_fuse_early(FUSE_SKU_INFO);
- else
- tegra_sku_info.sku_id = tegra30_fuse_readl(FUSE_SKU_INFO);
+ tegra_sku_info.sku_id = tegra_fuse_read_early(FUSE_SKU_INFO);
}
void __init tegra_init_apbmisc(void)
{
+ struct resource apbmisc, straps;
struct device_node *np;
np = of_find_matching_node(NULL, apbmisc_match);
- apbmisc_base = of_iomap(np, 0);
- if (!apbmisc_base) {
- pr_warn("ioremap tegra apbmisc failed. using %08x instead\n",
- APBMISC_BASE);
- apbmisc_base = ioremap(APBMISC_BASE, APBMISC_SIZE);
+ if (!np) {
+ /*
+ * Fall back to legacy initialization for 32-bit ARM only. All
+ * 64-bit ARM device tree files for Tegra are required to have
+ * an APBMISC node.
+ *
+ * This is for backwards-compatibility with old device trees
+ * that didn't contain an APBMISC node.
+ */
+ if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+ /* APBMISC registers (chip revision, ...) */
+ apbmisc.start = 0x70000800;
+ apbmisc.end = 0x70000863;
+ apbmisc.flags = IORESOURCE_MEM;
+
+ /* strapping options */
+ if (tegra_get_chip_id() == TEGRA124) {
+ straps.start = 0x7000e864;
+ straps.end = 0x7000e867;
+ } else {
+ straps.start = 0x70000008;
+ straps.end = 0x7000000b;
+ }
+
+ straps.flags = IORESOURCE_MEM;
+
+ pr_warn("Using APBMISC region %pR\n", &apbmisc);
+ pr_warn("Using strapping options registers %pR\n",
+ &straps);
+ } else {
+ /*
+ * At this point we're not running on Tegra, so play
+ * nice with multi-platform kernels.
+ */
+ return;
+ }
+ } else {
+ /*
+ * Extract information from the device tree if we've found a
+ * matching node.
+ */
+ if (of_address_to_resource(np, 0, &apbmisc) < 0) {
+ pr_err("failed to get APBMISC registers\n");
+ return;
+ }
+
+ if (of_address_to_resource(np, 1, &straps) < 0) {
+ pr_err("failed to get strapping options registers\n");
+ return;
+ }
}
- strapping_base = of_iomap(np, 1);
+ apbmisc_base = ioremap_nocache(apbmisc.start, resource_size(&apbmisc));
+ if (!apbmisc_base)
+ pr_err("failed to map APBMISC registers\n");
+
+ strapping_base = ioremap_nocache(straps.start, resource_size(&straps));
if (!strapping_base)
- pr_err("ioremap tegra strapping_base failed\n");
+ pr_err("failed to map strapping options registers\n");
+
+ long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
}
diff --git a/kernel/drivers/soc/tegra/pmc.c b/kernel/drivers/soc/tegra/pmc.c
index c89bada87..bc34cf748 100644
--- a/kernel/drivers/soc/tegra/pmc.c
+++ b/kernel/drivers/soc/tegra/pmc.c
@@ -17,6 +17,8 @@
*
*/
+#define pr_fmt(fmt) "tegra-pmc: " fmt
+
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
@@ -377,13 +379,10 @@ int tegra_pmc_cpu_remove_clamping(int cpuid)
}
#endif /* CONFIG_SMP */
-/**
- * tegra_pmc_restart() - reboot the system
- * @mode: which mode to reboot in
- * @cmd: reboot command
- */
-void tegra_pmc_restart(enum reboot_mode mode, const char *cmd)
+static int tegra_pmc_restart_notify(struct notifier_block *this,
+ unsigned long action, void *data)
{
+ const char *cmd = data;
u32 value;
value = tegra_pmc_readl(PMC_SCRATCH0);
@@ -405,8 +404,15 @@ void tegra_pmc_restart(enum reboot_mode mode, const char *cmd)
value = tegra_pmc_readl(0);
value |= 0x10;
tegra_pmc_writel(value, 0);
+
+ return NOTIFY_DONE;
}
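+/* 128 is the default priority for restart handlers (see register_restart_handler()) */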
+static struct notifier_block tegra_pmc_restart_handler = {
+ .notifier_call = tegra_pmc_restart_notify,
+ .priority = 128,
+};
+
static int powergate_show(struct seq_file *s, void *data)
{
unsigned int i;
@@ -453,7 +459,6 @@ static int tegra_io_rail_prepare(int id, unsigned long *request,
unsigned long *status, unsigned int *bit)
{
unsigned long rate, value;
- struct clk *clk;
*bit = id % 32;
@@ -472,12 +477,7 @@ static int tegra_io_rail_prepare(int id, unsigned long *request,
*request = IO_DPD2_REQ;
}
- clk = clk_get_sys(NULL, "pclk");
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- rate = clk_get_rate(clk);
- clk_put(clk);
+ rate = clk_get_rate(pmc->clk);
tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE);
@@ -531,8 +531,10 @@ int tegra_io_rail_power_on(int id)
tegra_pmc_writel(value, request);
err = tegra_io_rail_poll(status, mask, 0, 250);
- if (err < 0)
+ if (err < 0) {
+ pr_info("tegra_io_rail_poll() failed: %d\n", err);
return err;
+ }
tegra_io_rail_unprepare();
@@ -547,8 +549,10 @@ int tegra_io_rail_power_off(int id)
int err;
err = tegra_io_rail_prepare(id, &request, &status, &bit);
- if (err < 0)
+ if (err < 0) {
+ pr_info("tegra_io_rail_prepare() failed: %d\n", err);
return err;
+ }
mask = 1 << bit;
@@ -797,7 +801,6 @@ void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
out:
of_node_put(np);
- return;
}
static int tegra_pmc_probe(struct platform_device *pdev)
@@ -837,6 +840,13 @@ static int tegra_pmc_probe(struct platform_device *pdev)
return err;
}
+ err = register_restart_handler(&tegra_pmc_restart_handler);
+ if (err) {
+ dev_err(&pdev->dev, "unable to register restart handler, %d\n",
+ err);
+ return err;
+ }
+
return 0;
}
@@ -991,7 +1001,56 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.has_gpu_clamps = true,
};
+static const char * const tegra210_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "crail",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_L2] = "l2",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_HEG] = "heg",
+ [TEGRA_POWERGATE_SATA] = "sata",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CELP] = "celp",
+ [TEGRA_POWERGATE_CPU0] = "cpu0",
+ [TEGRA_POWERGATE_C0NC] = "c0nc",
+ [TEGRA_POWERGATE_C1NC] = "c1nc",
+ [TEGRA_POWERGATE_SOR] = "sor",
+ [TEGRA_POWERGATE_DIS] = "dis",
+ [TEGRA_POWERGATE_DISB] = "disb",
+ [TEGRA_POWERGATE_XUSBA] = "xusba",
+ [TEGRA_POWERGATE_XUSBB] = "xusbb",
+ [TEGRA_POWERGATE_XUSBC] = "xusbc",
+ [TEGRA_POWERGATE_VIC] = "vic",
+ [TEGRA_POWERGATE_IRAM] = "iram",
+ [TEGRA_POWERGATE_NVDEC] = "nvdec",
+ [TEGRA_POWERGATE_NVJPG] = "nvjpg",
+ [TEGRA_POWERGATE_AUD] = "aud",
+ [TEGRA_POWERGATE_DFD] = "dfd",
+ [TEGRA_POWERGATE_VE2] = "ve2",
+};
+
+static const u8 tegra210_cpu_powergates[] = {
+ TEGRA_POWERGATE_CPU0,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+static const struct tegra_pmc_soc tegra210_pmc_soc = {
+ .num_powergates = ARRAY_SIZE(tegra210_powergates),
+ .powergates = tegra210_powergates,
+ .num_cpu_powergates = ARRAY_SIZE(tegra210_cpu_powergates),
+ .cpu_powergates = tegra210_cpu_powergates,
+ .has_tsense_reset = true,
+ .has_gpu_clamps = true,
+};
+
static const struct of_device_id tegra_pmc_match[] = {
+ { .compatible = "nvidia,tegra210-pmc", .data = &tegra210_pmc_soc },
+ { .compatible = "nvidia,tegra132-pmc", .data = &tegra124_pmc_soc },
{ .compatible = "nvidia,tegra124-pmc", .data = &tegra124_pmc_soc },
{ .compatible = "nvidia,tegra114-pmc", .data = &tegra114_pmc_soc },
{ .compatible = "nvidia,tegra30-pmc", .data = &tegra30_pmc_soc },
@@ -1010,7 +1069,7 @@ static struct platform_driver tegra_pmc_driver = {
},
.probe = tegra_pmc_probe,
};
-module_platform_driver(tegra_pmc_driver);
+builtin_platform_driver(tegra_pmc_driver);
/*
* Early initialization to allow access to registers in the very early boot
@@ -1024,25 +1083,44 @@ static int __init tegra_pmc_early_init(void)
bool invert;
u32 value;
- if (!soc_is_tegra())
- return 0;
-
np = of_find_matching_node_and_match(NULL, tegra_pmc_match, &match);
if (!np) {
- pr_warn("PMC device node not found, disabling powergating\n");
-
- regs.start = 0x7000e400;
- regs.end = 0x7000e7ff;
- regs.flags = IORESOURCE_MEM;
-
- pr_warn("Using memory region %pR\n", &regs);
+ /*
+ * Fall back to legacy initialization for 32-bit ARM only. All
+ * 64-bit ARM device tree files for Tegra are required to have
+ * a PMC node.
+ *
+ * This is for backwards-compatibility with old device trees
+ * that didn't contain a PMC node. Note that in this case the
+ * SoC data can't be matched and therefore powergating is
+ * disabled.
+ */
+ if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+ pr_warn("DT node not found, powergating disabled\n");
+
+ regs.start = 0x7000e400;
+ regs.end = 0x7000e7ff;
+ regs.flags = IORESOURCE_MEM;
+
+ pr_warn("Using memory region %pR\n", &regs);
+ } else {
+ /*
+ * At this point we're not running on Tegra, so play
+ * nice with multi-platform kernels.
+ */
+ return 0;
+ }
} else {
- pmc->soc = match->data;
- }
+ /*
+ * Extract information from the device tree if we've found a
+ * matching node.
+ */
+ if (of_address_to_resource(np, 0, &regs) < 0) {
+ pr_err("failed to get PMC registers\n");
+ return -ENXIO;
+ }
- if (of_address_to_resource(np, 0, &regs) < 0) {
- pr_err("failed to get PMC registers\n");
- return -ENXIO;
+ pmc->soc = match->data;
}
pmc->base = ioremap_nocache(regs.start, resource_size(&regs));
@@ -1053,6 +1131,10 @@ static int __init tegra_pmc_early_init(void)
mutex_init(&pmc->powergates_lock);
+ /*
+ * Invert the interrupt polarity if a PMC device tree node exists and
+ * contains the nvidia,invert-interrupt property.
+ */
invert = of_property_read_bool(np, "nvidia,invert-interrupt");
value = tegra_pmc_readl(PMC_CNTRL);
diff --git a/kernel/drivers/soc/ti/knav_qmss.h b/kernel/drivers/soc/ti/knav_qmss.h
index 51da23412..6ff936cac 100644
--- a/kernel/drivers/soc/ti/knav_qmss.h
+++ b/kernel/drivers/soc/ti/knav_qmss.h
@@ -135,9 +135,10 @@ struct knav_pdsp_info {
};
void __iomem *intd;
u32 __iomem *iram;
- const char *firmware;
u32 id;
struct list_head list;
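+ /* set once firmware has been loaded/started; checked by the acc range setup */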
+ bool loaded;
+ bool started;
};
struct knav_qmgr_info {
diff --git a/kernel/drivers/soc/ti/knav_qmss_acc.c b/kernel/drivers/soc/ti/knav_qmss_acc.c
index ef6f69db0..d2d48f280 100644
--- a/kernel/drivers/soc/ti/knav_qmss_acc.c
+++ b/kernel/drivers/soc/ti/knav_qmss_acc.c
@@ -261,6 +261,10 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range,
if (old && !new) {
dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n",
acc->name, acc->name);
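+ /* clear the affinity hint before freeing the IRQ */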
+ ret = irq_set_affinity_hint(irq, NULL);
+ if (ret)
+ dev_warn(range->kdev->dev,
+ "Failed to set IRQ affinity\n");
free_irq(irq, range);
}
@@ -482,8 +486,8 @@ struct knav_range_ops knav_acc_range_ops = {
* Return 0 on success or error
*/
int knav_init_acc_range(struct knav_device *kdev,
- struct device_node *node,
- struct knav_range_info *range)
+ struct device_node *node,
+ struct knav_range_info *range)
{
struct knav_acc_channel *acc;
struct knav_pdsp_info *pdsp;
@@ -526,6 +530,12 @@ int knav_init_acc_range(struct knav_device *kdev,
return -EINVAL;
}
+ if (!pdsp->started) {
+ dev_err(kdev->dev, "pdsp id %d not started for range %s\n",
+ info->pdsp_id, range->name);
+ return -ENODEV;
+ }
+
info->pdsp = pdsp;
channels = range->num_queues;
if (of_get_property(node, "multi-queue", NULL)) {
diff --git a/kernel/drivers/soc/ti/knav_qmss_queue.c b/kernel/drivers/soc/ti/knav_qmss_queue.c
index 6d8646db5..8c03a80b4 100644
--- a/kernel/drivers/soc/ti/knav_qmss_queue.c
+++ b/kernel/drivers/soc/ti/knav_qmss_queue.c
@@ -68,6 +68,12 @@ static DEFINE_MUTEX(knav_dev_lock);
idx < (kdev)->num_queues_in_use; \
idx++, inst = knav_queue_idx_to_inst(kdev, idx))
+/* All firmware file names are listed here, newest first. The array is
+ * searched from the start until a firmware file is found.
+ */
+const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
+
/**
* knav_queue_notify: qmss queue notfier call
*
@@ -1173,7 +1179,7 @@ static int knav_queue_setup_link_ram(struct knav_device *kdev)
block++;
if (!block->size)
- return 0;
+ continue;
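+ /* this block is optional; skip it and keep configuring the rest */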
dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n",
block->phys, block->virt, block->size);
@@ -1439,7 +1445,6 @@ static int knav_queue_init_pdsps(struct knav_device *kdev,
struct device *dev = kdev->dev;
struct knav_pdsp_info *pdsp;
struct device_node *child;
- int ret;
for_each_child_of_node(pdsps, child) {
pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
@@ -1448,17 +1453,6 @@ static int knav_queue_init_pdsps(struct knav_device *kdev,
return -ENOMEM;
}
pdsp->name = knav_queue_find_name(child);
- ret = of_property_read_string(child, "firmware",
- &pdsp->firmware);
- if (ret < 0 || !pdsp->firmware) {
- dev_err(dev, "unknown firmware for pdsp %s\n",
- pdsp->name);
- devm_kfree(dev, pdsp);
- continue;
- }
- dev_dbg(dev, "pdsp name %s fw name :%s\n", pdsp->name,
- pdsp->firmware);
-
pdsp->iram =
knav_queue_map_reg(kdev, child,
KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
@@ -1489,9 +1483,9 @@ static int knav_queue_init_pdsps(struct knav_device *kdev,
}
of_property_read_u32(child, "id", &pdsp->id);
list_add_tail(&pdsp->list, &kdev->pdsps);
- dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p, firmware %s\n",
+ dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
- pdsp->intd, pdsp->firmware);
+ pdsp->intd);
}
return 0;
}
@@ -1510,6 +1504,8 @@ static int knav_queue_stop_pdsp(struct knav_device *kdev,
dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
return ret;
}
+ pdsp->loaded = false;
+ pdsp->started = false;
return 0;
}
@@ -1518,14 +1514,29 @@ static int knav_queue_load_pdsp(struct knav_device *kdev,
{
int i, ret, fwlen;
const struct firmware *fw;
+ bool found = false;
u32 *fwdata;
- ret = request_firmware(&fw, pdsp->firmware, kdev->dev);
- if (ret) {
- dev_err(kdev->dev, "failed to get firmware %s for pdsp %s\n",
- pdsp->firmware, pdsp->name);
- return ret;
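+ /* try each firmware in the list, newest first, until one is found */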
+ for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
+ if (knav_acc_firmwares[i]) {
+ ret = request_firmware_direct(&fw,
+ knav_acc_firmwares[i],
+ kdev->dev);
+ if (!ret) {
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ dev_err(kdev->dev, "failed to get firmware for pdsp\n");
+ return -ENODEV;
}
+
+ dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
+ knav_acc_firmwares[i]);
+
writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
/* download the firmware */
fwdata = (u32 *)fw->data;
@@ -1583,16 +1594,24 @@ static int knav_queue_start_pdsps(struct knav_device *kdev)
int ret;
knav_queue_stop_pdsps(kdev);
- /* now load them all */
+ /* now load them all. We return success even if a pdsp
+ * fails to load, since the acc channels are optional and
+ * depend on firmware availability in the system. We set the
+ * loaded and started flags and, when initializing the acc
+ * range, check them and init the range only if the pdsp is
+ * started.
+ */
for_each_pdsp(kdev, pdsp) {
ret = knav_queue_load_pdsp(kdev, pdsp);
- if (ret < 0)
- return ret;
+ if (!ret)
+ pdsp->loaded = true;
}
for_each_pdsp(kdev, pdsp) {
- ret = knav_queue_start_pdsp(kdev, pdsp);
- WARN_ON(ret);
+ if (pdsp->loaded) {
+ ret = knav_queue_start_pdsp(kdev, pdsp);
+ if (!ret)
+ pdsp->started = true;
+ }
}
return 0;
}
diff --git a/kernel/drivers/soc/versatile/soc-realview.c b/kernel/drivers/soc/versatile/soc-realview.c
index 1a07bf540..e642c4540 100644
--- a/kernel/drivers/soc/versatile/soc-realview.c
+++ b/kernel/drivers/soc/versatile/soc-realview.c
@@ -142,4 +142,4 @@ static struct platform_driver realview_soc_driver = {
.of_match_table = realview_soc_of_match,
},
};
-module_platform_driver(realview_soc_driver);
+builtin_platform_driver(realview_soc_driver);