Diffstat (limited to 'kernel/drivers/bus')
-rw-r--r--  kernel/drivers/bus/Kconfig              116
-rw-r--r--  kernel/drivers/bus/Makefile              19
-rw-r--r--  kernel/drivers/bus/arm-cci.c           1535
-rw-r--r--  kernel/drivers/bus/arm-ccn.c           1393
-rw-r--r--  kernel/drivers/bus/brcmstb_gisb.c       403
-rw-r--r--  kernel/drivers/bus/imx-weim.c           217
-rw-r--r--  kernel/drivers/bus/mips_cdmm.c          716
-rw-r--r--  kernel/drivers/bus/mvebu-mbus.c        1209
-rw-r--r--  kernel/drivers/bus/omap-ocp2scp.c       123
-rw-r--r--  kernel/drivers/bus/omap_l3_noc.c        377
-rw-r--r--  kernel/drivers/bus/omap_l3_noc.h        501
-rw-r--r--  kernel/drivers/bus/omap_l3_smx.c        311
-rw-r--r--  kernel/drivers/bus/omap_l3_smx.h        338
-rw-r--r--  kernel/drivers/bus/simple-pm-bus.c       58
-rw-r--r--  kernel/drivers/bus/vexpress-config.c    202
15 files changed, 7518 insertions, 0 deletions
diff --git a/kernel/drivers/bus/Kconfig b/kernel/drivers/bus/Kconfig
new file mode 100644
index 000000000..a1d4af6df
--- /dev/null
+++ b/kernel/drivers/bus/Kconfig
@@ -0,0 +1,116 @@
+#
+# Bus Devices
+#
+
+menu "Bus devices"
+
+config ARM_CCI
+ bool
+
+config ARM_CCI400_COMMON
+ bool
+ select ARM_CCI
+
+config ARM_CCI400_PMU
+ bool "ARM CCI400 PMU support"
+ default y
+ depends on ARM || ARM64
+ depends on HW_PERF_EVENTS
+ select ARM_CCI400_COMMON
+ help
+ Support for PMU events monitoring on the ARM CCI cache coherent
+ interconnect.
+
+ If unsure, say Y
+
+config ARM_CCI400_PORT_CTRL
+ bool
+ depends on ARM && OF && CPU_V7
+ select ARM_CCI400_COMMON
+ help
+ Low level power management driver for CCI400 cache coherent
+ interconnect for ARM platforms.
+
+config ARM_CCN
+ bool "ARM CCN driver support"
+ depends on ARM || ARM64
+ depends on PERF_EVENTS
+ help
+ PMU (perf) driver supporting the ARM CCN (Cache Coherent Network)
+ interconnect.
+
+config BRCMSTB_GISB_ARB
+ bool "Broadcom STB GISB bus arbiter"
+ depends on ARM || MIPS
+ help
+ Driver for the Broadcom Set Top Box System-on-a-chip internal bus
+ arbiter. This driver provides timeout and target abort error handling
+ and internal bus master decoding.
+
+config IMX_WEIM
+ bool "Freescale EIM DRIVER"
+ depends on ARCH_MXC
+ help
+ Driver for i.MX WEIM controller.
+ The WEIM (Wireless External Interface Module) works like a bus.
+ You can attach many different devices to it, such as NOR flash or
+ OneNAND.
+
+config MIPS_CDMM
+ bool "MIPS Common Device Memory Map (CDMM) Driver"
+ depends on CPU_MIPSR2
+ help
+ Driver needed for the MIPS Common Device Memory Map bus in MIPS
+ cores. This bus is for per-CPU tightly coupled devices such as the
+ Fast Debug Channel (FDC).
+
+ For this to work, either your bootloader needs to enable the CDMM
+ region at an unused physical address on the boot CPU, or else your
+ platform code needs to implement mips_cdmm_phys_base() (see
+ asm/cdmm.h).
+
+config MVEBU_MBUS
+ bool
+ depends on PLAT_ORION
+ help
+ Driver needed for the MBus configuration on Marvell EBU SoCs
+ (Kirkwood, Dove, Orion5x, MV78XX0 and Armada 370/XP).
+
+config OMAP_INTERCONNECT
+ tristate "OMAP INTERCONNECT DRIVER"
+ depends on ARCH_OMAP2PLUS
+ help
+ Driver to enable error handling for the OMAP interconnect.
+
+config OMAP_OCP2SCP
+ tristate "OMAP OCP2SCP DRIVER"
+ depends on ARCH_OMAP2PLUS
+ help
+ Driver to enable the OCP2SCP module, which translates the OCP
+ interface protocol to the SCP protocol. In OMAP4, the USB PHY is
+ connected via OCP2SCP; in OMAP5, both the USB PHY and the SATA PHY
+ are connected via OCP2SCP.
+
+config SIMPLE_PM_BUS
+ bool "Simple Power-Managed Bus Driver"
+ depends on OF && PM
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ help
+ Driver for transparent busses that don't need a real driver, but
+ where the bus controller is part of a PM domain, or under the control
+ of a functional clock, and thus relies on runtime PM for managing
+ this PM domain and/or clock.
+ An example of such a bus controller is the Renesas Bus State
+ Controller (BSC, sometimes called "LBSC within Bus Bridge", or
+ "External Bus Interface") as found on several Renesas ARM SoCs.
+
+config VEXPRESS_CONFIG
+ bool "Versatile Express configuration bus"
+ default y if ARCH_VEXPRESS
+ depends on ARM || ARM64
+ depends on OF
+ select REGMAP
+ help
+ Platform configuration infrastructure for the ARM Ltd.
+ Versatile Express.
+endmenu
diff --git a/kernel/drivers/bus/Makefile b/kernel/drivers/bus/Makefile
new file mode 100644
index 000000000..790e7b933
--- /dev/null
+++ b/kernel/drivers/bus/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for the bus drivers.
+#
+
+# Interconnect bus drivers for ARM platforms
+obj-$(CONFIG_ARM_CCI) += arm-cci.o
+obj-$(CONFIG_ARM_CCN) += arm-ccn.o
+
+obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
+obj-$(CONFIG_IMX_WEIM) += imx-weim.o
+obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
+obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
+
+# Interconnect bus driver for OMAP SoCs.
+obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
+
+obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
+obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
+obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
diff --git a/kernel/drivers/bus/arm-cci.c b/kernel/drivers/bus/arm-cci.c
new file mode 100644
index 000000000..5340604b2
--- /dev/null
+++ b/kernel/drivers/bus/arm-cci.c
@@ -0,0 +1,1535 @@
+/*
+ * CCI cache coherent interconnect driver
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/arm-cci.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+
+static void __iomem *cci_ctrl_base;
+static unsigned long cci_ctrl_phys;
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
+struct cci_nb_ports {
+ unsigned int nb_ace;
+ unsigned int nb_ace_lite;
+};
+
+static const struct cci_nb_ports cci400_ports = {
+ .nb_ace = 2,
+ .nb_ace_lite = 3
+};
+
+#define CCI400_PORTS_DATA (&cci400_ports)
+#else
+#define CCI400_PORTS_DATA (NULL)
+#endif
+
+static const struct of_device_id arm_cci_matches[] = {
+#ifdef CONFIG_ARM_CCI400_COMMON
+ {.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
+#endif
+ {},
+};
+
+#ifdef CONFIG_ARM_CCI400_PMU
+
+#define DRIVER_NAME "CCI-400"
+#define DRIVER_NAME_PMU DRIVER_NAME " PMU"
+
+#define CCI_PMCR 0x0100
+#define CCI_PID2 0x0fe8
+
+#define CCI_PMCR_CEN 0x00000001
+#define CCI_PMCR_NCNT_MASK 0x0000f800
+#define CCI_PMCR_NCNT_SHIFT 11
+
+#define CCI_PID2_REV_MASK 0xf0
+#define CCI_PID2_REV_SHIFT 4
+
+#define CCI_PMU_EVT_SEL 0x000
+#define CCI_PMU_CNTR 0x004
+#define CCI_PMU_CNTR_CTRL 0x008
+#define CCI_PMU_OVRFLW 0x00c
+
+#define CCI_PMU_OVRFLW_FLAG 1
+
+#define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K)
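+/*
+ * Each counter gets its own 4K register page, e.g. (illustrative)
+ * counter 1's CCI_PMU_EVT_SEL lives at offset 0x1000 + 0x000.
+ */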
+
+#define CCI_PMU_CNTR_MASK ((1ULL << 32) - 1)
+
+#define CCI_PMU_EVENT_MASK 0xffUL
+#define CCI_PMU_EVENT_SOURCE(event) (((event) >> 5) & 0x7)
+#define CCI_PMU_EVENT_CODE(event) ((event) & 0x1f)
+
+#define CCI_PMU_MAX_HW_EVENTS 5 /* CCI PMU has 4 counters + 1 cycle counter */
+
+/* Types of interfaces that can generate events */
+enum {
+ CCI_IF_SLAVE,
+ CCI_IF_MASTER,
+ CCI_IF_MAX,
+};
+
+struct event_range {
+ u32 min;
+ u32 max;
+};
+
+struct cci_pmu_hw_events {
+ struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
+ unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
+ raw_spinlock_t pmu_lock;
+};
+
+struct cci_pmu_model {
+ char *name;
+ struct event_range event_ranges[CCI_IF_MAX];
+};
+
+static struct cci_pmu_model cci_pmu_models[];
+
+struct cci_pmu {
+ void __iomem *base;
+ struct pmu pmu;
+ int nr_irqs;
+ int irqs[CCI_PMU_MAX_HW_EVENTS];
+ unsigned long active_irqs;
+ const struct cci_pmu_model *model;
+ struct cci_pmu_hw_events hw_events;
+ struct platform_device *plat_device;
+ int num_events;
+ atomic_t active_events;
+ struct mutex reserve_mutex;
+ cpumask_t cpus;
+};
+static struct cci_pmu *pmu;
+
+#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
+
+/* Port ids */
+#define CCI_PORT_S0 0
+#define CCI_PORT_S1 1
+#define CCI_PORT_S2 2
+#define CCI_PORT_S3 3
+#define CCI_PORT_S4 4
+#define CCI_PORT_M0 5
+#define CCI_PORT_M1 6
+#define CCI_PORT_M2 7
+
+#define CCI_REV_R0 0
+#define CCI_REV_R1 1
+#define CCI_REV_R1_PX 5
+
+/*
+ * Instead of an event id to monitor CCI cycles, a dedicated counter is
+ * provided. Use 0xff to represent CCI cycles and hope that no future revisions
+ * make use of this event in hardware.
+ */
+enum cci400_perf_events {
+ CCI_PMU_CYCLES = 0xff
+};
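+
+/*
+ * With the perf tool the cycle pseudo-event can be requested as a raw
+ * config value, e.g. (hypothetical invocation, assuming the PMU registers
+ * itself as "CCI_400"):
+ *
+ *	perf stat -a -e CCI_400/config=0xff/ sleep 1
+ */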
+
+#define CCI_PMU_CYCLE_CNTR_IDX 0
+#define CCI_PMU_CNTR0_IDX 1
+#define CCI_PMU_CNTR_LAST(cci_pmu) (CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
+
+/*
+ * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
+ * ports and bits 4:0 are event codes. There are different event codes
+ * associated with each port type.
+ *
+ * Additionally, the range of events associated with the port types changed
+ * between Rev0 and Rev1.
+ *
+ * The constants below define the range of valid codes for each port type for
+ * the different revisions and are used to validate the event to be monitored.
+ */
+
+#define CCI_REV_R0_SLAVE_PORT_MIN_EV 0x00
+#define CCI_REV_R0_SLAVE_PORT_MAX_EV 0x13
+#define CCI_REV_R0_MASTER_PORT_MIN_EV 0x14
+#define CCI_REV_R0_MASTER_PORT_MAX_EV 0x1a
+
+#define CCI_REV_R1_SLAVE_PORT_MIN_EV 0x00
+#define CCI_REV_R1_SLAVE_PORT_MAX_EV 0x14
+#define CCI_REV_R1_MASTER_PORT_MIN_EV 0x00
+#define CCI_REV_R1_MASTER_PORT_MAX_EV 0x11
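+
+/*
+ * Worked example (illustrative): hw_event 0xb4 encodes master interface
+ * M0 (0xb4 >> 5 == 0x5 == CCI_PORT_M0) with event code 0x14
+ * (0xb4 & 0x1f) -- valid on Rev0, but out of range on Rev1 (max 0x11).
+ */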
+
+static int pmu_validate_hw_event(unsigned long hw_event)
+{
+ u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
+ u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
+ int if_type;
+
+ if (hw_event & ~CCI_PMU_EVENT_MASK)
+ return -ENOENT;
+
+ switch (ev_source) {
+ case CCI_PORT_S0:
+ case CCI_PORT_S1:
+ case CCI_PORT_S2:
+ case CCI_PORT_S3:
+ case CCI_PORT_S4:
+ /* Slave Interface */
+ if_type = CCI_IF_SLAVE;
+ break;
+ case CCI_PORT_M0:
+ case CCI_PORT_M1:
+ case CCI_PORT_M2:
+ /* Master Interface */
+ if_type = CCI_IF_MASTER;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (ev_code >= pmu->model->event_ranges[if_type].min &&
+ ev_code <= pmu->model->event_ranges[if_type].max)
+ return hw_event;
+
+ return -ENOENT;
+}
+
+static int probe_cci_revision(void)
+{
+ int rev;
+ rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
+ rev >>= CCI_PID2_REV_SHIFT;
+
+ if (rev < CCI_REV_R1_PX)
+ return CCI_REV_R0;
+ else
+ return CCI_REV_R1;
+}
+
+static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
+{
+ if (platform_has_secure_cci_access())
+ return &cci_pmu_models[probe_cci_revision()];
+ return NULL;
+}
+
+static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
+{
+ return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
+ idx <= CCI_PMU_CNTR_LAST(cci_pmu);
+}
+
+static u32 pmu_read_register(int idx, unsigned int offset)
+{
+ return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_write_register(u32 value, int idx, unsigned int offset)
+{
+ writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_disable_counter(int idx)
+{
+ pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_enable_counter(int idx)
+{
+ pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_set_event(int idx, unsigned long event)
+{
+ pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
+}
+
+static u32 pmu_get_max_counters(void)
+{
+ u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
+ CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
+
+ /* add 1 for cycle counter */
+ return n_cnts + 1;
+}
+
+static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
+{
+ struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+ struct hw_perf_event *hw_event = &event->hw;
+ unsigned long cci_event = hw_event->config_base;
+ int idx;
+
+ if (cci_event == CCI_PMU_CYCLES) {
+ if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
+ return -EAGAIN;
+
+ return CCI_PMU_CYCLE_CNTR_IDX;
+ }
+
+ for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
+ if (!test_and_set_bit(idx, hw->used_mask))
+ return idx;
+
+ /* No counters available */
+ return -EAGAIN;
+}
+
+static int pmu_map_event(struct perf_event *event)
+{
+ int mapping;
+ unsigned long config = event->attr.config;
+
+ if (event->attr.type < PERF_TYPE_MAX)
+ return -ENOENT;
+
+ if (config == CCI_PMU_CYCLES)
+ mapping = config;
+ else
+ mapping = pmu_validate_hw_event(config);
+
+ return mapping;
+}
+
+static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
+{
+ int i;
+ struct platform_device *pmu_device = cci_pmu->plat_device;
+
+ if (unlikely(!pmu_device))
+ return -ENODEV;
+
+ if (pmu->nr_irqs < 1) {
+ dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Register all available CCI PMU interrupts. In the interrupt handler
+ * we iterate over the counters, checking for the interrupt source (the
+ * overflowing counter) and clearing it.
+ *
+ * This allows handling of non-unique (shared) interrupts for the counters.
+ */
+ for (i = 0; i < pmu->nr_irqs; i++) {
+ int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED,
+ "arm-cci-pmu", cci_pmu);
+ if (err) {
+ dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
+ pmu->irqs[i]);
+ return err;
+ }
+
+ set_bit(i, &pmu->active_irqs);
+ }
+
+ return 0;
+}
+
+static void pmu_free_irq(struct cci_pmu *cci_pmu)
+{
+ int i;
+
+ for (i = 0; i < pmu->nr_irqs; i++) {
+ if (!test_and_clear_bit(i, &pmu->active_irqs))
+ continue;
+
+ free_irq(pmu->irqs[i], cci_pmu);
+ }
+}
+
+static u32 pmu_read_counter(struct perf_event *event)
+{
+ struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+ u32 value;
+
+ if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return 0;
+ }
+ value = pmu_read_register(idx, CCI_PMU_CNTR);
+
+ return value;
+}
+
+static void pmu_write_counter(struct perf_event *event, u32 value)
+{
+ struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+
+ if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ else
+ pmu_write_register(value, idx, CCI_PMU_CNTR);
+}
+
+static u64 pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 delta, prev_raw_count, new_raw_count;
+
+ do {
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = pmu_read_counter(event);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count);
+
+ delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
+
+ local64_add(delta, &event->count);
+
+ return new_raw_count;
+}
+
+static void pmu_read(struct perf_event *event)
+{
+ pmu_event_update(event);
+}
+
+void pmu_event_set_period(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ /*
+ * The CCI PMU counters have a period of 2^32. To account for the
+ * possibility of extreme interrupt latency we program for a period of
+ * half that. Hopefully we can handle the interrupt before another 2^31
+ * events occur and the counter overtakes its previous value.
+ */
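+ /*
+ * E.g. (illustrative): at an event rate of 10^9 events/s this leaves
+ * roughly two seconds to service the overflow interrupt.
+ */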
+ u64 val = 1ULL << 31;
+ local64_set(&hwc->prev_count, val);
+ pmu_write_counter(event, val);
+}
+
+static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
+{
+ unsigned long flags;
+ struct cci_pmu *cci_pmu = dev;
+ struct cci_pmu_hw_events *events = &pmu->hw_events;
+ int idx, handled = IRQ_NONE;
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ /*
+ * Iterate over counters and update the corresponding perf events.
+ * This should work regardless of whether we have per-counter overflow
+ * interrupt or a combined overflow interrupt.
+ */
+ for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
+ struct perf_event *event = events->events[idx];
+ struct hw_perf_event *hw_counter;
+
+ if (!event)
+ continue;
+
+ hw_counter = &event->hw;
+
+ /* Did this counter overflow? */
+ if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) &
+ CCI_PMU_OVRFLW_FLAG))
+ continue;
+
+ pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);
+
+ pmu_event_update(event);
+ pmu_event_set_period(event);
+ handled = IRQ_HANDLED;
+ }
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
+{
+ int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
+ if (ret) {
+ pmu_free_irq(cci_pmu);
+ return ret;
+ }
+ return 0;
+}
+
+static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
+{
+ pmu_free_irq(cci_pmu);
+}
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+ atomic_t *active_events = &cci_pmu->active_events;
+ struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
+
+ if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
+ cci_pmu_put_hw(cci_pmu);
+ mutex_unlock(reserve_mutex);
+ }
+}
+
+static void cci_pmu_enable(struct pmu *pmu)
+{
+ struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
+ struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+ int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_events);
+ unsigned long flags;
+ u32 val;
+
+ if (!enabled)
+ return;
+
+ raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
+
+ /* Enable all the PMU counters. */
+ val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
+ writel(val, cci_ctrl_base + CCI_PMCR);
+ raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
+}
+
+static void cci_pmu_disable(struct pmu *pmu)
+{
+ struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
+ struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
+
+ /* Disable all the PMU counters. */
+ val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
+ writel(val, cci_ctrl_base + CCI_PMCR);
+ raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
+}
+
+static void cci_pmu_start(struct perf_event *event, int pmu_flags)
+{
+ struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+ struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ unsigned long flags;
+
+ /*
+ * To handle interrupt latency, we always reprogram the period
+ * regardless of PERF_EF_RELOAD.
+ */
+ if (pmu_flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
+
+ /* Configure the event to count, unless you are counting cycles */
+ if (idx != CCI_PMU_CYCLE_CNTR_IDX)
+ pmu_set_event(idx, hwc->config_base);
+
+ pmu_event_set_period(event);
+ pmu_enable_counter(idx);
+
+ raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
+}
+
+static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
+{
+ struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return;
+ }
+
+ /*
+ * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
+ * cci_pmu_start()
+ */
+ pmu_disable_counter(idx);
+ pmu_event_update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int cci_pmu_add(struct perf_event *event, int flags)
+{
+ struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+ struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ int err = 0;
+
+ perf_pmu_disable(event->pmu);
+
+ /* If we don't have a space for the counter then finish early. */
+ idx = pmu_get_event_idx(hw_events, event);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ event->hw.idx = idx;
+ hw_events->events[idx] = event;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ if (flags & PERF_EF_START)
+ cci_pmu_start(event, PERF_EF_RELOAD);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ perf_pmu_enable(event->pmu);
+ return err;
+}
+
+static void cci_pmu_del(struct perf_event *event, int flags)
+{
+ struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+ struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ cci_pmu_stop(event, PERF_EF_UPDATE);
+ hw_events->events[idx] = NULL;
+ clear_bit(idx, hw_events->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static int
+validate_event(struct pmu *cci_pmu,
+ struct cci_pmu_hw_events *hw_events,
+ struct perf_event *event)
+{
+ if (is_software_event(event))
+ return 1;
+
+ /*
+ * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+ * core perf code won't check that the pmu->ctx == leader->ctx
+ * until after pmu->event_init(event).
+ */
+ if (event->pmu != cci_pmu)
+ return 0;
+
+ if (event->state < PERF_EVENT_STATE_OFF)
+ return 1;
+
+ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+ return 1;
+
+ return pmu_get_event_idx(hw_events, event) >= 0;
+}
+
+static int
+validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct cci_pmu_hw_events fake_pmu = {
+ /*
+ * Initialise the fake PMU. We only need to populate the
+ * used_mask for the purposes of validation.
+ */
+ .used_mask = { 0 },
+ };
+
+ if (!validate_event(event->pmu, &fake_pmu, leader))
+ return -EINVAL;
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (!validate_event(event->pmu, &fake_pmu, sibling))
+ return -EINVAL;
+ }
+
+ if (!validate_event(event->pmu, &fake_pmu, event))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+__hw_perf_event_init(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int mapping;
+
+ mapping = pmu_map_event(event);
+
+ if (mapping < 0) {
+ pr_debug("event %x:%llx not supported\n", event->attr.type,
+ event->attr.config);
+ return mapping;
+ }
+
+ /*
+ * We don't assign an index until we actually place the event onto
+ * hardware. Use -1 to signify that we haven't decided where to put it
+ * yet.
+ */
+ hwc->idx = -1;
+ hwc->config_base = 0;
+ hwc->config = 0;
+ hwc->event_base = 0;
+
+ /*
+ * Store the event encoding into the config_base field.
+ */
+ hwc->config_base |= (unsigned long)mapping;
+
+ /*
+ * Limit the sample_period to half of the counter width. That way, the
+ * new counter value is far less likely to overtake the previous one
+ * unless you have some serious IRQ latency issues.
+ */
+ hwc->sample_period = CCI_PMU_CNTR_MASK >> 1;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+
+ if (event->group_leader != event) {
+ if (validate_group(event) != 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cci_pmu_event_init(struct perf_event *event)
+{
+ struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
+ atomic_t *active_events = &cci_pmu->active_events;
+ int err = 0;
+ int cpu;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* Shared by all CPUs, no meaningful state to sample */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+
+ /* We have no filtering of any kind */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest)
+ return -EINVAL;
+
+ /*
+ * Following the example set by other "uncore" PMUs, we accept any CPU
+ * and rewrite its affinity dynamically rather than having perf core
+ * handle cpu == -1 and pid == -1 for this case.
+ *
+ * The perf core will pin online CPUs for the duration of this call and
+ * the event being installed into its context, so the PMU's CPU can't
+ * change under our feet.
+ */
+ cpu = cpumask_first(&cci_pmu->cpus);
+ if (event->cpu < 0 || cpu < 0)
+ return -EINVAL;
+ event->cpu = cpu;
+
+ event->destroy = hw_perf_event_destroy;
+ if (!atomic_inc_not_zero(active_events)) {
+ mutex_lock(&cci_pmu->reserve_mutex);
+ if (atomic_read(active_events) == 0)
+ err = cci_pmu_get_hw(cci_pmu);
+ if (!err)
+ atomic_inc(active_events);
+ mutex_unlock(&cci_pmu->reserve_mutex);
+ }
+ if (err)
+ return err;
+
+ err = __hw_perf_event_init(event);
+ if (err)
+ hw_perf_event_destroy(event);
+
+ return err;
+}
+
+static ssize_t pmu_attr_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
+ cpumask_pr_args(&pmu->cpus));
+ buf[n++] = '\n';
+ buf[n] = '\0';
+ return n;
+}
+
+static DEVICE_ATTR(cpumask, S_IRUGO, pmu_attr_cpumask_show, NULL);
+
+static struct attribute *pmu_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group pmu_attr_group = {
+ .attrs = pmu_attrs,
+};
+
+static const struct attribute_group *pmu_attr_groups[] = {
+ &pmu_attr_group,
+ NULL
+};
+
+static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
+{
+ char *name = cci_pmu->model->name;
+ cci_pmu->pmu = (struct pmu) {
+ .name = cci_pmu->model->name,
+ .task_ctx_nr = perf_invalid_context,
+ .pmu_enable = cci_pmu_enable,
+ .pmu_disable = cci_pmu_disable,
+ .event_init = cci_pmu_event_init,
+ .add = cci_pmu_add,
+ .del = cci_pmu_del,
+ .start = cci_pmu_start,
+ .stop = cci_pmu_stop,
+ .read = pmu_read,
+ .attr_groups = pmu_attr_groups,
+ };
+
+ cci_pmu->plat_device = pdev;
+ cci_pmu->num_events = pmu_get_max_counters();
+
+ return perf_pmu_register(&cci_pmu->pmu, name, -1);
+}
+
+static int cci_pmu_cpu_notifier(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (long)hcpu;
+ unsigned int target;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_DOWN_PREPARE:
+ if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))
+ break;
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids) /* UP, last CPU */
+ break;
+ /*
+ * TODO: migrate context once core races on event->ctx have
+ * been fixed.
+ */
+ cpumask_set_cpu(target, &pmu->cpus);
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cci_pmu_cpu_nb = {
+ .notifier_call = cci_pmu_cpu_notifier,
+ /*
+ * to migrate uncore events, our notifier should be executed
+ * before perf core's notifier.
+ */
+ .priority = CPU_PRI_PERF + 1,
+};
+
+static struct cci_pmu_model cci_pmu_models[] = {
+ [CCI_REV_R0] = {
+ .name = "CCI_400",
+ .event_ranges = {
+ [CCI_IF_SLAVE] = {
+ CCI_REV_R0_SLAVE_PORT_MIN_EV,
+ CCI_REV_R0_SLAVE_PORT_MAX_EV,
+ },
+ [CCI_IF_MASTER] = {
+ CCI_REV_R0_MASTER_PORT_MIN_EV,
+ CCI_REV_R0_MASTER_PORT_MAX_EV,
+ },
+ },
+ },
+ [CCI_REV_R1] = {
+ .name = "CCI_400_r1",
+ .event_ranges = {
+ [CCI_IF_SLAVE] = {
+ CCI_REV_R1_SLAVE_PORT_MIN_EV,
+ CCI_REV_R1_SLAVE_PORT_MAX_EV,
+ },
+ [CCI_IF_MASTER] = {
+ CCI_REV_R1_MASTER_PORT_MIN_EV,
+ CCI_REV_R1_MASTER_PORT_MAX_EV,
+ },
+ },
+ },
+};
+
+static const struct of_device_id arm_cci_pmu_matches[] = {
+ {
+ .compatible = "arm,cci-400-pmu",
+ .data = NULL,
+ },
+ {
+ .compatible = "arm,cci-400-pmu,r0",
+ .data = &cci_pmu_models[CCI_REV_R0],
+ },
+ {
+ .compatible = "arm,cci-400-pmu,r1",
+ .data = &cci_pmu_models[CCI_REV_R1],
+ },
+ {},
+};
+
+static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev)
+{
+ const struct of_device_id *match = of_match_node(arm_cci_pmu_matches,
+ pdev->dev.of_node);
+ if (!match)
+ return NULL;
+ if (match->data)
+ return match->data;
+
+ dev_warn(&pdev->dev, "DEPRECATED compatible property,"
+ "requires secure access to CCI registers");
+ return probe_cci_model(pdev);
+}
+
+static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++)
+ if (irq == irqs[i])
+ return true;
+
+ return false;
+}
+
+static int cci_pmu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int i, ret, irq;
+ const struct cci_pmu_model *model;
+
+ model = get_cci_model(pdev);
+ if (!model) {
+ dev_warn(&pdev->dev, "CCI PMU version not supported\n");
+ return -ENODEV;
+ }
+
+ pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ pmu->model = model;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmu->base))
+ return PTR_ERR(pmu->base);
+
+ /*
+ * CCI PMU has 5 overflow signals, one per counter, but some may be tied
+ * together to a common interrupt.
+ */
+ pmu->nr_irqs = 0;
+ for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ break;
+
+ if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs))
+ continue;
+
+ pmu->irqs[pmu->nr_irqs++] = irq;
+ }
+
+ /*
+ * Ensure that the device tree has as many interrupts as the number
+ * of counters.
+ */
+ if (i < CCI_PMU_MAX_HW_EVENTS) {
+ dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n",
+ i, CCI_PMU_MAX_HW_EVENTS);
+ return -EINVAL;
+ }
+
+ raw_spin_lock_init(&pmu->hw_events.pmu_lock);
+ mutex_init(&pmu->reserve_mutex);
+ atomic_set(&pmu->active_events, 0);
+ cpumask_set_cpu(smp_processor_id(), &pmu->cpus);
+
+ ret = register_cpu_notifier(&cci_pmu_cpu_nb);
+ if (ret)
+ return ret;
+
+ ret = cci_pmu_init(pmu, pdev);
+ if (ret)
+ return ret;
+
+ pr_info("ARM %s PMU driver probed", pmu->model->name);
+ return 0;
+}
+
+static int cci_platform_probe(struct platform_device *pdev)
+{
+ if (!cci_probed())
+ return -ENODEV;
+
+ return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+}
+
+static struct platform_driver cci_pmu_driver = {
+ .driver = {
+ .name = DRIVER_NAME_PMU,
+ .of_match_table = arm_cci_pmu_matches,
+ },
+ .probe = cci_pmu_probe,
+};
+
+static struct platform_driver cci_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = arm_cci_matches,
+ },
+ .probe = cci_platform_probe,
+};
+
+static int __init cci_platform_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&cci_pmu_driver);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&cci_platform_driver);
+}
+
+#else /* !CONFIG_ARM_CCI400_PMU */
+
+static int __init cci_platform_init(void)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ARM_CCI400_PMU */
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
+
+#define CCI_PORT_CTRL 0x0
+#define CCI_CTRL_STATUS 0xc
+
+#define CCI_ENABLE_SNOOP_REQ 0x1
+#define CCI_ENABLE_DVM_REQ 0x2
+#define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
+
+enum cci_ace_port_type {
+ ACE_INVALID_PORT = 0x0,
+ ACE_PORT,
+ ACE_LITE_PORT,
+};
+
+struct cci_ace_port {
+ void __iomem *base;
+ unsigned long phys;
+ enum cci_ace_port_type type;
+ struct device_node *dn;
+};
+
+static struct cci_ace_port *ports;
+static unsigned int nb_cci_ports;
+
+struct cpu_port {
+ u64 mpidr;
+ u32 port;
+};
+
+/*
+ * Use the port MSB as valid flag, shift can be made dynamic
+ * by computing number of bits required for port indexes.
+ * Code disabling CCI cpu ports runs with D-cache invalidated
+ * and SCTLR bit clear so data accesses must be kept to a minimum
+ * to improve performance; for now shift is left static to
+ * avoid one more data access while disabling the CCI port.
+ */
+#define PORT_VALID_SHIFT 31
+#define PORT_VALID (0x1 << PORT_VALID_SHIFT)
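+/* E.g. (illustrative): port index 3 is stashed as 0x80000003. */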
+
+static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
+{
+ port->port = PORT_VALID | index;
+ port->mpidr = mpidr;
+}
+
+static inline bool cpu_port_is_valid(struct cpu_port *port)
+{
+ return !!(port->port & PORT_VALID);
+}
+
+static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
+{
+ return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
+}
+
+static struct cpu_port cpu_port[NR_CPUS];
+
+/**
+ * __cci_ace_get_port - Function to retrieve the port index connected to
+ * a cpu or device.
+ *
+ * @dn: device node of the device to look-up
+ * @type: port type
+ *
+ * Return value:
+ * - CCI port index if success
+ * - -ENODEV if failure
+ */
+static int __cci_ace_get_port(struct device_node *dn, int type)
+{
+ int i;
+ bool ace_match;
+ struct device_node *cci_portn;
+
+ cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
+ for (i = 0; i < nb_cci_ports; i++) {
+ ace_match = ports[i].type == type;
+ if (ace_match && cci_portn == ports[i].dn)
+ return i;
+ }
+ return -ENODEV;
+}
+
+int cci_ace_get_port(struct device_node *dn)
+{
+ return __cci_ace_get_port(dn, ACE_LITE_PORT);
+}
+EXPORT_SYMBOL_GPL(cci_ace_get_port);
+
+static void cci_ace_init_ports(void)
+{
+ int port, cpu;
+ struct device_node *cpun;
+
+ /*
+ * Port index look-up speeds up the function disabling ports by CPU,
+ * since the logical to port index mapping is done once and does
+ * not change after system boot.
+ * The stashed index array is initialized for all possible CPUs
+ * at probe time.
+ */
+ for_each_possible_cpu(cpu) {
+ /* too early to use cpu->of_node */
+ cpun = of_get_cpu_node(cpu, NULL);
+
+ if (WARN(!cpun, "Missing cpu device node\n"))
+ continue;
+
+ port = __cci_ace_get_port(cpun, ACE_PORT);
+ if (port < 0)
+ continue;
+
+ init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
+ }
+
+ for_each_possible_cpu(cpu) {
+ WARN(!cpu_port_is_valid(&cpu_port[cpu]),
+ "CPU %u does not have an associated CCI port\n",
+ cpu);
+ }
+}
+/*
+ * Functions to enable/disable a CCI interconnect slave port
+ *
+ * They are called by low-level power management code to disable slave
+ * interface snoops and DVM broadcast.
+ * Since they may execute with cache data allocation disabled and after
+ * the caches have been cleaned and invalidated, the functions provide no
+ * explicit locking: they may run with the D-cache disabled, so normal
+ * cacheable kernel locks based on ldrex/strex may not work.
+ * Locking has to be provided by BSP implementations to ensure proper
+ * operations.
+ */
+
+/**
+ * cci_port_control() - function to control a CCI port
+ *
+ * @port: index of the port to setup
+ * @enable: if true enables the port, if false disables it
+ */
+static void notrace cci_port_control(unsigned int port, bool enable)
+{
+ void __iomem *base = ports[port].base;
+
+ writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
+ /*
+ * This function is called from power down procedures
+ * and must not execute any instruction that might
+ * cause the processor to be put in a quiescent state
+ * (eg wfi). Hence, cpu_relax() can not be added to this
+ * read loop to optimize power, since it might hide possibly
+ * disruptive operations.
+ */
+ while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
+ ;
+}
+
+/**
+ * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
+ * reference
+ *
+ * @mpidr: mpidr of the CPU whose CCI port should be disabled
+ *
+ * Disabling a CCI port for a CPU implies disabling the CCI port
+ * controlling that CPU cluster. Code disabling CPU CCI ports
+ * must make sure that the CPU running the code is the last active CPU
+ * in the cluster ie all other CPUs are quiescent in a low power state.
+ *
+ * Return:
+ * 0 on success
+ * -ENODEV on port look-up failure
+ */
+int notrace cci_disable_port_by_cpu(u64 mpidr)
+{
+ int cpu;
+ bool is_valid;
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ is_valid = cpu_port_is_valid(&cpu_port[cpu]);
+ if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
+ cci_port_control(cpu_port[cpu].port, false);
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
+
+/**
+ * cci_enable_port_for_self() - enable a CCI port for calling CPU
+ *
+ * Enabling a CCI port for the calling CPU implies enabling the CCI
+ * port controlling that CPU's cluster. Caller must make sure that the
+ * CPU running the code is the first active CPU in the cluster and all
+ * other CPUs are quiescent in a low power state or waiting for this CPU
+ * to complete the CCI initialization.
+ *
+ * Because this is called when the MMU is still off and with no stack,
+ * the code must be position independent and ideally rely on callee
+ * clobbered registers only. To achieve this we must code this function
+ * entirely in assembler.
+ *
+ * On success this returns with the proper CCI port enabled. In case of
+ * any failure this never returns as the inability to enable the CCI is
+ * fatal and there is no possible recovery at this stage.
+ */
+asmlinkage void __naked cci_enable_port_for_self(void)
+{
+ asm volatile ("\n"
+" .arch armv7-a\n"
+" mrc p15, 0, r0, c0, c0, 5 @ get MPIDR value \n"
+" and r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
+" adr r1, 5f \n"
+" ldr r2, [r1] \n"
+" add r1, r1, r2 @ &cpu_port \n"
+" add ip, r1, %[sizeof_cpu_port] \n"
+
+ /* Loop over the cpu_port array looking for a matching MPIDR */
+"1: ldr r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
+" cmp r2, r0 @ compare MPIDR \n"
+" bne 2f \n"
+
+ /* Found a match, now test port validity */
+" ldr r3, [r1, %[offsetof_cpu_port_port]] \n"
+" tst r3, #"__stringify(PORT_VALID)" \n"
+" bne 3f \n"
+
+ /* no match, loop with the next cpu_port entry */
+"2: add r1, r1, %[sizeof_struct_cpu_port] \n"
+" cmp r1, ip @ done? \n"
+" blo 1b \n"
+
+ /* CCI port not found -- cheaply try to stall this CPU */
+"cci_port_not_found: \n"
+" wfi \n"
+" wfe \n"
+" b cci_port_not_found \n"
+
+ /* Use matched port index to look up the corresponding ports entry */
+"3: bic r3, r3, #"__stringify(PORT_VALID)" \n"
+" adr r0, 6f \n"
+" ldmia r0, {r1, r2} \n"
+" sub r1, r1, r0 @ virt - phys \n"
+" ldr r0, [r0, r2] @ *(&ports) \n"
+" mov r2, %[sizeof_struct_ace_port] \n"
+" mla r0, r2, r3, r0 @ &ports[index] \n"
+" sub r0, r0, r1 @ virt_to_phys() \n"
+
+ /* Enable the CCI port */
+" ldr r0, [r0, %[offsetof_port_phys]] \n"
+" mov r3, %[cci_enable_req]\n"
+" str r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"
+
+ /* poll the status reg for completion */
+" adr r1, 7f \n"
+" ldr r0, [r1] \n"
+" ldr r0, [r0, r1] @ cci_ctrl_base \n"
+"4: ldr r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
+" tst r1, %[cci_control_status_bits] \n"
+" bne 4b \n"
+
+" mov r0, #0 \n"
+" bx lr \n"
+
+" .align 2 \n"
+"5: .word cpu_port - . \n"
+"6: .word . \n"
+" .word ports - 6b \n"
+"7: .word cci_ctrl_phys - . \n"
+ : :
+ [sizeof_cpu_port] "i" (sizeof(cpu_port)),
+ [cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
+ [cci_control_status_bits] "i" cpu_to_le32(1),
+#ifndef __ARMEB__
+ [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
+#else
+ [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
+#endif
+ [offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
+ [sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
+ [sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
+ [offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
+
+ unreachable();
+}
+
+/**
+ * __cci_control_port_by_device() - function to control a CCI port by device
+ * reference
+ *
+ * @dn: device node pointer of the device whose CCI port should be
+ * controlled
+ * @enable: if true enables the port, if false disables it
+ *
+ * Return:
+ * 0 on success
+ * -ENODEV on port look-up failure
+ */
+int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
+{
+ int port;
+
+ if (!dn)
+ return -ENODEV;
+
+ port = __cci_ace_get_port(dn, ACE_LITE_PORT);
+ if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",
+ dn->full_name))
+ return -ENODEV;
+ cci_port_control(port, enable);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__cci_control_port_by_device);
+
+/**
+ * __cci_control_port_by_index() - function to control a CCI port by port index
+ *
+ * @port: port index previously retrieved with cci_ace_get_port()
+ * @enable: if true enables the port, if false disables it
+ *
+ * Return:
+ * 0 on success
+ * -ENODEV on port index out of range
+ * -EPERM if operation carried out on an ACE PORT
+ */
+int notrace __cci_control_port_by_index(u32 port, bool enable)
+{
+ if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
+ return -ENODEV;
+ /*
+ * CCI control for ports connected to CPUS is extremely fragile
+ * and must be made to go through a specific and controlled
+ * interface (ie cci_disable_port_by_cpu()); control by general purpose
+ * indexing is therefore disabled for ACE ports.
+ */
+ if (ports[port].type == ACE_PORT)
+ return -EPERM;
+
+ cci_port_control(port, enable);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
+
+static const struct of_device_id arm_cci_ctrl_if_matches[] = {
+ {.compatible = "arm,cci-400-ctrl-if", },
+ {},
+};
+
+static int cci_probe_ports(struct device_node *np)
+{
+ struct cci_nb_ports const *cci_config;
+ int ret, i, nb_ace = 0, nb_ace_lite = 0;
+ struct device_node *cp;
+ struct resource res;
+ const char *match_str;
+ bool is_ace;
+
+
+ cci_config = of_match_node(arm_cci_matches, np)->data;
+ if (!cci_config)
+ return -ENODEV;
+
+ nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
+
+ ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
+ if (!ports)
+ return -ENOMEM;
+
+ for_each_child_of_node(np, cp) {
+ if (!of_match_node(arm_cci_ctrl_if_matches, cp))
+ continue;
+
+ i = nb_ace + nb_ace_lite;
+
+ if (i >= nb_cci_ports)
+ break;
+
+ if (of_property_read_string(cp, "interface-type",
+ &match_str)) {
+ WARN(1, "node %s missing interface-type property\n",
+ cp->full_name);
+ continue;
+ }
+ is_ace = strcmp(match_str, "ace") == 0;
+ if (!is_ace && strcmp(match_str, "ace-lite")) {
+ WARN(1, "node %s containing invalid interface-type property, skipping it\n",
+ cp->full_name);
+ continue;
+ }
+
+ ret = of_address_to_resource(cp, 0, &res);
+ if (!ret) {
+ ports[i].base = ioremap(res.start, resource_size(&res));
+ ports[i].phys = res.start;
+ }
+ if (ret || !ports[i].base) {
+ WARN(1, "unable to ioremap CCI port %d\n", i);
+ continue;
+ }
+
+ if (is_ace) {
+ if (WARN_ON(nb_ace >= cci_config->nb_ace))
+ continue;
+ ports[i].type = ACE_PORT;
+ ++nb_ace;
+ } else {
+ if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
+ continue;
+ ports[i].type = ACE_LITE_PORT;
+ ++nb_ace_lite;
+ }
+ ports[i].dn = cp;
+ }
+
+ /* initialize a stashed array of ACE ports to speed-up look-up */
+ cci_ace_init_ports();
+
+ /*
+ * Multi-cluster systems may need this data when non-coherent, during
+ * cluster power-up/power-down. Make sure it reaches main memory.
+ */
+ sync_cache_w(&cci_ctrl_base);
+ sync_cache_w(&cci_ctrl_phys);
+ sync_cache_w(&ports);
+ sync_cache_w(&cpu_port);
+ __sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
+ pr_info("ARM CCI driver probed\n");
+
+ return 0;
+}
+#else /* !CONFIG_ARM_CCI400_PORT_CTRL */
+static inline int cci_probe_ports(struct device_node *np)
+{
+ return 0;
+}
+#endif /* CONFIG_ARM_CCI400_PORT_CTRL */
+
+static int cci_probe(void)
+{
+ int ret;
+ struct device_node *np;
+ struct resource res;
+
+ np = of_find_matching_node(NULL, arm_cci_matches);
+ if (!np || !of_device_is_available(np))
+ return -ENODEV;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (!ret) {
+ cci_ctrl_base = ioremap(res.start, resource_size(&res));
+ cci_ctrl_phys = res.start;
+ }
+ if (ret || !cci_ctrl_base) {
+ WARN(1, "unable to ioremap CCI ctrl\n");
+ return -ENXIO;
+ }
+
+ return cci_probe_ports(np);
+}
+
+static int cci_init_status = -EAGAIN;
+static DEFINE_MUTEX(cci_probing);
+
+static int cci_init(void)
+{
+ if (cci_init_status != -EAGAIN)
+ return cci_init_status;
+
+ mutex_lock(&cci_probing);
+ if (cci_init_status == -EAGAIN)
+ cci_init_status = cci_probe();
+ mutex_unlock(&cci_probing);
+ return cci_init_status;
+}
+
+/*
+ * To sort out early init call ordering, a helper function is provided to
+ * check whether the CCI driver has been initialized. If it has not, the
+ * helper calls the init function, which probes the driver and updates the
+ * return value.
+ */
+bool cci_probed(void)
+{
+ return cci_init() == 0;
+}
+EXPORT_SYMBOL_GPL(cci_probed);
+
+early_initcall(cci_init);
+core_initcall(cci_platform_init);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ARM CCI support");
diff --git a/kernel/drivers/bus/arm-ccn.c b/kernel/drivers/bus/arm-ccn.c
new file mode 100644
index 000000000..aaa0f2a87
--- /dev/null
+++ b/kernel/drivers/bus/arm-ccn.c
@@ -0,0 +1,1393 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define CCN_NUM_XP_PORTS 2
+#define CCN_NUM_VCS 4
+#define CCN_NUM_REGIONS 256
+#define CCN_REGION_SIZE 0x10000
+
+#define CCN_ALL_OLY_ID 0xff00
+#define CCN_ALL_OLY_ID__OLY_ID__SHIFT 0
+#define CCN_ALL_OLY_ID__OLY_ID__MASK 0x1f
+#define CCN_ALL_OLY_ID__NODE_ID__SHIFT 8
+#define CCN_ALL_OLY_ID__NODE_ID__MASK 0x3f
+
+#define CCN_MN_ERRINT_STATUS 0x0008
+#define CCN_MN_ERRINT_STATUS__INTREQ__DESSERT 0x11
+#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE 0x02
+#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED 0x20
+#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE 0x22
+#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE 0x04
+#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED 0x40
+#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE 0x44
+#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE 0x08
+#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED 0x80
+#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE 0x88
+#define CCN_MN_OLY_COMP_LIST_63_0 0x01e0
+#define CCN_MN_ERR_SIG_VAL_63_0 0x0300
+#define CCN_MN_ERR_SIG_VAL_63_0__DT (1 << 1)
+
+#define CCN_DT_ACTIVE_DSM 0x0000
+#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n) ((n) * 8)
+#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK 0xff
+#define CCN_DT_CTL 0x0028
+#define CCN_DT_CTL__DT_EN (1 << 0)
+#define CCN_DT_PMEVCNT(n) (0x0100 + (n) * 0x8)
+#define CCN_DT_PMCCNTR 0x0140
+#define CCN_DT_PMCCNTRSR 0x0190
+#define CCN_DT_PMOVSR 0x0198
+#define CCN_DT_PMOVSR_CLR 0x01a0
+#define CCN_DT_PMOVSR_CLR__MASK 0x1f
+#define CCN_DT_PMCR 0x01a8
+#define CCN_DT_PMCR__OVFL_INTR_EN (1 << 6)
+#define CCN_DT_PMCR__PMU_EN (1 << 0)
+#define CCN_DT_PMSR 0x01b0
+#define CCN_DT_PMSR_REQ 0x01b8
+#define CCN_DT_PMSR_CLR 0x01c0
+
+#define CCN_HNF_PMU_EVENT_SEL 0x0600
+#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
+#define CCN_HNF_PMU_EVENT_SEL__ID__MASK 0xf
+
+#define CCN_XP_DT_CONFIG 0x0300
+#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n) ((n) * 4)
+#define CCN_XP_DT_CONFIG__DT_CFG__MASK 0xf
+#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH 0x0
+#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1 0x1
+#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n) (0x2 + (n))
+#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n) (0x4 + (n))
+#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n))
+#define CCN_XP_DT_INTERFACE_SEL 0x0308
+#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n) (0 + (n) * 8)
+#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK 0x1
+#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n) (1 + (n) * 8)
+#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK 0x1
+#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n) (2 + (n) * 8)
+#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK 0x3
+#define CCN_XP_DT_CMP_VAL_L(n) (0x0310 + (n) * 0x40)
+#define CCN_XP_DT_CMP_VAL_H(n) (0x0318 + (n) * 0x40)
+#define CCN_XP_DT_CMP_MASK_L(n) (0x0320 + (n) * 0x40)
+#define CCN_XP_DT_CMP_MASK_H(n) (0x0328 + (n) * 0x40)
+#define CCN_XP_DT_CONTROL 0x0370
+#define CCN_XP_DT_CONTROL__DT_ENABLE (1 << 0)
+#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n) (12 + (n) * 4)
+#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK 0xf
+#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS 0xf
+#define CCN_XP_PMU_EVENT_SEL 0x0600
+#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 7)
+#define CCN_XP_PMU_EVENT_SEL__ID__MASK 0x3f
+
+#define CCN_SBAS_PMU_EVENT_SEL 0x0600
+#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
+#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK 0xf
+
+#define CCN_RNI_PMU_EVENT_SEL 0x0600
+#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
+#define CCN_RNI_PMU_EVENT_SEL__ID__MASK 0xf
+
+#define CCN_TYPE_MN 0x01
+#define CCN_TYPE_DT 0x02
+#define CCN_TYPE_HNF 0x04
+#define CCN_TYPE_HNI 0x05
+#define CCN_TYPE_XP 0x08
+#define CCN_TYPE_SBSX 0x0c
+#define CCN_TYPE_SBAS 0x10
+#define CCN_TYPE_RNI_1P 0x14
+#define CCN_TYPE_RNI_2P 0x15
+#define CCN_TYPE_RNI_3P 0x16
+#define CCN_TYPE_RND_1P 0x18 /* RN-D = RN-I + DVM */
+#define CCN_TYPE_RND_2P 0x19
+#define CCN_TYPE_RND_3P 0x1a
+#define CCN_TYPE_CYCLES 0xff /* Pseudotype */
+
+#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */
+
+#define CCN_NUM_PMU_EVENTS 4
+#define CCN_NUM_XP_WATCHPOINTS 2 /* See DT.dbg_id.num_watchpoints */
+#define CCN_NUM_PMU_EVENT_COUNTERS 8 /* See DT.dbg_id.num_pmucntr */
+#define CCN_IDX_PMU_CYCLE_COUNTER CCN_NUM_PMU_EVENT_COUNTERS
+
+#define CCN_NUM_PREDEFINED_MASKS 4
+#define CCN_IDX_MASK_ANY (CCN_NUM_PMU_EVENT_COUNTERS + 0)
+#define CCN_IDX_MASK_EXACT (CCN_NUM_PMU_EVENT_COUNTERS + 1)
+#define CCN_IDX_MASK_ORDER (CCN_NUM_PMU_EVENT_COUNTERS + 2)
+#define CCN_IDX_MASK_OPCODE (CCN_NUM_PMU_EVENT_COUNTERS + 3)
+
+struct arm_ccn_component {
+ void __iomem *base;
+ u32 type;
+
+ DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS);
+ union {
+ struct {
+ DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS);
+ } xp;
+ };
+};
+
+#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \
+ struct arm_ccn_dt, pmu), struct arm_ccn, dt)
+
+struct arm_ccn_dt {
+ int id;
+ void __iomem *base;
+
+ spinlock_t config_lock;
+
+ DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1);
+ struct {
+ struct arm_ccn_component *source;
+ struct perf_event *event;
+ } pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1];
+
+ struct {
+ u64 l, h;
+ } cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS];
+
+ struct hrtimer hrtimer;
+
+ struct pmu pmu;
+};
+
+struct arm_ccn {
+ struct device *dev;
+ void __iomem *base;
+ unsigned irq_used:1;
+ unsigned sbas_present:1;
+ unsigned sbsx_present:1;
+
+ int num_nodes;
+ struct arm_ccn_component *node;
+
+ int num_xps;
+ struct arm_ccn_component *xp;
+
+ struct arm_ccn_dt dt;
+};
+
+
+static int arm_ccn_node_to_xp(int node)
+{
+ return node / CCN_NUM_XP_PORTS;
+}
+
+static int arm_ccn_node_to_xp_port(int node)
+{
+ return node % CCN_NUM_XP_PORTS;
+}
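+
+/*
+ * E.g. (illustrative): with CCN_NUM_XP_PORTS == 2, node 5 is reached via
+ * crosspoint (XP) 2, port 1.
+ */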
+
+
+/*
+ * Bit shifts and masks in these defines must be kept in sync with
+ * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below!
+ */
+#define CCN_CONFIG_NODE(_config) (((_config) >> 0) & 0xff)
+#define CCN_CONFIG_XP(_config) (((_config) >> 0) & 0xff)
+#define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff)
+#define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff)
+#define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3)
+#define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7)
+#define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1)
+#define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf)
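+/*
+ * Worked example (illustrative): config == 0x010408 decodes as node 0x08,
+ * type 0x04 (CCN_TYPE_HNF) and event 0x1, i.e. hnf_cache_miss on node 8.
+ */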
+
+static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
+{
+ *config &= ~((0xff << 0) | (0xff << 8) | (0xff << 24));
+ *config |= (node_xp << 0) | (type << 8) | (port << 24);
+}
+
+static ssize_t arm_ccn_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = container_of(attr,
+ struct dev_ext_attribute, attr);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var);
+}
+
+#define CCN_FORMAT_ATTR(_name, _config) \
+ struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \
+ { __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \
+ NULL), _config }
+
+static CCN_FORMAT_ATTR(node, "config:0-7");
+static CCN_FORMAT_ATTR(xp, "config:0-7");
+static CCN_FORMAT_ATTR(type, "config:8-15");
+static CCN_FORMAT_ATTR(event, "config:16-23");
+static CCN_FORMAT_ATTR(port, "config:24-25");
+static CCN_FORMAT_ATTR(vc, "config:26-28");
+static CCN_FORMAT_ATTR(dir, "config:29-29");
+static CCN_FORMAT_ATTR(mask, "config:30-33");
+static CCN_FORMAT_ATTR(cmp_l, "config1:0-62");
+static CCN_FORMAT_ATTR(cmp_h, "config2:0-59");
+
+static struct attribute *arm_ccn_pmu_format_attrs[] = {
+ &arm_ccn_pmu_format_attr_node.attr.attr,
+ &arm_ccn_pmu_format_attr_xp.attr.attr,
+ &arm_ccn_pmu_format_attr_type.attr.attr,
+ &arm_ccn_pmu_format_attr_event.attr.attr,
+ &arm_ccn_pmu_format_attr_port.attr.attr,
+ &arm_ccn_pmu_format_attr_vc.attr.attr,
+ &arm_ccn_pmu_format_attr_dir.attr.attr,
+ &arm_ccn_pmu_format_attr_mask.attr.attr,
+ &arm_ccn_pmu_format_attr_cmp_l.attr.attr,
+ &arm_ccn_pmu_format_attr_cmp_h.attr.attr,
+ NULL
+};
+
+static struct attribute_group arm_ccn_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = arm_ccn_pmu_format_attrs,
+};
+
+
+struct arm_ccn_pmu_event {
+ struct device_attribute attr;
+ u32 type;
+ u32 event;
+ int num_ports;
+ int num_vcs;
+ const char *def;
+ int mask;
+};
+
+#define CCN_EVENT_ATTR(_name) \
+ __ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL)
+
+/*
+ * Events defined in TRM for MN, HN-I and SBSX are actually watchpoints set on
+ * their ports in XP they are connected to. For the sake of usability they are
+ * explicitly defined here (and translated into a relevant watchpoint in
+ * arm_ccn_pmu_event_init()) so the user can easily request them without deep
+ * knowledge of the flit format.
+ */
+
+#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \
+ .type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
+ .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \
+ .def = _def, .mask = _mask, }
+
+#define CCN_EVENT_HNI(_name, _def, _mask) { \
+ .attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \
+ .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
+ .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
+
+#define CCN_EVENT_SBSX(_name, _def, _mask) { \
+ .attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \
+ .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
+ .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
+
+#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \
+ .type = CCN_TYPE_HNF, .event = _event, }
+
+#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \
+ .type = CCN_TYPE_XP, .event = _event, \
+ .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, }
+
+/*
+ * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type ID depending
+ * on configuration. One of them is picked to represent the whole group,
+ * as they all share the same event types.
+ */
+#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \
+ .type = CCN_TYPE_RNI_3P, .event = _event, }
+
+#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \
+ .type = CCN_TYPE_SBAS, .event = _event, }
+
+#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \
+ .type = CCN_TYPE_CYCLES }
+
+
+static ssize_t arm_ccn_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ccn_pmu_event *event = container_of(attr,
+ struct arm_ccn_pmu_event, attr);
+ ssize_t res;
+
+ res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type);
+ if (event->event)
+ res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x",
+ event->event);
+ if (event->def)
+ res += snprintf(buf + res, PAGE_SIZE - res, ",%s",
+ event->def);
+ if (event->mask)
+ res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
+ event->mask);
+ res += snprintf(buf + res, PAGE_SIZE - res, "\n");
+
+ return res;
+}
+
+static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+ struct device_attribute *dev_attr = container_of(attr,
+ struct device_attribute, attr);
+ struct arm_ccn_pmu_event *event = container_of(dev_attr,
+ struct arm_ccn_pmu_event, attr);
+
+ if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present)
+ return 0;
+ if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present)
+ return 0;
+
+ return attr->mode;
+}
+
+static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
+ CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
+ CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
+ CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
+ CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
+ CCN_IDX_MASK_ORDER),
+ CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
+ CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
+ CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
+ CCN_IDX_MASK_ORDER),
+ CCN_EVENT_HNF(cache_miss, 0x1),
+ CCN_EVENT_HNF(l3_sf_cache_access, 0x02),
+ CCN_EVENT_HNF(cache_fill, 0x3),
+ CCN_EVENT_HNF(pocq_retry, 0x4),
+ CCN_EVENT_HNF(pocq_reqs_recvd, 0x5),
+ CCN_EVENT_HNF(sf_hit, 0x6),
+ CCN_EVENT_HNF(sf_evictions, 0x7),
+ CCN_EVENT_HNF(snoops_sent, 0x8),
+ CCN_EVENT_HNF(snoops_broadcast, 0x9),
+ CCN_EVENT_HNF(l3_eviction, 0xa),
+ CCN_EVENT_HNF(l3_fill_invalid_way, 0xb),
+ CCN_EVENT_HNF(mc_retries, 0xc),
+ CCN_EVENT_HNF(mc_reqs, 0xd),
+ CCN_EVENT_HNF(qos_hh_retry, 0xe),
+ CCN_EVENT_RNI(rdata_beats_p0, 0x1),
+ CCN_EVENT_RNI(rdata_beats_p1, 0x2),
+ CCN_EVENT_RNI(rdata_beats_p2, 0x3),
+ CCN_EVENT_RNI(rxdat_flits, 0x4),
+ CCN_EVENT_RNI(txdat_flits, 0x5),
+ CCN_EVENT_RNI(txreq_flits, 0x6),
+ CCN_EVENT_RNI(txreq_flits_retried, 0x7),
+ CCN_EVENT_RNI(rrt_full, 0x8),
+ CCN_EVENT_RNI(wrt_full, 0x9),
+ CCN_EVENT_RNI(txreq_flits_replayed, 0xa),
+ CCN_EVENT_XP(upload_starvation, 0x1),
+ CCN_EVENT_XP(download_starvation, 0x2),
+ CCN_EVENT_XP(respin, 0x3),
+ CCN_EVENT_XP(valid_flit, 0x4),
+ CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT),
+ CCN_EVENT_SBAS(rdata_beats_p0, 0x1),
+ CCN_EVENT_SBAS(rxdat_flits, 0x4),
+ CCN_EVENT_SBAS(txdat_flits, 0x5),
+ CCN_EVENT_SBAS(txreq_flits, 0x6),
+ CCN_EVENT_SBAS(txreq_flits_retried, 0x7),
+ CCN_EVENT_SBAS(rrt_full, 0x8),
+ CCN_EVENT_SBAS(wrt_full, 0x9),
+ CCN_EVENT_SBAS(txreq_flits_replayed, 0xa),
+ CCN_EVENT_CYCLES(cycles),
+};
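+/*
+ * Example perf tool invocations for the events above (in the style of
+ * Documentation/arm/CCN.txt; XP/node IDs depend on the mesh topology,
+ * hence the <n> placeholder):
+ *
+ *	perf stat -C 0 -e ccn/cycles/ sleep 1
+ *	perf stat -C 0 -e ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/ sleep 1
+ *	perf stat -C 0 -e ccn/hnf_cache_miss,node=<n>/ sleep 1
+ */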
+
+/* Populated in arm_ccn_init() */
+static struct attribute
+ *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];
+
+static struct attribute_group arm_ccn_pmu_events_attr_group = {
+ .name = "events",
+ .is_visible = arm_ccn_pmu_events_is_visible,
+ .attrs = arm_ccn_pmu_events_attrs,
+};
+
+
+static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name)
+{
+ unsigned long i;
+
+ if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1]))
+ return NULL;
+ i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a';
+
+ switch (name[1]) {
+ case 'l':
+ return &ccn->dt.cmp_mask[i].l;
+ case 'h':
+ return &ccn->dt.cmp_mask[i].h;
+ default:
+ return NULL;
+ }
+}
+
+static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+ u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
+
+ return mask ? snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL;
+}
+
+static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+ u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
+ int err = -EINVAL;
+
+ if (mask)
+ err = kstrtoull(buf, 0, mask);
+
+ return err ? err : count;
+}
+
+#define CCN_CMP_MASK_ATTR(_name) \
+ struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
+ __ATTR(_name, S_IRUGO | S_IWUSR, \
+ arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store)
+
+#define CCN_CMP_MASK_ATTR_RO(_name) \
+ struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
+ __ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL)
+
+static CCN_CMP_MASK_ATTR(0l);
+static CCN_CMP_MASK_ATTR(0h);
+static CCN_CMP_MASK_ATTR(1l);
+static CCN_CMP_MASK_ATTR(1h);
+static CCN_CMP_MASK_ATTR(2l);
+static CCN_CMP_MASK_ATTR(2h);
+static CCN_CMP_MASK_ATTR(3l);
+static CCN_CMP_MASK_ATTR(3h);
+static CCN_CMP_MASK_ATTR(4l);
+static CCN_CMP_MASK_ATTR(4h);
+static CCN_CMP_MASK_ATTR(5l);
+static CCN_CMP_MASK_ATTR(5h);
+static CCN_CMP_MASK_ATTR(6l);
+static CCN_CMP_MASK_ATTR(6h);
+static CCN_CMP_MASK_ATTR(7l);
+static CCN_CMP_MASK_ATTR(7h);
+static CCN_CMP_MASK_ATTR_RO(8l);
+static CCN_CMP_MASK_ATTR_RO(8h);
+static CCN_CMP_MASK_ATTR_RO(9l);
+static CCN_CMP_MASK_ATTR_RO(9h);
+static CCN_CMP_MASK_ATTR_RO(al);
+static CCN_CMP_MASK_ATTR_RO(ah);
+static CCN_CMP_MASK_ATTR_RO(bl);
+static CCN_CMP_MASK_ATTR_RO(bh);
+
+static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
+ &arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr,
+ &arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr,
+ NULL
+};
+
+static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
+ .name = "cmp_mask",
+ .attrs = arm_ccn_pmu_cmp_mask_attrs,
+};
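+/*
+ * Example (assumed sysfs path): the first eight mask pairs are writable,
+ * so a user-defined comparison mask for watchpoints can be set up with:
+ *
+ *	echo 0xffffffffffffffff > /sys/bus/event_source/devices/ccn/cmp_mask/0h
+ *	echo 0x00000000ffffffff > /sys/bus/event_source/devices/ccn/cmp_mask/0l
+ *
+ * and then selected with the mask=0 event parameter.
+ */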
+
+
+/*
+ * The default poll period is 10ms, which is way over the top anyway:
+ * in the worst case scenario (an event on every cycle of a 1GHz
+ * clocked bus), the smallest, 32-bit counter will only overflow after
+ * more than 4s (2^32 cycles / 1e9 Hz = 4.29s).
+ */
+static unsigned int arm_ccn_pmu_poll_period_us = 10000;
+module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
+ S_IRUGO | S_IWUSR);
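+/* Runtime tunable, e.g. (assumed path, derived from the module name):
+ *	echo 5000 > /sys/module/arm_ccn/parameters/pmu_poll_period_us
+ */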
+
+static ktime_t arm_ccn_pmu_timer_period(void)
+{
+ return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
+}
+
+
+static const struct attribute_group *arm_ccn_pmu_attr_groups[] = {
+ &arm_ccn_pmu_events_attr_group,
+ &arm_ccn_pmu_format_attr_group,
+ &arm_ccn_pmu_cmp_mask_attr_group,
+ NULL
+};
+
+
+static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size)
+{
+ int bit;
+
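+ /*
+ * find_first_zero_bit() and test_and_set_bit() are not atomic as a
+ * pair, so retry if somebody else claimed the bit between the find
+ * and the set.
+ */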
+ do {
+ bit = find_first_zero_bit(bitmap, size);
+ if (bit >= size)
+ return -EAGAIN;
+ } while (test_and_set_bit(bit, bitmap));
+
+ return bit;
+}
+
+/* All RN-I and RN-D nodes have identical PMUs */
+static int arm_ccn_pmu_type_eq(u32 a, u32 b)
+{
+ if (a == b)
+ return 1;
+
+ switch (a) {
+ case CCN_TYPE_RNI_1P:
+ case CCN_TYPE_RNI_2P:
+ case CCN_TYPE_RNI_3P:
+ case CCN_TYPE_RND_1P:
+ case CCN_TYPE_RND_2P:
+ case CCN_TYPE_RND_3P:
+ switch (b) {
+ case CCN_TYPE_RNI_1P:
+ case CCN_TYPE_RNI_2P:
+ case CCN_TYPE_RNI_3P:
+ case CCN_TYPE_RND_1P:
+ case CCN_TYPE_RND_2P:
+ case CCN_TYPE_RND_3P:
+ return 1;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static void arm_ccn_pmu_event_destroy(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
+ clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
+ } else {
+ struct arm_ccn_component *source =
+ ccn->dt.pmu_counters[hw->idx].source;
+
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
+ CCN_CONFIG_EVENT(event->attr.config) ==
+ CCN_EVENT_WATCHPOINT)
+ clear_bit(hw->config_base, source->xp.dt_cmp_mask);
+ else
+ clear_bit(hw->config_base, source->pmu_events_mask);
+ clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
+ }
+
+ ccn->dt.pmu_counters[hw->idx].source = NULL;
+ ccn->dt.pmu_counters[hw->idx].event = NULL;
+}
+
+static int arm_ccn_pmu_event_init(struct perf_event *event)
+{
+ struct arm_ccn *ccn;
+ struct hw_perf_event *hw = &event->hw;
+ u32 node_xp, type, event_id;
+ int valid, bit;
+ struct arm_ccn_component *source;
+ int i;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ ccn = pmu_to_arm_ccn(event->pmu);
+ event->destroy = arm_ccn_pmu_event_destroy;
+
+ if (hw->sample_period) {
+ dev_warn(ccn->dev, "Sampling not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (has_branch_stack(event) || event->attr.exclude_user ||
+ event->attr.exclude_kernel || event->attr.exclude_hv ||
+ event->attr.exclude_idle) {
+ dev_warn(ccn->dev, "Can't exclude execution levels!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->cpu < 0) {
+ dev_warn(ccn->dev, "Can't provide per-task data!\n");
+ return -EOPNOTSUPP;
+ }
+
+ node_xp = CCN_CONFIG_NODE(event->attr.config);
+ type = CCN_CONFIG_TYPE(event->attr.config);
+ event_id = CCN_CONFIG_EVENT(event->attr.config);
+
+ /* Validate node/xp vs topology */
+ switch (type) {
+ case CCN_TYPE_XP:
+ if (node_xp >= ccn->num_xps) {
+ dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
+ return -EINVAL;
+ }
+ break;
+ case CCN_TYPE_CYCLES:
+ break;
+ default:
+ if (node_xp >= ccn->num_nodes) {
+ dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp);
+ return -EINVAL;
+ }
+ if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
+ dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n",
+ type, node_xp);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ /* Validate event ID vs available for the type */
+ for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid;
+ i++) {
+ struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i];
+ u32 port = CCN_CONFIG_PORT(event->attr.config);
+ u32 vc = CCN_CONFIG_VC(event->attr.config);
+
+ if (!arm_ccn_pmu_type_eq(type, e->type))
+ continue;
+ if (event_id != e->event)
+ continue;
+ if (e->num_ports && port >= e->num_ports) {
+ dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n",
+ port, node_xp);
+ return -EINVAL;
+ }
+ if (e->num_vcs && vc >= e->num_vcs) {
+ dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
+ vc, node_xp);
+ return -EINVAL;
+ }
+ valid = 1;
+ }
+ if (!valid) {
+ dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
+ event_id, node_xp);
+ return -EINVAL;
+ }
+
+ /* Watchpoint-based event for a node is actually set on XP */
+ if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) {
+ u32 port;
+
+ type = CCN_TYPE_XP;
+ port = arm_ccn_node_to_xp_port(node_xp);
+ node_xp = arm_ccn_node_to_xp(node_xp);
+
+ arm_ccn_pmu_config_set(&event->attr.config,
+ node_xp, type, port);
+ }
+
+ /* Allocate the cycle counter */
+ if (type == CCN_TYPE_CYCLES) {
+ if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
+ ccn->dt.pmu_counters_mask))
+ return -EAGAIN;
+
+ hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
+ ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;
+
+ return 0;
+ }
+
+ /* Allocate an event counter */
+ hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
+ CCN_NUM_PMU_EVENT_COUNTERS);
+ if (hw->idx < 0) {
+ dev_warn(ccn->dev, "No more counters available!\n");
+ return -EAGAIN;
+ }
+
+ if (type == CCN_TYPE_XP)
+ source = &ccn->xp[node_xp];
+ else
+ source = &ccn->node[node_xp];
+ ccn->dt.pmu_counters[hw->idx].source = source;
+
+ /* Allocate an event source or a watchpoint */
+ if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
+ bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
+ CCN_NUM_XP_WATCHPOINTS);
+ else
+ bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
+ CCN_NUM_PMU_EVENTS);
+ if (bit < 0) {
+ dev_warn(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
+ node_xp);
+ clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
+ return -EAGAIN;
+ }
+ hw->config_base = bit;
+
+ ccn->dt.pmu_counters[hw->idx].event = event;
+
+ return 0;
+}
+
+static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx)
+{
+ u64 res;
+
+ if (idx == CCN_IDX_PMU_CYCLE_COUNTER) {
+#ifdef readq
+ res = readq(ccn->dt.base + CCN_DT_PMCCNTR);
+#else
+ /* 40-bit counter: take a snapshot, then read it out in two parts */
+ writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ);
+ while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1))
+ ;
+ writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
+ res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff;
+ res <<= 32;
+ res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR);
+#endif
+ } else {
+ res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx));
+ }
+
+ return res;
+}
+
+static void arm_ccn_pmu_event_update(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ u64 prev_count, new_count, mask;
+
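+ /*
+ * Retry until prev_count is updated without racing against another
+ * reader; the xchg both publishes the new snapshot and detects a
+ * concurrent update.
+ */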
+ do {
+ prev_count = local64_read(&hw->prev_count);
+ new_count = arm_ccn_pmu_read_counter(ccn, hw->idx);
+ } while (local64_xchg(&hw->prev_count, new_count) != prev_count);
+
+ mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1;
+
+ local64_add((new_count - prev_count) & mask, &event->count);
+}
+
+static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn_component *xp;
+ u32 val, dt_cfg;
+
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
+ xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
+ else
+ xp = &ccn->xp[arm_ccn_node_to_xp(
+ CCN_CONFIG_NODE(event->attr.config))];
+
+ if (enable)
+ dt_cfg = hw->event_base;
+ else
+ dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH;
+
+ spin_lock(&ccn->dt.config_lock);
+
+ val = readl(xp->base + CCN_XP_DT_CONFIG);
+ val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK <<
+ CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx));
+ val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx);
+ writel(val, xp->base + CCN_XP_DT_CONFIG);
+
+ spin_unlock(&ccn->dt.config_lock);
+}
+
+static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ local64_set(&event->hw.prev_count,
+ arm_ccn_pmu_read_counter(ccn, hw->idx));
+ hw->state = 0;
+
+ if (!ccn->irq_used)
+ hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
+ HRTIMER_MODE_REL);
+
+ /* Set the DT bus input, engaging the counter */
+ arm_ccn_pmu_xp_dt_config(event, 1);
+}
+
+static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ u64 timeout;
+
+ /* Disable counting, setting the DT bus to pass-through mode */
+ arm_ccn_pmu_xp_dt_config(event, 0);
+
+ if (!ccn->irq_used)
+ hrtimer_cancel(&ccn->dt.hrtimer);
+
+ /* Let the DT bus drain */
+ timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
+ ccn->num_xps;
+ while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
+ timeout)
+ cpu_relax();
+
+ if (flags & PERF_EF_UPDATE)
+ arm_ccn_pmu_event_update(event);
+
+ hw->state |= PERF_HES_STOPPED;
+}
+
+static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn_component *source =
+ ccn->dt.pmu_counters[hw->idx].source;
+ unsigned long wp = hw->config_base;
+ u32 val;
+ u64 cmp_l = event->attr.config1;
+ u64 cmp_h = event->attr.config2;
+ u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l;
+ u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h;
+
+ hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp);
+
+ /* Direction (RX/TX), device (port) & virtual channel */
+ val = readl(source->base + CCN_XP_DT_INTERFACE_SEL);
+ val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK <<
+ CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp));
+ val |= CCN_CONFIG_DIR(event->attr.config) <<
+ CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp);
+ val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK <<
+ CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp));
+ val |= CCN_CONFIG_PORT(event->attr.config) <<
+ CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp);
+ val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK <<
+ CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp));
+ val |= CCN_CONFIG_VC(event->attr.config) <<
+ CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp);
+ writel(val, source->base + CCN_XP_DT_INTERFACE_SEL);
+
+ /* Comparison values */
+ writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
+ writel((cmp_l >> 32) & 0xefffffff,
+ source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
+ writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
+ writel((cmp_h >> 32) & 0x0fffffff,
+ source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4);
+
+ /* Mask */
+ writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
+ writel((mask_l >> 32) & 0xefffffff,
+ source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
+ writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
+ writel((mask_h >> 32) & 0x0fffffff,
+ source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4);
+}
+
+static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn_component *source =
+ ccn->dt.pmu_counters[hw->idx].source;
+ u32 val, id;
+
+ hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);
+
+ id = (CCN_CONFIG_VC(event->attr.config) << 4) |
+ (CCN_CONFIG_PORT(event->attr.config) << 3) |
+ (CCN_CONFIG_EVENT(event->attr.config) << 0);
+
+ val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
+ val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK <<
+ CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
+ val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
+ writel(val, source->base + CCN_XP_PMU_EVENT_SEL);
+}
+
+static void arm_ccn_pmu_node_event_config(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn_component *source =
+ ccn->dt.pmu_counters[hw->idx].source;
+ u32 type = CCN_CONFIG_TYPE(event->attr.config);
+ u32 val, port;
+
+ port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config));
+ hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port,
+ hw->config_base);
+
+ /* These *_event_sel regs should be identical, but let's make sure... */
+ BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL);
+ BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL);
+ BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) !=
+ CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1));
+ BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) !=
+ CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1));
+ BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK !=
+ CCN_SBAS_PMU_EVENT_SEL__ID__MASK);
+ BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK !=
+ CCN_RNI_PMU_EVENT_SEL__ID__MASK);
+ if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS &&
+ !arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P)))
+ return;
+
+ /* Set the event id for the pre-allocated counter */
+ val = readl(source->base + CCN_HNF_PMU_EVENT_SEL);
+ val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK <<
+ CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
+ val |= CCN_CONFIG_EVENT(event->attr.config) <<
+ CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
+ writel(val, source->base + CCN_HNF_PMU_EVENT_SEL);
+}
+
+static void arm_ccn_pmu_event_config(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ u32 xp, offset, val;
+
+ /* Cycle counter requires no setup */
+ if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
+ return;
+
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
+ xp = CCN_CONFIG_XP(event->attr.config);
+ else
+ xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config));
+
+ spin_lock(&ccn->dt.config_lock);
+
+ /* Set the DT bus "distance" register */
+ offset = (hw->idx / 4) * 4;
+ val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
+ val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK <<
+ CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4));
+ val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4);
+ writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
+
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) {
+ if (CCN_CONFIG_EVENT(event->attr.config) ==
+ CCN_EVENT_WATCHPOINT)
+ arm_ccn_pmu_xp_watchpoint_config(event);
+ else
+ arm_ccn_pmu_xp_event_config(event);
+ } else {
+ arm_ccn_pmu_node_event_config(event);
+ }
+
+ spin_unlock(&ccn->dt.config_lock);
+}
+
+static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hw = &event->hw;
+
+ arm_ccn_pmu_event_config(event);
+
+ hw->state = PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ arm_ccn_pmu_event_start(event, PERF_EF_UPDATE);
+
+ return 0;
+}
+
+static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
+{
+ arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+static void arm_ccn_pmu_event_read(struct perf_event *event)
+{
+ arm_ccn_pmu_event_update(event);
+}
+
+static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
+{
+ u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
+ int idx;
+
+ if (!pmovsr)
+ return IRQ_NONE;
+
+ writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR);
+
+ BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS);
+
+ for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) {
+ struct perf_event *event = dt->pmu_counters[idx].event;
+ int overflowed = pmovsr & BIT(idx);
+
+ WARN_ON_ONCE(overflowed && !event &&
+ idx != CCN_IDX_PMU_CYCLE_COUNTER);
+
+ if (!event || !overflowed)
+ continue;
+
+ arm_ccn_pmu_event_update(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
+{
+ struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt,
+ hrtimer);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ arm_ccn_pmu_overflow_handler(dt);
+ local_irq_restore(flags);
+
+ hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period());
+ return HRTIMER_RESTART;
+}
+
+
+static DEFINE_IDA(arm_ccn_pmu_ida);
+
+static int arm_ccn_pmu_init(struct arm_ccn *ccn)
+{
+ int i;
+ char *name;
+
+ /* Initialize DT subsystem */
+ ccn->dt.base = ccn->base + CCN_REGION_SIZE;
+ spin_lock_init(&ccn->dt.config_lock);
+ writel(CCN_DT_PMOVSR_CLR__MASK, ccn->dt.base + CCN_DT_PMOVSR_CLR);
+ writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL);
+ writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN,
+ ccn->dt.base + CCN_DT_PMCR);
+ writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
+ for (i = 0; i < ccn->num_xps; i++) {
+ writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG);
+ writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
+ CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) |
+ (CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
+ CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) |
+ CCN_XP_DT_CONTROL__DT_ENABLE,
+ ccn->xp[i].base + CCN_XP_DT_CONTROL);
+ }
+ ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15);
+ ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);
+
+ /* Get a convenient /sys/bus/event_source/devices/ name */
+ ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL);
+ if (ccn->dt.id == 0) {
+ name = "ccn";
+ } else {
+ int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);
+
+ name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
+ if (!name) {
+ ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
+ return -ENOMEM;
+ }
+ snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
+ }
+
+ /* Perf driver registration */
+ ccn->dt.pmu = (struct pmu) {
+ .attr_groups = arm_ccn_pmu_attr_groups,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = arm_ccn_pmu_event_init,
+ .add = arm_ccn_pmu_event_add,
+ .del = arm_ccn_pmu_event_del,
+ .start = arm_ccn_pmu_event_start,
+ .stop = arm_ccn_pmu_event_stop,
+ .read = arm_ccn_pmu_event_read,
+ };
+
+ /* No overflow interrupt? Have to use a timer instead. */
+ if (!ccn->irq_used) {
+ dev_info(ccn->dev, "No access to interrupts, using timer.\n");
+ hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
+ }
+
+ return perf_pmu_register(&ccn->dt.pmu, name, -1);
+}
+
+static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
+{
+ int i;
+
+ for (i = 0; i < ccn->num_xps; i++)
+ writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
+ writel(0, ccn->dt.base + CCN_DT_PMCR);
+ perf_pmu_unregister(&ccn->dt.pmu);
+ ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
+}
+
+
+static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
+ int (*callback)(struct arm_ccn *ccn, int region,
+ void __iomem *base, u32 type, u32 id))
+{
+ int region;
+
+ for (region = 0; region < CCN_NUM_REGIONS; region++) {
+ u32 val, type, id;
+ void __iomem *base;
+ int err;
+
+ val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 +
+ 4 * (region / 32));
+ if (!(val & (1 << (region % 32))))
+ continue;
+
+ base = ccn->base + region * CCN_REGION_SIZE;
+ val = readl(base + CCN_ALL_OLY_ID);
+ type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) &
+ CCN_ALL_OLY_ID__OLY_ID__MASK;
+ id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) &
+ CCN_ALL_OLY_ID__NODE_ID__MASK;
+
+ err = callback(ccn, region, base, type, id);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region,
+ void __iomem *base, u32 type, u32 id)
+{
+
+ if (type == CCN_TYPE_XP && id >= ccn->num_xps)
+ ccn->num_xps = id + 1;
+ else if (id >= ccn->num_nodes)
+ ccn->num_nodes = id + 1;
+
+ return 0;
+}
+
+static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
+ void __iomem *base, u32 type, u32 id)
+{
+ struct arm_ccn_component *component;
+
+ dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type);
+
+ switch (type) {
+ case CCN_TYPE_MN:
+ case CCN_TYPE_DT:
+ return 0;
+ case CCN_TYPE_XP:
+ component = &ccn->xp[id];
+ break;
+ case CCN_TYPE_SBSX:
+ ccn->sbsx_present = 1;
+ component = &ccn->node[id];
+ break;
+ case CCN_TYPE_SBAS:
+ ccn->sbas_present = 1;
+ /* Fall-through */
+ default:
+ component = &ccn->node[id];
+ break;
+ }
+
+ component->base = base;
+ component->type = type;
+
+ return 0;
+}
+
+
+static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn,
+ const u32 *err_sig_val)
+{
+ /* This should really be handled by firmware... */
+ dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n",
+ err_sig_val[5], err_sig_val[4], err_sig_val[3],
+ err_sig_val[2], err_sig_val[1], err_sig_val[0]);
+ dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n");
+ writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE,
+ ccn->base + CCN_MN_ERRINT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+
+static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id)
+{
+ irqreturn_t res = IRQ_NONE;
+ struct arm_ccn *ccn = dev_id;
+ u32 err_sig_val[6];
+ u32 err_or;
+ int i;
+
+ /* PMU overflow is a special case */
+ err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0);
+ if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) {
+ err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT;
+ res = arm_ccn_pmu_overflow_handler(&ccn->dt);
+ }
+
+ /* Have to read all err_sig_vals to clear them */
+ for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) {
+ err_sig_val[i] = readl(ccn->base +
+ CCN_MN_ERR_SIG_VAL_63_0 + i * 4);
+ err_or |= err_sig_val[i];
+ }
+ if (err_or)
+ res |= arm_ccn_error_handler(ccn, err_sig_val);
+
+ if (res != IRQ_NONE)
+ writel(CCN_MN_ERRINT_STATUS__INTREQ__DESSERT,
+ ccn->base + CCN_MN_ERRINT_STATUS);
+
+ return res;
+}
+
+
+static int arm_ccn_probe(struct platform_device *pdev)
+{
+ struct arm_ccn *ccn;
+ struct resource *res;
+ int err;
+
+ ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
+ if (!ccn)
+ return -ENOMEM;
+ ccn->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ccn);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ if (!devm_request_mem_region(ccn->dev, res->start,
+ resource_size(res), pdev->name))
+ return -EBUSY;
+
+ ccn->base = devm_ioremap(ccn->dev, res->start,
+ resource_size(res));
+ if (!ccn->base)
+ return -EFAULT;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res)
+ return -EINVAL;
+
+ /* Check if we can use the interrupt */
+ writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
+ ccn->base + CCN_MN_ERRINT_STATUS);
+ if (readl(ccn->base + CCN_MN_ERRINT_STATUS) &
+ CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) {
+ /* Can set 'disable' bits, so can acknowledge interrupts */
+ writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
+ ccn->base + CCN_MN_ERRINT_STATUS);
+ err = devm_request_irq(ccn->dev, res->start,
+ arm_ccn_irq_handler, 0, dev_name(ccn->dev),
+ ccn);
+ if (err)
+ return err;
+
+ ccn->irq_used = 1;
+ }
+
+
+ /* Build topology */
+
+ err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num);
+ if (err)
+ return err;
+
+ ccn->node = devm_kzalloc(ccn->dev, sizeof(*ccn->node) * ccn->num_nodes,
+ GFP_KERNEL);
+ ccn->xp = devm_kzalloc(ccn->dev, sizeof(*ccn->node) * ccn->num_xps,
+ GFP_KERNEL);
+ if (!ccn->node || !ccn->xp)
+ return -ENOMEM;
+
+ err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes);
+ if (err)
+ return err;
+
+ return arm_ccn_pmu_init(ccn);
+}
+
+static int arm_ccn_remove(struct platform_device *pdev)
+{
+ struct arm_ccn *ccn = platform_get_drvdata(pdev);
+
+ arm_ccn_pmu_cleanup(ccn);
+
+ return 0;
+}
+
+static const struct of_device_id arm_ccn_match[] = {
+ { .compatible = "arm,ccn-504", },
+ {},
+};
+
+static struct platform_driver arm_ccn_driver = {
+ .driver = {
+ .name = "arm-ccn",
+ .of_match_table = arm_ccn_match,
+ },
+ .probe = arm_ccn_probe,
+ .remove = arm_ccn_remove,
+};
+
+static int __init arm_ccn_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
+ arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
+
+ return platform_driver_register(&arm_ccn_driver);
+}
+
+static void __exit arm_ccn_exit(void)
+{
+ platform_driver_unregister(&arm_ccn_driver);
+}
+
+module_init(arm_ccn_init);
+module_exit(arm_ccn_exit);
+
+MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/bus/brcmstb_gisb.c b/kernel/drivers/bus/brcmstb_gisb.c
new file mode 100644
index 000000000..738612c45
--- /dev/null
+++ b/kernel/drivers/bus/brcmstb_gisb.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/pm.h>
+
+#ifdef CONFIG_ARM
+#include <asm/bug.h>
+#include <asm/signal.h>
+#endif
+
+#define ARB_ERR_CAP_CLEAR (1 << 0)
+#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
+#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
+#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2)
+#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c
+#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
+#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
+
+enum {
+ ARB_TIMER,
+ ARB_ERR_CAP_CLR,
+ ARB_ERR_CAP_HI_ADDR,
+ ARB_ERR_CAP_ADDR,
+ ARB_ERR_CAP_DATA,
+ ARB_ERR_CAP_STATUS,
+ ARB_ERR_CAP_MASTER,
+};
+
+static const int gisb_offsets_bcm7038[] = {
+ [ARB_TIMER] = 0x00c,
+ [ARB_ERR_CAP_CLR] = 0x0c4,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x0c8,
+ [ARB_ERR_CAP_DATA] = 0x0cc,
+ [ARB_ERR_CAP_STATUS] = 0x0d0,
+ [ARB_ERR_CAP_MASTER] = -1,
+};
+
+static const int gisb_offsets_bcm7400[] = {
+ [ARB_TIMER] = 0x00c,
+ [ARB_ERR_CAP_CLR] = 0x0c8,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x0cc,
+ [ARB_ERR_CAP_DATA] = 0x0d0,
+ [ARB_ERR_CAP_STATUS] = 0x0d4,
+ [ARB_ERR_CAP_MASTER] = 0x0d8,
+};
+
+static const int gisb_offsets_bcm7435[] = {
+ [ARB_TIMER] = 0x00c,
+ [ARB_ERR_CAP_CLR] = 0x168,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x16c,
+ [ARB_ERR_CAP_DATA] = 0x170,
+ [ARB_ERR_CAP_STATUS] = 0x174,
+ [ARB_ERR_CAP_MASTER] = 0x178,
+};
+
+static const int gisb_offsets_bcm7445[] = {
+ [ARB_TIMER] = 0x008,
+ [ARB_ERR_CAP_CLR] = 0x7e4,
+ [ARB_ERR_CAP_HI_ADDR] = 0x7e8,
+ [ARB_ERR_CAP_ADDR] = 0x7ec,
+ [ARB_ERR_CAP_DATA] = 0x7f0,
+ [ARB_ERR_CAP_STATUS] = 0x7f4,
+ [ARB_ERR_CAP_MASTER] = 0x7f8,
+};
+
+struct brcmstb_gisb_arb_device {
+ void __iomem *base;
+ const int *gisb_offsets;
+ struct mutex lock;
+ struct list_head next;
+ u32 valid_mask;
+ const char *master_names[sizeof(u32) * BITS_PER_BYTE];
+ u32 saved_timeout;
+};
+
+static LIST_HEAD(brcmstb_gisb_arb_device_list);
+
+static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
+{
+ int offset = gdev->gisb_offsets[reg];
+
+ /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
+ if (offset == -1)
+ return 1;
+
+ return ioread32(gdev->base + offset);
+}
+
+static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
+{
+ int offset = gdev->gisb_offsets[reg];
+
+ if (offset == -1)
+ return;
+ iowrite32(val, gdev->base + offset);
+}
+
+static ssize_t gisb_arb_get_timeout(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+ u32 timeout;
+
+ mutex_lock(&gdev->lock);
+ timeout = gisb_read(gdev, ARB_TIMER);
+ mutex_unlock(&gdev->lock);
+
+ return sprintf(buf, "%d", timeout);
+}
+
+static ssize_t gisb_arb_set_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+ int val, ret;
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val == 0 || val >= 0xffffffff)
+ return -EINVAL;
+
+ mutex_lock(&gdev->lock);
+ gisb_write(gdev, val, ARB_TIMER);
+ mutex_unlock(&gdev->lock);
+
+ return count;
+}
+
+static const char *
+brcmstb_gisb_master_to_str(struct brcmstb_gisb_arb_device *gdev,
+ u32 masters)
+{
+ u32 mask = gdev->valid_mask & masters;
+
+ if (hweight_long(mask) != 1)
+ return NULL;
+
+ return gdev->master_names[ffs(mask) - 1];
+}
+
+static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
+ const char *reason)
+{
+ u32 cap_status;
+ u64 arb_addr;
+ u32 master;
+ const char *m_name;
+ char m_fmt[11];
+
+ cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
+
+ /* Invalid captured address, bail out */
+ if (!(cap_status & ARB_ERR_CAP_STATUS_VALID))
+ return 1;
+
+ /* Read the address and master */
+ arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff;
+#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
+ arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
+#endif
+ master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
+
+ m_name = brcmstb_gisb_master_to_str(gdev, master);
+ if (!m_name) {
+ snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
+ m_name = m_fmt;
+ }
+
+ pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n",
+ __func__, reason, arb_addr,
+ cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
+ cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
+ m_name);
+
+ /* clear the GISB error */
+ gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);
+
+ return 0;
+}
+
+#ifdef CONFIG_ARM
+static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ int ret = 0;
+ struct brcmstb_gisb_arb_device *gdev;
+
+ /* iterate over each GISB arb registered handlers */
+ list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
+ ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
+ /*
+ * If it was an imprecise abort, then we need to correct the
+ * return address to be _after_ the instruction.
+ */
+ if (fsr & (1 << 10))
+ regs->ARM_pc += 4;
+
+ return ret;
+}
+#endif
+
+static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
+{
+ brcmstb_gisb_arb_decode_addr(dev_id, "timeout");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id)
+{
+ brcmstb_gisb_arb_decode_addr(dev_id, "target abort");
+
+ return IRQ_HANDLED;
+}
+
+static DEVICE_ATTR(gisb_arb_timeout, S_IWUSR | S_IRUGO,
+ gisb_arb_get_timeout, gisb_arb_set_timeout);
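+/* Example (assumed sysfs path; the platform-device node name varies):
+ *	echo 10000 > /sys/devices/platform/<gisb-node>/gisb_arb_timeout
+ */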
+
+static struct attribute *gisb_arb_sysfs_attrs[] = {
+ &dev_attr_gisb_arb_timeout.attr,
+ NULL,
+};
+
+static struct attribute_group gisb_arb_sysfs_attr_group = {
+ .attrs = gisb_arb_sysfs_attrs,
+};
+
+static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
+ { .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 },
+ { .compatible = "brcm,bcm7445-gisb-arb", .data = gisb_offsets_bcm7445 },
+ { .compatible = "brcm,bcm7435-gisb-arb", .data = gisb_offsets_bcm7435 },
+ { .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
+ { .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
+ { },
+};
+
+static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct brcmstb_gisb_arb_device *gdev;
+ const struct of_device_id *of_id;
+ struct resource *r;
+ int err, timeout_irq, tea_irq;
+ unsigned int num_masters, j = 0;
+ int i, first, last;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ timeout_irq = platform_get_irq(pdev, 0);
+ tea_irq = platform_get_irq(pdev, 1);
+
+ gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
+ if (!gdev)
+ return -ENOMEM;
+
+ mutex_init(&gdev->lock);
+ INIT_LIST_HEAD(&gdev->next);
+
+ gdev->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(gdev->base))
+ return PTR_ERR(gdev->base);
+
+ of_id = of_match_node(brcmstb_gisb_arb_of_match, dn);
+ if (!of_id) {
+ pr_err("failed to look up compatible string\n");
+ return -EINVAL;
+ }
+ gdev->gisb_offsets = of_id->data;
+
+ err = devm_request_irq(&pdev->dev, timeout_irq,
+ brcmstb_gisb_timeout_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+
+ err = devm_request_irq(&pdev->dev, tea_irq,
+ brcmstb_gisb_tea_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+
+ /* If we do not have a valid mask, assume all masters are enabled */
+ if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
+ &gdev->valid_mask))
+ gdev->valid_mask = 0xffffffff;
+
+ /* Proceed with reading the literal names if we agree on the
+ * number of masters
+ */
+ num_masters = of_property_count_strings(dn,
+ "brcm,gisb-arb-master-names");
+ if (hweight_long(gdev->valid_mask) == num_masters) {
+ first = ffs(gdev->valid_mask) - 1;
+ last = fls(gdev->valid_mask) - 1;
+
+ for (i = first; i <= last; i++) {
+ if (!(gdev->valid_mask & BIT(i)))
+ continue;
+
+ of_property_read_string_index(dn,
+ "brcm,gisb-arb-master-names", j,
+ &gdev->master_names[i]);
+ j++;
+ }
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, gdev);
+
+ list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
+
+#ifdef CONFIG_ARM
+ hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
+ "imprecise external abort");
+#endif
+
+ dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
+ gdev->base, timeout_irq, tea_irq);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcmstb_gisb_arb_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+
+ gdev->saved_timeout = gisb_read(gdev, ARB_TIMER);
+
+ return 0;
+}
+
+/* Make sure we provide the same timeout value that was configured before, and
+ * do this before the GISB timeout interrupt handler has any chance to run.
+ */
+static int brcmstb_gisb_arb_resume_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev);
+
+ gisb_write(gdev, gdev->saved_timeout, ARB_TIMER);
+
+ return 0;
+}
+#else
+#define brcmstb_gisb_arb_suspend NULL
+#define brcmstb_gisb_arb_resume_noirq NULL
+#endif
+
+static const struct dev_pm_ops brcmstb_gisb_arb_pm_ops = {
+ .suspend = brcmstb_gisb_arb_suspend,
+ .resume_noirq = brcmstb_gisb_arb_resume_noirq,
+};
+
+static struct platform_driver brcmstb_gisb_arb_driver = {
+ .driver = {
+ .name = "brcm-gisb-arb",
+ .of_match_table = brcmstb_gisb_arb_of_match,
+ .pm = &brcmstb_gisb_arb_pm_ops,
+ },
+};
+
+static int __init brcm_gisb_driver_init(void)
+{
+ return platform_driver_probe(&brcmstb_gisb_arb_driver,
+ brcmstb_gisb_arb_probe);
+}
+
+module_init(brcm_gisb_driver_init);
diff --git a/kernel/drivers/bus/imx-weim.c b/kernel/drivers/bus/imx-weim.c
new file mode 100644
index 000000000..e98d15eaa
--- /dev/null
+++ b/kernel/drivers/bus/imx-weim.c
@@ -0,0 +1,217 @@
+/*
+ * EIM driver for Freescale's i.MX chips
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/regmap.h>
+
+struct imx_weim_devtype {
+ unsigned int cs_count;
+ unsigned int cs_regs_count;
+ unsigned int cs_stride;
+};
+
+static const struct imx_weim_devtype imx1_weim_devtype = {
+ .cs_count = 6,
+ .cs_regs_count = 2,
+ .cs_stride = 0x08,
+};
+
+static const struct imx_weim_devtype imx27_weim_devtype = {
+ .cs_count = 6,
+ .cs_regs_count = 3,
+ .cs_stride = 0x10,
+};
+
+static const struct imx_weim_devtype imx50_weim_devtype = {
+ .cs_count = 4,
+ .cs_regs_count = 6,
+ .cs_stride = 0x18,
+};
+
+static const struct imx_weim_devtype imx51_weim_devtype = {
+ .cs_count = 6,
+ .cs_regs_count = 6,
+ .cs_stride = 0x18,
+};
+
+static const struct of_device_id weim_id_table[] = {
+ /* i.MX1/21 */
+ { .compatible = "fsl,imx1-weim", .data = &imx1_weim_devtype, },
+ /* i.MX25/27/31/35 */
+ { .compatible = "fsl,imx27-weim", .data = &imx27_weim_devtype, },
+ /* i.MX50/53/6Q */
+ { .compatible = "fsl,imx50-weim", .data = &imx50_weim_devtype, },
+ { .compatible = "fsl,imx6q-weim", .data = &imx50_weim_devtype, },
+ /* i.MX51 */
+ { .compatible = "fsl,imx51-weim", .data = &imx51_weim_devtype, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, weim_id_table);
+
+static int __init imx_weim_gpr_setup(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct property *prop;
+ const __be32 *p;
+ struct regmap *gpr;
+ u32 gprvals[4] = {
+ 05, /* CS0(128M) CS1(0M) CS2(0M) CS3(0M) */
+ 033, /* CS0(64M) CS1(64M) CS2(0M) CS3(0M) */
+ 0113, /* CS0(64M) CS1(32M) CS2(32M) CS3(0M) */
+ 01111, /* CS0(32M) CS1(32M) CS2(32M) CS3(32M) */
+ };
+ u32 gprval = 0;
+ u32 val;
+ int cs = 0;
+ int i = 0;
+
+ gpr = syscon_regmap_lookup_by_phandle(np, "fsl,weim-cs-gpr");
+ if (IS_ERR(gpr)) {
+ dev_dbg(&pdev->dev, "failed to find weim-cs-gpr\n");
+ return 0;
+ }
+
+ of_property_for_each_u32(np, "ranges", prop, p, val) {
+ if (i % 4 == 0) {
+ cs = val;
+ } else if (i % 4 == 3 && val) {
+ val = (val / SZ_32M) | 1;
+ gprval |= val << cs * 3;
+ }
+ i++;
+ }
+
+ if (i == 0 || i % 4)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
+ if (gprval == gprvals[i]) {
+ /* Found it. Set up IOMUXC_GPR1[11:0] with it. */
+ regmap_update_bits(gpr, IOMUXC_GPR1, 0xfff, gprval);
+ return 0;
+ }
+ }
+
+err:
+ dev_err(&pdev->dev, "Invalid 'ranges' configuration\n");
+ return -EINVAL;
+}
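+/*
+ * Illustrative (not from a real board) "ranges" fragment that the parser
+ * above resolves to gprval 033, i.e. CS0 and CS1 each getting 64M:
+ *
+ *	ranges = <0 0 0x08000000 0x04000000>,	// CS0: 0x04000000/SZ_32M|1 = 3
+ *		 <1 0 0x0c000000 0x04000000>;	// CS1: 3 << (1 * 3) = 030
+ */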
+
+/* Parse and set the timing for this device. */
+static int __init weim_timing_setup(struct device_node *np, void __iomem *base,
+ const struct imx_weim_devtype *devtype)
+{
+ u32 cs_idx, value[devtype->cs_regs_count];
+ int i, ret;
+
+ /* get the CS index from this child node's "reg" property. */
+ ret = of_property_read_u32(np, "reg", &cs_idx);
+ if (ret)
+ return ret;
+
+ if (cs_idx >= devtype->cs_count)
+ return -EINVAL;
+
+ ret = of_property_read_u32_array(np, "fsl,weim-cs-timing",
+ value, devtype->cs_regs_count);
+ if (ret)
+ return ret;
+
+ /* set the timing for WEIM */
+ for (i = 0; i < devtype->cs_regs_count; i++)
+ writel(value[i], base + cs_idx * devtype->cs_stride + i * 4);
+
+ return 0;
+}
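+/*
+ * Illustrative child node (in the style of the imx-weim DT binding example
+ * for an i.MX6 parallel NOR; six values match cs_regs_count = 6, and the
+ * actual timings are board-specific):
+ *
+ *	nor@0,0 {
+ *		compatible = "cfi-flash";
+ *		reg = <0 0 0x02000000>;
+ *		fsl,weim-cs-timing = <0x00620081 0x00000001 0x1c022000
+ *				      0x0000c000 0x1404a38e 0x00000000>;
+ *	};
+ */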
+
+static int __init weim_parse_dt(struct platform_device *pdev,
+ void __iomem *base)
+{
+ const struct of_device_id *of_id = of_match_device(weim_id_table,
+ &pdev->dev);
+ const struct imx_weim_devtype *devtype = of_id->data;
+ struct device_node *child;
+ int ret = 0, have_child = 0;
+
+ if (devtype == &imx50_weim_devtype) {
+ ret = imx_weim_gpr_setup(pdev);
+ if (ret)
+ return ret;
+ }
+
+ for_each_child_of_node(pdev->dev.of_node, child) {
+ if (!child->name)
+ continue;
+
+ ret = weim_timing_setup(child, base, devtype);
+ if (ret)
+ dev_warn(&pdev->dev, "%s set timing failed.\n",
+ child->full_name);
+ else
+ have_child = 1;
+ }
+
+ if (have_child)
+ ret = of_platform_populate(pdev->dev.of_node,
+ of_default_bus_match_table,
+ NULL, &pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "%s fail to create devices.\n",
+ pdev->dev.of_node->full_name);
+ return ret;
+}
+
+static int __init weim_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct clk *clk;
+ void __iomem *base;
+ int ret;
+
+ /* get the resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* get the clock */
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ /* parse the device node */
+ ret = weim_parse_dt(pdev, base);
+ if (ret)
+ clk_disable_unprepare(clk);
+ else
+ dev_info(&pdev->dev, "Driver registered.\n");
+
+ return ret;
+}
+
+static struct platform_driver weim_driver = {
+ .driver = {
+ .name = "imx-weim",
+ .of_match_table = weim_id_table,
+ },
+};
+module_platform_driver_probe(weim_driver, weim_probe);
+
+MODULE_AUTHOR("Freescale Semiconductor Inc.");
+MODULE_DESCRIPTION("i.MX EIM Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/bus/mips_cdmm.c b/kernel/drivers/bus/mips_cdmm.c
new file mode 100644
index 000000000..ab3bde16e
--- /dev/null
+++ b/kernel/drivers/bus/mips_cdmm.c
@@ -0,0 +1,716 @@
+/*
+ * Bus driver for MIPS Common Device Memory Map (CDMM).
+ *
+ * Copyright (C) 2014-2015 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <asm/cdmm.h>
+#include <asm/hazards.h>
+#include <asm/mipsregs.h>
+
+/* Access control and status register fields */
+#define CDMM_ACSR_DEVTYPE_SHIFT 24
+#define CDMM_ACSR_DEVTYPE (255ul << CDMM_ACSR_DEVTYPE_SHIFT)
+#define CDMM_ACSR_DEVSIZE_SHIFT 16
+#define CDMM_ACSR_DEVSIZE (31ul << CDMM_ACSR_DEVSIZE_SHIFT)
+#define CDMM_ACSR_DEVREV_SHIFT 12
+#define CDMM_ACSR_DEVREV (15ul << CDMM_ACSR_DEVREV_SHIFT)
+#define CDMM_ACSR_UW (1ul << 3)
+#define CDMM_ACSR_UR (1ul << 2)
+#define CDMM_ACSR_SW (1ul << 1)
+#define CDMM_ACSR_SR (1ul << 0)
+
+/* Each block of device registers is 64 bytes */
+#define CDMM_DRB_SIZE 64
+
+#define to_mips_cdmm_driver(d) container_of(d, struct mips_cdmm_driver, drv)
+
+/* Default physical base address */
+static phys_addr_t mips_cdmm_default_base;
+
+/* Bus operations */
+
+static const struct mips_cdmm_device_id *
+mips_cdmm_lookup(const struct mips_cdmm_device_id *table,
+ struct mips_cdmm_device *dev)
+{
+ int ret = 0;
+
+ for (; table->type; ++table) {
+ ret = (dev->type == table->type);
+ if (ret)
+ break;
+ }
+
+ return ret ? table : NULL;
+}
+
+static int mips_cdmm_match(struct device *dev, struct device_driver *drv)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+ struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv);
+
+ return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL;
+}
+
+static int mips_cdmm_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+ int retval = 0;
+
+ retval = add_uevent_var(env, "CDMM_CPU=%u", cdev->cpu);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "CDMM_TYPE=0x%02x", cdev->type);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "CDMM_REV=%u", cdev->rev);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "MODALIAS=mipscdmm:t%02X", cdev->type);
+ return retval;
+}
+
+/* Device attributes */
+
+#define CDMM_ATTR(name, fmt, arg...) \
+static ssize_t name##_show(struct device *_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct mips_cdmm_device *dev = to_mips_cdmm_device(_dev); \
+ return sprintf(buf, fmt, arg); \
+} \
+static DEVICE_ATTR_RO(name);
+
+CDMM_ATTR(cpu, "%u\n", dev->cpu);
+CDMM_ATTR(type, "0x%02x\n", dev->type);
+CDMM_ATTR(revision, "%u\n", dev->rev);
+CDMM_ATTR(modalias, "mipscdmm:t%02X\n", dev->type);
+CDMM_ATTR(resource, "\t%016llx\t%016llx\t%016lx\n",
+ (unsigned long long)dev->res.start,
+ (unsigned long long)dev->res.end,
+ dev->res.flags);
+
+static struct attribute *mips_cdmm_dev_attrs[] = {
+ &dev_attr_cpu.attr,
+ &dev_attr_type.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_resource.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mips_cdmm_dev);
+
+struct bus_type mips_cdmm_bustype = {
+ .name = "cdmm",
+ .dev_groups = mips_cdmm_dev_groups,
+ .match = mips_cdmm_match,
+ .uevent = mips_cdmm_uevent,
+};
+EXPORT_SYMBOL_GPL(mips_cdmm_bustype);
+
+/*
+ * Standard driver callback helpers.
+ *
+ * All the CDMM driver callbacks need to be executed on the appropriate CPU from
+ * workqueues. For the standard driver callbacks we need a work function
+ * (mips_cdmm_{void,int}_work()) to do the actual call from the right CPU, and a
+ * wrapper function (generated with BUILD_PERCPU_HELPER) to arrange for the work
+ * function to be called on that CPU.
+ */
+
+/**
+ * struct mips_cdmm_work_dev - Data for per-device call work.
+ * @fn: CDMM driver callback function to call for the device.
+ * @dev: CDMM device to pass to @fn.
+ */
+struct mips_cdmm_work_dev {
+ void *fn;
+ struct mips_cdmm_device *dev;
+};
+
+/**
+ * mips_cdmm_void_work() - Call a void returning CDMM driver callback.
+ * @data: struct mips_cdmm_work_dev pointer.
+ *
+ * A work_on_cpu() callback function to call an arbitrary CDMM driver callback
+ * function which doesn't return a value.
+ */
+static long mips_cdmm_void_work(void *data)
+{
+ struct mips_cdmm_work_dev *work = data;
+ void (*fn)(struct mips_cdmm_device *) = work->fn;
+
+ fn(work->dev);
+ return 0;
+}
+
+/**
+ * mips_cdmm_int_work() - Call an int returning CDMM driver callback.
+ * @data: struct mips_cdmm_work_dev pointer.
+ *
+ * A work_on_cpu() callback function to call an arbitrary CDMM driver callback
+ * function which returns an int.
+ */
+static long mips_cdmm_int_work(void *data)
+{
+ struct mips_cdmm_work_dev *work = data;
+ int (*fn)(struct mips_cdmm_device *) = work->fn;
+
+ return fn(work->dev);
+}
+
+#define _BUILD_RET_void
+#define _BUILD_RET_int return
+
+/**
+ * BUILD_PERCPU_HELPER() - Helper to call a CDMM driver callback on right CPU.
+ * @_ret: Return type (void or int).
+ * @_name: Name of CDMM driver callback function.
+ *
+ * Generates a specific device callback function to call a CDMM driver callback
+ * function on the appropriate CPU for the device, and if applicable return the
+ * result.
+ */
+#define BUILD_PERCPU_HELPER(_ret, _name) \
+static _ret mips_cdmm_##_name(struct device *dev) \
+{ \
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); \
+ struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(dev->driver); \
+ struct mips_cdmm_work_dev work = { \
+ .fn = cdrv->_name, \
+ .dev = cdev, \
+ }; \
+ \
+ _BUILD_RET_##_ret work_on_cpu(cdev->cpu, \
+ mips_cdmm_##_ret##_work, &work); \
+}
+
+/* Driver callback functions */
+BUILD_PERCPU_HELPER(int, probe) /* int mips_cdmm_probe(struct device) */
+BUILD_PERCPU_HELPER(int, remove) /* int mips_cdmm_remove(struct device) */
+BUILD_PERCPU_HELPER(void, shutdown) /* void mips_cdmm_shutdown(struct device) */
+
+
+/* Driver registration */
+
+/**
+ * mips_cdmm_driver_register() - Register a CDMM driver.
+ * @drv: CDMM driver information.
+ *
+ * Register a CDMM driver with the CDMM subsystem. The driver will be informed
+ * of matching devices which are discovered.
+ *
+ * Returns: 0 on success.
+ */
+int mips_cdmm_driver_register(struct mips_cdmm_driver *drv)
+{
+ drv->drv.bus = &mips_cdmm_bustype;
+
+ if (drv->probe)
+ drv->drv.probe = mips_cdmm_probe;
+ if (drv->remove)
+ drv->drv.remove = mips_cdmm_remove;
+ if (drv->shutdown)
+ drv->drv.shutdown = mips_cdmm_shutdown;
+
+ return driver_register(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_driver_register);
+
+/**
+ * mips_cdmm_driver_unregister() - Unregister a CDMM driver.
+ * @drv: CDMM driver information.
+ *
+ * Unregister a CDMM driver from the CDMM subsystem.
+ */
+void mips_cdmm_driver_unregister(struct mips_cdmm_driver *drv)
+{
+ driver_unregister(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_driver_unregister);
+
+
+/* CDMM initialisation and bus discovery */
+
+/**
+ * struct mips_cdmm_bus - Info about CDMM bus.
+ * @phys: Physical address at which it is mapped.
+ * @regs: Virtual address where registers can be accessed.
+ * @drbs: Total number of DRBs.
+ * @drbs_reserved: Number of DRBs reserved.
+ * @discovered: Whether the devices on the bus have been discovered yet.
+ * @offline: Whether the CDMM bus is going offline (or very early
+ * coming back online), in which case it should be
+ * reconfigured each time.
+ */
+struct mips_cdmm_bus {
+ phys_addr_t phys;
+ void __iomem *regs;
+ unsigned int drbs;
+ unsigned int drbs_reserved;
+ bool discovered;
+ bool offline;
+};
+
+static struct mips_cdmm_bus mips_cdmm_boot_bus;
+static DEFINE_PER_CPU(struct mips_cdmm_bus *, mips_cdmm_buses);
+static atomic_t mips_cdmm_next_id = ATOMIC_INIT(-1);
+
+/**
+ * mips_cdmm_get_bus() - Get the per-CPU CDMM bus information.
+ *
+ * Get information about the per-CPU CDMM bus, if the bus is present.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns: Pointer to CDMM bus information for the current CPU.
+ * May return ERR_PTR(-errno) in case of error, so check with
+ * IS_ERR().
+ */
+static struct mips_cdmm_bus *mips_cdmm_get_bus(void)
+{
+ struct mips_cdmm_bus *bus, **bus_p;
+ unsigned long flags;
+ unsigned int cpu;
+
+ if (!cpu_has_cdmm)
+ return ERR_PTR(-ENODEV);
+
+ cpu = smp_processor_id();
+ /* Avoid early use of per-cpu primitives before initialised */
+ if (cpu == 0)
+ return &mips_cdmm_boot_bus;
+
+ /* Get bus pointer */
+ bus_p = per_cpu_ptr(&mips_cdmm_buses, cpu);
+ local_irq_save(flags);
+ bus = *bus_p;
+ /* Attempt allocation if NULL */
+ if (unlikely(!bus)) {
+ bus = kzalloc(sizeof(*bus), GFP_ATOMIC);
+ if (unlikely(!bus))
+ bus = ERR_PTR(-ENOMEM);
+ else
+ *bus_p = bus;
+ }
+ local_irq_restore(flags);
+ return bus;
+}
+
+/**
+ * mips_cdmm_cur_base() - Find current physical base address of CDMM region.
+ *
+ * Returns: Physical base address of CDMM region according to cdmmbase CP0
+ * register, or 0 if the CDMM region is disabled.
+ */
+static phys_addr_t mips_cdmm_cur_base(void)
+{
+ unsigned long cdmmbase = read_c0_cdmmbase();
+
+ if (!(cdmmbase & MIPS_CDMMBASE_EN))
+ return 0;
+
+ return (cdmmbase >> MIPS_CDMMBASE_ADDR_SHIFT)
+ << MIPS_CDMMBASE_ADDR_START;
+}
+
+/**
+ * mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable.
+ * @bus: Pointer to bus information for current CPU.
+ * IS_ERR(bus) is checked, so no need for caller to check.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns 0 on success, -errno on failure.
+ */
+static int mips_cdmm_setup(struct mips_cdmm_bus *bus)
+{
+ unsigned long cdmmbase, flags;
+ int ret = 0;
+
+ if (IS_ERR(bus))
+ return PTR_ERR(bus);
+
+ local_irq_save(flags);
+ /* Don't set up bus a second time unless marked offline */
+ if (bus->offline) {
+ /* If CDMM region is still set up, nothing to do */
+ if (bus->phys == mips_cdmm_cur_base())
+ goto out;
+ /*
+ * The CDMM region isn't set up as expected, so it needs
+ * reconfiguring, but then we can stop checking it.
+ */
+ bus->offline = false;
+ } else if (bus->phys > 1) {
+ goto out;
+ }
+
+ /* If the CDMM region is already configured, inherit that setup */
+ if (!bus->phys)
+ bus->phys = mips_cdmm_cur_base();
+ /* Otherwise, ask platform code for suggestions */
+ if (!bus->phys && mips_cdmm_phys_base)
+ bus->phys = mips_cdmm_phys_base();
+ /* Otherwise, copy what other CPUs have done */
+ if (!bus->phys)
+ bus->phys = mips_cdmm_default_base;
+ /* Otherwise, complain once */
+ if (!bus->phys) {
+ bus->phys = 1;
+ /*
+ * If you hit this, either your bootloader needs to set up the
+ * CDMM on the boot CPU, or else you need to implement
+ * mips_cdmm_phys_base() for your platform (see asm/cdmm.h).
+ */
+ pr_err("cdmm%u: Failed to choose a physical base\n",
+ smp_processor_id());
+ }
+ /* Already complained? */
+ if (bus->phys == 1) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Record our success for other CPUs to copy */
+ mips_cdmm_default_base = bus->phys;
+
+ pr_debug("cdmm%u: Enabling CDMM region at %pa\n",
+ smp_processor_id(), &bus->phys);
+
+ /* Enable CDMM */
+ cdmmbase = read_c0_cdmmbase();
+ cdmmbase &= (1ul << MIPS_CDMMBASE_ADDR_SHIFT) - 1;
+ cdmmbase |= (bus->phys >> MIPS_CDMMBASE_ADDR_START)
+ << MIPS_CDMMBASE_ADDR_SHIFT;
+ cdmmbase |= MIPS_CDMMBASE_EN;
+ write_c0_cdmmbase(cdmmbase);
+ tlbw_use_hazard();
+
+ bus->regs = (void __iomem *)CKSEG1ADDR(bus->phys);
+ bus->drbs = 1 + ((cdmmbase & MIPS_CDMMBASE_SIZE) >>
+ MIPS_CDMMBASE_SIZE_SHIFT);
+ bus->drbs_reserved = !!(cdmmbase & MIPS_CDMMBASE_CI);
+
+out:
+ local_irq_restore(flags);
+ return ret;
+}
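+
+/*
+ * Worked example of the encoding above, assuming the usual cdmmbase layout
+ * where physical bits MIPS_CDMMBASE_ADDR_START and up are stored at
+ * register bits MIPS_CDMMBASE_ADDR_SHIFT and up (15 and 11 respectively):
+ * a 32 KiB aligned base of 0x1fc00000 is written as
+ *
+ *	(0x1fc00000 >> 15) << 11 == 0x01fc0000
+ *
+ * so cdmmbase becomes 0x01fc0000 | MIPS_CDMMBASE_EN.
+ */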
+
+/**
+ * mips_cdmm_early_probe() - Minimally probe for a specific device on CDMM.
+ * @dev_type: CDMM type code to look for.
+ *
+ * Minimally configure the in-CPU Common Device Memory Map (CDMM) and look for a
+ * specific device. This can be used to find a device very early in boot for
+ * example to configure an early FDC console device.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns: MMIO pointer to device memory. The caller can read the ACSR
+ * register to find more information about the device (such as the
+ * version number or the number of blocks).
+ * May return IOMEM_ERR_PTR(-errno) in case of error, so check with
+ * IS_ERR().
+ */
+void __iomem *mips_cdmm_early_probe(unsigned int dev_type)
+{
+ struct mips_cdmm_bus *bus;
+ void __iomem *cdmm;
+ u32 acsr;
+ unsigned int drb, type, size;
+ int err;
+
+ if (WARN_ON(!dev_type))
+ return IOMEM_ERR_PTR(-ENODEV);
+
+ bus = mips_cdmm_get_bus();
+ err = mips_cdmm_setup(bus);
+ if (err)
+ return IOMEM_ERR_PTR(err);
+
+ /* Skip the first block if it's reserved for more registers */
+ drb = bus->drbs_reserved;
+ cdmm = bus->regs;
+
+ /* Look for a specific device type */
+ for (; drb < bus->drbs; drb += size + 1) {
+ acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
+ type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
+ if (type == dev_type)
+ return cdmm + drb * CDMM_DRB_SIZE;
+ size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
+ }
+
+ return IOMEM_ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_early_probe);
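+
+/*
+ * Usage sketch (illustrative: the device type 0xfd and the setup helper
+ * are assumptions, standing in for a real early user such as an FDC
+ * console):
+ *
+ *	void __iomem *regs;
+ *
+ *	preempt_disable();
+ *	regs = mips_cdmm_early_probe(0xfd);
+ *	if (!IS_ERR(regs))
+ *		early_fdc_setup(regs);
+ *	preempt_enable();
+ */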
+
+/**
+ * mips_cdmm_release() - Release a removed CDMM device.
+ * @dev: Device object
+ *
+ * Clean up the struct mips_cdmm_device for an unused CDMM device. This is
+ * called automatically by the driver core when a device is removed.
+ */
+static void mips_cdmm_release(struct device *dev)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+
+ kfree(cdev);
+}
+
+/**
+ * mips_cdmm_bus_discover() - Discover the devices on the CDMM bus.
+ * @bus: CDMM bus information, must already be set up.
+ */
+static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
+{
+ void __iomem *cdmm;
+ u32 acsr;
+ unsigned int drb, type, size, rev;
+ struct mips_cdmm_device *dev;
+ unsigned int cpu = smp_processor_id();
+ int ret = 0;
+ int id = 0;
+
+ /* Skip the first block if it's reserved for more registers */
+ drb = bus->drbs_reserved;
+ cdmm = bus->regs;
+
+ /* Discover devices */
+ bus->discovered = true;
+ pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
+ for (; drb < bus->drbs; drb += size + 1) {
+ acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
+ type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
+ size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
+ rev = (acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT;
+
+ if (!type)
+ continue;
+
+ pr_info("cdmm%u-%u: @%u (%#x..%#x), type 0x%02x, rev %u\n",
+ cpu, id, drb, drb * CDMM_DRB_SIZE,
+ (drb + size + 1) * CDMM_DRB_SIZE - 1,
+ type, rev);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ break;
+
+ dev->cpu = cpu;
+ dev->res.start = bus->phys + drb * CDMM_DRB_SIZE;
+ dev->res.end = bus->phys +
+ (drb + size + 1) * CDMM_DRB_SIZE - 1;
+ dev->res.flags = IORESOURCE_MEM;
+ dev->type = type;
+ dev->rev = rev;
+ dev->dev.parent = get_cpu_device(cpu);
+ dev->dev.bus = &mips_cdmm_bustype;
+ dev->dev.id = atomic_inc_return(&mips_cdmm_next_id);
+ dev->dev.release = mips_cdmm_release;
+
+ dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
+ ++id;
+ ret = device_register(&dev->dev);
+ if (ret) {
+ put_device(&dev->dev);
+ kfree(dev);
+ }
+ }
+}
+
+
+/*
+ * CPU hotplug and initialisation
+ *
+ * All the CDMM driver callbacks need to be executed on the appropriate CPU from
+ * workqueues. The CPU callbacks need to be called for all devices on
+ * that CPU, so the work function calls bus_for_each_dev, using a helper
+ * (generated with BUILD_PERDEV_HELPER) to call the driver callback if the
+ * device's CPU matches.
+ */
+
+/**
+ * BUILD_PERDEV_HELPER() - Helper to call a CDMM driver callback if CPU matches.
+ * @_name: Name of CDMM driver callback function.
+ *
+ * Generates a bus_for_each_dev callback function to call a specific CDMM driver
+ * callback function for the device if the device's CPU matches that pointed to
+ * by the data argument.
+ *
+ * This is used for informing drivers for all devices on a given CPU of some
+ * event (such as the CPU going online/offline).
+ *
+ * It is expected to already be called from the appropriate CPU.
+ */
+#define BUILD_PERDEV_HELPER(_name) \
+static int mips_cdmm_##_name##_helper(struct device *dev, void *data) \
+{ \
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); \
+ struct mips_cdmm_driver *cdrv; \
+ unsigned int cpu = *(unsigned int *)data; \
+ \
+ if (cdev->cpu != cpu || !dev->driver) \
+ return 0; \
+ \
+ cdrv = to_mips_cdmm_driver(dev->driver); \
+ if (!cdrv->_name) \
+ return 0; \
+ return cdrv->_name(cdev); \
+}
+
+/* bus_for_each_dev callback helper functions */
+BUILD_PERDEV_HELPER(cpu_down) /* int mips_cdmm_cpu_down_helper(...) */
+BUILD_PERDEV_HELPER(cpu_up) /* int mips_cdmm_cpu_up_helper(...) */
+
+/**
+ * mips_cdmm_bus_down() - Tear down the CDMM bus.
+ * @data: Pointer to unsigned int CPU number.
+ *
+ * This work_on_cpu callback function is executed on a given CPU to call the
+ * CDMM driver cpu_down callback for all devices on that CPU.
+ */
+static long mips_cdmm_bus_down(void *data)
+{
+ struct mips_cdmm_bus *bus;
+ long ret;
+
+ /* Inform all the devices on the bus */
+ ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
+ mips_cdmm_cpu_down_helper);
+
+ /*
+	 * While the bus is offline, each use of it should reconfigure it, in
+	 * case that use is the first one after coming back online.
+ */
+ bus = mips_cdmm_get_bus();
+ if (!IS_ERR(bus))
+ bus->offline = true;
+
+ return ret;
+}
+
+/**
+ * mips_cdmm_bus_up() - Bring up the CDMM bus.
+ * @data: Pointer to unsigned int CPU number.
+ *
+ * This work_on_cpu callback function is executed on a given CPU to discover
+ * CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all
+ * devices already discovered on that CPU.
+ *
+ * It is used during initialisation and when CPUs are brought online.
+ */
+static long mips_cdmm_bus_up(void *data)
+{
+ struct mips_cdmm_bus *bus;
+ long ret;
+
+ bus = mips_cdmm_get_bus();
+ ret = mips_cdmm_setup(bus);
+ if (ret)
+ return ret;
+
+ /* Bus now set up, so we can drop the offline flag if still set */
+ bus->offline = false;
+
+ if (!bus->discovered)
+ mips_cdmm_bus_discover(bus);
+ else
+ /* Inform all the devices on the bus */
+ ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
+ mips_cdmm_cpu_up_helper);
+
+ return ret;
+}
+
+/**
+ * mips_cdmm_cpu_notify() - Take action when a CPU is going online or offline.
+ * @nb: CPU notifier block.
+ * @action: Event that has taken place (CPU_*).
+ * @data: CPU number.
+ *
+ * This notifier is used to keep the CDMM buses updated as CPUs are offlined and
+ * onlined. When CPUs go offline or come back online, so does their CDMM bus, so
+ * devices must be informed. Also when CPUs come online for the first time the
+ * devices on the CDMM bus need discovering.
+ *
+ * Returns: NOTIFY_OK if event was used.
+ * NOTIFY_DONE if we didn't care.
+ */
+static int mips_cdmm_cpu_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ unsigned int cpu = (unsigned int)data;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ work_on_cpu(cpu, mips_cdmm_bus_up, &cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ work_on_cpu(cpu, mips_cdmm_bus_down, &cpu);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mips_cdmm_cpu_nb = {
+ .notifier_call = mips_cdmm_cpu_notify,
+};
+
+/**
+ * mips_cdmm_init() - Initialise CDMM bus.
+ *
+ * Initialise CDMM bus, discover CDMM devices for online CPUs, and arrange for
+ * hotplug notifications so the CDMM drivers can be kept up to date.
+ */
+static int __init mips_cdmm_init(void)
+{
+ unsigned int cpu;
+ int ret;
+
+ /* Register the bus */
+ ret = bus_register(&mips_cdmm_bustype);
+ if (ret)
+ return ret;
+
+ /* We want to be notified about new CPUs */
+ ret = register_cpu_notifier(&mips_cdmm_cpu_nb);
+ if (ret) {
+ pr_warn("cdmm: Failed to register CPU notifier\n");
+ goto out;
+ }
+
+ /* Discover devices on CDMM of online CPUs */
+ for_each_online_cpu(cpu)
+ work_on_cpu(cpu, mips_cdmm_bus_up, &cpu);
+
+ return 0;
+out:
+ bus_unregister(&mips_cdmm_bustype);
+ return ret;
+}
+subsys_initcall(mips_cdmm_init);
diff --git a/kernel/drivers/bus/mvebu-mbus.c b/kernel/drivers/bus/mvebu-mbus.c
new file mode 100644
index 000000000..6f047dcb9
--- /dev/null
+++ b/kernel/drivers/bus/mvebu-mbus.c
@@ -0,0 +1,1209 @@
+/*
+ * Address map functions for Marvell EBU SoCs (Kirkwood, Armada
+ * 370/XP, Dove, Orion5x and MV78xx0)
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The Marvell EBU SoCs have a configurable physical address space:
+ * the physical address at which certain devices (PCIe, NOR, NAND,
+ * etc.) sit can be configured. The configuration takes place through
+ * two sets of registers:
+ *
+ * - One to configure the access of the CPU to the devices. Depending
+ * on the families, there are between 8 and 20 configurable windows,
+ *   each of which can be used to create a physical memory window that
+ *   maps to a specific device. Devices are identified by a tuple (target,
+ * attribute).
+ *
+ * - One to configure the access of the CPU to the SDRAM. There are
+ * either 2 (for Dove) or 4 (for other families) windows to map the
+ * SDRAM into the physical address space.
+ *
+ * This driver:
+ *
+ * - Reads out the SDRAM address decoding windows at initialization
+ *   time, and fills the mvebu_mbus_dram_info structure with this
+ *   information. The exported function mv_mbus_dram_info() allows
+ *   device drivers to get the information related to the SDRAM
+ * address decoding windows. This is because devices also have their
+ * own windows (configured through registers that are part of each
+ * device register space), and therefore the drivers for Marvell
+ * devices have to configure those device -> SDRAM windows to ensure
+ * that DMA works properly.
+ *
+ * - Provides an API for platform code or device drivers to
+ * dynamically add or remove address decoding windows for the CPU ->
+ * device accesses. This API is mvebu_mbus_add_window_by_id(),
+ * mvebu_mbus_add_window_remap_by_id() and
+ * mvebu_mbus_del_window().
+ *
+ * - Provides a debugfs interface in /sys/kernel/debug/mvebu-mbus/ to
+ * see the list of CPU -> SDRAM windows and their configuration
+ * (file 'sdram') and the list of CPU -> devices windows and their
+ * configuration (file 'devices').
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mbus.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/debugfs.h>
+#include <linux/log2.h>
+#include <linux/syscore_ops.h>
+
+/*
+ * DDR target is the same on all platforms.
+ */
+#define TARGET_DDR 0
+
+/*
+ * CPU Address Decode Windows registers
+ */
+#define WIN_CTRL_OFF 0x0000
+#define WIN_CTRL_ENABLE BIT(0)
+/* Only on HW I/O coherency capable platforms */
+#define WIN_CTRL_SYNCBARRIER BIT(1)
+#define WIN_CTRL_TGT_MASK 0xf0
+#define WIN_CTRL_TGT_SHIFT 4
+#define WIN_CTRL_ATTR_MASK 0xff00
+#define WIN_CTRL_ATTR_SHIFT 8
+#define WIN_CTRL_SIZE_MASK 0xffff0000
+#define WIN_CTRL_SIZE_SHIFT 16
+#define WIN_BASE_OFF 0x0004
+#define WIN_BASE_LOW 0xffff0000
+#define WIN_BASE_HIGH 0xf
+#define WIN_REMAP_LO_OFF 0x0008
+#define WIN_REMAP_LOW 0xffff0000
+#define WIN_REMAP_HI_OFF 0x000c
+
+#define UNIT_SYNC_BARRIER_OFF 0x84
+#define UNIT_SYNC_BARRIER_ALL 0xFFFF
+
+#define ATTR_HW_COHERENCY (0x1 << 4)
+
+#define DDR_BASE_CS_OFF(n) (0x0000 + ((n) << 3))
+#define DDR_BASE_CS_HIGH_MASK 0xf
+#define DDR_BASE_CS_LOW_MASK 0xff000000
+#define DDR_SIZE_CS_OFF(n) (0x0004 + ((n) << 3))
+#define DDR_SIZE_ENABLED BIT(0)
+#define DDR_SIZE_CS_MASK 0x1c
+#define DDR_SIZE_CS_SHIFT 2
+#define DDR_SIZE_MASK 0xff000000
+
+#define DOVE_DDR_BASE_CS_OFF(n) ((n) << 4)
+
+/* Relative to mbusbridge_base */
+#define MBUS_BRIDGE_CTRL_OFF 0x0
+#define MBUS_BRIDGE_BASE_OFF 0x4
+
+/* Maximum number of windows, for all known platforms */
+#define MBUS_WINS_MAX 20
+
+struct mvebu_mbus_state;
+
+struct mvebu_mbus_soc_data {
+ unsigned int num_wins;
+ bool has_mbus_bridge;
+ unsigned int (*win_cfg_offset)(const int win);
+ unsigned int (*win_remap_offset)(const int win);
+ void (*setup_cpu_target)(struct mvebu_mbus_state *s);
+ int (*save_cpu_target)(struct mvebu_mbus_state *s,
+ u32 *store_addr);
+ int (*show_cpu_target)(struct mvebu_mbus_state *s,
+ struct seq_file *seq, void *v);
+};
+
+/*
+ * Used to store the state of one MBus window across suspend/resume.
+ */
+struct mvebu_mbus_win_data {
+ u32 ctrl;
+ u32 base;
+ u32 remap_lo;
+ u32 remap_hi;
+};
+
+struct mvebu_mbus_state {
+ void __iomem *mbuswins_base;
+ void __iomem *sdramwins_base;
+ void __iomem *mbusbridge_base;
+ phys_addr_t sdramwins_phys_base;
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_sdram;
+ struct dentry *debugfs_devs;
+ struct resource pcie_mem_aperture;
+ struct resource pcie_io_aperture;
+ const struct mvebu_mbus_soc_data *soc;
+ int hw_io_coherency;
+
+ /* Used during suspend/resume */
+ u32 mbus_bridge_ctrl;
+ u32 mbus_bridge_base;
+ struct mvebu_mbus_win_data wins[MBUS_WINS_MAX];
+};
+
+static struct mvebu_mbus_state mbus_state;
+
+static struct mbus_dram_target_info mvebu_mbus_dram_info;
+const struct mbus_dram_target_info *mv_mbus_dram_info(void)
+{
+ return &mvebu_mbus_dram_info;
+}
+EXPORT_SYMBOL_GPL(mv_mbus_dram_info);
+
+/* Checks whether the given window has remap capability */
+static bool mvebu_mbus_window_is_remappable(struct mvebu_mbus_state *mbus,
+ const int win)
+{
+ return mbus->soc->win_remap_offset(win) != MVEBU_MBUS_NO_REMAP;
+}
+
+/*
+ * Functions to manipulate the address decoding windows
+ */
+
+static void mvebu_mbus_read_window(struct mvebu_mbus_state *mbus,
+ int win, int *enabled, u64 *base,
+ u32 *size, u8 *target, u8 *attr,
+ u64 *remap)
+{
+ void __iomem *addr = mbus->mbuswins_base +
+ mbus->soc->win_cfg_offset(win);
+ u32 basereg = readl(addr + WIN_BASE_OFF);
+ u32 ctrlreg = readl(addr + WIN_CTRL_OFF);
+
+ if (!(ctrlreg & WIN_CTRL_ENABLE)) {
+ *enabled = 0;
+ return;
+ }
+
+ *enabled = 1;
+ *base = ((u64)basereg & WIN_BASE_HIGH) << 32;
+ *base |= (basereg & WIN_BASE_LOW);
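+	/*
+	 * The size field of ctrl holds the top bits of (size - 1); OR-ing
+	 * in the low bits and adding one recovers the window size in bytes.
+	 */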
+ *size = (ctrlreg | ~WIN_CTRL_SIZE_MASK) + 1;
+
+ if (target)
+ *target = (ctrlreg & WIN_CTRL_TGT_MASK) >> WIN_CTRL_TGT_SHIFT;
+
+ if (attr)
+ *attr = (ctrlreg & WIN_CTRL_ATTR_MASK) >> WIN_CTRL_ATTR_SHIFT;
+
+ if (remap) {
+ if (mvebu_mbus_window_is_remappable(mbus, win)) {
+ u32 remap_low, remap_hi;
+ void __iomem *addr_rmp = mbus->mbuswins_base +
+ mbus->soc->win_remap_offset(win);
+ remap_low = readl(addr_rmp + WIN_REMAP_LO_OFF);
+ remap_hi = readl(addr_rmp + WIN_REMAP_HI_OFF);
+ *remap = ((u64)remap_hi << 32) | remap_low;
+ } else
+ *remap = 0;
+ }
+}
+
+static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
+ int win)
+{
+ void __iomem *addr;
+
+ addr = mbus->mbuswins_base + mbus->soc->win_cfg_offset(win);
+ writel(0, addr + WIN_BASE_OFF);
+ writel(0, addr + WIN_CTRL_OFF);
+
+ if (mvebu_mbus_window_is_remappable(mbus, win)) {
+ addr = mbus->mbuswins_base + mbus->soc->win_remap_offset(win);
+ writel(0, addr + WIN_REMAP_LO_OFF);
+ writel(0, addr + WIN_REMAP_HI_OFF);
+ }
+}
+
+/* Checks whether the given window number is available */
+static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
+ const int win)
+{
+ void __iomem *addr = mbus->mbuswins_base +
+ mbus->soc->win_cfg_offset(win);
+ u32 ctrl = readl(addr + WIN_CTRL_OFF);
+
+ return !(ctrl & WIN_CTRL_ENABLE);
+}
+
+/*
+ * Checks that the given (base, base+size) area does not overlap any
+ * enabled window. Returns 1 when the area is free of conflicts, 0 when
+ * it overlaps an existing window.
+ */
+static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
+ phys_addr_t base, size_t size,
+ u8 target, u8 attr)
+{
+ u64 end = (u64)base + size;
+ int win;
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ u64 wbase, wend;
+ u32 wsize;
+ u8 wtarget, wattr;
+ int enabled;
+
+ mvebu_mbus_read_window(mbus, win,
+ &enabled, &wbase, &wsize,
+ &wtarget, &wattr, NULL);
+
+ if (!enabled)
+ continue;
+
+ wend = wbase + wsize;
+
+ /*
+ * Check if the current window overlaps with the
+ * proposed physical range
+ */
+ if ((u64)base < wend && end > wbase)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int mvebu_mbus_find_window(struct mvebu_mbus_state *mbus,
+ phys_addr_t base, size_t size)
+{
+ int win;
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ u64 wbase;
+ u32 wsize;
+ int enabled;
+
+ mvebu_mbus_read_window(mbus, win,
+ &enabled, &wbase, &wsize,
+ NULL, NULL, NULL);
+
+ if (!enabled)
+ continue;
+
+ if (base == wbase && size == wsize)
+ return win;
+ }
+
+ return -ENODEV;
+}
+
+static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
+ int win, phys_addr_t base, size_t size,
+ phys_addr_t remap, u8 target,
+ u8 attr)
+{
+ void __iomem *addr = mbus->mbuswins_base +
+ mbus->soc->win_cfg_offset(win);
+ u32 ctrl, remap_addr;
+
+ if (!is_power_of_2(size)) {
+ WARN(true, "Invalid MBus window size: 0x%zx\n", size);
+ return -EINVAL;
+ }
+
+ if ((base & (phys_addr_t)(size - 1)) != 0) {
+ WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base,
+ size);
+ return -EINVAL;
+ }
+
+ ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
+ (attr << WIN_CTRL_ATTR_SHIFT) |
+ (target << WIN_CTRL_TGT_SHIFT) |
+ WIN_CTRL_ENABLE;
+ if (mbus->hw_io_coherency)
+ ctrl |= WIN_CTRL_SYNCBARRIER;
+
+ writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
+ writel(ctrl, addr + WIN_CTRL_OFF);
+
+ if (mvebu_mbus_window_is_remappable(mbus, win)) {
+ void __iomem *addr_rmp = mbus->mbuswins_base +
+ mbus->soc->win_remap_offset(win);
+
+ if (remap == MVEBU_MBUS_NO_REMAP)
+ remap_addr = base;
+ else
+ remap_addr = remap;
+ writel(remap_addr & WIN_REMAP_LOW, addr_rmp + WIN_REMAP_LO_OFF);
+ writel(0, addr_rmp + WIN_REMAP_HI_OFF);
+ }
+
+ return 0;
+}
+
+static int mvebu_mbus_alloc_window(struct mvebu_mbus_state *mbus,
+ phys_addr_t base, size_t size,
+ phys_addr_t remap, u8 target,
+ u8 attr)
+{
+ int win;
+
+ if (remap == MVEBU_MBUS_NO_REMAP) {
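+		/*
+		 * First try windows without remap capability, keeping the
+		 * remappable windows free for callers that do need a remap.
+		 */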
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ if (mvebu_mbus_window_is_remappable(mbus, win))
+ continue;
+
+ if (mvebu_mbus_window_is_free(mbus, win))
+ return mvebu_mbus_setup_window(mbus, win, base,
+ size, remap,
+ target, attr);
+ }
+ }
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+		/* Skip the window if a remap is needed but not supported */
+ if ((remap != MVEBU_MBUS_NO_REMAP) &&
+ !mvebu_mbus_window_is_remappable(mbus, win))
+ continue;
+
+ if (mvebu_mbus_window_is_free(mbus, win))
+ return mvebu_mbus_setup_window(mbus, win, base, size,
+ remap, target, attr);
+ }
+
+ return -ENOMEM;
+}
+
+/*
+ * Debugfs debugging
+ */
+
+/* Common function used for Dove, Kirkwood, Armada 370/XP and Orion 5x */
+static int mvebu_sdram_debug_show_orion(struct mvebu_mbus_state *mbus,
+ struct seq_file *seq, void *v)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ u32 basereg = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+ u32 sizereg = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+ u64 base;
+ u32 size;
+
+ if (!(sizereg & DDR_SIZE_ENABLED)) {
+ seq_printf(seq, "[%d] disabled\n", i);
+ continue;
+ }
+
+ base = ((u64)basereg & DDR_BASE_CS_HIGH_MASK) << 32;
+ base |= basereg & DDR_BASE_CS_LOW_MASK;
+ size = (sizereg | ~DDR_SIZE_MASK);
+
+ seq_printf(seq, "[%d] %016llx - %016llx : cs%d\n",
+ i, (unsigned long long)base,
+ (unsigned long long)base + size + 1,
+ (sizereg & DDR_SIZE_CS_MASK) >> DDR_SIZE_CS_SHIFT);
+ }
+
+ return 0;
+}
+
+/* Special function for Dove */
+static int mvebu_sdram_debug_show_dove(struct mvebu_mbus_state *mbus,
+ struct seq_file *seq, void *v)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+ u64 base;
+ u32 size;
+
+ if (!(map & 1)) {
+ seq_printf(seq, "[%d] disabled\n", i);
+ continue;
+ }
+
+ base = map & 0xff800000;
+ size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
+
+ seq_printf(seq, "[%d] %016llx - %016llx : cs%d\n",
+ i, (unsigned long long)base,
+ (unsigned long long)base + size, i);
+ }
+
+ return 0;
+}
+
+static int mvebu_sdram_debug_show(struct seq_file *seq, void *v)
+{
+ struct mvebu_mbus_state *mbus = &mbus_state;
+ return mbus->soc->show_cpu_target(mbus, seq, v);
+}
+
+static int mvebu_sdram_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvebu_sdram_debug_show, inode->i_private);
+}
+
+static const struct file_operations mvebu_sdram_debug_fops = {
+ .open = mvebu_sdram_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
+{
+ struct mvebu_mbus_state *mbus = &mbus_state;
+ int win;
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ u64 wbase, wremap;
+ u32 wsize;
+ u8 wtarget, wattr;
+ int enabled;
+
+ mvebu_mbus_read_window(mbus, win,
+ &enabled, &wbase, &wsize,
+ &wtarget, &wattr, &wremap);
+
+ if (!enabled) {
+ seq_printf(seq, "[%02d] disabled\n", win);
+ continue;
+ }
+
+ seq_printf(seq, "[%02d] %016llx - %016llx : %04x:%04x",
+ win, (unsigned long long)wbase,
+ (unsigned long long)(wbase + wsize), wtarget, wattr);
+
+ if (!is_power_of_2(wsize) ||
+ ((wbase & (u64)(wsize - 1)) != 0))
+ seq_puts(seq, " (Invalid base/size!!)");
+
+ if (mvebu_mbus_window_is_remappable(mbus, win)) {
+ seq_printf(seq, " (remap %016llx)\n",
+ (unsigned long long)wremap);
+ } else
+ seq_printf(seq, "\n");
+ }
+
+ return 0;
+}
+
+static int mvebu_devs_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvebu_devs_debug_show, inode->i_private);
+}
+
+static const struct file_operations mvebu_devs_debug_fops = {
+ .open = mvebu_devs_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * SoC-specific functions and definitions
+ */
+
+static unsigned int generic_mbus_win_cfg_offset(int win)
+{
+ return win << 4;
+}
+
+static unsigned int armada_370_xp_mbus_win_cfg_offset(int win)
+{
+ /* The register layout is a bit annoying and the below code
+ * tries to cope with it.
+ * - At offset 0x0, there are the registers for the first 8
+ * windows, with 4 registers of 32 bits per window (ctrl,
+ * base, remap low, remap high)
+ * - Then at offset 0x80, there is a hole of 0x10 bytes for
+ * the internal registers base address and internal units
+ * sync barrier register.
+	 * - Then at offset 0x90, there are the registers for the remaining 12
+ * windows, with only 2 registers of 32 bits per window
+ * (ctrl, base).
+ */
+ if (win < 8)
+ return win << 4;
+ else
+ return 0x90 + ((win - 8) << 3);
+}
+
+static unsigned int mv78xx0_mbus_win_cfg_offset(int win)
+{
+ if (win < 8)
+ return win << 4;
+ else
+ return 0x900 + ((win - 8) << 4);
+}
+
+static unsigned int generic_mbus_win_remap_2_offset(int win)
+{
+ if (win < 2)
+ return generic_mbus_win_cfg_offset(win);
+ else
+ return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int generic_mbus_win_remap_4_offset(int win)
+{
+ if (win < 4)
+ return generic_mbus_win_cfg_offset(win);
+ else
+ return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int generic_mbus_win_remap_8_offset(int win)
+{
+ if (win < 8)
+ return generic_mbus_win_cfg_offset(win);
+ else
+ return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int armada_xp_mbus_win_remap_offset(int win)
+{
+ if (win < 8)
+ return generic_mbus_win_cfg_offset(win);
+ else if (win == 13)
+ return 0xF0 - WIN_REMAP_LO_OFF;
+ else
+ return MVEBU_MBUS_NO_REMAP;
+}
+
+static void __init
+mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
+{
+ int i;
+ int cs;
+
+ mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+
+ for (i = 0, cs = 0; i < 4; i++) {
+ u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+ u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+
+ /*
+ * We only take care of entries for which the chip
+ * select is enabled, and that don't have high base
+		 * address bits set (devices can only access the first
+		 * 4 GB of the memory).
+ */
+ if ((size & DDR_SIZE_ENABLED) &&
+ !(base & DDR_BASE_CS_HIGH_MASK)) {
+ struct mbus_dram_window *w;
+
+ w = &mvebu_mbus_dram_info.cs[cs++];
+ w->cs_index = i;
+ w->mbus_attr = 0xf & ~(1 << i);
+ if (mbus->hw_io_coherency)
+ w->mbus_attr |= ATTR_HW_COHERENCY;
+ w->base = base & DDR_BASE_CS_LOW_MASK;
+ w->size = (size | ~DDR_SIZE_MASK) + 1;
+ }
+ }
+ mvebu_mbus_dram_info.num_cs = cs;
+}
+
+static int
+mvebu_mbus_default_save_cpu_target(struct mvebu_mbus_state *mbus,
+ u32 *store_addr)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+ u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+
+ writel(mbus->sdramwins_phys_base + DDR_BASE_CS_OFF(i),
+ store_addr++);
+ writel(base, store_addr++);
+ writel(mbus->sdramwins_phys_base + DDR_SIZE_CS_OFF(i),
+ store_addr++);
+ writel(size, store_addr++);
+ }
+
+ /* We've written 16 words to the store address */
+ return 16;
+}
+
+static void __init
+mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
+{
+ int i;
+ int cs;
+
+ mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+
+ for (i = 0, cs = 0; i < 2; i++) {
+ u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+
+ /*
+ * Chip select enabled?
+ */
+ if (map & 1) {
+ struct mbus_dram_window *w;
+
+ w = &mvebu_mbus_dram_info.cs[cs++];
+ w->cs_index = i;
+			/*
+			 * CS address decoding is done inside the DDR
+			 * controller, so there is no need to provide
+			 * attributes.
+			 */
+			w->mbus_attr = 0;
+ w->base = map & 0xff800000;
+ w->size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
+ }
+ }
+
+ mvebu_mbus_dram_info.num_cs = cs;
+}
+
+static int
+mvebu_mbus_dove_save_cpu_target(struct mvebu_mbus_state *mbus,
+ u32 *store_addr)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+
+ writel(mbus->sdramwins_phys_base + DOVE_DDR_BASE_CS_OFF(i),
+ store_addr++);
+ writel(map, store_addr++);
+ }
+
+ /* We've written 4 words to the store address */
+ return 4;
+}
+
+int mvebu_mbus_save_cpu_target(u32 *store_addr)
+{
+ return mbus_state.soc->save_cpu_target(&mbus_state, store_addr);
+}
+
+static const struct mvebu_mbus_soc_data armada_370_mbus_data = {
+ .num_wins = 20,
+ .has_mbus_bridge = true,
+ .win_cfg_offset = armada_370_xp_mbus_win_cfg_offset,
+ .win_remap_offset = generic_mbus_win_remap_8_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+};
+
+static const struct mvebu_mbus_soc_data armada_xp_mbus_data = {
+ .num_wins = 20,
+ .has_mbus_bridge = true,
+ .win_cfg_offset = armada_370_xp_mbus_win_cfg_offset,
+ .win_remap_offset = armada_xp_mbus_win_remap_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+};
+
+static const struct mvebu_mbus_soc_data kirkwood_mbus_data = {
+ .num_wins = 8,
+ .win_cfg_offset = generic_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_4_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+};
+
+static const struct mvebu_mbus_soc_data dove_mbus_data = {
+ .num_wins = 8,
+ .win_cfg_offset = generic_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_dove_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_4_offset,
+ .setup_cpu_target = mvebu_mbus_dove_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_dove,
+};
+
+/*
+ * Some variants of Orion5x have 4 remappable windows, others have
+ * only two of them.
+ */
+static const struct mvebu_mbus_soc_data orion5x_4win_mbus_data = {
+ .num_wins = 8,
+ .win_cfg_offset = generic_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_4_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+};
+
+static const struct mvebu_mbus_soc_data orion5x_2win_mbus_data = {
+ .num_wins = 8,
+ .win_cfg_offset = generic_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_2_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+};
+
+static const struct mvebu_mbus_soc_data mv78xx0_mbus_data = {
+ .num_wins = 14,
+ .win_cfg_offset = mv78xx0_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_8_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+};
+
+static const struct of_device_id of_mvebu_mbus_ids[] = {
+ { .compatible = "marvell,armada370-mbus",
+ .data = &armada_370_mbus_data, },
+ { .compatible = "marvell,armada375-mbus",
+ .data = &armada_xp_mbus_data, },
+ { .compatible = "marvell,armada380-mbus",
+ .data = &armada_xp_mbus_data, },
+ { .compatible = "marvell,armadaxp-mbus",
+ .data = &armada_xp_mbus_data, },
+ { .compatible = "marvell,kirkwood-mbus",
+ .data = &kirkwood_mbus_data, },
+ { .compatible = "marvell,dove-mbus",
+ .data = &dove_mbus_data, },
+ { .compatible = "marvell,orion5x-88f5281-mbus",
+ .data = &orion5x_4win_mbus_data, },
+ { .compatible = "marvell,orion5x-88f5182-mbus",
+ .data = &orion5x_2win_mbus_data, },
+ { .compatible = "marvell,orion5x-88f5181-mbus",
+ .data = &orion5x_2win_mbus_data, },
+ { .compatible = "marvell,orion5x-88f6183-mbus",
+ .data = &orion5x_4win_mbus_data, },
+ { .compatible = "marvell,mv78xx0-mbus",
+ .data = &mv78xx0_mbus_data, },
+ { },
+};
+
+/*
+ * Public API of the driver
+ */
+int mvebu_mbus_add_window_remap_by_id(unsigned int target,
+ unsigned int attribute,
+ phys_addr_t base, size_t size,
+ phys_addr_t remap)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+
+ if (!mvebu_mbus_window_conflicts(s, base, size, target, attribute)) {
+ pr_err("cannot add window '%x:%x', conflicts with another window\n",
+ target, attribute);
+ return -EINVAL;
+ }
+
+ return mvebu_mbus_alloc_window(s, base, size, remap, target, attribute);
+}
+
+int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
+ phys_addr_t base, size_t size)
+{
+ return mvebu_mbus_add_window_remap_by_id(target, attribute, base,
+ size, MVEBU_MBUS_NO_REMAP);
+}
+
+int mvebu_mbus_del_window(phys_addr_t base, size_t size)
+{
+ int win;
+
+ win = mvebu_mbus_find_window(&mbus_state, base, size);
+ if (win < 0)
+ return win;
+
+ mvebu_mbus_disable_window(&mbus_state, win);
+ return 0;
+}
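+
+/*
+ * Usage sketch for the API above (illustrative: the target, attribute and
+ * base values are placeholders, as the real ones are SoC-specific):
+ *
+ *	ret = mvebu_mbus_add_window_by_id(0x01, 0x2f, 0xf0000000, SZ_64K);
+ *	if (!ret) {
+ *		...
+ *		mvebu_mbus_del_window(0xf0000000, SZ_64K);
+ *	}
+ */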
+
+void mvebu_mbus_get_pcie_mem_aperture(struct resource *res)
+{
+ if (!res)
+ return;
+ *res = mbus_state.pcie_mem_aperture;
+}
+
+void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
+{
+ if (!res)
+ return;
+ *res = mbus_state.pcie_io_aperture;
+}
+
+static __init int mvebu_mbus_debugfs_init(void)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+
+ /*
+	 * If no base has been initialized, it doesn't make sense to
+ * register the debugfs entries. We may be on a multiplatform
+ * kernel that isn't running a Marvell EBU SoC.
+ */
+ if (!s->mbuswins_base)
+ return 0;
+
+ s->debugfs_root = debugfs_create_dir("mvebu-mbus", NULL);
+ if (s->debugfs_root) {
+ s->debugfs_sdram = debugfs_create_file("sdram", S_IRUGO,
+ s->debugfs_root, NULL,
+ &mvebu_sdram_debug_fops);
+ s->debugfs_devs = debugfs_create_file("devices", S_IRUGO,
+ s->debugfs_root, NULL,
+ &mvebu_devs_debug_fops);
+ }
+
+ return 0;
+}
+fs_initcall(mvebu_mbus_debugfs_init);
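+
+/*
+ * The resulting windows can then be inspected from userspace, assuming
+ * debugfs is mounted in the usual location:
+ *
+ *	cat /sys/kernel/debug/mvebu-mbus/sdram
+ *	cat /sys/kernel/debug/mvebu-mbus/devices
+ */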
+
+static int mvebu_mbus_suspend(void)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+ int win;
+
+ if (!s->mbusbridge_base)
+ return -ENODEV;
+
+ for (win = 0; win < s->soc->num_wins; win++) {
+ void __iomem *addr = s->mbuswins_base +
+ s->soc->win_cfg_offset(win);
+ void __iomem *addr_rmp;
+
+ s->wins[win].base = readl(addr + WIN_BASE_OFF);
+ s->wins[win].ctrl = readl(addr + WIN_CTRL_OFF);
+
+ if (!mvebu_mbus_window_is_remappable(s, win))
+ continue;
+
+ addr_rmp = s->mbuswins_base +
+ s->soc->win_remap_offset(win);
+
+ s->wins[win].remap_lo = readl(addr_rmp + WIN_REMAP_LO_OFF);
+ s->wins[win].remap_hi = readl(addr_rmp + WIN_REMAP_HI_OFF);
+ }
+
+ s->mbus_bridge_ctrl = readl(s->mbusbridge_base +
+ MBUS_BRIDGE_CTRL_OFF);
+ s->mbus_bridge_base = readl(s->mbusbridge_base +
+ MBUS_BRIDGE_BASE_OFF);
+
+ return 0;
+}
+
+static void mvebu_mbus_resume(void)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+ int win;
+
+ writel(s->mbus_bridge_ctrl,
+ s->mbusbridge_base + MBUS_BRIDGE_CTRL_OFF);
+ writel(s->mbus_bridge_base,
+ s->mbusbridge_base + MBUS_BRIDGE_BASE_OFF);
+
+ for (win = 0; win < s->soc->num_wins; win++) {
+ void __iomem *addr = s->mbuswins_base +
+ s->soc->win_cfg_offset(win);
+ void __iomem *addr_rmp;
+
+ writel(s->wins[win].base, addr + WIN_BASE_OFF);
+ writel(s->wins[win].ctrl, addr + WIN_CTRL_OFF);
+
+ if (!mvebu_mbus_window_is_remappable(s, win))
+ continue;
+
+ addr_rmp = s->mbuswins_base +
+ s->soc->win_remap_offset(win);
+
+ writel(s->wins[win].remap_lo, addr_rmp + WIN_REMAP_LO_OFF);
+ writel(s->wins[win].remap_hi, addr_rmp + WIN_REMAP_HI_OFF);
+ }
+}
+
+struct syscore_ops mvebu_mbus_syscore_ops = {
+ .suspend = mvebu_mbus_suspend,
+ .resume = mvebu_mbus_resume,
+};
+
+static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
+ phys_addr_t mbuswins_phys_base,
+ size_t mbuswins_size,
+ phys_addr_t sdramwins_phys_base,
+ size_t sdramwins_size,
+ phys_addr_t mbusbridge_phys_base,
+ size_t mbusbridge_size,
+ bool is_coherent)
+{
+ int win;
+
+ mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
+ if (!mbus->mbuswins_base)
+ return -ENOMEM;
+
+ mbus->sdramwins_base = ioremap(sdramwins_phys_base, sdramwins_size);
+ if (!mbus->sdramwins_base) {
+ iounmap(mbus_state.mbuswins_base);
+ return -ENOMEM;
+ }
+
+ mbus->sdramwins_phys_base = sdramwins_phys_base;
+
+ if (mbusbridge_phys_base) {
+ mbus->mbusbridge_base = ioremap(mbusbridge_phys_base,
+ mbusbridge_size);
+ if (!mbus->mbusbridge_base) {
+ iounmap(mbus->sdramwins_base);
+ iounmap(mbus->mbuswins_base);
+ return -ENOMEM;
+ }
+ } else
+ mbus->mbusbridge_base = NULL;
+
+ for (win = 0; win < mbus->soc->num_wins; win++)
+ mvebu_mbus_disable_window(mbus, win);
+
+ mbus->soc->setup_cpu_target(mbus);
+
+ if (is_coherent)
+ writel(UNIT_SYNC_BARRIER_ALL,
+ mbus->mbuswins_base + UNIT_SYNC_BARRIER_OFF);
+
+ register_syscore_ops(&mvebu_mbus_syscore_ops);
+
+ return 0;
+}
+
+int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
+ size_t mbuswins_size,
+ phys_addr_t sdramwins_phys_base,
+ size_t sdramwins_size)
+{
+ const struct of_device_id *of_id;
+
+ for (of_id = of_mvebu_mbus_ids; of_id->compatible[0]; of_id++)
+ if (!strcmp(of_id->compatible, soc))
+ break;
+
+ if (!of_id->compatible[0]) {
+ pr_err("could not find a matching SoC family\n");
+ return -ENODEV;
+ }
+
+ mbus_state.soc = of_id->data;
+
+ return mvebu_mbus_common_init(&mbus_state,
+ mbuswins_phys_base,
+ mbuswins_size,
+ sdramwins_phys_base,
+ sdramwins_size, 0, 0, false);
+}
+
+#ifdef CONFIG_OF
+/*
+ * The window IDs in the ranges DT property have the following format:
+ * - bits 28 to 31: MBus custom field
+ * - bits 24 to 27: window target ID
+ * - bits 16 to 23: window attribute ID
+ * - bits 0 to 15: unused
+ */
+#define CUSTOM(id) (((id) & 0xF0000000) >> 24)
+#define TARGET(id) (((id) & 0x0F000000) >> 24)
+#define ATTR(id) (((id) & 0x00FF0000) >> 16)
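+
+/*
+ * For example, a window ID of 0x01020000 decodes as TARGET() == 0x1 and
+ * ATTR() == 0x02, with CUSTOM() == 0, so it describes a static window.
+ * Note that CUSTOM() is only ever tested against zero, so its >> 24 shift
+ * (rather than >> 28) is harmless.
+ */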
+
+static int __init mbus_dt_setup_win(struct mvebu_mbus_state *mbus,
+ u32 base, u32 size,
+ u8 target, u8 attr)
+{
+ if (!mvebu_mbus_window_conflicts(mbus, base, size, target, attr)) {
+ pr_err("cannot add window '%04x:%04x', conflicts with another window\n",
+ target, attr);
+ return -EBUSY;
+ }
+
+ if (mvebu_mbus_alloc_window(mbus, base, size, MVEBU_MBUS_NO_REMAP,
+ target, attr)) {
+ pr_err("cannot add window '%04x:%04x', too many windows\n",
+ target, attr);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int __init
+mbus_parse_ranges(struct device_node *node,
+ int *addr_cells, int *c_addr_cells, int *c_size_cells,
+ int *cell_count, const __be32 **ranges_start,
+ const __be32 **ranges_end)
+{
+ const __be32 *prop;
+ int ranges_len, tuple_len;
+
+ /* Allow a node with no 'ranges' property */
+ *ranges_start = of_get_property(node, "ranges", &ranges_len);
+ if (*ranges_start == NULL) {
+ *addr_cells = *c_addr_cells = *c_size_cells = *cell_count = 0;
+ *ranges_start = *ranges_end = NULL;
+ return 0;
+ }
+ *ranges_end = *ranges_start + ranges_len / sizeof(__be32);
+
+ *addr_cells = of_n_addr_cells(node);
+
+ prop = of_get_property(node, "#address-cells", NULL);
+ *c_addr_cells = be32_to_cpup(prop);
+
+ prop = of_get_property(node, "#size-cells", NULL);
+ *c_size_cells = be32_to_cpup(prop);
+
+ *cell_count = *addr_cells + *c_addr_cells + *c_size_cells;
+ tuple_len = (*cell_count) * sizeof(__be32);
+
+ if (ranges_len % tuple_len) {
+ pr_warn("malformed ranges entry '%s'\n", node->name);
+ return -EINVAL;
+ }
+ return 0;
+}
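+
+/*
+ * A sketch of the ranges layout being parsed (illustrative values only):
+ * with #address-cells = <2> and #size-cells = <1> in the mbus node and a
+ * single-cell parent address, one tuple could look like
+ *
+ *	ranges = <0x01020000 0  0xe8000000  0x100000>;
+ *
+ * where the first cell carries the window ID decoded by the macros above,
+ * the third cell is the CPU base address and the fourth the window size.
+ */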
+
+static int __init mbus_dt_setup(struct mvebu_mbus_state *mbus,
+ struct device_node *np)
+{
+ int addr_cells, c_addr_cells, c_size_cells;
+ int i, ret, cell_count;
+ const __be32 *r, *ranges_start, *ranges_end;
+
+ ret = mbus_parse_ranges(np, &addr_cells, &c_addr_cells,
+ &c_size_cells, &cell_count,
+ &ranges_start, &ranges_end);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0, r = ranges_start; r < ranges_end; r += cell_count, i++) {
+ u32 windowid, base, size;
+ u8 target, attr;
+
+ /*
+		 * An entry with a non-zero custom field does not
+ * correspond to a static window, so skip it.
+ */
+ windowid = of_read_number(r, 1);
+ if (CUSTOM(windowid))
+ continue;
+
+ target = TARGET(windowid);
+ attr = ATTR(windowid);
+
+ base = of_read_number(r + c_addr_cells, addr_cells);
+ size = of_read_number(r + c_addr_cells + addr_cells,
+ c_size_cells);
+ ret = mbus_dt_setup_win(mbus, base, size, target, attr);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
+ struct resource *mem,
+ struct resource *io)
+{
+ u32 reg[2];
+ int ret;
+
+ /*
+ * These are optional, so we make sure that resource_size(x) will
+ * return 0.
+ */
+ memset(mem, 0, sizeof(struct resource));
+ mem->end = -1;
+ memset(io, 0, sizeof(struct resource));
+ io->end = -1;
+
+ ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
+ if (!ret) {
+ mem->start = reg[0];
+ mem->end = mem->start + reg[1] - 1;
+ mem->flags = IORESOURCE_MEM;
+ }
+
+ ret = of_property_read_u32_array(np, "pcie-io-aperture", reg, ARRAY_SIZE(reg));
+ if (!ret) {
+ io->start = reg[0];
+ io->end = io->start + reg[1] - 1;
+ io->flags = IORESOURCE_IO;
+ }
+}
+
+int __init mvebu_mbus_dt_init(bool is_coherent)
+{
+ struct resource mbuswins_res, sdramwins_res, mbusbridge_res;
+ struct device_node *np, *controller;
+ const struct of_device_id *of_id;
+ const __be32 *prop;
+ int ret;
+
+ np = of_find_matching_node_and_match(NULL, of_mvebu_mbus_ids, &of_id);
+ if (!np) {
+ pr_err("could not find a matching SoC family\n");
+ return -ENODEV;
+ }
+
+ mbus_state.soc = of_id->data;
+
+ prop = of_get_property(np, "controller", NULL);
+ if (!prop) {
+ pr_err("required 'controller' property missing\n");
+ return -EINVAL;
+ }
+
+ controller = of_find_node_by_phandle(be32_to_cpup(prop));
+ if (!controller) {
+ pr_err("could not find an 'mbus-controller' node\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(controller, 0, &mbuswins_res)) {
+ pr_err("cannot get MBUS register address\n");
+ return -EINVAL;
+ }
+
+ if (of_address_to_resource(controller, 1, &sdramwins_res)) {
+ pr_err("cannot get SDRAM register address\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Set the resource to 0 so that it can be left unmapped by
+ * mvebu_mbus_common_init() if the DT doesn't carry the
+ * necessary information. This is needed to preserve backward
+ * compatibility.
+ */
+ memset(&mbusbridge_res, 0, sizeof(mbusbridge_res));
+
+ if (mbus_state.soc->has_mbus_bridge) {
+ if (of_address_to_resource(controller, 2, &mbusbridge_res))
+ pr_warn(FW_WARN "deprecated mbus-mvebu Device Tree, suspend/resume will not work\n");
+ }
+
+ mbus_state.hw_io_coherency = is_coherent;
+
+ /* Get optional pcie-{mem,io}-aperture properties */
+ mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture,
+ &mbus_state.pcie_io_aperture);
+
+ ret = mvebu_mbus_common_init(&mbus_state,
+ mbuswins_res.start,
+ resource_size(&mbuswins_res),
+ sdramwins_res.start,
+ resource_size(&sdramwins_res),
+ mbusbridge_res.start,
+ resource_size(&mbusbridge_res),
+ is_coherent);
+ if (ret)
+ return ret;
+
+ /* Setup statically declared windows in the DT */
+ return mbus_dt_setup(&mbus_state, np);
+}
+#endif
diff --git a/kernel/drivers/bus/omap-ocp2scp.c b/kernel/drivers/bus/omap-ocp2scp.c
new file mode 100644
index 000000000..9f1856948
--- /dev/null
+++ b/kernel/drivers/bus/omap-ocp2scp.c
@@ -0,0 +1,123 @@
+/*
+ * omap-ocp2scp.c - transform ocp interface protocol to scp protocol
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#define OCP2SCP_TIMING 0x18
+#define SYNC2_MASK 0xf
+
+static int ocp2scp_remove_devices(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int omap_ocp2scp_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 reg;
+ void __iomem *regs;
+ struct resource *res;
+ struct device_node *np = pdev->dev.of_node;
+
+ if (np) {
+ ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to add resources for ocp2scp child\n");
+ goto err0;
+ }
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ /*
+ * As per AM572x TRM: http://www.ti.com/lit/ug/spruhz6/spruhz6.pdf
+ * under section 26.3.2.2, table 26-26 OCP2SCP TIMING Caution;
+ * As per OMAP4430 TRM: http://www.ti.com/lit/ug/swpu231ap/swpu231ap.pdf
+ * under section 23.12.6.2.2 , Table 23-1213 OCP2SCP TIMING Caution;
+ * As per OMAP4460 TRM: http://www.ti.com/lit/ug/swpu235ab/swpu235ab.pdf
+ * under section 23.12.6.2.2, Table 23-1213 OCP2SCP TIMING Caution;
+ * As per OMAP543x TRM http://www.ti.com/lit/pdf/swpu249
+ * under section 27.3.2.2, Table 27-27 OCP2SCP TIMING Caution;
+ *
+ * Read path of OCP2SCP is not working properly due to low reset value
+ * of SYNC2 parameter in OCP2SCP. Suggested reset value is 0x6 or more.
+ */
+ if (!of_device_is_compatible(np, "ti,am437x-ocp2scp")) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(regs)) {
+			ret = PTR_ERR(regs);
+			goto err0;
+		}
+
+ pm_runtime_get_sync(&pdev->dev);
+ reg = readl_relaxed(regs + OCP2SCP_TIMING);
+ reg &= ~(SYNC2_MASK);
+ reg |= 0x6;
+ writel_relaxed(reg, regs + OCP2SCP_TIMING);
+ pm_runtime_put_sync(&pdev->dev);
+ }
+
+ return 0;
+
+err0:
+ device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
+
+ return ret;
+}
+
+static int omap_ocp2scp_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id omap_ocp2scp_id_table[] = {
+ { .compatible = "ti,omap-ocp2scp" },
+ { .compatible = "ti,am437x-ocp2scp" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
+#endif
+
+static struct platform_driver omap_ocp2scp_driver = {
+ .probe = omap_ocp2scp_probe,
+ .remove = omap_ocp2scp_remove,
+ .driver = {
+ .name = "omap-ocp2scp",
+ .of_match_table = of_match_ptr(omap_ocp2scp_id_table),
+ },
+};
+
+module_platform_driver(omap_ocp2scp_driver);
+
+MODULE_ALIAS("platform: omap-ocp2scp");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("OMAP OCP2SCP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/bus/omap_l3_noc.c b/kernel/drivers/bus/omap_l3_noc.c
new file mode 100644
index 000000000..ebee57d71
--- /dev/null
+++ b/kernel/drivers/bus/omap_l3_noc.c
@@ -0,0 +1,377 @@
+/*
+ * OMAP L3 Interconnect error handling driver
+ *
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Sricharan <r.sricharan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "omap_l3_noc.h"
+
+/**
+ * l3_handle_target() - Handle Target specific parse and reporting
+ * @l3: pointer to l3 struct
+ * @base: base address of clkdm
+ * @flag_mux: flagmux corresponding to the event
+ * @err_src: error source index of the slave (target)
+ *
+ * This does the second part of the error interrupt handling:
+ * 3) Parse in the slave information
+ * 4) Print the logged information.
+ * 5) Add dump stack to provide kernel trace.
+ * 6) Clear the source if known.
+ *
+ * This handles two types of errors:
+ * 1) Custom errors in L3 :
+ * Target like DMM/FW/EMIF generates SRESP=ERR error
+ * 2) Standard L3 error:
+ * - Unsupported CMD.
+ * L3 tries to access target while it is idle
+ * - OCP disconnect.
+ * - Address hole error:
+ * If DSS/ISS/FDIF/USBHOSTFS access a target where they
+ * do not have connectivity, the error is logged in
+ * their default target which is DMM2.
+ *
+ * On High Secure devices, firewall errors are possible and those
+ * can be trapped as well. But the trapping is implemented as part of
+ * the secure software and hence need not be implemented here.
+ */
+static int l3_handle_target(struct omap_l3 *l3, void __iomem *base,
+ struct l3_flagmux_data *flag_mux, int err_src)
+{
+ int k;
+ u32 std_err_main, clear, masterid;
+ u8 op_code, m_req_info;
+ void __iomem *l3_targ_base;
+ void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr;
+ void __iomem *l3_targ_hdr, *l3_targ_info;
+ struct l3_target_data *l3_targ_inst;
+ struct l3_masters_data *master;
+ char *target_name, *master_name = "UN IDENTIFIED";
+ char *err_description;
+ char err_string[30] = { 0 };
+ char info_string[60] = { 0 };
+
+	/* We DO NOT expect err_src to go out of bounds */
+ BUG_ON(err_src > MAX_CLKDM_TARGETS);
+
+ if (err_src < flag_mux->num_targ_data) {
+ l3_targ_inst = &flag_mux->l3_targ[err_src];
+ target_name = l3_targ_inst->name;
+ l3_targ_base = base + l3_targ_inst->offset;
+ } else {
+ target_name = L3_TARGET_NOT_SUPPORTED;
+ }
+
+ if (target_name == L3_TARGET_NOT_SUPPORTED)
+ return -ENODEV;
+
+ /* Read the stderrlog_main_source from clk domain */
+ l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN;
+ l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB;
+
+ std_err_main = readl_relaxed(l3_targ_stderr);
+
+ switch (std_err_main & CUSTOM_ERROR) {
+ case STANDARD_ERROR:
+ err_description = "Standard";
+ snprintf(err_string, sizeof(err_string),
+ ": At Address: 0x%08X ",
+ readl_relaxed(l3_targ_slvofslsb));
+
+ l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR;
+ l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR;
+ l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO;
+ break;
+
+ case CUSTOM_ERROR:
+ err_description = "Custom";
+
+ l3_targ_mstaddr = l3_targ_base +
+ L3_TARG_STDERRLOG_CINFO_MSTADDR;
+ l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE;
+ l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO;
+ break;
+
+ default:
+ /* Nothing to be handled here as of now */
+ return 0;
+ }
+
+ /* STDERRLOG_MSTADDR Stores the NTTP master address. */
+ masterid = (readl_relaxed(l3_targ_mstaddr) &
+ l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask);
+
+ for (k = 0, master = l3->l3_masters; k < l3->num_masters;
+ k++, master++) {
+ if (masterid == master->id) {
+ master_name = master->name;
+ break;
+ }
+ }
+
+ op_code = readl_relaxed(l3_targ_hdr) & 0x7;
+
+ m_req_info = readl_relaxed(l3_targ_info) & 0xF;
+ snprintf(info_string, sizeof(info_string),
+ ": %s in %s mode during %s access",
+ (m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access",
+ (m_req_info & BIT(1)) ? "Supervisor" : "User",
+ (m_req_info & BIT(3)) ? "Debug" : "Functional");
+
+ WARN(true,
+ "%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n",
+ dev_name(l3->dev),
+ err_description,
+ master_name, target_name,
+ l3_transaction_type[op_code],
+ err_string, info_string);
+
+ /* clear the std error log*/
+ clear = std_err_main | CLEAR_STDERR_LOG;
+ writel_relaxed(clear, l3_targ_stderr);
+
+ return 0;
+}
+
+/**
+ * l3_interrupt_handler() - interrupt handler for l3 events
+ * @irq: irq number
+ * @_l3: pointer to l3 structure
+ *
+ * Interrupt Handler for L3 error detection.
+ *	1) Identify the L3 clockdomain partition to which the error belongs.
+ * 2) Identify the slave where the error information is logged
+ * ... handle the slave event..
+ * 7) if the slave is unknown, mask out the slave.
+ */
+static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
+{
+ struct omap_l3 *l3 = _l3;
+ int inttype, i, ret;
+ int err_src = 0;
+ u32 err_reg, mask_val;
+ void __iomem *base, *mask_reg;
+ struct l3_flagmux_data *flag_mux;
+
+ /* Get the Type of interrupt */
+ inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
+
+ for (i = 0; i < l3->num_modules; i++) {
+ /*
+ * Read the regerr register of the clock domain
+ * to determine the source
+ */
+ base = l3->l3_base[i];
+ flag_mux = l3->l3_flagmux[i];
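+		/*
+		 * The debug-error flag registers sit 8 bytes above the
+		 * application-error ones, hence the (inttype << 3) offset.
+		 */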
+ err_reg = readl_relaxed(base + flag_mux->offset +
+ L3_FLAGMUX_REGERR0 + (inttype << 3));
+
+ err_reg &= ~(inttype ? flag_mux->mask_app_bits :
+ flag_mux->mask_dbg_bits);
+
+ /* Get the corresponding error and analyse */
+ if (err_reg) {
+ /* Identify the source from control status register */
+ err_src = __ffs(err_reg);
+
+ ret = l3_handle_target(l3, base, flag_mux, err_src);
+
+ /*
+			 * Certain platforms may have "undocumented" status
+			 * pending on boot. So don't generate a severe warning
+			 * here. Just mask it off to prevent the error from
+			 * recurring and locking up the system.
+ */
+ if (ret) {
+ dev_err(l3->dev,
+ "L3 %s error: target %d mod:%d %s\n",
+ inttype ? "debug" : "application",
+ err_src, i, "(unclearable)");
+
+ mask_reg = base + flag_mux->offset +
+ L3_FLAGMUX_MASK0 + (inttype << 3);
+ mask_val = readl_relaxed(mask_reg);
+ mask_val &= ~(1 << err_src);
+ writel_relaxed(mask_val, mask_reg);
+
+ /* Mark these bits as to be ignored */
+ if (inttype)
+ flag_mux->mask_app_bits |= 1 << err_src;
+ else
+ flag_mux->mask_dbg_bits |= 1 << err_src;
+ }
+
+			/* Error found, so stop scanning further modules */
+ return IRQ_HANDLED;
+ }
+ }
+
+ dev_err(l3->dev, "L3 %s IRQ not handled!!\n",
+ inttype ? "debug" : "application");
+
+ return IRQ_NONE;
+}
+
+static const struct of_device_id l3_noc_match[] = {
+ {.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
+ {.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
+ {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
+ {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, l3_noc_match);
+
+static int omap_l3_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ static struct omap_l3 *l3;
+ int ret, i, res_idx;
+
+ of_id = of_match_device(l3_noc_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "OF data missing\n");
+ return -EINVAL;
+ }
+
+ l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL);
+ if (!l3)
+ return -ENOMEM;
+
+ memcpy(l3, of_id->data, sizeof(*l3));
+ l3->dev = &pdev->dev;
+ platform_set_drvdata(pdev, l3);
+
+ /* Get mem resources */
+ for (i = 0, res_idx = 0; i < l3->num_modules; i++) {
+ struct resource *res;
+
+ if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) {
+			/* First entry cannot be a submodule */
+ BUG_ON(i == 0);
+ l3->l3_base[i] = l3->l3_base[i - 1];
+ continue;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx);
+ l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(l3->l3_base[i])) {
+ dev_err(l3->dev, "ioremap %d failed\n", i);
+ return PTR_ERR(l3->l3_base[i]);
+ }
+ res_idx++;
+ }
+
+ /*
+	 * Set up the interrupt handlers
+ */
+ l3->debug_irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
+ 0x0, "l3-dbg-irq", l3);
+ if (ret) {
+ dev_err(l3->dev, "request_irq failed for %d\n",
+ l3->debug_irq);
+ return ret;
+ }
+
+ l3->app_irq = platform_get_irq(pdev, 1);
+ ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
+ 0x0, "l3-app-irq", l3);
+ if (ret)
+ dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+
+/**
+ * l3_resume_noirq() - resume function for l3_noc
+ * @dev: pointer to l3_noc device structure
+ *
+ * We only have a resume handler, since the delta register
+ * configuration is already maintained as part of
+ * configuring the system.
+ */
+static int l3_resume_noirq(struct device *dev)
+{
+ struct omap_l3 *l3 = dev_get_drvdata(dev);
+ int i;
+ struct l3_flagmux_data *flag_mux;
+ void __iomem *base, *mask_regx = NULL;
+ u32 mask_val;
+
+ for (i = 0; i < l3->num_modules; i++) {
+ base = l3->l3_base[i];
+ flag_mux = l3->l3_flagmux[i];
+ if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits)
+ continue;
+
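+		/*
+		 * Re-apply the error masks accumulated at runtime:
+		 * clearing a bit in the MASK register disables that
+		 * source again after the register contents were lost
+		 * across suspend.
+		 */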
+ mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
+ (L3_APPLICATION_ERROR << 3);
+ mask_val = readl_relaxed(mask_regx);
+ mask_val &= ~(flag_mux->mask_app_bits);
+
+ writel_relaxed(mask_val, mask_regx);
+ mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
+ (L3_DEBUG_ERROR << 3);
+ mask_val = readl_relaxed(mask_regx);
+ mask_val &= ~(flag_mux->mask_dbg_bits);
+
+ writel_relaxed(mask_val, mask_regx);
+ }
+
+ /* Dummy read to force OCP barrier */
+ if (mask_regx)
+ (void)readl(mask_regx);
+
+ return 0;
+}
+
+static const struct dev_pm_ops l3_dev_pm_ops = {
+ .resume_noirq = l3_resume_noirq,
+};
+
+#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
+#else
+#define L3_DEV_PM_OPS NULL
+#endif
+
+static struct platform_driver omap_l3_driver = {
+ .probe = omap_l3_probe,
+ .driver = {
+ .name = "omap_l3_noc",
+ .pm = L3_DEV_PM_OPS,
+ .of_match_table = of_match_ptr(l3_noc_match),
+ },
+};
+
+static int __init omap_l3_init(void)
+{
+ return platform_driver_register(&omap_l3_driver);
+}
+postcore_initcall_sync(omap_l3_init);
+
+static void __exit omap_l3_exit(void)
+{
+ platform_driver_unregister(&omap_l3_driver);
+}
+module_exit(omap_l3_exit);
diff --git a/kernel/drivers/bus/omap_l3_noc.h b/kernel/drivers/bus/omap_l3_noc.h
new file mode 100644
index 000000000..73431f81d
--- /dev/null
+++ b/kernel/drivers/bus/omap_l3_noc.h
@@ -0,0 +1,501 @@
+/*
+ * OMAP L3 Interconnect error handling driver header
+ *
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * sricharan <r.sricharan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __OMAP_L3_NOC_H
+#define __OMAP_L3_NOC_H
+
+#define MAX_L3_MODULES 3
+#define MAX_CLKDM_TARGETS 31
+
+#define CLEAR_STDERR_LOG (1 << 31)
+#define CUSTOM_ERROR 0x2
+#define STANDARD_ERROR 0x0
+#define INBAND_ERROR 0x0
+#define L3_APPLICATION_ERROR 0x0
+#define L3_DEBUG_ERROR 0x1
+
+/* L3 TARG register offsets */
+#define L3_TARG_STDERRLOG_MAIN 0x48
+#define L3_TARG_STDERRLOG_HDR 0x4c
+#define L3_TARG_STDERRLOG_MSTADDR 0x50
+#define L3_TARG_STDERRLOG_INFO 0x58
+#define L3_TARG_STDERRLOG_SLVOFSLSB 0x5c
+#define L3_TARG_STDERRLOG_CINFO_INFO 0x64
+#define L3_TARG_STDERRLOG_CINFO_MSTADDR 0x68
+#define L3_TARG_STDERRLOG_CINFO_OPCODE 0x6c
+#define L3_FLAGMUX_REGERR0 0xc
+#define L3_FLAGMUX_MASK0 0x8
+
+#define L3_TARGET_NOT_SUPPORTED NULL
+
+#define L3_BASE_IS_SUBMODULE ((void __iomem *)(1 << 0))
+
+static const char * const l3_transaction_type[] = {
+ /* 0 0 0 */ "Idle",
+ /* 0 0 1 */ "Write",
+ /* 0 1 0 */ "Read",
+ /* 0 1 1 */ "ReadEx",
+ /* 1 0 0 */ "Read Link",
+ /* 1 0 1 */ "Write Non-Posted",
+ /* 1 1 0 */ "Write Conditional",
+ /* 1 1 1 */ "Write Broadcast",
+};
+
+/**
+ * struct l3_masters_data - L3 Master information
+ * @id: ID of the L3 Master
+ * @name: master name
+ */
+struct l3_masters_data {
+ u32 id;
+ char *name;
+};
+
+/**
+ * struct l3_target_data - L3 Target information
+ * @offset: Offset from base for L3 Target
+ * @name: Target name
+ *
+ * Target information is organized and indexed by bit field definitions.
+ */
+struct l3_target_data {
+ u32 offset;
+ char *name;
+};
+
+/**
+ * struct l3_flagmux_data - Flag Mux information
+ * @offset: offset from base for flagmux register
+ * @l3_targ: array indexed by flagmux index (bit offset) pointing to the
+ * target data. unsupported ones are marked with
+ * L3_TARGET_NOT_SUPPORTED
+ * @num_targ_data: number of entries in target data
+ * @mask_app_bits: ignore these from raw application irq status
+ * @mask_dbg_bits: ignore these from raw debug irq status
+ */
+struct l3_flagmux_data {
+ u32 offset;
+ struct l3_target_data *l3_targ;
+ u8 num_targ_data;
+ u32 mask_app_bits;
+ u32 mask_dbg_bits;
+};
+
+
+/**
+ * struct omap_l3 - Description of data relevant for L3 bus.
+ * @dev: device representing the bus (populated runtime)
+ * @l3_base: base addresses of modules (populated runtime if 0)
+ * if set to L3_BASE_IS_SUBMODULE, then uses previous
+ * module index as the base address
+ * @l3_flagmux: array containing flag mux data per module
+ * offset from corresponding module base indexed per
+ * module.
+ * @num_modules: number of clock domains / modules.
+ * @l3_masters: array pointing to master data containing name and register
+ * offset for the master.
+ * @num_masters: number of masters
+ * @mst_addr_mask: Mask representing MSTADDR information of NTTP packet
+ * @debug_irq: irq number of the debug interrupt (populated runtime)
+ * @app_irq: irq number of the application interrupt (populated runtime)
+ */
+struct omap_l3 {
+ struct device *dev;
+
+ void __iomem *l3_base[MAX_L3_MODULES];
+ struct l3_flagmux_data **l3_flagmux;
+ int num_modules;
+
+ struct l3_masters_data *l3_masters;
+ int num_masters;
+ u32 mst_addr_mask;
+
+ int debug_irq;
+ int app_irq;
+};
+
+static struct l3_target_data omap_l3_target_data_clk1[] = {
+ {0x100, "DMM1",},
+ {0x200, "DMM2",},
+ {0x300, "ABE",},
+ {0x400, "L4CFG",},
+ {0x600, "CLK2PWRDISC",},
+ {0x0, "HOSTCLK1",},
+ {0x900, "L4WAKEUP",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk1 = {
+ .offset = 0x500,
+ .l3_targ = omap_l3_target_data_clk1,
+ .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk1),
+};
+
+
+static struct l3_target_data omap_l3_target_data_clk2[] = {
+ {0x500, "CORTEXM3",},
+ {0x300, "DSS",},
+ {0x100, "GPMC",},
+ {0x400, "ISS",},
+ {0x700, "IVAHD",},
+ {0xD00, "AES1",},
+ {0x900, "L4PER0",},
+ {0x200, "OCMRAM",},
+ {0x100, "GPMCsERROR",},
+ {0x600, "SGX",},
+ {0x800, "SL2",},
+ {0x1600, "C2C",},
+ {0x1100, "PWRDISCCLK1",},
+ {0xF00, "SHA1",},
+ {0xE00, "AES2",},
+ {0xC00, "L4PER3",},
+ {0xA00, "L4PER1",},
+ {0xB00, "L4PER2",},
+ {0x0, "HOSTCLK2",},
+ {0x1800, "CAL",},
+ {0x1700, "LLI",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk2 = {
+ .offset = 0x1000,
+ .l3_targ = omap_l3_target_data_clk2,
+ .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk2),
+};
+
+
+static struct l3_target_data omap4_l3_target_data_clk3[] = {
+ {0x0100, "DEBUGSS",},
+};
+
+static struct l3_flagmux_data omap4_l3_flagmux_clk3 = {
+ .offset = 0x0200,
+ .l3_targ = omap4_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(omap4_l3_target_data_clk3),
+};
+
+static struct l3_masters_data omap_l3_masters[] = {
+ { 0x00, "MPU"},
+ { 0x04, "CS_ADP"},
+ { 0x05, "xxx"},
+ { 0x08, "DSP"},
+ { 0x0C, "IVAHD"},
+ { 0x10, "ISS"},
+ { 0x11, "DucatiM3"},
+ { 0x12, "FaceDetect"},
+ { 0x14, "SDMA_Rd"},
+ { 0x15, "SDMA_Wr"},
+ { 0x16, "xxx"},
+ { 0x17, "xxx"},
+ { 0x18, "SGX"},
+ { 0x1C, "DSS"},
+ { 0x20, "C2C"},
+ { 0x22, "xxx"},
+ { 0x23, "xxx"},
+ { 0x24, "HSI"},
+ { 0x28, "MMC1"},
+ { 0x29, "MMC2"},
+ { 0x2A, "MMC6"},
+ { 0x2C, "UNIPRO1"},
+ { 0x30, "USBHOSTHS"},
+ { 0x31, "USBOTGHS"},
+ { 0x32, "USBHOSTFS"}
+};
+
+static struct l3_flagmux_data *omap4_l3_flagmux[] = {
+ &omap_l3_flagmux_clk1,
+ &omap_l3_flagmux_clk2,
+ &omap4_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 omap4_l3_data = {
+ .l3_flagmux = omap4_l3_flagmux,
+ .num_modules = ARRAY_SIZE(omap4_l3_flagmux),
+ .l3_masters = omap_l3_masters,
+ .num_masters = ARRAY_SIZE(omap_l3_masters),
+ /* The 6 MSBs of register field used to distinguish initiator */
+ .mst_addr_mask = 0xFC,
+};
+
+/* OMAP5 data */
+static struct l3_target_data omap5_l3_target_data_clk3[] = {
+ {0x0100, "L3INSTR",},
+ {0x0300, "DEBUGSS",},
+ {0x0, "HOSTCLK3",},
+};
+
+static struct l3_flagmux_data omap5_l3_flagmux_clk3 = {
+ .offset = 0x0200,
+ .l3_targ = omap5_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(omap5_l3_target_data_clk3),
+};
+
+static struct l3_flagmux_data *omap5_l3_flagmux[] = {
+ &omap_l3_flagmux_clk1,
+ &omap_l3_flagmux_clk2,
+ &omap5_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 omap5_l3_data = {
+ .l3_flagmux = omap5_l3_flagmux,
+ .num_modules = ARRAY_SIZE(omap5_l3_flagmux),
+ .l3_masters = omap_l3_masters,
+ .num_masters = ARRAY_SIZE(omap_l3_masters),
+ /* The 6 MSBs of register field used to distinguish initiator */
+ .mst_addr_mask = 0x7E0,
+};
+
+/* DRA7 data */
+static struct l3_target_data dra_l3_target_data_clk1[] = {
+ {0x2a00, "AES1",},
+ {0x0200, "DMM_P1",},
+ {0x0600, "DSP2_SDMA",},
+ {0x0b00, "EVE2",},
+ {0x1300, "DMM_P2",},
+ {0x2c00, "AES2",},
+ {0x0300, "DSP1_SDMA",},
+ {0x0a00, "EVE1",},
+ {0x0c00, "EVE3",},
+ {0x0d00, "EVE4",},
+ {0x2900, "DSS",},
+ {0x0100, "GPMC",},
+ {0x3700, "PCIE1",},
+ {0x1600, "IVA_CONFIG",},
+ {0x1800, "IVA_SL2IF",},
+ {0x0500, "L4_CFG",},
+ {0x1d00, "L4_WKUP",},
+ {0x3800, "PCIE2",},
+ {0x3300, "SHA2_1",},
+ {0x1200, "GPU",},
+ {0x1000, "IPU1",},
+ {0x1100, "IPU2",},
+ {0x2000, "TPCC_EDMA",},
+ {0x2e00, "TPTC1_EDMA",},
+ {0x2b00, "TPTC2_EDMA",},
+ {0x0700, "VCP1",},
+ {0x2500, "L4_PER2_P3",},
+ {0x0e00, "L4_PER3_P3",},
+ {0x2200, "MMU1",},
+ {0x1400, "PRUSS1",},
+ {0x1500, "PRUSS2"},
+ {0x0800, "VCP1",},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk1 = {
+ .offset = 0x803500,
+ .l3_targ = dra_l3_target_data_clk1,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk1),
+};
+
+static struct l3_target_data dra_l3_target_data_clk2[] = {
+ {0x0, "HOST CLK1",},
+ {0x800000, "HOST CLK2",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x3400, "SHA2_2",},
+ {0x0900, "BB2D",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x2100, "L4_PER1_P3",},
+ {0x1c00, "L4_PER1_P1",},
+ {0x1f00, "L4_PER1_P2",},
+ {0x2300, "L4_PER2_P1",},
+ {0x2400, "L4_PER2_P2",},
+ {0x2600, "L4_PER3_P1",},
+ {0x2700, "L4_PER3_P2",},
+ {0x2f00, "MCASP1",},
+ {0x3000, "MCASP2",},
+ {0x3100, "MCASP3",},
+ {0x2800, "MMU2",},
+ {0x0f00, "OCMC_RAM1",},
+ {0x1700, "OCMC_RAM2",},
+ {0x1900, "OCMC_RAM3",},
+ {0x1e00, "OCMC_ROM",},
+ {0x3900, "QSPI",},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk2 = {
+ .offset = 0x803600,
+ .l3_targ = dra_l3_target_data_clk2,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk2),
+};
+
+static struct l3_target_data dra_l3_target_data_clk3[] = {
+ {0x0100, "L3_INSTR"},
+ {0x0300, "DEBUGSS_CT_TBR"},
+ {0x0, "HOST CLK3"},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk3 = {
+ .offset = 0x200,
+ .l3_targ = dra_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk3),
+};
+
+static struct l3_masters_data dra_l3_masters[] = {
+ { 0x0, "MPU" },
+ { 0x4, "CS_DAP" },
+ { 0x5, "IEEE1500_2_OCP" },
+ { 0x8, "DSP1_MDMA" },
+ { 0x9, "DSP1_CFG" },
+ { 0xA, "DSP1_DMA" },
+ { 0xB, "DSP2_MDMA" },
+ { 0xC, "DSP2_CFG" },
+ { 0xD, "DSP2_DMA" },
+ { 0xE, "IVA" },
+ { 0x10, "EVE1_P1" },
+ { 0x11, "EVE2_P1" },
+ { 0x12, "EVE3_P1" },
+ { 0x13, "EVE4_P1" },
+ { 0x14, "PRUSS1 PRU1" },
+ { 0x15, "PRUSS1 PRU2" },
+ { 0x16, "PRUSS2 PRU1" },
+ { 0x17, "PRUSS2 PRU2" },
+ { 0x18, "IPU1" },
+ { 0x19, "IPU2" },
+ { 0x1A, "SDMA" },
+ { 0x1B, "CDMA" },
+ { 0x1C, "TC1_EDMA" },
+ { 0x1D, "TC2_EDMA" },
+ { 0x20, "DSS" },
+ { 0x21, "MMU1" },
+ { 0x22, "PCIE1" },
+ { 0x23, "MMU2" },
+ { 0x24, "VIP1" },
+ { 0x25, "VIP2" },
+ { 0x26, "VIP3" },
+ { 0x27, "VPE" },
+ { 0x28, "GPU_P1" },
+ { 0x29, "BB2D" },
+ { 0x29, "GPU_P2" },
+ { 0x2B, "GMAC_SW" },
+ { 0x2C, "USB3" },
+ { 0x2D, "USB2_SS" },
+ { 0x2E, "USB2_ULPI_SS1" },
+ { 0x2F, "USB2_ULPI_SS2" },
+ { 0x30, "CSI2_1" },
+ { 0x31, "CSI2_2" },
+ { 0x33, "SATA" },
+ { 0x34, "EVE1_P2" },
+ { 0x35, "EVE2_P2" },
+ { 0x36, "EVE3_P2" },
+ { 0x37, "EVE4_P2" }
+};
+
+static struct l3_flagmux_data *dra_l3_flagmux[] = {
+ &dra_l3_flagmux_clk1,
+ &dra_l3_flagmux_clk2,
+ &dra_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 dra_l3_data = {
+ .l3_base = { [1] = L3_BASE_IS_SUBMODULE },
+ .l3_flagmux = dra_l3_flagmux,
+ .num_modules = ARRAY_SIZE(dra_l3_flagmux),
+ .l3_masters = dra_l3_masters,
+ .num_masters = ARRAY_SIZE(dra_l3_masters),
+ /* The 6 MSBs of register field used to distinguish initiator */
+ .mst_addr_mask = 0xFC,
+};
+
+/* AM4372 data */
+static struct l3_target_data am4372_l3_target_data_200f[] = {
+ {0xf00, "EMIF",},
+ {0x1200, "DES",},
+ {0x400, "OCMCRAM",},
+ {0x700, "TPTC0",},
+ {0x800, "TPTC1",},
+ {0x900, "TPTC2"},
+ {0xb00, "TPCC",},
+ {0xd00, "DEBUGSS",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x200, "SHA",},
+ {0xc00, "SGX530",},
+ {0x500, "AES0",},
+ {0xa00, "L4_FAST",},
+ {0x300, "MPUSS_L2_RAM",},
+ {0x100, "ICSS",},
+};
+
+static struct l3_flagmux_data am4372_l3_flagmux_200f = {
+ .offset = 0x1000,
+ .l3_targ = am4372_l3_target_data_200f,
+ .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_200f),
+};
+
+static struct l3_target_data am4372_l3_target_data_100s[] = {
+ {0x100, "L4_PER_0",},
+ {0x200, "L4_PER_1",},
+ {0x300, "L4_PER_2",},
+ {0x400, "L4_PER_3",},
+ {0x800, "McASP0",},
+ {0x900, "McASP1",},
+ {0xC00, "MMCHS2",},
+ {0x700, "GPMC",},
+ {0xD00, "L4_FW",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x500, "ADCTSC",},
+ {0xE00, "L4_WKUP",},
+ {0xA00, "MAG_CARD",},
+};
+
+static struct l3_flagmux_data am4372_l3_flagmux_100s = {
+ .offset = 0x600,
+ .l3_targ = am4372_l3_target_data_100s,
+ .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_100s),
+};
+
+static struct l3_masters_data am4372_l3_masters[] = {
+ { 0x0, "M1 (128-bit)"},
+ { 0x1, "M2 (64-bit)"},
+ { 0x4, "DAP"},
+ { 0x5, "P1500"},
+ { 0xC, "ICSS0"},
+ { 0xD, "ICSS1"},
+ { 0x14, "Wakeup Processor"},
+ { 0x18, "TPTC0 Read"},
+ { 0x19, "TPTC0 Write"},
+ { 0x1A, "TPTC1 Read"},
+ { 0x1B, "TPTC1 Write"},
+ { 0x1C, "TPTC2 Read"},
+ { 0x1D, "TPTC2 Write"},
+ { 0x20, "SGX530"},
+ { 0x21, "OCP WP Traffic Probe"},
+ { 0x22, "OCP WP DMA Profiling"},
+ { 0x23, "OCP WP Event Trace"},
+ { 0x25, "DSS"},
+ { 0x28, "Crypto DMA RD"},
+ { 0x29, "Crypto DMA WR"},
+ { 0x2C, "VPFE0"},
+ { 0x2D, "VPFE1"},
+ { 0x30, "GEMAC"},
+ { 0x34, "USB0 RD"},
+ { 0x35, "USB0 WR"},
+ { 0x36, "USB1 RD"},
+ { 0x37, "USB1 WR"},
+};
+
+static struct l3_flagmux_data *am4372_l3_flagmux[] = {
+ &am4372_l3_flagmux_200f,
+ &am4372_l3_flagmux_100s,
+};
+
+static const struct omap_l3 am4372_l3_data = {
+ .l3_flagmux = am4372_l3_flagmux,
+ .num_modules = ARRAY_SIZE(am4372_l3_flagmux),
+ .l3_masters = am4372_l3_masters,
+ .num_masters = ARRAY_SIZE(am4372_l3_masters),
+ /* All 6 bits of register field used to distinguish initiator */
+ .mst_addr_mask = 0x3F,
+};
+
+#endif /* __OMAP_L3_NOC_H */
diff --git a/kernel/drivers/bus/omap_l3_smx.c b/kernel/drivers/bus/omap_l3_smx.c
new file mode 100644
index 000000000..360a5c0a4
--- /dev/null
+++ b/kernel/drivers/bus/omap_l3_smx.c
@@ -0,0 +1,311 @@
+/*
+ * OMAP3XXX L3 Interconnect Driver
+ *
+ * Copyright (C) 2011 Texas Corporation
+ * Felipe Balbi <balbi@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Sricharan <r.sricharan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "omap_l3_smx.h"
+
+static inline u64 omap3_l3_readll(void __iomem *base, u16 reg)
+{
+ return __raw_readll(base + reg);
+}
+
+static inline void omap3_l3_writell(void __iomem *base, u16 reg, u64 value)
+{
+ __raw_writell(value, base + reg);
+}
+
+static inline enum omap3_l3_code omap3_l3_decode_error_code(u64 error)
+{
+ return (error & 0x0f000000) >> L3_ERROR_LOG_CODE;
+}
+
+static inline u32 omap3_l3_decode_addr(u64 error_addr)
+{
+ return error_addr & 0xffffffff;
+}
+
+static inline unsigned omap3_l3_decode_cmd(u64 error)
+{
+ return (error & 0x07) >> L3_ERROR_LOG_CMD;
+}
+
+static inline enum omap3_l3_initiator_id omap3_l3_decode_initid(u64 error)
+{
+ return (error & 0xff00) >> L3_ERROR_LOG_INITID;
+}
+
+static inline unsigned omap3_l3_decode_req_info(u64 error)
+{
+ return (error >> 32) & 0xffff;
+}
+
+static char *omap3_l3_code_string(u8 code)
+{
+ switch (code) {
+ case OMAP_L3_CODE_NOERROR:
+ return "No Error";
+ case OMAP_L3_CODE_UNSUP_CMD:
+ return "Unsupported Command";
+ case OMAP_L3_CODE_ADDR_HOLE:
+ return "Address Hole";
+ case OMAP_L3_CODE_PROTECT_VIOLATION:
+ return "Protection Violation";
+ case OMAP_L3_CODE_IN_BAND_ERR:
+ return "In-band Error";
+ case OMAP_L3_CODE_REQ_TOUT_NOT_ACCEPT:
+ return "Request Timeout Not Accepted";
+ case OMAP_L3_CODE_REQ_TOUT_NO_RESP:
+ return "Request Timeout, no response";
+ default:
+ return "UNKNOWN error";
+ }
+}
+
+static char *omap3_l3_initiator_string(u8 initid)
+{
+ switch (initid) {
+ case OMAP_L3_LCD:
+ return "LCD";
+ case OMAP_L3_SAD2D:
+ return "SAD2D";
+ case OMAP_L3_IA_MPU_SS_1:
+ case OMAP_L3_IA_MPU_SS_2:
+ case OMAP_L3_IA_MPU_SS_3:
+ case OMAP_L3_IA_MPU_SS_4:
+ case OMAP_L3_IA_MPU_SS_5:
+ return "MPU";
+ case OMAP_L3_IA_IVA_SS_1:
+ case OMAP_L3_IA_IVA_SS_2:
+ case OMAP_L3_IA_IVA_SS_3:
+ return "IVA_SS";
+ case OMAP_L3_IA_IVA_SS_DMA_1:
+ case OMAP_L3_IA_IVA_SS_DMA_2:
+ case OMAP_L3_IA_IVA_SS_DMA_3:
+ case OMAP_L3_IA_IVA_SS_DMA_4:
+ case OMAP_L3_IA_IVA_SS_DMA_5:
+ case OMAP_L3_IA_IVA_SS_DMA_6:
+ return "IVA_SS_DMA";
+ case OMAP_L3_IA_SGX:
+ return "SGX";
+ case OMAP_L3_IA_CAM_1:
+ case OMAP_L3_IA_CAM_2:
+ case OMAP_L3_IA_CAM_3:
+ return "CAM";
+ case OMAP_L3_IA_DAP:
+ return "DAP";
+ case OMAP_L3_SDMA_WR_1:
+ case OMAP_L3_SDMA_WR_2:
+ return "SDMA_WR";
+ case OMAP_L3_SDMA_RD_1:
+ case OMAP_L3_SDMA_RD_2:
+ case OMAP_L3_SDMA_RD_3:
+ case OMAP_L3_SDMA_RD_4:
+ return "SDMA_RD";
+ case OMAP_L3_USBOTG:
+ return "USB_OTG";
+ case OMAP_L3_USBHOST:
+ return "USB_HOST";
+ default:
+ return "UNKNOWN Initiator";
+ }
+}
+
+/*
+ * omap3_l3_block_irq - handles a register block's irq
+ * @l3: struct omap3_l3 *
+ * @error: L3_ERROR_LOG register of our block
+ * @error_addr: L3_ERROR_LOG_ADDR register of our block
+ *
+ * Called in hard-irq context. Caller should take care of locking.
+ *
+ * The OMAP36xx TRM gives, on page 2001, Figure 9-10, the Typical
+ * Error Analysis Sequence; we follow that sequence here. Please
+ * refer to that figure for more information on the subject.
+ */
+static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
+ u64 error, int error_addr)
+{
+ u8 code = omap3_l3_decode_error_code(error);
+ u8 initid = omap3_l3_decode_initid(error);
+ u8 multi = error & L3_ERROR_LOG_MULTI;
+ u32 address = omap3_l3_decode_addr(error_addr);
+
+ pr_err("%s seen by %s %s at address %x\n",
+ omap3_l3_code_string(code),
+ omap3_l3_initiator_string(initid),
+ multi ? "Multiple Errors" : "", address);
+ WARN_ON(1);
+
+ return IRQ_HANDLED;
+}
+
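+/*
+ * Single handler for both the application and debug interrupt lines:
+ * the irq number tells us which flag status register to inspect.
+ */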
+static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
+{
+ struct omap3_l3 *l3 = _l3;
+ u64 status, clear;
+ u64 error;
+ u64 error_addr;
+ u64 err_source = 0;
+ void __iomem *base;
+ int int_type;
+ irqreturn_t ret = IRQ_NONE;
+
+ int_type = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
+ if (!int_type) {
+ status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0);
+		/*
+		 * If we have a timeout error, there's nothing we can
+		 * do besides rebooting the board. So let's BUG on any
+		 * such error and handle the others. A timeout error
+		 * is severe and not expected to occur.
+		 */
+ BUG_ON(status & L3_STATUS_0_TIMEOUT_MASK);
+ } else {
+ status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_1);
+ /* No timeout error for debug sources */
+ }
+
+ /* identify the error source */
+ err_source = __ffs(status);
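+	/* only the lowest-numbered pending source is handled per call */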
+
+ base = l3->rt + omap3_l3_bases[int_type][err_source];
+ error = omap3_l3_readll(base, L3_ERROR_LOG);
+ if (error) {
+ error_addr = omap3_l3_readll(base, L3_ERROR_LOG_ADDR);
+ ret |= omap3_l3_block_irq(l3, error, error_addr);
+ }
+
+ /* Clear the status register */
+ clear = (L3_AGENT_STATUS_CLEAR_IA << int_type) |
+ L3_AGENT_STATUS_CLEAR_TA;
+ omap3_l3_writell(base, L3_AGENT_STATUS, clear);
+
+ /* clear the error log register */
+ omap3_l3_writell(base, L3_ERROR_LOG, error);
+
+ return ret;
+}
+
+#if IS_BUILTIN(CONFIG_OF)
+static const struct of_device_id omap3_l3_match[] = {
+ {
+ .compatible = "ti,omap3-l3-smx",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap3_l3_match);
+#endif
+
+static int omap3_l3_probe(struct platform_device *pdev)
+{
+ struct omap3_l3 *l3;
+ struct resource *res;
+ int ret;
+
+ l3 = kzalloc(sizeof(*l3), GFP_KERNEL);
+ if (!l3)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, l3);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "couldn't find resource\n");
+ ret = -ENODEV;
+ goto err0;
+ }
+ l3->rt = ioremap(res->start, resource_size(res));
+ if (!l3->rt) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ l3->debug_irq = platform_get_irq(pdev, 0);
+ ret = request_irq(l3->debug_irq, omap3_l3_app_irq, IRQF_TRIGGER_RISING,
+ "l3-debug-irq", l3);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't request debug irq\n");
+ goto err1;
+ }
+
+ l3->app_irq = platform_get_irq(pdev, 1);
+ ret = request_irq(l3->app_irq, omap3_l3_app_irq, IRQF_TRIGGER_RISING,
+ "l3-app-irq", l3);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't request app irq\n");
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ free_irq(l3->debug_irq, l3);
+err1:
+ iounmap(l3->rt);
+err0:
+ kfree(l3);
+ return ret;
+}
+
+static int omap3_l3_remove(struct platform_device *pdev)
+{
+ struct omap3_l3 *l3 = platform_get_drvdata(pdev);
+
+ free_irq(l3->app_irq, l3);
+ free_irq(l3->debug_irq, l3);
+ iounmap(l3->rt);
+ kfree(l3);
+
+ return 0;
+}
+
+static struct platform_driver omap3_l3_driver = {
+ .probe = omap3_l3_probe,
+ .remove = omap3_l3_remove,
+ .driver = {
+ .name = "omap_l3_smx",
+ .of_match_table = of_match_ptr(omap3_l3_match),
+ },
+};
+
+static int __init omap3_l3_init(void)
+{
+ return platform_driver_register(&omap3_l3_driver);
+}
+postcore_initcall_sync(omap3_l3_init);
+
+static void __exit omap3_l3_exit(void)
+{
+ platform_driver_unregister(&omap3_l3_driver);
+}
+module_exit(omap3_l3_exit);
diff --git a/kernel/drivers/bus/omap_l3_smx.h b/kernel/drivers/bus/omap_l3_smx.h
new file mode 100644
index 000000000..4f3cebca4
--- /dev/null
+++ b/kernel/drivers/bus/omap_l3_smx.h
@@ -0,0 +1,338 @@
+/*
+ * OMAP3XXX L3 Interconnect Driver header
+ *
+ * Copyright (C) 2011 Texas Corporation
+ * Felipe Balbi <balbi@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * sricharan <r.sricharan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+#ifndef __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H
+#define __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H
+
+/* Register definitions. All 64-bit wide */
+#define L3_COMPONENT 0x000
+#define L3_CORE 0x018
+#define L3_AGENT_CONTROL 0x020
+#define L3_AGENT_STATUS 0x028
+#define L3_ERROR_LOG 0x058
+
+#define L3_ERROR_LOG_MULTI (1 << 31)
+#define L3_ERROR_LOG_SECONDARY (1 << 30)
+
+#define L3_ERROR_LOG_ADDR 0x060
+
+/* Register definitions for Sideband Interconnect */
+#define L3_SI_CONTROL 0x020
+#define L3_SI_FLAG_STATUS_0 0x510
+
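+/* 64-bit constant so that the shifts past bit 31 below are well-defined */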
+static const u64 shift = 1;
+
+#define L3_STATUS_0_MPUIA_BRST (shift << 0)
+#define L3_STATUS_0_MPUIA_RSP (shift << 1)
+#define L3_STATUS_0_MPUIA_INBAND (shift << 2)
+#define L3_STATUS_0_IVAIA_BRST (shift << 6)
+#define L3_STATUS_0_IVAIA_RSP (shift << 7)
+#define L3_STATUS_0_IVAIA_INBAND (shift << 8)
+#define L3_STATUS_0_SGXIA_BRST (shift << 9)
+#define L3_STATUS_0_SGXIA_RSP (shift << 10)
+#define L3_STATUS_0_SGXIA_MERROR (shift << 11)
+#define L3_STATUS_0_CAMIA_BRST (shift << 12)
+#define L3_STATUS_0_CAMIA_RSP (shift << 13)
+#define L3_STATUS_0_CAMIA_INBAND (shift << 14)
+#define L3_STATUS_0_DISPIA_BRST (shift << 15)
+#define L3_STATUS_0_DISPIA_RSP (shift << 16)
+#define L3_STATUS_0_DMARDIA_BRST (shift << 18)
+#define L3_STATUS_0_DMARDIA_RSP (shift << 19)
+#define L3_STATUS_0_DMAWRIA_BRST (shift << 21)
+#define L3_STATUS_0_DMAWRIA_RSP (shift << 22)
+#define L3_STATUS_0_USBOTGIA_BRST (shift << 24)
+#define L3_STATUS_0_USBOTGIA_RSP (shift << 25)
+#define L3_STATUS_0_USBOTGIA_INBAND (shift << 26)
+#define L3_STATUS_0_USBHOSTIA_BRST (shift << 27)
+#define L3_STATUS_0_USBHOSTIA_INBAND (shift << 28)
+#define L3_STATUS_0_SMSTA_REQ (shift << 48)
+#define L3_STATUS_0_GPMCTA_REQ (shift << 49)
+#define L3_STATUS_0_OCMRAMTA_REQ (shift << 50)
+#define L3_STATUS_0_OCMROMTA_REQ (shift << 51)
+#define L3_STATUS_0_IVATA_REQ (shift << 54)
+#define L3_STATUS_0_SGXTA_REQ (shift << 55)
+#define L3_STATUS_0_SGXTA_SERROR (shift << 56)
+#define L3_STATUS_0_GPMCTA_SERROR (shift << 57)
+#define L3_STATUS_0_L4CORETA_REQ (shift << 58)
+#define L3_STATUS_0_L4PERTA_REQ (shift << 59)
+#define L3_STATUS_0_L4EMUTA_REQ (shift << 60)
+#define L3_STATUS_0_MAD2DTA_REQ (shift << 61)
+
+#define L3_STATUS_0_TIMEOUT_MASK (L3_STATUS_0_MPUIA_BRST \
+ | L3_STATUS_0_MPUIA_RSP \
+ | L3_STATUS_0_IVAIA_BRST \
+ | L3_STATUS_0_IVAIA_RSP \
+ | L3_STATUS_0_SGXIA_BRST \
+ | L3_STATUS_0_SGXIA_RSP \
+ | L3_STATUS_0_CAMIA_BRST \
+ | L3_STATUS_0_CAMIA_RSP \
+ | L3_STATUS_0_DISPIA_BRST \
+ | L3_STATUS_0_DISPIA_RSP \
+ | L3_STATUS_0_DMARDIA_BRST \
+ | L3_STATUS_0_DMARDIA_RSP \
+ | L3_STATUS_0_DMAWRIA_BRST \
+ | L3_STATUS_0_DMAWRIA_RSP \
+ | L3_STATUS_0_USBOTGIA_BRST \
+ | L3_STATUS_0_USBOTGIA_RSP \
+ | L3_STATUS_0_USBHOSTIA_BRST \
+ | L3_STATUS_0_SMSTA_REQ \
+ | L3_STATUS_0_GPMCTA_REQ \
+ | L3_STATUS_0_OCMRAMTA_REQ \
+ | L3_STATUS_0_OCMROMTA_REQ \
+ | L3_STATUS_0_IVATA_REQ \
+ | L3_STATUS_0_SGXTA_REQ \
+ | L3_STATUS_0_L4CORETA_REQ \
+ | L3_STATUS_0_L4PERTA_REQ \
+ | L3_STATUS_0_L4EMUTA_REQ \
+ | L3_STATUS_0_MAD2DTA_REQ)
+
+#define L3_SI_FLAG_STATUS_1 0x530
+
+#define L3_STATUS_1_MPU_DATAIA (1 << 0)
+#define L3_STATUS_1_DAPIA0 (1 << 3)
+#define L3_STATUS_1_DAPIA1 (1 << 4)
+#define L3_STATUS_1_IVAIA (1 << 6)
+
+#define L3_PM_ERROR_LOG 0x020
+#define L3_PM_CONTROL 0x028
+#define L3_PM_ERROR_CLEAR_SINGLE 0x030
+#define L3_PM_ERROR_CLEAR_MULTI 0x038
+#define L3_PM_REQ_INFO_PERMISSION(n)	(0x048 + (0x020 * (n)))
+#define L3_PM_READ_PERMISSION(n)	(0x050 + (0x020 * (n)))
+#define L3_PM_WRITE_PERMISSION(n)	(0x058 + (0x020 * (n)))
+#define L3_PM_ADDR_MATCH(n)		(0x060 + (0x020 * (n)))
+
+/* L3 error log bit fields. Common for IA and TA */
+#define L3_ERROR_LOG_CODE 24
+#define L3_ERROR_LOG_INITID 8
+#define L3_ERROR_LOG_CMD 0
+
+/* L3 agent status bit fields. */
+#define L3_AGENT_STATUS_CLEAR_IA 0x10000000
+#define L3_AGENT_STATUS_CLEAR_TA 0x01000000
+
+#define OMAP34xx_IRQ_L3_APP 10
+#define L3_APPLICATION_ERROR 0x0
+#define L3_DEBUG_ERROR 0x1
+
+enum omap3_l3_initiator_id {
+ /* LCD has 1 ID */
+ OMAP_L3_LCD = 29,
+ /* SAD2D has 1 ID */
+ OMAP_L3_SAD2D = 28,
+ /* MPU has 5 IDs */
+ OMAP_L3_IA_MPU_SS_1 = 27,
+ OMAP_L3_IA_MPU_SS_2 = 26,
+ OMAP_L3_IA_MPU_SS_3 = 25,
+ OMAP_L3_IA_MPU_SS_4 = 24,
+ OMAP_L3_IA_MPU_SS_5 = 23,
+	/* IVA 2.2 SS has 3 IDs */
+ OMAP_L3_IA_IVA_SS_1 = 22,
+ OMAP_L3_IA_IVA_SS_2 = 21,
+ OMAP_L3_IA_IVA_SS_3 = 20,
+	/* IVA 2.2 SS DMA has 6 IDs */
+ OMAP_L3_IA_IVA_SS_DMA_1 = 19,
+ OMAP_L3_IA_IVA_SS_DMA_2 = 18,
+ OMAP_L3_IA_IVA_SS_DMA_3 = 17,
+ OMAP_L3_IA_IVA_SS_DMA_4 = 16,
+ OMAP_L3_IA_IVA_SS_DMA_5 = 15,
+ OMAP_L3_IA_IVA_SS_DMA_6 = 14,
+ /* SGX has 1 ID */
+ OMAP_L3_IA_SGX = 13,
+	/* CAM has 3 IDs */
+ OMAP_L3_IA_CAM_1 = 12,
+ OMAP_L3_IA_CAM_2 = 11,
+ OMAP_L3_IA_CAM_3 = 10,
+ /* DAP has 1 ID */
+ OMAP_L3_IA_DAP = 9,
+ /* SDMA WR has 2 IDs */
+ OMAP_L3_SDMA_WR_1 = 8,
+ OMAP_L3_SDMA_WR_2 = 7,
+ /* SDMA RD has 4 IDs */
+ OMAP_L3_SDMA_RD_1 = 6,
+ OMAP_L3_SDMA_RD_2 = 5,
+ OMAP_L3_SDMA_RD_3 = 4,
+ OMAP_L3_SDMA_RD_4 = 3,
+ /* HSUSB OTG has 1 ID */
+ OMAP_L3_USBOTG = 2,
+ /* HSUSB HOST has 1 ID */
+ OMAP_L3_USBHOST = 1,
+};
+
+enum omap3_l3_code {
+ OMAP_L3_CODE_NOERROR = 0,
+ OMAP_L3_CODE_UNSUP_CMD = 1,
+ OMAP_L3_CODE_ADDR_HOLE = 2,
+ OMAP_L3_CODE_PROTECT_VIOLATION = 3,
+ OMAP_L3_CODE_IN_BAND_ERR = 4,
+ /* codes 5 and 6 are reserved */
+ OMAP_L3_CODE_REQ_TOUT_NOT_ACCEPT = 7,
+ OMAP_L3_CODE_REQ_TOUT_NO_RESP = 8,
+ /* codes 9 - 15 are also reserved */
+};
+
+struct omap3_l3 {
+ struct device *dev;
+ struct clk *ick;
+
+	/* memory base */
+ void __iomem *rt;
+
+ int debug_irq;
+ int app_irq;
+
+	/* true when an in-band functional error occurs */
+ unsigned inband:1;
+};
+
+/* offsets for l3 agents in order with the Flag status register */
+static unsigned int omap3_l3_app_bases[] = {
+ /* MPU IA */
+ 0x1400,
+ 0x1400,
+ 0x1400,
+ /* RESERVED */
+ 0,
+ 0,
+ 0,
+ /* IVA 2.2 IA */
+ 0x1800,
+ 0x1800,
+ 0x1800,
+ /* SGX IA */
+ 0x1c00,
+ 0x1c00,
+ /* RESERVED */
+ 0,
+ /* CAMERA IA */
+ 0x5800,
+ 0x5800,
+ 0x5800,
+ /* DISPLAY IA */
+ 0x5400,
+ 0x5400,
+ /* RESERVED */
+ 0,
+	/* SDMA RD IA */
+ 0x4c00,
+ 0x4c00,
+ /* RESERVED */
+ 0,
+ /* SDMA WR IA */
+ 0x5000,
+ 0x5000,
+ /* RESERVED */
+ 0,
+ /* USB OTG IA */
+ 0x4400,
+ 0x4400,
+ 0x4400,
+ /* USB HOST IA */
+ 0x4000,
+ 0x4000,
+ /* RESERVED */
+ 0,
+ 0,
+ 0,
+ 0,
+ /* SAD2D IA */
+ 0x3000,
+ 0x3000,
+ 0x3000,
+ /* RESERVED */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+	/* SMS TA */
+ 0x2000,
+ /* GPMC TA */
+ 0x2400,
+ /* OCM RAM TA */
+ 0x2800,
+ /* OCM ROM TA */
+ 0x2C00,
+ /* L4 CORE TA */
+ 0x6800,
+ /* L4 PER TA */
+ 0x6c00,
+ /* IVA 2.2 TA */
+ 0x6000,
+ /* SGX TA */
+ 0x6400,
+ /* L4 EMU TA */
+ 0x7000,
+ /* GPMC TA */
+ 0x2400,
+ /* L4 CORE TA */
+ 0x6800,
+ /* L4 PER TA */
+ 0x6c00,
+ /* L4 EMU TA */
+ 0x7000,
+ /* MAD2D TA */
+ 0x3400,
+ /* RESERVED */
+ 0,
+ 0,
+};
+
+static unsigned int omap3_l3_debug_bases[] = {
+ /* MPU DATA IA */
+ 0x1400,
+ /* RESERVED */
+ 0,
+ 0,
+ /* DAP IA */
+ 0x5c00,
+ 0x5c00,
+ /* RESERVED */
+ 0,
+ /* IVA 2.2 IA */
+ 0x1800,
+ /* REST RESERVED */
+};
+
+static u32 *omap3_l3_bases[] = {
+ omap3_l3_app_bases,
+ omap3_l3_debug_bases,
+};
+
+/*
+ * REVISIT define __raw_readll/__raw_writell here, but move them to
+ * <asm/io.h> at some point
+ */
+#define __raw_writell(v, a) (__chk_io_ptr(a), \
+ *(volatile u64 __force *)(a) = (v))
+#define __raw_readll(a) (__chk_io_ptr(a), \
+ *(volatile u64 __force *)(a))
+
+#endif
diff --git a/kernel/drivers/bus/simple-pm-bus.c b/kernel/drivers/bus/simple-pm-bus.c
new file mode 100644
index 000000000..c5eb46cbf
--- /dev/null
+++ b/kernel/drivers/bus/simple-pm-bus.c
@@ -0,0 +1,58 @@
+/*
+ * Simple Power-Managed Bus Driver
+ *
+ * Copyright (C) 2014-2015 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+
+static int simple_pm_bus_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
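+	/*
+	 * Enable runtime PM before populating children, so that their
+	 * runtime PM operations can propagate up through this bus.
+	 */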
+ pm_runtime_enable(&pdev->dev);
+
+ if (np)
+ of_platform_populate(np, NULL, NULL, &pdev->dev);
+
+ return 0;
+}
+
+static int simple_pm_bus_remove(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id simple_pm_bus_of_match[] = {
+ { .compatible = "simple-pm-bus", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
+
+static struct platform_driver simple_pm_bus_driver = {
+ .probe = simple_pm_bus_probe,
+ .remove = simple_pm_bus_remove,
+ .driver = {
+ .name = "simple-pm-bus",
+ .of_match_table = simple_pm_bus_of_match,
+ },
+};
+
+module_platform_driver(simple_pm_bus_driver);
+
+MODULE_DESCRIPTION("Simple Power-Managed Bus Driver");
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/bus/vexpress-config.c b/kernel/drivers/bus/vexpress-config.c
new file mode 100644
index 000000000..a64763b6b
--- /dev/null
+++ b/kernel/drivers/bus/vexpress-config.c
@@ -0,0 +1,202 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/vexpress.h>
+
+
+struct vexpress_config_bridge {
+ struct vexpress_config_bridge_ops *ops;
+ void *context;
+};
+
+
+static DEFINE_MUTEX(vexpress_config_mutex);
+static struct class *vexpress_config_class;
+static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER;
+
+
+void vexpress_config_set_master(u32 site)
+{
+ vexpress_config_site_master = site;
+}
+
+u32 vexpress_config_get_master(void)
+{
+ return vexpress_config_site_master;
+}
+
+void vexpress_config_lock(void *arg)
+{
+ mutex_lock(&vexpress_config_mutex);
+}
+
+void vexpress_config_unlock(void *arg)
+{
+ mutex_unlock(&vexpress_config_mutex);
+}
+
+
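+/*
+ * Look for the named property in the node itself, then in each of
+ * its ancestors; *val defaults to 0 if nothing defines it.
+ */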
+static void vexpress_config_find_prop(struct device_node *node,
+ const char *name, u32 *val)
+{
+ /* Default value */
+ *val = 0;
+
+ of_node_get(node);
+ while (node) {
+ if (of_property_read_u32(node, name, val) == 0) {
+ of_node_put(node);
+ return;
+ }
+ node = of_get_next_parent(node);
+ }
+}
+
+int vexpress_config_get_topo(struct device_node *node, u32 *site,
+ u32 *position, u32 *dcc)
+{
+ vexpress_config_find_prop(node, "arm,vexpress,site", site);
+ if (*site == VEXPRESS_SITE_MASTER)
+ *site = vexpress_config_site_master;
+ if (WARN_ON(vexpress_config_site_master == VEXPRESS_SITE_MASTER))
+ return -EINVAL;
+ vexpress_config_find_prop(node, "arm,vexpress,position", position);
+ vexpress_config_find_prop(node, "arm,vexpress,dcc", dcc);
+
+ return 0;
+}
+
+
+static void vexpress_config_devres_release(struct device *dev, void *res)
+{
+ struct vexpress_config_bridge *bridge = dev_get_drvdata(dev->parent);
+ struct regmap *regmap = res;
+
+ bridge->ops->regmap_exit(regmap, bridge->context);
+}
+
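+/*
+ * Create a bridge-specific regmap whose lifetime is tied to @dev via
+ * devres; vexpress_config_devres_release() tears it down again.
+ */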
+struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
+{
+ struct vexpress_config_bridge *bridge;
+ struct regmap *regmap;
+ struct regmap **res;
+
+ if (WARN_ON(dev->parent->class != vexpress_config_class))
+ return ERR_PTR(-ENODEV);
+
+ bridge = dev_get_drvdata(dev->parent);
+ if (WARN_ON(!bridge))
+ return ERR_PTR(-EINVAL);
+
+ res = devres_alloc(vexpress_config_devres_release, sizeof(*res),
+ GFP_KERNEL);
+ if (!res)
+ return ERR_PTR(-ENOMEM);
+
+ regmap = bridge->ops->regmap_init(dev, bridge->context);
+ if (IS_ERR(regmap)) {
+ devres_free(res);
+ return regmap;
+ }
+
+ *res = regmap;
+ devres_add(dev, res);
+
+ return regmap;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_vexpress_config);
+
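+/*
+ * Register a config bridge: the class is created lazily on first use
+ * and the bridge ops are stashed as driver data for
+ * devm_regmap_init_vexpress_config() to find.
+ */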
+struct device *vexpress_config_bridge_register(struct device *parent,
+ struct vexpress_config_bridge_ops *ops, void *context)
+{
+ struct device *dev;
+ struct vexpress_config_bridge *bridge;
+
+ if (!vexpress_config_class) {
+ vexpress_config_class = class_create(THIS_MODULE,
+ "vexpress-config");
+ if (IS_ERR(vexpress_config_class))
+ return (void *)vexpress_config_class;
+ }
+
+ dev = device_create(vexpress_config_class, parent, 0,
+ NULL, "%s.bridge", dev_name(parent));
+
+ if (IS_ERR(dev))
+ return dev;
+
+ bridge = devm_kmalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge) {
+		device_unregister(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+ bridge->ops = ops;
+ bridge->context = context;
+
+ dev_set_drvdata(dev, bridge);
+
+ dev_dbg(parent, "Registered bridge '%s', parent node %p\n",
+ dev_name(dev), parent->of_node);
+
+ return dev;
+}
+
+
+static int vexpress_config_node_match(struct device *dev, const void *data)
+{
+ const struct device_node *node = data;
+
+ dev_dbg(dev, "Parent node %p, looking for %p\n",
+ dev->parent->of_node, node);
+
+ return dev->parent->of_node == node;
+}
+
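+/*
+ * Instantiate the children of a config bus node as platform devices,
+ * parented to the bridge device matching the node's
+ * "arm,vexpress,config-bridge" phandle.
+ */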
+static int vexpress_config_populate(struct device_node *node)
+{
+ struct device_node *bridge;
+ struct device *parent;
+
+ bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+ if (!bridge)
+ return -EINVAL;
+
+ parent = class_find_device(vexpress_config_class, NULL, bridge,
+ vexpress_config_node_match);
+ if (WARN_ON(!parent))
+ return -ENODEV;
+
+ return of_platform_populate(node, NULL, NULL, parent);
+}
+
+static int __init vexpress_config_init(void)
+{
+ int err = 0;
+ struct device_node *node;
+
+ /* Need the config devices early, before the "normal" devices... */
+ for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
+ err = vexpress_config_populate(node);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+postcore_initcall(vexpress_config_init);
+