author | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300
---|---|---
committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300
commit | e09b41010ba33a20a87472ee821fa407a5b8da36 | (patch)
tree | d10dc367189862e7ca5c592f033dc3726e1df4e3 | /kernel/drivers/bus
parent | f93b97fd65072de626c074dbe099a1fff05ce060 | (diff)
These changes are the raw update to linux-4.4.6-rt14. The kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.
During the rebase, the following patch collided:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The collision was resolved by dropping that patch, since its logic was
already present in the updated source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/bus')
-rw-r--r-- | kernel/drivers/bus/Kconfig | 41
-rw-r--r-- | kernel/drivers/bus/Makefile | 1
-rw-r--r-- | kernel/drivers/bus/arm-cci.c | 905
-rw-r--r-- | kernel/drivers/bus/arm-ccn.c | 269
-rw-r--r-- | kernel/drivers/bus/brcmstb_gisb.c | 13
-rw-r--r-- | kernel/drivers/bus/mips_cdmm.c | 14
-rw-r--r-- | kernel/drivers/bus/mvebu-mbus.c | 117
-rw-r--r-- | kernel/drivers/bus/omap-ocp2scp.c | 2
-rw-r--r-- | kernel/drivers/bus/omap_l3_noc.c | 4
-rw-r--r-- | kernel/drivers/bus/sunxi-rsb.c | 783
-rw-r--r-- | kernel/drivers/bus/vexpress-config.c | 2
11 files changed, 1913 insertions, 238 deletions
diff --git a/kernel/drivers/bus/Kconfig b/kernel/drivers/bus/Kconfig index a1d4af6df..116b363b7 100644 --- a/kernel/drivers/bus/Kconfig +++ b/kernel/drivers/bus/Kconfig @@ -7,21 +7,24 @@ menu "Bus devices" config ARM_CCI bool +config ARM_CCI_PMU + bool + select ARM_CCI + config ARM_CCI400_COMMON bool select ARM_CCI config ARM_CCI400_PMU bool "ARM CCI400 PMU support" - default y - depends on ARM || ARM64 - depends on HW_PERF_EVENTS + depends on (ARM && CPU_V7) || ARM64 + depends on PERF_EVENTS select ARM_CCI400_COMMON + select ARM_CCI_PMU help - Support for PMU events monitoring on the ARM CCI cache coherent - interconnect. - - If unsure, say Y + Support for PMU events monitoring on the ARM CCI-400 (cache coherent + interconnect). CCI-400 supports counting events related to the + connected slave/master interfaces. config ARM_CCI400_PORT_CTRL bool @@ -31,6 +34,19 @@ config ARM_CCI400_PORT_CTRL Low level power management driver for CCI400 cache coherent interconnect for ARM platforms. +config ARM_CCI500_PMU + bool "ARM CCI500 PMU support" + depends on (ARM && CPU_V7) || ARM64 + depends on PERF_EVENTS + select ARM_CCI_PMU + help + Support for PMU events monitoring on the ARM CCI-500 cache coherent + interconnect. CCI-500 provides 8 independent event counters, which + can count events pertaining to the slave/master interfaces as well + as the internal events to the CCI. + + If unsure, say Y + config ARM_CCN bool "ARM CCN driver support" depends on ARM || ARM64 @@ -104,6 +120,17 @@ config SIMPLE_PM_BUS Controller (BSC, sometimes called "LBSC within Bus Bridge", or "External Bus Interface") as found on several Renesas ARM SoCs. +config SUNXI_RSB + tristate "Allwinner sunXi Reduced Serial Bus Driver" + default MACH_SUN8I || MACH_SUN9I + depends on ARCH_SUNXI + select REGMAP + help + Say y here to enable support for Allwinner's Reduced Serial Bus + (RSB) support. This controller is responsible for communicating + with various RSB based devices, such as AXP223, AXP8XX PMICs, + and AC100/AC200 ICs. 
+ config VEXPRESS_CONFIG bool "Versatile Express configuration bus" default y if ARCH_VEXPRESS diff --git a/kernel/drivers/bus/Makefile b/kernel/drivers/bus/Makefile index 790e7b933..fcb9f9794 100644 --- a/kernel/drivers/bus/Makefile +++ b/kernel/drivers/bus/Makefile @@ -15,5 +15,6 @@ obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o +obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o diff --git a/kernel/drivers/bus/arm-cci.c b/kernel/drivers/bus/arm-cci.c index 5340604b2..577cc4bf6 100644 --- a/kernel/drivers/bus/arm-cci.c +++ b/kernel/drivers/bus/arm-cci.c @@ -52,12 +52,15 @@ static const struct of_device_id arm_cci_matches[] = { #ifdef CONFIG_ARM_CCI400_COMMON {.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA }, #endif +#ifdef CONFIG_ARM_CCI500_PMU + { .compatible = "arm,cci-500", }, +#endif {}, }; -#ifdef CONFIG_ARM_CCI400_PMU +#ifdef CONFIG_ARM_CCI_PMU -#define DRIVER_NAME "CCI-400" +#define DRIVER_NAME "ARM-CCI" #define DRIVER_NAME_PMU DRIVER_NAME " PMU" #define CCI_PMCR 0x0100 @@ -77,20 +80,21 @@ static const struct of_device_id arm_cci_matches[] = { #define CCI_PMU_OVRFLW_FLAG 1 -#define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K) - -#define CCI_PMU_CNTR_MASK ((1ULL << 32) -1) +#define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size) +#define CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model)) +#define CCI_PMU_CNTR_MASK ((1ULL << 32) -1) +#define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1) -#define CCI_PMU_EVENT_MASK 0xffUL -#define CCI_PMU_EVENT_SOURCE(event) ((event >> 5) & 0x7) -#define CCI_PMU_EVENT_CODE(event) (event & 0x1f) - -#define CCI_PMU_MAX_HW_EVENTS 5 /* CCI PMU has 4 counters + 1 cycle counter */ +#define CCI_PMU_MAX_HW_CNTRS(model) \ + ((model)->num_hw_cntrs + (model)->fixed_hw_cntrs) /* Types of interfaces that can generate events */ enum { CCI_IF_SLAVE, CCI_IF_MASTER, +#ifdef CONFIG_ARM_CCI500_PMU + CCI_IF_GLOBAL, +#endif CCI_IF_MAX, }; @@ -100,14 +104,30 @@ struct event_range { }; struct cci_pmu_hw_events { - struct perf_event *events[CCI_PMU_MAX_HW_EVENTS]; - unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)]; + struct perf_event **events; + unsigned long *used_mask; raw_spinlock_t pmu_lock; }; +struct cci_pmu; +/* + * struct cci_pmu_model: + * @fixed_hw_cntrs - Number of fixed event counters + * @num_hw_cntrs - Maximum number of programmable event counters + * @cntr_size - Size of an event counter mapping + */ struct cci_pmu_model { char *name; + u32 fixed_hw_cntrs; + u32 num_hw_cntrs; + u32 cntr_size; + u64 nformat_attrs; + u64 nevent_attrs; + struct dev_ext_attribute *format_attrs; + struct dev_ext_attribute *event_attrs; struct event_range event_ranges[CCI_IF_MAX]; + int (*validate_hw_event)(struct cci_pmu *, unsigned long); + int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long); }; static struct cci_pmu_model cci_pmu_models[]; @@ -116,33 +136,59 @@ struct cci_pmu { void __iomem *base; struct pmu pmu; int nr_irqs; - int irqs[CCI_PMU_MAX_HW_EVENTS]; + int *irqs; unsigned long active_irqs; const struct cci_pmu_model *model; struct cci_pmu_hw_events hw_events; struct platform_device *plat_device; - int num_events; + int num_cntrs; atomic_t active_events; struct mutex reserve_mutex; + struct notifier_block cpu_nb; cpumask_t cpus; }; -static struct cci_pmu *pmu; #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) 
+enum cci_models { +#ifdef CONFIG_ARM_CCI400_PMU + CCI400_R0, + CCI400_R1, +#endif +#ifdef CONFIG_ARM_CCI500_PMU + CCI500_R0, +#endif + CCI_MODEL_MAX +}; + +static ssize_t cci_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t cci_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf); + +#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \ + { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } + +#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \ + CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config) +#define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \ + CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config) + +/* CCI400 PMU Specific definitions */ + +#ifdef CONFIG_ARM_CCI400_PMU + /* Port ids */ -#define CCI_PORT_S0 0 -#define CCI_PORT_S1 1 -#define CCI_PORT_S2 2 -#define CCI_PORT_S3 3 -#define CCI_PORT_S4 4 -#define CCI_PORT_M0 5 -#define CCI_PORT_M1 6 -#define CCI_PORT_M2 7 - -#define CCI_REV_R0 0 -#define CCI_REV_R1 1 -#define CCI_REV_R1_PX 5 +#define CCI400_PORT_S0 0 +#define CCI400_PORT_S1 1 +#define CCI400_PORT_S2 2 +#define CCI400_PORT_S3 3 +#define CCI400_PORT_S4 4 +#define CCI400_PORT_M0 5 +#define CCI400_PORT_M1 6 +#define CCI400_PORT_M2 7 + +#define CCI400_R1_PX 5 /* * Instead of an event id to monitor CCI cycles, a dedicated counter is @@ -150,12 +196,11 @@ static struct cci_pmu *pmu; * make use of this event in hardware. */ enum cci400_perf_events { - CCI_PMU_CYCLES = 0xff + CCI400_PMU_CYCLES = 0xff }; -#define CCI_PMU_CYCLE_CNTR_IDX 0 -#define CCI_PMU_CNTR0_IDX 1 -#define CCI_PMU_CNTR_LAST(cci_pmu) (CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1) +#define CCI400_PMU_CYCLE_CNTR_IDX 0 +#define CCI400_PMU_CNTR0_IDX 1 /* * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8 @@ -169,37 +214,173 @@ enum cci400_perf_events { * the different revisions and are used to validate the event to be monitored. 
*/ -#define CCI_REV_R0_SLAVE_PORT_MIN_EV 0x00 -#define CCI_REV_R0_SLAVE_PORT_MAX_EV 0x13 -#define CCI_REV_R0_MASTER_PORT_MIN_EV 0x14 -#define CCI_REV_R0_MASTER_PORT_MAX_EV 0x1a +#define CCI400_PMU_EVENT_MASK 0xffUL +#define CCI400_PMU_EVENT_SOURCE_SHIFT 5 +#define CCI400_PMU_EVENT_SOURCE_MASK 0x7 +#define CCI400_PMU_EVENT_CODE_SHIFT 0 +#define CCI400_PMU_EVENT_CODE_MASK 0x1f +#define CCI400_PMU_EVENT_SOURCE(event) \ + ((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \ + CCI400_PMU_EVENT_SOURCE_MASK) +#define CCI400_PMU_EVENT_CODE(event) \ + ((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK) + +#define CCI400_R0_SLAVE_PORT_MIN_EV 0x00 +#define CCI400_R0_SLAVE_PORT_MAX_EV 0x13 +#define CCI400_R0_MASTER_PORT_MIN_EV 0x14 +#define CCI400_R0_MASTER_PORT_MAX_EV 0x1a + +#define CCI400_R1_SLAVE_PORT_MIN_EV 0x00 +#define CCI400_R1_SLAVE_PORT_MAX_EV 0x14 +#define CCI400_R1_MASTER_PORT_MIN_EV 0x00 +#define CCI400_R1_MASTER_PORT_MAX_EV 0x11 + +#define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \ + CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \ + (unsigned long)_config) + +static ssize_t cci400_pmu_cycle_event_show(struct device *dev, + struct device_attribute *attr, char *buf); + +static struct dev_ext_attribute cci400_pmu_format_attrs[] = { + CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), + CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"), +}; + +static struct dev_ext_attribute cci400_r0_pmu_event_attrs[] = { + /* Slave events */ + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), + /* Master events */ + CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A), + /* Special event for cycles counter */ + CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), +}; + +static struct dev_ext_attribute cci400_r1_pmu_event_attrs[] = { + /* Slave events */ + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), + 
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14), + /* Master events */ + CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11), + /* Special event for cycles counter */ + CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), +}; -#define CCI_REV_R1_SLAVE_PORT_MIN_EV 0x00 -#define CCI_REV_R1_SLAVE_PORT_MAX_EV 0x14 -#define CCI_REV_R1_MASTER_PORT_MIN_EV 0x00 -#define CCI_REV_R1_MASTER_PORT_MAX_EV 0x11 +static ssize_t cci400_pmu_cycle_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var); +} -static int pmu_validate_hw_event(unsigned long hw_event) +static int cci400_get_event_idx(struct cci_pmu *cci_pmu, + struct cci_pmu_hw_events *hw, + unsigned long cci_event) { - u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event); - u8 ev_code = CCI_PMU_EVENT_CODE(hw_event); + int idx; + + /* cycles event idx is fixed */ + if (cci_event == CCI400_PMU_CYCLES) { + if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask)) + return -EAGAIN; + + return CCI400_PMU_CYCLE_CNTR_IDX; + } + + for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx) + if (!test_and_set_bit(idx, hw->used_mask)) + return idx; + + /* No counters available */ + return -EAGAIN; +} + +static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event) +{ + u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event); + u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event); int if_type; - if (hw_event & 
~CCI_PMU_EVENT_MASK) + if (hw_event & ~CCI400_PMU_EVENT_MASK) return -ENOENT; + if (hw_event == CCI400_PMU_CYCLES) + return hw_event; + switch (ev_source) { - case CCI_PORT_S0: - case CCI_PORT_S1: - case CCI_PORT_S2: - case CCI_PORT_S3: - case CCI_PORT_S4: + case CCI400_PORT_S0: + case CCI400_PORT_S1: + case CCI400_PORT_S2: + case CCI400_PORT_S3: + case CCI400_PORT_S4: /* Slave Interface */ if_type = CCI_IF_SLAVE; break; - case CCI_PORT_M0: - case CCI_PORT_M1: - case CCI_PORT_M2: + case CCI400_PORT_M0: + case CCI400_PORT_M1: + case CCI400_PORT_M2: /* Master Interface */ if_type = CCI_IF_MASTER; break; @@ -207,87 +388,291 @@ static int pmu_validate_hw_event(unsigned long hw_event) return -ENOENT; } - if (ev_code >= pmu->model->event_ranges[if_type].min && - ev_code <= pmu->model->event_ranges[if_type].max) + if (ev_code >= cci_pmu->model->event_ranges[if_type].min && + ev_code <= cci_pmu->model->event_ranges[if_type].max) return hw_event; return -ENOENT; } -static int probe_cci_revision(void) +static int probe_cci400_revision(void) { int rev; rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; rev >>= CCI_PID2_REV_SHIFT; - if (rev < CCI_REV_R1_PX) - return CCI_REV_R0; + if (rev < CCI400_R1_PX) + return CCI400_R0; else - return CCI_REV_R1; + return CCI400_R1; } static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev) { if (platform_has_secure_cci_access()) - return &cci_pmu_models[probe_cci_revision()]; + return &cci_pmu_models[probe_cci400_revision()]; return NULL; } +#else /* !CONFIG_ARM_CCI400_PMU */ +static inline struct cci_pmu_model *probe_cci_model(struct platform_device *pdev) +{ + return NULL; +} +#endif /* CONFIG_ARM_CCI400_PMU */ + +#ifdef CONFIG_ARM_CCI500_PMU + +/* + * CCI500 provides 8 independent event counters that can count + * any of the events available. + * + * CCI500 PMU event id is an 9-bit value made of two parts. 
+ * bits [8:5] - Source for the event + * 0x0-0x6 - Slave interfaces + * 0x8-0xD - Master interfaces + * 0xf - Global Events + * 0x7,0xe - Reserved + * + * bits [4:0] - Event code (specific to type of interface) + */ + +/* Port ids */ +#define CCI500_PORT_S0 0x0 +#define CCI500_PORT_S1 0x1 +#define CCI500_PORT_S2 0x2 +#define CCI500_PORT_S3 0x3 +#define CCI500_PORT_S4 0x4 +#define CCI500_PORT_S5 0x5 +#define CCI500_PORT_S6 0x6 + +#define CCI500_PORT_M0 0x8 +#define CCI500_PORT_M1 0x9 +#define CCI500_PORT_M2 0xa +#define CCI500_PORT_M3 0xb +#define CCI500_PORT_M4 0xc +#define CCI500_PORT_M5 0xd + +#define CCI500_PORT_GLOBAL 0xf + +#define CCI500_PMU_EVENT_MASK 0x1ffUL +#define CCI500_PMU_EVENT_SOURCE_SHIFT 0x5 +#define CCI500_PMU_EVENT_SOURCE_MASK 0xf +#define CCI500_PMU_EVENT_CODE_SHIFT 0x0 +#define CCI500_PMU_EVENT_CODE_MASK 0x1f + +#define CCI500_PMU_EVENT_SOURCE(event) \ + ((event >> CCI500_PMU_EVENT_SOURCE_SHIFT) & CCI500_PMU_EVENT_SOURCE_MASK) +#define CCI500_PMU_EVENT_CODE(event) \ + ((event >> CCI500_PMU_EVENT_CODE_SHIFT) & CCI500_PMU_EVENT_CODE_MASK) + +#define CCI500_SLAVE_PORT_MIN_EV 0x00 +#define CCI500_SLAVE_PORT_MAX_EV 0x1f +#define CCI500_MASTER_PORT_MIN_EV 0x00 +#define CCI500_MASTER_PORT_MAX_EV 0x06 +#define CCI500_GLOBAL_PORT_MIN_EV 0x00 +#define CCI500_GLOBAL_PORT_MAX_EV 0x0f + + +#define CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \ + CCI_EXT_ATTR_ENTRY(_name, cci500_pmu_global_event_show, \ + (unsigned long) _config) + +static ssize_t cci500_pmu_global_event_show(struct device *dev, + struct device_attribute *attr, char *buf); + +static struct dev_ext_attribute cci500_pmu_format_attrs[] = { + CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), + CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"), +}; + +static struct dev_ext_attribute cci500_pmu_event_attrs[] = { + /* Slave events */ + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11), + CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19), + CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A), + CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C), + CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D), + CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F), + + /* Master events */ + 
CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6), + + /* Global events */ + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE), + CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF), +}; + +static ssize_t cci500_pmu_global_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + /* Global events have single fixed source code */ + return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n", + (unsigned long)eattr->var, CCI500_PORT_GLOBAL); +} + +static int cci500_validate_hw_event(struct cci_pmu *cci_pmu, + unsigned long hw_event) +{ + u32 ev_source = CCI500_PMU_EVENT_SOURCE(hw_event); + u32 ev_code = CCI500_PMU_EVENT_CODE(hw_event); + int if_type; + + if (hw_event & ~CCI500_PMU_EVENT_MASK) + return -ENOENT; + + switch (ev_source) { + case CCI500_PORT_S0: + case CCI500_PORT_S1: + case CCI500_PORT_S2: + case CCI500_PORT_S3: + case CCI500_PORT_S4: + case CCI500_PORT_S5: + case CCI500_PORT_S6: + if_type = CCI_IF_SLAVE; + break; + case CCI500_PORT_M0: + case CCI500_PORT_M1: + case CCI500_PORT_M2: + case CCI500_PORT_M3: + case CCI500_PORT_M4: + case CCI500_PORT_M5: + if_type = CCI_IF_MASTER; + break; + case CCI500_PORT_GLOBAL: + if_type = CCI_IF_GLOBAL; + break; + default: + return -ENOENT; + } + + if (ev_code >= cci_pmu->model->event_ranges[if_type].min && + ev_code <= cci_pmu->model->event_ranges[if_type].max) + return hw_event; + + return -ENOENT; +} +#endif /* CONFIG_ARM_CCI500_PMU */ + +static ssize_t cci_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var); +} + +static ssize_t cci_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + /* source parameter is mandatory for normal PMU events */ + return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n", + (unsigned long)eattr->var); +} static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx) { - 
return CCI_PMU_CYCLE_CNTR_IDX <= idx && - idx <= CCI_PMU_CNTR_LAST(cci_pmu); + return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu); } -static u32 pmu_read_register(int idx, unsigned int offset) +static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset) { - return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset); + return readl_relaxed(cci_pmu->base + + CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); } -static void pmu_write_register(u32 value, int idx, unsigned int offset) +static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value, + int idx, unsigned int offset) { - return writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset); + return writel_relaxed(value, cci_pmu->base + + CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); } -static void pmu_disable_counter(int idx) +static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx) { - pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL); + pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL); } -static void pmu_enable_counter(int idx) +static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx) { - pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL); + pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL); } -static void pmu_set_event(int idx, unsigned long event) +static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event) { - pmu_write_register(event, idx, CCI_PMU_EVT_SEL); + pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL); } +/* + * Returns the number of programmable counters actually implemented + * by the cci + */ static u32 pmu_get_max_counters(void) { - u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) & - CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT; - - /* add 1 for cycle counter */ - return n_cnts + 1; + return (readl_relaxed(cci_ctrl_base + CCI_PMCR) & + CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT; } static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event) { struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); - struct hw_perf_event *hw_event = &event->hw; - unsigned long cci_event = hw_event->config_base; + unsigned long cci_event = event->hw.config_base; int idx; - if (cci_event == CCI_PMU_CYCLES) { - if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask)) - return -EAGAIN; + if (cci_pmu->model->get_event_idx) + return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event); - return CCI_PMU_CYCLE_CNTR_IDX; - } - - for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx) + /* Generic code to find an unused idx from the mask */ + for(idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) if (!test_and_set_bit(idx, hw->used_mask)) return idx; @@ -297,18 +682,13 @@ static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *ev static int pmu_map_event(struct perf_event *event) { - int mapping; - unsigned long config = event->attr.config; + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); - if (event->attr.type < PERF_TYPE_MAX) + if (event->attr.type < PERF_TYPE_MAX || + !cci_pmu->model->validate_hw_event) return -ENOENT; - if (config == CCI_PMU_CYCLES) - mapping = config; - else - mapping = pmu_validate_hw_event(config); - - return mapping; + return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config); } static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) @@ -319,7 +699,7 @@ static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) if (unlikely(!pmu_device)) return -ENODEV; - if (pmu->nr_irqs < 1) { + if (cci_pmu->nr_irqs < 1) { 
dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n"); return -ENODEV; } @@ -331,16 +711,16 @@ static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) * * This should allow handling of non-unique interrupt for the counters. */ - for (i = 0; i < pmu->nr_irqs; i++) { - int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED, + for (i = 0; i < cci_pmu->nr_irqs; i++) { + int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED, "arm-cci-pmu", cci_pmu); if (err) { dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n", - pmu->irqs[i]); + cci_pmu->irqs[i]); return err; } - set_bit(i, &pmu->active_irqs); + set_bit(i, &cci_pmu->active_irqs); } return 0; @@ -350,11 +730,11 @@ static void pmu_free_irq(struct cci_pmu *cci_pmu) { int i; - for (i = 0; i < pmu->nr_irqs; i++) { - if (!test_and_clear_bit(i, &pmu->active_irqs)) + for (i = 0; i < cci_pmu->nr_irqs; i++) { + if (!test_and_clear_bit(i, &cci_pmu->active_irqs)) continue; - free_irq(pmu->irqs[i], cci_pmu); + free_irq(cci_pmu->irqs[i], cci_pmu); } } @@ -369,7 +749,7 @@ static u32 pmu_read_counter(struct perf_event *event) dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); return 0; } - value = pmu_read_register(idx, CCI_PMU_CNTR); + value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR); return value; } @@ -383,7 +763,7 @@ static void pmu_write_counter(struct perf_event *event, u32 value) if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); else - pmu_write_register(value, idx, CCI_PMU_CNTR); + pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR); } static u64 pmu_event_update(struct perf_event *event) @@ -427,7 +807,7 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev) { unsigned long flags; struct cci_pmu *cci_pmu = dev; - struct cci_pmu_hw_events *events = &pmu->hw_events; + struct cci_pmu_hw_events *events = &cci_pmu->hw_events; int idx, handled = IRQ_NONE; raw_spin_lock_irqsave(&events->pmu_lock, flags); @@ -436,7 +816,7 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev) * This should work regardless of whether we have per-counter overflow * interrupt or a combined overflow interrupt. */ - for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) { + for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) { struct perf_event *event = events->events[idx]; struct hw_perf_event *hw_counter; @@ -446,11 +826,12 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev) hw_counter = &event->hw; /* Did this counter overflow? */ - if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) & + if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) & CCI_PMU_OVRFLW_FLAG)) continue; - pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW); + pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx, + CCI_PMU_OVRFLW); pmu_event_update(event); pmu_event_set_period(event); @@ -492,7 +873,7 @@ static void cci_pmu_enable(struct pmu *pmu) { struct cci_pmu *cci_pmu = to_cci_pmu(pmu); struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; - int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_events); + int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs); unsigned long flags; u32 val; @@ -523,6 +904,16 @@ static void cci_pmu_disable(struct pmu *pmu) raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); } +/* + * Check if the idx represents a non-programmable counter. + * All the fixed event counters are mapped before the programmable + * counters. 
+ */ +static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx) +{ + return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs); +} + static void cci_pmu_start(struct perf_event *event, int pmu_flags) { struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); @@ -547,12 +938,12 @@ static void cci_pmu_start(struct perf_event *event, int pmu_flags) raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); - /* Configure the event to count, unless you are counting cycles */ - if (idx != CCI_PMU_CYCLE_CNTR_IDX) - pmu_set_event(idx, hwc->config_base); + /* Configure the counter unless you are counting a fixed event */ + if (!pmu_fixed_hw_idx(cci_pmu, idx)) + pmu_set_event(cci_pmu, idx, hwc->config_base); pmu_event_set_period(event); - pmu_enable_counter(idx); + pmu_enable_counter(cci_pmu, idx); raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); } @@ -575,7 +966,7 @@ static void cci_pmu_stop(struct perf_event *event, int pmu_flags) * We always reprogram the counter, so ignore PERF_EF_UPDATE. See * cci_pmu_start() */ - pmu_disable_counter(idx); + pmu_disable_counter(cci_pmu, idx); pmu_event_update(event); hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; } @@ -655,13 +1046,16 @@ static int validate_group(struct perf_event *event) { struct perf_event *sibling, *leader = event->group_leader; + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)]; struct cci_pmu_hw_events fake_pmu = { /* * Initialise the fake PMU. We only need to populate the * used_mask for the purposes of validation. */ - .used_mask = { 0 }, + .used_mask = mask, }; + memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long)); if (!validate_event(event->pmu, &fake_pmu, leader)) return -EINVAL; @@ -779,20 +1173,27 @@ static int cci_pmu_event_init(struct perf_event *event) return err; } -static ssize_t pmu_attr_cpumask_show(struct device *dev, +static ssize_t pmu_cpumask_attr_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + struct cci_pmu *cci_pmu = eattr->var; + int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", - cpumask_pr_args(&pmu->cpus)); + cpumask_pr_args(&cci_pmu->cpus)); buf[n++] = '\n'; buf[n] = '\0'; return n; } -static DEVICE_ATTR(cpumask, S_IRUGO, pmu_attr_cpumask_show, NULL); +static struct dev_ext_attribute pmu_cpumask_attr = { + __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL), + NULL, /* Populated in cci_pmu_init */ +}; static struct attribute *pmu_attrs[] = { - &dev_attr_cpumask.attr, + &pmu_cpumask_attr.attr.attr, NULL, }; @@ -800,14 +1201,78 @@ static struct attribute_group pmu_attr_group = { .attrs = pmu_attrs, }; +static struct attribute_group pmu_format_attr_group = { + .name = "format", + .attrs = NULL, /* Filled in cci_pmu_init_attrs */ +}; + +static struct attribute_group pmu_event_attr_group = { + .name = "events", + .attrs = NULL, /* Filled in cci_pmu_init_attrs */ +}; + static const struct attribute_group *pmu_attr_groups[] = { &pmu_attr_group, + &pmu_format_attr_group, + &pmu_event_attr_group, NULL }; +static struct attribute **alloc_attrs(struct platform_device *pdev, + int n, struct dev_ext_attribute *source) +{ + int i; + struct attribute **attrs; + + /* Alloc n + 1 (for terminating NULL) */ + attrs = devm_kcalloc(&pdev->dev, n + 1, sizeof(struct attribute *), + GFP_KERNEL); + if (!attrs) + return attrs; + for(i = 0; i < n; i++) + attrs[i] = &source[i].attr.attr; + return attrs; +} + +static int cci_pmu_init_attrs(struct 
cci_pmu *cci_pmu, struct platform_device *pdev) +{ + const struct cci_pmu_model *model = cci_pmu->model; + struct attribute **attrs; + + /* + * All allocations below are managed, hence doesn't need to be + * free'd explicitly in case of an error. + */ + + if (model->nevent_attrs) { + attrs = alloc_attrs(pdev, model->nevent_attrs, + model->event_attrs); + if (!attrs) + return -ENOMEM; + pmu_event_attr_group.attrs = attrs; + } + if (model->nformat_attrs) { + attrs = alloc_attrs(pdev, model->nformat_attrs, + model->format_attrs); + if (!attrs) + return -ENOMEM; + pmu_format_attr_group.attrs = attrs; + } + pmu_cpumask_attr.var = cci_pmu; + + return 0; +} + static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) { char *name = cci_pmu->model->name; + u32 num_cntrs; + int rc; + + rc = cci_pmu_init_attrs(cci_pmu, pdev); + if (rc) + return rc; + cci_pmu->pmu = (struct pmu) { .name = cci_pmu->model->name, .task_ctx_nr = perf_invalid_context, @@ -823,7 +1288,15 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) }; cci_pmu->plat_device = pdev; - cci_pmu->num_events = pmu_get_max_counters(); + num_cntrs = pmu_get_max_counters(); + if (num_cntrs > cci_pmu->model->num_hw_cntrs) { + dev_warn(&pdev->dev, + "PMU implements more counters(%d) than supported by" + " the model(%d), truncated.", + num_cntrs, cci_pmu->model->num_hw_cntrs); + num_cntrs = cci_pmu->model->num_hw_cntrs; + } + cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs; return perf_pmu_register(&cci_pmu->pmu, name, -1); } @@ -831,12 +1304,14 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) static int cci_pmu_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) { + struct cci_pmu *cci_pmu = container_of(self, + struct cci_pmu, cpu_nb); unsigned int cpu = (long)hcpu; unsigned int target; switch (action & ~CPU_TASKS_FROZEN) { case CPU_DOWN_PREPARE: - if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus)) + if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus)) break; target = cpumask_any_but(cpu_online_mask, cpu); if (target < 0) // UP, last CPU @@ -845,7 +1320,7 @@ static int cci_pmu_cpu_notifier(struct notifier_block *self, * TODO: migrate context once core races on event->ctx have * been fixed. */ - cpumask_set_cpu(target, &pmu->cpus); + cpumask_set_cpu(target, &cci_pmu->cpus); default: break; } @@ -853,57 +1328,103 @@ static int cci_pmu_cpu_notifier(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block cci_pmu_cpu_nb = { - .notifier_call = cci_pmu_cpu_notifier, - /* - * to migrate uncore events, our notifier should be executed - * before perf core's notifier. 
- */ - .priority = CPU_PRI_PERF + 1, -}; - static struct cci_pmu_model cci_pmu_models[] = { - [CCI_REV_R0] = { +#ifdef CONFIG_ARM_CCI400_PMU + [CCI400_R0] = { .name = "CCI_400", + .fixed_hw_cntrs = 1, /* Cycle counter */ + .num_hw_cntrs = 4, + .cntr_size = SZ_4K, + .format_attrs = cci400_pmu_format_attrs, + .nformat_attrs = ARRAY_SIZE(cci400_pmu_format_attrs), + .event_attrs = cci400_r0_pmu_event_attrs, + .nevent_attrs = ARRAY_SIZE(cci400_r0_pmu_event_attrs), .event_ranges = { [CCI_IF_SLAVE] = { - CCI_REV_R0_SLAVE_PORT_MIN_EV, - CCI_REV_R0_SLAVE_PORT_MAX_EV, + CCI400_R0_SLAVE_PORT_MIN_EV, + CCI400_R0_SLAVE_PORT_MAX_EV, }, [CCI_IF_MASTER] = { - CCI_REV_R0_MASTER_PORT_MIN_EV, - CCI_REV_R0_MASTER_PORT_MAX_EV, + CCI400_R0_MASTER_PORT_MIN_EV, + CCI400_R0_MASTER_PORT_MAX_EV, }, }, + .validate_hw_event = cci400_validate_hw_event, + .get_event_idx = cci400_get_event_idx, }, - [CCI_REV_R1] = { + [CCI400_R1] = { .name = "CCI_400_r1", + .fixed_hw_cntrs = 1, /* Cycle counter */ + .num_hw_cntrs = 4, + .cntr_size = SZ_4K, + .format_attrs = cci400_pmu_format_attrs, + .nformat_attrs = ARRAY_SIZE(cci400_pmu_format_attrs), + .event_attrs = cci400_r1_pmu_event_attrs, + .nevent_attrs = ARRAY_SIZE(cci400_r1_pmu_event_attrs), .event_ranges = { [CCI_IF_SLAVE] = { - CCI_REV_R1_SLAVE_PORT_MIN_EV, - CCI_REV_R1_SLAVE_PORT_MAX_EV, + CCI400_R1_SLAVE_PORT_MIN_EV, + CCI400_R1_SLAVE_PORT_MAX_EV, }, [CCI_IF_MASTER] = { - CCI_REV_R1_MASTER_PORT_MIN_EV, - CCI_REV_R1_MASTER_PORT_MAX_EV, + CCI400_R1_MASTER_PORT_MIN_EV, + CCI400_R1_MASTER_PORT_MAX_EV, }, }, + .validate_hw_event = cci400_validate_hw_event, + .get_event_idx = cci400_get_event_idx, }, +#endif +#ifdef CONFIG_ARM_CCI500_PMU + [CCI500_R0] = { + .name = "CCI_500", + .fixed_hw_cntrs = 0, + .num_hw_cntrs = 8, + .cntr_size = SZ_64K, + .format_attrs = cci500_pmu_format_attrs, + .nformat_attrs = ARRAY_SIZE(cci500_pmu_format_attrs), + .event_attrs = cci500_pmu_event_attrs, + .nevent_attrs = ARRAY_SIZE(cci500_pmu_event_attrs), + .event_ranges = { + [CCI_IF_SLAVE] = { + CCI500_SLAVE_PORT_MIN_EV, + CCI500_SLAVE_PORT_MAX_EV, + }, + [CCI_IF_MASTER] = { + CCI500_MASTER_PORT_MIN_EV, + CCI500_MASTER_PORT_MAX_EV, + }, + [CCI_IF_GLOBAL] = { + CCI500_GLOBAL_PORT_MIN_EV, + CCI500_GLOBAL_PORT_MAX_EV, + }, + }, + .validate_hw_event = cci500_validate_hw_event, + }, +#endif }; static const struct of_device_id arm_cci_pmu_matches[] = { +#ifdef CONFIG_ARM_CCI400_PMU { .compatible = "arm,cci-400-pmu", .data = NULL, }, { .compatible = "arm,cci-400-pmu,r0", - .data = &cci_pmu_models[CCI_REV_R0], + .data = &cci_pmu_models[CCI400_R0], }, { .compatible = "arm,cci-400-pmu,r1", - .data = &cci_pmu_models[CCI_REV_R1], + .data = &cci_pmu_models[CCI400_R1], + }, +#endif +#ifdef CONFIG_ARM_CCI500_PMU + { + .compatible = "arm,cci-500-pmu,r0", + .data = &cci_pmu_models[CCI500_R0], }, +#endif {}, }; @@ -932,68 +1453,114 @@ static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) return false; } -static int cci_pmu_probe(struct platform_device *pdev) +static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev) { - struct resource *res; - int i, ret, irq; + struct cci_pmu *cci_pmu; const struct cci_pmu_model *model; + /* + * All allocations are devm_* hence we don't have to free + * them explicitly on an error, as it would end up in driver + * detach. 
+ */ model = get_cci_model(pdev); if (!model) { dev_warn(&pdev->dev, "CCI PMU version not supported\n"); - return -ENODEV; + return ERR_PTR(-ENODEV); } - pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL); - if (!pmu) - return -ENOMEM; + cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL); + if (!cci_pmu) + return ERR_PTR(-ENOMEM); + + cci_pmu->model = model; + cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model), + sizeof(*cci_pmu->irqs), GFP_KERNEL); + if (!cci_pmu->irqs) + return ERR_PTR(-ENOMEM); + cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev, + CCI_PMU_MAX_HW_CNTRS(model), + sizeof(*cci_pmu->hw_events.events), + GFP_KERNEL); + if (!cci_pmu->hw_events.events) + return ERR_PTR(-ENOMEM); + cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev, + BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)), + sizeof(*cci_pmu->hw_events.used_mask), + GFP_KERNEL); + if (!cci_pmu->hw_events.used_mask) + return ERR_PTR(-ENOMEM); + + return cci_pmu; +} + + +static int cci_pmu_probe(struct platform_device *pdev) +{ + struct resource *res; + struct cci_pmu *cci_pmu; + int i, ret, irq; + + cci_pmu = cci_pmu_alloc(pdev); + if (IS_ERR(cci_pmu)) + return PTR_ERR(cci_pmu); - pmu->model = model; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pmu->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(pmu->base)) + cci_pmu->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(cci_pmu->base)) return -ENOMEM; /* - * CCI PMU has 5 overflow signals - one per counter; but some may be tied + * CCI PMU has one overflow interrupt per counter; but some may be tied * together to a common interrupt. */ - pmu->nr_irqs = 0; - for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) { + cci_pmu->nr_irqs = 0; + for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) { irq = platform_get_irq(pdev, i); if (irq < 0) break; - if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs)) + if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs)) continue; - pmu->irqs[pmu->nr_irqs++] = irq; + cci_pmu->irqs[cci_pmu->nr_irqs++] = irq; } /* * Ensure that the device tree has as many interrupts as the number * of counters. */ - if (i < CCI_PMU_MAX_HW_EVENTS) { + if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) { dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n", - i, CCI_PMU_MAX_HW_EVENTS); + i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)); return -EINVAL; } - raw_spin_lock_init(&pmu->hw_events.pmu_lock); - mutex_init(&pmu->reserve_mutex); - atomic_set(&pmu->active_events, 0); - cpumask_set_cpu(smp_processor_id(), &pmu->cpus); + raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); + mutex_init(&cci_pmu->reserve_mutex); + atomic_set(&cci_pmu->active_events, 0); + cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus); + + cci_pmu->cpu_nb = (struct notifier_block) { + .notifier_call = cci_pmu_cpu_notifier, + /* + * to migrate uncore events, our notifier should be executed + * before perf core's notifier. 
+ */ + .priority = CPU_PRI_PERF + 1, + }; - ret = register_cpu_notifier(&cci_pmu_cpu_nb); + ret = register_cpu_notifier(&cci_pmu->cpu_nb); if (ret) return ret; - ret = cci_pmu_init(pmu, pdev); - if (ret) + ret = cci_pmu_init(cci_pmu, pdev); + if (ret) { + unregister_cpu_notifier(&cci_pmu->cpu_nb); return ret; + } - pr_info("ARM %s PMU driver probed", pmu->model->name); + pr_info("ARM %s PMU driver probed", cci_pmu->model->name); return 0; } @@ -1032,14 +1599,14 @@ static int __init cci_platform_init(void) return platform_driver_register(&cci_platform_driver); } -#else /* !CONFIG_ARM_CCI400_PMU */ +#else /* !CONFIG_ARM_CCI_PMU */ static int __init cci_platform_init(void) { return 0; } -#endif /* CONFIG_ARM_CCI400_PMU */ +#endif /* CONFIG_ARM_CCI_PMU */ #ifdef CONFIG_ARM_CCI400_PORT_CTRL diff --git a/kernel/drivers/bus/arm-ccn.c b/kernel/drivers/bus/arm-ccn.c index 60397ec77..7082c7268 100644 --- a/kernel/drivers/bus/arm-ccn.c +++ b/kernel/drivers/bus/arm-ccn.c @@ -166,13 +166,17 @@ struct arm_ccn_dt { struct hrtimer hrtimer; + cpumask_t cpu; + struct notifier_block cpu_nb; + struct pmu pmu; }; struct arm_ccn { struct device *dev; void __iomem *base; - unsigned irq_used:1; + unsigned int irq; + unsigned sbas_present:1; unsigned sbsx_present:1; @@ -336,6 +340,23 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev, if (event->mask) res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x", event->mask); + + /* Arguments required by an event */ + switch (event->type) { + case CCN_TYPE_CYCLES: + break; + case CCN_TYPE_XP: + res += snprintf(buf + res, PAGE_SIZE - res, + ",xp=?,port=?,vc=?,dir=?"); + if (event->event == CCN_EVENT_WATCHPOINT) + res += snprintf(buf + res, PAGE_SIZE - res, + ",cmp_l=?,cmp_h=?,mask=?"); + break; + default: + res += snprintf(buf + res, PAGE_SIZE - res, ",node=?"); + break; + } + res += snprintf(buf + res, PAGE_SIZE - res, "\n"); return res; @@ -521,6 +542,25 @@ static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = { .attrs = arm_ccn_pmu_cmp_mask_attrs, }; +static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu); +} + +static struct device_attribute arm_ccn_pmu_cpumask_attr = + __ATTR(cpumask, S_IRUGO, arm_ccn_pmu_cpumask_show, NULL); + +static struct attribute *arm_ccn_pmu_cpumask_attrs[] = { + &arm_ccn_pmu_cpumask_attr.attr, + NULL, +}; + +static struct attribute_group arm_ccn_pmu_cpumask_attr_group = { + .attrs = arm_ccn_pmu_cpumask_attrs, +}; /* * Default poll period is 10ms, which is way over the top anyway, @@ -542,6 +582,7 @@ static const struct attribute_group *arm_ccn_pmu_attr_groups[] = { &arm_ccn_pmu_events_attr_group, &arm_ccn_pmu_format_attr_group, &arm_ccn_pmu_cmp_mask_attr_group, + &arm_ccn_pmu_cpumask_attr_group, NULL }; @@ -587,7 +628,65 @@ static int arm_ccn_pmu_type_eq(u32 a, u32 b) return 0; } -static void arm_ccn_pmu_event_destroy(struct perf_event *event) +static int arm_ccn_pmu_event_alloc(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + u32 node_xp, type, event_id; + struct arm_ccn_component *source; + int bit; + + node_xp = CCN_CONFIG_NODE(event->attr.config); + type = CCN_CONFIG_TYPE(event->attr.config); + event_id = CCN_CONFIG_EVENT(event->attr.config); + + /* Allocate the cycle counter */ + if (type == CCN_TYPE_CYCLES) { + if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER, + 
ccn->dt.pmu_counters_mask)) + return -EAGAIN; + + hw->idx = CCN_IDX_PMU_CYCLE_COUNTER; + ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event; + + return 0; + } + + /* Allocate an event counter */ + hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask, + CCN_NUM_PMU_EVENT_COUNTERS); + if (hw->idx < 0) { + dev_dbg(ccn->dev, "No more counters available!\n"); + return -EAGAIN; + } + + if (type == CCN_TYPE_XP) + source = &ccn->xp[node_xp]; + else + source = &ccn->node[node_xp]; + ccn->dt.pmu_counters[hw->idx].source = source; + + /* Allocate an event source or a watchpoint */ + if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT) + bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask, + CCN_NUM_XP_WATCHPOINTS); + else + bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask, + CCN_NUM_PMU_EVENTS); + if (bit < 0) { + dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n", + node_xp); + clear_bit(hw->idx, ccn->dt.pmu_counters_mask); + return -EAGAIN; + } + hw->config_base = bit; + + ccn->dt.pmu_counters[hw->idx].event = event; + + return 0; +} + +static void arm_ccn_pmu_event_release(struct perf_event *event) { struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); struct hw_perf_event *hw = &event->hw; @@ -616,15 +715,14 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) struct arm_ccn *ccn; struct hw_perf_event *hw = &event->hw; u32 node_xp, type, event_id; - int valid, bit; - struct arm_ccn_component *source; + int valid; int i; + struct perf_event *sibling; if (event->attr.type != event->pmu->type) return -ENOENT; ccn = pmu_to_arm_ccn(event->pmu); - event->destroy = arm_ccn_pmu_event_destroy; if (hw->sample_period) { dev_warn(ccn->dev, "Sampling not supported!\n"); @@ -642,6 +740,16 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) dev_warn(ccn->dev, "Can't provide per-task data!\n"); return -EOPNOTSUPP; } + /* + * Many perf core operations (eg. events rotation) operate on a + * single CPU context. This is obvious for CPU PMUs, where one + * expects the same sets of events being observed on all CPUs, + * but can lead to issues for off-core PMUs, like CCN, where each + * event could be theoretically assigned to a different CPU. To + * mitigate this, we enforce CPU assignment to one, selected + * processor (the one described in the "cpumask" attribute). 
+ */ + event->cpu = cpumask_first(&ccn->dt.cpu); node_xp = CCN_CONFIG_NODE(event->attr.config); type = CCN_CONFIG_TYPE(event->attr.config); @@ -711,48 +819,20 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) node_xp, type, port); } - /* Allocate the cycle counter */ - if (type == CCN_TYPE_CYCLES) { - if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER, - ccn->dt.pmu_counters_mask)) - return -EAGAIN; - - hw->idx = CCN_IDX_PMU_CYCLE_COUNTER; - ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event; - - return 0; - } - - /* Allocate an event counter */ - hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask, - CCN_NUM_PMU_EVENT_COUNTERS); - if (hw->idx < 0) { - dev_warn(ccn->dev, "No more counters available!\n"); - return -EAGAIN; - } - - if (type == CCN_TYPE_XP) - source = &ccn->xp[node_xp]; - else - source = &ccn->node[node_xp]; - ccn->dt.pmu_counters[hw->idx].source = source; - - /* Allocate an event source or a watchpoint */ - if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT) - bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask, - CCN_NUM_XP_WATCHPOINTS); - else - bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask, - CCN_NUM_PMU_EVENTS); - if (bit < 0) { - dev_warn(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n", - node_xp); - clear_bit(hw->idx, ccn->dt.pmu_counters_mask); - return -EAGAIN; - } - hw->config_base = bit; + /* + * We must NOT create groups containing mixed PMUs, although software + * events are acceptable (for example to create a CCN group + * periodically read when a hrtimer aka cpu-clock leader triggers). + */ + if (event->group_leader->pmu != event->pmu && + !is_software_event(event->group_leader)) + return -EINVAL; - ccn->dt.pmu_counters[hw->idx].event = event; + list_for_each_entry(sibling, &event->group_leader->sibling_list, + group_entry) + if (sibling->pmu != event->pmu && + !is_software_event(sibling)) + return -EINVAL; return 0; } @@ -835,9 +915,14 @@ static void arm_ccn_pmu_event_start(struct perf_event *event, int flags) arm_ccn_pmu_read_counter(ccn, hw->idx)); hw->state = 0; - if (!ccn->irq_used) + /* + * Pin the timer, so that the overflows are handled by the chosen + * event->cpu (this is the same one as presented in "cpumask" + * attribute). 
+ */ + if (!ccn->irq) hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(), - HRTIMER_MODE_REL); + HRTIMER_MODE_REL_PINNED); /* Set the DT bus input, engaging the counter */ arm_ccn_pmu_xp_dt_config(event, 1); @@ -852,7 +937,7 @@ static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags) /* Disable counting, setting the DT bus to pass-through mode */ arm_ccn_pmu_xp_dt_config(event, 0); - if (!ccn->irq_used) + if (!ccn->irq) hrtimer_cancel(&ccn->dt.hrtimer); /* Let the DT bus drain */ @@ -1014,8 +1099,13 @@ static void arm_ccn_pmu_event_config(struct perf_event *event) static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) { + int err; struct hw_perf_event *hw = &event->hw; + err = arm_ccn_pmu_event_alloc(event); + if (err) + return err; + arm_ccn_pmu_event_config(event); hw->state = PERF_HES_STOPPED; @@ -1029,6 +1119,8 @@ static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) static void arm_ccn_pmu_event_del(struct perf_event *event, int flags) { arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); + + arm_ccn_pmu_event_release(event); } static void arm_ccn_pmu_event_read(struct perf_event *event) @@ -1079,12 +1171,40 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer) } +static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb, + unsigned long action, void *hcpu) +{ + struct arm_ccn_dt *dt = container_of(nb, struct arm_ccn_dt, cpu_nb); + struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); + unsigned int cpu = (long)hcpu; /* for (long) see kernel/cpu.c */ + unsigned int target; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DOWN_PREPARE: + if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) + break; + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + break; + perf_pmu_migrate_context(&dt->pmu, cpu, target); + cpumask_set_cpu(target, &dt->cpu); + if (ccn->irq) + WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0); + default: + break; + } + + return NOTIFY_OK; +} + + static DEFINE_IDA(arm_ccn_pmu_ida); static int arm_ccn_pmu_init(struct arm_ccn *ccn) { int i; char *name; + int err; /* Initialize DT subsystem */ ccn->dt.base = ccn->base + CCN_REGION_SIZE; @@ -1136,20 +1256,58 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) }; /* No overflow interrupt? Have to use a timer instead. */ - if (!ccn->irq_used) { + if (!ccn->irq) { dev_info(ccn->dev, "No access to interrupts, using timer.\n"); hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler; } - return perf_pmu_register(&ccn->dt.pmu, name, -1); + /* Pick one CPU which we will use to collect data from CCN... */ + cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu); + + /* + * ... and change the selection when it goes offline. Priority is + * picked to have a chance to migrate events before perf is notified. 
+ */ + ccn->dt.cpu_nb.notifier_call = arm_ccn_pmu_cpu_notifier; + ccn->dt.cpu_nb.priority = CPU_PRI_PERF + 1, + err = register_cpu_notifier(&ccn->dt.cpu_nb); + if (err) + goto error_cpu_notifier; + + /* Also make sure that the overflow interrupt is handled by this CPU */ + if (ccn->irq) { + err = irq_set_affinity(ccn->irq, &ccn->dt.cpu); + if (err) { + dev_err(ccn->dev, "Failed to set interrupt affinity!\n"); + goto error_set_affinity; + } + } + + err = perf_pmu_register(&ccn->dt.pmu, name, -1); + if (err) + goto error_pmu_register; + + return 0; + +error_pmu_register: +error_set_affinity: + unregister_cpu_notifier(&ccn->dt.cpu_nb); +error_cpu_notifier: + ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); + for (i = 0; i < ccn->num_xps; i++) + writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); + writel(0, ccn->dt.base + CCN_DT_PMCR); + return err; } static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) { int i; + irq_set_affinity(ccn->irq, cpu_possible_mask); + unregister_cpu_notifier(&ccn->dt.cpu_nb); for (i = 0; i < ccn->num_xps; i++) writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); writel(0, ccn->dt.base + CCN_DT_PMCR); @@ -1285,6 +1443,7 @@ static int arm_ccn_probe(struct platform_device *pdev) { struct arm_ccn *ccn; struct resource *res; + unsigned int irq; int err; ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL); @@ -1309,6 +1468,7 @@ static int arm_ccn_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) return -EINVAL; + irq = res->start; /* Check if we can use the interrupt */ writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE, @@ -1318,13 +1478,12 @@ static int arm_ccn_probe(struct platform_device *pdev) /* Can set 'disable' bits, so can acknowledge interrupts */ writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE, ccn->base + CCN_MN_ERRINT_STATUS); - err = devm_request_irq(ccn->dev, res->start, - arm_ccn_irq_handler, 0, dev_name(ccn->dev), - ccn); + err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, 0, + dev_name(ccn->dev), ccn); if (err) return err; - ccn->irq_used = 1; + ccn->irq = irq; } diff --git a/kernel/drivers/bus/brcmstb_gisb.c b/kernel/drivers/bus/brcmstb_gisb.c index 738612c45..f364fa4d2 100644 --- a/kernel/drivers/bus/brcmstb_gisb.c +++ b/kernel/drivers/bus/brcmstb_gisb.c @@ -91,6 +91,7 @@ static const int gisb_offsets_bcm7445[] = { struct brcmstb_gisb_arb_device { void __iomem *base; const int *gisb_offsets; + bool big_endian; struct mutex lock; struct list_head next; u32 valid_mask; @@ -108,7 +109,10 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg) if (offset == -1) return 1; - return ioread32(gdev->base + offset); + if (gdev->big_endian) + return ioread32be(gdev->base + offset); + else + return ioread32(gdev->base + offset); } static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg) @@ -117,7 +121,11 @@ static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg) if (offset == -1) return; - iowrite32(val, gdev->base + reg); + + if (gdev->big_endian) + iowrite32be(val, gdev->base + reg); + else + iowrite32(val, gdev->base + reg); } static ssize_t gisb_arb_get_timeout(struct device *dev, @@ -296,6 +304,7 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev) return -EINVAL; } gdev->gisb_offsets = of_id->data; + gdev->big_endian = of_device_is_big_endian(dn); err = devm_request_irq(&pdev->dev, timeout_irq, brcmstb_gisb_timeout_handler, 0, pdev->name, diff --git a/kernel/drivers/bus/mips_cdmm.c b/kernel/drivers/bus/mips_cdmm.c index 
ab3bde16e..1c543effe 100644 --- a/kernel/drivers/bus/mips_cdmm.c +++ b/kernel/drivers/bus/mips_cdmm.c @@ -332,6 +332,18 @@ static phys_addr_t mips_cdmm_cur_base(void) } /** + * mips_cdmm_phys_base() - Choose a physical base address for CDMM region. + * + * Picking a suitable physical address at which to map the CDMM region is + * platform specific, so this weak function can be overridden by platform + * code to pick a suitable value if none is configured by the bootloader. + */ +phys_addr_t __weak mips_cdmm_phys_base(void) +{ + return 0; +} + +/** * mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable. * @bus: Pointer to bus information for current CPU. * IS_ERR(bus) is checked, so no need for caller to check. @@ -368,7 +380,7 @@ static int mips_cdmm_setup(struct mips_cdmm_bus *bus) if (!bus->phys) bus->phys = mips_cdmm_cur_base(); /* Otherwise, ask platform code for suggestions */ - if (!bus->phys && mips_cdmm_phys_base) + if (!bus->phys) bus->phys = mips_cdmm_phys_base(); /* Otherwise, copy what other CPUs have done */ if (!bus->phys) diff --git a/kernel/drivers/bus/mvebu-mbus.c b/kernel/drivers/bus/mvebu-mbus.c index 6f047dcb9..c43c3d2ba 100644 --- a/kernel/drivers/bus/mvebu-mbus.c +++ b/kernel/drivers/bus/mvebu-mbus.c @@ -57,6 +57,7 @@ #include <linux/of_address.h> #include <linux/debugfs.h> #include <linux/log2.h> +#include <linux/memblock.h> #include <linux/syscore_ops.h> /* @@ -152,13 +153,39 @@ struct mvebu_mbus_state { static struct mvebu_mbus_state mbus_state; +/* + * We provide two variants of the mv_mbus_dram_info() function: + * + * - The normal one, where the described DRAM ranges may overlap with + * the I/O windows, but for which the DRAM ranges are guaranteed to + * have a power of two size. Such ranges are suitable for the DMA + * masters that only DMA between the RAM and the device, which is + * actually all devices except the crypto engines. + * + * - The 'nooverlap' one, where the described DRAM ranges are + * guaranteed to not overlap with the I/O windows, but for which the + * DRAM ranges will not have power of two sizes. They will only be + * aligned on a 64 KB boundary, and have a size multiple of 64 + * KB. Such ranges are suitable for the DMA masters that DMA between + * the crypto SRAM (which is mapped through an I/O window) and a + * device. This is the case for the crypto engines. + */ + static struct mbus_dram_target_info mvebu_mbus_dram_info; +static struct mbus_dram_target_info mvebu_mbus_dram_info_nooverlap; + const struct mbus_dram_target_info *mv_mbus_dram_info(void) { return &mvebu_mbus_dram_info; } EXPORT_SYMBOL_GPL(mv_mbus_dram_info); +const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void) +{ + return &mvebu_mbus_dram_info_nooverlap; +} +EXPORT_SYMBOL_GPL(mv_mbus_dram_info_nooverlap); + /* Checks whether the given window has remap capability */ static bool mvebu_mbus_window_is_remappable(struct mvebu_mbus_state *mbus, const int win) @@ -576,6 +603,95 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win) return MVEBU_MBUS_NO_REMAP; } +/* + * Use the memblock information to find the MBus bridge hole in the + * physical address space. + */ +static void __init +mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end) +{ + struct memblock_region *r; + uint64_t s = 0; + + for_each_memblock(memory, r) { + /* + * This part of the memory is above 4 GB, so we don't + * care for the MBus bridge hole. 
+ */ + if (r->base >= 0x100000000ULL) + continue; + + /* + * The MBus bridge hole is at the end of the RAM under + * the 4 GB limit. + */ + if (r->base + r->size > s) + s = r->base + r->size; + } + + *start = s; + *end = 0x100000000ULL; +} + +/* + * This function fills in the mvebu_mbus_dram_info_nooverlap data + * structure, by looking at the mvebu_mbus_dram_info data, and + * removing the parts of it that overlap with I/O windows. + */ +static void __init +mvebu_mbus_setup_cpu_target_nooverlap(struct mvebu_mbus_state *mbus) +{ + uint64_t mbus_bridge_base, mbus_bridge_end; + int cs_nooverlap = 0; + int i; + + mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end); + + for (i = 0; i < mvebu_mbus_dram_info.num_cs; i++) { + struct mbus_dram_window *w; + u64 base, size, end; + + w = &mvebu_mbus_dram_info.cs[i]; + base = w->base; + size = w->size; + end = base + size; + + /* + * The CS is fully enclosed inside the MBus bridge + * area, so ignore it. + */ + if (base >= mbus_bridge_base && end <= mbus_bridge_end) + continue; + + /* + * Beginning of CS overlaps with end of MBus, raise CS + * base address, and shrink its size. + */ + if (base >= mbus_bridge_base && end > mbus_bridge_end) { + size -= mbus_bridge_end - base; + base = mbus_bridge_end; + } + + /* + * End of CS overlaps with beginning of MBus, shrink + * CS size. + */ + if (base < mbus_bridge_base && end > mbus_bridge_base) + size -= end - mbus_bridge_base; + + w = &mvebu_mbus_dram_info_nooverlap.cs[cs_nooverlap++]; + w->cs_index = i; + w->mbus_attr = 0xf & ~(1 << i); + if (mbus->hw_io_coherency) + w->mbus_attr |= ATTR_HW_COHERENCY; + w->base = base; + w->size = size; + } + + mvebu_mbus_dram_info_nooverlap.mbus_dram_target_id = TARGET_DDR; + mvebu_mbus_dram_info_nooverlap.num_cs = cs_nooverlap; +} + static void __init mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) { @@ -964,6 +1080,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, mvebu_mbus_disable_window(mbus, win); mbus->soc->setup_cpu_target(mbus); + mvebu_mbus_setup_cpu_target_nooverlap(mbus); if (is_coherent) writel(UNIT_SYNC_BARRIER_ALL, diff --git a/kernel/drivers/bus/omap-ocp2scp.c b/kernel/drivers/bus/omap-ocp2scp.c index 9f1856948..bf500e0e7 100644 --- a/kernel/drivers/bus/omap-ocp2scp.c +++ b/kernel/drivers/bus/omap-ocp2scp.c @@ -117,7 +117,7 @@ static struct platform_driver omap_ocp2scp_driver = { module_platform_driver(omap_ocp2scp_driver); -MODULE_ALIAS("platform: omap-ocp2scp"); +MODULE_ALIAS("platform:omap-ocp2scp"); MODULE_AUTHOR("Texas Instruments Inc."); MODULE_DESCRIPTION("OMAP OCP2SCP driver"); MODULE_LICENSE("GPL v2"); diff --git a/kernel/drivers/bus/omap_l3_noc.c b/kernel/drivers/bus/omap_l3_noc.c index ebee57d71..5012e3ad1 100644 --- a/kernel/drivers/bus/omap_l3_noc.c +++ b/kernel/drivers/bus/omap_l3_noc.c @@ -301,7 +301,7 @@ static int omap_l3_probe(struct platform_device *pdev) return ret; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP /** * l3_resume_noirq() - resume function for l3_noc @@ -347,7 +347,7 @@ static int l3_resume_noirq(struct device *dev) } static const struct dev_pm_ops l3_dev_pm_ops = { - .resume_noirq = l3_resume_noirq, + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, l3_resume_noirq) }; #define L3_DEV_PM_OPS (&l3_dev_pm_ops) diff --git a/kernel/drivers/bus/sunxi-rsb.c b/kernel/drivers/bus/sunxi-rsb.c new file mode 100644 index 000000000..25996e256 --- /dev/null +++ b/kernel/drivers/bus/sunxi-rsb.c @@ -0,0 +1,783 @@ +/* + * RSB (Reduced Serial Bus) driver. 
+ * + * Author: Chen-Yu Tsai <wens@csie.org> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + * + * The RSB controller looks like an SMBus controller which only supports + * byte and word data transfers. But, it differs from standard SMBus + * protocol on several aspects: + * - it uses addresses set at runtime to address slaves. Runtime addresses + * are sent to slaves using their 12bit hardware addresses. Up to 15 + * runtime addresses are available. + * - it adds a parity bit every 8bits of data and address for read and + * write accesses; this replaces the ack bit + * - only one read access is required to read a byte (instead of a write + * followed by a read access in standard SMBus protocol) + * - there's no Ack bit after each read access + * + * This means this bus cannot be used to interface with standard SMBus + * devices. Devices known to support this interface include the AXP223, + * AXP809, and AXP806 PMICs, and the AC100 audio codec, all from X-Powers. + * + * A description of the operation and wire protocol can be found in the + * RSB section of Allwinner's A80 user manual, which can be found at + * + * https://github.com/allwinner-zh/documents/tree/master/A80 + * + * This document is officially released by Allwinner. + * + * This driver is based on i2c-sun6i-p2wi.c, the P2WI bus driver. + * + */ + +#include <linux/clk.h> +#include <linux/clk/clk-conf.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <linux/slab.h> +#include <linux/sunxi-rsb.h> +#include <linux/types.h> + +/* RSB registers */ +#define RSB_CTRL 0x0 /* Global control */ +#define RSB_CCR 0x4 /* Clock control */ +#define RSB_INTE 0x8 /* Interrupt controls */ +#define RSB_INTS 0xc /* Interrupt status */ +#define RSB_ADDR 0x10 /* Address to send with read/write command */ +#define RSB_DATA 0x1c /* Data to read/write */ +#define RSB_LCR 0x24 /* Line control */ +#define RSB_DMCR 0x28 /* Device mode (init) control */ +#define RSB_CMD 0x2c /* RSB Command */ +#define RSB_DAR 0x30 /* Device address / runtime address */ + +/* CTRL fields */ +#define RSB_CTRL_START_TRANS BIT(7) +#define RSB_CTRL_ABORT_TRANS BIT(6) +#define RSB_CTRL_GLOBAL_INT_ENB BIT(1) +#define RSB_CTRL_SOFT_RST BIT(0) + +/* CLK CTRL fields */ +#define RSB_CCR_SDA_OUT_DELAY(v) (((v) & 0x7) << 8) +#define RSB_CCR_MAX_CLK_DIV 0xff +#define RSB_CCR_CLK_DIV(v) ((v) & RSB_CCR_MAX_CLK_DIV) + +/* STATUS fields */ +#define RSB_INTS_TRANS_ERR_ACK BIT(16) +#define RSB_INTS_TRANS_ERR_DATA_BIT(v) (((v) >> 8) & 0xf) +#define RSB_INTS_TRANS_ERR_DATA GENMASK(11, 8) +#define RSB_INTS_LOAD_BSY BIT(2) +#define RSB_INTS_TRANS_ERR BIT(1) +#define RSB_INTS_TRANS_OVER BIT(0) + +/* LINE CTRL fields*/ +#define RSB_LCR_SCL_STATE BIT(5) +#define RSB_LCR_SDA_STATE BIT(4) +#define RSB_LCR_SCL_CTL BIT(3) +#define RSB_LCR_SCL_CTL_EN BIT(2) +#define RSB_LCR_SDA_CTL BIT(1) +#define RSB_LCR_SDA_CTL_EN BIT(0) + +/* DEVICE MODE CTRL field values */ +#define RSB_DMCR_DEVICE_START BIT(31) +#define RSB_DMCR_MODE_DATA (0x7c << 16) +#define RSB_DMCR_MODE_REG (0x3e << 8) +#define RSB_DMCR_DEV_ADDR 0x00 + +/* CMD values */ +#define RSB_CMD_RD8 0x8b +#define RSB_CMD_RD16 0x9c +#define 
RSB_CMD_RD32 0xa6 +#define RSB_CMD_WR8 0x4e +#define RSB_CMD_WR16 0x59 +#define RSB_CMD_WR32 0x63 +#define RSB_CMD_STRA 0xe8 + +/* DAR fields */ +#define RSB_DAR_RTA(v) (((v) & 0xff) << 16) +#define RSB_DAR_DA(v) ((v) & 0xffff) + +#define RSB_MAX_FREQ 20000000 + +#define RSB_CTRL_NAME "sunxi-rsb" + +struct sunxi_rsb_addr_map { + u16 hwaddr; + u8 rtaddr; +}; + +struct sunxi_rsb { + struct device *dev; + void __iomem *regs; + struct clk *clk; + struct reset_control *rstc; + struct completion complete; + struct mutex lock; + unsigned int status; +}; + +/* bus / slave device related functions */ +static struct bus_type sunxi_rsb_bus; + +static int sunxi_rsb_device_match(struct device *dev, struct device_driver *drv) +{ + return of_driver_match_device(dev, drv); +} + +static int sunxi_rsb_device_probe(struct device *dev) +{ + const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver); + struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev); + int ret; + + if (!drv->probe) + return -ENODEV; + + if (!rdev->irq) { + int irq = -ENOENT; + + if (dev->of_node) + irq = of_irq_get(dev->of_node, 0); + + if (irq == -EPROBE_DEFER) + return irq; + if (irq < 0) + irq = 0; + + rdev->irq = irq; + } + + ret = of_clk_set_defaults(dev->of_node, false); + if (ret < 0) + return ret; + + return drv->probe(rdev); +} + +static int sunxi_rsb_device_remove(struct device *dev) +{ + const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver); + + return drv->remove(to_sunxi_rsb_device(dev)); +} + +static struct bus_type sunxi_rsb_bus = { + .name = RSB_CTRL_NAME, + .match = sunxi_rsb_device_match, + .probe = sunxi_rsb_device_probe, + .remove = sunxi_rsb_device_remove, +}; + +static void sunxi_rsb_dev_release(struct device *dev) +{ + struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev); + + kfree(rdev); +} + +/** + * sunxi_rsb_device_create() - allocate and add an RSB device + * @rsb: RSB controller + * @node: RSB slave device node + * @hwaddr: RSB slave hardware address + * @rtaddr: RSB slave runtime address + */ +static struct sunxi_rsb_device *sunxi_rsb_device_create(struct sunxi_rsb *rsb, + struct device_node *node, u16 hwaddr, u8 rtaddr) +{ + int err; + struct sunxi_rsb_device *rdev; + + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) + return ERR_PTR(-ENOMEM); + + rdev->rsb = rsb; + rdev->hwaddr = hwaddr; + rdev->rtaddr = rtaddr; + rdev->dev.bus = &sunxi_rsb_bus; + rdev->dev.parent = rsb->dev; + rdev->dev.of_node = node; + rdev->dev.release = sunxi_rsb_dev_release; + + dev_set_name(&rdev->dev, "%s-%x", RSB_CTRL_NAME, hwaddr); + + err = device_register(&rdev->dev); + if (err < 0) { + dev_err(&rdev->dev, "Can't add %s, status %d\n", + dev_name(&rdev->dev), err); + goto err_device_add; + } + + dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev)); + +err_device_add: + put_device(&rdev->dev); + + return ERR_PTR(err); +} + +/** + * sunxi_rsb_device_unregister(): unregister an RSB device + * @rdev: rsb_device to be removed + */ +static void sunxi_rsb_device_unregister(struct sunxi_rsb_device *rdev) +{ + device_unregister(&rdev->dev); +} + +static int sunxi_rsb_remove_devices(struct device *dev, void *data) +{ + struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev); + + if (dev->bus == &sunxi_rsb_bus) + sunxi_rsb_device_unregister(rdev); + + return 0; +} + +/** + * sunxi_rsb_driver_register() - Register device driver with RSB core + * @rdrv: device driver to be associated with slave-device. + * + * This API will register the client driver with the RSB framework. 
+ * It is typically called from the driver's module-init function. + */ +int sunxi_rsb_driver_register(struct sunxi_rsb_driver *rdrv) +{ + rdrv->driver.bus = &sunxi_rsb_bus; + return driver_register(&rdrv->driver); +} +EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register); + +/* common code that starts a transfer */ +static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) +{ + if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) { + dev_dbg(rsb->dev, "RSB transfer still in progress\n"); + return -EBUSY; + } + + reinit_completion(&rsb->complete); + + writel(RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER, + rsb->regs + RSB_INTE); + writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB, + rsb->regs + RSB_CTRL); + + if (!wait_for_completion_io_timeout(&rsb->complete, + msecs_to_jiffies(100))) { + dev_dbg(rsb->dev, "RSB timeout\n"); + + /* abort the transfer */ + writel(RSB_CTRL_ABORT_TRANS, rsb->regs + RSB_CTRL); + + /* clear any interrupt flags */ + writel(readl(rsb->regs + RSB_INTS), rsb->regs + RSB_INTS); + + return -ETIMEDOUT; + } + + if (rsb->status & RSB_INTS_LOAD_BSY) { + dev_dbg(rsb->dev, "RSB busy\n"); + return -EBUSY; + } + + if (rsb->status & RSB_INTS_TRANS_ERR) { + if (rsb->status & RSB_INTS_TRANS_ERR_ACK) { + dev_dbg(rsb->dev, "RSB slave nack\n"); + return -EINVAL; + } + + if (rsb->status & RSB_INTS_TRANS_ERR_DATA) { + dev_dbg(rsb->dev, "RSB transfer data error\n"); + return -EIO; + } + } + + return 0; +} + +static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr, + u32 *buf, size_t len) +{ + u32 cmd; + int ret; + + if (!buf) + return -EINVAL; + + switch (len) { + case 1: + cmd = RSB_CMD_RD8; + break; + case 2: + cmd = RSB_CMD_RD16; + break; + case 4: + cmd = RSB_CMD_RD32; + break; + default: + dev_err(rsb->dev, "Invalid access width: %d\n", len); + return -EINVAL; + } + + mutex_lock(&rsb->lock); + + writel(addr, rsb->regs + RSB_ADDR); + writel(RSB_DAR_RTA(rtaddr), rsb->regs + RSB_DAR); + writel(cmd, rsb->regs + RSB_CMD); + + ret = _sunxi_rsb_run_xfer(rsb); + if (ret) + goto unlock; + + *buf = readl(rsb->regs + RSB_DATA); + +unlock: + mutex_unlock(&rsb->lock); + + return ret; +} + +static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr, + const u32 *buf, size_t len) +{ + u32 cmd; + int ret; + + if (!buf) + return -EINVAL; + + switch (len) { + case 1: + cmd = RSB_CMD_WR8; + break; + case 2: + cmd = RSB_CMD_WR16; + break; + case 4: + cmd = RSB_CMD_WR32; + break; + default: + dev_err(rsb->dev, "Invalid access width: %d\n", len); + return -EINVAL; + } + + mutex_lock(&rsb->lock); + + writel(addr, rsb->regs + RSB_ADDR); + writel(RSB_DAR_RTA(rtaddr), rsb->regs + RSB_DAR); + writel(*buf, rsb->regs + RSB_DATA); + writel(cmd, rsb->regs + RSB_CMD); + ret = _sunxi_rsb_run_xfer(rsb); + + mutex_unlock(&rsb->lock); + + return ret; +} + +/* RSB regmap functions */ +struct sunxi_rsb_ctx { + struct sunxi_rsb_device *rdev; + int size; +}; + +static int regmap_sunxi_rsb_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct sunxi_rsb_ctx *ctx = context; + struct sunxi_rsb_device *rdev = ctx->rdev; + + if (reg > 0xff) + return -EINVAL; + + return sunxi_rsb_read(rdev->rsb, rdev->rtaddr, reg, val, ctx->size); +} + +static int regmap_sunxi_rsb_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + struct sunxi_rsb_ctx *ctx = context; + struct sunxi_rsb_device *rdev = ctx->rdev; + + return sunxi_rsb_write(rdev->rsb, rdev->rtaddr, reg, &val, ctx->size); +} + +static void regmap_sunxi_rsb_free_ctx(void *context) +{ + struct sunxi_rsb_ctx *ctx 
= context; + + kfree(ctx); +} + +static struct regmap_bus regmap_sunxi_rsb = { + .reg_write = regmap_sunxi_rsb_reg_write, + .reg_read = regmap_sunxi_rsb_reg_read, + .free_context = regmap_sunxi_rsb_free_ctx, + .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, + .val_format_endian_default = REGMAP_ENDIAN_NATIVE, +}; + +static struct sunxi_rsb_ctx *regmap_sunxi_rsb_init_ctx(struct sunxi_rsb_device *rdev, + const struct regmap_config *config) +{ + struct sunxi_rsb_ctx *ctx; + + switch (config->val_bits) { + case 8: + case 16: + case 32: + break; + default: + return ERR_PTR(-EINVAL); + } + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + ctx->rdev = rdev; + ctx->size = config->val_bits / 8; + + return ctx; +} + +struct regmap *__devm_regmap_init_sunxi_rsb(struct sunxi_rsb_device *rdev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + struct sunxi_rsb_ctx *ctx = regmap_sunxi_rsb_init_ctx(rdev, config); + + if (IS_ERR(ctx)) + return ERR_CAST(ctx); + + return __devm_regmap_init(&rdev->dev, ®map_sunxi_rsb, ctx, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_sunxi_rsb); + +/* RSB controller driver functions */ +static irqreturn_t sunxi_rsb_irq(int irq, void *dev_id) +{ + struct sunxi_rsb *rsb = dev_id; + u32 status; + + status = readl(rsb->regs + RSB_INTS); + rsb->status = status; + + /* Clear interrupts */ + status &= (RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | + RSB_INTS_TRANS_OVER); + writel(status, rsb->regs + RSB_INTS); + + complete(&rsb->complete); + + return IRQ_HANDLED; +} + +static int sunxi_rsb_init_device_mode(struct sunxi_rsb *rsb) +{ + int ret = 0; + u32 reg; + + /* send init sequence */ + writel(RSB_DMCR_DEVICE_START | RSB_DMCR_MODE_DATA | + RSB_DMCR_MODE_REG | RSB_DMCR_DEV_ADDR, rsb->regs + RSB_DMCR); + + readl_poll_timeout(rsb->regs + RSB_DMCR, reg, + !(reg & RSB_DMCR_DEVICE_START), 100, 250000); + if (reg & RSB_DMCR_DEVICE_START) + ret = -ETIMEDOUT; + + /* clear interrupt status bits */ + writel(readl(rsb->regs + RSB_INTS), rsb->regs + RSB_INTS); + + return ret; +} + +/* + * There are 15 valid runtime addresses, though Allwinner typically + * skips the first, for unknown reasons, and uses the following three. + * + * 0x17, 0x2d, 0x3a, 0x4e, 0x59, 0x63, 0x74, 0x8b, + * 0x9c, 0xa6, 0xb1, 0xc5, 0xd2, 0xe8, 0xff + * + * No designs with 2 RSB slave devices sharing identical hardware + * addresses on the same bus have been seen in the wild. All designs + * use 0x2d for the primary PMIC, 0x3a for the secondary PMIC if + * there is one, and 0x45 for peripheral ICs. + * + * The hardware does not seem to support re-setting runtime addresses. + * Attempts to do so result in the slave devices returning a NACK. + * Hence we just hardcode the mapping here, like Allwinner does. + */ + +static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = { + { 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */ + { 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */ + { 0xe89, 0x4e }, /* Peripheral IC: AC100, ... 
*/ +}; + +static u8 sunxi_rsb_get_rtaddr(u16 hwaddr) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sunxi_rsb_addr_maps); i++) + if (hwaddr == sunxi_rsb_addr_maps[i].hwaddr) + return sunxi_rsb_addr_maps[i].rtaddr; + + return 0; /* 0 is an invalid runtime address */ +} + +static int of_rsb_register_devices(struct sunxi_rsb *rsb) +{ + struct device *dev = rsb->dev; + struct device_node *child, *np = dev->of_node; + u32 hwaddr; + u8 rtaddr; + int ret; + + if (!np) + return -EINVAL; + + /* Runtime addresses for all slaves should be set first */ + for_each_available_child_of_node(np, child) { + dev_dbg(dev, "setting child %s runtime address\n", + child->full_name); + + ret = of_property_read_u32(child, "reg", &hwaddr); + if (ret) { + dev_err(dev, "%s: invalid 'reg' property: %d\n", + child->full_name, ret); + continue; + } + + rtaddr = sunxi_rsb_get_rtaddr(hwaddr); + if (!rtaddr) { + dev_err(dev, "%s: unknown hardware device address\n", + child->full_name); + continue; + } + + /* + * Since no devices have been registered yet, we are the + * only ones using the bus, we can skip locking the bus. + */ + + /* setup command parameters */ + writel(RSB_CMD_STRA, rsb->regs + RSB_CMD); + writel(RSB_DAR_RTA(rtaddr) | RSB_DAR_DA(hwaddr), + rsb->regs + RSB_DAR); + + /* send command */ + ret = _sunxi_rsb_run_xfer(rsb); + if (ret) + dev_warn(dev, "%s: set runtime address failed: %d\n", + child->full_name, ret); + } + + /* Then we start adding devices and probing them */ + for_each_available_child_of_node(np, child) { + struct sunxi_rsb_device *rdev; + + dev_dbg(dev, "adding child %s\n", child->full_name); + + ret = of_property_read_u32(child, "reg", &hwaddr); + if (ret) + continue; + + rtaddr = sunxi_rsb_get_rtaddr(hwaddr); + if (!rtaddr) + continue; + + rdev = sunxi_rsb_device_create(rsb, child, hwaddr, rtaddr); + if (IS_ERR(rdev)) + dev_err(dev, "failed to add child device %s: %ld\n", + child->full_name, PTR_ERR(rdev)); + } + + return 0; +} + +static const struct of_device_id sunxi_rsb_of_match_table[] = { + { .compatible = "allwinner,sun8i-a23-rsb" }, + {} +}; +MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table); + +static int sunxi_rsb_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *r; + struct sunxi_rsb *rsb; + unsigned long p_clk_freq; + u32 clk_delay, clk_freq = 3000000; + int clk_div, irq, ret; + u32 reg; + + of_property_read_u32(np, "clock-frequency", &clk_freq); + if (clk_freq > RSB_MAX_FREQ) { + dev_err(dev, + "clock-frequency (%u Hz) is too high (max = 20MHz)\n", + clk_freq); + return -EINVAL; + } + + rsb = devm_kzalloc(dev, sizeof(*rsb), GFP_KERNEL); + if (!rsb) + return -ENOMEM; + + rsb->dev = dev; + platform_set_drvdata(pdev, rsb); + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rsb->regs = devm_ioremap_resource(dev, r); + if (IS_ERR(rsb->regs)) + return PTR_ERR(rsb->regs); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "failed to retrieve irq: %d\n", irq); + return irq; + } + + rsb->clk = devm_clk_get(dev, NULL); + if (IS_ERR(rsb->clk)) { + ret = PTR_ERR(rsb->clk); + dev_err(dev, "failed to retrieve clk: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(rsb->clk); + if (ret) { + dev_err(dev, "failed to enable clk: %d\n", ret); + return ret; + } + + p_clk_freq = clk_get_rate(rsb->clk); + + rsb->rstc = devm_reset_control_get(dev, NULL); + if (IS_ERR(rsb->rstc)) { + ret = PTR_ERR(rsb->rstc); + dev_err(dev, "failed to retrieve reset controller: %d\n", ret); + goto 
err_clk_disable; + } + + ret = reset_control_deassert(rsb->rstc); + if (ret) { + dev_err(dev, "failed to deassert reset line: %d\n", ret); + goto err_clk_disable; + } + + init_completion(&rsb->complete); + mutex_init(&rsb->lock); + + /* reset the controller */ + writel(RSB_CTRL_SOFT_RST, rsb->regs + RSB_CTRL); + readl_poll_timeout(rsb->regs + RSB_CTRL, reg, + !(reg & RSB_CTRL_SOFT_RST), 1000, 100000); + + /* + * Clock frequency and delay calculation code is from + * Allwinner U-boot sources. + * + * From A83 user manual: + * bus clock frequency = parent clock frequency / (2 * (divider + 1)) + */ + clk_div = p_clk_freq / clk_freq / 2; + if (!clk_div) + clk_div = 1; + else if (clk_div > RSB_CCR_MAX_CLK_DIV + 1) + clk_div = RSB_CCR_MAX_CLK_DIV + 1; + + clk_delay = clk_div >> 1; + if (!clk_delay) + clk_delay = 1; + + dev_info(dev, "RSB running at %lu Hz\n", p_clk_freq / clk_div / 2); + writel(RSB_CCR_SDA_OUT_DELAY(clk_delay) | RSB_CCR_CLK_DIV(clk_div - 1), + rsb->regs + RSB_CCR); + + ret = devm_request_irq(dev, irq, sunxi_rsb_irq, 0, RSB_CTRL_NAME, rsb); + if (ret) { + dev_err(dev, "can't register interrupt handler irq %d: %d\n", + irq, ret); + goto err_reset_assert; + } + + /* initialize all devices on the bus into RSB mode */ + ret = sunxi_rsb_init_device_mode(rsb); + if (ret) + dev_warn(dev, "Initialize device mode failed: %d\n", ret); + + of_rsb_register_devices(rsb); + + return 0; + +err_reset_assert: + reset_control_assert(rsb->rstc); + +err_clk_disable: + clk_disable_unprepare(rsb->clk); + + return ret; +} + +static int sunxi_rsb_remove(struct platform_device *pdev) +{ + struct sunxi_rsb *rsb = platform_get_drvdata(pdev); + + device_for_each_child(rsb->dev, NULL, sunxi_rsb_remove_devices); + reset_control_assert(rsb->rstc); + clk_disable_unprepare(rsb->clk); + + return 0; +} + +static struct platform_driver sunxi_rsb_driver = { + .probe = sunxi_rsb_probe, + .remove = sunxi_rsb_remove, + .driver = { + .name = RSB_CTRL_NAME, + .of_match_table = sunxi_rsb_of_match_table, + }, +}; + +static int __init sunxi_rsb_init(void) +{ + int ret; + + ret = bus_register(&sunxi_rsb_bus); + if (ret) { + pr_err("failed to register sunxi sunxi_rsb bus: %d\n", ret); + return ret; + } + + return platform_driver_register(&sunxi_rsb_driver); +} +module_init(sunxi_rsb_init); + +static void __exit sunxi_rsb_exit(void) +{ + platform_driver_unregister(&sunxi_rsb_driver); + bus_unregister(&sunxi_rsb_bus); +} +module_exit(sunxi_rsb_exit); + +MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>"); +MODULE_DESCRIPTION("Allwinner sunXi Reduced Serial Bus controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/kernel/drivers/bus/vexpress-config.c b/kernel/drivers/bus/vexpress-config.c index a64763b6b..6575c0fe6 100644 --- a/kernel/drivers/bus/vexpress-config.c +++ b/kernel/drivers/bus/vexpress-config.c @@ -107,7 +107,7 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev) if (!res) return ERR_PTR(-ENOMEM); - regmap = bridge->ops->regmap_init(dev, bridge->context); + regmap = (bridge->ops->regmap_init)(dev, bridge->context); if (IS_ERR(regmap)) { devres_free(res); return regmap; |
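
For illustration of the mips_cdmm.c change above: mips_cdmm_phys_base() is now a __weak stub returning 0, so a platform that wants a fixed CDMM window only needs to provide a strong definition. A minimal sketch, assuming the prototype comes from asm/cdmm.h and using an invented example address:

/* Hypothetical platform code overriding the __weak default added above.
 * The prototype is assumed to be declared in asm/cdmm.h; 0x1fc10000 is an
 * invented example, a real platform must pick an unused, suitably aligned
 * physical window for the CDMM region.
 */
#include <linux/types.h>
#include <asm/cdmm.h>

phys_addr_t mips_cdmm_phys_base(void)
{
        return 0x1fc10000;
}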
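
The mvebu_mbus_setup_cpu_target_nooverlap() logic above trims each DRAM window against the MBus bridge hole found by the memblock scan. Below is a standalone sketch of that trimming with assumed example addresses (one 4 GB chip select and a bridge hole in the last 512 MB below 4 GB); it mirrors the three overlap cases but is not the driver code itself:

/* Standalone sketch of the window trimming done by
 * mvebu_mbus_setup_cpu_target_nooverlap() above.  The addresses are an
 * assumed example: one 4 GB DRAM CS and an MBus bridge hole occupying
 * the last 512 MB below the 4 GB boundary.
 */
#include <stdint.h>
#include <stdio.h>

static void trim_window(uint64_t *base, uint64_t *size,
                        uint64_t hole_start, uint64_t hole_end)
{
        uint64_t end = *base + *size;

        if (*base >= hole_start && end <= hole_end)
                *size = 0;                      /* CS fully inside the hole: dropped */
        else if (*base >= hole_start && end > hole_end) {
                *size -= hole_end - *base;      /* hole covers the start of the CS */
                *base = hole_end;
        } else if (*base < hole_start && end > hole_start)
                *size -= end - hole_start;      /* hole covers the end of the CS */
}

int main(void)
{
        uint64_t base = 0, size = 0x100000000ULL;       /* 4 GB chip select */
        uint64_t hole_start = 0xE0000000ULL;            /* from the memblock scan */
        uint64_t hole_end = 0x100000000ULL;

        trim_window(&base, &size, hole_start, hole_end);
        /* prints: nooverlap window: base 0, size 0xe0000000 */
        printf("nooverlap window: base %#llx, size %#llx\n",
               (unsigned long long)base, (unsigned long long)size);
        return 0;
}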
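
The divider programmed into RSB_CCR by sunxi_rsb_probe() above follows the A83 relation bus clock = parent clock / (2 * (divider + 1)). Below is a standalone sketch of that arithmetic, assuming a 24 MHz parent clock purely for illustration (the real rate comes from clk_get_rate() on the controller's bus clock):

/* Standalone sketch of the divider math in sunxi_rsb_probe().  The 24 MHz
 * parent clock is an assumption for illustration only.
 */
#include <stdio.h>

#define RSB_CCR_MAX_CLK_DIV 0xff

int main(void)
{
        unsigned long p_clk_freq = 24000000;    /* assumed parent clock rate */
        unsigned long clk_freq = 3000000;       /* default "clock-frequency" */
        unsigned long clk_div, clk_delay;

        /* bus clock = parent / (2 * (divider + 1)), so compute divider + 1 */
        clk_div = p_clk_freq / clk_freq / 2;
        if (!clk_div)
                clk_div = 1;
        else if (clk_div > RSB_CCR_MAX_CLK_DIV + 1)
                clk_div = RSB_CCR_MAX_CLK_DIV + 1;

        /* SDA output delay: half the divider, at least 1 */
        clk_delay = clk_div >> 1;
        if (!clk_delay)
                clk_delay = 1;

        /* 24 MHz / (2 * 4) = 3 MHz; the register field gets clk_div - 1 = 3 */
        printf("divider field %lu, delay %lu, bus runs at %lu Hz\n",
               clk_div - 1, clk_delay, p_clk_freq / clk_div / 2);
        return 0;
}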
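
Finally, a sketch of how a slave driver is expected to sit on top of the bus and regmap glue registered by sunxi-rsb.c above. It assumes the declarations from <linux/sunxi-rsb.h> (struct sunxi_rsb_device, struct sunxi_rsb_driver and the devm_regmap_init_sunxi_rsb() wrapper), which belong to the same series but are not part of this diff; the compatible string, device name and register offset are invented for illustration:

/* Hypothetical RSB slave driver: reads one register of a PMIC-like device
 * through the regmap bus glue registered above.  All names marked "demo"
 * or "vendor," are made up; only the sunxi-rsb API itself comes from the
 * patch and its accompanying header.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/sunxi-rsb.h>

#define DEMO_REG_CHIP_ID        0x03    /* made-up register offset */

static const struct regmap_config demo_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,          /* maps to RSB_CMD_RD8/WR8 above */
        .max_register = 0xff,
};

static int demo_rsb_probe(struct sunxi_rsb_device *rdev)
{
        struct regmap *regmap;
        unsigned int id;
        int ret;

        /* context allocation and free_context are handled by the RSB core */
        regmap = devm_regmap_init_sunxi_rsb(rdev, &demo_regmap_config);
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);

        /* ends up in regmap_sunxi_rsb_reg_read() -> sunxi_rsb_read() */
        ret = regmap_read(regmap, DEMO_REG_CHIP_ID, &id);
        if (ret)
                return ret;

        dev_info(&rdev->dev, "chip id %#x (runtime address %#x)\n",
                 id, rdev->rtaddr);
        return 0;
}

static int demo_rsb_remove(struct sunxi_rsb_device *rdev)
{
        return 0;
}

static const struct of_device_id demo_rsb_of_match[] = {
        { .compatible = "vendor,demo-rsb-slave" },      /* illustrative only */
        { }
};
MODULE_DEVICE_TABLE(of, demo_rsb_of_match);

static struct sunxi_rsb_driver demo_rsb_driver = {
        .probe = demo_rsb_probe,
        .remove = demo_rsb_remove,
        .driver = {
                .name = "demo-rsb-slave",
                .of_match_table = demo_rsb_of_match,
        },
};

static int __init demo_rsb_init(void)
{
        return sunxi_rsb_driver_register(&demo_rsb_driver);
}
module_init(demo_rsb_init);

static void __exit demo_rsb_exit(void)
{
        driver_unregister(&demo_rsb_driver.driver);
}
module_exit(demo_rsb_exit);

MODULE_LICENSE("GPL v2");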