path: root/kernel/arch/x86/platform
author     José Pekkarinen <jose.pekkarinen@nokia.com>	2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>	2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3
parent     f93b97fd65072de626c074dbe099a1fff05ce060
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download
page. During the rebase, one patch collided: "Force tick interrupt and
get rid of softirq magic" (I70131fb85). The colliding hunks were
dropped because their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/x86/platform')
-rw-r--r--  kernel/arch/x86/platform/Makefile                               |   2
-rw-r--r--  kernel/arch/x86/platform/atom/Makefile                          |   2
-rw-r--r--  kernel/arch/x86/platform/atom/pmc_atom.c                        | 460
-rw-r--r--  kernel/arch/x86/platform/atom/punit_atom_debug.c                | 183
-rw-r--r--  kernel/arch/x86/platform/efi/efi-bgrt.c                         |   9
-rw-r--r--  kernel/arch/x86/platform/efi/efi.c                              | 125
-rw-r--r--  kernel/arch/x86/platform/intel-mid/device_libs/platform_wdt.c  |   5
-rw-r--r--  kernel/arch/x86/platform/intel-mid/intel-mid.c                  |  20
-rw-r--r--  kernel/arch/x86/platform/intel-mid/intel_mid_vrtc.c             |   3
-rw-r--r--  kernel/arch/x86/platform/intel-mid/sfi.c                        |  33
-rw-r--r--  kernel/arch/x86/platform/intel/Makefile                         |   1
-rw-r--r--  kernel/arch/x86/platform/intel/iosf_mbi.c                       | 327
-rw-r--r--  kernel/arch/x86/platform/sfi/sfi.c                              |   7
-rw-r--r--  kernel/arch/x86/platform/uv/uv_irq.c                            | 298
-rw-r--r--  kernel/arch/x86/platform/uv/uv_nmi.c                            |  59
-rw-r--r--  kernel/arch/x86/platform/uv/uv_time.c                           |  37
16 files changed, 1272 insertions(+), 299 deletions(-)
diff --git a/kernel/arch/x86/platform/Makefile b/kernel/arch/x86/platform/Makefile
index a62e0be3a..184842ef3 100644
--- a/kernel/arch/x86/platform/Makefile
+++ b/kernel/arch/x86/platform/Makefile
@@ -1,9 +1,11 @@
# Platform specific code goes here
+obj-y += atom/
obj-y += ce4100/
obj-y += efi/
obj-y += geode/
obj-y += goldfish/
obj-y += iris/
+obj-y += intel/
obj-y += intel-mid/
obj-y += intel-quark/
obj-y += olpc/
diff --git a/kernel/arch/x86/platform/atom/Makefile b/kernel/arch/x86/platform/atom/Makefile
new file mode 100644
index 000000000..40983f5b0
--- /dev/null
+++ b/kernel/arch/x86/platform/atom/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
+obj-$(CONFIG_PUNIT_ATOM_DEBUG) += punit_atom_debug.o
diff --git a/kernel/arch/x86/platform/atom/pmc_atom.c b/kernel/arch/x86/platform/atom/pmc_atom.c
new file mode 100644
index 000000000..964ff4fc6
--- /dev/null
+++ b/kernel/arch/x86/platform/atom/pmc_atom.c
@@ -0,0 +1,460 @@
+/*
+ * Intel Atom SOC Power Management Controller Driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/io.h>
+
+#include <asm/pmc_atom.h>
+
+struct pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+struct pmc_reg_map {
+ const struct pmc_bit_map *d3_sts_0;
+ const struct pmc_bit_map *d3_sts_1;
+ const struct pmc_bit_map *func_dis;
+ const struct pmc_bit_map *func_dis_2;
+ const struct pmc_bit_map *pss;
+};
+
+struct pmc_dev {
+ u32 base_addr;
+ void __iomem *regmap;
+ const struct pmc_reg_map *map;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
+ bool init;
+};
+
+static struct pmc_dev pmc_device;
+static u32 acpi_base_addr;
+
+static const struct pmc_bit_map d3_sts_0_map[] = {
+ {"LPSS1_F0_DMA", BIT_LPSS1_F0_DMA},
+ {"LPSS1_F1_PWM1", BIT_LPSS1_F1_PWM1},
+ {"LPSS1_F2_PWM2", BIT_LPSS1_F2_PWM2},
+ {"LPSS1_F3_HSUART1", BIT_LPSS1_F3_HSUART1},
+ {"LPSS1_F4_HSUART2", BIT_LPSS1_F4_HSUART2},
+ {"LPSS1_F5_SPI", BIT_LPSS1_F5_SPI},
+ {"LPSS1_F6_Reserved", BIT_LPSS1_F6_XXX},
+ {"LPSS1_F7_Reserved", BIT_LPSS1_F7_XXX},
+ {"SCC_EMMC", BIT_SCC_EMMC},
+ {"SCC_SDIO", BIT_SCC_SDIO},
+ {"SCC_SDCARD", BIT_SCC_SDCARD},
+ {"SCC_MIPI", BIT_SCC_MIPI},
+ {"HDA", BIT_HDA},
+ {"LPE", BIT_LPE},
+ {"OTG", BIT_OTG},
+ {"USH", BIT_USH},
+ {"GBE", BIT_GBE},
+ {"SATA", BIT_SATA},
+ {"USB_EHCI", BIT_USB_EHCI},
+ {"SEC", BIT_SEC},
+ {"PCIE_PORT0", BIT_PCIE_PORT0},
+ {"PCIE_PORT1", BIT_PCIE_PORT1},
+ {"PCIE_PORT2", BIT_PCIE_PORT2},
+ {"PCIE_PORT3", BIT_PCIE_PORT3},
+ {"LPSS2_F0_DMA", BIT_LPSS2_F0_DMA},
+ {"LPSS2_F1_I2C1", BIT_LPSS2_F1_I2C1},
+ {"LPSS2_F2_I2C2", BIT_LPSS2_F2_I2C2},
+ {"LPSS2_F3_I2C3", BIT_LPSS2_F3_I2C3},
+ {"LPSS2_F3_I2C4", BIT_LPSS2_F4_I2C4},
+ {"LPSS2_F5_I2C5", BIT_LPSS2_F5_I2C5},
+ {"LPSS2_F6_I2C6", BIT_LPSS2_F6_I2C6},
+ {"LPSS2_F7_I2C7", BIT_LPSS2_F7_I2C7},
+ {},
+};
+
+static struct pmc_bit_map byt_d3_sts_1_map[] = {
+ {"SMB", BIT_SMB},
+ {"OTG_SS_PHY", BIT_OTG_SS_PHY},
+ {"USH_SS_PHY", BIT_USH_SS_PHY},
+ {"DFX", BIT_DFX},
+ {},
+};
+
+static struct pmc_bit_map cht_d3_sts_1_map[] = {
+ {"SMB", BIT_SMB},
+ {"GMM", BIT_STS_GMM},
+ {"ISH", BIT_STS_ISH},
+ {},
+};
+
+static struct pmc_bit_map cht_func_dis_2_map[] = {
+ {"SMB", BIT_SMB},
+ {"GMM", BIT_FD_GMM},
+ {"ISH", BIT_FD_ISH},
+ {},
+};
+
+static const struct pmc_bit_map byt_pss_map[] = {
+ {"GBE", PMC_PSS_BIT_GBE},
+ {"SATA", PMC_PSS_BIT_SATA},
+ {"HDA", PMC_PSS_BIT_HDA},
+ {"SEC", PMC_PSS_BIT_SEC},
+ {"PCIE", PMC_PSS_BIT_PCIE},
+ {"LPSS", PMC_PSS_BIT_LPSS},
+ {"LPE", PMC_PSS_BIT_LPE},
+ {"DFX", PMC_PSS_BIT_DFX},
+ {"USH_CTRL", PMC_PSS_BIT_USH_CTRL},
+ {"USH_SUS", PMC_PSS_BIT_USH_SUS},
+ {"USH_VCCS", PMC_PSS_BIT_USH_VCCS},
+ {"USH_VCCA", PMC_PSS_BIT_USH_VCCA},
+ {"OTG_CTRL", PMC_PSS_BIT_OTG_CTRL},
+ {"OTG_VCCS", PMC_PSS_BIT_OTG_VCCS},
+ {"OTG_VCCA_CLK", PMC_PSS_BIT_OTG_VCCA_CLK},
+ {"OTG_VCCA", PMC_PSS_BIT_OTG_VCCA},
+ {"USB", PMC_PSS_BIT_USB},
+ {"USB_SUS", PMC_PSS_BIT_USB_SUS},
+ {},
+};
+
+static const struct pmc_bit_map cht_pss_map[] = {
+ {"SATA", PMC_PSS_BIT_SATA},
+ {"HDA", PMC_PSS_BIT_HDA},
+ {"SEC", PMC_PSS_BIT_SEC},
+ {"PCIE", PMC_PSS_BIT_PCIE},
+ {"LPSS", PMC_PSS_BIT_LPSS},
+ {"LPE", PMC_PSS_BIT_LPE},
+ {"UFS", PMC_PSS_BIT_CHT_UFS},
+ {"UXD", PMC_PSS_BIT_CHT_UXD},
+ {"UXD_FD", PMC_PSS_BIT_CHT_UXD_FD},
+ {"UX_ENG", PMC_PSS_BIT_CHT_UX_ENG},
+ {"USB_SUS", PMC_PSS_BIT_CHT_USB_SUS},
+ {"GMM", PMC_PSS_BIT_CHT_GMM},
+ {"ISH", PMC_PSS_BIT_CHT_ISH},
+ {"DFX_MASTER", PMC_PSS_BIT_CHT_DFX_MASTER},
+ {"DFX_CLUSTER1", PMC_PSS_BIT_CHT_DFX_CLUSTER1},
+ {"DFX_CLUSTER2", PMC_PSS_BIT_CHT_DFX_CLUSTER2},
+ {"DFX_CLUSTER3", PMC_PSS_BIT_CHT_DFX_CLUSTER3},
+ {"DFX_CLUSTER4", PMC_PSS_BIT_CHT_DFX_CLUSTER4},
+ {"DFX_CLUSTER5", PMC_PSS_BIT_CHT_DFX_CLUSTER5},
+ {},
+};
+
+static const struct pmc_reg_map byt_reg_map = {
+ .d3_sts_0 = d3_sts_0_map,
+ .d3_sts_1 = byt_d3_sts_1_map,
+ .func_dis = d3_sts_0_map,
+ .func_dis_2 = byt_d3_sts_1_map,
+ .pss = byt_pss_map,
+};
+
+static const struct pmc_reg_map cht_reg_map = {
+ .d3_sts_0 = d3_sts_0_map,
+ .d3_sts_1 = cht_d3_sts_1_map,
+ .func_dis = d3_sts_0_map,
+ .func_dis_2 = cht_func_dis_2_map,
+ .pss = cht_pss_map,
+};
+
+static inline u32 pmc_reg_read(struct pmc_dev *pmc, int reg_offset)
+{
+ return readl(pmc->regmap + reg_offset);
+}
+
+static inline void pmc_reg_write(struct pmc_dev *pmc, int reg_offset, u32 val)
+{
+ writel(val, pmc->regmap + reg_offset);
+}
+
+int pmc_atom_read(int offset, u32 *value)
+{
+ struct pmc_dev *pmc = &pmc_device;
+
+ if (!pmc->init)
+ return -ENODEV;
+
+ *value = pmc_reg_read(pmc, offset);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmc_atom_read);
+
+int pmc_atom_write(int offset, u32 value)
+{
+ struct pmc_dev *pmc = &pmc_device;
+
+ if (!pmc->init)
+ return -ENODEV;
+
+ pmc_reg_write(pmc, offset, value);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmc_atom_write);
+
+static void pmc_power_off(void)
+{
+ u16 pm1_cnt_port;
+ u32 pm1_cnt_value;
+
+ pr_info("Preparing to enter system sleep state S5\n");
+
+ pm1_cnt_port = acpi_base_addr + PM1_CNT;
+
+ pm1_cnt_value = inl(pm1_cnt_port);
+ pm1_cnt_value &= SLEEP_TYPE_MASK;
+ pm1_cnt_value |= SLEEP_TYPE_S5;
+ pm1_cnt_value |= SLEEP_ENABLE;
+
+ outl(pm1_cnt_value, pm1_cnt_port);
+}
+
+static void pmc_hw_reg_setup(struct pmc_dev *pmc)
+{
+ /*
+ * Disable PMC S0IX_WAKE_EN events coming from:
+ * - LPC clock run
+ * - GPIO_SUS ored dedicated IRQs
+ * - GPIO_SCORE ored dedicated IRQs
+ * - GPIO_SUS shared IRQ
+ * - GPIO_SCORE shared IRQ
+ */
+ pmc_reg_write(pmc, PMC_S0IX_WAKE_EN, (u32)PMC_WAKE_EN_SETTING);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void pmc_dev_state_print(struct seq_file *s, int reg_index,
+ u32 sts, const struct pmc_bit_map *sts_map,
+ u32 fd, const struct pmc_bit_map *fd_map)
+{
+ int offset = PMC_REG_BIT_WIDTH * reg_index;
+ int index;
+
+ for (index = 0; sts_map[index].name; index++) {
+ seq_printf(s, "Dev: %-2d - %-32s\tState: %s [%s]\n",
+ offset + index, sts_map[index].name,
+ fd_map[index].bit_mask & fd ? "Disabled" : "Enabled ",
+ sts_map[index].bit_mask & sts ? "D3" : "D0");
+ }
+}
+
+static int pmc_dev_state_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmc = s->private;
+ const struct pmc_reg_map *m = pmc->map;
+ u32 func_dis, func_dis_2;
+ u32 d3_sts_0, d3_sts_1;
+
+ func_dis = pmc_reg_read(pmc, PMC_FUNC_DIS);
+ func_dis_2 = pmc_reg_read(pmc, PMC_FUNC_DIS_2);
+ d3_sts_0 = pmc_reg_read(pmc, PMC_D3_STS_0);
+ d3_sts_1 = pmc_reg_read(pmc, PMC_D3_STS_1);
+
+ /* Low part */
+ pmc_dev_state_print(s, 0, d3_sts_0, m->d3_sts_0, func_dis, m->func_dis);
+
+ /* High part */
+ pmc_dev_state_print(s, 1, d3_sts_1, m->d3_sts_1, func_dis_2, m->func_dis_2);
+
+ return 0;
+}
+
+static int pmc_dev_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_dev_state_show, inode->i_private);
+}
+
+static const struct file_operations pmc_dev_state_ops = {
+ .open = pmc_dev_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pmc_pss_state_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmc = s->private;
+ const struct pmc_bit_map *map = pmc->map->pss;
+ u32 pss = pmc_reg_read(pmc, PMC_PSS);
+ int index;
+
+ for (index = 0; map[index].name; index++) {
+ seq_printf(s, "Island: %-2d - %-32s\tState: %s\n",
+ index, map[index].name,
+ map[index].bit_mask & pss ? "Off" : "On");
+ }
+ return 0;
+}
+
+static int pmc_pss_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_pss_state_show, inode->i_private);
+}
+
+static const struct file_operations pmc_pss_state_ops = {
+ .open = pmc_pss_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pmc_sleep_tmr_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmc = s->private;
+ u64 s0ir_tmr, s0i1_tmr, s0i2_tmr, s0i3_tmr, s0_tmr;
+
+ s0ir_tmr = (u64)pmc_reg_read(pmc, PMC_S0IR_TMR) << PMC_TMR_SHIFT;
+ s0i1_tmr = (u64)pmc_reg_read(pmc, PMC_S0I1_TMR) << PMC_TMR_SHIFT;
+ s0i2_tmr = (u64)pmc_reg_read(pmc, PMC_S0I2_TMR) << PMC_TMR_SHIFT;
+ s0i3_tmr = (u64)pmc_reg_read(pmc, PMC_S0I3_TMR) << PMC_TMR_SHIFT;
+ s0_tmr = (u64)pmc_reg_read(pmc, PMC_S0_TMR) << PMC_TMR_SHIFT;
+
+ seq_printf(s, "S0IR Residency:\t%lldus\n", s0ir_tmr);
+ seq_printf(s, "S0I1 Residency:\t%lldus\n", s0i1_tmr);
+ seq_printf(s, "S0I2 Residency:\t%lldus\n", s0i2_tmr);
+ seq_printf(s, "S0I3 Residency:\t%lldus\n", s0i3_tmr);
+ seq_printf(s, "S0 Residency:\t%lldus\n", s0_tmr);
+ return 0;
+}
+
+static int pmc_sleep_tmr_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_sleep_tmr_show, inode->i_private);
+}
+
+static const struct file_operations pmc_sleep_tmr_ops = {
+ .open = pmc_sleep_tmr_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void pmc_dbgfs_unregister(struct pmc_dev *pmc)
+{
+ debugfs_remove_recursive(pmc->dbgfs_dir);
+}
+
+static int pmc_dbgfs_register(struct pmc_dev *pmc)
+{
+ struct dentry *dir, *f;
+
+ dir = debugfs_create_dir("pmc_atom", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ pmc->dbgfs_dir = dir;
+
+ f = debugfs_create_file("dev_state", S_IFREG | S_IRUGO,
+ dir, pmc, &pmc_dev_state_ops);
+ if (!f)
+ goto err;
+
+ f = debugfs_create_file("pss_state", S_IFREG | S_IRUGO,
+ dir, pmc, &pmc_pss_state_ops);
+ if (!f)
+ goto err;
+
+ f = debugfs_create_file("sleep_state", S_IFREG | S_IRUGO,
+ dir, pmc, &pmc_sleep_tmr_ops);
+ if (!f)
+ goto err;
+
+ return 0;
+err:
+ pmc_dbgfs_unregister(pmc);
+ return -ENODEV;
+}
+#else
+static int pmc_dbgfs_register(struct pmc_dev *pmc)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct pmc_dev *pmc = &pmc_device;
+ const struct pmc_reg_map *map = (struct pmc_reg_map *)ent->driver_data;
+ int ret;
+
+ /* Obtain ACPI base address */
+ pci_read_config_dword(pdev, ACPI_BASE_ADDR_OFFSET, &acpi_base_addr);
+ acpi_base_addr &= ACPI_BASE_ADDR_MASK;
+
+ /* Install power off function */
+ if (acpi_base_addr != 0 && pm_power_off == NULL)
+ pm_power_off = pmc_power_off;
+
+ pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr);
+ pmc->base_addr &= PMC_BASE_ADDR_MASK;
+
+ pmc->regmap = ioremap_nocache(pmc->base_addr, PMC_MMIO_REG_LEN);
+ if (!pmc->regmap) {
+ dev_err(&pdev->dev, "error: ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ pmc->map = map;
+
+ /* PMC hardware registers setup */
+ pmc_hw_reg_setup(pmc);
+
+ ret = pmc_dbgfs_register(pmc);
+ if (ret)
+ dev_warn(&pdev->dev, "debugfs register failed\n");
+
+ pmc->init = true;
+ return ret;
+}
+
+/*
+ * Data for PCI driver interface
+ *
+ * used by pci_match_id() call below.
+ */
+static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_VLV_PMC), (kernel_ulong_t)&byt_reg_map },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CHT_PMC), (kernel_ulong_t)&cht_reg_map },
+ { 0, },
+};
+
+static int __init pmc_atom_init(void)
+{
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *ent;
+
+ /* We look for our device - PCU PMC.
+ * We assume that there is at most one such device.
+ *
+ * We can't use the plain pci_driver mechanism:
+ * the device is really a multiple-function device, and
+ * the main driver that binds to the pci_device is lpc_ich,
+ * so we have to find & bind to the device this way.
+ */
+ for_each_pci_dev(pdev) {
+ ent = pci_match_id(pmc_pci_ids, pdev);
+ if (ent)
+ return pmc_setup_dev(pdev, ent);
+ }
+ /* Device not found. */
+ return -ENODEV;
+}
+
+device_initcall(pmc_atom_init);
+
+/*
+MODULE_AUTHOR("Aubrey Li <aubrey.li@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Atom SOC Power Management Controller Interface");
+MODULE_LICENSE("GPL v2");
+*/
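
[Editor's note - reading aid, not part of the patch] pmc_power_off() above enters S5 with a read-modify-write of the ACPI PM1_CNT register. The user-space sketch below mirrors only that value computation; the SLEEP_* constants are stand-ins for the definitions in asm/pmc_atom.h, and their exact values are an assumption here.

#include <stdint.h>
#include <stdio.h>

/* Stand-in values; the authoritative definitions live in asm/pmc_atom.h. */
#define SLEEP_TYPE_MASK	0xFFFFECFFu	/* clears SLP_TYP and SLP_EN */
#define SLEEP_TYPE_S5	0x1C00u		/* SLP_TYP encoding for S5 */
#define SLEEP_ENABLE	0x2000u		/* SLP_EN: commit the request */

/* Mirrors the read-modify-write pmc_power_off() performs on PM1_CNT. */
static uint32_t pm1_cnt_for_s5(uint32_t cur)
{
	cur &= SLEEP_TYPE_MASK;	/* drop the old sleep type and enable bit */
	cur |= SLEEP_TYPE_S5;	/* select sleep state S5 */
	cur |= SLEEP_ENABLE;	/* arm the transition */
	return cur;
}

int main(void)
{
	printf("PM1_CNT: 0x%08x -> 0x%08x\n", 0u, pm1_cnt_for_s5(0));
	return 0;
}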
diff --git a/kernel/arch/x86/platform/atom/punit_atom_debug.c b/kernel/arch/x86/platform/atom/punit_atom_debug.c
new file mode 100644
index 000000000..5ca8ead91
--- /dev/null
+++ b/kernel/arch/x86/platform/atom/punit_atom_debug.c
@@ -0,0 +1,183 @@
+/*
+ * Intel SOC Punit device state debug driver
+ * Punit controls power management for North Complex devices (Graphics
+ * blocks, Image Signal Processing, video processing, display, DSP etc.)
+ *
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/io.h>
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+
+/* Side band Interface port */
+#define PUNIT_PORT 0x04
+/* Power gate status reg */
+#define PWRGT_STATUS 0x61
+/* Subsystem config/status Video processor */
+#define VED_SS_PM0 0x32
+/* Subsystem config/status ISP (Image Signal Processor) */
+#define ISP_SS_PM0 0x39
+/* Subsystem config/status Input/output controller */
+#define MIO_SS_PM 0x3B
+/* Shift bits for getting status for video, isp and i/o */
+#define SSS_SHIFT 24
+/* Shift bits for getting status for graphics rendering */
+#define RENDER_POS 0
+/* Shift bits for getting status for media control */
+#define MEDIA_POS 2
+/* Shift bits for getting status for Valley View/Baytrail display */
+#define VLV_DISPLAY_POS 6
+/* Subsystem config/status display for Cherry Trail SOC */
+#define CHT_DSP_SSS 0x36
+/* Shift bits for getting status for display */
+#define CHT_DSP_SSS_POS 16
+
+struct punit_device {
+ char *name;
+ int reg;
+ int sss_pos;
+};
+
+static const struct punit_device punit_device_byt[] = {
+ { "GFX RENDER", PWRGT_STATUS, RENDER_POS },
+ { "GFX MEDIA", PWRGT_STATUS, MEDIA_POS },
+ { "DISPLAY", PWRGT_STATUS, VLV_DISPLAY_POS },
+ { "VED", VED_SS_PM0, SSS_SHIFT },
+ { "ISP", ISP_SS_PM0, SSS_SHIFT },
+ { "MIO", MIO_SS_PM, SSS_SHIFT },
+ { NULL }
+};
+
+static const struct punit_device punit_device_cht[] = {
+ { "GFX RENDER", PWRGT_STATUS, RENDER_POS },
+ { "GFX MEDIA", PWRGT_STATUS, MEDIA_POS },
+ { "DISPLAY", CHT_DSP_SSS, CHT_DSP_SSS_POS },
+ { "VED", VED_SS_PM0, SSS_SHIFT },
+ { "ISP", ISP_SS_PM0, SSS_SHIFT },
+ { "MIO", MIO_SS_PM, SSS_SHIFT },
+ { NULL }
+};
+
+static const char * const dstates[] = {"D0", "D0i1", "D0i2", "D0i3"};
+
+static int punit_dev_state_show(struct seq_file *seq_file, void *unused)
+{
+ u32 punit_pwr_status;
+ struct punit_device *punit_devp = seq_file->private;
+ int index;
+ int status;
+
+ seq_puts(seq_file, "\n\nPUNIT NORTH COMPLEX DEVICES :\n");
+ while (punit_devp->name) {
+ status = iosf_mbi_read(PUNIT_PORT, BT_MBI_PMC_READ,
+ punit_devp->reg,
+ &punit_pwr_status);
+ if (status) {
+ seq_printf(seq_file, "%9s : Read Failed\n",
+ punit_devp->name);
+ } else {
+ index = (punit_pwr_status >> punit_devp->sss_pos) & 3;
+ seq_printf(seq_file, "%9s : %s\n", punit_devp->name,
+ dstates[index]);
+ }
+ punit_devp++;
+ }
+
+ return 0;
+}
+
+static int punit_dev_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, punit_dev_state_show, inode->i_private);
+}
+
+static const struct file_operations punit_dev_state_ops = {
+ .open = punit_dev_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *punit_dbg_file;
+
+static int punit_dbgfs_register(struct punit_device *punit_device)
+{
+ static struct dentry *dev_state;
+
+ punit_dbg_file = debugfs_create_dir("punit_atom", NULL);
+ if (!punit_dbg_file)
+ return -ENXIO;
+
+ dev_state = debugfs_create_file("dev_power_state", S_IFREG | S_IRUGO,
+ punit_dbg_file, punit_device,
+ &punit_dev_state_ops);
+ if (!dev_state) {
+ pr_err("punit_dev_state register failed\n");
+ debugfs_remove(punit_dbg_file);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void punit_dbgfs_unregister(void)
+{
+ debugfs_remove_recursive(punit_dbg_file);
+}
+
+#define ICPU(model, drv_data) \
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT,\
+ (kernel_ulong_t)&drv_data }
+
+static const struct x86_cpu_id intel_punit_cpu_ids[] = {
+ ICPU(55, punit_device_byt), /* Valleyview, Bay Trail */
+ ICPU(76, punit_device_cht), /* Braswell, Cherry Trail */
+ {}
+};
+
+MODULE_DEVICE_TABLE(x86cpu, intel_punit_cpu_ids);
+
+static int __init punit_atom_debug_init(void)
+{
+ const struct x86_cpu_id *id;
+ int ret;
+
+ id = x86_match_cpu(intel_punit_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ ret = punit_dbgfs_register((struct punit_device *)id->driver_data);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void __exit punit_atom_debug_exit(void)
+{
+ punit_dbgfs_unregister();
+}
+
+module_init(punit_atom_debug_init);
+module_exit(punit_atom_debug_exit);
+
+MODULE_AUTHOR("Kumar P, Mahesh <mahesh.kumar.p@intel.com>");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Driver for Punit devices states debugging");
+MODULE_LICENSE("GPL v2");
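
[Editor's note - reading aid, not part of the patch] punit_dev_state_show() above decodes each subsystem's power state as a 2-bit field at sss_pos within the status register read over IOSF-MBI. A minimal sketch of that decode, with a hypothetical register value:

#include <stdint.h>
#include <stdio.h>

static const char * const dstates[] = {"D0", "D0i1", "D0i2", "D0i3"};

/* Mirrors the decode in punit_dev_state_show(): shift the 2-bit
 * subsystem state field down and index the state-name table. */
static const char *decode_sss(uint32_t pwr_status, int sss_pos)
{
	return dstates[(pwr_status >> sss_pos) & 3];
}

int main(void)
{
	/* Hypothetical VED_SS_PM0 readout with bits 25:24 == 0b11 */
	printf("VED: %s\n", decode_sss(0x03000000u, 24));	/* D0i3 */
	return 0;
}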
diff --git a/kernel/arch/x86/platform/efi/efi-bgrt.c b/kernel/arch/x86/platform/efi/efi-bgrt.c
index d7f997f7c..ea48449b2 100644
--- a/kernel/arch/x86/platform/efi/efi-bgrt.c
+++ b/kernel/arch/x86/platform/efi/efi-bgrt.c
@@ -50,11 +50,16 @@ void __init efi_bgrt_init(void)
bgrt_tab->version);
return;
}
- if (bgrt_tab->status != 1) {
- pr_err("Ignoring BGRT: invalid status %u (expected 1)\n",
+ if (bgrt_tab->status & 0xfe) {
+ pr_err("Ignoring BGRT: reserved status bits are non-zero %u\n",
bgrt_tab->status);
return;
}
+ if (bgrt_tab->status != 1) {
+ pr_debug("Ignoring BGRT: invalid status %u (expected 1)\n",
+ bgrt_tab->status);
+ return;
+ }
if (bgrt_tab->image_type != 0) {
pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n",
bgrt_tab->image_type);
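
[Editor's note - reading aid, not part of the patch] The reordering above distinguishes two failure modes: reserved status bits set (a firmware bug, kept at pr_err) versus a valid status that merely reports the image as not displayed (demoted to pr_debug). A standalone sketch of the check order:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the validation order in efi_bgrt_init(): reserved bits
 * 1..7 must be zero; bit 0 means the image was displayed. */
static int bgrt_status_ok(uint8_t status)
{
	if (status & 0xfe) {
		printf("error: reserved status bits set (0x%02x)\n", status);
		return 0;
	}
	if (status != 1) {
		printf("debug: image not displayed (0x%02x)\n", status);
		return 0;
	}
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       bgrt_status_ok(0x01), bgrt_status_ok(0x00), bgrt_status_ok(0x82));
	return 0;
}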
diff --git a/kernel/arch/x86/platform/efi/efi.c b/kernel/arch/x86/platform/efi/efi.c
index 841ea05e1..ad285404e 100644
--- a/kernel/arch/x86/platform/efi/efi.c
+++ b/kernel/arch/x86/platform/efi/efi.c
@@ -117,6 +117,27 @@ void efi_get_time(struct timespec *now)
now->tv_nsec = 0;
}
+void __init efi_find_mirror(void)
+{
+ void *p;
+ u64 mirror_size = 0, total_size = 0;
+
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ efi_memory_desc_t *md = p;
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+ total_size += size;
+ if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
+ memblock_mark_mirror(start, size);
+ mirror_size += size;
+ }
+ }
+ if (mirror_size)
+ pr_info("Memory: %lldM/%lldM mirrored memory\n",
+ mirror_size>>20, total_size>>20);
+}
+
/*
* Tell the kernel about the EFI memory map. This might include
* more than the max 128 entries that can fit in the e820 legacy
@@ -153,6 +174,9 @@ static void __init do_add_efi_memmap(void)
case EFI_UNUSABLE_MEMORY:
e820_type = E820_UNUSABLE;
break;
+ case EFI_PERSISTENT_MEMORY:
+ e820_type = E820_PMEM;
+ break;
default:
/*
* EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
@@ -170,7 +194,7 @@ static void __init do_add_efi_memmap(void)
int __init efi_memblock_x86_reserve_range(void)
{
struct efi_info *e = &boot_params.efi_info;
- unsigned long pmap;
+ phys_addr_t pmap;
if (efi_enabled(EFI_PARAVIRT))
return 0;
@@ -185,7 +209,7 @@ int __init efi_memblock_x86_reserve_range(void)
#else
pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif
- memmap.phys_map = (void *)pmap;
+ memmap.phys_map = pmap;
memmap.nr_map = e->efi_memmap_size /
e->efi_memdesc_size;
memmap.desc_size = e->efi_memdesc_size;
@@ -198,7 +222,7 @@ int __init efi_memblock_x86_reserve_range(void)
return 0;
}
-static void __init print_efi_memmap(void)
+void __init efi_print_memmap(void)
{
#ifdef EFI_DEBUG
efi_memory_desc_t *md;
@@ -500,7 +524,9 @@ void __init efi_init(void)
return;
if (efi_enabled(EFI_DBG))
- print_efi_memmap();
+ efi_print_memmap();
+
+ efi_esrt_init();
}
void __init efi_late_init(void)
@@ -624,7 +650,7 @@ static void __init get_systab_virt_addr(efi_memory_desc_t *md)
static void __init save_runtime_map(void)
{
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
efi_memory_desc_t *md;
void *tmp, *p, *q = NULL;
int count = 0;
@@ -679,6 +705,70 @@ out:
}
/*
+ * Iterate the EFI memory map in reverse order because the regions
+ * will be mapped top-down. The end result is the same as if we had
+ * mapped things forward, but doesn't require us to change the
+ * existing implementation of efi_map_region().
+ */
+static inline void *efi_map_next_entry_reverse(void *entry)
+{
+ /* Initial call */
+ if (!entry)
+ return memmap.map_end - memmap.desc_size;
+
+ entry -= memmap.desc_size;
+ if (entry < memmap.map)
+ return NULL;
+
+ return entry;
+}
+
+/*
+ * efi_map_next_entry - Return the next EFI memory map descriptor
+ * @entry: Previous EFI memory map descriptor
+ *
+ * This is a helper function to iterate over the EFI memory map, which
+ * we do in different orders depending on the current configuration.
+ *
+ * To begin traversing the memory map @entry must be %NULL.
+ *
+ * Returns %NULL when we reach the end of the memory map.
+ */
+static void *efi_map_next_entry(void *entry)
+{
+ if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
+ /*
+ * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
+ * config table feature requires us to map all entries
+ * in the same order as they appear in the EFI memory
+ * map. That is to say, entry N must have a lower
+ * virtual address than entry N+1. This is because the
+ * firmware toolchain leaves relative references in
+ * the code/data sections, which are split and become
+ * separate EFI memory regions. Mapping things
+ * out-of-order leads to the firmware accessing
+ * unmapped addresses.
+ *
+ * Since we need to map things this way whether or not
+ * the kernel actually makes use of
+ * EFI_PROPERTIES_TABLE, let's just switch to this
+ * scheme by default for 64-bit.
+ */
+ return efi_map_next_entry_reverse(entry);
+ }
+
+ /* Initial call */
+ if (!entry)
+ return memmap.map;
+
+ entry += memmap.desc_size;
+ if (entry >= memmap.map_end)
+ return NULL;
+
+ return entry;
+}
+
+/*
* Map the efi memory ranges of the runtime services and update new_mmap with
* virtual addresses.
*/
@@ -688,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
unsigned long left = 0;
efi_memory_desc_t *md;
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ p = NULL;
+ while ((p = efi_map_next_entry(p))) {
md = p;
if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
#ifdef CONFIG_X86_64
@@ -722,7 +813,7 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
static void __init kexec_enter_virtual_mode(void)
{
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
efi_memory_desc_t *md;
void *p;
@@ -926,24 +1017,6 @@ u32 efi_mem_type(unsigned long phys_addr)
return 0;
}
-u64 efi_mem_attributes(unsigned long phys_addr)
-{
- efi_memory_desc_t *md;
- void *p;
-
- if (!efi_enabled(EFI_MEMMAP))
- return 0;
-
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
- md = p;
- if ((md->phys_addr <= phys_addr) &&
- (phys_addr < (md->phys_addr +
- (md->num_pages << EFI_PAGE_SHIFT))))
- return md->attribute;
- }
- return 0;
-}
-
static int __init arch_parse_efi_cmdline(char *str)
{
if (!str) {
@@ -953,8 +1026,6 @@ static int __init arch_parse_efi_cmdline(char *str)
if (parse_option_str(str, "old_map"))
set_bit(EFI_OLD_MEMMAP, &efi.flags);
- if (parse_option_str(str, "debug"))
- set_bit(EFI_DBG, &efi.flags);
return 0;
}
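
[Editor's note - reading aid, not part of the patch] efi_map_next_entry_reverse() above steps through the EFI memory map from the last descriptor down, in byte units, because desc_size may exceed the descriptor struct's size. A user-space sketch of the same pointer walk:

#include <stddef.h>
#include <stdio.h>

/* Mirrors efi_map_next_entry_reverse(): iterate a packed descriptor
 * array backwards, stepping in bytes exactly as the kernel does. */
static void *map_next_entry_reverse(void *entry, void *map, void *map_end,
				    size_t desc_size)
{
	if (!entry)				/* initial call: last entry */
		return (char *)map_end - desc_size;

	entry = (char *)entry - desc_size;
	return entry < map ? NULL : entry;
}

int main(void)
{
	char map[4 * 48];			/* four fake 48-byte descriptors */
	void *end = map + sizeof(map), *p = NULL;

	while ((p = map_next_entry_reverse(p, map, end, 48)))
		printf("descriptor at offset %td\n", (char *)p - map);
	return 0;
}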
diff --git a/kernel/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/kernel/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
index 0b283d4d0..de734134b 100644
--- a/kernel/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
+++ b/kernel/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
@@ -27,6 +27,7 @@ static struct platform_device wdt_dev = {
static int tangier_probe(struct platform_device *pdev)
{
int gsi;
+ struct irq_alloc_info info;
struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
if (!pdata)
@@ -34,8 +35,8 @@ static int tangier_probe(struct platform_device *pdev)
/* IOAPIC builds identity mapping between GSI and IRQ on MID */
gsi = pdata->irq;
- if (mp_set_gsi_attr(gsi, 1, 0, cpu_to_node(0)) ||
- mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC) <= 0) {
+ ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0);
+ if (mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info) <= 0) {
dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n",
gsi);
return -EINVAL;
diff --git a/kernel/arch/x86/platform/intel-mid/intel-mid.c b/kernel/arch/x86/platform/intel-mid/intel-mid.c
index 3005f0c89..1bbc21e2e 100644
--- a/kernel/arch/x86/platform/intel-mid/intel-mid.c
+++ b/kernel/arch/x86/platform/intel-mid/intel-mid.c
@@ -61,7 +61,7 @@
enum intel_mid_timer_options intel_mid_timer_options;
/* intel_mid_ops to store sub arch ops */
-struct intel_mid_ops *intel_mid_ops;
+static struct intel_mid_ops *intel_mid_ops;
/* getter function for sub arch ops*/
static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
enum intel_mid_cpu_type __intel_mid_cpu_chip;
@@ -81,26 +81,34 @@ static unsigned long __init intel_mid_calibrate_tsc(void)
return 0;
}
+static void __init intel_mid_setup_bp_timer(void)
+{
+ apbt_time_init();
+ setup_boot_APIC_clock();
+}
+
static void __init intel_mid_time_init(void)
{
sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
+
switch (intel_mid_timer_options) {
case INTEL_MID_TIMER_APBT_ONLY:
break;
case INTEL_MID_TIMER_LAPIC_APBT:
- x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
+ /* Use apbt and local apic */
+ x86_init.timers.setup_percpu_clockev = intel_mid_setup_bp_timer;
x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
- break;
+ return;
default:
if (!boot_cpu_has(X86_FEATURE_ARAT))
break;
+ /* Lapic only, no apbt */
x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
return;
}
- /* we need at least one APB timer */
- pre_init_apic_IRQ0();
- apbt_time_init();
+
+ x86_init.timers.setup_percpu_clockev = apbt_time_init;
}
static void intel_mid_arch_setup(void)
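
[Editor's note - reading aid, not part of the patch] The hunk above exists because, in the INTEL_MID_TIMER_LAPIC_APBT case, the boot CPU must bring up the APB timer before its local APIC clockevent, so both calls are folded into one setup_percpu_clockev hook. A stub sketch of that ordering (function bodies are placeholders):

#include <stdio.h>

/* Placeholder stand-ins for the kernel hooks named in the patch. */
static void apbt_time_init(void)	{ puts("APB timer init"); }
static void setup_boot_APIC_clock(void)	{ puts("boot APIC clockevent init"); }

/* Mirrors intel_mid_setup_bp_timer(): APB timer first, then the
 * local APIC clockevent, as one combined boot-CPU callback. */
static void intel_mid_setup_bp_timer(void)
{
	apbt_time_init();
	setup_boot_APIC_clock();
}

int main(void)
{
	intel_mid_setup_bp_timer();
	return 0;
}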
diff --git a/kernel/arch/x86/platform/intel-mid/intel_mid_vrtc.c b/kernel/arch/x86/platform/intel-mid/intel_mid_vrtc.c
index 32947ba0f..ee40fcb6e 100644
--- a/kernel/arch/x86/platform/intel-mid/intel_mid_vrtc.c
+++ b/kernel/arch/x86/platform/intel-mid/intel_mid_vrtc.c
@@ -173,5 +173,4 @@ static int __init intel_mid_device_create(void)
return platform_device_register(&vrtc_device);
}
-
-module_init(intel_mid_device_create);
+device_initcall(intel_mid_device_create);
diff --git a/kernel/arch/x86/platform/intel-mid/sfi.c b/kernel/arch/x86/platform/intel-mid/sfi.c
index c14ad3477..5ee360a95 100644
--- a/kernel/arch/x86/platform/intel-mid/sfi.c
+++ b/kernel/arch/x86/platform/intel-mid/sfi.c
@@ -95,18 +95,16 @@ int __init sfi_parse_mtmr(struct sfi_table_header *table)
pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz, irq = %d\n",
totallen, (u32)pentry->phys_addr,
pentry->freq_hz, pentry->irq);
- if (!pentry->irq)
- continue;
- mp_irq.type = MP_INTSRC;
- mp_irq.irqtype = mp_INT;
-/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
- mp_irq.irqflag = 5;
- mp_irq.srcbus = MP_BUS_ISA;
- mp_irq.srcbusirq = pentry->irq; /* IRQ */
- mp_irq.dstapic = MP_APIC_ALL;
- mp_irq.dstirq = pentry->irq;
- mp_save_irq(&mp_irq);
- mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
+ mp_irq.type = MP_INTSRC;
+ mp_irq.irqtype = mp_INT;
+ /* triggering mode edge bit 2-3, active high polarity bit 0-1 */
+ mp_irq.irqflag = 5;
+ mp_irq.srcbus = MP_BUS_ISA;
+ mp_irq.srcbusirq = pentry->irq; /* IRQ */
+ mp_irq.dstapic = MP_APIC_ALL;
+ mp_irq.dstirq = pentry->irq;
+ mp_save_irq(&mp_irq);
+ mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC, NULL);
}
return 0;
@@ -177,7 +175,7 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
mp_irq.dstapic = MP_APIC_ALL;
mp_irq.dstirq = pentry->irq;
mp_save_irq(&mp_irq);
- mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
+ mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC, NULL);
}
return 0;
}
@@ -199,10 +197,9 @@ static int __init sfi_parse_gpio(struct sfi_table_header *table)
num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
pentry = (struct sfi_gpio_table_entry *)sb->pentry;
- gpio_table = kmalloc(num * sizeof(*pentry), GFP_KERNEL);
+ gpio_table = kmemdup(pentry, num * sizeof(*pentry), GFP_KERNEL);
if (!gpio_table)
return -1;
- memcpy(gpio_table, pentry, num * sizeof(*pentry));
gpio_num_entry = num;
pr_debug("GPIO pin info:\n");
@@ -436,6 +433,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
struct devs_id *dev = NULL;
int num, i, ret;
int polarity;
+ struct irq_alloc_info info;
sb = (struct sfi_table_simple *)table;
num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
@@ -469,9 +467,8 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
polarity = 1;
}
- ret = mp_set_gsi_attr(irq, 1, polarity, NUMA_NO_NODE);
- if (ret == 0)
- ret = mp_map_gsi_to_irq(irq, IOAPIC_MAP_ALLOC);
+ ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 1, polarity);
+ ret = mp_map_gsi_to_irq(irq, IOAPIC_MAP_ALLOC, &info);
WARN_ON(ret < 0);
}
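
[Editor's note - reading aid, not part of the patch] The mp_irq.irqflag = 5 used above encodes polarity in bits 0-1 and trigger mode in bits 2-3, per the MP specification. A sketch of that encoding (macro names here are illustrative stand-ins, not from this tree):

#include <stdio.h>

/* MP-spec irqflag layout: polarity in bits 0-1, trigger in bits 2-3. */
#define MP_IRQPOL_ACTIVE_HIGH	0x1
#define MP_IRQTRIG_EDGE		(0x1 << 2)

int main(void)
{
	unsigned int irqflag = MP_IRQTRIG_EDGE | MP_IRQPOL_ACTIVE_HIGH;

	printf("irqflag = %u (edge-triggered, active high)\n", irqflag); /* 5 */
	return 0;
}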
diff --git a/kernel/arch/x86/platform/intel/Makefile b/kernel/arch/x86/platform/intel/Makefile
new file mode 100644
index 000000000..b878032fb
--- /dev/null
+++ b/kernel/arch/x86/platform/intel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_IOSF_MBI) += iosf_mbi.o
diff --git a/kernel/arch/x86/platform/intel/iosf_mbi.c b/kernel/arch/x86/platform/intel/iosf_mbi.c
new file mode 100644
index 000000000..edf2c54bf
--- /dev/null
+++ b/kernel/arch/x86/platform/intel/iosf_mbi.c
@@ -0,0 +1,327 @@
+/*
+ * IOSF-SB MailBox Interface Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ * The IOSF-SB is a fabric bus available on Atom-based SoCs that uses a
+ * mailbox interface (MBI) to communicate with multiple devices. This
+ * driver implements access to this interface for those platforms that can
+ * enumerate the device using PCI.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/capability.h>
+
+#include <asm/iosf_mbi.h>
+
+#define PCI_DEVICE_ID_BAYTRAIL 0x0F00
+#define PCI_DEVICE_ID_BRASWELL 0x2280
+#define PCI_DEVICE_ID_QUARK_X1000 0x0958
+#define PCI_DEVICE_ID_TANGIER 0x1170
+
+static struct pci_dev *mbi_pdev;
+static DEFINE_SPINLOCK(iosf_mbi_lock);
+
+static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
+{
+ return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE;
+}
+
+static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
+ mcrx);
+ if (result < 0)
+ goto fail_read;
+ }
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto fail_read;
+
+ result = pci_read_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto fail_read;
+
+ return 0;
+
+fail_read:
+ dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+ return result;
+}
+
+static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto fail_write;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
+ mcrx);
+ if (result < 0)
+ goto fail_write;
+ }
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto fail_write;
+
+ return 0;
+
+fail_write:
+ dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+ return result;
+}
+
+int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_read);
+
+int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_write);
+
+int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
+{
+ u32 mcr, mcrx;
+ u32 value;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+
+ /* Read current mdr value */
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr & MBI_RD_MASK, &value);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+ return ret;
+ }
+
+ /* Apply mask */
+ value &= ~mask;
+ mdr &= mask;
+ value |= mdr;
+
+ /* Write back */
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr | MBI_WR_MASK, value);
+
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_modify);
+
+bool iosf_mbi_available(void)
+{
+ /* Mbi isn't hot-pluggable. No remove routine is provided */
+ return mbi_pdev;
+}
+EXPORT_SYMBOL(iosf_mbi_available);
+
+#ifdef CONFIG_IOSF_MBI_DEBUG
+static u32 dbg_mdr;
+static u32 dbg_mcr;
+static u32 dbg_mcrx;
+
+static int mcr_get(void *data, u64 *val)
+{
+ *val = *(u32 *)data;
+ return 0;
+}
+
+static int mcr_set(void *data, u64 val)
+{
+ u8 command = ((u32)val & 0xFF000000) >> 24,
+ port = ((u32)val & 0x00FF0000) >> 16,
+ offset = ((u32)val & 0x0000FF00) >> 8;
+ int err;
+
+ *(u32 *)data = val;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EACCES;
+
+ if (command & 1u)
+ err = iosf_mbi_write(port,
+ command,
+ dbg_mcrx | offset,
+ dbg_mdr);
+ else
+ err = iosf_mbi_read(port,
+ command,
+ dbg_mcrx | offset,
+ &dbg_mdr);
+
+ return err;
+}
+DEFINE_SIMPLE_ATTRIBUTE(iosf_mcr_fops, mcr_get, mcr_set, "%llx\n");
+
+static struct dentry *iosf_dbg;
+
+static void iosf_sideband_debug_init(void)
+{
+ struct dentry *d;
+
+ iosf_dbg = debugfs_create_dir("iosf_sb", NULL);
+ if (IS_ERR_OR_NULL(iosf_dbg))
+ return;
+
+ /* mdr */
+ d = debugfs_create_x32("mdr", 0660, iosf_dbg, &dbg_mdr);
+ if (!d)
+ goto cleanup;
+
+ /* mcrx */
+ d = debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx);
+ if (!d)
+ goto cleanup;
+
+ /* mcr - initiates mailbox transaction */
+ d = debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops);
+ if (!d)
+ goto cleanup;
+
+ return;
+
+cleanup:
+ debugfs_remove_recursive(d);
+}
+
+static void iosf_debugfs_init(void)
+{
+ iosf_sideband_debug_init();
+}
+
+static void iosf_debugfs_remove(void)
+{
+ debugfs_remove_recursive(iosf_dbg);
+}
+#else
+static inline void iosf_debugfs_init(void) { }
+static inline void iosf_debugfs_remove(void) { }
+#endif /* CONFIG_IOSF_MBI_DEBUG */
+
+static int iosf_mbi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *unused)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ mbi_pdev = pci_dev_get(pdev);
+ return 0;
+}
+
+static const struct pci_device_id iosf_mbi_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BAYTRAIL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRASWELL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_QUARK_X1000) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_TANGIER) },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
+
+static struct pci_driver iosf_mbi_pci_driver = {
+ .name = "iosf_mbi_pci",
+ .probe = iosf_mbi_probe,
+ .id_table = iosf_mbi_pci_ids,
+};
+
+static int __init iosf_mbi_init(void)
+{
+ iosf_debugfs_init();
+
+ return pci_register_driver(&iosf_mbi_pci_driver);
+}
+
+static void __exit iosf_mbi_exit(void)
+{
+ iosf_debugfs_remove();
+
+ pci_unregister_driver(&iosf_mbi_pci_driver);
+ pci_dev_put(mbi_pdev);
+ mbi_pdev = NULL;
+}
+
+module_init(iosf_mbi_init);
+module_exit(iosf_mbi_exit);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("IOSF Mailbox Interface accessor");
+MODULE_LICENSE("GPL v2");
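
[Editor's note - reading aid, not part of the patch] iosf_mbi_form_mcr() above packs an entire mailbox command into one 32-bit MCR word, and iosf_mbi_modify() masks a read value before writing it back. The sketch below mirrors both bit manipulations; MBI_ENABLE's value is an assumption standing in for asm/iosf_mbi.h.

#include <stdint.h>
#include <stdio.h>

#define MBI_ENABLE 0xF0u	/* stand-in; the real value is in asm/iosf_mbi.h */

/* Mirrors iosf_mbi_form_mcr(): opcode, port and the low offset byte
 * packed into a single message control register word. */
static uint32_t form_mcr(uint8_t op, uint8_t port, uint8_t offset)
{
	return ((uint32_t)op << 24) | ((uint32_t)port << 16) |
	       ((uint32_t)offset << 8) | MBI_ENABLE;
}

/* Mirrors the masking in iosf_mbi_modify(): take only the bits set in
 * mask from the new value, keep everything else from the current one. */
static uint32_t apply_mask(uint32_t cur, uint32_t mdr, uint32_t mask)
{
	return (cur & ~mask) | (mdr & mask);
}

int main(void)
{
	printf("mcr = 0x%08x\n", form_mcr(0x10, 0x04, 0x61));
	printf("mdr = 0x%08x\n", apply_mask(0xDEADBEEFu, 0x0u, 0x0Fu));
	return 0;
}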
diff --git a/kernel/arch/x86/platform/sfi/sfi.c b/kernel/arch/x86/platform/sfi/sfi.c
index 2a8a74f3b..6c7111bbd 100644
--- a/kernel/arch/x86/platform/sfi/sfi.c
+++ b/kernel/arch/x86/platform/sfi/sfi.c
@@ -25,8 +25,8 @@
#include <linux/init.h>
#include <linux/sfi.h>
#include <linux/io.h>
-#include <linux/irqdomain.h>
+#include <asm/irqdomain.h>
#include <asm/io_apic.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
@@ -71,9 +71,6 @@ static int __init sfi_parse_cpus(struct sfi_table_header *table)
#endif /* CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_IO_APIC
-static struct irq_domain_ops sfi_ioapic_irqdomain_ops = {
- .map = mp_irqdomain_map,
-};
static int __init sfi_parse_ioapic(struct sfi_table_header *table)
{
@@ -82,7 +79,7 @@ static int __init sfi_parse_ioapic(struct sfi_table_header *table)
int i, num;
struct ioapic_domain_cfg cfg = {
.type = IOAPIC_DOMAIN_STRICT,
- .ops = &sfi_ioapic_irqdomain_ops,
+ .ops = &mp_ioapic_irqdomain_ops,
};
sb = (struct sfi_table_simple *)table;
diff --git a/kernel/arch/x86/platform/uv/uv_irq.c b/kernel/arch/x86/platform/uv/uv_irq.c
index 0ce673645..e1c24631a 100644
--- a/kernel/arch/x86/platform/uv/uv_irq.c
+++ b/kernel/arch/x86/platform/uv/uv_irq.c
@@ -13,22 +13,37 @@
#include <linux/slab.h>
#include <linux/irq.h>
+#include <asm/irqdomain.h>
#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>
/* MMR offset and pnode of hub sourcing interrupts for a given irq */
-struct uv_irq_2_mmr_pnode{
- struct rb_node list;
+struct uv_irq_2_mmr_pnode {
unsigned long offset;
int pnode;
- int irq;
};
-static DEFINE_SPINLOCK(uv_irq_lock);
-static struct rb_root uv_irq_root;
+static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
+{
+ unsigned long mmr_value;
+ struct uv_IO_APIC_route_entry *entry;
+
+ BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
+ sizeof(unsigned long));
+
+ mmr_value = 0;
+ entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+ entry->vector = cfg->vector;
+ entry->delivery_mode = apic->irq_delivery_mode;
+ entry->dest_mode = apic->irq_dest_mode;
+ entry->polarity = 0;
+ entry->trigger = 0;
+ entry->mask = 0;
+ entry->dest = cfg->dest_apicid;
-static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);
+ uv_write_global_mmr64(info->pnode, info->offset, mmr_value);
+}
static void uv_noop(struct irq_data *data) { }
@@ -37,6 +52,23 @@ static void uv_ack_apic(struct irq_data *data)
ack_APIC_irq();
}
+static int
+uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+{
+ struct irq_data *parent = data->parent_data;
+ struct irq_cfg *cfg = irqd_cfg(data);
+ int ret;
+
+ ret = parent->chip->irq_set_affinity(parent, mask, force);
+ if (ret >= 0) {
+ uv_program_mmr(cfg, data->chip_data);
+ send_cleanup_vector(cfg);
+ }
+
+ return ret;
+}
+
static struct irq_chip uv_irq_chip = {
.name = "UV-CORE",
.irq_mask = uv_noop,
@@ -45,189 +77,99 @@ static struct irq_chip uv_irq_chip = {
.irq_set_affinity = uv_set_irq_affinity,
};
-/*
- * Add offset and pnode information of the hub sourcing interrupts to the
- * rb tree for a specific irq.
- */
-static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
+static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
{
- struct rb_node **link = &uv_irq_root.rb_node;
- struct rb_node *parent = NULL;
- struct uv_irq_2_mmr_pnode *n;
- struct uv_irq_2_mmr_pnode *e;
- unsigned long irqflags;
-
- n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
- uv_blade_to_memory_nid(blade));
- if (!n)
+ struct uv_irq_2_mmr_pnode *chip_data;
+ struct irq_alloc_info *info = arg;
+ struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
+ int ret;
+
+ if (nr_irqs > 1 || !info || info->type != X86_IRQ_ALLOC_TYPE_UV)
+ return -EINVAL;
+
+ chip_data = kmalloc_node(sizeof(*chip_data), GFP_KERNEL,
+ irq_data_get_node(irq_data));
+ if (!chip_data)
return -ENOMEM;
- n->irq = irq;
- n->offset = offset;
- n->pnode = uv_blade_to_pnode(blade);
- spin_lock_irqsave(&uv_irq_lock, irqflags);
- /* Find the right place in the rbtree: */
- while (*link) {
- parent = *link;
- e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);
-
- if (unlikely(irq == e->irq)) {
- /* irq entry exists */
- e->pnode = uv_blade_to_pnode(blade);
- e->offset = offset;
- spin_unlock_irqrestore(&uv_irq_lock, irqflags);
- kfree(n);
- return 0;
- }
-
- if (irq < e->irq)
- link = &(*link)->rb_left;
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+ if (ret >= 0) {
+ if (info->uv_limit == UV_AFFINITY_CPU)
+ irq_set_status_flags(virq, IRQ_NO_BALANCING);
else
- link = &(*link)->rb_right;
+ irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
+
+ chip_data->pnode = uv_blade_to_pnode(info->uv_blade);
+ chip_data->offset = info->uv_offset;
+ irq_domain_set_info(domain, virq, virq, &uv_irq_chip, chip_data,
+ handle_percpu_irq, NULL, info->uv_name);
+ } else {
+ kfree(chip_data);
}
- /* Insert the node into the rbtree. */
- rb_link_node(&n->list, parent, link);
- rb_insert_color(&n->list, &uv_irq_root);
-
- spin_unlock_irqrestore(&uv_irq_lock, irqflags);
- return 0;
+ return ret;
}
-/* Retrieve offset and pnode information from the rb tree for a specific irq */
-int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
+static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
- struct uv_irq_2_mmr_pnode *e;
- struct rb_node *n;
- unsigned long irqflags;
-
- spin_lock_irqsave(&uv_irq_lock, irqflags);
- n = uv_irq_root.rb_node;
- while (n) {
- e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
-
- if (e->irq == irq) {
- *offset = e->offset;
- *pnode = e->pnode;
- spin_unlock_irqrestore(&uv_irq_lock, irqflags);
- return 0;
- }
-
- if (irq < e->irq)
- n = n->rb_left;
- else
- n = n->rb_right;
- }
- spin_unlock_irqrestore(&uv_irq_lock, irqflags);
- return -1;
+ struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
+
+ BUG_ON(nr_irqs != 1);
+ kfree(irq_data->chip_data);
+ irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
+ irq_clear_status_flags(virq, IRQ_NO_BALANCING);
+ irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
/*
* Re-target the irq to the specified CPU and enable the specified MMR located
* on the specified blade to allow the sending of MSIs to the specified CPU.
*/
-static int
-arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
- unsigned long mmr_offset, int limit)
+static void uv_domain_activate(struct irq_domain *domain,
+ struct irq_data *irq_data)
{
- const struct cpumask *eligible_cpu = cpumask_of(cpu);
- struct irq_cfg *cfg = irq_cfg(irq);
- unsigned long mmr_value;
- struct uv_IO_APIC_route_entry *entry;
- int mmr_pnode, err;
- unsigned int dest;
-
- BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
- sizeof(unsigned long));
-
- err = assign_irq_vector(irq, cfg, eligible_cpu);
- if (err != 0)
- return err;
-
- err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
- if (err != 0)
- return err;
-
- if (limit == UV_AFFINITY_CPU)
- irq_set_status_flags(irq, IRQ_NO_BALANCING);
- else
- irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-
- irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
- irq_name);
-
- mmr_value = 0;
- entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
- entry->vector = cfg->vector;
- entry->delivery_mode = apic->irq_delivery_mode;
- entry->dest_mode = apic->irq_dest_mode;
- entry->polarity = 0;
- entry->trigger = 0;
- entry->mask = 0;
- entry->dest = dest;
-
- mmr_pnode = uv_blade_to_pnode(mmr_blade);
- uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
-
- if (cfg->move_in_progress)
- send_cleanup_vector(cfg);
-
- return irq;
+ uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
}
/*
* Disable the specified MMR located on the specified blade so that MSIs are
* no longer allowed to be sent.
*/
-static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
+static void uv_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *irq_data)
{
unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry;
- BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
- sizeof(unsigned long));
-
mmr_value = 0;
entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
entry->mask = 1;
-
- uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+ uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
}
-static int
-uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
-{
- struct irq_cfg *cfg = irqd_cfg(data);
- unsigned int dest;
- unsigned long mmr_value, mmr_offset;
- struct uv_IO_APIC_route_entry *entry;
- int mmr_pnode;
-
- if (apic_set_affinity(data, mask, &dest))
- return -1;
-
- mmr_value = 0;
- entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
-
- entry->vector = cfg->vector;
- entry->delivery_mode = apic->irq_delivery_mode;
- entry->dest_mode = apic->irq_dest_mode;
- entry->polarity = 0;
- entry->trigger = 0;
- entry->mask = 0;
- entry->dest = dest;
-
- /* Get previously stored MMR and pnode of hub sourcing interrupts */
- if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
- return -1;
-
- uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+static const struct irq_domain_ops uv_domain_ops = {
+ .alloc = uv_domain_alloc,
+ .free = uv_domain_free,
+ .activate = uv_domain_activate,
+ .deactivate = uv_domain_deactivate,
+};
- if (cfg->move_in_progress)
- send_cleanup_vector(cfg);
+static struct irq_domain *uv_get_irq_domain(void)
+{
+ static struct irq_domain *uv_domain;
+ static DEFINE_MUTEX(uv_lock);
+
+ mutex_lock(&uv_lock);
+ if (uv_domain == NULL) {
+ uv_domain = irq_domain_add_tree(NULL, &uv_domain_ops, NULL);
+ if (uv_domain)
+ uv_domain->parent = x86_vector_domain;
+ }
+ mutex_unlock(&uv_lock);
- return IRQ_SET_MASK_OK_NOCOPY;
+ return uv_domain;
}
/*
@@ -238,19 +180,21 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
unsigned long mmr_offset, int limit)
{
- int ret, irq = irq_alloc_hwirq(uv_blade_to_memory_nid(mmr_blade));
+ struct irq_alloc_info info;
+ struct irq_domain *domain = uv_get_irq_domain();
- if (!irq)
- return -EBUSY;
+ if (!domain)
+ return -ENOMEM;
- ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
- limit);
- if (ret == irq)
- uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
- else
- irq_free_hwirq(irq);
+ init_irq_alloc_info(&info, cpumask_of(cpu));
+ info.type = X86_IRQ_ALLOC_TYPE_UV;
+ info.uv_limit = limit;
+ info.uv_blade = mmr_blade;
+ info.uv_offset = mmr_offset;
+ info.uv_name = irq_name;
- return ret;
+ return irq_domain_alloc_irqs(domain, 1,
+ uv_blade_to_memory_nid(mmr_blade), &info);
}
EXPORT_SYMBOL_GPL(uv_setup_irq);
@@ -263,26 +207,6 @@ EXPORT_SYMBOL_GPL(uv_setup_irq);
*/
void uv_teardown_irq(unsigned int irq)
{
- struct uv_irq_2_mmr_pnode *e;
- struct rb_node *n;
- unsigned long irqflags;
-
- spin_lock_irqsave(&uv_irq_lock, irqflags);
- n = uv_irq_root.rb_node;
- while (n) {
- e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
- if (e->irq == irq) {
- arch_disable_uv_irq(e->pnode, e->offset);
- rb_erase(n, &uv_irq_root);
- kfree(e);
- break;
- }
- if (irq < e->irq)
- n = n->rb_left;
- else
- n = n->rb_right;
- }
- spin_unlock_irqrestore(&uv_irq_lock, irqflags);
- irq_free_hwirq(irq);
+ irq_domain_free_irqs(irq, 1);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
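
[Editor's note - reading aid, not part of the patch] uv_program_mmr() above overlays struct uv_IO_APIC_route_entry on an unsigned long and writes the raw word to the hub MMR; the BUILD_BUG_ON guards the size match. A portable sketch of that overlay idiom (field widths are illustrative, not the real hardware layout):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct uv_IO_APIC_route_entry. */
struct route_entry {
	uint64_t vector		: 8;
	uint64_t delivery_mode	: 3;
	uint64_t dest_mode	: 1;
	uint64_t polarity	: 1;
	uint64_t trigger	: 1;
	uint64_t mask		: 1;
	uint64_t rsvd		: 17;
	uint64_t dest		: 32;
};

int main(void)
{
	struct route_entry entry = {0};
	uint64_t mmr_value;

	/* Same guard as the kernel's BUILD_BUG_ON. */
	static_assert(sizeof(entry) == sizeof(mmr_value), "layout mismatch");

	entry.vector = 0x20;
	entry.mask = 0;
	entry.dest = 0x1;
	memcpy(&mmr_value, &entry, sizeof(mmr_value));	/* raw word to write */
	printf("mmr = 0x%016llx\n", (unsigned long long)mmr_value);
	return 0;
}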
diff --git a/kernel/arch/x86/platform/uv/uv_nmi.c b/kernel/arch/x86/platform/uv/uv_nmi.c
index 7488cafab..327f21c3b 100644
--- a/kernel/arch/x86/platform/uv/uv_nmi.c
+++ b/kernel/arch/x86/platform/uv/uv_nmi.c
@@ -104,7 +104,7 @@ static int param_set_local64(const char *val, const struct kernel_param *kp)
return 0;
}
-static struct kernel_param_ops param_ops_local64 = {
+static const struct kernel_param_ops param_ops_local64 = {
.get = param_get_local64,
.set = param_set_local64,
};
@@ -376,38 +376,42 @@ static void uv_nmi_wait(int master)
atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
}
+/* Dump Instruction Pointer header */
static void uv_nmi_dump_cpu_ip_hdr(void)
{
- printk(KERN_DEFAULT
- "\nUV: %4s %6s %-32s %s (Note: PID 0 not listed)\n",
+ pr_info("\nUV: %4s %6s %-32s %s (Note: PID 0 not listed)\n",
"CPU", "PID", "COMMAND", "IP");
}
+/* Dump Instruction Pointer info */
static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
{
- printk(KERN_DEFAULT "UV: %4d %6d %-32.32s ",
- cpu, current->pid, current->comm);
-
+ pr_info("UV: %4d %6d %-32.32s ", cpu, current->pid, current->comm);
printk_address(regs->ip);
}
-/* Dump this cpu's state */
+/*
+ * Dump this CPU's state. If action was set to "kdump" and the crash_kexec
+ * failed, then we provide "dump" as an alternate action. Action "dump" now
+ * also includes the show "ips" (instruction pointers) action whereas the
+ * action "ips" only displays instruction pointers for the non-idle CPU's.
+ * This is an abbreviated form of the "ps" command.
+ */
static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{
const char *dots = " ................................. ";
- if (uv_nmi_action_is("ips")) {
- if (cpu == 0)
- uv_nmi_dump_cpu_ip_hdr();
+ if (cpu == 0)
+ uv_nmi_dump_cpu_ip_hdr();
- if (current->pid != 0)
- uv_nmi_dump_cpu_ip(cpu, regs);
+ if (current->pid != 0 || !uv_nmi_action_is("ips"))
+ uv_nmi_dump_cpu_ip(cpu, regs);
- } else if (uv_nmi_action_is("dump")) {
- printk(KERN_DEFAULT
- "UV:%sNMI process trace for CPU %d\n", dots, cpu);
+ if (uv_nmi_action_is("dump")) {
+ pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
show_regs(regs);
}
+
this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
}
@@ -469,8 +473,7 @@ static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
uv_nmi_trigger_dump(tcpu);
}
if (ignored)
- printk(KERN_DEFAULT "UV: %d CPUs ignored NMI\n",
- ignored);
+ pr_alert("UV: %d CPUs ignored NMI\n", ignored);
console_loglevel = saved_console_loglevel;
pr_alert("UV: process trace complete\n");
@@ -492,8 +495,9 @@ static void uv_nmi_touch_watchdogs(void)
touch_nmi_watchdog();
}
-#if defined(CONFIG_KEXEC)
static atomic_t uv_nmi_kexec_failed;
+
+#if defined(CONFIG_KEXEC_CORE)
static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
/* Call crash to dump system state */
@@ -502,10 +506,9 @@ static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
crash_kexec(regs);
pr_emerg("UV: crash_kexec unexpectedly returned, ");
+ atomic_set(&uv_nmi_kexec_failed, 1);
if (!kexec_crash_image) {
pr_cont("crash kernel not loaded\n");
- atomic_set(&uv_nmi_kexec_failed, 1);
- uv_nmi_sync_exit(1);
return;
}
pr_cont("kexec busy, stalling cpus while waiting\n");
@@ -514,18 +517,16 @@ static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
/* If crash exec fails the slaves should return, otherwise stall */
while (atomic_read(&uv_nmi_kexec_failed) == 0)
mdelay(10);
-
- /* Crash kernel most likely not loaded, return in an orderly fashion */
- uv_nmi_sync_exit(0);
}
-#else /* !CONFIG_KEXEC */
+#else /* !CONFIG_KEXEC_CORE */
static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
if (master)
pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
+ atomic_set(&uv_nmi_kexec_failed, 1);
}
-#endif /* !CONFIG_KEXEC */
+#endif /* !CONFIG_KEXEC_CORE */
#ifdef CONFIG_KGDB
#ifdef CONFIG_KGDB_KDB
@@ -613,9 +614,14 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
master = (atomic_read(&uv_nmi_cpu) == cpu);
/* If NMI action is "kdump", then attempt to do it */
- if (uv_nmi_action_is("kdump"))
+ if (uv_nmi_action_is("kdump")) {
uv_nmi_kdump(cpu, master, regs);
+ /* Unexpected return, revert action to "dump" */
+ if (master)
+ strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
+ }
+
/* Pause as all cpus enter the NMI handler */
uv_nmi_wait(master);
@@ -640,6 +646,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
atomic_set(&uv_nmi_cpus_in_nmi, -1);
atomic_set(&uv_nmi_cpu, -1);
atomic_set(&uv_in_nmi, 0);
+ atomic_set(&uv_nmi_kexec_failed, 0);
}
uv_nmi_touch_watchdogs();
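
[Editor's note - reading aid, not part of the patch] The fallback above rewrites the action buffer in place: strncpy() with strlen(uv_nmi_action) copies "dump" plus one padding NUL, exactly filling the five bytes that "kdump" occupied. A standalone sketch of that idiom:

#include <stdio.h>
#include <string.h>

/* Stand-in for the module-param buffer holding the NMI action name. */
static char uv_nmi_action[8] = "kdump";

int main(void)
{
	/* Copies 'd','u','m','p' and pads the fifth byte with '\0',
	 * so "kdump" becomes "dump" without touching later bytes. */
	strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
	printf("action now: %s\n", uv_nmi_action);
	return 0;
}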
diff --git a/kernel/arch/x86/platform/uv/uv_time.c b/kernel/arch/x86/platform/uv/uv_time.c
index a718fe0d2..5e0b12262 100644
--- a/kernel/arch/x86/platform/uv/uv_time.c
+++ b/kernel/arch/x86/platform/uv/uv_time.c
@@ -32,8 +32,7 @@
static cycle_t uv_read_rtc(struct clocksource *cs);
static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
-static void uv_rtc_timer_setup(enum clock_event_mode,
- struct clock_event_device *);
+static int uv_rtc_shutdown(struct clock_event_device *evt);
static struct clocksource clocksource_uv = {
.name = RTC_NAME,
@@ -44,14 +43,14 @@ static struct clocksource clocksource_uv = {
};
static struct clock_event_device clock_event_device_uv = {
- .name = RTC_NAME,
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .shift = 20,
- .rating = 400,
- .irq = -1,
- .set_next_event = uv_rtc_next_event,
- .set_mode = uv_rtc_timer_setup,
- .event_handler = NULL,
+ .name = RTC_NAME,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 20,
+ .rating = 400,
+ .irq = -1,
+ .set_next_event = uv_rtc_next_event,
+ .set_state_shutdown = uv_rtc_shutdown,
+ .event_handler = NULL,
};
static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
@@ -326,24 +325,14 @@ static int uv_rtc_next_event(unsigned long delta,
}
/*
- * Setup the RTC timer in oneshot mode
+ * Shutdown the RTC timer
*/
-static void uv_rtc_timer_setup(enum clock_event_mode mode,
- struct clock_event_device *evt)
+static int uv_rtc_shutdown(struct clock_event_device *evt)
{
int ced_cpu = cpumask_first(evt->cpumask);
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_RESUME:
- /* Nothing to do here yet */
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- uv_rtc_unset_timer(ced_cpu, 1);
- break;
- }
+ uv_rtc_unset_timer(ced_cpu, 1);
+ return 0;
}
static void uv_rtc_interrupt(void)