path: root/kernel/drivers/pci
Diffstat (limited to 'kernel/drivers/pci')
-rw-r--r--  kernel/drivers/pci/Makefile                    |    1
-rw-r--r--  kernel/drivers/pci/access.c                    |   50
-rw-r--r--  kernel/drivers/pci/ats.c                       |  131
-rw-r--r--  kernel/drivers/pci/bus.c                       |    8
-rw-r--r--  kernel/drivers/pci/host/Kconfig                |   58
-rw-r--r--  kernel/drivers/pci/host/Makefile               |    5
-rw-r--r--  kernel/drivers/pci/host/pci-dra7xx.c           |  151
-rw-r--r--  kernel/drivers/pci/host/pci-exynos.c           |   42
-rw-r--r--  kernel/drivers/pci/host/pci-host-generic.c     |   91
-rw-r--r--  kernel/drivers/pci/host/pci-imx6.c             |  108
-rw-r--r--  kernel/drivers/pci/host/pci-keystone-dw.c      |   36
-rw-r--r--  kernel/drivers/pci/host/pci-keystone.c         |   35
-rw-r--r--  kernel/drivers/pci/host/pci-keystone.h         |    2
-rw-r--r--  kernel/drivers/pci/host/pci-layerscape.c       |  196
-rw-r--r--  kernel/drivers/pci/host/pci-mvebu.c            |  492
-rw-r--r--  kernel/drivers/pci/host/pci-rcar-gen2.c        |    1
-rw-r--r--  kernel/drivers/pci/host/pci-tegra.c            |   23
-rw-r--r--  kernel/drivers/pci/host/pci-xgene-msi.c        |  583
-rw-r--r--  kernel/drivers/pci/host/pci-xgene.c            |   57
-rw-r--r--  kernel/drivers/pci/host/pcie-altera-msi.c      |  314
-rw-r--r--  kernel/drivers/pci/host/pcie-altera.c          |  588
-rw-r--r--  kernel/drivers/pci/host/pcie-designware.c      |  503
-rw-r--r--  kernel/drivers/pci/host/pcie-designware.h      |   20
-rw-r--r--  kernel/drivers/pci/host/pcie-hisi.c            |  200
-rw-r--r--  kernel/drivers/pci/host/pcie-iproc-bcma.c      |  110
-rw-r--r--  kernel/drivers/pci/host/pcie-iproc-platform.c  |   39
-rw-r--r--  kernel/drivers/pci/host/pcie-iproc.c           |  223
-rw-r--r--  kernel/drivers/pci/host/pcie-iproc.h           |   28
-rw-r--r--  kernel/drivers/pci/host/pcie-rcar.c            |   93
-rw-r--r--  kernel/drivers/pci/host/pcie-spear13xx.c       |   53
-rw-r--r--  kernel/drivers/pci/host/pcie-xilinx.c          |   45
-rw-r--r--  kernel/drivers/pci/hotplug/Makefile            |    3
-rw-r--r--  kernel/drivers/pci/hotplug/acpiphp_glue.c      |    9
-rw-r--r--  kernel/drivers/pci/hotplug/pci_hotplug_core.c  |  122
-rw-r--r--  kernel/drivers/pci/hotplug/pciehp.h            |   37
-rw-r--r--  kernel/drivers/pci/hotplug/pciehp_acpi.c       |  137
-rw-r--r--  kernel/drivers/pci/hotplug/pciehp_core.c       |   54
-rw-r--r--  kernel/drivers/pci/hotplug/pciehp_ctrl.c       |  229
-rw-r--r--  kernel/drivers/pci/hotplug/pciehp_hpc.c        |  125
-rw-r--r--  kernel/drivers/pci/htirq.c                     |   48
-rw-r--r--  kernel/drivers/pci/iov.c                       |  101
-rw-r--r--  kernel/drivers/pci/msi.c                       |  284
-rw-r--r--  kernel/drivers/pci/of.c                        |   29
-rw-r--r--  kernel/drivers/pci/pci-acpi.c                  |    4
-rw-r--r--  kernel/drivers/pci/pci-driver.c                |   66
-rw-r--r--  kernel/drivers/pci/pci-sysfs.c                 |    5
-rw-r--r--  kernel/drivers/pci/pci.c                       |  360
-rw-r--r--  kernel/drivers/pci/pci.h                       |   38
-rw-r--r--  kernel/drivers/pci/pcie/aer/aerdrv.c           |    4
-rw-r--r--  kernel/drivers/pci/pcie/aer/aerdrv.h           |    1
-rw-r--r--  kernel/drivers/pci/pcie/aer/aerdrv_core.c      |   33
-rw-r--r--  kernel/drivers/pci/pcie/aspm.c                 |   57
-rw-r--r--  kernel/drivers/pci/pcie/portdrv_core.c         |    2
-rw-r--r--  kernel/drivers/pci/probe.c                     |  268
-rw-r--r--  kernel/drivers/pci/quirks.c                    |  274
-rw-r--r--  kernel/drivers/pci/setup-bus.c                 |   50
-rw-r--r--  kernel/drivers/pci/setup-res.c                 |    7
-rw-r--r--  kernel/drivers/pci/slot.c                      |   29
-rw-r--r--  kernel/drivers/pci/vc.c                        |    3
-rw-r--r--  kernel/drivers/pci/xen-pcifront.c              |   36
60 files changed, 4742 insertions, 1959 deletions
diff --git a/kernel/drivers/pci/Makefile b/kernel/drivers/pci/Makefile
index 73e4af400..be3f631c3 100644
--- a/kernel/drivers/pci/Makefile
+++ b/kernel/drivers/pci/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_PCI_IOV) += iov.o
#
obj-$(CONFIG_ALPHA) += setup-irq.o
obj-$(CONFIG_ARM) += setup-irq.o
+obj-$(CONFIG_ARM64) += setup-irq.o
obj-$(CONFIG_UNICORE32) += setup-irq.o
obj-$(CONFIG_SUPERH) += setup-irq.o
obj-$(CONFIG_MIPS) += setup-irq.o
diff --git a/kernel/drivers/pci/access.c b/kernel/drivers/pci/access.c
index 5908d6e3b..7a45a20af 100644
--- a/kernel/drivers/pci/access.c
+++ b/kernel/drivers/pci/access.c
@@ -442,7 +442,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
void *arg)
{
- struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+ struct pci_dev *tdev = pci_get_slot(dev->bus,
+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
ssize_t ret;
if (!tdev)
@@ -456,7 +457,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
const void *arg)
{
- struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+ struct pci_dev *tdev = pci_get_slot(dev->bus,
+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
ssize_t ret;
if (!tdev)
@@ -473,22 +475,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = {
.release = pci_vpd_pci22_release,
};
-static int pci_vpd_f0_dev_check(struct pci_dev *dev)
-{
- struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
- int ret = 0;
-
- if (!tdev)
- return -ENODEV;
- if (!tdev->vpd || !tdev->multifunction ||
- dev->class != tdev->class || dev->vendor != tdev->vendor ||
- dev->device != tdev->device)
- ret = -ENODEV;
-
- pci_dev_put(tdev);
- return ret;
-}
-
int pci_vpd_pci22_init(struct pci_dev *dev)
{
struct pci_vpd_pci22 *vpd;
@@ -497,12 +483,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
if (!cap)
return -ENODEV;
- if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
- int ret = pci_vpd_f0_dev_check(dev);
- if (ret)
- return ret;
- }
vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
if (!vpd)
return -ENOMEM;
@@ -590,6 +571,14 @@ static inline int pcie_cap_version(const struct pci_dev *dev)
return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}
+static bool pcie_downstream_port(const struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_DOWNSTREAM;
+}
+
bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
@@ -605,10 +594,7 @@ bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
- int type = pci_pcie_type(dev);
-
- return (type == PCI_EXP_TYPE_ROOT_PORT ||
- type == PCI_EXP_TYPE_DOWNSTREAM) &&
+ return pcie_downstream_port(dev) &&
pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}
@@ -687,10 +673,9 @@ int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
* State bit in the Slot Status register of Downstream Ports,
* which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
*/
- if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
- pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+ if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
+ pos == PCI_EXP_SLTSTA)
*val = PCI_EXP_SLTSTA_PDS;
- }
return 0;
}
@@ -716,10 +701,9 @@ int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
return ret;
}
- if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
- pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+ if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
+ pos == PCI_EXP_SLTSTA)
*val = PCI_EXP_SLTSTA_PDS;
- }
return 0;
}
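
The access.c hunks above fix pci_vpd_f0_read()/pci_vpd_f0_write() to pass a full devfn to pci_get_slot() -- PCI_DEVFN(PCI_SLOT(dev->devfn), 0) refers VPD accesses to function 0 of the same device -- and factor the Root Port/Downstream Port test into pcie_downstream_port(). A minimal standalone sketch of the devfn arithmetic; the macro definitions are copied from the PCI uapi header so the example compiles on its own:

	#include <assert.h>

	/* devfn encoding used by the PCI core: slot in bits 7:3, function in bits 2:0 */
	#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
	#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
	#define PCI_FUNC(devfn)		((devfn) & 0x07)

	int main(void)
	{
		unsigned int devfn = PCI_DEVFN(3, 2);			/* device 3, function 2 */
		unsigned int f0 = PCI_DEVFN(PCI_SLOT(devfn), 0);	/* same device, function 0 */

		assert(PCI_SLOT(f0) == 3 && PCI_FUNC(f0) == 0);		/* what pci_get_slot() now receives */
		return 0;
	}
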
diff --git a/kernel/drivers/pci/ats.c b/kernel/drivers/pci/ats.c
index a8099d4d0..eeb9fb2b4 100644
--- a/kernel/drivers/pci/ats.c
+++ b/kernel/drivers/pci/ats.c
@@ -17,34 +17,15 @@
#include "pci.h"
-static int ats_alloc_one(struct pci_dev *dev, int ps)
+void pci_ats_init(struct pci_dev *dev)
{
int pos;
- u16 cap;
- struct pci_ats *ats;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
if (!pos)
- return -ENODEV;
-
- ats = kzalloc(sizeof(*ats), GFP_KERNEL);
- if (!ats)
- return -ENOMEM;
-
- ats->pos = pos;
- ats->stu = ps;
- pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
- ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
- PCI_ATS_MAX_QDEP;
- dev->ats = ats;
-
- return 0;
-}
+ return;
-static void ats_free_one(struct pci_dev *dev)
-{
- kfree(dev->ats);
- dev->ats = NULL;
+ dev->ats_cap = pos;
}
/**
@@ -56,43 +37,36 @@ static void ats_free_one(struct pci_dev *dev)
*/
int pci_enable_ats(struct pci_dev *dev, int ps)
{
- int rc;
u16 ctrl;
+ struct pci_dev *pdev;
- BUG_ON(dev->ats && dev->ats->is_enabled);
-
- if (ps < PCI_ATS_MIN_STU)
+ if (!dev->ats_cap)
return -EINVAL;
- if (dev->is_physfn || dev->is_virtfn) {
- struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
-
- mutex_lock(&pdev->sriov->lock);
- if (pdev->ats)
- rc = pdev->ats->stu == ps ? 0 : -EINVAL;
- else
- rc = ats_alloc_one(pdev, ps);
-
- if (!rc)
- pdev->ats->ref_cnt++;
- mutex_unlock(&pdev->sriov->lock);
- if (rc)
- return rc;
- }
+ if (WARN_ON(dev->ats_enabled))
+ return -EBUSY;
- if (!dev->is_physfn) {
- rc = ats_alloc_one(dev, ps);
- if (rc)
- return rc;
- }
+ if (ps < PCI_ATS_MIN_STU)
+ return -EINVAL;
+ /*
+ * Note that enabling ATS on a VF fails unless it's already enabled
+ * with the same STU on the PF.
+ */
ctrl = PCI_ATS_CTRL_ENABLE;
- if (!dev->is_virtfn)
- ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
- pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
-
- dev->ats->is_enabled = 1;
+ if (dev->is_virtfn) {
+ pdev = pci_physfn(dev);
+ if (pdev->ats_stu != ps)
+ return -EINVAL;
+
+ atomic_inc(&pdev->ats_ref_cnt); /* count enabled VFs */
+ } else {
+ dev->ats_stu = ps;
+ ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
+ }
+ pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
+ dev->ats_enabled = 1;
return 0;
}
EXPORT_SYMBOL_GPL(pci_enable_ats);
@@ -103,28 +77,25 @@ EXPORT_SYMBOL_GPL(pci_enable_ats);
*/
void pci_disable_ats(struct pci_dev *dev)
{
+ struct pci_dev *pdev;
u16 ctrl;
- BUG_ON(!dev->ats || !dev->ats->is_enabled);
-
- pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
- ctrl &= ~PCI_ATS_CTRL_ENABLE;
- pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
-
- dev->ats->is_enabled = 0;
+ if (WARN_ON(!dev->ats_enabled))
+ return;
- if (dev->is_physfn || dev->is_virtfn) {
- struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+ if (atomic_read(&dev->ats_ref_cnt))
+ return; /* VFs still enabled */
- mutex_lock(&pdev->sriov->lock);
- pdev->ats->ref_cnt--;
- if (!pdev->ats->ref_cnt)
- ats_free_one(pdev);
- mutex_unlock(&pdev->sriov->lock);
+ if (dev->is_virtfn) {
+ pdev = pci_physfn(dev);
+ atomic_dec(&pdev->ats_ref_cnt);
}
- if (!dev->is_physfn)
- ats_free_one(dev);
+ pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, &ctrl);
+ ctrl &= ~PCI_ATS_CTRL_ENABLE;
+ pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
+
+ dev->ats_enabled = 0;
}
EXPORT_SYMBOL_GPL(pci_disable_ats);
@@ -132,16 +103,13 @@ void pci_restore_ats_state(struct pci_dev *dev)
{
u16 ctrl;
- if (!pci_ats_enabled(dev))
+ if (!dev->ats_enabled)
return;
- if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS))
- BUG();
ctrl = PCI_ATS_CTRL_ENABLE;
if (!dev->is_virtfn)
- ctrl |= PCI_ATS_CTRL_STU(dev->ats->stu - PCI_ATS_MIN_STU);
-
- pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+ ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
+ pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
}
EXPORT_SYMBOL_GPL(pci_restore_ats_state);
@@ -159,23 +127,16 @@ EXPORT_SYMBOL_GPL(pci_restore_ats_state);
*/
int pci_ats_queue_depth(struct pci_dev *dev)
{
- int pos;
u16 cap;
+ if (!dev->ats_cap)
+ return -EINVAL;
+
if (dev->is_virtfn)
return 0;
- if (dev->ats)
- return dev->ats->qdep;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
- if (!pos)
- return -ENODEV;
-
- pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
-
- return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
- PCI_ATS_MAX_QDEP;
+ pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CAP, &cap);
+ return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : PCI_ATS_MAX_QDEP;
}
EXPORT_SYMBOL_GPL(pci_ats_queue_depth);
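
The ats.c rewrite drops the separately allocated struct pci_ats and keeps the capability offset, STU and enable state directly in struct pci_dev (ats_cap, ats_stu, ats_enabled, ats_ref_cnt). The STU written to PCI_ATS_CTRL is the log2 page size minus PCI_ATS_MIN_STU (12, i.e. a 4 KiB minimum). A small sketch of that encoding, with the register macros reproduced from pci_regs.h so it runs standalone:

	#include <stdio.h>

	#define PCI_ATS_MIN_STU		12		/* log2(4096): smallest translation unit */
	#define PCI_ATS_CTRL_STU(d)	((d) & 0x1f)	/* Smallest Translation Unit field */
	#define PCI_ATS_CTRL_ENABLE	0x8000		/* ATS Enable */

	int main(void)
	{
		int ps = 16;				/* request a 64 KiB (2^16) STU */
		unsigned int ctrl = PCI_ATS_CTRL_ENABLE;

		if (ps < PCI_ATS_MIN_STU)
			return 1;			/* pci_enable_ats() would return -EINVAL */

		ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
		printf("PCI_ATS_CTRL value: %#x\n", ctrl);	/* 0x8004 */
		return 0;
	}
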
diff --git a/kernel/drivers/pci/bus.c b/kernel/drivers/pci/bus.c
index 6fbd3f2b5..89b3befc7 100644
--- a/kernel/drivers/pci/bus.c
+++ b/kernel/drivers/pci/bus.c
@@ -140,6 +140,8 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
type_mask |= IORESOURCE_TYPE_BITS;
pci_bus_for_each_resource(bus, r, i) {
+ resource_size_t min_used = min;
+
if (!r)
continue;
@@ -163,12 +165,12 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
* overrides "min".
*/
if (avail.start)
- min = avail.start;
+ min_used = avail.start;
max = avail.end;
/* Ok, try it out.. */
- ret = allocate_resource(r, res, size, min, max,
+ ret = allocate_resource(r, res, size, min_used, max,
align, alignf, alignf_data);
if (ret == 0)
return 0;
@@ -256,6 +258,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
res->start = start;
res->end = end;
+ res->flags &= ~IORESOURCE_UNSET;
+ orig_res.flags &= ~IORESOURCE_UNSET;
dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
&orig_res, res);
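
The pci_bus_alloc_from_region() hunk introduces a per-iteration min_used so that one window's avail.start no longer overwrites the caller's min for every later window. A hedged, self-contained illustration of the pattern (the window values are made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned long min = 0x1000;			/* caller's minimum address */
		unsigned long avail_start[3] = { 0x8000, 0, 0 };/* only window 0 overrides it */

		for (int i = 0; i < 3; i++) {
			unsigned long min_used = min;		/* fresh copy each iteration */

			if (avail_start[i])
				min_used = avail_start[i];	/* override for this window only */

			printf("window %d allocates from %#lx\n", i, min_used);
		}
		/* Windows 1 and 2 fall back to 0x1000; with the old "min = avail.start"
		 * they would have inherited window 0's 0x8000. */
		return 0;
	}
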
diff --git a/kernel/drivers/pci/host/Kconfig b/kernel/drivers/pci/host/Kconfig
index 1dfb567b3..c0ad9aaa1 100644
--- a/kernel/drivers/pci/host/Kconfig
+++ b/kernel/drivers/pci/host/Kconfig
@@ -5,6 +5,7 @@ config PCI_DRA7XX
bool "TI DRA7xx PCIe controller"
select PCIE_DW
depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on BROKEN
help
Enables support for the PCIe controller in the DRA7xx SoC. There
are two instances of PCIe controller in DRA7xx. This controller can
@@ -39,7 +40,8 @@ config PCI_TEGRA
config PCI_RCAR_GEN2
bool "Renesas R-Car Gen2 Internal PCI controller"
- depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
+ depends on ARM
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
Say Y here if you want internal PCI support on R-Car Gen2 SoC.
There are 3 internal PCI controllers available with a single
@@ -47,13 +49,14 @@ config PCI_RCAR_GEN2
config PCI_RCAR_GEN2_PCIE
bool "Renesas R-Car PCIe controller"
- depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
+ depends on ARM
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
Say Y here if you want PCIe controller support on R-Car Gen2 SoCs.
config PCI_HOST_GENERIC
bool "Generic PCI host controller"
- depends on ARM && OF
+ depends on (ARM || ARM64) && OF
help
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
@@ -89,14 +92,23 @@ config PCI_XGENE
depends on ARCH_XGENE
depends on OF
select PCIEPORTBUS
+ select PCI_MSI_IRQ_DOMAIN if PCI_MSI
help
Say Y here if you want internal PCI support on APM X-Gene SoC.
There are 5 internal PCIe ports available. Each port is GEN3 capable
and have varied lanes from x1 to x8.
+config PCI_XGENE_MSI
+ bool "X-Gene v1 PCIe MSI feature"
+ depends on PCI_XGENE && PCI_MSI
+ default y
+ help
+ Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
+ This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC.
+
config PCI_LAYERSCAPE
bool "Freescale Layerscape PCIe controller"
- depends on OF && ARM
+ depends on OF && (ARM || ARCH_LAYERSCAPE)
select PCIE_DW
select MFD_SYSCON
help
@@ -108,7 +120,7 @@ config PCI_VERSATILE
config PCIE_IPROC
tristate "Broadcom iProc PCIe controller"
- depends on OF && ARM
+ depends on OF && (ARM || ARM64)
default n
help
This enables the iProc PCIe core controller support for Broadcom's
@@ -125,4 +137,40 @@ config PCIE_IPROC_PLATFORM
Say Y here if you want to use the Broadcom iProc PCIe controller
through the generic platform bus interface
+config PCIE_IPROC_BCMA
+ tristate "Broadcom iProc PCIe BCMA bus driver"
+ depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST)
+ select PCIE_IPROC
+ select BCMA
+ select PCI_DOMAINS
+ default ARCH_BCM_5301X
+ help
+ Say Y here if you want to use the Broadcom iProc PCIe controller
+ through the BCMA bus interface
+
+config PCIE_ALTERA
+ bool "Altera PCIe controller"
+ depends on ARM || NIOS2
+ depends on OF_PCI
+ select PCI_DOMAINS
+ help
+ Say Y here if you want to enable PCIe controller support on Altera
+ FPGA.
+
+config PCIE_ALTERA_MSI
+ bool "Altera PCIe MSI feature"
+ depends on PCIE_ALTERA && PCI_MSI
+ select PCI_MSI_IRQ_DOMAIN
+ help
+ Say Y here if you want PCIe MSI support for the Altera FPGA.
+ This MSI driver supports Altera MSI to GIC controller IP.
+
+config PCI_HISI
+ depends on OF && ARM64
+ bool "HiSilicon SoC HIP05 PCIe controller"
+ select PCIEPORTBUS
+ select PCIE_DW
+ help
+ Say Y here if you want PCIe controller support on HiSilicon HIP05 SoC
+
endmenu
diff --git a/kernel/drivers/pci/host/Makefile b/kernel/drivers/pci/host/Makefile
index f733b4e27..9d4d3c692 100644
--- a/kernel/drivers/pci/host/Makefile
+++ b/kernel/drivers/pci/host/Makefile
@@ -11,7 +11,12 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
+obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
+obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
+obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
+obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
+obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
diff --git a/kernel/drivers/pci/host/pci-dra7xx.c b/kernel/drivers/pci/host/pci-dra7xx.c
index 2d57e19a2..923607bda 100644
--- a/kernel/drivers/pci/host/pci-dra7xx.c
+++ b/kernel/drivers/pci/host/pci-dra7xx.c
@@ -17,6 +17,7 @@
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -61,6 +62,7 @@
#define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C
#define LINK_UP BIT(16)
+#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
struct dra7xx_pcie {
void __iomem *base;
@@ -83,6 +85,17 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
+static inline u32 dra7xx_pcie_readl_rc(struct pcie_port *pp, u32 offset)
+{
+ return readl(pp->dbi_base + offset);
+}
+
+static inline void dra7xx_pcie_writel_rc(struct pcie_port *pp, u32 offset,
+ u32 value)
+{
+ writel(value, pp->dbi_base + offset);
+}
+
static int dra7xx_pcie_link_up(struct pcie_port *pp)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
@@ -93,9 +106,9 @@ static int dra7xx_pcie_link_up(struct pcie_port *pp)
static int dra7xx_pcie_establish_link(struct pcie_port *pp)
{
- u32 reg;
- unsigned int retries = 1000;
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+ u32 reg;
+ unsigned int retries;
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "link is already up\n");
@@ -106,19 +119,14 @@ static int dra7xx_pcie_establish_link(struct pcie_port *pp)
reg |= LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
- while (retries--) {
- reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
- if (reg & LINK_UP)
- break;
+ for (retries = 0; retries < 1000; retries++) {
+ if (dw_pcie_link_up(pp))
+ return 0;
usleep_range(10, 20);
}
- if (retries == 0) {
- dev_err(pp->dev, "link is not up\n");
- return -ETIMEDOUT;
- }
-
- return 0;
+ dev_err(pp->dev, "link is not up\n");
+ return -EINVAL;
}
static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
@@ -144,6 +152,12 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
dw_pcie_setup_rc(pp);
+
+ pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
+ pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
+ pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
+ pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR;
+
dra7xx_pcie_establish_link(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
@@ -160,7 +174,6 @@ static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -289,7 +302,8 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
}
ret = devm_request_irq(&pdev->dev, pp->irq,
- dra7xx_pcie_msi_irq_handler, IRQF_SHARED,
+ dra7xx_pcie_msi_irq_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
"dra7-pcie-msi", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
@@ -330,6 +344,9 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
char name[10];
+ int gpio_sel;
+ enum of_gpio_flags flags;
+ unsigned long gpio_flags;
dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
if (!dra7xx)
@@ -387,9 +404,25 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
dev_err(dev, "pm_runtime_get_sync failed\n");
- goto err_phy;
+ goto err_get_sync;
+ }
+
+ gpio_sel = of_get_gpio_flags(dev->of_node, 0, &flags);
+ if (gpio_is_valid(gpio_sel)) {
+ gpio_flags = (flags & OF_GPIO_ACTIVE_LOW) ?
+ GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH;
+ ret = devm_gpio_request_one(dev, gpio_sel, gpio_flags,
+ "pcie_reset");
+ if (ret) {
+ dev_err(&pdev->dev, "gpio%d request failed, ret %d\n",
+ gpio_sel, ret);
+ goto err_gpio;
+ }
+ } else if (gpio_sel == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_gpio;
}
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
@@ -400,12 +433,14 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
ret = dra7xx_add_pcie_port(dra7xx, pdev);
if (ret < 0)
- goto err_add_port;
+ goto err_gpio;
return 0;
-err_add_port:
+err_gpio:
pm_runtime_put(dev);
+
+err_get_sync:
pm_runtime_disable(dev);
err_phy:
@@ -436,6 +471,85 @@ static int __exit dra7xx_pcie_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int dra7xx_pcie_suspend(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ struct pcie_port *pp = &dra7xx->pp;
+ u32 val;
+
+ /* clear MSE */
+ val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
+ val &= ~PCI_COMMAND_MEMORY;
+ dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
+
+ return 0;
+}
+
+static int dra7xx_pcie_resume(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ struct pcie_port *pp = &dra7xx->pp;
+ u32 val;
+
+ /* set MSE */
+ val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
+ val |= PCI_COMMAND_MEMORY;
+ dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
+
+ return 0;
+}
+
+static int dra7xx_pcie_suspend_noirq(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ int count = dra7xx->phy_count;
+
+ while (count--) {
+ phy_power_off(dra7xx->phy[count]);
+ phy_exit(dra7xx->phy[count]);
+ }
+
+ return 0;
+}
+
+static int dra7xx_pcie_resume_noirq(struct device *dev)
+{
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ int phy_count = dra7xx->phy_count;
+ int ret;
+ int i;
+
+ for (i = 0; i < phy_count; i++) {
+ ret = phy_init(dra7xx->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(dra7xx->phy[i]);
+ if (ret < 0) {
+ phy_exit(dra7xx->phy[i]);
+ goto err_phy;
+ }
+ }
+
+ return 0;
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(dra7xx->phy[i]);
+ phy_exit(dra7xx->phy[i]);
+ }
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
+ dra7xx_pcie_resume_noirq)
+};
+
static const struct of_device_id of_dra7xx_pcie_match[] = {
{ .compatible = "ti,dra7-pcie", },
{},
@@ -447,6 +561,7 @@ static struct platform_driver dra7xx_pcie_driver = {
.driver = {
.name = "dra7-pcie",
.of_match_table = of_dra7xx_pcie_match,
+ .pm = &dra7xx_pcie_pm_ops,
},
};
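
The new dra7xx suspend/resume callbacks do a read-modify-write of the root complex PCI_COMMAND register through the dbi accessors, clearing Memory Space Enable on suspend and setting it again on resume. A rough standalone sketch of that sequence; the fake_dbi array stands in for the memory-mapped dbi space:

	#include <stdint.h>
	#include <stdio.h>

	#define PCI_COMMAND		0x04
	#define PCI_COMMAND_MEMORY	0x2	/* Memory Space Enable (MSE) */

	static uint32_t fake_dbi[0x40];		/* stand-in for MMIO dbi space */

	static uint32_t rc_readl(uint32_t off)             { return fake_dbi[off / 4]; }
	static void     rc_writel(uint32_t off, uint32_t v) { fake_dbi[off / 4] = v; }

	int main(void)
	{
		uint32_t val;

		rc_writel(PCI_COMMAND, 0x7);		/* pretend IO, MEM and BM are on */

		val = rc_readl(PCI_COMMAND);		/* suspend: clear MSE */
		val &= ~PCI_COMMAND_MEMORY;
		rc_writel(PCI_COMMAND, val);

		val = rc_readl(PCI_COMMAND);		/* resume: set MSE again */
		val |= PCI_COMMAND_MEMORY;
		rc_writel(PCI_COMMAND, val);

		printf("PCI_COMMAND = %#x\n", rc_readl(PCI_COMMAND));	/* 0x7 */
		return 0;
	}
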
diff --git a/kernel/drivers/pci/host/pci-exynos.c b/kernel/drivers/pci/host/pci-exynos.c
index c139237e0..d997d22d4 100644
--- a/kernel/drivers/pci/host/pci-exynos.c
+++ b/kernel/drivers/pci/host/pci-exynos.c
@@ -316,9 +316,9 @@ static void exynos_pcie_assert_reset(struct pcie_port *pp)
static int exynos_pcie_establish_link(struct pcie_port *pp)
{
- u32 val;
- int count = 0;
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+ u32 val;
+ unsigned int retries;
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "Link already up\n");
@@ -357,27 +357,23 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
PCIE_APP_LTSSM_ENABLE);
/* check if the link is up or not */
- while (!dw_pcie_link_up(pp)) {
- mdelay(100);
- count++;
- if (count == 10) {
- while (exynos_phy_readl(exynos_pcie,
- PCIE_PHY_PLL_LOCKED) == 0) {
- val = exynos_blk_readl(exynos_pcie,
- PCIE_PHY_PLL_LOCKED);
- dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
- }
- /* power off phy */
- exynos_pcie_power_off_phy(pp);
-
- dev_err(pp->dev, "PCIe Link Fail\n");
- return -EINVAL;
+ for (retries = 0; retries < 10; retries++) {
+ if (dw_pcie_link_up(pp)) {
+ dev_info(pp->dev, "Link up\n");
+ return 0;
}
+ mdelay(100);
}
- dev_info(pp->dev, "Link up\n");
+ while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) {
+ val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED);
+ dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
+ }
+ /* power off phy */
+ exynos_pcie_power_off_phy(pp);
- return 0;
+ dev_err(pp->dev, "PCIe Link Fail\n");
+ return -EINVAL;
}
static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
@@ -458,7 +454,7 @@ static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
int ret;
exynos_pcie_sideband_dbi_r_mode(pp, true);
- ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
+ ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);
exynos_pcie_sideband_dbi_r_mode(pp, false);
return ret;
}
@@ -469,8 +465,7 @@ static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
int ret;
exynos_pcie_sideband_dbi_w_mode(pp, true);
- ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3),
- where, size, val);
+ ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);
exynos_pcie_sideband_dbi_w_mode(pp, false);
return ret;
}
@@ -527,7 +522,8 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
exynos_pcie_msi_irq_handler,
- IRQF_SHARED, "exynos-pcie", pp);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "exynos-pcie", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request msi irq\n");
return ret;
diff --git a/kernel/drivers/pci/host/pci-host-generic.c b/kernel/drivers/pci/host/pci-host-generic.c
index ba46e581d..5434c90db 100644
--- a/kernel/drivers/pci/host/pci-host-generic.c
+++ b/kernel/drivers/pci/host/pci-host-generic.c
@@ -27,7 +27,7 @@
struct gen_pci_cfg_bus_ops {
u32 bus_shift;
- void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
+ struct pci_ops ops;
};
struct gen_pci_cfg_windows {
@@ -35,10 +35,19 @@ struct gen_pci_cfg_windows {
struct resource *bus_range;
void __iomem **win;
- const struct gen_pci_cfg_bus_ops *ops;
+ struct gen_pci_cfg_bus_ops *ops;
};
+/*
+ * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI
+ * sysdata. Add pci_sys_data as the first element in struct gen_pci so
+ * that when we use a gen_pci pointer as sysdata, it is also a pointer to
+ * a struct pci_sys_data.
+ */
struct gen_pci {
+#ifdef CONFIG_ARM
+ struct pci_sys_data sys;
+#endif
struct pci_host_bridge host;
struct gen_pci_cfg_windows cfg;
struct list_head resources;
@@ -48,8 +57,7 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
unsigned int devfn,
int where)
{
- struct pci_sys_data *sys = bus->sysdata;
- struct gen_pci *pci = sys->private_data;
+ struct gen_pci *pci = bus->sysdata;
resource_size_t idx = bus->number - pci->cfg.bus_range->start;
return pci->cfg.win[idx] + ((devfn << 8) | where);
@@ -57,15 +65,18 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
.bus_shift = 16,
- .map_bus = gen_pci_map_cfg_bus_cam,
+ .ops = {
+ .map_bus = gen_pci_map_cfg_bus_cam,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
};
static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
unsigned int devfn,
int where)
{
- struct pci_sys_data *sys = bus->sysdata;
- struct gen_pci *pci = sys->private_data;
+ struct gen_pci *pci = bus->sysdata;
resource_size_t idx = bus->number - pci->cfg.bus_range->start;
return pci->cfg.win[idx] + ((devfn << 12) | where);
@@ -73,12 +84,11 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
.bus_shift = 20,
- .map_bus = gen_pci_map_cfg_bus_ecam,
-};
-
-static struct pci_ops gen_pci_ops = {
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
+ .ops = {
+ .map_bus = gen_pci_map_cfg_bus_ecam,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
};
static const struct of_device_id gen_pci_of_match[] = {
@@ -159,6 +169,7 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
struct resource *bus_range;
struct device *dev = pci->host.dev.parent;
struct device_node *np = dev->of_node;
+ u32 sz = 1 << pci->cfg.ops->bus_shift;
err = of_address_to_resource(np, 0, &pci->cfg.res);
if (err) {
@@ -186,10 +197,9 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
bus_range = pci->cfg.bus_range;
for (busn = bus_range->start; busn <= bus_range->end; ++busn) {
u32 idx = busn - bus_range->start;
- u32 sz = 1 << pci->cfg.ops->bus_shift;
pci->cfg.win[idx] = devm_ioremap(dev,
- pci->cfg.res.start + busn * sz,
+ pci->cfg.res.start + idx * sz,
sz);
if (!pci->cfg.win[idx])
return -ENOMEM;
@@ -198,29 +208,15 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
return 0;
}
-static int gen_pci_setup(int nr, struct pci_sys_data *sys)
-{
- struct gen_pci *pci = sys->private_data;
- list_splice_init(&pci->resources, &sys->resources);
- return 1;
-}
-
static int gen_pci_probe(struct platform_device *pdev)
{
int err;
const char *type;
const struct of_device_id *of_id;
- const int *prop;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- struct hw_pci hw = {
- .nr_controllers = 1,
- .private_data = (void **)&pci,
- .setup = gen_pci_setup,
- .map_irq = of_irq_parse_and_map_pci,
- .ops = &gen_pci_ops,
- };
+ struct pci_bus *bus, *child;
if (!pci)
return -ENOMEM;
@@ -231,17 +227,10 @@ static int gen_pci_probe(struct platform_device *pdev)
return -EINVAL;
}
- prop = of_get_property(of_chosen, "linux,pci-probe-only", NULL);
- if (prop) {
- if (*prop)
- pci_add_flags(PCI_PROBE_ONLY);
- else
- pci_clear_flags(PCI_PROBE_ONLY);
- }
+ of_pci_check_probe_only();
of_id = of_match_node(gen_pci_of_match, np);
- pci->cfg.ops = of_id->data;
- gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
+ pci->cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data;
pci->host.dev.parent = dev;
INIT_LIST_HEAD(&pci->host.windows);
INIT_LIST_HEAD(&pci->resources);
@@ -258,7 +247,29 @@ static int gen_pci_probe(struct platform_device *pdev)
return err;
}
- pci_common_init_dev(dev, &hw);
+ /* Do not reassign resources if probe only */
+ if (!pci_has_flag(PCI_PROBE_ONLY))
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
+
+
+ bus = pci_scan_root_bus(dev, pci->cfg.bus_range->start,
+ &pci->cfg.ops->ops, pci, &pci->resources);
+ if (!bus) {
+ dev_err(dev, "Scanning rootbus failed");
+ return -ENODEV;
+ }
+
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ }
+
+ pci_bus_add_devices(bus);
return 0;
}
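
pci-host-generic now stores the gen_pci pointer itself in bus->sysdata; the comment in the hunk explains that embedding struct pci_sys_data as the first member keeps the ARM pcibios code working, since a pointer to the structure is then also a valid pointer to its first member. A toy sketch of that layout trick (both struct definitions here are simplified stand-ins):

	#include <stdio.h>

	struct pci_sys_data { int domain; };		/* stand-in for the ARM type */

	struct gen_pci {
		struct pci_sys_data sys;		/* must remain the first member */
		void *cfg_win;
	};

	int main(void)
	{
		struct gen_pci pci = { .sys = { .domain = 0 }, .cfg_win = NULL };
		void *sysdata = &pci;			/* what goes into bus->sysdata */

		struct pci_sys_data *sys = sysdata;	/* ARM pcibios view */
		struct gen_pci *p = sysdata;		/* host driver view */

		printf("domain %d, gen_pci at %p\n", sys->domain, (void *)p);
		return 0;
	}
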
diff --git a/kernel/drivers/pci/host/pci-imx6.c b/kernel/drivers/pci/host/pci-imx6.c
index fdb953677..9ce7cd148 100644
--- a/kernel/drivers/pci/host/pci-imx6.c
+++ b/kernel/drivers/pci/host/pci-imx6.c
@@ -47,6 +47,8 @@ struct imx6_pcie {
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf
+#define PCIE_RC_LCSR 0x80
+
/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
@@ -72,6 +74,7 @@ struct imx6_pcie {
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D
+#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
@@ -115,11 +118,7 @@ static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
val = addr << PCIE_PHY_CTRL_DATA_LOC;
writel(val, dbi_base + PCIE_PHY_CTRL);
- ret = pcie_phy_poll_ack(dbi_base, 0);
- if (ret)
- return ret;
-
- return 0;
+ return pcie_phy_poll_ack(dbi_base, 0);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
@@ -146,11 +145,7 @@ static int pcie_phy_read(void __iomem *dbi_base, int addr , int *data)
/* deassert Read signal */
writel(0x00, dbi_base + PCIE_PHY_CTRL);
- ret = pcie_phy_poll_ack(dbi_base, 0);
- if (ret)
- return ret;
-
- return 0;
+ return pcie_phy_poll_ack(dbi_base, 0);
}
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
@@ -335,21 +330,36 @@ static void imx6_pcie_init_phy(struct pcie_port *pp)
static int imx6_pcie_wait_for_link(struct pcie_port *pp)
{
- int count = 200;
+ unsigned int retries;
- while (!dw_pcie_link_up(pp)) {
+ for (retries = 0; retries < 200; retries++) {
+ if (dw_pcie_link_up(pp))
+ return 0;
usleep_range(100, 1000);
- if (--count)
- continue;
-
- dev_err(pp->dev, "phy link never came up\n");
- dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
- return -EINVAL;
}
- return 0;
+ dev_err(pp->dev, "phy link never came up\n");
+ dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+ return -EINVAL;
+}
+
+static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp)
+{
+ u32 tmp;
+ unsigned int retries;
+
+ for (retries = 0; retries < 200; retries++) {
+ tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ /* Test if the speed change finished. */
+ if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
+ return 0;
+ usleep_range(100, 1000);
+ }
+
+ dev_err(pp->dev, "Speed change timeout\n");
+ return -EINVAL;
}
static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
@@ -359,11 +369,11 @@ static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
return dw_handle_msi_irq(pp);
}
-static int imx6_pcie_start_link(struct pcie_port *pp)
+static int imx6_pcie_establish_link(struct pcie_port *pp)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
- uint32_t tmp;
- int ret, count;
+ u32 tmp;
+ int ret;
/*
* Force Gen1 operation when starting the link. In case the link is
@@ -397,29 +407,22 @@ static int imx6_pcie_start_link(struct pcie_port *pp)
tmp |= PORT_LOGIC_SPEED_CHANGE;
writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
- count = 200;
- while (count--) {
- tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
- /* Test if the speed change finished. */
- if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
- break;
- usleep_range(100, 1000);
+ ret = imx6_pcie_wait_for_speed_change(pp);
+ if (ret) {
+ dev_err(pp->dev, "Failed to bring link up!\n");
+ return ret;
}
/* Make sure link training is finished as well! */
- if (count)
- ret = imx6_pcie_wait_for_link(pp);
- else
- ret = -EINVAL;
-
+ ret = imx6_pcie_wait_for_link(pp);
if (ret) {
dev_err(pp->dev, "Failed to bring link up!\n");
- } else {
- tmp = readl(pp->dbi_base + 0x80);
- dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
+ return ret;
}
- return ret;
+ tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
+ dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
+ return 0;
}
static void imx6_pcie_host_init(struct pcie_port *pp)
@@ -432,7 +435,7 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
- imx6_pcie_start_link(pp);
+ imx6_pcie_establish_link(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
@@ -440,19 +443,19 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
static void imx6_pcie_reset_phy(struct pcie_port *pp)
{
- uint32_t temp;
+ u32 tmp;
- pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
- temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
- PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
+ pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+ tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
usleep_range(2000, 3000);
- pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
- temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+ tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
+ pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
}
static int imx6_pcie_link_up(struct pcie_port *pp)
@@ -501,7 +504,7 @@ static int imx6_pcie_link_up(struct pcie_port *pp)
pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);
- if (rx_valid & 0x01)
+ if (rx_valid & PCIE_PHY_RX_ASIC_OUT_VALID)
return 0;
if ((debug_r0 & 0x3f) != 0x0d)
@@ -534,10 +537,11 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
imx6_pcie_msi_handler,
- IRQF_SHARED, "mx6-pcie-msi", pp);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "mx6-pcie-msi", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request MSI irq\n");
- return -ENODEV;
+ return ret;
}
}
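
imx6_pcie_wait_for_link() and the new imx6_pcie_wait_for_speed_change() now share the same bounded-retry shape: poll up to 200 times, return 0 as soon as the condition holds, and report the timeout once after the loop. A compact sketch of that shape with a stubbed poll function (poll_hw() and the counts are placeholders):

	#include <stdbool.h>
	#include <stdio.h>

	static int attempts;
	static bool poll_hw(void) { return ++attempts >= 5; }	/* pretend the link comes up */

	static int wait_for_condition(void)
	{
		for (unsigned int retries = 0; retries < 200; retries++) {
			if (poll_hw())
				return 0;		/* success: return immediately */
			/* the driver sleeps here with usleep_range(100, 1000) */
		}
		return -1;				/* timed out: report once, after the loop */
	}

	int main(void)
	{
		printf("wait_for_condition() = %d after %d polls\n",
		       wait_for_condition(), attempts);
		return 0;
	}
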
diff --git a/kernel/drivers/pci/host/pci-keystone-dw.c b/kernel/drivers/pci/host/pci-keystone-dw.c
index f34892e0e..6153853ca 100644
--- a/kernel/drivers/pci/host/pci-keystone-dw.c
+++ b/kernel/drivers/pci/host/pci-keystone-dw.c
@@ -58,11 +58,6 @@
#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
-static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
-{
- return sys->private_data;
-}
-
static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
u32 *bit_pos)
{
@@ -70,7 +65,7 @@ static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
*bit_pos = offset >> 3;
}
-u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
+phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
@@ -104,14 +99,13 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
{
u32 offset, reg_offset, bit_pos;
struct keystone_pcie *ks_pcie;
- unsigned int irq = d->irq;
struct msi_desc *msi;
struct pcie_port *pp;
- msi = irq_get_msi_desc(irq);
- pp = sys_to_pcie(msi->dev->bus->sysdata);
+ msi = irq_data_get_msi_desc(d);
+ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
ks_pcie = to_keystone_pcie(pp);
- offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
writel(BIT(bit_pos),
@@ -142,15 +136,14 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
{
struct keystone_pcie *ks_pcie;
- unsigned int irq = d->irq;
struct msi_desc *msi;
struct pcie_port *pp;
u32 offset;
- msi = irq_get_msi_desc(irq);
- pp = sys_to_pcie(msi->dev->bus->sysdata);
+ msi = irq_data_get_msi_desc(d);
+ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
ks_pcie = to_keystone_pcie(pp);
- offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
/* Mask the end point if PVM implemented */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -164,15 +157,14 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
struct keystone_pcie *ks_pcie;
- unsigned int irq = d->irq;
struct msi_desc *msi;
struct pcie_port *pp;
u32 offset;
- msi = irq_get_msi_desc(irq);
- pp = sys_to_pcie(msi->dev->bus->sysdata);
+ msi = irq_data_get_msi_desc(d);
+ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
ks_pcie = to_keystone_pcie(pp);
- offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
/* Mask the end point if PVM implemented */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -196,7 +188,6 @@ static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -277,7 +268,6 @@ static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, d->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -327,7 +317,7 @@ static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
struct pcie_port *pp = &ks_pcie->pp;
- u32 start = pp->mem.start, end = pp->mem.end;
+ u32 start = pp->mem->start, end = pp->mem->end;
int i, tr_size;
/* Disable BARs for inbound access */
@@ -403,7 +393,7 @@ int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
- return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val);
+ return dw_pcie_cfg_read(addr + where, size, val);
}
int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
@@ -415,7 +405,7 @@ int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
- return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val);
+ return dw_pcie_cfg_write(addr + where, size, val);
}
/**
diff --git a/kernel/drivers/pci/host/pci-keystone.c b/kernel/drivers/pci/host/pci-keystone.c
index 75333b0c4..0aa81bd3d 100644
--- a/kernel/drivers/pci/host/pci-keystone.c
+++ b/kernel/drivers/pci/host/pci-keystone.c
@@ -88,7 +88,7 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
struct pcie_port *pp = &ks_pcie->pp;
- int count = 200;
+ unsigned int retries;
dw_pcie_setup_rc(pp);
@@ -99,21 +99,20 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
ks_dw_pcie_initiate_link_train(ks_pcie);
/* check if the link is up or not */
- while (!dw_pcie_link_up(pp)) {
+ for (retries = 0; retries < 200; retries++) {
+ if (dw_pcie_link_up(pp))
+ return 0;
usleep_range(100, 1000);
- if (--count) {
- ks_dw_pcie_initiate_link_train(ks_pcie);
- continue;
- }
- dev_err(pp->dev, "phy link never came up\n");
- return -EINVAL;
+ ks_dw_pcie_initiate_link_train(ks_pcie);
}
- return 0;
+ dev_err(pp->dev, "phy link never came up\n");
+ return -EINVAL;
}
-static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
u32 offset = irq - ks_pcie->msi_host_irqs[0];
struct pcie_port *pp = &ks_pcie->pp;
@@ -139,8 +138,9 @@ static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
* Traverse through pending legacy interrupts and invoke handler for each. Also
* takes care of interrupt controller level mask/ack operation.
*/
-static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
struct pcie_port *pp = &ks_pcie->pp;
u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
@@ -214,19 +214,18 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
/* Legacy IRQ */
for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
- irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie);
- irq_set_chained_handler(ks_pcie->legacy_host_irqs[i],
- ks_pcie_legacy_irq_handler);
+ irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
+ ks_pcie_legacy_irq_handler,
+ ks_pcie);
}
ks_dw_pcie_enable_legacy_irqs(ks_pcie);
/* MSI IRQ */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
- irq_set_chained_handler(ks_pcie->msi_host_irqs[i],
- ks_pcie_msi_irq_handler);
- irq_set_handler_data(ks_pcie->msi_host_irqs[i],
- ks_pcie);
+ irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i],
+ ks_pcie_msi_irq_handler,
+ ks_pcie);
}
}
}
diff --git a/kernel/drivers/pci/host/pci-keystone.h b/kernel/drivers/pci/host/pci-keystone.h
index 478d932b6..f0944e8c4 100644
--- a/kernel/drivers/pci/host/pci-keystone.h
+++ b/kernel/drivers/pci/host/pci-keystone.h
@@ -37,7 +37,7 @@ struct keystone_pcie {
/* Keystone DW specific MSI controller APIs/definitions */
void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset);
-u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
+phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
/* Keystone specific PCI controller APIs */
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
diff --git a/kernel/drivers/pci/host/pci-layerscape.c b/kernel/drivers/pci/host/pci-layerscape.c
index 4a6e62f67..3923bed93 100644
--- a/kernel/drivers/pci/host/pci-layerscape.c
+++ b/kernel/drivers/pci/host/pci-layerscape.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2014 Freescale Semiconductor.
*
- * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
+ * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -11,7 +11,6 @@
*/
#include <linux/kernel.h>
-#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_pci.h>
@@ -32,27 +31,60 @@
#define LTSSM_STATE_MASK 0x3f
#define LTSSM_PCIE_L0 0x11 /* L0 state */
-/* Symbol Timer Register and Filter Mask Register 1 */
-#define PCIE_STRFMR1 0x71c
+/* PEX Internal Configuration Registers */
+#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
+#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */
+
+/* PEX LUT registers */
+#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */
+
+struct ls_pcie_drvdata {
+ u32 lut_offset;
+ u32 ltssm_shift;
+ struct pcie_host_ops *ops;
+};
struct ls_pcie {
- struct list_head node;
- struct device *dev;
- struct pci_bus *bus;
void __iomem *dbi;
+ void __iomem *lut;
struct regmap *scfg;
struct pcie_port pp;
+ const struct ls_pcie_drvdata *drvdata;
int index;
- int msi_irq;
};
#define to_ls_pcie(x) container_of(x, struct ls_pcie, pp)
-static int ls_pcie_link_up(struct pcie_port *pp)
+static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
+{
+ u32 header_type;
+
+ header_type = ioread8(pcie->dbi + PCI_HEADER_TYPE);
+ header_type &= 0x7f;
+
+ return header_type == PCI_HEADER_TYPE_BRIDGE;
+}
+
+/* Clear multi-function bit */
+static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
+{
+ iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
+}
+
+/* Fix class value */
+static void ls_pcie_fix_class(struct ls_pcie *pcie)
+{
+ iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
+}
+
+static int ls1021_pcie_link_up(struct pcie_port *pp)
{
u32 state;
struct ls_pcie *pcie = to_ls_pcie(pp);
+ if (!pcie->scfg)
+ return 0;
+
regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state);
state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
@@ -62,22 +94,27 @@ static int ls_pcie_link_up(struct pcie_port *pp)
return 1;
}
-static void ls_pcie_host_init(struct pcie_port *pp)
+static void ls1021_pcie_host_init(struct pcie_port *pp)
{
struct ls_pcie *pcie = to_ls_pcie(pp);
- int count = 0;
- u32 val;
+ u32 val, index[2];
- dw_pcie_setup_rc(pp);
+ pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
+ "fsl,pcie-scfg");
+ if (IS_ERR(pcie->scfg)) {
+ dev_err(pp->dev, "No syscfg phandle specified\n");
+ pcie->scfg = NULL;
+ return;
+ }
- while (!ls_pcie_link_up(pp)) {
- usleep_range(100, 1000);
- count++;
- if (count >= 200) {
- dev_err(pp->dev, "phy link never came up\n");
- return;
- }
+ if (of_property_read_u32_array(pp->dev->of_node,
+ "fsl,pcie-scfg", index, 2)) {
+ pcie->scfg = NULL;
+ return;
}
+ pcie->index = index[1];
+
+ dw_pcie_setup_rc(pp);
/*
* LS1021A Workaround for internal TKT228622
@@ -88,21 +125,97 @@ static void ls_pcie_host_init(struct pcie_port *pp)
iowrite32(val, pcie->dbi + PCIE_STRFMR1);
}
+static int ls_pcie_link_up(struct pcie_port *pp)
+{
+ struct ls_pcie *pcie = to_ls_pcie(pp);
+ u32 state;
+
+ state = (ioread32(pcie->lut + PCIE_LUT_DBG) >>
+ pcie->drvdata->ltssm_shift) &
+ LTSSM_STATE_MASK;
+
+ if (state < LTSSM_PCIE_L0)
+ return 0;
+
+ return 1;
+}
+
+static void ls_pcie_host_init(struct pcie_port *pp)
+{
+ struct ls_pcie *pcie = to_ls_pcie(pp);
+
+ iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
+ ls_pcie_fix_class(pcie);
+ ls_pcie_clear_multifunction(pcie);
+ iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
+}
+
+static int ls_pcie_msi_host_init(struct pcie_port *pp,
+ struct msi_controller *chip)
+{
+ struct device_node *msi_node;
+ struct device_node *np = pp->dev->of_node;
+
+ /*
+ * The MSI domain is set by the generic of_msi_configure(). This
+ * .msi_host_init() function keeps us from doing the default MSI
+ * domain setup in dw_pcie_host_init() and also enforces the
+ * requirement that "msi-parent" exists.
+ */
+ msi_node = of_parse_phandle(np, "msi-parent", 0);
+ if (!msi_node) {
+ dev_err(pp->dev, "failed to find msi-parent\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct pcie_host_ops ls1021_pcie_host_ops = {
+ .link_up = ls1021_pcie_link_up,
+ .host_init = ls1021_pcie_host_init,
+ .msi_host_init = ls_pcie_msi_host_init,
+};
+
static struct pcie_host_ops ls_pcie_host_ops = {
.link_up = ls_pcie_link_up,
.host_init = ls_pcie_host_init,
+ .msi_host_init = ls_pcie_msi_host_init,
};
-static int ls_add_pcie_port(struct ls_pcie *pcie)
+static struct ls_pcie_drvdata ls1021_drvdata = {
+ .ops = &ls1021_pcie_host_ops,
+};
+
+static struct ls_pcie_drvdata ls1043_drvdata = {
+ .lut_offset = 0x10000,
+ .ltssm_shift = 24,
+ .ops = &ls_pcie_host_ops,
+};
+
+static struct ls_pcie_drvdata ls2080_drvdata = {
+ .lut_offset = 0x80000,
+ .ltssm_shift = 0,
+ .ops = &ls_pcie_host_ops,
+};
+
+static const struct of_device_id ls_pcie_of_match[] = {
+ { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
+ { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
+
+static int __init ls_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
{
- struct pcie_port *pp;
int ret;
+ struct ls_pcie *pcie = to_ls_pcie(pp);
- pp = &pcie->pp;
- pp->dev = pcie->dev;
+ pp->dev = &pdev->dev;
pp->dbi_base = pcie->dbi;
- pp->root_bus_nr = -1;
- pp->ops = &ls_pcie_host_ops;
+ pp->ops = pcie->drvdata->ops;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -115,17 +228,19 @@ static int ls_add_pcie_port(struct ls_pcie *pcie)
static int __init ls_pcie_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct ls_pcie *pcie;
struct resource *dbi_base;
- u32 index[2];
int ret;
+ match = of_match_device(ls_pcie_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
- pcie->dev = &pdev->dev;
-
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base);
if (IS_ERR(pcie->dbi)) {
@@ -133,20 +248,13 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->dbi);
}
- pcie->scfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "fsl,pcie-scfg");
- if (IS_ERR(pcie->scfg)) {
- dev_err(&pdev->dev, "No syscfg phandle specified\n");
- return PTR_ERR(pcie->scfg);
- }
+ pcie->drvdata = match->data;
+ pcie->lut = pcie->dbi + pcie->drvdata->lut_offset;
- ret = of_property_read_u32_array(pdev->dev.of_node,
- "fsl,pcie-scfg", index, 2);
- if (ret)
- return ret;
- pcie->index = index[1];
+ if (!ls_pcie_is_bridge(pcie))
+ return -ENODEV;
- ret = ls_add_pcie_port(pcie);
+ ret = ls_add_pcie_port(&pcie->pp, pdev);
if (ret < 0)
return ret;
@@ -155,12 +263,6 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id ls_pcie_of_match[] = {
- { .compatible = "fsl,ls1021a-pcie" },
- { },
-};
-MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
-
static struct platform_driver ls_pcie_driver = {
.driver = {
.name = "layerscape-pcie",
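
The Layerscape changes above replace the single hard-wired SoC setup with an of_device_id table whose .data points at per-SoC ls_pcie_drvdata (LUT offset, LTSSM shift, host ops), selected in probe via of_match_device(). A miniature, self-contained version of that lookup pattern (all names and values here are illustrative, not the driver's real ones):

	#include <stdio.h>
	#include <string.h>

	struct drvdata { unsigned int lut_offset; const char *ops; };

	static const struct drvdata ls1021 = { 0x0,     "ls1021_pcie_host_ops" };
	static const struct drvdata ls1043 = { 0x10000, "ls_pcie_host_ops" };

	struct of_id { const char *compatible; const struct drvdata *data; };

	static const struct of_id match_table[] = {
		{ "fsl,ls1021a-pcie", &ls1021 },
		{ "fsl,ls1043a-pcie", &ls1043 },
		{ NULL, NULL },
	};

	static const struct drvdata *match(const char *compat)
	{
		for (const struct of_id *id = match_table; id->compatible; id++)
			if (!strcmp(id->compatible, compat))
				return id->data;	/* like of_match_device()->data */
		return NULL;
	}

	int main(void)
	{
		const struct drvdata *d = match("fsl,ls1043a-pcie");

		if (d)
			printf("lut at +%#x, ops = %s\n", d->lut_offset, d->ops);
		return 0;
	}
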
diff --git a/kernel/drivers/pci/host/pci-mvebu.c b/kernel/drivers/pci/host/pci-mvebu.c
index 1ab863551..53b79c5f0 100644
--- a/kernel/drivers/pci/host/pci-mvebu.c
+++ b/kernel/drivers/pci/host/pci-mvebu.c
@@ -30,6 +30,7 @@
#define PCIE_DEV_REV_OFF 0x0008
#define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
#define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
+#define PCIE_CAP_PCIEXP 0x0060
#define PCIE_HEADER_LOG_4_OFF 0x0128
#define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4))
#define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
@@ -57,14 +58,35 @@
#define PCIE_STAT_BUS 0xff00
#define PCIE_STAT_DEV 0x1f0000
#define PCIE_STAT_LINK_DOWN BIT(0)
+#define PCIE_RC_RTSTA 0x1a14
#define PCIE_DEBUG_CTRL 0x1a60
#define PCIE_DEBUG_SOFT_RESET BIT(20)
+enum {
+ PCISWCAP = PCI_BRIDGE_CONTROL + 2,
+ PCISWCAP_EXP_LIST_ID = PCISWCAP + PCI_CAP_LIST_ID,
+ PCISWCAP_EXP_DEVCAP = PCISWCAP + PCI_EXP_DEVCAP,
+ PCISWCAP_EXP_DEVCTL = PCISWCAP + PCI_EXP_DEVCTL,
+ PCISWCAP_EXP_LNKCAP = PCISWCAP + PCI_EXP_LNKCAP,
+ PCISWCAP_EXP_LNKCTL = PCISWCAP + PCI_EXP_LNKCTL,
+ PCISWCAP_EXP_SLTCAP = PCISWCAP + PCI_EXP_SLTCAP,
+ PCISWCAP_EXP_SLTCTL = PCISWCAP + PCI_EXP_SLTCTL,
+ PCISWCAP_EXP_RTCTL = PCISWCAP + PCI_EXP_RTCTL,
+ PCISWCAP_EXP_RTSTA = PCISWCAP + PCI_EXP_RTSTA,
+ PCISWCAP_EXP_DEVCAP2 = PCISWCAP + PCI_EXP_DEVCAP2,
+ PCISWCAP_EXP_DEVCTL2 = PCISWCAP + PCI_EXP_DEVCTL2,
+ PCISWCAP_EXP_LNKCAP2 = PCISWCAP + PCI_EXP_LNKCAP2,
+ PCISWCAP_EXP_LNKCTL2 = PCISWCAP + PCI_EXP_LNKCTL2,
+ PCISWCAP_EXP_SLTCAP2 = PCISWCAP + PCI_EXP_SLTCAP2,
+ PCISWCAP_EXP_SLTCTL2 = PCISWCAP + PCI_EXP_SLTCTL2,
+};
+
/* PCI configuration space of a PCI-to-PCI bridge */
struct mvebu_sw_pci_bridge {
u16 vendor;
u16 device;
u16 command;
+ u16 status;
u16 class;
u8 interface;
u8 revision;
@@ -84,13 +106,15 @@ struct mvebu_sw_pci_bridge {
u16 memlimit;
u16 iobaseupper;
u16 iolimitupper;
- u8 cappointer;
- u8 reserved1;
- u16 reserved2;
u32 romaddr;
u8 intline;
u8 intpin;
u16 bridgectrl;
+
+ /* PCI express capability */
+ u32 pcie_sltcap;
+ u16 pcie_devctl;
+ u16 pcie_rtctl;
};
struct mvebu_pcie_port;
@@ -119,8 +143,7 @@ struct mvebu_pcie_port {
unsigned int io_target;
unsigned int io_attr;
struct clk *clk;
- int reset_gpio;
- int reset_active_low;
+ struct gpio_desc *reset_gpio;
char *reset_name;
struct mvebu_sw_pci_bridge bridge;
struct device_node *dn;
@@ -254,15 +277,22 @@ static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val)
{
+ void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;
+
mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
PCIE_CONF_ADDR_OFF);
- *val = mvebu_readl(port, PCIE_CONF_DATA_OFF);
-
- if (size == 1)
- *val = (*val >> (8 * (where & 3))) & 0xff;
- else if (size == 2)
- *val = (*val >> (8 * (where & 3))) & 0xffff;
+ switch (size) {
+ case 1:
+ *val = readb_relaxed(conf_data + (where & 3));
+ break;
+ case 2:
+ *val = readw_relaxed(conf_data + (where & 2));
+ break;
+ case 4:
+ *val = readl_relaxed(conf_data);
+ break;
+ }
return PCIBIOS_SUCCESSFUL;
}
@@ -271,22 +301,24 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
struct pci_bus *bus,
u32 devfn, int where, int size, u32 val)
{
- u32 _val, shift = 8 * (where & 3);
+ void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;
mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
PCIE_CONF_ADDR_OFF);
- _val = mvebu_readl(port, PCIE_CONF_DATA_OFF);
- if (size == 4)
- _val = val;
- else if (size == 2)
- _val = (_val & ~(0xffff << shift)) | ((val & 0xffff) << shift);
- else if (size == 1)
- _val = (_val & ~(0xff << shift)) | ((val & 0xff) << shift);
- else
+ switch (size) {
+ case 1:
+ writeb(val, conf_data + (where & 3));
+ break;
+ case 2:
+ writew(val, conf_data + (where & 2));
+ break;
+ case 4:
+ writel(val, conf_data);
+ break;
+ default:
return PCIBIOS_BAD_REGISTER_NUMBER;
-
- mvebu_writel(port, _val, PCIE_CONF_DATA_OFF);
+ }
return PCIBIOS_SUCCESSFUL;
}
@@ -443,6 +475,9 @@ static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
/* We support 32 bits I/O addressing */
bridge->iobase = PCI_IO_RANGE_TYPE_32;
bridge->iolimit = PCI_IO_RANGE_TYPE_32;
+
+ /* Add capabilities */
+ bridge->status = PCI_STATUS_CAP_LIST;
}
/*
@@ -460,7 +495,7 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
break;
case PCI_COMMAND:
- *value = bridge->command;
+ *value = bridge->command | bridge->status << 16;
break;
case PCI_CLASS_REVISION:
@@ -505,6 +540,10 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
*value = (bridge->iolimitupper << 16 | bridge->iobaseupper);
break;
+ case PCI_CAPABILITY_LIST:
+ *value = PCISWCAP;
+ break;
+
case PCI_ROM_ADDRESS1:
*value = 0;
break;
@@ -514,9 +553,67 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
*value = 0;
break;
+ case PCISWCAP_EXP_LIST_ID:
+ /* Set PCIe v2, root port, slot support */
+ *value = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
+ PCI_EXP_FLAGS_SLOT) << 16 | PCI_CAP_ID_EXP;
+ break;
+
+ case PCISWCAP_EXP_DEVCAP:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
+ break;
+
+ case PCISWCAP_EXP_DEVCTL:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
+ ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
+ *value |= bridge->pcie_devctl;
+ break;
+
+ case PCISWCAP_EXP_LNKCAP:
+ /*
+ * PCIe requires the clock power management capability to be
+ * hard-wired to zero for downstream ports
+ */
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
+ ~PCI_EXP_LNKCAP_CLKPM;
+ break;
+
+ case PCISWCAP_EXP_LNKCTL:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
+ break;
+
+ case PCISWCAP_EXP_SLTCAP:
+ *value = bridge->pcie_sltcap;
+ break;
+
+ case PCISWCAP_EXP_SLTCTL:
+ *value = PCI_EXP_SLTSTA_PDS << 16;
+ break;
+
+ case PCISWCAP_EXP_RTCTL:
+ *value = bridge->pcie_rtctl;
+ break;
+
+ case PCISWCAP_EXP_RTSTA:
+ *value = mvebu_readl(port, PCIE_RC_RTSTA);
+ break;
+
+ /* PCIe requires the v2 fields to be hard-wired to zero */
+ case PCISWCAP_EXP_DEVCAP2:
+ case PCISWCAP_EXP_DEVCTL2:
+ case PCISWCAP_EXP_LNKCAP2:
+ case PCISWCAP_EXP_LNKCTL2:
+ case PCISWCAP_EXP_SLTCAP2:
+ case PCISWCAP_EXP_SLTCTL2:
default:
- *value = 0xffffffff;
- return PCIBIOS_BAD_REGISTER_NUMBER;
+ /*
+ * PCI defines configuration read accesses to reserved or
+ * unimplemented registers to read as zero and complete
+ * normally.
+ */
+ *value = 0;
+ return PCIBIOS_SUCCESSFUL;
}
if (size == 2)
@@ -601,6 +698,51 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus);
break;
+ case PCISWCAP_EXP_DEVCTL:
+ /*
+ * The Armada 370 documentation says these bits must always
+ * be zero when in root complex mode.
+ */
+ value &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
+
+ /*
+ * If the mask is 0xffff0000, then we only want to write
+ * the device control register, rather than clearing the
+ * RW1C bits in the device status register. Mask out the
+ * status register bits.
+ */
+ if (mask == 0xffff0000)
+ value &= 0xffff;
+
+ mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
+ break;
+
+ case PCISWCAP_EXP_LNKCTL:
+ /*
+ * If we don't support CLKREQ, we must ensure that the
+ * CLKREQ enable bit always reads zero. Since we have never
+ * supported this capability, and it depends on board wiring,
+ * disable it for the time being.
+ */
+ value &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
+
+ /*
+ * If the mask is 0xffff0000, then we only want to write
+ * the link control register, rather than clearing the
+ * RW1C bits in the link status register. Mask out the
+ * status register bits.
+ */
+ if (mask == 0xffff0000)
+ value &= 0xffff;
+
+ mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
+ break;
+
+ case PCISWCAP_EXP_RTSTA:
+ mvebu_writel(port, value, PCIE_RC_RTSTA);
+ break;
+
default:
break;
}
@@ -652,17 +794,6 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (!mvebu_pcie_link_up(port))
return PCIBIOS_DEVICE_NOT_FOUND;
- /*
- * On the secondary bus, we don't want to expose any other
- * device than the device physically connected in the PCIe
- * slot, visible in slot 0. In slot 1, there's a special
- * Marvell device that only makes sense when the Armada is
- * used as a PCIe endpoint.
- */
- if (bus->number == port->bridge.secondary_bus &&
- PCI_SLOT(devfn) != 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
/* Access the real PCIe interface */
ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
where, size, val);
@@ -693,19 +824,6 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
return PCIBIOS_DEVICE_NOT_FOUND;
}
- /*
- * On the secondary bus, we don't want to expose any other
- * device than the device physically connected in the PCIe
- * slot, visible in slot 0. In slot 1, there's a special
- * Marvell device that only makes sense when the Armada is
- * used as a PCIe endpoint.
- */
- if (bus->number == port->bridge.secondary_bus &&
- PCI_SLOT(devfn) != 0) {
- *val = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
/* Access the real PCIe interface */
ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
where, size, val);
@@ -751,21 +869,6 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
return 1;
}
-static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
-{
- struct mvebu_pcie *pcie = sys_to_pcie(sys);
- struct pci_bus *bus;
-
- bus = pci_create_root_bus(&pcie->pdev->dev, sys->busnr,
- &mvebu_pcie_ops, sys, &sys->resources);
- if (!bus)
- return NULL;
-
- pci_scan_child_bus(bus);
-
- return bus;
-}
-
static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
const struct resource *res,
resource_size_t start,
@@ -809,12 +912,11 @@ static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
hw.nr_controllers = 1;
hw.private_data = (void **)&pcie;
hw.setup = mvebu_pcie_setup;
- hw.scan = mvebu_pcie_scan_bus;
hw.map_irq = of_irq_parse_and_map_pci;
hw.ops = &mvebu_pcie_ops;
hw.align_resource = mvebu_pcie_align_resource;
- pci_common_init(&hw);
+ pci_common_init_dev(&pcie->pdev->dev, &hw);
}
/*
@@ -895,6 +997,7 @@ static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
return;
pcie->msi = of_pci_find_msi_chip_by_node(msi_node);
+ of_node_put(msi_node);
if (pcie->msi)
pcie->msi->dev = &pcie->pdev->dev;
@@ -929,12 +1032,167 @@ static int mvebu_pcie_resume(struct device *dev)
return 0;
}
+static void mvebu_pcie_port_clk_put(void *data)
+{
+ struct mvebu_pcie_port *port = data;
+
+ clk_put(port->clk);
+}
+
+static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
+ struct mvebu_pcie_port *port, struct device_node *child)
+{
+ struct device *dev = &pcie->pdev->dev;
+ enum of_gpio_flags flags;
+ int reset_gpio, ret;
+
+ port->pcie = pcie;
+
+ if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
+ dev_warn(dev, "ignoring %s, missing pcie-port property\n",
+ of_node_full_name(child));
+ goto skip;
+ }
+
+ if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
+ port->lane = 0;
+
+ port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
+ port->lane);
+ if (!port->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ port->devfn = of_pci_get_devfn(child);
+ if (port->devfn < 0)
+ goto skip;
+
+ ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
+ &port->mem_target, &port->mem_attr);
+ if (ret < 0) {
+ dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
+ port->name);
+ goto skip;
+ }
+
+ if (resource_size(&pcie->io) != 0) {
+ mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
+ &port->io_target, &port->io_attr);
+ } else {
+ port->io_target = -1;
+ port->io_attr = -1;
+ }
+
+ reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
+ if (reset_gpio == -EPROBE_DEFER) {
+ ret = reset_gpio;
+ goto err;
+ }
+
+ if (gpio_is_valid(reset_gpio)) {
+ unsigned long gpio_flags;
+
+ port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
+ port->name);
+ if (!port->reset_name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (flags & OF_GPIO_ACTIVE_LOW) {
+ dev_info(dev, "%s: reset gpio is active low\n",
+ of_node_full_name(child));
+ gpio_flags = GPIOF_ACTIVE_LOW |
+ GPIOF_OUT_INIT_LOW;
+ } else {
+ gpio_flags = GPIOF_OUT_INIT_HIGH;
+ }
+
+ ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
+ port->reset_name);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ goto err;
+ goto skip;
+ }
+
+ port->reset_gpio = gpio_to_desc(reset_gpio);
+ }
+
+ port->clk = of_clk_get_by_name(child, NULL);
+ if (IS_ERR(port->clk)) {
+ dev_err(dev, "%s: cannot get clock\n", port->name);
+ goto skip;
+ }
+
+ ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
+ if (ret < 0) {
+ clk_put(port->clk);
+ goto err;
+ }
+
+ return 1;
+
+skip:
+ ret = 0;
+
+ /* In the case of skipping, we need to free these */
+ devm_kfree(dev, port->reset_name);
+ port->reset_name = NULL;
+ devm_kfree(dev, port->name);
+ port->name = NULL;
+
+err:
+ return ret;
+}
+
+/*
+ * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
+ * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
+ * of the PCI Express Card Electromechanical Specification, 1.1.
+ */
+static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
+{
+ int ret;
+
+ ret = clk_prepare_enable(port->clk);
+ if (ret < 0)
+ return ret;
+
+ if (port->reset_gpio) {
+ u32 reset_udelay = 20000;
+
+ of_property_read_u32(port->dn, "reset-delay-us",
+ &reset_udelay);
+
+ udelay(100);
+
+ gpiod_set_value_cansleep(port->reset_gpio, 0);
+ msleep(reset_udelay / 1000);
+ }
+
+ return 0;
+}
+
+/*
+ * Power down a PCIe port. Strictly, PCIe requires us to place the card
+ * in D3hot state before asserting PERST#.
+ */
+static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
+{
+ if (port->reset_gpio)
+ gpiod_set_value_cansleep(port->reset_gpio, 1);
+
+ clk_disable_unprepare(port->clk);
+}
+
static int mvebu_pcie_probe(struct platform_device *pdev)
{
struct mvebu_pcie *pcie;
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
- int i, ret;
+ int num, i, ret;
pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie),
GFP_KERNEL);
@@ -970,112 +1228,52 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
return ret;
}
- i = 0;
- for_each_child_of_node(pdev->dev.of_node, child) {
- if (!of_device_is_available(child))
- continue;
- i++;
- }
+ num = of_get_available_child_count(pdev->dev.of_node);
- pcie->ports = devm_kzalloc(&pdev->dev, i *
- sizeof(struct mvebu_pcie_port),
+ pcie->ports = devm_kcalloc(&pdev->dev, num, sizeof(*pcie->ports),
GFP_KERNEL);
if (!pcie->ports)
return -ENOMEM;
i = 0;
- for_each_child_of_node(pdev->dev.of_node, child) {
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
struct mvebu_pcie_port *port = &pcie->ports[i];
- enum of_gpio_flags flags;
-
- if (!of_device_is_available(child))
- continue;
-
- port->pcie = pcie;
-
- if (of_property_read_u32(child, "marvell,pcie-port",
- &port->port)) {
- dev_warn(&pdev->dev,
- "ignoring PCIe DT node, missing pcie-port property\n");
- continue;
- }
-
- if (of_property_read_u32(child, "marvell,pcie-lane",
- &port->lane))
- port->lane = 0;
- port->name = kasprintf(GFP_KERNEL, "pcie%d.%d",
- port->port, port->lane);
-
- port->devfn = of_pci_get_devfn(child);
- if (port->devfn < 0)
- continue;
-
- ret = mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_MEM,
- &port->mem_target, &port->mem_attr);
+ ret = mvebu_pcie_parse_port(pcie, port, child);
if (ret < 0) {
- dev_err(&pdev->dev, "PCIe%d.%d: cannot get tgt/attr for mem window\n",
- port->port, port->lane);
+ of_node_put(child);
+ return ret;
+ } else if (ret == 0) {
continue;
}
- if (resource_size(&pcie->io) != 0)
- mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO,
- &port->io_target, &port->io_attr);
- else {
- port->io_target = -1;
- port->io_attr = -1;
- }
+ port->dn = child;
+ i++;
+ }
+ pcie->nports = i;
- port->reset_gpio = of_get_named_gpio_flags(child,
- "reset-gpios", 0, &flags);
- if (gpio_is_valid(port->reset_gpio)) {
- u32 reset_udelay = 20000;
-
- port->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
- port->reset_name = kasprintf(GFP_KERNEL,
- "pcie%d.%d-reset", port->port, port->lane);
- of_property_read_u32(child, "reset-delay-us",
- &reset_udelay);
-
- ret = devm_gpio_request_one(&pdev->dev,
- port->reset_gpio, GPIOF_DIR_OUT, port->reset_name);
- if (ret) {
- if (ret == -EPROBE_DEFER)
- return ret;
- continue;
- }
-
- gpio_set_value(port->reset_gpio,
- (port->reset_active_low) ? 1 : 0);
- msleep(reset_udelay/1000);
- }
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
- port->clk = of_clk_get_by_name(child, NULL);
- if (IS_ERR(port->clk)) {
- dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
- port->port, port->lane);
+ child = port->dn;
+ if (!child)
continue;
- }
- ret = clk_prepare_enable(port->clk);
- if (ret)
+ ret = mvebu_pcie_powerup(port);
+ if (ret < 0)
continue;
port->base = mvebu_pcie_map_registers(pdev, child, port);
if (IS_ERR(port->base)) {
- dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
- port->port, port->lane);
+ dev_err(&pdev->dev, "%s: cannot map registers\n",
+ port->name);
port->base = NULL;
- clk_disable_unprepare(port->clk);
+ mvebu_pcie_powerdown(port);
continue;
}
mvebu_pcie_set_local_dev_nr(port, 1);
-
- port->dn = child;
mvebu_sw_pci_bridge_init(port);
- i++;
}
pcie->nports = i;
diff --git a/kernel/drivers/pci/host/pci-rcar-gen2.c b/kernel/drivers/pci/host/pci-rcar-gen2.c
index 367e28fa7..c4f64bfee 100644
--- a/kernel/drivers/pci/host/pci-rcar-gen2.c
+++ b/kernel/drivers/pci/host/pci-rcar-gen2.c
@@ -362,6 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
static struct of_device_id rcar_pci_of_match[] = {
{ .compatible = "renesas,pci-r8a7790", },
{ .compatible = "renesas,pci-r8a7791", },
+ { .compatible = "renesas,pci-r8a7794", },
{ },
};
diff --git a/kernel/drivers/pci/host/pci-tegra.c b/kernel/drivers/pci/host/pci-tegra.c
index 00e92720d..30323114c 100644
--- a/kernel/drivers/pci/host/pci-tegra.c
+++ b/kernel/drivers/pci/host/pci-tegra.c
@@ -382,8 +382,8 @@ static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
unsigned int busnr)
{
- pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
- L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
+ pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
phys_addr_t cs = pcie->cs->start;
struct tegra_pcie_bus *bus;
unsigned int i;
@@ -630,21 +630,6 @@ static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
return irq;
}
-static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
-{
- struct tegra_pcie *pcie = sys_to_pcie(sys);
- struct pci_bus *bus;
-
- bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
- &sys->resources);
- if (!bus)
- return NULL;
-
- pci_scan_child_bus(bus);
-
- return bus;
-}
-
static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
const char *err_msg[] = {
@@ -1263,7 +1248,6 @@ static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
tegra_cpuidle_pcie_irqs_in_use();
@@ -1304,7 +1288,7 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
msi->irq = err;
- err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
+ err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
tegra_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
@@ -1831,7 +1815,6 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
hw.private_data = (void **)&pcie;
hw.setup = tegra_pcie_setup;
hw.map_irq = tegra_pcie_map_irq;
- hw.scan = tegra_pcie_scan_bus;
hw.ops = &tegra_pcie_ops;
pci_common_init_dev(pcie->dev, &hw);
diff --git a/kernel/drivers/pci/host/pci-xgene-msi.c b/kernel/drivers/pci/host/pci-xgene-msi.c
new file mode 100644
index 000000000..a6456b578
--- /dev/null
+++ b/kernel/drivers/pci/host/pci-xgene-msi.c
@@ -0,0 +1,583 @@
+/*
+ * APM X-Gene MSI Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Tanmay Inamdar <tinamdar@apm.com>
+ * Duc Dang <dhdang@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/of_pci.h>
+
+#define MSI_IR0 0x000000
+#define MSI_INT0 0x800000
+#define IDX_PER_GROUP 8
+#define IRQS_PER_IDX 16
+#define NR_HW_IRQS 16
+#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
+
+struct xgene_msi_group {
+ struct xgene_msi *msi;
+ int gic_irq;
+ u32 msi_grp;
+};
+
+struct xgene_msi {
+ struct device_node *node;
+ struct irq_domain *inner_domain;
+ struct irq_domain *msi_domain;
+ u64 msi_addr;
+ void __iomem *msi_regs;
+ unsigned long *bitmap;
+ struct mutex bitmap_lock;
+ struct xgene_msi_group *msi_groups;
+ int num_cpus;
+};
+
+/* Global data */
+static struct xgene_msi xgene_msi_ctrl;
+
+static struct irq_chip xgene_msi_top_irq_chip = {
+ .name = "X-Gene1 MSI",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info xgene_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &xgene_msi_top_irq_chip,
+};
+
+/*
+ * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where n is
+ * the group number (0..F) and x is the register index within a group (0..7).
+ * The register layout is as follows:
+ * MSI0IR0 base_addr
+ * MSI0IR1 base_addr + 0x10000
+ * ... ...
+ * MSI0IR6 base_addr + 0x60000
+ * MSI0IR7 base_addr + 0x70000
+ * MSI1IR0 base_addr + 0x80000
+ * MSI1IR1 base_addr + 0x90000
+ * ... ...
+ * MSI1IR7 base_addr + 0xF0000
+ * MSI2IR0 base_addr + 0x100000
+ * ... ...
+ * MSIFIR0 base_addr + 0x780000
+ * MSIFIR1 base_addr + 0x790000
+ * ... ...
+ * MSIFIR7 base_addr + 0x7F0000
+ * MSIINT0 base_addr + 0x800000
+ * MSIINT1 base_addr + 0x810000
+ * ... ...
+ * MSIINTF base_addr + 0x8F0000
+ *
+ * Each index register supports 16 MSI vectors (0..15) to generate interrupts.
+ * A total of 16 GIC IRQs are assigned to these 16 groups of MSI termination
+ * registers.
+ *
+ * Each MSI termination group has one MSIINTn register (n is 0..F) that
+ * indicates which of its 8 index registers have pending MSIs.
+ */
+
+/* MSInIRx read helper */
+static u32 xgene_msi_ir_read(struct xgene_msi *msi,
+ u32 msi_grp, u32 msir_idx)
+{
+ return readl_relaxed(msi->msi_regs + MSI_IR0 +
+ (msi_grp << 19) + (msir_idx << 16));
+}
+
+/* MSIINTn read helper */
+static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
+{
+ return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
+}
+
+/*
+ * With 2048 MSI vectors supported, the MSI message can be constructed using
+ * the following scheme:
+ * - Divide into 8 256-vector groups
+ * Group 0: 0-255
+ * Group 1: 256-511
+ * Group 2: 512-767
+ * ...
+ * Group 7: 1792-2047
+ * - Each 256-vector group is divided into 16 16-vector groups
+ * As an example, the 16 16-vector groups for 256-vector group 0-255 are
+ * Group 0: 0-15
+ * Group 1: 16-31
+ * ...
+ * Group 15: 240-255
+ * - The termination address of a vector in 256-vector group x, at position n
+ * (0..15) within its 16-vector group, is the address of MSInIRx
+ * - The data for a vector in 16-vector group d is d
+ */
+static u32 hwirq_to_reg_set(unsigned long hwirq)
+{
+ return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
+}
+
+static u32 hwirq_to_group(unsigned long hwirq)
+{
+ return (hwirq % NR_HW_IRQS);
+}
+
+static u32 hwirq_to_msi_data(unsigned long hwirq)
+{
+ return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
+}
+
+static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
+ u32 reg_set = hwirq_to_reg_set(data->hwirq);
+ u32 group = hwirq_to_group(data->hwirq);
+ u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);
+
+ msg->address_hi = upper_32_bits(target_addr);
+ msg->address_lo = lower_32_bits(target_addr);
+ msg->data = hwirq_to_msi_data(data->hwirq);
+}
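+
+/*
+ * Worked example: hwirq 291 gives reg_set = 291 / 256 = 1, group = 291 % 16
+ * = 3 and data = (291 / 16) % 16 = 2, so the message targets MSI3IR1 at
+ * msi_addr + ((8 * 3 + 1) << 16) = msi_addr + 0x190000 with data 2.
+ * xgene_msi_isr() reverses this: ((1 * 16) + 2) * 16 + 3 = 291.
+ */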
+
+/*
+ * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain
+ * the expected behaviour of .set_affinity for each MSI interrupt, the 16
+ * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs
+ * for each core). An MSI vector is moved from one MSI GIC IRQ to another
+ * to steer its MSI interrupt to the correct X-Gene v1 core. As a
+ * consequence, the total number of MSI vectors that X-Gene v1 supports is
+ * reduced to 256 (2048/8) vectors.
+ */
+static int hwirq_to_cpu(unsigned long hwirq)
+{
+ return (hwirq % xgene_msi_ctrl.num_cpus);
+}
+
+static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
+{
+ return (hwirq - hwirq_to_cpu(hwirq));
+}
+
+static int xgene_msi_set_affinity(struct irq_data *irqdata,
+ const struct cpumask *mask, bool force)
+{
+ int target_cpu = cpumask_first(mask);
+ int curr_cpu;
+
+ curr_cpu = hwirq_to_cpu(irqdata->hwirq);
+ if (curr_cpu == target_cpu)
+ return IRQ_SET_MASK_OK_DONE;
+
+ /* Update MSI number to target the new CPU */
+ irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu;
+
+ return IRQ_SET_MASK_OK;
+}
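+
+/*
+ * Example with the 8 cores described above (num_cpus == 8): hwirq 291
+ * currently targets CPU 291 % 8 = 3; retargeting it to CPU 5 rewrites it
+ * to the canonical hwirq 291 - 3 = 288 plus 5, i.e. hwirq 293.
+ */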
+
+static struct irq_chip xgene_msi_bottom_irq_chip = {
+ .name = "MSI",
+ .irq_set_affinity = xgene_msi_set_affinity,
+ .irq_compose_msi_msg = xgene_compose_msi_msg,
+};
+
+static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct xgene_msi *msi = domain->host_data;
+ int msi_irq;
+
+ mutex_lock(&msi->bitmap_lock);
+
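+ /*
+ * Reserve num_cpus consecutive hwirqs for each MSI so that
+ * xgene_msi_set_affinity() can steer the vector to any CPU by moving
+ * it within this block (see hwirq_to_canonical_hwirq()).
+ */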
+ msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
+ msi->num_cpus, 0);
+ if (msi_irq < NR_MSI_VEC)
+ bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
+ else
+ msi_irq = -ENOSPC;
+
+ mutex_unlock(&msi->bitmap_lock);
+
+ if (msi_irq < 0)
+ return msi_irq;
+
+ irq_domain_set_info(domain, virq, msi_irq,
+ &xgene_msi_bottom_irq_chip, domain->host_data,
+ handle_simple_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void xgene_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct xgene_msi *msi = irq_data_get_irq_chip_data(d);
+ u32 hwirq;
+
+ mutex_lock(&msi->bitmap_lock);
+
+ hwirq = hwirq_to_canonical_hwirq(d->hwirq);
+ bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);
+
+ mutex_unlock(&msi->bitmap_lock);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = xgene_irq_domain_alloc,
+ .free = xgene_irq_domain_free,
+};
+
+static int xgene_allocate_domains(struct xgene_msi *msi)
+{
+ msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
+ &msi_domain_ops, msi);
+ if (!msi->inner_domain)
+ return -ENOMEM;
+
+ msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
+ &xgene_msi_domain_info,
+ msi->inner_domain);
+
+ if (!msi->msi_domain) {
+ irq_domain_remove(msi->inner_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void xgene_free_domains(struct xgene_msi *msi)
+{
+ if (msi->msi_domain)
+ irq_domain_remove(msi->msi_domain);
+ if (msi->inner_domain)
+ irq_domain_remove(msi->inner_domain);
+}
+
+static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
+{
+ int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long);
+
+ xgene_msi->bitmap = kzalloc(size, GFP_KERNEL);
+ if (!xgene_msi->bitmap)
+ return -ENOMEM;
+
+ mutex_init(&xgene_msi->bitmap_lock);
+
+ xgene_msi->msi_groups = kcalloc(NR_HW_IRQS,
+ sizeof(struct xgene_msi_group),
+ GFP_KERNEL);
+ if (!xgene_msi->msi_groups)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void xgene_msi_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct xgene_msi_group *msi_groups;
+ struct xgene_msi *xgene_msi;
+ unsigned int virq;
+ int msir_index, msir_val, hw_irq;
+ u32 intr_index, grp_select, msi_grp;
+
+ chained_irq_enter(chip, desc);
+
+ msi_groups = irq_desc_get_handler_data(desc);
+ xgene_msi = msi_groups->msi;
+ msi_grp = msi_groups->msi_grp;
+
+ /*
+ * MSIINTn (n is 0..F) indicates whether there is a pending MSI interrupt.
+ * If bit x of this register is set (x is 0..7), one or more interrupts
+ * corresponding to MSInIRx are pending.
+ */
+ grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
+ while (grp_select) {
+ msir_index = ffs(grp_select) - 1;
+ /*
+ * Calculate MSInIRx address to read to check for interrupts
+ * (refer to the termination address and data assignment
+ * described in xgene_compose_msi_msg())
+ */
+ msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index);
+ while (msir_val) {
+ intr_index = ffs(msir_val) - 1;
+ /*
+ * Calculate MSI vector number (refer to the termination
+ * address and data assignment described in
+ * the xgene_compose_msi_msg() function)
+ */
+ hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
+ NR_HW_IRQS) + msi_grp;
+ /*
+ * As multiple hw_irq values map to a single MSI, always
+ * look up the virq using the hw_irq as seen from
+ * CPU0.
+ */
+ hw_irq = hwirq_to_canonical_hwirq(hw_irq);
+ virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq);
+ WARN_ON(!virq);
+ if (virq != 0)
+ generic_handle_irq(virq);
+ msir_val &= ~(1 << intr_index);
+ }
+ grp_select &= ~(1 << msir_index);
+
+ if (!grp_select) {
+ /*
+ * We handled all interrupts that were pending in this
+ * group; re-sample the group's MSIINTn register in case
+ * something else was made pending in the meantime.
+ */
+ grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int xgene_msi_remove(struct platform_device *pdev)
+{
+ int virq, i;
+ struct xgene_msi *msi = platform_get_drvdata(pdev);
+
+ for (i = 0; i < NR_HW_IRQS; i++) {
+ virq = msi->msi_groups[i].gic_irq;
+ if (virq != 0)
+ irq_set_chained_handler_and_data(virq, NULL, NULL);
+ }
+ kfree(msi->msi_groups);
+
+ kfree(msi->bitmap);
+ msi->bitmap = NULL;
+
+ xgene_free_domains(msi);
+
+ return 0;
+}
+
+static int xgene_msi_hwirq_alloc(unsigned int cpu)
+{
+ struct xgene_msi *msi = &xgene_msi_ctrl;
+ struct xgene_msi_group *msi_group;
+ cpumask_var_t mask;
+ int i;
+ int err;
+
+ for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
+ msi_group = &msi->msi_groups[i];
+ if (!msi_group->gic_irq)
+ continue;
+
+ irq_set_chained_handler(msi_group->gic_irq,
+ xgene_msi_isr);
+ err = irq_set_handler_data(msi_group->gic_irq, msi_group);
+ if (err) {
+ pr_err("failed to register GIC IRQ handler\n");
+ return -EINVAL;
+ }
+ /*
+ * Statically allocate MSI GIC IRQs to each CPU core.
+ * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
+ * to each core.
+ */
+ if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ cpumask_clear(mask);
+ cpumask_set_cpu(cpu, mask);
+ err = irq_set_affinity(msi_group->gic_irq, mask);
+ if (err)
+ pr_err("failed to set affinity for GIC IRQ");
+ free_cpumask_var(mask);
+ } else {
+ pr_err("failed to alloc CPU mask for affinity\n");
+ err = -EINVAL;
+ }
+
+ if (err) {
+ irq_set_chained_handler_and_data(msi_group->gic_irq,
+ NULL, NULL);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void xgene_msi_hwirq_free(unsigned int cpu)
+{
+ struct xgene_msi *msi = &xgene_msi_ctrl;
+ struct xgene_msi_group *msi_group;
+ int i;
+
+ for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
+ msi_group = &msi->msi_groups[i];
+ if (!msi_group->gic_irq)
+ continue;
+
+ irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
+ NULL);
+ }
+}
+
+static int xgene_msi_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ xgene_msi_hwirq_alloc(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ xgene_msi_hwirq_free(cpu);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block xgene_msi_cpu_notifier = {
+ .notifier_call = xgene_msi_cpu_callback,
+};
+
+static const struct of_device_id xgene_msi_match_table[] = {
+ {.compatible = "apm,xgene1-msi"},
+ {},
+};
+
+static int xgene_msi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int rc, irq_index;
+ struct xgene_msi *xgene_msi;
+ unsigned int cpu;
+ int virt_msir;
+ u32 msi_val, msi_idx;
+
+ xgene_msi = &xgene_msi_ctrl;
+
+ platform_set_drvdata(pdev, xgene_msi);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xgene_msi->msi_regs)) {
+ dev_err(&pdev->dev, "no reg space\n");
+ rc = -EINVAL;
+ goto error;
+ }
+ xgene_msi->msi_addr = res->start;
+ xgene_msi->node = pdev->dev.of_node;
+ xgene_msi->num_cpus = num_possible_cpus();
+
+ rc = xgene_msi_init_allocator(xgene_msi);
+ if (rc) {
+ dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
+ goto error;
+ }
+
+ rc = xgene_allocate_domains(xgene_msi);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
+ goto error;
+ }
+
+ for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
+ virt_msir = platform_get_irq(pdev, irq_index);
+ if (virt_msir < 0) {
+ dev_err(&pdev->dev, "Cannot translate IRQ index %d\n",
+ irq_index);
+ rc = -EINVAL;
+ goto error;
+ }
+ xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
+ xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
+ xgene_msi->msi_groups[irq_index].msi = xgene_msi;
+ }
+
+ /*
+ * MSInIRx registers are read-to-clear; before registering
+ * interrupt handlers, read all of them to clear spurious
+ * interrupts that may occur before the driver is probed.
+ */
+ for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
+ for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
+ msi_val = xgene_msi_ir_read(xgene_msi, irq_index,
+ msi_idx);
+ /* Read MSIINTn to confirm */
+ msi_val = xgene_msi_int_read(xgene_msi, irq_index);
+ if (msi_val) {
+ dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
+ rc = -EINVAL;
+ goto error;
+ }
+ }
+
+ cpu_notifier_register_begin();
+
+ for_each_online_cpu(cpu)
+ if (xgene_msi_hwirq_alloc(cpu)) {
+ dev_err(&pdev->dev, "failed to register MSI handlers\n");
+ cpu_notifier_register_done();
+ goto error;
+ }
+
+ rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
+ cpu_notifier_register_done();
+ goto error;
+ }
+
+ cpu_notifier_register_done();
+
+ dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
+
+ return 0;
+
+error:
+ xgene_msi_remove(pdev);
+ return rc;
+}
+
+static struct platform_driver xgene_msi_driver = {
+ .driver = {
+ .name = "xgene-msi",
+ .of_match_table = xgene_msi_match_table,
+ },
+ .probe = xgene_msi_probe,
+ .remove = xgene_msi_remove,
+};
+
+static int __init xgene_pcie_msi_init(void)
+{
+ return platform_driver_register(&xgene_msi_driver);
+}
+subsys_initcall(xgene_pcie_msi_init);
diff --git a/kernel/drivers/pci/host/pci-xgene.c b/kernel/drivers/pci/host/pci-xgene.c
index ee082c036..ae00ce22d 100644
--- a/kernel/drivers/pci/host/pci-xgene.c
+++ b/kernel/drivers/pci/host/pci-xgene.c
@@ -59,6 +59,12 @@
#define SZ_1T (SZ_1G*1024ULL)
#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe)
+#define ROOT_CAP_AND_CTRL 0x5C
+
+/* PCIe IP version */
+#define XGENE_PCIE_IP_VER_UNKN 0
+#define XGENE_PCIE_IP_VER_1 1
+
struct xgene_pcie_port {
struct device_node *node;
struct device *dev;
@@ -67,6 +73,7 @@ struct xgene_pcie_port {
void __iomem *cfg_base;
unsigned long cfg_addr;
bool link_up;
+ u32 version;
};
static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
@@ -130,9 +137,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
int offset)
{
- struct xgene_pcie_port *port = bus->sysdata;
-
- if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up ||
+ if ((pci_is_root_bus(bus) && devfn != 0) ||
xgene_pcie_hide_rc_bars(bus, offset))
return NULL;
@@ -140,9 +145,37 @@ static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
return xgene_pcie_get_cfg_base(bus) + offset;
}
+static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct xgene_pcie_port *port = bus->sysdata;
+
+ if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
+ PCIBIOS_SUCCESSFUL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * The v1 controller has a bug in its Configuration Request
+ * Retry Status (CRS) logic: when CRS is enabled and we read the
+ * Vendor and Device ID of a non-existent device, the controller
+ * fabricates return data of 0xFFFF0001 ("device exists but is not
+ * ready") instead of 0xFFFFFFFF ("device does not exist"). This
+ * causes the PCI core to retry the read until it times out.
+ * Avoid this by not claiming to support CRS.
+ */
+ if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
+ ((where & ~0x3) == ROOT_CAP_AND_CTRL))
+ *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+
+ if (size <= 2)
+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
static struct pci_ops xgene_pcie_ops = {
.map_bus = xgene_pcie_map_bus,
- .read = pci_generic_config_read32,
+ .read = xgene_pcie_config_read32,
.write = pci_generic_config_write32,
};
@@ -288,8 +321,16 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
return ret;
break;
case IORESOURCE_MEM:
- xgene_pcie_setup_ob_reg(port, res, OMR1BARL, res->start,
- res->start - window->offset);
+ if (res->flags & IORESOURCE_PREFETCH)
+ xgene_pcie_setup_ob_reg(port, res, OMR2BARL,
+ res->start,
+ res->start -
+ window->offset);
+ else
+ xgene_pcie_setup_ob_reg(port, res, OMR1BARL,
+ res->start,
+ res->start -
+ window->offset);
break;
case IORESOURCE_BUS:
break;
@@ -483,6 +524,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
port->node = of_node_get(pdev->dev.of_node);
port->dev = &pdev->dev;
+ port->version = XGENE_PCIE_IP_VER_UNKN;
+ if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
+ port->version = XGENE_PCIE_IP_VER_1;
+
ret = xgene_pcie_map_reg(port, pdev);
if (ret)
return ret;
diff --git a/kernel/drivers/pci/host/pcie-altera-msi.c b/kernel/drivers/pci/host/pcie-altera-msi.c
new file mode 100644
index 000000000..99177f4cc
--- /dev/null
+++ b/kernel/drivers/pci/host/pcie-altera-msi.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright Altera Corporation (C) 2013-2015. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define MSI_STATUS 0x0
+#define MSI_ERROR 0x4
+#define MSI_INTMASK 0x8
+
+#define MAX_MSI_VECTORS 32
+
+struct altera_msi {
+ DECLARE_BITMAP(used, MAX_MSI_VECTORS);
+ struct mutex lock; /* protect "used" bitmap */
+ struct platform_device *pdev;
+ struct irq_domain *msi_domain;
+ struct irq_domain *inner_domain;
+ void __iomem *csr_base;
+ void __iomem *vector_base;
+ phys_addr_t vector_phy;
+ u32 num_of_vectors;
+ int irq;
+};
+
+static inline void msi_writel(struct altera_msi *msi, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, msi->csr_base + reg);
+}
+
+static inline u32 msi_readl(struct altera_msi *msi, const u32 reg)
+{
+ return readl_relaxed(msi->csr_base + reg);
+}
+
+static void altera_msi_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct altera_msi *msi;
+ unsigned long status;
+ u32 num_of_vectors;
+ u32 bit;
+ u32 virq;
+
+ chained_irq_enter(chip, desc);
+ msi = irq_desc_get_handler_data(desc);
+ num_of_vectors = msi->num_of_vectors;
+
+ while ((status = msi_readl(msi, MSI_STATUS)) != 0) {
+ for_each_set_bit(bit, &status, msi->num_of_vectors) {
+ /* Dummy read from vector to clear the interrupt */
+ readl_relaxed(msi->vector_base + (bit * sizeof(u32)));
+
+ virq = irq_find_mapping(msi->inner_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err(&msi->pdev->dev, "unexpected MSI\n");
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip altera_msi_irq_chip = {
+ .name = "Altera PCIe MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info altera_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &altera_msi_irq_chip,
+};
+
+static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct altera_msi *msi = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = msi->vector_phy + (data->hwirq * sizeof(u32));
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(&msi->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int altera_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip altera_msi_bottom_irq_chip = {
+ .name = "Altera MSI",
+ .irq_compose_msi_msg = altera_compose_msi_msg,
+ .irq_set_affinity = altera_msi_set_affinity,
+};
+
+static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct altera_msi *msi = domain->host_data;
+ unsigned long bit;
+ u32 mask;
+
+ WARN_ON(nr_irqs != 1);
+ mutex_lock(&msi->lock);
+
+ bit = find_first_zero_bit(msi->used, msi->num_of_vectors);
+ if (bit >= msi->num_of_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->used);
+
+ mutex_unlock(&msi->lock);
+
+ irq_domain_set_info(domain, virq, bit, &altera_msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+
+ mask = msi_readl(msi, MSI_INTMASK);
+ mask |= 1 << bit;
+ msi_writel(msi, mask, MSI_INTMASK);
+
+ return 0;
+}
+
+static void altera_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct altera_msi *msi = irq_data_get_irq_chip_data(d);
+ u32 mask;
+
+ mutex_lock(&msi->lock);
+
+ if (!test_bit(d->hwirq, msi->used)) {
+ dev_err(&msi->pdev->dev, "trying to free unused MSI#%lu\n",
+ d->hwirq);
+ } else {
+ __clear_bit(d->hwirq, msi->used);
+ mask = msi_readl(msi, MSI_INTMASK);
+ mask &= ~(1 << d->hwirq);
+ msi_writel(msi, mask, MSI_INTMASK);
+ }
+
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = altera_irq_domain_alloc,
+ .free = altera_irq_domain_free,
+};
+
+static int altera_allocate_domains(struct altera_msi *msi)
+{
+ struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node);
+
+ msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+ &msi_domain_ops, msi);
+ if (!msi->inner_domain) {
+ dev_err(&msi->pdev->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &altera_msi_domain_info, msi->inner_domain);
+ if (!msi->msi_domain) {
+ dev_err(&msi->pdev->dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->inner_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void altera_free_domains(struct altera_msi *msi)
+{
+ irq_domain_remove(msi->msi_domain);
+ irq_domain_remove(msi->inner_domain);
+}
+
+static int altera_msi_remove(struct platform_device *pdev)
+{
+ struct altera_msi *msi = platform_get_drvdata(pdev);
+
+ msi_writel(msi, 0, MSI_INTMASK);
+ irq_set_chained_handler(msi->irq, NULL);
+ irq_set_handler_data(msi->irq, NULL);
+
+ altera_free_domains(msi);
+
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static int altera_msi_probe(struct platform_device *pdev)
+{
+ struct altera_msi *msi;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ int ret;
+
+ msi = devm_kzalloc(&pdev->dev, sizeof(struct altera_msi),
+ GFP_KERNEL);
+ if (!msi)
+ return -ENOMEM;
+
+ mutex_init(&msi->lock);
+ msi->pdev = pdev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
+ if (!res) {
+ dev_err(&pdev->dev, "no csr memory resource defined\n");
+ return -ENODEV;
+ }
+
+ msi->csr_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(msi->csr_base)) {
+ dev_err(&pdev->dev, "failed to map csr memory\n");
+ return PTR_ERR(msi->csr_base);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "vector_slave");
+ if (!res) {
+ dev_err(&pdev->dev, "no vector_slave memory resource defined\n");
+ return -ENODEV;
+ }
+
+ msi->vector_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(msi->vector_base)) {
+ dev_err(&pdev->dev, "failed to map vector_slave memory\n");
+ return PTR_ERR(msi->vector_base);
+ }
+
+ msi->vector_phy = res->start;
+
+ if (of_property_read_u32(np, "num-vectors", &msi->num_of_vectors)) {
+ dev_err(&pdev->dev, "failed to parse the number of vectors\n");
+ return -EINVAL;
+ }
+
+ ret = altera_allocate_domains(msi);
+ if (ret)
+ return ret;
+
+ msi->irq = platform_get_irq(pdev, 0);
+ if (msi->irq <= 0) {
+ dev_err(&pdev->dev, "failed to map IRQ: %d\n", msi->irq);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ irq_set_chained_handler_and_data(msi->irq, altera_msi_isr, msi);
+ platform_set_drvdata(pdev, msi);
+
+ return 0;
+
+err:
+ altera_msi_remove(pdev);
+ return ret;
+}
+
+static const struct of_device_id altera_msi_of_match[] = {
+ { .compatible = "altr,msi-1.0", NULL },
+ { },
+};
+
+static struct platform_driver altera_msi_driver = {
+ .driver = {
+ .name = "altera-msi",
+ .of_match_table = altera_msi_of_match,
+ },
+ .probe = altera_msi_probe,
+ .remove = altera_msi_remove,
+};
+
+static int __init altera_msi_init(void)
+{
+ return platform_driver_register(&altera_msi_driver);
+}
+subsys_initcall(altera_msi_init);
+
+MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
+MODULE_DESCRIPTION("Altera PCIe MSI support");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/pci/host/pcie-altera.c b/kernel/drivers/pci/host/pcie-altera.c
new file mode 100644
index 000000000..99da549d5
--- /dev/null
+++ b/kernel/drivers/pci/host/pcie-altera.c
@@ -0,0 +1,588 @@
+/*
+ * Copyright Altera Corporation (C) 2013-2015. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define RP_TX_REG0 0x2000
+#define RP_TX_REG1 0x2004
+#define RP_TX_CNTRL 0x2008
+#define RP_TX_EOP 0x2
+#define RP_TX_SOP 0x1
+#define RP_RXCPL_STATUS 0x2010
+#define RP_RXCPL_EOP 0x2
+#define RP_RXCPL_SOP 0x1
+#define RP_RXCPL_REG0 0x2014
+#define RP_RXCPL_REG1 0x2018
+#define P2A_INT_STATUS 0x3060
+#define P2A_INT_STS_ALL 0xf
+#define P2A_INT_ENABLE 0x3070
+#define P2A_INT_ENA_ALL 0xf
+#define RP_LTSSM 0x3c64
+#define LTSSM_L0 0xf
+
+/* TLP configuration type 0 and 1 */
+#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */
+#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */
+#define TLP_FMTTYPE_CFGRD1 0x05 /* Configuration Read Type 1 */
+#define TLP_FMTTYPE_CFGWR1 0x45 /* Configuration Write Type 1 */
+#define TLP_PAYLOAD_SIZE 0x01
+#define TLP_READ_TAG 0x1d
+#define TLP_WRITE_TAG 0x10
+#define TLP_CFG_DW0(fmttype) (((fmttype) << 24) | TLP_PAYLOAD_SIZE)
+#define TLP_CFG_DW1(reqid, tag, be) (((reqid) << 16) | (tag << 8) | (be))
+#define TLP_CFG_DW2(bus, devfn, offset) \
+ (((bus) << 24) | ((devfn) << 16) | (offset))
+#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
+#define TLP_COMP_STATUS(s) (((s) >> 12) & 7)
+#define TLP_HDR_SIZE 3
+#define TLP_LOOP 500
+#define RP_DEVFN 0
+
+#define INTX_NUM 4
+
+#define DWORD_MASK 3
+
+struct altera_pcie {
+ struct platform_device *pdev;
+ void __iomem *cra_base;
+ int irq;
+ u8 root_bus_nr;
+ struct irq_domain *irq_domain;
+ struct resource bus_range;
+ struct list_head resources;
+};
+
+struct tlp_rp_regpair_t {
+ u32 ctrl;
+ u32 reg0;
+ u32 reg1;
+};
+
+static void altera_pcie_retrain(struct pci_dev *dev)
+{
+ u16 linkcap, linkstat;
+
+ /*
+ * Set the retrain bit if the PCIe root port supports > 2.5 GT/s but
+ * the current link speed is 2.5 GT/s.
+ */
+ pcie_capability_read_word(dev, PCI_EXP_LNKCAP, &linkcap);
+
+ if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
+ return;
+
+ pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat);
+ if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB)
+ pcie_capability_set_word(dev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_RL);
+}
+DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain);
+
+/*
+ * The Altera PCIe port uses BAR0 of the RC's configuration space for the
+ * translation from the PCI bus to the native bus. The entire DDR region is
+ * mapped into PCIe space using these registers, so it can be reached by DMA
+ * from EP devices. BAR0 is also used to access the MSI vectors when an
+ * MSI/MSI-X interrupt is received from EP devices, which eventually triggers
+ * an interrupt to the GIC. The bridge's BAR0 must therefore be hidden during
+ * enumeration to avoid sizing and resource allocation by the PCI core.
+ */
+static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
+ int offset)
+{
+ if (pci_is_root_bus(bus) && (devfn == 0) &&
+ (offset == PCI_BASE_ADDRESS_0))
+ return true;
+
+ return false;
+}
+
+static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->cra_base + reg);
+}
+
+static void tlp_write_tx(struct altera_pcie *pcie,
+ struct tlp_rp_regpair_t *tlp_rp_regdata)
+{
+ cra_writel(pcie, tlp_rp_regdata->reg0, RP_TX_REG0);
+ cra_writel(pcie, tlp_rp_regdata->reg1, RP_TX_REG1);
+ cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
+}
+
+static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
+{
+ return !!(cra_readl(pcie, RP_LTSSM) & LTSSM_L0);
+}
+
+static bool altera_pcie_valid_config(struct altera_pcie *pcie,
+ struct pci_bus *bus, int dev)
+{
+ /* If there is no link, then there is no device */
+ if (bus->number != pcie->root_bus_nr) {
+ if (!altera_pcie_link_is_up(pcie))
+ return false;
+ }
+
+ /* access only one slot on each root port */
+ if (bus->number == pcie->root_bus_nr && dev > 0)
+ return false;
+
+ /*
+ * Do not read more than one device on the bus directly attached
+ * to the root port; the root port can only attach to one downstream port.
+ */
+ if (bus->primary == pcie->root_bus_nr && dev > 0)
+ return false;
+
+ return true;
+}
+
+static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
+{
+ int i;
+ bool sop = false;
+ u32 ctrl;
+ u32 reg0, reg1;
+ u32 comp_status = 1;
+
+ /*
+ * A minimum of 2 loops is needed to read the TLP headers and 1 loop
+ * to read the data payload.
+ */
+ for (i = 0; i < TLP_LOOP; i++) {
+ ctrl = cra_readl(pcie, RP_RXCPL_STATUS);
+ if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) {
+ reg0 = cra_readl(pcie, RP_RXCPL_REG0);
+ reg1 = cra_readl(pcie, RP_RXCPL_REG1);
+
+ if (ctrl & RP_RXCPL_SOP) {
+ sop = true;
+ comp_status = TLP_COMP_STATUS(reg1);
+ }
+
+ if (ctrl & RP_RXCPL_EOP) {
+ if (comp_status)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (value)
+ *value = reg0;
+
+ return PCIBIOS_SUCCESSFUL;
+ }
+ }
+ udelay(5);
+ }
+
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
+ u32 data, bool align)
+{
+ struct tlp_rp_regpair_t tlp_rp_regdata;
+
+ tlp_rp_regdata.reg0 = headers[0];
+ tlp_rp_regdata.reg1 = headers[1];
+ tlp_rp_regdata.ctrl = RP_TX_SOP;
+ tlp_write_tx(pcie, &tlp_rp_regdata);
+
+ if (align) {
+ tlp_rp_regdata.reg0 = headers[2];
+ tlp_rp_regdata.reg1 = 0;
+ tlp_rp_regdata.ctrl = 0;
+ tlp_write_tx(pcie, &tlp_rp_regdata);
+
+ tlp_rp_regdata.reg0 = data;
+ tlp_rp_regdata.reg1 = 0;
+ } else {
+ tlp_rp_regdata.reg0 = headers[2];
+ tlp_rp_regdata.reg1 = data;
+ }
+
+ tlp_rp_regdata.ctrl = RP_TX_EOP;
+ tlp_write_tx(pcie, &tlp_rp_regdata);
+}
+
+static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
+ int where, u8 byte_en, u32 *value)
+{
+ u32 headers[TLP_HDR_SIZE];
+
+ if (bus == pcie->root_bus_nr)
+ headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD0);
+ else
+ headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
+
+ headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
+ TLP_READ_TAG, byte_en);
+ headers[2] = TLP_CFG_DW2(bus, devfn, where);
+
+ tlp_write_packet(pcie, headers, 0, false);
+
+ return tlp_read_packet(pcie, value);
+}
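+
+/*
+ * Example: a 32-bit read at offset 0 (vendor/device ID) of bus 1, devfn 0
+ * with root_bus_nr 0 issues a Type 1 read whose headers are 0x05000001
+ * (CFGRD1, one dword payload), 0x00001d0f (requester 0:00.0, tag 0x1d,
+ * byte enables 0xf) and 0x01000000 (bus 1, devfn 0, offset 0).
+ */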
+
+static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
+ int where, u8 byte_en, u32 value)
+{
+ u32 headers[TLP_HDR_SIZE];
+ int ret;
+
+ if (bus == pcie->root_bus_nr)
+ headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR0);
+ else
+ headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);
+
+ headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
+ TLP_WRITE_TAG, byte_en);
+ headers[2] = TLP_CFG_DW2(bus, devfn, where);
+
+ /* check alignment to Qword */
+ if ((where & 0x7) == 0)
+ tlp_write_packet(pcie, headers, value, true);
+ else
+ tlp_write_packet(pcie, headers, value, false);
+
+ ret = tlp_read_packet(pcie, NULL);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ /*
+ * Monitor changes to PCI_PRIMARY_BUS register on root port
+ * and update local copy of root bus number accordingly.
+ */
+ if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS))
+ pcie->root_bus_nr = (u8)(value);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+ int ret;
+ u32 data;
+ u8 byte_en;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
+ *value = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ switch (size) {
+ case 1:
+ byte_en = 1 << (where & 3);
+ break;
+ case 2:
+ byte_en = 3 << (where & 3);
+ break;
+ default:
+ byte_en = 0xf;
+ break;
+ }
+
+ ret = tlp_cfg_dword_read(pcie, bus->number, devfn,
+ (where & ~DWORD_MASK), byte_en, &data);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ switch (size) {
+ case 1:
+ *value = (data >> (8 * (where & 0x3))) & 0xff;
+ break;
+ case 2:
+ *value = (data >> (8 * (where & 0x2))) & 0xffff;
+ break;
+ default:
+ *value = data;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+ u32 data32;
+ u32 shift = 8 * (where & 3);
+ u8 byte_en;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ data32 = (value & 0xff) << shift;
+ byte_en = 1 << (where & 3);
+ break;
+ case 2:
+ data32 = (value & 0xffff) << shift;
+ byte_en = 3 << (where & 3);
+ break;
+ default:
+ data32 = value;
+ byte_en = 0xf;
+ break;
+ }
+
+ return tlp_cfg_dword_write(pcie, bus->number, devfn,
+ (where & ~DWORD_MASK), byte_en, data32);
+}
+
+static struct pci_ops altera_pcie_ops = {
+ .read = altera_pcie_cfg_read,
+ .write = altera_pcie_cfg_write,
+};
+
+static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = altera_pcie_intx_map,
+};
+
+static void altera_pcie_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct altera_pcie *pcie;
+ unsigned long status;
+ u32 bit;
+ u32 virq;
+
+ chained_irq_enter(chip, desc);
+ pcie = irq_desc_get_handler_data(desc);
+
+ while ((status = cra_readl(pcie, P2A_INT_STATUS)
+ & P2A_INT_STS_ALL) != 0) {
+ for_each_set_bit(bit, &status, INTX_NUM) {
+ /* clear interrupts */
+ cra_writel(pcie, 1 << bit, P2A_INT_STATUS);
+
+ virq = irq_find_mapping(pcie->irq_domain, bit + 1);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err(&pcie->pdev->dev,
+ "unexpected IRQ, INT%d\n", bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void altera_pcie_release_of_pci_ranges(struct altera_pcie *pcie)
+{
+ pci_free_resource_list(&pcie->resources);
+}
+
+static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
+{
+ int err, res_valid = 0;
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource_entry *win;
+
+ err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
+ NULL);
+ if (err)
+ return err;
+
+ resource_list_for_each_entry(win, &pcie->resources) {
+ struct resource *parent, *res = win->res;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_MEM:
+ parent = &iomem_resource;
+ res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+ break;
+ default:
+ continue;
+ }
+
+ err = devm_request_resource(dev, parent, res);
+ if (err)
+ goto out_release_res;
+ }
+
+ if (!res_valid) {
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
+ goto out_release_res;
+ }
+
+ return 0;
+
+out_release_res:
+ altera_pcie_release_of_pci_ranges(pcie);
+ return err;
+}
+
+static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+
+ /* Setup INTx */
+ pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM + 1,
+ &intx_domain_ops, pcie);
+ if (!pcie->irq_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int altera_pcie_parse_dt(struct altera_pcie *pcie)
+{
+ struct resource *cra;
+ struct platform_device *pdev = pcie->pdev;
+
+ cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
+ if (!cra) {
+ dev_err(&pdev->dev, "no Cra memory resource defined\n");
+ return -ENODEV;
+ }
+
+ pcie->cra_base = devm_ioremap_resource(&pdev->dev, cra);
+ if (IS_ERR(pcie->cra_base)) {
+ dev_err(&pdev->dev, "failed to map cra memory\n");
+ return PTR_ERR(pcie->cra_base);
+ }
+
+ /* setup IRQ */
+ pcie->irq = platform_get_irq(pdev, 0);
+ if (pcie->irq <= 0) {
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", pcie->irq);
+ return -EINVAL;
+ }
+
+ irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
+
+ return 0;
+}
+
+static int altera_pcie_probe(struct platform_device *pdev)
+{
+ struct altera_pcie *pcie;
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ int ret;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->pdev = pdev;
+
+ ret = altera_pcie_parse_dt(pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Parsing DT failed\n");
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&pcie->resources);
+
+ ret = altera_pcie_parse_request_of_pci_ranges(pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed add resources\n");
+ return ret;
+ }
+
+ ret = altera_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed creating IRQ Domain\n");
+ return ret;
+ }
+
+ /* clear all interrupts */
+ cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
+ /* enable all interrupts */
+ cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+
+ bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops,
+ pcie, &pcie->resources);
+ if (!bus)
+ return -ENOMEM;
+
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+ pci_assign_unassigned_bus_resources(bus);
+
+ /* Configure PCI Express settings. */
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(bus);
+
+ platform_set_drvdata(pdev, pcie);
+ return ret;
+}
+
+static const struct of_device_id altera_pcie_of_match[] = {
+ { .compatible = "altr,pcie-root-port-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
+
+static struct platform_driver altera_pcie_driver = {
+ .probe = altera_pcie_probe,
+ .driver = {
+ .name = "altera-pcie",
+ .of_match_table = altera_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int altera_pcie_init(void)
+{
+ return platform_driver_register(&altera_pcie_driver);
+}
+module_init(altera_pcie_init);
+
+MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
+MODULE_DESCRIPTION("Altera PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/pci/host/pcie-designware.c b/kernel/drivers/pci/host/pcie-designware.c
index 2e9f84fdd..02a7452bd 100644
--- a/kernel/drivers/pci/host/pcie-designware.c
+++ b/kernel/drivers/pci/host/pcie-designware.c
@@ -31,13 +31,15 @@
#define PORT_LINK_MODE_1_LANES (0x1 << 16)
#define PORT_LINK_MODE_2_LANES (0x3 << 16)
#define PORT_LINK_MODE_4_LANES (0x7 << 16)
+#define PORT_LINK_MODE_8_LANES (0xf << 16)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
-#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8)
+#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8)
#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
+#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8)
#define PCIE_MSI_ADDR_LO 0x820
#define PCIE_MSI_ADDR_HI 0x824
@@ -67,39 +69,40 @@
#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET 0x91C
-static struct hw_pci dw_pci;
+static struct pci_ops dw_pcie_ops;
-static unsigned long global_io_offset;
-
-static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
-{
- BUG_ON(!sys->private_data);
-
- return sys->private_data;
-}
-
-int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val)
+int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
{
- *val = readl(addr);
+ if ((uintptr_t)addr & (size - 1)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
- if (size == 1)
- *val = (*val >> (8 * (where & 3))) & 0xff;
+ if (size == 4)
+ *val = readl(addr);
else if (size == 2)
- *val = (*val >> (8 * (where & 3))) & 0xffff;
- else if (size != 4)
+ *val = readw(addr);
+ else if (size == 1)
+ *val = readb(addr);
+ else {
+ *val = 0;
return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
return PCIBIOS_SUCCESSFUL;
}
-int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val)
+int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
{
+ if ((uintptr_t)addr & (size - 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
if (size == 4)
writel(val, addr);
else if (size == 2)
- writew(val, addr + (where & 2));
+ writew(val, addr);
else if (size == 1)
- writeb(val, addr + (where & 3));
+ writeb(val, addr);
else
return PCIBIOS_BAD_REGISTER_NUMBER;
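The rewritten accessors above now reject accesses that are not naturally aligned: (uintptr_t)addr & (size - 1) is non-zero exactly when any of the low log2(size) address bits are set, and such requests fail with PCIBIOS_BAD_REGISTER_NUMBER instead of being silently shifted into place as before. A small stand-alone illustration of that test (plain user-space C, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Returns 1 when addr is naturally aligned for an access of 'size' bytes */
static int aligned(uintptr_t addr, unsigned int size)
{
	return (addr & (uintptr_t)(size - 1)) == 0;
}

int main(void)
{
	uintptr_t addrs[] = { 0x1000, 0x1001, 0x1002, 0x1003 };
	unsigned int sizes[] = { 1, 2, 4 };

	for (unsigned int i = 0; i < 4; i++)
		for (unsigned int j = 0; j < 3; j++)
			printf("addr 0x%lx size %u -> %s\n",
			       (unsigned long)addrs[i], sizes[j],
			       aligned(addrs[i], sizes[j]) ?
					"ok" : "PCIBIOS_BAD_REGISTER_NUMBER");
	return 0;
}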
@@ -130,8 +133,7 @@ static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
if (pp->ops->rd_own_conf)
ret = pp->ops->rd_own_conf(pp, where, size, val);
else
- ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where,
- size, val);
+ ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);
return ret;
}
@@ -144,12 +146,26 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
if (pp->ops->wr_own_conf)
ret = pp->ops->wr_own_conf(pp, where, size, val);
else
- ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), where,
- size, val);
+ ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);
return ret;
}
+static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
+ int type, u64 cpu_addr, u64 pci_addr, u32 size)
+{
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
static struct irq_chip dw_msi_irq_chip = {
.name = "PCI-MSI",
.irq_enable = pci_msi_unmask_irq,
@@ -188,12 +204,16 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
void dw_pcie_msi_init(struct pcie_port *pp)
{
+ u64 msi_target;
+
pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
+ msi_target = virt_to_phys((void *)pp->msi_data);
/* program the msi_data */
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
- virt_to_phys((void *)pp->msi_data));
- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
+ (u32)(msi_target & 0xffffffff));
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
+ (u32)(msi_target >> 32 & 0xffffffff));
}
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
@@ -238,7 +258,7 @@ static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
int irq, pos0, i;
- struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
+ struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(desc);
pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
order_base_2(no_irqs));
@@ -269,6 +289,9 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
}
*pos = pos0;
+ desc->nvec_used = no_irqs;
+ desc->msi_attrib.multiple = order_base_2(no_irqs);
+
return irq;
no_valid_irq:
@@ -276,12 +299,32 @@ no_valid_irq:
return -ENOSPC;
}
+static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
+{
+ struct msi_msg msg;
+ u64 msi_target;
+
+ if (pp->ops->get_msi_addr)
+ msi_target = pp->ops->get_msi_addr(pp);
+ else
+ msi_target = virt_to_phys((void *)pp->msi_data);
+
+ msg.address_lo = (u32)(msi_target & 0xffffffff);
+ msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
+
+ if (pp->ops->get_msi_data)
+ msg.data = pp->ops->get_msi_data(pp, pos);
+ else
+ msg.data = pos;
+
+ pci_write_msi_msg(irq, &msg);
+}
+
static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
struct msi_desc *desc)
{
int irq, pos;
- struct msi_msg msg;
- struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
+ struct pcie_port *pp = pdev->bus->sysdata;
if (desc->msi_attrib.is_msix)
return -EINVAL;
@@ -290,33 +333,50 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
if (irq < 0)
return irq;
- if (pp->ops->get_msi_addr)
- msg.address_lo = pp->ops->get_msi_addr(pp);
- else
- msg.address_lo = virt_to_phys((void *)pp->msi_data);
- msg.address_hi = 0x0;
+ dw_msi_setup_msg(pp, irq, pos);
- if (pp->ops->get_msi_data)
- msg.data = pp->ops->get_msi_data(pp, pos);
- else
- msg.data = pos;
+ return 0;
+}
- pci_write_msi_msg(irq, &msg);
+static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
+ int nvec, int type)
+{
+#ifdef CONFIG_PCI_MSI
+ int irq, pos;
+ struct msi_desc *desc;
+ struct pcie_port *pp = pdev->bus->sysdata;
+
+ /* MSI-X interrupts are not supported */
+ if (type == PCI_CAP_ID_MSIX)
+ return -EINVAL;
+
+ WARN_ON(!list_is_singular(&pdev->dev.msi_list));
+ desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
+
+ irq = assign_irq(nvec, desc, &pos);
+ if (irq < 0)
+ return irq;
+
+ dw_msi_setup_msg(pp, irq, pos);
return 0;
+#else
+ return -EINVAL;
+#endif
}
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
struct irq_data *data = irq_get_irq_data(irq);
- struct msi_desc *msi = irq_data_get_msi(data);
- struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata);
+ struct msi_desc *msi = irq_data_get_msi_desc(data);
+ struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
clear_irq_range(pp, irq, 1, data->hwirq);
}
static struct msi_controller dw_pcie_msi_chip = {
.setup_irq = dw_msi_setup_irq,
+ .setup_irqs = dw_msi_setup_irqs,
.teardown_irq = dw_msi_teardown_irq,
};
@@ -333,7 +393,6 @@ static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -346,18 +405,12 @@ int dw_pcie_host_init(struct pcie_port *pp)
{
struct device_node *np = pp->dev->of_node;
struct platform_device *pdev = to_platform_device(pp->dev);
- struct of_pci_range range;
- struct of_pci_range_parser parser;
+ struct pci_bus *bus, *child;
struct resource *cfg_res;
- u32 val, na, ns;
- const __be32 *addrp;
- int i, index, ret;
-
- /* Find the address cell size and the number of cells in order to get
- * the untranslated address.
- */
- of_property_read_u32(np, "#address-cells", &na);
- ns = of_n_size_cells(np);
+ u32 val;
+ int i, ret;
+ LIST_HEAD(res);
+ struct resource_entry *win;
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
@@ -365,88 +418,60 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg1_size = resource_size(cfg_res)/2;
pp->cfg0_base = cfg_res->start;
pp->cfg1_base = cfg_res->start + pp->cfg0_size;
-
- /* Find the untranslated configuration space address */
- index = of_property_match_string(np, "reg-names", "config");
- addrp = of_get_address(np, index, NULL, NULL);
- pp->cfg0_mod_base = of_read_number(addrp, ns);
- pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
- } else {
+ } else if (!pp->va_cfg0_base) {
dev_err(pp->dev, "missing *config* reg space\n");
}
- if (of_pci_range_parser_init(&parser, np)) {
- dev_err(pp->dev, "missing ranges property\n");
- return -EINVAL;
- }
+ ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
+ if (ret)
+ return ret;
/* Get the I/O and memory ranges from DT */
- for_each_of_pci_range(&parser, &range) {
- unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
-
- if (restype == IORESOURCE_IO) {
- of_pci_range_to_resource(&range, np, &pp->io);
- pp->io.name = "I/O";
- pp->io.start = max_t(resource_size_t,
- PCIBIOS_MIN_IO,
- range.pci_addr + global_io_offset);
- pp->io.end = min_t(resource_size_t,
- IO_SPACE_LIMIT,
- range.pci_addr + range.size
- + global_io_offset - 1);
- pp->io_size = resource_size(&pp->io);
- pp->io_bus_addr = range.pci_addr;
- pp->io_base = range.cpu_addr;
-
- /* Find the untranslated IO space address */
- pp->io_mod_base = of_read_number(parser.range -
- parser.np + na, ns);
- }
- if (restype == IORESOURCE_MEM) {
- of_pci_range_to_resource(&range, np, &pp->mem);
- pp->mem.name = "MEM";
- pp->mem_size = resource_size(&pp->mem);
- pp->mem_bus_addr = range.pci_addr;
-
- /* Find the untranslated MEM space address */
- pp->mem_mod_base = of_read_number(parser.range -
- parser.np + na, ns);
- }
- if (restype == 0) {
- of_pci_range_to_resource(&range, np, &pp->cfg);
- pp->cfg0_size = resource_size(&pp->cfg)/2;
- pp->cfg1_size = resource_size(&pp->cfg)/2;
- pp->cfg0_base = pp->cfg.start;
- pp->cfg1_base = pp->cfg.start + pp->cfg0_size;
-
- /* Find the untranslated configuration space address */
- pp->cfg0_mod_base = of_read_number(parser.range -
- parser.np + na, ns);
- pp->cfg1_mod_base = pp->cfg0_mod_base +
- pp->cfg0_size;
+ resource_list_for_each_entry(win, &res) {
+ switch (resource_type(win->res)) {
+ case IORESOURCE_IO:
+ pp->io = win->res;
+ pp->io->name = "I/O";
+ pp->io_size = resource_size(pp->io);
+ pp->io_bus_addr = pp->io->start - win->offset;
+ ret = pci_remap_iospace(pp->io, pp->io_base);
+ if (ret) {
+ dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
+ ret, pp->io);
+ continue;
+ }
+ break;
+ case IORESOURCE_MEM:
+ pp->mem = win->res;
+ pp->mem->name = "MEM";
+ pp->mem_size = resource_size(pp->mem);
+ pp->mem_bus_addr = pp->mem->start - win->offset;
+ break;
+ case 0:
+ pp->cfg = win->res;
+ pp->cfg0_size = resource_size(pp->cfg)/2;
+ pp->cfg1_size = resource_size(pp->cfg)/2;
+ pp->cfg0_base = pp->cfg->start;
+ pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
+ break;
+ case IORESOURCE_BUS:
+ pp->busn = win->res;
+ break;
+ default:
+ continue;
}
}
- ret = of_pci_parse_bus_range(np, &pp->busn);
- if (ret < 0) {
- pp->busn.name = np->name;
- pp->busn.start = 0;
- pp->busn.end = 0xff;
- pp->busn.flags = IORESOURCE_BUS;
- dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n",
- ret, &pp->busn);
- }
-
if (!pp->dbi_base) {
- pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
- resource_size(&pp->cfg));
+ pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start,
+ resource_size(pp->cfg));
if (!pp->dbi_base) {
dev_err(pp->dev, "error with ioremap\n");
return -ENOMEM;
}
}
- pp->mem_base = pp->mem.start;
+ pp->mem_base = pp->mem->start;
if (!pp->va_cfg0_base) {
pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
@@ -466,10 +491,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
- if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
- dev_err(pp->dev, "Failed to parse the number of lanes\n");
- return -EINVAL;
- }
+ ret = of_property_read_u32(np, "num-lanes", &pp->lanes);
+ if (ret)
+ pp->lanes = 0;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
if (!pp->ops->msi_host_init) {
@@ -493,6 +517,11 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (pp->ops->host_init)
pp->ops->host_init(pp);
+ if (!pp->ops->rd_other_conf)
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_TYPE_MEM, pp->mem_base,
+ pp->mem_bus_addr, pp->mem_size);
+
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
/* program correct class for RC */
@@ -502,128 +531,103 @@ int dw_pcie_host_init(struct pcie_port *pp)
val |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
-#ifdef CONFIG_PCI_MSI
- dw_pcie_msi_chip.dev = pp->dev;
- dw_pci.msi_ctrl = &dw_pcie_msi_chip;
-#endif
-
- dw_pci.nr_controllers = 1;
- dw_pci.private_data = (void **)&pp;
-
- pci_common_init_dev(pp->dev, &dw_pci);
+ pp->root_bus_nr = pp->busn->start;
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
+ &dw_pcie_ops, pp, &res,
+ &dw_pcie_msi_chip);
+ dw_pcie_msi_chip.dev = pp->dev;
+ } else
+ bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
+ pp, &res);
+ if (!bus)
+ return -ENOMEM;
- return 0;
-}
+ if (pp->ops->scan_bus)
+ pp->ops->scan_bus(pp);
-static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
-{
- /* Program viewport 0 : OUTBOUND : CFG0 */
- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
- PCIE_ATU_VIEWPORT);
- dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
- dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1,
- PCIE_ATU_LIMIT);
- dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
- dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1);
- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
-}
+#ifdef CONFIG_ARM
+ /* support old dtbs that incorrectly describe IRQs */
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+#endif
-static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
-{
- /* Program viewport 1 : OUTBOUND : CFG1 */
- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
- PCIE_ATU_VIEWPORT);
- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
- dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
- dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1,
- PCIE_ATU_LIMIT);
- dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
- dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
-}
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
-static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
-{
- /* Program viewport 0 : OUTBOUND : MEM */
- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
- PCIE_ATU_VIEWPORT);
- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
- dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
- dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1,
- PCIE_ATU_LIMIT);
- dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET);
- dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr),
- PCIE_ATU_UPPER_TARGET);
- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
-}
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ }
-static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
-{
- /* Program viewport 1 : OUTBOUND : IO */
- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
- PCIE_ATU_VIEWPORT);
- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
- dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
- dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1,
- PCIE_ATU_LIMIT);
- dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET);
- dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr),
- PCIE_ATU_UPPER_TARGET);
- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ pci_bus_add_devices(bus);
+ return 0;
}
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val)
{
- int ret = PCIBIOS_SUCCESSFUL;
- u32 address, busdev;
+ int ret, type;
+ u32 busdev, cfg_size;
+ u64 cpu_addr;
+ void __iomem *va_cfg_base;
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
- address = where & ~0x3;
if (bus->parent->number == pp->root_bus_nr) {
- dw_pcie_prog_viewport_cfg0(pp, busdev);
- ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size,
- val);
- dw_pcie_prog_viewport_mem_outbound(pp);
+ type = PCIE_ATU_TYPE_CFG0;
+ cpu_addr = pp->cfg0_base;
+ cfg_size = pp->cfg0_size;
+ va_cfg_base = pp->va_cfg0_base;
} else {
- dw_pcie_prog_viewport_cfg1(pp, busdev);
- ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size,
- val);
- dw_pcie_prog_viewport_io_outbound(pp);
+ type = PCIE_ATU_TYPE_CFG1;
+ cpu_addr = pp->cfg1_base;
+ cfg_size = pp->cfg1_size;
+ va_cfg_base = pp->va_cfg1_base;
}
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
+ type, cpu_addr,
+ busdev, cfg_size);
+ ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
+ PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
+
return ret;
}
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 val)
{
- int ret = PCIBIOS_SUCCESSFUL;
- u32 address, busdev;
+ int ret, type;
+ u32 busdev, cfg_size;
+ u64 cpu_addr;
+ void __iomem *va_cfg_base;
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
- address = where & ~0x3;
if (bus->parent->number == pp->root_bus_nr) {
- dw_pcie_prog_viewport_cfg0(pp, busdev);
- ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size,
- val);
- dw_pcie_prog_viewport_mem_outbound(pp);
+ type = PCIE_ATU_TYPE_CFG0;
+ cpu_addr = pp->cfg0_base;
+ cfg_size = pp->cfg0_size;
+ va_cfg_base = pp->va_cfg0_base;
} else {
- dw_pcie_prog_viewport_cfg1(pp, busdev);
- ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size,
- val);
- dw_pcie_prog_viewport_io_outbound(pp);
+ type = PCIE_ATU_TYPE_CFG1;
+ cpu_addr = pp->cfg1_base;
+ cfg_size = pp->cfg1_size;
+ va_cfg_base = pp->va_cfg1_base;
}
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
+ type, cpu_addr,
+ busdev, cfg_size);
+ ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
+ PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
+
return ret;
}
@@ -653,7 +657,7 @@ static int dw_pcie_valid_config(struct pcie_port *pp,
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
- struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+ struct pcie_port *pp = bus->sysdata;
int ret;
if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
@@ -677,7 +681,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
- struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+ struct pcie_port *pp = bus->sysdata;
int ret;
if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
@@ -701,64 +705,6 @@ static struct pci_ops dw_pcie_ops = {
.write = dw_pcie_wr_conf,
};
-static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
-{
- struct pcie_port *pp;
-
- pp = sys_to_pcie(sys);
-
- if (global_io_offset < SZ_1M && pp->io_size > 0) {
- sys->io_offset = global_io_offset - pp->io_bus_addr;
- pci_ioremap_io(global_io_offset, pp->io_base);
- global_io_offset += SZ_64K;
- pci_add_resource_offset(&sys->resources, &pp->io,
- sys->io_offset);
- }
-
- sys->mem_offset = pp->mem.start - pp->mem_bus_addr;
- pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
- pci_add_resource(&sys->resources, &pp->busn);
-
- return 1;
-}
-
-static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
-{
- struct pci_bus *bus;
- struct pcie_port *pp = sys_to_pcie(sys);
-
- pp->root_bus_nr = sys->busnr;
- bus = pci_create_root_bus(pp->dev, sys->busnr,
- &dw_pcie_ops, sys, &sys->resources);
- if (!bus)
- return NULL;
-
- pci_scan_child_bus(bus);
-
- if (bus && pp->ops->scan_bus)
- pp->ops->scan_bus(pp);
-
- return bus;
-}
-
-static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
- int irq;
-
- irq = of_irq_parse_and_map_pci(dev, slot, pin);
- if (!irq)
- irq = pp->irq;
-
- return irq;
-}
-
-static struct hw_pci dw_pci = {
- .setup = dw_pcie_setup,
- .scan = dw_pcie_scan_bus,
- .map_irq = dw_pcie_map_irq,
-};
-
void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val;
@@ -778,6 +724,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
case 4:
val |= PORT_LINK_MODE_4_LANES;
break;
+ case 8:
+ val |= PORT_LINK_MODE_8_LANES;
+ break;
+ default:
+ dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
+ return;
}
dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
@@ -794,6 +746,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
case 4:
val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
break;
+ case 8:
+ val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
+ break;
}
dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
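The main structural change in this file is dw_pcie_prog_outbound_atu(), which replaces the four per-viewport helpers: it selects an outbound iATU region, programs the CPU-side base and inclusive limit, the PCI-side target and the region type, then enables the region. Config accesses to downstream buses reuse region index 0, switching it to CFG0 or CFG1 for the access and restoring it to the I/O window afterwards. The register values are simply the 32-bit halves of the 64-bit addresses plus the inclusive limit; a stand-alone illustration of that arithmetic with made-up addresses:

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	/* Hypothetical outbound MEM window: CPU 0x2_4000_0000 -> PCI 0x4000_0000, 256 MiB */
	uint64_t cpu_addr = 0x240000000ULL;
	uint64_t pci_addr = 0x40000000ULL;
	uint32_t size     = 0x10000000;

	printf("PCIE_ATU_LOWER_BASE   = 0x%08x\n", lower_32_bits(cpu_addr));
	printf("PCIE_ATU_UPPER_BASE   = 0x%08x\n", upper_32_bits(cpu_addr));
	printf("PCIE_ATU_LIMIT        = 0x%08x\n", lower_32_bits(cpu_addr + size - 1));
	printf("PCIE_ATU_LOWER_TARGET = 0x%08x\n", lower_32_bits(pci_addr));
	printf("PCIE_ATU_UPPER_TARGET = 0x%08x\n", upper_32_bits(pci_addr));
	return 0;
}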
diff --git a/kernel/drivers/pci/host/pcie-designware.h b/kernel/drivers/pci/host/pcie-designware.h
index d0bbd2768..2356d29e8 100644
--- a/kernel/drivers/pci/host/pcie-designware.h
+++ b/kernel/drivers/pci/host/pcie-designware.h
@@ -27,25 +27,21 @@ struct pcie_port {
u8 root_bus_nr;
void __iomem *dbi_base;
u64 cfg0_base;
- u64 cfg0_mod_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
u64 cfg1_base;
- u64 cfg1_mod_base;
void __iomem *va_cfg1_base;
u32 cfg1_size;
- u64 io_base;
- u64 io_mod_base;
+ resource_size_t io_base;
phys_addr_t io_bus_addr;
u32 io_size;
u64 mem_base;
- u64 mem_mod_base;
phys_addr_t mem_bus_addr;
u32 mem_size;
- struct resource cfg;
- struct resource io;
- struct resource mem;
- struct resource busn;
+ struct resource *cfg;
+ struct resource *io;
+ struct resource *mem;
+ struct resource *busn;
int irq;
u32 lanes;
struct pcie_host_ops *ops;
@@ -70,14 +66,14 @@ struct pcie_host_ops {
void (*host_init)(struct pcie_port *pp);
void (*msi_set_irq)(struct pcie_port *pp, int irq);
void (*msi_clear_irq)(struct pcie_port *pp, int irq);
- u32 (*get_msi_addr)(struct pcie_port *pp);
+ phys_addr_t (*get_msi_addr)(struct pcie_port *pp);
u32 (*get_msi_data)(struct pcie_port *pp, int pos);
void (*scan_bus)(struct pcie_port *pp);
int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
};
-int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
-int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val);
+int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val);
+int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val);
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
void dw_pcie_msi_init(struct pcie_port *pp);
int dw_pcie_link_up(struct pcie_port *pp);
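Widening get_msi_addr() from u32 to phys_addr_t lets a glue driver return a doorbell address above 4 GiB; dw_msi_setup_msg() then splits whatever is returned into address_lo and address_hi. A hypothetical sketch of a glue driver supplying its own doorbell (all names and the offset below are invented for illustration):

/* Hypothetical glue driver; names and the doorbell offset are invented */
#include <linux/kernel.h>
#include "pcie-designware.h"

#define DEMO_MSI_DOORBELL_OFFSET	0x100	/* invented offset */

struct demo_pcie {
	phys_addr_t	app_base_phys;
	struct pcie_port pp;
};

#define to_demo_pcie(port)	container_of(port, struct demo_pcie, pp)

static phys_addr_t demo_get_msi_addr(struct pcie_port *pp)
{
	struct demo_pcie *demo = to_demo_pcie(pp);

	/* Point MSI writes at a doorbell register in the glue layer's own block */
	return demo->app_base_phys + DEMO_MSI_DOORBELL_OFFSET;
}

static struct pcie_host_ops demo_pcie_host_ops = {
	.get_msi_addr	= demo_get_msi_addr,
};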
diff --git a/kernel/drivers/pci/host/pcie-hisi.c b/kernel/drivers/pci/host/pcie-hisi.c
new file mode 100644
index 000000000..77f7c669a
--- /dev/null
+++ b/kernel/drivers/pci/host/pcie-hisi.c
@@ -0,0 +1,200 @@
+/*
+ * PCIe host controller driver for HiSilicon Hip05 SoC
+ *
+ * Copyright (C) 2015 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Author: Zhou Wang <wangzhou1@hisilicon.com>
+ * Dacai Zhu <zhudacai@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818
+#define PCIE_LTSSM_LINKUP_STATE 0x11
+#define PCIE_LTSSM_STATE_MASK 0x3F
+
+#define to_hisi_pcie(x) container_of(x, struct hisi_pcie, pp)
+
+struct hisi_pcie {
+ struct regmap *subctrl;
+ void __iomem *reg_base;
+ u32 port_id;
+ struct pcie_port pp;
+};
+
+static inline void hisi_pcie_apb_writel(struct hisi_pcie *pcie,
+ u32 val, u32 reg)
+{
+ writel(val, pcie->reg_base + reg);
+}
+
+static inline u32 hisi_pcie_apb_readl(struct hisi_pcie *pcie, u32 reg)
+{
+ return readl(pcie->reg_base + reg);
+}
+
+/* Hip05 PCIe host only supports 32-bit config access */
+static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ u32 reg;
+ u32 reg_val;
+ struct hisi_pcie *pcie = to_hisi_pcie(pp);
+ void *walker = &reg_val;
+
+ walker += (where & 0x3);
+ reg = where & ~0x3;
+ reg_val = hisi_pcie_apb_readl(pcie, reg);
+
+ if (size == 1)
+ *val = *(u8 __force *) walker;
+ else if (size == 2)
+ *val = *(u16 __force *) walker;
+ else if (size == 4)
+ *val = reg_val;
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* Hip05 PCIe host only supports 32-bit config access */
+static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size,
+ u32 val)
+{
+ u32 reg_val;
+ u32 reg;
+ struct hisi_pcie *pcie = to_hisi_pcie(pp);
+ void *walker = &reg_val;
+
+ walker += (where & 0x3);
+ reg = where & ~0x3;
+ if (size == 4)
+ hisi_pcie_apb_writel(pcie, val, reg);
+ else if (size == 2) {
+ reg_val = hisi_pcie_apb_readl(pcie, reg);
+ *(u16 __force *) walker = val;
+ hisi_pcie_apb_writel(pcie, reg_val, reg);
+ } else if (size == 1) {
+ reg_val = hisi_pcie_apb_readl(pcie, reg);
+ *(u8 __force *) walker = val;
+ hisi_pcie_apb_writel(pcie, reg_val, reg);
+ } else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int hisi_pcie_link_up(struct pcie_port *pp)
+{
+ u32 val;
+ struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp);
+
+ regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG +
+ 0x100 * hisi_pcie->port_id, &val);
+
+ return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
+}
+
+static struct pcie_host_ops hisi_pcie_host_ops = {
+ .rd_own_conf = hisi_pcie_cfg_read,
+ .wr_own_conf = hisi_pcie_cfg_write,
+ .link_up = hisi_pcie_link_up,
+};
+
+static int hisi_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ int ret;
+ u32 port_id;
+ struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp);
+
+ if (of_property_read_u32(pdev->dev.of_node, "port-id", &port_id)) {
+ dev_err(&pdev->dev, "failed to read port-id\n");
+ return -EINVAL;
+ }
+ if (port_id > 3) {
+ dev_err(&pdev->dev, "Invalid port-id: %d\n", port_id);
+ return -EINVAL;
+ }
+ hisi_pcie->port_id = port_id;
+
+ pp->ops = &hisi_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hisi_pcie_probe(struct platform_device *pdev)
+{
+ struct hisi_pcie *hisi_pcie;
+ struct pcie_port *pp;
+ struct resource *reg;
+ int ret;
+
+ hisi_pcie = devm_kzalloc(&pdev->dev, sizeof(*hisi_pcie), GFP_KERNEL);
+ if (!hisi_pcie)
+ return -ENOMEM;
+
+ pp = &hisi_pcie->pp;
+ pp->dev = &pdev->dev;
+
+ hisi_pcie->subctrl =
+ syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
+ if (IS_ERR(hisi_pcie->subctrl)) {
+ dev_err(pp->dev, "cannot get subctrl base\n");
+ return PTR_ERR(hisi_pcie->subctrl);
+ }
+
+ reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
+ hisi_pcie->reg_base = devm_ioremap_resource(&pdev->dev, reg);
+ if (IS_ERR(hisi_pcie->reg_base)) {
+ dev_err(pp->dev, "cannot get rc_dbi base\n");
+ return PTR_ERR(hisi_pcie->reg_base);
+ }
+
+ hisi_pcie->pp.dbi_base = hisi_pcie->reg_base;
+
+ ret = hisi_add_pcie_port(pp, pdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, hisi_pcie);
+
+ dev_warn(pp->dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
+
+ return 0;
+}
+
+static const struct of_device_id hisi_pcie_of_match[] = {
+ {.compatible = "hisilicon,hip05-pcie",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hisi_pcie_of_match);
+
+static struct platform_driver hisi_pcie_driver = {
+ .probe = hisi_pcie_probe,
+ .driver = {
+ .name = "hisi-pcie",
+ .of_match_table = hisi_pcie_of_match,
+ },
+};
+
+module_platform_driver(hisi_pcie_driver);
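Because the Hip05 host bridge only performs 32-bit register accesses, hisi_pcie_cfg_write() emulates sub-dword config writes with a read-modify-write: it reads the containing dword, patches the relevant byte or halfword through a byte pointer placed at offset (where & 0x3), and writes the whole dword back, which is also why the probe path warns about possible side effects on neighbouring RW1C fields. A stand-alone demonstration of the merge step (little-endian layout assumed, as on the target SoC):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Merge a 'size'-byte value into a 32-bit register image at byte offset (where & 3) */
static uint32_t merge(uint32_t reg_val, int where, int size, uint32_t val)
{
	unsigned char *walker = (unsigned char *)&reg_val + (where & 0x3);

	if (size == 2) {
		uint16_t v16 = (uint16_t)val;
		memcpy(walker, &v16, sizeof(v16));
	} else if (size == 1) {
		uint8_t v8 = (uint8_t)val;
		memcpy(walker, &v8, sizeof(v8));
	}
	return reg_val;
}

int main(void)
{
	/* Patch byte 2 of 0x11223344 with 0xAA -> 0x11AA3344 on little-endian */
	printf("0x%08x\n", merge(0x11223344, 2, 1, 0xAA));
	/* Patch the upper halfword with 0xBEEF -> 0xBEEF3344 */
	printf("0x%08x\n", merge(0x11223344, 2, 2, 0xBEEF));
	return 0;
}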
diff --git a/kernel/drivers/pci/host/pcie-iproc-bcma.c b/kernel/drivers/pci/host/pcie-iproc-bcma.c
new file mode 100644
index 000000000..96a7d999f
--- /dev/null
+++ b/kernel/drivers/pci/host/pcie-iproc-bcma.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ * Copyright (C) 2015 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/phy/phy.h>
+#include <linux/bcma/bcma.h>
+#include <linux/ioport.h>
+
+#include "pcie-iproc.h"
+
+
+/* NS: CLASS field is R/O, and set to wrong 0x200 value */
+static void bcma_pcie2_fixup_class(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class);
+
+static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct pci_sys_data *sys = dev->sysdata;
+ struct iproc_pcie *pcie = sys->private_data;
+ struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev);
+
+ return bcma_core_irq(bdev, 5);
+}
+
+static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
+{
+ struct iproc_pcie *pcie;
+ LIST_HEAD(res);
+ struct resource res_mem;
+ int ret;
+
+ pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = &bdev->dev;
+ bcma_set_drvdata(bdev, pcie);
+
+ pcie->base = bdev->io_addr;
+
+ res_mem.start = bdev->addr_s[0];
+ res_mem.end = bdev->addr_s[0] + SZ_128M - 1;
+ res_mem.name = "PCIe MEM space";
+ res_mem.flags = IORESOURCE_MEM;
+ pci_add_resource(&res, &res_mem);
+
+ pcie->map_irq = iproc_pcie_bcma_map_irq;
+
+ ret = iproc_pcie_setup(pcie, &res);
+ if (ret)
+ dev_err(pcie->dev, "PCIe controller setup failed\n");
+
+ pci_free_resource_list(&res);
+
+ return ret;
+}
+
+static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
+{
+ struct iproc_pcie *pcie = bcma_get_drvdata(bdev);
+
+ iproc_pcie_remove(pcie);
+}
+
+static const struct bcma_device_id iproc_pcie_bcma_table[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ {},
+};
+MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table);
+
+static struct bcma_driver iproc_pcie_bcma_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = iproc_pcie_bcma_table,
+ .probe = iproc_pcie_bcma_probe,
+ .remove = iproc_pcie_bcma_remove,
+};
+
+static int __init iproc_pcie_bcma_init(void)
+{
+ return bcma_driver_register(&iproc_pcie_bcma_driver);
+}
+module_init(iproc_pcie_bcma_init);
+
+static void __exit iproc_pcie_bcma_exit(void)
+{
+ bcma_driver_unregister(&iproc_pcie_bcma_driver);
+}
+module_exit(iproc_pcie_bcma_exit);
+
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/pci/host/pcie-iproc-platform.c b/kernel/drivers/pci/host/pcie-iproc-platform.c
index afad6c21f..c9550dc8b 100644
--- a/kernel/drivers/pci/host/pcie-iproc-platform.c
+++ b/kernel/drivers/pci/host/pcie-iproc-platform.c
@@ -54,6 +54,33 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ if (of_property_read_bool(np, "brcm,pcie-ob")) {
+ u32 val;
+
+ ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset",
+ &val);
+ if (ret) {
+ dev_err(pcie->dev,
+ "missing brcm,pcie-ob-axi-offset property\n");
+ return ret;
+ }
+ pcie->ob.axi_offset = val;
+
+ ret = of_property_read_u32(np, "brcm,pcie-ob-window-size",
+ &val);
+ if (ret) {
+ dev_err(pcie->dev,
+ "missing brcm,pcie-ob-window-size property\n");
+ return ret;
+ }
+ pcie->ob.window_size = (resource_size_t)val * SZ_1M;
+
+ if (of_property_read_bool(np, "brcm,pcie-ob-oarr-size"))
+ pcie->ob.set_oarr_size = true;
+
+ pcie->need_ob_cfg = true;
+ }
+
/* PHY use is optional */
pcie->phy = devm_phy_get(&pdev->dev, "pcie-phy");
if (IS_ERR(pcie->phy)) {
@@ -69,15 +96,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
return ret;
}
- pcie->resources = &res;
+ pcie->map_irq = of_irq_parse_and_map_pci;
- ret = iproc_pcie_setup(pcie);
- if (ret) {
+ ret = iproc_pcie_setup(pcie, &res);
+ if (ret)
dev_err(pcie->dev, "PCIe controller setup failed\n");
- return ret;
- }
- return 0;
+ pci_free_resource_list(&res);
+
+ return ret;
}
static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
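One detail worth noting in the outbound-window parsing above is the cast in pcie->ob.window_size = (resource_size_t)val * SZ_1M: the device-tree cell is a 32-bit count of megabytes, so widening before the multiply keeps window sizes of 4 GiB and larger from being truncated where resource_size_t is 64-bit. A stand-alone illustration (the 8 GiB figure is chosen only to make the truncation visible):

#include <stdint.h>
#include <stdio.h>

#define SZ_1M 0x00100000

int main(void)
{
	uint32_t mb = 8192;			/* 8 GiB window, expressed in MiB */
	uint64_t wrong = mb * SZ_1M;		/* multiply done in 32 bits, then widened */
	uint64_t right = (uint64_t)mb * SZ_1M;	/* widen first, as the driver does */

	printf("wrong: 0x%llx\n", (unsigned long long)wrong);	/* 0x0 after truncation */
	printf("right: 0x%llx\n", (unsigned long long)right);	/* 0x200000000 */
	return 0;
}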
diff --git a/kernel/drivers/pci/host/pcie-iproc.c b/kernel/drivers/pci/host/pcie-iproc.c
index 329e1b545..eac719af1 100644
--- a/kernel/drivers/pci/host/pcie-iproc.c
+++ b/kernel/drivers/pci/host/pcie-iproc.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de>
- * Copyright (C) 2015 Broadcom Corporatcommon ion
+ * Copyright (C) 2015 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -31,6 +31,8 @@
#include "pcie-iproc.h"
#define CLK_CONTROL_OFFSET 0x000
+#define EP_PERST_SOURCE_SELECT_SHIFT 2
+#define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT)
#define EP_MODE_SURVIVE_PERST_SHIFT 1
#define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT)
#define RC_PCIE_RST_OUTPUT_SHIFT 0
@@ -58,9 +60,35 @@
#define SYS_RC_INTX_EN 0x330
#define SYS_RC_INTX_MASK 0xf
-static inline struct iproc_pcie *sys_to_pcie(struct pci_sys_data *sys)
+#define PCIE_LINK_STATUS_OFFSET 0xf0c
+#define PCIE_PHYLINKUP_SHIFT 3
+#define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT)
+#define PCIE_DL_ACTIVE_SHIFT 2
+#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT)
+
+#define OARR_VALID_SHIFT 0
+#define OARR_VALID BIT(OARR_VALID_SHIFT)
+#define OARR_SIZE_CFG_SHIFT 1
+#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
+
+#define OARR_LO(window) (0xd20 + (window) * 8)
+#define OARR_HI(window) (0xd24 + (window) * 8)
+#define OMAP_LO(window) (0xd40 + (window) * 8)
+#define OMAP_HI(window) (0xd44 + (window) * 8)
+
+#define MAX_NUM_OB_WINDOWS 2
+
+static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
{
- return sys->private_data;
+ struct iproc_pcie *pcie;
+#ifdef CONFIG_ARM
+ struct pci_sys_data *sys = bus->sysdata;
+
+ pcie = sys->private_data;
+#else
+ pcie = bus->sysdata;
+#endif
+ return pcie;
}
/**
@@ -71,8 +99,7 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
unsigned int devfn,
int where)
{
- struct pci_sys_data *sys = bus->sysdata;
- struct iproc_pcie *pcie = sys_to_pcie(sys);
+ struct iproc_pcie *pcie = iproc_data(bus);
unsigned slot = PCI_SLOT(devfn);
unsigned fn = PCI_FUNC(devfn);
unsigned busno = bus->number;
@@ -112,23 +139,32 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie)
u32 val;
/*
- * Configure the PCIe controller as root complex and send a downstream
- * reset
+ * Select perst_b signal as reset source. Put the device into reset,
+ * and then bring it out of reset
*/
- val = EP_MODE_SURVIVE_PERST | RC_PCIE_RST_OUTPUT;
+ val = readl(pcie->base + CLK_CONTROL_OFFSET);
+ val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
+ ~RC_PCIE_RST_OUTPUT;
writel(val, pcie->base + CLK_CONTROL_OFFSET);
udelay(250);
- val &= ~EP_MODE_SURVIVE_PERST;
+
+ val |= RC_PCIE_RST_OUTPUT;
writel(val, pcie->base + CLK_CONTROL_OFFSET);
- msleep(250);
+ msleep(100);
}
static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
{
u8 hdr_type;
- u32 link_ctrl;
+ u32 link_ctrl, class, val;
u16 pos, link_status;
- int link_is_active = 0;
+ bool link_is_active = false;
+
+ val = readl(pcie->base + PCIE_LINK_STATUS_OFFSET);
+ if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
+ dev_err(pcie->dev, "PHY or data link is INACTIVE!\n");
+ return -ENODEV;
+ }
/* make sure we are not in EP mode */
pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
@@ -138,14 +174,19 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
}
/* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
- pci_bus_write_config_word(bus, 0, PCI_CLASS_DEVICE,
- PCI_CLASS_BRIDGE_PCI);
+#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c
+#define PCI_CLASS_BRIDGE_MASK 0xffff00
+#define PCI_CLASS_BRIDGE_SHIFT 8
+ pci_bus_read_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, &class);
+ class &= ~PCI_CLASS_BRIDGE_MASK;
+ class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
+ pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class);
/* check link status to see if link is active */
pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
- link_is_active = 1;
+ link_is_active = true;
if (!link_is_active) {
/* try GEN 1 link speed */
@@ -169,7 +210,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
&link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
- link_is_active = 1;
+ link_is_active = true;
}
}
@@ -183,35 +224,140 @@ static void iproc_pcie_enable(struct iproc_pcie *pcie)
writel(SYS_RC_INTX_MASK, pcie->base + SYS_RC_INTX_EN);
}
-int iproc_pcie_setup(struct iproc_pcie *pcie)
+/**
+ * Some iProc SoCs require the SW to configure the outbound address mapping
+ *
+ * Outbound address translation:
+ *
+ * iproc_pcie_address = axi_address - axi_offset
+ * OARR = iproc_pcie_address
+ * OMAP = pci_addr
+ *
+ * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address
+ */
+static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
+ u64 pci_addr, resource_size_t size)
+{
+ struct iproc_pcie_ob *ob = &pcie->ob;
+ unsigned i;
+ u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
+ u64 remainder;
+
+ if (size > max_size) {
+ dev_err(pcie->dev,
+ "res size 0x%pap exceeds max supported size 0x%llx\n",
+ &size, max_size);
+ return -EINVAL;
+ }
+
+ div64_u64_rem(size, ob->window_size, &remainder);
+ if (remainder) {
+ dev_err(pcie->dev,
+ "res size %pap needs to be multiple of window size %pap\n",
+ &size, &ob->window_size);
+ return -EINVAL;
+ }
+
+ if (axi_addr < ob->axi_offset) {
+ dev_err(pcie->dev,
+ "axi address %pap less than offset %pap\n",
+ &axi_addr, &ob->axi_offset);
+ return -EINVAL;
+ }
+
+ /*
+ * Translate the AXI address to the internal address used by the iProc
+ * PCIe core before programming the OARR
+ */
+ axi_addr -= ob->axi_offset;
+
+ for (i = 0; i < MAX_NUM_OB_WINDOWS; i++) {
+ writel(lower_32_bits(axi_addr) | OARR_VALID |
+ (ob->set_oarr_size ? 1 : 0), pcie->base + OARR_LO(i));
+ writel(upper_32_bits(axi_addr), pcie->base + OARR_HI(i));
+ writel(lower_32_bits(pci_addr), pcie->base + OMAP_LO(i));
+ writel(upper_32_bits(pci_addr), pcie->base + OMAP_HI(i));
+
+ size -= ob->window_size;
+ if (size == 0)
+ break;
+
+ axi_addr += ob->window_size;
+ pci_addr += ob->window_size;
+ }
+
+ return 0;
+}
+
+static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
+ struct list_head *resources)
+{
+ struct resource_entry *window;
+ int ret;
+
+ resource_list_for_each_entry(window, resources) {
+ struct resource *res = window->res;
+ u64 res_type = resource_type(res);
+
+ switch (res_type) {
+ case IORESOURCE_IO:
+ case IORESOURCE_BUS:
+ break;
+ case IORESOURCE_MEM:
+ ret = iproc_pcie_setup_ob(pcie, res->start,
+ res->start - window->offset,
+ resource_size(res));
+ if (ret)
+ return ret;
+ break;
+ default:
+ dev_err(pcie->dev, "invalid resource %pR\n", res);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
int ret;
+ void *sysdata;
struct pci_bus *bus;
if (!pcie || !pcie->dev || !pcie->base)
return -EINVAL;
- if (pcie->phy) {
- ret = phy_init(pcie->phy);
- if (ret) {
- dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
- return ret;
- }
-
- ret = phy_power_on(pcie->phy);
- if (ret) {
- dev_err(pcie->dev, "unable to power on PCIe PHY\n");
- goto err_exit_phy;
- }
+ ret = phy_init(pcie->phy);
+ if (ret) {
+ dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
+ return ret;
+ }
+ ret = phy_power_on(pcie->phy);
+ if (ret) {
+ dev_err(pcie->dev, "unable to power on PCIe PHY\n");
+ goto err_exit_phy;
}
iproc_pcie_reset(pcie);
+ if (pcie->need_ob_cfg) {
+ ret = iproc_pcie_map_ranges(pcie, res);
+ if (ret) {
+ dev_err(pcie->dev, "map failed\n");
+ goto err_power_off_phy;
+ }
+ }
+
+#ifdef CONFIG_ARM
pcie->sysdata.private_data = pcie;
+ sysdata = &pcie->sysdata;
+#else
+ sysdata = pcie;
+#endif
- bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops,
- &pcie->sysdata, pcie->resources);
+ bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, sysdata, res);
if (!bus) {
dev_err(pcie->dev, "unable to create PCI root bus\n");
ret = -ENOMEM;
@@ -229,7 +375,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie)
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
- pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+ pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
pci_bus_add_devices(bus);
return 0;
@@ -239,12 +385,9 @@ err_rm_root_bus:
pci_remove_root_bus(bus);
err_power_off_phy:
- if (pcie->phy)
- phy_power_off(pcie->phy);
+ phy_power_off(pcie->phy);
err_exit_phy:
- if (pcie->phy)
- phy_exit(pcie->phy);
-
+ phy_exit(pcie->phy);
return ret;
}
EXPORT_SYMBOL(iproc_pcie_setup);
@@ -254,10 +397,8 @@ int iproc_pcie_remove(struct iproc_pcie *pcie)
pci_stop_root_bus(pcie->root_bus);
pci_remove_root_bus(pcie->root_bus);
- if (pcie->phy) {
- phy_power_off(pcie->phy);
- phy_exit(pcie->phy);
- }
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
return 0;
}
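iproc_pcie_setup_ob() spreads one contiguous mapping across up to MAX_NUM_OB_WINDOWS equally sized OARR/OMAP pairs: after subtracting the AXI offset it programs one window, then advances both the internal address and the PCI address by window_size until the requested size is consumed. A stand-alone walk-through of that loop with made-up addresses:

#include <stdint.h>
#include <stdio.h>

#define MAX_NUM_OB_WINDOWS 2

int main(void)
{
	/* Hypothetical values: 128 MiB windows, 256 MiB total mapping */
	uint64_t axi_offset  = 0x0;
	uint64_t axi_addr    = 0x20000000;
	uint64_t pci_addr    = 0x00000000;
	uint64_t window_size = 0x08000000;	/* 128 MiB */
	uint64_t size        = 0x10000000;	/* 256 MiB */

	axi_addr -= axi_offset;	/* translate AXI address to the internal address */

	for (unsigned int i = 0; i < MAX_NUM_OB_WINDOWS; i++) {
		printf("OARR%u = 0x%llx  OMAP%u = 0x%llx\n",
		       i, (unsigned long long)axi_addr,
		       i, (unsigned long long)pci_addr);

		size -= window_size;
		if (size == 0)
			break;

		axi_addr += window_size;
		pci_addr += window_size;
	}
	return 0;
}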
diff --git a/kernel/drivers/pci/host/pcie-iproc.h b/kernel/drivers/pci/host/pcie-iproc.h
index e28075ed1..d3dc940f7 100644
--- a/kernel/drivers/pci/host/pcie-iproc.h
+++ b/kernel/drivers/pci/host/pcie-iproc.h
@@ -14,29 +14,45 @@
#ifndef _PCIE_IPROC_H
#define _PCIE_IPROC_H
-#define IPROC_PCIE_MAX_NUM_IRQS 6
+/**
+ * iProc PCIe outbound mapping
+ * @set_oarr_size: indicates the OARR size bit needs to be set
+ * @axi_offset: offset from the AXI address to the internal address used by
+ * the iProc PCIe core
+ * @window_size: outbound window size
+ */
+struct iproc_pcie_ob {
+ bool set_oarr_size;
+ resource_size_t axi_offset;
+ resource_size_t window_size;
+};
/**
* iProc PCIe device
* @dev: pointer to device data structure
* @base: PCIe host controller I/O register base
- * @resources: linked list of all PCI resources
- * @sysdata: Per PCI controller data
+ * @sysdata: Per PCI controller data (ARM-specific)
* @root_bus: pointer to root bus
* @phy: optional PHY device that controls the Serdes
* @irqs: interrupt IDs
+ * @map_irq: function callback to map interrupts
+ * @need_ob_cfg: indicates SW needs to configure the outbound mapping window
+ * @ob: outbound mapping parameters
*/
struct iproc_pcie {
struct device *dev;
void __iomem *base;
- struct list_head *resources;
+#ifdef CONFIG_ARM
struct pci_sys_data sysdata;
+#endif
struct pci_bus *root_bus;
struct phy *phy;
- int irqs[IPROC_PCIE_MAX_NUM_IRQS];
+ int (*map_irq)(const struct pci_dev *, u8, u8);
+ bool need_ob_cfg;
+ struct iproc_pcie_ob ob;
};
-int iproc_pcie_setup(struct iproc_pcie *pcie);
+int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res);
int iproc_pcie_remove(struct iproc_pcie *pcie);
#endif /* _PCIE_IPROC_H */
diff --git a/kernel/drivers/pci/host/pcie-rcar.c b/kernel/drivers/pci/host/pcie-rcar.c
index c086210f2..414c33686 100644
--- a/kernel/drivers/pci/host/pcie-rcar.c
+++ b/kernel/drivers/pci/host/pcie-rcar.c
@@ -108,6 +108,8 @@
#define RCAR_PCI_MAX_RESOURCES 4
#define MAX_NR_INBOUND_MAPS 6
+static unsigned long global_io_offset;
+
struct rcar_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
@@ -124,7 +126,16 @@ static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
}
/* Structure representing the PCIe interface */
+/*
+ * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI
+ * sysdata. Add pci_sys_data as the first element in struct rcar_pcie so
+ * that when we use a rcar_pcie pointer as sysdata, it is also a pointer to
+ * a struct pci_sys_data.
+ */
struct rcar_pcie {
+#ifdef CONFIG_ARM
+ struct pci_sys_data sys;
+#endif
struct device *dev;
void __iomem *base;
struct resource res[RCAR_PCI_MAX_RESOURCES];
@@ -135,11 +146,6 @@ struct rcar_pcie {
struct rcar_msi msi;
};
-static inline struct rcar_pcie *sys_to_pcie(struct pci_sys_data *sys)
-{
- return sys->private_data;
-}
-
static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
unsigned long reg)
{
@@ -258,7 +264,7 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie,
static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct rcar_pcie *pcie = bus->sysdata;
int ret;
ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
@@ -283,7 +289,7 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct rcar_pcie *pcie = bus->sysdata;
int shift, ret;
u32 data;
@@ -353,13 +359,12 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
}
-static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
+static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pcie)
{
- struct rcar_pcie *pcie = sys_to_pcie(sys);
struct resource *res;
int i;
- pcie->root_bus_nr = -1;
+ pcie->root_bus_nr = pcie->busn.start;
/* Setup PCI resources */
for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
@@ -372,32 +377,53 @@ static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
if (res->flags & IORESOURCE_IO) {
phys_addr_t io_start = pci_pio_to_address(res->start);
- pci_ioremap_io(nr * SZ_64K, io_start);
- } else
- pci_add_resource(&sys->resources, res);
+ pci_ioremap_io(global_io_offset, io_start);
+ global_io_offset += SZ_64K;
+ }
+
+ pci_add_resource(resource, res);
}
- pci_add_resource(&sys->resources, &pcie->busn);
+ pci_add_resource(resource, &pcie->busn);
return 1;
}
-static struct hw_pci rcar_pci = {
- .setup = rcar_pcie_setup,
- .map_irq = of_irq_parse_and_map_pci,
- .ops = &rcar_pcie_ops,
-};
-
-static void rcar_pcie_enable(struct rcar_pcie *pcie)
+static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
- struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct pci_bus *bus, *child;
+ LIST_HEAD(res);
- rcar_pci.nr_controllers = 1;
- rcar_pci.private_data = (void **)&pcie;
-#ifdef CONFIG_PCI_MSI
- rcar_pci.msi_ctrl = &pcie->msi.chip;
-#endif
+ rcar_pcie_setup(&res, pcie);
+
+ /* Do not reassign resources if probe only */
+ if (!pci_has_flag(PCI_PROBE_ONLY))
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
- pci_common_init_dev(&pdev->dev, &rcar_pci);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
+ &rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
+ else
+ bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr,
+ &rcar_pcie_ops, pcie, &res);
+
+ if (!bus) {
+ dev_err(pcie->dev, "Scanning rootbus failed");
+ return -ENODEV;
+ }
+
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ }
+
+ pci_bus_add_devices(bus);
+
+ return 0;
}
static int phy_wait_for_ack(struct rcar_pcie *pcie)
@@ -664,7 +690,6 @@ static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -695,14 +720,16 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
/* Two irqs are for MSI, but they are also used for non-MSI irqs */
err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
- IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ rcar_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
goto err;
}
err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
- IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ rcar_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
goto err;
@@ -971,9 +998,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
data = rcar_pci_read_reg(pcie, MACSR);
dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
- rcar_pcie_enable(pcie);
-
- return 0;
+ return rcar_pcie_enable(pcie);
}
static struct platform_driver rcar_pcie_driver = {
diff --git a/kernel/drivers/pci/host/pcie-spear13xx.c b/kernel/drivers/pci/host/pcie-spear13xx.c
index 020d78890..a6cd8233e 100644
--- a/kernel/drivers/pci/host/pcie-spear13xx.c
+++ b/kernel/drivers/pci/host/pcie-spear13xx.c
@@ -4,8 +4,8 @@
* SPEAr13xx PCIe Glue Layer Source Code
*
* Copyright (C) 2010-2014 ST Microelectronics
- * Pratyush Anand <pratyush.anand@st.com>
- * Mohit Kumar <mohit.kumar@st.com>
+ * Pratyush Anand <pratyush.anand@gmail.com>
+ * Mohit Kumar <mohit.kumar.dhaka@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -146,10 +146,10 @@ struct pcie_app_reg {
static int spear13xx_pcie_establish_link(struct pcie_port *pp)
{
u32 val;
- int count = 0;
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
u32 exp_cap_off = EXP_CAP_ID_OFFSET;
+ unsigned int retries;
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "link already up\n");
@@ -163,34 +163,34 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp)
* default value in capability register is 512 bytes. So force
* it to 128 here.
*/
- dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, &val);
+ dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val);
val &= ~PCI_EXP_DEVCTL_READRQ;
- dw_pcie_cfg_write(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, val);
+ dw_pcie_cfg_write(pp->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val);
- dw_pcie_cfg_write(pp->dbi_base, PCI_VENDOR_ID, 2, 0x104A);
- dw_pcie_cfg_write(pp->dbi_base, PCI_DEVICE_ID, 2, 0xCD80);
+ dw_pcie_cfg_write(pp->dbi_base + PCI_VENDOR_ID, 2, 0x104A);
+ dw_pcie_cfg_write(pp->dbi_base + PCI_DEVICE_ID, 2, 0xCD80);
/*
* if is_gen1 is set then handle it, so that some buggy card
* also works
*/
if (spear13xx_pcie->is_gen1) {
- dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCAP, 4,
- &val);
+ dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
+ 4, &val);
if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
val &= ~((u32)PCI_EXP_LNKCAP_SLS);
val |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_cfg_write(pp->dbi_base, exp_cap_off +
- PCI_EXP_LNKCAP, 4, val);
+ dw_pcie_cfg_write(pp->dbi_base + exp_cap_off +
+ PCI_EXP_LNKCAP, 4, val);
}
- dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCTL2, 4,
- &val);
+ dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
+ 2, &val);
if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
val &= ~((u32)PCI_EXP_LNKCAP_SLS);
val |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_cfg_write(pp->dbi_base, exp_cap_off +
- PCI_EXP_LNKCTL2, 4, val);
+ dw_pcie_cfg_write(pp->dbi_base + exp_cap_off +
+ PCI_EXP_LNKCTL2, 2, val);
}
}
@@ -201,17 +201,16 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp)
&app_reg->app_ctrl_0);
/* check if the link is up or not */
- while (!dw_pcie_link_up(pp)) {
- mdelay(100);
- count++;
- if (count == 10) {
- dev_err(pp->dev, "link Fail\n");
- return -EINVAL;
+ for (retries = 0; retries < 10; retries++) {
+ if (dw_pcie_link_up(pp)) {
+ dev_info(pp->dev, "link up\n");
+ return 0;
}
+ mdelay(100);
}
- dev_info(pp->dev, "link up\n");
- return 0;
+ dev_err(pp->dev, "link Fail\n");
+ return -EINVAL;
}
static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
@@ -224,8 +223,7 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
status = readl(&app_reg->int_sts);
if (status & MSI_CTRL_INT) {
- if (!IS_ENABLED(CONFIG_PCI_MSI))
- BUG();
+ BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI));
dw_handle_msi_irq(pp);
}
@@ -281,7 +279,8 @@ static int spear13xx_add_pcie_port(struct pcie_port *pp,
return -ENODEV;
}
ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
- IRQF_SHARED, "spear1340-pcie", pp);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "spear1340-pcie", pp);
if (ret) {
dev_err(dev, "failed to request irq %d\n", pp->irq);
return ret;
@@ -387,5 +386,5 @@ static int __init spear13xx_pcie_init(void)
module_init(spear13xx_pcie_init);
MODULE_DESCRIPTION("ST Microelectronics SPEAr13xx PCIe host controller driver");
-MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>");
+MODULE_AUTHOR("Pratyush Anand <pratyush.anand@gmail.com>");
MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/pci/host/pcie-xilinx.c b/kernel/drivers/pci/host/pcie-xilinx.c
index f1a06a091..4cfa46360 100644
--- a/kernel/drivers/pci/host/pcie-xilinx.c
+++ b/kernel/drivers/pci/host/pcie-xilinx.c
@@ -227,18 +227,16 @@ static struct pci_ops xilinx_pcie_ops = {
*/
static void xilinx_pcie_destroy_msi(unsigned int irq)
{
- struct irq_desc *desc;
struct msi_desc *msi;
struct xilinx_pcie_port *port;
- desc = irq_to_desc(irq);
- msi = irq_desc_get_msi_desc(desc);
- port = sys_to_pcie(msi->dev->bus->sysdata);
-
- if (!test_bit(irq, msi_irq_in_use))
+ if (!test_bit(irq, msi_irq_in_use)) {
+ msi = irq_get_msi_desc(irq);
+ port = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
- else
+ } else {
clear_bit(irq, msi_irq_in_use);
+ }
}
/**
@@ -338,7 +336,6 @@ static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -377,7 +374,6 @@ static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -449,14 +445,17 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
return IRQ_HANDLED;
}
- /* Clear interrupt FIFO register 1 */
- pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
- XILINX_PCIE_REG_RPIFR1);
+ if (!(val & XILINX_PCIE_RPIFR1_MSI_INTR)) {
+ /* Clear interrupt FIFO register 1 */
+ pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+ XILINX_PCIE_REG_RPIFR1);
- /* Handle INTx Interrupt */
- val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
- XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
- generic_handle_irq(irq_find_mapping(port->irq_domain, val));
+ /* Handle INTx Interrupt */
+ val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
+ XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
+ generic_handle_irq(irq_find_mapping(port->irq_domain,
+ val));
+ }
}
if (status & XILINX_PCIE_INTR_MSI) {
@@ -647,9 +646,15 @@ static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys)
struct pci_bus *bus;
port->root_busno = sys->busnr;
- bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops,
- sys, &sys->resources);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ bus = pci_scan_root_bus_msi(port->dev, sys->busnr,
+ &xilinx_pcie_ops, sys,
+ &sys->resources,
+ &xilinx_pcie_msi_chip);
+ else
+ bus = pci_scan_root_bus(port->dev, sys->busnr,
+ &xilinx_pcie_ops, sys, &sys->resources);
return bus;
}
@@ -776,7 +781,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
port->irq = irq_of_parse_and_map(node, 0);
err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
- IRQF_SHARED, "xilinx-pcie", port);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "xilinx-pcie", port);
if (err) {
dev_err(dev, "unable to request irq %d\n", port->irq);
return err;
@@ -847,7 +853,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
#ifdef CONFIG_PCI_MSI
xilinx_pcie_msi_chip.dev = port->dev;
- hw.msi_ctrl = &xilinx_pcie_msi_chip;
#endif
pci_common_init_dev(dev, &hw);
diff --git a/kernel/drivers/pci/hotplug/Makefile b/kernel/drivers/pci/hotplug/Makefile
index 4a9aa08b0..b616e7588 100644
--- a/kernel/drivers/pci/hotplug/Makefile
+++ b/kernel/drivers/pci/hotplug/Makefile
@@ -61,9 +61,6 @@ pciehp-objs := pciehp_core.o \
pciehp_ctrl.o \
pciehp_pci.o \
pciehp_hpc.o
-ifdef CONFIG_ACPI
-pciehp-objs += pciehp_acpi.o
-endif
shpchp-objs := shpchp_core.o \
shpchp_ctrl.o \
diff --git a/kernel/drivers/pci/hotplug/acpiphp_glue.c b/kernel/drivers/pci/hotplug/acpiphp_glue.c
index bcb90e488..0b3e0bfa7 100644
--- a/kernel/drivers/pci/hotplug/acpiphp_glue.c
+++ b/kernel/drivers/pci/hotplug/acpiphp_glue.c
@@ -632,15 +632,14 @@ static void trim_stale_devices(struct pci_dev *dev)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
struct pci_bus *bus = dev->subordinate;
- bool alive = false;
+ bool alive = dev->ignore_hotplug;
if (adev) {
acpi_status status;
unsigned long long sta;
status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
- alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
- || dev->ignore_hotplug;
+ alive = alive || (ACPI_SUCCESS(status) && device_status_valid(sta));
}
if (!alive)
alive = pci_device_is_present(dev);
@@ -954,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
{
pci_lock_rescan_remove();
- if (slot->flags & SLOT_IS_GOING_AWAY)
+ if (slot->flags & SLOT_IS_GOING_AWAY) {
+ pci_unlock_rescan_remove();
return -ENODEV;
+ }
/* configure all functions */
if (!(slot->flags & SLOT_ENABLED))
diff --git a/kernel/drivers/pci/hotplug/pci_hotplug_core.c b/kernel/drivers/pci/hotplug/pci_hotplug_core.c
index 56d8486dc..d1fab97d6 100644
--- a/kernel/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/kernel/drivers/pci/hotplug/pci_hotplug_core.c
@@ -83,12 +83,12 @@ GET_STATUS(attention_status, u8)
GET_STATUS(latch_status, u8)
GET_STATUS(adapter_status, u8)
-static ssize_t power_read_file(struct pci_slot *slot, char *buf)
+static ssize_t power_read_file(struct pci_slot *pci_slot, char *buf)
{
int retval;
u8 value;
- retval = get_power_status(slot->hotplug, &value);
+ retval = get_power_status(pci_slot->hotplug, &value);
if (retval)
return retval;
@@ -140,22 +140,22 @@ static struct pci_slot_attribute hotplug_slot_attr_power = {
.store = power_write_file
};
-static ssize_t attention_read_file(struct pci_slot *slot, char *buf)
+static ssize_t attention_read_file(struct pci_slot *pci_slot, char *buf)
{
int retval;
u8 value;
- retval = get_attention_status(slot->hotplug, &value);
+ retval = get_attention_status(pci_slot->hotplug, &value);
if (retval)
return retval;
return sprintf(buf, "%d\n", value);
}
-static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
+static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
size_t count)
{
- struct hotplug_slot_ops *ops = slot->hotplug->ops;
+ struct hotplug_slot_ops *ops = pci_slot->hotplug->ops;
unsigned long lattention;
u8 attention;
int retval = 0;
@@ -169,7 +169,7 @@ static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
goto exit;
}
if (ops->set_attention_status)
- retval = ops->set_attention_status(slot->hotplug, attention);
+ retval = ops->set_attention_status(pci_slot->hotplug, attention);
module_put(ops->owner);
exit:
@@ -184,12 +184,12 @@ static struct pci_slot_attribute hotplug_slot_attr_attention = {
.store = attention_write_file
};
-static ssize_t latch_read_file(struct pci_slot *slot, char *buf)
+static ssize_t latch_read_file(struct pci_slot *pci_slot, char *buf)
{
int retval;
u8 value;
- retval = get_latch_status(slot->hotplug, &value);
+ retval = get_latch_status(pci_slot->hotplug, &value);
if (retval)
return retval;
@@ -201,12 +201,12 @@ static struct pci_slot_attribute hotplug_slot_attr_latch = {
.show = latch_read_file,
};
-static ssize_t presence_read_file(struct pci_slot *slot, char *buf)
+static ssize_t presence_read_file(struct pci_slot *pci_slot, char *buf)
{
int retval;
u8 value;
- retval = get_adapter_status(slot->hotplug, &value);
+ retval = get_adapter_status(pci_slot->hotplug, &value);
if (retval)
return retval;
@@ -307,43 +307,43 @@ static bool has_test_file(struct pci_slot *pci_slot)
return false;
}
-static int fs_add_slot(struct pci_slot *slot)
+static int fs_add_slot(struct pci_slot *pci_slot)
{
int retval = 0;
/* Create symbolic link to the hotplug driver module */
- pci_hp_create_module_link(slot);
+ pci_hp_create_module_link(pci_slot);
- if (has_power_file(slot)) {
- retval = sysfs_create_file(&slot->kobj,
+ if (has_power_file(pci_slot)) {
+ retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_power.attr);
if (retval)
goto exit_power;
}
- if (has_attention_file(slot)) {
- retval = sysfs_create_file(&slot->kobj,
+ if (has_attention_file(pci_slot)) {
+ retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
if (retval)
goto exit_attention;
}
- if (has_latch_file(slot)) {
- retval = sysfs_create_file(&slot->kobj,
+ if (has_latch_file(pci_slot)) {
+ retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_latch.attr);
if (retval)
goto exit_latch;
}
- if (has_adapter_file(slot)) {
- retval = sysfs_create_file(&slot->kobj,
+ if (has_adapter_file(pci_slot)) {
+ retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
if (retval)
goto exit_adapter;
}
- if (has_test_file(slot)) {
- retval = sysfs_create_file(&slot->kobj,
+ if (has_test_file(pci_slot)) {
+ retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_test.attr);
if (retval)
goto exit_test;
@@ -352,45 +352,45 @@ static int fs_add_slot(struct pci_slot *slot)
goto exit;
exit_test:
- if (has_adapter_file(slot))
- sysfs_remove_file(&slot->kobj,
+ if (has_adapter_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
exit_adapter:
- if (has_latch_file(slot))
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
+ if (has_latch_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
exit_latch:
- if (has_attention_file(slot))
- sysfs_remove_file(&slot->kobj,
+ if (has_attention_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
exit_attention:
- if (has_power_file(slot))
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
+ if (has_power_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
exit_power:
- pci_hp_remove_module_link(slot);
+ pci_hp_remove_module_link(pci_slot);
exit:
return retval;
}
-static void fs_remove_slot(struct pci_slot *slot)
+static void fs_remove_slot(struct pci_slot *pci_slot)
{
- if (has_power_file(slot))
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
+ if (has_power_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
- if (has_attention_file(slot))
- sysfs_remove_file(&slot->kobj,
+ if (has_attention_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
- if (has_latch_file(slot))
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
+ if (has_latch_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
- if (has_adapter_file(slot))
- sysfs_remove_file(&slot->kobj,
+ if (has_adapter_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
- if (has_test_file(slot))
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
+ if (has_test_file(pci_slot))
+ sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_test.attr);
- pci_hp_remove_module_link(slot);
+ pci_hp_remove_module_link(pci_slot);
}
static struct hotplug_slot *get_slot_from_name(const char *name)
@@ -467,37 +467,37 @@ EXPORT_SYMBOL_GPL(__pci_hp_register);
/**
* pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem
- * @hotplug: pointer to the &struct hotplug_slot to deregister
+ * @slot: pointer to the &struct hotplug_slot to deregister
*
* The @slot must have been registered with the pci hotplug subsystem
* previously with a call to pci_hp_register().
*
* Returns 0 if successful, anything else for an error.
*/
-int pci_hp_deregister(struct hotplug_slot *hotplug)
+int pci_hp_deregister(struct hotplug_slot *slot)
{
struct hotplug_slot *temp;
- struct pci_slot *slot;
+ struct pci_slot *pci_slot;
- if (!hotplug)
+ if (!slot)
return -ENODEV;
mutex_lock(&pci_hp_mutex);
- temp = get_slot_from_name(hotplug_slot_name(hotplug));
- if (temp != hotplug) {
+ temp = get_slot_from_name(hotplug_slot_name(slot));
+ if (temp != slot) {
mutex_unlock(&pci_hp_mutex);
return -ENODEV;
}
- list_del(&hotplug->slot_list);
+ list_del(&slot->slot_list);
- slot = hotplug->pci_slot;
- fs_remove_slot(slot);
- dbg("Removed slot %s from the list\n", hotplug_slot_name(hotplug));
+ pci_slot = slot->pci_slot;
+ fs_remove_slot(pci_slot);
+ dbg("Removed slot %s from the list\n", hotplug_slot_name(slot));
- hotplug->release(hotplug);
- slot->hotplug = NULL;
- pci_destroy_slot(slot);
+ slot->release(slot);
+ pci_slot->hotplug = NULL;
+ pci_destroy_slot(pci_slot);
mutex_unlock(&pci_hp_mutex);
return 0;
@@ -506,7 +506,7 @@ EXPORT_SYMBOL_GPL(pci_hp_deregister);
/**
* pci_hp_change_slot_info - changes the slot's information structure in the core
- * @hotplug: pointer to the slot whose info has changed
+ * @slot: pointer to the slot whose info has changed
* @info: pointer to the info copy into the slot's info structure
*
* @slot must have been registered with the pci
@@ -514,13 +514,13 @@ EXPORT_SYMBOL_GPL(pci_hp_deregister);
*
* Returns 0 if successful, anything else for an error.
*/
-int pci_hp_change_slot_info(struct hotplug_slot *hotplug,
+int pci_hp_change_slot_info(struct hotplug_slot *slot,
struct hotplug_slot_info *info)
{
- if (!hotplug || !info)
+ if (!slot || !info)
return -ENODEV;
- memcpy(hotplug->info, info, sizeof(struct hotplug_slot_info));
+ memcpy(slot->info, info, sizeof(struct hotplug_slot_info));
return 0;
}
diff --git a/kernel/drivers/pci/hotplug/pciehp.h b/kernel/drivers/pci/hotplug/pciehp.h
index b11521953..62d6fe6c3 100644
--- a/kernel/drivers/pci/hotplug/pciehp.h
+++ b/kernel/drivers/pci/hotplug/pciehp.h
@@ -101,18 +101,12 @@ struct controller {
unsigned int power_fault_detected;
};
-#define INT_BUTTON_IGNORE 0
#define INT_PRESENCE_ON 1
#define INT_PRESENCE_OFF 2
-#define INT_SWITCH_CLOSE 3
-#define INT_SWITCH_OPEN 4
-#define INT_POWER_FAULT 5
-#define INT_POWER_FAULT_CLEAR 6
-#define INT_BUTTON_PRESS 7
-#define INT_BUTTON_RELEASE 8
-#define INT_BUTTON_CANCEL 9
-#define INT_LINK_UP 10
-#define INT_LINK_DOWN 11
+#define INT_POWER_FAULT 3
+#define INT_BUTTON_PRESS 4
+#define INT_LINK_UP 5
+#define INT_LINK_DOWN 6
#define STATIC_STATE 0
#define BLINKINGON_STATE 1
@@ -132,11 +126,7 @@ struct controller {
int pciehp_sysfs_enable_slot(struct slot *slot);
int pciehp_sysfs_disable_slot(struct slot *slot);
-u8 pciehp_handle_attention_button(struct slot *p_slot);
-u8 pciehp_handle_switch_change(struct slot *p_slot);
-u8 pciehp_handle_presence_change(struct slot *p_slot);
-u8 pciehp_handle_power_fault(struct slot *p_slot);
-void pciehp_handle_linkstate_change(struct slot *p_slot);
+void pciehp_queue_interrupt_event(struct slot *slot, u32 event_type);
int pciehp_configure_device(struct slot *p_slot);
int pciehp_unconfigure_device(struct slot *p_slot);
void pciehp_queue_pushbutton_work(struct work_struct *work);
@@ -167,21 +157,4 @@ static inline const char *slot_name(struct slot *slot)
return hotplug_slot_name(slot->hotplug_slot);
}
-#ifdef CONFIG_ACPI
-#include <linux/pci-acpi.h>
-
-void __init pciehp_acpi_slot_detection_init(void);
-int pciehp_acpi_slot_detection_check(struct pci_dev *dev);
-
-static inline void pciehp_firmware_init(void)
-{
- pciehp_acpi_slot_detection_init();
-}
-#else
-#define pciehp_firmware_init() do {} while (0)
-static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
-{
- return 0;
-}
-#endif /* CONFIG_ACPI */
#endif /* _PCIEHP_H */
diff --git a/kernel/drivers/pci/hotplug/pciehp_acpi.c b/kernel/drivers/pci/hotplug/pciehp_acpi.c
deleted file mode 100644
index 93cc9266e..000000000
--- a/kernel/drivers/pci/hotplug/pciehp_acpi.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * ACPI related functions for PCI Express Hot Plug driver.
- *
- * Copyright (C) 2008 Kenji Kaneshige
- * Copyright (C) 2008 Fujitsu Limited.
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/acpi.h>
-#include <linux/pci.h>
-#include <linux/pci_hotplug.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include "pciehp.h"
-
-#define PCIEHP_DETECT_PCIE (0)
-#define PCIEHP_DETECT_ACPI (1)
-#define PCIEHP_DETECT_AUTO (2)
-#define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO
-
-struct dummy_slot {
- u32 number;
- struct list_head list;
-};
-
-static int slot_detection_mode;
-static char *pciehp_detect_mode;
-module_param(pciehp_detect_mode, charp, 0444);
-MODULE_PARM_DESC(pciehp_detect_mode,
- "Slot detection mode: pcie, acpi, auto\n"
- " pcie - Use PCIe based slot detection\n"
- " acpi - Use ACPI for slot detection\n"
- " auto(default) - Auto select mode. Use acpi option if duplicate\n"
- " slot ids are found. Otherwise, use pcie option\n");
-
-int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
-{
- if (slot_detection_mode != PCIEHP_DETECT_ACPI)
- return 0;
- if (acpi_pci_detect_ejectable(ACPI_HANDLE(&dev->dev)))
- return 0;
- return -ENODEV;
-}
-
-static int __init parse_detect_mode(void)
-{
- if (!pciehp_detect_mode)
- return PCIEHP_DETECT_DEFAULT;
- if (!strcmp(pciehp_detect_mode, "pcie"))
- return PCIEHP_DETECT_PCIE;
- if (!strcmp(pciehp_detect_mode, "acpi"))
- return PCIEHP_DETECT_ACPI;
- if (!strcmp(pciehp_detect_mode, "auto"))
- return PCIEHP_DETECT_AUTO;
- warn("bad specifier '%s' for pciehp_detect_mode. Use default\n",
- pciehp_detect_mode);
- return PCIEHP_DETECT_DEFAULT;
-}
-
-static int __initdata dup_slot_id;
-static int __initdata acpi_slot_detected;
-static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
-
-/* Dummy driver for duplicate name detection */
-static int __init dummy_probe(struct pcie_device *dev)
-{
- u32 slot_cap;
- acpi_handle handle;
- struct dummy_slot *slot, *tmp;
- struct pci_dev *pdev = dev->port;
-
- pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
- return -ENOMEM;
- slot->number = (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19;
- list_for_each_entry(tmp, &dummy_slots, list) {
- if (tmp->number == slot->number)
- dup_slot_id++;
- }
- list_add_tail(&slot->list, &dummy_slots);
- handle = ACPI_HANDLE(&pdev->dev);
- if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle))
- acpi_slot_detected = 1;
- return -ENODEV; /* dummy driver always returns error */
-}
-
-static struct pcie_port_service_driver __initdata dummy_driver = {
- .name = "pciehp_dummy",
- .port_type = PCIE_ANY_PORT,
- .service = PCIE_PORT_SERVICE_HP,
- .probe = dummy_probe,
-};
-
-static int __init select_detection_mode(void)
-{
- struct dummy_slot *slot, *tmp;
-
- if (pcie_port_service_register(&dummy_driver))
- return PCIEHP_DETECT_ACPI;
- pcie_port_service_unregister(&dummy_driver);
- list_for_each_entry_safe(slot, tmp, &dummy_slots, list) {
- list_del(&slot->list);
- kfree(slot);
- }
- if (acpi_slot_detected && dup_slot_id)
- return PCIEHP_DETECT_ACPI;
- return PCIEHP_DETECT_PCIE;
-}
-
-void __init pciehp_acpi_slot_detection_init(void)
-{
- slot_detection_mode = parse_detect_mode();
- if (slot_detection_mode != PCIEHP_DETECT_AUTO)
- goto out;
- slot_detection_mode = select_detection_mode();
-out:
- if (slot_detection_mode == PCIEHP_DETECT_ACPI)
- info("Using ACPI for slot detection.\n");
-}
diff --git a/kernel/drivers/pci/hotplug/pciehp_core.c b/kernel/drivers/pci/hotplug/pciehp_core.c
index 07aa722bb..612b21a14 100644
--- a/kernel/drivers/pci/hotplug/pciehp_core.c
+++ b/kernel/drivers/pci/hotplug/pciehp_core.c
@@ -77,11 +77,6 @@ static int reset_slot (struct hotplug_slot *slot, int probe);
*/
static void release_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
-
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, hotplug_slot_name(hotplug_slot));
-
kfree(hotplug_slot->ops);
kfree(hotplug_slot->info);
kfree(hotplug_slot);
@@ -129,14 +124,10 @@ static int init_slot(struct controller *ctrl)
slot->hotplug_slot = hotplug;
snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
- ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:00 sun=%x\n",
- pci_domain_nr(ctrl->pcie->port->subordinate),
- ctrl->pcie->port->subordinate->number, PSN(ctrl));
retval = pci_hp_register(hotplug,
ctrl->pcie->port->subordinate, 0, name);
if (retval)
- ctrl_err(ctrl,
- "pci_hp_register failed with error %d\n", retval);
+ ctrl_err(ctrl, "pci_hp_register failed: error %d\n", retval);
out:
if (retval) {
kfree(ops);
@@ -158,9 +149,6 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
struct slot *slot = hotplug_slot->private;
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
pciehp_set_attention_status(slot, status);
return 0;
}
@@ -170,9 +158,6 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
return pciehp_sysfs_enable_slot(slot);
}
@@ -181,9 +166,6 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
return pciehp_sysfs_disable_slot(slot);
}
@@ -191,9 +173,6 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
pciehp_get_power_status(slot, value);
return 0;
}
@@ -202,9 +181,6 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
pciehp_get_attention_status(slot, value);
return 0;
}
@@ -213,9 +189,6 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
pciehp_get_latch_status(slot, value);
return 0;
}
@@ -224,9 +197,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
pciehp_get_adapter_status(slot, value);
return 0;
}
@@ -235,9 +205,6 @@ static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
{
struct slot *slot = hotplug_slot->private;
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
return pciehp_reset_slot(slot, probe);
}
@@ -248,24 +215,21 @@ static int pciehp_probe(struct pcie_device *dev)
struct slot *slot;
u8 occupied, poweron;
- if (pciehp_force)
- dev_info(&dev->device,
- "Bypassing BIOS check for pciehp use on %s\n",
- pci_name(dev->port));
- else if (pciehp_acpi_slot_detection_check(dev->port))
- goto err_out_none;
+ /* If this is not a "hotplug" service, we have no business here. */
+ if (dev->service != PCIE_PORT_SERVICE_HP)
+ return -ENODEV;
if (!dev->port->subordinate) {
/* Can happen if we run out of bus numbers during probe */
dev_err(&dev->device,
"Hotplug bridge without secondary bus, ignoring\n");
- goto err_out_none;
+ return -ENODEV;
}
ctrl = pcie_init(dev);
if (!ctrl) {
dev_err(&dev->device, "Controller initialization failed\n");
- goto err_out_none;
+ return -ENODEV;
}
set_service_data(dev, ctrl);
@@ -275,14 +239,14 @@ static int pciehp_probe(struct pcie_device *dev)
if (rc == -EBUSY)
ctrl_warn(ctrl, "Slot already registered by another hotplug driver\n");
else
- ctrl_err(ctrl, "Slot initialization failed\n");
+ ctrl_err(ctrl, "Slot initialization failed (%d)\n", rc);
goto err_out_release_ctlr;
}
/* Enable events after we have setup the data structures */
rc = pcie_init_notification(ctrl);
if (rc) {
- ctrl_err(ctrl, "Notification initialization failed\n");
+ ctrl_err(ctrl, "Notification initialization failed (%d)\n", rc);
goto err_out_free_ctrl_slot;
}
@@ -305,7 +269,6 @@ err_out_free_ctrl_slot:
cleanup_slot(ctrl);
err_out_release_ctlr:
pciehp_release_ctrl(ctrl);
-err_out_none:
return -ENODEV;
}
@@ -366,7 +329,6 @@ static int __init pcied_init(void)
{
int retval = 0;
- pciehp_firmware_init();
retval = pcie_port_service_register(&hpdriver_portdrv);
dbg("pcie_port_service_register = %d\n", retval);
info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
diff --git a/kernel/drivers/pci/hotplug/pciehp_ctrl.c b/kernel/drivers/pci/hotplug/pciehp_ctrl.c
index f052e951b..4c8f4cde6 100644
--- a/kernel/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/kernel/drivers/pci/hotplug/pciehp_ctrl.c
@@ -37,138 +37,20 @@
static void interrupt_event_handler(struct work_struct *work);
-static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
+void pciehp_queue_interrupt_event(struct slot *p_slot, u32 event_type)
{
struct event_info *info;
info = kmalloc(sizeof(*info), GFP_ATOMIC);
- if (!info)
- return -ENOMEM;
+ if (!info) {
+ ctrl_err(p_slot->ctrl, "dropped event %d (ENOMEM)\n", event_type);
+ return;
+ }
+ INIT_WORK(&info->work, interrupt_event_handler);
info->event_type = event_type;
info->p_slot = p_slot;
- INIT_WORK(&info->work, interrupt_event_handler);
-
queue_work(p_slot->wq, &info->work);
-
- return 0;
-}
-
-u8 pciehp_handle_attention_button(struct slot *p_slot)
-{
- u32 event_type;
- struct controller *ctrl = p_slot->ctrl;
-
- /* Attention Button Change */
- ctrl_dbg(ctrl, "Attention button interrupt received\n");
-
- /*
- * Button pressed - See if need to TAKE ACTION!!!
- */
- ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot));
- event_type = INT_BUTTON_PRESS;
-
- queue_interrupt_event(p_slot, event_type);
-
- return 0;
-}
-
-u8 pciehp_handle_switch_change(struct slot *p_slot)
-{
- u8 getstatus;
- u32 event_type;
- struct controller *ctrl = p_slot->ctrl;
-
- /* Switch Change */
- ctrl_dbg(ctrl, "Switch interrupt received\n");
-
- pciehp_get_latch_status(p_slot, &getstatus);
- if (getstatus) {
- /*
- * Switch opened
- */
- ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot));
- event_type = INT_SWITCH_OPEN;
- } else {
- /*
- * Switch closed
- */
- ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot));
- event_type = INT_SWITCH_CLOSE;
- }
-
- queue_interrupt_event(p_slot, event_type);
-
- return 1;
-}
-
-u8 pciehp_handle_presence_change(struct slot *p_slot)
-{
- u32 event_type;
- u8 presence_save;
- struct controller *ctrl = p_slot->ctrl;
-
- /* Presence Change */
- ctrl_dbg(ctrl, "Presence/Notify input change\n");
-
- /* Switch is open, assume a presence change
- * Save the presence state
- */
- pciehp_get_adapter_status(p_slot, &presence_save);
- if (presence_save) {
- /*
- * Card Present
- */
- ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot));
- event_type = INT_PRESENCE_ON;
- } else {
- /*
- * Not Present
- */
- ctrl_info(ctrl, "Card not present on Slot(%s)\n",
- slot_name(p_slot));
- event_type = INT_PRESENCE_OFF;
- }
-
- queue_interrupt_event(p_slot, event_type);
-
- return 1;
-}
-
-u8 pciehp_handle_power_fault(struct slot *p_slot)
-{
- u32 event_type;
- struct controller *ctrl = p_slot->ctrl;
-
- /* power fault */
- ctrl_dbg(ctrl, "Power fault interrupt received\n");
- ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
- event_type = INT_POWER_FAULT;
- ctrl_info(ctrl, "Power fault bit %x set\n", 0);
- queue_interrupt_event(p_slot, event_type);
-
- return 1;
-}
-
-void pciehp_handle_linkstate_change(struct slot *p_slot)
-{
- u32 event_type;
- struct controller *ctrl = p_slot->ctrl;
-
- /* Link Status Change */
- ctrl_dbg(ctrl, "Data Link Layer State change\n");
-
- if (pciehp_check_link_active(ctrl)) {
- ctrl_info(ctrl, "slot(%s): Link Up event\n",
- slot_name(p_slot));
- event_type = INT_LINK_UP;
- } else {
- ctrl_info(ctrl, "slot(%s): Link Down event\n",
- slot_name(p_slot));
- event_type = INT_LINK_DOWN;
- }
-
- queue_interrupt_event(p_slot, event_type);
}
/* The following routines constitute the bulk of the
@@ -298,10 +180,6 @@ static void pciehp_power_thread(struct work_struct *work)
switch (info->req) {
case DISABLE_REQ:
- ctrl_dbg(p_slot->ctrl,
- "Disabling domain:bus:device=%04x:%02x:00\n",
- pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
- p_slot->ctrl->pcie->port->subordinate->number);
mutex_lock(&p_slot->hotplug_lock);
pciehp_disable_slot(p_slot);
mutex_unlock(&p_slot->hotplug_lock);
@@ -310,10 +188,6 @@ static void pciehp_power_thread(struct work_struct *work)
mutex_unlock(&p_slot->lock);
break;
case ENABLE_REQ:
- ctrl_dbg(p_slot->ctrl,
- "Enabling domain:bus:device=%04x:%02x:00\n",
- pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
- p_slot->ctrl->pcie->port->subordinate->number);
mutex_lock(&p_slot->hotplug_lock);
ret = pciehp_enable_slot(p_slot);
mutex_unlock(&p_slot->hotplug_lock);
@@ -330,36 +204,39 @@ static void pciehp_power_thread(struct work_struct *work)
kfree(info);
}
-void pciehp_queue_pushbutton_work(struct work_struct *work)
+static void pciehp_queue_power_work(struct slot *p_slot, int req)
{
- struct slot *p_slot = container_of(work, struct slot, work.work);
struct power_work_info *info;
+ p_slot->state = (req == ENABLE_REQ) ? POWERON_STATE : POWEROFF_STATE;
+
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
- ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
- __func__);
+ ctrl_err(p_slot->ctrl, "no memory to queue %s request\n",
+ (req == ENABLE_REQ) ? "poweron" : "poweroff");
return;
}
info->p_slot = p_slot;
INIT_WORK(&info->work, pciehp_power_thread);
+ info->req = req;
+ queue_work(p_slot->wq, &info->work);
+}
+
+void pciehp_queue_pushbutton_work(struct work_struct *work)
+{
+ struct slot *p_slot = container_of(work, struct slot, work.work);
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGOFF_STATE:
- p_slot->state = POWEROFF_STATE;
- info->req = DISABLE_REQ;
+ pciehp_queue_power_work(p_slot, DISABLE_REQ);
break;
case BLINKINGON_STATE:
- p_slot->state = POWERON_STATE;
- info->req = ENABLE_REQ;
+ pciehp_queue_power_work(p_slot, ENABLE_REQ);
break;
default:
- kfree(info);
- goto out;
+ break;
}
- queue_work(p_slot->wq, &info->work);
- out:
mutex_unlock(&p_slot->lock);
}
@@ -416,7 +293,7 @@ static void handle_button_press_event(struct slot *p_slot)
ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
break;
default:
- ctrl_warn(ctrl, "Not a valid state\n");
+ ctrl_warn(ctrl, "ignoring invalid state %#x\n", p_slot->state);
break;
}
}
@@ -427,27 +304,12 @@ static void handle_button_press_event(struct slot *p_slot)
static void handle_surprise_event(struct slot *p_slot)
{
u8 getstatus;
- struct power_work_info *info;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
- __func__);
- return;
- }
- info->p_slot = p_slot;
- INIT_WORK(&info->work, pciehp_power_thread);
pciehp_get_adapter_status(p_slot, &getstatus);
- if (!getstatus) {
- p_slot->state = POWEROFF_STATE;
- info->req = DISABLE_REQ;
- } else {
- p_slot->state = POWERON_STATE;
- info->req = ENABLE_REQ;
- }
-
- queue_work(p_slot->wq, &info->work);
+ if (!getstatus)
+ pciehp_queue_power_work(p_slot, DISABLE_REQ);
+ else
+ pciehp_queue_power_work(p_slot, ENABLE_REQ);
}
/*
@@ -456,17 +318,6 @@ static void handle_surprise_event(struct slot *p_slot)
static void handle_link_event(struct slot *p_slot, u32 event)
{
struct controller *ctrl = p_slot->ctrl;
- struct power_work_info *info;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
- __func__);
- return;
- }
- info->p_slot = p_slot;
- info->req = event == INT_LINK_UP ? ENABLE_REQ : DISABLE_REQ;
- INIT_WORK(&info->work, pciehp_power_thread);
switch (p_slot->state) {
case BLINKINGON_STATE:
@@ -474,22 +325,19 @@ static void handle_link_event(struct slot *p_slot, u32 event)
cancel_delayed_work(&p_slot->work);
/* Fall through */
case STATIC_STATE:
- p_slot->state = event == INT_LINK_UP ?
- POWERON_STATE : POWEROFF_STATE;
- queue_work(p_slot->wq, &info->work);
+ pciehp_queue_power_work(p_slot, event == INT_LINK_UP ?
+ ENABLE_REQ : DISABLE_REQ);
break;
case POWERON_STATE:
if (event == INT_LINK_UP) {
ctrl_info(ctrl,
"Link Up event ignored on slot(%s): already powering on\n",
slot_name(p_slot));
- kfree(info);
} else {
ctrl_info(ctrl,
"Link Down event queued on slot(%s): currently getting powered on\n",
slot_name(p_slot));
- p_slot->state = POWEROFF_STATE;
- queue_work(p_slot->wq, &info->work);
+ pciehp_queue_power_work(p_slot, DISABLE_REQ);
}
break;
case POWEROFF_STATE:
@@ -497,19 +345,16 @@ static void handle_link_event(struct slot *p_slot, u32 event)
ctrl_info(ctrl,
"Link Up event queued on slot(%s): currently getting powered off\n",
slot_name(p_slot));
- p_slot->state = POWERON_STATE;
- queue_work(p_slot->wq, &info->work);
+ pciehp_queue_power_work(p_slot, ENABLE_REQ);
} else {
ctrl_info(ctrl,
"Link Down event ignored on slot(%s): already powering off\n",
slot_name(p_slot));
- kfree(info);
}
break;
default:
- ctrl_err(ctrl, "Not a valid state on slot(%s)\n",
- slot_name(p_slot));
- kfree(info);
+ ctrl_err(ctrl, "ignoring invalid state %#x on slot(%s)\n",
+ p_slot->state, slot_name(p_slot));
break;
}
}
@@ -532,7 +377,6 @@ static void interrupt_event_handler(struct work_struct *work)
pciehp_green_led_off(p_slot);
break;
case INT_PRESENCE_ON:
- ctrl_dbg(ctrl, "Surprise Insertion\n");
handle_surprise_event(p_slot);
break;
case INT_PRESENCE_OFF:
@@ -540,7 +384,6 @@ static void interrupt_event_handler(struct work_struct *work)
* Regardless of surprise capability, we need to
* definitely remove a card that has been pulled out!
*/
- ctrl_dbg(ctrl, "Surprise Removal\n");
handle_surprise_event(p_slot);
break;
case INT_LINK_UP:
@@ -647,8 +490,8 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
slot_name(p_slot));
break;
default:
- ctrl_err(ctrl, "Not a valid state on slot %s\n",
- slot_name(p_slot));
+ ctrl_err(ctrl, "invalid state %#x on slot %s\n",
+ p_slot->state, slot_name(p_slot));
break;
}
mutex_unlock(&p_slot->lock);
@@ -682,8 +525,8 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
slot_name(p_slot));
break;
default:
- ctrl_err(ctrl, "Not a valid state on slot %s\n",
- slot_name(p_slot));
+ ctrl_err(ctrl, "invalid state %#x on slot %s\n",
+ p_slot->state, slot_name(p_slot));
break;
}
mutex_unlock(&p_slot->lock);
diff --git a/kernel/drivers/pci/hotplug/pciehp_hpc.c b/kernel/drivers/pci/hotplug/pciehp_hpc.c
index 6d6868811..5c24e9380 100644
--- a/kernel/drivers/pci/hotplug/pciehp_hpc.c
+++ b/kernel/drivers/pci/hotplug/pciehp_hpc.c
@@ -109,21 +109,23 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
- pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
- if (slot_status & PCI_EXP_SLTSTA_CC) {
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
- PCI_EXP_SLTSTA_CC);
- return 1;
- }
- while (timeout > 0) {
- msleep(10);
- timeout -= 10;
+ while (true) {
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (slot_status == (u16) ~0) {
+ ctrl_info(ctrl, "%s: no response from device\n",
+ __func__);
+ return 0;
+ }
+
if (slot_status & PCI_EXP_SLTSTA_CC) {
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_CC);
return 1;
}
+ if (timeout < 0)
+ break;
+ msleep(10);
+ timeout -= 10;
}
return 0; /* timeout */
}
@@ -190,6 +192,11 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
pcie_wait_cmd(ctrl);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+ if (slot_ctrl == (u16) ~0) {
+ ctrl_info(ctrl, "%s: no response from device\n", __func__);
+ goto out;
+ }
+
slot_ctrl &= ~mask;
slot_ctrl |= (cmd & mask);
ctrl->cmd_busy = 1;
@@ -205,6 +212,7 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
if (wait)
pcie_wait_cmd(ctrl);
+out:
mutex_unlock(&ctrl->ctrl_lock);
}
@@ -312,7 +320,8 @@ int pciehp_check_link_status(struct controller *ctrl)
ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
!(lnk_status & PCI_EXP_LNKSTA_NLW)) {
- ctrl_err(ctrl, "Link Training Error occurs\n");
+ ctrl_err(ctrl, "link training error: status %#06x\n",
+ lnk_status);
return -1;
}
@@ -534,6 +543,8 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
struct pci_dev *dev;
struct slot *slot = ctrl->slot;
u16 detected, intr_loc;
+ u8 present;
+ bool link;
/*
* In order to guarantee that all interrupt events are
@@ -543,9 +554,14 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
intr_loc = 0;
do {
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &detected);
+ if (detected == (u16) ~0) {
+ ctrl_info(ctrl, "%s: no response from device\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
- PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
+ PCI_EXP_SLTSTA_PDC |
PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
detected &= ~intr_loc;
intr_loc |= detected;
@@ -556,7 +572,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
intr_loc);
} while (detected);
- ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc);
+ ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", intr_loc);
/* Check Command Complete Interrupt Pending */
if (intr_loc & PCI_EXP_SLTSTA_CC) {
@@ -578,26 +594,36 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
return IRQ_HANDLED;
- /* Check MRL Sensor Changed */
- if (intr_loc & PCI_EXP_SLTSTA_MRLSC)
- pciehp_handle_switch_change(slot);
-
/* Check Attention Button Pressed */
- if (intr_loc & PCI_EXP_SLTSTA_ABP)
- pciehp_handle_attention_button(slot);
+ if (intr_loc & PCI_EXP_SLTSTA_ABP) {
+ ctrl_info(ctrl, "Button pressed on Slot(%s)\n",
+ slot_name(slot));
+ pciehp_queue_interrupt_event(slot, INT_BUTTON_PRESS);
+ }
/* Check Presence Detect Changed */
- if (intr_loc & PCI_EXP_SLTSTA_PDC)
- pciehp_handle_presence_change(slot);
+ if (intr_loc & PCI_EXP_SLTSTA_PDC) {
+ pciehp_get_adapter_status(slot, &present);
+ ctrl_info(ctrl, "Card %spresent on Slot(%s)\n",
+ present ? "" : "not ", slot_name(slot));
+ pciehp_queue_interrupt_event(slot, present ? INT_PRESENCE_ON :
+ INT_PRESENCE_OFF);
+ }
/* Check Power Fault Detected */
if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
ctrl->power_fault_detected = 1;
- pciehp_handle_power_fault(slot);
+ ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(slot));
+ pciehp_queue_interrupt_event(slot, INT_POWER_FAULT);
}
- if (intr_loc & PCI_EXP_SLTSTA_DLLSC)
- pciehp_handle_linkstate_change(slot);
+ if (intr_loc & PCI_EXP_SLTSTA_DLLSC) {
+ link = pciehp_check_link_active(ctrl);
+ ctrl_info(ctrl, "slot(%s): Link %s event\n",
+ slot_name(slot), link ? "Up" : "Down");
+ pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
+ INT_LINK_DOWN);
+ }
return IRQ_HANDLED;
}
@@ -627,13 +653,11 @@ void pcie_enable_notification(struct controller *ctrl)
cmd |= PCI_EXP_SLTCTL_ABPE;
else
cmd |= PCI_EXP_SLTCTL_PDCE;
- if (MRL_SENS(ctrl))
- cmd |= PCI_EXP_SLTCTL_MRLSCE;
if (!pciehp_poll_mode)
cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;
mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
- PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
+ PCI_EXP_SLTCTL_PFDE |
PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
PCI_EXP_SLTCTL_DLLSCE);
@@ -748,48 +772,13 @@ static void pcie_cleanup_slot(struct controller *ctrl)
static inline void dbg_ctrl(struct controller *ctrl)
{
- int i;
- u16 reg16;
struct pci_dev *pdev = ctrl->pcie->port;
+ u16 reg16;
if (!pciehp_debug)
return;
- ctrl_info(ctrl, "Hotplug Controller:\n");
- ctrl_info(ctrl, " Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n",
- pci_name(pdev), pdev->irq);
- ctrl_info(ctrl, " Vendor ID : 0x%04x\n", pdev->vendor);
- ctrl_info(ctrl, " Device ID : 0x%04x\n", pdev->device);
- ctrl_info(ctrl, " Subsystem ID : 0x%04x\n",
- pdev->subsystem_device);
- ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n",
- pdev->subsystem_vendor);
- ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n",
- pci_pcie_cap(pdev));
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- if (!pci_resource_len(pdev, i))
- continue;
- ctrl_info(ctrl, " PCI resource [%d] : %pR\n",
- i, &pdev->resource[i]);
- }
ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
- ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl));
- ctrl_info(ctrl, " Attention Button : %3s\n",
- ATTN_BUTTN(ctrl) ? "yes" : "no");
- ctrl_info(ctrl, " Power Controller : %3s\n",
- POWER_CTRL(ctrl) ? "yes" : "no");
- ctrl_info(ctrl, " MRL Sensor : %3s\n",
- MRL_SENS(ctrl) ? "yes" : "no");
- ctrl_info(ctrl, " Attention Indicator : %3s\n",
- ATTN_LED(ctrl) ? "yes" : "no");
- ctrl_info(ctrl, " Power Indicator : %3s\n",
- PWR_LED(ctrl) ? "yes" : "no");
- ctrl_info(ctrl, " Hot-Plug Surprise : %3s\n",
- HP_SUPR_RM(ctrl) ? "yes" : "no");
- ctrl_info(ctrl, " EMI Present : %3s\n",
- EMI(ctrl) ? "yes" : "no");
- ctrl_info(ctrl, " Command Completed : %3s\n",
- NO_CMD_CMPL(ctrl) ? "no" : "yes");
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
@@ -818,10 +807,8 @@ struct controller *pcie_init(struct pcie_device *dev)
/* Check if Data Link Layer Link Active Reporting is implemented */
pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
- if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
- ctrl_dbg(ctrl, "Link Active Reporting supported\n");
+ if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
ctrl->link_active_reporting = 1;
- }
/* Clear all remaining event bits in Slot Status register */
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
@@ -829,13 +816,15 @@ struct controller *pcie_init(struct pcie_device *dev)
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
- ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n",
+ ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c\n",
(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
- FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
- FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC));
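Several pciehp_hpc.c hunks above add the same guard: a Slot Status or Slot Control read that comes back as all-ones ((u16) ~0) means the device no longer responds (surprise removal or a powered-down slot), so the poll loop, command writer and ISR bail out instead of decoding garbage bits. A minimal sketch of that check:

#include <stdint.h>
#include <stdio.h>

/* Stub config-space read; a removed PCIe device returns all-ones. */
static uint16_t read_slot_status(void)
{
        return 0xffff;
}

static int poll_slot_status(void)
{
        uint16_t status = read_slot_status();

        if (status == (uint16_t)~0) {
                fprintf(stderr, "no response from device\n");
                return 0;       /* don't act on the bogus value */
        }
        /* decode the individual status bits here */
        return 1;
}

int main(void)
{
        return poll_slot_status() ? 0 : 1;
}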
diff --git a/kernel/drivers/pci/htirq.c b/kernel/drivers/pci/htirq.c
index a94dd2c41..7eb4109a3 100644
--- a/kernel/drivers/pci/htirq.c
+++ b/kernel/drivers/pci/htirq.c
@@ -23,20 +23,11 @@
*/
static DEFINE_SPINLOCK(ht_irq_lock);
-struct ht_irq_cfg {
- struct pci_dev *dev;
- /* Update callback used to cope with buggy hardware */
- ht_irq_update_t *update;
- unsigned pos;
- unsigned idx;
- struct ht_irq_msg msg;
-};
-
-
void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
{
struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
unsigned long flags;
+
spin_lock_irqsave(&ht_irq_lock, flags);
if (cfg->msg.address_lo != msg->address_lo) {
pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
@@ -55,6 +46,7 @@ void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
{
struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
+
*msg = cfg->msg;
}
@@ -86,7 +78,6 @@ void unmask_ht_irq(struct irq_data *data)
*/
int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
{
- struct ht_irq_cfg *cfg;
int max_irq, pos, irq;
unsigned long flags;
u32 data;
@@ -105,29 +96,9 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
if (idx > max_irq)
return -EINVAL;
- cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
-
- cfg->dev = dev;
- cfg->update = update;
- cfg->pos = pos;
- cfg->idx = 0x10 + (idx * 2);
- /* Initialize msg to a value that will never match the first write. */
- cfg->msg.address_lo = 0xffffffff;
- cfg->msg.address_hi = 0xffffffff;
-
- irq = irq_alloc_hwirq(dev_to_node(&dev->dev));
- if (!irq) {
- kfree(cfg);
- return -EBUSY;
- }
- irq_set_handler_data(irq, cfg);
-
- if (arch_setup_ht_irq(irq, dev) < 0) {
- ht_destroy_irq(irq);
- return -EBUSY;
- }
+ irq = arch_setup_ht_irq(idx, pos, dev, update);
+ if (irq > 0)
+ dev_dbg(&dev->dev, "irq %d for HT\n", irq);
return irq;
}
@@ -158,13 +129,6 @@ EXPORT_SYMBOL(ht_create_irq);
*/
void ht_destroy_irq(unsigned int irq)
{
- struct ht_irq_cfg *cfg;
-
- cfg = irq_get_handler_data(irq);
- irq_set_chip(irq, NULL);
- irq_set_handler_data(irq, NULL);
- irq_free_hwirq(irq);
-
- kfree(cfg);
+ arch_teardown_ht_irq(irq);
}
EXPORT_SYMBOL(ht_destroy_irq);
diff --git a/kernel/drivers/pci/iov.c b/kernel/drivers/pci/iov.c
index ee0ebff10..31f31d460 100644
--- a/kernel/drivers/pci/iov.c
+++ b/kernel/drivers/pci/iov.c
@@ -54,24 +54,29 @@ static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
* The PF consumes one bus number. NumVFs, First VF Offset, and VF Stride
* determine how many additional bus numbers will be consumed by VFs.
*
- * Iterate over all valid NumVFs and calculate the maximum number of bus
- * numbers that could ever be required.
+ * Iterate over all valid NumVFs, validate offset and stride, and calculate
+ * the maximum number of bus numbers that could ever be required.
*/
-static inline u8 virtfn_max_buses(struct pci_dev *dev)
+static int compute_max_vf_buses(struct pci_dev *dev)
{
struct pci_sriov *iov = dev->sriov;
- int nr_virtfn;
- u8 max = 0;
- int busnr;
+ int nr_virtfn, busnr, rc = 0;
- for (nr_virtfn = 1; nr_virtfn <= iov->total_VFs; nr_virtfn++) {
+ for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) {
pci_iov_set_numvfs(dev, nr_virtfn);
+ if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) {
+ rc = -EIO;
+ goto out;
+ }
+
busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
- if (busnr > max)
- max = busnr;
+ if (busnr > iov->max_VF_buses)
+ iov->max_VF_buses = busnr;
}
- return max;
+out:
+ pci_iov_set_numvfs(dev, 0);
+ return rc;
}
static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
@@ -222,21 +227,25 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
- return 0;
+ return 0;
+}
+
+int __weak pcibios_sriov_disable(struct pci_dev *pdev)
+{
+ return 0;
}
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
int rc;
- int i, j;
+ int i;
int nres;
- u16 offset, stride, initial;
+ u16 initial;
struct resource *res;
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
int bars = 0;
int bus;
- int retval;
if (!nr_virtfn)
return 0;
@@ -253,11 +262,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
(!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
return -EINVAL;
- pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
- pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
- if (!offset || (nr_virtfn > 1 && !stride))
- return -EIO;
-
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
bars |= (1 << (i + PCI_IOV_RESOURCES));
@@ -270,9 +274,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return -ENOMEM;
}
- iov->offset = offset;
- iov->stride = stride;
-
bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
if (bus > dev->bus->busn_res.end) {
dev_err(&dev->dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
@@ -313,10 +314,10 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
if (nr_virtfn < initial)
initial = nr_virtfn;
- if ((retval = pcibios_sriov_enable(dev, initial))) {
- dev_err(&dev->dev, "failure %d from pcibios_sriov_enable()\n",
- retval);
- return retval;
+ rc = pcibios_sriov_enable(dev, initial);
+ if (rc) {
+ dev_err(&dev->dev, "failure %d from pcibios_sriov_enable()\n", rc);
+ goto err_pcibios;
}
for (i = 0; i < initial; i++) {
@@ -331,27 +332,24 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return 0;
failed:
- for (j = 0; j < i; j++)
- virtfn_remove(dev, j, 0);
+ while (i--)
+ virtfn_remove(dev, i, 0);
+ pcibios_sriov_disable(dev);
+err_pcibios:
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
- pci_iov_set_numvfs(dev, 0);
ssleep(1);
pci_cfg_access_unlock(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
+ pci_iov_set_numvfs(dev, 0);
return rc;
}
-int __weak pcibios_sriov_disable(struct pci_dev *pdev)
-{
- return 0;
-}
-
static void sriov_disable(struct pci_dev *dev)
{
int i;
@@ -384,7 +382,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
int rc;
int nres;
u32 pgsz;
- u16 ctrl, total, offset, stride;
+ u16 ctrl, total;
struct pci_sriov *iov;
struct resource *res;
struct pci_dev *pdev;
@@ -399,10 +397,6 @@ static int sriov_init(struct pci_dev *dev, int pos)
ssleep(1);
}
- pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
- if (!total)
- return 0;
-
ctrl = 0;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
if (pdev->is_physfn)
@@ -414,11 +408,10 @@ static int sriov_init(struct pci_dev *dev, int pos)
found:
pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
- pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, 0);
- pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
- pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
- if (!offset || (total > 1 && !stride))
- return -EIO;
+
+ pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
+ if (!total)
+ return 0;
pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
@@ -436,8 +429,15 @@ found:
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
- bar64 = __pci_read_base(dev, pci_bar_unknown, res,
- pos + PCI_SRIOV_BAR + i * 4);
+ /*
+ * If it is already FIXED, don't change it, something
+ * (perhaps EA or header fixups) wants it this way.
+ */
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
+ else
+ bar64 = __pci_read_base(dev, pci_bar_unknown, res,
+ pos + PCI_SRIOV_BAR + i * 4);
if (!res->flags)
continue;
if (resource_size(res) & (PAGE_SIZE - 1)) {
@@ -456,8 +456,6 @@ found:
iov->nres = nres;
iov->ctrl = ctrl;
iov->total_VFs = total;
- iov->offset = offset;
- iov->stride = stride;
iov->pgsz = pgsz;
iov->self = dev;
pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
@@ -474,10 +472,15 @@ found:
dev->sriov = iov;
dev->is_physfn = 1;
- iov->max_VF_buses = virtfn_max_buses(dev);
+ rc = compute_max_vf_buses(dev);
+ if (rc)
+ goto fail_max_buses;
return 0;
+fail_max_buses:
+ dev->sriov = NULL;
+ dev->is_physfn = 0;
failed:
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
diff --git a/kernel/drivers/pci/msi.c b/kernel/drivers/pci/msi.c
index c3e7dfcf9..7eaa4c87f 100644
--- a/kernel/drivers/pci/msi.c
+++ b/kernel/drivers/pci/msi.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
#include "pci.h"
@@ -39,14 +40,13 @@ struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev)
static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
{
- struct irq_domain *domain = NULL;
+ struct irq_domain *domain;
- if (dev->bus->msi)
- domain = dev->bus->msi->domain;
- if (!domain)
- domain = arch_get_pci_msi_domain(dev);
+ domain = dev_get_msi_domain(&dev->dev);
+ if (domain)
+ return domain;
- return domain;
+ return arch_get_pci_msi_domain(dev);
}
static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
@@ -54,7 +54,7 @@ static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
struct irq_domain *domain;
domain = pci_msi_get_domain(dev);
- if (domain)
+ if (domain && irq_domain_is_hierarchy(domain))
return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
return arch_setup_msi_irqs(dev, nvec, type);
@@ -65,7 +65,7 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
struct irq_domain *domain;
domain = pci_msi_get_domain(dev);
- if (domain)
+ if (domain && irq_domain_is_hierarchy(domain))
pci_msi_domain_free_irqs(domain, dev);
else
arch_teardown_msi_irqs(dev);
@@ -77,24 +77,9 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
/* Arch hooks */
-struct msi_controller * __weak pcibios_msi_controller(struct pci_dev *dev)
-{
- return NULL;
-}
-
-static struct msi_controller *pci_msi_controller(struct pci_dev *dev)
-{
- struct msi_controller *msi_ctrl = dev->bus->msi;
-
- if (msi_ctrl)
- return msi_ctrl;
-
- return pcibios_msi_controller(dev);
-}
-
int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
- struct msi_controller *chip = pci_msi_controller(dev);
+ struct msi_controller *chip = dev->bus->msi;
int err;
if (!chip || !chip->setup_irq)
@@ -121,9 +106,12 @@ void __weak arch_teardown_msi_irq(unsigned int irq)
int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
+ struct msi_controller *chip = dev->bus->msi;
struct msi_desc *entry;
int ret;
+ if (chip && chip->setup_irqs)
+ return chip->setup_irqs(chip, dev, nvec, type);
/*
* If an architecture wants to support multiple MSI, it needs to
* override arch_setup_msi_irqs()
@@ -131,7 +119,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
ret = arch_setup_msi_irq(dev, entry);
if (ret < 0)
return ret;
@@ -151,7 +139,7 @@ void default_teardown_msi_irqs(struct pci_dev *dev)
int i;
struct msi_desc *entry;
- list_for_each_entry(entry, &dev->msi_list, list)
+ for_each_pci_msi_entry(entry, dev)
if (entry->irq)
for (i = 0; i < entry->nvec_used; i++)
arch_teardown_msi_irq(entry->irq + i);
@@ -168,7 +156,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq)
entry = NULL;
if (dev->msix_enabled) {
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
if (irq == entry->irq)
break;
}
@@ -185,27 +173,6 @@ void __weak arch_restore_msi_irqs(struct pci_dev *dev)
return default_restore_msi_irqs(dev);
}
-static void msi_set_enable(struct pci_dev *dev, int enable)
-{
- u16 control;
-
- pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
- control &= ~PCI_MSI_FLAGS_ENABLE;
- if (enable)
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
-}
-
-static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
-{
- u16 ctrl;
-
- pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
- ctrl &= ~clear;
- ctrl |= set;
- pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
-}
-
static inline __attribute_const__ u32 msi_mask(unsigned x)
{
/* Don't shift by >= width of type */
@@ -229,7 +196,8 @@ u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
mask_bits &= ~mask;
mask_bits |= flag;
- pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
+ pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
+ mask_bits);
return mask_bits;
}
@@ -270,7 +238,7 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
- struct msi_desc *desc = irq_data_get_msi(data);
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
if (desc->msi_attrib.is_msix) {
msix_mask_irq(desc, flag);
@@ -303,13 +271,15 @@ void default_restore_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
- list_for_each_entry(entry, &dev->msi_list, list)
+ for_each_pci_msi_entry(entry, dev)
default_restore_msi_irq(dev, entry->irq);
}
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
- BUG_ON(entry->dev->current_state != PCI_D0);
+ struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+ BUG_ON(dev->current_state != PCI_D0);
if (entry->msi_attrib.is_msix) {
void __iomem *base = entry->mask_base +
@@ -319,7 +289,6 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
} else {
- struct pci_dev *dev = entry->dev;
int pos = dev->msi_cap;
u16 data;
@@ -339,7 +308,9 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
- if (entry->dev->current_state != PCI_D0) {
+ struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+ if (dev->current_state != PCI_D0) {
/* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) {
void __iomem *base;
@@ -350,7 +321,6 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
} else {
- struct pci_dev *dev = entry->dev;
int pos = dev->msi_cap;
u16 msgctl;
@@ -384,21 +354,22 @@ EXPORT_SYMBOL_GPL(pci_write_msi_msg);
static void free_msi_irqs(struct pci_dev *dev)
{
+ struct list_head *msi_list = dev_to_msi_list(&dev->dev);
struct msi_desc *entry, *tmp;
struct attribute **msi_attrs;
struct device_attribute *dev_attr;
int i, count = 0;
- list_for_each_entry(entry, &dev->msi_list, list)
+ for_each_pci_msi_entry(entry, dev)
if (entry->irq)
for (i = 0; i < entry->nvec_used; i++)
BUG_ON(irq_has_action(entry->irq + i));
pci_msi_teardown_msi_irqs(dev);
- list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
+ list_for_each_entry_safe(entry, tmp, msi_list, list) {
if (entry->msi_attrib.is_msix) {
- if (list_is_last(&entry->list, &dev->msi_list))
+ if (list_is_last(&entry->list, msi_list))
iounmap(entry->mask_base);
}
@@ -423,18 +394,6 @@ static void free_msi_irqs(struct pci_dev *dev)
}
}
-static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
-{
- struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return NULL;
-
- INIT_LIST_HEAD(&desc->list);
- desc->dev = dev;
-
- return desc;
-}
-
static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
@@ -452,7 +411,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
entry = irq_get_msi_desc(dev->irq);
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, 0);
+ pci_msi_set_enable(dev, 0);
arch_restore_msi_irqs(dev);
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
@@ -469,18 +428,18 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
if (!dev->msix_enabled)
return;
- BUG_ON(list_empty(&dev->msi_list));
+ BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
/* route the table */
pci_intx_for_msi(dev, 0);
- msix_clear_and_set_ctrl(dev, 0,
+ pci_msix_clear_and_set_ctrl(dev, 0,
PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
arch_restore_msi_irqs(dev);
- list_for_each_entry(entry, &dev->msi_list, list)
+ for_each_pci_msi_entry(entry, dev)
msix_mask_irq(entry, entry->masked);
- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}
void pci_restore_msi_state(struct pci_dev *dev)
@@ -520,10 +479,11 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
int ret = -ENOMEM;
int num_msi = 0;
int count = 0;
+ int i;
/* Determine how many msi entries we have */
- list_for_each_entry(entry, &pdev->msi_list, list)
- ++num_msi;
+ for_each_pci_msi_entry(entry, pdev)
+ num_msi += entry->nvec_used;
if (!num_msi)
return 0;
@@ -531,20 +491,22 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
if (!msi_attrs)
return -ENOMEM;
- list_for_each_entry(entry, &pdev->msi_list, list) {
- msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
- if (!msi_dev_attr)
- goto error_attrs;
- msi_attrs[count] = &msi_dev_attr->attr;
-
- sysfs_attr_init(&msi_dev_attr->attr);
- msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
- entry->irq);
- if (!msi_dev_attr->attr.name)
- goto error_attrs;
- msi_dev_attr->attr.mode = S_IRUGO;
- msi_dev_attr->show = msi_mode_show;
- ++count;
+ for_each_pci_msi_entry(entry, pdev) {
+ for (i = 0; i < entry->nvec_used; i++) {
+ msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
+ if (!msi_dev_attr)
+ goto error_attrs;
+ msi_attrs[count] = &msi_dev_attr->attr;
+
+ sysfs_attr_init(&msi_dev_attr->attr);
+ msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
+ entry->irq + i);
+ if (!msi_dev_attr->attr.name)
+ goto error_attrs;
+ msi_dev_attr->attr.mode = S_IRUGO;
+ msi_dev_attr->show = msi_mode_show;
+ ++count;
+ }
}
msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
@@ -589,7 +551,7 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
struct msi_desc *entry;
/* MSI Entry Initialization */
- entry = alloc_msi_entry(dev);
+ entry = alloc_msi_entry(&dev->dev);
if (!entry)
return NULL;
@@ -620,7 +582,7 @@ static int msi_verify_entries(struct pci_dev *dev)
{
struct msi_desc *entry;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
if (!dev->no_64bit_msi || !entry->msg.address_hi)
continue;
dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
@@ -647,7 +609,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
int ret;
unsigned mask;
- msi_set_enable(dev, 0); /* Disable MSI during set up */
+ pci_msi_set_enable(dev, 0); /* Disable MSI during set up */
entry = msi_setup_entry(dev, nvec);
if (!entry)
@@ -657,7 +619,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
mask = msi_mask(entry->msi_attrib.multi_cap);
msi_mask_irq(entry, mask, mask);
- list_add_tail(&entry->list, &dev->msi_list);
+ list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
@@ -683,9 +645,10 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, 1);
+ pci_msi_set_enable(dev, 1);
dev->msi_enabled = 1;
+ pcibios_free_irq(dev);
dev->irq = entry->irq;
return 0;
}
@@ -717,7 +680,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
int i;
for (i = 0; i < nvec; i++) {
- entry = alloc_msi_entry(dev);
+ entry = alloc_msi_entry(&dev->dev);
if (!entry) {
if (!i)
iounmap(base);
@@ -734,7 +697,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->mask_base = base;
entry->nvec_used = 1;
- list_add_tail(&entry->list, &dev->msi_list);
+ list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
}
return 0;
@@ -746,7 +709,7 @@ static void msix_program_entries(struct pci_dev *dev,
struct msi_desc *entry;
int i = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL;
@@ -775,7 +738,7 @@ static int msix_capability_init(struct pci_dev *dev,
void __iomem *base;
/* Ensure MSI-X is disabled while it is set up */
- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* Request & Map MSI-X table region */
@@ -801,7 +764,7 @@ static int msix_capability_init(struct pci_dev *dev,
* MSI-X registers. We need to mask all the vectors to prevent
* interrupts coming in before they're fully set up.
*/
- msix_clear_and_set_ctrl(dev, 0,
+ pci_msix_clear_and_set_ctrl(dev, 0,
PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
msix_program_entries(dev, entries);
@@ -813,9 +776,9 @@ static int msix_capability_init(struct pci_dev *dev,
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
dev->msix_enabled = 1;
+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
-
+ pcibios_free_irq(dev);
return 0;
out_avail:
@@ -827,7 +790,7 @@ out_avail:
struct msi_desc *entry;
int avail = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
if (entry->irq != 0)
avail++;
}
@@ -916,10 +879,10 @@ void pci_msi_shutdown(struct pci_dev *dev)
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
- BUG_ON(list_empty(&dev->msi_list));
- desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+ BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
+ desc = first_pci_msi_entry(dev);
- msi_set_enable(dev, 0);
+ pci_msi_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
@@ -930,6 +893,7 @@ void pci_msi_shutdown(struct pci_dev *dev)
/* Restore dev->irq to its default pin-assertion irq */
dev->irq = desc->msi_attrib.default_irq;
+ pcibios_alloc_irq(dev);
}
void pci_disable_msi(struct pci_dev *dev)
@@ -1022,14 +986,15 @@ void pci_msix_shutdown(struct pci_dev *dev)
return;
/* Return the device with MSI-X masked as initial states */
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
/* Keep cached states to be restored */
__pci_msix_desc_mask_irq(entry, 1);
}
- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1);
dev->msix_enabled = 0;
+ pcibios_alloc_irq(dev);
}
void pci_disable_msix(struct pci_dev *dev)
@@ -1061,19 +1026,6 @@ EXPORT_SYMBOL(pci_msi_enabled);
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
- INIT_LIST_HEAD(&dev->msi_list);
-
- /* Disable the msi hardware to avoid screaming interrupts
- * during boot. This is the power on reset default so
- * usually this should be a noop.
- */
- dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (dev->msi_cap)
- msi_set_enable(dev, 0);
-
- dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (dev->msix_cap)
- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}
/**
@@ -1170,6 +1122,19 @@ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
}
EXPORT_SYMBOL(pci_enable_msix_range);
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+{
+ return to_pci_dev(desc->dev);
+}
+
+void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
+{
+ struct pci_dev *dev = msi_desc_to_pci_dev(desc);
+
+ return dev->bus->sysdata;
+}
+EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata);
+
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
/**
* pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
@@ -1178,7 +1143,7 @@ EXPORT_SYMBOL(pci_enable_msix_range);
*/
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
- struct msi_desc *desc = irq_data->msi_desc;
+ struct msi_desc *desc = irq_data_get_msi_desc(irq_data);
/*
* For MSI-X desc->irq is always equal to irq_data->irq. For
@@ -1285,11 +1250,15 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
BUG_ON(!chip);
if (!chip->irq_write_msi_msg)
chip->irq_write_msi_msg = pci_msi_domain_write_msg;
+ if (!chip->irq_mask)
+ chip->irq_mask = pci_msi_mask_irq;
+ if (!chip->irq_unmask)
+ chip->irq_unmask = pci_msi_unmask_irq;
}
/**
- * pci_msi_create_irq_domain - Creat a MSI interrupt domain
- * @node: Optional device-tree node of the interrupt controller
+ * pci_msi_create_irq_domain - Create a MSI interrupt domain
+ * @fwnode: Optional fwnode of the interrupt controller
* @info: MSI domain info
* @parent: Parent irq domain
*
@@ -1298,16 +1267,23 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
* Returns:
* A domain pointer or NULL in case of failure.
*/
-struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
+struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent)
{
+ struct irq_domain *domain;
+
if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
pci_msi_domain_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
pci_msi_domain_update_chip_ops(info);
- return msi_create_irq_domain(node, info, parent);
+ domain = msi_create_irq_domain(fwnode, info, parent);
+ if (!domain)
+ return NULL;
+
+ domain->bus_token = DOMAIN_BUS_PCI_MSI;
+ return domain;
}
/**
@@ -1338,14 +1314,14 @@ void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
/**
* pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
- * @node: Optional device-tree node of the interrupt controller
+ * @fwnode: Optional fwnode of the interrupt controller
* @info: MSI domain info
* @parent: Parent irq domain
*
* Returns: A domain pointer or NULL in case of failure. If successful
* the default PCI/MSI irqdomain pointer is updated.
*/
-struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
+struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info, struct irq_domain *parent)
{
struct irq_domain *domain;
@@ -1355,11 +1331,59 @@ struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
domain = NULL;
} else {
- domain = pci_msi_create_irq_domain(node, info, parent);
+ domain = pci_msi_create_irq_domain(fwnode, info, parent);
pci_msi_default_domain = domain;
}
mutex_unlock(&pci_msi_domain_lock);
return domain;
}
+
+static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
+{
+ u32 *pa = data;
+
+ *pa = alias;
+ return 0;
+}
+/**
+ * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
+ * @domain: The interrupt domain
+ * @pdev: The PCI device.
+ *
+ * The RID for a device is formed from the alias, with a firmware-supplied
+ * mapping applied.
+ *
+ * Returns: The RID.
+ */
+u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
+{
+ struct device_node *of_node;
+ u32 rid = 0;
+
+ pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+
+ of_node = irq_domain_get_of_node(domain);
+ if (of_node)
+ rid = of_msi_map_rid(&pdev->dev, of_node, rid);
+
+ return rid;
+}
+
+/**
+ * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
+ * @pdev: The PCI device
+ *
+ * Use the firmware data to find a device-specific MSI domain
+ * (i.e. not one that is set as a default).
+ *
+ * Returns: The corresponding MSI domain or NULL if none has been found.
+ */
+struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
+{
+ u32 rid = 0;
+
+ pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+ return of_msi_map_get_device_domain(&pdev->dev, rid);
+}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
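For context, here is a minimal, hypothetical sketch of how a host-controller driver might use the reworked fwnode-based pci_msi_create_irq_domain() shown above. The foo_* identifiers are invented placeholders, not code from this tree; the msi_domain_info flags and the pci_msi_mask_irq()/pci_msi_unmask_irq() defaults are the ones referenced in the hunks above.

/*
 * Hypothetical sketch only: with MSI_FLAG_USE_DEF_CHIP_OPS the core fills
 * in irq_mask/irq_unmask with pci_msi_mask_irq()/pci_msi_unmask_irq(),
 * and the resulting domain is tagged DOMAIN_BUS_PCI_MSI.
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>

static struct irq_chip foo_msi_irq_chip = {
	.name = "foo-MSI",
	/* irq_mask/irq_unmask left NULL; defaults are supplied by the core */
};

static struct msi_domain_info foo_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_PCI_MSIX,
	.chip  = &foo_msi_irq_chip,
};

static struct irq_domain *foo_create_pci_msi_domain(struct fwnode_handle *fwnode,
						    struct irq_domain *parent)
{
	/* Returns NULL on failure; the caller decides how to handle that. */
	return pci_msi_create_irq_domain(fwnode, &foo_msi_domain_info, parent);
}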
diff --git a/kernel/drivers/pci/of.c b/kernel/drivers/pci/of.c
index f0929934b..e112da116 100644
--- a/kernel/drivers/pci/of.c
+++ b/kernel/drivers/pci/of.c
@@ -9,9 +9,11 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include "pci.h"
@@ -59,3 +61,30 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
return of_node_get(bus->bridge->parent->of_node);
return NULL;
}
+
+struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
+{
+#ifdef CONFIG_IRQ_DOMAIN
+ struct irq_domain *d;
+
+ if (!bus->dev.of_node)
+ return NULL;
+
+ /* Start looking for a phandle to an MSI controller. */
+ d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
+ if (d)
+ return d;
+
+ /*
+ * If we don't have an msi-parent property, look for a domain
+ * directly attached to the host bridge.
+ */
+ d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
+ if (d)
+ return d;
+
+ return irq_find_host(bus->dev.of_node);
+#else
+ return NULL;
+#endif
+}
diff --git a/kernel/drivers/pci/pci-acpi.c b/kernel/drivers/pci/pci-acpi.c
index 6f6f175f5..a32ba753e 100644
--- a/kernel/drivers/pci/pci-acpi.c
+++ b/kernel/drivers/pci/pci-acpi.c
@@ -420,7 +420,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
[PCI_D0] = ACPI_STATE_D0,
[PCI_D1] = ACPI_STATE_D1,
[PCI_D2] = ACPI_STATE_D2,
- [PCI_D3hot] = ACPI_STATE_D3_COLD,
+ [PCI_D3hot] = ACPI_STATE_D3_HOT,
[PCI_D3cold] = ACPI_STATE_D3_COLD,
};
int error = -EINVAL;
@@ -594,7 +594,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
/**
* pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
* @pdev: the PCI device whose delay is to be updated
- * @adev: the companion ACPI device of this PCI device
+ * @handle: ACPI handle of this device
*
* Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
* control method of either the device itself or the PCI host bridge.
diff --git a/kernel/drivers/pci/pci-driver.c b/kernel/drivers/pci/pci-driver.c
index 3cb2210de..d7ffd6681 100644
--- a/kernel/drivers/pci/pci-driver.c
+++ b/kernel/drivers/pci/pci-driver.c
@@ -172,7 +172,7 @@ static ssize_t store_remove_id(struct device_driver *driver, const char *buf,
__u32 vendor, device, subvendor = PCI_ANY_ID,
subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
int fields = 0;
- int retval = -ENODEV;
+ size_t retval = -ENODEV;
fields = sscanf(buf, "%x %x %x %x %x %x",
&vendor, &device, &subvendor, &subdevice,
@@ -190,15 +190,13 @@ static ssize_t store_remove_id(struct device_driver *driver, const char *buf,
!((id->class ^ class) & class_mask)) {
list_del(&dynid->node);
kfree(dynid);
- retval = 0;
+ retval = count;
break;
}
}
spin_unlock(&pdrv->dynids.lock);
- if (retval)
- return retval;
- return count;
+ return retval;
}
static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
@@ -299,9 +297,10 @@ static long local_pci_probe(void *_ddi)
* Unbound PCI devices are always put in D0, regardless of
* runtime PM status. During probe, the device is set to
* active and the usage count is incremented. If the driver
- * supports runtime PM, it should call pm_runtime_put_noidle()
- * in its probe routine and pm_runtime_get_noresume() in its
- * remove routine.
+ * supports runtime PM, it should call pm_runtime_put_noidle(),
+ * or any other runtime PM helper function decrementing the usage
+ * count, in its probe routine and pm_runtime_get_noresume() in
+ * its remove routine.
*/
pm_runtime_get_sync(dev);
pci_dev->driver = pci_drv;
@@ -388,18 +387,31 @@ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
return error;
}
+int __weak pcibios_alloc_irq(struct pci_dev *dev)
+{
+ return 0;
+}
+
+void __weak pcibios_free_irq(struct pci_dev *dev)
+{
+}
+
static int pci_device_probe(struct device *dev)
{
- int error = 0;
- struct pci_driver *drv;
- struct pci_dev *pci_dev;
+ int error;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = to_pci_driver(dev->driver);
+
+ error = pcibios_alloc_irq(pci_dev);
+ if (error < 0)
+ return error;
- drv = to_pci_driver(dev->driver);
- pci_dev = to_pci_dev(dev);
pci_dev_get(pci_dev);
error = __pci_device_probe(drv, pci_dev);
- if (error)
+ if (error) {
+ pcibios_free_irq(pci_dev);
pci_dev_put(pci_dev);
+ }
return error;
}
@@ -415,6 +427,7 @@ static int pci_device_remove(struct device *dev)
drv->remove(pci_dev);
pm_runtime_put_noidle(dev);
}
+ pcibios_free_irq(pci_dev);
pci_dev->driver = NULL;
}
@@ -453,7 +466,7 @@ static void pci_device_shutdown(struct device *dev)
pci_msi_shutdown(pci_dev);
pci_msix_shutdown(pci_dev);
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
/*
* If this is a kexec reboot, turn off Bus Master bit on the
* device to tell it to not continue to do DMA. Don't touch
@@ -669,10 +682,16 @@ static int pci_pm_prepare(struct device *dev)
return pci_dev_keep_suspended(to_pci_dev(dev));
}
+static void pci_pm_complete(struct device *dev)
+{
+ pci_dev_complete_resume(to_pci_dev(dev));
+ pm_complete_with_resume_check(dev);
+}
#else /* !CONFIG_PM_SLEEP */
#define pci_pm_prepare NULL
+#define pci_pm_complete NULL
#endif /* !CONFIG_PM_SLEEP */
@@ -1127,9 +1146,21 @@ static int pci_pm_runtime_suspend(struct device *dev)
pci_dev->state_saved = false;
pci_dev->no_d3cold = false;
error = pm->runtime_suspend(dev);
- suspend_report_result(pm->runtime_suspend, error);
- if (error)
+ if (error) {
+ /*
+ * -EBUSY and -EAGAIN are used to request the runtime PM core
+ * to schedule a new suspend, so log the event only with debug
+ * log level.
+ */
+ if (error == -EBUSY || error == -EAGAIN)
+ dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
+ pm->runtime_suspend, error);
+ else
+ dev_err(dev, "can't suspend (%pf returned %d)\n",
+ pm->runtime_suspend, error);
+
return error;
+ }
if (!pci_dev->d3cold_allowed)
pci_dev->no_d3cold = true;
@@ -1203,6 +1234,7 @@ static int pci_pm_runtime_idle(struct device *dev)
static const struct dev_pm_ops pci_dev_pm_ops = {
.prepare = pci_pm_prepare,
+ .complete = pci_pm_complete,
.suspend = pci_pm_suspend,
.resume = pci_pm_resume,
.freeze = pci_pm_freeze,
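The pci-driver.c hunks above add __weak pcibios_alloc_irq()/pcibios_free_irq() stubs and call them around driver probe/remove, as well as from the MSI/MSI-X enable and shutdown paths in msi.c. Below is a hedged sketch of what an architecture override could look like; the foo_arch_* helpers are invented stand-ins for arch-specific IRQ routing, not functions from this tree.

/*
 * Hypothetical arch-side override of the new __weak hooks; the two
 * foo_arch_* helpers are assumptions standing in for real arch code.
 */
#include <linux/pci.h>

int foo_arch_map_pci_irq(struct pci_dev *dev);		/* assumed helper */
void foo_arch_unmap_pci_irq(struct pci_dev *dev);	/* assumed helper */

int pcibios_alloc_irq(struct pci_dev *dev)
{
	int irq = foo_arch_map_pci_irq(dev);

	if (irq < 0)
		return irq;	/* pci_device_probe() aborts the probe */

	dev->irq = irq;
	return 0;
}

void pcibios_free_irq(struct pci_dev *dev)
{
	/* Called on remove/probe failure and when MSI/MSI-X takes over */
	foo_arch_unmap_pci_irq(dev);
	dev->irq = 0;
}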
diff --git a/kernel/drivers/pci/pci-sysfs.c b/kernel/drivers/pci/pci-sysfs.c
index 312f23a84..eead54cd0 100644
--- a/kernel/drivers/pci/pci-sysfs.c
+++ b/kernel/drivers/pci/pci-sysfs.c
@@ -216,7 +216,10 @@ static ssize_t numa_node_store(struct device *dev,
if (ret)
return ret;
- if (!node_online(node))
+ if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
+ return -EINVAL;
+
+ if (node != NUMA_NO_NODE && !node_online(node))
return -EINVAL;
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
diff --git a/kernel/drivers/pci/pci.c b/kernel/drivers/pci/pci.c
index c44393f26..42d861735 100644
--- a/kernel/drivers/pci/pci.c
+++ b/kernel/drivers/pci/pci.c
@@ -27,6 +27,7 @@
#include <linux/pci_hotplug.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
+#include <linux/aer.h>
#include "pci.h"
const char *pci_power_names[] = {
@@ -81,7 +82,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
-enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
/*
* The default CLS is used if arch didn't set CLS explicitly and not
@@ -138,9 +139,22 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
+
+void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
+{
+ /*
+ * Make sure the BAR is actually a memory resource, not an IO resource
+ */
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
+ WARN_ON(1);
+ return NULL;
+ }
+ return ioremap_wc(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
+}
+EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
-#define PCI_FIND_CAP_TTL 48
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
u8 pos, int cap, int *ttl)
@@ -196,8 +210,6 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus,
return PCI_CAPABILITY_LIST;
case PCI_HEADER_TYPE_CARDBUS:
return PCI_CB_CAPABILITY_LIST;
- default:
- return 0;
}
return 0;
@@ -447,6 +459,30 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev,
EXPORT_SYMBOL(pci_find_parent_resource);
/**
+ * pci_find_pcie_root_port - return PCIe Root Port
+ * @dev: PCI device to query
+ *
+ * Traverse up the parent chain and return the PCIe Root Port PCI Device
+ * for a given PCI Device.
+ */
+struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
+{
+ struct pci_dev *bridge, *highest_pcie_bridge = NULL;
+
+ bridge = pci_upstream_bridge(dev);
+ while (bridge && pci_is_pcie(bridge)) {
+ highest_pcie_bridge = bridge;
+ bridge = pci_upstream_bridge(bridge);
+ }
+
+ if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
+ return NULL;
+
+ return highest_pcie_bridge;
+}
+EXPORT_SYMBOL(pci_find_pcie_root_port);
+
+/**
* pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
* @dev: the PCI device to operate on
* @pos: config space offset of status word
@@ -473,7 +509,7 @@ int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
}
/**
- * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
+ * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
* @dev: PCI device to have its BARs restored
*
* Restore the BAR values for a given device, so as to make it
@@ -483,6 +519,10 @@ static void pci_restore_bars(struct pci_dev *dev)
{
int i;
+ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+ if (dev->is_virtfn)
+ return;
+
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
pci_update_resource(dev, i);
}
@@ -972,7 +1012,7 @@ static int pci_save_pcix_state(struct pci_dev *dev)
struct pci_cap_saved_state *save_state;
pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
- if (pos <= 0)
+ if (!pos)
return 0;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
@@ -995,7 +1035,7 @@ static void pci_restore_pcix_state(struct pci_dev *dev)
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
- if (!save_state || pos <= 0)
+ if (!save_state || !pos)
return;
cap = (u16 *)&save_state->cap.data[0];
@@ -1088,10 +1128,15 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_ats_state(dev);
pci_restore_vc_state(dev);
+ pci_cleanup_aer_error_status_regs(dev);
+
pci_restore_config_space(dev);
pci_restore_pcix_state(dev);
pci_restore_msi_state(dev);
+
+ /* Restore ACS and IOV configuration state */
+ pci_enable_acs(dev);
pci_restore_iov_state(dev);
dev->state_saved = false;
@@ -1696,15 +1741,7 @@ static void pci_pme_list_scan(struct work_struct *work)
mutex_unlock(&pci_pme_list_mutex);
}
-/**
- * pci_pme_active - enable or disable PCI device's PME# function
- * @dev: PCI device to handle.
- * @enable: 'true' to enable PME# generation; 'false' to disable it.
- *
- * The caller must verify that the device is capable of generating PME# before
- * calling this function with @enable equal to 'true'.
- */
-void pci_pme_active(struct pci_dev *dev, bool enable)
+static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
u16 pmcsr;
@@ -1718,6 +1755,19 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
+}
+
+/**
+ * pci_pme_active - enable or disable PCI device's PME# function
+ * @dev: PCI device to handle.
+ * @enable: 'true' to enable PME# generation; 'false' to disable it.
+ *
+ * The caller must verify that the device is capable of generating PME# before
+ * calling this function with @enable equal to 'true'.
+ */
+void pci_pme_active(struct pci_dev *dev, bool enable)
+{
+ __pci_pme_active(dev, enable);
/*
* PCI (as opposed to PCIe) PME requires that the device have
@@ -2018,17 +2068,60 @@ EXPORT_SYMBOL_GPL(pci_dev_run_wake);
* reconfigured due to wakeup settings difference between system and runtime
* suspend and the current power state of it is suitable for the upcoming
* (system) transition.
+ *
+ * If the device is not configured for system wakeup, disable PME for it before
+ * returning 'true' to prevent it from waking up the system unnecessarily.
*/
bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
{
struct device *dev = &pci_dev->dev;
if (!pm_runtime_suspended(dev)
- || (device_can_wakeup(dev) && !device_may_wakeup(dev))
+ || pci_target_state(pci_dev) != pci_dev->current_state
|| platform_pci_need_resume(pci_dev))
return false;
- return pci_target_state(pci_dev) == pci_dev->current_state;
+ /*
+ * At this point the device is good to go unless it's been configured
+ * to generate PME at the runtime suspend time, but it is not supposed
+ * to wake up the system. In that case, simply disable PME for it
+ * (it will have to be re-enabled on exit from system resume).
+ *
+ * If the device's power state is D3cold and the platform check above
+ * hasn't triggered, the device's configuration is suitable and we don't
+ * need to manipulate it at all.
+ */
+ spin_lock_irq(&dev->power.lock);
+
+ if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
+ !device_may_wakeup(dev))
+ __pci_pme_active(pci_dev, false);
+
+ spin_unlock_irq(&dev->power.lock);
+ return true;
+}
+
+/**
+ * pci_dev_complete_resume - Finalize resume from system sleep for a device.
+ * @pci_dev: Device to handle.
+ *
+ * If the device is runtime suspended and wakeup-capable, enable PME for it as
+ * it might have been disabled during the prepare phase of system suspend if
+ * the device was not configured for system wakeup.
+ */
+void pci_dev_complete_resume(struct pci_dev *pci_dev)
+{
+ struct device *dev = &pci_dev->dev;
+
+ if (!pci_dev_run_wake(pci_dev))
+ return;
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
+ __pci_pme_active(pci_dev, true);
+
+ spin_unlock_irq(&dev->power.lock);
}
void pci_config_pm_runtime_get(struct pci_dev *pdev)
@@ -2134,6 +2227,198 @@ void pci_pm_init(struct pci_dev *dev)
}
}
+static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
+{
+ unsigned long flags = IORESOURCE_PCI_FIXED;
+
+ switch (prop) {
+ case PCI_EA_P_MEM:
+ case PCI_EA_P_VF_MEM:
+ flags |= IORESOURCE_MEM;
+ break;
+ case PCI_EA_P_MEM_PREFETCH:
+ case PCI_EA_P_VF_MEM_PREFETCH:
+ flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ break;
+ case PCI_EA_P_IO:
+ flags |= IORESOURCE_IO;
+ break;
+ default:
+ return 0;
+ }
+
+ return flags;
+}
+
+static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
+ u8 prop)
+{
+ if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
+ return &dev->resource[bei];
+#ifdef CONFIG_PCI_IOV
+ else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
+ (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
+ return &dev->resource[PCI_IOV_RESOURCES +
+ bei - PCI_EA_BEI_VF_BAR0];
+#endif
+ else if (bei == PCI_EA_BEI_ROM)
+ return &dev->resource[PCI_ROM_RESOURCE];
+ else
+ return NULL;
+}
+
+/* Read an Enhanced Allocation (EA) entry */
+static int pci_ea_read(struct pci_dev *dev, int offset)
+{
+ struct resource *res;
+ int ent_size, ent_offset = offset;
+ resource_size_t start, end;
+ unsigned long flags;
+ u32 dw0, bei, base, max_offset;
+ u8 prop;
+ bool support_64 = (sizeof(resource_size_t) >= 8);
+
+ pci_read_config_dword(dev, ent_offset, &dw0);
+ ent_offset += 4;
+
+ /* Entry size field indicates DWORDs after 1st */
+ ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
+
+ if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
+ goto out;
+
+ bei = (dw0 & PCI_EA_BEI) >> 4;
+ prop = (dw0 & PCI_EA_PP) >> 8;
+
+ /*
+ * If the Property is in the reserved range, try the Secondary
+ * Property instead.
+ */
+ if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
+ prop = (dw0 & PCI_EA_SP) >> 16;
+ if (prop > PCI_EA_P_BRIDGE_IO)
+ goto out;
+
+ res = pci_ea_get_resource(dev, bei, prop);
+ if (!res) {
+ dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei);
+ goto out;
+ }
+
+ flags = pci_ea_flags(dev, prop);
+ if (!flags) {
+ dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop);
+ goto out;
+ }
+
+ /* Read Base */
+ pci_read_config_dword(dev, ent_offset, &base);
+ start = (base & PCI_EA_FIELD_MASK);
+ ent_offset += 4;
+
+ /* Read MaxOffset */
+ pci_read_config_dword(dev, ent_offset, &max_offset);
+ ent_offset += 4;
+
+ /* Read Base MSBs (if 64-bit entry) */
+ if (base & PCI_EA_IS_64) {
+ u32 base_upper;
+
+ pci_read_config_dword(dev, ent_offset, &base_upper);
+ ent_offset += 4;
+
+ flags |= IORESOURCE_MEM_64;
+
+ /* entry starts above 32-bit boundary, can't use */
+ if (!support_64 && base_upper)
+ goto out;
+
+ if (support_64)
+ start |= ((u64)base_upper << 32);
+ }
+
+ end = start + (max_offset | 0x03);
+
+ /* Read MaxOffset MSBs (if 64-bit entry) */
+ if (max_offset & PCI_EA_IS_64) {
+ u32 max_offset_upper;
+
+ pci_read_config_dword(dev, ent_offset, &max_offset_upper);
+ ent_offset += 4;
+
+ flags |= IORESOURCE_MEM_64;
+
+ /* entry too big, can't use */
+ if (!support_64 && max_offset_upper)
+ goto out;
+
+ if (support_64)
+ end += ((u64)max_offset_upper << 32);
+ }
+
+ if (end < start) {
+ dev_err(&dev->dev, "EA Entry crosses address boundary\n");
+ goto out;
+ }
+
+ if (ent_size != ent_offset - offset) {
+ dev_err(&dev->dev,
+ "EA Entry Size (%d) does not match length read (%d)\n",
+ ent_size, ent_offset - offset);
+ goto out;
+ }
+
+ res->name = pci_name(dev);
+ res->start = start;
+ res->end = end;
+ res->flags = flags;
+
+ if (bei <= PCI_EA_BEI_BAR5)
+ dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+ bei, res, prop);
+ else if (bei == PCI_EA_BEI_ROM)
+ dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
+ res, prop);
+ else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
+ dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+ bei - PCI_EA_BEI_VF_BAR0, res, prop);
+ else
+ dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
+ bei, res, prop);
+
+out:
+ return offset + ent_size;
+}
+
+/* Enhanced Allocation Initialization */
+void pci_ea_init(struct pci_dev *dev)
+{
+ int ea;
+ u8 num_ent;
+ int offset;
+ int i;
+
+ /* find PCI EA capability in list */
+ ea = pci_find_capability(dev, PCI_CAP_ID_EA);
+ if (!ea)
+ return;
+
+ /* determine the number of entries */
+ pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
+ &num_ent);
+ num_ent &= PCI_EA_NUM_ENT_MASK;
+
+ offset = ea + PCI_EA_FIRST_ENT;
+
+ /* Skip DWORD 2 for type 1 functions */
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ offset += 4;
+
+ /* parse each EA entry */
+ for (i = 0; i < num_ent; ++i)
+ offset = pci_ea_read(dev, offset);
+}
+
static void pci_add_saved_cap(struct pci_dev *pci_dev,
struct pci_cap_saved_state *new_cap)
{
@@ -2159,7 +2444,7 @@ static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
else
pos = pci_find_capability(dev, cap);
- if (pos <= 0)
+ if (!pos)
return 0;
save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
@@ -3101,39 +3386,6 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
-/**
- * pci_msi_off - disables any MSI or MSI-X capabilities
- * @dev: the PCI device to operate on
- *
- * If you want to use MSI, see pci_enable_msi() and friends.
- * This is a lower-level primitive that allows us to disable
- * MSI operation at the device level.
- */
-void pci_msi_off(struct pci_dev *dev)
-{
- int pos;
- u16 control;
-
- /*
- * This looks like it could go in msi.c, but we need it even when
- * CONFIG_PCI_MSI=n. For the same reason, we can't use
- * dev->msi_cap or dev->msix_cap here.
- */
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (pos) {
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
- control &= ~PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
- }
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
- control &= ~PCI_MSIX_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
- }
-}
-EXPORT_SYMBOL_GPL(pci_msi_off);
-
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
return dma_set_max_seg_size(&dev->dev, size);
@@ -4520,8 +4772,10 @@ int pci_get_new_domain_nr(void)
void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
{
static int use_dt_domains = -1;
- int domain = of_get_pci_domain_nr(parent->of_node);
+ int domain = -1;
+ if (parent)
+ domain = of_get_pci_domain_nr(parent->of_node);
/*
* Check DT domain and use_dt_domains values.
*
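Among the pci.c additions above, pci_ioremap_wc_bar() is a small exported helper for write-combining BAR mappings. A minimal, hypothetical probe fragment using it follows; FOO_FB_BAR and the foo_* names are invented for illustration, and error handling is trimmed to the essentials.

/*
 * Hypothetical probe fragment; FOO_FB_BAR is assumed to be a
 * prefetchable memory BAR suitable for a write-combining mapping.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>

#define FOO_FB_BAR	1	/* assumed BAR index */

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *fb;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_region(pdev, FOO_FB_BAR, "foo-fb");
	if (ret)
		goto err_disable;

	fb = pci_ioremap_wc_bar(pdev, FOO_FB_BAR);	/* NULL if not a MEM BAR */
	if (!fb) {
		ret = -ENOMEM;
		goto err_release;
	}

	/* A real driver would keep "fb" in its private state and iounmap()
	 * it on remove; omitted in this sketch. */
	return 0;

err_release:
	pci_release_region(pdev, FOO_FB_BAR);
err_disable:
	pci_disable_device(pdev);
	return ret;
}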
diff --git a/kernel/drivers/pci/pci.h b/kernel/drivers/pci/pci.h
index 9bd762c23..d390fc147 100644
--- a/kernel/drivers/pci/pci.h
+++ b/kernel/drivers/pci/pci.h
@@ -4,6 +4,8 @@
#define PCI_CFG_SPACE_SIZE 256
#define PCI_CFG_SPACE_EXP_SIZE 4096
+#define PCI_FIND_CAP_TTL 48
+
extern const unsigned char pcie_link_speed[];
bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
@@ -73,9 +75,11 @@ void pci_disable_enabled_device(struct pci_dev *dev);
int pci_finish_runtime_suspend(struct pci_dev *dev);
int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
bool pci_dev_keep_suspended(struct pci_dev *dev);
+void pci_dev_complete_resume(struct pci_dev *pci_dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
void pci_pm_init(struct pci_dev *dev);
+void pci_ea_init(struct pci_dev *dev);
void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
@@ -146,6 +150,27 @@ static inline void pci_no_msi(void) { }
static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
#endif
+static inline void pci_msi_set_enable(struct pci_dev *dev, int enable)
+{
+ u16 control;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
+}
+
+static inline void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
+{
+ u16 ctrl;
+
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ ctrl &= ~clear;
+ ctrl |= set;
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+}
+
void pci_realloc_get_opt(char *);
static inline int pci_no_d1d2(struct pci_dev *dev)
@@ -216,17 +241,6 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
struct list_head *fail_head);
bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
-/**
- * pci_ari_enabled - query ARI forwarding status
- * @bus: the PCI bus
- *
- * Returns 1 if ARI forwarding is enabled, or 0 if not enabled;
- */
-static inline int pci_ari_enabled(struct pci_bus *bus)
-{
- return bus->self && bus->self->ari_enabled;
-}
-
void pci_reassigndev_resource_alignment(struct pci_dev *dev);
void pci_disable_bridge_window(struct pci_dev *dev);
@@ -323,6 +337,4 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
}
#endif
-struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
-
#endif /* DRIVERS_PCI_H */
diff --git a/kernel/drivers/pci/pcie/aer/aerdrv.c b/kernel/drivers/pci/pcie/aer/aerdrv.c
index 0bf82a20a..48d21e0ed 100644
--- a/kernel/drivers/pci/pcie/aer/aerdrv.c
+++ b/kernel/drivers/pci/pcie/aer/aerdrv.c
@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
rpc->rpd = dev;
INIT_WORK(&rpc->dpc_handler, aer_isr);
mutex_init(&rpc->rpc_mutex);
- init_waitqueue_head(&rpc->wait_release);
/* Use PCIe bus function to store rpc into PCIe device */
set_service_data(dev, rpc);
@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
if (rpc->isr)
free_irq(dev->irq, dev);
- wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
-
+ flush_work(&rpc->dpc_handler);
aer_disable_rootport(rpc);
kfree(rpc);
set_service_data(dev, NULL);
diff --git a/kernel/drivers/pci/pcie/aer/aerdrv.h b/kernel/drivers/pci/pcie/aer/aerdrv.h
index 84420b7c9..945c939a8 100644
--- a/kernel/drivers/pci/pcie/aer/aerdrv.h
+++ b/kernel/drivers/pci/pcie/aer/aerdrv.h
@@ -72,7 +72,6 @@ struct aer_rpc {
* recovery on the same
* root port hierarchy
*/
- wait_queue_head_t wait_release;
};
struct aer_broadcast_data {
diff --git a/kernel/drivers/pci/pcie/aer/aerdrv_core.c b/kernel/drivers/pci/pcie/aer/aerdrv_core.c
index 5653ea945..4e14de0f0 100644
--- a/kernel/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/kernel/drivers/pci/pcie/aer/aerdrv_core.c
@@ -74,6 +74,34 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
+int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
+{
+ int pos;
+ u32 status;
+ int port_type;
+
+ if (!pci_is_pcie(dev))
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -EIO;
+
+ port_type = pci_pcie_type(dev);
+ if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
+ }
+
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
+
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+
+ return 0;
+}
+
/**
* add_error_device - list device to be handled
* @e_info: pointer to error info
@@ -425,8 +453,7 @@ static pci_ers_result_t reset_link(struct pci_dev *dev)
if (driver && driver->reset_link) {
status = driver->reset_link(udev);
- } else if (pci_pcie_type(udev) == PCI_EXP_TYPE_DOWNSTREAM ||
- pci_pcie_type(udev) == PCI_EXP_TYPE_ROOT_PORT) {
+ } else if (udev->has_secondary_link) {
status = default_reset_link(udev);
} else {
dev_printk(KERN_DEBUG, &dev->dev,
@@ -784,8 +811,6 @@ void aer_isr(struct work_struct *work)
while (get_e_source(rpc, &e_src))
aer_isr_one_error(p_device, &e_src);
mutex_unlock(&rpc->rpc_mutex);
-
- wake_up(&rpc->wait_release);
}
/**
diff --git a/kernel/drivers/pci/pcie/aspm.c b/kernel/drivers/pci/pcie/aspm.c
index 7d4fcdc51..317e3558a 100644
--- a/kernel/drivers/pci/pcie/aspm.c
+++ b/kernel/drivers/pci/pcie/aspm.c
@@ -127,15 +127,12 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
struct pci_dev *child;
struct pci_bus *linkbus = link->pdev->subordinate;
+ u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- if (enable)
- pcie_capability_set_word(child, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_CLKREQ_EN);
- else
- pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_CLKREQ_EN);
- }
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN,
+ val);
link->clkpm_enabled = !!enable;
}
@@ -525,7 +522,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
INIT_LIST_HEAD(&link->children);
INIT_LIST_HEAD(&link->link);
link->pdev = pdev;
- if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) {
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
struct pcie_link_state *parent;
parent = pdev->bus->parent->self->link_state;
if (!parent) {
@@ -559,10 +556,15 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
if (!aspm_support_enabled)
return;
- if (!pci_is_pcie(pdev) || pdev->link_state)
+ if (pdev->link_state)
return;
- if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
- pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
+
+ /*
+ * We allocate pcie_link_state for the component on the upstream
+ * end of a Link, so there's nothing to do unless this device has a
+ * Link on its secondary side.
+ */
+ if (!pdev->has_secondary_link)
return;
/* VIA has a strange chipset, root port is under a bridge */
@@ -675,10 +677,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
{
struct pcie_link_state *link = pdev->link_state;
- if (aspm_disabled || !pci_is_pcie(pdev) || !link)
- return;
- if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
- (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
+ if (aspm_disabled || !link)
return;
/*
* Devices changed PM state, we should recheck if latency
@@ -696,16 +695,12 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
struct pcie_link_state *link = pdev->link_state;
- if (aspm_disabled || !pci_is_pcie(pdev) || !link)
+ if (aspm_disabled || !link)
return;
if (aspm_policy != POLICY_POWERSAVE)
return;
- if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
- (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
- return;
-
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
pcie_config_aspm_path(link);
@@ -714,8 +709,7 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
up_read(&pci_bus_sem);
}
-static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
- bool force)
+static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
{
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link;
@@ -723,8 +717,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
if (!pci_is_pcie(pdev))
return;
- if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)
+ if (pdev->has_secondary_link)
parent = pdev;
if (!parent || !parent->link_state)
return;
@@ -737,7 +730,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
* a similar mechanism using "PciASPMOptOut", which is also
* ignored in this situation.
*/
- if (aspm_disabled && !force) {
+ if (aspm_disabled) {
dev_warn(&pdev->dev, "can't disable ASPM; OS doesn't have ASPM control\n");
return;
}
@@ -763,7 +756,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
- __pci_disable_link_state(pdev, state, false, false);
+ __pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
@@ -778,7 +771,7 @@ EXPORT_SYMBOL(pci_disable_link_state_locked);
*/
void pci_disable_link_state(struct pci_dev *pdev, int state)
{
- __pci_disable_link_state(pdev, state, true, false);
+ __pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state);
@@ -907,9 +900,7 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
{
struct pcie_link_state *link_state = pdev->link_state;
- if (!pci_is_pcie(pdev) ||
- (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
- pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
+ if (!link_state)
return;
if (link_state->aspm_support)
@@ -924,9 +915,7 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
{
struct pcie_link_state *link_state = pdev->link_state;
- if (!pci_is_pcie(pdev) ||
- (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
- pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
+ if (!link_state)
return;
if (link_state->aspm_support)
diff --git a/kernel/drivers/pci/pcie/portdrv_core.c b/kernel/drivers/pci/pcie/portdrv_core.c
index 2f0ce668a..88122dc2e 100644
--- a/kernel/drivers/pci/pcie/portdrv_core.c
+++ b/kernel/drivers/pci/pcie/portdrv_core.c
@@ -448,7 +448,7 @@ static int resume_iter(struct device *dev, void *data)
}
/**
- * pcie_port_device_suspend - resume port services associated with a PCIe port
+ * pcie_port_device_resume - resume port services associated with a PCIe port
* @dev: PCI Express port to handle
*/
int pcie_port_device_resume(struct device *dev)
diff --git a/kernel/drivers/pci/probe.c b/kernel/drivers/pci/probe.c
index c91185721..edb198420 100644
--- a/kernel/drivers/pci/probe.c
+++ b/kernel/drivers/pci/probe.c
@@ -6,12 +6,15 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
+#include <linux/aer.h>
+#include <linux/acpi.h>
#include <asm-generic/pci-bridge.h>
#include "pci.h"
@@ -326,8 +329,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
dev->rom_base_reg = rom;
res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
- IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
- IORESOURCE_SIZEALIGN;
+ IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
__pci_read_base(dev, pci_bar_mem32, res, rom);
}
}
@@ -661,6 +663,40 @@ static void pci_set_bus_speed(struct pci_bus *bus)
}
}
+static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
+{
+ struct irq_domain *d;
+
+ /*
+ * Any firmware interface that can resolve the msi_domain
+ * should be called from here.
+ */
+ d = pci_host_bridge_of_msi_domain(bus);
+
+ return d;
+}
+
+static void pci_set_bus_msi_domain(struct pci_bus *bus)
+{
+ struct irq_domain *d;
+ struct pci_bus *b;
+
+ /*
+ * The bus can be a root bus, a subordinate bus, or a virtual bus
+ * created by an SR-IOV device. Walk up to the first bridge device
+ * found or derive the domain from the host bridge.
+ */
+ for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
+ if (b->self)
+ d = dev_get_msi_domain(&b->self->dev);
+ }
+
+ if (!d)
+ d = pci_host_bridge_msi_domain(b);
+
+ dev_set_msi_domain(&bus->dev, d);
+}
+
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
@@ -714,6 +750,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
bridge->subordinate = child;
add_dev:
+ pci_set_bus_msi_domain(child);
ret = device_register(&child->dev);
WARN_ON(ret < 0);
@@ -973,6 +1010,8 @@ void set_pcie_port_type(struct pci_dev *pdev)
{
int pos;
u16 reg16;
+ int type;
+ struct pci_dev *parent;
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!pos)
@@ -982,6 +1021,27 @@ void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_flags_reg = reg16;
pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+
+ /*
+ * A Root Port is always the upstream end of a Link. No PCIe
+ * component has two Links. Two Links are connected by a Switch
+ * that has a Port on each Link and internal logic to connect the
+ * two Ports.
+ */
+ type = pci_pcie_type(pdev);
+ if (type == PCI_EXP_TYPE_ROOT_PORT)
+ pdev->has_secondary_link = 1;
+ else if (type == PCI_EXP_TYPE_UPSTREAM ||
+ type == PCI_EXP_TYPE_DOWNSTREAM) {
+ parent = pci_upstream_bridge(pdev);
+
+ /*
+ * Usually there's an upstream device (Root Port or Switch
+ * Downstream Port), but we can't assume one exists.
+ */
+ if (parent && !parent->has_secondary_link)
+ pdev->has_secondary_link = 1;
+ }
}
void set_pcie_hotplug_bridge(struct pci_dev *pdev)
@@ -1085,6 +1145,22 @@ int pci_cfg_space_size(struct pci_dev *dev)
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
+void pci_msi_setup_pci_dev(struct pci_dev *dev)
+{
+ /*
+ * Disable the MSI hardware to avoid screaming interrupts
+ * during boot. This is the power on reset default so
+ * usually this should be a noop.
+ */
+ dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (dev->msi_cap)
+ pci_msi_set_enable(dev, 0);
+
+ dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (dev->msix_cap)
+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+}
+
/**
* pci_setup_device - fill in class and map information of a device
* @dev: the device structure to fill
@@ -1099,7 +1175,6 @@ int pci_setup_device(struct pci_dev *dev)
{
u32 class;
u8 hdr_type;
- struct pci_slot *slot;
int pos = 0;
struct pci_bus_region region;
struct resource *res;
@@ -1115,10 +1190,7 @@ int pci_setup_device(struct pci_dev *dev)
dev->error_state = pci_channel_io_normal;
set_pcie_port_type(dev);
- list_for_each_entry(slot, &dev->bus->slots, list)
- if (PCI_SLOT(dev->devfn) == slot->number)
- dev->slot = slot;
-
+ pci_dev_assign_slot(dev);
/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
set this higher, assuming the system even supports it. */
dev->dma_mask = 0xffffffff;
@@ -1140,6 +1212,8 @@ int pci_setup_device(struct pci_dev *dev)
/* "Unknown power state" */
dev->current_state = PCI_UNKNOWN;
+ pci_msi_setup_pci_dev(dev);
+
/* Early fixups, before probing the BARs */
pci_fixup_device(pci_fixup_early, dev);
/* device class may be changed after fixup */
@@ -1232,13 +1306,51 @@ int pci_setup_device(struct pci_dev *dev)
bad:
dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
dev->class, dev->hdr_type);
- dev->class = PCI_CLASS_NOT_DEFINED;
+ dev->class = PCI_CLASS_NOT_DEFINED << 8;
}
/* We found a fine healthy device, go go go... */
return 0;
}
+static void pci_configure_mps(struct pci_dev *dev)
+{
+ struct pci_dev *bridge = pci_upstream_bridge(dev);
+ int mps, p_mps, rc;
+
+ if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
+ return;
+
+ mps = pcie_get_mps(dev);
+ p_mps = pcie_get_mps(bridge);
+
+ if (mps == p_mps)
+ return;
+
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
+ dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ mps, pci_name(bridge), p_mps);
+ return;
+ }
+
+ /*
+ * Fancier MPS configuration is done later by
+ * pcie_bus_configure_settings()
+ */
+ if (pcie_bus_config != PCIE_BUS_DEFAULT)
+ return;
+
+ rc = pcie_set_mps(dev, p_mps);
+ if (rc) {
+ dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ p_mps);
+ return;
+ }
+
+ dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
+ p_mps, mps, 128 << dev->pcie_mpss);
+}
+
static struct hpp_type0 pci_default_type0 = {
.revision = 1,
.cache_line_size = 8,
@@ -1360,6 +1472,8 @@ static void pci_configure_device(struct pci_dev *dev)
struct hotplug_params hpp;
int ret;
+ pci_configure_mps(dev);
+
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
if (ret)
@@ -1486,6 +1600,9 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
static void pci_init_capabilities(struct pci_dev *dev)
{
+ /* Enhanced Allocation */
+ pci_ea_init(dev);
+
/* MSI/MSI-X list */
pci_msi_init_pci_dev(dev);
@@ -1504,8 +1621,85 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Single Root I/O Virtualization */
pci_iov_init(dev);
+ /* Address Translation Services */
+ pci_ats_init(dev);
+
/* Enable ACS P2P upstream forwarding */
pci_enable_acs(dev);
+
+ pci_cleanup_aer_error_status_regs(dev);
+}
+
+/*
+ * This is the equivalent of pci_host_bridge_msi_domain that acts on
+ * devices. Firmware interfaces that can select the MSI domain on a
+ * per-device basis should be called from here.
+ */
+static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
+{
+ struct irq_domain *d;
+
+ /*
+ * If a domain has been set through the pcibios_add_device
+ * callback, then this is the one (platform code knows best).
+ */
+ d = dev_get_msi_domain(&dev->dev);
+ if (d)
+ return d;
+
+ /*
+ * Let's see if we have a firmware interface able to provide
+ * the domain.
+ */
+ d = pci_msi_get_device_domain(dev);
+ if (d)
+ return d;
+
+ return NULL;
+}
+
+static void pci_set_msi_domain(struct pci_dev *dev)
+{
+ struct irq_domain *d;
+
+ /*
+ * If the platform or firmware interfaces cannot supply a
+ * device-specific MSI domain, then inherit the default domain
+ * from the host bridge itself.
+ */
+ d = pci_dev_msi_domain(dev);
+ if (!d)
+ d = dev_get_msi_domain(&dev->bus->dev);
+
+ dev_set_msi_domain(&dev->dev, d);
+}
+
+/**
+ * pci_dma_configure - Setup DMA configuration
+ * @dev: ptr to pci_dev struct of the PCI device
+ *
+ * Function to update a PCI device's DMA configuration using the same
+ * info from the OF node or ACPI node of host bridge's parent (if any).
+ */
+static void pci_dma_configure(struct pci_dev *dev)
+{
+ struct device *bridge = pci_get_host_bridge_device(dev);
+
+ if (IS_ENABLED(CONFIG_OF) &&
+ bridge->parent && bridge->parent->of_node) {
+ of_dma_configure(&dev->dev, bridge->parent->of_node);
+ } else if (has_acpi_companion(bridge)) {
+ struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
+ enum dev_dma_attr attr = acpi_get_dma_attr(adev);
+
+ if (attr == DEV_DMA_NOT_SUPPORTED)
+ dev_warn(&dev->dev, "DMA not supported.\n");
+ else
+ arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
+ attr == DEV_DMA_COHERENT);
+ }
+
+ pci_put_host_bridge_device(bridge);
}
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
@@ -1521,7 +1715,7 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
- of_pci_dma_configure(dev);
+ pci_dma_configure(dev);
pci_set_dma_max_seg_size(dev, 65536);
pci_set_dma_seg_boundary(dev, 0xffffffff);
@@ -1549,6 +1743,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
ret = pcibios_add_device(dev);
WARN_ON(ret < 0);
+ /* Setup MSI irq domain */
+ pci_set_msi_domain(dev);
+
/* Notifier could use PCI capabilities */
dev->match_driver = false;
ret = device_add(&dev->dev);
@@ -1611,7 +1808,7 @@ static int only_one_child(struct pci_bus *bus)
return 0;
if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
return 1;
- if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
+ if (parent->has_secondary_link &&
!pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
return 1;
return 0;
@@ -1755,22 +1952,6 @@ static void pcie_write_mrrs(struct pci_dev *dev)
dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
}
-static void pcie_bus_detect_mps(struct pci_dev *dev)
-{
- struct pci_dev *bridge = dev->bus->self;
- int mps, p_mps;
-
- if (!bridge)
- return;
-
- mps = pcie_get_mps(dev);
- p_mps = pcie_get_mps(bridge);
-
- if (mps != p_mps)
- dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
- mps, pci_name(bridge), p_mps);
-}
-
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
int mps, orig_mps;
@@ -1778,10 +1959,9 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
if (!pci_is_pcie(dev))
return 0;
- if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
- pcie_bus_detect_mps(dev);
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
+ pcie_bus_config == PCIE_BUS_DEFAULT)
return 0;
- }
mps = 128 << *(u8 *)data;
orig_mps = pcie_get_mps(dev);
@@ -1939,6 +2119,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
b->bridge = get_device(&bridge->dev);
device_enable_async_suspend(b->bridge);
pci_set_bus_of_node(b);
+ pci_set_bus_msi_domain(b);
if (!parent)
set_dev_node(b->bridge, pcibus_to_node(b));
@@ -2060,8 +2241,9 @@ void pci_bus_release_busn_res(struct pci_bus *b)
res, ret ? "can not be" : "is");
}
-struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata, struct list_head *resources)
+struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata,
+ struct list_head *resources, struct msi_controller *msi)
{
struct resource_entry *window;
bool found = false;
@@ -2078,6 +2260,8 @@ struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
if (!b)
return NULL;
+ b->msi = msi;
+
if (!found) {
dev_info(&b->dev,
"No busn resource found for root bus, will use [bus %02x-ff]\n",
@@ -2092,26 +2276,14 @@ struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
return b;
}
-EXPORT_SYMBOL(pci_scan_root_bus);
-/* Deprecated; use pci_scan_root_bus() instead */
-struct pci_bus *pci_scan_bus_parented(struct device *parent,
- int bus, struct pci_ops *ops, void *sysdata)
+struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
- LIST_HEAD(resources);
- struct pci_bus *b;
-
- pci_add_resource(&resources, &ioport_resource);
- pci_add_resource(&resources, &iomem_resource);
- pci_add_resource(&resources, &busn_resource);
- b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
- if (b)
- pci_scan_child_bus(b);
- else
- pci_free_resource_list(&resources);
- return b;
+ return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
+ NULL);
}
-EXPORT_SYMBOL(pci_scan_bus_parented);
+EXPORT_SYMBOL(pci_scan_root_bus);
struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
void *sysdata)
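The probe.c hunks above introduce pci_scan_root_bus_msi(), which lets a host controller hand its legacy struct msi_controller to the core while scanning the root bus; pci_scan_root_bus() now simply wraps it with a NULL msi argument. Below is a hedged sketch of how a built-in host driver might call it; foo_pci_ops, foo_msi_chip and the surrounding scaffolding are hypothetical, not taken from this tree.

/*
 * Hypothetical built-in host-controller fragment; foo_pci_ops and
 * foo_msi_chip are assumed to be defined elsewhere in the driver.
 */
#include <linux/errno.h>
#include <linux/msi.h>
#include <linux/pci.h>

extern struct pci_ops foo_pci_ops;		/* assumed */
extern struct msi_controller foo_msi_chip;	/* assumed */

static int foo_host_scan(struct device *parent, void *sysdata,
			 struct list_head *resources)
{
	struct pci_bus *bus;

	bus = pci_scan_root_bus_msi(parent, 0, &foo_pci_ops, sysdata,
				    resources, &foo_msi_chip);
	if (!bus)
		return -ENODEV;

	pci_bus_add_devices(bus);
	return 0;
}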
diff --git a/kernel/drivers/pci/quirks.c b/kernel/drivers/pci/quirks.c
index 804cd3b02..7e327309c 100644
--- a/kernel/drivers/pci/quirks.c
+++ b/kernel/drivers/pci/quirks.c
@@ -163,7 +163,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_
* VIA Apollo KT133 needs PCI latency patch
* Made according to a windows driver based patch by George E. Breese
* see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
- * and http://www.georgebreese.com/net/software/#PCI
* Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
* the info on which Mr Breese based his work.
*
@@ -424,10 +423,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
*/
static void quirk_amd_nl_class(struct pci_dev *pdev)
{
- /*
- * Use 'USB Device' (0x0c03fe) instead of PCI header provided
- */
- pdev->class = 0x0c03fe;
+ u32 class = pdev->class;
+
+ /* Use "USB Device (not host controller)" class */
+ pdev->class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe;
+ dev_info(&pdev->dev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
+ class, pdev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
quirk_amd_nl_class);
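For reference, not patch content: dev->class packs the class code as (base class << 16) | (subclass << 8) | (programming interface), and PCI_CLASS_SERIAL_USB is the 16-bit base/subclass pair 0x0c03, so

        (PCI_CLASS_SERIAL_USB << 8) | 0xfe  ==  (0x0c03 << 8) | 0xfe  ==  0x0c03fe

i.e. exactly the magic number the removed line wrote. The rewrite only spells out the encoding (prog-if 0xfe = USB device, not host controller) and logs the override.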
@@ -819,13 +820,6 @@ static void quirk_amd_ioapic(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
-
-static void quirk_ioapic_rmw(struct pci_dev *dev)
-{
- if (dev->devfn == 0 && dev->bus->number == 0)
- sis_apic_bug = 1;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw);
#endif /* CONFIG_X86_IO_APIC */
/*
@@ -1612,7 +1606,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_a
static void quirk_pcie_mch(struct pci_dev *pdev)
{
- pci_msi_off(pdev);
pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
@@ -1626,7 +1619,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir
*/
static void quirk_pcie_pxh(struct pci_dev *dev)
{
- pci_msi_off(dev);
dev->no_msi = 1;
dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n");
}
@@ -1915,11 +1907,27 @@ static void quirk_netmos(struct pci_dev *dev)
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
+/*
+ * Quirk non-zero PCI functions to route VPD access through function 0 for
+ * devices that share VPD resources between functions. The functions are
+ * expected to be identical devices.
+ */
static void quirk_f0_vpd_link(struct pci_dev *dev)
{
- if (!dev->multifunction || !PCI_FUNC(dev->devfn))
+ struct pci_dev *f0;
+
+ if (!PCI_FUNC(dev->devfn))
+ return;
+
+ f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ if (!f0)
return;
- dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+
+ if (f0->vpd && dev->class == f0->class &&
+ dev->vendor == f0->vendor && dev->device == f0->device)
+ dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+
+ pci_dev_put(f0);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
@@ -2016,14 +2024,18 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
static void fixup_rev1_53c810(struct pci_dev *dev)
{
- /* rev 1 ncr53c810 chips don't set the class at all which means
+ u32 class = dev->class;
+
+ /*
+ * rev 1 ncr53c810 chips don't set the class at all which means
* they don't get their resources remapped. Fix that here.
*/
+ if (class)
+ return;
- if (dev->class == PCI_CLASS_NOT_DEFINED) {
- dev_info(&dev->dev, "NCR 53c810 rev 1 detected; setting PCI class\n");
- dev->class = PCI_CLASS_STORAGE_SCSI;
- }
+ dev->class = PCI_CLASS_STORAGE_SCSI << 8;
+ dev_info(&dev->dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
+ class, dev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
@@ -2234,6 +2246,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disab
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void quirk_disable_msi(struct pci_dev *dev)
@@ -2271,7 +2284,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
 * return 1 if an HT MSI capability is found and enabled */
static int msi_ht_cap_enabled(struct pci_dev *dev)
{
- int pos, ttl = 48;
+ int pos, ttl = PCI_FIND_CAP_TTL;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
@@ -2330,7 +2343,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
/* Force enable MSI mapping capability on HT bridges */
static void ht_enable_msi_mapping(struct pci_dev *dev)
{
- int pos, ttl = 48;
+ int pos, ttl = PCI_FIND_CAP_TTL;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
@@ -2409,7 +2422,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
static int ht_check_msi_mapping(struct pci_dev *dev)
{
- int pos, ttl = 48;
+ int pos, ttl = PCI_FIND_CAP_TTL;
int found = 0;
/* check if there is HT MSI cap or enabled on this device */
@@ -2534,7 +2547,7 @@ out:
static void ht_disable_msi_mapping(struct pci_dev *dev)
{
- int pos, ttl = 48;
+ int pos, ttl = PCI_FIND_CAP_TTL;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
@@ -2867,7 +2880,7 @@ static void fixup_ti816x_class(struct pci_dev *dev)
class, dev->class);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
- PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
+ PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
/* Some PCIe devices do not work reliably with the claimed maximum
* payload size supported.
@@ -2895,7 +2908,8 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
int err;
u16 rcc;
- if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
+ pcie_bus_config == PCIE_BUS_DEFAULT)
return;
/* Intel errata specifies bits to change but does not say what they are.
@@ -3061,7 +3075,16 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
-
+/* Intel Cherrytrail devices do not need 10ms d3_delay */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
/*
* Some devices may pass our check in pci_intx_mask_supported if
* PCI_COMMAND_INTX_DISABLE works though they actually do not properly
@@ -3359,28 +3382,6 @@ fs_initcall_sync(pci_apply_final_quirks);
* reset a single function if other methods (e.g. FLR, PM D0->D3) are
* not available.
*/
-static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
-{
- int pos;
-
- /* only implement PCI_CLASS_SERIAL_USB at present */
- if (dev->class == PCI_CLASS_SERIAL_USB) {
- pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
- if (!pos)
- return -ENOTTY;
-
- if (probe)
- return 0;
-
- pci_write_config_byte(dev, pos + 0x4, 1);
- msleep(100);
-
- return 0;
- } else {
- return -ENOTTY;
- }
-}
-
static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
{
/*
@@ -3539,8 +3540,6 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
reset_ivb_igd },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
reset_ivb_igd },
- { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
- reset_intel_generic_dev },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
{ 0 }
@@ -3596,6 +3595,8 @@ static void quirk_dma_func1_alias(struct pci_dev *dev)
* SKUs this function is not present, making this a ghost requester.
* https://bugzilla.kernel.org/show_bug.cgi?id=42679
*/
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
+ quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
@@ -3686,6 +3687,85 @@ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
/*
+ * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
+ * class code. Fix it.
+ */
+static void quirk_tw686x_class(struct pci_dev *pdev)
+{
+ u32 class = pdev->class;
+
+ /* Use "Multimedia controller" class */
+ pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
+ dev_info(&pdev->dev, "TW686x PCI class overridden (%#08x -> %#08x)\n",
+ class, pdev->class);
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_tw686x_class);
+DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_tw686x_class);
+DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_tw686x_class);
+DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
+ quirk_tw686x_class);
+
+/*
+ * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same
+ * values for the Attribute as were supplied in the header of the
+ * corresponding Request, except as explicitly allowed when IDO is used."
+ *
+ * If a non-compliant device generates a completion with a different
+ * attribute than the request, the receiver may accept it (which itself
+ * seems non-compliant based on sec 2.3.2), or it may handle it as a
+ * Malformed TLP or an Unexpected Completion, which will probably lead to a
+ * device access timeout.
+ *
+ * If the non-compliant device generates completions with zero attributes
+ * (instead of copying the attributes from the request), we can work around
+ * this by disabling the "Relaxed Ordering" and "No Snoop" attributes in
+ * upstream devices so they always generate requests with zero attributes.
+ *
+ * This affects other devices under the same Root Port, but since these
+ * attributes are performance hints, there should be no functional problem.
+ *
+ * Note that Configuration Space accesses are never supposed to have TLP
+ * Attributes, so we're safe waiting till after any Configuration Space
+ * accesses to do the Root Port fixup.
+ */
+static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
+{
+ struct pci_dev *root_port = pci_find_pcie_root_port(pdev);
+
+ if (!root_port) {
+ dev_warn(&pdev->dev, "PCIe Completion erratum may cause device errors\n");
+ return;
+ }
+
+ dev_info(&root_port->dev, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
+ dev_name(&pdev->dev));
+ pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
+}
+
+/*
+ * The Chelsio T5 chip fails to copy TLP Attributes from a Request to the
+ * Completion it generates.
+ */
+static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
+{
+ /*
+ * This mask/compare operation selects for Physical Function 4 on a
+ * T5. We only need to fix up the Root Port once for any of the
+ * PFs. PF[0..3] have PCI Device IDs of 0x50xx, but PF4 is uniquely
+	 * 0x54xx so we use that one.
+ */
+ if ((pdev->device & 0xff00) == 0x5400)
+ quirk_disable_root_port_attributes(pdev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ quirk_chelsio_T5_disable_root_port_attributes);
+
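A minimal sketch, not from the patch, of the read-modify-write that pcie_capability_clear_and_set_word() performs above: it clears the Enable Relaxed Ordering and Enable No Snoop bits in the Root Port's Device Control register, so TLPs the Root Port originates carry zero attributes and therefore match the zero-attribute Completions the buggy device returns.

static void clear_devctl_attrs_sketch(struct pci_dev *root_port)
{
        u16 devctl;

        /* Illustrative only; equivalent to the helper call above. */
        pcie_capability_read_word(root_port, PCI_EXP_DEVCTL, &devctl);
        devctl &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
        pcie_capability_write_word(root_port, PCI_EXP_DEVCTL, devctl);
}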
+/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
* IOMMU in the system. Multifunction devices that do not support
@@ -3764,6 +3844,8 @@ static const u16 pci_quirk_intel_pch_acs_ids[] = {
/* Wellsburg (X99) PCH */
0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
+ /* Lynx Point (9 series) PCH */
+ 0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e,
};
static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
@@ -3877,6 +3959,9 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs },
+ /* I219 */
+ { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
/* Intel PCH root ports */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
@@ -4037,3 +4122,88 @@ void pci_dev_specific_enable_acs(struct pci_dev *dev)
}
}
}
+
+/*
+ * The PCI capabilities list for Intel DH895xCC VFs (device id 0x0443) with
+ * QuickAssist Technology (QAT) is prematurely terminated in hardware. The
+ * Next Capability pointer in the MSI Capability Structure should point to
+ * the PCIe Capability Structure but is incorrectly hardwired as 0 terminating
+ * the list.
+ */
+static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+{
+ int pos, i = 0;
+ u8 next_cap;
+ u16 reg16, *cap;
+ struct pci_cap_saved_state *state;
+
+ /* Bail if the hardware bug is fixed */
+ if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP))
+ return;
+
+ /* Bail if MSI Capability Structure is not found for some reason */
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ if (!pos)
+ return;
+
+ /*
+ * Bail if Next Capability pointer in the MSI Capability Structure
+ * is not the expected incorrect 0x00.
+ */
+ pci_read_config_byte(pdev, pos + 1, &next_cap);
+ if (next_cap)
+ return;
+
+ /*
+ * PCIe Capability Structure is expected to be at 0x50 and should
+ * terminate the list (Next Capability pointer is 0x00). Verify
+	 * that the Capability ID and Next Capability pointer are as expected.
+ * Open-code some of set_pcie_port_type() and pci_cfg_space_size_ext()
+ * to correctly set kernel data structures which have already been
+ * set incorrectly due to the hardware bug.
+ */
+ pos = 0x50;
+ pci_read_config_word(pdev, pos, &reg16);
+ if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) {
+ u32 status;
+#ifndef PCI_EXP_SAVE_REGS
+#define PCI_EXP_SAVE_REGS 7
+#endif
+ int size = PCI_EXP_SAVE_REGS * sizeof(u16);
+
+ pdev->pcie_cap = pos;
+ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ pdev->pcie_flags_reg = reg16;
+ pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
+ pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+
+ pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
+ if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
+ PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
+ pdev->cfg_size = PCI_CFG_SPACE_SIZE;
+
+ if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
+ return;
+
+ /*
+ * Save PCIE cap
+ */
+ state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
+ if (!state)
+ return;
+
+ state->cap.cap_nr = PCI_CAP_ID_EXP;
+ state->cap.cap_extended = 0;
+ state->cap.size = size;
+ cap = (u16 *)&state->cap.data[0];
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]);
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]);
+ pcie_capability_read_word(pdev, PCI_EXP_RTCTL, &cap[i++]);
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]);
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]);
+ hlist_add_head(&state->next, &pdev->saved_cap_space);
+ }
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
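For context, a simplified sketch (not part of the patch) of the standard capability-list walk that the hardwired 0x00 Next pointer defeats: because the walk stops at the MSI capability, pci_find_capability() never reaches the PCIe capability at 0x50 on these VFs, which is why the quirk fills in pcie_cap, pcie_flags_reg and cfg_size by hand.

static u8 cap_walk_sketch(struct pci_dev *pdev, u8 want)
{
        int ttl = PCI_FIND_CAP_TTL;     /* driver-internal loop bound */
        u8 pos, id;

        /* Illustrative sketch only, not part of this patch. */
        pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
        while (pos && ttl--) {
                pci_read_config_byte(pdev, pos + PCI_CAP_LIST_ID, &id);
                if (id == want)
                        return pos;
                /* On the broken VF the MSI cap's Next pointer reads 0x00... */
                pci_read_config_byte(pdev, pos + PCI_CAP_LIST_NEXT, &pos);
        }
        return 0;       /* ...so the walk ends before reaching 0x50 */
}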
diff --git a/kernel/drivers/pci/setup-bus.c b/kernel/drivers/pci/setup-bus.c
index 508cc5613..1723ac1b3 100644
--- a/kernel/drivers/pci/setup-bus.c
+++ b/kernel/drivers/pci/setup-bus.c
@@ -1037,9 +1037,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
struct resource *r = &dev->resource[i];
resource_size_t r_size;
- if (r->parent || ((r->flags & mask) != type &&
- (r->flags & mask) != type2 &&
- (r->flags & mask) != type3))
+ if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
+ ((r->flags & mask) != type &&
+ (r->flags & mask) != type2 &&
+ (r->flags & mask) != type3))
continue;
r_size = resource_size(r);
#ifdef CONFIG_PCI_IOV
@@ -1340,6 +1341,47 @@ void pci_bus_size_bridges(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_size_bridges);
+static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r)
+{
+ int i;
+ struct resource *parent_r;
+ unsigned long mask = IORESOURCE_IO | IORESOURCE_MEM |
+ IORESOURCE_PREFETCH;
+
+ pci_bus_for_each_resource(b, parent_r, i) {
+ if (!parent_r)
+ continue;
+
+ if ((r->flags & mask) == (parent_r->flags & mask) &&
+ resource_contains(parent_r, r))
+ request_resource(parent_r, r);
+ }
+}
+
+/*
+ * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they
+ * are skipped by pbus_assign_resources_sorted().
+ */
+static void pdev_assign_fixed_resources(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct pci_bus *b;
+ struct resource *r = &dev->resource[i];
+
+ if (r->parent || !(r->flags & IORESOURCE_PCI_FIXED) ||
+ !(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+ continue;
+
+ b = dev->bus;
+ while (b && !r->parent) {
+ assign_fixed_resource_on_bus(b, r);
+ b = b->parent;
+ }
+ }
+}
+
void __pci_bus_assign_resources(const struct pci_bus *bus,
struct list_head *realloc_head,
struct list_head *fail_head)
@@ -1350,6 +1392,8 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
pbus_assign_resources_sorted(bus, realloc_head, fail_head);
list_for_each_entry(dev, &bus->devices, bus_list) {
+ pdev_assign_fixed_resources(dev);
+
b = dev->subordinate;
if (!b)
continue;
diff --git a/kernel/drivers/pci/setup-res.c b/kernel/drivers/pci/setup-res.c
index 232f9254c..604011e04 100644
--- a/kernel/drivers/pci/setup-res.c
+++ b/kernel/drivers/pci/setup-res.c
@@ -36,6 +36,11 @@ void pci_update_resource(struct pci_dev *dev, int resno)
enum pci_bar_type type;
struct resource *res = dev->resource + resno;
+ if (dev->is_virtfn) {
+ dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
+ return;
+ }
+
/*
* Ignore resources for unimplemented BARs and unused resource slots
* for 64 bit BARs.
@@ -177,6 +182,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
end = res->end;
res->start = fw_addr;
res->end = res->start + size - 1;
+ res->flags &= ~IORESOURCE_UNSET;
root = pci_find_parent_resource(dev, res);
if (!root) {
@@ -194,6 +200,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
resno, res, conflict->name, conflict);
res->start = start;
res->end = end;
+ res->flags |= IORESOURCE_UNSET;
return -EBUSY;
}
return 0;
diff --git a/kernel/drivers/pci/slot.c b/kernel/drivers/pci/slot.c
index 396c200b9..429d34c34 100644
--- a/kernel/drivers/pci/slot.c
+++ b/kernel/drivers/pci/slot.c
@@ -14,6 +14,7 @@
struct kset *pci_slots_kset;
EXPORT_SYMBOL_GPL(pci_slots_kset);
+static DEFINE_MUTEX(pci_slot_mutex);
static ssize_t pci_slot_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
@@ -106,9 +107,11 @@ static void pci_slot_release(struct kobject *kobj)
dev_dbg(&slot->bus->dev, "dev %02x, released physical slot %s\n",
slot->number, pci_slot_name(slot));
+ down_read(&pci_bus_sem);
list_for_each_entry(dev, &slot->bus->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot->number)
dev->slot = NULL;
+ up_read(&pci_bus_sem);
list_del(&slot->list);
@@ -191,12 +194,22 @@ static int rename_slot(struct pci_slot *slot, const char *name)
return result;
}
+void pci_dev_assign_slot(struct pci_dev *dev)
+{
+ struct pci_slot *slot;
+
+ mutex_lock(&pci_slot_mutex);
+ list_for_each_entry(slot, &dev->bus->slots, list)
+ if (PCI_SLOT(dev->devfn) == slot->number)
+ dev->slot = slot;
+ mutex_unlock(&pci_slot_mutex);
+}
+
static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr)
{
struct pci_slot *slot;
- /*
- * We already hold pci_bus_sem so don't worry
- */
+
+ /* We already hold pci_slot_mutex */
list_for_each_entry(slot, &parent->slots, list)
if (slot->number == slot_nr) {
kobject_get(&slot->kobj);
@@ -253,7 +266,7 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
int err = 0;
char *slot_name = NULL;
- down_write(&pci_bus_sem);
+ mutex_lock(&pci_slot_mutex);
if (slot_nr == -1)
goto placeholder;
@@ -301,16 +314,18 @@ placeholder:
INIT_LIST_HEAD(&slot->list);
list_add(&slot->list, &parent->slots);
+ down_read(&pci_bus_sem);
list_for_each_entry(dev, &parent->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot_nr)
dev->slot = slot;
+ up_read(&pci_bus_sem);
dev_dbg(&parent->dev, "dev %02x, created physical slot %s\n",
slot_nr, pci_slot_name(slot));
out:
kfree(slot_name);
- up_write(&pci_bus_sem);
+ mutex_unlock(&pci_slot_mutex);
return slot;
err:
kfree(slot);
@@ -332,9 +347,9 @@ void pci_destroy_slot(struct pci_slot *slot)
dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n",
slot->number, atomic_read(&slot->kobj.kref.refcount) - 1);
- down_write(&pci_bus_sem);
+ mutex_lock(&pci_slot_mutex);
kobject_put(&slot->kobj);
- up_write(&pci_bus_sem);
+ mutex_unlock(&pci_slot_mutex);
}
EXPORT_SYMBOL_GPL(pci_destroy_slot);
diff --git a/kernel/drivers/pci/vc.c b/kernel/drivers/pci/vc.c
index 7e1304d2e..dfbab61a1 100644
--- a/kernel/drivers/pci/vc.c
+++ b/kernel/drivers/pci/vc.c
@@ -108,8 +108,7 @@ static void pci_vc_enable(struct pci_dev *dev, int pos, int res)
struct pci_dev *link = NULL;
/* Enable VCs from the downstream device */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
+ if (!dev->has_secondary_link)
return;
ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
diff --git a/kernel/drivers/pci/xen-pcifront.c b/kernel/drivers/pci/xen-pcifront.c
index 7cfd2db02..5f70fee59 100644
--- a/kernel/drivers/pci/xen-pcifront.c
+++ b/kernel/drivers/pci/xen-pcifront.c
@@ -20,6 +20,7 @@
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/time.h>
+#include <linux/ktime.h>
#include <xen/platform_pci.h>
#include <asm/xen/swiotlb-xen.h>
@@ -52,7 +53,7 @@ struct pcifront_device {
};
struct pcifront_sd {
- int domain;
+ struct pci_sysdata sd;
struct pcifront_device *pdev;
};
@@ -66,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
unsigned int domain, unsigned int bus,
struct pcifront_device *pdev)
{
- sd->domain = domain;
+ /* Because we do not expose that information via XenBus. */
+ sd->sd.node = first_online_node;
+ sd->sd.domain = domain;
sd->pdev = pdev;
}
@@ -115,7 +118,6 @@ static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
evtchn_port_t port = pdev->evtchn;
unsigned irq = pdev->irq;
s64 ns, ns_timeout;
- struct timeval tv;
spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
@@ -132,8 +134,7 @@ static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
* (in the latter case we end up continually re-executing poll() with a
* timeout in the past). 1s difference gives plenty of slack for error.
*/
- do_gettimeofday(&tv);
- ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC;
+ ns_timeout = ktime_get_ns() + 2 * (s64)NSEC_PER_SEC;
xen_clear_irq_pending(irq);
@@ -141,8 +142,7 @@ static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
(unsigned long *)&pdev->sh_info->flags)) {
xen_poll_irq_timeout(irq, jiffies + 3*HZ);
xen_clear_irq_pending(irq);
- do_gettimeofday(&tv);
- ns = timeval_to_ns(&tv);
+ ns = ktime_get_ns();
if (ns > ns_timeout) {
dev_err(&pdev->xdev->dev,
"pciback not responding!!!\n");
@@ -267,7 +267,7 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
}
i = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
op.msix_entries[i].entry = entry->msi_attrib.entry_nr;
/* Vector is useless at this point. */
op.msix_entries[i].vector = -1;
@@ -446,9 +446,15 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
unsigned int domain, unsigned int bus)
{
struct pci_bus *b;
+ LIST_HEAD(resources);
struct pcifront_sd *sd = NULL;
struct pci_bus_entry *bus_entry = NULL;
int err = 0;
+ static struct resource busn_res = {
+ .start = 0,
+ .end = 255,
+ .flags = IORESOURCE_BUS,
+ };
#ifndef CONFIG_PCI_DOMAINS
if (domain != 0) {
@@ -464,23 +470,27 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
domain, bus);
- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
- sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
+ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
if (!bus_entry || !sd) {
err = -ENOMEM;
goto err_out;
}
+ pci_add_resource(&resources, &ioport_resource);
+ pci_add_resource(&resources, &iomem_resource);
+ pci_add_resource(&resources, &busn_res);
pcifront_init_sd(sd, domain, bus, pdev);
pci_lock_rescan_remove();
- b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
- &pcifront_bus_ops, sd);
+ b = pci_scan_root_bus(&pdev->xdev->dev, bus,
+ &pcifront_bus_ops, sd, &resources);
if (!b) {
dev_err(&pdev->xdev->dev,
"Error creating PCI Frontend Bus!\n");
err = -ENOMEM;
pci_unlock_rescan_remove();
+ pci_free_resource_list(&resources);
goto err_out;
}
@@ -488,7 +498,7 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
list_add(&bus_entry->list, &pdev->root_buses);
- /* pci_scan_bus_parented skips devices which do not have a have
+ /* pci_scan_root_bus skips devices which do not have a
* devfn==0. The pcifront_scan_bus enumerates all devfn. */
err = pcifront_scan_bus(pdev, domain, bus, b);