| author | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300 |
|---|---|---|
| committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300 |
| commit | e09b41010ba33a20a87472ee821fa407a5b8da36 | |
| tree | d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/pci/host | |
| parent | f93b97fd65072de626c074dbe099a1fff05ce060 | |
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the RT patch from the RT wiki download page.
During the rebase, the following patch collided:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The colliding patch was dropped because its logic was already present
in the source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/pci/host')
27 files changed, 3276 insertions, 820 deletions
diff --git a/kernel/drivers/pci/host/Kconfig b/kernel/drivers/pci/host/Kconfig index 1dfb567b3..c0ad9aaa1 100644 --- a/kernel/drivers/pci/host/Kconfig +++ b/kernel/drivers/pci/host/Kconfig @@ -5,6 +5,7 @@ config PCI_DRA7XX bool "TI DRA7xx PCIe controller" select PCIE_DW depends on OF && HAS_IOMEM && TI_PIPE3 + depends on BROKEN help Enables support for the PCIe controller in the DRA7xx SoC. There are two instances of PCIe controller in DRA7xx. This controller can @@ -39,7 +40,8 @@ config PCI_TEGRA config PCI_RCAR_GEN2 bool "Renesas R-Car Gen2 Internal PCI controller" - depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST) + depends on ARM + depends on ARCH_SHMOBILE || COMPILE_TEST help Say Y here if you want internal PCI support on R-Car Gen2 SoC. There are 3 internal PCI controllers available with a single @@ -47,13 +49,14 @@ config PCI_RCAR_GEN2 config PCI_RCAR_GEN2_PCIE bool "Renesas R-Car PCIe controller" - depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST) + depends on ARM + depends on ARCH_SHMOBILE || COMPILE_TEST help Say Y here if you want PCIe controller support on R-Car Gen2 SoCs. config PCI_HOST_GENERIC bool "Generic PCI host controller" - depends on ARM && OF + depends on (ARM || ARM64) && OF help Say Y here if you want to support a simple generic PCI host controller, such as the one emulated by kvmtool. @@ -89,14 +92,23 @@ config PCI_XGENE depends on ARCH_XGENE depends on OF select PCIEPORTBUS + select PCI_MSI_IRQ_DOMAIN if PCI_MSI help Say Y here if you want internal PCI support on APM X-Gene SoC. There are 5 internal PCIe ports available. Each port is GEN3 capable and have varied lanes from x1 to x8. +config PCI_XGENE_MSI + bool "X-Gene v1 PCIe MSI feature" + depends on PCI_XGENE && PCI_MSI + default y + help + Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. + This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC. + config PCI_LAYERSCAPE bool "Freescale Layerscape PCIe controller" - depends on OF && ARM + depends on OF && (ARM || ARCH_LAYERSCAPE) select PCIE_DW select MFD_SYSCON help @@ -108,7 +120,7 @@ config PCI_VERSATILE config PCIE_IPROC tristate "Broadcom iProc PCIe controller" - depends on OF && ARM + depends on OF && (ARM || ARM64) default n help This enables the iProc PCIe core controller support for Broadcom's @@ -125,4 +137,40 @@ config PCIE_IPROC_PLATFORM Say Y here if you want to use the Broadcom iProc PCIe controller through the generic platform bus interface +config PCIE_IPROC_BCMA + tristate "Broadcom iProc PCIe BCMA bus driver" + depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST) + select PCIE_IPROC + select BCMA + select PCI_DOMAINS + default ARCH_BCM_5301X + help + Say Y here if you want to use the Broadcom iProc PCIe controller + through the BCMA bus interface + +config PCIE_ALTERA + bool "Altera PCIe controller" + depends on ARM || NIOS2 + depends on OF_PCI + select PCI_DOMAINS + help + Say Y here if you want to enable PCIe controller support on Altera + FPGA. + +config PCIE_ALTERA_MSI + bool "Altera PCIe MSI feature" + depends on PCIE_ALTERA && PCI_MSI + select PCI_MSI_IRQ_DOMAIN + help + Say Y here if you want PCIe MSI support for the Altera FPGA. + This MSI driver supports Altera MSI to GIC controller IP. 
+ +config PCI_HISI + depends on OF && ARM64 + bool "HiSilicon SoC HIP05 PCIe controller" + select PCIEPORTBUS + select PCIE_DW + help + Say Y here if you want PCIe controller support on HiSilicon HIP05 SoC + endmenu diff --git a/kernel/drivers/pci/host/Makefile b/kernel/drivers/pci/host/Makefile index f733b4e27..9d4d3c692 100644 --- a/kernel/drivers/pci/host/Makefile +++ b/kernel/drivers/pci/host/Makefile @@ -11,7 +11,12 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o obj-$(CONFIG_PCI_XGENE) += pci-xgene.o +obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o +obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o +obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o +obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o +obj-$(CONFIG_PCI_HISI) += pcie-hisi.o diff --git a/kernel/drivers/pci/host/pci-dra7xx.c b/kernel/drivers/pci/host/pci-dra7xx.c index 2d57e19a2..923607bda 100644 --- a/kernel/drivers/pci/host/pci-dra7xx.c +++ b/kernel/drivers/pci/host/pci-dra7xx.c @@ -17,6 +17,7 @@ #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of_gpio.h> #include <linux/pci.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> @@ -61,6 +62,7 @@ #define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C #define LINK_UP BIT(16) +#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF struct dra7xx_pcie { void __iomem *base; @@ -83,6 +85,17 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset, writel(value, pcie->base + offset); } +static inline u32 dra7xx_pcie_readl_rc(struct pcie_port *pp, u32 offset) +{ + return readl(pp->dbi_base + offset); +} + +static inline void dra7xx_pcie_writel_rc(struct pcie_port *pp, u32 offset, + u32 value) +{ + writel(value, pp->dbi_base + offset); +} + static int dra7xx_pcie_link_up(struct pcie_port *pp) { struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); @@ -93,9 +106,9 @@ static int dra7xx_pcie_link_up(struct pcie_port *pp) static int dra7xx_pcie_establish_link(struct pcie_port *pp) { - u32 reg; - unsigned int retries = 1000; struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); + u32 reg; + unsigned int retries; if (dw_pcie_link_up(pp)) { dev_err(pp->dev, "link is already up\n"); @@ -106,19 +119,14 @@ static int dra7xx_pcie_establish_link(struct pcie_port *pp) reg |= LTSSM_EN; dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); - while (retries--) { - reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); - if (reg & LINK_UP) - break; + for (retries = 0; retries < 1000; retries++) { + if (dw_pcie_link_up(pp)) + return 0; usleep_range(10, 20); } - if (retries == 0) { - dev_err(pp->dev, "link is not up\n"); - return -ETIMEDOUT; - } - - return 0; + dev_err(pp->dev, "link is not up\n"); + return -EINVAL; } static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp) @@ -144,6 +152,12 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp) static void dra7xx_pcie_host_init(struct pcie_port *pp) { dw_pcie_setup_rc(pp); + + pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR; + pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR; + pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR; + pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR; + dra7xx_pcie_establish_link(pp); if (IS_ENABLED(CONFIG_PCI_MSI)) dw_pcie_msi_init(pp); @@ -160,7 +174,6 @@ static int 
dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, { irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); irq_set_chip_data(irq, domain->host_data); - set_irq_flags(irq, IRQF_VALID); return 0; } @@ -289,7 +302,8 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, } ret = devm_request_irq(&pdev->dev, pp->irq, - dra7xx_pcie_msi_irq_handler, IRQF_SHARED, + dra7xx_pcie_msi_irq_handler, + IRQF_SHARED | IRQF_NO_THREAD, "dra7-pcie-msi", pp); if (ret) { dev_err(&pdev->dev, "failed to request irq\n"); @@ -330,6 +344,9 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; char name[10]; + int gpio_sel; + enum of_gpio_flags flags; + unsigned long gpio_flags; dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); if (!dra7xx) @@ -387,9 +404,25 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); - if (IS_ERR_VALUE(ret)) { + if (ret < 0) { dev_err(dev, "pm_runtime_get_sync failed\n"); - goto err_phy; + goto err_get_sync; + } + + gpio_sel = of_get_gpio_flags(dev->of_node, 0, &flags); + if (gpio_is_valid(gpio_sel)) { + gpio_flags = (flags & OF_GPIO_ACTIVE_LOW) ? + GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH; + ret = devm_gpio_request_one(dev, gpio_sel, gpio_flags, + "pcie_reset"); + if (ret) { + dev_err(&pdev->dev, "gpio%d request failed, ret %d\n", + gpio_sel, ret); + goto err_gpio; + } + } else if (gpio_sel == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err_gpio; } reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); @@ -400,12 +433,14 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) ret = dra7xx_add_pcie_port(dra7xx, pdev); if (ret < 0) - goto err_add_port; + goto err_gpio; return 0; -err_add_port: +err_gpio: pm_runtime_put(dev); + +err_get_sync: pm_runtime_disable(dev); err_phy: @@ -436,6 +471,85 @@ static int __exit dra7xx_pcie_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int dra7xx_pcie_suspend(struct device *dev) +{ + struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); + struct pcie_port *pp = &dra7xx->pp; + u32 val; + + /* clear MSE */ + val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND); + val &= ~PCI_COMMAND_MEMORY; + dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val); + + return 0; +} + +static int dra7xx_pcie_resume(struct device *dev) +{ + struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); + struct pcie_port *pp = &dra7xx->pp; + u32 val; + + /* set MSE */ + val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND); + val |= PCI_COMMAND_MEMORY; + dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val); + + return 0; +} + +static int dra7xx_pcie_suspend_noirq(struct device *dev) +{ + struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); + int count = dra7xx->phy_count; + + while (count--) { + phy_power_off(dra7xx->phy[count]); + phy_exit(dra7xx->phy[count]); + } + + return 0; +} + +static int dra7xx_pcie_resume_noirq(struct device *dev) +{ + struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); + int phy_count = dra7xx->phy_count; + int ret; + int i; + + for (i = 0; i < phy_count; i++) { + ret = phy_init(dra7xx->phy[i]); + if (ret < 0) + goto err_phy; + + ret = phy_power_on(dra7xx->phy[i]); + if (ret < 0) { + phy_exit(dra7xx->phy[i]); + goto err_phy; + } + } + + return 0; + +err_phy: + while (--i >= 0) { + phy_power_off(dra7xx->phy[i]); + phy_exit(dra7xx->phy[i]); + } + + return ret; +} +#endif + +static const struct dev_pm_ops dra7xx_pcie_pm_ops = { + 
SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq, + dra7xx_pcie_resume_noirq) +}; + static const struct of_device_id of_dra7xx_pcie_match[] = { { .compatible = "ti,dra7-pcie", }, {}, @@ -447,6 +561,7 @@ static struct platform_driver dra7xx_pcie_driver = { .driver = { .name = "dra7-pcie", .of_match_table = of_dra7xx_pcie_match, + .pm = &dra7xx_pcie_pm_ops, }, }; diff --git a/kernel/drivers/pci/host/pci-exynos.c b/kernel/drivers/pci/host/pci-exynos.c index c139237e0..d997d22d4 100644 --- a/kernel/drivers/pci/host/pci-exynos.c +++ b/kernel/drivers/pci/host/pci-exynos.c @@ -316,9 +316,9 @@ static void exynos_pcie_assert_reset(struct pcie_port *pp) static int exynos_pcie_establish_link(struct pcie_port *pp) { - u32 val; - int count = 0; struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + u32 val; + unsigned int retries; if (dw_pcie_link_up(pp)) { dev_err(pp->dev, "Link already up\n"); @@ -357,27 +357,23 @@ static int exynos_pcie_establish_link(struct pcie_port *pp) PCIE_APP_LTSSM_ENABLE); /* check if the link is up or not */ - while (!dw_pcie_link_up(pp)) { - mdelay(100); - count++; - if (count == 10) { - while (exynos_phy_readl(exynos_pcie, - PCIE_PHY_PLL_LOCKED) == 0) { - val = exynos_blk_readl(exynos_pcie, - PCIE_PHY_PLL_LOCKED); - dev_info(pp->dev, "PLL Locked: 0x%x\n", val); - } - /* power off phy */ - exynos_pcie_power_off_phy(pp); - - dev_err(pp->dev, "PCIe Link Fail\n"); - return -EINVAL; + for (retries = 0; retries < 10; retries++) { + if (dw_pcie_link_up(pp)) { + dev_info(pp->dev, "Link up\n"); + return 0; } + mdelay(100); } - dev_info(pp->dev, "Link up\n"); + while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) { + val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED); + dev_info(pp->dev, "PLL Locked: 0x%x\n", val); + } + /* power off phy */ + exynos_pcie_power_off_phy(pp); - return 0; + dev_err(pp->dev, "PCIe Link Fail\n"); + return -EINVAL; } static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp) @@ -458,7 +454,7 @@ static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, int ret; exynos_pcie_sideband_dbi_r_mode(pp, true); - ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); + ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val); exynos_pcie_sideband_dbi_r_mode(pp, false); return ret; } @@ -469,8 +465,7 @@ static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, int ret; exynos_pcie_sideband_dbi_w_mode(pp, true); - ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), - where, size, val); + ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val); exynos_pcie_sideband_dbi_w_mode(pp, false); return ret; } @@ -527,7 +522,8 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp, ret = devm_request_irq(&pdev->dev, pp->msi_irq, exynos_pcie_msi_irq_handler, - IRQF_SHARED, "exynos-pcie", pp); + IRQF_SHARED | IRQF_NO_THREAD, + "exynos-pcie", pp); if (ret) { dev_err(&pdev->dev, "failed to request msi irq\n"); return ret; diff --git a/kernel/drivers/pci/host/pci-host-generic.c b/kernel/drivers/pci/host/pci-host-generic.c index ba46e581d..5434c90db 100644 --- a/kernel/drivers/pci/host/pci-host-generic.c +++ b/kernel/drivers/pci/host/pci-host-generic.c @@ -27,7 +27,7 @@ struct gen_pci_cfg_bus_ops { u32 bus_shift; - void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int); + struct pci_ops ops; }; struct gen_pci_cfg_windows { @@ -35,10 +35,19 @@ struct gen_pci_cfg_windows { struct resource *bus_range; void __iomem 
**win; - const struct gen_pci_cfg_bus_ops *ops; + struct gen_pci_cfg_bus_ops *ops; }; +/* + * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI + * sysdata. Add pci_sys_data as the first element in struct gen_pci so + * that when we use a gen_pci pointer as sysdata, it is also a pointer to + * a struct pci_sys_data. + */ struct gen_pci { +#ifdef CONFIG_ARM + struct pci_sys_data sys; +#endif struct pci_host_bridge host; struct gen_pci_cfg_windows cfg; struct list_head resources; @@ -48,8 +57,7 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus, unsigned int devfn, int where) { - struct pci_sys_data *sys = bus->sysdata; - struct gen_pci *pci = sys->private_data; + struct gen_pci *pci = bus->sysdata; resource_size_t idx = bus->number - pci->cfg.bus_range->start; return pci->cfg.win[idx] + ((devfn << 8) | where); @@ -57,15 +65,18 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus, static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = { .bus_shift = 16, - .map_bus = gen_pci_map_cfg_bus_cam, + .ops = { + .map_bus = gen_pci_map_cfg_bus_cam, + .read = pci_generic_config_read, + .write = pci_generic_config_write, + } }; static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus, unsigned int devfn, int where) { - struct pci_sys_data *sys = bus->sysdata; - struct gen_pci *pci = sys->private_data; + struct gen_pci *pci = bus->sysdata; resource_size_t idx = bus->number - pci->cfg.bus_range->start; return pci->cfg.win[idx] + ((devfn << 12) | where); @@ -73,12 +84,11 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus, static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = { .bus_shift = 20, - .map_bus = gen_pci_map_cfg_bus_ecam, -}; - -static struct pci_ops gen_pci_ops = { - .read = pci_generic_config_read, - .write = pci_generic_config_write, + .ops = { + .map_bus = gen_pci_map_cfg_bus_ecam, + .read = pci_generic_config_read, + .write = pci_generic_config_write, + } }; static const struct of_device_id gen_pci_of_match[] = { @@ -159,6 +169,7 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci) struct resource *bus_range; struct device *dev = pci->host.dev.parent; struct device_node *np = dev->of_node; + u32 sz = 1 << pci->cfg.ops->bus_shift; err = of_address_to_resource(np, 0, &pci->cfg.res); if (err) { @@ -186,10 +197,9 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci) bus_range = pci->cfg.bus_range; for (busn = bus_range->start; busn <= bus_range->end; ++busn) { u32 idx = busn - bus_range->start; - u32 sz = 1 << pci->cfg.ops->bus_shift; pci->cfg.win[idx] = devm_ioremap(dev, - pci->cfg.res.start + busn * sz, + pci->cfg.res.start + idx * sz, sz); if (!pci->cfg.win[idx]) return -ENOMEM; @@ -198,29 +208,15 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci) return 0; } -static int gen_pci_setup(int nr, struct pci_sys_data *sys) -{ - struct gen_pci *pci = sys->private_data; - list_splice_init(&pci->resources, &sys->resources); - return 1; -} - static int gen_pci_probe(struct platform_device *pdev) { int err; const char *type; const struct of_device_id *of_id; - const int *prop; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - struct hw_pci hw = { - .nr_controllers = 1, - .private_data = (void **)&pci, - .setup = gen_pci_setup, - .map_irq = of_irq_parse_and_map_pci, - .ops = &gen_pci_ops, - }; + struct pci_bus *bus, *child; if (!pci) return -ENOMEM; @@ -231,17 +227,10 @@ static int 
gen_pci_probe(struct platform_device *pdev) return -EINVAL; } - prop = of_get_property(of_chosen, "linux,pci-probe-only", NULL); - if (prop) { - if (*prop) - pci_add_flags(PCI_PROBE_ONLY); - else - pci_clear_flags(PCI_PROBE_ONLY); - } + of_pci_check_probe_only(); of_id = of_match_node(gen_pci_of_match, np); - pci->cfg.ops = of_id->data; - gen_pci_ops.map_bus = pci->cfg.ops->map_bus; + pci->cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data; pci->host.dev.parent = dev; INIT_LIST_HEAD(&pci->host.windows); INIT_LIST_HEAD(&pci->resources); @@ -258,7 +247,29 @@ static int gen_pci_probe(struct platform_device *pdev) return err; } - pci_common_init_dev(dev, &hw); + /* Do not reassign resources if probe only */ + if (!pci_has_flag(PCI_PROBE_ONLY)) + pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); + + + bus = pci_scan_root_bus(dev, pci->cfg.bus_range->start, + &pci->cfg.ops->ops, pci, &pci->resources); + if (!bus) { + dev_err(dev, "Scanning rootbus failed"); + return -ENODEV; + } + + pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); + + if (!pci_has_flag(PCI_PROBE_ONLY)) { + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + } + + pci_bus_add_devices(bus); return 0; } diff --git a/kernel/drivers/pci/host/pci-imx6.c b/kernel/drivers/pci/host/pci-imx6.c index fdb953677..9ce7cd148 100644 --- a/kernel/drivers/pci/host/pci-imx6.c +++ b/kernel/drivers/pci/host/pci-imx6.c @@ -47,6 +47,8 @@ struct imx6_pcie { #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf +#define PCIE_RC_LCSR 0x80 + /* PCIe Port Logic registers (memory-mapped) */ #define PL_OFFSET 0x700 #define PCIE_PL_PFLR (PL_OFFSET + 0x08) @@ -72,6 +74,7 @@ struct imx6_pcie { /* PHY registers (not memory-mapped) */ #define PCIE_PHY_RX_ASIC_OUT 0x100D +#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) #define PHY_RX_OVRD_IN_LO 0x1005 #define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) @@ -115,11 +118,7 @@ static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr) val = addr << PCIE_PHY_CTRL_DATA_LOC; writel(val, dbi_base + PCIE_PHY_CTRL); - ret = pcie_phy_poll_ack(dbi_base, 0); - if (ret) - return ret; - - return 0; + return pcie_phy_poll_ack(dbi_base, 0); } /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ @@ -146,11 +145,7 @@ static int pcie_phy_read(void __iomem *dbi_base, int addr , int *data) /* deassert Read signal */ writel(0x00, dbi_base + PCIE_PHY_CTRL); - ret = pcie_phy_poll_ack(dbi_base, 0); - if (ret) - return ret; - - return 0; + return pcie_phy_poll_ack(dbi_base, 0); } static int pcie_phy_write(void __iomem *dbi_base, int addr, int data) @@ -335,21 +330,36 @@ static void imx6_pcie_init_phy(struct pcie_port *pp) static int imx6_pcie_wait_for_link(struct pcie_port *pp) { - int count = 200; + unsigned int retries; - while (!dw_pcie_link_up(pp)) { + for (retries = 0; retries < 200; retries++) { + if (dw_pcie_link_up(pp)) + return 0; usleep_range(100, 1000); - if (--count) - continue; - - dev_err(pp->dev, "phy link never came up\n"); - dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", - readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), - readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); - return -EINVAL; } - return 0; + dev_err(pp->dev, "phy link never came up\n"); + dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", + readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), + readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); + return -EINVAL; +} + +static int 
imx6_pcie_wait_for_speed_change(struct pcie_port *pp) +{ + u32 tmp; + unsigned int retries; + + for (retries = 0; retries < 200; retries++) { + tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); + /* Test if the speed change finished. */ + if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) + return 0; + usleep_range(100, 1000); + } + + dev_err(pp->dev, "Speed change timeout\n"); + return -EINVAL; } static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg) @@ -359,11 +369,11 @@ static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg) return dw_handle_msi_irq(pp); } -static int imx6_pcie_start_link(struct pcie_port *pp) +static int imx6_pcie_establish_link(struct pcie_port *pp) { struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); - uint32_t tmp; - int ret, count; + u32 tmp; + int ret; /* * Force Gen1 operation when starting the link. In case the link is @@ -397,29 +407,22 @@ static int imx6_pcie_start_link(struct pcie_port *pp) tmp |= PORT_LOGIC_SPEED_CHANGE; writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); - count = 200; - while (count--) { - tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); - /* Test if the speed change finished. */ - if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) - break; - usleep_range(100, 1000); + ret = imx6_pcie_wait_for_speed_change(pp); + if (ret) { + dev_err(pp->dev, "Failed to bring link up!\n"); + return ret; } /* Make sure link training is finished as well! */ - if (count) - ret = imx6_pcie_wait_for_link(pp); - else - ret = -EINVAL; - + ret = imx6_pcie_wait_for_link(pp); if (ret) { dev_err(pp->dev, "Failed to bring link up!\n"); - } else { - tmp = readl(pp->dbi_base + 0x80); - dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf); + return ret; } - return ret; + tmp = readl(pp->dbi_base + PCIE_RC_LCSR); + dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf); + return 0; } static void imx6_pcie_host_init(struct pcie_port *pp) @@ -432,7 +435,7 @@ static void imx6_pcie_host_init(struct pcie_port *pp) dw_pcie_setup_rc(pp); - imx6_pcie_start_link(pp); + imx6_pcie_establish_link(pp); if (IS_ENABLED(CONFIG_PCI_MSI)) dw_pcie_msi_init(pp); @@ -440,19 +443,19 @@ static void imx6_pcie_host_init(struct pcie_port *pp) static void imx6_pcie_reset_phy(struct pcie_port *pp) { - uint32_t temp; + u32 tmp; - pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp); - temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | - PHY_RX_OVRD_IN_LO_RX_PLL_EN); - pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp); + pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp); + tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | + PHY_RX_OVRD_IN_LO_RX_PLL_EN); + pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp); usleep_range(2000, 3000); - pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp); - temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | + pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp); + tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); - pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp); + pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp); } static int imx6_pcie_link_up(struct pcie_port *pp) @@ -501,7 +504,7 @@ static int imx6_pcie_link_up(struct pcie_port *pp) pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid); debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0); - if (rx_valid & 0x01) + if (rx_valid & PCIE_PHY_RX_ASIC_OUT_VALID) return 0; if ((debug_r0 & 0x3f) != 0x0d) @@ -534,10 +537,11 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp, ret = devm_request_irq(&pdev->dev, pp->msi_irq, imx6_pcie_msi_handler, - IRQF_SHARED, "mx6-pcie-msi", pp); + 
IRQF_SHARED | IRQF_NO_THREAD, + "mx6-pcie-msi", pp); if (ret) { dev_err(&pdev->dev, "failed to request MSI irq\n"); - return -ENODEV; + return ret; } } diff --git a/kernel/drivers/pci/host/pci-keystone-dw.c b/kernel/drivers/pci/host/pci-keystone-dw.c index f34892e0e..6153853ca 100644 --- a/kernel/drivers/pci/host/pci-keystone-dw.c +++ b/kernel/drivers/pci/host/pci-keystone-dw.c @@ -58,11 +58,6 @@ #define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp) -static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) -{ - return sys->private_data; -} - static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, u32 *bit_pos) { @@ -70,7 +65,7 @@ static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, *bit_pos = offset >> 3; } -u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp) +phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp) { struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); @@ -104,14 +99,13 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d) { u32 offset, reg_offset, bit_pos; struct keystone_pcie *ks_pcie; - unsigned int irq = d->irq; struct msi_desc *msi; struct pcie_port *pp; - msi = irq_get_msi_desc(irq); - pp = sys_to_pcie(msi->dev->bus->sysdata); + msi = irq_data_get_msi_desc(d); + pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi); ks_pcie = to_keystone_pcie(pp); - offset = irq - irq_linear_revmap(pp->irq_domain, 0); + offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); update_reg_offset_bit_pos(offset, ®_offset, &bit_pos); writel(BIT(bit_pos), @@ -142,15 +136,14 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) static void ks_dw_pcie_msi_irq_mask(struct irq_data *d) { struct keystone_pcie *ks_pcie; - unsigned int irq = d->irq; struct msi_desc *msi; struct pcie_port *pp; u32 offset; - msi = irq_get_msi_desc(irq); - pp = sys_to_pcie(msi->dev->bus->sysdata); + msi = irq_data_get_msi_desc(d); + pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi); ks_pcie = to_keystone_pcie(pp); - offset = irq - irq_linear_revmap(pp->irq_domain, 0); + offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); /* Mask the end point if PVM implemented */ if (IS_ENABLED(CONFIG_PCI_MSI)) { @@ -164,15 +157,14 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d) static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d) { struct keystone_pcie *ks_pcie; - unsigned int irq = d->irq; struct msi_desc *msi; struct pcie_port *pp; u32 offset; - msi = irq_get_msi_desc(irq); - pp = sys_to_pcie(msi->dev->bus->sysdata); + msi = irq_data_get_msi_desc(d); + pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi); ks_pcie = to_keystone_pcie(pp); - offset = irq - irq_linear_revmap(pp->irq_domain, 0); + offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); /* Mask the end point if PVM implemented */ if (IS_ENABLED(CONFIG_PCI_MSI)) { @@ -196,7 +188,6 @@ static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); - set_irq_flags(irq, IRQF_VALID); return 0; } @@ -277,7 +268,6 @@ static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d, irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip, handle_level_irq); irq_set_chip_data(irq, d->host_data); - set_irq_flags(irq, IRQF_VALID); return 0; } @@ -327,7 +317,7 @@ static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt) void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) { struct pcie_port *pp = &ks_pcie->pp; - u32 start = 
pp->mem.start, end = pp->mem.end; + u32 start = pp->mem->start, end = pp->mem->end; int i, tr_size; /* Disable BARs for inbound access */ @@ -403,7 +393,7 @@ int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); - return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val); + return dw_pcie_cfg_read(addr + where, size, val); } int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, @@ -415,7 +405,7 @@ int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); - return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val); + return dw_pcie_cfg_write(addr + where, size, val); } /** diff --git a/kernel/drivers/pci/host/pci-keystone.c b/kernel/drivers/pci/host/pci-keystone.c index 75333b0c4..0aa81bd3d 100644 --- a/kernel/drivers/pci/host/pci-keystone.c +++ b/kernel/drivers/pci/host/pci-keystone.c @@ -88,7 +88,7 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs); static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) { struct pcie_port *pp = &ks_pcie->pp; - int count = 200; + unsigned int retries; dw_pcie_setup_rc(pp); @@ -99,21 +99,20 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) ks_dw_pcie_initiate_link_train(ks_pcie); /* check if the link is up or not */ - while (!dw_pcie_link_up(pp)) { + for (retries = 0; retries < 200; retries++) { + if (dw_pcie_link_up(pp)) + return 0; usleep_range(100, 1000); - if (--count) { - ks_dw_pcie_initiate_link_train(ks_pcie); - continue; - } - dev_err(pp->dev, "phy link never came up\n"); - return -EINVAL; + ks_dw_pcie_initiate_link_train(ks_pcie); } - return 0; + dev_err(pp->dev, "phy link never came up\n"); + return -EINVAL; } -static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ks_pcie_msi_irq_handler(struct irq_desc *desc) { + unsigned int irq = irq_desc_get_irq(desc); struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); u32 offset = irq - ks_pcie->msi_host_irqs[0]; struct pcie_port *pp = &ks_pcie->pp; @@ -139,8 +138,9 @@ static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc) * Traverse through pending legacy interrupts and invoke handler for each. Also * takes care of interrupt controller level mask/ack operation. 
*/ -static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc) +static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) { + unsigned int irq = irq_desc_get_irq(desc); struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); struct pcie_port *pp = &ks_pcie->pp; u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0]; @@ -214,19 +214,18 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) /* Legacy IRQ */ for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) { - irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie); - irq_set_chained_handler(ks_pcie->legacy_host_irqs[i], - ks_pcie_legacy_irq_handler); + irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i], + ks_pcie_legacy_irq_handler, + ks_pcie); } ks_dw_pcie_enable_legacy_irqs(ks_pcie); /* MSI IRQ */ if (IS_ENABLED(CONFIG_PCI_MSI)) { for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) { - irq_set_chained_handler(ks_pcie->msi_host_irqs[i], - ks_pcie_msi_irq_handler); - irq_set_handler_data(ks_pcie->msi_host_irqs[i], - ks_pcie); + irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i], + ks_pcie_msi_irq_handler, + ks_pcie); } } } diff --git a/kernel/drivers/pci/host/pci-keystone.h b/kernel/drivers/pci/host/pci-keystone.h index 478d932b6..f0944e8c4 100644 --- a/kernel/drivers/pci/host/pci-keystone.h +++ b/kernel/drivers/pci/host/pci-keystone.h @@ -37,7 +37,7 @@ struct keystone_pcie { /* Keystone DW specific MSI controller APIs/definitions */ void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset); -u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp); +phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp); /* Keystone specific PCI controller APIs */ void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); diff --git a/kernel/drivers/pci/host/pci-layerscape.c b/kernel/drivers/pci/host/pci-layerscape.c index 4a6e62f67..3923bed93 100644 --- a/kernel/drivers/pci/host/pci-layerscape.c +++ b/kernel/drivers/pci/host/pci-layerscape.c @@ -3,7 +3,7 @@ * * Copyright (C) 2014 Freescale Semiconductor. 
* - * Author: Minghuan Lian <Minghuan.Lian@freescale.com> + * Author: Minghuan Lian <Minghuan.Lian@freescale.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -11,7 +11,6 @@ */ #include <linux/kernel.h> -#include <linux/delay.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of_pci.h> @@ -32,27 +31,60 @@ #define LTSSM_STATE_MASK 0x3f #define LTSSM_PCIE_L0 0x11 /* L0 state */ -/* Symbol Timer Register and Filter Mask Register 1 */ -#define PCIE_STRFMR1 0x71c +/* PEX Internal Configuration Registers */ +#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ +#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */ + +/* PEX LUT registers */ +#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */ + +struct ls_pcie_drvdata { + u32 lut_offset; + u32 ltssm_shift; + struct pcie_host_ops *ops; +}; struct ls_pcie { - struct list_head node; - struct device *dev; - struct pci_bus *bus; void __iomem *dbi; + void __iomem *lut; struct regmap *scfg; struct pcie_port pp; + const struct ls_pcie_drvdata *drvdata; int index; - int msi_irq; }; #define to_ls_pcie(x) container_of(x, struct ls_pcie, pp) -static int ls_pcie_link_up(struct pcie_port *pp) +static bool ls_pcie_is_bridge(struct ls_pcie *pcie) +{ + u32 header_type; + + header_type = ioread8(pcie->dbi + PCI_HEADER_TYPE); + header_type &= 0x7f; + + return header_type == PCI_HEADER_TYPE_BRIDGE; +} + +/* Clear multi-function bit */ +static void ls_pcie_clear_multifunction(struct ls_pcie *pcie) +{ + iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE); +} + +/* Fix class value */ +static void ls_pcie_fix_class(struct ls_pcie *pcie) +{ + iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE); +} + +static int ls1021_pcie_link_up(struct pcie_port *pp) { u32 state; struct ls_pcie *pcie = to_ls_pcie(pp); + if (!pcie->scfg) + return 0; + regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; @@ -62,22 +94,27 @@ static int ls_pcie_link_up(struct pcie_port *pp) return 1; } -static void ls_pcie_host_init(struct pcie_port *pp) +static void ls1021_pcie_host_init(struct pcie_port *pp) { struct ls_pcie *pcie = to_ls_pcie(pp); - int count = 0; - u32 val; + u32 val, index[2]; - dw_pcie_setup_rc(pp); + pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node, + "fsl,pcie-scfg"); + if (IS_ERR(pcie->scfg)) { + dev_err(pp->dev, "No syscfg phandle specified\n"); + pcie->scfg = NULL; + return; + } - while (!ls_pcie_link_up(pp)) { - usleep_range(100, 1000); - count++; - if (count >= 200) { - dev_err(pp->dev, "phy link never came up\n"); - return; - } + if (of_property_read_u32_array(pp->dev->of_node, + "fsl,pcie-scfg", index, 2)) { + pcie->scfg = NULL; + return; } + pcie->index = index[1]; + + dw_pcie_setup_rc(pp); /* * LS1021A Workaround for internal TKT228622 @@ -88,21 +125,97 @@ static void ls_pcie_host_init(struct pcie_port *pp) iowrite32(val, pcie->dbi + PCIE_STRFMR1); } +static int ls_pcie_link_up(struct pcie_port *pp) +{ + struct ls_pcie *pcie = to_ls_pcie(pp); + u32 state; + + state = (ioread32(pcie->lut + PCIE_LUT_DBG) >> + pcie->drvdata->ltssm_shift) & + LTSSM_STATE_MASK; + + if (state < LTSSM_PCIE_L0) + return 0; + + return 1; +} + +static void ls_pcie_host_init(struct pcie_port *pp) +{ + struct ls_pcie *pcie = to_ls_pcie(pp); + + iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN); + ls_pcie_fix_class(pcie); + 
ls_pcie_clear_multifunction(pcie); + iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); +} + +static int ls_pcie_msi_host_init(struct pcie_port *pp, + struct msi_controller *chip) +{ + struct device_node *msi_node; + struct device_node *np = pp->dev->of_node; + + /* + * The MSI domain is set by the generic of_msi_configure(). This + * .msi_host_init() function keeps us from doing the default MSI + * domain setup in dw_pcie_host_init() and also enforces the + * requirement that "msi-parent" exists. + */ + msi_node = of_parse_phandle(np, "msi-parent", 0); + if (!msi_node) { + dev_err(pp->dev, "failed to find msi-parent\n"); + return -EINVAL; + } + + return 0; +} + +static struct pcie_host_ops ls1021_pcie_host_ops = { + .link_up = ls1021_pcie_link_up, + .host_init = ls1021_pcie_host_init, + .msi_host_init = ls_pcie_msi_host_init, +}; + static struct pcie_host_ops ls_pcie_host_ops = { .link_up = ls_pcie_link_up, .host_init = ls_pcie_host_init, + .msi_host_init = ls_pcie_msi_host_init, }; -static int ls_add_pcie_port(struct ls_pcie *pcie) +static struct ls_pcie_drvdata ls1021_drvdata = { + .ops = &ls1021_pcie_host_ops, +}; + +static struct ls_pcie_drvdata ls1043_drvdata = { + .lut_offset = 0x10000, + .ltssm_shift = 24, + .ops = &ls_pcie_host_ops, +}; + +static struct ls_pcie_drvdata ls2080_drvdata = { + .lut_offset = 0x80000, + .ltssm_shift = 0, + .ops = &ls_pcie_host_ops, +}; + +static const struct of_device_id ls_pcie_of_match[] = { + { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, + { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, + { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, + { }, +}; +MODULE_DEVICE_TABLE(of, ls_pcie_of_match); + +static int __init ls_add_pcie_port(struct pcie_port *pp, + struct platform_device *pdev) { - struct pcie_port *pp; int ret; + struct ls_pcie *pcie = to_ls_pcie(pp); - pp = &pcie->pp; - pp->dev = pcie->dev; + pp->dev = &pdev->dev; pp->dbi_base = pcie->dbi; - pp->root_bus_nr = -1; - pp->ops = &ls_pcie_host_ops; + pp->ops = pcie->drvdata->ops; ret = dw_pcie_host_init(pp); if (ret) { @@ -115,17 +228,19 @@ static int ls_add_pcie_port(struct ls_pcie *pcie) static int __init ls_pcie_probe(struct platform_device *pdev) { + const struct of_device_id *match; struct ls_pcie *pcie; struct resource *dbi_base; - u32 index[2]; int ret; + match = of_match_device(ls_pcie_of_match, &pdev->dev); + if (!match) + return -ENODEV; + pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; - pcie->dev = &pdev->dev; - dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base); if (IS_ERR(pcie->dbi)) { @@ -133,20 +248,13 @@ static int __init ls_pcie_probe(struct platform_device *pdev) return PTR_ERR(pcie->dbi); } - pcie->scfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, - "fsl,pcie-scfg"); - if (IS_ERR(pcie->scfg)) { - dev_err(&pdev->dev, "No syscfg phandle specified\n"); - return PTR_ERR(pcie->scfg); - } + pcie->drvdata = match->data; + pcie->lut = pcie->dbi + pcie->drvdata->lut_offset; - ret = of_property_read_u32_array(pdev->dev.of_node, - "fsl,pcie-scfg", index, 2); - if (ret) - return ret; - pcie->index = index[1]; + if (!ls_pcie_is_bridge(pcie)) + return -ENODEV; - ret = ls_add_pcie_port(pcie); + ret = ls_add_pcie_port(&pcie->pp, pdev); if (ret < 0) return ret; @@ -155,12 +263,6 @@ static int __init ls_pcie_probe(struct platform_device *pdev) return 0; } -static const struct of_device_id ls_pcie_of_match[] = { - { .compatible = 
"fsl,ls1021a-pcie" }, - { }, -}; -MODULE_DEVICE_TABLE(of, ls_pcie_of_match); - static struct platform_driver ls_pcie_driver = { .driver = { .name = "layerscape-pcie", diff --git a/kernel/drivers/pci/host/pci-mvebu.c b/kernel/drivers/pci/host/pci-mvebu.c index 1ab863551..53b79c5f0 100644 --- a/kernel/drivers/pci/host/pci-mvebu.c +++ b/kernel/drivers/pci/host/pci-mvebu.c @@ -30,6 +30,7 @@ #define PCIE_DEV_REV_OFF 0x0008 #define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3)) #define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3)) +#define PCIE_CAP_PCIEXP 0x0060 #define PCIE_HEADER_LOG_4_OFF 0x0128 #define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4)) #define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4)) @@ -57,14 +58,35 @@ #define PCIE_STAT_BUS 0xff00 #define PCIE_STAT_DEV 0x1f0000 #define PCIE_STAT_LINK_DOWN BIT(0) +#define PCIE_RC_RTSTA 0x1a14 #define PCIE_DEBUG_CTRL 0x1a60 #define PCIE_DEBUG_SOFT_RESET BIT(20) +enum { + PCISWCAP = PCI_BRIDGE_CONTROL + 2, + PCISWCAP_EXP_LIST_ID = PCISWCAP + PCI_CAP_LIST_ID, + PCISWCAP_EXP_DEVCAP = PCISWCAP + PCI_EXP_DEVCAP, + PCISWCAP_EXP_DEVCTL = PCISWCAP + PCI_EXP_DEVCTL, + PCISWCAP_EXP_LNKCAP = PCISWCAP + PCI_EXP_LNKCAP, + PCISWCAP_EXP_LNKCTL = PCISWCAP + PCI_EXP_LNKCTL, + PCISWCAP_EXP_SLTCAP = PCISWCAP + PCI_EXP_SLTCAP, + PCISWCAP_EXP_SLTCTL = PCISWCAP + PCI_EXP_SLTCTL, + PCISWCAP_EXP_RTCTL = PCISWCAP + PCI_EXP_RTCTL, + PCISWCAP_EXP_RTSTA = PCISWCAP + PCI_EXP_RTSTA, + PCISWCAP_EXP_DEVCAP2 = PCISWCAP + PCI_EXP_DEVCAP2, + PCISWCAP_EXP_DEVCTL2 = PCISWCAP + PCI_EXP_DEVCTL2, + PCISWCAP_EXP_LNKCAP2 = PCISWCAP + PCI_EXP_LNKCAP2, + PCISWCAP_EXP_LNKCTL2 = PCISWCAP + PCI_EXP_LNKCTL2, + PCISWCAP_EXP_SLTCAP2 = PCISWCAP + PCI_EXP_SLTCAP2, + PCISWCAP_EXP_SLTCTL2 = PCISWCAP + PCI_EXP_SLTCTL2, +}; + /* PCI configuration space of a PCI-to-PCI bridge */ struct mvebu_sw_pci_bridge { u16 vendor; u16 device; u16 command; + u16 status; u16 class; u8 interface; u8 revision; @@ -84,13 +106,15 @@ struct mvebu_sw_pci_bridge { u16 memlimit; u16 iobaseupper; u16 iolimitupper; - u8 cappointer; - u8 reserved1; - u16 reserved2; u32 romaddr; u8 intline; u8 intpin; u16 bridgectrl; + + /* PCI express capability */ + u32 pcie_sltcap; + u16 pcie_devctl; + u16 pcie_rtctl; }; struct mvebu_pcie_port; @@ -119,8 +143,7 @@ struct mvebu_pcie_port { unsigned int io_target; unsigned int io_attr; struct clk *clk; - int reset_gpio; - int reset_active_low; + struct gpio_desc *reset_gpio; char *reset_name; struct mvebu_sw_pci_bridge bridge; struct device_node *dn; @@ -254,15 +277,22 @@ static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { + void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF; + mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), PCIE_CONF_ADDR_OFF); - *val = mvebu_readl(port, PCIE_CONF_DATA_OFF); - - if (size == 1) - *val = (*val >> (8 * (where & 3))) & 0xff; - else if (size == 2) - *val = (*val >> (8 * (where & 3))) & 0xffff; + switch (size) { + case 1: + *val = readb_relaxed(conf_data + (where & 3)); + break; + case 2: + *val = readw_relaxed(conf_data + (where & 2)); + break; + case 4: + *val = readl_relaxed(conf_data); + break; + } return PCIBIOS_SUCCESSFUL; } @@ -271,22 +301,24 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { - u32 _val, shift = 8 * (where & 3); + void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF; mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), PCIE_CONF_ADDR_OFF); - _val = 
mvebu_readl(port, PCIE_CONF_DATA_OFF); - if (size == 4) - _val = val; - else if (size == 2) - _val = (_val & ~(0xffff << shift)) | ((val & 0xffff) << shift); - else if (size == 1) - _val = (_val & ~(0xff << shift)) | ((val & 0xff) << shift); - else + switch (size) { + case 1: + writeb(val, conf_data + (where & 3)); + break; + case 2: + writew(val, conf_data + (where & 2)); + break; + case 4: + writel(val, conf_data); + break; + default: return PCIBIOS_BAD_REGISTER_NUMBER; - - mvebu_writel(port, _val, PCIE_CONF_DATA_OFF); + } return PCIBIOS_SUCCESSFUL; } @@ -443,6 +475,9 @@ static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port) /* We support 32 bits I/O addressing */ bridge->iobase = PCI_IO_RANGE_TYPE_32; bridge->iolimit = PCI_IO_RANGE_TYPE_32; + + /* Add capabilities */ + bridge->status = PCI_STATUS_CAP_LIST; } /* @@ -460,7 +495,7 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, break; case PCI_COMMAND: - *value = bridge->command; + *value = bridge->command | bridge->status << 16; break; case PCI_CLASS_REVISION: @@ -505,6 +540,10 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, *value = (bridge->iolimitupper << 16 | bridge->iobaseupper); break; + case PCI_CAPABILITY_LIST: + *value = PCISWCAP; + break; + case PCI_ROM_ADDRESS1: *value = 0; break; @@ -514,9 +553,67 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, *value = 0; break; + case PCISWCAP_EXP_LIST_ID: + /* Set PCIe v2, root port, slot support */ + *value = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2 | + PCI_EXP_FLAGS_SLOT) << 16 | PCI_CAP_ID_EXP; + break; + + case PCISWCAP_EXP_DEVCAP: + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP); + break; + + case PCISWCAP_EXP_DEVCTL: + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) & + ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | + PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); + *value |= bridge->pcie_devctl; + break; + + case PCISWCAP_EXP_LNKCAP: + /* + * PCIe requires the clock power management capability to be + * hard-wired to zero for downstream ports + */ + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) & + ~PCI_EXP_LNKCAP_CLKPM; + break; + + case PCISWCAP_EXP_LNKCTL: + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); + break; + + case PCISWCAP_EXP_SLTCAP: + *value = bridge->pcie_sltcap; + break; + + case PCISWCAP_EXP_SLTCTL: + *value = PCI_EXP_SLTSTA_PDS << 16; + break; + + case PCISWCAP_EXP_RTCTL: + *value = bridge->pcie_rtctl; + break; + + case PCISWCAP_EXP_RTSTA: + *value = mvebu_readl(port, PCIE_RC_RTSTA); + break; + + /* PCIe requires the v2 fields to be hard-wired to zero */ + case PCISWCAP_EXP_DEVCAP2: + case PCISWCAP_EXP_DEVCTL2: + case PCISWCAP_EXP_LNKCAP2: + case PCISWCAP_EXP_LNKCTL2: + case PCISWCAP_EXP_SLTCAP2: + case PCISWCAP_EXP_SLTCTL2: default: - *value = 0xffffffff; - return PCIBIOS_BAD_REGISTER_NUMBER; + /* + * PCI defines configuration read accesses to reserved or + * unimplemented registers to read as zero and complete + * normally. + */ + *value = 0; + return PCIBIOS_SUCCESSFUL; } if (size == 2) @@ -601,6 +698,51 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port, mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus); break; + case PCISWCAP_EXP_DEVCTL: + /* + * Armada370 data says these bits must always + * be zero when in root complex mode. 
+ */ + value &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | + PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); + + /* + * If the mask is 0xffff0000, then we only want to write + * the device control register, rather than clearing the + * RW1C bits in the device status register. Mask out the + * status register bits. + */ + if (mask == 0xffff0000) + value &= 0xffff; + + mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL); + break; + + case PCISWCAP_EXP_LNKCTL: + /* + * If we don't support CLKREQ, we must ensure that the + * CLKREQ enable bit always reads zero. Since we haven't + * had this capability, and it's dependent on board wiring, + * disable it for the time being. + */ + value &= ~PCI_EXP_LNKCTL_CLKREQ_EN; + + /* + * If the mask is 0xffff0000, then we only want to write + * the link control register, rather than clearing the + * RW1C bits in the link status register. Mask out the + * status register bits. + */ + if (mask == 0xffff0000) + value &= 0xffff; + + mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); + break; + + case PCISWCAP_EXP_RTSTA: + mvebu_writel(port, value, PCIE_RC_RTSTA); + break; + default: break; } @@ -652,17 +794,6 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn, if (!mvebu_pcie_link_up(port)) return PCIBIOS_DEVICE_NOT_FOUND; - /* - * On the secondary bus, we don't want to expose any other - * device than the device physically connected in the PCIe - * slot, visible in slot 0. In slot 1, there's a special - * Marvell device that only makes sense when the Armada is - * used as a PCIe endpoint. - */ - if (bus->number == port->bridge.secondary_bus && - PCI_SLOT(devfn) != 0) - return PCIBIOS_DEVICE_NOT_FOUND; - /* Access the real PCIe interface */ ret = mvebu_pcie_hw_wr_conf(port, bus, devfn, where, size, val); @@ -693,19 +824,6 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, return PCIBIOS_DEVICE_NOT_FOUND; } - /* - * On the secondary bus, we don't want to expose any other - * device than the device physically connected in the PCIe - * slot, visible in slot 0. In slot 1, there's a special - * Marvell device that only makes sense when the Armada is - * used as a PCIe endpoint. 
- */ - if (bus->number == port->bridge.secondary_bus && - PCI_SLOT(devfn) != 0) { - *val = 0xffffffff; - return PCIBIOS_DEVICE_NOT_FOUND; - } - /* Access the real PCIe interface */ ret = mvebu_pcie_hw_rd_conf(port, bus, devfn, where, size, val); @@ -751,21 +869,6 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) return 1; } -static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys) -{ - struct mvebu_pcie *pcie = sys_to_pcie(sys); - struct pci_bus *bus; - - bus = pci_create_root_bus(&pcie->pdev->dev, sys->busnr, - &mvebu_pcie_ops, sys, &sys->resources); - if (!bus) - return NULL; - - pci_scan_child_bus(bus); - - return bus; -} - static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, const struct resource *res, resource_size_t start, @@ -809,12 +912,11 @@ static void mvebu_pcie_enable(struct mvebu_pcie *pcie) hw.nr_controllers = 1; hw.private_data = (void **)&pcie; hw.setup = mvebu_pcie_setup; - hw.scan = mvebu_pcie_scan_bus; hw.map_irq = of_irq_parse_and_map_pci; hw.ops = &mvebu_pcie_ops; hw.align_resource = mvebu_pcie_align_resource; - pci_common_init(&hw); + pci_common_init_dev(&pcie->pdev->dev, &hw); } /* @@ -895,6 +997,7 @@ static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie) return; pcie->msi = of_pci_find_msi_chip_by_node(msi_node); + of_node_put(msi_node); if (pcie->msi) pcie->msi->dev = &pcie->pdev->dev; @@ -929,12 +1032,167 @@ static int mvebu_pcie_resume(struct device *dev) return 0; } +static void mvebu_pcie_port_clk_put(void *data) +{ + struct mvebu_pcie_port *port = data; + + clk_put(port->clk); +} + +static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, + struct mvebu_pcie_port *port, struct device_node *child) +{ + struct device *dev = &pcie->pdev->dev; + enum of_gpio_flags flags; + int reset_gpio, ret; + + port->pcie = pcie; + + if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) { + dev_warn(dev, "ignoring %s, missing pcie-port property\n", + of_node_full_name(child)); + goto skip; + } + + if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane)) + port->lane = 0; + + port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port, + port->lane); + if (!port->name) { + ret = -ENOMEM; + goto err; + } + + port->devfn = of_pci_get_devfn(child); + if (port->devfn < 0) + goto skip; + + ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM, + &port->mem_target, &port->mem_attr); + if (ret < 0) { + dev_err(dev, "%s: cannot get tgt/attr for mem window\n", + port->name); + goto skip; + } + + if (resource_size(&pcie->io) != 0) { + mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO, + &port->io_target, &port->io_attr); + } else { + port->io_target = -1; + port->io_attr = -1; + } + + reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags); + if (reset_gpio == -EPROBE_DEFER) { + ret = reset_gpio; + goto err; + } + + if (gpio_is_valid(reset_gpio)) { + unsigned long gpio_flags; + + port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset", + port->name); + if (!port->reset_name) { + ret = -ENOMEM; + goto err; + } + + if (flags & OF_GPIO_ACTIVE_LOW) { + dev_info(dev, "%s: reset gpio is active low\n", + of_node_full_name(child)); + gpio_flags = GPIOF_ACTIVE_LOW | + GPIOF_OUT_INIT_LOW; + } else { + gpio_flags = GPIOF_OUT_INIT_HIGH; + } + + ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags, + port->reset_name); + if (ret) { + if (ret == -EPROBE_DEFER) + goto err; + goto skip; + } + + port->reset_gpio = gpio_to_desc(reset_gpio); + } + + port->clk = 
of_clk_get_by_name(child, NULL); + if (IS_ERR(port->clk)) { + dev_err(dev, "%s: cannot get clock\n", port->name); + goto skip; + } + + ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port); + if (ret < 0) { + clk_put(port->clk); + goto err; + } + + return 1; + +skip: + ret = 0; + + /* In the case of skipping, we need to free these */ + devm_kfree(dev, port->reset_name); + port->reset_name = NULL; + devm_kfree(dev, port->name); + port->name = NULL; + +err: + return ret; +} + +/* + * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs + * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications + * of the PCI Express Card Electromechanical Specification, 1.1. + */ +static int mvebu_pcie_powerup(struct mvebu_pcie_port *port) +{ + int ret; + + ret = clk_prepare_enable(port->clk); + if (ret < 0) + return ret; + + if (port->reset_gpio) { + u32 reset_udelay = 20000; + + of_property_read_u32(port->dn, "reset-delay-us", + &reset_udelay); + + udelay(100); + + gpiod_set_value_cansleep(port->reset_gpio, 0); + msleep(reset_udelay / 1000); + } + + return 0; +} + +/* + * Power down a PCIe port. Strictly, PCIe requires us to place the card + * in D3hot state before asserting PERST#. + */ +static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port) +{ + if (port->reset_gpio) + gpiod_set_value_cansleep(port->reset_gpio, 1); + + clk_disable_unprepare(port->clk); +} + static int mvebu_pcie_probe(struct platform_device *pdev) { struct mvebu_pcie *pcie; struct device_node *np = pdev->dev.of_node; struct device_node *child; - int i, ret; + int num, i, ret; pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie), GFP_KERNEL); @@ -970,112 +1228,52 @@ static int mvebu_pcie_probe(struct platform_device *pdev) return ret; } - i = 0; - for_each_child_of_node(pdev->dev.of_node, child) { - if (!of_device_is_available(child)) - continue; - i++; - } + num = of_get_available_child_count(pdev->dev.of_node); - pcie->ports = devm_kzalloc(&pdev->dev, i * - sizeof(struct mvebu_pcie_port), + pcie->ports = devm_kcalloc(&pdev->dev, num, sizeof(*pcie->ports), GFP_KERNEL); if (!pcie->ports) return -ENOMEM; i = 0; - for_each_child_of_node(pdev->dev.of_node, child) { + for_each_available_child_of_node(pdev->dev.of_node, child) { struct mvebu_pcie_port *port = &pcie->ports[i]; - enum of_gpio_flags flags; - - if (!of_device_is_available(child)) - continue; - - port->pcie = pcie; - - if (of_property_read_u32(child, "marvell,pcie-port", - &port->port)) { - dev_warn(&pdev->dev, - "ignoring PCIe DT node, missing pcie-port property\n"); - continue; - } - - if (of_property_read_u32(child, "marvell,pcie-lane", - &port->lane)) - port->lane = 0; - port->name = kasprintf(GFP_KERNEL, "pcie%d.%d", - port->port, port->lane); - - port->devfn = of_pci_get_devfn(child); - if (port->devfn < 0) - continue; - - ret = mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_MEM, - &port->mem_target, &port->mem_attr); + ret = mvebu_pcie_parse_port(pcie, port, child); if (ret < 0) { - dev_err(&pdev->dev, "PCIe%d.%d: cannot get tgt/attr for mem window\n", - port->port, port->lane); + of_node_put(child); + return ret; + } else if (ret == 0) { continue; } - if (resource_size(&pcie->io) != 0) - mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO, - &port->io_target, &port->io_attr); - else { - port->io_target = -1; - port->io_attr = -1; - } + port->dn = child; + i++; + } + pcie->nports = i; - port->reset_gpio = of_get_named_gpio_flags(child, - "reset-gpios", 0, &flags); - if (gpio_is_valid(port->reset_gpio)) { - u32 reset_udelay 
= 20000; - - port->reset_active_low = flags & OF_GPIO_ACTIVE_LOW; - port->reset_name = kasprintf(GFP_KERNEL, - "pcie%d.%d-reset", port->port, port->lane); - of_property_read_u32(child, "reset-delay-us", - &reset_udelay); - - ret = devm_gpio_request_one(&pdev->dev, - port->reset_gpio, GPIOF_DIR_OUT, port->reset_name); - if (ret) { - if (ret == -EPROBE_DEFER) - return ret; - continue; - } - - gpio_set_value(port->reset_gpio, - (port->reset_active_low) ? 1 : 0); - msleep(reset_udelay/1000); - } + for (i = 0; i < pcie->nports; i++) { + struct mvebu_pcie_port *port = &pcie->ports[i]; - port->clk = of_clk_get_by_name(child, NULL); - if (IS_ERR(port->clk)) { - dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n", - port->port, port->lane); + child = port->dn; + if (!child) continue; - } - ret = clk_prepare_enable(port->clk); - if (ret) + ret = mvebu_pcie_powerup(port); + if (ret < 0) continue; port->base = mvebu_pcie_map_registers(pdev, child, port); if (IS_ERR(port->base)) { - dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n", - port->port, port->lane); + dev_err(&pdev->dev, "%s: cannot map registers\n", + port->name); port->base = NULL; - clk_disable_unprepare(port->clk); + mvebu_pcie_powerdown(port); continue; } mvebu_pcie_set_local_dev_nr(port, 1); - - port->dn = child; mvebu_sw_pci_bridge_init(port); - i++; } pcie->nports = i; diff --git a/kernel/drivers/pci/host/pci-rcar-gen2.c b/kernel/drivers/pci/host/pci-rcar-gen2.c index 367e28fa7..c4f64bfee 100644 --- a/kernel/drivers/pci/host/pci-rcar-gen2.c +++ b/kernel/drivers/pci/host/pci-rcar-gen2.c @@ -362,6 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev) static struct of_device_id rcar_pci_of_match[] = { { .compatible = "renesas,pci-r8a7790", }, { .compatible = "renesas,pci-r8a7791", }, + { .compatible = "renesas,pci-r8a7794", }, { }, }; diff --git a/kernel/drivers/pci/host/pci-tegra.c b/kernel/drivers/pci/host/pci-tegra.c index 00e92720d..30323114c 100644 --- a/kernel/drivers/pci/host/pci-tegra.c +++ b/kernel/drivers/pci/host/pci-tegra.c @@ -382,8 +382,8 @@ static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where) static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie, unsigned int busnr) { - pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN | - L_PTE_MT_DEV_SHARED | L_PTE_SHARED; + pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | + L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED); phys_addr_t cs = pcie->cs->start; struct tegra_pcie_bus *bus; unsigned int i; @@ -630,21 +630,6 @@ static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin) return irq; } -static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys) -{ - struct tegra_pcie *pcie = sys_to_pcie(sys); - struct pci_bus *bus; - - bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys, - &sys->resources); - if (!bus) - return NULL; - - pci_scan_child_bus(bus); - - return bus; -} - static irqreturn_t tegra_pcie_isr(int irq, void *arg) { const char *err_msg[] = { @@ -1263,7 +1248,6 @@ static int tegra_msi_map(struct irq_domain *domain, unsigned int irq, { irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq); irq_set_chip_data(irq, domain->host_data); - set_irq_flags(irq, IRQF_VALID); tegra_cpuidle_pcie_irqs_in_use(); @@ -1304,7 +1288,7 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) msi->irq = err; - err = request_irq(msi->irq, tegra_pcie_msi_irq, 0, + err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD, 
tegra_msi_irq_chip.name, pcie); if (err < 0) { dev_err(&pdev->dev, "failed to request IRQ: %d\n", err); @@ -1831,7 +1815,6 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie) hw.private_data = (void **)&pcie; hw.setup = tegra_pcie_setup; hw.map_irq = tegra_pcie_map_irq; - hw.scan = tegra_pcie_scan_bus; hw.ops = &tegra_pcie_ops; pci_common_init_dev(pcie->dev, &hw); diff --git a/kernel/drivers/pci/host/pci-xgene-msi.c b/kernel/drivers/pci/host/pci-xgene-msi.c new file mode 100644 index 000000000..a6456b578 --- /dev/null +++ b/kernel/drivers/pci/host/pci-xgene-msi.c @@ -0,0 +1,583 @@ +/* + * APM X-Gene MSI Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Author: Tanmay Inamdar <tinamdar@apm.com> + * Duc Dang <dhdang@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <linux/cpu.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/of_pci.h> + +#define MSI_IR0 0x000000 +#define MSI_INT0 0x800000 +#define IDX_PER_GROUP 8 +#define IRQS_PER_IDX 16 +#define NR_HW_IRQS 16 +#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) + +struct xgene_msi_group { + struct xgene_msi *msi; + int gic_irq; + u32 msi_grp; +}; + +struct xgene_msi { + struct device_node *node; + struct irq_domain *inner_domain; + struct irq_domain *msi_domain; + u64 msi_addr; + void __iomem *msi_regs; + unsigned long *bitmap; + struct mutex bitmap_lock; + struct xgene_msi_group *msi_groups; + int num_cpus; +}; + +/* Global data */ +static struct xgene_msi xgene_msi_ctrl; + +static struct irq_chip xgene_msi_top_irq_chip = { + .name = "X-Gene1 MSI", + .irq_enable = pci_msi_unmask_irq, + .irq_disable = pci_msi_mask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; + +static struct msi_domain_info xgene_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX), + .chip = &xgene_msi_top_irq_chip, +}; + +/* + * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where + * n is group number (0..F), x is index of registers in each group (0..7) + * The register layout is as follows: + * MSI0IR0 base_addr + * MSI0IR1 base_addr + 0x10000 + * ... ... + * MSI0IR6 base_addr + 0x60000 + * MSI0IR7 base_addr + 0x70000 + * MSI1IR0 base_addr + 0x80000 + * MSI1IR1 base_addr + 0x90000 + * ... ... + * MSI1IR7 base_addr + 0xF0000 + * MSI2IR0 base_addr + 0x100000 + * ... ... + * MSIFIR0 base_addr + 0x780000 + * MSIFIR1 base_addr + 0x790000 + * ... ... + * MSIFIR7 base_addr + 0x7F0000 + * MSIINT0 base_addr + 0x800000 + * MSIINT1 base_addr + 0x810000 + * ... ... + * MSIINTF base_addr + 0x8F0000 + * + * Each index register supports 16 MSI vectors (0..15) to generate interrupt. + * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination + * registers. 
+ * + * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate + * the MSI pending status caused by 1 of its 8 index registers. + */ + +/* MSInIRx read helper */ +static u32 xgene_msi_ir_read(struct xgene_msi *msi, + u32 msi_grp, u32 msir_idx) +{ + return readl_relaxed(msi->msi_regs + MSI_IR0 + + (msi_grp << 19) + (msir_idx << 16)); +} + +/* MSIINTn read helper */ +static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp) +{ + return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16)); +} + +/* + * With 2048 MSI vectors supported, the MSI message can be constructed using + * following scheme: + * - Divide into 8 256-vector groups + * Group 0: 0-255 + * Group 1: 256-511 + * Group 2: 512-767 + * ... + * Group 7: 1792-2047 + * - Each 256-vector group is divided into 16 16-vector groups + * As an example: 16 16-vector groups for 256-vector group 0-255 is + * Group 0: 0-15 + * Group 1: 16-32 + * ... + * Group 15: 240-255 + * - The termination address of MSI vector in 256-vector group n and 16-vector + * group x is the address of MSIxIRn + * - The data for MSI vector in 16-vector group x is x + */ +static u32 hwirq_to_reg_set(unsigned long hwirq) +{ + return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX)); +} + +static u32 hwirq_to_group(unsigned long hwirq) +{ + return (hwirq % NR_HW_IRQS); +} + +static u32 hwirq_to_msi_data(unsigned long hwirq) +{ + return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX); +} + +static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct xgene_msi *msi = irq_data_get_irq_chip_data(data); + u32 reg_set = hwirq_to_reg_set(data->hwirq); + u32 group = hwirq_to_group(data->hwirq); + u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16); + + msg->address_hi = upper_32_bits(target_addr); + msg->address_lo = lower_32_bits(target_addr); + msg->data = hwirq_to_msi_data(data->hwirq); +} + +/* + * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain + * the expected behaviour of .set_affinity for each MSI interrupt, the 16 + * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs + * for each core). The MSI vector is moved fom 1 MSI GIC IRQ to another + * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a + * consequence, the total MSI vectors that X-Gene v1 supports will be + * reduced to 256 (2048/8) vectors. 
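+ * As a worked example (assuming all 8 cores are usable, so num_cpus == 8):
+ * a vector whose canonical hwirq is 32 reserves the eight hwirqs 32..39.
+ * hwirq 32 has group 0, index 0 and data 2, so the endpoint writes data 2
+ * to MSI0IR0 and the vector is serviced by group 0's GIC IRQ, which is
+ * affine to CPU 0.  Retargeting it to CPU 3 rewrites the hwirq to
+ * 32 + 3 = 35, i.e. MSI3IR0 with the same data, and group 3's GIC IRQ is
+ * affine to CPU 3.  Reserving 8 hwirqs per vector is what reduces the
+ * usable count to 2048 / 8 = 256.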
+ */ +static int hwirq_to_cpu(unsigned long hwirq) +{ + return (hwirq % xgene_msi_ctrl.num_cpus); +} + +static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq) +{ + return (hwirq - hwirq_to_cpu(hwirq)); +} + +static int xgene_msi_set_affinity(struct irq_data *irqdata, + const struct cpumask *mask, bool force) +{ + int target_cpu = cpumask_first(mask); + int curr_cpu; + + curr_cpu = hwirq_to_cpu(irqdata->hwirq); + if (curr_cpu == target_cpu) + return IRQ_SET_MASK_OK_DONE; + + /* Update MSI number to target the new CPU */ + irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu; + + return IRQ_SET_MASK_OK; +} + +static struct irq_chip xgene_msi_bottom_irq_chip = { + .name = "MSI", + .irq_set_affinity = xgene_msi_set_affinity, + .irq_compose_msi_msg = xgene_compose_msi_msg, +}; + +static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct xgene_msi *msi = domain->host_data; + int msi_irq; + + mutex_lock(&msi->bitmap_lock); + + msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0, + msi->num_cpus, 0); + if (msi_irq < NR_MSI_VEC) + bitmap_set(msi->bitmap, msi_irq, msi->num_cpus); + else + msi_irq = -ENOSPC; + + mutex_unlock(&msi->bitmap_lock); + + if (msi_irq < 0) + return msi_irq; + + irq_domain_set_info(domain, virq, msi_irq, + &xgene_msi_bottom_irq_chip, domain->host_data, + handle_simple_irq, NULL, NULL); + + return 0; +} + +static void xgene_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct xgene_msi *msi = irq_data_get_irq_chip_data(d); + u32 hwirq; + + mutex_lock(&msi->bitmap_lock); + + hwirq = hwirq_to_canonical_hwirq(d->hwirq); + bitmap_clear(msi->bitmap, hwirq, msi->num_cpus); + + mutex_unlock(&msi->bitmap_lock); + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops msi_domain_ops = { + .alloc = xgene_irq_domain_alloc, + .free = xgene_irq_domain_free, +}; + +static int xgene_allocate_domains(struct xgene_msi *msi) +{ + msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC, + &msi_domain_ops, msi); + if (!msi->inner_domain) + return -ENOMEM; + + msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node), + &xgene_msi_domain_info, + msi->inner_domain); + + if (!msi->msi_domain) { + irq_domain_remove(msi->inner_domain); + return -ENOMEM; + } + + return 0; +} + +static void xgene_free_domains(struct xgene_msi *msi) +{ + if (msi->msi_domain) + irq_domain_remove(msi->msi_domain); + if (msi->inner_domain) + irq_domain_remove(msi->inner_domain); +} + +static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) +{ + int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long); + + xgene_msi->bitmap = kzalloc(size, GFP_KERNEL); + if (!xgene_msi->bitmap) + return -ENOMEM; + + mutex_init(&xgene_msi->bitmap_lock); + + xgene_msi->msi_groups = kcalloc(NR_HW_IRQS, + sizeof(struct xgene_msi_group), + GFP_KERNEL); + if (!xgene_msi->msi_groups) + return -ENOMEM; + + return 0; +} + +static void xgene_msi_isr(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct xgene_msi_group *msi_groups; + struct xgene_msi *xgene_msi; + unsigned int virq; + int msir_index, msir_val, hw_irq; + u32 intr_index, grp_select, msi_grp; + + chained_irq_enter(chip, desc); + + msi_groups = irq_desc_get_handler_data(desc); + xgene_msi = msi_groups->msi; + msi_grp = msi_groups->msi_grp; + + /* + * MSIINTn (n is 0..F) indicates if 
there is a pending MSI interrupt + * If bit x of this register is set (x is 0..7), one or more interupts + * corresponding to MSInIRx is set. + */ + grp_select = xgene_msi_int_read(xgene_msi, msi_grp); + while (grp_select) { + msir_index = ffs(grp_select) - 1; + /* + * Calculate MSInIRx address to read to check for interrupts + * (refer to termination address and data assignment + * described in xgene_compose_msi_msg() ) + */ + msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index); + while (msir_val) { + intr_index = ffs(msir_val) - 1; + /* + * Calculate MSI vector number (refer to the termination + * address and data assignment described in + * xgene_compose_msi_msg function) + */ + hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) * + NR_HW_IRQS) + msi_grp; + /* + * As we have multiple hw_irq that maps to single MSI, + * always look up the virq using the hw_irq as seen from + * CPU0 + */ + hw_irq = hwirq_to_canonical_hwirq(hw_irq); + virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq); + WARN_ON(!virq); + if (virq != 0) + generic_handle_irq(virq); + msir_val &= ~(1 << intr_index); + } + grp_select &= ~(1 << msir_index); + + if (!grp_select) { + /* + * We handled all interrupts happened in this group, + * resample this group MSI_INTx register in case + * something else has been made pending in the meantime + */ + grp_select = xgene_msi_int_read(xgene_msi, msi_grp); + } + } + + chained_irq_exit(chip, desc); +} + +static int xgene_msi_remove(struct platform_device *pdev) +{ + int virq, i; + struct xgene_msi *msi = platform_get_drvdata(pdev); + + for (i = 0; i < NR_HW_IRQS; i++) { + virq = msi->msi_groups[i].gic_irq; + if (virq != 0) + irq_set_chained_handler_and_data(virq, NULL, NULL); + } + kfree(msi->msi_groups); + + kfree(msi->bitmap); + msi->bitmap = NULL; + + xgene_free_domains(msi); + + return 0; +} + +static int xgene_msi_hwirq_alloc(unsigned int cpu) +{ + struct xgene_msi *msi = &xgene_msi_ctrl; + struct xgene_msi_group *msi_group; + cpumask_var_t mask; + int i; + int err; + + for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { + msi_group = &msi->msi_groups[i]; + if (!msi_group->gic_irq) + continue; + + irq_set_chained_handler(msi_group->gic_irq, + xgene_msi_isr); + err = irq_set_handler_data(msi_group->gic_irq, msi_group); + if (err) { + pr_err("failed to register GIC IRQ handler\n"); + return -EINVAL; + } + /* + * Statically allocate MSI GIC IRQs to each CPU core. + * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated + * to each core. 
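+ * For example, with num_cpus == 8 the surrounding loop gives CPU 2
+ * the GIC IRQs of MSI groups 2 and 10, and CPU 7 those of groups 7
+ * and 15; irq_set_affinity() below then pins each of those GIC IRQs
+ * to its owning CPU.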
+ */ + if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); + err = irq_set_affinity(msi_group->gic_irq, mask); + if (err) + pr_err("failed to set affinity for GIC IRQ"); + free_cpumask_var(mask); + } else { + pr_err("failed to alloc CPU mask for affinity\n"); + err = -EINVAL; + } + + if (err) { + irq_set_chained_handler_and_data(msi_group->gic_irq, + NULL, NULL); + return err; + } + } + + return 0; +} + +static void xgene_msi_hwirq_free(unsigned int cpu) +{ + struct xgene_msi *msi = &xgene_msi_ctrl; + struct xgene_msi_group *msi_group; + int i; + + for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { + msi_group = &msi->msi_groups[i]; + if (!msi_group->gic_irq) + continue; + + irq_set_chained_handler_and_data(msi_group->gic_irq, NULL, + NULL); + } +} + +static int xgene_msi_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned cpu = (unsigned long)hcpu; + + switch (action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + xgene_msi_hwirq_alloc(cpu); + break; + case CPU_DEAD: + case CPU_DEAD_FROZEN: + xgene_msi_hwirq_free(cpu); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block xgene_msi_cpu_notifier = { + .notifier_call = xgene_msi_cpu_callback, +}; + +static const struct of_device_id xgene_msi_match_table[] = { + {.compatible = "apm,xgene1-msi"}, + {}, +}; + +static int xgene_msi_probe(struct platform_device *pdev) +{ + struct resource *res; + int rc, irq_index; + struct xgene_msi *xgene_msi; + unsigned int cpu; + int virt_msir; + u32 msi_val, msi_idx; + + xgene_msi = &xgene_msi_ctrl; + + platform_set_drvdata(pdev, xgene_msi); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(xgene_msi->msi_regs)) { + dev_err(&pdev->dev, "no reg space\n"); + rc = -EINVAL; + goto error; + } + xgene_msi->msi_addr = res->start; + xgene_msi->node = pdev->dev.of_node; + xgene_msi->num_cpus = num_possible_cpus(); + + rc = xgene_msi_init_allocator(xgene_msi); + if (rc) { + dev_err(&pdev->dev, "Error allocating MSI bitmap\n"); + goto error; + } + + rc = xgene_allocate_domains(xgene_msi); + if (rc) { + dev_err(&pdev->dev, "Failed to allocate MSI domain\n"); + goto error; + } + + for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { + virt_msir = platform_get_irq(pdev, irq_index); + if (virt_msir < 0) { + dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", + irq_index); + rc = -EINVAL; + goto error; + } + xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; + xgene_msi->msi_groups[irq_index].msi_grp = irq_index; + xgene_msi->msi_groups[irq_index].msi = xgene_msi; + } + + /* + * MSInIRx registers are read-to-clear; before registering + * interrupt handlers, read all of them to clear spurious + * interrupts that may occur before the driver is probed. 
+ */ + for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { + for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) + msi_val = xgene_msi_ir_read(xgene_msi, irq_index, + msi_idx); + /* Read MSIINTn to confirm */ + msi_val = xgene_msi_int_read(xgene_msi, irq_index); + if (msi_val) { + dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); + rc = -EINVAL; + goto error; + } + } + + cpu_notifier_register_begin(); + + for_each_online_cpu(cpu) + if (xgene_msi_hwirq_alloc(cpu)) { + dev_err(&pdev->dev, "failed to register MSI handlers\n"); + cpu_notifier_register_done(); + goto error; + } + + rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier); + if (rc) { + dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); + cpu_notifier_register_done(); + goto error; + } + + cpu_notifier_register_done(); + + dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); + + return 0; + +error: + xgene_msi_remove(pdev); + return rc; +} + +static struct platform_driver xgene_msi_driver = { + .driver = { + .name = "xgene-msi", + .of_match_table = xgene_msi_match_table, + }, + .probe = xgene_msi_probe, + .remove = xgene_msi_remove, +}; + +static int __init xgene_pcie_msi_init(void) +{ + return platform_driver_register(&xgene_msi_driver); +} +subsys_initcall(xgene_pcie_msi_init); diff --git a/kernel/drivers/pci/host/pci-xgene.c b/kernel/drivers/pci/host/pci-xgene.c index ee082c036..ae00ce22d 100644 --- a/kernel/drivers/pci/host/pci-xgene.c +++ b/kernel/drivers/pci/host/pci-xgene.c @@ -59,6 +59,12 @@ #define SZ_1T (SZ_1G*1024ULL) #define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) +#define ROOT_CAP_AND_CTRL 0x5C + +/* PCIe IP version */ +#define XGENE_PCIE_IP_VER_UNKN 0 +#define XGENE_PCIE_IP_VER_1 1 + struct xgene_pcie_port { struct device_node *node; struct device *dev; @@ -67,6 +73,7 @@ struct xgene_pcie_port { void __iomem *cfg_base; unsigned long cfg_addr; bool link_up; + u32 version; }; static inline u32 pcie_bar_low_val(u32 addr, u32 flags) @@ -130,9 +137,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int offset) { - struct xgene_pcie_port *port = bus->sysdata; - - if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up || + if ((pci_is_root_bus(bus) && devfn != 0) || xgene_pcie_hide_rc_bars(bus, offset)) return NULL; @@ -140,9 +145,37 @@ static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, return xgene_pcie_get_cfg_base(bus) + offset; } +static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct xgene_pcie_port *port = bus->sysdata; + + if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) != + PCIBIOS_SUCCESSFUL) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* + * The v1 controller has a bug in its Configuration Request + * Retry Status (CRS) logic: when CRS is enabled and we read the + * Vendor and Device ID of a non-existent device, the controller + * fabricates return data of 0xFFFF0001 ("device exists but is not + * ready") instead of 0xFFFFFFFF ("device does not exist"). This + * causes the PCI core to retry the read until it times out. + * Avoid this by not claiming to support CRS. 
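+ * Note: ROOT_CAP_AND_CTRL (0x5C) is the dword holding both the Root
+ * Control register (lower 16 bits) and the Root Capabilities register
+ * (upper 16 bits), which is why PCI_EXP_RTCAP_CRSVIS is shifted left
+ * by 16 before being cleared below.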
+ */ + if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) && + ((where & ~0x3) == ROOT_CAP_AND_CTRL)) + *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); + + if (size <= 2) + *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); + + return PCIBIOS_SUCCESSFUL; +} + static struct pci_ops xgene_pcie_ops = { .map_bus = xgene_pcie_map_bus, - .read = pci_generic_config_read32, + .read = xgene_pcie_config_read32, .write = pci_generic_config_write32, }; @@ -288,8 +321,16 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, return ret; break; case IORESOURCE_MEM: - xgene_pcie_setup_ob_reg(port, res, OMR1BARL, res->start, - res->start - window->offset); + if (res->flags & IORESOURCE_PREFETCH) + xgene_pcie_setup_ob_reg(port, res, OMR2BARL, + res->start, + res->start - + window->offset); + else + xgene_pcie_setup_ob_reg(port, res, OMR1BARL, + res->start, + res->start - + window->offset); break; case IORESOURCE_BUS: break; @@ -483,6 +524,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) port->node = of_node_get(pdev->dev.of_node); port->dev = &pdev->dev; + port->version = XGENE_PCIE_IP_VER_UNKN; + if (of_device_is_compatible(port->node, "apm,xgene-pcie")) + port->version = XGENE_PCIE_IP_VER_1; + ret = xgene_pcie_map_reg(port, pdev); if (ret) return ret; diff --git a/kernel/drivers/pci/host/pcie-altera-msi.c b/kernel/drivers/pci/host/pcie-altera-msi.c new file mode 100644 index 000000000..99177f4cc --- /dev/null +++ b/kernel/drivers/pci/host/pcie-altera-msi.c @@ -0,0 +1,314 @@ +/* + * Copyright Altera Corporation (C) 2013-2015. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/interrupt.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_pci.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define MSI_STATUS 0x0 +#define MSI_ERROR 0x4 +#define MSI_INTMASK 0x8 + +#define MAX_MSI_VECTORS 32 + +struct altera_msi { + DECLARE_BITMAP(used, MAX_MSI_VECTORS); + struct mutex lock; /* protect "used" bitmap */ + struct platform_device *pdev; + struct irq_domain *msi_domain; + struct irq_domain *inner_domain; + void __iomem *csr_base; + void __iomem *vector_base; + phys_addr_t vector_phy; + u32 num_of_vectors; + int irq; +}; + +static inline void msi_writel(struct altera_msi *msi, const u32 value, + const u32 reg) +{ + writel_relaxed(value, msi->csr_base + reg); +} + +static inline u32 msi_readl(struct altera_msi *msi, const u32 reg) +{ + return readl_relaxed(msi->csr_base + reg); +} + +static void altera_msi_isr(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct altera_msi *msi; + unsigned long status; + u32 num_of_vectors; + u32 bit; + u32 virq; + + chained_irq_enter(chip, desc); + msi = irq_desc_get_handler_data(desc); + num_of_vectors = msi->num_of_vectors; + + while ((status = msi_readl(msi, MSI_STATUS)) != 0) { + for_each_set_bit(bit, &status, msi->num_of_vectors) { + /* Dummy read from vector to clear the interrupt */ + readl_relaxed(msi->vector_base + (bit * sizeof(u32))); + + virq = irq_find_mapping(msi->inner_domain, bit); + if (virq) + generic_handle_irq(virq); + else + dev_err(&msi->pdev->dev, "unexpected MSI\n"); + } + } + + chained_irq_exit(chip, desc); +} + +static struct irq_chip altera_msi_irq_chip = { + .name = "Altera PCIe MSI", + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; + +static struct msi_domain_info altera_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX), + .chip = &altera_msi_irq_chip, +}; + +static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct altera_msi *msi = irq_data_get_irq_chip_data(data); + phys_addr_t addr = msi->vector_phy + (data->hwirq * sizeof(u32)); + + msg->address_lo = lower_32_bits(addr); + msg->address_hi = upper_32_bits(addr); + msg->data = data->hwirq; + + dev_dbg(&msi->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n", + (int)data->hwirq, msg->address_hi, msg->address_lo); +} + +static int altera_msi_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) +{ + return -EINVAL; +} + +static struct irq_chip altera_msi_bottom_irq_chip = { + .name = "Altera MSI", + .irq_compose_msi_msg = altera_compose_msi_msg, + .irq_set_affinity = altera_msi_set_affinity, +}; + +static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct altera_msi *msi = domain->host_data; + unsigned long bit; + u32 mask; + + WARN_ON(nr_irqs != 1); + mutex_lock(&msi->lock); + + bit = find_first_zero_bit(msi->used, msi->num_of_vectors); + if (bit >= msi->num_of_vectors) { + mutex_unlock(&msi->lock); + return -ENOSPC; + } + + set_bit(bit, msi->used); + + mutex_unlock(&msi->lock); + + irq_domain_set_info(domain, virq, bit, &altera_msi_bottom_irq_chip, + domain->host_data, handle_simple_irq, + NULL, NULL); + + mask = msi_readl(msi, MSI_INTMASK); + mask |= 1 << bit; + msi_writel(msi, mask, MSI_INTMASK); + + return 0; +} + +static void 
altera_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct altera_msi *msi = irq_data_get_irq_chip_data(d); + u32 mask; + + mutex_lock(&msi->lock); + + if (!test_bit(d->hwirq, msi->used)) { + dev_err(&msi->pdev->dev, "trying to free unused MSI#%lu\n", + d->hwirq); + } else { + __clear_bit(d->hwirq, msi->used); + mask = msi_readl(msi, MSI_INTMASK); + mask &= ~(1 << d->hwirq); + msi_writel(msi, mask, MSI_INTMASK); + } + + mutex_unlock(&msi->lock); +} + +static const struct irq_domain_ops msi_domain_ops = { + .alloc = altera_irq_domain_alloc, + .free = altera_irq_domain_free, +}; + +static int altera_allocate_domains(struct altera_msi *msi) +{ + struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node); + + msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, + &msi_domain_ops, msi); + if (!msi->inner_domain) { + dev_err(&msi->pdev->dev, "failed to create IRQ domain\n"); + return -ENOMEM; + } + + msi->msi_domain = pci_msi_create_irq_domain(fwnode, + &altera_msi_domain_info, msi->inner_domain); + if (!msi->msi_domain) { + dev_err(&msi->pdev->dev, "failed to create MSI domain\n"); + irq_domain_remove(msi->inner_domain); + return -ENOMEM; + } + + return 0; +} + +static void altera_free_domains(struct altera_msi *msi) +{ + irq_domain_remove(msi->msi_domain); + irq_domain_remove(msi->inner_domain); +} + +static int altera_msi_remove(struct platform_device *pdev) +{ + struct altera_msi *msi = platform_get_drvdata(pdev); + + msi_writel(msi, 0, MSI_INTMASK); + irq_set_chained_handler(msi->irq, NULL); + irq_set_handler_data(msi->irq, NULL); + + altera_free_domains(msi); + + platform_set_drvdata(pdev, NULL); + return 0; +} + +static int altera_msi_probe(struct platform_device *pdev) +{ + struct altera_msi *msi; + struct device_node *np = pdev->dev.of_node; + struct resource *res; + int ret; + + msi = devm_kzalloc(&pdev->dev, sizeof(struct altera_msi), + GFP_KERNEL); + if (!msi) + return -ENOMEM; + + mutex_init(&msi->lock); + msi->pdev = pdev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); + if (!res) { + dev_err(&pdev->dev, "no csr memory resource defined\n"); + return -ENODEV; + } + + msi->csr_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(msi->csr_base)) { + dev_err(&pdev->dev, "failed to map csr memory\n"); + return PTR_ERR(msi->csr_base); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "vector_slave"); + if (!res) { + dev_err(&pdev->dev, "no vector_slave memory resource defined\n"); + return -ENODEV; + } + + msi->vector_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(msi->vector_base)) { + dev_err(&pdev->dev, "failed to map vector_slave memory\n"); + return PTR_ERR(msi->vector_base); + } + + msi->vector_phy = res->start; + + if (of_property_read_u32(np, "num-vectors", &msi->num_of_vectors)) { + dev_err(&pdev->dev, "failed to parse the number of vectors\n"); + return -EINVAL; + } + + ret = altera_allocate_domains(msi); + if (ret) + return ret; + + msi->irq = platform_get_irq(pdev, 0); + if (msi->irq <= 0) { + dev_err(&pdev->dev, "failed to map IRQ: %d\n", msi->irq); + ret = -ENODEV; + goto err; + } + + irq_set_chained_handler_and_data(msi->irq, altera_msi_isr, msi); + platform_set_drvdata(pdev, msi); + + return 0; + +err: + altera_msi_remove(pdev); + return ret; +} + +static const struct of_device_id altera_msi_of_match[] = { + { .compatible = "altr,msi-1.0", NULL }, + { }, +}; + +static struct 
platform_driver altera_msi_driver = { + .driver = { + .name = "altera-msi", + .of_match_table = altera_msi_of_match, + }, + .probe = altera_msi_probe, + .remove = altera_msi_remove, +}; + +static int __init altera_msi_init(void) +{ + return platform_driver_register(&altera_msi_driver); +} +subsys_initcall(altera_msi_init); + +MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>"); +MODULE_DESCRIPTION("Altera PCIe MSI support"); +MODULE_LICENSE("GPL v2"); diff --git a/kernel/drivers/pci/host/pcie-altera.c b/kernel/drivers/pci/host/pcie-altera.c new file mode 100644 index 000000000..99da549d5 --- /dev/null +++ b/kernel/drivers/pci/host/pcie-altera.c @@ -0,0 +1,588 @@ +/* + * Copyright Altera Corporation (C) 2013-2015. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_pci.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define RP_TX_REG0 0x2000 +#define RP_TX_REG1 0x2004 +#define RP_TX_CNTRL 0x2008 +#define RP_TX_EOP 0x2 +#define RP_TX_SOP 0x1 +#define RP_RXCPL_STATUS 0x2010 +#define RP_RXCPL_EOP 0x2 +#define RP_RXCPL_SOP 0x1 +#define RP_RXCPL_REG0 0x2014 +#define RP_RXCPL_REG1 0x2018 +#define P2A_INT_STATUS 0x3060 +#define P2A_INT_STS_ALL 0xf +#define P2A_INT_ENABLE 0x3070 +#define P2A_INT_ENA_ALL 0xf +#define RP_LTSSM 0x3c64 +#define LTSSM_L0 0xf + +/* TLP configuration type 0 and 1 */ +#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ +#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */ +#define TLP_FMTTYPE_CFGRD1 0x05 /* Configuration Read Type 1 */ +#define TLP_FMTTYPE_CFGWR1 0x45 /* Configuration Write Type 1 */ +#define TLP_PAYLOAD_SIZE 0x01 +#define TLP_READ_TAG 0x1d +#define TLP_WRITE_TAG 0x10 +#define TLP_CFG_DW0(fmttype) (((fmttype) << 24) | TLP_PAYLOAD_SIZE) +#define TLP_CFG_DW1(reqid, tag, be) (((reqid) << 16) | (tag << 8) | (be)) +#define TLP_CFG_DW2(bus, devfn, offset) \ + (((bus) << 24) | ((devfn) << 16) | (offset)) +#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) +#define TLP_COMP_STATUS(s) (((s) >> 12) & 7) +#define TLP_HDR_SIZE 3 +#define TLP_LOOP 500 +#define RP_DEVFN 0 + +#define INTX_NUM 4 + +#define DWORD_MASK 3 + +struct altera_pcie { + struct platform_device *pdev; + void __iomem *cra_base; + int irq; + u8 root_bus_nr; + struct irq_domain *irq_domain; + struct resource bus_range; + struct list_head resources; +}; + +struct tlp_rp_regpair_t { + u32 ctrl; + u32 reg0; + u32 reg1; +}; + +static void altera_pcie_retrain(struct pci_dev *dev) +{ + u16 linkcap, linkstat; + + /* + * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but + * current speed is 2.5 GB/s. 
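+ * PCI_EXP_LNKCAP_SLS gives the fastest speed the port supports and
+ * PCI_EXP_LNKSTA_CLS the speed the link actually trained at; if a
+ * faster-capable port is sitting at 2.5 GT/s (Gen1), setting the
+ * PCI_EXP_LNKCTL_RL bit requests a link retrain.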
+ */ + pcie_capability_read_word(dev, PCI_EXP_LNKCAP, &linkcap); + + if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB) + return; + + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat); + if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) + pcie_capability_set_word(dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_RL); +} +DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain); + +/* + * Altera PCIe port uses BAR0 of RC's configuration space as the translation + * from PCI bus to native BUS. Entire DDR region is mapped into PCIe space + * using these registers, so it can be reached by DMA from EP devices. + * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt + * from EP devices, eventually trigger interrupt to GIC. The BAR0 of bridge + * should be hidden during enumeration to avoid the sizing and resource + * allocation by PCIe core. + */ +static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn, + int offset) +{ + if (pci_is_root_bus(bus) && (devfn == 0) && + (offset == PCI_BASE_ADDRESS_0)) + return true; + + return false; +} + +static inline void cra_writel(struct altera_pcie *pcie, const u32 value, + const u32 reg) +{ + writel_relaxed(value, pcie->cra_base + reg); +} + +static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) +{ + return readl_relaxed(pcie->cra_base + reg); +} + +static void tlp_write_tx(struct altera_pcie *pcie, + struct tlp_rp_regpair_t *tlp_rp_regdata) +{ + cra_writel(pcie, tlp_rp_regdata->reg0, RP_TX_REG0); + cra_writel(pcie, tlp_rp_regdata->reg1, RP_TX_REG1); + cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL); +} + +static bool altera_pcie_link_is_up(struct altera_pcie *pcie) +{ + return !!(cra_readl(pcie, RP_LTSSM) & LTSSM_L0); +} + +static bool altera_pcie_valid_config(struct altera_pcie *pcie, + struct pci_bus *bus, int dev) +{ + /* If there is no link, then there is no device */ + if (bus->number != pcie->root_bus_nr) { + if (!altera_pcie_link_is_up(pcie)) + return false; + } + + /* access only one slot on each root port */ + if (bus->number == pcie->root_bus_nr && dev > 0) + return false; + + /* + * Do not read more than one device on the bus directly attached + * to root port, root port can only attach to one downstream port. + */ + if (bus->primary == pcie->root_bus_nr && dev > 0) + return false; + + return true; +} + +static int tlp_read_packet(struct altera_pcie *pcie, u32 *value) +{ + int i; + bool sop = 0; + u32 ctrl; + u32 reg0, reg1; + u32 comp_status = 1; + + /* + * Minimum 2 loops to read TLP headers and 1 loop to read data + * payload. 
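+ * Each iteration polls RP_RXCPL_STATUS: the beat flagged RP_RXCPL_SOP
+ * carries the completion header (its status field is read from
+ * RP_RXCPL_REG1), and the beat flagged RP_RXCPL_EOP carries the
+ * optional data dword in RP_RXCPL_REG0.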
+ */ + for (i = 0; i < TLP_LOOP; i++) { + ctrl = cra_readl(pcie, RP_RXCPL_STATUS); + if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) { + reg0 = cra_readl(pcie, RP_RXCPL_REG0); + reg1 = cra_readl(pcie, RP_RXCPL_REG1); + + if (ctrl & RP_RXCPL_SOP) { + sop = true; + comp_status = TLP_COMP_STATUS(reg1); + } + + if (ctrl & RP_RXCPL_EOP) { + if (comp_status) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (value) + *value = reg0; + + return PCIBIOS_SUCCESSFUL; + } + } + udelay(5); + } + + return PCIBIOS_DEVICE_NOT_FOUND; +} + +static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, + u32 data, bool align) +{ + struct tlp_rp_regpair_t tlp_rp_regdata; + + tlp_rp_regdata.reg0 = headers[0]; + tlp_rp_regdata.reg1 = headers[1]; + tlp_rp_regdata.ctrl = RP_TX_SOP; + tlp_write_tx(pcie, &tlp_rp_regdata); + + if (align) { + tlp_rp_regdata.reg0 = headers[2]; + tlp_rp_regdata.reg1 = 0; + tlp_rp_regdata.ctrl = 0; + tlp_write_tx(pcie, &tlp_rp_regdata); + + tlp_rp_regdata.reg0 = data; + tlp_rp_regdata.reg1 = 0; + } else { + tlp_rp_regdata.reg0 = headers[2]; + tlp_rp_regdata.reg1 = data; + } + + tlp_rp_regdata.ctrl = RP_TX_EOP; + tlp_write_tx(pcie, &tlp_rp_regdata); +} + +static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, + int where, u8 byte_en, u32 *value) +{ + u32 headers[TLP_HDR_SIZE]; + + if (bus == pcie->root_bus_nr) + headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD0); + else + headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1); + + headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN), + TLP_READ_TAG, byte_en); + headers[2] = TLP_CFG_DW2(bus, devfn, where); + + tlp_write_packet(pcie, headers, 0, false); + + return tlp_read_packet(pcie, value); +} + +static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, + int where, u8 byte_en, u32 value) +{ + u32 headers[TLP_HDR_SIZE]; + int ret; + + if (bus == pcie->root_bus_nr) + headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR0); + else + headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1); + + headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN), + TLP_WRITE_TAG, byte_en); + headers[2] = TLP_CFG_DW2(bus, devfn, where); + + /* check alignment to Qword */ + if ((where & 0x7) == 0) + tlp_write_packet(pcie, headers, value, true); + else + tlp_write_packet(pcie, headers, value, false); + + ret = tlp_read_packet(pcie, NULL); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; + + /* + * Monitor changes to PCI_PRIMARY_BUS register on root port + * and update local copy of root bus number accordingly. 
+ */ + if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS)) + pcie->root_bus_nr = (u8)(value); + + return PCIBIOS_SUCCESSFUL; +} + +static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + struct altera_pcie *pcie = bus->sysdata; + int ret; + u32 data; + u8 byte_en; + + if (altera_pcie_hide_rc_bar(bus, devfn, where)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) { + *value = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + switch (size) { + case 1: + byte_en = 1 << (where & 3); + break; + case 2: + byte_en = 3 << (where & 3); + break; + default: + byte_en = 0xf; + break; + } + + ret = tlp_cfg_dword_read(pcie, bus->number, devfn, + (where & ~DWORD_MASK), byte_en, &data); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; + + switch (size) { + case 1: + *value = (data >> (8 * (where & 0x3))) & 0xff; + break; + case 2: + *value = (data >> (8 * (where & 0x2))) & 0xffff; + break; + default: + *value = data; + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + struct altera_pcie *pcie = bus->sysdata; + u32 data32; + u32 shift = 8 * (where & 3); + u8 byte_en; + + if (altera_pcie_hide_rc_bar(bus, devfn, where)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (size) { + case 1: + data32 = (value & 0xff) << shift; + byte_en = 1 << (where & 3); + break; + case 2: + data32 = (value & 0xffff) << shift; + byte_en = 3 << (where & 3); + break; + default: + data32 = value; + byte_en = 0xf; + break; + } + + return tlp_cfg_dword_write(pcie, bus->number, devfn, + (where & ~DWORD_MASK), byte_en, data32); +} + +static struct pci_ops altera_pcie_ops = { + .read = altera_pcie_cfg_read, + .write = altera_pcie_cfg_write, +}; + +static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops intx_domain_ops = { + .map = altera_pcie_intx_map, +}; + +static void altera_pcie_isr(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct altera_pcie *pcie; + unsigned long status; + u32 bit; + u32 virq; + + chained_irq_enter(chip, desc); + pcie = irq_desc_get_handler_data(desc); + + while ((status = cra_readl(pcie, P2A_INT_STATUS) + & P2A_INT_STS_ALL) != 0) { + for_each_set_bit(bit, &status, INTX_NUM) { + /* clear interrupts */ + cra_writel(pcie, 1 << bit, P2A_INT_STATUS); + + virq = irq_find_mapping(pcie->irq_domain, bit + 1); + if (virq) + generic_handle_irq(virq); + else + dev_err(&pcie->pdev->dev, + "unexpected IRQ, INT%d\n", bit); + } + } + + chained_irq_exit(chip, desc); +} + +static void altera_pcie_release_of_pci_ranges(struct altera_pcie *pcie) +{ + pci_free_resource_list(&pcie->resources); +} + +static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie) +{ + int err, res_valid = 0; + struct device *dev = &pcie->pdev->dev; + struct device_node *np = dev->of_node; + struct resource_entry *win; + + err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources, + NULL); + if (err) + return err; + + resource_list_for_each_entry(win, &pcie->resources) { + struct resource *parent, *res = win->res; + + switch (resource_type(res)) { + 
case IORESOURCE_MEM: + parent = &iomem_resource; + res_valid |= !(res->flags & IORESOURCE_PREFETCH); + break; + default: + continue; + } + + err = devm_request_resource(dev, parent, res); + if (err) + goto out_release_res; + } + + if (!res_valid) { + dev_err(dev, "non-prefetchable memory resource required\n"); + err = -EINVAL; + goto out_release_res; + } + + return 0; + +out_release_res: + altera_pcie_release_of_pci_ranges(pcie); + return err; +} + +static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + struct device_node *node = dev->of_node; + + /* Setup INTx */ + pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM + 1, + &intx_domain_ops, pcie); + if (!pcie->irq_domain) { + dev_err(dev, "Failed to get a INTx IRQ domain\n"); + return -ENOMEM; + } + + return 0; +} + +static int altera_pcie_parse_dt(struct altera_pcie *pcie) +{ + struct resource *cra; + struct platform_device *pdev = pcie->pdev; + + cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra"); + if (!cra) { + dev_err(&pdev->dev, "no Cra memory resource defined\n"); + return -ENODEV; + } + + pcie->cra_base = devm_ioremap_resource(&pdev->dev, cra); + if (IS_ERR(pcie->cra_base)) { + dev_err(&pdev->dev, "failed to map cra memory\n"); + return PTR_ERR(pcie->cra_base); + } + + /* setup IRQ */ + pcie->irq = platform_get_irq(pdev, 0); + if (pcie->irq <= 0) { + dev_err(&pdev->dev, "failed to get IRQ: %d\n", pcie->irq); + return -EINVAL; + } + + irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie); + + return 0; +} + +static int altera_pcie_probe(struct platform_device *pdev) +{ + struct altera_pcie *pcie; + struct pci_bus *bus; + struct pci_bus *child; + int ret; + + pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->pdev = pdev; + + ret = altera_pcie_parse_dt(pcie); + if (ret) { + dev_err(&pdev->dev, "Parsing DT failed\n"); + return ret; + } + + INIT_LIST_HEAD(&pcie->resources); + + ret = altera_pcie_parse_request_of_pci_ranges(pcie); + if (ret) { + dev_err(&pdev->dev, "Failed add resources\n"); + return ret; + } + + ret = altera_pcie_init_irq_domain(pcie); + if (ret) { + dev_err(&pdev->dev, "Failed creating IRQ Domain\n"); + return ret; + } + + /* clear all interrupts */ + cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); + /* enable all interrupts */ + cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); + + bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops, + pcie, &pcie->resources); + if (!bus) + return -ENOMEM; + + pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); + pci_assign_unassigned_bus_resources(bus); + + /* Configure PCI Express setting. 
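+ * pcie_bus_configure_settings() walks the child bus and programs each
+ * device's Max Payload Size / Max Read Request Size according to the
+ * kernel's PCIe bus tuning policy.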
*/ + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(bus); + + platform_set_drvdata(pdev, pcie); + return ret; +} + +static const struct of_device_id altera_pcie_of_match[] = { + { .compatible = "altr,pcie-root-port-1.0", }, + {}, +}; +MODULE_DEVICE_TABLE(of, altera_pcie_of_match); + +static struct platform_driver altera_pcie_driver = { + .probe = altera_pcie_probe, + .driver = { + .name = "altera-pcie", + .of_match_table = altera_pcie_of_match, + .suppress_bind_attrs = true, + }, +}; + +static int altera_pcie_init(void) +{ + return platform_driver_register(&altera_pcie_driver); +} +module_init(altera_pcie_init); + +MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>"); +MODULE_DESCRIPTION("Altera PCIe host controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/kernel/drivers/pci/host/pcie-designware.c b/kernel/drivers/pci/host/pcie-designware.c index 2e9f84fdd..02a7452bd 100644 --- a/kernel/drivers/pci/host/pcie-designware.c +++ b/kernel/drivers/pci/host/pcie-designware.c @@ -31,13 +31,15 @@ #define PORT_LINK_MODE_1_LANES (0x1 << 16) #define PORT_LINK_MODE_2_LANES (0x3 << 16) #define PORT_LINK_MODE_4_LANES (0x7 << 16) +#define PORT_LINK_MODE_8_LANES (0xf << 16) #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) -#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8) +#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8) #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) +#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) #define PCIE_MSI_ADDR_LO 0x820 #define PCIE_MSI_ADDR_HI 0x824 @@ -67,39 +69,40 @@ #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) #define PCIE_ATU_UPPER_TARGET 0x91C -static struct hw_pci dw_pci; +static struct pci_ops dw_pcie_ops; -static unsigned long global_io_offset; - -static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) -{ - BUG_ON(!sys->private_data); - - return sys->private_data; -} - -int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val) +int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val) { - *val = readl(addr); + if ((uintptr_t)addr & (size - 1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } - if (size == 1) - *val = (*val >> (8 * (where & 3))) & 0xff; + if (size == 4) + *val = readl(addr); else if (size == 2) - *val = (*val >> (8 * (where & 3))) & 0xffff; - else if (size != 4) + *val = readw(addr); + else if (size == 1) + *val = readb(addr); + else { + *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; + } return PCIBIOS_SUCCESSFUL; } -int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val) +int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val) { + if ((uintptr_t)addr & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + if (size == 4) writel(val, addr); else if (size == 2) - writew(val, addr + (where & 2)); + writew(val, addr); else if (size == 1) - writeb(val, addr + (where & 3)); + writeb(val, addr); else return PCIBIOS_BAD_REGISTER_NUMBER; @@ -130,8 +133,7 @@ static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, if (pp->ops->rd_own_conf) ret = pp->ops->rd_own_conf(pp, where, size, val); else - ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, - size, val); + ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val); return ret; } @@ -144,12 +146,26 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, if (pp->ops->wr_own_conf) ret = pp->ops->wr_own_conf(pp, 
where, size, val); else - ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), where, - size, val); + ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val); return ret; } +static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index, + int type, u64 cpu_addr, u64 pci_addr, u32 size) +{ + dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, + PCIE_ATU_VIEWPORT); + dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE); + dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE); + dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1), + PCIE_ATU_LIMIT); + dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET); + dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET); + dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1); + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); +} + static struct irq_chip dw_msi_irq_chip = { .name = "PCI-MSI", .irq_enable = pci_msi_unmask_irq, @@ -188,12 +204,16 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) void dw_pcie_msi_init(struct pcie_port *pp) { + u64 msi_target; + pp->msi_data = __get_free_pages(GFP_KERNEL, 0); + msi_target = virt_to_phys((void *)pp->msi_data); /* program the msi_data */ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, - virt_to_phys((void *)pp->msi_data)); - dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0); + (u32)(msi_target & 0xffffffff)); + dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, + (u32)(msi_target >> 32 & 0xffffffff)); } static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) @@ -238,7 +258,7 @@ static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) { int irq, pos0, i; - struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata); + struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(desc); pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS, order_base_2(no_irqs)); @@ -269,6 +289,9 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) } *pos = pos0; + desc->nvec_used = no_irqs; + desc->msi_attrib.multiple = order_base_2(no_irqs); + return irq; no_valid_irq: @@ -276,12 +299,32 @@ no_valid_irq: return -ENOSPC; } +static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos) +{ + struct msi_msg msg; + u64 msi_target; + + if (pp->ops->get_msi_addr) + msi_target = pp->ops->get_msi_addr(pp); + else + msi_target = virt_to_phys((void *)pp->msi_data); + + msg.address_lo = (u32)(msi_target & 0xffffffff); + msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff); + + if (pp->ops->get_msi_data) + msg.data = pp->ops->get_msi_data(pp, pos); + else + msg.data = pos; + + pci_write_msi_msg(irq, &msg); +} + static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, struct msi_desc *desc) { int irq, pos; - struct msi_msg msg; - struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); + struct pcie_port *pp = pdev->bus->sysdata; if (desc->msi_attrib.is_msix) return -EINVAL; @@ -290,33 +333,50 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, if (irq < 0) return irq; - if (pp->ops->get_msi_addr) - msg.address_lo = pp->ops->get_msi_addr(pp); - else - msg.address_lo = virt_to_phys((void *)pp->msi_data); - msg.address_hi = 0x0; + dw_msi_setup_msg(pp, irq, pos); - if (pp->ops->get_msi_data) - msg.data = pp->ops->get_msi_data(pp, pos); - else - msg.data = pos; + return 0; +} - pci_write_msi_msg(irq, &msg); +static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev, + int 
nvec, int type) +{ +#ifdef CONFIG_PCI_MSI + int irq, pos; + struct msi_desc *desc; + struct pcie_port *pp = pdev->bus->sysdata; + + /* MSI-X interrupts are not supported */ + if (type == PCI_CAP_ID_MSIX) + return -EINVAL; + + WARN_ON(!list_is_singular(&pdev->dev.msi_list)); + desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list); + + irq = assign_irq(nvec, desc, &pos); + if (irq < 0) + return irq; + + dw_msi_setup_msg(pp, irq, pos); return 0; +#else + return -EINVAL; +#endif } static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) { struct irq_data *data = irq_get_irq_data(irq); - struct msi_desc *msi = irq_data_get_msi(data); - struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata); + struct msi_desc *msi = irq_data_get_msi_desc(data); + struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi); clear_irq_range(pp, irq, 1, data->hwirq); } static struct msi_controller dw_pcie_msi_chip = { .setup_irq = dw_msi_setup_irq, + .setup_irqs = dw_msi_setup_irqs, .teardown_irq = dw_msi_teardown_irq, }; @@ -333,7 +393,6 @@ static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, { irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq); irq_set_chip_data(irq, domain->host_data); - set_irq_flags(irq, IRQF_VALID); return 0; } @@ -346,18 +405,12 @@ int dw_pcie_host_init(struct pcie_port *pp) { struct device_node *np = pp->dev->of_node; struct platform_device *pdev = to_platform_device(pp->dev); - struct of_pci_range range; - struct of_pci_range_parser parser; + struct pci_bus *bus, *child; struct resource *cfg_res; - u32 val, na, ns; - const __be32 *addrp; - int i, index, ret; - - /* Find the address cell size and the number of cells in order to get - * the untranslated address. - */ - of_property_read_u32(np, "#address-cells", &na); - ns = of_n_size_cells(np); + u32 val; + int i, ret; + LIST_HEAD(res); + struct resource_entry *win; cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); if (cfg_res) { @@ -365,88 +418,60 @@ int dw_pcie_host_init(struct pcie_port *pp) pp->cfg1_size = resource_size(cfg_res)/2; pp->cfg0_base = cfg_res->start; pp->cfg1_base = cfg_res->start + pp->cfg0_size; - - /* Find the untranslated configuration space address */ - index = of_property_match_string(np, "reg-names", "config"); - addrp = of_get_address(np, index, NULL, NULL); - pp->cfg0_mod_base = of_read_number(addrp, ns); - pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size; - } else { + } else if (!pp->va_cfg0_base) { dev_err(pp->dev, "missing *config* reg space\n"); } - if (of_pci_range_parser_init(&parser, np)) { - dev_err(pp->dev, "missing ranges property\n"); - return -EINVAL; - } + ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base); + if (ret) + return ret; /* Get the I/O and memory ranges from DT */ - for_each_of_pci_range(&parser, &range) { - unsigned long restype = range.flags & IORESOURCE_TYPE_BITS; - - if (restype == IORESOURCE_IO) { - of_pci_range_to_resource(&range, np, &pp->io); - pp->io.name = "I/O"; - pp->io.start = max_t(resource_size_t, - PCIBIOS_MIN_IO, - range.pci_addr + global_io_offset); - pp->io.end = min_t(resource_size_t, - IO_SPACE_LIMIT, - range.pci_addr + range.size - + global_io_offset - 1); - pp->io_size = resource_size(&pp->io); - pp->io_bus_addr = range.pci_addr; - pp->io_base = range.cpu_addr; - - /* Find the untranslated IO space address */ - pp->io_mod_base = of_read_number(parser.range - - parser.np + na, ns); - } - if (restype == IORESOURCE_MEM) { - 
of_pci_range_to_resource(&range, np, &pp->mem); - pp->mem.name = "MEM"; - pp->mem_size = resource_size(&pp->mem); - pp->mem_bus_addr = range.pci_addr; - - /* Find the untranslated MEM space address */ - pp->mem_mod_base = of_read_number(parser.range - - parser.np + na, ns); - } - if (restype == 0) { - of_pci_range_to_resource(&range, np, &pp->cfg); - pp->cfg0_size = resource_size(&pp->cfg)/2; - pp->cfg1_size = resource_size(&pp->cfg)/2; - pp->cfg0_base = pp->cfg.start; - pp->cfg1_base = pp->cfg.start + pp->cfg0_size; - - /* Find the untranslated configuration space address */ - pp->cfg0_mod_base = of_read_number(parser.range - - parser.np + na, ns); - pp->cfg1_mod_base = pp->cfg0_mod_base + - pp->cfg0_size; + resource_list_for_each_entry(win, &res) { + switch (resource_type(win->res)) { + case IORESOURCE_IO: + pp->io = win->res; + pp->io->name = "I/O"; + pp->io_size = resource_size(pp->io); + pp->io_bus_addr = pp->io->start - win->offset; + ret = pci_remap_iospace(pp->io, pp->io_base); + if (ret) { + dev_warn(pp->dev, "error %d: failed to map resource %pR\n", + ret, pp->io); + continue; + } + break; + case IORESOURCE_MEM: + pp->mem = win->res; + pp->mem->name = "MEM"; + pp->mem_size = resource_size(pp->mem); + pp->mem_bus_addr = pp->mem->start - win->offset; + break; + case 0: + pp->cfg = win->res; + pp->cfg0_size = resource_size(pp->cfg)/2; + pp->cfg1_size = resource_size(pp->cfg)/2; + pp->cfg0_base = pp->cfg->start; + pp->cfg1_base = pp->cfg->start + pp->cfg0_size; + break; + case IORESOURCE_BUS: + pp->busn = win->res; + break; + default: + continue; } } - ret = of_pci_parse_bus_range(np, &pp->busn); - if (ret < 0) { - pp->busn.name = np->name; - pp->busn.start = 0; - pp->busn.end = 0xff; - pp->busn.flags = IORESOURCE_BUS; - dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n", - ret, &pp->busn); - } - if (!pp->dbi_base) { - pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start, - resource_size(&pp->cfg)); + pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start, + resource_size(pp->cfg)); if (!pp->dbi_base) { dev_err(pp->dev, "error with ioremap\n"); return -ENOMEM; } } - pp->mem_base = pp->mem.start; + pp->mem_base = pp->mem->start; if (!pp->va_cfg0_base) { pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base, @@ -466,10 +491,9 @@ int dw_pcie_host_init(struct pcie_port *pp) } } - if (of_property_read_u32(np, "num-lanes", &pp->lanes)) { - dev_err(pp->dev, "Failed to parse the number of lanes\n"); - return -EINVAL; - } + ret = of_property_read_u32(np, "num-lanes", &pp->lanes); + if (ret) + pp->lanes = 0; if (IS_ENABLED(CONFIG_PCI_MSI)) { if (!pp->ops->msi_host_init) { @@ -493,6 +517,11 @@ int dw_pcie_host_init(struct pcie_port *pp) if (pp->ops->host_init) pp->ops->host_init(pp); + if (!pp->ops->rd_other_conf) + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_MEM, pp->mem_base, + pp->mem_bus_addr, pp->mem_size); + dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); /* program correct class for RC */ @@ -502,128 +531,103 @@ int dw_pcie_host_init(struct pcie_port *pp) val |= PORT_LOGIC_SPEED_CHANGE; dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); -#ifdef CONFIG_PCI_MSI - dw_pcie_msi_chip.dev = pp->dev; - dw_pci.msi_ctrl = &dw_pcie_msi_chip; -#endif - - dw_pci.nr_controllers = 1; - dw_pci.private_data = (void **)&pp; - - pci_common_init_dev(pp->dev, &dw_pci); + pp->root_bus_nr = pp->busn->start; + if (IS_ENABLED(CONFIG_PCI_MSI)) { + bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr, + &dw_pcie_ops, pp, &res, + &dw_pcie_msi_chip); 
+ dw_pcie_msi_chip.dev = pp->dev; + } else + bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops, + pp, &res); + if (!bus) + return -ENOMEM; - return 0; -} + if (pp->ops->scan_bus) + pp->ops->scan_bus(pp); -static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) -{ - /* Program viewport 0 : OUTBOUND : CFG0 */ - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, - PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1, - PCIE_ATU_LIMIT); - dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); - dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); -} +#ifdef CONFIG_ARM + /* support old dtbs that incorrectly describe IRQs */ + pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); +#endif -static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) -{ - /* Program viewport 1 : OUTBOUND : CFG1 */ - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, - PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1, - PCIE_ATU_LIMIT); - dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); - dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); -} + if (!pci_has_flag(PCI_PROBE_ONLY)) { + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); -static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) -{ - /* Program viewport 0 : OUTBOUND : MEM */ - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, - PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1, - PCIE_ATU_LIMIT); - dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET); - dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr), - PCIE_ATU_UPPER_TARGET); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); -} + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + } -static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) -{ - /* Program viewport 1 : OUTBOUND : IO */ - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, - PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1, - PCIE_ATU_LIMIT); - dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET); - dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr), - PCIE_ATU_UPPER_TARGET); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); + pci_bus_add_devices(bus); + return 0; } static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { - int ret = PCIBIOS_SUCCESSFUL; - u32 address, busdev; + int ret, type; + u32 busdev, cfg_size; + u64 cpu_addr; + void __iomem 
*va_cfg_base; busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | PCIE_ATU_FUNC(PCI_FUNC(devfn)); - address = where & ~0x3; if (bus->parent->number == pp->root_bus_nr) { - dw_pcie_prog_viewport_cfg0(pp, busdev); - ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size, - val); - dw_pcie_prog_viewport_mem_outbound(pp); + type = PCIE_ATU_TYPE_CFG0; + cpu_addr = pp->cfg0_base; + cfg_size = pp->cfg0_size; + va_cfg_base = pp->va_cfg0_base; } else { - dw_pcie_prog_viewport_cfg1(pp, busdev); - ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size, - val); - dw_pcie_prog_viewport_io_outbound(pp); + type = PCIE_ATU_TYPE_CFG1; + cpu_addr = pp->cfg1_base; + cfg_size = pp->cfg1_size; + va_cfg_base = pp->va_cfg1_base; } + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + type, cpu_addr, + busdev, cfg_size); + ret = dw_pcie_cfg_read(va_cfg_base + where, size, val); + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_IO, pp->io_base, + pp->io_bus_addr, pp->io_size); + return ret; } static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { - int ret = PCIBIOS_SUCCESSFUL; - u32 address, busdev; + int ret, type; + u32 busdev, cfg_size; + u64 cpu_addr; + void __iomem *va_cfg_base; busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | PCIE_ATU_FUNC(PCI_FUNC(devfn)); - address = where & ~0x3; if (bus->parent->number == pp->root_bus_nr) { - dw_pcie_prog_viewport_cfg0(pp, busdev); - ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size, - val); - dw_pcie_prog_viewport_mem_outbound(pp); + type = PCIE_ATU_TYPE_CFG0; + cpu_addr = pp->cfg0_base; + cfg_size = pp->cfg0_size; + va_cfg_base = pp->va_cfg0_base; } else { - dw_pcie_prog_viewport_cfg1(pp, busdev); - ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size, - val); - dw_pcie_prog_viewport_io_outbound(pp); + type = PCIE_ATU_TYPE_CFG1; + cpu_addr = pp->cfg1_base; + cfg_size = pp->cfg1_size; + va_cfg_base = pp->va_cfg1_base; } + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + type, cpu_addr, + busdev, cfg_size); + ret = dw_pcie_cfg_write(va_cfg_base + where, size, val); + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_IO, pp->io_base, + pp->io_bus_addr, pp->io_size); + return ret; } @@ -653,7 +657,7 @@ static int dw_pcie_valid_config(struct pcie_port *pp, static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { - struct pcie_port *pp = sys_to_pcie(bus->sysdata); + struct pcie_port *pp = bus->sysdata; int ret; if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) { @@ -677,7 +681,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { - struct pcie_port *pp = sys_to_pcie(bus->sysdata); + struct pcie_port *pp = bus->sysdata; int ret; if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) @@ -701,64 +705,6 @@ static struct pci_ops dw_pcie_ops = { .write = dw_pcie_wr_conf, }; -static int dw_pcie_setup(int nr, struct pci_sys_data *sys) -{ - struct pcie_port *pp; - - pp = sys_to_pcie(sys); - - if (global_io_offset < SZ_1M && pp->io_size > 0) { - sys->io_offset = global_io_offset - pp->io_bus_addr; - pci_ioremap_io(global_io_offset, pp->io_base); - global_io_offset += SZ_64K; - pci_add_resource_offset(&sys->resources, &pp->io, - sys->io_offset); - } - - sys->mem_offset = pp->mem.start - pp->mem_bus_addr; - 
pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset); - pci_add_resource(&sys->resources, &pp->busn); - - return 1; -} - -static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys) -{ - struct pci_bus *bus; - struct pcie_port *pp = sys_to_pcie(sys); - - pp->root_bus_nr = sys->busnr; - bus = pci_create_root_bus(pp->dev, sys->busnr, - &dw_pcie_ops, sys, &sys->resources); - if (!bus) - return NULL; - - pci_scan_child_bus(bus); - - if (bus && pp->ops->scan_bus) - pp->ops->scan_bus(pp); - - return bus; -} - -static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -{ - struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata); - int irq; - - irq = of_irq_parse_and_map_pci(dev, slot, pin); - if (!irq) - irq = pp->irq; - - return irq; -} - -static struct hw_pci dw_pci = { - .setup = dw_pcie_setup, - .scan = dw_pcie_scan_bus, - .map_irq = dw_pcie_map_irq, -}; - void dw_pcie_setup_rc(struct pcie_port *pp) { u32 val; @@ -778,6 +724,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp) case 4: val |= PORT_LINK_MODE_4_LANES; break; + case 8: + val |= PORT_LINK_MODE_8_LANES; + break; + default: + dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes); + return; } dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL); @@ -794,6 +746,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp) case 4: val |= PORT_LOGIC_LINK_WIDTH_4_LANES; break; + case 8: + val |= PORT_LOGIC_LINK_WIDTH_8_LANES; + break; } dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL); diff --git a/kernel/drivers/pci/host/pcie-designware.h b/kernel/drivers/pci/host/pcie-designware.h index d0bbd2768..2356d29e8 100644 --- a/kernel/drivers/pci/host/pcie-designware.h +++ b/kernel/drivers/pci/host/pcie-designware.h @@ -27,25 +27,21 @@ struct pcie_port { u8 root_bus_nr; void __iomem *dbi_base; u64 cfg0_base; - u64 cfg0_mod_base; void __iomem *va_cfg0_base; u32 cfg0_size; u64 cfg1_base; - u64 cfg1_mod_base; void __iomem *va_cfg1_base; u32 cfg1_size; - u64 io_base; - u64 io_mod_base; + resource_size_t io_base; phys_addr_t io_bus_addr; u32 io_size; u64 mem_base; - u64 mem_mod_base; phys_addr_t mem_bus_addr; u32 mem_size; - struct resource cfg; - struct resource io; - struct resource mem; - struct resource busn; + struct resource *cfg; + struct resource *io; + struct resource *mem; + struct resource *busn; int irq; u32 lanes; struct pcie_host_ops *ops; @@ -70,14 +66,14 @@ struct pcie_host_ops { void (*host_init)(struct pcie_port *pp); void (*msi_set_irq)(struct pcie_port *pp, int irq); void (*msi_clear_irq)(struct pcie_port *pp, int irq); - u32 (*get_msi_addr)(struct pcie_port *pp); + phys_addr_t (*get_msi_addr)(struct pcie_port *pp); u32 (*get_msi_data)(struct pcie_port *pp, int pos); void (*scan_bus)(struct pcie_port *pp); int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip); }; -int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val); -int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val); +int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val); +int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val); irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); void dw_pcie_msi_init(struct pcie_port *pp); int dw_pcie_link_up(struct pcie_port *pp); diff --git a/kernel/drivers/pci/host/pcie-hisi.c b/kernel/drivers/pci/host/pcie-hisi.c new file mode 100644 index 000000000..77f7c669a --- /dev/null +++ b/kernel/drivers/pci/host/pcie-hisi.c @@ -0,0 +1,200 @@ +/* + * PCIe host controller driver for HiSilicon Hip05 SoC + * + * Copyright (C) 2015 HiSilicon 
Co., Ltd. http://www.hisilicon.com + * + * Author: Zhou Wang <wangzhou1@hisilicon.com> + * Dacai Zhu <zhudacai@hisilicon.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/mfd/syscon.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include "pcie-designware.h" + +#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818 +#define PCIE_LTSSM_LINKUP_STATE 0x11 +#define PCIE_LTSSM_STATE_MASK 0x3F + +#define to_hisi_pcie(x) container_of(x, struct hisi_pcie, pp) + +struct hisi_pcie { + struct regmap *subctrl; + void __iomem *reg_base; + u32 port_id; + struct pcie_port pp; +}; + +static inline void hisi_pcie_apb_writel(struct hisi_pcie *pcie, + u32 val, u32 reg) +{ + writel(val, pcie->reg_base + reg); +} + +static inline u32 hisi_pcie_apb_readl(struct hisi_pcie *pcie, u32 reg) +{ + return readl(pcie->reg_base + reg); +} + +/* Hip05 PCIe host only supports 32-bit config access */ +static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size, + u32 *val) +{ + u32 reg; + u32 reg_val; + struct hisi_pcie *pcie = to_hisi_pcie(pp); + void *walker = ®_val; + + walker += (where & 0x3); + reg = where & ~0x3; + reg_val = hisi_pcie_apb_readl(pcie, reg); + + if (size == 1) + *val = *(u8 __force *) walker; + else if (size == 2) + *val = *(u16 __force *) walker; + else if (size == 4) + *val = reg_val; + else + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} + +/* Hip05 PCIe host only supports 32-bit config access */ +static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size, + u32 val) +{ + u32 reg_val; + u32 reg; + struct hisi_pcie *pcie = to_hisi_pcie(pp); + void *walker = ®_val; + + walker += (where & 0x3); + reg = where & ~0x3; + if (size == 4) + hisi_pcie_apb_writel(pcie, val, reg); + else if (size == 2) { + reg_val = hisi_pcie_apb_readl(pcie, reg); + *(u16 __force *) walker = val; + hisi_pcie_apb_writel(pcie, reg_val, reg); + } else if (size == 1) { + reg_val = hisi_pcie_apb_readl(pcie, reg); + *(u8 __force *) walker = val; + hisi_pcie_apb_writel(pcie, reg_val, reg); + } else + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} + +static int hisi_pcie_link_up(struct pcie_port *pp) +{ + u32 val; + struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp); + + regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG + + 0x100 * hisi_pcie->port_id, &val); + + return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); +} + +static struct pcie_host_ops hisi_pcie_host_ops = { + .rd_own_conf = hisi_pcie_cfg_read, + .wr_own_conf = hisi_pcie_cfg_write, + .link_up = hisi_pcie_link_up, +}; + +static int hisi_add_pcie_port(struct pcie_port *pp, + struct platform_device *pdev) +{ + int ret; + u32 port_id; + struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp); + + if (of_property_read_u32(pdev->dev.of_node, "port-id", &port_id)) { + dev_err(&pdev->dev, "failed to read port-id\n"); + return -EINVAL; + } + if (port_id > 3) { + dev_err(&pdev->dev, "Invalid port-id: %d\n", port_id); + return -EINVAL; + } + hisi_pcie->port_id = port_id; + + pp->ops = &hisi_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(&pdev->dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static int hisi_pcie_probe(struct platform_device *pdev) +{ + struct 
hisi_pcie *hisi_pcie; + struct pcie_port *pp; + struct resource *reg; + int ret; + + hisi_pcie = devm_kzalloc(&pdev->dev, sizeof(*hisi_pcie), GFP_KERNEL); + if (!hisi_pcie) + return -ENOMEM; + + pp = &hisi_pcie->pp; + pp->dev = &pdev->dev; + + hisi_pcie->subctrl = + syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl"); + if (IS_ERR(hisi_pcie->subctrl)) { + dev_err(pp->dev, "cannot get subctrl base\n"); + return PTR_ERR(hisi_pcie->subctrl); + } + + reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); + hisi_pcie->reg_base = devm_ioremap_resource(&pdev->dev, reg); + if (IS_ERR(hisi_pcie->reg_base)) { + dev_err(pp->dev, "cannot get rc_dbi base\n"); + return PTR_ERR(hisi_pcie->reg_base); + } + + hisi_pcie->pp.dbi_base = hisi_pcie->reg_base; + + ret = hisi_add_pcie_port(pp, pdev); + if (ret) + return ret; + + platform_set_drvdata(pdev, hisi_pcie); + + dev_warn(pp->dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n"); + + return 0; +} + +static const struct of_device_id hisi_pcie_of_match[] = { + {.compatible = "hisilicon,hip05-pcie",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, hisi_pcie_of_match); + +static struct platform_driver hisi_pcie_driver = { + .probe = hisi_pcie_probe, + .driver = { + .name = "hisi-pcie", + .of_match_table = hisi_pcie_of_match, + }, +}; + +module_platform_driver(hisi_pcie_driver); diff --git a/kernel/drivers/pci/host/pcie-iproc-bcma.c b/kernel/drivers/pci/host/pcie-iproc-bcma.c new file mode 100644 index 000000000..96a7d999f --- /dev/null +++ b/kernel/drivers/pci/host/pcie-iproc-bcma.c @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2015 Broadcom Corporation + * Copyright (C) 2015 Hauke Mehrtens <hauke@hauke-m.de> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/phy/phy.h> +#include <linux/bcma/bcma.h> +#include <linux/ioport.h> + +#include "pcie-iproc.h" + + +/* NS: CLASS field is R/O, and set to wrong 0x200 value */ +static void bcma_pcie2_fixup_class(struct pci_dev *dev) +{ + dev->class = PCI_CLASS_BRIDGE_PCI << 8; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class); + +static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct pci_sys_data *sys = dev->sysdata; + struct iproc_pcie *pcie = sys->private_data; + struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev); + + return bcma_core_irq(bdev, 5); +} + +static int iproc_pcie_bcma_probe(struct bcma_device *bdev) +{ + struct iproc_pcie *pcie; + LIST_HEAD(res); + struct resource res_mem; + int ret; + + pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->dev = &bdev->dev; + bcma_set_drvdata(bdev, pcie); + + pcie->base = bdev->io_addr; + + res_mem.start = bdev->addr_s[0]; + res_mem.end = bdev->addr_s[0] + SZ_128M - 1; + res_mem.name = "PCIe MEM space"; + res_mem.flags = IORESOURCE_MEM; + pci_add_resource(&res, &res_mem); + + pcie->map_irq = iproc_pcie_bcma_map_irq; + + ret = iproc_pcie_setup(pcie, &res); + if (ret) + dev_err(pcie->dev, "PCIe controller setup failed\n"); + + pci_free_resource_list(&res); + + return ret; +} + +static void iproc_pcie_bcma_remove(struct bcma_device *bdev) +{ + struct iproc_pcie *pcie = bcma_get_drvdata(bdev); + + iproc_pcie_remove(pcie); +} + +static const struct bcma_device_id iproc_pcie_bcma_table[] = { + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS), + {}, +}; +MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table); + +static struct bcma_driver iproc_pcie_bcma_driver = { + .name = KBUILD_MODNAME, + .id_table = iproc_pcie_bcma_table, + .probe = iproc_pcie_bcma_probe, + .remove = iproc_pcie_bcma_remove, +}; + +static int __init iproc_pcie_bcma_init(void) +{ + return bcma_driver_register(&iproc_pcie_bcma_driver); +} +module_init(iproc_pcie_bcma_init); + +static void __exit iproc_pcie_bcma_exit(void) +{ + bcma_driver_unregister(&iproc_pcie_bcma_driver); +} +module_exit(iproc_pcie_bcma_exit); + +MODULE_AUTHOR("Hauke Mehrtens"); +MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver"); +MODULE_LICENSE("GPL v2"); diff --git a/kernel/drivers/pci/host/pcie-iproc-platform.c b/kernel/drivers/pci/host/pcie-iproc-platform.c index afad6c21f..c9550dc8b 100644 --- a/kernel/drivers/pci/host/pcie-iproc-platform.c +++ b/kernel/drivers/pci/host/pcie-iproc-platform.c @@ -54,6 +54,33 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) return -ENOMEM; } + if (of_property_read_bool(np, "brcm,pcie-ob")) { + u32 val; + + ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset", + &val); + if (ret) { + dev_err(pcie->dev, + "missing brcm,pcie-ob-axi-offset property\n"); + return ret; + } + pcie->ob.axi_offset = val; + + ret = of_property_read_u32(np, "brcm,pcie-ob-window-size", + &val); + if (ret) { + dev_err(pcie->dev, + "missing brcm,pcie-ob-window-size property\n"); + return ret; + } + pcie->ob.window_size = (resource_size_t)val * SZ_1M; + + if (of_property_read_bool(np, "brcm,pcie-ob-oarr-size")) + pcie->ob.set_oarr_size = true; + + pcie->need_ob_cfg = true; + } + /* PHY use is optional */ pcie->phy = 
devm_phy_get(&pdev->dev, "pcie-phy"); if (IS_ERR(pcie->phy)) { @@ -69,15 +96,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) return ret; } - pcie->resources = &res; + pcie->map_irq = of_irq_parse_and_map_pci; - ret = iproc_pcie_setup(pcie); - if (ret) { + ret = iproc_pcie_setup(pcie, &res); + if (ret) dev_err(pcie->dev, "PCIe controller setup failed\n"); - return ret; - } - return 0; + pci_free_resource_list(&res); + + return ret; } static int iproc_pcie_pltfm_remove(struct platform_device *pdev) diff --git a/kernel/drivers/pci/host/pcie-iproc.c b/kernel/drivers/pci/host/pcie-iproc.c index 329e1b545..eac719af1 100644 --- a/kernel/drivers/pci/host/pcie-iproc.c +++ b/kernel/drivers/pci/host/pcie-iproc.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de> - * Copyright (C) 2015 Broadcom Corporatcommon ion + * Copyright (C) 2015 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -31,6 +31,8 @@ #include "pcie-iproc.h" #define CLK_CONTROL_OFFSET 0x000 +#define EP_PERST_SOURCE_SELECT_SHIFT 2 +#define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT) #define EP_MODE_SURVIVE_PERST_SHIFT 1 #define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT) #define RC_PCIE_RST_OUTPUT_SHIFT 0 @@ -58,9 +60,35 @@ #define SYS_RC_INTX_EN 0x330 #define SYS_RC_INTX_MASK 0xf -static inline struct iproc_pcie *sys_to_pcie(struct pci_sys_data *sys) +#define PCIE_LINK_STATUS_OFFSET 0xf0c +#define PCIE_PHYLINKUP_SHIFT 3 +#define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT) +#define PCIE_DL_ACTIVE_SHIFT 2 +#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT) + +#define OARR_VALID_SHIFT 0 +#define OARR_VALID BIT(OARR_VALID_SHIFT) +#define OARR_SIZE_CFG_SHIFT 1 +#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT) + +#define OARR_LO(window) (0xd20 + (window) * 8) +#define OARR_HI(window) (0xd24 + (window) * 8) +#define OMAP_LO(window) (0xd40 + (window) * 8) +#define OMAP_HI(window) (0xd44 + (window) * 8) + +#define MAX_NUM_OB_WINDOWS 2 + +static inline struct iproc_pcie *iproc_data(struct pci_bus *bus) { - return sys->private_data; + struct iproc_pcie *pcie; +#ifdef CONFIG_ARM + struct pci_sys_data *sys = bus->sysdata; + + pcie = sys->private_data; +#else + pcie = bus->sysdata; +#endif + return pcie; } /** @@ -71,8 +99,7 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, unsigned int devfn, int where) { - struct pci_sys_data *sys = bus->sysdata; - struct iproc_pcie *pcie = sys_to_pcie(sys); + struct iproc_pcie *pcie = iproc_data(bus); unsigned slot = PCI_SLOT(devfn); unsigned fn = PCI_FUNC(devfn); unsigned busno = bus->number; @@ -112,23 +139,32 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie) u32 val; /* - * Configure the PCIe controller as root complex and send a downstream - * reset + * Select perst_b signal as reset source. 
Put the device into reset, + * and then bring it out of reset */ - val = EP_MODE_SURVIVE_PERST | RC_PCIE_RST_OUTPUT; + val = readl(pcie->base + CLK_CONTROL_OFFSET); + val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST & + ~RC_PCIE_RST_OUTPUT; writel(val, pcie->base + CLK_CONTROL_OFFSET); udelay(250); - val &= ~EP_MODE_SURVIVE_PERST; + + val |= RC_PCIE_RST_OUTPUT; writel(val, pcie->base + CLK_CONTROL_OFFSET); - msleep(250); + msleep(100); } static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) { u8 hdr_type; - u32 link_ctrl; + u32 link_ctrl, class, val; u16 pos, link_status; - int link_is_active = 0; + bool link_is_active = false; + + val = readl(pcie->base + PCIE_LINK_STATUS_OFFSET); + if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) { + dev_err(pcie->dev, "PHY or data link is INACTIVE!\n"); + return -ENODEV; + } /* make sure we are not in EP mode */ pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type); @@ -138,14 +174,19 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) } /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */ - pci_bus_write_config_word(bus, 0, PCI_CLASS_DEVICE, - PCI_CLASS_BRIDGE_PCI); +#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c +#define PCI_CLASS_BRIDGE_MASK 0xffff00 +#define PCI_CLASS_BRIDGE_SHIFT 8 + pci_bus_read_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, &class); + class &= ~PCI_CLASS_BRIDGE_MASK; + class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT); + pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class); /* check link status to see if link is active */ pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP); pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status); if (link_status & PCI_EXP_LNKSTA_NLW) - link_is_active = 1; + link_is_active = true; if (!link_is_active) { /* try GEN 1 link speed */ @@ -169,7 +210,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status); if (link_status & PCI_EXP_LNKSTA_NLW) - link_is_active = 1; + link_is_active = true; } } @@ -183,35 +224,140 @@ static void iproc_pcie_enable(struct iproc_pcie *pcie) writel(SYS_RC_INTX_MASK, pcie->base + SYS_RC_INTX_EN); } -int iproc_pcie_setup(struct iproc_pcie *pcie) +/** + * Some iProc SoCs require the SW to configure the outbound address mapping + * + * Outbound address translation: + * + * iproc_pcie_address = axi_address - axi_offset + * OARR = iproc_pcie_address + * OMAP = pci_addr + * + * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address + */ +static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr, + u64 pci_addr, resource_size_t size) +{ + struct iproc_pcie_ob *ob = &pcie->ob; + unsigned i; + u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS; + u64 remainder; + + if (size > max_size) { + dev_err(pcie->dev, + "res size 0x%pap exceeds max supported size 0x%llx\n", + &size, max_size); + return -EINVAL; + } + + div64_u64_rem(size, ob->window_size, &remainder); + if (remainder) { + dev_err(pcie->dev, + "res size %pap needs to be multiple of window size %pap\n", + &size, &ob->window_size); + return -EINVAL; + } + + if (axi_addr < ob->axi_offset) { + dev_err(pcie->dev, + "axi address %pap less than offset %pap\n", + &axi_addr, &ob->axi_offset); + return -EINVAL; + } + + /* + * Translate the AXI address to the internal address used by the iProc + * PCIe core before programming the OARR + */ + axi_addr -= ob->axi_offset; + + for (i = 0; i < MAX_NUM_OB_WINDOWS; i++) 
{ + writel(lower_32_bits(axi_addr) | OARR_VALID | + (ob->set_oarr_size ? 1 : 0), pcie->base + OARR_LO(i)); + writel(upper_32_bits(axi_addr), pcie->base + OARR_HI(i)); + writel(lower_32_bits(pci_addr), pcie->base + OMAP_LO(i)); + writel(upper_32_bits(pci_addr), pcie->base + OMAP_HI(i)); + + size -= ob->window_size; + if (size == 0) + break; + + axi_addr += ob->window_size; + pci_addr += ob->window_size; + } + + return 0; +} + +static int iproc_pcie_map_ranges(struct iproc_pcie *pcie, + struct list_head *resources) +{ + struct resource_entry *window; + int ret; + + resource_list_for_each_entry(window, resources) { + struct resource *res = window->res; + u64 res_type = resource_type(res); + + switch (res_type) { + case IORESOURCE_IO: + case IORESOURCE_BUS: + break; + case IORESOURCE_MEM: + ret = iproc_pcie_setup_ob(pcie, res->start, + res->start - window->offset, + resource_size(res)); + if (ret) + return ret; + break; + default: + dev_err(pcie->dev, "invalid resource %pR\n", res); + return -EINVAL; + } + } + + return 0; +} + +int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) { int ret; + void *sysdata; struct pci_bus *bus; if (!pcie || !pcie->dev || !pcie->base) return -EINVAL; - if (pcie->phy) { - ret = phy_init(pcie->phy); - if (ret) { - dev_err(pcie->dev, "unable to initialize PCIe PHY\n"); - return ret; - } - - ret = phy_power_on(pcie->phy); - if (ret) { - dev_err(pcie->dev, "unable to power on PCIe PHY\n"); - goto err_exit_phy; - } + ret = phy_init(pcie->phy); + if (ret) { + dev_err(pcie->dev, "unable to initialize PCIe PHY\n"); + return ret; + } + ret = phy_power_on(pcie->phy); + if (ret) { + dev_err(pcie->dev, "unable to power on PCIe PHY\n"); + goto err_exit_phy; } iproc_pcie_reset(pcie); + if (pcie->need_ob_cfg) { + ret = iproc_pcie_map_ranges(pcie, res); + if (ret) { + dev_err(pcie->dev, "map failed\n"); + goto err_power_off_phy; + } + } + +#ifdef CONFIG_ARM pcie->sysdata.private_data = pcie; + sysdata = &pcie->sysdata; +#else + sysdata = pcie; +#endif - bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, - &pcie->sysdata, pcie->resources); + bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, sysdata, res); if (!bus) { dev_err(pcie->dev, "unable to create PCI root bus\n"); ret = -ENOMEM; @@ -229,7 +375,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie) pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); - pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); + pci_fixup_irqs(pci_common_swizzle, pcie->map_irq); pci_bus_add_devices(bus); return 0; @@ -239,12 +385,9 @@ err_rm_root_bus: pci_remove_root_bus(bus); err_power_off_phy: - if (pcie->phy) - phy_power_off(pcie->phy); + phy_power_off(pcie->phy); err_exit_phy: - if (pcie->phy) - phy_exit(pcie->phy); - + phy_exit(pcie->phy); return ret; } EXPORT_SYMBOL(iproc_pcie_setup); @@ -254,10 +397,8 @@ int iproc_pcie_remove(struct iproc_pcie *pcie) pci_stop_root_bus(pcie->root_bus); pci_remove_root_bus(pcie->root_bus); - if (pcie->phy) { - phy_power_off(pcie->phy); - phy_exit(pcie->phy); - } + phy_power_off(pcie->phy); + phy_exit(pcie->phy); return 0; } diff --git a/kernel/drivers/pci/host/pcie-iproc.h b/kernel/drivers/pci/host/pcie-iproc.h index e28075ed1..d3dc940f7 100644 --- a/kernel/drivers/pci/host/pcie-iproc.h +++ b/kernel/drivers/pci/host/pcie-iproc.h @@ -14,29 +14,45 @@ #ifndef _PCIE_IPROC_H #define _PCIE_IPROC_H -#define IPROC_PCIE_MAX_NUM_IRQS 6 +/** + * iProc PCIe outbound mapping + * @set_oarr_size: indicates the OARR size bit needs to be set + * @axi_offset: offset from the 
AXI address to the internal address used by + * the iProc PCIe core + * @window_size: outbound window size + */ +struct iproc_pcie_ob { + bool set_oarr_size; + resource_size_t axi_offset; + resource_size_t window_size; +}; /** * iProc PCIe device * @dev: pointer to device data structure * @base: PCIe host controller I/O register base - * @resources: linked list of all PCI resources - * @sysdata: Per PCI controller data + * @sysdata: Per PCI controller data (ARM-specific) * @root_bus: pointer to root bus * @phy: optional PHY device that controls the Serdes * @irqs: interrupt IDs + * @map_irq: function callback to map interrupts + * @need_ob_cfg: indidates SW needs to configure the outbound mapping window + * @ob: outbound mapping parameters */ struct iproc_pcie { struct device *dev; void __iomem *base; - struct list_head *resources; +#ifdef CONFIG_ARM struct pci_sys_data sysdata; +#endif struct pci_bus *root_bus; struct phy *phy; - int irqs[IPROC_PCIE_MAX_NUM_IRQS]; + int (*map_irq)(const struct pci_dev *, u8, u8); + bool need_ob_cfg; + struct iproc_pcie_ob ob; }; -int iproc_pcie_setup(struct iproc_pcie *pcie); +int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res); int iproc_pcie_remove(struct iproc_pcie *pcie); #endif /* _PCIE_IPROC_H */ diff --git a/kernel/drivers/pci/host/pcie-rcar.c b/kernel/drivers/pci/host/pcie-rcar.c index c086210f2..414c33686 100644 --- a/kernel/drivers/pci/host/pcie-rcar.c +++ b/kernel/drivers/pci/host/pcie-rcar.c @@ -108,6 +108,8 @@ #define RCAR_PCI_MAX_RESOURCES 4 #define MAX_NR_INBOUND_MAPS 6 +static unsigned long global_io_offset; + struct rcar_msi { DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; @@ -124,7 +126,16 @@ static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip) } /* Structure representing the PCIe interface */ +/* + * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI + * sysdata. Add pci_sys_data as the first element in struct gen_pci so + * that when we use a gen_pci pointer as sysdata, it is also a pointer to + * a struct pci_sys_data. 
+ */ struct rcar_pcie { +#ifdef CONFIG_ARM + struct pci_sys_data sys; +#endif struct device *dev; void __iomem *base; struct resource res[RCAR_PCI_MAX_RESOURCES]; @@ -135,11 +146,6 @@ struct rcar_pcie { struct rcar_msi msi; }; -static inline struct rcar_pcie *sys_to_pcie(struct pci_sys_data *sys) -{ - return sys->private_data; -} - static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val, unsigned long reg) { @@ -258,7 +264,7 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie, static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { - struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata); + struct rcar_pcie *pcie = bus->sysdata; int ret; ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, @@ -283,7 +289,7 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { - struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata); + struct rcar_pcie *pcie = bus->sysdata; int shift, ret; u32 data; @@ -353,13 +359,12 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie) rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win)); } -static int rcar_pcie_setup(int nr, struct pci_sys_data *sys) +static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pcie) { - struct rcar_pcie *pcie = sys_to_pcie(sys); struct resource *res; int i; - pcie->root_bus_nr = -1; + pcie->root_bus_nr = pcie->busn.start; /* Setup PCI resources */ for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) { @@ -372,32 +377,53 @@ static int rcar_pcie_setup(int nr, struct pci_sys_data *sys) if (res->flags & IORESOURCE_IO) { phys_addr_t io_start = pci_pio_to_address(res->start); - pci_ioremap_io(nr * SZ_64K, io_start); - } else - pci_add_resource(&sys->resources, res); + pci_ioremap_io(global_io_offset, io_start); + global_io_offset += SZ_64K; + } + + pci_add_resource(resource, res); } - pci_add_resource(&sys->resources, &pcie->busn); + pci_add_resource(resource, &pcie->busn); return 1; } -static struct hw_pci rcar_pci = { - .setup = rcar_pcie_setup, - .map_irq = of_irq_parse_and_map_pci, - .ops = &rcar_pcie_ops, -}; - -static void rcar_pcie_enable(struct rcar_pcie *pcie) +static int rcar_pcie_enable(struct rcar_pcie *pcie) { - struct platform_device *pdev = to_platform_device(pcie->dev); + struct pci_bus *bus, *child; + LIST_HEAD(res); - rcar_pci.nr_controllers = 1; - rcar_pci.private_data = (void **)&pcie; -#ifdef CONFIG_PCI_MSI - rcar_pci.msi_ctrl = &pcie->msi.chip; -#endif + rcar_pcie_setup(&res, pcie); + + /* Do not reassign resources if probe only */ + if (!pci_has_flag(PCI_PROBE_ONLY)) + pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); - pci_common_init_dev(&pdev->dev, &rcar_pci); + if (IS_ENABLED(CONFIG_PCI_MSI)) + bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr, + &rcar_pcie_ops, pcie, &res, &pcie->msi.chip); + else + bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr, + &rcar_pcie_ops, pcie, &res); + + if (!bus) { + dev_err(pcie->dev, "Scanning rootbus failed"); + return -ENODEV; + } + + pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); + + if (!pci_has_flag(PCI_PROBE_ONLY)) { + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + } + + pci_bus_add_devices(bus); + + return 0; } static int phy_wait_for_ack(struct rcar_pcie *pcie) @@ -664,7 +690,6 @@ static int rcar_msi_map(struct irq_domain 
*domain, unsigned int irq, { irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq); irq_set_chip_data(irq, domain->host_data); - set_irq_flags(irq, IRQF_VALID); return 0; } @@ -695,14 +720,16 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) /* Two irqs are for MSI, but they are also used for non-MSI irqs */ err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq, - IRQF_SHARED, rcar_msi_irq_chip.name, pcie); + IRQF_SHARED | IRQF_NO_THREAD, + rcar_msi_irq_chip.name, pcie); if (err < 0) { dev_err(&pdev->dev, "failed to request IRQ: %d\n", err); goto err; } err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq, - IRQF_SHARED, rcar_msi_irq_chip.name, pcie); + IRQF_SHARED | IRQF_NO_THREAD, + rcar_msi_irq_chip.name, pcie); if (err < 0) { dev_err(&pdev->dev, "failed to request IRQ: %d\n", err); goto err; @@ -971,9 +998,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) data = rcar_pci_read_reg(pcie, MACSR); dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); - rcar_pcie_enable(pcie); - - return 0; + return rcar_pcie_enable(pcie); } static struct platform_driver rcar_pcie_driver = { diff --git a/kernel/drivers/pci/host/pcie-spear13xx.c b/kernel/drivers/pci/host/pcie-spear13xx.c index 020d78890..a6cd8233e 100644 --- a/kernel/drivers/pci/host/pcie-spear13xx.c +++ b/kernel/drivers/pci/host/pcie-spear13xx.c @@ -4,8 +4,8 @@ * SPEAr13xx PCIe Glue Layer Source Code * * Copyright (C) 2010-2014 ST Microelectronics - * Pratyush Anand <pratyush.anand@st.com> - * Mohit Kumar <mohit.kumar@st.com> + * Pratyush Anand <pratyush.anand@gmail.com> + * Mohit Kumar <mohit.kumar.dhaka@gmail.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any @@ -146,10 +146,10 @@ struct pcie_app_reg { static int spear13xx_pcie_establish_link(struct pcie_port *pp) { u32 val; - int count = 0; struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp); struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; u32 exp_cap_off = EXP_CAP_ID_OFFSET; + unsigned int retries; if (dw_pcie_link_up(pp)) { dev_err(pp->dev, "link already up\n"); @@ -163,34 +163,34 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp) * default value in capability register is 512 bytes. So force * it to 128 here. 
*/ - dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, &val); + dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val); val &= ~PCI_EXP_DEVCTL_READRQ; - dw_pcie_cfg_write(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, val); + dw_pcie_cfg_write(pp->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val); - dw_pcie_cfg_write(pp->dbi_base, PCI_VENDOR_ID, 2, 0x104A); - dw_pcie_cfg_write(pp->dbi_base, PCI_DEVICE_ID, 2, 0xCD80); + dw_pcie_cfg_write(pp->dbi_base + PCI_VENDOR_ID, 2, 0x104A); + dw_pcie_cfg_write(pp->dbi_base + PCI_DEVICE_ID, 2, 0xCD80); /* * if is_gen1 is set then handle it, so that some buggy card * also works */ if (spear13xx_pcie->is_gen1) { - dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCAP, 4, - &val); + dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_LNKCAP, + 4, &val); if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { val &= ~((u32)PCI_EXP_LNKCAP_SLS); val |= PCI_EXP_LNKCAP_SLS_2_5GB; - dw_pcie_cfg_write(pp->dbi_base, exp_cap_off + - PCI_EXP_LNKCAP, 4, val); + dw_pcie_cfg_write(pp->dbi_base + exp_cap_off + + PCI_EXP_LNKCAP, 4, val); } - dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCTL2, 4, - &val); + dw_pcie_cfg_read(pp->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2, + 2, &val); if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { val &= ~((u32)PCI_EXP_LNKCAP_SLS); val |= PCI_EXP_LNKCAP_SLS_2_5GB; - dw_pcie_cfg_write(pp->dbi_base, exp_cap_off + - PCI_EXP_LNKCTL2, 4, val); + dw_pcie_cfg_write(pp->dbi_base + exp_cap_off + + PCI_EXP_LNKCTL2, 2, val); } } @@ -201,17 +201,16 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp) &app_reg->app_ctrl_0); /* check if the link is up or not */ - while (!dw_pcie_link_up(pp)) { - mdelay(100); - count++; - if (count == 10) { - dev_err(pp->dev, "link Fail\n"); - return -EINVAL; + for (retries = 0; retries < 10; retries++) { + if (dw_pcie_link_up(pp)) { + dev_info(pp->dev, "link up\n"); + return 0; } + mdelay(100); } - dev_info(pp->dev, "link up\n"); - return 0; + dev_err(pp->dev, "link Fail\n"); + return -EINVAL; } static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) @@ -224,8 +223,7 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) status = readl(&app_reg->int_sts); if (status & MSI_CTRL_INT) { - if (!IS_ENABLED(CONFIG_PCI_MSI)) - BUG(); + BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI)); dw_handle_msi_irq(pp); } @@ -281,7 +279,8 @@ static int spear13xx_add_pcie_port(struct pcie_port *pp, return -ENODEV; } ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler, - IRQF_SHARED, "spear1340-pcie", pp); + IRQF_SHARED | IRQF_NO_THREAD, + "spear1340-pcie", pp); if (ret) { dev_err(dev, "failed to request irq %d\n", pp->irq); return ret; @@ -387,5 +386,5 @@ static int __init spear13xx_pcie_init(void) module_init(spear13xx_pcie_init); MODULE_DESCRIPTION("ST Microelectronics SPEAr13xx PCIe host controller driver"); -MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>"); +MODULE_AUTHOR("Pratyush Anand <pratyush.anand@gmail.com>"); MODULE_LICENSE("GPL v2"); diff --git a/kernel/drivers/pci/host/pcie-xilinx.c b/kernel/drivers/pci/host/pcie-xilinx.c index f1a06a091..4cfa46360 100644 --- a/kernel/drivers/pci/host/pcie-xilinx.c +++ b/kernel/drivers/pci/host/pcie-xilinx.c @@ -227,18 +227,16 @@ static struct pci_ops xilinx_pcie_ops = { */ static void xilinx_pcie_destroy_msi(unsigned int irq) { - struct irq_desc *desc; struct msi_desc *msi; struct xilinx_pcie_port *port; - desc = irq_to_desc(irq); - msi = irq_desc_get_msi_desc(desc); - port = 
sys_to_pcie(msi->dev->bus->sysdata); - - if (!test_bit(irq, msi_irq_in_use)) + if (!test_bit(irq, msi_irq_in_use)) { + msi = irq_get_msi_desc(irq); + port = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); dev_err(port->dev, "Trying to free unused MSI#%d\n", irq); - else + } else { clear_bit(irq, msi_irq_in_use); + } } /** @@ -338,7 +336,6 @@ static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq, { irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq); irq_set_chip_data(irq, domain->host_data); - set_irq_flags(irq, IRQF_VALID); return 0; } @@ -377,7 +374,6 @@ static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, { irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); irq_set_chip_data(irq, domain->host_data); - set_irq_flags(irq, IRQF_VALID); return 0; } @@ -449,14 +445,17 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) return IRQ_HANDLED; } - /* Clear interrupt FIFO register 1 */ - pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK, - XILINX_PCIE_REG_RPIFR1); + if (!(val & XILINX_PCIE_RPIFR1_MSI_INTR)) { + /* Clear interrupt FIFO register 1 */ + pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK, + XILINX_PCIE_REG_RPIFR1); - /* Handle INTx Interrupt */ - val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >> - XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1; - generic_handle_irq(irq_find_mapping(port->irq_domain, val)); + /* Handle INTx Interrupt */ + val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >> + XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1; + generic_handle_irq(irq_find_mapping(port->irq_domain, + val)); + } } if (status & XILINX_PCIE_INTR_MSI) { @@ -647,9 +646,15 @@ static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys) struct pci_bus *bus; port->root_busno = sys->busnr; - bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops, - sys, &sys->resources); + if (IS_ENABLED(CONFIG_PCI_MSI)) + bus = pci_scan_root_bus_msi(port->dev, sys->busnr, + &xilinx_pcie_ops, sys, + &sys->resources, + &xilinx_pcie_msi_chip); + else + bus = pci_scan_root_bus(port->dev, sys->busnr, + &xilinx_pcie_ops, sys, &sys->resources); return bus; } @@ -776,7 +781,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port) port->irq = irq_of_parse_and_map(node, 0); err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler, - IRQF_SHARED, "xilinx-pcie", port); + IRQF_SHARED | IRQF_NO_THREAD, + "xilinx-pcie", port); if (err) { dev_err(dev, "unable to request irq %d\n", port->irq); return err; @@ -847,7 +853,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev) #ifdef CONFIG_PCI_MSI xilinx_pcie_msi_chip.dev = port->dev; - hw.msi_ctrl = &xilinx_pcie_msi_chip; #endif pci_common_init_dev(dev, &hw); |
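
A note on the Hip05 accessors added in pcie-hisi.c above: hisi_pcie_cfg_read()/hisi_pcie_cfg_write() work around a host bridge that only issues aligned 32-bit config accesses, so sub-dword writes are performed as a read-modify-write of the containing dword, which is also why the probe path warns about RW1C fields. The short standalone sketch below models only that read-modify-write; the fake_cfg array, the apb_readl()/apb_writel() helpers and the little-endian byte-lane assumption are illustrative stand-ins, not part of the patch.

/*
 * Illustrative model (not driver code) of the read-modify-write used by
 * hisi_pcie_cfg_write(): 8/16-bit writes patch the containing dword.
 * A little-endian CPU is assumed, as on the real SoC.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t fake_cfg[64];                       /* stand-in for the APB window */

static uint32_t apb_readl(uint32_t reg)             { return fake_cfg[reg / 4]; }
static void apb_writel(uint32_t val, uint32_t reg)  { fake_cfg[reg / 4] = val; }

static int model_cfg_write(int where, int size, uint32_t val)
{
	uint32_t reg = where & ~0x3;                /* containing dword */
	uint32_t reg_val;
	uint8_t *lane = (uint8_t *)&reg_val + (where & 0x3);

	if (size == 4) {
		apb_writel(val, reg);
	} else if (size == 2 || size == 1) {
		reg_val = apb_readl(reg);           /* read ...                */
		memcpy(lane, &val, size);           /* ... modify the lane ... */
		apb_writel(reg_val, reg);           /* ... write back          */
	} else {
		return -1;                          /* PCIBIOS_BAD_REGISTER_NUMBER */
	}
	return 0;
}

int main(void)
{
	model_cfg_write(0x04, 4, 0x12345678);       /* full dword write     */
	model_cfg_write(0x06, 2, 0x0000abcd);       /* 16-bit write via RMW */
	printf("dword at 0x04: 0x%08x\n", apb_readl(0x04));  /* 0xabcd5678 */
	return 0;
}

The same RMW happens for reads in reverse (read the dword, shift out the requested lane), which is harmless; only the write path can clobber adjacent write-1-to-clear bits.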
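
The new iproc_pcie_setup_ob() in pcie-iproc.c documents its outbound translation as axi_addr -> (axi_addr - axi_offset) -> OARR -> OMAP -> pci_address, splitting a MEM resource across at most MAX_NUM_OB_WINDOWS equally sized windows. The self-contained sketch below replays only that address arithmetic with made-up window parameters; the real driver programs the OARR_LO/HI and OMAP_LO/HI registers and the valid bit instead of printing, so treat this as an illustration of the split, not as driver code.

/*
 * Standalone model of the OARR/OMAP split performed by iproc_pcie_setup_ob().
 * All addresses and sizes below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define MAX_NUM_OB_WINDOWS 2

static int model_setup_ob(uint64_t axi_addr, uint64_t pci_addr, uint64_t size,
			  uint64_t axi_offset, uint64_t window_size)
{
	unsigned int i;

	/* same sanity checks as the driver */
	if (size > window_size * MAX_NUM_OB_WINDOWS || size % window_size)
		return -1;
	if (axi_addr < axi_offset)
		return -1;

	/* AXI address -> internal iProc PCIe address */
	axi_addr -= axi_offset;

	for (i = 0; i < MAX_NUM_OB_WINDOWS; i++) {
		printf("OARR%u = 0x%" PRIx64 "  OMAP%u = 0x%" PRIx64 "\n",
		       i, axi_addr, i, pci_addr);

		size -= window_size;
		if (size == 0)
			break;

		axi_addr += window_size;
		pci_addr += window_size;
	}
	return 0;
}

int main(void)
{
	/*
	 * Hypothetical 256 MB MEM resource at AXI 0x20000000, mapped to
	 * PCI 0x00000000, with a 0x18000000 AXI offset and two 128 MB windows.
	 */
	return model_setup_ob(0x20000000ULL, 0x0ULL, 0x10000000ULL,
			      0x18000000ULL, 0x08000000ULL);
}

Because the resource must be a whole multiple of the window size, an oversized or misaligned "ranges" entry in the device tree is rejected up front rather than silently truncated.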
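
Both the rcar and iproc conversions rely on the layout trick spelled out in the comment added to pcie-rcar.c above: when CONFIG_ARM is set, the controller's private structure keeps struct pci_sys_data as its first member, so the single sysdata pointer handed to the PCI core is valid both as a pci_sys_data pointer (for the ARM pcibios code) and as the driver's private pointer. The minimal sketch below demonstrates that first-member aliasing with stand-in types; sys_data_model and rcar_pcie_model are hypothetical names, not kernel structures.

/*
 * First-member aliasing: a pointer to a struct also points at its first
 * member, so one sysdata pointer serves two consumers.
 */
#include <assert.h>

struct sys_data_model {                 /* stand-in for struct pci_sys_data */
	int busnr;
};

struct rcar_pcie_model {                /* stand-in for the driver private struct */
	struct sys_data_model sys;      /* must stay the first member */
	int root_bus_nr;
};

int main(void)
{
	struct rcar_pcie_model pcie = { .sys = { .busnr = 0 }, .root_bus_nr = 0 };
	void *sysdata = &pcie;          /* what would be handed to the PCI core */

	/* ARM pcibios view and driver view of the same pointer */
	struct sys_data_model *sys = sysdata;
	struct rcar_pcie_model *drv = sysdata;

	assert((void *)sys == (void *)&pcie.sys);
	assert(drv == &pcie);
	assert(sys->busnr == drv->sys.busnr);
	return 0;
}

On non-ARM builds the #ifdef CONFIG_ARM member disappears and the sysdata pointer is simply the private structure itself, which is why the accessors in the diff switch on CONFIG_ARM when recovering it from bus->sysdata.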