author    Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 12:17:53 -0700
committer Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 15:44:42 -0700
commit    9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00
tree      1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/drivers/mmc/host
parent    98260f3884f4a202f9ca5eabed40b1354c489b29
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

    Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way and it's not good. We should apply
another opnfv project repo in future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/drivers/mmc/host')
-rw-r--r--  kernel/drivers/mmc/host/Kconfig | 777
-rw-r--r--  kernel/drivers/mmc/host/Makefile | 79
-rw-r--r--  kernel/drivers/mmc/host/android-goldfish.c | 568
-rw-r--r--  kernel/drivers/mmc/host/atmel-mci-regs.h | 171
-rw-r--r--  kernel/drivers/mmc/host/atmel-mci.c | 2585
-rw-r--r--  kernel/drivers/mmc/host/au1xmmc.c | 1240
-rw-r--r--  kernel/drivers/mmc/host/bfin_sdh.c | 682
-rw-r--r--  kernel/drivers/mmc/host/cb710-mmc.c | 780
-rw-r--r--  kernel/drivers/mmc/host/cb710-mmc.h | 104
-rw-r--r--  kernel/drivers/mmc/host/davinci_mmc.c | 1482
-rw-r--r--  kernel/drivers/mmc/host/dw_mmc-exynos.c | 559
-rw-r--r--  kernel/drivers/mmc/host/dw_mmc-exynos.h | 73
-rw-r--r--  kernel/drivers/mmc/host/dw_mmc-k3.c | 97
-rw-r--r--  kernel/drivers/mmc/host/dw_mmc-pci.c | 122
-rw-r--r--  kernel/drivers/mmc/host/dw_mmc-pltfm.c | 140
-rw-r--r--  kernel/drivers/mmc/host/dw_mmc-pltfm.h | 20
-rw-r--r--  kernel/drivers/mmc/host/dw_mmc-rockchip.c | 157
-rw-r--r--  kernel/drivers/mmc/host/dw_mmc.c | 3042
-rw-r--r--  kernel/drivers/mmc/host/dw_mmc.h | 291
-rw-r--r--  kernel/drivers/mmc/host/jz4740_mmc.c | 1172
-rw-r--r--  kernel/drivers/mmc/host/mmc_spi.c | 1531
-rw-r--r--  kernel/drivers/mmc/host/mmci.c | 1922
-rw-r--r--  kernel/drivers/mmc/host/mmci.h | 247
-rw-r--r--  kernel/drivers/mmc/host/mmci_qcom_dml.c | 177
-rw-r--r--  kernel/drivers/mmc/host/mmci_qcom_dml.h | 31
-rw-r--r--  kernel/drivers/mmc/host/moxart-mmc.c | 728
-rw-r--r--  kernel/drivers/mmc/host/mvsdio.c | 876
-rw-r--r--  kernel/drivers/mmc/host/mvsdio.h | 190
-rw-r--r--  kernel/drivers/mmc/host/mxcmmc.c | 1246
-rw-r--r--  kernel/drivers/mmc/host/mxs-mmc.c | 745
-rw-r--r--  kernel/drivers/mmc/host/of_mmc_spi.c | 159
-rw-r--r--  kernel/drivers/mmc/host/omap.c | 1505
-rw-r--r--  kernel/drivers/mmc/host/omap_hsmmc.c | 2344
-rw-r--r--  kernel/drivers/mmc/host/pxamci.c | 896
-rw-r--r--  kernel/drivers/mmc/host/pxamci.h | 90
-rw-r--r--  kernel/drivers/mmc/host/rtsx_pci_sdmmc.c | 1498
-rw-r--r--  kernel/drivers/mmc/host/rtsx_usb_sdmmc.c | 1463
-rw-r--r--  kernel/drivers/mmc/host/s3cmci.c | 1889
-rw-r--r--  kernel/drivers/mmc/host/s3cmci.h | 80
-rw-r--r--  kernel/drivers/mmc/host/sdhci-acpi.c | 473
-rw-r--r--  kernel/drivers/mmc/host/sdhci-bcm-kona.c | 339
-rw-r--r--  kernel/drivers/mmc/host/sdhci-bcm2835.c | 202
-rw-r--r--  kernel/drivers/mmc/host/sdhci-cns3xxx.c | 115
-rw-r--r--  kernel/drivers/mmc/host/sdhci-dove.c | 132
-rw-r--r--  kernel/drivers/mmc/host/sdhci-esdhc-imx.c | 1155
-rw-r--r--  kernel/drivers/mmc/host/sdhci-esdhc.h | 50
-rw-r--r--  kernel/drivers/mmc/host/sdhci-iproc.c | 241
-rw-r--r--  kernel/drivers/mmc/host/sdhci-msm.c | 593
-rw-r--r--  kernel/drivers/mmc/host/sdhci-of-arasan.c | 229
-rw-r--r--  kernel/drivers/mmc/host/sdhci-of-esdhc.c | 412
-rw-r--r--  kernel/drivers/mmc/host/sdhci-of-hlwd.c | 98
-rw-r--r--  kernel/drivers/mmc/host/sdhci-pci-data.c | 5
-rw-r--r--  kernel/drivers/mmc/host/sdhci-pci-o2micro.c | 395
-rw-r--r--  kernel/drivers/mmc/host/sdhci-pci-o2micro.h | 75
-rw-r--r--  kernel/drivers/mmc/host/sdhci-pci.c | 1708
-rw-r--r--  kernel/drivers/mmc/host/sdhci-pci.h | 89
-rw-r--r--  kernel/drivers/mmc/host/sdhci-pltfm.c | 276
-rw-r--r--  kernel/drivers/mmc/host/sdhci-pltfm.h | 122
-rw-r--r--  kernel/drivers/mmc/host/sdhci-pxav2.c | 269
-rw-r--r--  kernel/drivers/mmc/host/sdhci-pxav3.c | 595
-rw-r--r--  kernel/drivers/mmc/host/sdhci-s3c-regs.h | 87
-rw-r--r--  kernel/drivers/mmc/host/sdhci-s3c.c | 777
-rw-r--r--  kernel/drivers/mmc/host/sdhci-sirf.c | 252
-rw-r--r--  kernel/drivers/mmc/host/sdhci-spear.c | 215
-rw-r--r--  kernel/drivers/mmc/host/sdhci-st.c | 512
-rw-r--r--  kernel/drivers/mmc/host/sdhci-tegra.c | 327
-rw-r--r--  kernel/drivers/mmc/host/sdhci.c | 3590
-rw-r--r--  kernel/drivers/mmc/host/sdhci.h | 663
-rw-r--r--  kernel/drivers/mmc/host/sdhci_f_sdh30.c | 237
-rw-r--r--  kernel/drivers/mmc/host/sdricoh_cs.c | 552
-rw-r--r--  kernel/drivers/mmc/host/sh_mmcif.c | 1571
-rw-r--r--  kernel/drivers/mmc/host/sh_mobile_sdhi.c | 395
-rw-r--r--  kernel/drivers/mmc/host/sunxi-mmc.c | 1089
-rw-r--r--  kernel/drivers/mmc/host/tifm_sd.c | 1091
-rw-r--r--  kernel/drivers/mmc/host/tmio_mmc.c | 164
-rw-r--r--  kernel/drivers/mmc/host/tmio_mmc.h | 209
-rw-r--r--  kernel/drivers/mmc/host/tmio_mmc_dma.c | 356
-rw-r--r--  kernel/drivers/mmc/host/tmio_mmc_pio.c | 1276
-rw-r--r--  kernel/drivers/mmc/host/toshsd.c | 708
-rw-r--r--  kernel/drivers/mmc/host/toshsd.h | 176
-rw-r--r--  kernel/drivers/mmc/host/usdhi6rol0.c | 1846
-rw-r--r--  kernel/drivers/mmc/host/ushc.c | 569
-rw-r--r--  kernel/drivers/mmc/host/via-sdmmc.c | 1339
-rw-r--r--  kernel/drivers/mmc/host/vub300.c | 2488
-rw-r--r--  kernel/drivers/mmc/host/wbsd.c | 2013
-rw-r--r--  kernel/drivers/mmc/host/wbsd.h | 185
-rw-r--r--  kernel/drivers/mmc/host/wmt-sdmmc.c | 1004
87 files changed, 63022 insertions, 0 deletions
diff --git a/kernel/drivers/mmc/host/Kconfig b/kernel/drivers/mmc/host/Kconfig
new file mode 100644
index 000000000..b1f837e74
--- /dev/null
+++ b/kernel/drivers/mmc/host/Kconfig
@@ -0,0 +1,777 @@
+#
+# MMC/SD host controller drivers
+#
+
+comment "MMC/SD/SDIO Host Controller Drivers"
+
+config MMC_ARMMMCI
+ tristate "ARM AMBA Multimedia Card Interface support"
+ depends on ARM_AMBA
+ help
+ This selects the ARM(R) AMBA(R) PrimeCell Multimedia Card
+ Interface (PL180 and PL181) support. If you have an ARM(R)
+ platform with a Multimedia Card slot, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_QCOM_DML
+ tristate "Qualcomm Data Mover for SD Card Controller"
+ depends on MMC_ARMMMCI && QCOM_BAM_DMA
+ default y
+ help
+ This selects the Qualcomm Data Mover lite/local on SD Card controller.
+ This option enables DMA to work correctly; if you are using Qcom
+ SoCs and MMC, you will probably need this option to get DMA working.
+
+ If unsure, say N.
+
+config MMC_PXA
+ tristate "Intel PXA25x/26x/27x Multimedia Card Interface support"
+ depends on ARCH_PXA
+ help
+ This selects the Intel(R) PXA(R) Multimedia card Interface.
+ If you have a PXA(R) platform with a Multimedia Card slot,
+ say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI
+ tristate "Secure Digital Host Controller Interface support"
+ depends on HAS_DMA
+ help
+ This selects the generic Secure Digital Host Controller Interface.
+ It is used by manufacturers such as Texas Instruments(R), Ricoh(R)
+ and Toshiba(R). Most controllers found in laptops are of this type.
+
+ If you have a controller with this interface, say Y or M here. You
+ also need to enable an appropriate bus interface.
+
+ If unsure, say N.
+
+config MMC_SDHCI_IO_ACCESSORS
+ bool
+ depends on MMC_SDHCI
+ help
+ This is a silent Kconfig symbol that is selected by drivers that
+ need to override the SDHCI I/O memory accessors.
+
+config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+ bool
+ depends on MMC_SDHCI
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This option is selected by drivers running on big endian hosts
+ and performing I/O to an SDHCI controller through a bus that
+ implements a hardware byte swapper using a 32-bit datum.
+ This endian mapping mode is called "data invariance" and
+ has the effect of scrambling the addresses and formats of data
+ accessed in sizes other than the datum size.
+
+ This is the case for the Freescale eSDHC and Nintendo Wii SDHCI.
+
+config MMC_SDHCI_PCI
+ tristate "SDHCI support on PCI bus"
+ depends on MMC_SDHCI && PCI
+ help
+ This selects the PCI Secure Digital Host Controller Interface.
+ Most controllers found today are PCI devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_RICOH_MMC
+ bool "Ricoh MMC Controller Disabler"
+ depends on MMC_SDHCI_PCI
+ default y
+ help
+ This adds a PCI quirk to disable the Ricoh MMC controller. This
+ proprietary controller is unnecessary because the SDHCI driver
+ supports MMC cards on the SD controller, but if it is not
+ disabled, it will steal the MMC cards away - rendering them
+ useless. It is safe to select this even if you don't
+ have a Ricoh based card reader.
+
+ If unsure, say Y.
+
+config MMC_SDHCI_ACPI
+ tristate "SDHCI support for ACPI enumerated SDHCI controllers"
+ depends on MMC_SDHCI && ACPI
+ help
+ This selects support for ACPI enumerated SDHCI controllers,
+ identified by ACPI Compatibility ID PNP0D40 or specific
+ ACPI Hardware IDs.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_PLTFM
+ tristate "SDHCI platform and OF driver helper"
+ depends on MMC_SDHCI
+ help
+ This selects the common helper functions support for Secure Digital
+ Host Controller Interface based platform and OF drivers.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_OF_ARASAN
+ tristate "SDHCI OF support for the Arasan SDHCI controllers"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ help
+ This selects the Arasan Secure Digital Host Controller Interface
+ (SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_OF_ESDHC
+ tristate "SDHCI OF support for the Freescale eSDHC controller"
+ depends on MMC_SDHCI_PLTFM
+ depends on PPC
+ select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+ help
+ This selects the Freescale eSDHC controller support.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_OF_HLWD
+ tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers"
+ depends on MMC_SDHCI_PLTFM
+ depends on PPC
+ select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ found in the "Hollywood" chipset of the Nintendo Wii video game
+ console.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_CNS3XXX
+ tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
+ depends on ARCH_CNS3XXX
+ depends on MMC_SDHCI_PLTFM
+ help
+ This selects the SDHCI support for CNS3xxx System-on-Chip devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_ESDHC_IMX
+ tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller"
+ depends on ARCH_MXC
+ depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Freescale eSDHC/uSDHC controller support
+ found on i.MX25, i.MX35, i.MX5x and i.MX6x.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_DOVE
+ tristate "SDHCI support on Marvell's Dove SoC"
+ depends on ARCH_DOVE || MACH_DOVE
+ depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Secure Digital Host Controller Interface in
+ Marvell's Dove SoC.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_TEGRA
+ tristate "SDHCI platform support for the Tegra SD/MMC Controller"
+ depends on ARCH_TEGRA
+ depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Tegra SD/MMC controller. If you have a Tegra
+ platform with SD or MMC devices, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_S3C
+ tristate "SDHCI support on Samsung S3C SoC"
+ depends on MMC_SDHCI && PLAT_SAMSUNG
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ often referred to as the HSMMC block in some of the Samsung S3C
+ range of SoCs.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_SIRF
+ tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs"
+ depends on ARCH_SIRF
+ depends on MMC_SDHCI_PLTFM
+ help
+ This selects the SDHCI support for SiRF System-on-Chip devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_PXAV3
+ tristate "Marvell MMP2 SD Host Controller support (PXAV3)"
+ depends on CLKDEV_LOOKUP
+ depends on MMC_SDHCI_PLTFM
+ depends on ARCH_BERLIN || ARCH_MMP || ARCH_MVEBU || COMPILE_TEST
+ default CPU_MMP2
+ help
+ This selects the Marvell(R) PXAV3 SD Host Controller.
+ If you have a MMP2 platform with SD Host Controller
+ and a card slot, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_PXAV2
+ tristate "Marvell PXA9XX SD Host Controller support (PXAV2)"
+ depends on CLKDEV_LOOKUP
+ depends on MMC_SDHCI_PLTFM
+ depends on ARCH_MMP || COMPILE_TEST
+ default CPU_PXA910
+ help
+ This selects the Marvell(R) PXAV2 SD Host Controller.
+ If you have a PXA9XX platform with SD Host Controller
+ and a card slot, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_SPEAR
+ tristate "SDHCI support on ST SPEAr platform"
+ depends on MMC_SDHCI && PLAT_SPEAR
+ depends on OF
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ often referred to as the HSMMC block in some of the ST SPEAR range
+ of SoCs.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_S3C_DMA
+ bool "DMA support on S3C SDHCI"
+ depends on MMC_SDHCI_S3C
+ help
+ Enable DMA support on the Samsung S3C SDHCI glue. The DMA
+ has proved to be problematic if the controller encounters
+ certain errors, and thus should be treated with care.
+
+ YMMV.
+
+config MMC_SDHCI_BCM_KONA
+ tristate "SDHCI support on Broadcom KONA platform"
+ depends on ARCH_BCM_MOBILE
+ depends on MMC_SDHCI_PLTFM
+ help
+ This selects the Broadcom Kona Secure Digital Host Controller
+ Interface (SDHCI) support.
+ This is used in Broadcom mobile SoCs.
+
+ If you have a controller with this interface, say Y or M here.
+
+config MMC_SDHCI_BCM2835
+ tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
+ depends on ARCH_BCM2835
+ depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the BCM2835 SD/MMC controller. If you have a BCM2835
+ platform with SD or MMC devices, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_F_SDH30
+ tristate "SDHCI support for Fujitsu Semiconductor F_SDH30"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ needed by some Fujitsu SoCs for MMC / SD / SDIO support.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_IPROC
+ tristate "SDHCI platform support for the iProc SD/MMC Controller"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on MMC_SDHCI_PLTFM
+ default ARCH_BCM_IPROC
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the iProc SD/MMC controller.
+
+ If you have an IPROC platform with SD or MMC devices,
+ say Y or M here.
+
+ If unsure, say N.
+
+config MMC_MOXART
+ tristate "MOXART SD/MMC Host Controller support"
+ depends on ARCH_MOXART && MMC
+ help
+ This selects support for the MOXART SD/MMC Host Controller.
+ MOXA provides one multi-functional card reader which can
+ be found on some embedded hardware such as UC-7112-LX.
+ If you have a controller with this interface, say Y here.
+
+config MMC_SDHCI_ST
+ tristate "SDHCI support on STMicroelectronics SoC"
+ depends on ARCH_STI
+ depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Secure Digital Host Controller Interface in
+ STMicroelectronics SoCs.
+
+ If you have a controller with this interface, say Y or M here.
+ If unsure, say N.
+
+config MMC_OMAP
+ tristate "TI OMAP Multimedia Card Interface support"
+ depends on ARCH_OMAP
+ depends on TPS65010 || !MACH_OMAP_H2
+ help
+ This selects the TI OMAP Multimedia card Interface.
+ If you have an OMAP board with a Multimedia Card slot,
+ say Y or M here.
+
+ If unsure, say N.
+
+config MMC_OMAP_HS
+ tristate "TI OMAP High Speed Multimedia Card Interface support"
+ depends on HAS_DMA
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ help
+ This selects the TI OMAP High Speed Multimedia card Interface.
+ If you have an omap2plus board with a Multimedia Card slot,
+ say Y or M here.
+
+ If unsure, say N.
+
+config MMC_WBSD
+ tristate "Winbond W83L51xD SD/MMC Card Interface support"
+ depends on ISA_DMA_API
+ help
+ This selects the Winbond(R) W83L51xD Secure digital and
+ Multimedia card Interface.
+ If you have a machine with an integrated W83L518D or W83L519D
+ SD/MMC card reader, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_AU1X
+ tristate "Alchemy AU1XX0 MMC Card Interface support"
+ depends on MIPS_ALCHEMY
+ help
+ This selects the AMD Alchemy(R) Multimedia card interface.
+ If you have an Alchemy platform with an MMC slot, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_ATMELMCI
+ tristate "Atmel SD/MMC Driver (Multimedia Card Interface)"
+ depends on AVR32 || ARCH_AT91
+ help
+ This selects the Atmel Multimedia Card Interface driver. If
+ you have an AT32 (AVR32) or AT91 platform with a Multimedia
+ Card slot, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_MSM
+ tristate "Qualcomm SDHCI Controller Support"
+ depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ depends on MMC_SDHCI_PLTFM
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ support present in Qualcomm SOCs. The controller supports
+ SD/MMC/SDIO devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_MXC
+ tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support"
+ depends on ARCH_MXC || PPC_MPC512x
+ help
+ This selects the Freescale i.MX21, i.MX27, i.MX31 or MPC512x
+ Multimedia Card Interface. If you have an i.MX or MPC512x platform
+ with a Multimedia Card slot, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_MXS
+ tristate "Freescale MXS Multimedia Card Interface support"
+ depends on ARCH_MXS && MXS_DMA
+ help
+ This selects the Freescale SSP MMC controller found on MXS based
+ platforms like mx23/28.
+
+ If unsure, say N.
+
+config MMC_TIFM_SD
+ tristate "TI Flash Media MMC/SD Interface support"
+ depends on PCI
+ select TIFM_CORE
+ help
+ Say Y here if you want to be able to access MMC/SD cards with
+ the Texas Instruments(R) Flash Media card reader, found in many
+ laptops.
+ This option 'selects' (turns on, enables) 'TIFM_CORE', but you
+ probably also need an appropriate card reader host adapter, such as
+ 'Misc devices: TI Flash Media PCI74xx/PCI76xx host adapter support
+ (TIFM_7XX1)'.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tifm_sd.
+
+config MMC_MVSDIO
+ tristate "Marvell MMC/SD/SDIO host driver"
+ depends on PLAT_ORION
+ ---help---
+ This selects the Marvell SDIO host driver.
+ SDIO may currently be found on the Kirkwood 88F6281 and 88F6192
+ SoC controllers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mvsdio.
+
+config MMC_DAVINCI
+ tristate "TI DAVINCI Multimedia Card Interface support"
+ depends on ARCH_DAVINCI
+ help
+ This selects the TI DAVINCI Multimedia card Interface.
+ If you have a DAVINCI board with a Multimedia Card slot,
+ say Y or M here. If unsure, say N.
+
+config MMC_GOLDFISH
+ tristate "goldfish qemu Multimedia Card Interface support"
+ depends on GOLDFISH
+ help
+ This selects the emulated Multimedia Card Interface found on the
+ Goldfish Android virtual device.
+
+config MMC_SPI
+ tristate "MMC/SD/SDIO over SPI"
+ depends on SPI_MASTER && !HIGHMEM && HAS_DMA
+ select CRC7
+ select CRC_ITU_T
+ help
+ Some systems access MMC/SD/SDIO cards using an SPI controller
+ instead of using a "native" MMC/SD/SDIO controller. This has the
+ disadvantage of relatively high overhead, but the compensating
+ advantage of working on many systems without dedicated MMC/SD/SDIO
+ controllers.
+
+ If unsure, or if your system has no SPI master driver, say N.
+
+config MMC_S3C
+ tristate "Samsung S3C SD/MMC Card Interface support"
+ depends on ARCH_S3C24XX
+ depends on S3C24XX_DMAC
+ help
+ This selects a driver for the MCI interface found in
+ Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs.
+ If you have a board based on one of those and an MMC/SD
+ slot, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_S3C_HW_SDIO_IRQ
+ bool "Hardware support for SDIO IRQ"
+ depends on MMC_S3C
+ help
+ Enable the hardware support for SDIO interrupts instead of using
+ the generic polling code.
+
+choice
+ prompt "Samsung S3C SD/MMC transfer code"
+ depends on MMC_S3C
+
+config MMC_S3C_PIO
+ bool "Use PIO transfers only"
+ help
+ Use PIO to transfer data between memory and the hardware.
+
+ PIO is slower than DMA as it requires CPU instructions to
+ move the data. This has been the traditional default for
+ the S3C MCI driver.
+
+config MMC_S3C_DMA
+ bool "Use DMA transfers only"
+ help
+ Use DMA to transfer data between memory and the hardware.
+
+ Currently, the DMA support in this driver seems to not be
+ working properly and needs to be debugged before this
+ option is useful.
+
+endchoice
+
+config MMC_SDRICOH_CS
+ tristate "MMC/SD driver for Ricoh Bay1Controllers"
+ depends on PCI && PCMCIA
+ help
+ Say Y here if your notebook reports a Ricoh Bay1Controller PCMCIA
+ card whenever you insert an MMC or SD card into the card slot.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sdricoh_cs.
+
+config MMC_TMIO_CORE
+ tristate
+
+config MMC_TMIO
+ tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
+ depends on MFD_TMIO || MFD_ASIC3
+ select MMC_TMIO_CORE
+ help
+ This provides support for the SD/MMC cell found in TC6393XB,
+ T7L66XB and also HTC ASIC3.
+
+config MMC_SDHI
+ tristate "SH-Mobile SDHI SD/SDIO controller support"
+ depends on SUPERH || ARM
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ select MMC_TMIO_CORE
+ help
+ This provides support for the SDHI SD/SDIO controller found in
+ SuperH and ARM SH-Mobile SoCs.
+
+config MMC_CB710
+ tristate "ENE CB710 MMC/SD Interface support"
+ depends on PCI
+ select CB710_CORE
+ help
+ This option enables support for MMC/SD part of ENE CB710/720 Flash
+ memory card reader found in some laptops (e.g. some versions of
+ HP Compaq nx9500).
+
+ This driver can also be built as a module. If so, the module
+ will be called cb710-mmc.
+
+config MMC_VIA_SDMMC
+ tristate "VIA SD/MMC Card Reader Driver"
+ depends on PCI
+ help
+ This selects the VIA SD/MMC Card Reader driver. VIA provides one
+ multi-functional card reader which is integrated into some
+ motherboards manufactured by VIA. This card reader supports
+ SD/MMC/SDHC.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config SDH_BFIN
+ tristate "Blackfin Secure Digital Host support"
+ depends on (BF54x && !BF544) || (BF51x && !BF512)
+ help
+ If you say yes here you will get support for the Blackfin on-chip
+ Secure Digital Host interface. This includes support for MMC and
+ SD cards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bfin_sdh.
+
+ If unsure, say N.
+
+config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
+ bool "Blackfin EZkit Missing SDH_CMD Pull Up Resistor Workaround"
+ depends on SDH_BFIN
+ help
+ If you say yes here, SD cards may work on the EZkit.
+
+config MMC_DW
+ tristate "Synopsys DesignWare Memory Card Interface"
+ depends on HAS_DMA
+ depends on ARC || ARM || ARM64 || MIPS || COMPILE_TEST
+ help
+ This selects support for the Synopsys DesignWare Mobile Storage IP
+ block. It provides host support for SD and MMC interfaces, in both
+ PIO and external DMA modes.
+
+config MMC_DW_IDMAC
+ bool "Internal DMAC interface"
+ depends on MMC_DW
+ help
+ This selects support for the internal DMAC block within the Synopsys
+ Designware Mobile Storage IP block. This disables the external DMA
+ interface.
+
+config MMC_DW_PLTFM
+ tristate "Synopsys Designware MCI Support as platform device"
+ depends on MMC_DW
+ default y
+ help
+ This selects the common helper functions support for Host Controller
+ Interface based platform driver. Please select this option if the IP
+ is present as a platform device. This is the common interface for the
+ Synopsys Designware IP.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say Y.
+
+config MMC_DW_EXYNOS
+ tristate "Exynos specific extensions for Synopsys DW Memory Card Interface"
+ depends on MMC_DW
+ select MMC_DW_PLTFM
+ help
+ This selects support for Samsung Exynos SoC specific extensions to the
+ Synopsys DesignWare Memory Card Interface driver. Select this option
+ for platforms based on Exynos4 and Exynos5 SoCs.
+
+config MMC_DW_K3
+ tristate "K3 specific extensions for Synopsys DW Memory Card Interface"
+ depends on MMC_DW
+ select MMC_DW_PLTFM
+ select MMC_DW_IDMAC
+ help
+ This selects support for Hisilicon K3 SoC specific extensions to the
+ Synopsys DesignWare Memory Card Interface driver. Select this option
+ for platforms based on Hisilicon K3 SoCs.
+
+config MMC_DW_PCI
+ tristate "Synopsys Designware MCI support on PCI bus"
+ depends on MMC_DW && PCI
+ help
+ This selects the PCI bus for the Synopsys Designware Mobile Storage IP.
+ Select this option if the IP is present on a PCI platform.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_DW_ROCKCHIP
+ tristate "Rockchip specific extensions for Synopsys DW Memory Card Interface"
+ depends on MMC_DW && ARCH_ROCKCHIP
+ select MMC_DW_PLTFM
+ help
+ This selects support for Rockchip SoC specific extensions to the
+ Synopsys DesignWare Memory Card Interface driver. Select this option
+ for platforms based on RK3066, RK3188 and RK3288 SoCs.
+
+config MMC_SH_MMCIF
+ tristate "SuperH Internal MMCIF support"
+ depends on MMC_BLOCK && HAS_DMA
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ help
+ This selects the MMC Host Interface controller (MMCIF).
+
+ This driver supports MMCIF in sh7724/sh7757/sh7372.
+
+config MMC_JZ4740
+ tristate "JZ4740 SD/Multimedia Card Interface support"
+ depends on MACH_JZ4740
+ help
+ This selects support for the SD/MMC controller on Ingenic JZ4740
+ SoCs.
+ If you have a board based on such a SoC with an SD/MMC slot,
+ say Y or M here.
+
+config MMC_VUB300
+ tristate "VUB300 USB to SDIO/SD/MMC Host Controller support"
+ depends on USB
+ help
+ This selects support for Elan Digital Systems' VUB300 chip.
+
+ The VUB300 is a USB-SDIO Host Controller Interface chip
+ that enables the host computer to use SDIO/SD/MMC cards
+ via a USB 2.0 or USB 1.1 host.
+
+ The VUB300 chip will be found in both physically separate
+ USB to SDIO/SD/MMC adapters and embedded on some motherboards.
+
+ The VUB300 chip supports SD and MMC memory cards in addition
+ to single and multifunction SDIO cards.
+
+ Some SDIO cards will need a firmware file to be loaded and
+ sent to the VUB300 chip in order to achieve better data throughput.
+ Download these "Offload Pseudocode" from Elan Digital Systems'
+ web-site http://www.elandigitalsystems.com/support/downloads.php
+ and put them in /lib/firmware. Note that without these additional
+ firmware files the VUB300 chip will still function, but not at
+ the best obtainable data rate.
+
+ To compile this mmc host controller driver as a module,
+ choose M here: the module will be called vub300.
+
+ If you have a computer with an embedded VUB300 chip
+ or if you intend to connect a USB adapter based on a
+ VUB300 chip say Y or M here.
+
+config MMC_USHC
+ tristate "USB SD Host Controller (USHC) support"
+ depends on USB
+ help
+ This selects support for USB SD Host Controllers based on
+ the Cypress Astoria chip with firmware compliant with CSR's
+ USB SD Host Controller specification (CS-118793-SP).
+
+ CSR boards with this device include: USB<>SDIO (M1985v2),
+ and Ultrasira.
+
+ Note: These controllers only support SDIO cards and do not
+ support MMC or SD memory cards.
+
+config MMC_WMT
+ tristate "Wondermedia SD/MMC Host Controller support"
+ depends on ARCH_VT8500
+ default y
+ help
+ This selects support for the SD/MMC Host Controller on
+ Wondermedia WM8505/WM8650 based SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wmt-sdmmc.
+
+config MMC_USDHI6ROL0
+ tristate "Renesas USDHI6ROL0 SD/SDIO Host Controller support"
+ depends on HAS_DMA
+ help
+ This selects support for the Renesas USDHI6ROL0 SD/SDIO
+ Host Controller.
+
+config MMC_REALTEK_PCI
+ tristate "Realtek PCI-E SD/MMC Card Interface Driver"
+ depends on MFD_RTSX_PCI
+ help
+ Say Y here to include driver code to support the SD/MMC card
+ interface of the Realtek PCI-E card reader.
+
+config MMC_REALTEK_USB
+ tristate "Realtek USB SD/MMC Card Interface Driver"
+ depends on MFD_RTSX_USB
+ help
+ Say Y here to include driver code to support the SD/MMC card
+ interface of Realtek RTS5129/39 series card readers.
+
+config MMC_SUNXI
+ tristate "Allwinner sunxi SD/MMC Host Controller support"
+ depends on ARCH_SUNXI
+ help
+ This selects support for the SD/MMC Host Controller on
+ Allwinner sunxi SoCs.
+
+config MMC_TOSHIBA_PCI
+ tristate "Toshiba Type A SD/MMC Card Interface Driver"
+ depends on PCI
+ help
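
A note on the MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER option above: on a
"data invariant" bus such as the Freescale eSDHC or Nintendo Wii SDHCI,
a sub-word access must fetch the containing big-endian 32-bit word and
shift the addressed bytes into place. A minimal sketch of such an
accessor (illustrative only, not code from this tree; it assumes a
register window already mapped with ioremap and the helpers from
<linux/io.h>):

	/* Illustrative only: read a 16-bit register through a bus that
	 * byte-swaps each 32-bit word ("data invariance"). */
	static u16 example_be_readw(void __iomem *ioaddr, int reg)
	{
		int base = reg & ~0x3;       /* start of containing 32-bit word */
		int shift = (reg & 0x2) * 8; /* bit offset of the half-word */

		return ioread32be(ioaddr + base) >> shift;
	}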
diff --git a/kernel/drivers/mmc/host/Makefile b/kernel/drivers/mmc/host/Makefile
new file mode 100644
index 000000000..e3ab5b968
--- /dev/null
+++ b/kernel/drivers/mmc/host/Makefile
@@ -0,0 +1,79 @@
+#
+# Makefile for MMC/SD host controller drivers
+#
+
+obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
+obj-$(CONFIG_MMC_QCOM_DML) += mmci_qcom_dml.o
+obj-$(CONFIG_MMC_PXA) += pxamci.o
+obj-$(CONFIG_MMC_MXC) += mxcmmc.o
+obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
+obj-$(CONFIG_MMC_SDHCI) += sdhci.o
+obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
+obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
+obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-o2micro.o
+obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
+obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
+obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
+obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
+obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o
+obj-$(CONFIG_MMC_SDHCI_F_SDH30) += sdhci_f_sdh30.o
+obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
+obj-$(CONFIG_MMC_WBSD) += wbsd.o
+obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
+obj-$(CONFIG_MMC_OMAP) += omap.o
+obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o
+obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
+obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
+obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
+obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o
+obj-$(CONFIG_MMC_GOLDFISH) += android-goldfish.o
+obj-$(CONFIG_MMC_SPI) += mmc_spi.o
+ifeq ($(CONFIG_OF),y)
+obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o
+endif
+obj-$(CONFIG_MMC_S3C) += s3cmci.o
+obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
+obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
+obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
+tmio_mmc_core-y := tmio_mmc_pio.o
+tmio_mmc_core-$(subst m,y,$(CONFIG_MMC_SDHI)) += tmio_mmc_dma.o
+obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
+obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
+obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
+obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
+obj-$(CONFIG_MMC_DW) += dw_mmc.o
+obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o
+obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o
+obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o
+obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
+obj-$(CONFIG_MMC_DW_ROCKCHIP) += dw_mmc-rockchip.o
+obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
+obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
+obj-$(CONFIG_MMC_VUB300) += vub300.o
+obj-$(CONFIG_MMC_USHC) += ushc.o
+obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
+obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o
+obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
+obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
+obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o
+
+obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
+obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
+
+obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
+obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
+obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
+obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
+obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
+obj-$(CONFIG_MMC_SDHCI_OF_ARASAN) += sdhci-of-arasan.o
+obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
+obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
+obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
+obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o
+obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
+obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
+obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
+
+ifeq ($(CONFIG_CB710_DEBUG),y)
+ CFLAGS_cb710-mmc.o += -DDEBUG
+endif
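
A note on the $(subst m,y,...) lines above: substituting "m" with "y"
turns a would-be obj-m entry into obj-y, so sdhci-pci-data.o and
sdhci-pci-o2micro.o are built into the kernel whenever
CONFIG_MMC_SDHCI_PCI is enabled at all, even as a module; the same idiom
pulls tmio_mmc_dma.o into the tmio_mmc_core composite object when
CONFIG_MMC_SDHI is y or m. A minimal illustration, using a hypothetical
CONFIG_FOO:

	# CONFIG_FOO=y     -> obj-y += foo-helper.o (built in)
	# CONFIG_FOO=m     -> $(subst m,y,m) expands to y, still obj-y
	# CONFIG_FOO unset -> obj- += foo-helper.o (not built at all)
	obj-$(subst m,y,$(CONFIG_FOO)) += foo-helper.o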
diff --git a/kernel/drivers/mmc/host/android-goldfish.c b/kernel/drivers/mmc/host/android-goldfish.c
new file mode 100644
index 000000000..8b4e20a3f
--- /dev/null
+++ b/kernel/drivers/mmc/host/android-goldfish.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright 2007, Google Inc.
+ * Copyright 2012, Intel Inc.
+ *
+ * based on omap.c driver, which was
+ * Copyright (C) 2004 Nokia Corporation
+ * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
+ * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
+ * Other hacks (DMA, SD, etc) by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/major.h>
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/scatterlist.h>
+
+#include <asm/types.h>
+#include <asm/uaccess.h>
+
+#define DRIVER_NAME "goldfish_mmc"
+
+#define BUFFER_SIZE 16384
+
+#define GOLDFISH_MMC_READ(host, addr) (readl(host->reg_base + addr))
+#define GOLDFISH_MMC_WRITE(host, addr, x) (writel(x, host->reg_base + addr))
+
+enum {
+ /* status register */
+ MMC_INT_STATUS = 0x00,
+ /* set this to enable IRQ */
+ MMC_INT_ENABLE = 0x04,
+ /* set this to specify buffer address */
+ MMC_SET_BUFFER = 0x08,
+
+ /* MMC command number */
+ MMC_CMD = 0x0C,
+
+ /* MMC argument */
+ MMC_ARG = 0x10,
+
+ /* MMC response (or R2 bits 0 - 31) */
+ MMC_RESP_0 = 0x14,
+
+ /* MMC R2 response bits 32 - 63 */
+ MMC_RESP_1 = 0x18,
+
+ /* MMC R2 response bits 64 - 95 */
+ MMC_RESP_2 = 0x1C,
+
+ /* MMC R2 response bits 96 - 127 */
+ MMC_RESP_3 = 0x20,
+
+ MMC_BLOCK_LENGTH = 0x24,
+ MMC_BLOCK_COUNT = 0x28,
+
+ /* MMC state flags */
+ MMC_STATE = 0x2C,
+
+ /* MMC_INT_STATUS bits */
+
+ MMC_STAT_END_OF_CMD = 1U << 0,
+ MMC_STAT_END_OF_DATA = 1U << 1,
+ MMC_STAT_STATE_CHANGE = 1U << 2,
+ MMC_STAT_CMD_TIMEOUT = 1U << 3,
+
+ /* MMC_STATE bits */
+ MMC_STATE_INSERTED = 1U << 0,
+ MMC_STATE_READ_ONLY = 1U << 1,
+};
+
+/*
+ * Command types
+ */
+#define OMAP_MMC_CMDTYPE_BC 0
+#define OMAP_MMC_CMDTYPE_BCR 1
+#define OMAP_MMC_CMDTYPE_AC 2
+#define OMAP_MMC_CMDTYPE_ADTC 3
+
+
+struct goldfish_mmc_host {
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct mmc_host *mmc;
+ struct device *dev;
+ unsigned char id; /* 16xx chips have 2 MMC blocks */
+ void __iomem *virt_base;
+ unsigned int phys_base;
+ int irq;
+ unsigned char bus_mode;
+ unsigned char hw_bus_mode;
+
+ unsigned int sg_len;
+ unsigned dma_done:1;
+ unsigned dma_in_use:1;
+
+ void __iomem *reg_base;
+};
+
+static inline int
+goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host)
+{
+ return 0;
+}
+
+static ssize_t
+goldfish_mmc_show_cover_switch(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct goldfish_mmc_host *host = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" :
+ "closed");
+}
+
+static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL);
+
+static void
+goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd)
+{
+ u32 cmdreg;
+ u32 resptype;
+ u32 cmdtype;
+
+ host->cmd = cmd;
+
+ resptype = 0;
+ cmdtype = 0;
+
+ /* Our hardware needs to know exact type */
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ break;
+ case MMC_RSP_R1:
+ case MMC_RSP_R1B:
+ /* resp 1, 1b, 6, 7 */
+ resptype = 1;
+ break;
+ case MMC_RSP_R2:
+ resptype = 2;
+ break;
+ case MMC_RSP_R3:
+ resptype = 3;
+ break;
+ default:
+ dev_err(mmc_dev(host->mmc),
+ "Invalid response type: %04x\n", mmc_resp_type(cmd));
+ break;
+ }
+
+ if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
+ cmdtype = OMAP_MMC_CMDTYPE_ADTC;
+ else if (mmc_cmd_type(cmd) == MMC_CMD_BC)
+ cmdtype = OMAP_MMC_CMDTYPE_BC;
+ else if (mmc_cmd_type(cmd) == MMC_CMD_BCR)
+ cmdtype = OMAP_MMC_CMDTYPE_BCR;
+ else
+ cmdtype = OMAP_MMC_CMDTYPE_AC;
+
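+ /* Command register layout as used below: bits 5:0 opcode, bits 9:8
+  * response type, bits 13:12 command type; bits 6 (open drain),
+  * 11 (busy) and 15 (read transfer) are OR'd in afterwards. */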
+ cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
+
+ if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ cmdreg |= 1 << 6;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ cmdreg |= 1 << 11;
+
+ if (host->data && !(host->data->flags & MMC_DATA_WRITE))
+ cmdreg |= 1 << 15;
+
+ GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg);
+ GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg);
+}
+
+static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
+ struct mmc_data *data)
+{
+ if (host->dma_in_use) {
+ enum dma_data_direction dma_data_dir;
+
+ if (data->flags & MMC_DATA_WRITE)
+ dma_data_dir = DMA_TO_DEVICE;
+ else
+ dma_data_dir = DMA_FROM_DEVICE;
+
+ if (dma_data_dir == DMA_FROM_DEVICE) {
+ /*
+ * We don't really have DMA, so we need
+ * to copy from our platform driver buffer
+ */
+ uint8_t *dest = (uint8_t *)sg_virt(data->sg);
+ memcpy(dest, host->virt_base, data->sg->length);
+ }
+ host->data->bytes_xfered += data->sg->length;
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
+ dma_data_dir);
+ }
+
+ host->data = NULL;
+ host->sg_len = 0;
+
+ /*
+ * NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
+ * dozens of requests until the card finishes writing data.
+ * It'd be cheaper to just wait till an EOFB interrupt arrives...
+ */
+
+ if (!data->stop) {
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, data->mrq);
+ return;
+ }
+
+ goldfish_mmc_start_command(host, data->stop);
+}
+
+static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host,
+ struct mmc_data *data)
+{
+ if (!host->dma_in_use) {
+ goldfish_mmc_xfer_done(host, data);
+ return;
+ }
+ if (host->dma_done)
+ goldfish_mmc_xfer_done(host, data);
+}
+
+static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ host->cmd = NULL;
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ /* response type 2 */
+ cmd->resp[3] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_0);
+ cmd->resp[2] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_1);
+ cmd->resp[1] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_2);
+ cmd->resp[0] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_3);
+ } else {
+ /* response types 1, 1b, 3, 4, 5, 6 */
+ cmd->resp[0] =
+ GOLDFISH_MMC_READ(host, MMC_RESP_0);
+ }
+ }
+
+ if (host->data == NULL || cmd->error) {
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, cmd->mrq);
+ }
+}
+
+static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id)
+{
+ struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id;
+ u16 status;
+ int end_command = 0;
+ int end_transfer = 0;
+ int transfer_error = 0;
+ int state_changed = 0;
+ int cmd_timeout = 0;
+
+ while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) {
+ GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
+
+ if (status & MMC_STAT_END_OF_CMD)
+ end_command = 1;
+
+ if (status & MMC_STAT_END_OF_DATA)
+ end_transfer = 1;
+
+ if (status & MMC_STAT_STATE_CHANGE)
+ state_changed = 1;
+
+ if (status & MMC_STAT_CMD_TIMEOUT) {
+ end_command = 0;
+ cmd_timeout = 1;
+ }
+ }
+
+ if (cmd_timeout) {
+ struct mmc_request *mrq = host->mrq;
+ mrq->cmd->error = -ETIMEDOUT;
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+ }
+
+ if (end_command)
+ goldfish_mmc_cmd_done(host, host->cmd);
+
+ if (transfer_error)
+ goldfish_mmc_xfer_done(host, host->data);
+ else if (end_transfer) {
+ host->dma_done = 1;
+ goldfish_mmc_end_of_data(host, host->data);
+ } else if (host->data != NULL) {
+ /*
+ * WORKAROUND -- after porting this driver from 2.6 to 3.4,
+ * during device initialization, cases where host->data is
+ * non-null but end_transfer is false would occur. Doing
+ * nothing in such cases results in no further interrupts,
+ * and initialization failure.
+ * TODO -- find the real cause.
+ */
+ host->dma_done = 1;
+ goldfish_mmc_end_of_data(host, host->data);
+ }
+
+ if (state_changed) {
+ u32 state = GOLDFISH_MMC_READ(host, MMC_STATE);
+ pr_info("%s: Card detect now %d\n", __func__,
+ (state & MMC_STATE_INSERTED));
+ mmc_detect_change(host->mmc, 0);
+ }
+
+ if (!end_command && !end_transfer &&
+ !transfer_error && !state_changed && !cmd_timeout) {
+ status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS);
+ dev_info(mmc_dev(host->mmc), "spurious irq 0x%04x\n", status);
+ if (status != 0) {
+ GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status);
+ GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
+ struct mmc_request *req)
+{
+ struct mmc_data *data = req->data;
+ int block_size;
+ unsigned sg_len;
+ enum dma_data_direction dma_data_dir;
+
+ host->data = data;
+ if (data == NULL) {
+ GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0);
+ GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0);
+ host->dma_in_use = 0;
+ return;
+ }
+
+ block_size = data->blksz;
+
+ GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1);
+ GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1);
+
+ /*
+ * Cope with calling layer confusion; it issues "single
+ * block" writes using multi-block scatterlists.
+ */
+ sg_len = (data->blocks == 1) ? 1 : data->sg_len;
+
+ if (data->flags & MMC_DATA_WRITE)
+ dma_data_dir = DMA_TO_DEVICE;
+ else
+ dma_data_dir = DMA_FROM_DEVICE;
+
+ host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
+ sg_len, dma_data_dir);
+ host->dma_done = 0;
+ host->dma_in_use = 1;
+
+ if (dma_data_dir == DMA_TO_DEVICE) {
+ /*
+ * We don't really have DMA, so we need to copy to our
+ * platform driver buffer
+ */
+ const uint8_t *src = (uint8_t *)sg_virt(data->sg);
+ memcpy(host->virt_base, src, data->sg->length);
+ }
+}
+
+static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct goldfish_mmc_host *host = mmc_priv(mmc);
+
+ WARN_ON(host->mrq != NULL);
+
+ host->mrq = req;
+ goldfish_mmc_prepare_data(host, req);
+ goldfish_mmc_start_command(host, req->cmd);
+
+ /*
+ * This is to avoid accidentally being detected as an SDIO card
+ * in mmc_attach_sdio().
+ */
+ if (req->cmd->opcode == SD_IO_SEND_OP_COND &&
+ req->cmd->flags == (MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR))
+ req->cmd->error = -EINVAL;
+}
+
+static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct goldfish_mmc_host *host = mmc_priv(mmc);
+
+ host->bus_mode = ios->bus_mode;
+ host->hw_bus_mode = host->bus_mode;
+}
+
+static int goldfish_mmc_get_ro(struct mmc_host *mmc)
+{
+ uint32_t state;
+ struct goldfish_mmc_host *host = mmc_priv(mmc);
+
+ state = GOLDFISH_MMC_READ(host, MMC_STATE);
+ return ((state & MMC_STATE_READ_ONLY) != 0);
+}
+
+static const struct mmc_host_ops goldfish_mmc_ops = {
+ .request = goldfish_mmc_request,
+ .set_ios = goldfish_mmc_set_ios,
+ .get_ro = goldfish_mmc_get_ro,
+};
+
+static int goldfish_mmc_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct goldfish_mmc_host *host = NULL;
+ struct resource *res;
+ int ret = 0;
+ int irq;
+ dma_addr_t buf_addr;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (res == NULL || irq < 0)
+ return -ENXIO;
+
+ mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev);
+ if (mmc == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_host_failed;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end);
+ host->reg_base = ioremap(res->start, resource_size(res));
+ if (host->reg_base == NULL) {
+ ret = -ENOMEM;
+ goto ioremap_failed;
+ }
+ host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
+ &buf_addr, GFP_KERNEL);
+
+ if (host->virt_base == NULL) {
+ ret = -ENOMEM;
+ goto dma_alloc_failed;
+ }
+ host->phys_base = buf_addr;
+
+ host->id = pdev->id;
+ host->irq = irq;
+
+ mmc->ops = &goldfish_mmc_ops;
+ mmc->f_min = 400000;
+ mmc->f_max = 24000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->caps = MMC_CAP_4_BIT_DATA;
+
+ /* Use scatterlist DMA to reduce per-transfer costs.
+ * NOTE max_seg_size assumption that small blocks aren't
+ * normally used (except e.g. for reading SD registers).
+ */
+ mmc->max_segs = 32;
+ mmc->max_blk_size = 2048; /* MMC_BLOCK_LENGTH is 11 bits (+1) */
+ mmc->max_blk_count = 2048; /* MMC_BLOCK_COUNT is 11 bits (+1) */
+ mmc->max_req_size = BUFFER_SIZE;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request IRQ for goldfish MMC\n");
+ goto err_request_irq_failed;
+ }
+
+ host->dev = &pdev->dev;
+ platform_set_drvdata(pdev, host);
+
+ ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
+ if (ret)
+ dev_warn(mmc_dev(host->mmc),
+ "Unable to create sysfs attributes\n");
+
+ GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base);
+ GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE,
+ MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA |
+ MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT);
+
+ mmc_add_host(mmc);
+ return 0;
+
+err_request_irq_failed:
+ dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base,
+ host->phys_base);
+dma_alloc_failed:
+ iounmap(host->reg_base);
+ioremap_failed:
+ mmc_free_host(host->mmc);
+err_alloc_host_failed:
+ return ret;
+}
+
+static int goldfish_mmc_remove(struct platform_device *pdev)
+{
+ struct goldfish_mmc_host *host = platform_get_drvdata(pdev);
+
+ BUG_ON(host == NULL);
+
+ mmc_remove_host(host->mmc);
+ free_irq(host->irq, host);
+ dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base);
+ iounmap(host->reg_base);
+ mmc_free_host(host->mmc);
+ return 0;
+}
+
+static struct platform_driver goldfish_mmc_driver = {
+ .probe = goldfish_mmc_probe,
+ .remove = goldfish_mmc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(goldfish_mmc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/atmel-mci-regs.h b/kernel/drivers/mmc/host/atmel-mci-regs.h
new file mode 100644
index 000000000..0aa44e679
--- /dev/null
+++ b/kernel/drivers/mmc/host/atmel-mci-regs.h
@@ -0,0 +1,171 @@
+/*
+ * Atmel MultiMedia Card Interface driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Superset of MCI IP registers integrated in Atmel AVR32 and AT91 Processors
+ * Registers and bitfields marked with [2] are only available in MCI2
+ */
+
+#ifndef __DRIVERS_MMC_ATMEL_MCI_H__
+#define __DRIVERS_MMC_ATMEL_MCI_H__
+
+/* MCI Register Definitions */
+#define ATMCI_CR 0x0000 /* Control */
+# define ATMCI_CR_MCIEN ( 1 << 0) /* MCI Enable */
+# define ATMCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */
+# define ATMCI_CR_PWSEN ( 1 << 2) /* Power Save Enable */
+# define ATMCI_CR_PWSDIS ( 1 << 3) /* Power Save Disable */
+# define ATMCI_CR_SWRST ( 1 << 7) /* Software Reset */
+#define ATMCI_MR 0x0004 /* Mode */
+# define ATMCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */
+# define ATMCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */
+# define ATMCI_MR_RDPROOF ( 1 << 11) /* Read Proof */
+# define ATMCI_MR_WRPROOF ( 1 << 12) /* Write Proof */
+# define ATMCI_MR_PDCFBYTE ( 1 << 13) /* Force Byte Transfer */
+# define ATMCI_MR_PDCPADV ( 1 << 14) /* Padding Value */
+# define ATMCI_MR_PDCMODE ( 1 << 15) /* PDC-oriented Mode */
+# define ATMCI_MR_CLKODD(x) ((x) << 16) /* LSB of Clock Divider */
+#define ATMCI_DTOR 0x0008 /* Data Timeout */
+# define ATMCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */
+# define ATMCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */
+#define ATMCI_SDCR 0x000c /* SD Card / SDIO */
+# define ATMCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */
+# define ATMCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot B */
+# define ATMCI_SDCSEL_MASK ( 3 << 0)
+# define ATMCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */
+# define ATMCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */
+# define ATMCI_SDCBUS_8BIT ( 3 << 6) /* 8-bit data bus[2] */
+# define ATMCI_SDCBUS_MASK ( 3 << 6)
+#define ATMCI_ARGR 0x0010 /* Command Argument */
+#define ATMCI_CMDR 0x0014 /* Command */
+# define ATMCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */
+# define ATMCI_CMDR_RSPTYP_NONE ( 0 << 6) /* No response */
+# define ATMCI_CMDR_RSPTYP_48BIT ( 1 << 6) /* 48-bit response */
+# define ATMCI_CMDR_RSPTYP_136BIT ( 2 << 6) /* 136-bit response */
+# define ATMCI_CMDR_SPCMD_INIT ( 1 << 8) /* Initialization command */
+# define ATMCI_CMDR_SPCMD_SYNC ( 2 << 8) /* Synchronized command */
+# define ATMCI_CMDR_SPCMD_INT ( 4 << 8) /* Interrupt command */
+# define ATMCI_CMDR_SPCMD_INTRESP ( 5 << 8) /* Interrupt response */
+# define ATMCI_CMDR_OPDCMD ( 1 << 11) /* Open Drain */
+# define ATMCI_CMDR_MAXLAT_5CYC ( 0 << 12) /* Max latency 5 cycles */
+# define ATMCI_CMDR_MAXLAT_64CYC ( 1 << 12) /* Max latency 64 cycles */
+# define ATMCI_CMDR_START_XFER ( 1 << 16) /* Start data transfer */
+# define ATMCI_CMDR_STOP_XFER ( 2 << 16) /* Stop data transfer */
+# define ATMCI_CMDR_TRDIR_WRITE ( 0 << 18) /* Write data */
+# define ATMCI_CMDR_TRDIR_READ ( 1 << 18) /* Read data */
+# define ATMCI_CMDR_BLOCK ( 0 << 19) /* Single-block transfer */
+# define ATMCI_CMDR_MULTI_BLOCK ( 1 << 19) /* Multi-block transfer */
+# define ATMCI_CMDR_STREAM ( 2 << 19) /* MMC Stream transfer */
+# define ATMCI_CMDR_SDIO_BYTE ( 4 << 19) /* SDIO Byte transfer */
+# define ATMCI_CMDR_SDIO_BLOCK ( 5 << 19) /* SDIO Block transfer */
+# define ATMCI_CMDR_SDIO_SUSPEND ( 1 << 24) /* SDIO Suspend Command */
+# define ATMCI_CMDR_SDIO_RESUME ( 2 << 24) /* SDIO Resume Command */
+#define ATMCI_BLKR 0x0018 /* Block */
+# define ATMCI_BCNT(x) ((x) << 0) /* Data Block Count */
+# define ATMCI_BLKLEN(x) ((x) << 16) /* Data Block Length */
+#define ATMCI_CSTOR 0x001c /* Completion Signal Timeout[2] */
+# define ATMCI_CSTOCYC(x) ((x) << 0) /* CST cycles */
+# define ATMCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */
+#define ATMCI_RSPR 0x0020 /* Response 0 */
+#define ATMCI_RSPR1 0x0024 /* Response 1 */
+#define ATMCI_RSPR2 0x0028 /* Response 2 */
+#define ATMCI_RSPR3 0x002c /* Response 3 */
+#define ATMCI_RDR 0x0030 /* Receive Data */
+#define ATMCI_TDR 0x0034 /* Transmit Data */
+#define ATMCI_SR 0x0040 /* Status */
+#define ATMCI_IER 0x0044 /* Interrupt Enable */
+#define ATMCI_IDR 0x0048 /* Interrupt Disable */
+#define ATMCI_IMR 0x004c /* Interrupt Mask */
+# define ATMCI_CMDRDY ( 1 << 0) /* Command Ready */
+# define ATMCI_RXRDY ( 1 << 1) /* Receiver Ready */
+# define ATMCI_TXRDY ( 1 << 2) /* Transmitter Ready */
+# define ATMCI_BLKE ( 1 << 3) /* Data Block Ended */
+# define ATMCI_DTIP ( 1 << 4) /* Data Transfer In Progress */
+# define ATMCI_NOTBUSY ( 1 << 5) /* Data Not Busy */
+# define ATMCI_ENDRX ( 1 << 6) /* End of RX Buffer */
+# define ATMCI_ENDTX ( 1 << 7) /* End of TX Buffer */
+# define ATMCI_SDIOIRQA ( 1 << 8) /* SDIO IRQ in slot A */
+# define ATMCI_SDIOIRQB ( 1 << 9) /* SDIO IRQ in slot B */
+# define ATMCI_SDIOWAIT ( 1 << 12) /* SDIO Read Wait Operation Status */
+# define ATMCI_CSRCV ( 1 << 13) /* CE-ATA Completion Signal Received */
+# define ATMCI_RXBUFF ( 1 << 14) /* RX Buffer Full */
+# define ATMCI_TXBUFE ( 1 << 15) /* TX Buffer Empty */
+# define ATMCI_RINDE ( 1 << 16) /* Response Index Error */
+# define ATMCI_RDIRE ( 1 << 17) /* Response Direction Error */
+# define ATMCI_RCRCE ( 1 << 18) /* Response CRC Error */
+# define ATMCI_RENDE ( 1 << 19) /* Response End Bit Error */
+# define ATMCI_RTOE ( 1 << 20) /* Response Time-Out Error */
+# define ATMCI_DCRCE ( 1 << 21) /* Data CRC Error */
+# define ATMCI_DTOE ( 1 << 22) /* Data Time-Out Error */
+# define ATMCI_CSTOE ( 1 << 23) /* Completion Signal Time-out Error */
+# define ATMCI_BLKOVRE ( 1 << 24) /* DMA Block Overrun Error */
+# define ATMCI_DMADONE ( 1 << 25) /* DMA Transfer Done */
+# define ATMCI_FIFOEMPTY ( 1 << 26) /* FIFO Empty Flag */
+# define ATMCI_XFRDONE ( 1 << 27) /* Transfer Done Flag */
+# define ATMCI_ACKRCV ( 1 << 28) /* Boot Operation Acknowledge Received */
+# define ATMCI_ACKRCVE ( 1 << 29) /* Boot Operation Acknowledge Error */
+# define ATMCI_OVRE ( 1 << 30) /* RX Overrun Error */
+# define ATMCI_UNRE ( 1 << 31) /* TX Underrun Error */
+#define ATMCI_DMA 0x0050 /* DMA Configuration[2] */
+# define ATMCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */
+# define ATMCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */
+# define ATMCI_DMAEN ( 1 << 8) /* DMA Hardware Handshaking Enable */
+#define ATMCI_CFG 0x0054 /* Configuration[2] */
+# define ATMCI_CFG_FIFOMODE_1DATA ( 1 << 0) /* MCI Internal FIFO control mode */
+# define ATMCI_CFG_FERRCTRL_COR ( 1 << 4) /* Flow Error flag reset control mode */
+# define ATMCI_CFG_HSMODE ( 1 << 8) /* High Speed Mode */
+# define ATMCI_CFG_LSYNC ( 1 << 12) /* Synchronize on the last block */
+#define ATMCI_WPMR 0x00e4 /* Write Protection Mode[2] */
+# define ATMCI_WP_EN ( 1 << 0) /* WP Enable */
+# define ATMCI_WP_KEY (0x4d4349 << 8) /* WP Key */
+#define ATMCI_WPSR 0x00e8 /* Write Protection Status[2] */
+# define ATMCI_GET_WP_VS(x) ((x) & 0x0f)
+# define ATMCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff)
+#define ATMCI_VERSION 0x00FC /* Version */
+#define ATMCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */
+
+/* This is not including the FIFO Aperture on MCI2 */
+#define ATMCI_REGS_SIZE 0x100
+
+/* Register access macros */
+#ifdef CONFIG_AVR32
+#define atmci_readl(port, reg) \
+ __raw_readl((port)->regs + reg)
+#define atmci_writel(port, reg, value) \
+ __raw_writel((value), (port)->regs + reg)
+#else
+#define atmci_readl(port, reg) \
+ readl_relaxed((port)->regs + reg)
+#define atmci_writel(port, reg, value) \
+ writel_relaxed((value), (port)->regs + reg)
+#endif
+
+/* On AVR chips the Peripheral DMA Controller is not connected to MCI. */
+#ifdef CONFIG_AVR32
+# define ATMCI_PDC_CONNECTED 0
+#else
+# define ATMCI_PDC_CONNECTED 1
+#endif
+
+/*
+ * Fix sconfig's burst size according to atmel MCI. We need to convert them as:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * This can be done by finding the most significant bit set.
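+ * For example, maxburst = 8 gives fls(8) - 2 = 4 - 2 = 2.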
+ */
+static inline unsigned int atmci_convert_chksize(unsigned int maxburst)
+{
+ if (maxburst > 1)
+ return fls(maxburst) - 2;
+ else
+ return 0;
+}
+
+#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
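
As a worked example of the ATMCI_CMDR field encodings above, a
single-block read (CMD17, READ_SINGLE_BLOCK) expecting a 48-bit response
could be composed roughly as follows (illustrative only, not code from
this tree):

	u32 cmdr = ATMCI_CMDR_CMDNB(17)		/* command opcode */
		 | ATMCI_CMDR_RSPTYP_48BIT	/* expect a 48-bit response */
		 | ATMCI_CMDR_MAXLAT_64CYC	/* allow 64-cycle response latency */
		 | ATMCI_CMDR_START_XFER	/* start a data transfer */
		 | ATMCI_CMDR_TRDIR_READ	/* data flows card -> host */
		 | ATMCI_CMDR_BLOCK;		/* single-block transfer */

	/* The value would then be written to the controller with
	 * atmci_writel(host, ATMCI_CMDR, cmdr). */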
diff --git a/kernel/drivers/mmc/host/atmel-mci.c b/kernel/drivers/mmc/host/atmel-mci.c
new file mode 100644
index 000000000..9a39e0b7e
--- /dev/null
+++ b/kernel/drivers/mmc/host/atmel-mci.c
@@ -0,0 +1,2585 @@
+/*
+ * Atmel MultiMedia Card Interface driver
+ *
+ * Copyright (C) 2004-2008 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/blkdev.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/platform_data/atmel.h>
+#include <linux/platform_data/mmc-atmel-mci.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio.h>
+
+#include <linux/atmel-mci.h>
+#include <linux/atmel_pdc.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+
+#include "atmel-mci-regs.h"
+
+#define AUTOSUSPEND_DELAY 50
+
+#define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
+#define ATMCI_DMA_THRESHOLD 16
+
+enum {
+ EVENT_CMD_RDY = 0,
+ EVENT_XFER_COMPLETE,
+ EVENT_NOTBUSY,
+ EVENT_DATA_ERROR,
+};
+
+enum atmel_mci_state {
+ STATE_IDLE = 0,
+ STATE_SENDING_CMD,
+ STATE_DATA_XFER,
+ STATE_WAITING_NOTBUSY,
+ STATE_SENDING_STOP,
+ STATE_END_REQUEST,
+};
+
+enum atmci_xfer_dir {
+ XFER_RECEIVE = 0,
+ XFER_TRANSMIT,
+};
+
+enum atmci_pdc_buf {
+ PDC_FIRST_BUF = 0,
+ PDC_SECOND_BUF,
+};
+
+struct atmel_mci_caps {
+ bool has_dma_conf_reg;
+ bool has_pdc;
+ bool has_cfg_reg;
+ bool has_cstor_reg;
+ bool has_highspeed;
+ bool has_rwproof;
+ bool has_odd_clk_div;
+ bool has_bad_data_ordering;
+ bool need_reset_after_xfer;
+ bool need_blksz_mul_4;
+ bool need_notbusy_for_read_ops;
+};
+
+struct atmel_mci_dma {
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *data_desc;
+};
+
+/**
+ * struct atmel_mci - MMC controller state shared between all slots
+ * @lock: Spinlock protecting the queue and associated data.
+ * @regs: Pointer to MMIO registers.
+ * @sg: Scatterlist entry currently being processed by PIO or PDC code.
+ * @pio_offset: Offset into the current scatterlist entry.
+ * @buffer: Buffer used if we don't have the r/w proof capability. We
+ * don't have the time to switch pdc buffers so we have to use only
+ * one buffer for the full transaction.
+ * @buf_size: size of the buffer.
+ * @phys_buf_addr: buffer address needed for pdc.
+ * @cur_slot: The slot which is currently using the controller.
+ * @mrq: The request currently being processed on @cur_slot,
+ * or NULL if the controller is idle.
+ * @cmd: The command currently being sent to the card, or NULL.
+ * @data: The data currently being transferred, or NULL if no data
+ * transfer is in progress.
+ * @data_size: just data->blocks * data->blksz.
+ * @dma: DMA client state.
+ * @data_chan: DMA channel being used for the current data transfer.
+ * @cmd_status: Snapshot of SR taken upon completion of the current
+ * command. Only valid when EVENT_CMD_COMPLETE is pending.
+ * @data_status: Snapshot of SR taken upon completion of the current
+ * data transfer. Only valid when EVENT_DATA_COMPLETE or
+ * EVENT_DATA_ERROR is pending.
+ * @stop_cmdr: Value to be loaded into CMDR when the stop command is
+ * to be sent.
+ * @tasklet: Tasklet running the request state machine.
+ * @pending_events: Bitmask of events flagged by the interrupt handler
+ * to be processed by the tasklet.
+ * @completed_events: Bitmask of events which the state machine has
+ * processed.
+ * @state: Tasklet state.
+ * @queue: List of slots waiting for access to the controller.
+ * @need_clock_update: Update the clock rate before the next request.
+ * @need_reset: Reset controller before next request.
+ * @timer: Software timeout timer, needed because the data timeout error
+ *	flag cannot always rise.
+ * @mode_reg: Value of the MR register.
+ * @cfg_reg: Value of the CFG register.
+ * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
+ * rate and timeout calculations.
+ * @mapbase: Physical address of the MMIO registers.
+ * @mck: The peripheral bus clock hooked up to the MMC controller.
+ * @pdev: Platform device associated with the MMC controller.
+ * @slot: Slots sharing this MMC controller.
+ * @caps: MCI capabilities depending on MCI version.
+ * @prepare_data: function to setup MCI before data transfer which
+ * depends on MCI capabilities.
+ * @submit_data: function to start data transfer which depends on MCI
+ * capabilities.
+ * @stop_transfer: function to stop data transfer which depends on MCI
+ * capabilities.
+ *
+ * Locking
+ * =======
+ *
+ * @lock is a softirq-safe spinlock protecting @queue as well as
+ * @cur_slot, @mrq and @state. These must always be updated
+ * at the same time while holding @lock.
+ *
+ * @lock also protects mode_reg and need_clock_update since these are
+ * used to synchronize mode register updates with the queue
+ * processing.
+ *
+ * The @mrq field of struct atmel_mci_slot is also protected by @lock,
+ * and must always be written at the same time as the slot is added to
+ * @queue.
+ *
+ * @pending_events and @completed_events are accessed using atomic bit
+ * operations, so they don't need any locking.
+ *
+ * None of the fields touched by the interrupt handler need any
+ * locking. However, ordering is important: Before EVENT_DATA_ERROR or
+ * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
+ * interrupts must be disabled and @data_status updated with a
+ * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
+ * CMDRDY interrupt must be disabled and @cmd_status updated with a
+ * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
+ * bytes_xfered field of @data must be written. This is ensured by
+ * using barriers.
+ */
+struct atmel_mci {
+ spinlock_t lock;
+ void __iomem *regs;
+
+ struct scatterlist *sg;
+ unsigned int sg_len;
+ unsigned int pio_offset;
+ unsigned int *buffer;
+ unsigned int buf_size;
+ dma_addr_t buf_phys_addr;
+
+ struct atmel_mci_slot *cur_slot;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ unsigned int data_size;
+
+ struct atmel_mci_dma dma;
+ struct dma_chan *data_chan;
+ struct dma_slave_config dma_conf;
+
+ u32 cmd_status;
+ u32 data_status;
+ u32 stop_cmdr;
+
+ struct tasklet_struct tasklet;
+ unsigned long pending_events;
+ unsigned long completed_events;
+ enum atmel_mci_state state;
+ struct list_head queue;
+
+ bool need_clock_update;
+ bool need_reset;
+ struct timer_list timer;
+ u32 mode_reg;
+ u32 cfg_reg;
+ unsigned long bus_hz;
+ unsigned long mapbase;
+ struct clk *mck;
+ struct platform_device *pdev;
+
+ struct atmel_mci_slot *slot[ATMCI_MAX_NR_SLOTS];
+
+ struct atmel_mci_caps caps;
+
+ u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
+ void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
+ void (*stop_transfer)(struct atmel_mci *host);
+};
+
+/**
+ * struct atmel_mci_slot - MMC slot state
+ * @mmc: The mmc_host representing this slot.
+ * @host: The MMC controller this slot is using.
+ * @sdc_reg: Value of SDCR to be written before using this slot.
+ * @sdio_irq: SDIO irq mask for this slot.
+ * @mrq: mmc_request currently being processed or waiting to be
+ * processed, or NULL when the slot is idle.
+ * @queue_node: List node for placing this node in the @queue list of
+ * &struct atmel_mci.
+ * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @flags: Random state bits associated with the slot.
+ * @detect_pin: GPIO pin used for card detection, or negative if not
+ * available.
+ * @wp_pin: GPIO pin used for card write protect sensing, or negative
+ * if not available.
+ * @detect_is_active_high: The state of the detect pin when it is active.
+ * @detect_timer: Timer used for debouncing @detect_pin interrupts.
+ */
+struct atmel_mci_slot {
+ struct mmc_host *mmc;
+ struct atmel_mci *host;
+
+ u32 sdc_reg;
+ u32 sdio_irq;
+
+ struct mmc_request *mrq;
+ struct list_head queue_node;
+
+ unsigned int clock;
+ unsigned long flags;
+#define ATMCI_CARD_PRESENT 0
+#define ATMCI_CARD_NEED_INIT 1
+#define ATMCI_SHUTDOWN 2
+
+ int detect_pin;
+ int wp_pin;
+ bool detect_is_active_high;
+
+ struct timer_list detect_timer;
+};
+
+#define atmci_test_and_clear_pending(host, event) \
+ test_and_clear_bit(event, &host->pending_events)
+#define atmci_set_completed(host, event) \
+ set_bit(event, &host->completed_events)
+#define atmci_set_pending(host, event) \
+ set_bit(event, &host->pending_events)
+
+/*
+ * The debugfs stuff below is mostly optimized away when
+ * CONFIG_DEBUG_FS is not set.
+ */
+static int atmci_req_show(struct seq_file *s, void *v)
+{
+ struct atmel_mci_slot *slot = s->private;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_command *stop;
+ struct mmc_data *data;
+
+ /* Make sure we get a consistent snapshot */
+ spin_lock_bh(&slot->host->lock);
+ mrq = slot->mrq;
+
+ if (mrq) {
+ cmd = mrq->cmd;
+ data = mrq->data;
+ stop = mrq->stop;
+
+ if (cmd)
+ seq_printf(s,
+ "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+ cmd->opcode, cmd->arg, cmd->flags,
+ cmd->resp[0], cmd->resp[1], cmd->resp[2],
+ cmd->resp[3], cmd->error);
+ if (data)
+ seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
+ data->bytes_xfered, data->blocks,
+ data->blksz, data->flags, data->error);
+ if (stop)
+ seq_printf(s,
+ "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+ stop->opcode, stop->arg, stop->flags,
+ stop->resp[0], stop->resp[1], stop->resp[2],
+ stop->resp[3], stop->error);
+ }
+
+ spin_unlock_bh(&slot->host->lock);
+
+ return 0;
+}
+
+static int atmci_req_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, atmci_req_show, inode->i_private);
+}
+
+static const struct file_operations atmci_req_fops = {
+ .owner = THIS_MODULE,
+ .open = atmci_req_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void atmci_show_status_reg(struct seq_file *s,
+ const char *regname, u32 value)
+{
+ static const char *sr_bit[] = {
+ [0] = "CMDRDY",
+ [1] = "RXRDY",
+ [2] = "TXRDY",
+ [3] = "BLKE",
+ [4] = "DTIP",
+ [5] = "NOTBUSY",
+ [6] = "ENDRX",
+ [7] = "ENDTX",
+ [8] = "SDIOIRQA",
+ [9] = "SDIOIRQB",
+ [12] = "SDIOWAIT",
+ [14] = "RXBUFF",
+ [15] = "TXBUFE",
+ [16] = "RINDE",
+ [17] = "RDIRE",
+ [18] = "RCRCE",
+ [19] = "RENDE",
+ [20] = "RTOE",
+ [21] = "DCRCE",
+ [22] = "DTOE",
+ [23] = "CSTOE",
+ [24] = "BLKOVRE",
+ [25] = "DMADONE",
+ [26] = "FIFOEMPTY",
+ [27] = "XFRDONE",
+ [30] = "OVRE",
+ [31] = "UNRE",
+ };
+ unsigned int i;
+
+ seq_printf(s, "%s:\t0x%08x", regname, value);
+ for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
+ if (value & (1 << i)) {
+ if (sr_bit[i])
+ seq_printf(s, " %s", sr_bit[i]);
+ else
+ seq_puts(s, " UNKNOWN");
+ }
+ }
+ seq_putc(s, '\n');
+}
+
+static int atmci_regs_show(struct seq_file *s, void *v)
+{
+ struct atmel_mci *host = s->private;
+ u32 *buf;
+ int ret = 0;
+
+ buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pm_runtime_get_sync(&host->pdev->dev);
+
+ /*
+ * Grab a more or less consistent snapshot. Note that we're
+ * not disabling interrupts, so IMR and SR may not be
+ * consistent.
+ */
+ spin_lock_bh(&host->lock);
+ memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
+ spin_unlock_bh(&host->lock);
+
+ pm_runtime_mark_last_busy(&host->pdev->dev);
+ pm_runtime_put_autosuspend(&host->pdev->dev);
+
+ seq_printf(s, "MR:\t0x%08x%s%s ",
+ buf[ATMCI_MR / 4],
+ buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
+ buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
+ if (host->caps.has_odd_clk_div)
+ seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
+ ((buf[ATMCI_MR / 4] & 0xff) << 1)
+ | ((buf[ATMCI_MR / 4] >> 16) & 1));
+ else
+ seq_printf(s, "CLKDIV=%u\n",
+ (buf[ATMCI_MR / 4] & 0xff));
+ seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
+ seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
+ seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
+ seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
+ buf[ATMCI_BLKR / 4],
+ buf[ATMCI_BLKR / 4] & 0xffff,
+ (buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
+ if (host->caps.has_cstor_reg)
+ seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);
+
+ /* Don't read RSPR and RDR; it will consume the data there */
+
+ atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
+ atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
+
+ if (host->caps.has_dma_conf_reg) {
+ u32 val;
+
+ val = buf[ATMCI_DMA / 4];
+ seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
+ val, val & 3,
+ ((val >> 4) & 3) ?
+ 1 << (((val >> 4) & 3) + 1) : 1,
+ val & ATMCI_DMAEN ? " DMAEN" : "");
+ }
+ if (host->caps.has_cfg_reg) {
+ u32 val;
+
+ val = buf[ATMCI_CFG / 4];
+ seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
+ val,
+ val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
+ val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
+ val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
+ val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
+ }
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int atmci_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, atmci_regs_show, inode->i_private);
+}
+
+static const struct file_operations atmci_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = atmci_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void atmci_init_debugfs(struct atmel_mci_slot *slot)
+{
+ struct mmc_host *mmc = slot->mmc;
+ struct atmel_mci *host = slot->host;
+ struct dentry *root;
+ struct dentry *node;
+
+ root = mmc->debugfs_root;
+ if (!root)
+ return;
+
+ node = debugfs_create_file("regs", S_IRUSR, root, host,
+ &atmci_regs_fops);
+ if (IS_ERR(node))
+ return;
+ if (!node)
+ goto err;
+
+ node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_x32("pending_events", S_IRUSR, root,
+ (u32 *)&host->pending_events);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_x32("completed_events", S_IRUSR, root,
+ (u32 *)&host->completed_events);
+ if (!node)
+ goto err;
+
+ return;
+
+err:
+ dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmci_dt_ids[] = {
+ { .compatible = "atmel,hsmci" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmci_dt_ids);
+
+static struct mci_platform_data*
+atmci_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *cnp;
+ struct mci_platform_data *pdata;
+ u32 slot_id;
+
+ if (!np) {
+ dev_err(&pdev->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_child_of_node(np, cnp) {
+ if (of_property_read_u32(cnp, "reg", &slot_id)) {
+ dev_warn(&pdev->dev, "reg property is missing for %s\n",
+ cnp->full_name);
+ continue;
+ }
+
+ if (slot_id >= ATMCI_MAX_NR_SLOTS) {
+ dev_warn(&pdev->dev, "can't have more than %d slots\n",
+ ATMCI_MAX_NR_SLOTS);
+ break;
+ }
+
+ if (of_property_read_u32(cnp, "bus-width",
+ &pdata->slot[slot_id].bus_width))
+ pdata->slot[slot_id].bus_width = 1;
+
+ pdata->slot[slot_id].detect_pin =
+ of_get_named_gpio(cnp, "cd-gpios", 0);
+
+ pdata->slot[slot_id].detect_is_active_high =
+ of_property_read_bool(cnp, "cd-inverted");
+
+ pdata->slot[slot_id].non_removable =
+ of_property_read_bool(cnp, "non-removable");
+
+ pdata->slot[slot_id].wp_pin =
+ of_get_named_gpio(cnp, "wp-gpios", 0);
+ }
+
+ return pdata;
+}
+#else /* CONFIG_OF */
+static inline struct mci_platform_data*
+atmci_of_init(struct platform_device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+static inline unsigned int atmci_get_version(struct atmel_mci *host)
+{
+ return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
+}
+
+static void atmci_timeout_timer(unsigned long data)
+{
+ struct atmel_mci *host;
+
+ host = (struct atmel_mci *)data;
+
+ dev_dbg(&host->pdev->dev, "software timeout\n");
+
+ if (host->mrq->cmd->data) {
+ host->mrq->cmd->data->error = -ETIMEDOUT;
+ host->data = NULL;
+ /*
+		 * With some SDIO modules, the DMA transfer sometimes hangs.
+		 * If stop_transfer() is not called, the DMA request is not
+		 * removed and the following ones are queued but never
+		 * processed.
+ */
+ if (host->state == STATE_DATA_XFER)
+ host->stop_transfer(host);
+ } else {
+ host->mrq->cmd->error = -ETIMEDOUT;
+ host->cmd = NULL;
+ }
+ host->need_reset = 1;
+ host->state = STATE_END_REQUEST;
+ smp_wmb();
+ tasklet_schedule(&host->tasklet);
+}
+
+static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
+ unsigned int ns)
+{
+ /*
+	 * It is easier here to use us instead of ns for the timeout:
+	 * it prevents overflows during the calculation.
+ */
+ unsigned int us = DIV_ROUND_UP(ns, 1000);
+
+ /* Maximum clock frequency is host->bus_hz/2 */
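+	/*
+	 * Worked example (assumed figures): with bus_hz = 100 MHz the MCI
+	 * clock is at most 50 MHz, i.e. 50 clocks per us, so ns = 100000
+	 * converts to 100 us * 50 = 5000 clocks.
+	 */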
+ return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
+}
+
+static void atmci_set_timeout(struct atmel_mci *host,
+ struct atmel_mci_slot *slot, struct mmc_data *data)
+{
+ static unsigned dtomul_to_shift[] = {
+ 0, 4, 7, 8, 10, 12, 16, 20
+ };
+ unsigned timeout;
+ unsigned dtocyc;
+ unsigned dtomul;
+
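+	/*
+	 * DTOR expresses the timeout as DTOCYC << dtomul_to_shift[DTOMUL]
+	 * clock cycles; the loop below picks the smallest multiplier for
+	 * which DTOCYC still fits in its 4-bit field.
+	 */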
+ timeout = atmci_ns_to_clocks(host, data->timeout_ns)
+ + data->timeout_clks;
+
+ for (dtomul = 0; dtomul < 8; dtomul++) {
+ unsigned shift = dtomul_to_shift[dtomul];
+ dtocyc = (timeout + (1 << shift) - 1) >> shift;
+ if (dtocyc < 15)
+ break;
+ }
+
+ if (dtomul >= 8) {
+ dtomul = 7;
+ dtocyc = 15;
+ }
+
+ dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
+ dtocyc << dtomul_to_shift[dtomul]);
+ atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
+}
+
+/*
+ * Return mask with command flags to be enabled for this command.
+ */
+static u32 atmci_prepare_command(struct mmc_host *mmc,
+ struct mmc_command *cmd)
+{
+ struct mmc_data *data;
+ u32 cmdr;
+
+ cmd->error = -EINPROGRESS;
+
+ cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136)
+ cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
+ else
+ cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
+ }
+
+ /*
+ * This should really be MAXLAT_5 for CMD2 and ACMD41, but
+ * it's too difficult to determine whether this is an ACMD or
+ * not. Better make it 64.
+ */
+ cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
+
+ if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
+ cmdr |= ATMCI_CMDR_OPDCMD;
+
+ data = cmd->data;
+ if (data) {
+ cmdr |= ATMCI_CMDR_START_XFER;
+
+ if (cmd->opcode == SD_IO_RW_EXTENDED) {
+ cmdr |= ATMCI_CMDR_SDIO_BLOCK;
+ } else {
+ if (data->flags & MMC_DATA_STREAM)
+ cmdr |= ATMCI_CMDR_STREAM;
+ else if (data->blocks > 1)
+ cmdr |= ATMCI_CMDR_MULTI_BLOCK;
+ else
+ cmdr |= ATMCI_CMDR_BLOCK;
+ }
+
+ if (data->flags & MMC_DATA_READ)
+ cmdr |= ATMCI_CMDR_TRDIR_READ;
+ }
+
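+	/*
+	 * For instance, a single-block read (CMD17) ends up as CMDNB(17) |
+	 * RSPTYP_48BIT | MAXLAT_64CYC | START_XFER | BLOCK | TRDIR_READ.
+	 */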
+ return cmdr;
+}
+
+static void atmci_send_command(struct atmel_mci *host,
+ struct mmc_command *cmd, u32 cmd_flags)
+{
+ WARN_ON(host->cmd);
+ host->cmd = cmd;
+
+ dev_vdbg(&host->pdev->dev,
+ "start command: ARGR=0x%08x CMDR=0x%08x\n",
+ cmd->arg, cmd_flags);
+
+ atmci_writel(host, ATMCI_ARGR, cmd->arg);
+ atmci_writel(host, ATMCI_CMDR, cmd_flags);
+}
+
+static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
+{
+ dev_dbg(&host->pdev->dev, "send stop command\n");
+ atmci_send_command(host, data->stop, host->stop_cmdr);
+ atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
+}
+
+/*
+ * Configure the given PDC buffer, taking care of alignment issues.
+ * Update host->data_size and host->sg.
+ */
+static void atmci_pdc_set_single_buf(struct atmel_mci *host,
+ enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
+{
+ u32 pointer_reg, counter_reg;
+ unsigned int buf_size;
+
+ if (dir == XFER_RECEIVE) {
+ pointer_reg = ATMEL_PDC_RPR;
+ counter_reg = ATMEL_PDC_RCR;
+ } else {
+ pointer_reg = ATMEL_PDC_TPR;
+ counter_reg = ATMEL_PDC_TCR;
+ }
+
+ if (buf_nb == PDC_SECOND_BUF) {
+ pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
+ counter_reg += ATMEL_PDC_SCND_BUF_OFF;
+ }
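+	/*
+	 * Note: the second-buffer registers are the PDC "next buffer"
+	 * registers (RNPR/RNCR, TNPR/TNCR); the hardware switches to them
+	 * automatically once the first buffer is exhausted.
+	 */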
+
+ if (!host->caps.has_rwproof) {
+ buf_size = host->buf_size;
+ atmci_writel(host, pointer_reg, host->buf_phys_addr);
+ } else {
+ buf_size = sg_dma_len(host->sg);
+ atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
+ }
+
+ if (host->data_size <= buf_size) {
+ if (host->data_size & 0x3) {
+			/* If the size is not a multiple of 4, transfer it in bytes */
+ atmci_writel(host, counter_reg, host->data_size);
+ atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
+ } else {
+			/* Otherwise transfer 32-bit words */
+ atmci_writel(host, counter_reg, host->data_size / 4);
+ }
+ host->data_size = 0;
+ } else {
+		/* We assume the size of a page is a multiple of 4 bytes */
+ atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
+ host->data_size -= sg_dma_len(host->sg);
+ if (host->data_size)
+ host->sg = sg_next(host->sg);
+ }
+}
+
+/*
+ * Configure the PDC buffers according to the data size, i.e. configure one or
+ * two buffers. Don't use this function if you want to configure only the
+ * second buffer. In that case, use atmci_pdc_set_single_buf.
+ */
+static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
+{
+ atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
+ if (host->data_size)
+ atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
+}
+
+/*
+ * Unmap sg lists, called when transfer is finished.
+ */
+static void atmci_pdc_cleanup(struct atmel_mci *host)
+{
+ struct mmc_data *data = host->data;
+
+ if (data)
+ dma_unmap_sg(&host->pdev->dev,
+ data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+}
+
+/*
+ * Disable PDC transfers. Update pending flags to EVENT_XFER_COMPLETE after
+ * having received ATMCI_TXBUFE or ATMCI_RXBUFF interrupt. Enable ATMCI_NOTBUSY
+ * interrupt needed for both transfer directions.
+ */
+static void atmci_pdc_complete(struct atmel_mci *host)
+{
+ int transfer_size = host->data->blocks * host->data->blksz;
+ int i;
+
+ atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
+
+ if ((!host->caps.has_rwproof)
+ && (host->data->flags & MMC_DATA_READ)) {
+ if (host->caps.has_bad_data_ordering)
+			/* host->buffer holds 32-bit words; transfer_size is in bytes */
+			for (i = 0; i < transfer_size / 4; i++)
+				host->buffer[i] = swab32(host->buffer[i]);
+ sg_copy_from_buffer(host->data->sg, host->data->sg_len,
+ host->buffer, transfer_size);
+ }
+
+ atmci_pdc_cleanup(host);
+
+ dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__);
+ atmci_set_pending(host, EVENT_XFER_COMPLETE);
+ tasklet_schedule(&host->tasklet);
+}
+
+static void atmci_dma_cleanup(struct atmel_mci *host)
+{
+ struct mmc_data *data = host->data;
+
+ if (data)
+ dma_unmap_sg(host->dma.chan->device->dev,
+ data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+}
+
+/*
+ * This function is called by the DMA driver from tasklet context.
+ */
+static void atmci_dma_complete(void *arg)
+{
+ struct atmel_mci *host = arg;
+ struct mmc_data *data = host->data;
+
+ dev_vdbg(&host->pdev->dev, "DMA complete\n");
+
+ if (host->caps.has_dma_conf_reg)
+ /* Disable DMA hardware handshaking on MCI */
+ atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
+
+ atmci_dma_cleanup(host);
+
+ /*
+ * If the card was removed, data will be NULL. No point trying
+ * to send the stop command or waiting for NBUSY in this case.
+ */
+ if (data) {
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
+ atmci_set_pending(host, EVENT_XFER_COMPLETE);
+ tasklet_schedule(&host->tasklet);
+
+ /*
+ * Regardless of what the documentation says, we have
+ * to wait for NOTBUSY even after block read
+ * operations.
+ *
+ * When the DMA transfer is complete, the controller
+ * may still be reading the CRC from the card, i.e.
+ * the data transfer is still in progress and we
+ * haven't seen all the potential error bits yet.
+ *
+ * The interrupt handler will schedule a different
+ * tasklet to finish things up when the data transfer
+ * is completely done.
+ *
+ * We may not complete the mmc request here anyway
+ * because the mmc layer may call back and cause us to
+ * violate the "don't submit new operations from the
+ * completion callback" rule of the dma engine
+ * framework.
+ */
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ }
+}
+
+/*
+ * Returns a mask of interrupt flags to be enabled after the whole
+ * request has been prepared.
+ */
+static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
+{
+ u32 iflags;
+
+ data->error = -EINPROGRESS;
+
+ host->sg = data->sg;
+ host->sg_len = data->sg_len;
+ host->data = data;
+ host->data_chan = NULL;
+
+ iflags = ATMCI_DATA_ERROR_FLAGS;
+
+ /*
+ * Errata: MMC data write operation with less than 12
+ * bytes is impossible.
+ *
+ * Errata: MCI Transmit Data Register (TDR) FIFO
+ * corruption when length is not multiple of 4.
+ */
+ if (data->blocks * data->blksz < 12
+ || (data->blocks * data->blksz) & 3)
+ host->need_reset = true;
+
+ host->pio_offset = 0;
+ if (data->flags & MMC_DATA_READ)
+ iflags |= ATMCI_RXRDY;
+ else
+ iflags |= ATMCI_TXRDY;
+
+ return iflags;
+}
+
+/*
+ * Set the interrupt flags and set the block length into the MCI mode register
+ * even though this value is also accessible in the MCI block register. It
+ * seems to be necessary before the High Speed MCI version. It also maps the
+ * sg list and configures the PDC registers.
+ */
+static u32
+atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
+{
+ u32 iflags, tmp;
+ unsigned int sg_len;
+ enum dma_data_direction dir;
+ int i;
+
+ data->error = -EINPROGRESS;
+
+ host->data = data;
+ host->sg = data->sg;
+ iflags = ATMCI_DATA_ERROR_FLAGS;
+
+ /* Enable pdc mode */
+ atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
+
+ if (data->flags & MMC_DATA_READ) {
+ dir = DMA_FROM_DEVICE;
+ iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
+ } else {
+ dir = DMA_TO_DEVICE;
+ iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
+ }
+
+ /* Set BLKLEN */
+ tmp = atmci_readl(host, ATMCI_MR);
+ tmp &= 0x0000ffff;
+ tmp |= ATMCI_BLKLEN(data->blksz);
+ atmci_writel(host, ATMCI_MR, tmp);
+
+ /* Configure PDC */
+ host->data_size = data->blocks * data->blksz;
+ sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
+
+ if ((!host->caps.has_rwproof)
+ && (host->data->flags & MMC_DATA_WRITE)) {
+ sg_copy_to_buffer(host->data->sg, host->data->sg_len,
+ host->buffer, host->data_size);
+ if (host->caps.has_bad_data_ordering)
+			/* host->buffer holds 32-bit words; data_size is in bytes */
+			for (i = 0; i < host->data_size / 4; i++)
+				host->buffer[i] = swab32(host->buffer[i]);
+ }
+
+ if (host->data_size)
+ atmci_pdc_set_both_buf(host,
+ ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
+
+ return iflags;
+}
+
+static u32
+atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
+{
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sg;
+ unsigned int i;
+ enum dma_data_direction direction;
+ enum dma_transfer_direction slave_dirn;
+ unsigned int sglen;
+ u32 maxburst;
+ u32 iflags;
+
+ data->error = -EINPROGRESS;
+
+ WARN_ON(host->data);
+ host->sg = NULL;
+ host->data = data;
+
+ iflags = ATMCI_DATA_ERROR_FLAGS;
+
+ /*
+ * We don't do DMA on "complex" transfers, i.e. with
+ * non-word-aligned buffers or lengths. Also, we don't bother
+ * with all the DMA setup overhead for short transfers.
+ */
+ if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
+ return atmci_prepare_data(host, data);
+ if (data->blksz & 3)
+ return atmci_prepare_data(host, data);
+
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (sg->offset & 3 || sg->length & 3)
+ return atmci_prepare_data(host, data);
+ }
+
+	/* If we don't have a channel, we can't do DMA */
+	chan = host->dma.chan;
+	if (!chan)
+		return -ENODEV;
+	host->data_chan = chan;
+
+ if (data->flags & MMC_DATA_READ) {
+ direction = DMA_FROM_DEVICE;
+ host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
+ maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
+ } else {
+ direction = DMA_TO_DEVICE;
+ host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
+ maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
+ }
+
+ if (host->caps.has_dma_conf_reg)
+ atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
+ ATMCI_DMAEN);
+
+ sglen = dma_map_sg(chan->device->dev, data->sg,
+ data->sg_len, direction);
+
+ dmaengine_slave_config(chan, &host->dma_conf);
+ desc = dmaengine_prep_slave_sg(chan,
+ data->sg, sglen, slave_dirn,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ goto unmap_exit;
+
+ host->dma.data_desc = desc;
+ desc->callback = atmci_dma_complete;
+ desc->callback_param = host;
+
+ return iflags;
+unmap_exit:
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
+ return -ENOMEM;
+}
+
+static void
+atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
+{
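+	/*
+	 * Nothing to start for PIO transfers: they are driven by the
+	 * RXRDY/TXRDY interrupts enabled in atmci_prepare_data().
+	 */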
+ return;
+}
+
+/*
+ * Start PDC according to transfer direction.
+ */
+static void
+atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
+{
+ if (data->flags & MMC_DATA_READ)
+ atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
+ else
+ atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
+}
+
+static void
+atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
+{
+ struct dma_chan *chan = host->data_chan;
+ struct dma_async_tx_descriptor *desc = host->dma.data_desc;
+
+ if (chan) {
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+ }
+}
+
+static void atmci_stop_transfer(struct atmel_mci *host)
+{
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
+ atmci_set_pending(host, EVENT_XFER_COMPLETE);
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+}
+
+/*
+ * Stop data transfer because error(s) occurred.
+ */
+static void atmci_stop_transfer_pdc(struct atmel_mci *host)
+{
+ atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
+}
+
+static void atmci_stop_transfer_dma(struct atmel_mci *host)
+{
+ struct dma_chan *chan = host->data_chan;
+
+ if (chan) {
+ dmaengine_terminate_all(chan);
+ atmci_dma_cleanup(host);
+ } else {
+ /* Data transfer was stopped by the interrupt handler */
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
+ atmci_set_pending(host, EVENT_XFER_COMPLETE);
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ }
+}
+
+/*
+ * Start a request: prepare data if needed, prepare the command and activate
+ * interrupts.
+ */
+static void atmci_start_request(struct atmel_mci *host,
+ struct atmel_mci_slot *slot)
+{
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ u32 iflags;
+ u32 cmdflags;
+
+ mrq = slot->mrq;
+ host->cur_slot = slot;
+ host->mrq = mrq;
+
+ host->pending_events = 0;
+ host->completed_events = 0;
+ host->cmd_status = 0;
+ host->data_status = 0;
+
+ dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
+
+ if (host->need_reset || host->caps.need_reset_after_xfer) {
+ iflags = atmci_readl(host, ATMCI_IMR);
+ iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
+ atmci_writel(host, ATMCI_MR, host->mode_reg);
+ if (host->caps.has_cfg_reg)
+ atmci_writel(host, ATMCI_CFG, host->cfg_reg);
+ atmci_writel(host, ATMCI_IER, iflags);
+ host->need_reset = false;
+ }
+ atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
+
+ iflags = atmci_readl(host, ATMCI_IMR);
+ if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
+ dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
+ iflags);
+
+ if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
+ /* Send init sequence (74 clock cycles) */
+ atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
+ while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
+ cpu_relax();
+ }
+ iflags = 0;
+ data = mrq->data;
+ if (data) {
+ atmci_set_timeout(host, slot, data);
+
+ /* Must set block count/size before sending command */
+ atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
+ | ATMCI_BLKLEN(data->blksz));
+ dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
+ ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
+
+ iflags |= host->prepare_data(host, data);
+ }
+
+ iflags |= ATMCI_CMDRDY;
+ cmd = mrq->cmd;
+ cmdflags = atmci_prepare_command(slot->mmc, cmd);
+
+ /*
+ * DMA transfer should be started before sending the command to avoid
+ * unexpected errors especially for read operations in SDIO mode.
+ * Unfortunately, in PDC mode, command has to be sent before starting
+ * the transfer.
+ */
+ if (host->submit_data != &atmci_submit_data_dma)
+ atmci_send_command(host, cmd, cmdflags);
+
+ if (data)
+ host->submit_data(host, data);
+
+ if (host->submit_data == &atmci_submit_data_dma)
+ atmci_send_command(host, cmd, cmdflags);
+
+ if (mrq->stop) {
+ host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
+ host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
+ if (!(data->flags & MMC_DATA_WRITE))
+ host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
+ if (data->flags & MMC_DATA_STREAM)
+ host->stop_cmdr |= ATMCI_CMDR_STREAM;
+ else
+ host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
+ }
+
+ /*
+ * We could have enabled interrupts earlier, but I suspect
+ * that would open up a nice can of interesting race
+ * conditions (e.g. command and data complete, but stop not
+ * prepared yet.)
+ */
+ atmci_writel(host, ATMCI_IER, iflags);
+
+ mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
+}
+
+static void atmci_queue_request(struct atmel_mci *host,
+ struct atmel_mci_slot *slot, struct mmc_request *mrq)
+{
+ dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
+ host->state);
+
+ spin_lock_bh(&host->lock);
+ slot->mrq = mrq;
+ if (host->state == STATE_IDLE) {
+ host->state = STATE_SENDING_CMD;
+ atmci_start_request(host, slot);
+ } else {
+ dev_dbg(&host->pdev->dev, "queue request\n");
+ list_add_tail(&slot->queue_node, &host->queue);
+ }
+ spin_unlock_bh(&host->lock);
+}
+
+static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct atmel_mci_slot *slot = mmc_priv(mmc);
+ struct atmel_mci *host = slot->host;
+ struct mmc_data *data;
+
+ WARN_ON(slot->mrq);
+ dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
+
+ pm_runtime_get_sync(&host->pdev->dev);
+
+ /*
+ * We may "know" the card is gone even though there's still an
+ * electrical connection. If so, we really need to communicate
+ * this to the MMC core since there won't be any more
+ * interrupts as the card is completely removed. Otherwise,
+ * the MMC core might believe the card is still there even
+ * though the card was just removed very slowly.
+ */
+ if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
+ mrq->cmd->error = -ENOMEDIUM;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ /* We don't support multiple blocks of weird lengths. */
+ data = mrq->data;
+	if (data && data->blocks > 1 && data->blksz & 3) {
+		mrq->cmd->error = -EINVAL;
+		mmc_request_done(mmc, mrq);
+		return;
+	}
+
+ atmci_queue_request(host, slot, mrq);
+}
+
+static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct atmel_mci_slot *slot = mmc_priv(mmc);
+ struct atmel_mci *host = slot->host;
+ unsigned int i;
+
+ pm_runtime_get_sync(&host->pdev->dev);
+
+ slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
+ break;
+ case MMC_BUS_WIDTH_4:
+ slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
+ break;
+ }
+
+ if (ios->clock) {
+ unsigned int clock_min = ~0U;
+ int clkdiv;
+
+ spin_lock_bh(&host->lock);
+ if (!host->mode_reg) {
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
+ if (host->caps.has_cfg_reg)
+ atmci_writel(host, ATMCI_CFG, host->cfg_reg);
+ }
+
+ /*
+ * Use mirror of ios->clock to prevent race with mmc
+ * core ios update when finding the minimum.
+ */
+ slot->clock = ios->clock;
+ for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
+ if (host->slot[i] && host->slot[i]->clock
+ && host->slot[i]->clock < clock_min)
+ clock_min = host->slot[i]->clock;
+ }
+
+ /* Calculate clock divider */
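+		/*
+		 * The MCI clock is bus_hz / (clkdiv + 2) when odd dividers
+		 * are supported, otherwise bus_hz / (2 * (clkdiv + 1)). For
+		 * example (assumed figures), bus_hz = 132 MHz and clock_min =
+		 * 25 MHz give clkdiv = 4 in the odd-divider case, i.e. 22 MHz.
+		 */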
+ if (host->caps.has_odd_clk_div) {
+ clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
+ if (clkdiv < 0) {
+ dev_warn(&mmc->class_dev,
+ "clock %u too fast; using %lu\n",
+ clock_min, host->bus_hz / 2);
+ clkdiv = 0;
+ } else if (clkdiv > 511) {
+ dev_warn(&mmc->class_dev,
+ "clock %u too slow; using %lu\n",
+ clock_min, host->bus_hz / (511 + 2));
+ clkdiv = 511;
+ }
+ host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
+ | ATMCI_MR_CLKODD(clkdiv & 1);
+ } else {
+ clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
+ if (clkdiv > 255) {
+ dev_warn(&mmc->class_dev,
+ "clock %u too slow; using %lu\n",
+ clock_min, host->bus_hz / (2 * 256));
+ clkdiv = 255;
+ }
+ host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
+ }
+
+ /*
+ * WRPROOF and RDPROOF prevent overruns/underruns by
+ * stopping the clock when the FIFO is full/empty.
+ * This state is not expected to last for long.
+ */
+ if (host->caps.has_rwproof)
+ host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
+
+ if (host->caps.has_cfg_reg) {
+ /* setup High Speed mode in relation with card capacity */
+ if (ios->timing == MMC_TIMING_SD_HS)
+ host->cfg_reg |= ATMCI_CFG_HSMODE;
+ else
+ host->cfg_reg &= ~ATMCI_CFG_HSMODE;
+ }
+
+ if (list_empty(&host->queue)) {
+ atmci_writel(host, ATMCI_MR, host->mode_reg);
+ if (host->caps.has_cfg_reg)
+ atmci_writel(host, ATMCI_CFG, host->cfg_reg);
+ } else {
+ host->need_clock_update = true;
+ }
+
+ spin_unlock_bh(&host->lock);
+ } else {
+ bool any_slot_active = false;
+
+ spin_lock_bh(&host->lock);
+ slot->clock = 0;
+ for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
+ if (host->slot[i] && host->slot[i]->clock) {
+ any_slot_active = true;
+ break;
+ }
+ }
+ if (!any_slot_active) {
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
+ if (host->mode_reg) {
+ atmci_readl(host, ATMCI_MR);
+ }
+ host->mode_reg = 0;
+ }
+ spin_unlock_bh(&host->lock);
+ }
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ break;
+ case MMC_POWER_UP:
+ set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ break;
+ default:
+ /*
+ * TODO: None of the currently available AVR32-based
+ * boards allow MMC power to be turned off. Implement
+ * power control when this can be tested properly.
+ *
+ * We also need to hook this into the clock management
+ * somehow so that newly inserted cards aren't
+ * subjected to a fast clock before we have a chance
+ * to figure out what the maximum rate is. Currently,
+ * there's no way to avoid this, and there never will
+ * be for boards that don't support power control.
+ */
+ break;
+ }
+
+ pm_runtime_mark_last_busy(&host->pdev->dev);
+ pm_runtime_put_autosuspend(&host->pdev->dev);
+}
+
+static int atmci_get_ro(struct mmc_host *mmc)
+{
+ int read_only = -ENOSYS;
+ struct atmel_mci_slot *slot = mmc_priv(mmc);
+
+ if (gpio_is_valid(slot->wp_pin)) {
+ read_only = gpio_get_value(slot->wp_pin);
+ dev_dbg(&mmc->class_dev, "card is %s\n",
+ read_only ? "read-only" : "read-write");
+ }
+
+ return read_only;
+}
+
+static int atmci_get_cd(struct mmc_host *mmc)
+{
+ int present = -ENOSYS;
+ struct atmel_mci_slot *slot = mmc_priv(mmc);
+
+ if (gpio_is_valid(slot->detect_pin)) {
+ present = !(gpio_get_value(slot->detect_pin) ^
+ slot->detect_is_active_high);
+ dev_dbg(&mmc->class_dev, "card is %spresent\n",
+ present ? "" : "not ");
+ }
+
+ return present;
+}
+
+static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct atmel_mci_slot *slot = mmc_priv(mmc);
+ struct atmel_mci *host = slot->host;
+
+ if (enable)
+ atmci_writel(host, ATMCI_IER, slot->sdio_irq);
+ else
+ atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
+}
+
+static const struct mmc_host_ops atmci_ops = {
+ .request = atmci_request,
+ .set_ios = atmci_set_ios,
+ .get_ro = atmci_get_ro,
+ .get_cd = atmci_get_cd,
+ .enable_sdio_irq = atmci_enable_sdio_irq,
+};
+
+/* Called with host->lock held */
+static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
+ __releases(&host->lock)
+ __acquires(&host->lock)
+{
+ struct atmel_mci_slot *slot = NULL;
+ struct mmc_host *prev_mmc = host->cur_slot->mmc;
+
+ WARN_ON(host->cmd || host->data);
+
+ /*
+ * Update the MMC clock rate if necessary. This may be
+ * necessary if set_ios() is called when a different slot is
+ * busy transferring data.
+ */
+ if (host->need_clock_update) {
+ atmci_writel(host, ATMCI_MR, host->mode_reg);
+ if (host->caps.has_cfg_reg)
+ atmci_writel(host, ATMCI_CFG, host->cfg_reg);
+ }
+
+ host->cur_slot->mrq = NULL;
+ host->mrq = NULL;
+ if (!list_empty(&host->queue)) {
+ slot = list_entry(host->queue.next,
+ struct atmel_mci_slot, queue_node);
+ list_del(&slot->queue_node);
+ dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
+ mmc_hostname(slot->mmc));
+ host->state = STATE_SENDING_CMD;
+ atmci_start_request(host, slot);
+ } else {
+ dev_vdbg(&host->pdev->dev, "list empty\n");
+ host->state = STATE_IDLE;
+ }
+
+ del_timer(&host->timer);
+
+ spin_unlock(&host->lock);
+ mmc_request_done(prev_mmc, mrq);
+ spin_lock(&host->lock);
+
+ pm_runtime_mark_last_busy(&host->pdev->dev);
+ pm_runtime_put_autosuspend(&host->pdev->dev);
+}
+
+static void atmci_command_complete(struct atmel_mci *host,
+ struct mmc_command *cmd)
+{
+ u32 status = host->cmd_status;
+
+ /* Read the response from the card (up to 16 bytes) */
+ cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
+ cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
+ cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
+ cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
+
+ if (status & ATMCI_RTOE)
+ cmd->error = -ETIMEDOUT;
+ else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
+ cmd->error = -EILSEQ;
+ else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
+ cmd->error = -EIO;
+ else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
+ if (host->caps.need_blksz_mul_4) {
+ cmd->error = -EINVAL;
+ host->need_reset = 1;
+ }
+ } else
+ cmd->error = 0;
+}
+
+static void atmci_detect_change(unsigned long data)
+{
+ struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
+ bool present;
+ bool present_old;
+
+ /*
+ * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
+ * freeing the interrupt. We must not re-enable the interrupt
+ * if it has been freed, and if we're shutting down, it
+ * doesn't really matter whether the card is present or not.
+ */
+ smp_rmb();
+ if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
+ return;
+
+ enable_irq(gpio_to_irq(slot->detect_pin));
+ present = !(gpio_get_value(slot->detect_pin) ^
+ slot->detect_is_active_high);
+ present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
+
+ dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
+ present, present_old);
+
+ if (present != present_old) {
+ struct atmel_mci *host = slot->host;
+ struct mmc_request *mrq;
+
+ dev_dbg(&slot->mmc->class_dev, "card %s\n",
+ present ? "inserted" : "removed");
+
+ spin_lock(&host->lock);
+
+ if (!present)
+ clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
+ else
+ set_bit(ATMCI_CARD_PRESENT, &slot->flags);
+
+ /* Clean up queue if present */
+ mrq = slot->mrq;
+ if (mrq) {
+ if (mrq == host->mrq) {
+ /*
+ * Reset controller to terminate any ongoing
+ * commands or data transfers.
+ */
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
+ atmci_writel(host, ATMCI_MR, host->mode_reg);
+ if (host->caps.has_cfg_reg)
+ atmci_writel(host, ATMCI_CFG, host->cfg_reg);
+
+ host->data = NULL;
+ host->cmd = NULL;
+
+ switch (host->state) {
+ case STATE_IDLE:
+ break;
+ case STATE_SENDING_CMD:
+ mrq->cmd->error = -ENOMEDIUM;
+ if (mrq->data)
+ host->stop_transfer(host);
+ break;
+ case STATE_DATA_XFER:
+ mrq->data->error = -ENOMEDIUM;
+ host->stop_transfer(host);
+ break;
+ case STATE_WAITING_NOTBUSY:
+ mrq->data->error = -ENOMEDIUM;
+ break;
+ case STATE_SENDING_STOP:
+ mrq->stop->error = -ENOMEDIUM;
+ break;
+ case STATE_END_REQUEST:
+ break;
+ }
+
+ atmci_request_end(host, mrq);
+ } else {
+ list_del(&slot->queue_node);
+ mrq->cmd->error = -ENOMEDIUM;
+ if (mrq->data)
+ mrq->data->error = -ENOMEDIUM;
+ if (mrq->stop)
+ mrq->stop->error = -ENOMEDIUM;
+
+ spin_unlock(&host->lock);
+ mmc_request_done(slot->mmc, mrq);
+ spin_lock(&host->lock);
+ }
+ }
+ spin_unlock(&host->lock);
+
+ mmc_detect_change(slot->mmc, 0);
+ }
+}
+
+static void atmci_tasklet_func(unsigned long priv)
+{
+ struct atmel_mci *host = (struct atmel_mci *)priv;
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_data *data = host->data;
+ enum atmel_mci_state state = host->state;
+ enum atmel_mci_state prev_state;
+ u32 status;
+
+ spin_lock(&host->lock);
+
+ state = host->state;
+
+ dev_vdbg(&host->pdev->dev,
+ "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
+ state, host->pending_events, host->completed_events,
+ atmci_readl(host, ATMCI_IMR));
+
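+	/*
+	 * Run the state machine until it settles. The usual path is
+	 * SENDING_CMD -> DATA_XFER -> WAITING_NOTBUSY or SENDING_STOP ->
+	 * END_REQUEST, each step gated on the pending-event bits set by
+	 * the interrupt handler.
+	 */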
+ do {
+ prev_state = state;
+ dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
+
+ switch (state) {
+ case STATE_IDLE:
+ break;
+
+ case STATE_SENDING_CMD:
+ /*
+			 * The command has been sent; we are waiting for it to
+			 * become ready. Three next states are then possible:
+			 * END_REQUEST by default, WAITING_NOTBUSY if the
+			 * command needs it, or DATA_XFER if there is data.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
+ if (!atmci_test_and_clear_pending(host,
+ EVENT_CMD_RDY))
+ break;
+
+ dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
+ host->cmd = NULL;
+ atmci_set_completed(host, EVENT_CMD_RDY);
+ atmci_command_complete(host, mrq->cmd);
+ if (mrq->data) {
+ dev_dbg(&host->pdev->dev,
+					"command with data transfer\n");
+ /*
+ * If there is a command error don't start
+ * data transfer.
+ */
+ if (mrq->cmd->error) {
+ host->stop_transfer(host);
+ host->data = NULL;
+ atmci_writel(host, ATMCI_IDR,
+ ATMCI_TXRDY | ATMCI_RXRDY
+ | ATMCI_DATA_ERROR_FLAGS);
+ state = STATE_END_REQUEST;
+ } else
+ state = STATE_DATA_XFER;
+ } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
+ dev_dbg(&host->pdev->dev,
+					"command response requires waiting for not busy\n");
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ state = STATE_WAITING_NOTBUSY;
+ } else
+ state = STATE_END_REQUEST;
+
+ break;
+
+ case STATE_DATA_XFER:
+ if (atmci_test_and_clear_pending(host,
+ EVENT_DATA_ERROR)) {
+ dev_dbg(&host->pdev->dev, "set completed data error\n");
+ atmci_set_completed(host, EVENT_DATA_ERROR);
+ state = STATE_END_REQUEST;
+ break;
+ }
+
+ /*
+			 * A data transfer is in progress. The event expected
+			 * to move to the next state depends on the data
+			 * transfer type (PDC or DMA). Once the transfer is
+			 * done we can move to the next step, which is
+			 * WAITING_NOTBUSY in the write case and directly
+			 * SENDING_STOP in the read case.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
+ if (!atmci_test_and_clear_pending(host,
+ EVENT_XFER_COMPLETE))
+ break;
+
+ dev_dbg(&host->pdev->dev,
+ "(%s) set completed xfer complete\n",
+ __func__);
+ atmci_set_completed(host, EVENT_XFER_COMPLETE);
+
+ if (host->caps.need_notbusy_for_read_ops ||
+ (host->data->flags & MMC_DATA_WRITE)) {
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ state = STATE_WAITING_NOTBUSY;
+ } else if (host->mrq->stop) {
+ atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
+ atmci_send_stop_cmd(host, data);
+ state = STATE_SENDING_STOP;
+ } else {
+ host->data = NULL;
+ data->bytes_xfered = data->blocks * data->blksz;
+ data->error = 0;
+ state = STATE_END_REQUEST;
+ }
+ break;
+
+ case STATE_WAITING_NOTBUSY:
+ /*
+			 * We can be in this state for two reasons: a command
+			 * requiring the not-busy signal (stop command
+			 * included) or a write operation. In the latter case,
+			 * we need to send a stop command.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
+ if (!atmci_test_and_clear_pending(host,
+ EVENT_NOTBUSY))
+ break;
+
+ dev_dbg(&host->pdev->dev, "set completed not busy\n");
+ atmci_set_completed(host, EVENT_NOTBUSY);
+
+ if (host->data) {
+ /*
+ * For some commands such as CMD53, even if
+ * there is data transfer, there is no stop
+ * command to send.
+ */
+ if (host->mrq->stop) {
+ atmci_writel(host, ATMCI_IER,
+ ATMCI_CMDRDY);
+ atmci_send_stop_cmd(host, data);
+ state = STATE_SENDING_STOP;
+ } else {
+ host->data = NULL;
+ data->bytes_xfered = data->blocks
+ * data->blksz;
+ data->error = 0;
+ state = STATE_END_REQUEST;
+ }
+ } else
+ state = STATE_END_REQUEST;
+ break;
+
+ case STATE_SENDING_STOP:
+ /*
+ * In this state, it is important to set host->data to
+ * NULL (which is tested in the waiting notbusy state)
+ * in order to go to the end request state instead of
+ * sending stop again.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
+ if (!atmci_test_and_clear_pending(host,
+ EVENT_CMD_RDY))
+ break;
+
+ dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
+ host->cmd = NULL;
+ data->bytes_xfered = data->blocks * data->blksz;
+ data->error = 0;
+ atmci_command_complete(host, mrq->stop);
+ if (mrq->stop->error) {
+ host->stop_transfer(host);
+ atmci_writel(host, ATMCI_IDR,
+ ATMCI_TXRDY | ATMCI_RXRDY
+ | ATMCI_DATA_ERROR_FLAGS);
+ state = STATE_END_REQUEST;
+ } else {
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ state = STATE_WAITING_NOTBUSY;
+ }
+ host->data = NULL;
+ break;
+
+ case STATE_END_REQUEST:
+ atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
+ | ATMCI_DATA_ERROR_FLAGS);
+ status = host->data_status;
+ if (unlikely(status)) {
+ host->stop_transfer(host);
+ host->data = NULL;
+ if (data) {
+ if (status & ATMCI_DTOE) {
+ data->error = -ETIMEDOUT;
+ } else if (status & ATMCI_DCRCE) {
+ data->error = -EILSEQ;
+ } else {
+ data->error = -EIO;
+ }
+ }
+ }
+
+ atmci_request_end(host, host->mrq);
+ state = STATE_IDLE;
+ break;
+ }
+ } while (state != prev_state);
+
+ host->state = state;
+
+ spin_unlock(&host->lock);
+}
+
+static void atmci_read_data_pio(struct atmel_mci *host)
+{
+ struct scatterlist *sg = host->sg;
+ void *buf = sg_virt(sg);
+ unsigned int offset = host->pio_offset;
+ struct mmc_data *data = host->data;
+ u32 value;
+ u32 status;
+ unsigned int nbytes = 0;
+
+ do {
+ value = atmci_readl(host, ATMCI_RDR);
+ if (likely(offset + 4 <= sg->length)) {
+ put_unaligned(value, (u32 *)(buf + offset));
+
+ offset += 4;
+ nbytes += 4;
+
+ if (offset == sg->length) {
+ flush_dcache_page(sg_page(sg));
+ host->sg = sg = sg_next(sg);
+ host->sg_len--;
+ if (!sg || !host->sg_len)
+ goto done;
+
+ offset = 0;
+ buf = sg_virt(sg);
+ }
+ } else {
+ unsigned int remaining = sg->length - offset;
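+			/*
+			 * The received word straddles an sg-entry boundary:
+			 * the first 'remaining' bytes finish this entry and
+			 * the rest seed the next one.
+			 */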
+ memcpy(buf + offset, &value, remaining);
+ nbytes += remaining;
+
+ flush_dcache_page(sg_page(sg));
+ host->sg = sg = sg_next(sg);
+ host->sg_len--;
+ if (!sg || !host->sg_len)
+ goto done;
+
+ offset = 4 - remaining;
+ buf = sg_virt(sg);
+ memcpy(buf, (u8 *)&value + remaining, offset);
+ nbytes += offset;
+ }
+
+ status = atmci_readl(host, ATMCI_SR);
+ if (status & ATMCI_DATA_ERROR_FLAGS) {
+ atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
+ | ATMCI_DATA_ERROR_FLAGS));
+ host->data_status = status;
+ data->bytes_xfered += nbytes;
+ return;
+ }
+ } while (status & ATMCI_RXRDY);
+
+ host->pio_offset = offset;
+ data->bytes_xfered += nbytes;
+
+ return;
+
+done:
+ atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ data->bytes_xfered += nbytes;
+ smp_wmb();
+ atmci_set_pending(host, EVENT_XFER_COMPLETE);
+}
+
+static void atmci_write_data_pio(struct atmel_mci *host)
+{
+ struct scatterlist *sg = host->sg;
+ void *buf = sg_virt(sg);
+ unsigned int offset = host->pio_offset;
+ struct mmc_data *data = host->data;
+ u32 value;
+ u32 status;
+ unsigned int nbytes = 0;
+
+ do {
+ if (likely(offset + 4 <= sg->length)) {
+ value = get_unaligned((u32 *)(buf + offset));
+ atmci_writel(host, ATMCI_TDR, value);
+
+ offset += 4;
+ nbytes += 4;
+ if (offset == sg->length) {
+ host->sg = sg = sg_next(sg);
+ host->sg_len--;
+ if (!sg || !host->sg_len)
+ goto done;
+
+ offset = 0;
+ buf = sg_virt(sg);
+ }
+ } else {
+ unsigned int remaining = sg->length - offset;
+
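+			/*
+			 * Gather a word that straddles an sg-entry boundary:
+			 * the last 'remaining' bytes of this entry plus the
+			 * first bytes of the next one.
+			 */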
+ value = 0;
+ memcpy(&value, buf + offset, remaining);
+ nbytes += remaining;
+
+ host->sg = sg = sg_next(sg);
+ host->sg_len--;
+ if (!sg || !host->sg_len) {
+ atmci_writel(host, ATMCI_TDR, value);
+ goto done;
+ }
+
+ offset = 4 - remaining;
+ buf = sg_virt(sg);
+ memcpy((u8 *)&value + remaining, buf, offset);
+ atmci_writel(host, ATMCI_TDR, value);
+ nbytes += offset;
+ }
+
+ status = atmci_readl(host, ATMCI_SR);
+ if (status & ATMCI_DATA_ERROR_FLAGS) {
+ atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
+ | ATMCI_DATA_ERROR_FLAGS));
+ host->data_status = status;
+ data->bytes_xfered += nbytes;
+ return;
+ }
+ } while (status & ATMCI_TXRDY);
+
+ host->pio_offset = offset;
+ data->bytes_xfered += nbytes;
+
+ return;
+
+done:
+ atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ data->bytes_xfered += nbytes;
+ smp_wmb();
+ atmci_set_pending(host, EVENT_XFER_COMPLETE);
+}
+
+static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
+{
+ int i;
+
+ for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
+ struct atmel_mci_slot *slot = host->slot[i];
+ if (slot && (status & slot->sdio_irq)) {
+ mmc_signal_sdio_irq(slot->mmc);
+ }
+ }
+}
+
+
+static irqreturn_t atmci_interrupt(int irq, void *dev_id)
+{
+ struct atmel_mci *host = dev_id;
+ u32 status, mask, pending;
+ unsigned int pass_count = 0;
+
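+	/*
+	 * Make a bounded number of passes over the status register so that
+	 * events raised while we are here are still handled; if nothing was
+	 * pending on the first pass, report IRQ_NONE (spurious interrupt).
+	 */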
+ do {
+ status = atmci_readl(host, ATMCI_SR);
+ mask = atmci_readl(host, ATMCI_IMR);
+ pending = status & mask;
+ if (!pending)
+ break;
+
+ if (pending & ATMCI_DATA_ERROR_FLAGS) {
+ dev_dbg(&host->pdev->dev, "IRQ: data error\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
+ | ATMCI_RXRDY | ATMCI_TXRDY
+ | ATMCI_ENDRX | ATMCI_ENDTX
+ | ATMCI_RXBUFF | ATMCI_TXBUFE);
+
+ host->data_status = status;
+ dev_dbg(&host->pdev->dev, "set pending data error\n");
+ smp_wmb();
+ atmci_set_pending(host, EVENT_DATA_ERROR);
+ tasklet_schedule(&host->tasklet);
+ }
+
+ if (pending & ATMCI_TXBUFE) {
+ dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
+ atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
+ /*
+			 * We can receive this interrupt before the second PDC
+			 * buffer has been configured, so we need to
+			 * reconfigure the first and second buffers again.
+ */
+ if (host->data_size) {
+ atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
+ atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
+ atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
+ } else {
+ atmci_pdc_complete(host);
+ }
+ } else if (pending & ATMCI_ENDTX) {
+ dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
+
+ if (host->data_size) {
+ atmci_pdc_set_single_buf(host,
+ XFER_TRANSMIT, PDC_SECOND_BUF);
+ atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
+ }
+ }
+
+ if (pending & ATMCI_RXBUFF) {
+ dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
+ atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
+ /*
+			 * We can receive this interrupt before the second PDC
+			 * buffer has been configured, so we need to
+			 * reconfigure the first and second buffers again.
+ */
+ if (host->data_size) {
+ atmci_pdc_set_both_buf(host, XFER_RECEIVE);
+ atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
+ atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
+ } else {
+ atmci_pdc_complete(host);
+ }
+ } else if (pending & ATMCI_ENDRX) {
+ dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
+
+ if (host->data_size) {
+ atmci_pdc_set_single_buf(host,
+ XFER_RECEIVE, PDC_SECOND_BUF);
+ atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
+ }
+ }
+
+ /*
+		 * Early MCI IPs, mainly the ones with PDC, have some issues
+		 * with the NOTBUSY signal: you cannot get it after a data
+		 * transmission if you have not sent a stop command.
+ * The appropriate workaround is to use the BLKE signal.
+ */
+ if (pending & ATMCI_BLKE) {
+ dev_dbg(&host->pdev->dev, "IRQ: blke\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
+ smp_wmb();
+ dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+ atmci_set_pending(host, EVENT_NOTBUSY);
+ tasklet_schedule(&host->tasklet);
+ }
+
+ if (pending & ATMCI_NOTBUSY) {
+ dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
+ smp_wmb();
+ dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+ atmci_set_pending(host, EVENT_NOTBUSY);
+ tasklet_schedule(&host->tasklet);
+ }
+
+ if (pending & ATMCI_RXRDY)
+ atmci_read_data_pio(host);
+ if (pending & ATMCI_TXRDY)
+ atmci_write_data_pio(host);
+
+ if (pending & ATMCI_CMDRDY) {
+ dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
+ host->cmd_status = status;
+ smp_wmb();
+ dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
+ atmci_set_pending(host, EVENT_CMD_RDY);
+ tasklet_schedule(&host->tasklet);
+ }
+
+ if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
+ atmci_sdio_interrupt(host, status);
+
+ } while (pass_count++ < 5);
+
+ return pass_count ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
+{
+ struct atmel_mci_slot *slot = dev_id;
+
+ /*
+	 * Disable interrupts until the pin has stabilized, then check
+	 * the state. Use mod_timer() since we may be in the
+ * middle of the timer routine when this interrupt triggers.
+ */
+ disable_irq_nosync(irq);
+ mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
+
+ return IRQ_HANDLED;
+}
+
+static int atmci_init_slot(struct atmel_mci *host,
+ struct mci_slot_pdata *slot_data, unsigned int id,
+ u32 sdc_reg, u32 sdio_irq)
+{
+ struct mmc_host *mmc;
+ struct atmel_mci_slot *slot;
+
+ mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ slot = mmc_priv(mmc);
+ slot->mmc = mmc;
+ slot->host = host;
+ slot->detect_pin = slot_data->detect_pin;
+ slot->wp_pin = slot_data->wp_pin;
+ slot->detect_is_active_high = slot_data->detect_is_active_high;
+ slot->sdc_reg = sdc_reg;
+ slot->sdio_irq = sdio_irq;
+
+ dev_dbg(&mmc->class_dev,
+ "slot[%u]: bus_width=%u, detect_pin=%d, "
+ "detect_is_active_high=%s, wp_pin=%d\n",
+ id, slot_data->bus_width, slot_data->detect_pin,
+ slot_data->detect_is_active_high ? "true" : "false",
+ slot_data->wp_pin);
+
+ mmc->ops = &atmci_ops;
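+ /*
+ * The MCI clock is bus_hz / (2 * (CLKDIV + 1)); f_min below
+ * corresponds to the maximum divider, bus_hz / (2 * (255 + 1)).
+ */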
+ mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
+ mmc->f_max = host->bus_hz / 2;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ if (sdio_irq)
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+ if (host->caps.has_highspeed)
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+ /*
+ * Without the read/write proof capability, it is strongly suggested to
+ * use only one data bit to prevent FIFO underruns and overruns,
+ * which would corrupt data.
+ */
+ if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ if (atmci_get_version(host) < 0x200) {
+ mmc->max_segs = 256;
+ mmc->max_blk_size = 4095;
+ mmc->max_blk_count = 256;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
+ } else {
+ mmc->max_segs = 64;
+ mmc->max_req_size = 32768 * 512;
+ mmc->max_blk_size = 32768;
+ mmc->max_blk_count = 512;
+ }
+
+ /* Assume card is present initially */
+ set_bit(ATMCI_CARD_PRESENT, &slot->flags);
+ if (gpio_is_valid(slot->detect_pin)) {
+ if (devm_gpio_request(&host->pdev->dev, slot->detect_pin,
+ "mmc_detect")) {
+ dev_dbg(&mmc->class_dev, "no detect pin available\n");
+ slot->detect_pin = -EBUSY;
+ } else if (gpio_get_value(slot->detect_pin) ^
+ slot->detect_is_active_high) {
+ clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
+ }
+ }
+
+ if (!gpio_is_valid(slot->detect_pin)) {
+ if (slot_data->non_removable)
+ mmc->caps |= MMC_CAP_NONREMOVABLE;
+ else
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+ }
+
+ if (gpio_is_valid(slot->wp_pin)) {
+ if (devm_gpio_request(&host->pdev->dev, slot->wp_pin,
+ "mmc_wp")) {
+ dev_dbg(&mmc->class_dev, "no WP pin available\n");
+ slot->wp_pin = -EBUSY;
+ }
+ }
+
+ host->slot[id] = slot;
+ mmc_regulator_get_supply(mmc);
+ mmc_add_host(mmc);
+
+ if (gpio_is_valid(slot->detect_pin)) {
+ int ret;
+
+ setup_timer(&slot->detect_timer, atmci_detect_change,
+ (unsigned long)slot);
+
+ ret = request_irq(gpio_to_irq(slot->detect_pin),
+ atmci_detect_interrupt,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "mmc-detect", slot);
+ if (ret) {
+ dev_dbg(&mmc->class_dev,
+ "could not request IRQ %d for detect pin\n",
+ gpio_to_irq(slot->detect_pin));
+ slot->detect_pin = -EBUSY;
+ }
+ }
+
+ atmci_init_debugfs(slot);
+
+ return 0;
+}
+
+static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
+ unsigned int id)
+{
+ /* Debugfs stuff is cleaned up by mmc core */
+
+ set_bit(ATMCI_SHUTDOWN, &slot->flags);
+ smp_wmb();
+
+ mmc_remove_host(slot->mmc);
+
+ if (gpio_is_valid(slot->detect_pin)) {
+ int pin = slot->detect_pin;
+
+ free_irq(gpio_to_irq(pin), slot);
+ del_timer_sync(&slot->detect_timer);
+ }
+
+ slot->host->slot[id] = NULL;
+ mmc_free_host(slot->mmc);
+}
+
+static int atmci_configure_dma(struct atmel_mci *host)
+{
+ host->dma.chan = dma_request_slave_channel_reason(&host->pdev->dev,
+ "rxtx");
+ if (IS_ERR(host->dma.chan))
+ return PTR_ERR(host->dma.chan);
+
+ dev_info(&host->pdev->dev, "using %s for DMA transfers\n",
+ dma_chan_name(host->dma.chan));
+
+ host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
+ host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_conf.src_maxburst = 1;
+ host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
+ host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_conf.dst_maxburst = 1;
+ host->dma_conf.device_fc = false;
+
+ return 0;
+}
+
+/*
+ * The HSMCI (High Speed MCI) module is not fully compatible with the MCI
+ * module: it provides DMA support and a new configuration register, but it
+ * no longer supports the PDC.
+ */
+static void atmci_get_cap(struct atmel_mci *host)
+{
+ unsigned int version;
+
+ version = atmci_get_version(host);
+ dev_info(&host->pdev->dev,
+ "version: 0x%x\n", version);
+
+ host->caps.has_dma_conf_reg = 0;
+ host->caps.has_pdc = ATMCI_PDC_CONNECTED;
+ host->caps.has_cfg_reg = 0;
+ host->caps.has_cstor_reg = 0;
+ host->caps.has_highspeed = 0;
+ host->caps.has_rwproof = 0;
+ host->caps.has_odd_clk_div = 0;
+ host->caps.has_bad_data_ordering = 1;
+ host->caps.need_reset_after_xfer = 1;
+ host->caps.need_blksz_mul_4 = 1;
+ host->caps.need_notbusy_for_read_ops = 0;
+
+ /* keep only major version number */
+ switch (version & 0xf00) {
+ case 0x600:
+ case 0x500:
+ host->caps.has_odd_clk_div = 1;
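+ /* fall through */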
+ case 0x400:
+ case 0x300:
+ host->caps.has_dma_conf_reg = 1;
+ host->caps.has_pdc = 0;
+ host->caps.has_cfg_reg = 1;
+ host->caps.has_cstor_reg = 1;
+ host->caps.has_highspeed = 1;
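+ /* fall through */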
+ case 0x200:
+ host->caps.has_rwproof = 1;
+ host->caps.need_blksz_mul_4 = 0;
+ host->caps.need_notbusy_for_read_ops = 1;
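+ /* fall through */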
+ case 0x100:
+ host->caps.has_bad_data_ordering = 0;
+ host->caps.need_reset_after_xfer = 0;
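+ /* fall through */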
+ case 0x0:
+ break;
+ default:
+ host->caps.has_pdc = 0;
+ dev_warn(&host->pdev->dev,
+ "Unmanaged mci version, set minimum capabilities\n");
+ break;
+ }
+}
+
+static int atmci_probe(struct platform_device *pdev)
+{
+ struct mci_platform_data *pdata;
+ struct atmel_mci *host;
+ struct resource *regs;
+ unsigned int nr_slots;
+ int irq;
+ int ret, i;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ pdata = atmci_of_init(pdev);
+ if (IS_ERR(pdata)) {
+ dev_err(&pdev->dev, "platform data not available\n");
+ return PTR_ERR(pdata);
+ }
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->pdev = pdev;
+ spin_lock_init(&host->lock);
+ INIT_LIST_HEAD(&host->queue);
+
+ host->mck = devm_clk_get(&pdev->dev, "mci_clk");
+ if (IS_ERR(host->mck))
+ return PTR_ERR(host->mck);
+
+ host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+ if (!host->regs)
+ return -ENOMEM;
+
+ ret = clk_prepare_enable(host->mck);
+ if (ret)
+ return ret;
+
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
+ host->bus_hz = clk_get_rate(host->mck);
+
+ host->mapbase = regs->start;
+
+ tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
+
+ ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
+ if (ret) {
+ clk_disable_unprepare(host->mck);
+ return ret;
+ }
+
+ /* Get MCI capabilities and set operations according to it */
+ atmci_get_cap(host);
+ ret = atmci_configure_dma(host);
+ if (ret == -EPROBE_DEFER)
+ goto err_dma_probe_defer;
+ if (ret == 0) {
+ host->prepare_data = &atmci_prepare_data_dma;
+ host->submit_data = &atmci_submit_data_dma;
+ host->stop_transfer = &atmci_stop_transfer_dma;
+ } else if (host->caps.has_pdc) {
+ dev_info(&pdev->dev, "using PDC\n");
+ host->prepare_data = &atmci_prepare_data_pdc;
+ host->submit_data = &atmci_submit_data_pdc;
+ host->stop_transfer = &atmci_stop_transfer_pdc;
+ } else {
+ dev_info(&pdev->dev, "using PIO\n");
+ host->prepare_data = &atmci_prepare_data;
+ host->submit_data = &atmci_submit_data;
+ host->stop_transfer = &atmci_stop_transfer;
+ }
+
+ platform_set_drvdata(pdev, host);
+
+ setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /* We need at least one slot to succeed */
+ nr_slots = 0;
+ ret = -ENODEV;
+ if (pdata->slot[0].bus_width) {
+ ret = atmci_init_slot(host, &pdata->slot[0],
+ 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
+ if (!ret) {
+ nr_slots++;
+ host->buf_size = host->slot[0]->mmc->max_req_size;
+ }
+ }
+ if (pdata->slot[1].bus_width) {
+ ret = atmci_init_slot(host, &pdata->slot[1],
+ 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
+ if (!ret) {
+ nr_slots++;
+ if (host->slot[1]->mmc->max_req_size > host->buf_size)
+ host->buf_size =
+ host->slot[1]->mmc->max_req_size;
+ }
+ }
+
+ if (!nr_slots) {
+ dev_err(&pdev->dev, "init failed: no slot defined\n");
+ goto err_init_slot;
+ }
+
+ if (!host->caps.has_rwproof) {
+ host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
+ &host->buf_phys_addr,
+ GFP_KERNEL);
+ if (!host->buffer) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "buffer allocation failed\n");
+ goto err_dma_alloc;
+ }
+ }
+
+ dev_info(&pdev->dev,
+ "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
+ host->mapbase, irq, nr_slots);
+
+ pm_runtime_mark_last_busy(&host->pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+
+err_dma_alloc:
+ for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
+ if (host->slot[i])
+ atmci_cleanup_slot(host->slot[i], i);
+ }
+err_init_slot:
+ clk_disable_unprepare(host->mck);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ del_timer_sync(&host->timer);
+ if (!IS_ERR(host->dma.chan))
+ dma_release_channel(host->dma.chan);
+err_dma_probe_defer:
+ free_irq(irq, host);
+ return ret;
+}
+
+static int atmci_remove(struct platform_device *pdev)
+{
+ struct atmel_mci *host = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ if (host->buffer)
+ dma_free_coherent(&pdev->dev, host->buf_size,
+ host->buffer, host->buf_phys_addr);
+
+ for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
+ if (host->slot[i])
+ atmci_cleanup_slot(host->slot[i], i);
+ }
+
+ atmci_writel(host, ATMCI_IDR, ~0UL);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
+ atmci_readl(host, ATMCI_SR);
+
+ del_timer_sync(&host->timer);
+ if (!IS_ERR(host->dma.chan))
+ dma_release_channel(host->dma.chan);
+
+ free_irq(platform_get_irq(pdev, 0), host);
+
+ clk_disable_unprepare(host->mck);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int atmci_runtime_suspend(struct device *dev)
+{
+ struct atmel_mci *host = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(host->mck);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int atmci_runtime_resume(struct device *dev)
+{
+ struct atmel_mci *host = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_default_state(dev);
+
+ return clk_prepare_enable(host->mck);
+}
+#endif
+
+static const struct dev_pm_ops atmci_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
+};
+
+static struct platform_driver atmci_driver = {
+ .probe = atmci_probe,
+ .remove = atmci_remove,
+ .driver = {
+ .name = "atmel_mci",
+ .of_match_table = of_match_ptr(atmci_dt_ids),
+ .pm = &atmci_dev_pm_ops,
+ },
+};
+module_platform_driver(atmci_driver);
+
+MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/au1xmmc.c b/kernel/drivers/mmc/host/au1xmmc.c
new file mode 100644
index 000000000..ed77fbfa4
--- /dev/null
+++ b/kernel/drivers/mmc/host/au1xmmc.c
@@ -0,0 +1,1240 @@
+/*
+ * linux/drivers/mmc/host/au1xmmc.c - AU1XX0 MMC driver
+ *
+ * Copyright (c) 2005, Advanced Micro Devices, Inc.
+ *
+ * Developed with help from the 2.4.30 MMC AU1XXX controller including
+ * the following copyright notices:
+ * Copyright (c) 2003-2004 Embedded Edge, LLC.
+ * Portions Copyright (C) 2002 Embedix, Inc
+ * Copyright 2002 Hewlett-Packard Company
+ *
+ * 2.6 version of this driver inspired by:
+ * (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman,
+ * All Rights Reserved.
+ * (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King,
+ * All Rights Reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Why don't we use the SD controllers' carddetect feature?
+ *
+ * From the AU1100 MMC application guide:
+ * If the Au1100-based design is intended to support both MultiMediaCards
+ * and 1- or 4-data bit SecureDigital cards, then the solution is to
+ * connect a weak (560 kOhm) pull-up resistor to connector pin 1.
+ * In doing so, an MMC card never enters SPI-mode communications,
+ * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
+ * (the low-to-high transition will not occur).
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/leds.h>
+#include <linux/mmc/host.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-au1x00/au1100_mmc.h>
+
+#define DRIVER_NAME "au1xxx-mmc"
+
+/* Set this to enable special debugging macros */
+/* #define DEBUG */
+
+#ifdef DEBUG
+#define DBG(fmt, idx, args...) \
+ pr_debug("au1xmmc(%d): DEBUG: " fmt, idx, ##args)
+#else
+#define DBG(fmt, idx, args...) do {} while (0)
+#endif
+
+/* Hardware definitions */
+#define AU1XMMC_DESCRIPTOR_COUNT 1
+
+/* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */
+#define AU1100_MMC_DESCRIPTOR_SIZE 0x0000ffff
+#define AU1200_MMC_DESCRIPTOR_SIZE 0x003fffff
+
+#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
+ MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
+ MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)
+
+/* This gives us a hard value for the stop command that we can write directly
+ * to the command register.
+ */
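+/* The command index 0xC below is CMD12 (STOP_TRANSMISSION). */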
+#define STOP_CMD \
+ (SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO)
+
+/* This is the set of interrupts that we configure by default. */
+#define AU1XMMC_INTERRUPTS \
+ (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT | \
+ SD_CONFIG_CR | SD_CONFIG_I)
+
+/* The poll event (looking for insert/remove events) runs twice a second. */
+#define AU1XMMC_DETECT_TIMEOUT (HZ/2)
+
+struct au1xmmc_host {
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+
+ u32 flags;
+ void __iomem *iobase;
+ u32 clock;
+ u32 bus_width;
+ u32 power_mode;
+
+ int status;
+
+ struct {
+ int len;
+ int dir;
+ } dma;
+
+ struct {
+ int index;
+ int offset;
+ int len;
+ } pio;
+
+ u32 tx_chan;
+ u32 rx_chan;
+
+ int irq;
+
+ struct tasklet_struct finish_task;
+ struct tasklet_struct data_task;
+ struct au1xmmc_platform_data *platdata;
+ struct platform_device *pdev;
+ struct resource *ioarea;
+ struct clk *clk;
+};
+
+/* Status flags used by the host structure */
+#define HOST_F_XMIT 0x0001
+#define HOST_F_RECV 0x0002
+#define HOST_F_DMA 0x0010
+#define HOST_F_DBDMA 0x0020
+#define HOST_F_ACTIVE 0x0100
+#define HOST_F_STOP 0x1000
+
+#define HOST_S_IDLE 0x0001
+#define HOST_S_CMD 0x0002
+#define HOST_S_DATA 0x0003
+#define HOST_S_STOP 0x0004
+
+/* Easy access macros */
+#define HOST_STATUS(h) ((h)->iobase + SD_STATUS)
+#define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG)
+#define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE)
+#define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT)
+#define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT)
+#define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG)
+#define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE)
+#define HOST_CMD(h) ((h)->iobase + SD_CMD)
+#define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2)
+#define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT)
+#define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG)
+
+#define DMA_CHANNEL(h) \
+ (((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)
+
+static inline int has_dbdma(void)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1200:
+ case ALCHEMY_CPU_AU1300:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
+{
+ u32 val = __raw_readl(HOST_CONFIG(host));
+ val |= mask;
+ __raw_writel(val, HOST_CONFIG(host));
+ wmb(); /* drain writebuffer */
+}
+
+static inline void FLUSH_FIFO(struct au1xmmc_host *host)
+{
+ u32 val = __raw_readl(HOST_CONFIG2(host));
+
+ __raw_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
+ mdelay(1);
+
+ /* SEND_STOP will turn off clock control - this re-enables it */
+ val &= ~SD_CONFIG2_DF;
+
+ __raw_writel(val, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
+}
+
+static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
+{
+ u32 val = __raw_readl(HOST_CONFIG(host));
+ val &= ~mask;
+ __raw_writel(val, HOST_CONFIG(host));
+ wmb(); /* drain writebuffer */
+}
+
+static inline void SEND_STOP(struct au1xmmc_host *host)
+{
+ u32 config2;
+
+ WARN_ON(host->status != HOST_S_DATA);
+ host->status = HOST_S_STOP;
+
+ config2 = __raw_readl(HOST_CONFIG2(host));
+ __raw_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
+
+ /* Send the stop command */
+ __raw_writel(STOP_CMD, HOST_CMD(host));
+ wmb(); /* drain writebuffer */
+}
+
+static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
+{
+ if (host->platdata && host->platdata->set_power)
+ host->platdata->set_power(host->mmc, state);
+}
+
+static int au1xmmc_card_inserted(struct mmc_host *mmc)
+{
+ struct au1xmmc_host *host = mmc_priv(mmc);
+
+ if (host->platdata && host->platdata->card_inserted)
+ return !!host->platdata->card_inserted(host->mmc);
+
+ return -ENOSYS;
+}
+
+static int au1xmmc_card_readonly(struct mmc_host *mmc)
+{
+ struct au1xmmc_host *host = mmc_priv(mmc);
+
+ if (host->platdata && host->platdata->card_readonly)
+ return !!host->platdata->card_readonly(mmc);
+
+ return -ENOSYS;
+}
+
+static void au1xmmc_finish_request(struct au1xmmc_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+
+ host->mrq = NULL;
+ host->flags &= HOST_F_ACTIVE | HOST_F_DMA;
+
+ host->dma.len = 0;
+ host->dma.dir = 0;
+
+ host->pio.index = 0;
+ host->pio.offset = 0;
+ host->pio.len = 0;
+
+ host->status = HOST_S_IDLE;
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void au1xmmc_tasklet_finish(unsigned long param)
+{
+ struct au1xmmc_host *host = (struct au1xmmc_host *) param;
+ au1xmmc_finish_request(host);
+}
+
+static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
+ struct mmc_command *cmd, struct mmc_data *data)
+{
+ u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ break;
+ case MMC_RSP_R1:
+ mmccmd |= SD_CMD_RT_1;
+ break;
+ case MMC_RSP_R1B:
+ mmccmd |= SD_CMD_RT_1B;
+ break;
+ case MMC_RSP_R2:
+ mmccmd |= SD_CMD_RT_2;
+ break;
+ case MMC_RSP_R3:
+ mmccmd |= SD_CMD_RT_3;
+ break;
+ default:
+ pr_info("au1xmmc: unhandled response type %02x\n",
+ mmc_resp_type(cmd));
+ return -EINVAL;
+ }
+
+ if (data) {
+ if (data->flags & MMC_DATA_READ) {
+ if (data->blocks > 1)
+ mmccmd |= SD_CMD_CT_4;
+ else
+ mmccmd |= SD_CMD_CT_2;
+ } else if (data->flags & MMC_DATA_WRITE) {
+ if (data->blocks > 1)
+ mmccmd |= SD_CMD_CT_3;
+ else
+ mmccmd |= SD_CMD_CT_1;
+ }
+ }
+
+ __raw_writel(cmd->arg, HOST_CMDARG(host));
+ wmb(); /* drain writebuffer */
+
+ if (wait)
+ IRQ_OFF(host, SD_CONFIG_CR);
+
+ __raw_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
+ wmb(); /* drain writebuffer */
+
+ /* Wait for the command to go on the line */
+ while (__raw_readl(HOST_CMD(host)) & SD_CMD_GO)
+ /* nop */;
+
+ /* Wait for the command to come back */
+ if (wait) {
+ u32 status = __raw_readl(HOST_STATUS(host));
+
+ while (!(status & SD_STATUS_CR))
+ status = __raw_readl(HOST_STATUS(host));
+
+ /* Clear the CR status */
+ __raw_writel(SD_STATUS_CR, HOST_STATUS(host));
+
+ IRQ_ON(host, SD_CONFIG_CR);
+ }
+
+ return 0;
+}
+
+static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_data *data;
+ u32 crc;
+
+ WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP));
+
+ if (host->mrq == NULL)
+ return;
+
+ data = mrq->cmd->data;
+
+ if (status == 0)
+ status = __raw_readl(HOST_STATUS(host));
+
+ /* The transaction is really over when the SD_STATUS_DB bit is clear */
+ while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
+ status = __raw_readl(HOST_STATUS(host));
+
+ data->error = 0;
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);
+
+ /* Process any errors */
+ crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
+ if (host->flags & HOST_F_XMIT)
+ crc |= ((status & 0x07) == 0x02) ? 0 : 1;
+
+ if (crc)
+ data->error = -EILSEQ;
+
+ /* Clear the CRC bits */
+ __raw_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));
+
+ data->bytes_xfered = 0;
+
+ if (!data->error) {
+ if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
+ u32 chan = DMA_CHANNEL(host);
+
+ chan_tab_t *c = *((chan_tab_t **)chan);
+ au1x_dma_chan_t *cp = c->chan_ptr;
+ data->bytes_xfered = cp->ddma_bytecnt;
+ } else
+ data->bytes_xfered =
+ (data->blocks * data->blksz) - host->pio.len;
+ }
+
+ au1xmmc_finish_request(host);
+}
+
+static void au1xmmc_tasklet_data(unsigned long param)
+{
+ struct au1xmmc_host *host = (struct au1xmmc_host *)param;
+
+ u32 status = __raw_readl(HOST_STATUS(host));
+ au1xmmc_data_complete(host, status);
+}
+
+#define AU1XMMC_MAX_TRANSFER 8
+
+static void au1xmmc_send_pio(struct au1xmmc_host *host)
+{
+ struct mmc_data *data;
+ int sg_len, max, count;
+ unsigned char *sg_ptr, val;
+ u32 status;
+ struct scatterlist *sg;
+
+ data = host->mrq->data;
+
+ if (!(host->flags & HOST_F_XMIT))
+ return;
+
+ /* This is the pointer to the data buffer */
+ sg = &data->sg[host->pio.index];
+ sg_ptr = sg_virt(sg) + host->pio.offset;
+
+ /* This is the space left inside the buffer */
+ sg_len = data->sg[host->pio.index].length - host->pio.offset;
+
+ /* Check if we need less than the size of the sg_buffer */
+ max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
+ if (max > AU1XMMC_MAX_TRANSFER)
+ max = AU1XMMC_MAX_TRANSFER;
+
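+ /* the TX port is byte-wide: push one byte per write while the FIFO has room */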
+ for (count = 0; count < max; count++) {
+ status = __raw_readl(HOST_STATUS(host));
+
+ if (!(status & SD_STATUS_TH))
+ break;
+
+ val = *sg_ptr++;
+
+ __raw_writel((unsigned long)val, HOST_TXPORT(host));
+ wmb(); /* drain writebuffer */
+ }
+
+ host->pio.len -= count;
+ host->pio.offset += count;
+
+ if (count == sg_len) {
+ host->pio.index++;
+ host->pio.offset = 0;
+ }
+
+ if (host->pio.len == 0) {
+ IRQ_OFF(host, SD_CONFIG_TH);
+
+ if (host->flags & HOST_F_STOP)
+ SEND_STOP(host);
+
+ tasklet_schedule(&host->data_task);
+ }
+}
+
+static void au1xmmc_receive_pio(struct au1xmmc_host *host)
+{
+ struct mmc_data *data;
+ int max, count, sg_len = 0;
+ unsigned char *sg_ptr = NULL;
+ u32 status, val;
+ struct scatterlist *sg;
+
+ data = host->mrq->data;
+
+ if (!(host->flags & HOST_F_RECV))
+ return;
+
+ max = host->pio.len;
+
+ if (host->pio.index < host->dma.len) {
+ sg = &data->sg[host->pio.index];
+ sg_ptr = sg_virt(sg) + host->pio.offset;
+
+ /* This is the space left inside the buffer */
+ sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
+
+ /* Check if we need less than the size of the sg_buffer */
+ if (sg_len < max)
+ max = sg_len;
+ }
+
+ if (max > AU1XMMC_MAX_TRANSFER)
+ max = AU1XMMC_MAX_TRANSFER;
+
+ for (count = 0; count < max; count++) {
+ status = __raw_readl(HOST_STATUS(host));
+
+ if (!(status & SD_STATUS_NE))
+ break;
+
+ if (status & SD_STATUS_RC) {
+ DBG("RX CRC Error [%d + %d].\n", host->pdev->id,
+ host->pio.len, count);
+ break;
+ }
+
+ if (status & SD_STATUS_RO) {
+ DBG("RX Overrun [%d + %d]\n", host->pdev->id,
+ host->pio.len, count);
+ break;
+ } else if (status & SD_STATUS_RU) {
+ DBG("RX Underrun [%d + %d]\n", host->pdev->id,
+ host->pio.len, count);
+ break;
+ }
+
+ val = __raw_readl(HOST_RXPORT(host));
+
+ if (sg_ptr)
+ *sg_ptr++ = (unsigned char)(val & 0xFF);
+ }
+
+ host->pio.len -= count;
+ host->pio.offset += count;
+
+ if (sg_len && count == sg_len) {
+ host->pio.index++;
+ host->pio.offset = 0;
+ }
+
+ if (host->pio.len == 0) {
+ /* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */
+ IRQ_OFF(host, SD_CONFIG_NE);
+
+ if (host->flags & HOST_F_STOP)
+ SEND_STOP(host);
+
+ tasklet_schedule(&host->data_task);
+ }
+}
+
+/* This is called when a command has been completed - grab the response
+ * and check for errors. Then start the data transfer if it is indicated.
+ */
+static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd;
+ u32 r[4];
+ int i, trans;
+
+ if (!host->mrq)
+ return;
+
+ cmd = mrq->cmd;
+ cmd->error = 0;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ r[0] = __raw_readl(host->iobase + SD_RESP3);
+ r[1] = __raw_readl(host->iobase + SD_RESP2);
+ r[2] = __raw_readl(host->iobase + SD_RESP1);
+ r[3] = __raw_readl(host->iobase + SD_RESP0);
+
+ /* The CRC is omitted from the response, so really
+ * we only got 120 bits, but the engine expects
+ * 128 bits, so we have to shift things up.
+ */
+ for (i = 0; i < 4; i++) {
+ cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
+ if (i != 3)
+ cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
+ }
+ } else {
+ /* Technically, we should be getting all 48 bits of
+ * the response (SD_RESP1 + SD_RESP2), but because
+ * our response omits the CRC, our data ends up
+ * being shifted 8 bits to the right. In this case,
+ * that means that the OSR data starts at bit 31,
+ * so we can just read RESP0 and return that.
+ */
+ cmd->resp[0] = __raw_readl(host->iobase + SD_RESP0);
+ }
+ }
+
+ /* Figure out errors */
+ if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
+ cmd->error = -EILSEQ;
+
+ trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);
+
+ if (!trans || cmd->error) {
+ IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
+ tasklet_schedule(&host->finish_task);
+ return;
+ }
+
+ host->status = HOST_S_DATA;
+
+ if ((host->flags & (HOST_F_DMA | HOST_F_DBDMA))) {
+ u32 channel = DMA_CHANNEL(host);
+
+ /* Start the DBDMA as soon as the buffer gets something in it */
+
+ if (host->flags & HOST_F_RECV) {
+ u32 mask = SD_STATUS_DB | SD_STATUS_NE;
+
+ while ((status & mask) != mask)
+ status = __raw_readl(HOST_STATUS(host));
+ }
+
+ au1xxx_dbdma_start(channel);
+ }
+}
+
+static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
+{
+ unsigned int pbus = clk_get_rate(host->clk);
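+ /* SD clock = pbus / (2 * (divisor + 1)); derive the divisor from the rate */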
+ unsigned int divisor = ((pbus / rate) / 2) - 1;
+ u32 config;
+
+ config = __raw_readl(HOST_CONFIG(host));
+
+ config &= ~(SD_CONFIG_DIV);
+ config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;
+
+ __raw_writel(config, HOST_CONFIG(host));
+ wmb(); /* drain writebuffer */
+}
+
+static int au1xmmc_prepare_data(struct au1xmmc_host *host,
+ struct mmc_data *data)
+{
+ int datalen = data->blocks * data->blksz;
+
+ if (data->flags & MMC_DATA_READ)
+ host->flags |= HOST_F_RECV;
+ else
+ host->flags |= HOST_F_XMIT;
+
+ if (host->mrq->stop)
+ host->flags |= HOST_F_STOP;
+
+ host->dma.dir = DMA_BIDIRECTIONAL;
+
+ host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, host->dma.dir);
+
+ if (host->dma.len == 0)
+ return -ETIMEDOUT;
+
+ __raw_writel(data->blksz - 1, HOST_BLKSIZE(host));
+
+ if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
+ int i;
+ u32 channel = DMA_CHANNEL(host);
+
+ au1xxx_dbdma_stop(channel);
+
+ for (i = 0; i < host->dma.len; i++) {
+ u32 ret = 0, flags = DDMA_FLAGS_NOIE;
+ struct scatterlist *sg = &data->sg[i];
+ int sg_len = sg->length;
+
+ int len = (datalen > sg_len) ? sg_len : datalen;
+
+ if (i == host->dma.len - 1)
+ flags = DDMA_FLAGS_IE;
+
+ if (host->flags & HOST_F_XMIT) {
+ ret = au1xxx_dbdma_put_source(channel,
+ sg_phys(sg), len, flags);
+ } else {
+ ret = au1xxx_dbdma_put_dest(channel,
+ sg_phys(sg), len, flags);
+ }
+
+ if (!ret)
+ goto dataerr;
+
+ datalen -= len;
+ }
+ } else {
+ host->pio.index = 0;
+ host->pio.offset = 0;
+ host->pio.len = datalen;
+
+ if (host->flags & HOST_F_XMIT)
+ IRQ_ON(host, SD_CONFIG_TH);
+ else
+ IRQ_ON(host, SD_CONFIG_NE);
+ /* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */
+ }
+
+ return 0;
+
+dataerr:
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ host->dma.dir);
+ return -ETIMEDOUT;
+}
+
+/* This actually starts a command or data transaction */
+static void au1xmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct au1xmmc_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ WARN_ON(irqs_disabled());
+ WARN_ON(host->status != HOST_S_IDLE);
+
+ host->mrq = mrq;
+ host->status = HOST_S_CMD;
+
+ /* fail request immediately if no card is present */
+ if (au1xmmc_card_inserted(mmc) == 0) {
+ mrq->cmd->error = -ENOMEDIUM;
+ au1xmmc_finish_request(host);
+ return;
+ }
+
+ if (mrq->data) {
+ FLUSH_FIFO(host);
+ ret = au1xmmc_prepare_data(host, mrq->data);
+ }
+
+ if (!ret)
+ ret = au1xmmc_send_command(host, 0, mrq->cmd, mrq->data);
+
+ if (ret) {
+ mrq->cmd->error = ret;
+ au1xmmc_finish_request(host);
+ }
+}
+
+static void au1xmmc_reset_controller(struct au1xmmc_host *host)
+{
+ /* Apply the clock */
+ __raw_writel(SD_ENABLE_CE, HOST_ENABLE(host));
+ wmb(); /* drain writebuffer */
+ mdelay(1);
+
+ __raw_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
+ wmb(); /* drain writebuffer */
+ mdelay(5);
+
+ __raw_writel(~0, HOST_STATUS(host));
+ wmb(); /* drain writebuffer */
+
+ __raw_writel(0, HOST_BLKSIZE(host));
+ __raw_writel(0x001fffff, HOST_TIMEOUT(host));
+ wmb(); /* drain writebuffer */
+
+ __raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
+
+ __raw_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
+ mdelay(1);
+
+ __raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
+
+ /* Configure interrupts */
+ __raw_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
+ wmb(); /* drain writebuffer */
+}
+
+
+static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct au1xmmc_host *host = mmc_priv(mmc);
+ u32 config2;
+
+ if (ios->power_mode == MMC_POWER_OFF)
+ au1xmmc_set_power(host, 0);
+ else if (ios->power_mode == MMC_POWER_ON)
+ au1xmmc_set_power(host, 1);
+
+ if (ios->clock && ios->clock != host->clock) {
+ au1xmmc_set_clock(host, ios->clock);
+ host->clock = ios->clock;
+ }
+
+ config2 = __raw_readl(HOST_CONFIG2(host));
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_8:
+ config2 |= SD_CONFIG2_BB;
+ break;
+ case MMC_BUS_WIDTH_4:
+ config2 &= ~SD_CONFIG2_BB;
+ config2 |= SD_CONFIG2_WB;
+ break;
+ case MMC_BUS_WIDTH_1:
+ config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB);
+ break;
+ }
+ __raw_writel(config2, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
+}
+
+#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
+#define STATUS_DATA_IN (SD_STATUS_NE)
+#define STATUS_DATA_OUT (SD_STATUS_TH)
+
+static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
+{
+ struct au1xmmc_host *host = dev_id;
+ u32 status;
+
+ status = __raw_readl(HOST_STATUS(host));
+
+ if (!(status & SD_STATUS_I))
+ return IRQ_NONE; /* not ours */
+
+ if (status & SD_STATUS_SI) /* SDIO */
+ mmc_signal_sdio_irq(host->mmc);
+
+ if (host->mrq && (status & STATUS_TIMEOUT)) {
+ if (status & SD_STATUS_RAT)
+ host->mrq->cmd->error = -ETIMEDOUT;
+ else if (status & SD_STATUS_DT)
+ host->mrq->data->error = -ETIMEDOUT;
+
+ /* In PIO mode, interrupts might still be enabled */
+ IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);
+
+ /* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */
+ tasklet_schedule(&host->finish_task);
+ }
+#if 0
+ else if (status & SD_STATUS_DD) {
+ /* Sometimes we get a DD before a NE in PIO mode */
+ if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE))
+ au1xmmc_receive_pio(host);
+ else {
+ au1xmmc_data_complete(host, status);
+ /* tasklet_schedule(&host->data_task); */
+ }
+ }
+#endif
+ else if (status & SD_STATUS_CR) {
+ if (host->status == HOST_S_CMD)
+ au1xmmc_cmd_complete(host, status);
+
+ } else if (!(host->flags & HOST_F_DMA)) {
+ if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT))
+ au1xmmc_send_pio(host);
+ else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN))
+ au1xmmc_receive_pio(host);
+
+ } else if (status & 0x203F3C70) {
+ DBG("Unhandled status %8.8x\n", host->pdev->id,
+ status);
+ }
+
+ __raw_writel(status, HOST_STATUS(host));
+ wmb(); /* drain writebuffer */
+
+ return IRQ_HANDLED;
+}
+
+/* 8-bit memory DMA device */
+static dbdev_tab_t au1xmmc_mem_dbdev = {
+ .dev_id = DSCR_CMD0_ALWAYS,
+ .dev_flags = DEV_FLAGS_ANYUSE,
+ .dev_tsize = 0,
+ .dev_devwidth = 8,
+ .dev_physaddr = 0x00000000,
+ .dev_intlevel = 0,
+ .dev_intpolarity = 0,
+};
+static int memid;
+
+static void au1xmmc_dbdma_callback(int irq, void *dev_id)
+{
+ struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id;
+
+ /* Avoid spurious interrupts */
+ if (!host->mrq)
+ return;
+
+ if (host->flags & HOST_F_STOP)
+ SEND_STOP(host);
+
+ tasklet_schedule(&host->data_task);
+}
+
+static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
+{
+ struct resource *res;
+ int txid, rxid;
+
+ res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0);
+ if (!res)
+ return -ENODEV;
+ txid = res->start;
+
+ res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1);
+ if (!res)
+ return -ENODEV;
+ rxid = res->start;
+
+ if (!memid)
+ return -ENODEV;
+
+ host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid,
+ au1xmmc_dbdma_callback, (void *)host);
+ if (!host->tx_chan) {
+ dev_err(&host->pdev->dev, "cannot allocate TX DMA\n");
+ return -ENODEV;
+ }
+
+ host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid,
+ au1xmmc_dbdma_callback, (void *)host);
+ if (!host->rx_chan) {
+ dev_err(&host->pdev->dev, "cannot allocate RX DMA\n");
+ au1xxx_dbdma_chan_free(host->tx_chan);
+ return -ENODEV;
+ }
+
+ au1xxx_dbdma_set_devwidth(host->tx_chan, 8);
+ au1xxx_dbdma_set_devwidth(host->rx_chan, 8);
+
+ au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT);
+ au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT);
+
+ /* DBDMA is good to go */
+ host->flags |= HOST_F_DMA | HOST_F_DBDMA;
+
+ return 0;
+}
+
+static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host)
+{
+ if (host->flags & HOST_F_DMA) {
+ host->flags &= ~HOST_F_DMA;
+ au1xxx_dbdma_chan_free(host->tx_chan);
+ au1xxx_dbdma_chan_free(host->rx_chan);
+ }
+}
+
+static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en)
+{
+ struct au1xmmc_host *host = mmc_priv(mmc);
+
+ if (en)
+ IRQ_ON(host, SD_CONFIG_SI);
+ else
+ IRQ_OFF(host, SD_CONFIG_SI);
+}
+
+static const struct mmc_host_ops au1xmmc_ops = {
+ .request = au1xmmc_request,
+ .set_ios = au1xmmc_set_ios,
+ .get_ro = au1xmmc_card_readonly,
+ .get_cd = au1xmmc_card_inserted,
+ .enable_sdio_irq = au1xmmc_enable_sdio_irq,
+};
+
+static int au1xmmc_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct au1xmmc_host *host;
+ struct resource *r;
+ int ret, iflag;
+
+ mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
+ if (!mmc) {
+ dev_err(&pdev->dev, "no memory for mmc_host\n");
+ ret = -ENOMEM;
+ goto out0;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->platdata = pdev->dev.platform_data;
+ host->pdev = pdev;
+
+ ret = -ENODEV;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no mmio defined\n");
+ goto out1;
+ }
+
+ host->ioarea = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!host->ioarea) {
+ dev_err(&pdev->dev, "mmio already in use\n");
+ goto out1;
+ }
+
+ host->iobase = ioremap(r->start, 0x3c);
+ if (!host->iobase) {
+ dev_err(&pdev->dev, "cannot remap mmio\n");
+ goto out2;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no IRQ defined\n");
+ goto out3;
+ }
+ host->irq = r->start;
+
+ mmc->ops = &au1xmmc_ops;
+
+ mmc->f_min = 450000;
+ mmc->f_max = 24000000;
+
+ mmc->max_blk_size = 2048;
+ mmc->max_blk_count = 512;
+
+ mmc->ocr_avail = AU1XMMC_OCR;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+ mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
+
+ iflag = IRQF_SHARED; /* Au1100/Au1200: one int for both ctrls */
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1100:
+ mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE;
+ break;
+ case ALCHEMY_CPU_AU1200:
+ mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
+ break;
+ case ALCHEMY_CPU_AU1300:
+ iflag = 0; /* nothing is shared */
+ mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
+ mmc->f_max = 52000000;
+ if (host->ioarea->start == AU1100_SD0_PHYS_ADDR)
+ mmc->caps |= MMC_CAP_8_BIT_DATA;
+ break;
+ }
+
+ ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot grab IRQ\n");
+ goto out3;
+ }
+
+ host->clk = clk_get(&pdev->dev, ALCHEMY_PERIPH_CLK);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "cannot find clock\n");
+ ret = PTR_ERR(host->clk);
+ goto out_irq;
+ }
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot enable clock\n");
+ goto out_clk;
+ }
+
+ host->status = HOST_S_IDLE;
+
+ /* board-specific carddetect setup, if any */
+ if (host->platdata && host->platdata->cd_setup) {
+ ret = host->platdata->cd_setup(mmc, 1);
+ if (ret) {
+ dev_warn(&pdev->dev, "board CD setup failed\n");
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+ }
+ } else
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+
+ /* platform may not be able to use all advertised caps */
+ if (host->platdata)
+ mmc->caps &= ~(host->platdata->mask_host_caps);
+
+ tasklet_init(&host->data_task, au1xmmc_tasklet_data,
+ (unsigned long)host);
+
+ tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,
+ (unsigned long)host);
+
+ if (has_dbdma()) {
+ ret = au1xmmc_dbdma_init(host);
+ if (ret)
+ pr_info(DRIVER_NAME ": DBDMA init failed; using PIO\n");
+ }
+
+#ifdef CONFIG_LEDS_CLASS
+ if (host->platdata && host->platdata->led) {
+ struct led_classdev *led = host->platdata->led;
+ led->name = mmc_hostname(mmc);
+ led->brightness = LED_OFF;
+ led->default_trigger = mmc_hostname(mmc);
+ ret = led_classdev_register(mmc_dev(mmc), led);
+ if (ret)
+ goto out5;
+ }
+#endif
+
+ au1xmmc_reset_controller(host);
+
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot add mmc host\n");
+ goto out6;
+ }
+
+ platform_set_drvdata(pdev, host);
+
+ pr_info(DRIVER_NAME ": MMC Controller %d set up at %p"
+ " (mode=%s)\n", pdev->id, host->iobase,
+ host->flags & HOST_F_DMA ? "dma" : "pio");
+
+ return 0; /* all ok */
+
+out6:
+#ifdef CONFIG_LEDS_CLASS
+ if (host->platdata && host->platdata->led)
+ led_classdev_unregister(host->platdata->led);
+out5:
+#endif
+ __raw_writel(0, HOST_ENABLE(host));
+ __raw_writel(0, HOST_CONFIG(host));
+ __raw_writel(0, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
+
+ if (host->flags & HOST_F_DBDMA)
+ au1xmmc_dbdma_shutdown(host);
+
+ tasklet_kill(&host->data_task);
+ tasklet_kill(&host->finish_task);
+
+ if (host->platdata && host->platdata->cd_setup &&
+ !(mmc->caps & MMC_CAP_NEEDS_POLL))
+ host->platdata->cd_setup(mmc, 0);
+out_clk:
+ clk_disable_unprepare(host->clk);
+ clk_put(host->clk);
+out_irq:
+ free_irq(host->irq, host);
+out3:
+ iounmap((void *)host->iobase);
+out2:
+ release_resource(host->ioarea);
+ kfree(host->ioarea);
+out1:
+ mmc_free_host(mmc);
+out0:
+ return ret;
+}
+
+static int au1xmmc_remove(struct platform_device *pdev)
+{
+ struct au1xmmc_host *host = platform_get_drvdata(pdev);
+
+ if (host) {
+ mmc_remove_host(host->mmc);
+
+#ifdef CONFIG_LEDS_CLASS
+ if (host->platdata && host->platdata->led)
+ led_classdev_unregister(host->platdata->led);
+#endif
+
+ if (host->platdata && host->platdata->cd_setup &&
+ !(host->mmc->caps & MMC_CAP_NEEDS_POLL))
+ host->platdata->cd_setup(host->mmc, 0);
+
+ __raw_writel(0, HOST_ENABLE(host));
+ __raw_writel(0, HOST_CONFIG(host));
+ __raw_writel(0, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
+
+ tasklet_kill(&host->data_task);
+ tasklet_kill(&host->finish_task);
+
+ if (host->flags & HOST_F_DBDMA)
+ au1xmmc_dbdma_shutdown(host);
+
+ au1xmmc_set_power(host, 0);
+
+ clk_disable_unprepare(host->clk);
+ clk_put(host->clk);
+
+ free_irq(host->irq, host);
+ iounmap((void *)host->iobase);
+ release_resource(host->ioarea);
+ kfree(host->ioarea);
+
+ mmc_free_host(host->mmc);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct au1xmmc_host *host = platform_get_drvdata(pdev);
+
+ __raw_writel(0, HOST_CONFIG2(host));
+ __raw_writel(0, HOST_CONFIG(host));
+ __raw_writel(0xffffffff, HOST_STATUS(host));
+ __raw_writel(0, HOST_ENABLE(host));
+ wmb(); /* drain writebuffer */
+
+ return 0;
+}
+
+static int au1xmmc_resume(struct platform_device *pdev)
+{
+ struct au1xmmc_host *host = platform_get_drvdata(pdev);
+
+ au1xmmc_reset_controller(host);
+
+ return 0;
+}
+#else
+#define au1xmmc_suspend NULL
+#define au1xmmc_resume NULL
+#endif
+
+static struct platform_driver au1xmmc_driver = {
+ .probe = au1xmmc_probe,
+ .remove = au1xmmc_remove,
+ .suspend = au1xmmc_suspend,
+ .resume = au1xmmc_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init au1xmmc_init(void)
+{
+ if (has_dbdma()) {
+ /* DSCR_CMD0_ALWAYS has a stride of 32 bits, but we need a stride
+ * of 8 bits. And since the device entries are shared, we create
+ * our own to avoid disturbing other users.
+ */
+ memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
+ if (!memid)
+ pr_err("au1xmmc: cannot add memory dbdma\n");
+ }
+ return platform_driver_register(&au1xmmc_driver);
+}
+
+static void __exit au1xmmc_exit(void)
+{
+ if (has_dbdma() && memid)
+ au1xxx_ddma_del_device(memid);
+
+ platform_driver_unregister(&au1xmmc_driver);
+}
+
+module_init(au1xmmc_init);
+module_exit(au1xmmc_exit);
+
+MODULE_AUTHOR("Advanced Micro Devices, Inc");
+MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:au1xxx-mmc");
diff --git a/kernel/drivers/mmc/host/bfin_sdh.c b/kernel/drivers/mmc/host/bfin_sdh.c
new file mode 100644
index 000000000..2b7f37e82
--- /dev/null
+++ b/kernel/drivers/mmc/host/bfin_sdh.c
@@ -0,0 +1,682 @@
+/*
+ * bfin_sdh.c - Analog Devices Blackfin SDH Controller
+ *
+ * Copyright (C) 2007-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#define DRIVER_NAME "bfin-sdh"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/proc_fs.h>
+#include <linux/gfp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+#include <asm/bfin_sdh.h>
+
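+/* BF51x and BF60x name this controller RSI; alias the SDH register accessors */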
+#if defined(CONFIG_BF51x) || defined(__ADSPBF60x__)
+#define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL
+#define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL
+#define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT
+#define bfin_write_SDH_COMMAND bfin_write_RSI_COMMAND
+#define bfin_write_SDH_DATA_TIMER bfin_write_RSI_DATA_TIMER
+#define bfin_read_SDH_RESPONSE0 bfin_read_RSI_RESPONSE0
+#define bfin_read_SDH_RESPONSE1 bfin_read_RSI_RESPONSE1
+#define bfin_read_SDH_RESPONSE2 bfin_read_RSI_RESPONSE2
+#define bfin_read_SDH_RESPONSE3 bfin_read_RSI_RESPONSE3
+#define bfin_write_SDH_DATA_LGTH bfin_write_RSI_DATA_LGTH
+#define bfin_read_SDH_DATA_CTL bfin_read_RSI_DATA_CTL
+#define bfin_write_SDH_DATA_CTL bfin_write_RSI_DATA_CTL
+#define bfin_read_SDH_DATA_CNT bfin_read_RSI_DATA_CNT
+#define bfin_write_SDH_STATUS_CLR bfin_write_RSI_STATUS_CLR
+#define bfin_read_SDH_E_STATUS bfin_read_RSI_E_STATUS
+#define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS
+#define bfin_read_SDH_STATUS bfin_read_RSI_STATUS
+#define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0
+#define bfin_write_SDH_E_MASK bfin_write_RSI_E_MASK
+#define bfin_read_SDH_CFG bfin_read_RSI_CFG
+#define bfin_write_SDH_CFG bfin_write_RSI_CFG
+# if defined(__ADSPBF60x__)
+# define bfin_read_SDH_BLK_SIZE bfin_read_RSI_BLKSZ
+# define bfin_write_SDH_BLK_SIZE bfin_write_RSI_BLKSZ
+# else
+# define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL
+# define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL
+# endif
+#endif
+
+struct sdh_host {
+ struct mmc_host *mmc;
+ spinlock_t lock;
+ struct resource *res;
+ void __iomem *base;
+ int irq;
+ int stat_irq;
+ int dma_ch;
+ int dma_dir;
+ struct dma_desc_array *sg_cpu;
+ dma_addr_t sg_dma;
+ int dma_len;
+
+ unsigned long sclk;
+ unsigned int imask;
+ unsigned int power_mode;
+ unsigned int clk_div;
+
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+};
+
+static struct bfin_sd_host *get_sdh_data(struct platform_device *pdev)
+{
+ return pdev->dev.platform_data;
+}
+
+static void sdh_stop_clock(struct sdh_host *host)
+{
+ bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() & ~CLK_E);
+ SSYNC();
+}
+
+static void sdh_enable_stat_irq(struct sdh_host *host, unsigned int mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->imask |= mask;
+ bfin_write_SDH_MASK0(mask);
+ SSYNC();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void sdh_disable_stat_irq(struct sdh_host *host, unsigned int mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->imask &= ~mask;
+ bfin_write_SDH_MASK0(host->imask);
+ SSYNC();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
+{
+ unsigned int length;
+ unsigned int data_ctl;
+ unsigned int dma_cfg;
+ unsigned int cycle_ns, timeout;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter flags: 0x%x\n", __func__, data->flags);
+ host->data = data;
+ data_ctl = 0;
+ dma_cfg = 0;
+
+ length = data->blksz * data->blocks;
+ bfin_write_SDH_DATA_LGTH(length);
+
+ if (data->flags & MMC_DATA_STREAM)
+ data_ctl |= DTX_MODE;
+
+ if (data->flags & MMC_DATA_READ)
+ data_ctl |= DTX_DIR;
+ /* Only supports power-of-2 block size */
+ if (data->blksz & (data->blksz - 1))
+ return -EINVAL;
+#ifndef RSI_BLKSZ
+ data_ctl |= ((ffs(data->blksz) - 1) << 4);
+#else
+ bfin_write_SDH_BLK_SIZE(data->blksz);
+#endif
+
+ bfin_write_SDH_DATA_CTL(data_ctl);
+ /* the time of a host clock period in ns */
+ cycle_ns = 1000000000 / (host->sclk / (2 * (host->clk_div + 1)));
+ timeout = data->timeout_ns / cycle_ns;
+ timeout += data->timeout_clks;
+ bfin_write_SDH_DATA_TIMER(timeout);
+ SSYNC();
+
+ if (data->flags & MMC_DATA_READ) {
+ host->dma_dir = DMA_FROM_DEVICE;
+ dma_cfg |= WNR;
+ } else
+ host->dma_dir = DMA_TO_DEVICE;
+
+ sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END));
+ host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir);
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
+ dma_cfg |= DMAFLOW_ARRAY | RESTART | WDSIZE_32 | DMAEN;
+# ifdef RSI_BLKSZ
+ dma_cfg |= PSIZE_32 | NDSIZE_3;
+# else
+ dma_cfg |= NDSIZE_5;
+# endif
+ {
+ struct scatterlist *sg;
+ int i;
+ for_each_sg(data->sg, sg, host->dma_len, i) {
+ host->sg_cpu[i].start_addr = sg_dma_address(sg);
+ host->sg_cpu[i].cfg = dma_cfg;
+ host->sg_cpu[i].x_count = sg_dma_len(sg) / 4;
+ host->sg_cpu[i].x_modify = 4;
+ dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, "
+ "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
+ i, host->sg_cpu[i].start_addr,
+ host->sg_cpu[i].cfg, host->sg_cpu[i].x_count,
+ host->sg_cpu[i].x_modify);
+ }
+ }
+ flush_dcache_range((unsigned int)host->sg_cpu,
+ (unsigned int)host->sg_cpu +
+ host->dma_len * sizeof(struct dma_desc_array));
+ /* Set the last descriptor to stop mode */
+ host->sg_cpu[host->dma_len - 1].cfg &= ~(DMAFLOW | NDSIZE);
+ host->sg_cpu[host->dma_len - 1].cfg |= DI_EN;
+
+ set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma);
+ set_dma_x_count(host->dma_ch, 0);
+ set_dma_x_modify(host->dma_ch, 0);
+ SSYNC();
+ set_dma_config(host->dma_ch, dma_cfg);
+#elif defined(CONFIG_BF51x)
+ /* RSI DMA doesn't work in array mode */
+ dma_cfg |= WDSIZE_32 | DMAEN;
+ set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0]));
+ set_dma_x_count(host->dma_ch, length / 4);
+ set_dma_x_modify(host->dma_ch, 4);
+ SSYNC();
+ set_dma_config(host->dma_ch, dma_cfg);
+#endif
+ bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E);
+
+ SSYNC();
+
+ dev_dbg(mmc_dev(host->mmc), "%s exit\n", __func__);
+ return 0;
+}
+
+static void sdh_start_cmd(struct sdh_host *host, struct mmc_command *cmd)
+{
+ unsigned int sdh_cmd;
+ unsigned int stat_mask;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter cmd: 0x%p\n", __func__, cmd);
+ WARN_ON(host->cmd != NULL);
+ host->cmd = cmd;
+
+ sdh_cmd = 0;
+ stat_mask = 0;
+
+ sdh_cmd |= cmd->opcode;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ sdh_cmd |= CMD_RSP;
+ stat_mask |= CMD_RESP_END;
+ } else {
+ stat_mask |= CMD_SENT;
+ }
+
+ if (cmd->flags & MMC_RSP_136)
+ sdh_cmd |= CMD_L_RSP;
+
+ stat_mask |= CMD_CRC_FAIL | CMD_TIME_OUT;
+
+ sdh_enable_stat_irq(host, stat_mask);
+
+ bfin_write_SDH_ARGUMENT(cmd->arg);
+ bfin_write_SDH_COMMAND(sdh_cmd | CMD_E);
+ bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() | CLK_E);
+ SSYNC();
+}
+
+static void sdh_finish_request(struct sdh_host *host, struct mmc_request *mrq)
+{
+ dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
+static int sdh_cmd_done(struct sdh_host *host, unsigned int stat)
+{
+ struct mmc_command *cmd = host->cmd;
+ int ret = 0;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter cmd: %p\n", __func__, cmd);
+ if (!cmd)
+ return 0;
+
+ host->cmd = NULL;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ cmd->resp[0] = bfin_read_SDH_RESPONSE0();
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[1] = bfin_read_SDH_RESPONSE1();
+ cmd->resp[2] = bfin_read_SDH_RESPONSE2();
+ cmd->resp[3] = bfin_read_SDH_RESPONSE3();
+ }
+ }
+ if (stat & CMD_TIME_OUT)
+ cmd->error = -ETIMEDOUT;
+ else if (stat & CMD_CRC_FAIL && cmd->flags & MMC_RSP_CRC)
+ cmd->error = -EILSEQ;
+
+ sdh_disable_stat_irq(host, (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL));
+
+ if (host->data && !cmd->error) {
+ if (host->data->flags & MMC_DATA_WRITE) {
+ ret = sdh_setup_data(host, host->data);
+ if (ret)
+ return 0;
+ }
+
+ sdh_enable_stat_irq(host, DAT_END | RX_OVERRUN | TX_UNDERRUN | DAT_TIME_OUT);
+ } else
+ sdh_finish_request(host, host->mrq);
+
+ return 1;
+}
+
+static int sdh_data_done(struct sdh_host *host, unsigned int stat)
+{
+ struct mmc_data *data = host->data;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter stat: 0x%x\n", __func__, stat);
+ if (!data)
+ return 0;
+
+ disable_dma(host->dma_ch);
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ host->dma_dir);
+
+ if (stat & DAT_TIME_OUT)
+ data->error = -ETIMEDOUT;
+ else if (stat & DAT_CRC_FAIL)
+ data->error = -EILSEQ;
+ else if (stat & (RX_OVERRUN | TX_UNDERRUN))
+ data->error = -EIO;
+
+ if (!data->error)
+ data->bytes_xfered = data->blocks * data->blksz;
+ else
+ data->bytes_xfered = 0;
+
+ bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \
+ DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN);
+ bfin_write_SDH_DATA_CTL(0);
+ SSYNC();
+
+ host->data = NULL;
+ if (host->mrq->stop) {
+ sdh_stop_clock(host);
+ sdh_start_cmd(host, host->mrq->stop);
+ } else {
+ sdh_finish_request(host, host->mrq);
+ }
+
+ return 1;
+}
+
+static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sdh_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd);
+ WARN_ON(host->mrq != NULL);
+
+ spin_lock(&host->lock);
+ host->mrq = mrq;
+ host->data = mrq->data;
+
+ if (mrq->data && mrq->data->flags & MMC_DATA_READ) {
+ ret = sdh_setup_data(host, mrq->data);
+ if (ret)
+ goto data_err;
+ }
+
+ sdh_start_cmd(host, mrq->cmd);
+data_err:
+ spin_unlock(&host->lock);
+}
+
+static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdh_host *host;
+ u16 clk_ctl = 0;
+#ifndef RSI_BLKSZ
+ u16 pwr_ctl = 0;
+#endif
+ u16 cfg;
+ host = mmc_priv(mmc);
+
+ spin_lock(&host->lock);
+
+ cfg = bfin_read_SDH_CFG();
+ cfg |= MWE;
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_4:
+#ifndef RSI_BLKSZ
+ cfg &= ~PD_SDDAT3;
+#endif
+ cfg |= PUP_SDDAT3;
+ /* Enable 4 bit SDIO */
+ cfg |= SD4E;
+ clk_ctl |= WIDE_BUS_4;
+ break;
+ case MMC_BUS_WIDTH_8:
+#ifndef RSI_BLKSZ
+ cfg &= ~PD_SDDAT3;
+#endif
+ cfg |= PUP_SDDAT3;
+ /* Disable 4 bit SDIO */
+ cfg &= ~SD4E;
+ clk_ctl |= BYTE_BUS_8;
+ break;
+ default:
+ cfg &= ~PUP_SDDAT3;
+ /* Disable 4 bit SDIO */
+ cfg &= ~SD4E;
+ }
+ bfin_write_SDH_CFG(cfg);
+
+ host->power_mode = ios->power_mode;
+#ifndef RSI_BLKSZ
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
+ pwr_ctl |= ROD_CTL;
+# ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
+ pwr_ctl |= SD_CMD_OD;
+# endif
+ }
+
+ if (ios->power_mode != MMC_POWER_OFF)
+ pwr_ctl |= PWR_ON;
+ else
+ pwr_ctl &= ~PWR_ON;
+
+ bfin_write_SDH_PWR_CTL(pwr_ctl);
+#else
+# ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ cfg |= SD_CMD_OD;
+ else
+ cfg &= ~SD_CMD_OD;
+# endif
+
+ if (ios->power_mode != MMC_POWER_OFF)
+ cfg |= PWR_ON;
+ else
+ cfg &= ~PWR_ON;
+
+ bfin_write_SDH_CFG(cfg);
+#endif
+ SSYNC();
+
+ if (ios->power_mode == MMC_POWER_ON && ios->clock) {
+ unsigned char clk_div;
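+ /* SD clock = sclk / (2 * (clk_div + 1)); clamp the divider to 8 bits */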
+ clk_div = (get_sclk() / ios->clock - 1) / 2;
+ clk_div = min_t(unsigned char, clk_div, 0xFF);
+ clk_ctl |= clk_div;
+ clk_ctl |= CLK_E;
+ host->clk_div = clk_div;
+ bfin_write_SDH_CLK_CTL(clk_ctl);
+ } else
+ sdh_stop_clock(host);
+
+ /* set up sdh interrupt mask */
+ if (ios->power_mode == MMC_POWER_ON)
+ bfin_write_SDH_MASK0(DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL |
+ RX_OVERRUN | TX_UNDERRUN | CMD_SENT | CMD_RESP_END |
+ CMD_TIME_OUT | CMD_CRC_FAIL);
+ else
+ bfin_write_SDH_MASK0(0);
+ SSYNC();
+
+ spin_unlock(&host->lock);
+
+ dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n",
+ host->clk_div,
+ host->clk_div ? get_sclk() / (2 * (host->clk_div + 1)) : 0,
+ ios->clock);
+}
+
+static const struct mmc_host_ops sdh_ops = {
+ .request = sdh_request,
+ .set_ios = sdh_set_ios,
+};
+
+static irqreturn_t sdh_dma_irq(int irq, void *devid)
+{
+ struct sdh_host *host = devid;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04lx\n", __func__,
+ get_dma_curr_irqstat(host->dma_ch));
+ clear_dma_irqstat(host->dma_ch);
+ SSYNC();
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sdh_stat_irq(int irq, void *devid)
+{
+ struct sdh_host *host = devid;
+ unsigned int status;
+ int handled = 0;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
+
+ spin_lock(&host->lock);
+
+ status = bfin_read_SDH_E_STATUS();
+ if (status & SD_CARD_DET) {
+ mmc_detect_change(host->mmc, 0);
+ bfin_write_SDH_E_STATUS(SD_CARD_DET);
+ }
+ status = bfin_read_SDH_STATUS();
+ if (status & (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)) {
+ handled |= sdh_cmd_done(host, status);
+ bfin_write_SDH_STATUS_CLR(CMD_SENT_STAT | CMD_RESP_END_STAT | \
+ CMD_TIMEOUT_STAT | CMD_CRC_FAIL_STAT);
+ SSYNC();
+ }
+
+ status = bfin_read_SDH_STATUS();
+ if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN))
+ handled |= sdh_data_done(host, status);
+
+ spin_unlock(&host->lock);
+
+ dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void sdh_reset(void)
+{
+#if defined(CONFIG_BF54x)
+ /* Secure Digital Host shares DMA with the NAND controller */
+ bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
+#endif
+
+ bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
+ SSYNC();
+
+ /* Disable the card-insert detection pin. MMC_CAP_NEEDS_POLL is set,
+ * so the mmc stack will do the detection.
+ */
+ bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
+ SSYNC();
+}
+
+static int sdh_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct sdh_host *host;
+ struct bfin_sd_host *drv_data = get_sdh_data(pdev);
+ int ret;
+
+ if (!drv_data) {
+ dev_err(&pdev->dev, "missing platform driver data\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mmc->ops = &sdh_ops;
+#if defined(CONFIG_BF51x)
+ mmc->max_segs = 1;
+#else
+ mmc->max_segs = PAGE_SIZE / sizeof(struct dma_desc_array);
+#endif
+#ifdef RSI_BLKSZ
+ mmc->max_seg_size = -1;
+#else
+ mmc->max_seg_size = 1 << 16;
+#endif
+ mmc->max_blk_size = 1 << 11;
+ mmc->max_blk_count = 1 << 11;
+ mmc->max_req_size = PAGE_SIZE;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->f_max = get_sclk();
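+	/* the largest divider (clk_div = 0xFF) yields SCLK / (2 * 256),
+	 * i.e. f_max >> 9 */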
+ mmc->f_min = mmc->f_max >> 9;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL;
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->sclk = get_sclk();
+
+ spin_lock_init(&host->lock);
+ host->irq = drv_data->irq_int0;
+ host->dma_ch = drv_data->dma_chan;
+
+ ret = request_dma(host->dma_ch, DRIVER_NAME "DMA");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request DMA channel\n");
+ goto out1;
+ }
+
+ ret = set_dma_callback(host->dma_ch, sdh_dma_irq, host);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request DMA irq\n");
+ goto out2;
+ }
+
+ host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
+ if (host->sg_cpu == NULL) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+
+ platform_set_drvdata(pdev, mmc);
+
+ ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request status irq\n");
+ goto out3;
+ }
+
+ ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request peripheral pins\n");
+ goto out4;
+ }
+
+ sdh_reset();
+
+ mmc_add_host(mmc);
+ return 0;
+
+out4:
+ free_irq(host->irq, host);
+out3:
+ mmc_remove_host(mmc);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+out2:
+ free_dma(host->dma_ch);
+out1:
+ mmc_free_host(mmc);
+out:
+ return ret;
+}
+
+static int sdh_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+
+ if (mmc) {
+ struct sdh_host *host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+
+ sdh_stop_clock(host);
+ free_irq(host->irq, host);
+ free_dma(host->dma_ch);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+
+ mmc_free_host(mmc);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sdh_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct bfin_sd_host *drv_data = get_sdh_data(dev);
+
+ peripheral_free_list(drv_data->pin_req);
+
+ return 0;
+}
+
+static int sdh_resume(struct platform_device *dev)
+{
+ struct bfin_sd_host *drv_data = get_sdh_data(dev);
+ int ret = 0;
+
+ ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
+ if (ret) {
+ dev_err(&dev->dev, "unable to request peripheral pins\n");
+ return ret;
+ }
+
+ sdh_reset();
+ return ret;
+}
+#else
+# define sdh_suspend NULL
+# define sdh_resume NULL
+#endif
+
+static struct platform_driver sdh_driver = {
+ .probe = sdh_probe,
+ .remove = sdh_remove,
+ .suspend = sdh_suspend,
+ .resume = sdh_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(sdh_driver);
+
+MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver");
+MODULE_AUTHOR("Cliff Cai, Roy Huang");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/mmc/host/cb710-mmc.c b/kernel/drivers/mmc/host/cb710-mmc.c
new file mode 100644
index 000000000..1087b4c79
--- /dev/null
+++ b/kernel/drivers/mmc/host/cb710-mmc.c
@@ -0,0 +1,780 @@
+/*
+ * cb710/mmc.c
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "cb710-mmc.h"
+
+static const u8 cb710_clock_divider_log2[8] = {
+/* 1, 2, 4, 8, 16, 32, 128, 512 */
+ 0, 1, 2, 3, 4, 5, 7, 9
+};
+#define CB710_MAX_DIVIDER_IDX \
+ (ARRAY_SIZE(cb710_clock_divider_log2) - 1)
+
+static const u8 cb710_src_freq_mhz[16] = {
+ 33, 10, 20, 25, 30, 35, 40, 45,
+ 50, 55, 60, 65, 70, 75, 80, 85
+};
+
+static void cb710_mmc_select_clock_divider(struct mmc_host *mmc, int hz)
+{
+ struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
+ struct pci_dev *pdev = cb710_slot_to_chip(slot)->pdev;
+ u32 src_freq_idx;
+ u32 divider_idx;
+ int src_hz;
+
+ /* on CB710 in HP nx9500:
+ * src_freq_idx == 0
+ * indexes 1-7 work as written in the table
+ * indexes 0,8-15 give no clock output
+ */
+ pci_read_config_dword(pdev, 0x48, &src_freq_idx);
+ src_freq_idx = (src_freq_idx >> 16) & 0xF;
+ src_hz = cb710_src_freq_mhz[src_freq_idx] * 1000000;
+
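+	/* pick the first divider whose output does not exceed the requested
+	 * rate; e.g. with src_hz = 33 MHz, hz = 20 MHz selects divider_idx 1
+	 * (divide by 2), i.e. a 16.5 MHz output
+	 */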
+ for (divider_idx = 0; divider_idx < CB710_MAX_DIVIDER_IDX; ++divider_idx) {
+ if (hz >= src_hz >> cb710_clock_divider_log2[divider_idx])
+ break;
+ }
+
+ if (src_freq_idx)
+ divider_idx |= 0x8;
+ else if (divider_idx == 0)
+ divider_idx = 1;
+
+ cb710_pci_update_config_reg(pdev, 0x40, ~0xF0000000, divider_idx << 28);
+
+ dev_dbg(cb710_slot_dev(slot),
+ "clock set to %d Hz, wanted %d Hz; src_freq_idx = %d, divider_idx = %d|%d\n",
+ src_hz >> cb710_clock_divider_log2[divider_idx & 7],
+ hz, src_freq_idx, divider_idx & 7, divider_idx & 8);
+}
+
+static void __cb710_mmc_enable_irq(struct cb710_slot *slot,
+ unsigned short enable, unsigned short mask)
+{
+ /* clear global IE
+ * - it gets set later if any interrupt sources are enabled */
+ mask |= CB710_MMC_IE_IRQ_ENABLE;
+
+	/* it looks like the interrupt is fired whenever
+	 * WORD[0x0C] & WORD[0x10] != 0;
+	 * -> bit 15 of port 0x0C seems to be the global interrupt enable
+	 */
+
+ enable = (cb710_read_port_16(slot, CB710_MMC_IRQ_ENABLE_PORT)
+ & ~mask) | enable;
+
+ if (enable)
+ enable |= CB710_MMC_IE_IRQ_ENABLE;
+
+ cb710_write_port_16(slot, CB710_MMC_IRQ_ENABLE_PORT, enable);
+}
+
+static void cb710_mmc_enable_irq(struct cb710_slot *slot,
+ unsigned short enable, unsigned short mask)
+{
+ struct cb710_mmc_reader *reader = mmc_priv(cb710_slot_to_mmc(slot));
+ unsigned long flags;
+
+ spin_lock_irqsave(&reader->irq_lock, flags);
+ /* this is the only thing irq_lock protects */
+ __cb710_mmc_enable_irq(slot, enable, mask);
+ spin_unlock_irqrestore(&reader->irq_lock, flags);
+}
+
+static void cb710_mmc_reset_events(struct cb710_slot *slot)
+{
+ cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, 0xFF);
+ cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, 0xFF);
+ cb710_write_port_8(slot, CB710_MMC_STATUS2_PORT, 0xFF);
+}
+
+static void cb710_mmc_enable_4bit_data(struct cb710_slot *slot, int enable)
+{
+ if (enable)
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT,
+ CB710_MMC_C1_4BIT_DATA_BUS, 0);
+ else
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT,
+ 0, CB710_MMC_C1_4BIT_DATA_BUS);
+}
+
+static int cb710_check_event(struct cb710_slot *slot, u8 what)
+{
+ u16 status;
+
+ status = cb710_read_port_16(slot, CB710_MMC_STATUS_PORT);
+
+ if (status & CB710_MMC_S0_FIFO_UNDERFLOW) {
+ /* it is just a guess, so log it */
+ dev_dbg(cb710_slot_dev(slot),
+ "CHECK : ignoring bit 6 in status %04X\n", status);
+ cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT,
+ CB710_MMC_S0_FIFO_UNDERFLOW);
+ status &= ~CB710_MMC_S0_FIFO_UNDERFLOW;
+ }
+
+ if (status & CB710_MMC_STATUS_ERROR_EVENTS) {
+ dev_dbg(cb710_slot_dev(slot),
+ "CHECK : returning EIO on status %04X\n", status);
+ cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, status & 0xFF);
+ cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT,
+ CB710_MMC_S1_RESET);
+ return -EIO;
+ }
+
+ /* 'what' is a bit in MMC_STATUS1 */
+ if ((status >> 8) & what) {
+ cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, what);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int cb710_wait_for_event(struct cb710_slot *slot, u8 what)
+{
+ int err = 0;
+ unsigned limit = 2000000; /* FIXME: real timeout */
+
+#ifdef CONFIG_CB710_DEBUG
+ u32 e, x;
+ e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
+#endif
+
+ while (!(err = cb710_check_event(slot, what))) {
+ if (!--limit) {
+ cb710_dump_regs(cb710_slot_to_chip(slot),
+ CB710_DUMP_REGS_MMC);
+ err = -ETIMEDOUT;
+ break;
+ }
+ udelay(1);
+ }
+
+#ifdef CONFIG_CB710_DEBUG
+ x = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
+
+ limit = 2000000 - limit;
+ if (limit > 100)
+ dev_dbg(cb710_slot_dev(slot),
+ "WAIT10: waited %d loops, what %d, entry val %08X, exit val %08X\n",
+ limit, what, e, x);
+#endif
+ return err < 0 ? err : 0;
+}
+
+
+static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask)
+{
+ unsigned limit = 500000; /* FIXME: real timeout */
+ int err = 0;
+
+#ifdef CONFIG_CB710_DEBUG
+ u32 e, x;
+ e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
+#endif
+
+ while (cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) & mask) {
+ if (!--limit) {
+ cb710_dump_regs(cb710_slot_to_chip(slot),
+ CB710_DUMP_REGS_MMC);
+ err = -ETIMEDOUT;
+ break;
+ }
+ udelay(1);
+ }
+
+#ifdef CONFIG_CB710_DEBUG
+ x = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
+
+ limit = 500000 - limit;
+ if (limit > 100)
+ dev_dbg(cb710_slot_dev(slot),
+ "WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n",
+ limit, mask, e, x);
+#endif
+ return err;
+}
+
+static void cb710_mmc_set_transfer_size(struct cb710_slot *slot,
+ size_t count, size_t blocksize)
+{
+ cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
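+	/* (block count - 1) goes in the high word, (block size - 1) in the
+	 * low word */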
+ cb710_write_port_32(slot, CB710_MMC_TRANSFER_SIZE_PORT,
+ ((count - 1) << 16)|(blocksize - 1));
+
+ dev_vdbg(cb710_slot_dev(slot), "set up for %zu block%s of %zu bytes\n",
+ count, count == 1 ? "" : "s", blocksize);
+}
+
+static void cb710_mmc_fifo_hack(struct cb710_slot *slot)
+{
+	/* without this, received data is prepended with 8 bytes of zeroes */
+ u32 r1, r2;
+ int ok = 0;
+
+ r1 = cb710_read_port_32(slot, CB710_MMC_DATA_PORT);
+ r2 = cb710_read_port_32(slot, CB710_MMC_DATA_PORT);
+ if (cb710_read_port_8(slot, CB710_MMC_STATUS0_PORT)
+ & CB710_MMC_S0_FIFO_UNDERFLOW) {
+ cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT,
+ CB710_MMC_S0_FIFO_UNDERFLOW);
+ ok = 1;
+ }
+
+ dev_dbg(cb710_slot_dev(slot),
+ "FIFO-read-hack: expected STATUS0 bit was %s\n",
+ ok ? "set." : "NOT SET!");
+ dev_dbg(cb710_slot_dev(slot),
+ "FIFO-read-hack: dwords ignored: %08X %08X - %s\n",
+ r1, r2, (r1|r2) ? "BAD (NOT ZERO)!" : "ok");
+}
+
+static int cb710_mmc_receive_pio(struct cb710_slot *slot,
+ struct sg_mapping_iter *miter, size_t dw_count)
+{
+ if (!(cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) & CB710_MMC_S2_FIFO_READY)) {
+ int err = cb710_wait_for_event(slot,
+ CB710_MMC_S1_PIO_TRANSFER_DONE);
+ if (err)
+ return err;
+ }
+
+ cb710_sg_dwiter_write_from_io(miter,
+ slot->iobase + CB710_MMC_DATA_PORT, dw_count);
+
+ return 0;
+}
+
+static bool cb710_is_transfer_size_supported(struct mmc_data *data)
+{
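+	/* multiples of 16 bytes are always supported; the only other case
+	 * the hardware seems to handle is a single 8-byte block */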
+ return !(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8));
+}
+
+static int cb710_mmc_receive(struct cb710_slot *slot, struct mmc_data *data)
+{
+ struct sg_mapping_iter miter;
+ size_t len, blocks = data->blocks;
+ int err = 0;
+
+	/* TODO: I don't know how/if the hardware handles blocks that are not
+	 * a multiple of 16 bytes, other than a single 8-byte block */
+ if (unlikely(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8)))
+ return -EINVAL;
+
+ sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_TO_SG);
+
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
+ 15, CB710_MMC_C2_READ_PIO_SIZE_MASK);
+
+ cb710_mmc_fifo_hack(slot);
+
+ while (blocks-- > 0) {
+ len = data->blksz;
+
+ while (len >= 16) {
+ err = cb710_mmc_receive_pio(slot, &miter, 4);
+ if (err)
+ goto out;
+ len -= 16;
+ }
+
+ if (!len)
+ continue;
+
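+		/* the tail is shorter than 16 bytes: shrink the PIO window to
+		 * the remaining length and drain it with a short dword read */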
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
+ len - 1, CB710_MMC_C2_READ_PIO_SIZE_MASK);
+
+ len = (len >= 8) ? 4 : 2;
+ err = cb710_mmc_receive_pio(slot, &miter, len);
+ if (err)
+ goto out;
+ }
+out:
+ sg_miter_stop(&miter);
+ return err;
+}
+
+static int cb710_mmc_send(struct cb710_slot *slot, struct mmc_data *data)
+{
+ struct sg_mapping_iter miter;
+ size_t len, blocks = data->blocks;
+ int err = 0;
+
+ /* TODO: I don't know how/if the hardware handles multiple
+ * non-16B-boundary blocks */
+ if (unlikely(data->blocks > 1 && data->blksz & 15))
+ return -EINVAL;
+
+ sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_FROM_SG);
+
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
+ 0, CB710_MMC_C2_READ_PIO_SIZE_MASK);
+
+ while (blocks-- > 0) {
+ len = (data->blksz + 15) >> 4;
+ do {
+ if (!(cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT)
+ & CB710_MMC_S2_FIFO_EMPTY)) {
+ err = cb710_wait_for_event(slot,
+ CB710_MMC_S1_PIO_TRANSFER_DONE);
+ if (err)
+ goto out;
+ }
+ cb710_sg_dwiter_read_to_io(&miter,
+ slot->iobase + CB710_MMC_DATA_PORT, 4);
+ } while (--len);
+ }
+out:
+ sg_miter_stop(&miter);
+ return err;
+}
+
+static u16 cb710_encode_cmd_flags(struct cb710_mmc_reader *reader,
+ struct mmc_command *cmd)
+{
+ unsigned int flags = cmd->flags;
+ u16 cb_flags = 0;
+
+ /* Windows driver returned 0 for commands for which no response
+ * is expected. It happened that there were only two such commands
+ * used: MMC_GO_IDLE_STATE and MMC_GO_INACTIVE_STATE so it might
+ * as well be a bug in that driver.
+ *
+ * Original driver set bit 14 for MMC/SD application
+ * commands. There's no difference 'on the wire' and
+ * it apparently works without it anyway.
+ */
+
+ switch (flags & MMC_CMD_MASK) {
+ case MMC_CMD_AC: cb_flags = CB710_MMC_CMD_AC; break;
+ case MMC_CMD_ADTC: cb_flags = CB710_MMC_CMD_ADTC; break;
+ case MMC_CMD_BC: cb_flags = CB710_MMC_CMD_BC; break;
+ case MMC_CMD_BCR: cb_flags = CB710_MMC_CMD_BCR; break;
+ }
+
+ if (flags & MMC_RSP_BUSY)
+ cb_flags |= CB710_MMC_RSP_BUSY;
+
+ cb_flags |= cmd->opcode << CB710_MMC_CMD_CODE_SHIFT;
+
+ if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
+ cb_flags |= CB710_MMC_DATA_READ;
+
+ if (flags & MMC_RSP_PRESENT) {
+ /* Windows driver set 01 at bits 4,3 except for
+ * MMC_SET_BLOCKLEN where it set 10. Maybe the
+ * hardware can do something special about this
+ * command? The original driver looks buggy/incomplete
+ * anyway so we ignore this for now.
+ *
+ * I assume that 00 here means no response is expected.
+ */
+ cb_flags |= CB710_MMC_RSP_PRESENT;
+
+ if (flags & MMC_RSP_136)
+ cb_flags |= CB710_MMC_RSP_136;
+ if (!(flags & MMC_RSP_CRC))
+ cb_flags |= CB710_MMC_RSP_NO_CRC;
+ }
+
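+	/* e.g. CMD17 (single-block read: ADTC, R1, data read) encodes as
+	 * CB710_MMC_CMD_ADTC (0x0060) | 17 << 8 (0x1100) |
+	 * CB710_MMC_DATA_READ (0x0080) | CB710_MMC_RSP_PRESENT (0x0008),
+	 * i.e. 0x11E8 */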
+ return cb_flags;
+}
+
+static void cb710_receive_response(struct cb710_slot *slot,
+ struct mmc_command *cmd)
+{
+ unsigned rsp_opcode, wanted_opcode;
+
+ /* Looks like final byte with CRC is always stripped (same as SDHCI) */
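+	/* so each 32-bit response word is reassembled from the low 24 bits
+	 * of one register and the top byte of the following one */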
+ if (cmd->flags & MMC_RSP_136) {
+ u32 resp[4];
+
+ resp[0] = cb710_read_port_32(slot, CB710_MMC_RESPONSE3_PORT);
+ resp[1] = cb710_read_port_32(slot, CB710_MMC_RESPONSE2_PORT);
+ resp[2] = cb710_read_port_32(slot, CB710_MMC_RESPONSE1_PORT);
+ resp[3] = cb710_read_port_32(slot, CB710_MMC_RESPONSE0_PORT);
+ rsp_opcode = resp[0] >> 24;
+
+ cmd->resp[0] = (resp[0] << 8)|(resp[1] >> 24);
+ cmd->resp[1] = (resp[1] << 8)|(resp[2] >> 24);
+ cmd->resp[2] = (resp[2] << 8)|(resp[3] >> 24);
+ cmd->resp[3] = (resp[3] << 8);
+ } else {
+ rsp_opcode = cb710_read_port_32(slot, CB710_MMC_RESPONSE1_PORT) & 0x3F;
+ cmd->resp[0] = cb710_read_port_32(slot, CB710_MMC_RESPONSE0_PORT);
+ }
+
+ wanted_opcode = (cmd->flags & MMC_RSP_OPCODE) ? cmd->opcode : 0x3F;
+ if (rsp_opcode != wanted_opcode)
+ cmd->error = -EILSEQ;
+}
+
+static int cb710_mmc_transfer_data(struct cb710_slot *slot,
+ struct mmc_data *data)
+{
+ int error, to;
+
+ if (data->flags & MMC_DATA_READ)
+ error = cb710_mmc_receive(slot, data);
+ else
+ error = cb710_mmc_send(slot, data);
+
+ to = cb710_wait_for_event(slot, CB710_MMC_S1_DATA_TRANSFER_DONE);
+ if (!error)
+ error = to;
+
+ if (!error)
+ data->bytes_xfered = data->blksz * data->blocks;
+ return error;
+}
+
+static int cb710_mmc_command(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+ struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
+ struct cb710_mmc_reader *reader = mmc_priv(mmc);
+ struct mmc_data *data = cmd->data;
+
+ u16 cb_cmd = cb710_encode_cmd_flags(reader, cmd);
+ dev_dbg(cb710_slot_dev(slot), "cmd request: 0x%04X\n", cb_cmd);
+
+ if (data) {
+ if (!cb710_is_transfer_size_supported(data)) {
+ data->error = -EINVAL;
+ return -1;
+ }
+ cb710_mmc_set_transfer_size(slot, data->blocks, data->blksz);
+ }
+
+ cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20|CB710_MMC_S2_BUSY_10);
+ cb710_write_port_16(slot, CB710_MMC_CMD_TYPE_PORT, cb_cmd);
+ cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
+ cb710_write_port_32(slot, CB710_MMC_CMD_PARAM_PORT, cmd->arg);
+ cb710_mmc_reset_events(slot);
+ cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x01, 0);
+
+ cmd->error = cb710_wait_for_event(slot, CB710_MMC_S1_COMMAND_SENT);
+ if (cmd->error)
+ return -1;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ cb710_receive_response(slot, cmd);
+ if (cmd->error)
+ return -1;
+ }
+
+ if (data)
+ data->error = cb710_mmc_transfer_data(slot, data);
+ return 0;
+}
+
+static void cb710_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
+ struct cb710_mmc_reader *reader = mmc_priv(mmc);
+
+ WARN_ON(reader->mrq != NULL);
+
+ reader->mrq = mrq;
+ cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0);
+
+ if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop)
+ cb710_mmc_command(mmc, mrq->stop);
+
+ tasklet_schedule(&reader->finish_req_tasklet);
+}
+
+static int cb710_mmc_powerup(struct cb710_slot *slot)
+{
+#ifdef CONFIG_CB710_DEBUG
+ struct cb710_chip *chip = cb710_slot_to_chip(slot);
+#endif
+ int err;
+
+ /* a lot of magic for now */
+ dev_dbg(cb710_slot_dev(slot), "bus powerup\n");
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
+ if (unlikely(err))
+ return err;
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x80, 0);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0x80, 0);
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ mdelay(1);
+ dev_dbg(cb710_slot_dev(slot), "after delay 1\n");
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
+ if (unlikely(err))
+ return err;
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x09, 0);
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ mdelay(1);
+ dev_dbg(cb710_slot_dev(slot), "after delay 2\n");
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
+ if (unlikely(err))
+ return err;
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0, 0x08);
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ mdelay(2);
+ dev_dbg(cb710_slot_dev(slot), "after delay 3\n");
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x06, 0);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x70, 0);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, 0x80, 0);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0x03, 0);
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
+ if (unlikely(err))
+ return err;
+	/* This port behaves weirdly: quick byte reads of 0x08,0x09 return
+	 * 0xFF,0x00 after writing 0xFFFF to 0x08; it works correctly when
+	 * read/written from userspace... What am I missing here?
+	 * (it doesn't depend on the write-to-read delay) */
+ cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0xFFFF);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x06, 0);
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+ dev_dbg(cb710_slot_dev(slot), "bus powerup finished\n");
+
+ return cb710_check_event(slot, 0);
+}
+
+static void cb710_mmc_powerdown(struct cb710_slot *slot)
+{
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0, 0x81);
+ cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0, 0x80);
+}
+
+static void cb710_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
+ struct cb710_mmc_reader *reader = mmc_priv(mmc);
+ int err;
+
+ cb710_mmc_select_clock_divider(mmc, ios->clock);
+
+ if (ios->power_mode != reader->last_power_mode)
+ switch (ios->power_mode) {
+ case MMC_POWER_ON:
+ err = cb710_mmc_powerup(slot);
+ if (err) {
+ dev_warn(cb710_slot_dev(slot),
+				"powerup failed (%d) - retrying\n", err);
+ cb710_mmc_powerdown(slot);
+ udelay(1);
+ err = cb710_mmc_powerup(slot);
+ if (err)
+ dev_warn(cb710_slot_dev(slot),
+ "powerup retry failed (%d) - expect errors\n",
+ err);
+ }
+ reader->last_power_mode = MMC_POWER_ON;
+ break;
+ case MMC_POWER_OFF:
+ cb710_mmc_powerdown(slot);
+ reader->last_power_mode = MMC_POWER_OFF;
+ break;
+ case MMC_POWER_UP:
+ default:
+ /* ignore */;
+ }
+
+ cb710_mmc_enable_4bit_data(slot, ios->bus_width != MMC_BUS_WIDTH_1);
+
+ cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0);
+}
+
+static int cb710_mmc_get_ro(struct mmc_host *mmc)
+{
+ struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
+
+ return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
+ & CB710_MMC_S3_WRITE_PROTECTED;
+}
+
+static int cb710_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
+
+ return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
+ & CB710_MMC_S3_CARD_DETECTED;
+}
+
+static int cb710_mmc_irq_handler(struct cb710_slot *slot)
+{
+ struct mmc_host *mmc = cb710_slot_to_mmc(slot);
+ struct cb710_mmc_reader *reader = mmc_priv(mmc);
+ u32 status, config1, config2, irqen;
+
+ status = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
+ irqen = cb710_read_port_32(slot, CB710_MMC_IRQ_ENABLE_PORT);
+ config2 = cb710_read_port_32(slot, CB710_MMC_CONFIGB_PORT);
+ config1 = cb710_read_port_32(slot, CB710_MMC_CONFIG_PORT);
+
+ dev_dbg(cb710_slot_dev(slot), "interrupt; status: %08X, "
+ "ie: %08X, c2: %08X, c1: %08X\n",
+ status, irqen, config2, config1);
+
+ if (status & (CB710_MMC_S1_CARD_CHANGED << 8)) {
+ /* ack the event */
+ cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT,
+ CB710_MMC_S1_CARD_CHANGED);
+ if ((irqen & CB710_MMC_IE_CISTATUS_MASK)
+ == CB710_MMC_IE_CISTATUS_MASK)
+ mmc_detect_change(mmc, HZ/5);
+ } else {
+ dev_dbg(cb710_slot_dev(slot), "unknown interrupt (test)\n");
+ spin_lock(&reader->irq_lock);
+ __cb710_mmc_enable_irq(slot, 0, CB710_MMC_IE_TEST_MASK);
+ spin_unlock(&reader->irq_lock);
+ }
+
+ return 1;
+}
+
+static void cb710_mmc_finish_request_tasklet(unsigned long data)
+{
+ struct mmc_host *mmc = (void *)data;
+ struct cb710_mmc_reader *reader = mmc_priv(mmc);
+ struct mmc_request *mrq = reader->mrq;
+
+ reader->mrq = NULL;
+ mmc_request_done(mmc, mrq);
+}
+
+static const struct mmc_host_ops cb710_mmc_host = {
+ .request = cb710_mmc_request,
+ .set_ios = cb710_mmc_set_ios,
+ .get_ro = cb710_mmc_get_ro,
+ .get_cd = cb710_mmc_get_cd,
+};
+
+#ifdef CONFIG_PM
+
+static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
+
+ cb710_mmc_enable_irq(slot, 0, ~0);
+ return 0;
+}
+
+static int cb710_mmc_resume(struct platform_device *pdev)
+{
+ struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
+
+ cb710_mmc_enable_irq(slot, 0, ~0);
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+static int cb710_mmc_init(struct platform_device *pdev)
+{
+ struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
+ struct cb710_chip *chip = cb710_slot_to_chip(slot);
+ struct mmc_host *mmc;
+ struct cb710_mmc_reader *reader;
+ int err;
+ u32 val;
+
+ mmc = mmc_alloc_host(sizeof(*reader), cb710_slot_dev(slot));
+ if (!mmc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mmc);
+
+ /* harmless (maybe) magic */
+ pci_read_config_dword(chip->pdev, 0x48, &val);
+ val = cb710_src_freq_mhz[(val >> 16) & 0xF];
+ dev_dbg(cb710_slot_dev(slot), "source frequency: %dMHz\n", val);
+ val *= 1000000;
+
+ mmc->ops = &cb710_mmc_host;
+ mmc->f_max = val;
+ mmc->f_min = val >> cb710_clock_divider_log2[CB710_MAX_DIVIDER_IDX];
+ mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
+ mmc->caps = MMC_CAP_4_BIT_DATA;
+
+ reader = mmc_priv(mmc);
+
+ tasklet_init(&reader->finish_req_tasklet,
+ cb710_mmc_finish_request_tasklet, (unsigned long)mmc);
+ spin_lock_init(&reader->irq_lock);
+ cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
+
+ cb710_mmc_enable_irq(slot, 0, ~0);
+ cb710_set_irq_handler(slot, cb710_mmc_irq_handler);
+
+ err = mmc_add_host(mmc);
+ if (unlikely(err))
+ goto err_free_mmc;
+
+ dev_dbg(cb710_slot_dev(slot), "mmc_hostname is %s\n",
+ mmc_hostname(mmc));
+
+ cb710_mmc_enable_irq(slot, CB710_MMC_IE_CARD_INSERTION_STATUS, 0);
+
+ return 0;
+
+err_free_mmc:
+ dev_dbg(cb710_slot_dev(slot), "mmc_add_host() failed: %d\n", err);
+
+ cb710_set_irq_handler(slot, NULL);
+ mmc_free_host(mmc);
+ return err;
+}
+
+static int cb710_mmc_exit(struct platform_device *pdev)
+{
+ struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
+ struct mmc_host *mmc = cb710_slot_to_mmc(slot);
+ struct cb710_mmc_reader *reader = mmc_priv(mmc);
+
+ cb710_mmc_enable_irq(slot, 0, CB710_MMC_IE_CARD_INSERTION_STATUS);
+
+ mmc_remove_host(mmc);
+
+ /* IRQs should be disabled now, but let's stay on the safe side */
+ cb710_mmc_enable_irq(slot, 0, ~0);
+ cb710_set_irq_handler(slot, NULL);
+
+ /* clear config ports - just in case */
+ cb710_write_port_32(slot, CB710_MMC_CONFIG_PORT, 0);
+ cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0);
+
+ tasklet_kill(&reader->finish_req_tasklet);
+
+ mmc_free_host(mmc);
+ return 0;
+}
+
+static struct platform_driver cb710_mmc_driver = {
+ .driver.name = "cb710-mmc",
+ .probe = cb710_mmc_init,
+ .remove = cb710_mmc_exit,
+#ifdef CONFIG_PM
+ .suspend = cb710_mmc_suspend,
+ .resume = cb710_mmc_resume,
+#endif
+};
+
+module_platform_driver(cb710_mmc_driver);
+
+MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>");
+MODULE_DESCRIPTION("ENE CB710 memory card reader driver - MMC/SD part");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cb710-mmc");
diff --git a/kernel/drivers/mmc/host/cb710-mmc.h b/kernel/drivers/mmc/host/cb710-mmc.h
new file mode 100644
index 000000000..8984ec878
--- /dev/null
+++ b/kernel/drivers/mmc/host/cb710-mmc.h
@@ -0,0 +1,104 @@
+/*
+ * cb710/cb710-mmc.h
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef LINUX_CB710_MMC_H
+#define LINUX_CB710_MMC_H
+
+#include <linux/cb710.h>
+
+/* per-MMC-reader structure */
+struct cb710_mmc_reader {
+ struct tasklet_struct finish_req_tasklet;
+ struct mmc_request *mrq;
+ spinlock_t irq_lock;
+ unsigned char last_power_mode;
+};
+
+/* some device struct walking */
+
+static inline struct mmc_host *cb710_slot_to_mmc(struct cb710_slot *slot)
+{
+ return platform_get_drvdata(&slot->pdev);
+}
+
+static inline struct cb710_slot *cb710_mmc_to_slot(struct mmc_host *mmc)
+{
+ struct platform_device *pdev = container_of(mmc_dev(mmc),
+ struct platform_device, dev);
+ return cb710_pdev_to_slot(pdev);
+}
+
+/* registers (this might be all wrong ;) */
+
+#define CB710_MMC_DATA_PORT 0x00
+
+#define CB710_MMC_CONFIG_PORT 0x04
+#define CB710_MMC_CONFIG0_PORT 0x04
+#define CB710_MMC_CONFIG1_PORT 0x05
+#define CB710_MMC_C1_4BIT_DATA_BUS 0x40
+#define CB710_MMC_CONFIG2_PORT 0x06
+#define CB710_MMC_C2_READ_PIO_SIZE_MASK 0x0F /* N-1 */
+#define CB710_MMC_CONFIG3_PORT 0x07
+
+#define CB710_MMC_CONFIGB_PORT 0x08
+
+#define CB710_MMC_IRQ_ENABLE_PORT 0x0C
+#define CB710_MMC_IE_TEST_MASK 0x00BF
+#define CB710_MMC_IE_CARD_INSERTION_STATUS 0x1000
+#define CB710_MMC_IE_IRQ_ENABLE 0x8000
+#define CB710_MMC_IE_CISTATUS_MASK \
+ (CB710_MMC_IE_CARD_INSERTION_STATUS|CB710_MMC_IE_IRQ_ENABLE)
+
+#define CB710_MMC_STATUS_PORT 0x10
+#define CB710_MMC_STATUS_ERROR_EVENTS 0x60FF
+#define CB710_MMC_STATUS0_PORT 0x10
+#define CB710_MMC_S0_FIFO_UNDERFLOW 0x40
+#define CB710_MMC_STATUS1_PORT 0x11
+#define CB710_MMC_S1_COMMAND_SENT 0x01
+#define CB710_MMC_S1_DATA_TRANSFER_DONE 0x02
+#define CB710_MMC_S1_PIO_TRANSFER_DONE 0x04
+#define CB710_MMC_S1_CARD_CHANGED 0x10
+#define CB710_MMC_S1_RESET 0x20
+#define CB710_MMC_STATUS2_PORT 0x12
+#define CB710_MMC_S2_FIFO_READY 0x01
+#define CB710_MMC_S2_FIFO_EMPTY 0x02
+#define CB710_MMC_S2_BUSY_10 0x10
+#define CB710_MMC_S2_BUSY_20 0x20
+#define CB710_MMC_STATUS3_PORT 0x13
+#define CB710_MMC_S3_CARD_DETECTED 0x02
+#define CB710_MMC_S3_WRITE_PROTECTED 0x04
+
+#define CB710_MMC_CMD_TYPE_PORT 0x14
+#define CB710_MMC_RSP_TYPE_MASK 0x0007
+#define CB710_MMC_RSP_R1 (0)
+#define CB710_MMC_RSP_136 (5)
+#define CB710_MMC_RSP_NO_CRC (2)
+#define CB710_MMC_RSP_PRESENT_MASK 0x0018
+#define CB710_MMC_RSP_NONE (0 << 3)
+#define CB710_MMC_RSP_PRESENT (1 << 3)
+#define CB710_MMC_RSP_PRESENT_X (2 << 3)
+#define CB710_MMC_CMD_TYPE_MASK 0x0060
+#define CB710_MMC_CMD_BC (0 << 5)
+#define CB710_MMC_CMD_BCR (1 << 5)
+#define CB710_MMC_CMD_AC (2 << 5)
+#define CB710_MMC_CMD_ADTC (3 << 5)
+#define CB710_MMC_DATA_READ 0x0080
+#define CB710_MMC_CMD_CODE_MASK 0x3F00
+#define CB710_MMC_CMD_CODE_SHIFT 8
+#define CB710_MMC_IS_APP_CMD 0x4000
+#define CB710_MMC_RSP_BUSY 0x8000
+
+#define CB710_MMC_CMD_PARAM_PORT 0x18
+#define CB710_MMC_TRANSFER_SIZE_PORT 0x1C
+#define CB710_MMC_RESPONSE0_PORT 0x20
+#define CB710_MMC_RESPONSE1_PORT 0x24
+#define CB710_MMC_RESPONSE2_PORT 0x28
+#define CB710_MMC_RESPONSE3_PORT 0x2C
+
+#endif /* LINUX_CB710_MMC_H */
diff --git a/kernel/drivers/mmc/host/davinci_mmc.c b/kernel/drivers/mmc/host/davinci_mmc.c
new file mode 100644
index 000000000..1625f908d
--- /dev/null
+++ b/kernel/drivers/mmc/host/davinci_mmc.c
@@ -0,0 +1,1482 @@
+/*
+ * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Original author: Purushotam Kumar
+ * Copyright (C) 2009 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/cpufreq.h>
+#include <linux/mmc/host.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/edma.h>
+#include <linux/mmc/mmc.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/platform_data/edma.h>
+#include <linux/platform_data/mmc-davinci.h>
+
+/*
+ * Register Definitions
+ */
+#define DAVINCI_MMCCTL 0x00 /* Control Register */
+#define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */
+#define DAVINCI_MMCST0 0x08 /* Status Register 0 */
+#define DAVINCI_MMCST1 0x0C /* Status Register 1 */
+#define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */
+#define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */
+#define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */
+#define DAVINCI_MMCBLEN 0x1C /* Block Length Register */
+#define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */
+#define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */
+#define DAVINCI_MMCDRR 0x28 /* Data Receive Register */
+#define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */
+#define DAVINCI_MMCCMD 0x30 /* Command Register */
+#define DAVINCI_MMCARGHL 0x34 /* Argument Register */
+#define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */
+#define DAVINCI_MMCRSP23     0x3C /* Response Register 2 and 3 */
+#define DAVINCI_MMCRSP45     0x40 /* Response Register 4 and 5 */
+#define DAVINCI_MMCRSP67     0x44 /* Response Register 6 and 7 */
+#define DAVINCI_MMCDRSP 0x48 /* Data Response Register */
+#define DAVINCI_MMCETOK 0x4C
+#define DAVINCI_MMCCIDX 0x50 /* Command Index Register */
+#define DAVINCI_MMCCKC 0x54
+#define DAVINCI_MMCTORC 0x58
+#define DAVINCI_MMCTODC 0x5C
+#define DAVINCI_MMCBLNC 0x60
+#define DAVINCI_SDIOCTL 0x64
+#define DAVINCI_SDIOST0 0x68
+#define DAVINCI_SDIOIEN 0x6C
+#define DAVINCI_SDIOIST 0x70
+#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
+
+/* DAVINCI_MMCCTL definitions */
+#define MMCCTL_DATRST (1 << 0)
+#define MMCCTL_CMDRST (1 << 1)
+#define MMCCTL_WIDTH_8_BIT (1 << 8)
+#define MMCCTL_WIDTH_4_BIT (1 << 2)
+#define MMCCTL_DATEG_DISABLED (0 << 6)
+#define MMCCTL_DATEG_RISING (1 << 6)
+#define MMCCTL_DATEG_FALLING (2 << 6)
+#define MMCCTL_DATEG_BOTH (3 << 6)
+#define MMCCTL_PERMDR_LE (0 << 9)
+#define MMCCTL_PERMDR_BE (1 << 9)
+#define MMCCTL_PERMDX_LE (0 << 10)
+#define MMCCTL_PERMDX_BE (1 << 10)
+
+/* DAVINCI_MMCCLK definitions */
+#define MMCCLK_CLKEN (1 << 8)
+#define MMCCLK_CLKRT_MASK (0xFF << 0)
+
+/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
+#define MMCST0_DATDNE BIT(0) /* data done */
+#define MMCST0_BSYDNE BIT(1) /* busy done */
+#define MMCST0_RSPDNE BIT(2) /* command done */
+#define MMCST0_TOUTRD BIT(3) /* data read timeout */
+#define MMCST0_TOUTRS BIT(4) /* command response timeout */
+#define MMCST0_CRCWR BIT(5) /* data write CRC error */
+#define MMCST0_CRCRD BIT(6) /* data read CRC error */
+#define MMCST0_CRCRS BIT(7) /* command response CRC error */
+#define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */
+#define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/
+#define MMCST0_DATED BIT(11) /* DAT3 edge detect */
+#define MMCST0_TRNDNE BIT(12) /* transfer done */
+
+/* DAVINCI_MMCST1 definitions */
+#define MMCST1_BUSY (1 << 0)
+
+/* DAVINCI_MMCCMD definitions */
+#define MMCCMD_CMD_MASK (0x3F << 0)
+#define MMCCMD_PPLEN (1 << 7)
+#define MMCCMD_BSYEXP (1 << 8)
+#define MMCCMD_RSPFMT_MASK (3 << 9)
+#define MMCCMD_RSPFMT_NONE (0 << 9)
+#define MMCCMD_RSPFMT_R1456 (1 << 9)
+#define MMCCMD_RSPFMT_R2 (2 << 9)
+#define MMCCMD_RSPFMT_R3 (3 << 9)
+#define MMCCMD_DTRW (1 << 11)
+#define MMCCMD_STRMTP (1 << 12)
+#define MMCCMD_WDATX (1 << 13)
+#define MMCCMD_INITCK (1 << 14)
+#define MMCCMD_DCLR (1 << 15)
+#define MMCCMD_DMATRIG (1 << 16)
+
+/* DAVINCI_MMCFIFOCTL definitions */
+#define MMCFIFOCTL_FIFORST (1 << 0)
+#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
+#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
+#define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */
+#define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */
+#define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */
+#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
+#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
+
+/* DAVINCI_SDIOST0 definitions */
+#define SDIOST0_DAT1_HI BIT(0)
+
+/* DAVINCI_SDIOIEN definitions */
+#define SDIOIEN_IOINTEN BIT(0)
+
+/* DAVINCI_SDIOIST definitions */
+#define SDIOIST_IOINT BIT(0)
+
+/* MMCSD Init clock in Hz in opendrain mode */
+#define MMCSD_INIT_CLOCK 200000
+
+/*
+ * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
+ * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
+ * for drivers with max_segs == 1, making the segments bigger (64KB)
+ * than the page or two that's otherwise typical. nr_sg (passed from
+ * platform data) == 16 gives at least the same throughput boost, using
+ * EDMA transfer linkage instead of spending CPU time copying pages.
+ */
+#define MAX_CCNT ((1 << 16) - 1)
+
+#define MAX_NR_SG 16
+
+static unsigned rw_threshold = 32;
+module_param(rw_threshold, uint, S_IRUGO);
+MODULE_PARM_DESC(rw_threshold,
+ "Read/Write threshold. Default = 32");
+
+static unsigned poll_threshold = 128;
+module_param(poll_threshold, uint, S_IRUGO);
+MODULE_PARM_DESC(poll_threshold,
+ "Polling transaction size threshold. Default = 128");
+
+static unsigned poll_loopcount = 32;
+module_param(poll_loopcount, uint, S_IRUGO);
+MODULE_PARM_DESC(poll_loopcount,
+ "Maximum polling loop count. Default = 32");
+
+static unsigned __initdata use_dma = 1;
+module_param(use_dma, uint, 0);
+MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
+
+struct mmc_davinci_host {
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct mmc_host *mmc;
+ struct clk *clk;
+ unsigned int mmc_input_clk;
+ void __iomem *base;
+ struct resource *mem_res;
+ int mmc_irq, sdio_irq;
+ unsigned char bus_mode;
+
+#define DAVINCI_MMC_DATADIR_NONE 0
+#define DAVINCI_MMC_DATADIR_READ 1
+#define DAVINCI_MMC_DATADIR_WRITE 2
+ unsigned char data_dir;
+
+ /* buffer is used during PIO of one scatterlist segment, and
+ * is updated along with buffer_bytes_left. bytes_left applies
+ * to all N blocks of the PIO transfer.
+ */
+ u8 *buffer;
+ u32 buffer_bytes_left;
+ u32 bytes_left;
+
+ u32 rxdma, txdma;
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
+ bool use_dma;
+ bool do_dma;
+ bool sdio_int;
+ bool active_request;
+
+ /* For PIO we walk scatterlists one segment at a time. */
+ unsigned int sg_len;
+ struct scatterlist *sg;
+
+ /* Version of the MMC/SD controller */
+ u8 version;
+ /* for ns in one cycle calculation */
+ unsigned ns_in_one_cycle;
+ /* Number of sg segments */
+ u8 nr_sg;
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+#endif
+};
+
+static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);
+
+/* PIO only */
+static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
+{
+ host->buffer_bytes_left = sg_dma_len(host->sg);
+ host->buffer = sg_virt(host->sg);
+ if (host->buffer_bytes_left > host->bytes_left)
+ host->buffer_bytes_left = host->bytes_left;
+}
+
+static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
+ unsigned int n)
+{
+ u8 *p;
+ unsigned int i;
+
+ if (host->buffer_bytes_left == 0) {
+ host->sg = sg_next(host->data->sg);
+ mmc_davinci_sg_to_buf(host);
+ }
+
+ p = host->buffer;
+ if (n > host->buffer_bytes_left)
+ n = host->buffer_bytes_left;
+ host->buffer_bytes_left -= n;
+ host->bytes_left -= n;
+
+ /* NOTE: we never transfer more than rw_threshold bytes
+ * to/from the fifo here; there's no I/O overlap.
+	 * This also assumes that the access width (i.e. ACCWD) is 4 bytes.
+ */
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
+ for (i = 0; i < (n >> 2); i++) {
+ writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
+ p = p + 4;
+ }
+ if (n & 3) {
+ iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
+ p = p + (n & 3);
+ }
+ } else {
+ for (i = 0; i < (n >> 2); i++) {
+ *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
+ p = p + 4;
+ }
+ if (n & 3) {
+ ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
+ p = p + (n & 3);
+ }
+ }
+ host->buffer = p;
+}
+
+static void mmc_davinci_start_command(struct mmc_davinci_host *host,
+ struct mmc_command *cmd)
+{
+ u32 cmd_reg = 0;
+ u32 im_val;
+
+ dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
+ cmd->opcode, cmd->arg,
+ ({ char *s;
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1:
+ s = ", R1/R5/R6/R7 response";
+ break;
+ case MMC_RSP_R1B:
+ s = ", R1b response";
+ break;
+ case MMC_RSP_R2:
+ s = ", R2 response";
+ break;
+ case MMC_RSP_R3:
+ s = ", R3/R4 response";
+ break;
+ default:
+ s = ", (R? response)";
+ break;
+ }; s; }));
+ host->cmd = cmd;
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1B:
+ /* There's some spec confusion about when R1B is
+ * allowed, but if the card doesn't issue a BUSY
+ * then it's harmless for us to allow it.
+ */
+ cmd_reg |= MMCCMD_BSYEXP;
+ /* FALLTHROUGH */
+ case MMC_RSP_R1: /* 48 bits, CRC */
+ cmd_reg |= MMCCMD_RSPFMT_R1456;
+ break;
+ case MMC_RSP_R2: /* 136 bits, CRC */
+ cmd_reg |= MMCCMD_RSPFMT_R2;
+ break;
+ case MMC_RSP_R3: /* 48 bits, no CRC */
+ cmd_reg |= MMCCMD_RSPFMT_R3;
+ break;
+ default:
+ cmd_reg |= MMCCMD_RSPFMT_NONE;
+ dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
+ mmc_resp_type(cmd));
+ break;
+ }
+
+ /* Set command index */
+ cmd_reg |= cmd->opcode;
+
+ /* Enable EDMA transfer triggers */
+ if (host->do_dma)
+ cmd_reg |= MMCCMD_DMATRIG;
+
+ if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
+ host->data_dir == DAVINCI_MMC_DATADIR_READ)
+ cmd_reg |= MMCCMD_DMATRIG;
+
+ /* Setting whether command involves data transfer or not */
+ if (cmd->data)
+ cmd_reg |= MMCCMD_WDATX;
+
+ /* Setting whether stream or block transfer */
+ if (cmd->flags & MMC_DATA_STREAM)
+ cmd_reg |= MMCCMD_STRMTP;
+
+ /* Setting whether data read or write */
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
+ cmd_reg |= MMCCMD_DTRW;
+
+ if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
+ cmd_reg |= MMCCMD_PPLEN;
+
+ /* set Command timeout */
+ writel(0x1FFF, host->base + DAVINCI_MMCTOR);
+
+ /* Enable interrupt (calculate here, defer until FIFO is stuffed). */
+ im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
+ im_val |= MMCST0_DATDNE | MMCST0_CRCWR;
+
+ if (!host->do_dma)
+ im_val |= MMCST0_DXRDY;
+ } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
+ im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;
+
+ if (!host->do_dma)
+ im_val |= MMCST0_DRRDY;
+ }
+
+	/*
+	 * Before non-DMA WRITE commands the controller needs priming:
+	 * the FIFO should be populated with 32 bytes, i.e. the FIFO size.
+	 */
+ if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
+ davinci_fifo_data_trans(host, rw_threshold);
+
+ writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
+ writel(cmd_reg, host->base + DAVINCI_MMCCMD);
+
+ host->active_request = true;
+
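+	/* small PIO transfers (up to poll_threshold bytes) usually complete
+	 * quickly, so poll for completion for a bounded number of loops
+	 * before arming the interrupt path below */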
+ if (!host->do_dma && host->bytes_left <= poll_threshold) {
+ u32 count = poll_loopcount;
+
+ while (host->active_request && count--) {
+ mmc_davinci_irq(0, host);
+ cpu_relax();
+ }
+ }
+
+ if (host->active_request)
+ writel(im_val, host->base + DAVINCI_MMCIM);
+}
+
+/*----------------------------------------------------------------------*/
+
+/* DMA infrastructure */
+
+static void davinci_abort_dma(struct mmc_davinci_host *host)
+{
+ struct dma_chan *sync_dev;
+
+ if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
+ sync_dev = host->dma_rx;
+ else
+ sync_dev = host->dma_tx;
+
+ dmaengine_terminate_all(sync_dev);
+}
+
+static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
+ struct mmc_data *data)
+{
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *desc;
+ int ret = 0;
+
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
+ struct dma_slave_config dma_tx_conf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_maxburst =
+ rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
+ chan = host->dma_tx;
+ dmaengine_slave_config(host->dma_tx, &dma_tx_conf);
+
+ desc = dmaengine_prep_slave_sg(host->dma_tx,
+ data->sg,
+ host->sg_len,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_dbg(mmc_dev(host->mmc),
+ "failed to allocate DMA TX descriptor");
+ ret = -1;
+ goto out;
+ }
+ } else {
+ struct dma_slave_config dma_rx_conf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = host->mem_res->start + DAVINCI_MMCDRR,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst =
+ rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
+ chan = host->dma_rx;
+ dmaengine_slave_config(host->dma_rx, &dma_rx_conf);
+
+ desc = dmaengine_prep_slave_sg(host->dma_rx,
+ data->sg,
+ host->sg_len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_dbg(mmc_dev(host->mmc),
+ "failed to allocate DMA RX descriptor");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+
+out:
+ return ret;
+}
+
+static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
+ struct mmc_data *data)
+{
+ int i;
+ int mask = rw_threshold - 1;
+ int ret = 0;
+
+ host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE));
+
+ /* no individual DMA segment should need a partial FIFO */
+ for (i = 0; i < host->sg_len; i++) {
+ if (sg_dma_len(data->sg + i) & mask) {
+ dma_unmap_sg(mmc_dev(host->mmc),
+ data->sg, data->sg_len,
+ (data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ return -1;
+ }
+ }
+
+ host->do_dma = 1;
+ ret = mmc_davinci_send_dma_request(host, data);
+
+ return ret;
+}
+
+static void __init_or_module
+davinci_release_dma_channels(struct mmc_davinci_host *host)
+{
+ if (!host->use_dma)
+ return;
+
+ dma_release_channel(host->dma_tx);
+ dma_release_channel(host->dma_rx);
+}
+
+static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
+{
+ int r;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->dma_tx =
+ dma_request_slave_channel_compat(mask, edma_filter_fn,
+ &host->txdma, mmc_dev(host->mmc), "tx");
+ if (!host->dma_tx) {
+ dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
+ return -ENODEV;
+ }
+
+ host->dma_rx =
+ dma_request_slave_channel_compat(mask, edma_filter_fn,
+ &host->rxdma, mmc_dev(host->mmc), "rx");
+ if (!host->dma_rx) {
+ dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
+ r = -ENODEV;
+ goto free_master_write;
+ }
+
+ return 0;
+
+free_master_write:
+ dma_release_channel(host->dma_tx);
+
+ return r;
+}
+
+/*----------------------------------------------------------------------*/
+
+static void
+mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
+{
+ int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
+ int timeout;
+ struct mmc_data *data = req->data;
+
+ if (host->version == MMC_CTLR_VERSION_2)
+ fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;
+
+ host->data = data;
+ if (data == NULL) {
+ host->data_dir = DAVINCI_MMC_DATADIR_NONE;
+ writel(0, host->base + DAVINCI_MMCBLEN);
+ writel(0, host->base + DAVINCI_MMCNBLK);
+ return;
+ }
+
+ dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n",
+ (data->flags & MMC_DATA_STREAM) ? "stream" : "block",
+ (data->flags & MMC_DATA_WRITE) ? "write" : "read",
+ data->blocks, data->blksz);
+ dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n",
+ data->timeout_clks, data->timeout_ns);
+ timeout = data->timeout_clks +
+ (data->timeout_ns / host->ns_in_one_cycle);
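+	/* the data time-out counter is 16 bits wide, hence the clamp */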
+ if (timeout > 0xffff)
+ timeout = 0xffff;
+
+ writel(timeout, host->base + DAVINCI_MMCTOD);
+ writel(data->blocks, host->base + DAVINCI_MMCNBLK);
+ writel(data->blksz, host->base + DAVINCI_MMCBLEN);
+
+ /* Configure the FIFO */
+ switch (data->flags & MMC_DATA_WRITE) {
+ case MMC_DATA_WRITE:
+ host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
+ writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
+ host->base + DAVINCI_MMCFIFOCTL);
+ writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
+ host->base + DAVINCI_MMCFIFOCTL);
+ break;
+
+ default:
+ host->data_dir = DAVINCI_MMC_DATADIR_READ;
+ writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
+ host->base + DAVINCI_MMCFIFOCTL);
+ writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
+ host->base + DAVINCI_MMCFIFOCTL);
+ break;
+ }
+
+ host->buffer = NULL;
+ host->bytes_left = data->blocks * data->blksz;
+
+ /* For now we try to use DMA whenever we won't need partial FIFO
+ * reads or writes, either for the whole transfer (as tested here)
+ * or for any individual scatterlist segment (tested when we call
+ * start_dma_transfer).
+ *
+ * While we *could* change that, unusual block sizes are rarely
+	 * used. The occasional fallback to PIO shouldn't hurt.
+ */
+ if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
+ && mmc_davinci_start_dma_transfer(host, data) == 0) {
+ /* zero this to ensure we take no PIO paths */
+ host->bytes_left = 0;
+ } else {
+ /* Revert to CPU Copy */
+ host->sg_len = data->sg_len;
+ host->sg = host->data->sg;
+ mmc_davinci_sg_to_buf(host);
+ }
+}
+
+static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct mmc_davinci_host *host = mmc_priv(mmc);
+ unsigned long timeout = jiffies + msecs_to_jiffies(900);
+ u32 mmcst1 = 0;
+
+ /* Card may still be sending BUSY after a previous operation,
+ * typically some kind of write. If so, we can't proceed yet.
+ */
+ while (time_before(jiffies, timeout)) {
+ mmcst1 = readl(host->base + DAVINCI_MMCST1);
+ if (!(mmcst1 & MMCST1_BUSY))
+ break;
+ cpu_relax();
+ }
+ if (mmcst1 & MMCST1_BUSY) {
+		dev_err(mmc_dev(host->mmc), "still BUSY? bad ...\n");
+ req->cmd->error = -ETIMEDOUT;
+ mmc_request_done(mmc, req);
+ return;
+ }
+
+ host->do_dma = 0;
+ mmc_davinci_prepare_data(host, req);
+ mmc_davinci_start_command(host, req->cmd);
+}
+
+static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
+ unsigned int mmc_req_freq)
+{
+ unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0;
+
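+	/* MMC clock = PCLK / (2 * (divisor + 1)).  E.g. PCLK = 100 MHz and a
+	 * 25 MHz request give divisor = 100/(2*25) - 1 = 1, i.e. exactly
+	 * 25 MHz; if the result overshoots the request, the divisor is
+	 * bumped by one below. */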
+ mmc_pclk = host->mmc_input_clk;
+ if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
+ mmc_push_pull_divisor = ((unsigned int)mmc_pclk
+ / (2 * mmc_req_freq)) - 1;
+ else
+ mmc_push_pull_divisor = 0;
+
+ mmc_freq = (unsigned int)mmc_pclk
+ / (2 * (mmc_push_pull_divisor + 1));
+
+ if (mmc_freq > mmc_req_freq)
+ mmc_push_pull_divisor = mmc_push_pull_divisor + 1;
+ /* Convert ns to clock cycles */
+ if (mmc_req_freq <= 400000)
+ host->ns_in_one_cycle = (1000000) / (((mmc_pclk
+ / (2 * (mmc_push_pull_divisor + 1)))/1000));
+ else
+ host->ns_in_one_cycle = (1000000) / (((mmc_pclk
+ / (2 * (mmc_push_pull_divisor + 1)))/1000000));
+
+ return mmc_push_pull_divisor;
+}
+
+static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ unsigned int open_drain_freq = 0, mmc_pclk = 0;
+ unsigned int mmc_push_pull_freq = 0;
+ struct mmc_davinci_host *host = mmc_priv(mmc);
+
+	mmc_pclk = host->mmc_input_clk;
+
+	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
+ u32 temp;
+
+		/* Ignore the init clock value passed in and use
+		 * MMCSD_INIT_CLOCK instead, for interoperability with
+		 * different cards.
+		 */
+ open_drain_freq = ((unsigned int)mmc_pclk
+ / (2 * MMCSD_INIT_CLOCK)) - 1;
+
+ if (open_drain_freq > 0xFF)
+ open_drain_freq = 0xFF;
+
+ temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
+ temp |= open_drain_freq;
+ writel(temp, host->base + DAVINCI_MMCCLK);
+
+ /* Convert ns to clock cycles */
+ host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
+ } else {
+ u32 temp;
+ mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);
+
+ if (mmc_push_pull_freq > 0xFF)
+ mmc_push_pull_freq = 0xFF;
+
+ temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
+ writel(temp, host->base + DAVINCI_MMCCLK);
+
+ udelay(10);
+
+ temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
+ temp |= mmc_push_pull_freq;
+ writel(temp, host->base + DAVINCI_MMCCLK);
+
+ writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
+
+ udelay(10);
+ }
+}
+
+static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mmc_davinci_host *host = mmc_priv(mmc);
+ struct platform_device *pdev = to_platform_device(mmc->parent);
+ struct davinci_mmc_config *config = pdev->dev.platform_data;
+
+ dev_dbg(mmc_dev(host->mmc),
+ "clock %dHz busmode %d powermode %d Vdd %04x\n",
+ ios->clock, ios->bus_mode, ios->power_mode,
+ ios->vdd);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ if (config && config->set_power)
+ config->set_power(pdev->id, false);
+ break;
+ case MMC_POWER_UP:
+ if (config && config->set_power)
+ config->set_power(pdev->id, true);
+ break;
+ }
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_8:
+ dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n");
+ writel((readl(host->base + DAVINCI_MMCCTL) &
+ ~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT,
+ host->base + DAVINCI_MMCCTL);
+ break;
+ case MMC_BUS_WIDTH_4:
+ dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
+ if (host->version == MMC_CTLR_VERSION_2)
+ writel((readl(host->base + DAVINCI_MMCCTL) &
+ ~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT,
+ host->base + DAVINCI_MMCCTL);
+ else
+ writel(readl(host->base + DAVINCI_MMCCTL) |
+ MMCCTL_WIDTH_4_BIT,
+ host->base + DAVINCI_MMCCTL);
+ break;
+ case MMC_BUS_WIDTH_1:
+ dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n");
+ if (host->version == MMC_CTLR_VERSION_2)
+ writel(readl(host->base + DAVINCI_MMCCTL) &
+ ~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT),
+ host->base + DAVINCI_MMCCTL);
+ else
+ writel(readl(host->base + DAVINCI_MMCCTL) &
+ ~MMCCTL_WIDTH_4_BIT,
+ host->base + DAVINCI_MMCCTL);
+ break;
+ }
+
+ calculate_clk_divider(mmc, ios);
+
+ host->bus_mode = ios->bus_mode;
+ if (ios->power_mode == MMC_POWER_UP) {
+ unsigned long timeout = jiffies + msecs_to_jiffies(50);
+ bool lose = true;
+
+ /* Send clock cycles, poll completion */
+ writel(0, host->base + DAVINCI_MMCARGHL);
+ writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
+ while (time_before(jiffies, timeout)) {
+ u32 tmp = readl(host->base + DAVINCI_MMCST0);
+
+ if (tmp & MMCST0_RSPDNE) {
+ lose = false;
+ break;
+ }
+ cpu_relax();
+ }
+ if (lose)
+ dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
+ }
+
+ /* FIXME on power OFF, reset things ... */
+}
+
+static void
+mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
+{
+ host->data = NULL;
+
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
+ /*
+ * SDIO Interrupt Detection work-around as suggested by
+ * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
+ * 2.1.6): Signal SDIO interrupt only if it is enabled by core
+ */
+ if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
+ SDIOST0_DAT1_HI)) {
+ writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+ mmc_signal_sdio_irq(host->mmc);
+ }
+ }
+
+ if (host->do_dma) {
+ davinci_abort_dma(host);
+
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ host->do_dma = false;
+ }
+ host->data_dir = DAVINCI_MMC_DATADIR_NONE;
+
+ if (!data->stop || (host->cmd && host->cmd->error)) {
+ mmc_request_done(host->mmc, data->mrq);
+ writel(0, host->base + DAVINCI_MMCIM);
+ host->active_request = false;
+ } else
+ mmc_davinci_start_command(host, data->stop);
+}
+
+static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
+ struct mmc_command *cmd)
+{
+ host->cmd = NULL;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ /* response type 2 */
+ cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
+ cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
+ cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
+ cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
+ } else {
+ /* response types 1, 1b, 3, 4, 5, 6 */
+ cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
+ }
+ }
+
+ if (host->data == NULL || cmd->error) {
+ if (cmd->error == -ETIMEDOUT)
+ cmd->mrq->cmd->retries = 0;
+ mmc_request_done(host->mmc, cmd->mrq);
+ writel(0, host->base + DAVINCI_MMCIM);
+ host->active_request = false;
+ }
+}
+
+static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
+ int val)
+{
+ u32 temp;
+
+ temp = readl(host->base + DAVINCI_MMCCTL);
+ if (val) /* reset */
+ temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
+ else /* enable */
+ temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
+
+ writel(temp, host->base + DAVINCI_MMCCTL);
+ udelay(10);
+}
+
+static void
+davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
+{
+ mmc_davinci_reset_ctrl(host, 1);
+ mmc_davinci_reset_ctrl(host, 0);
+}
+
+static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
+{
+ struct mmc_davinci_host *host = dev_id;
+ unsigned int status;
+
+ status = readl(host->base + DAVINCI_SDIOIST);
+ if (status & SDIOIST_IOINT) {
+ dev_dbg(mmc_dev(host->mmc),
+ "SDIO interrupt status %x\n", status);
+ writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+ mmc_signal_sdio_irq(host->mmc);
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
+{
+ struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
+ unsigned int status, qstatus;
+ int end_command = 0;
+ int end_transfer = 0;
+ struct mmc_data *data = host->data;
+
+ if (host->cmd == NULL && host->data == NULL) {
+ status = readl(host->base + DAVINCI_MMCST0);
+ dev_dbg(mmc_dev(host->mmc),
+ "Spurious interrupt 0x%04x\n", status);
+ /* Disable the interrupt from mmcsd */
+ writel(0, host->base + DAVINCI_MMCIM);
+ return IRQ_NONE;
+ }
+
+ status = readl(host->base + DAVINCI_MMCST0);
+ qstatus = status;
+
+	/* Handle the FIFO first when using PIO for data.
+	 * bytes_left will decrease to zero as I/O progresses, and status will
+	 * read as zero over iterations because this controller's status
+	 * register (MMCST0) reports any status only once and is cleared by
+	 * the read.  So this is not an unbounded loop even in the non-DMA
+	 * case.
+	 */
+ if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
+ unsigned long im_val;
+
+ /*
+ * If interrupts fire during the following loop, they will be
+ * handled by the handler, but the PIC will still buffer these.
+ * As a result, the handler will be called again to serve these
+ * needlessly. In order to avoid these spurious interrupts,
+ * keep interrupts masked during the loop.
+ */
+ im_val = readl(host->base + DAVINCI_MMCIM);
+ writel(0, host->base + DAVINCI_MMCIM);
+
+ do {
+ davinci_fifo_data_trans(host, rw_threshold);
+ status = readl(host->base + DAVINCI_MMCST0);
+ qstatus |= status;
+ } while (host->bytes_left &&
+ (status & (MMCST0_DXRDY | MMCST0_DRRDY)));
+
+ /*
+ * If an interrupt is pending, it is assumed it will fire when
+ * it is unmasked. This assumption is also taken when the MMCIM
+ * is first set. Otherwise, writing to MMCIM after reading the
+ * status is race-prone.
+ */
+ writel(im_val, host->base + DAVINCI_MMCIM);
+ }
+
+ if (qstatus & MMCST0_DATDNE) {
+ /* All blocks sent/received, and CRC checks passed */
+ if (data != NULL) {
+ if ((host->do_dma == 0) && (host->bytes_left > 0)) {
+ /* if datasize < rw_threshold
+ * no RX ints are generated
+ */
+ davinci_fifo_data_trans(host, host->bytes_left);
+ }
+ end_transfer = 1;
+ data->bytes_xfered = data->blocks * data->blksz;
+ } else {
+ dev_err(mmc_dev(host->mmc),
+ "DATDNE with no host->data\n");
+ }
+ }
+
+ if (qstatus & MMCST0_TOUTRD) {
+ /* Read data timeout */
+ data->error = -ETIMEDOUT;
+ end_transfer = 1;
+
+ dev_dbg(mmc_dev(host->mmc),
+ "read data timeout, status %x\n",
+ qstatus);
+
+ davinci_abort_data(host, data);
+ }
+
+ if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
+ /* Data CRC error */
+ data->error = -EILSEQ;
+ end_transfer = 1;
+
+ /* NOTE: this controller uses CRCWR to report both CRC
+ * errors and timeouts (on writes). MMCDRSP values are
+ * only weakly documented, but 0x9f was clearly a timeout
+ * case and the two three-bit patterns in various SD specs
+ * (101, 010) aren't part of it ...
+ */
+ if (qstatus & MMCST0_CRCWR) {
+ u32 temp = readb(host->base + DAVINCI_MMCDRSP);
+
+ if (temp == 0x9f)
+ data->error = -ETIMEDOUT;
+ }
+ dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
+ (qstatus & MMCST0_CRCWR) ? "write" : "read",
+ (data->error == -ETIMEDOUT) ? "timeout" : "CRC");
+
+ davinci_abort_data(host, data);
+ }
+
+ if (qstatus & MMCST0_TOUTRS) {
+ /* Command timeout */
+ if (host->cmd) {
+ dev_dbg(mmc_dev(host->mmc),
+ "CMD%d timeout, status %x\n",
+ host->cmd->opcode, qstatus);
+ host->cmd->error = -ETIMEDOUT;
+ if (data) {
+ end_transfer = 1;
+ davinci_abort_data(host, data);
+ } else
+ end_command = 1;
+ }
+ }
+
+ if (qstatus & MMCST0_CRCRS) {
+ /* Command CRC error */
+ dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
+ if (host->cmd) {
+ host->cmd->error = -EILSEQ;
+ end_command = 1;
+ }
+ }
+
+ if (qstatus & MMCST0_RSPDNE) {
+ /* End of command phase */
+		end_command = (host->cmd != NULL);
+ }
+
+ if (end_command)
+ mmc_davinci_cmd_done(host, host->cmd);
+ if (end_transfer)
+ mmc_davinci_xfer_done(host, data);
+ return IRQ_HANDLED;
+}
+
+static int mmc_davinci_get_cd(struct mmc_host *mmc)
+{
+ struct platform_device *pdev = to_platform_device(mmc->parent);
+ struct davinci_mmc_config *config = pdev->dev.platform_data;
+
+ if (!config || !config->get_cd)
+ return -ENOSYS;
+ return config->get_cd(pdev->id);
+}
+
+static int mmc_davinci_get_ro(struct mmc_host *mmc)
+{
+ struct platform_device *pdev = to_platform_device(mmc->parent);
+ struct davinci_mmc_config *config = pdev->dev.platform_data;
+
+ if (!config || !config->get_ro)
+ return -ENOSYS;
+ return config->get_ro(pdev->id);
+}
+
+static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mmc_davinci_host *host = mmc_priv(mmc);
+
+ if (enable) {
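+		/*
+		 * If DAT1 is already low, a card interrupt is pending:
+		 * ack it and signal the core immediately instead of
+		 * waiting for the next falling edge.
+		 */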
+ if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
+ writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+ mmc_signal_sdio_irq(host->mmc);
+ } else {
+ host->sdio_int = true;
+ writel(readl(host->base + DAVINCI_SDIOIEN) |
+ SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
+ }
+ } else {
+ host->sdio_int = false;
+ writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
+ host->base + DAVINCI_SDIOIEN);
+ }
+}
+
+static struct mmc_host_ops mmc_davinci_ops = {
+ .request = mmc_davinci_request,
+ .set_ios = mmc_davinci_set_ios,
+ .get_cd = mmc_davinci_get_cd,
+ .get_ro = mmc_davinci_get_ro,
+ .enable_sdio_irq = mmc_davinci_enable_sdio_irq,
+};
+
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_CPU_FREQ
+static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct mmc_davinci_host *host;
+ unsigned int mmc_pclk;
+ struct mmc_host *mmc;
+ unsigned long flags;
+
+ host = container_of(nb, struct mmc_davinci_host, freq_transition);
+ mmc = host->mmc;
+ mmc_pclk = clk_get_rate(host->clk);
+
+ if (val == CPUFREQ_POSTCHANGE) {
+ spin_lock_irqsave(&mmc->lock, flags);
+ host->mmc_input_clk = mmc_pclk;
+ calculate_clk_divider(mmc, &mmc->ios);
+ spin_unlock_irqrestore(&mmc->lock, flags);
+ }
+
+ return 0;
+}
+
+static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
+{
+ host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;
+
+ return cpufreq_register_notifier(&host->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
+{
+ cpufreq_unregister_notifier(&host->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+#else
+static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
+{
+ return 0;
+}
+
+static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
+{
+}
+#endif
+
+static void __init init_mmcsd_host(struct mmc_davinci_host *host)
+{
+ mmc_davinci_reset_ctrl(host, 1);
+
+ writel(0, host->base + DAVINCI_MMCCLK);
+ writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
+
+ writel(0x1FFF, host->base + DAVINCI_MMCTOR);
+ writel(0xFFFF, host->base + DAVINCI_MMCTOD);
+
+ mmc_davinci_reset_ctrl(host, 0);
+}
+
+static struct platform_device_id davinci_mmc_devtype[] = {
+ {
+ .name = "dm6441-mmc",
+ .driver_data = MMC_CTLR_VERSION_1,
+ }, {
+ .name = "da830-mmc",
+ .driver_data = MMC_CTLR_VERSION_2,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype);
+
+static const struct of_device_id davinci_mmc_dt_ids[] = {
+ {
+ .compatible = "ti,dm6441-mmc",
+ .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1],
+ },
+ {
+ .compatible = "ti,da830-mmc",
+ .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2],
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids);
+
+static struct davinci_mmc_config
+ *mmc_parse_pdata(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct davinci_mmc_config *pdata = pdev->dev.platform_data;
+ const struct of_device_id *match =
+ of_match_device(davinci_mmc_dt_ids, &pdev->dev);
+ u32 data;
+
+ np = pdev->dev.of_node;
+ if (!np)
+ return pdata;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Failed to allocate memory for struct davinci_mmc_config\n");
+ goto nodata;
+ }
+
+ if (match)
+ pdev->id_entry = match->data;
+
+ if (of_property_read_u32(np, "max-frequency", &pdata->max_freq))
+ dev_info(&pdev->dev, "'max-frequency' property not specified, defaulting to 25MHz\n");
+
+ of_property_read_u32(np, "bus-width", &data);
+ switch (data) {
+ case 1:
+ case 4:
+ case 8:
+ pdata->wires = data;
+ break;
+ default:
+ pdata->wires = 1;
+		dev_info(&pdev->dev, "Unsupported bus width, defaulting to 1 bit\n");
+ }
+nodata:
+ return pdata;
+}
+
+static int __init davinci_mmcsd_probe(struct platform_device *pdev)
+{
+ struct davinci_mmc_config *pdata = NULL;
+ struct mmc_davinci_host *host = NULL;
+ struct mmc_host *mmc = NULL;
+ struct resource *r, *mem = NULL;
+ int ret = 0, irq = 0;
+ size_t mem_size;
+ const struct platform_device_id *id_entry;
+
+ pdata = mmc_parse_pdata(pdev);
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "Couldn't get platform data\n");
+ return -ENOENT;
+ }
+
+ ret = -ENODEV;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!r || irq == NO_IRQ)
+ goto out;
+
+ ret = -EBUSY;
+ mem_size = resource_size(r);
+ mem = request_mem_region(r->start, mem_size, pdev->name);
+ if (!mem)
+ goto out;
+
+ ret = -ENOMEM;
+ mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
+ if (!mmc)
+ goto out;
+
+ host = mmc_priv(mmc);
+	host->mmc = mmc;	/* host back-pointer, used by IRQ and callback paths */
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!r)
+ dev_warn(&pdev->dev, "RX DMA resource not specified\n");
+ else
+ host->rxdma = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!r)
+ dev_warn(&pdev->dev, "TX DMA resource not specified\n");
+ else
+ host->txdma = r->start;
+
+ host->mem_res = mem;
+ host->base = ioremap(mem->start, mem_size);
+ if (!host->base)
+ goto out;
+
+ ret = -ENXIO;
+ host->clk = clk_get(&pdev->dev, "MMCSDCLK");
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto out;
+ }
+ clk_enable(host->clk);
+ host->mmc_input_clk = clk_get_rate(host->clk);
+
+ init_mmcsd_host(host);
+
+ if (pdata->nr_sg)
+ host->nr_sg = pdata->nr_sg - 1;
+
+ if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
+ host->nr_sg = MAX_NR_SG;
+
+ host->use_dma = use_dma;
+ host->mmc_irq = irq;
+ host->sdio_irq = platform_get_irq(pdev, 1);
+
+ if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
+ host->use_dma = 0;
+
+ /* REVISIT: someday, support IRQ-driven card detection. */
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+ mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+
+ if (pdata && (pdata->wires == 4 || pdata->wires == 0))
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ if (pdata && (pdata->wires == 8))
+ mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);
+
+ id_entry = platform_get_device_id(pdev);
+ if (id_entry)
+ host->version = id_entry->driver_data;
+
+ mmc->ops = &mmc_davinci_ops;
+ mmc->f_min = 312500;
+ mmc->f_max = 25000000;
+ if (pdata && pdata->max_freq)
+ mmc->f_max = pdata->max_freq;
+ if (pdata && pdata->caps)
+ mmc->caps |= pdata->caps;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+ /* With no iommu coalescing pages, each phys_seg is a hw_seg.
+ * Each hw_seg uses one EDMA parameter RAM slot, always one
+ * channel and then usually some linked slots.
+ */
+ mmc->max_segs = MAX_NR_SG;
+
+ /* EDMA limit per hw segment (one or two MBytes) */
+ mmc->max_seg_size = MAX_CCNT * rw_threshold;
+
+ /* MMC/SD controller limits for multiblock requests */
+ mmc->max_blk_size = 4095; /* BLEN is 12 bits */
+ mmc->max_blk_count = 65535; /* NBLK is 16 bits */
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+
+ dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
+ dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
+ dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
+ dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);
+
+ platform_set_drvdata(pdev, host);
+
+ ret = mmc_davinci_cpufreq_register(host);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register cpufreq\n");
+ goto cpu_freq_fail;
+ }
+
+ ret = mmc_add_host(mmc);
+ if (ret < 0)
+ goto out;
+
+ ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host);
+ if (ret)
+ goto out;
+
+ if (host->sdio_irq >= 0) {
+ ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
+ mmc_hostname(mmc), host);
+ if (!ret)
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+ }
+
+ rename_region(mem, mmc_hostname(mmc));
+
+ dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
+ host->use_dma ? "DMA" : "PIO",
+ (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
+
+ return 0;
+
+out:
+ mmc_davinci_cpufreq_deregister(host);
+cpu_freq_fail:
+ if (host) {
+ davinci_release_dma_channels(host);
+
+ if (host->clk) {
+ clk_disable(host->clk);
+ clk_put(host->clk);
+ }
+
+ if (host->base)
+ iounmap(host->base);
+ }
+
+ if (mmc)
+ mmc_free_host(mmc);
+
+ if (mem)
+ release_resource(mem);
+
+ dev_dbg(&pdev->dev, "probe err %d\n", ret);
+
+ return ret;
+}
+
+static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
+{
+ struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+
+ if (host) {
+ mmc_davinci_cpufreq_deregister(host);
+
+ mmc_remove_host(host->mmc);
+ free_irq(host->mmc_irq, host);
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
+ free_irq(host->sdio_irq, host);
+
+ davinci_release_dma_channels(host);
+
+ clk_disable(host->clk);
+ clk_put(host->clk);
+
+ iounmap(host->base);
+
+ release_resource(host->mem_res);
+
+ mmc_free_host(host->mmc);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int davinci_mmcsd_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+
+ writel(0, host->base + DAVINCI_MMCIM);
+ mmc_davinci_reset_ctrl(host, 1);
+ clk_disable(host->clk);
+
+ return 0;
+}
+
+static int davinci_mmcsd_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+
+ clk_enable(host->clk);
+ mmc_davinci_reset_ctrl(host, 0);
+
+ return 0;
+}
+
+static const struct dev_pm_ops davinci_mmcsd_pm = {
+ .suspend = davinci_mmcsd_suspend,
+ .resume = davinci_mmcsd_resume,
+};
+
+#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
+#else
+#define davinci_mmcsd_pm_ops NULL
+#endif
+
+static struct platform_driver davinci_mmcsd_driver = {
+ .driver = {
+ .name = "davinci_mmc",
+ .pm = davinci_mmcsd_pm_ops,
+ .of_match_table = davinci_mmc_dt_ids,
+ },
+ .remove = __exit_p(davinci_mmcsd_remove),
+ .id_table = davinci_mmc_devtype,
+};
+
+module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe);
+
+MODULE_AUTHOR("Texas Instruments India");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
+MODULE_ALIAS("platform:davinci_mmc");
+
diff --git a/kernel/drivers/mmc/host/dw_mmc-exynos.c b/kernel/drivers/mmc/host/dw_mmc-exynos.c
new file mode 100644
index 000000000..e761eb1b1
--- /dev/null
+++ b/kernel/drivers/mmc/host/dw_mmc-exynos.c
@@ -0,0 +1,559 @@
+/*
+ * Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver
+ *
+ * Copyright (C) 2012, Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/mmc/mmc.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+#include "dw_mmc-exynos.h"
+
+/* Variations in Exynos specific dw-mshc controller */
+enum dw_mci_exynos_type {
+ DW_MCI_TYPE_EXYNOS4210,
+ DW_MCI_TYPE_EXYNOS4412,
+ DW_MCI_TYPE_EXYNOS5250,
+ DW_MCI_TYPE_EXYNOS5420,
+ DW_MCI_TYPE_EXYNOS5420_SMU,
+ DW_MCI_TYPE_EXYNOS7,
+ DW_MCI_TYPE_EXYNOS7_SMU,
+};
+
+/* Exynos implementation specific driver private data */
+struct dw_mci_exynos_priv_data {
+ enum dw_mci_exynos_type ctrl_type;
+ u8 ciu_div;
+ u32 sdr_timing;
+ u32 ddr_timing;
+ u32 hs400_timing;
+ u32 tuned_sample;
+ u32 cur_speed;
+ u32 dqs_delay;
+ u32 saved_dqs_en;
+ u32 saved_strobe_ctrl;
+};
+
+static struct dw_mci_exynos_compatible {
+ char *compatible;
+ enum dw_mci_exynos_type ctrl_type;
+} exynos_compat[] = {
+ {
+ .compatible = "samsung,exynos4210-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS4210,
+ }, {
+ .compatible = "samsung,exynos4412-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS4412,
+ }, {
+ .compatible = "samsung,exynos5250-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS5250,
+ }, {
+ .compatible = "samsung,exynos5420-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS5420,
+ }, {
+ .compatible = "samsung,exynos5420-dw-mshc-smu",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS5420_SMU,
+ }, {
+ .compatible = "samsung,exynos7-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS7,
+ }, {
+ .compatible = "samsung,exynos7-dw-mshc-smu",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS7_SMU,
+ },
+};
+
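+/*
+ * The CLKSEL divider field stores the divide ratio minus one, hence the
+ * "+ 1" below; Exynos4210/4412 instead use a fixed divider.
+ */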
+static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
+ return EXYNOS4412_FIXED_CIU_CLK_DIV;
+ else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
+ return EXYNOS4210_FIXED_CIU_CLK_DIV;
+ else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1;
+ else
+ return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL)) + 1;
+}
+
+static int dw_mci_exynos_priv_init(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
+ mci_writel(host, MPSBEGIN0, 0);
+ mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX);
+ mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT |
+ SDMMC_MPSCTRL_NON_SECURE_READ_BIT |
+ SDMMC_MPSCTRL_VALID |
+ SDMMC_MPSCTRL_NON_SECURE_WRITE_BIT);
+ }
+
+ if (priv->ctrl_type >= DW_MCI_TYPE_EXYNOS5420) {
+ priv->saved_strobe_ctrl = mci_readl(host, HS400_DLINE_CTRL);
+ priv->saved_dqs_en = mci_readl(host, HS400_DQS_EN);
+ priv->saved_dqs_en |= AXI_NON_BLOCKING_WR;
+ mci_writel(host, HS400_DQS_EN, priv->saved_dqs_en);
+ if (!priv->dqs_delay)
+ priv->dqs_delay =
+ DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
+ }
+
+ return 0;
+}
+
+static int dw_mci_exynos_setup_clock(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ host->bus_hz /= (priv->ciu_div + 1);
+
+ return 0;
+}
+
+static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ u32 clksel;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ clksel = mci_readl(host, CLKSEL64);
+ else
+ clksel = mci_readl(host, CLKSEL);
+
+ clksel = (clksel & ~SDMMC_CLKSEL_TIMING_MASK) | timing;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ mci_writel(host, CLKSEL64, clksel);
+ else
+ mci_writel(host, CLKSEL, clksel);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dw_mci_exynos_suspend(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ return dw_mci_suspend(host);
+}
+
+static int dw_mci_exynos_resume(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ dw_mci_exynos_priv_init(host);
+ return dw_mci_resume(host);
+}
+
+/**
+ * dw_mci_exynos_resume_noirq - Exynos-specific resume code
+ *
+ * On exynos5420 there is a silicon errata that will sometimes leave the
+ * WAKEUP_INT bit in the CLKSEL register asserted. This bit is 1 to indicate
+ * that it fired and we can clear it by writing a 1 back. Clear it to prevent
+ * interrupts from going off constantly.
+ *
+ * We run this code on all exynos variants because it doesn't hurt.
+ */
+
+static int dw_mci_exynos_resume_noirq(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ u32 clksel;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ clksel = mci_readl(host, CLKSEL64);
+ else
+ clksel = mci_readl(host, CLKSEL);
+
+ if (clksel & SDMMC_CLKSEL_WAKEUP_INT) {
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ mci_writel(host, CLKSEL64, clksel);
+ else
+ mci_writel(host, CLKSEL, clksel);
+ }
+
+ return 0;
+}
+#else
+#define dw_mci_exynos_suspend NULL
+#define dw_mci_exynos_resume NULL
+#define dw_mci_exynos_resume_noirq NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ /*
+	 * Exynos4412 and Exynos5250 extend the use of CMD register with the
+ * use of bit 29 (which is reserved on standard MSHC controllers) for
+ * optionally bypassing the HOLD register for command and data. The
+ * HOLD register should be bypassed in case there is no phase shift
+ * applied on CMD/DATA that is sent to the card.
+ */
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
+ if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL64)))
+ *cmdr |= SDMMC_CMD_USE_HOLD_REG;
+ } else {
+ if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL)))
+ *cmdr |= SDMMC_CMD_USE_HOLD_REG;
+ }
+}
+
+static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ u32 dqs, strobe;
+
+	/*
+	 * Controllers older than Exynos5420 cannot configure the
+	 * HS400-related registers, so there is nothing to do here.
+	 */
+ if (priv->ctrl_type < DW_MCI_TYPE_EXYNOS5420)
+ return;
+
+ dqs = priv->saved_dqs_en;
+ strobe = priv->saved_strobe_ctrl;
+
+ if (timing == MMC_TIMING_MMC_HS400) {
+ dqs |= DATA_STROBE_EN;
+ strobe = DQS_CTRL_RD_DELAY(strobe, priv->dqs_delay);
+ } else {
+ dqs &= ~DATA_STROBE_EN;
+ }
+
+ mci_writel(host, HS400_DQS_EN, dqs);
+ mci_writel(host, HS400_DLINE_CTRL, strobe);
+}
+
+static void dw_mci_exynos_adjust_clock(struct dw_mci *host, unsigned int wanted)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ unsigned long actual;
+ u8 div;
+ int ret;
+	/*
+	 * Nothing to do if the wanted clock is zero or the ciu clock
+	 * is unavailable.
+	 */
+ if (!wanted || IS_ERR(host->ciu_clk))
+ return;
+
+ /* Guaranteed minimum frequency for cclkin */
+ if (wanted < EXYNOS_CCLKIN_MIN)
+ wanted = EXYNOS_CCLKIN_MIN;
+
+ if (wanted == priv->cur_speed)
+ return;
+
+ div = dw_mci_exynos_get_ciu_div(host);
+ ret = clk_set_rate(host->ciu_clk, wanted * div);
+ if (ret)
+ dev_warn(host->dev,
+ "failed to set clk-rate %u error: %d\n",
+ wanted * div, ret);
+ actual = clk_get_rate(host->ciu_clk);
+ host->bus_hz = actual / div;
+ priv->cur_speed = wanted;
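+	/* zero current_speed to force dw_mci_setup_bus() to re-program */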
+ host->current_speed = 0;
+}
+
+static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ unsigned int wanted = ios->clock;
+ u32 timing = ios->timing, clksel;
+
+ switch (timing) {
+ case MMC_TIMING_MMC_HS400:
+ /* Update tuned sample timing */
+ clksel = SDMMC_CLKSEL_UP_SAMPLE(
+ priv->hs400_timing, priv->tuned_sample);
+ wanted <<= 1;
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ clksel = priv->ddr_timing;
+ /* Should be double rate for DDR mode */
+ if (ios->bus_width == MMC_BUS_WIDTH_8)
+ wanted <<= 1;
+ break;
+ default:
+ clksel = priv->sdr_timing;
+ }
+
+	/* Set clock timing for the requested speed mode */
+ dw_mci_exynos_set_clksel_timing(host, clksel);
+
+ /* Configure setting for HS400 */
+ dw_mci_exynos_config_hs400(host, timing);
+
+ /* Configure clock rate */
+ dw_mci_exynos_adjust_clock(host, wanted);
+}
+
+static int dw_mci_exynos_parse_dt(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv;
+ struct device_node *np = host->dev->of_node;
+ u32 timing[2];
+ u32 div = 0;
+ int idx;
+ int ret;
+
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) {
+ if (of_device_is_compatible(np, exynos_compat[idx].compatible))
+ priv->ctrl_type = exynos_compat[idx].ctrl_type;
+ }
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
+ priv->ciu_div = EXYNOS4412_FIXED_CIU_CLK_DIV - 1;
+ else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
+ priv->ciu_div = EXYNOS4210_FIXED_CIU_CLK_DIV - 1;
+ else {
+ of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div);
+ priv->ciu_div = div;
+ }
+
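+	/*
+	 * Each samsung,dw-mshc-*-timing property below carries two cells,
+	 * which map to the CIU clock sample and drive phase shifts.
+	 */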
+ ret = of_property_read_u32_array(np,
+ "samsung,dw-mshc-sdr-timing", timing, 2);
+ if (ret)
+ return ret;
+
+ priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
+
+ ret = of_property_read_u32_array(np,
+ "samsung,dw-mshc-ddr-timing", timing, 2);
+ if (ret)
+ return ret;
+
+ priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
+
+ ret = of_property_read_u32_array(np,
+ "samsung,dw-mshc-hs400-timing", timing, 2);
+ if (!ret && of_property_read_u32(np,
+ "samsung,read-strobe-delay", &priv->dqs_delay))
+ dev_dbg(host->dev,
+			"read-strobe-delay not found, using the default value\n");
+
+ priv->hs400_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1],
+ HS400_FIXED_CIU_CLK_DIV);
+ host->priv = priv;
+ return 0;
+}
+
+static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64));
+ else
+ return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL));
+}
+
+static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
+{
+ u32 clksel;
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ clksel = mci_readl(host, CLKSEL64);
+ else
+ clksel = mci_readl(host, CLKSEL);
+ clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ mci_writel(host, CLKSEL64, clksel);
+ else
+ mci_writel(host, CLKSEL, clksel);
+}
+
+static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ u32 clksel;
+ u8 sample;
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ clksel = mci_readl(host, CLKSEL64);
+ else
+ clksel = mci_readl(host, CLKSEL);
+
+ sample = (clksel + 1) & 0x7;
+ clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
+
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
+ mci_writel(host, CLKSEL64, clksel);
+ else
+ mci_writel(host, CLKSEL, clksel);
+
+ return sample;
+}
+
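+/*
+ * Pick the best sample phase from the tuning bitmap: each set bit in
+ * @candidates marks a phase for which tuning succeeded. Rotating with
+ * ror8(candidates, i) and masking with 0xc7 (0b11000111) tests whether
+ * phase i sits in the middle of five consecutive passing phases
+ * (i-2 .. i+2, mod 8); 0x83 (0b10000011) is the narrower three-phase
+ * fallback window.
+ */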
+static s8 dw_mci_exynos_get_best_clksmpl(u8 candidates)
+{
+ const u8 iter = 8;
+ u8 __c;
+ s8 i, loc = -1;
+
+ for (i = 0; i < iter; i++) {
+		__c = ror8(candidates, i);
+ if ((__c & 0xc7) == 0xc7) {
+ loc = i;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < iter; i++) {
+		__c = ror8(candidates, i);
+ if ((__c & 0x83) == 0x83) {
+ loc = i;
+ goto out;
+ }
+ }
+
+out:
+ return loc;
+}
+
+static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot)
+{
+ struct dw_mci *host = slot->host;
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ struct mmc_host *mmc = slot->mmc;
+	u8 start_smpl, smpl, candidates = 0;
+ s8 found = -1;
+ int ret = 0;
+
+ start_smpl = dw_mci_exynos_get_clksmpl(host);
+
+ do {
+ mci_writel(host, TMOUT, ~0);
+ smpl = dw_mci_exynos_move_next_clksmpl(host);
+
+ if (!mmc_send_tuning(mmc))
+			candidates |= (1 << smpl);
+
+ } while (start_smpl != smpl);
+
+	found = dw_mci_exynos_get_best_clksmpl(candidates);
+ if (found >= 0) {
+ dw_mci_exynos_set_clksmpl(host, found);
+ priv->tuned_sample = found;
+ } else {
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int dw_mci_exynos_prepare_hs400_tuning(struct dw_mci *host,
+ struct mmc_ios *ios)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ dw_mci_exynos_set_clksel_timing(host, priv->hs400_timing);
+ dw_mci_exynos_adjust_clock(host, (ios->clock) << 1);
+
+ return 0;
+}
+
+/* Common capabilities of Exynos4/Exynos5 SoC */
+static unsigned long exynos_dwmmc_caps[4] = {
+ MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+};
+
+static const struct dw_mci_drv_data exynos_drv_data = {
+ .caps = exynos_dwmmc_caps,
+ .init = dw_mci_exynos_priv_init,
+ .setup_clock = dw_mci_exynos_setup_clock,
+ .prepare_command = dw_mci_exynos_prepare_command,
+ .set_ios = dw_mci_exynos_set_ios,
+ .parse_dt = dw_mci_exynos_parse_dt,
+ .execute_tuning = dw_mci_exynos_execute_tuning,
+ .prepare_hs400_tuning = dw_mci_exynos_prepare_hs400_tuning,
+};
+
+static const struct of_device_id dw_mci_exynos_match[] = {
+ { .compatible = "samsung,exynos4412-dw-mshc",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos5250-dw-mshc",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos5420-dw-mshc",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos5420-dw-mshc-smu",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos7-dw-mshc",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos7-dw-mshc-smu",
+ .data = &exynos_drv_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
+
+static int dw_mci_exynos_probe(struct platform_device *pdev)
+{
+ const struct dw_mci_drv_data *drv_data;
+ const struct of_device_id *match;
+
+ match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node);
+ drv_data = match->data;
+ return dw_mci_pltfm_register(pdev, drv_data);
+}
+
+static const struct dev_pm_ops dw_mci_exynos_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend, dw_mci_exynos_resume)
+ .resume_noirq = dw_mci_exynos_resume_noirq,
+ .thaw_noirq = dw_mci_exynos_resume_noirq,
+ .restore_noirq = dw_mci_exynos_resume_noirq,
+};
+
+static struct platform_driver dw_mci_exynos_pltfm_driver = {
+ .probe = dw_mci_exynos_probe,
+ .remove = dw_mci_pltfm_remove,
+ .driver = {
+ .name = "dwmmc_exynos",
+ .of_match_table = dw_mci_exynos_match,
+ .pm = &dw_mci_exynos_pmops,
+ },
+};
+
+module_platform_driver(dw_mci_exynos_pltfm_driver);
+
+MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension");
+MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dwmmc-exynos");
diff --git a/kernel/drivers/mmc/host/dw_mmc-exynos.h b/kernel/drivers/mmc/host/dw_mmc-exynos.h
new file mode 100644
index 000000000..595c934e6
--- /dev/null
+++ b/kernel/drivers/mmc/host/dw_mmc-exynos.h
@@ -0,0 +1,73 @@
+/*
+ * Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver
+ *
+ * Copyright (C) 2012-2014 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DW_MMC_EXYNOS_H_
+#define _DW_MMC_EXYNOS_H_
+
+#define SDMMC_CLKSEL 0x09C
+#define SDMMC_CLKSEL64 0x0A8
+
+/* Extended Register's Offset */
+#define SDMMC_HS400_DQS_EN 0x180
+#define SDMMC_HS400_ASYNC_FIFO_CTRL 0x184
+#define SDMMC_HS400_DLINE_CTRL 0x188
+
+/* CLKSEL register defines */
+#define SDMMC_CLKSEL_CCLK_SAMPLE(x) (((x) & 7) << 0)
+#define SDMMC_CLKSEL_CCLK_DRIVE(x) (((x) & 7) << 16)
+#define SDMMC_CLKSEL_CCLK_DIVIDER(x) (((x) & 7) << 24)
+#define SDMMC_CLKSEL_GET_DRV_WD3(x) (((x) >> 16) & 0x7)
+#define SDMMC_CLKSEL_GET_DIV(x) (((x) >> 24) & 0x7)
+#define SDMMC_CLKSEL_UP_SAMPLE(x, y) (((x) & ~SDMMC_CLKSEL_CCLK_SAMPLE(7)) |\
+ SDMMC_CLKSEL_CCLK_SAMPLE(y))
+#define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \
+ SDMMC_CLKSEL_CCLK_DRIVE(y) | \
+ SDMMC_CLKSEL_CCLK_DIVIDER(z))
+#define SDMMC_CLKSEL_TIMING_MASK SDMMC_CLKSEL_TIMING(0x7, 0x7, 0x7)
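+/*
+ * Example: SDMMC_CLKSEL_TIMING(2, 1, 0) puts sample phase 2 in bits [2:0],
+ * drive phase 1 in bits [18:16] and divider 0 in bits [26:24], i.e. the
+ * value 0x00010002.
+ */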
+#define SDMMC_CLKSEL_WAKEUP_INT BIT(11)
+
+/* RCLK_EN register defines */
+#define DATA_STROBE_EN BIT(0)
+#define AXI_NON_BLOCKING_WR BIT(7)
+
+/* DLINE_CTRL register defines */
+#define DQS_CTRL_RD_DELAY(x, y) (((x) & ~0x3FF) | ((y) & 0x3FF))
+#define DQS_CTRL_GET_RD_DELAY(x) ((x) & 0x3FF)
+
+/* Protector Register */
+#define SDMMC_EMMCP_BASE 0x1000
+#define SDMMC_MPSECURITY (SDMMC_EMMCP_BASE + 0x0010)
+#define SDMMC_MPSBEGIN0 (SDMMC_EMMCP_BASE + 0x0200)
+#define SDMMC_MPSEND0 (SDMMC_EMMCP_BASE + 0x0204)
+#define SDMMC_MPSCTRL0 (SDMMC_EMMCP_BASE + 0x020C)
+
+/* SMU control defines */
+#define SDMMC_MPSCTRL_SECURE_READ_BIT BIT(7)
+#define SDMMC_MPSCTRL_SECURE_WRITE_BIT BIT(6)
+#define SDMMC_MPSCTRL_NON_SECURE_READ_BIT BIT(5)
+#define SDMMC_MPSCTRL_NON_SECURE_WRITE_BIT BIT(4)
+#define SDMMC_MPSCTRL_USE_FUSE_KEY BIT(3)
+#define SDMMC_MPSCTRL_ECB_MODE BIT(2)
+#define SDMMC_MPSCTRL_ENCRYPTION BIT(1)
+#define SDMMC_MPSCTRL_VALID BIT(0)
+
+/* Maximum number of Ending sector */
+#define SDMMC_ENDING_SEC_NR_MAX 0xFFFFFFFF
+
+/* Fixed clock divider */
+#define EXYNOS4210_FIXED_CIU_CLK_DIV 2
+#define EXYNOS4412_FIXED_CIU_CLK_DIV 4
+#define HS400_FIXED_CIU_CLK_DIV 1
+
+/* Minimal required clock frequency for cclkin, unit: HZ */
+#define EXYNOS_CCLKIN_MIN 50000000
+
+#endif /* _DW_MMC_EXYNOS_H_ */
diff --git a/kernel/drivers/mmc/host/dw_mmc-k3.c b/kernel/drivers/mmc/host/dw_mmc-k3.c
new file mode 100644
index 000000000..650f9cc3f
--- /dev/null
+++ b/kernel/drivers/mmc/host/dw_mmc-k3.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2013 Linaro Ltd.
+ * Copyright (c) 2013 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/of_address.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+
+static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios)
+{
+ int ret;
+
+ ret = clk_set_rate(host->ciu_clk, ios->clock);
+ if (ret)
+ dev_warn(host->dev, "failed to set rate %uHz\n", ios->clock);
+
+ host->bus_hz = clk_get_rate(host->ciu_clk);
+}
+
+static const struct dw_mci_drv_data k3_drv_data = {
+ .set_ios = dw_mci_k3_set_ios,
+};
+
+static const struct of_device_id dw_mci_k3_match[] = {
+ { .compatible = "hisilicon,hi4511-dw-mshc", .data = &k3_drv_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_mci_k3_match);
+
+static int dw_mci_k3_probe(struct platform_device *pdev)
+{
+ const struct dw_mci_drv_data *drv_data;
+ const struct of_device_id *match;
+
+ match = of_match_node(dw_mci_k3_match, pdev->dev.of_node);
+ drv_data = match->data;
+
+ return dw_mci_pltfm_register(pdev, drv_data);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dw_mci_k3_suspend(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dw_mci_suspend(host);
+ if (!ret)
+ clk_disable_unprepare(host->ciu_clk);
+
+ return ret;
+}
+
+static int dw_mci_k3_resume(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(host->ciu_clk);
+ if (ret) {
+ dev_err(host->dev, "failed to enable ciu clock\n");
+ return ret;
+ }
+
+ return dw_mci_resume(host);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(dw_mci_k3_pmops, dw_mci_k3_suspend, dw_mci_k3_resume);
+
+static struct platform_driver dw_mci_k3_pltfm_driver = {
+ .probe = dw_mci_k3_probe,
+ .remove = dw_mci_pltfm_remove,
+ .driver = {
+ .name = "dwmmc_k3",
+ .of_match_table = dw_mci_k3_match,
+ .pm = &dw_mci_k3_pmops,
+ },
+};
+
+module_platform_driver(dw_mci_k3_pltfm_driver);
+
+MODULE_DESCRIPTION("K3 Specific DW-MSHC Driver Extension");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dwmmc-k3");
diff --git a/kernel/drivers/mmc/host/dw_mmc-pci.c b/kernel/drivers/mmc/host/dw_mmc-pci.c
new file mode 100644
index 000000000..4c69fbd29
--- /dev/null
+++ b/kernel/drivers/mmc/host/dw_mmc-pci.c
@@ -0,0 +1,122 @@
+/*
+ * Synopsys DesignWare Multimedia Card PCI Interface driver
+ *
+ * Copyright (C) 2012 Vayavya Labs Pvt. Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/dw_mmc.h>
+#include "dw_mmc.h"
+
+#define PCI_BAR_NO 2
+#define SYNOPSYS_DW_MCI_VENDOR_ID 0x700
+#define SYNOPSYS_DW_MCI_DEVICE_ID 0x1107
+/* Defining the Capabilities */
+#define DW_MCI_CAPABILITIES (MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |\
+ MMC_CAP_SD_HIGHSPEED | MMC_CAP_8_BIT_DATA |\
+ MMC_CAP_SDIO_IRQ)
+
+static struct dw_mci_board pci_board_data = {
+ .num_slots = 1,
+ .caps = DW_MCI_CAPABILITIES,
+ .bus_hz = 33 * 1000 * 1000,
+ .detect_delay_ms = 200,
+ .fifo_depth = 32,
+};
+
+static int dw_mci_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *entries)
+{
+ struct dw_mci *host;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->irq = pdev->irq;
+ host->irq_flags = IRQF_SHARED;
+ host->dev = &pdev->dev;
+ host->pdata = &pci_board_data;
+
+ ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_NO, pci_name(pdev));
+ if (ret)
+ return ret;
+
+ host->regs = pcim_iomap_table(pdev)[PCI_BAR_NO];
+
+ pci_set_master(pdev);
+
+ ret = dw_mci_probe(host);
+ if (ret)
+ return ret;
+
+ pci_set_drvdata(pdev, host);
+
+ return 0;
+}
+
+static void dw_mci_pci_remove(struct pci_dev *pdev)
+{
+ struct dw_mci *host = pci_get_drvdata(pdev);
+
+ dw_mci_remove(host);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dw_mci_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct dw_mci *host = pci_get_drvdata(pdev);
+
+ return dw_mci_suspend(host);
+}
+
+static int dw_mci_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct dw_mci *host = pci_get_drvdata(pdev);
+
+ return dw_mci_resume(host);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(dw_mci_pci_pmops, dw_mci_pci_suspend, dw_mci_pci_resume);
+
+static const struct pci_device_id dw_mci_pci_id[] = {
+ { PCI_DEVICE(SYNOPSYS_DW_MCI_VENDOR_ID, SYNOPSYS_DW_MCI_DEVICE_ID) },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, dw_mci_pci_id);
+
+static struct pci_driver dw_mci_pci_driver = {
+ .name = "dw_mmc_pci",
+ .id_table = dw_mci_pci_id,
+ .probe = dw_mci_pci_probe,
+ .remove = dw_mci_pci_remove,
+ .driver = {
+ .pm = &dw_mci_pci_pmops
+ },
+};
+
+module_pci_driver(dw_mci_pci_driver);
+
+MODULE_DESCRIPTION("DW Multimedia Card PCI Interface driver");
+MODULE_AUTHOR("Shashidhar Hiremath <shashidharh@vayavyalabs.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/dw_mmc-pltfm.c b/kernel/drivers/mmc/host/dw_mmc-pltfm.c
new file mode 100644
index 000000000..ec6dbcdec
--- /dev/null
+++ b/kernel/drivers/mmc/host/dw_mmc-pltfm.c
@@ -0,0 +1,140 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+
+static void dw_mci_pltfm_prepare_command(struct dw_mci *host, u32 *cmdr)
+{
+ *cmdr |= SDMMC_CMD_USE_HOLD_REG;
+}
+
+static const struct dw_mci_drv_data socfpga_drv_data = {
+ .prepare_command = dw_mci_pltfm_prepare_command,
+};
+
+static const struct dw_mci_drv_data pistachio_drv_data = {
+ .prepare_command = dw_mci_pltfm_prepare_command,
+};
+
+int dw_mci_pltfm_register(struct platform_device *pdev,
+ const struct dw_mci_drv_data *drv_data)
+{
+ struct dw_mci *host;
+ struct resource *regs;
+
+ host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0)
+ return host->irq;
+
+ host->drv_data = drv_data;
+ host->dev = &pdev->dev;
+ host->irq_flags = 0;
+ host->pdata = pdev->dev.platform_data;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(host->regs))
+ return PTR_ERR(host->regs);
+
+ platform_set_drvdata(pdev, host);
+ return dw_mci_probe(host);
+}
+EXPORT_SYMBOL_GPL(dw_mci_pltfm_register);
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * TODO: we should probably disable the clock to the card in the suspend path.
+ */
+static int dw_mci_pltfm_suspend(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ return dw_mci_suspend(host);
+}
+
+static int dw_mci_pltfm_resume(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ return dw_mci_resume(host);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume);
+EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops);
+
+static const struct of_device_id dw_mci_pltfm_match[] = {
+ { .compatible = "snps,dw-mshc", },
+ { .compatible = "altr,socfpga-dw-mshc",
+ .data = &socfpga_drv_data },
+ { .compatible = "img,pistachio-dw-mshc",
+ .data = &pistachio_drv_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
+
+static int dw_mci_pltfm_probe(struct platform_device *pdev)
+{
+ const struct dw_mci_drv_data *drv_data = NULL;
+ const struct of_device_id *match;
+
+ if (pdev->dev.of_node) {
+ match = of_match_node(dw_mci_pltfm_match, pdev->dev.of_node);
+ drv_data = match->data;
+ }
+
+ return dw_mci_pltfm_register(pdev, drv_data);
+}
+
+int dw_mci_pltfm_remove(struct platform_device *pdev)
+{
+ struct dw_mci *host = platform_get_drvdata(pdev);
+
+ dw_mci_remove(host);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_mci_pltfm_remove);
+
+static struct platform_driver dw_mci_pltfm_driver = {
+ .probe = dw_mci_pltfm_probe,
+ .remove = dw_mci_pltfm_remove,
+ .driver = {
+ .name = "dw_mmc",
+ .of_match_table = dw_mci_pltfm_match,
+ .pm = &dw_mci_pltfm_pmops,
+ },
+};
+
+module_platform_driver(dw_mci_pltfm_driver);
+
+MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
+MODULE_AUTHOR("NXP Semiconductor VietNam");
+MODULE_AUTHOR("Imagination Technologies Ltd");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/dw_mmc-pltfm.h b/kernel/drivers/mmc/host/dw_mmc-pltfm.h
new file mode 100644
index 000000000..68e7fd2f6
--- /dev/null
+++ b/kernel/drivers/mmc/host/dw_mmc-pltfm.h
@@ -0,0 +1,20 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface Platform driver
+ *
+ * Copyright (C) 2012, Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DW_MMC_PLTFM_H_
+#define _DW_MMC_PLTFM_H_
+
+extern int dw_mci_pltfm_register(struct platform_device *pdev,
+ const struct dw_mci_drv_data *drv_data);
+extern int dw_mci_pltfm_remove(struct platform_device *pdev);
+extern const struct dev_pm_ops dw_mci_pltfm_pmops;
+
+#endif /* _DW_MMC_PLTFM_H_ */
diff --git a/kernel/drivers/mmc/host/dw_mmc-rockchip.c b/kernel/drivers/mmc/host/dw_mmc-rockchip.c
new file mode 100644
index 000000000..dbf166f94
--- /dev/null
+++ b/kernel/drivers/mmc/host/dw_mmc-rockchip.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/of_address.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+
+#define RK3288_CLKGEN_DIV 2
+
+static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
+{
+ *cmdr |= SDMMC_CMD_USE_HOLD_REG;
+}
+
+static int dw_mci_rk3288_setup_clock(struct dw_mci *host)
+{
+ host->bus_hz /= RK3288_CLKGEN_DIV;
+
+ return 0;
+}
+
+static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
+{
+ int ret;
+ unsigned int cclkin;
+ u32 bus_hz;
+
+ if (ios->clock == 0)
+ return;
+
+	/*
+	 * cclkin: source clock of the mmc controller
+	 * bus_hz: card interface clock generated by CLKGEN
+	 * bus_hz = cclkin / RK3288_CLKGEN_DIV
+	 * ios->clock = (div == 0) ? bus_hz : (bus_hz / (2 * div))
+	 *
+	 * Note: div can only be 0 or 1. In DDR52 8-bit mode (only eMMC
+	 * works in 8-bit mode), div must be set to 1.
+	 */
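+	/*
+	 * Worked example: a 50 MHz request in DDR52 8-bit mode needs
+	 * cclkin = 2 * 50000000 * RK3288_CLKGEN_DIV = 200 MHz; in any
+	 * other mode, cclkin = 50000000 * RK3288_CLKGEN_DIV = 100 MHz.
+	 */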
+ if (ios->bus_width == MMC_BUS_WIDTH_8 &&
+ ios->timing == MMC_TIMING_MMC_DDR52)
+ cclkin = 2 * ios->clock * RK3288_CLKGEN_DIV;
+ else
+ cclkin = ios->clock * RK3288_CLKGEN_DIV;
+
+ ret = clk_set_rate(host->ciu_clk, cclkin);
+ if (ret)
+ dev_warn(host->dev, "failed to set rate %uHz\n", ios->clock);
+
+ bus_hz = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV;
+ if (bus_hz != host->bus_hz) {
+ host->bus_hz = bus_hz;
+ /* force dw_mci_setup_bus() */
+ host->current_speed = 0;
+ }
+}
+
+static int dw_mci_rockchip_init(struct dw_mci *host)
+{
+ /* It is slot 8 on Rockchip SoCs */
+ host->sdio_id0 = 8;
+
+ return 0;
+}
+
+/* Common capabilities of RK3288 SoC */
+static unsigned long dw_mci_rk3288_dwmmc_caps[4] = {
+ MMC_CAP_RUNTIME_RESUME, /* emmc */
+ MMC_CAP_RUNTIME_RESUME, /* sdmmc */
+ MMC_CAP_RUNTIME_RESUME, /* sdio0 */
+ MMC_CAP_RUNTIME_RESUME, /* sdio1 */
+};
+static const struct dw_mci_drv_data rk2928_drv_data = {
+ .prepare_command = dw_mci_rockchip_prepare_command,
+ .init = dw_mci_rockchip_init,
+};
+
+static const struct dw_mci_drv_data rk3288_drv_data = {
+ .caps = dw_mci_rk3288_dwmmc_caps,
+ .prepare_command = dw_mci_rockchip_prepare_command,
+ .set_ios = dw_mci_rk3288_set_ios,
+ .setup_clock = dw_mci_rk3288_setup_clock,
+ .init = dw_mci_rockchip_init,
+};
+
+static const struct of_device_id dw_mci_rockchip_match[] = {
+ { .compatible = "rockchip,rk2928-dw-mshc",
+ .data = &rk2928_drv_data },
+ { .compatible = "rockchip,rk3288-dw-mshc",
+ .data = &rk3288_drv_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_mci_rockchip_match);
+
+static int dw_mci_rockchip_probe(struct platform_device *pdev)
+{
+ const struct dw_mci_drv_data *drv_data;
+ const struct of_device_id *match;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ match = of_match_node(dw_mci_rockchip_match, pdev->dev.of_node);
+ drv_data = match->data;
+
+ return dw_mci_pltfm_register(pdev, drv_data);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dw_mci_rockchip_suspend(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ return dw_mci_suspend(host);
+}
+
+static int dw_mci_rockchip_resume(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ return dw_mci_resume(host);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(dw_mci_rockchip_pmops,
+ dw_mci_rockchip_suspend,
+ dw_mci_rockchip_resume);
+
+static struct platform_driver dw_mci_rockchip_pltfm_driver = {
+ .probe = dw_mci_rockchip_probe,
+ .remove = dw_mci_pltfm_remove,
+ .driver = {
+ .name = "dwmmc_rockchip",
+ .of_match_table = dw_mci_rockchip_match,
+ .pm = &dw_mci_rockchip_pmops,
+ },
+};
+
+module_platform_driver(dw_mci_rockchip_pltfm_driver);
+
+MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip Specific DW-MSHC Driver Extension");
+MODULE_ALIAS("platform:dwmmc-rockchip");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/dw_mmc.c b/kernel/drivers/mmc/host/dw_mmc.c
new file mode 100644
index 000000000..5f5adafb2
--- /dev/null
+++ b/kernel/drivers/mmc/host/dw_mmc.c
@@ -0,0 +1,3042 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ * (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/mmc/slot-gpio.h>
+
+#include "dw_mmc.h"
+
+/* Common flag combinations */
+#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
+ SDMMC_INT_HTO | SDMMC_INT_SBE | \
+ SDMMC_INT_EBE)
+#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
+ SDMMC_INT_RESP_ERR)
+#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
+ DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
+#define DW_MCI_SEND_STATUS 1
+#define DW_MCI_RECV_STATUS 2
+#define DW_MCI_DMA_THRESHOLD 16
+
+#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
+#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
+
+#ifdef CONFIG_MMC_DW_IDMAC
+#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
+ SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
+ SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
+ SDMMC_IDMAC_INT_TI)
+
+struct idmac_desc_64addr {
+ u32 des0; /* Control Descriptor */
+
+ u32 des1; /* Reserved */
+
+	u32 des2;	/* Buffer sizes */
+#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
+ ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
+ ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
+
+ u32 des3; /* Reserved */
+
+ u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/
+ u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/
+
+ u32 des6; /* Lower 32-bits of Next Descriptor Address */
+ u32 des7; /* Upper 32-bits of Next Descriptor Address */
+};
+
+struct idmac_desc {
+ __le32 des0; /* Control Descriptor */
+#define IDMAC_DES0_DIC BIT(1)
+#define IDMAC_DES0_LD BIT(2)
+#define IDMAC_DES0_FD BIT(3)
+#define IDMAC_DES0_CH BIT(4)
+#define IDMAC_DES0_ER BIT(5)
+#define IDMAC_DES0_CES BIT(30)
+#define IDMAC_DES0_OWN BIT(31)
+
+ __le32 des1; /* Buffer sizes */
+#define IDMAC_SET_BUFFER1_SIZE(d, s) \
+ ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
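+/* The buffer 1 size field is des1[12:0], so one descriptor maps at most 8191 bytes. */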
+
+ __le32 des2; /* buffer 1 physical address */
+
+ __le32 des3; /* buffer 2 physical address */
+};
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+static bool dw_mci_reset(struct dw_mci *host);
+static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
+static int dw_mci_card_busy(struct mmc_host *mmc);
+
+#if defined(CONFIG_DEBUG_FS)
+static int dw_mci_req_show(struct seq_file *s, void *v)
+{
+ struct dw_mci_slot *slot = s->private;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_command *stop;
+ struct mmc_data *data;
+
+ /* Make sure we get a consistent snapshot */
+ spin_lock_bh(&slot->host->lock);
+ mrq = slot->mrq;
+
+ if (mrq) {
+ cmd = mrq->cmd;
+ data = mrq->data;
+ stop = mrq->stop;
+
+ if (cmd)
+ seq_printf(s,
+ "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+ cmd->opcode, cmd->arg, cmd->flags,
+ cmd->resp[0], cmd->resp[1], cmd->resp[2],
+				cmd->resp[3], cmd->error);
+ if (data)
+ seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
+ data->bytes_xfered, data->blocks,
+ data->blksz, data->flags, data->error);
+ if (stop)
+ seq_printf(s,
+ "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+ stop->opcode, stop->arg, stop->flags,
+ stop->resp[0], stop->resp[1], stop->resp[2],
+				stop->resp[3], stop->error);
+ }
+
+ spin_unlock_bh(&slot->host->lock);
+
+ return 0;
+}
+
+static int dw_mci_req_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dw_mci_req_show, inode->i_private);
+}
+
+static const struct file_operations dw_mci_req_fops = {
+ .owner = THIS_MODULE,
+ .open = dw_mci_req_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dw_mci_regs_show(struct seq_file *s, void *v)
+{
+ seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
+ seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
+ seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
+ seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
+ seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
+ seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
+
+ return 0;
+}
+
+static int dw_mci_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dw_mci_regs_show, inode->i_private);
+}
+
+static const struct file_operations dw_mci_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = dw_mci_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
+{
+ struct mmc_host *mmc = slot->mmc;
+ struct dw_mci *host = slot->host;
+ struct dentry *root;
+ struct dentry *node;
+
+ root = mmc->debugfs_root;
+ if (!root)
+ return;
+
+ node = debugfs_create_file("regs", S_IRUSR, root, host,
+ &dw_mci_regs_fops);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_file("req", S_IRUSR, root, slot,
+ &dw_mci_req_fops);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_x32("pending_events", S_IRUSR, root,
+ (u32 *)&host->pending_events);
+ if (!node)
+ goto err;
+
+ node = debugfs_create_x32("completed_events", S_IRUSR, root,
+ (u32 *)&host->completed_events);
+ if (!node)
+ goto err;
+
+ return;
+
+err:
+ dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
+
+static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+ struct mmc_data *data;
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+ const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
+ u32 cmdr;
+ cmd->error = -EINPROGRESS;
+
+ cmdr = cmd->opcode;
+
+ if (cmd->opcode == MMC_STOP_TRANSMISSION ||
+ cmd->opcode == MMC_GO_IDLE_STATE ||
+ cmd->opcode == MMC_GO_INACTIVE_STATE ||
+ (cmd->opcode == SD_IO_RW_DIRECT &&
+ ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
+ cmdr |= SDMMC_CMD_STOP;
+ else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
+ cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
+
+ if (cmd->opcode == SD_SWITCH_VOLTAGE) {
+ u32 clk_en_a;
+
+ /* Special bit makes CMD11 not die */
+ cmdr |= SDMMC_CMD_VOLT_SWITCH;
+
+ /* Change state to continue to handle CMD11 weirdness */
+ WARN_ON(slot->host->state != STATE_SENDING_CMD);
+ slot->host->state = STATE_SENDING_CMD11;
+
+ /*
+ * We need to disable low power mode (automatic clock stop)
+ * while doing voltage switch so we don't confuse the card,
+ * since stopping the clock is a specific part of the UHS
+ * voltage change dance.
+ *
+ * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
+ * unconditionally turned back on in dw_mci_setup_bus() if it's
+ * ever called with a non-zero clock. That shouldn't happen
+ * until the voltage change is all done.
+ */
+ clk_en_a = mci_readl(host, CLKENA);
+ clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
+ mci_writel(host, CLKENA, clk_en_a);
+ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
+ SDMMC_CMD_PRV_DAT_WAIT, 0);
+ }
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ /* We expect a response, so set this bit */
+ cmdr |= SDMMC_CMD_RESP_EXP;
+ if (cmd->flags & MMC_RSP_136)
+ cmdr |= SDMMC_CMD_RESP_LONG;
+ }
+
+ if (cmd->flags & MMC_RSP_CRC)
+ cmdr |= SDMMC_CMD_RESP_CRC;
+
+ data = cmd->data;
+ if (data) {
+ cmdr |= SDMMC_CMD_DAT_EXP;
+ if (data->flags & MMC_DATA_STREAM)
+ cmdr |= SDMMC_CMD_STRM_MODE;
+ if (data->flags & MMC_DATA_WRITE)
+ cmdr |= SDMMC_CMD_DAT_WR;
+ }
+
+ if (drv_data && drv_data->prepare_command)
+ drv_data->prepare_command(slot->host, &cmdr);
+
+ return cmdr;
+}
+
+static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
+{
+ struct mmc_command *stop;
+ u32 cmdr;
+
+ if (!cmd->data)
+ return 0;
+
+ stop = &host->stop_abort;
+ cmdr = cmd->opcode;
+ memset(stop, 0, sizeof(struct mmc_command));
+
+ if (cmdr == MMC_READ_SINGLE_BLOCK ||
+ cmdr == MMC_READ_MULTIPLE_BLOCK ||
+ cmdr == MMC_WRITE_BLOCK ||
+ cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
+ cmdr == MMC_SEND_TUNING_BLOCK ||
+ cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
+ stop->opcode = MMC_STOP_TRANSMISSION;
+ stop->arg = 0;
+ stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
+ } else if (cmdr == SD_IO_RW_EXTENDED) {
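+		/*
+		 * Build a CMD52 write to the CCCR I/O Abort register:
+		 * bit 31 selects a write, the function field stays 0
+		 * (the CCCR lives in function 0), bits [25:9] carry the
+		 * register address and the data byte is the number of
+		 * the function to abort, taken from bits [30:28] of the
+		 * original CMD53 argument.
+		 */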
+ stop->opcode = SD_IO_RW_DIRECT;
+ stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
+ ((cmd->arg >> 28) & 0x7);
+ stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
+ } else {
+ return 0;
+ }
+
+ cmdr = stop->opcode | SDMMC_CMD_STOP |
+ SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
+
+ return cmdr;
+}
+
+static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+
+ /*
+ * Databook says that before issuing a new data transfer command
+ * we need to check to see if the card is busy. Data transfer commands
+ * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
+ *
+ * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
+ * expected.
+ */
+ if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
+ !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
+ while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
+ if (time_after(jiffies, timeout)) {
+ /* Command will fail; we'll report the error later */
+ dev_err(host->dev, "Busy; trying anyway\n");
+ break;
+ }
+ udelay(10);
+ }
+ }
+}
+
+static void dw_mci_start_command(struct dw_mci *host,
+ struct mmc_command *cmd, u32 cmd_flags)
+{
+ host->cmd = cmd;
+ dev_vdbg(host->dev,
+ "start command: ARGR=0x%08x CMDR=0x%08x\n",
+ cmd->arg, cmd_flags);
+
+ mci_writel(host, CMDARG, cmd->arg);
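+ /* drain writebuffer: CMDARG must land before the start bit is set */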
+ wmb();
+ dw_mci_wait_while_busy(host, cmd_flags);
+
+ mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
+}
+
+static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
+{
+ struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
+ dw_mci_start_command(host, stop, host->stop_cmdr);
+}
+
+/* DMA interface functions */
+static void dw_mci_stop_dma(struct dw_mci *host)
+{
+ if (host->using_dma) {
+ host->dma_ops->stop(host);
+ host->dma_ops->cleanup(host);
+ }
+
+ /* Data transfer was stopped by the interrupt handler */
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+static int dw_mci_get_dma_dir(struct mmc_data *data)
+{
+ if (data->flags & MMC_DATA_WRITE)
+ return DMA_TO_DEVICE;
+ else
+ return DMA_FROM_DEVICE;
+}
+
+#ifdef CONFIG_MMC_DW_IDMAC
+static void dw_mci_dma_cleanup(struct dw_mci *host)
+{
+ struct mmc_data *data = host->data;
+
+ if (data)
+ if (!data->host_cookie)
+ dma_unmap_sg(host->dev,
+ data->sg,
+ data->sg_len,
+ dw_mci_get_dma_dir(data));
+}
+
+static void dw_mci_idmac_reset(struct dw_mci *host)
+{
+ u32 bmod = mci_readl(host, BMOD);
+ /* Software reset of DMA */
+ bmod |= SDMMC_IDMAC_SWRESET;
+ mci_writel(host, BMOD, bmod);
+}
+
+static void dw_mci_idmac_stop_dma(struct dw_mci *host)
+{
+ u32 temp;
+
+ /* Disable and reset the IDMAC interface */
+ temp = mci_readl(host, CTRL);
+ temp &= ~SDMMC_CTRL_USE_IDMAC;
+ temp |= SDMMC_CTRL_DMA_RESET;
+ mci_writel(host, CTRL, temp);
+
+ /* Stop the IDMAC running */
+ temp = mci_readl(host, BMOD);
+ temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
+ temp |= SDMMC_IDMAC_SWRESET;
+ mci_writel(host, BMOD, temp);
+}
+
+static void dw_mci_idmac_complete_dma(struct dw_mci *host)
+{
+ struct mmc_data *data = host->data;
+
+ dev_vdbg(host->dev, "DMA complete\n");
+
+ host->dma_ops->cleanup(host);
+
+ /*
+ * If the card was removed, data will be NULL. No point in trying to
+ * send the stop command or waiting for NBUSY in this case.
+ */
+ if (data) {
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ }
+}
+
+static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
+ unsigned int sg_len)
+{
+ int i;
+ if (host->dma_64bit_address == 1) {
+ struct idmac_desc_64addr *desc = host->sg_cpu;
+
+ for (i = 0; i < sg_len; i++, desc++) {
+ unsigned int length = sg_dma_len(&data->sg[i]);
+ u64 mem_addr = sg_dma_address(&data->sg[i]);
+
+ /*
+ * Set the OWN bit and disable interrupts for this
+ * descriptor
+ */
+ desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
+ IDMAC_DES0_CH;
+ /* Buffer length */
+ IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);
+
+ /* Physical address to DMA to/from */
+ desc->des4 = mem_addr & 0xffffffff;
+ desc->des5 = mem_addr >> 32;
+ }
+
+ /* Set first descriptor */
+ desc = host->sg_cpu;
+ desc->des0 |= IDMAC_DES0_FD;
+
+ /* Set last descriptor */
+ desc = host->sg_cpu + (i - 1) *
+ sizeof(struct idmac_desc_64addr);
+ desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
+ desc->des0 |= IDMAC_DES0_LD;
+
+ } else {
+ struct idmac_desc *desc = host->sg_cpu;
+
+ for (i = 0; i < sg_len; i++, desc++) {
+ unsigned int length = sg_dma_len(&data->sg[i]);
+ u32 mem_addr = sg_dma_address(&data->sg[i]);
+
+ /*
+ * Set the OWN bit and disable interrupts for this
+ * descriptor
+ */
+ desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
+ IDMAC_DES0_DIC | IDMAC_DES0_CH);
+ /* Buffer length */
+ IDMAC_SET_BUFFER1_SIZE(desc, length);
+
+ /* Physical address to DMA to/from */
+ desc->des2 = cpu_to_le32(mem_addr);
+ }
+
+ /* Set first descriptor */
+ desc = host->sg_cpu;
+ desc->des0 |= cpu_to_le32(IDMAC_DES0_FD);
+
+ /* Set last descriptor */
+ desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
+ desc->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC));
+ desc->des0 |= cpu_to_le32(IDMAC_DES0_LD);
+ }
+
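+ /* ensure all descriptor writes are visible before the IDMAC fetches them */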
+ wmb();
+}
+
+static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
+{
+ u32 temp;
+
+ dw_mci_translate_sglist(host, host->data, sg_len);
+
+ /* Make sure to reset DMA in case we did PIO before this */
+ dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
+ dw_mci_idmac_reset(host);
+
+ /* Select IDMAC interface */
+ temp = mci_readl(host, CTRL);
+ temp |= SDMMC_CTRL_USE_IDMAC;
+ mci_writel(host, CTRL, temp);
+
+ wmb();
+
+ /* Enable the IDMAC */
+ temp = mci_readl(host, BMOD);
+ temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
+ mci_writel(host, BMOD, temp);
+
+ /* Start it running */
+ mci_writel(host, PLDMND, 1);
+}
+
+static int dw_mci_idmac_init(struct dw_mci *host)
+{
+ int i;
+
+ if (host->dma_64bit_address == 1) {
+ struct idmac_desc_64addr *p;
+ /* Number of descriptors in the ring buffer */
+ host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);
+
+ /* Forward link the descriptor list */
+ for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
+ i++, p++) {
+ p->des6 = (host->sg_dma +
+ (sizeof(struct idmac_desc_64addr) *
+ (i + 1))) & 0xffffffff;
+
+ p->des7 = (u64)(host->sg_dma +
+ (sizeof(struct idmac_desc_64addr) *
+ (i + 1))) >> 32;
+ /* Initialize reserved and buffer size fields to "0" */
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+ }
+
+ /* Set the last descriptor as the end-of-ring descriptor */
+ p->des6 = host->sg_dma & 0xffffffff;
+ p->des7 = (u64)host->sg_dma >> 32;
+ p->des0 = IDMAC_DES0_ER;
+
+ } else {
+ struct idmac_desc *p;
+ /* Number of descriptors in the ring buffer */
+ host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
+
+ /* Forward link the descriptor list */
+ for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) {
+ p->des3 = cpu_to_le32(host->sg_dma +
+ (sizeof(struct idmac_desc) * (i + 1)));
+ p->des1 = 0;
+ }
+
+ /* Set the last descriptor as the end-of-ring descriptor */
+ p->des3 = cpu_to_le32(host->sg_dma);
+ p->des0 = cpu_to_le32(IDMAC_DES0_ER);
+ }
+
+ dw_mci_idmac_reset(host);
+
+ if (host->dma_64bit_address == 1) {
+ /* Mask out interrupts - get Tx & Rx complete only */
+ mci_writel(host, IDSTS64, IDMAC_INT_CLR);
+ mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
+ SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
+
+ /* Set the descriptor base address */
+ mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
+ mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
+
+ } else {
+ /* Mask out interrupts - get Tx & Rx complete only */
+ mci_writel(host, IDSTS, IDMAC_INT_CLR);
+ mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
+ SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
+
+ /* Set the descriptor base address */
+ mci_writel(host, DBADDR, host->sg_dma);
+ }
+
+ return 0;
+}
+
+static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
+ .init = dw_mci_idmac_init,
+ .start = dw_mci_idmac_start_dma,
+ .stop = dw_mci_idmac_stop_dma,
+ .complete = dw_mci_idmac_complete_dma,
+ .cleanup = dw_mci_dma_cleanup,
+};
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+static int dw_mci_pre_dma_transfer(struct dw_mci *host,
+ struct mmc_data *data,
+ bool next)
+{
+ struct scatterlist *sg;
+ unsigned int i, sg_len;
+
+ if (!next && data->host_cookie)
+ return data->host_cookie;
+
+ /*
+ * We don't do DMA on "complex" transfers, i.e. with
+ * non-word-aligned buffers or lengths. Also, we don't bother
+ * with all the DMA setup overhead for short transfers.
+ */
+ if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
+ return -EINVAL;
+
+ if (data->blksz & 3)
+ return -EINVAL;
+
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (sg->offset & 3 || sg->length & 3)
+ return -EINVAL;
+ }
+
+ sg_len = dma_map_sg(host->dev,
+ data->sg,
+ data->sg_len,
+ dw_mci_get_dma_dir(data));
+ if (sg_len == 0)
+ return -EINVAL;
+
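+ /* cache the mapped length so the request path can reuse it */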
+ if (next)
+ data->host_cookie = sg_len;
+
+ return sg_len;
+}
+
+static void dw_mci_pre_req(struct mmc_host *mmc,
+ struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (!slot->host->use_dma || !data)
+ return;
+
+ if (data->host_cookie) {
+ data->host_cookie = 0;
+ return;
+ }
+
+ if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
+ data->host_cookie = 0;
+}
+
+static void dw_mci_post_req(struct mmc_host *mmc,
+ struct mmc_request *mrq,
+ int err)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (!slot->host->use_dma || !data)
+ return;
+
+ if (data->host_cookie)
+ dma_unmap_sg(slot->host->dev,
+ data->sg,
+ data->sg_len,
+ dw_mci_get_dma_dir(data));
+ data->host_cookie = 0;
+}
+
+static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
+{
+#ifdef CONFIG_MMC_DW_IDMAC
+ unsigned int blksz = data->blksz;
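+ /* candidate burst (MSIZE) settings, expressed in FIFO words */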
+ const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ u32 fifo_width = 1 << host->data_shift;
+ u32 blksz_depth = blksz / fifo_width, fifoth_val;
+ u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
+ int idx = ARRAY_SIZE(mszs) - 1;
+
+ tx_wmark = (host->fifo_depth) / 2;
+ tx_wmark_invers = host->fifo_depth - tx_wmark;
+
+ /*
+ * Use an MSIZE of '1' (single transfers) if blksz is not a
+ * multiple of the FIFO width.
+ */
+ if (blksz % fifo_width) {
+ msize = 0;
+ rx_wmark = 1;
+ goto done;
+ }
+
+ do {
+ if (!((blksz_depth % mszs[idx]) ||
+ (tx_wmark_invers % mszs[idx]))) {
+ msize = idx;
+ rx_wmark = mszs[idx] - 1;
+ break;
+ }
+ } while (--idx > 0);
+ /*
+ * If idx reaches '0' the loop exits without a match, and the
+ * initial values (msize = 0, rx_wmark = 1) are used.
+ */
+done:
+ fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
+ mci_writel(host, FIFOTH, fifoth_val);
+#endif
+}
+
+static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
+{
+ unsigned int blksz = data->blksz;
+ u32 blksz_depth, fifo_depth;
+ u16 thld_size;
+
+ WARN_ON(!(data->flags & MMC_DATA_READ));
+
+ /*
+ * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
+ * in the FIFO region, so we really shouldn't access it).
+ */
+ if (host->verid < DW_MMC_240A)
+ return;
+
+ if (host->timing != MMC_TIMING_MMC_HS200 &&
+ host->timing != MMC_TIMING_MMC_HS400 &&
+ host->timing != MMC_TIMING_UHS_SDR104)
+ goto disable;
+
+ blksz_depth = blksz / (1 << host->data_shift);
+ fifo_depth = host->fifo_depth;
+
+ if (blksz_depth > fifo_depth)
+ goto disable;
+
+ /*
+ * If blksz_depth >= (fifo_depth >> 1), any thld_size <= blksz will
+ * do; if blksz_depth < (fifo_depth >> 1), thld_size must equal
+ * blksz. Either way, simply choose blksz.
+ */
+ thld_size = blksz;
+ mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
+ return;
+
+disable:
+ mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
+}
+
+static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+{
+ unsigned long irqflags;
+ int sg_len;
+ u32 temp;
+
+ host->using_dma = 0;
+
+ /* If we don't have a channel, we can't do DMA */
+ if (!host->use_dma)
+ return -ENODEV;
+
+ sg_len = dw_mci_pre_dma_transfer(host, data, 0);
+ if (sg_len < 0) {
+ host->dma_ops->stop(host);
+ return sg_len;
+ }
+
+ host->using_dma = 1;
+
+ dev_vdbg(host->dev,
+ "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
+ (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
+ sg_len);
+
+ /*
+ * Decide the MSIZE and RX/TX watermarks.
+ * If the current block size is the same as the previous one,
+ * there is no need to update FIFOTH.
+ */
+ if (host->prev_blksz != data->blksz)
+ dw_mci_adjust_fifoth(host, data);
+
+ /* Enable the DMA interface */
+ temp = mci_readl(host, CTRL);
+ temp |= SDMMC_CTRL_DMA_ENABLE;
+ mci_writel(host, CTRL, temp);
+
+ /* Disable RX/TX IRQs, let DMA handle it */
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ temp = mci_readl(host, INTMASK);
+ temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
+ mci_writel(host, INTMASK, temp);
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ host->dma_ops->start(host, sg_len);
+
+ return 0;
+}
+
+static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
+{
+ unsigned long irqflags;
+ u32 temp;
+
+ data->error = -EINPROGRESS;
+
+ WARN_ON(host->data);
+ host->sg = NULL;
+ host->data = data;
+
+ if (data->flags & MMC_DATA_READ) {
+ host->dir_status = DW_MCI_RECV_STATUS;
+ dw_mci_ctrl_rd_thld(host, data);
+ } else {
+ host->dir_status = DW_MCI_SEND_STATUS;
+ }
+
+ if (dw_mci_submit_data_dma(host, data)) {
+ int flags = SG_MITER_ATOMIC;
+ if (host->data->flags & MMC_DATA_READ)
+ flags |= SG_MITER_TO_SG;
+ else
+ flags |= SG_MITER_FROM_SG;
+
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
+ host->sg = data->sg;
+ host->part_buf_start = 0;
+ host->part_buf_count = 0;
+
+ mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ temp = mci_readl(host, INTMASK);
+ temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
+ mci_writel(host, INTMASK, temp);
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ temp = mci_readl(host, CTRL);
+ temp &= ~SDMMC_CTRL_DMA_ENABLE;
+ mci_writel(host, CTRL, temp);
+
+ /*
+ * Use the initial fifoth_val for PIO mode.
+ * If the next request is transferred by DMA, prev_blksz must be
+ * invalidated so that FIFOTH is reprogrammed.
+ */
+ mci_writel(host, FIFOTH, host->fifoth_val);
+ host->prev_blksz = 0;
+ } else {
+ /*
+ * Keep the current block size.
+ * It will be used to decide whether to update
+ * fifoth register next time.
+ */
+ host->prev_blksz = data->blksz;
+ }
+}
+
+static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
+{
+ struct dw_mci *host = slot->host;
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+ unsigned int cmd_status = 0;
+
+ mci_writel(host, CMDARG, arg);
+ wmb();
+ dw_mci_wait_while_busy(host, cmd);
+ mci_writel(host, CMD, SDMMC_CMD_START | cmd);
+
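+ /* the CIU clears SDMMC_CMD_START once it has accepted the command */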
+ while (time_before(jiffies, timeout)) {
+ cmd_status = mci_readl(host, CMD);
+ if (!(cmd_status & SDMMC_CMD_START))
+ return;
+ }
+ dev_err(&slot->mmc->class_dev,
+ "Timeout sending command (cmd %#x arg %#x status %#x)\n",
+ cmd, arg, cmd_status);
+}
+
+static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
+{
+ struct dw_mci *host = slot->host;
+ unsigned int clock = slot->clock;
+ u32 div;
+ u32 clk_en_a;
+ u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
+
+ /* We must continue to set bit 28 in CMD until the change is complete */
+ if (host->state == STATE_WAITING_CMD11_DONE)
+ sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
+
+ if (!clock) {
+ mci_writel(host, CLKENA, 0);
+ mci_send_cmd(slot, sdmmc_cmd_bits, 0);
+ } else if (clock != host->current_speed || force_clkinit) {
+ div = host->bus_hz / clock;
+ if (host->bus_hz % clock && host->bus_hz > clock)
+ /*
+ * round the divider up rather than down so we
+ * never over-clock the card.
+ */
+ div += 1;
+
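+ /* CLKDIV holds half the divider: a value of n divides by 2*n, 0 bypasses */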
+ div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
+
+ if ((clock << div) != slot->__clk_old || force_clkinit)
+ dev_info(&slot->mmc->class_dev,
+ "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz, div = %d)\n",
+ slot->id, host->bus_hz, clock,
+ div ? ((host->bus_hz / div) >> 1) :
+ host->bus_hz, div);
+
+ /* disable clock */
+ mci_writel(host, CLKENA, 0);
+ mci_writel(host, CLKSRC, 0);
+
+ /* inform CIU */
+ mci_send_cmd(slot, sdmmc_cmd_bits, 0);
+
+ /* set clock to desired speed */
+ mci_writel(host, CLKDIV, div);
+
+ /* inform CIU */
+ mci_send_cmd(slot, sdmmc_cmd_bits, 0);
+
+ /* enable clock; only low power if no SDIO */
+ clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
+ if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
+ clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
+ mci_writel(host, CLKENA, clk_en_a);
+
+ /* inform CIU */
+ mci_send_cmd(slot, sdmmc_cmd_bits, 0);
+
+ /* remember the clock, with the divider folded in, for next time */
+ slot->__clk_old = clock << div;
+ }
+
+ host->current_speed = clock;
+
+ /* Set the current slot bus width */
+ mci_writel(host, CTYPE, (slot->ctype << slot->id));
+}
+
+static void __dw_mci_start_request(struct dw_mci *host,
+ struct dw_mci_slot *slot,
+ struct mmc_command *cmd)
+{
+ struct mmc_request *mrq;
+ struct mmc_data *data;
+ u32 cmdflags;
+
+ mrq = slot->mrq;
+
+ host->cur_slot = slot;
+ host->mrq = mrq;
+
+ host->pending_events = 0;
+ host->completed_events = 0;
+ host->cmd_status = 0;
+ host->data_status = 0;
+ host->dir_status = 0;
+
+ data = cmd->data;
+ if (data) {
+ mci_writel(host, TMOUT, 0xFFFFFFFF);
+ mci_writel(host, BYTCNT, data->blksz*data->blocks);
+ mci_writel(host, BLKSIZ, data->blksz);
+ }
+
+ cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
+
+ /* this is the first command, send the initialization clock */
+ if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
+ cmdflags |= SDMMC_CMD_INIT;
+
+ if (data) {
+ dw_mci_submit_data(host, data);
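+ /* drain writebuffer: complete the data setup before the command */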
+ wmb();
+ }
+
+ dw_mci_start_command(host, cmd, cmdflags);
+
+ if (cmd->opcode == SD_SWITCH_VOLTAGE) {
+ unsigned long irqflags;
+
+ /*
+ * Databook says to fail after 2ms w/ no response, but evidence
+ * shows that sometimes the cmd11 interrupt takes over 130ms.
+ * We'll set to 500ms, plus an extra jiffy just in case jiffies
+ * is just about to roll over.
+ *
+ * We do this whole thing under spinlock and only if the
+ * command hasn't already completed (indicating that the irq
+ * already ran, so we don't want the timeout).
+ */
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
+ mod_timer(&host->cmd11_timer,
+ jiffies + msecs_to_jiffies(500) + 1);
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+ }
+
+ if (mrq->stop)
+ host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
+ else
+ host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
+}
+
+static void dw_mci_start_request(struct dw_mci *host,
+ struct dw_mci_slot *slot)
+{
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_command *cmd;
+
+ cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
+ __dw_mci_start_request(host, slot, cmd);
+}
+
+/* must be called with host->lock held */
+static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
+ struct mmc_request *mrq)
+{
+ dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
+ host->state);
+
+ slot->mrq = mrq;
+
+ if (host->state == STATE_WAITING_CMD11_DONE) {
+ dev_warn(&slot->mmc->class_dev,
+ "Voltage change didn't complete\n");
+ /*
+ * This case isn't expected to happen; rather than crash,
+ * try to continue from the closest sensible state.
+ */
+ host->state = STATE_IDLE;
+ }
+
+ if (host->state == STATE_IDLE) {
+ host->state = STATE_SENDING_CMD;
+ dw_mci_start_request(host, slot);
+ } else {
+ list_add_tail(&slot->queue_node, &host->queue);
+ }
+}
+
+static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+
+ WARN_ON(slot->mrq);
+
+ /*
+ * The check for card presence and queueing of the request must be
+ * atomic, otherwise the card could be removed in between and the
+ * request wouldn't fail until another card was inserted.
+ */
+ spin_lock_bh(&host->lock);
+
+ if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
+ spin_unlock_bh(&host->lock);
+ mrq->cmd->error = -ENOMEDIUM;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ dw_mci_queue_request(host, slot, mrq);
+
+ spin_unlock_bh(&host->lock);
+}
+
+static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
+ u32 regs;
+ int ret;
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_4:
+ slot->ctype = SDMMC_CTYPE_4BIT;
+ break;
+ case MMC_BUS_WIDTH_8:
+ slot->ctype = SDMMC_CTYPE_8BIT;
+ break;
+ default:
+ /* set default 1 bit mode */
+ slot->ctype = SDMMC_CTYPE_1BIT;
+ }
+
+ regs = mci_readl(slot->host, UHS_REG);
+
+ /* DDR mode set */
+ if (ios->timing == MMC_TIMING_MMC_DDR52 ||
+ ios->timing == MMC_TIMING_MMC_HS400)
+ regs |= ((0x1 << slot->id) << 16);
+ else
+ regs &= ~((0x1 << slot->id) << 16);
+
+ mci_writel(slot->host, UHS_REG, regs);
+ slot->host->timing = ios->timing;
+
+ /*
+ * Use mirror of ios->clock to prevent race with mmc
+ * core ios update when finding the minimum.
+ */
+ slot->clock = ios->clock;
+
+ if (drv_data && drv_data->set_ios)
+ drv_data->set_ios(slot->host, ios);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+ ios->vdd);
+ if (ret) {
+ dev_err(slot->host->dev,
+ "failed to enable vmmc regulator\n");
+ /* return if we failed to turn on vmmc */
+ return;
+ }
+ }
+ set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
+ regs = mci_readl(slot->host, PWREN);
+ regs |= (1 << slot->id);
+ mci_writel(slot->host, PWREN, regs);
+ break;
+ case MMC_POWER_ON:
+ if (!slot->host->vqmmc_enabled) {
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (ret < 0)
+ dev_err(slot->host->dev,
+ "failed to enable vqmmc\n");
+ else
+ slot->host->vqmmc_enabled = true;
+
+ } else {
+ /* Keep track so we don't reset again */
+ slot->host->vqmmc_enabled = true;
+ }
+
+ /* Reset our state machine after powering on */
+ dw_mci_ctrl_reset(slot->host,
+ SDMMC_CTRL_ALL_RESET_FLAGS);
+ }
+
+ /* Adjust clock / bus width after power is up */
+ dw_mci_setup_bus(slot, false);
+
+ break;
+ case MMC_POWER_OFF:
+ /* Turn clock off before power goes down */
+ dw_mci_setup_bus(slot, false);
+
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+ if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
+ regulator_disable(mmc->supply.vqmmc);
+ slot->host->vqmmc_enabled = false;
+
+ regs = mci_readl(slot->host, PWREN);
+ regs &= ~(1 << slot->id);
+ mci_writel(slot->host, PWREN, regs);
+ break;
+ default:
+ break;
+ }
+
+ if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
+ slot->host->state = STATE_IDLE;
+}
+
+static int dw_mci_card_busy(struct mmc_host *mmc)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ u32 status;
+
+ /*
+ * Check the data-busy bit, which is set while the card holds
+ * DAT[0] low to signal busy
+ */
+ status = mci_readl(slot->host, STATUS);
+
+ return !!(status & SDMMC_STATUS_BUSY);
+}
+
+static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+ u32 uhs;
+ u32 v18 = SDMMC_UHS_18V << slot->id;
+ int min_uv, max_uv;
+ int ret;
+
+ /*
+ * Program the voltage. Note that some instances of dw_mmc may use
+ * the UHS_REG for this. For other instances (like exynos) the UHS_REG
+ * does no harm but you need to set the regulator directly. Try both.
+ */
+ uhs = mci_readl(host, UHS_REG);
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ min_uv = 2700000;
+ max_uv = 3600000;
+ uhs &= ~v18;
+ } else {
+ min_uv = 1700000;
+ max_uv = 1950000;
+ uhs |= v18;
+ }
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
+
+ if (ret) {
+ dev_dbg(&mmc->class_dev,
+ "Regulator set error %d: %d - %d\n",
+ ret, min_uv, max_uv);
+ return ret;
+ }
+ }
+ mci_writel(host, UHS_REG, uhs);
+
+ return 0;
+}
+
+static int dw_mci_get_ro(struct mmc_host *mmc)
+{
+ int read_only;
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ int gpio_ro = mmc_gpio_get_ro(mmc);
+
+ /* Use platform get_ro function, else try on-board write protect */
+ if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
+ (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
+ read_only = 0;
+ else if (!IS_ERR_VALUE(gpio_ro))
+ read_only = gpio_ro;
+ else
+ read_only =
+ mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
+
+ dev_dbg(&mmc->class_dev, "card is %s\n",
+ read_only ? "read-only" : "read-write");
+
+ return read_only;
+}
+
+static int dw_mci_get_cd(struct mmc_host *mmc)
+{
+ int present;
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci_board *brd = slot->host->pdata;
+ struct dw_mci *host = slot->host;
+ int gpio_cd = mmc_gpio_get_cd(mmc);
+
+ /* Use platform get_cd function, else try onboard card detect */
+ if ((brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) ||
+ (mmc->caps & MMC_CAP_NONREMOVABLE))
+ present = 1;
+ else if (!IS_ERR_VALUE(gpio_cd))
+ present = gpio_cd;
+ else
+ present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
+ == 0 ? 1 : 0;
+
+ spin_lock_bh(&host->lock);
+ if (present) {
+ set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ dev_dbg(&mmc->class_dev, "card is present\n");
+ } else {
+ clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ dev_dbg(&mmc->class_dev, "card is not present\n");
+ }
+ spin_unlock_bh(&host->lock);
+
+ return present;
+}
+
+static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+
+ /*
+ * Low power mode will stop the card clock when idle. According to the
+ * description of the CLKENA register we should disable low power mode
+ * for SDIO cards if we need SDIO interrupts to work.
+ */
+ if (mmc->caps & MMC_CAP_SDIO_IRQ) {
+ const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
+ u32 clk_en_a_old;
+ u32 clk_en_a;
+
+ clk_en_a_old = mci_readl(host, CLKENA);
+
+ if (card->type == MMC_TYPE_SDIO ||
+ card->type == MMC_TYPE_SD_COMBO) {
+ set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ clk_en_a = clk_en_a_old & ~clken_low_pwr;
+ } else {
+ clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ clk_en_a = clk_en_a_old | clken_low_pwr;
+ }
+
+ if (clk_en_a != clk_en_a_old) {
+ mci_writel(host, CLKENA, clk_en_a);
+ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
+ SDMMC_CMD_PRV_DAT_WAIT, 0);
+ }
+ }
+}
+
+static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+ unsigned long irqflags;
+ u32 int_mask;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ /* Enable/disable Slot Specific SDIO interrupt */
+ int_mask = mci_readl(host, INTMASK);
+ if (enb)
+ int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
+ else
+ int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
+ mci_writel(host, INTMASK, int_mask);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+}
+
+static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+ int err = -ENOSYS;
+
+ if (drv_data && drv_data->execute_tuning)
+ err = drv_data->execute_tuning(slot);
+ return err;
+}
+
+static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+
+ if (drv_data && drv_data->prepare_hs400_tuning)
+ return drv_data->prepare_hs400_tuning(host, ios);
+
+ return 0;
+}
+
+static const struct mmc_host_ops dw_mci_ops = {
+ .request = dw_mci_request,
+ .pre_req = dw_mci_pre_req,
+ .post_req = dw_mci_post_req,
+ .set_ios = dw_mci_set_ios,
+ .get_ro = dw_mci_get_ro,
+ .get_cd = dw_mci_get_cd,
+ .enable_sdio_irq = dw_mci_enable_sdio_irq,
+ .execute_tuning = dw_mci_execute_tuning,
+ .card_busy = dw_mci_card_busy,
+ .start_signal_voltage_switch = dw_mci_switch_voltage,
+ .init_card = dw_mci_init_card,
+ .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
+};
+
+static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
+ __releases(&host->lock)
+ __acquires(&host->lock)
+{
+ struct dw_mci_slot *slot;
+ struct mmc_host *prev_mmc = host->cur_slot->mmc;
+
+ WARN_ON(host->cmd || host->data);
+
+ host->cur_slot->mrq = NULL;
+ host->mrq = NULL;
+ if (!list_empty(&host->queue)) {
+ slot = list_entry(host->queue.next,
+ struct dw_mci_slot, queue_node);
+ list_del(&slot->queue_node);
+ dev_vdbg(host->dev, "list not empty: %s is next\n",
+ mmc_hostname(slot->mmc));
+ host->state = STATE_SENDING_CMD;
+ dw_mci_start_request(host, slot);
+ } else {
+ dev_vdbg(host->dev, "list empty\n");
+
+ if (host->state == STATE_SENDING_CMD11)
+ host->state = STATE_WAITING_CMD11_DONE;
+ else
+ host->state = STATE_IDLE;
+ }
+
+ spin_unlock(&host->lock);
+ mmc_request_done(prev_mmc, mrq);
+ spin_lock(&host->lock);
+}
+
+static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
+{
+ u32 status = host->cmd_status;
+
+ host->cmd_status = 0;
+
+ /* Read the response from the card (up to 16 bytes) */
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
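+ /* RESP0 holds the least significant word, so fill resp[] in reverse */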
+ cmd->resp[3] = mci_readl(host, RESP0);
+ cmd->resp[2] = mci_readl(host, RESP1);
+ cmd->resp[1] = mci_readl(host, RESP2);
+ cmd->resp[0] = mci_readl(host, RESP3);
+ } else {
+ cmd->resp[0] = mci_readl(host, RESP0);
+ cmd->resp[1] = 0;
+ cmd->resp[2] = 0;
+ cmd->resp[3] = 0;
+ }
+ }
+
+ if (status & SDMMC_INT_RTO)
+ cmd->error = -ETIMEDOUT;
+ else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
+ cmd->error = -EILSEQ;
+ else if (status & SDMMC_INT_RESP_ERR)
+ cmd->error = -EIO;
+ else
+ cmd->error = 0;
+
+ if (cmd->error) {
+ /* newer ip versions need a delay between retries */
+ if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
+ mdelay(20);
+ }
+
+ return cmd->error;
+}
+
+static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
+{
+ u32 status = host->data_status;
+
+ if (status & DW_MCI_DATA_ERROR_FLAGS) {
+ if (status & SDMMC_INT_DRTO) {
+ data->error = -ETIMEDOUT;
+ } else if (status & SDMMC_INT_DCRC) {
+ data->error = -EILSEQ;
+ } else if (status & SDMMC_INT_EBE) {
+ if (host->dir_status ==
+ DW_MCI_SEND_STATUS) {
+ /*
+ * No data CRC status was returned.
+ * The number of bytes transferred
+ * will be exaggerated in PIO mode.
+ */
+ data->bytes_xfered = 0;
+ data->error = -ETIMEDOUT;
+ } else if (host->dir_status ==
+ DW_MCI_RECV_STATUS) {
+ data->error = -EIO;
+ }
+ } else {
+ /* SDMMC_INT_SBE is included */
+ data->error = -EIO;
+ }
+
+ dev_dbg(host->dev, "data error, status 0x%08x\n", status);
+
+ /*
+ * After an error, there may be data lingering
+ * in the FIFO
+ */
+ dw_mci_reset(host);
+ } else {
+ data->bytes_xfered = data->blocks * data->blksz;
+ data->error = 0;
+ }
+
+ return data->error;
+}
+
+static void dw_mci_tasklet_func(unsigned long priv)
+{
+ struct dw_mci *host = (struct dw_mci *)priv;
+ struct mmc_data *data;
+ struct mmc_command *cmd;
+ struct mmc_request *mrq;
+ enum dw_mci_state state;
+ enum dw_mci_state prev_state;
+ unsigned int err;
+
+ spin_lock(&host->lock);
+
+ state = host->state;
+ data = host->data;
+ mrq = host->mrq;
+
+ do {
+ prev_state = state;
+
+ switch (state) {
+ case STATE_IDLE:
+ case STATE_WAITING_CMD11_DONE:
+ break;
+
+ case STATE_SENDING_CMD11:
+ case STATE_SENDING_CMD:
+ if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+ &host->pending_events))
+ break;
+
+ cmd = host->cmd;
+ host->cmd = NULL;
+ set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
+ err = dw_mci_command_complete(host, cmd);
+ if (cmd == mrq->sbc && !err) {
+ prev_state = state = STATE_SENDING_CMD;
+ __dw_mci_start_request(host, host->cur_slot,
+ mrq->cmd);
+ goto unlock;
+ }
+
+ if (cmd->data && err) {
+ dw_mci_stop_dma(host);
+ send_stop_abort(host, data);
+ state = STATE_SENDING_STOP;
+ break;
+ }
+
+ if (!cmd->data || err) {
+ dw_mci_request_end(host, mrq);
+ goto unlock;
+ }
+
+ prev_state = state = STATE_SENDING_DATA;
+ /* fall through */
+
+ case STATE_SENDING_DATA:
+ /*
+ * We could get a data error and never a transfer
+ * complete so we'd better check for it here.
+ *
+ * Note that we don't really care if we also got a
+ * transfer complete; stopping the DMA and sending an
+ * abort won't hurt.
+ */
+ if (test_and_clear_bit(EVENT_DATA_ERROR,
+ &host->pending_events)) {
+ dw_mci_stop_dma(host);
+ if (data->stop ||
+ !(host->data_status & (SDMMC_INT_DRTO |
+ SDMMC_INT_EBE)))
+ send_stop_abort(host, data);
+ state = STATE_DATA_ERROR;
+ break;
+ }
+
+ if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+ &host->pending_events))
+ break;
+
+ set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
+
+ /*
+ * Handle an EVENT_DATA_ERROR that might have shown up
+ * before the transfer completed. This might not have
+ * been caught by the check above because the interrupt
+ * could have gone off between the previous check and
+ * the check for transfer complete.
+ *
+ * Technically this ought not be needed assuming we
+ * get a DATA_COMPLETE eventually (we'll notice the
+ * error and end the request), but it shouldn't hurt.
+ *
+ * This has the advantage of sending the stop command.
+ */
+ if (test_and_clear_bit(EVENT_DATA_ERROR,
+ &host->pending_events)) {
+ dw_mci_stop_dma(host);
+ if (data->stop ||
+ !(host->data_status & (SDMMC_INT_DRTO |
+ SDMMC_INT_EBE)))
+ send_stop_abort(host, data);
+ state = STATE_DATA_ERROR;
+ break;
+ }
+ prev_state = state = STATE_DATA_BUSY;
+
+ /* fall through */
+
+ case STATE_DATA_BUSY:
+ if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
+ &host->pending_events))
+ break;
+
+ host->data = NULL;
+ set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
+ err = dw_mci_data_complete(host, data);
+
+ if (!err) {
+ if (!data->stop || mrq->sbc) {
+ if (mrq->sbc && data->stop)
+ data->stop->error = 0;
+ dw_mci_request_end(host, mrq);
+ goto unlock;
+ }
+
+ /* stop command for open-ended transfer */
+ if (data->stop)
+ send_stop_abort(host, data);
+ } else {
+ /*
+ * If we don't have a command complete now we'll
+ * never get one since we just reset everything;
+ * better end the request.
+ *
+ * If we do have a command complete we'll fall
+ * through to the SENDING_STOP command and
+ * everything will be peachy keen.
+ */
+ if (!test_bit(EVENT_CMD_COMPLETE,
+ &host->pending_events)) {
+ host->cmd = NULL;
+ dw_mci_request_end(host, mrq);
+ goto unlock;
+ }
+ }
+
+ /*
+ * If err is non-zero, the stop/abort command has
+ * already been issued.
+ */
+ prev_state = state = STATE_SENDING_STOP;
+
+ /* fall through */
+
+ case STATE_SENDING_STOP:
+ if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+ &host->pending_events))
+ break;
+
+ /* CMD error in data command */
+ if (mrq->cmd->error && mrq->data)
+ dw_mci_reset(host);
+
+ host->cmd = NULL;
+ host->data = NULL;
+
+ if (mrq->stop)
+ dw_mci_command_complete(host, mrq->stop);
+ else
+ host->cmd_status = 0;
+
+ dw_mci_request_end(host, mrq);
+ goto unlock;
+
+ case STATE_DATA_ERROR:
+ if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+ &host->pending_events))
+ break;
+
+ state = STATE_DATA_BUSY;
+ break;
+ }
+ } while (state != prev_state);
+
+ host->state = state;
+unlock:
+ spin_unlock(&host->lock);
+}
+
+/* push final bytes to part_buf, only use during push */
+static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
+{
+ memcpy((void *)&host->part_buf, buf, cnt);
+ host->part_buf_count = cnt;
+}
+
+/* append bytes to part_buf, only use during push */
+static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
+{
+ cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
+ memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
+ host->part_buf_count += cnt;
+ return cnt;
+}
+
+/* pull first bytes from part_buf, only use during pull */
+static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
+{
+ cnt = min(cnt, (int)host->part_buf_count);
+ if (cnt) {
+ memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
+ cnt);
+ host->part_buf_count -= cnt;
+ host->part_buf_start += cnt;
+ }
+ return cnt;
+}
+
+/* pull final bytes from the part_buf, assuming it's just been filled */
+static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
+{
+ memcpy(buf, &host->part_buf, cnt);
+ host->part_buf_start = cnt;
+ host->part_buf_count = (1 << host->data_shift) - cnt;
+}
+
+static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
+{
+ struct mmc_data *data = host->data;
+ int init_cnt = cnt;
+
+ /* try and push anything in the part_buf */
+ if (unlikely(host->part_buf_count)) {
+ int len = dw_mci_push_part_bytes(host, buf, cnt);
+ buf += len;
+ cnt -= len;
+ if (host->part_buf_count == 2) {
+ mci_fifo_writew(host->fifo_reg, host->part_buf16);
+ host->part_buf_count = 0;
+ }
+ }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x1)) {
+ while (cnt >= 2) {
+ u16 aligned_buf[64];
+ int len = min(cnt & -2, (int)sizeof(aligned_buf));
+ int items = len >> 1;
+ int i;
+ /* memcpy from input buffer into aligned buffer */
+ memcpy(aligned_buf, buf, len);
+ buf += len;
+ cnt -= len;
+ /* push data from aligned buffer into fifo */
+ for (i = 0; i < items; ++i)
+ mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
+ }
+ } else
+#endif
+ {
+ u16 *pdata = buf;
+ for (; cnt >= 2; cnt -= 2)
+ mci_fifo_writew(host->fifo_reg, *pdata++);
+ buf = pdata;
+ }
+ /* put anything remaining in the part_buf */
+ if (cnt) {
+ dw_mci_set_part_bytes(host, buf, cnt);
+ /* Push data if we have reached the expected data length */
+ if ((data->bytes_xfered + init_cnt) ==
+ (data->blksz * data->blocks))
+ mci_fifo_writew(host->fifo_reg, host->part_buf16);
+ }
+}
+
+static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x1)) {
+ while (cnt >= 2) {
+ /* pull data from fifo into aligned buffer */
+ u16 aligned_buf[64];
+ int len = min(cnt & -2, (int)sizeof(aligned_buf));
+ int items = len >> 1;
+ int i;
+ for (i = 0; i < items; ++i)
+ aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
+ /* memcpy from aligned buffer into output buffer */
+ memcpy(buf, aligned_buf, len);
+ buf += len;
+ cnt -= len;
+ }
+ } else
+#endif
+ {
+ u16 *pdata = buf;
+ for (; cnt >= 2; cnt -= 2)
+ *pdata++ = mci_fifo_readw(host->fifo_reg);
+ buf = pdata;
+ }
+ if (cnt) {
+ host->part_buf16 = mci_fifo_readw(host->fifo_reg);
+ dw_mci_pull_final_bytes(host, buf, cnt);
+ }
+}
+
+static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
+{
+ struct mmc_data *data = host->data;
+ int init_cnt = cnt;
+
+ /* try and push anything in the part_buf */
+ if (unlikely(host->part_buf_count)) {
+ int len = dw_mci_push_part_bytes(host, buf, cnt);
+ buf += len;
+ cnt -= len;
+ if (host->part_buf_count == 4) {
+ mci_fifo_writel(host->fifo_reg, host->part_buf32);
+ host->part_buf_count = 0;
+ }
+ }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x3)) {
+ while (cnt >= 4) {
+ u32 aligned_buf[32];
+ int len = min(cnt & -4, (int)sizeof(aligned_buf));
+ int items = len >> 2;
+ int i;
+ /* memcpy from input buffer into aligned buffer */
+ memcpy(aligned_buf, buf, len);
+ buf += len;
+ cnt -= len;
+ /* push data from aligned buffer into fifo */
+ for (i = 0; i < items; ++i)
+ mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
+ }
+ } else
+#endif
+ {
+ u32 *pdata = buf;
+ for (; cnt >= 4; cnt -= 4)
+ mci_fifo_writel(host->fifo_reg, *pdata++);
+ buf = pdata;
+ }
+ /* put anything remaining in the part_buf */
+ if (cnt) {
+ dw_mci_set_part_bytes(host, buf, cnt);
+ /* Push data if we have reached the expected data length */
+ if ((data->bytes_xfered + init_cnt) ==
+ (data->blksz * data->blocks))
+ mci_fifo_writel(host->fifo_reg, host->part_buf32);
+ }
+}
+
+static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x3)) {
+ while (cnt >= 4) {
+ /* pull data from fifo into aligned buffer */
+ u32 aligned_buf[32];
+ int len = min(cnt & -4, (int)sizeof(aligned_buf));
+ int items = len >> 2;
+ int i;
+ for (i = 0; i < items; ++i)
+ aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
+ /* memcpy from aligned buffer into output buffer */
+ memcpy(buf, aligned_buf, len);
+ buf += len;
+ cnt -= len;
+ }
+ } else
+#endif
+ {
+ u32 *pdata = buf;
+ for (; cnt >= 4; cnt -= 4)
+ *pdata++ = mci_fifo_readl(host->fifo_reg);
+ buf = pdata;
+ }
+ if (cnt) {
+ host->part_buf32 = mci_fifo_readl(host->fifo_reg);
+ dw_mci_pull_final_bytes(host, buf, cnt);
+ }
+}
+
+static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
+{
+ struct mmc_data *data = host->data;
+ int init_cnt = cnt;
+
+ /* try and push anything in the part_buf */
+ if (unlikely(host->part_buf_count)) {
+ int len = dw_mci_push_part_bytes(host, buf, cnt);
+ buf += len;
+ cnt -= len;
+
+ if (host->part_buf_count == 8) {
+ mci_fifo_writeq(host->fifo_reg, host->part_buf);
+ host->part_buf_count = 0;
+ }
+ }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+ /* memcpy from input buffer into aligned buffer */
+ memcpy(aligned_buf, buf, len);
+ buf += len;
+ cnt -= len;
+ /* push data from aligned buffer into fifo */
+ for (i = 0; i < items; ++i)
+ mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+ for (; cnt >= 8; cnt -= 8)
+ mci_fifo_writeq(host->fifo_reg, *pdata++);
+ buf = pdata;
+ }
+ /* put anything remaining in the part_buf */
+ if (cnt) {
+ dw_mci_set_part_bytes(host, buf, cnt);
+ /* Push data if we have reached the expected data length */
+ if ((data->bytes_xfered + init_cnt) ==
+ (data->blksz * data->blocks))
+ mci_fifo_writeq(host->fifo_reg, host->part_buf);
+ }
+}
+
+static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ /* pull data from fifo into aligned buffer */
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+ for (i = 0; i < items; ++i)
+ aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
+
+ /* memcpy from aligned buffer into output buffer */
+ memcpy(buf, aligned_buf, len);
+ buf += len;
+ cnt -= len;
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+ for (; cnt >= 8; cnt -= 8)
+ *pdata++ = mci_fifo_readq(host->fifo_reg);
+ buf = pdata;
+ }
+ if (cnt) {
+ host->part_buf = mci_fifo_readq(host->fifo_reg);
+ dw_mci_pull_final_bytes(host, buf, cnt);
+ }
+}
+
+static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
+{
+ int len;
+
+ /* get remaining partial bytes */
+ len = dw_mci_pull_part_bytes(host, buf, cnt);
+ if (unlikely(len == cnt))
+ return;
+ buf += len;
+ cnt -= len;
+
+ /* get the rest of the data */
+ host->pull_data(host, buf, cnt);
+}
+
+static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
+{
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ void *buf;
+ unsigned int offset;
+ struct mmc_data *data = host->data;
+ int shift = host->data_shift;
+ u32 status;
+ unsigned int len;
+ unsigned int remain, fcnt;
+
+ do {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+
+ host->sg = sg_miter->piter.sg;
+ buf = sg_miter->addr;
+ remain = sg_miter->length;
+ offset = 0;
+
+ do {
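+ /* bytes available: the FIFO count scaled to bytes, plus partial bytes */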
+ fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
+ << shift) + host->part_buf_count;
+ len = min(remain, fcnt);
+ if (!len)
+ break;
+ dw_mci_pull_data(host, (void *)(buf + offset), len);
+ data->bytes_xfered += len;
+ offset += len;
+ remain -= len;
+ } while (remain);
+
+ sg_miter->consumed = offset;
+ status = mci_readl(host, MINTSTS);
+ mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+ /* if the RXDR is ready, read again */
+ } while ((status & SDMMC_INT_RXDR) ||
+ (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
+
+ if (!remain) {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+ sg_miter->consumed = 0;
+ }
+ sg_miter_stop(sg_miter);
+ return;
+
+done:
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
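+ /* make the cleared sg pointer visible before setting the event bit */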
+ smp_wmb();
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+static void dw_mci_write_data_pio(struct dw_mci *host)
+{
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ void *buf;
+ unsigned int offset;
+ struct mmc_data *data = host->data;
+ int shift = host->data_shift;
+ u32 status;
+ unsigned int len;
+ unsigned int fifo_depth = host->fifo_depth;
+ unsigned int remain, fcnt;
+
+ do {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+
+ host->sg = sg_miter->piter.sg;
+ buf = sg_miter->addr;
+ remain = sg_miter->length;
+ offset = 0;
+
+ do {
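+ /* free FIFO space in bytes, minus bytes already staged in part_buf */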
+ fcnt = ((fifo_depth -
+ SDMMC_GET_FCNT(mci_readl(host, STATUS)))
+ << shift) - host->part_buf_count;
+ len = min(remain, fcnt);
+ if (!len)
+ break;
+ host->push_data(host, (void *)(buf + offset), len);
+ data->bytes_xfered += len;
+ offset += len;
+ remain -= len;
+ } while (remain);
+
+ sg_miter->consumed = offset;
+ status = mci_readl(host, MINTSTS);
+ mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+ } while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
+
+ if (!remain) {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+ sg_miter->consumed = 0;
+ }
+ sg_miter_stop(sg_miter);
+ return;
+
+done:
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
+ smp_wmb();
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
+{
+ if (!host->cmd_status)
+ host->cmd_status = status;
+
+ smp_wmb();
+
+ set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+}
+
+static void dw_mci_handle_cd(struct dw_mci *host)
+{
+ int i;
+
+ for (i = 0; i < host->num_slots; i++) {
+ struct dw_mci_slot *slot = host->slot[i];
+
+ if (!slot)
+ continue;
+
+ if (slot->mmc->ops->card_event)
+ slot->mmc->ops->card_event(slot->mmc);
+ mmc_detect_change(slot->mmc,
+ msecs_to_jiffies(host->pdata->detect_delay_ms));
+ }
+}
+
+static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
+{
+ struct dw_mci *host = dev_id;
+ u32 pending;
+ int i;
+
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+
+ /*
+ * DTO fix - version 2.10a and below, and only if internal DMA
+ * is configured.
+ */
+ if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
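+ /* STATUS[29:17] is the FIFO count; nonzero means data is still pending */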
+ if (!pending &&
+ ((mci_readl(host, STATUS) >> 17) & 0x1fff))
+ pending |= SDMMC_INT_DATA_OVER;
+ }
+
+ if (pending) {
+ /* Check volt switch first, since it can look like an error */
+ if ((host->state == STATE_SENDING_CMD11) &&
+ (pending & SDMMC_INT_VOLT_SWITCH)) {
+ unsigned long irqflags;
+
+ mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
+ pending &= ~SDMMC_INT_VOLT_SWITCH;
+
+ /*
+ * Hold the lock; we know cmd11_timer can't be kicked
+ * off after the lock is released, so safe to delete.
+ */
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ dw_mci_cmd_interrupt(host, pending);
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ del_timer(&host->cmd11_timer);
+ }
+
+ if (pending & DW_MCI_CMD_ERROR_FLAGS) {
+ mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
+ host->cmd_status = pending;
+ smp_wmb();
+ set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+ }
+
+ if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+ /* if there is an error, report DATA_ERROR */
+ mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
+ host->data_status = pending;
+ smp_wmb();
+ set_bit(EVENT_DATA_ERROR, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ }
+
+ if (pending & SDMMC_INT_DATA_OVER) {
+ mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
+ if (!host->data_status)
+ host->data_status = pending;
+ smp_wmb();
+ if (host->dir_status == DW_MCI_RECV_STATUS) {
+ if (host->sg != NULL)
+ dw_mci_read_data_pio(host, true);
+ }
+ set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ }
+
+ if (pending & SDMMC_INT_RXDR) {
+ mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+ if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
+ dw_mci_read_data_pio(host, false);
+ }
+
+ if (pending & SDMMC_INT_TXDR) {
+ mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+ if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
+ dw_mci_write_data_pio(host);
+ }
+
+ if (pending & SDMMC_INT_CMD_DONE) {
+ mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
+ dw_mci_cmd_interrupt(host, pending);
+ }
+
+ if (pending & SDMMC_INT_CD) {
+ mci_writel(host, RINTSTS, SDMMC_INT_CD);
+ dw_mci_handle_cd(host);
+ }
+
+ /* Handle SDIO Interrupts */
+ for (i = 0; i < host->num_slots; i++) {
+ struct dw_mci_slot *slot = host->slot[i];
+
+ if (!slot)
+ continue;
+
+ if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
+ mci_writel(host, RINTSTS,
+ SDMMC_INT_SDIO(slot->sdio_id));
+ mmc_signal_sdio_irq(slot->mmc);
+ }
+ }
+
+ }
+
+#ifdef CONFIG_MMC_DW_IDMAC
+ /* Handle DMA interrupts */
+ if (host->dma_64bit_address == 1) {
+ pending = mci_readl(host, IDSTS64);
+ if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
+ mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
+ SDMMC_IDMAC_INT_RI);
+ mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
+ host->dma_ops->complete(host);
+ }
+ } else {
+ pending = mci_readl(host, IDSTS);
+ if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
+ mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
+ SDMMC_IDMAC_INT_RI);
+ mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
+ host->dma_ops->complete(host);
+ }
+ }
+#endif
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_OF
+/* given a slot id, find out the device node representing that slot */
+static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
+{
+ struct device_node *np;
+ const __be32 *addr;
+ int len;
+
+ if (!dev || !dev->of_node)
+ return NULL;
+
+ for_each_child_of_node(dev->of_node, np) {
+ addr = of_get_property(np, "reg", &len);
+ if (!addr || (len < sizeof(int)))
+ continue;
+ if (be32_to_cpup(addr) == slot)
+ return np;
+ }
+ return NULL;
+}
+
+static struct dw_mci_of_slot_quirks {
+ char *quirk;
+ int id;
+} of_slot_quirks[] = {
+ {
+ .quirk = "disable-wp",
+ .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
+ },
+};
+
+static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
+{
+ struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
+ int quirks = 0;
+ int idx;
+
+ /* get quirks */
+ for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
+ if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
+ dev_warn(dev, "Slot quirk %s is deprecated\n",
+ of_slot_quirks[idx].quirk);
+ quirks |= of_slot_quirks[idx].id;
+ }
+
+ return quirks;
+}
+#else /* CONFIG_OF */
+static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
+{
+ struct mmc_host *mmc;
+ struct dw_mci_slot *slot;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+ int ctrl_id, ret;
+ u32 freq[2];
+
+ mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ slot = mmc_priv(mmc);
+ slot->id = id;
+ slot->sdio_id = host->sdio_id0 + id;
+ slot->mmc = mmc;
+ slot->host = host;
+ host->slot[id] = slot;
+
+ slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
+
+ mmc->ops = &dw_mci_ops;
+ if (of_property_read_u32_array(host->dev->of_node,
+ "clock-freq-min-max", freq, 2)) {
+ mmc->f_min = DW_MCI_FREQ_MIN;
+ mmc->f_max = DW_MCI_FREQ_MAX;
+ } else {
+ mmc->f_min = freq[0];
+ mmc->f_max = freq[1];
+ }
+
+ /* if there are external regulators, get them */
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ goto err_host_allocated;
+
+ if (!mmc->ocr_avail)
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+ if (host->pdata->caps)
+ mmc->caps = host->pdata->caps;
+
+ if (host->pdata->pm_caps)
+ mmc->pm_caps = host->pdata->pm_caps;
+
+ if (host->dev->of_node) {
+ ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
+ if (ctrl_id < 0)
+ ctrl_id = 0;
+ } else {
+ ctrl_id = to_platform_device(host->dev)->id;
+ }
+ if (drv_data && drv_data->caps)
+ mmc->caps |= drv_data->caps[ctrl_id];
+
+ if (host->pdata->caps2)
+ mmc->caps2 = host->pdata->caps2;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err_host_allocated;
+
+ if (host->pdata->blk_settings) {
+ mmc->max_segs = host->pdata->blk_settings->max_segs;
+ mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
+ mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
+ mmc->max_req_size = host->pdata->blk_settings->max_req_size;
+ mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
+ } else {
+ /* Useful defaults if platform data is unset. */
+#ifdef CONFIG_MMC_DW_IDMAC
+ mmc->max_segs = host->ring_size;
+ mmc->max_blk_size = 65536;
+ mmc->max_seg_size = 0x1000;
+ mmc->max_req_size = mmc->max_seg_size * host->ring_size;
+ mmc->max_blk_count = mmc->max_req_size / 512;
+#else
+ mmc->max_segs = 64;
+ mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
+ mmc->max_blk_count = 512;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
+#endif /* CONFIG_MMC_DW_IDMAC */
+ }
+
+ if (dw_mci_get_cd(mmc))
+ set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ else
+ clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto err_host_allocated;
+
+#if defined(CONFIG_DEBUG_FS)
+ dw_mci_init_debugfs(slot);
+#endif
+
+ return 0;
+
+err_host_allocated:
+ mmc_free_host(mmc);
+ return ret;
+}
+
+static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
+{
+ /* Debugfs stuff is cleaned up by mmc core */
+ mmc_remove_host(slot->mmc);
+ slot->host->slot[id] = NULL;
+ mmc_free_host(slot->mmc);
+}
+
+static void dw_mci_init_dma(struct dw_mci *host)
+{
+ int addr_config;
+ /* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
+ addr_config = (mci_readl(host, HCON) >> 27) & 0x01;
+
+ if (addr_config == 1) {
+ /* host supports IDMAC in 64-bit address mode */
+ host->dma_64bit_address = 1;
+ dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
+ if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
+ dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
+ } else {
+ /* host supports IDMAC in 32-bit address mode */
+ host->dma_64bit_address = 0;
+ dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
+ }
+
+ /* Alloc memory for sg translation */
+ host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
+ &host->sg_dma, GFP_KERNEL);
+ if (!host->sg_cpu) {
+ dev_err(host->dev, "%s: could not alloc DMA memory\n",
+ __func__);
+ goto no_dma;
+ }
+
+ /* Determine which DMA interface to use */
+#ifdef CONFIG_MMC_DW_IDMAC
+ host->dma_ops = &dw_mci_idmac_ops;
+ dev_info(host->dev, "Using internal DMA controller.\n");
+#endif
+
+ if (!host->dma_ops)
+ goto no_dma;
+
+ if (host->dma_ops->init && host->dma_ops->start &&
+ host->dma_ops->stop && host->dma_ops->cleanup) {
+ if (host->dma_ops->init(host)) {
+ dev_err(host->dev, "%s: Unable to initialize "
+ "DMA Controller.\n", __func__);
+ goto no_dma;
+ }
+ } else {
+ dev_err(host->dev, "DMA initialization not found.\n");
+ goto no_dma;
+ }
+
+ host->use_dma = 1;
+ return;
+
+no_dma:
+ dev_info(host->dev, "Using PIO mode.\n");
+ host->use_dma = 0;
+ return;
+}
+
+static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+ u32 ctrl;
+
+ ctrl = mci_readl(host, CTRL);
+ ctrl |= reset;
+ mci_writel(host, CTRL, ctrl);
+
+ /* wait till resets clear */
+ do {
+ ctrl = mci_readl(host, CTRL);
+ if (!(ctrl & reset))
+ return true;
+ } while (time_before(jiffies, timeout));
+
+ dev_err(host->dev,
+ "Timeout resetting block (ctrl reset %#x)\n",
+ ctrl & reset);
+
+ return false;
+}
+
+static bool dw_mci_reset(struct dw_mci *host)
+{
+ u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
+ bool ret = false;
+
+ /*
+ * Resetting generates a block interrupt, so clear the
+ * scatter-gather pointer beforehand.
+ */
+ if (host->sg) {
+ sg_miter_stop(&host->sg_miter);
+ host->sg = NULL;
+ }
+
+ if (host->use_dma)
+ flags |= SDMMC_CTRL_DMA_RESET;
+
+ if (dw_mci_ctrl_reset(host, flags)) {
+ /*
+ * In all cases we clear the RAWINTS register to clear any
+ * interrupts.
+ */
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+
+ /* if using dma we wait for dma_req to clear */
+ if (host->use_dma) {
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+ u32 status;
+ do {
+ status = mci_readl(host, STATUS);
+ if (!(status & SDMMC_STATUS_DMA_REQ))
+ break;
+ cpu_relax();
+ } while (time_before(jiffies, timeout));
+
+ if (status & SDMMC_STATUS_DMA_REQ) {
+ dev_err(host->dev,
+ "%s: Timeout waiting for dma_req to "
+ "clear during reset\n", __func__);
+ goto ciu_out;
+ }
+
+ /* when using DMA, we next reset the FIFO again */
+ if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
+ goto ciu_out;
+ }
+ } else {
+ /* if the controller reset bit did clear, then set clock regs */
+ if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
+ dev_err(host->dev, "%s: fifo/dma reset bits didn't "
+ "clear but ciu was reset, doing clock update\n",
+ __func__);
+ goto ciu_out;
+ }
+ }
+
+#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
+ /* It is also recommended that we reset and reprogram idmac */
+ dw_mci_idmac_reset(host);
+#endif
+
+ ret = true;
+
+ciu_out:
+ /* After a CTRL reset we need to have CIU set clock registers */
+ mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
+
+ return ret;
+}
+
+static void dw_mci_cmd11_timer(unsigned long arg)
+{
+ struct dw_mci *host = (struct dw_mci *)arg;
+
+ if (host->state != STATE_SENDING_CMD11) {
+ dev_warn(host->dev, "Unexpected CMD11 timeout\n");
+ return;
+ }
+
+ host->cmd_status = SDMMC_INT_RTO;
+ set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+}
+
+#ifdef CONFIG_OF
+static struct dw_mci_of_quirks {
+ char *quirk;
+ int id;
+} of_quirks[] = {
+ {
+ .quirk = "broken-cd",
+ .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
+ }, {
+ .quirk = "disable-wp",
+ .id = DW_MCI_QUIRK_NO_WRITE_PROTECT,
+ },
+};
+
+static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
+{
+ struct dw_mci_board *pdata;
+ struct device *dev = host->dev;
+ struct device_node *np = dev->of_node;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+ int idx, ret;
+ u32 clock_frequency;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ /* find out number of slots supported */
+ if (of_property_read_u32(dev->of_node, "num-slots",
+ &pdata->num_slots)) {
+ dev_info(dev, "num-slots property not found, "
+ "assuming 1 slot is available\n");
+ pdata->num_slots = 1;
+ }
+
+ /* get quirks */
+ for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
+ if (of_get_property(np, of_quirks[idx].quirk, NULL))
+ pdata->quirks |= of_quirks[idx].id;
+
+ if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
+ dev_info(dev, "fifo-depth property not found, using "
+ "value of FIFOTH register as default\n");
+
+ of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
+
+ if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
+ pdata->bus_hz = clock_frequency;
+
+ if (drv_data && drv_data->parse_dt) {
+ ret = drv_data->parse_dt(host);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ if (of_find_property(np, "supports-highspeed", NULL))
+ pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
+
+ return pdata;
+}
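+
+/*
+ * For illustration only: a hypothetical device-tree node carrying the
+ * generic properties parsed above (the property names are real, the
+ * values are made up):
+ *
+ *	mmc@12200000 {
+ *		compatible = "snps,dw-mshc";
+ *		num-slots = <1>;
+ *		broken-cd;
+ *		fifo-depth = <0x80>;
+ *		card-detect-delay = <200>;
+ *		clock-frequency = <400000000>;
+ *		supports-highspeed;
+ *	};
+ */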
+
+#else /* CONFIG_OF */
+static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_OF */
+
+static void dw_mci_enable_cd(struct dw_mci *host)
+{
+ struct dw_mci_board *brd = host->pdata;
+ unsigned long irqflags;
+ u32 temp;
+ int i;
+
+ /* No need for CD if broken card detection */
+ if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
+ return;
+
+ /* No need for CD if all slots have a non-error GPIO */
+ for (i = 0; i < host->num_slots; i++) {
+ struct dw_mci_slot *slot = host->slot[i];
+
+ if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
+ break;
+ }
+ if (i == host->num_slots)
+ return;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ temp = mci_readl(host, INTMASK);
+ temp |= SDMMC_INT_CD;
+ mci_writel(host, INTMASK, temp);
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+}
+
+int dw_mci_probe(struct dw_mci *host)
+{
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+ int width, i, ret = 0;
+ u32 fifo_size;
+ int init_slots = 0;
+
+ if (!host->pdata) {
+ host->pdata = dw_mci_parse_dt(host);
+ if (IS_ERR(host->pdata)) {
+ dev_err(host->dev, "platform data not available\n");
+ return -EINVAL;
+ }
+ }
+
+ if (host->pdata->num_slots > 1) {
+ dev_err(host->dev,
+ "Platform data must supply num_slots.\n");
+ return -ENODEV;
+ }
+
+ host->biu_clk = devm_clk_get(host->dev, "biu");
+ if (IS_ERR(host->biu_clk)) {
+ dev_dbg(host->dev, "biu clock not available\n");
+ } else {
+ ret = clk_prepare_enable(host->biu_clk);
+ if (ret) {
+ dev_err(host->dev, "failed to enable biu clock\n");
+ return ret;
+ }
+ }
+
+ host->ciu_clk = devm_clk_get(host->dev, "ciu");
+ if (IS_ERR(host->ciu_clk)) {
+ dev_dbg(host->dev, "ciu clock not available\n");
+ host->bus_hz = host->pdata->bus_hz;
+ } else {
+ ret = clk_prepare_enable(host->ciu_clk);
+ if (ret) {
+ dev_err(host->dev, "failed to enable ciu clock\n");
+ goto err_clk_biu;
+ }
+
+ if (host->pdata->bus_hz) {
+ ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
+ if (ret)
+ dev_warn(host->dev,
+ "Unable to set bus rate to %uHz\n",
+ host->pdata->bus_hz);
+ }
+ host->bus_hz = clk_get_rate(host->ciu_clk);
+ }
+
+ if (!host->bus_hz) {
+ dev_err(host->dev,
+ "Platform data must supply bus speed\n");
+ ret = -ENODEV;
+ goto err_clk_ciu;
+ }
+
+ if (drv_data && drv_data->init) {
+ ret = drv_data->init(host);
+ if (ret) {
+ dev_err(host->dev,
+ "implementation specific init failed\n");
+ goto err_clk_ciu;
+ }
+ }
+
+ if (drv_data && drv_data->setup_clock) {
+ ret = drv_data->setup_clock(host);
+ if (ret) {
+ dev_err(host->dev,
+ "implementation specific clock setup failed\n");
+ goto err_clk_ciu;
+ }
+ }
+
+ setup_timer(&host->cmd11_timer,
+ dw_mci_cmd11_timer, (unsigned long)host);
+
+ host->quirks = host->pdata->quirks;
+
+ spin_lock_init(&host->lock);
+ spin_lock_init(&host->irq_lock);
+ INIT_LIST_HEAD(&host->queue);
+
+ /*
+ * Get the host data width - this assumes that HCON has been set with
+ * the correct values.
+ */
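+	/* HCON[9:7] is the H_DATA_WIDTH field; per the DesignWare databook
+	 * (stated here as an assumption): 0 = 16-bit, 1 = 32-bit and
+	 * 2 = 64-bit, which matches the decoding below. */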
+ i = (mci_readl(host, HCON) >> 7) & 0x7;
+ if (!i) {
+ host->push_data = dw_mci_push_data16;
+ host->pull_data = dw_mci_pull_data16;
+ width = 16;
+ host->data_shift = 1;
+ } else if (i == 2) {
+ host->push_data = dw_mci_push_data64;
+ host->pull_data = dw_mci_pull_data64;
+ width = 64;
+ host->data_shift = 3;
+ } else {
+ /* Check for a reserved value, and warn if it is */
+ WARN((i != 1),
+ "HCON reports a reserved host data width!\n"
+ "Defaulting to 32-bit access.\n");
+ host->push_data = dw_mci_push_data32;
+ host->pull_data = dw_mci_pull_data32;
+ width = 32;
+ host->data_shift = 2;
+ }
+
+ /* Reset all blocks */
+ if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
+ return -ENODEV;
+
+ host->dma_ops = host->pdata->dma_ops;
+ dw_mci_init_dma(host);
+
+ /* Clear the interrupts for the host controller */
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+
+ /* Put in max timeout */
+ mci_writel(host, TMOUT, 0xFFFFFFFF);
+
+ /*
+	 * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
+	 * TX_WMark = fifo_size / 2, DMA multiple-transaction size = 8
+ */
+ if (!host->pdata->fifo_depth) {
+ /*
+ * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
+ * have been overwritten by the bootloader, just like we're
+ * about to do, so if you know the value for your hardware, you
+ * should put it in the platform data.
+ */
+ fifo_size = mci_readl(host, FIFOTH);
+ fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
+ } else {
+ fifo_size = host->pdata->fifo_depth;
+ }
+ host->fifo_depth = fifo_size;
+ host->fifoth_val =
+ SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
+ mci_writel(host, FIFOTH, host->fifoth_val);
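+	/* Worked example: for fifo_size = 64 this packs MSIZE = 0x2 (bursts of
+	 * 8 transfers), RX_WMark = 31 and TX_WMark = 32, i.e.
+	 * fifoth_val = (0x2 << 28) | (31 << 16) | 32 = 0x201f0020. */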
+
+ /* disable clock to CIU */
+ mci_writel(host, CLKENA, 0);
+ mci_writel(host, CLKSRC, 0);
+
+ /*
+	 * The data (FIFO) register offset changed in the 2.40a spec, so
+	 * check the version ID and set the DATA register offset accordingly.
+ */
+ host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
+ dev_info(host->dev, "Version ID is %04x\n", host->verid);
+
+ if (host->verid < DW_MMC_240A)
+ host->fifo_reg = host->regs + DATA_OFFSET;
+ else
+ host->fifo_reg = host->regs + DATA_240A_OFFSET;
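+	/* e.g. verid = 0x270a (>= DW_MMC_240A = 0x240a) selects the 0x200
+	 * FIFO offset, while an older IP such as 0x230a keeps it at 0x100 */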
+
+ tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
+ ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
+ host->irq_flags, "dw-mci", host);
+ if (ret)
+ goto err_dmaunmap;
+
+ if (host->pdata->num_slots)
+ host->num_slots = host->pdata->num_slots;
+ else
+ host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
+
+ /*
+ * Enable interrupts for command done, data over, data empty,
+	 * receive ready, and errors such as transmit/receive timeout and
+	 * CRC error
+ */
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+ SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+ DW_MCI_ERROR_FLAGS);
+ mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
+
+ dev_info(host->dev, "DW MMC controller at irq %d, "
+ "%d bit host data width, "
+ "%u deep fifo\n",
+ host->irq, width, fifo_size);
+
+ /* We need at least one slot to succeed */
+ for (i = 0; i < host->num_slots; i++) {
+ ret = dw_mci_init_slot(host, i);
+ if (ret)
+ dev_dbg(host->dev, "slot %d init failed\n", i);
+ else
+ init_slots++;
+ }
+
+ if (init_slots) {
+ dev_info(host->dev, "%d slots initialized\n", init_slots);
+ } else {
+ dev_dbg(host->dev, "attempted to initialize %d slots, "
+ "but failed on all\n", host->num_slots);
+ goto err_dmaunmap;
+ }
+
+ /* Now that slots are all setup, we can enable card detect */
+ dw_mci_enable_cd(host);
+
+ if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
+ dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
+
+ return 0;
+
+err_dmaunmap:
+ if (host->use_dma && host->dma_ops->exit)
+ host->dma_ops->exit(host);
+
+err_clk_ciu:
+ if (!IS_ERR(host->ciu_clk))
+ clk_disable_unprepare(host->ciu_clk);
+
+err_clk_biu:
+ if (!IS_ERR(host->biu_clk))
+ clk_disable_unprepare(host->biu_clk);
+
+ return ret;
+}
+EXPORT_SYMBOL(dw_mci_probe);
+
+void dw_mci_remove(struct dw_mci *host)
+{
+ int i;
+
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+
+ for (i = 0; i < host->num_slots; i++) {
+ dev_dbg(host->dev, "remove slot %d\n", i);
+ if (host->slot[i])
+ dw_mci_cleanup_slot(host->slot[i], i);
+ }
+
+ /* disable clock to CIU */
+ mci_writel(host, CLKENA, 0);
+ mci_writel(host, CLKSRC, 0);
+
+ if (host->use_dma && host->dma_ops->exit)
+ host->dma_ops->exit(host);
+
+ if (!IS_ERR(host->ciu_clk))
+ clk_disable_unprepare(host->ciu_clk);
+
+ if (!IS_ERR(host->biu_clk))
+ clk_disable_unprepare(host->biu_clk);
+}
+EXPORT_SYMBOL(dw_mci_remove);
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * TODO: we should probably disable the clock to the card in the suspend path.
+ */
+int dw_mci_suspend(struct dw_mci *host)
+{
+ return 0;
+}
+EXPORT_SYMBOL(dw_mci_suspend);
+
+int dw_mci_resume(struct dw_mci *host)
+{
+ int i, ret;
+
+ if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
+ ret = -ENODEV;
+ return ret;
+ }
+
+ if (host->use_dma && host->dma_ops->init)
+ host->dma_ops->init(host);
+
+ /*
+	 * Restore the initial value of the FIFOTH register,
+	 * and invalidate prev_blksz by resetting it to zero.
+ */
+ mci_writel(host, FIFOTH, host->fifoth_val);
+ host->prev_blksz = 0;
+
+ /* Put in max timeout */
+ mci_writel(host, TMOUT, 0xFFFFFFFF);
+
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+ SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+ DW_MCI_ERROR_FLAGS);
+ mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
+
+ for (i = 0; i < host->num_slots; i++) {
+ struct dw_mci_slot *slot = host->slot[i];
+ if (!slot)
+ continue;
+ if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
+ dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
+ dw_mci_setup_bus(slot, true);
+ }
+ }
+
+ /* Now that slots are all setup, we can enable card detect */
+ dw_mci_enable_cd(host);
+
+ return 0;
+}
+EXPORT_SYMBOL(dw_mci_resume);
+#endif /* CONFIG_PM_SLEEP */
+
+static int __init dw_mci_init(void)
+{
+ pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
+ return 0;
+}
+
+static void __exit dw_mci_exit(void)
+{
+}
+
+module_init(dw_mci_init);
+module_exit(dw_mci_exit);
+
+MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
+MODULE_AUTHOR("NXP Semiconductor VietNam");
+MODULE_AUTHOR("Imagination Technologies Ltd");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/dw_mmc.h b/kernel/drivers/mmc/host/dw_mmc.h
new file mode 100644
index 000000000..f45ab91de
--- /dev/null
+++ b/kernel/drivers/mmc/host/dw_mmc.h
@@ -0,0 +1,291 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ * (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DW_MMC_H_
+#define _DW_MMC_H_
+
+#define DW_MMC_240A 0x240a
+
+#define SDMMC_CTRL 0x000
+#define SDMMC_PWREN 0x004
+#define SDMMC_CLKDIV 0x008
+#define SDMMC_CLKSRC 0x00c
+#define SDMMC_CLKENA 0x010
+#define SDMMC_TMOUT 0x014
+#define SDMMC_CTYPE 0x018
+#define SDMMC_BLKSIZ 0x01c
+#define SDMMC_BYTCNT 0x020
+#define SDMMC_INTMASK 0x024
+#define SDMMC_CMDARG 0x028
+#define SDMMC_CMD 0x02c
+#define SDMMC_RESP0 0x030
+#define SDMMC_RESP1 0x034
+#define SDMMC_RESP2 0x038
+#define SDMMC_RESP3 0x03c
+#define SDMMC_MINTSTS 0x040
+#define SDMMC_RINTSTS 0x044
+#define SDMMC_STATUS 0x048
+#define SDMMC_FIFOTH 0x04c
+#define SDMMC_CDETECT 0x050
+#define SDMMC_WRTPRT 0x054
+#define SDMMC_GPIO 0x058
+#define SDMMC_TCBCNT 0x05c
+#define SDMMC_TBBCNT 0x060
+#define SDMMC_DEBNCE 0x064
+#define SDMMC_USRID 0x068
+#define SDMMC_VERID 0x06c
+#define SDMMC_HCON 0x070
+#define SDMMC_UHS_REG 0x074
+#define SDMMC_BMOD 0x080
+#define SDMMC_PLDMND 0x084
+#define SDMMC_DBADDR 0x088
+#define SDMMC_IDSTS 0x08c
+#define SDMMC_IDINTEN 0x090
+#define SDMMC_DSCADDR 0x094
+#define SDMMC_BUFADDR 0x098
+#define SDMMC_CDTHRCTL 0x100
+#define SDMMC_DATA(x) (x)
+/*
+ * Registers to support idmac 64-bit address mode
+ */
+#define SDMMC_DBADDRL 0x088
+#define SDMMC_DBADDRU 0x08c
+#define SDMMC_IDSTS64 0x090
+#define SDMMC_IDINTEN64 0x094
+#define SDMMC_DSCADDRL 0x098
+#define SDMMC_DSCADDRU 0x09c
+#define SDMMC_BUFADDRL 0x0A0
+#define SDMMC_BUFADDRU 0x0A4
+
+/*
+ * The data register offset differs according to the version:
+ * lower than 2.40a, the data register offset is 0x100.
+ */
+#define DATA_OFFSET 0x100
+#define DATA_240A_OFFSET 0x200
+
+/* shift bit field */
+#define _SBF(f, v) ((v) << (f))
+
+/* Control register defines */
+#define SDMMC_CTRL_USE_IDMAC BIT(25)
+#define SDMMC_CTRL_CEATA_INT_EN BIT(11)
+#define SDMMC_CTRL_SEND_AS_CCSD BIT(10)
+#define SDMMC_CTRL_SEND_CCSD BIT(9)
+#define SDMMC_CTRL_ABRT_READ_DATA BIT(8)
+#define SDMMC_CTRL_SEND_IRQ_RESP BIT(7)
+#define SDMMC_CTRL_READ_WAIT BIT(6)
+#define SDMMC_CTRL_DMA_ENABLE BIT(5)
+#define SDMMC_CTRL_INT_ENABLE BIT(4)
+#define SDMMC_CTRL_DMA_RESET BIT(2)
+#define SDMMC_CTRL_FIFO_RESET BIT(1)
+#define SDMMC_CTRL_RESET BIT(0)
+/* Clock Enable register defines */
+#define SDMMC_CLKEN_LOW_PWR BIT(16)
+#define SDMMC_CLKEN_ENABLE BIT(0)
+/* time-out register defines */
+#define SDMMC_TMOUT_DATA(n) _SBF(8, (n))
+#define SDMMC_TMOUT_DATA_MSK 0xFFFFFF00
+#define SDMMC_TMOUT_RESP(n) ((n) & 0xFF)
+#define SDMMC_TMOUT_RESP_MSK 0xFF
+/* card-type register defines */
+#define SDMMC_CTYPE_8BIT BIT(16)
+#define SDMMC_CTYPE_4BIT BIT(0)
+#define SDMMC_CTYPE_1BIT 0
+/* Interrupt status & mask register defines */
+#define SDMMC_INT_SDIO(n) BIT(16 + (n))
+#define SDMMC_INT_EBE BIT(15)
+#define SDMMC_INT_ACD BIT(14)
+#define SDMMC_INT_SBE BIT(13)
+#define SDMMC_INT_HLE BIT(12)
+#define SDMMC_INT_FRUN BIT(11)
+#define SDMMC_INT_HTO BIT(10)
+#define SDMMC_INT_VOLT_SWITCH BIT(10) /* overloads bit 10! */
+#define SDMMC_INT_DRTO BIT(9)
+#define SDMMC_INT_RTO BIT(8)
+#define SDMMC_INT_DCRC BIT(7)
+#define SDMMC_INT_RCRC BIT(6)
+#define SDMMC_INT_RXDR BIT(5)
+#define SDMMC_INT_TXDR BIT(4)
+#define SDMMC_INT_DATA_OVER BIT(3)
+#define SDMMC_INT_CMD_DONE BIT(2)
+#define SDMMC_INT_RESP_ERR BIT(1)
+#define SDMMC_INT_CD BIT(0)
+#define SDMMC_INT_ERROR 0xbfc2
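+/* 0xbfc2 = EBE | SBE | HLE | FRUN | HTO | DRTO | RTO | DCRC | RCRC |
+ * RESP_ERR, i.e. bits 15, 13-6 and 1 of the interrupt registers */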
+/* Command register defines */
+#define SDMMC_CMD_START BIT(31)
+#define SDMMC_CMD_USE_HOLD_REG BIT(29)
+#define SDMMC_CMD_VOLT_SWITCH BIT(28)
+#define SDMMC_CMD_CCS_EXP BIT(23)
+#define SDMMC_CMD_CEATA_RD BIT(22)
+#define SDMMC_CMD_UPD_CLK BIT(21)
+#define SDMMC_CMD_INIT BIT(15)
+#define SDMMC_CMD_STOP BIT(14)
+#define SDMMC_CMD_PRV_DAT_WAIT BIT(13)
+#define SDMMC_CMD_SEND_STOP BIT(12)
+#define SDMMC_CMD_STRM_MODE BIT(11)
+#define SDMMC_CMD_DAT_WR BIT(10)
+#define SDMMC_CMD_DAT_EXP BIT(9)
+#define SDMMC_CMD_RESP_CRC BIT(8)
+#define SDMMC_CMD_RESP_LONG BIT(7)
+#define SDMMC_CMD_RESP_EXP BIT(6)
+#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
+/* Status register defines */
+#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF)
+#define SDMMC_STATUS_DMA_REQ BIT(31)
+#define SDMMC_STATUS_BUSY BIT(9)
+/* FIFOTH register defines */
+#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \
+ ((r) & 0xFFF) << 16 | \
+ ((t) & 0xFFF))
+/* Internal DMAC interrupt defines */
+#define SDMMC_IDMAC_INT_AI BIT(9)
+#define SDMMC_IDMAC_INT_NI BIT(8)
+#define SDMMC_IDMAC_INT_CES BIT(5)
+#define SDMMC_IDMAC_INT_DU BIT(4)
+#define SDMMC_IDMAC_INT_FBE BIT(2)
+#define SDMMC_IDMAC_INT_RI BIT(1)
+#define SDMMC_IDMAC_INT_TI BIT(0)
+/* Internal DMAC bus mode bits */
+#define SDMMC_IDMAC_ENABLE BIT(7)
+#define SDMMC_IDMAC_FB BIT(1)
+#define SDMMC_IDMAC_SWRESET BIT(0)
+/* Version ID register define */
+#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
+/* Card read threshold */
+#define SDMMC_SET_RD_THLD(v, x) (((v) & 0x1FFF) << 16 | (x))
+#define SDMMC_UHS_18V BIT(0)
+/* All ctrl reset bits */
+#define SDMMC_CTRL_ALL_RESET_FLAGS \
+ (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET)
+
+/* FIFO register access macros. These must not change the data endianness,
+ * as the data is written to memory to be handled by the upper layers */
+#define mci_fifo_readw(__reg) __raw_readw(__reg)
+#define mci_fifo_readl(__reg) __raw_readl(__reg)
+#define mci_fifo_readq(__reg) __raw_readq(__reg)
+
+/* Note: these take the FIFO address as the first argument and the value as
+ * the second */
+#define mci_fifo_writew(__reg, __value)	__raw_writew(__value, __reg)
+#define mci_fifo_writel(__reg, __value)	__raw_writel(__value, __reg)
+#define mci_fifo_writeq(__reg, __value)	__raw_writeq(__value, __reg)
+
+/* Register access macros */
+#define mci_readl(dev, reg) \
+ readl_relaxed((dev)->regs + SDMMC_##reg)
+#define mci_writel(dev, reg, value) \
+ writel_relaxed((value), (dev)->regs + SDMMC_##reg)
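+/* e.g. mci_readl(host, CTRL) expands to
+ * readl_relaxed((host)->regs + SDMMC_CTRL) via token pasting */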
+
+/* 16-bit FIFO access macros */
+#define mci_readw(dev, reg) \
+ readw_relaxed((dev)->regs + SDMMC_##reg)
+#define mci_writew(dev, reg, value) \
+ writew_relaxed((value), (dev)->regs + SDMMC_##reg)
+
+/* 64-bit FIFO access macros */
+#ifdef readq
+#define mci_readq(dev, reg) \
+ readq_relaxed((dev)->regs + SDMMC_##reg)
+#define mci_writeq(dev, reg, value) \
+ writeq_relaxed((value), (dev)->regs + SDMMC_##reg)
+#else
+/*
+ * Dummy readq implementation for architectures that don't define it.
+ *
+ * We assume that none of these architectures configures the IP
+ * block with a 64-bit FIFO width, so this code should never be
+ * executed on those machines. Defining these macros here keeps the
+ * rest of the code free from ifdefs.
+ */
+#define mci_readq(dev, reg) \
+ (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg))
+#define mci_writeq(dev, reg, value) \
+ (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg) = (value))
+
+#define __raw_writeq(__value, __reg) \
+ (*(volatile u64 __force *)(__reg) = (__value))
+#define __raw_readq(__reg) (*(volatile u64 __force *)(__reg))
+#endif
+
+extern int dw_mci_probe(struct dw_mci *host);
+extern void dw_mci_remove(struct dw_mci *host);
+#ifdef CONFIG_PM_SLEEP
+extern int dw_mci_suspend(struct dw_mci *host);
+extern int dw_mci_resume(struct dw_mci *host);
+#endif
+
+/**
+ * struct dw_mci_slot - MMC slot state
+ * @mmc: The mmc_host representing this slot.
+ * @host: The MMC controller this slot is using.
+ * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX)
+ * @ctype: Card type for this slot.
+ * @mrq: mmc_request currently being processed or waiting to be
+ * processed, or NULL when the slot is idle.
+ * @queue_node: List node for placing this node in the @queue list of
+ * &struct dw_mci.
+ * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @__clk_old:	The last clock value that was actually set, reflecting the
+ *		clock divider. Keeping track of this helps us avoid spamming
+ *		the console with CONFIG_MMC_CLKGATE.
+ * @flags: Random state bits associated with the slot.
+ * @id: Number of this slot.
+ * @sdio_id: Number of this slot in the SDIO interrupt registers.
+ */
+struct dw_mci_slot {
+ struct mmc_host *mmc;
+ struct dw_mci *host;
+
+ int quirks;
+
+ u32 ctype;
+
+ struct mmc_request *mrq;
+ struct list_head queue_node;
+
+ unsigned int clock;
+ unsigned int __clk_old;
+
+ unsigned long flags;
+#define DW_MMC_CARD_PRESENT 0
+#define DW_MMC_CARD_NEED_INIT 1
+#define DW_MMC_CARD_NO_LOW_PWR 2
+ int id;
+ int sdio_id;
+};
+
+/**
+ * struct dw_mci_drv_data - dw-mshc implementation specific driver data
+ * @caps: mmc subsystem specified capabilities of the controller(s).
+ * @init: early implementation specific initialization.
+ * @setup_clock: implementation specific clock configuration.
+ * @prepare_command: handle CMD register extensions.
+ * @set_ios: handle bus specific extensions.
+ * @parse_dt: parse implementation specific device tree properties.
+ * @execute_tuning: implementation specific tuning procedure.
+ *
+ * Provide controller implementation specific extensions. The usage of this
+ * data structure is fully optional and usage of each member in this structure
+ * is optional as well.
+ */
+struct dw_mci_drv_data {
+ unsigned long *caps;
+ int (*init)(struct dw_mci *host);
+ int (*setup_clock)(struct dw_mci *host);
+ void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
+ void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
+ int (*parse_dt)(struct dw_mci *host);
+ int (*execute_tuning)(struct dw_mci_slot *slot);
+ int (*prepare_hs400_tuning)(struct dw_mci *host,
+ struct mmc_ios *ios);
+};
+#endif /* _DW_MMC_H_ */
diff --git a/kernel/drivers/mmc/host/jz4740_mmc.c b/kernel/drivers/mmc/host/jz4740_mmc.c
new file mode 100644
index 000000000..76e8bce6f
--- /dev/null
+++ b/kernel/drivers/mmc/host/jz4740_mmc.c
@@ -0,0 +1,1172 @@
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * JZ4740 SD/MMC controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/clk.h>
+
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <asm/mach-jz4740/gpio.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+#include <asm/mach-jz4740/dma.h>
+#include <asm/mach-jz4740/jz4740_mmc.h>
+
+#define JZ_REG_MMC_STRPCL 0x00
+#define JZ_REG_MMC_STATUS 0x04
+#define JZ_REG_MMC_CLKRT 0x08
+#define JZ_REG_MMC_CMDAT 0x0C
+#define JZ_REG_MMC_RESTO 0x10
+#define JZ_REG_MMC_RDTO 0x14
+#define JZ_REG_MMC_BLKLEN 0x18
+#define JZ_REG_MMC_NOB 0x1C
+#define JZ_REG_MMC_SNOB 0x20
+#define JZ_REG_MMC_IMASK 0x24
+#define JZ_REG_MMC_IREG 0x28
+#define JZ_REG_MMC_CMD 0x2C
+#define JZ_REG_MMC_ARG 0x30
+#define JZ_REG_MMC_RESP_FIFO 0x34
+#define JZ_REG_MMC_RXFIFO 0x38
+#define JZ_REG_MMC_TXFIFO 0x3C
+
+#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
+#define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
+#define JZ_MMC_STRPCL_START_READWAIT BIT(5)
+#define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
+#define JZ_MMC_STRPCL_RESET BIT(3)
+#define JZ_MMC_STRPCL_START_OP BIT(2)
+#define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
+#define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
+#define JZ_MMC_STRPCL_CLOCK_START BIT(1)
+
+
+#define JZ_MMC_STATUS_IS_RESETTING BIT(15)
+#define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
+#define JZ_MMC_STATUS_PRG_DONE BIT(13)
+#define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
+#define JZ_MMC_STATUS_END_CMD_RES BIT(11)
+#define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
+#define JZ_MMC_STATUS_IS_READWAIT BIT(9)
+#define JZ_MMC_STATUS_CLK_EN BIT(8)
+#define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
+#define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
+#define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
+#define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
+#define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
+#define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
+#define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
+#define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)
+
+#define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
+#define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))
+
+
+#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
+#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
+#define JZ_MMC_CMDAT_DMA_EN BIT(8)
+#define JZ_MMC_CMDAT_INIT BIT(7)
+#define JZ_MMC_CMDAT_BUSY BIT(6)
+#define JZ_MMC_CMDAT_STREAM BIT(5)
+#define JZ_MMC_CMDAT_WRITE BIT(4)
+#define JZ_MMC_CMDAT_DATA_EN BIT(3)
+#define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
+#define JZ_MMC_CMDAT_RSP_R1 1
+#define JZ_MMC_CMDAT_RSP_R2 2
+#define JZ_MMC_CMDAT_RSP_R3 3
+
+#define JZ_MMC_IRQ_SDIO BIT(7)
+#define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
+#define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
+#define JZ_MMC_IRQ_END_CMD_RES BIT(2)
+#define JZ_MMC_IRQ_PRG_DONE BIT(1)
+#define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)
+
+
+#define JZ_MMC_CLK_RATE 24000000
+
+enum jz4740_mmc_state {
+ JZ4740_MMC_STATE_READ_RESPONSE,
+ JZ4740_MMC_STATE_TRANSFER_DATA,
+ JZ4740_MMC_STATE_SEND_STOP,
+ JZ4740_MMC_STATE_DONE,
+};
+
+struct jz4740_mmc_host_next {
+ int sg_len;
+ s32 cookie;
+};
+
+struct jz4740_mmc_host {
+ struct mmc_host *mmc;
+ struct platform_device *pdev;
+ struct jz4740_mmc_platform_data *pdata;
+ struct clk *clk;
+
+ int irq;
+ int card_detect_irq;
+
+ void __iomem *base;
+ struct resource *mem_res;
+ struct mmc_request *req;
+ struct mmc_command *cmd;
+
+ unsigned long waiting;
+
+ uint32_t cmdat;
+
+ uint16_t irq_mask;
+
+ spinlock_t lock;
+
+ struct timer_list timeout_timer;
+ struct sg_mapping_iter miter;
+ enum jz4740_mmc_state state;
+
+ /* DMA support */
+ struct dma_chan *dma_rx;
+ struct dma_chan *dma_tx;
+ struct jz4740_mmc_host_next next_data;
+ bool use_dma;
+ int sg_len;
+
+/* The DMA trigger level is 8 words; that is, a DMA read is triggered when
+ * the number of data words in MSC_RXFIFO is >= 8, and a DMA write is
+ * triggered when the number of data words in MSC_TXFIFO is < 8.
+ */
+#define JZ4740_MMC_FIFO_HALF_SIZE 8
+};
+
+/*----------------------------------------------------------------------------*/
+/* DMA infrastructure */
+
+static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
+{
+ if (!host->use_dma)
+ return;
+
+ dma_release_channel(host->dma_tx);
+ dma_release_channel(host->dma_rx);
+}
+
+static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->dma_tx = dma_request_channel(mask, NULL, host);
+ if (!host->dma_tx) {
+ dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
+ return -ENODEV;
+ }
+
+ host->dma_rx = dma_request_channel(mask, NULL, host);
+ if (!host->dma_rx) {
+ dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
+ goto free_master_write;
+ }
+
+ /* Initialize DMA pre request cookie */
+ host->next_data.cookie = 1;
+
+ return 0;
+
+free_master_write:
+ dma_release_channel(host->dma_tx);
+ return -ENODEV;
+}
+
+static inline int jz4740_mmc_get_dma_dir(struct mmc_data *data)
+{
+ return (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+}
+
+static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
+ struct mmc_data *data)
+{
+ return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;
+}
+
+static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
+ struct mmc_data *data)
+{
+ struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
+ enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
+
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+/* Prepares DMA data for current/next transfer, returns non-zero on failure */
+static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
+ struct mmc_data *data,
+ struct jz4740_mmc_host_next *next,
+ struct dma_chan *chan)
+{
+ struct jz4740_mmc_host_next *next_data = &host->next_data;
+ enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
+ int sg_len;
+
+ if (!next && data->host_cookie &&
+ data->host_cookie != host->next_data.cookie) {
+ dev_warn(mmc_dev(host->mmc),
+ "[%s] invalid cookie: data->host_cookie %d host->next_data.cookie %d\n",
+ __func__,
+ data->host_cookie,
+ host->next_data.cookie);
+ data->host_cookie = 0;
+ }
+
+ /* Check if next job is already prepared */
+ if (next || data->host_cookie != host->next_data.cookie) {
+ sg_len = dma_map_sg(chan->device->dev,
+ data->sg,
+ data->sg_len,
+ dir);
+
+ } else {
+ sg_len = next_data->sg_len;
+ next_data->sg_len = 0;
+ }
+
+ if (sg_len <= 0) {
+ dev_err(mmc_dev(host->mmc),
+ "Failed to map scatterlist for DMA operation\n");
+ return -EINVAL;
+ }
+
+ if (next) {
+ next->sg_len = sg_len;
+ data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
+ } else
+ host->sg_len = sg_len;
+
+ return 0;
+}
+
+static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
+ struct mmc_data *data)
+{
+ int ret;
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config conf = {
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
+ .dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
+ };
+
+ if (data->flags & MMC_DATA_WRITE) {
+ conf.direction = DMA_MEM_TO_DEV;
+ conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
+ conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
+ chan = host->dma_tx;
+ } else {
+ conf.direction = DMA_DEV_TO_MEM;
+ conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
+ conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
+ chan = host->dma_rx;
+ }
+
+ ret = jz4740_mmc_prepare_dma_data(host, data, NULL, chan);
+ if (ret)
+ return ret;
+
+ dmaengine_slave_config(chan, &conf);
+ desc = dmaengine_prep_slave_sg(chan,
+ data->sg,
+ host->sg_len,
+ conf.direction,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(mmc_dev(host->mmc),
+ "Failed to allocate DMA %s descriptor",
+ conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
+ goto dma_unmap;
+ }
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+
+ return 0;
+
+dma_unmap:
+ jz4740_mmc_dma_unmap(host, data);
+ return -ENOMEM;
+}
+
+static void jz4740_mmc_pre_request(struct mmc_host *mmc,
+ struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct jz4740_mmc_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+ struct jz4740_mmc_host_next *next_data = &host->next_data;
+
+ BUG_ON(data->host_cookie);
+
+ if (host->use_dma) {
+ struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
+
+ if (jz4740_mmc_prepare_dma_data(host, data, next_data, chan))
+ data->host_cookie = 0;
+ }
+}
+
+static void jz4740_mmc_post_request(struct mmc_host *mmc,
+ struct mmc_request *mrq,
+ int err)
+{
+ struct jz4740_mmc_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (host->use_dma && data->host_cookie) {
+ jz4740_mmc_dma_unmap(host, data);
+ data->host_cookie = 0;
+ }
+
+ if (err) {
+ struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
+
+ dmaengine_terminate_all(chan);
+ }
+}
+
+/*----------------------------------------------------------------------------*/
+
+static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
+ unsigned int irq, bool enabled)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (enabled)
+ host->irq_mask &= ~irq;
+ else
+ host->irq_mask |= irq;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
+}
+
+static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
+ bool start_transfer)
+{
+ uint16_t val = JZ_MMC_STRPCL_CLOCK_START;
+
+ if (start_transfer)
+ val |= JZ_MMC_STRPCL_START_OP;
+
+ writew(val, host->base + JZ_REG_MMC_STRPCL);
+}
+
+static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
+{
+ uint32_t status;
+ unsigned int timeout = 1000;
+
+ writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
+ do {
+ status = readl(host->base + JZ_REG_MMC_STATUS);
+ } while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
+}
+
+static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
+{
+ uint32_t status;
+ unsigned int timeout = 1000;
+
+ writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
+ udelay(10);
+ do {
+ status = readl(host->base + JZ_REG_MMC_STATUS);
+ } while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
+}
+
+static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
+{
+ struct mmc_request *req;
+
+ req = host->req;
+ host->req = NULL;
+
+ mmc_request_done(host->mmc, req);
+}
+
+static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
+ unsigned int irq)
+{
+ unsigned int timeout = 0x800;
+ uint16_t status;
+
+ do {
+ status = readw(host->base + JZ_REG_MMC_IREG);
+ } while (!(status & irq) && --timeout);
+
+ if (timeout == 0) {
+ set_bit(0, &host->waiting);
+ mod_timer(&host->timeout_timer, jiffies + 5*HZ);
+ jz4740_mmc_set_irq_enabled(host, irq, true);
+ return true;
+ }
+
+ return false;
+}
+
+static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
+ struct mmc_data *data)
+{
+ int status;
+
+ status = readl(host->base + JZ_REG_MMC_STATUS);
+ if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
+ if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
+ host->req->cmd->error = -ETIMEDOUT;
+ data->error = -ETIMEDOUT;
+ } else {
+ host->req->cmd->error = -EIO;
+ data->error = -EIO;
+ }
+ } else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
+ if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
+ host->req->cmd->error = -ETIMEDOUT;
+ data->error = -ETIMEDOUT;
+ } else {
+ host->req->cmd->error = -EIO;
+ data->error = -EIO;
+ }
+ }
+}
+
+static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
+ struct mmc_data *data)
+{
+ struct sg_mapping_iter *miter = &host->miter;
+ void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
+ uint32_t *buf;
+ bool timeout;
+ size_t i, j;
+
+ while (sg_miter_next(miter)) {
+ buf = miter->addr;
+ i = miter->length / 4;
+ j = i / 8;
+ i = i & 0x7;
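+		/* j now counts full 8-word FIFO bursts, i the leftover words */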
+ while (j) {
+ timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
+ if (unlikely(timeout))
+ goto poll_timeout;
+
+ writel(buf[0], fifo_addr);
+ writel(buf[1], fifo_addr);
+ writel(buf[2], fifo_addr);
+ writel(buf[3], fifo_addr);
+ writel(buf[4], fifo_addr);
+ writel(buf[5], fifo_addr);
+ writel(buf[6], fifo_addr);
+ writel(buf[7], fifo_addr);
+ buf += 8;
+ --j;
+ }
+ if (unlikely(i)) {
+ timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
+ if (unlikely(timeout))
+ goto poll_timeout;
+
+ while (i) {
+ writel(*buf, fifo_addr);
+ ++buf;
+ --i;
+ }
+ }
+ data->bytes_xfered += miter->length;
+ }
+ sg_miter_stop(miter);
+
+ return false;
+
+poll_timeout:
+ miter->consumed = (void *)buf - miter->addr;
+ data->bytes_xfered += miter->consumed;
+ sg_miter_stop(miter);
+
+ return true;
+}
+
+static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
+ struct mmc_data *data)
+{
+ struct sg_mapping_iter *miter = &host->miter;
+ void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
+ uint32_t *buf;
+ uint32_t d;
+ uint16_t status;
+ size_t i, j;
+ unsigned int timeout;
+
+ while (sg_miter_next(miter)) {
+ buf = miter->addr;
+ i = miter->length;
+ j = i / 32;
+ i = i & 0x1f;
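+		/* unlike the write path these are byte counts: j is the number
+		 * of full 32-byte (8-word) bursts, i the leftover bytes */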
+ while (j) {
+ timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
+ if (unlikely(timeout))
+ goto poll_timeout;
+
+ buf[0] = readl(fifo_addr);
+ buf[1] = readl(fifo_addr);
+ buf[2] = readl(fifo_addr);
+ buf[3] = readl(fifo_addr);
+ buf[4] = readl(fifo_addr);
+ buf[5] = readl(fifo_addr);
+ buf[6] = readl(fifo_addr);
+ buf[7] = readl(fifo_addr);
+
+ buf += 8;
+ --j;
+ }
+
+ if (unlikely(i)) {
+ timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
+ if (unlikely(timeout))
+ goto poll_timeout;
+
+ while (i >= 4) {
+ *buf++ = readl(fifo_addr);
+ i -= 4;
+ }
+ if (unlikely(i > 0)) {
+ d = readl(fifo_addr);
+ memcpy(buf, &d, i);
+ }
+ }
+ data->bytes_xfered += miter->length;
+
+ /* This can go away once MIPS implements
+ * flush_kernel_dcache_page */
+ flush_dcache_page(miter->page);
+ }
+ sg_miter_stop(miter);
+
+	/* For whatever reason there is sometimes one more word in the FIFO
+	 * than requested */
+ timeout = 1000;
+ status = readl(host->base + JZ_REG_MMC_STATUS);
+ while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
+ d = readl(fifo_addr);
+ status = readl(host->base + JZ_REG_MMC_STATUS);
+ }
+
+ return false;
+
+poll_timeout:
+ miter->consumed = (void *)buf - miter->addr;
+ data->bytes_xfered += miter->consumed;
+ sg_miter_stop(miter);
+
+ return true;
+}
+
+static void jz4740_mmc_timeout(unsigned long data)
+{
+ struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data;
+
+ if (!test_and_clear_bit(0, &host->waiting))
+ return;
+
+ jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);
+
+ host->req->cmd->error = -ETIMEDOUT;
+ jz4740_mmc_request_done(host);
+}
+
+static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ int i;
+ uint16_t tmp;
+ void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;
+
+ if (cmd->flags & MMC_RSP_136) {
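+		/* The 128-bit R2 payload arrives as 16-bit FIFO words skewed
+		 * by one byte, so each 32-bit resp word is stitched together
+		 * from parts of three consecutive reads (<< 24, << 8, >> 8). */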
+ tmp = readw(fifo_addr);
+ for (i = 0; i < 4; ++i) {
+ cmd->resp[i] = tmp << 24;
+ tmp = readw(fifo_addr);
+ cmd->resp[i] |= tmp << 8;
+ tmp = readw(fifo_addr);
+ cmd->resp[i] |= tmp >> 8;
+ }
+ } else {
+ cmd->resp[0] = readw(fifo_addr) << 24;
+ cmd->resp[0] |= readw(fifo_addr) << 8;
+ cmd->resp[0] |= readw(fifo_addr) & 0xff;
+ }
+}
+
+static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ uint32_t cmdat = host->cmdat;
+
+ host->cmdat &= ~JZ_MMC_CMDAT_INIT;
+ jz4740_mmc_clock_disable(host);
+
+ host->cmd = cmd;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ cmdat |= JZ_MMC_CMDAT_BUSY;
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1B:
+ case MMC_RSP_R1:
+ cmdat |= JZ_MMC_CMDAT_RSP_R1;
+ break;
+ case MMC_RSP_R2:
+ cmdat |= JZ_MMC_CMDAT_RSP_R2;
+ break;
+ case MMC_RSP_R3:
+ cmdat |= JZ_MMC_CMDAT_RSP_R3;
+ break;
+ default:
+ break;
+ }
+
+ if (cmd->data) {
+ cmdat |= JZ_MMC_CMDAT_DATA_EN;
+ if (cmd->data->flags & MMC_DATA_WRITE)
+ cmdat |= JZ_MMC_CMDAT_WRITE;
+ if (cmd->data->flags & MMC_DATA_STREAM)
+ cmdat |= JZ_MMC_CMDAT_STREAM;
+ if (host->use_dma)
+ cmdat |= JZ_MMC_CMDAT_DMA_EN;
+
+ writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
+ writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
+ }
+
+ writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
+ writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
+ writel(cmdat, host->base + JZ_REG_MMC_CMDAT);
+
+ jz4740_mmc_clock_enable(host, 1);
+}
+
+static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
+{
+ struct mmc_command *cmd = host->req->cmd;
+ struct mmc_data *data = cmd->data;
+ int direction;
+
+ if (data->flags & MMC_DATA_READ)
+ direction = SG_MITER_TO_SG;
+ else
+ direction = SG_MITER_FROM_SG;
+
+ sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
+}
+
+
+static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
+{
+ struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
+ struct mmc_command *cmd = host->req->cmd;
+ struct mmc_request *req = host->req;
+ struct mmc_data *data = cmd->data;
+ bool timeout = false;
+
+ if (cmd->error)
+ host->state = JZ4740_MMC_STATE_DONE;
+
+ switch (host->state) {
+ case JZ4740_MMC_STATE_READ_RESPONSE:
+ if (cmd->flags & MMC_RSP_PRESENT)
+ jz4740_mmc_read_response(host, cmd);
+
+ if (!data)
+ break;
+
+ jz_mmc_prepare_data_transfer(host);
+
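+		/* fall through */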
+ case JZ4740_MMC_STATE_TRANSFER_DATA:
+ if (host->use_dma) {
+ /* Use DMA if enabled.
+ * Data transfer direction is defined later by
+ * relying on data flags in
+ * jz4740_mmc_prepare_dma_data() and
+ * jz4740_mmc_start_dma_transfer().
+ */
+ timeout = jz4740_mmc_start_dma_transfer(host, data);
+ data->bytes_xfered = data->blocks * data->blksz;
+ } else if (data->flags & MMC_DATA_READ)
+ /* Use PIO if DMA is not enabled.
+ * Data transfer direction was defined before
+ * by relying on data flags in
+ * jz_mmc_prepare_data_transfer().
+ */
+ timeout = jz4740_mmc_read_data(host, data);
+ else
+ timeout = jz4740_mmc_write_data(host, data);
+
+ if (unlikely(timeout)) {
+ host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
+ break;
+ }
+
+ jz4740_mmc_transfer_check_state(host, data);
+
+ timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
+ if (unlikely(timeout)) {
+ host->state = JZ4740_MMC_STATE_SEND_STOP;
+ break;
+ }
+ writew(JZ_MMC_IRQ_DATA_TRAN_DONE, host->base + JZ_REG_MMC_IREG);
+
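+		/* fall through */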
+ case JZ4740_MMC_STATE_SEND_STOP:
+ if (!req->stop)
+ break;
+
+ jz4740_mmc_send_command(host, req->stop);
+
+ if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
+ timeout = jz4740_mmc_poll_irq(host,
+ JZ_MMC_IRQ_PRG_DONE);
+ if (timeout) {
+ host->state = JZ4740_MMC_STATE_DONE;
+ break;
+ }
+ }
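+		/* fall through */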
+ case JZ4740_MMC_STATE_DONE:
+ break;
+ }
+
+ if (!timeout)
+ jz4740_mmc_request_done(host);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t jz_mmc_irq(int irq, void *devid)
+{
+ struct jz4740_mmc_host *host = devid;
+ struct mmc_command *cmd = host->cmd;
+ uint16_t irq_reg, status, tmp;
+
+ irq_reg = readw(host->base + JZ_REG_MMC_IREG);
+
+ tmp = irq_reg;
+ irq_reg &= ~host->irq_mask;
+
+ tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
+ JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);
+
+ if (tmp != irq_reg)
+ writew(tmp & ~irq_reg, host->base + JZ_REG_MMC_IREG);
+
+ if (irq_reg & JZ_MMC_IRQ_SDIO) {
+ writew(JZ_MMC_IRQ_SDIO, host->base + JZ_REG_MMC_IREG);
+ mmc_signal_sdio_irq(host->mmc);
+ irq_reg &= ~JZ_MMC_IRQ_SDIO;
+ }
+
+ if (host->req && cmd && irq_reg) {
+ if (test_and_clear_bit(0, &host->waiting)) {
+ del_timer(&host->timeout_timer);
+
+ status = readl(host->base + JZ_REG_MMC_STATUS);
+
+ if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
+ cmd->error = -ETIMEDOUT;
+ } else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
+ cmd->error = -EIO;
+ } else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
+ JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
+ if (cmd->data)
+ cmd->data->error = -EIO;
+ cmd->error = -EIO;
+ }
+
+ jz4740_mmc_set_irq_enabled(host, irq_reg, false);
+ writew(irq_reg, host->base + JZ_REG_MMC_IREG);
+
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
+{
+ int div = 0;
+ int real_rate;
+
+ jz4740_mmc_clock_disable(host);
+ clk_set_rate(host->clk, JZ_MMC_CLK_RATE);
+
+ real_rate = clk_get_rate(host->clk);
+
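+	/* Halve the clock until it fits: e.g. with a 24 MHz base, a request
+	 * for 400 kHz yields div = 6 and real_rate = 375000 (24 MHz / 2^6). */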
+ while (real_rate > rate && div < 7) {
+ ++div;
+ real_rate >>= 1;
+ }
+
+ writew(div, host->base + JZ_REG_MMC_CLKRT);
+ return real_rate;
+}
+
+static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct jz4740_mmc_host *host = mmc_priv(mmc);
+
+ host->req = req;
+
+ writew(0xffff, host->base + JZ_REG_MMC_IREG);
+
+ writew(JZ_MMC_IRQ_END_CMD_RES, host->base + JZ_REG_MMC_IREG);
+ jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);
+
+ host->state = JZ4740_MMC_STATE_READ_RESPONSE;
+ set_bit(0, &host->waiting);
+ mod_timer(&host->timeout_timer, jiffies + 5*HZ);
+ jz4740_mmc_send_command(host, req->cmd);
+}
+
+static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct jz4740_mmc_host *host = mmc_priv(mmc);
+ if (ios->clock)
+ jz4740_mmc_set_clock_rate(host, ios->clock);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ jz4740_mmc_reset(host);
+ if (gpio_is_valid(host->pdata->gpio_power))
+ gpio_set_value(host->pdata->gpio_power,
+ !host->pdata->power_active_low);
+ host->cmdat |= JZ_MMC_CMDAT_INIT;
+ clk_prepare_enable(host->clk);
+ break;
+ case MMC_POWER_ON:
+ break;
+ default:
+ if (gpio_is_valid(host->pdata->gpio_power))
+ gpio_set_value(host->pdata->gpio_power,
+ host->pdata->power_active_low);
+ clk_disable_unprepare(host->clk);
+ break;
+ }
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
+ break;
+ case MMC_BUS_WIDTH_4:
+ host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
+ break;
+ default:
+ break;
+ }
+}
+
+static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct jz4740_mmc_host *host = mmc_priv(mmc);
+ jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
+}
+
+static const struct mmc_host_ops jz4740_mmc_ops = {
+ .request = jz4740_mmc_request,
+ .pre_req = jz4740_mmc_pre_request,
+ .post_req = jz4740_mmc_post_request,
+ .set_ios = jz4740_mmc_set_ios,
+ .get_ro = mmc_gpio_get_ro,
+ .get_cd = mmc_gpio_get_cd,
+ .enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
+};
+
+static const struct jz_gpio_bulk_request jz4740_mmc_pins[] = {
+ JZ_GPIO_BULK_PIN(MSC_CMD),
+ JZ_GPIO_BULK_PIN(MSC_CLK),
+ JZ_GPIO_BULK_PIN(MSC_DATA0),
+ JZ_GPIO_BULK_PIN(MSC_DATA1),
+ JZ_GPIO_BULK_PIN(MSC_DATA2),
+ JZ_GPIO_BULK_PIN(MSC_DATA3),
+};
+
+static int jz4740_mmc_request_gpio(struct device *dev, int gpio,
+ const char *name, bool output, int value)
+{
+ int ret;
+
+ if (!gpio_is_valid(gpio))
+ return 0;
+
+ ret = gpio_request(gpio, name);
+ if (ret) {
+ dev_err(dev, "Failed to request %s gpio: %d\n", name, ret);
+ return ret;
+ }
+
+ if (output)
+ gpio_direction_output(gpio, value);
+ else
+ gpio_direction_input(gpio);
+
+ return 0;
+}
+
+static int jz4740_mmc_request_gpios(struct mmc_host *mmc,
+ struct platform_device *pdev)
+{
+ struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data;
+ int ret = 0;
+
+ if (!pdata)
+ return 0;
+
+ if (!pdata->card_detect_active_low)
+ mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+ if (!pdata->read_only_active_low)
+ mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ if (gpio_is_valid(pdata->gpio_card_detect)) {
+ ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (gpio_is_valid(pdata->gpio_read_only)) {
+ ret = mmc_gpio_request_ro(mmc, pdata->gpio_read_only);
+ if (ret)
+ return ret;
+ }
+
+ return jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power,
+ "MMC read only", true, pdata->power_active_low);
+}
+
+static void jz4740_mmc_free_gpios(struct platform_device *pdev)
+{
+ struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!pdata)
+ return;
+
+ if (gpio_is_valid(pdata->gpio_power))
+ gpio_free(pdata->gpio_power);
+}
+
+static inline size_t jz4740_mmc_num_pins(struct jz4740_mmc_host *host)
+{
+ size_t num_pins = ARRAY_SIZE(jz4740_mmc_pins);
+ if (host->pdata && host->pdata->data_1bit)
+ num_pins -= 3;
+
+ return num_pins;
+}
+
+static int jz4740_mmc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct mmc_host *mmc;
+ struct jz4740_mmc_host *host;
+ struct jz4740_mmc_platform_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+
+ mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
+ if (!mmc) {
+ dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
+ return -ENOMEM;
+ }
+
+ host = mmc_priv(mmc);
+ host->pdata = pdata;
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0) {
+ ret = host->irq;
+ dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
+ goto err_free_host;
+ }
+
+ host->clk = devm_clk_get(&pdev->dev, "mmc");
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ dev_err(&pdev->dev, "Failed to get mmc clock\n");
+ goto err_free_host;
+ }
+
+ host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
+ dev_err(&pdev->dev, "Failed to ioremap base memory\n");
+ goto err_free_host;
+ }
+
+ ret = jz_gpio_bulk_request(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request mmc pins: %d\n", ret);
+ goto err_free_host;
+ }
+
+ ret = jz4740_mmc_request_gpios(mmc, pdev);
+ if (ret)
+ goto err_gpio_bulk_free;
+
+ mmc->ops = &jz4740_mmc_ops;
+ mmc->f_min = JZ_MMC_CLK_RATE / 128;
+ mmc->f_max = JZ_MMC_CLK_RATE;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->caps = (pdata && pdata->data_1bit) ? 0 : MMC_CAP_4_BIT_DATA;
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+
+ mmc->max_blk_size = (1 << 10) - 1;
+ mmc->max_blk_count = (1 << 15) - 1;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+
+ mmc->max_segs = 128;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ host->mmc = mmc;
+ host->pdev = pdev;
+ spin_lock_init(&host->lock);
+ host->irq_mask = 0xffff;
+
+ ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
+ dev_name(&pdev->dev), host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
+ goto err_free_gpios;
+ }
+
+ jz4740_mmc_reset(host);
+ jz4740_mmc_clock_disable(host);
+ setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
+ (unsigned long)host);
+	/* It is not important when it times out, it just needs to time out. */
+ set_timer_slack(&host->timeout_timer, HZ);
+
+ host->use_dma = true;
+ if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
+ host->use_dma = false;
+
+ platform_set_drvdata(pdev, host);
+ ret = mmc_add_host(mmc);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
+ goto err_free_irq;
+ }
+ dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");
+
+ dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
+ host->use_dma ? "DMA" : "PIO",
+ (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
+
+ return 0;
+
+err_free_irq:
+ free_irq(host->irq, host);
+err_free_gpios:
+ jz4740_mmc_free_gpios(pdev);
+err_gpio_bulk_free:
+ if (host->use_dma)
+ jz4740_mmc_release_dma_channels(host);
+ jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
+err_free_host:
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+static int jz4740_mmc_remove(struct platform_device *pdev)
+{
+ struct jz4740_mmc_host *host = platform_get_drvdata(pdev);
+
+ del_timer_sync(&host->timeout_timer);
+ jz4740_mmc_set_irq_enabled(host, 0xff, false);
+ jz4740_mmc_reset(host);
+
+ mmc_remove_host(host->mmc);
+
+ free_irq(host->irq, host);
+
+ jz4740_mmc_free_gpios(pdev);
+ jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
+
+ if (host->use_dma)
+ jz4740_mmc_release_dma_channels(host);
+
+ mmc_free_host(host->mmc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int jz4740_mmc_suspend(struct device *dev)
+{
+ struct jz4740_mmc_host *host = dev_get_drvdata(dev);
+
+ jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
+
+ return 0;
+}
+
+static int jz4740_mmc_resume(struct device *dev)
+{
+ struct jz4740_mmc_host *host = dev_get_drvdata(dev);
+
+ jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
+ jz4740_mmc_resume);
+#define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops)
+#else
+#define JZ4740_MMC_PM_OPS NULL
+#endif
+
+static struct platform_driver jz4740_mmc_driver = {
+ .probe = jz4740_mmc_probe,
+ .remove = jz4740_mmc_remove,
+ .driver = {
+ .name = "jz4740-mmc",
+ .pm = JZ4740_MMC_PM_OPS,
+ },
+};
+
+module_platform_driver(jz4740_mmc_driver);
+
+MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/kernel/drivers/mmc/host/mmc_spi.c b/kernel/drivers/mmc/host/mmc_spi.c
new file mode 100644
index 000000000..ae19d83bb
--- /dev/null
+++ b/kernel/drivers/mmc/host/mmc_spi.c
@@ -0,0 +1,1531 @@
+/*
+ * mmc_spi.c - Access SD/MMC cards through SPI master controllers
+ *
+ * (C) Copyright 2005, Intec Automation,
+ * Mike Lavender (mike@steroidmicros)
+ * (C) Copyright 2006-2007, David Brownell
+ * (C) Copyright 2007, Axis Communications,
+ * Hans-Peter Nilsson (hp@axis.com)
+ * (C) Copyright 2007, ATRON electronic GmbH,
+ * Jan Nikitenko <jan.nikitenko@gmail.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/dma-mapping.h>
+#include <linux/crc7.h>
+#include <linux/crc-itu-t.h>
+#include <linux/scatterlist.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h> /* for R1_SPI_* bit values */
+#include <linux/mmc/slot-gpio.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/mmc_spi.h>
+
+#include <asm/unaligned.h>
+
+
+/* NOTES:
+ *
+ * - For now, we won't try to interoperate with a real mmc/sd/sdio
+ * controller, although some of them do have hardware support for
+ * SPI protocol. The main reason for such configs would be mmc-ish
+ * cards like DataFlash, which don't support that "native" protocol.
+ *
+ * We don't have a "DataFlash/MMC/SD/SDIO card slot" abstraction to
+ * switch between driver stacks, and in any case if "native" mode
+ * is available, it will be faster and hence preferable.
+ *
+ * - MMC depends on a different chipselect management policy than the
+ * SPI interface currently supports for shared bus segments: it needs
+ * to issue multiple spi_message requests with the chipselect active,
+ * using the results of one message to decide the next one to issue.
+ *
+ * Pending updates to the programming interface, this driver expects
+ * that it not share the bus with other drivers (precluding conflicts).
+ *
+ * - We tell the controller to keep the chipselect active from the
+ * beginning of an mmc_host_ops.request until the end. So beware
+ * of SPI controller drivers that mis-handle the cs_change flag!
+ *
+ * However, many cards seem OK with chipselect flapping up/down
+ * during that time ... at least on unshared bus segments.
+ */
+
+
+/*
+ * Local protocol constants, internal to data block protocols.
+ */
+
+/* Response tokens used to ack each block written: */
+#define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
+#define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
+#define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
+#define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
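+/* i.e. ACCEPTED = 0x05, CRC_ERR = 0x0b and WRITE_ERR = 0x0d once masked
+ * with SPI_MMC_RESPONSE_CODE() */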
+
+/* Read and write blocks start with these tokens and end with crc;
+ * on error, read tokens act like a subset of R2_SPI_* values.
+ */
+#define SPI_TOKEN_SINGLE 0xfe /* single block r/w, multiblock read */
+#define SPI_TOKEN_MULTI_WRITE 0xfc /* multiblock write */
+#define SPI_TOKEN_STOP_TRAN 0xfd /* terminate multiblock write */
+
+#define MMC_SPI_BLOCKSIZE 512
+
+
+/* These fixed timeouts come from the latest SD specs, which say to ignore
+ * the CSD values. The R1B value is for card erase (e.g. the "I forgot the
+ * card's password" scenario); it's mostly applied to STOP_TRANSMISSION after
+ * reads, which take nowhere near that long. Older cards may be able to use
+ * shorter timeouts ... but why bother?
+ */
+#define r1b_timeout (HZ * 3)
+
+/* One of the critical speed parameters is the amount of data which may
+ * be transferred in one command. If this value is too low, the SD card
+ * controller has to do multiple partial block writes (argggh!). With
+ * today's (2008) SD cards there is little speed gain if we transfer more
+ * than 64 KBytes at a time. So use this value until there is any indication
+ * that we should do more here.
+ */
+#define MMC_SPI_BLOCKSATONCE 128
+
+/****************************************************************************/
+
+/*
+ * Local Data Structures
+ */
+
+/* "scratch" is per-{command,block} data exchanged with the card */
+struct scratch {
+ u8 status[29];
+ u8 data_token;
+ __be16 crc_val;
+};
+
+struct mmc_spi_host {
+ struct mmc_host *mmc;
+ struct spi_device *spi;
+
+ unsigned char power_mode;
+ u16 powerup_msecs;
+
+ struct mmc_spi_platform_data *pdata;
+
+ /* for bulk data transfers */
+ struct spi_transfer token, t, crc, early_status;
+ struct spi_message m;
+
+ /* for status readback */
+ struct spi_transfer status;
+ struct spi_message readback;
+
+ /* underlying DMA-aware controller, or null */
+ struct device *dma_dev;
+
+ /* buffer used for commands and for message "overhead" */
+ struct scratch *data;
+ dma_addr_t data_dma;
+
+ /* Specs say to write ones most of the time, even when the card
+ * has no need to read its input data; and many cards won't care.
+ * This is our source of those ones.
+ */
+ void *ones;
+ dma_addr_t ones_dma;
+};
+
+
+/****************************************************************************/
+
+/*
+ * MMC-over-SPI protocol glue, used by the MMC stack interface
+ */
+
+static inline int mmc_cs_off(struct mmc_spi_host *host)
+{
+ /* chipselect will always be inactive after setup() */
+ return spi_setup(host->spi);
+}
+
+static int
+mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
+{
+ int status;
+
+ if (len > sizeof(*host->data)) {
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ host->status.len = len;
+
+ if (host->dma_dev)
+ dma_sync_single_for_device(host->dma_dev,
+ host->data_dma, sizeof(*host->data),
+ DMA_FROM_DEVICE);
+
+ status = spi_sync_locked(host->spi, &host->readback);
+
+ if (host->dma_dev)
+ dma_sync_single_for_cpu(host->dma_dev,
+ host->data_dma, sizeof(*host->data),
+ DMA_FROM_DEVICE);
+
+ return status;
+}
+
+static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
+ unsigned n, u8 byte)
+{
+ u8 *cp = host->data->status;
+ unsigned long start = jiffies;
+
+ while (1) {
+ int status;
+ unsigned i;
+
+ status = mmc_spi_readbytes(host, n);
+ if (status < 0)
+ return status;
+
+ for (i = 0; i < n; i++) {
+ if (cp[i] != byte)
+ return cp[i];
+ }
+
+ if (time_is_before_jiffies(start + timeout))
+ break;
+
+ /* If we need long timeouts, we may release the CPU.
+ * We use jiffies here because we want to have a relation
+ * between elapsed time and the blocking of the scheduler.
+ */
+ if (time_is_before_jiffies(start+1))
+ schedule();
+ }
+ return -ETIMEDOUT;
+}
+
+static inline int
+mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
+{
+ return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
+}
+
+static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
+{
+ return mmc_spi_skip(host, timeout, 1, 0xff);
+}
+
+
+/*
+ * Note that for SPI, cmd->resp[0] is not the same data as "native" protocol
+ * hosts return! The low byte holds R1_SPI bits. The next byte may hold
+ * R2_SPI bits ... for SEND_STATUS, or after data read errors.
+ *
+ * cmd->resp[1] holds any four-byte response, for R3 (READ_OCR) and on
+ * newer cards R7 (IF_COND).
+ */
+
+static char *maptype(struct mmc_command *cmd)
+{
+ switch (mmc_spi_resp_type(cmd)) {
+ case MMC_RSP_SPI_R1: return "R1";
+ case MMC_RSP_SPI_R1B: return "R1B";
+ case MMC_RSP_SPI_R2: return "R2/R5";
+ case MMC_RSP_SPI_R3: return "R3/R4/R7";
+ default: return "?";
+ }
+}
+
+/* return zero, else negative errno after setting cmd->error */
+static int mmc_spi_response_get(struct mmc_spi_host *host,
+ struct mmc_command *cmd, int cs_on)
+{
+ u8 *cp = host->data->status;
+ u8 *end = cp + host->t.len;
+ int value = 0;
+ int bitshift;
+ u8 leftover = 0;
+ unsigned short rotator;
+ int i;
+ char tag[32];
+
+ snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s",
+ cmd->opcode, maptype(cmd));
+
+ /* Except for data block reads, the whole response will already
+ * be stored in the scratch buffer. It's somewhere after the
+ * command and the first byte we read after it. We ignore that
+ * first byte. After STOP_TRANSMISSION command it may include
+ * two data bits, but otherwise it's all ones.
+ */
+ cp += 8;
+ while (cp < end && *cp == 0xff)
+ cp++;
+
+ /* Data block reads (R1 response types) may need more data... */
+ if (cp == end) {
+ cp = host->data->status;
+ end = cp+1;
+
+ /* Card sends N(CR) (== 1..8) bytes of all-ones then one
+ * status byte ... and we already scanned 2 bytes.
+ *
+ * REVISIT block read paths use nasty byte-at-a-time I/O
+ * so they can always DMA directly into the target buffer.
+ * It'd probably be better to memcpy() the first chunk and
+ * avoid extra i/o calls...
+ *
+ * Note we check for more than 8 bytes, because in practice,
+ * some SD cards are slow...
+ */
+ for (i = 2; i < 16; i++) {
+ value = mmc_spi_readbytes(host, 1);
+ if (value < 0)
+ goto done;
+ if (*cp != 0xff)
+ goto checkstatus;
+ }
+ value = -ETIMEDOUT;
+ goto done;
+ }
+
+checkstatus:
+ bitshift = 0;
+ if (*cp & 0x80) {
+ /* Houston, we have an ugly card with a bit-shifted response */
+ rotator = *cp++ << 8;
+ /* read the next byte */
+ if (cp == end) {
+ value = mmc_spi_readbytes(host, 1);
+ if (value < 0)
+ goto done;
+ cp = host->data->status;
+ end = cp+1;
+ }
+ rotator |= *cp++;
+ while (rotator & 0x8000) {
+ bitshift++;
+ rotator <<= 1;
+ }
+ cmd->resp[0] = rotator >> 8;
+ leftover = rotator;
+ } else {
+ cmd->resp[0] = *cp++;
+ }
+ cmd->error = 0;
+
+ /* Status byte: the entire seven-bit R1 response. */
+ if (cmd->resp[0] != 0) {
+ if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
+ & cmd->resp[0])
+ value = -EFAULT; /* Bad address */
+ else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
+ value = -ENOSYS; /* Function not implemented */
+ else if (R1_SPI_COM_CRC & cmd->resp[0])
+ value = -EILSEQ; /* Illegal byte sequence */
+ else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
+ & cmd->resp[0])
+ value = -EIO; /* I/O error */
+ /* else R1_SPI_IDLE, "it's resetting" */
+ }
+
+ switch (mmc_spi_resp_type(cmd)) {
+
+ /* SPI R1B == R1 + busy; STOP_TRANSMISSION (for multiblock reads)
+ * and less-common stuff like various erase operations.
+ */
+ case MMC_RSP_SPI_R1B:
+ /* maybe we read all the busy tokens already */
+ while (cp < end && *cp == 0)
+ cp++;
+ if (cp == end)
+ mmc_spi_wait_unbusy(host, r1b_timeout);
+ break;
+
+ /* SPI R2 == R1 + second status byte; SEND_STATUS
+ * SPI R5 == R1 + data byte; IO_RW_DIRECT
+ */
+ case MMC_RSP_SPI_R2:
+ /* read the next byte */
+ if (cp == end) {
+ value = mmc_spi_readbytes(host, 1);
+ if (value < 0)
+ goto done;
+ cp = host->data->status;
+ end = cp+1;
+ }
+ if (bitshift) {
+ rotator = leftover << 8;
+ rotator |= *cp << bitshift;
+ cmd->resp[0] |= (rotator & 0xFF00);
+ } else {
+ cmd->resp[0] |= *cp << 8;
+ }
+ break;
+
+ /* SPI R3, R4, or R7 == R1 + 4 bytes */
+ case MMC_RSP_SPI_R3:
+ rotator = leftover << 8;
+ cmd->resp[1] = 0;
+ for (i = 0; i < 4; i++) {
+ cmd->resp[1] <<= 8;
+ /* read the next byte */
+ if (cp == end) {
+ value = mmc_spi_readbytes(host, 1);
+ if (value < 0)
+ goto done;
+ cp = host->data->status;
+ end = cp+1;
+ }
+ if (bitshift) {
+ rotator |= *cp++ << bitshift;
+ cmd->resp[1] |= (rotator >> 8);
+ rotator <<= 8;
+ } else {
+ cmd->resp[1] |= *cp++;
+ }
+ }
+ break;
+
+ /* SPI R1 == just one status byte */
+ case MMC_RSP_SPI_R1:
+ break;
+
+ default:
+ dev_dbg(&host->spi->dev, "bad response type %04x\n",
+ mmc_spi_resp_type(cmd));
+ if (value >= 0)
+ value = -EINVAL;
+ goto done;
+ }
+
+ if (value < 0)
+ dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
+ tag, cmd->resp[0], cmd->resp[1]);
+
+ /* disable chipselect on errors and some success cases */
+ if (value >= 0 && cs_on)
+ return value;
+done:
+ if (value < 0)
+ cmd->error = value;
+ mmc_cs_off(host);
+ return value;
+}
+
+/* Issue command and read its response.
+ * Returns zero on success, negative for error.
+ *
+ * On error, caller must cope with mmc core retry mechanism. That
+ * means immediate low-level resubmit, which affects the bus lock...
+ */
+static int
+mmc_spi_command_send(struct mmc_spi_host *host,
+ struct mmc_request *mrq,
+ struct mmc_command *cmd, int cs_on)
+{
+ struct scratch *data = host->data;
+ u8 *cp = data->status;
+ int status;
+ struct spi_transfer *t;
+
+ /* We can handle most commands (except block reads) in one full
+ * duplex I/O operation before either starting the next transfer
+ * (data block or command) or else deselecting the card.
+ *
+ * First, write 7 bytes:
+ * - an all-ones byte to ensure the card is ready
+ * - opcode byte (plus start and transmission bits)
+ * - four bytes of big-endian argument
+ * - crc7 (plus end bit) ... always computed, it's cheap
+ *
+ * We init the whole buffer to all-ones, which is what we need
+ * to write while we're reading (later) response data.
+ */
+ memset(cp, 0xff, sizeof(data->status));
+
+ cp[1] = 0x40 | cmd->opcode;
+ put_unaligned_be32(cmd->arg, cp+2);
+ cp[6] = crc7_be(0, cp+1, 5) | 0x01;
+ cp += 7;
+
+ /* Then, read up to 13 bytes (while writing all-ones):
+ * - N(CR) (== 1..8) bytes of all-ones
+ * - status byte (for all response types)
+ * - the rest of the response, either:
+ * + nothing, for R1 or R1B responses
+ * + second status byte, for R2 responses
+ * + four data bytes, for R3 and R7 responses
+ *
+ * Finally, read some more bytes ... in the nice cases we know in
+ * advance how many, and reading 1 more is always OK:
+ * - N(EC) (== 0..N) bytes of all-ones, before deselect/finish
+ * - N(RC) (== 1..N) bytes of all-ones, before next command
+ * - N(WR) (== 1..N) bytes of all-ones, before data write
+ *
+ * So in those cases one full duplex I/O of at most 21 bytes will
+ * handle the whole command, leaving the card ready to receive a
+ * data block or new command. We do that whenever we can, shaving
+ * CPU and IRQ costs (especially when using DMA or FIFOs).
+ *
+ * There are two other cases, where it's not generally practical
+ * to rely on a single I/O:
+ *
+ * - R1B responses need at least N(EC) bytes of all-zeroes.
+ *
+ * In this case we can *try* to fit it into one I/O, then
+ * maybe read more data later.
+ *
+ * - Data block reads are more troublesome, since a variable
+ * number of padding bytes precede the token and data.
+ * + N(CX) (== 0..8) bytes of all-ones, before CSD or CID
+ * + N(AC) (== 1..many) bytes of all-ones
+ *
+ * In this case we currently only have minimal speedups here:
+ * when N(CR) == 1 we can avoid I/O in response_get().
+ */
+ if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
+ cp += 2; /* min(N(CR)) + status */
+ /* R1 */
+ } else {
+ cp += 10; /* max(N(CR)) + status + min(N(RC),N(WR)) */
+ if (cmd->flags & MMC_RSP_SPI_S2) /* R2/R5 */
+ cp++;
+ else if (cmd->flags & MMC_RSP_SPI_B4) /* R3/R4/R7 */
+ cp += 4;
+ else if (cmd->flags & MMC_RSP_BUSY) /* R1B */
+ cp = data->status + sizeof(data->status);
+ /* else: R1 (most commands) */
+ }
+
+ dev_dbg(&host->spi->dev, " mmc_spi: CMD%d, resp %s\n",
+ cmd->opcode, maptype(cmd));
+
+ /* send command, leaving chipselect active */
+ spi_message_init(&host->m);
+
+ t = &host->t;
+ memset(t, 0, sizeof(*t));
+ t->tx_buf = t->rx_buf = data->status;
+ t->tx_dma = t->rx_dma = host->data_dma;
+ t->len = cp - data->status;
+ t->cs_change = 1;
+ spi_message_add_tail(t, &host->m);
+
+ if (host->dma_dev) {
+ host->m.is_dma_mapped = 1;
+ dma_sync_single_for_device(host->dma_dev,
+ host->data_dma, sizeof(*host->data),
+ DMA_BIDIRECTIONAL);
+ }
+ status = spi_sync_locked(host->spi, &host->m);
+
+ if (host->dma_dev)
+ dma_sync_single_for_cpu(host->dma_dev,
+ host->data_dma, sizeof(*host->data),
+ DMA_BIDIRECTIONAL);
+ if (status < 0) {
+ dev_dbg(&host->spi->dev, " ... write returned %d\n", status);
+ cmd->error = status;
+ return status;
+ }
+
+ /* after no-data commands and STOP_TRANSMISSION, chipselect off */
+ return mmc_spi_response_get(host, cmd, cs_on);
+}
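+
+/* A minimal sketch, not part of the driver: the framing above, applied to
+ * CMD0 with a zero argument, yields the canonical byte sequence
+ * 0xff 0x40 0x00 0x00 0x00 0x00 0x95, where 0x95 is the CMD0 crc7 with
+ * the end bit set. The helper name is illustrative only.
+ */
+static inline void mmc_spi_frame_cmd0_example(u8 cp[7])
+{
+ memset(cp, 0xff, 7); /* ready byte, and ones while reading */
+ cp[1] = 0x40 | 0; /* GO_IDLE_STATE */
+ put_unaligned_be32(0, cp + 2); /* big-endian argument */
+ cp[6] = crc7_be(0, cp + 1, 5) | 0x01; /* 0x94 | end bit == 0x95 */
+}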
+
+/* Build data message with up to four separate transfers. For TX, we
+ * start by writing the data token. And in most cases, we finish with
+ * a status transfer.
+ *
+ * We always provide TX data for data and CRC. The MMC/SD protocol
+ * requires us to write ones; but Linux defaults to writing zeroes;
+ * so we explicitly initialize it to all ones on RX paths.
+ *
+ * We also handle DMA mapping, so the underlying SPI controller does
+ * not need to (re)do it for each message.
+ */
+static void
+mmc_spi_setup_data_message(
+ struct mmc_spi_host *host,
+ int multiple,
+ enum dma_data_direction direction)
+{
+ struct spi_transfer *t;
+ struct scratch *scratch = host->data;
+ dma_addr_t dma = host->data_dma;
+
+ spi_message_init(&host->m);
+ if (dma)
+ host->m.is_dma_mapped = 1;
+
+ /* for reads, readblock() skips 0xff bytes before finding
+ * the token; for writes, this transfer issues that token.
+ */
+ if (direction == DMA_TO_DEVICE) {
+ t = &host->token;
+ memset(t, 0, sizeof(*t));
+ t->len = 1;
+ if (multiple)
+ scratch->data_token = SPI_TOKEN_MULTI_WRITE;
+ else
+ scratch->data_token = SPI_TOKEN_SINGLE;
+ t->tx_buf = &scratch->data_token;
+ if (dma)
+ t->tx_dma = dma + offsetof(struct scratch, data_token);
+ spi_message_add_tail(t, &host->m);
+ }
+
+ /* Body of transfer is buffer, then CRC ...
+ * either TX-only, or RX with TX-ones.
+ */
+ t = &host->t;
+ memset(t, 0, sizeof(*t));
+ t->tx_buf = host->ones;
+ t->tx_dma = host->ones_dma;
+ /* length and actual buffer info are written later */
+ spi_message_add_tail(t, &host->m);
+
+ t = &host->crc;
+ memset(t, 0, sizeof(*t));
+ t->len = 2;
+ if (direction == DMA_TO_DEVICE) {
+ /* the actual CRC may get written later */
+ t->tx_buf = &scratch->crc_val;
+ if (dma)
+ t->tx_dma = dma + offsetof(struct scratch, crc_val);
+ } else {
+ t->tx_buf = host->ones;
+ t->tx_dma = host->ones_dma;
+ t->rx_buf = &scratch->crc_val;
+ if (dma)
+ t->rx_dma = dma + offsetof(struct scratch, crc_val);
+ }
+ spi_message_add_tail(t, &host->m);
+
+ /*
+ * A single block read is followed by N(EC) [0+] all-ones bytes
+ * before deselect ... don't bother.
+ *
+ * Multiblock reads are followed by N(AC) [1+] all-ones bytes before
+ * the next block is read, or a STOP_TRANSMISSION is issued. We'll
+ * collect that single byte, so readblock() doesn't need to.
+ *
+ * For a write, the one-byte data response follows immediately, then
+ * come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
+ * Then single block reads may deselect, and multiblock ones issue
+ * the next token (next data block, or STOP_TRAN). We can try to
+ * minimize I/O ops by using a single read to collect end-of-busy.
+ */
+ if (multiple || direction == DMA_TO_DEVICE) {
+ t = &host->early_status;
+ memset(t, 0, sizeof(*t));
+ t->len = (direction == DMA_TO_DEVICE)
+ ? sizeof(scratch->status)
+ : 1;
+ t->tx_buf = host->ones;
+ t->tx_dma = host->ones_dma;
+ t->rx_buf = scratch->status;
+ if (dma)
+ t->rx_dma = dma + offsetof(struct scratch, status);
+ t->cs_change = 1;
+ spi_message_add_tail(t, &host->m);
+ }
+}
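+
+/* For reference, the message built above chains these transfers: a write
+ * (single or multiblock) is [token][data][crc16][status scan]; a single
+ * block read is just [data][crc16]; a multiblock read adds one readahead
+ * byte so readblock() need not collect it.
+ */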
+
+/*
+ * Write one block:
+ * - caller handled preceding N(WR) [1+] all-ones bytes
+ * - data block
+ * + token
+ * + data bytes
+ * + crc16
+ * - an all-ones byte ... card writes a data-response byte
+ * - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
+ *
+ * Return negative errno, else success.
+ */
+static int
+mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ unsigned long timeout)
+{
+ struct spi_device *spi = host->spi;
+ int status, i;
+ struct scratch *scratch = host->data;
+ u32 pattern;
+
+ if (host->mmc->use_spi_crc)
+ scratch->crc_val = cpu_to_be16(
+ crc_itu_t(0, t->tx_buf, t->len));
+ if (host->dma_dev)
+ dma_sync_single_for_device(host->dma_dev,
+ host->data_dma, sizeof(*scratch),
+ DMA_BIDIRECTIONAL);
+
+ status = spi_sync_locked(spi, &host->m);
+
+ if (status != 0) {
+ dev_dbg(&spi->dev, "write error (%d)\n", status);
+ return status;
+ }
+
+ if (host->dma_dev)
+ dma_sync_single_for_cpu(host->dma_dev,
+ host->data_dma, sizeof(*scratch),
+ DMA_BIDIRECTIONAL);
+
+ /*
+ * Get the transmission data-response reply. It must follow
+ * immediately after the data block we transferred. This reply
+ * doesn't necessarily tell whether the write operation succeeded;
+ * it just says if the transmission was ok and whether *earlier*
+ * writes succeeded; see the standard.
+ *
+ * In practice, there are (even modern SDHC) cards which are late
+ * in sending the response, and miss the time frame by a few bits,
+ * so we have to cope with this situation and check the response
+ * bit-by-bit. Arggh!!!
+ */
+ pattern = get_unaligned_be32(scratch->status);
+
+ /* First 3 bits of the pattern are undefined */
+ pattern |= 0xE0000000;
+
+ /* left-adjust to leading 0 bit */
+ while (pattern & 0x80000000)
+ pattern <<= 1;
+ /* right-adjust for pattern matching. Code is in bit 4..0 now. */
+ pattern >>= 27;
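+ /* Worked example (illustrative): a byte-aligned ACCEPTED token arrives
+ * as 0xe5. After OR-ing in the undefined top bits and shifting out the
+ * leading ones, bits 4..0 hold 0b00101, i.e. SPI_RESPONSE_ACCEPTED.
+ */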
+
+ switch (pattern) {
+ case SPI_RESPONSE_ACCEPTED:
+ status = 0;
+ break;
+ case SPI_RESPONSE_CRC_ERR:
+ /* host shall then issue MMC_STOP_TRANSMISSION */
+ status = -EILSEQ;
+ break;
+ case SPI_RESPONSE_WRITE_ERR:
+ /* host shall then issue MMC_STOP_TRANSMISSION,
+ * and should MMC_SEND_STATUS to sort it out
+ */
+ status = -EIO;
+ break;
+ default:
+ status = -EPROTO;
+ break;
+ }
+ if (status != 0) {
+ dev_dbg(&spi->dev, "write error %02x (%d)\n",
+ scratch->status[0], status);
+ return status;
+ }
+
+ t->tx_buf += t->len;
+ if (host->dma_dev)
+ t->tx_dma += t->len;
+
+ /* Return when not busy. If we didn't collect that status yet,
+ * we'll need some more I/O.
+ */
+ for (i = 4; i < sizeof(scratch->status); i++) {
+ /* card is non-busy if the most recent bit is 1 */
+ if (scratch->status[i] & 0x01)
+ return 0;
+ }
+ return mmc_spi_wait_unbusy(host, timeout);
+}
+
+/*
+ * Read one block:
+ * - skip leading all-ones bytes ... either
+ * + N(AC) [1..f(clock,CSD)] usually, else
+ * + N(CX) [0..8] when reading CSD or CID
+ * - data block
+ * + token ... if error token, no data or crc
+ * + data bytes
+ * + crc16
+ *
+ * After single block reads, we're done; N(EC) [0+] all-ones bytes follow
+ * before dropping chipselect.
+ *
+ * For multiblock reads, caller either reads the next block or issues a
+ * STOP_TRANSMISSION command.
+ */
+static int
+mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
+ unsigned long timeout)
+{
+ struct spi_device *spi = host->spi;
+ int status;
+ struct scratch *scratch = host->data;
+ unsigned int bitshift;
+ u8 leftover;
+
+ /* At least one SD card sends an all-zeroes byte when N(CX)
+ * applies, before the all-ones bytes ... just cope with that.
+ */
+ status = mmc_spi_readbytes(host, 1);
+ if (status < 0)
+ return status;
+ status = scratch->status[0];
+ if (status == 0xff || status == 0)
+ status = mmc_spi_readtoken(host, timeout);
+
+ if (status < 0) {
+ dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
+ return status;
+ }
+
+ /* The token may be bit-shifted...
+ * the first 0-bit precedes the data stream.
+ */
+ bitshift = 7;
+ while (status & 0x80) {
+ status <<= 1;
+ bitshift--;
+ }
+ leftover = status << 1;
+
+ if (host->dma_dev) {
+ dma_sync_single_for_device(host->dma_dev,
+ host->data_dma, sizeof(*scratch),
+ DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(host->dma_dev,
+ t->rx_dma, t->len,
+ DMA_FROM_DEVICE);
+ }
+
+ status = spi_sync_locked(spi, &host->m);
+
+ if (host->dma_dev) {
+ dma_sync_single_for_cpu(host->dma_dev,
+ host->data_dma, sizeof(*scratch),
+ DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(host->dma_dev,
+ t->rx_dma, t->len,
+ DMA_FROM_DEVICE);
+ }
+
+ if (bitshift) {
+ /* Walk through the data and the crc and do
+ * all the magic to get byte-aligned data.
+ */
+ u8 *cp = t->rx_buf;
+ unsigned int len;
+ unsigned int bitright = 8 - bitshift;
+ u8 temp;
+ for (len = t->len; len; len--) {
+ temp = *cp;
+ *cp++ = leftover | (temp >> bitshift);
+ leftover = temp << bitright;
+ }
+ cp = (u8 *) &scratch->crc_val;
+ temp = *cp;
+ *cp++ = leftover | (temp >> bitshift);
+ leftover = temp << bitright;
+ temp = *cp;
+ *cp = leftover | (temp >> bitshift);
+ }
+
+ if (host->mmc->use_spi_crc) {
+ u16 crc = crc_itu_t(0, t->rx_buf, t->len);
+
+ be16_to_cpus(&scratch->crc_val);
+ if (scratch->crc_val != crc) {
+ dev_dbg(&spi->dev, "read - crc error: crc_val=0x%04x, "
+ "computed=0x%04x len=%d\n",
+ scratch->crc_val, crc, t->len);
+ return -EILSEQ;
+ }
+ }
+
+ t->rx_buf += t->len;
+ if (host->dma_dev)
+ t->rx_dma += t->len;
+
+ return 0;
+}
+
+/*
+ * An MMC/SD data stage includes one or more blocks, optional CRCs,
+ * and inline handshaking. That handshaking makes it unlike most
+ * other SPI protocol stacks.
+ */
+static void
+mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
+ struct mmc_data *data, u32 blk_size)
+{
+ struct spi_device *spi = host->spi;
+ struct device *dma_dev = host->dma_dev;
+ struct spi_transfer *t;
+ enum dma_data_direction direction;
+ struct scatterlist *sg;
+ unsigned n_sg;
+ int multiple = (data->blocks > 1);
+ u32 clock_rate;
+ unsigned long timeout;
+
+ if (data->flags & MMC_DATA_READ)
+ direction = DMA_FROM_DEVICE;
+ else
+ direction = DMA_TO_DEVICE;
+ mmc_spi_setup_data_message(host, multiple, direction);
+ t = &host->t;
+
+ if (t->speed_hz)
+ clock_rate = t->speed_hz;
+ else
+ clock_rate = spi->max_speed_hz;
+
+ timeout = data->timeout_ns +
+ data->timeout_clks * 1000000 / clock_rate;
+ timeout = usecs_to_jiffies((unsigned int)(timeout / 1000)) + 1;
+
+ /* Handle scatterlist segments one at a time, with sync for
+ * each 512-byte block.
+ */
+ for (sg = data->sg, n_sg = data->sg_len; n_sg; n_sg--, sg++) {
+ int status = 0;
+ dma_addr_t dma_addr = 0;
+ void *kmap_addr;
+ unsigned length = sg->length;
+ enum dma_data_direction dir = direction;
+
+ /* set up dma mapping for controller drivers that might
+ * use DMA ... though they may fall back to PIO
+ */
+ if (dma_dev) {
+ /* never invalidate whole *shared* pages ... */
+ if ((sg->offset != 0 || length != PAGE_SIZE)
+ && dir == DMA_FROM_DEVICE)
+ dir = DMA_BIDIRECTIONAL;
+
+ dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
+ PAGE_SIZE, dir);
+ if (direction == DMA_TO_DEVICE)
+ t->tx_dma = dma_addr + sg->offset;
+ else
+ t->rx_dma = dma_addr + sg->offset;
+ }
+
+ /* allow pio too; we don't allow highmem */
+ kmap_addr = kmap(sg_page(sg));
+ if (direction == DMA_TO_DEVICE)
+ t->tx_buf = kmap_addr + sg->offset;
+ else
+ t->rx_buf = kmap_addr + sg->offset;
+
+ /* transfer each block, and update request status */
+ while (length) {
+ t->len = min(length, blk_size);
+
+ dev_dbg(&host->spi->dev,
+ " mmc_spi: %s block, %d bytes\n",
+ (direction == DMA_TO_DEVICE)
+ ? "write"
+ : "read",
+ t->len);
+
+ if (direction == DMA_TO_DEVICE)
+ status = mmc_spi_writeblock(host, t, timeout);
+ else
+ status = mmc_spi_readblock(host, t, timeout);
+ if (status < 0)
+ break;
+
+ data->bytes_xfered += t->len;
+ length -= t->len;
+
+ if (!multiple)
+ break;
+ }
+
+ /* discard mappings */
+ if (direction == DMA_FROM_DEVICE)
+ flush_kernel_dcache_page(sg_page(sg));
+ kunmap(sg_page(sg));
+ if (dma_dev)
+ dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
+
+ if (status < 0) {
+ data->error = status;
+ dev_dbg(&spi->dev, "%s status %d\n",
+ (direction == DMA_TO_DEVICE)
+ ? "write" : "read",
+ status);
+ break;
+ }
+ }
+
+ /* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that
+ * can be issued before multiblock writes. Unlike its more widely
+ * documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23),
+ * it can affect the STOP_TRAN logic. Complete (and current)
+ * MMC specs should sort that out before Linux starts using CMD23.
+ */
+ if (direction == DMA_TO_DEVICE && multiple) {
+ struct scratch *scratch = host->data;
+ int tmp;
+ const unsigned statlen = sizeof(scratch->status);
+
+ dev_dbg(&spi->dev, " mmc_spi: STOP_TRAN\n");
+
+ /* Tweak the per-block message we set up earlier by morphing
+ * it to hold single buffer with the token followed by some
+ * all-ones bytes ... skip N(BR) (0..1), scan the rest for
+ * "not busy any longer" status, and leave chip selected.
+ */
+ INIT_LIST_HEAD(&host->m.transfers);
+ list_add(&host->early_status.transfer_list,
+ &host->m.transfers);
+
+ memset(scratch->status, 0xff, statlen);
+ scratch->status[0] = SPI_TOKEN_STOP_TRAN;
+
+ host->early_status.tx_buf = host->early_status.rx_buf;
+ host->early_status.tx_dma = host->early_status.rx_dma;
+ host->early_status.len = statlen;
+
+ if (host->dma_dev)
+ dma_sync_single_for_device(host->dma_dev,
+ host->data_dma, sizeof(*scratch),
+ DMA_BIDIRECTIONAL);
+
+ tmp = spi_sync_locked(spi, &host->m);
+
+ if (host->dma_dev)
+ dma_sync_single_for_cpu(host->dma_dev,
+ host->data_dma, sizeof(*scratch),
+ DMA_BIDIRECTIONAL);
+
+ if (tmp < 0) {
+ if (!data->error)
+ data->error = tmp;
+ return;
+ }
+
+ /* Ideally we collected "not busy" status with one I/O,
+ * avoiding wasteful byte-at-a-time scanning... but more
+ * I/O is often needed.
+ */
+ for (tmp = 2; tmp < statlen; tmp++) {
+ if (scratch->status[tmp] != 0)
+ return;
+ }
+ tmp = mmc_spi_wait_unbusy(host, timeout);
+ if (tmp < 0 && !data->error)
+ data->error = tmp;
+ }
+}
+
+/****************************************************************************/
+
+/*
+ * MMC driver implementation -- the interface to the MMC stack
+ */
+
+static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mmc_spi_host *host = mmc_priv(mmc);
+ int status = -EINVAL;
+ int crc_retry = 5;
+ struct mmc_command stop;
+
+#ifdef DEBUG
+ /* MMC core and layered drivers *MUST* issue SPI-aware commands */
+ {
+ struct mmc_command *cmd;
+ int invalid = 0;
+
+ cmd = mrq->cmd;
+ if (!mmc_spi_resp_type(cmd)) {
+ dev_dbg(&host->spi->dev, "bogus command\n");
+ cmd->error = -EINVAL;
+ invalid = 1;
+ }
+
+ cmd = mrq->stop;
+ if (cmd && !mmc_spi_resp_type(cmd)) {
+ dev_dbg(&host->spi->dev, "bogus STOP command\n");
+ cmd->error = -EINVAL;
+ invalid = 1;
+ }
+
+ if (invalid) {
+ dump_stack();
+ mmc_request_done(host->mmc, mrq);
+ return;
+ }
+ }
+#endif
+
+ /* request exclusive bus access */
+ spi_bus_lock(host->spi->master);
+
+crc_recover:
+ /* issue command; then optionally data and stop */
+ status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
+ if (status == 0 && mrq->data) {
+ mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);
+
+ /*
+ * The SPI bus is not always reliable for large data transfers.
+ * If an occasional crc error is reported by the SD device with
+ * data read/write over SPI, it may be recovered by repeating
+ * the last SD command again. The retry count is set to 5 to
+ * ensure the driver passes stress tests.
+ */
+ if (mrq->data->error == -EILSEQ && crc_retry) {
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.arg = 0;
+ stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ status = mmc_spi_command_send(host, mrq, &stop, 0);
+ crc_retry--;
+ mrq->data->error = 0;
+ goto crc_recover;
+ }
+
+ if (mrq->stop)
+ status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
+ else
+ mmc_cs_off(host);
+ }
+
+ /* release the bus */
+ spi_bus_unlock(host->spi->master);
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+/* See Section 6.4.1, in SD "Simplified Physical Layer Specification 2.0"
+ *
+ * NOTE that here we can't know that the card has just been powered up;
+ * not all MMC/SD sockets support power switching.
+ *
+ * FIXME when the card is still in SPI mode, e.g. from a previous kernel,
+ * this doesn't seem to do the right thing at all...
+ */
+static void mmc_spi_initsequence(struct mmc_spi_host *host)
+{
+ /* Try to be very sure any previous command has completed;
+ * wait till not-busy, skip debris from any old commands.
+ */
+ mmc_spi_wait_unbusy(host, r1b_timeout);
+ mmc_spi_readbytes(host, 10);
+
+ /*
+ * Do a burst with chipselect active-high. We need to do this to
+ * meet the requirement of 74 clock cycles with both chipselect
+ * and CMD (MOSI) high before CMD0 ... after the card has been
+ * powered up to Vdd(min), and so is ready to take commands.
+ *
+ * Some cards are particularly needy of this (e.g. Viking "SD256")
+ * while most others don't seem to care.
+ *
+ * Note that this is one of the places MMC/SD plays games with the
+ * SPI protocol. Another is that when chipselect is released while
+ * the card returns BUSY status, the clock must issue several cycles
+ * with chipselect high before the card will stop driving its output.
+ */
+ host->spi->mode |= SPI_CS_HIGH;
+ if (spi_setup(host->spi) != 0) {
+ /* Just warn; most cards work without it. */
+ dev_warn(&host->spi->dev,
+ "can't change chip-select polarity\n");
+ host->spi->mode &= ~SPI_CS_HIGH;
+ } else {
+ mmc_spi_readbytes(host, 18);
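+ /* 18 bytes == 144 clock cycles, comfortably above the required 74 */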
+
+ host->spi->mode &= ~SPI_CS_HIGH;
+ if (spi_setup(host->spi) != 0) {
+ /* Wot, we can't get the same setup we had before? */
+ dev_err(&host->spi->dev,
+ "can't restore chip-select polarity\n");
+ }
+ }
+}
+
+static char *mmc_powerstring(u8 power_mode)
+{
+ switch (power_mode) {
+ case MMC_POWER_OFF: return "off";
+ case MMC_POWER_UP: return "up";
+ case MMC_POWER_ON: return "on";
+ }
+ return "?";
+}
+
+static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mmc_spi_host *host = mmc_priv(mmc);
+
+ if (host->power_mode != ios->power_mode) {
+ int canpower;
+
+ canpower = host->pdata && host->pdata->setpower;
+
+ dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n",
+ mmc_powerstring(ios->power_mode),
+ ios->vdd,
+ canpower ? ", can switch" : "");
+
+ /* switch power on/off if possible, accounting for
+ * max 250msec powerup time if needed.
+ */
+ if (canpower) {
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ case MMC_POWER_UP:
+ host->pdata->setpower(&host->spi->dev,
+ ios->vdd);
+ if (ios->power_mode == MMC_POWER_UP)
+ msleep(host->powerup_msecs);
+ }
+ }
+
+ /* See 6.4.1 in the simplified SD card physical spec 2.0 */
+ if (ios->power_mode == MMC_POWER_ON)
+ mmc_spi_initsequence(host);
+
+ /* If powering down, ground all card inputs to avoid power
+ * delivery from data lines! On a shared SPI bus, this
+ * will probably be temporary; 6.4.2 of the simplified SD
+ * spec says this must last at least 1msec.
+ *
+ * - Clock low means CPOL 0, e.g. mode 0
+ * - MOSI low comes from writing zero
+ * - Chipselect is usually active low...
+ */
+ if (canpower && ios->power_mode == MMC_POWER_OFF) {
+ int mres;
+ u8 nullbyte = 0;
+
+ host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
+ mres = spi_setup(host->spi);
+ if (mres < 0)
+ dev_dbg(&host->spi->dev,
+ "switch to SPI mode 0 failed\n");
+
+ if (spi_write(host->spi, &nullbyte, 1) < 0)
+ dev_dbg(&host->spi->dev,
+ "put spi signals to low failed\n");
+
+ /*
+ * Now clock should be low due to spi mode 0;
+ * MOSI should be low because of written 0x00;
+ * chipselect should be low (it is active low)
+ * power supply is off, so now MMC is off too!
+ *
+ * FIXME no, chipselect can be high since the
+ * device is inactive and SPI_CS_HIGH is clear...
+ */
+ msleep(10);
+ if (mres == 0) {
+ host->spi->mode |= (SPI_CPOL|SPI_CPHA);
+ mres = spi_setup(host->spi);
+ if (mres < 0)
+ dev_dbg(&host->spi->dev,
+ "switch back to SPI mode 3"
+ " failed\n");
+ }
+ }
+
+ host->power_mode = ios->power_mode;
+ }
+
+ if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
+ int status;
+
+ host->spi->max_speed_hz = ios->clock;
+ status = spi_setup(host->spi);
+ dev_dbg(&host->spi->dev,
+ "mmc_spi: clock to %d Hz, %d\n",
+ host->spi->max_speed_hz, status);
+ }
+}
+
+static const struct mmc_host_ops mmc_spi_ops = {
+ .request = mmc_spi_request,
+ .set_ios = mmc_spi_set_ios,
+ .get_ro = mmc_gpio_get_ro,
+ .get_cd = mmc_gpio_get_cd,
+};
+
+
+/****************************************************************************/
+
+/*
+ * SPI driver implementation
+ */
+
+static irqreturn_t
+mmc_spi_detect_irq(int irq, void *mmc)
+{
+ struct mmc_spi_host *host = mmc_priv(mmc);
+ u16 delay_msec = max(host->pdata->detect_delay, (u16)100);
+
+ mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
+ return IRQ_HANDLED;
+}
+
+static int mmc_spi_probe(struct spi_device *spi)
+{
+ void *ones;
+ struct mmc_host *mmc;
+ struct mmc_spi_host *host;
+ int status;
+ bool has_ro = false;
+
+ /* We rely on full duplex transfers, mostly to reduce
+ * per-transfer overheads (by making fewer transfers).
+ */
+ if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ return -EINVAL;
+
+ /* MMC and SD specs only seem to care that sampling is on the
+ * rising edge ... meaning SPI modes 0 or 3. So either SPI mode
+ * should be legit. We'll use mode 0 since the steady state is 0,
+ * which is appropriate for hotplugging, unless the platform data
+ * specifies mode 3 (if the hardware is not compatible with mode 0).
+ */
+ if (spi->mode != SPI_MODE_3)
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+
+ status = spi_setup(spi);
+ if (status < 0) {
+ dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
+ spi->mode, spi->max_speed_hz / 1000,
+ status);
+ return status;
+ }
+
+ /* We need a supply of ones to transmit. This is the only time
+ * the CPU touches these, so cache coherency isn't a concern.
+ *
+ * NOTE if many systems use more than one MMC-over-SPI connector
+ * it'd save some memory to share this. That's evidently rare.
+ */
+ status = -ENOMEM;
+ ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
+ if (!ones)
+ goto nomem;
+ memset(ones, 0xff, MMC_SPI_BLOCKSIZE);
+
+ mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
+ if (!mmc)
+ goto nomem;
+
+ mmc->ops = &mmc_spi_ops;
+ mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
+ mmc->max_segs = MMC_SPI_BLOCKSATONCE;
+ mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
+ mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
+
+ mmc->caps = MMC_CAP_SPI;
+
+ /* SPI doesn't need the lowspeed device identification thing for
+ * MMC or SD cards, since it never comes up in open drain mode.
+ * That's good; some SPI masters can't handle very low speeds!
+ *
+ * However, low speed SDIO cards need not handle over 400 KHz;
+ * that's the only reason not to use a few MHz for f_min (until
+ * the upper layer reads the target frequency from the CSD).
+ */
+ mmc->f_min = 400000;
+ mmc->f_max = spi->max_speed_hz;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->spi = spi;
+
+ host->ones = ones;
+
+ /* Platform data is used to hook up things like card sensing
+ * and power switching gpios.
+ */
+ host->pdata = mmc_spi_get_pdata(spi);
+ if (host->pdata)
+ mmc->ocr_avail = host->pdata->ocr_mask;
+ if (!mmc->ocr_avail) {
+ dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
+ mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
+ }
+ if (host->pdata && host->pdata->setpower) {
+ host->powerup_msecs = host->pdata->powerup_msecs;
+ if (!host->powerup_msecs || host->powerup_msecs > 250)
+ host->powerup_msecs = 250;
+ }
+
+ dev_set_drvdata(&spi->dev, mmc);
+
+ /* preallocate dma buffers */
+ host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
+ if (!host->data)
+ goto fail_nobuf1;
+
+ if (spi->master->dev.parent->dma_mask) {
+ struct device *dev = spi->master->dev.parent;
+
+ host->dma_dev = dev;
+ host->ones_dma = dma_map_single(dev, ones,
+ MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
+ host->data_dma = dma_map_single(dev, host->data,
+ sizeof(*host->data), DMA_BIDIRECTIONAL);
+
+ /* REVISIT in theory those map operations can fail... */
+
+ dma_sync_single_for_cpu(host->dma_dev,
+ host->data_dma, sizeof(*host->data),
+ DMA_BIDIRECTIONAL);
+ }
+
+ /* setup message for status/busy readback */
+ spi_message_init(&host->readback);
+ host->readback.is_dma_mapped = (host->dma_dev != NULL);
+
+ spi_message_add_tail(&host->status, &host->readback);
+ host->status.tx_buf = host->ones;
+ host->status.tx_dma = host->ones_dma;
+ host->status.rx_buf = &host->data->status;
+ host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
+ host->status.cs_change = 1;
+
+ /* register card detect irq */
+ if (host->pdata && host->pdata->init) {
+ status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
+ if (status != 0)
+ goto fail_glue_init;
+ }
+
+ /* pass platform capabilities, if any */
+ if (host->pdata) {
+ mmc->caps |= host->pdata->caps;
+ mmc->caps2 |= host->pdata->caps2;
+ }
+
+ status = mmc_add_host(mmc);
+ if (status != 0)
+ goto fail_add_host;
+
+ if (host->pdata && host->pdata->flags & MMC_SPI_USE_CD_GPIO) {
+ status = mmc_gpio_request_cd(mmc, host->pdata->cd_gpio,
+ host->pdata->cd_debounce);
+ if (status != 0)
+ goto fail_add_host;
+ mmc_gpiod_request_cd_irq(mmc);
+ }
+
+ if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
+ has_ro = true;
+ status = mmc_gpio_request_ro(mmc, host->pdata->ro_gpio);
+ if (status != 0)
+ goto fail_add_host;
+ }
+
+ dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
+ dev_name(&mmc->class_dev),
+ host->dma_dev ? "" : ", no DMA",
+ has_ro ? "" : ", no WP",
+ (host->pdata && host->pdata->setpower)
+ ? "" : ", no poweroff",
+ (mmc->caps & MMC_CAP_NEEDS_POLL)
+ ? ", cd polling" : "");
+ return 0;
+
+fail_add_host:
+ mmc_remove_host(mmc);
+fail_glue_init:
+ if (host->dma_dev)
+ dma_unmap_single(host->dma_dev, host->data_dma,
+ sizeof(*host->data), DMA_BIDIRECTIONAL);
+ kfree(host->data);
+
+fail_nobuf1:
+ mmc_free_host(mmc);
+ mmc_spi_put_pdata(spi);
+ dev_set_drvdata(&spi->dev, NULL);
+
+nomem:
+ kfree(ones);
+ return status;
+}
+
+
+static int mmc_spi_remove(struct spi_device *spi)
+{
+ struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
+ struct mmc_spi_host *host;
+
+ if (mmc) {
+ host = mmc_priv(mmc);
+
+ /* prevent new mmc_detect_change() calls */
+ if (host->pdata && host->pdata->exit)
+ host->pdata->exit(&spi->dev, mmc);
+
+ mmc_remove_host(mmc);
+
+ if (host->dma_dev) {
+ dma_unmap_single(host->dma_dev, host->ones_dma,
+ MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
+ dma_unmap_single(host->dma_dev, host->data_dma,
+ sizeof(*host->data), DMA_BIDIRECTIONAL);
+ }
+
+ kfree(host->data);
+ kfree(host->ones);
+
+ spi->max_speed_hz = mmc->f_max;
+ mmc_free_host(mmc);
+ mmc_spi_put_pdata(spi);
+ dev_set_drvdata(&spi->dev, NULL);
+ }
+ return 0;
+}
+
+static const struct of_device_id mmc_spi_of_match_table[] = {
+ { .compatible = "mmc-spi-slot", },
+ {},
+};
+
+static struct spi_driver mmc_spi_driver = {
+ .driver = {
+ .name = "mmc_spi",
+ .owner = THIS_MODULE,
+ .of_match_table = mmc_spi_of_match_table,
+ },
+ .probe = mmc_spi_probe,
+ .remove = mmc_spi_remove,
+};
+
+module_spi_driver(mmc_spi_driver);
+
+MODULE_AUTHOR("Mike Lavender, David Brownell, "
+ "Hans-Peter Nilsson, Jan Nikitenko");
+MODULE_DESCRIPTION("SPI SD/MMC host driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:mmc_spi");
diff --git a/kernel/drivers/mmc/host/mmci.c b/kernel/drivers/mmc/host/mmci.c
new file mode 100644
index 000000000..d42fc084d
--- /dev/null
+++ b/kernel/drivers/mmc/host/mmci.c
@@ -0,0 +1,1922 @@
+/*
+ * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
+ *
+ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ * Copyright (C) 2010 ST-Ericsson SA
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/log2.h>
+#include <linux/mmc/pm.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/amba/bus.h>
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/mmci.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <asm/div64.h>
+#include <asm/io.h>
+#include <asm/sizes.h>
+
+#include "mmci.h"
+#include "mmci_qcom_dml.h"
+
+#define DRIVER_NAME "mmci-pl18x"
+
+static unsigned int fmax = 515633;
+
+/**
+ * struct variant_data - MMCI variant-specific quirks
+ * @clkreg: default value for MMCICLOCK register
+ * @clkreg_enable: enable value for MMCICLOCK register
+ * @clkreg_8bit_bus_enable: enable value for 8 bit bus
+ * @clkreg_neg_edge_enable: enable value for inverted data/cmd output
+ * @datalength_bits: number of bits in the MMCIDATALENGTH register
+ * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
+ * is asserted (likewise for RX)
+ * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
+ * is asserted (likewise for RX)
+ * @data_cmd_enable: enable value for data commands.
+ * @st_sdio: enable ST specific SDIO logic
+ * @st_clkdiv: true if using a ST-specific clock divider algorithm
+ * @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
+ * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
+ * @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl
+ * register
+ * @datactrl_mask_sdio: SDIO enable mask in datactrl register
+ * @pwrreg_powerup: power up value for MMCIPOWER register
+ * @f_max: maximum clk frequency supported by the controller.
+ * @signal_direction: input/out direction of bus signals can be indicated
+ * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
+ * @busy_detect: true if busy detection on dat0 is supported
+ * @pwrreg_nopower: bits in MMCIPOWER don't control the external power supply
+ * @explicit_mclk_control: enable explicit mclk control in driver.
+ * @qcom_fifo: enables qcom specific fifo pio read logic.
+ * @qcom_dml: enables qcom specific dma glue for dma transfers.
+ * @reversed_irq_handling: handle data irq before cmd irq.
+ */
+struct variant_data {
+ unsigned int clkreg;
+ unsigned int clkreg_enable;
+ unsigned int clkreg_8bit_bus_enable;
+ unsigned int clkreg_neg_edge_enable;
+ unsigned int datalength_bits;
+ unsigned int fifosize;
+ unsigned int fifohalfsize;
+ unsigned int data_cmd_enable;
+ unsigned int datactrl_mask_ddrmode;
+ unsigned int datactrl_mask_sdio;
+ bool st_sdio;
+ bool st_clkdiv;
+ bool blksz_datactrl16;
+ bool blksz_datactrl4;
+ u32 pwrreg_powerup;
+ u32 f_max;
+ bool signal_direction;
+ bool pwrreg_clkgate;
+ bool busy_detect;
+ bool pwrreg_nopower;
+ bool explicit_mclk_control;
+ bool qcom_fifo;
+ bool qcom_dml;
+ bool reversed_irq_handling;
+};
+
+static struct variant_data variant_arm = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
+ .datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
+ .f_max = 100000000,
+ .reversed_irq_handling = true,
+};
+
+static struct variant_data variant_arm_extended_fifo = {
+ .fifosize = 128 * 4,
+ .fifohalfsize = 64 * 4,
+ .datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
+ .f_max = 100000000,
+};
+
+static struct variant_data variant_arm_extended_fifo_hwfc = {
+ .fifosize = 128 * 4,
+ .fifohalfsize = 64 * 4,
+ .clkreg_enable = MCI_ARM_HWFCEN,
+ .datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
+ .f_max = 100000000,
+};
+
+static struct variant_data variant_u300 = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg_enable = MCI_ST_U300_HWFCEN,
+ .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .datalength_bits = 16,
+ .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .st_sdio = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .f_max = 100000000,
+ .signal_direction = true,
+ .pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
+};
+
+static struct variant_data variant_nomadik = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .datalength_bits = 24,
+ .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .st_sdio = true,
+ .st_clkdiv = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .f_max = 100000000,
+ .signal_direction = true,
+ .pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
+};
+
+static struct variant_data variant_ux500 = {
+ .fifosize = 30 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .datalength_bits = 24,
+ .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .st_sdio = true,
+ .st_clkdiv = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .f_max = 100000000,
+ .signal_direction = true,
+ .pwrreg_clkgate = true,
+ .busy_detect = true,
+ .pwrreg_nopower = true,
+};
+
+static struct variant_data variant_ux500v2 = {
+ .fifosize = 30 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .datactrl_mask_ddrmode = MCI_ST_DPSM_DDRMODE,
+ .datalength_bits = 24,
+ .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .st_sdio = true,
+ .st_clkdiv = true,
+ .blksz_datactrl16 = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .f_max = 100000000,
+ .signal_direction = true,
+ .pwrreg_clkgate = true,
+ .busy_detect = true,
+ .pwrreg_nopower = true,
+};
+
+static struct variant_data variant_qcom = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .clkreg_enable = MCI_QCOM_CLK_FLOWENA |
+ MCI_QCOM_CLK_SELECT_IN_FBCLK,
+ .clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
+ .datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
+ .data_cmd_enable = MCI_QCOM_CSPM_DATCMD,
+ .blksz_datactrl4 = true,
+ .datalength_bits = 24,
+ .pwrreg_powerup = MCI_PWR_UP,
+ .f_max = 208000000,
+ .explicit_mclk_control = true,
+ .qcom_fifo = true,
+ .qcom_dml = true,
+};
+
+static int mmci_card_busy(struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int busy = 0;
+
+ pm_runtime_get_sync(mmc_dev(mmc));
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
+ busy = 1;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
+
+ return busy;
+}
+
+/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ if (!data)
+ return 0;
+
+ if (!is_power_of_2(data->blksz)) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported block size (%d bytes)\n", data->blksz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mmci_reg_delay(struct mmci_host *host)
+{
+ /*
+ * According to the spec, at least three feedback clock cycles
+ * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
+ * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
+ * Worst delay time during card init is at 100 kHz => 30 us.
+ * Worst delay time when up and running is at 25 MHz => 120 ns.
+ */
+ if (host->cclk < 25000000)
+ udelay(30);
+ else
+ ndelay(120);
+}
+
+/*
+ * This must be called with host->lock held
+ */
+static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
+{
+ if (host->clk_reg != clk) {
+ host->clk_reg = clk;
+ writel(clk, host->base + MMCICLOCK);
+ }
+}
+
+/*
+ * This must be called with host->lock held
+ */
+static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
+{
+ if (host->pwr_reg != pwr) {
+ host->pwr_reg = pwr;
+ writel(pwr, host->base + MMCIPOWER);
+ }
+}
+
+/*
+ * This must be called with host->lock held
+ */
+static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
+{
+ /* Keep ST Micro busy mode if enabled */
+ datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;
+
+ if (host->datactrl_reg != datactrl) {
+ host->datactrl_reg = datactrl;
+ writel(datactrl, host->base + MMCIDATACTRL);
+ }
+}
+
+/*
+ * This must be called with host->lock held
+ */
+static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
+{
+ struct variant_data *variant = host->variant;
+ u32 clk = variant->clkreg;
+
+ /* Make sure cclk reflects the current calculated clock */
+ host->cclk = 0;
+
+ if (desired) {
+ if (variant->explicit_mclk_control) {
+ host->cclk = host->mclk;
+ } else if (desired >= host->mclk) {
+ clk = MCI_CLK_BYPASS;
+ if (variant->st_clkdiv)
+ clk |= MCI_ST_UX500_NEG_EDGE;
+ host->cclk = host->mclk;
+ } else if (variant->st_clkdiv) {
+ /*
+ * DB8500 TRM says f = mclk / (clkdiv + 2)
+ * => clkdiv = (mclk / f) - 2
+ * Round the divider up so we don't exceed the max
+ * frequency
+ */
+ clk = DIV_ROUND_UP(host->mclk, desired) - 2;
+ if (clk >= 256)
+ clk = 255;
+ host->cclk = host->mclk / (clk + 2);
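+ /* e.g. (illustrative) mclk = 100 MHz, desired = 400 kHz:
+ * clkdiv = 248, cclk = 100 MHz / 250 = 400 kHz
+ */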
+ } else {
+ /*
+ * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
+ * => clkdiv = mclk / (2 * f) - 1
+ */
+ clk = host->mclk / (2 * desired) - 1;
+ if (clk >= 256)
+ clk = 255;
+ host->cclk = host->mclk / (2 * (clk + 1));
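+ /* e.g. (illustrative) mclk = 100 MHz, desired = 25 MHz:
+ * clkdiv = 1, cclk = 100 MHz / 4 = 25 MHz
+ */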
+ }
+
+ clk |= variant->clkreg_enable;
+ clk |= MCI_CLK_ENABLE;
+ /* This hasn't proven to be worthwhile */
+ /* clk |= MCI_CLK_PWRSAVE; */
+ }
+
+ /* Set actual clock for debug */
+ host->mmc->actual_clock = host->cclk;
+
+ if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+ clk |= MCI_4BIT_BUS;
+ if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+ clk |= variant->clkreg_8bit_bus_enable;
+
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
+ host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
+ clk |= variant->clkreg_neg_edge_enable;
+
+ mmci_write_clkreg(host, clk);
+}
+
+static void
+mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
+{
+ writel(0, host->base + MMCICOMMAND);
+
+ BUG_ON(host->data);
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+
+ mmc_request_done(host->mmc, mrq);
+
+ pm_runtime_mark_last_busy(mmc_dev(host->mmc));
+ pm_runtime_put_autosuspend(mmc_dev(host->mmc));
+}
+
+static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
+{
+ void __iomem *base = host->base;
+
+ if (host->singleirq) {
+ unsigned int mask0 = readl(base + MMCIMASK0);
+
+ mask0 &= ~MCI_IRQ1MASK;
+ mask0 |= mask;
+
+ writel(mask0, base + MMCIMASK0);
+ }
+
+ writel(mask, base + MMCIMASK1);
+}
+
+static void mmci_stop_data(struct mmci_host *host)
+{
+ mmci_write_datactrlreg(host, 0);
+ mmci_set_mask1(host, 0);
+ host->data = NULL;
+}
+
+static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
+{
+ unsigned int flags = SG_MITER_ATOMIC;
+
+ if (data->flags & MMC_DATA_READ)
+ flags |= SG_MITER_TO_SG;
+ else
+ flags |= SG_MITER_FROM_SG;
+
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
+}
+
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface;
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void mmci_dma_setup(struct mmci_host *host)
+{
+ const char *rxname, *txname;
+ struct variant_data *variant = host->variant;
+
+ host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
+ host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+
+ /* initialize pre request cookie */
+ host->next_data.cookie = 1;
+
+ /*
+ * If only an RX channel is specified, the driver will
+ * attempt to use it bidirectionally; however, if it is
+ * specified but cannot be located, DMA will be disabled.
+ */
+ if (host->dma_rx_channel && !host->dma_tx_channel)
+ host->dma_tx_channel = host->dma_rx_channel;
+
+ if (host->dma_rx_channel)
+ rxname = dma_chan_name(host->dma_rx_channel);
+ else
+ rxname = "none";
+
+ if (host->dma_tx_channel)
+ txname = dma_chan_name(host->dma_tx_channel);
+ else
+ txname = "none";
+
+ dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
+ rxname, txname);
+
+ /*
+ * Limit the maximum segment size in any SG entry according to
+ * the parameters of the DMA engine device.
+ */
+ if (host->dma_tx_channel) {
+ struct device *dev = host->dma_tx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+ if (host->dma_rx_channel) {
+ struct device *dev = host->dma_rx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+
+ if (variant->qcom_dml && host->dma_rx_channel && host->dma_tx_channel)
+ if (dml_hw_init(host, host->mmc->parent->of_node))
+ variant->qcom_dml = false;
+}
+
+/*
+ * This is used only in setup, teardown and error paths,
+ * so inline it so it can be discarded.
+ */
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+ if (host->dma_rx_channel)
+ dma_release_channel(host->dma_rx_channel);
+ if (host->dma_tx_channel)
+ dma_release_channel(host->dma_tx_channel);
+ host->dma_rx_channel = host->dma_tx_channel = NULL;
+}
+
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(host->dma_current);
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
+ host->data->host_cookie = 0;
+}
+
+static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+ struct dma_chan *chan;
+ enum dma_data_direction dir;
+
+ if (data->flags & MMC_DATA_READ) {
+ dir = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ dir = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
+ u32 status;
+ int i;
+
+ /* Wait up to 1ms for the DMA to complete */
+ for (i = 0; ; i++) {
+ status = readl(host->base + MMCISTATUS);
+ if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
+ break;
+ udelay(10);
+ }
+
+ /*
+ * Check to see whether we still have some data left in the FIFO -
+ * this catches DMA controllers which are unable to monitor the
+ * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
+ * contiguous buffers. On TX, we'll get a FIFO underrun error.
+ */
+ if (status & MCI_RXDATAAVLBLMASK) {
+ mmci_dma_data_error(host);
+ if (!data->error)
+ data->error = -EIO;
+ }
+
+ if (!data->host_cookie)
+ mmci_dma_unmap(host, data);
+
+ /*
+ * Use of DMA with scatter-gather is impossible.
+ * Give up with DMA and switch back to PIO mode.
+ */
+ if (status & MCI_RXDATAAVLBLMASK) {
+ dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
+ mmci_dma_release(host);
+ }
+
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
+}
+
+/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
+static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+ struct dma_chan **dma_chan,
+ struct dma_async_tx_descriptor **dma_desc)
+{
+ struct variant_data *variant = host->variant;
+ struct dma_slave_config conf = {
+ .src_addr = host->phybase + MMCIFIFO,
+ .dst_addr = host->phybase + MMCIFIFO,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ .device_fc = false,
+ };
+ struct dma_chan *chan;
+ struct dma_device *device;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_data_direction buffer_dirn;
+ int nr_sg;
+ unsigned long flags = DMA_CTRL_ACK;
+
+ if (data->flags & MMC_DATA_READ) {
+ conf.direction = DMA_DEV_TO_MEM;
+ buffer_dirn = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ conf.direction = DMA_MEM_TO_DEV;
+ buffer_dirn = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ /* If there's no DMA channel, fall back to PIO */
+ if (!chan)
+ return -EINVAL;
+
+ /* If less than or equal to the fifo size, don't bother with DMA */
+ if (data->blksz * data->blocks <= variant->fifosize)
+ return -EINVAL;
+
+ device = chan->device;
+ nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
+ if (nr_sg == 0)
+ return -EINVAL;
+
+ if (host->variant->qcom_dml)
+ flags |= DMA_PREP_INTERRUPT;
+
+ dmaengine_slave_config(chan, &conf);
+ desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
+ conf.direction, flags);
+ if (!desc)
+ goto unmap_exit;
+
+ *dma_chan = chan;
+ *dma_desc = desc;
+
+ return 0;
+
+ unmap_exit:
+ dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
+ return -ENOMEM;
+}
+
+static inline int mmci_dma_prep_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ /* Check if the next job is already prepared. */
+ if (host->dma_current && host->dma_desc_current)
+ return 0;
+
+ /* No job was prepared, so do it now. */
+ return __mmci_dma_prep_data(host, data, &host->dma_current,
+ &host->dma_desc_current);
+}
+
+static inline int mmci_dma_prep_next(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ struct mmci_host_next *nd = &host->next_data;
+ return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ int ret;
+ struct mmc_data *data = host->data;
+
+ ret = mmci_dma_prep_data(host, host->data);
+ if (ret)
+ return ret;
+
+ /* Okay, go for it. */
+ dev_vdbg(mmc_dev(host->mmc),
+ "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+ data->sg_len, data->blksz, data->blocks, data->flags);
+ dmaengine_submit(host->dma_desc_current);
+ dma_async_issue_pending(host->dma_current);
+
+ if (host->variant->qcom_dml)
+ dml_start_xfer(host, data);
+
+ datactrl |= MCI_DPSM_DMAENABLE;
+
+ /* Trigger the DMA transfer */
+ mmci_write_datactrlreg(host, datactrl);
+
+ /*
+ * Let the MMCI say when the data is ended and it's time
+ * to fire the next DMA request. When that happens, MMCI will
+ * call mmci_data_end()
+ */
+ writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+ host->base + MMCIMASK0);
+ return 0;
+}
+
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+ struct mmci_host_next *next = &host->next_data;
+
+ WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
+ WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
+
+ host->dma_desc_current = next->dma_desc;
+ host->dma_current = next->dma_chan;
+ next->dma_desc = NULL;
+ next->dma_chan = NULL;
+}
+
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+ struct mmci_host_next *nd = &host->next_data;
+
+ if (!data)
+ return;
+
+ BUG_ON(data->host_cookie);
+
+ if (mmci_validate_data(host, data))
+ return;
+
+ if (!mmci_dma_prep_next(host, data))
+ data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
+}
+
+static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (!data || !data->host_cookie)
+ return;
+
+ mmci_dma_unmap(host, data);
+
+ if (err) {
+ struct mmci_host_next *next = &host->next_data;
+ struct dma_chan *chan;
+ if (data->flags & MMC_DATA_READ)
+ chan = host->dma_rx_channel;
+ else
+ chan = host->dma_tx_channel;
+ dmaengine_terminate_all(chan);
+
+ if (host->dma_desc_current == next->dma_desc)
+ host->dma_desc_current = NULL;
+
+ if (host->dma_current == next->dma_chan)
+ host->dma_current = NULL;
+
+ next->dma_desc = NULL;
+ next->dma_chan = NULL;
+ data->host_cookie = 0;
+ }
+}
+
+#else
+/* Blank functions if the DMA engine is not available */
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+}
+static inline void mmci_dma_setup(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_finalize(struct mmci_host *host,
+ struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_data_error(struct mmci_host *host)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ return -ENOSYS;
+}
+
+#define mmci_pre_request NULL
+#define mmci_post_request NULL
+
+#endif
+
+static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
+{
+ struct variant_data *variant = host->variant;
+ unsigned int datactrl, timeout, irqmask;
+ unsigned long long clks;
+ void __iomem *base;
+ int blksz_bits;
+
+ dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
+ data->blksz, data->blocks, data->flags);
+
+ host->data = data;
+ host->size = data->blksz * data->blocks;
+ data->bytes_xfered = 0;
+
+ clks = (unsigned long long)data->timeout_ns * host->cclk;
+ do_div(clks, NSEC_PER_SEC);
+
+ timeout = data->timeout_clks + (unsigned int)clks;
+
+ base = host->base;
+ writel(timeout, base + MMCIDATATIMER);
+ writel(host->size, base + MMCIDATALENGTH);
+
+ blksz_bits = ffs(data->blksz) - 1;
+ BUG_ON(1 << blksz_bits != data->blksz);
+
+ if (variant->blksz_datactrl16)
+ datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
+ else if (variant->blksz_datactrl4)
+ datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
+ else
+ datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
+
+ if (data->flags & MMC_DATA_READ)
+ datactrl |= MCI_DPSM_DIRECTION;
+
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
+ u32 clk;
+
+ datactrl |= variant->datactrl_mask_sdio;
+
+ /*
+ * The ST Micro variant for SDIO small write transfers
+ * needs to have clock H/W flow control disabled,
+ * otherwise the transfer will not start. The threshold
+ * depends on the rate of MCLK.
+ */
+ if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
+ (host->size < 8 ||
+ (host->size <= 8 && host->mclk > 50000000)))
+ clk = host->clk_reg & ~variant->clkreg_enable;
+ else
+ clk = host->clk_reg | variant->clkreg_enable;
+
+ mmci_write_clkreg(host, clk);
+ }
+
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
+ host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
+ datactrl |= variant->datactrl_mask_ddrmode;
+
+ /*
+ * Attempt to use DMA operation mode, if this
+ * should fail, fall back to PIO mode
+ */
+ if (!mmci_dma_start_data(host, datactrl))
+ return;
+
+ /* IRQ mode, map the SG list for CPU reading/writing */
+ mmci_init_sg(host, data);
+
+ if (data->flags & MMC_DATA_READ) {
+ irqmask = MCI_RXFIFOHALFFULLMASK;
+
+ /*
+ * If we have less than the fifo 'half-full' threshold to
+ * transfer, trigger a PIO interrupt as soon as any data
+ * is available.
+ */
+ if (host->size < variant->fifohalfsize)
+ irqmask |= MCI_RXDATAAVLBLMASK;
+ } else {
+ /*
+ * We don't actually need to include "FIFO empty" here
+		 * since it's implicit in "FIFO half empty".
+ */
+ irqmask = MCI_TXFIFOHALFEMPTYMASK;
+ }
+
+ mmci_write_datactrlreg(host, datactrl);
+ writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
+ mmci_set_mask1(host, irqmask);
+}
+
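+/*
+ * Program the command path state machine (CPSM). If the CPSM is still
+ * enabled from a previous command, it must be disabled and given a
+ * register-write delay before the new opcode and argument are written.
+ */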
+static void
+mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
+{
+ void __iomem *base = host->base;
+
+ dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
+ cmd->opcode, cmd->arg, cmd->flags);
+
+ if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
+ writel(0, base + MMCICOMMAND);
+ mmci_reg_delay(host);
+ }
+
+ c |= cmd->opcode | MCI_CPSM_ENABLE;
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136)
+ c |= MCI_CPSM_LONGRSP;
+ c |= MCI_CPSM_RESPONSE;
+ }
+ if (/*interrupt*/0)
+ c |= MCI_CPSM_INTERRUPT;
+
+ if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
+ c |= host->variant->data_cmd_enable;
+
+ host->cmd = cmd;
+
+ writel(cmd->arg, base + MMCIARGUMENT);
+ writel(c, base + MMCICOMMAND);
+}
+
+static void
+mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
+ unsigned int status)
+{
+ /* Make sure we have data to handle */
+ if (!data)
+ return;
+
+ /* First check for errors */
+ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+ MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ u32 remain, success;
+
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host)) {
+ mmci_dma_data_error(host);
+ mmci_dma_unmap(host, data);
+ }
+
+ /*
+ * Calculate how far we are into the transfer. Note that
+ * the data counter gives the number of bytes transferred
+ * on the MMC bus, not on the host side. On reads, this
+ * can be as much as a FIFO-worth of data ahead. This
+ * matters for FIFO overruns only.
+ */
+ remain = readl(host->base + MMCIDATACNT);
+ success = data->blksz * data->blocks - remain;
+
+ dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
+ status, success);
+ if (status & MCI_DATACRCFAIL) {
+ /* Last block was not successful */
+ success -= 1;
+ data->error = -EILSEQ;
+ } else if (status & MCI_DATATIMEOUT) {
+ data->error = -ETIMEDOUT;
+ } else if (status & MCI_STARTBITERR) {
+ data->error = -ECOMM;
+ } else if (status & MCI_TXUNDERRUN) {
+ data->error = -EIO;
+ } else if (status & MCI_RXOVERRUN) {
+ if (success > host->variant->fifosize)
+ success -= host->variant->fifosize;
+ else
+ success = 0;
+ data->error = -EIO;
+ }
+ data->bytes_xfered = round_down(success, data->blksz);
+ }
+
+ if (status & MCI_DATABLOCKEND)
+ dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
+
+ if (status & MCI_DATAEND || data->error) {
+ if (dma_inprogress(host))
+ mmci_dma_finalize(host, data);
+ mmci_stop_data(host);
+
+ if (!data->error)
+ /* The error clause is handled above, success! */
+ data->bytes_xfered = data->blksz * data->blocks;
+
+ if (!data->stop || host->mrq->sbc) {
+ mmci_request_end(host, data->mrq);
+ } else {
+ mmci_start_command(host, data->stop, 0);
+ }
+ }
+}
+
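+/*
+ * Handle command completion. On variants with busy detection this also
+ * runs the busy-wait state machine: an R1B command is not completed
+ * until the card releases DAT0 and the busy-end interrupt fires.
+ */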
+static void
+mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
+ unsigned int status)
+{
+ void __iomem *base = host->base;
+ bool sbc, busy_resp;
+
+ if (!cmd)
+ return;
+
+ sbc = (cmd == host->mrq->sbc);
+ busy_resp = host->variant->busy_detect && (cmd->flags & MMC_RSP_BUSY);
+
+ if (!((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
+ MCI_CMDSENT|MCI_CMDRESPEND)))
+ return;
+
+ /* Check if we need to wait for busy completion. */
+ if (host->busy_status && (status & MCI_ST_CARDBUSY))
+ return;
+
+ /* Enable busy completion if needed and supported. */
+ if (!host->busy_status && busy_resp &&
+ !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
+ (readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) {
+ writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND,
+ base + MMCIMASK0);
+ host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
+ return;
+ }
+
+ /* At busy completion, mask the IRQ and complete the request. */
+ if (host->busy_status) {
+ writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND,
+ base + MMCIMASK0);
+ host->busy_status = 0;
+ }
+
+ host->cmd = NULL;
+
+ if (status & MCI_CMDTIMEOUT) {
+ cmd->error = -ETIMEDOUT;
+ } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
+ cmd->error = -EILSEQ;
+ } else {
+ cmd->resp[0] = readl(base + MMCIRESPONSE0);
+ cmd->resp[1] = readl(base + MMCIRESPONSE1);
+ cmd->resp[2] = readl(base + MMCIRESPONSE2);
+ cmd->resp[3] = readl(base + MMCIRESPONSE3);
+ }
+
+ if ((!sbc && !cmd->data) || cmd->error) {
+ if (host->data) {
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host)) {
+ mmci_dma_data_error(host);
+ mmci_dma_unmap(host, host->data);
+ }
+ mmci_stop_data(host);
+ }
+ mmci_request_end(host, host->mrq);
+ } else if (sbc) {
+ mmci_start_command(host, host->mrq->cmd, 0);
+ } else if (!(cmd->data->flags & MMC_DATA_READ)) {
+ mmci_start_data(host, cmd->data);
+ }
+}
+
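+/*
+ * MMCIFIFOCNT holds the number of words still to be transferred on the
+ * card side, so the bytes available for reading from the FIFO are the
+ * remaining bytes minus that count.
+ */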
+static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
+{
+ return remain - (readl(host->base + MMCIFIFOCNT) << 2);
+}
+
+static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
+{
+ /*
+	 * On Qualcomm SDCC4 only 8 words are used in each burst, so only
+	 * 8 addresses from the FIFO range should be used.
+ */
+ if (status & MCI_RXFIFOHALFFULL)
+ return host->variant->fifohalfsize;
+ else if (status & MCI_RXDATAAVLBL)
+ return 4;
+
+ return 0;
+}
+
+static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
+{
+ void __iomem *base = host->base;
+ char *ptr = buffer;
+ u32 status = readl(host->base + MMCISTATUS);
+ int host_remain = host->size;
+
+ do {
+ int count = host->get_rx_fifocnt(host, status, host_remain);
+
+ if (count > remain)
+ count = remain;
+
+ if (count <= 0)
+ break;
+
+ /*
+ * SDIO especially may want to send something that is
+ * not divisible by 4 (as opposed to card sectors
+ * etc). Therefore make sure to always read the last bytes
+ * while only doing full 32-bit reads towards the FIFO.
+ */
+ if (unlikely(count & 0x3)) {
+ if (count < 4) {
+ unsigned char buf[4];
+ ioread32_rep(base + MMCIFIFO, buf, 1);
+ memcpy(ptr, buf, count);
+ } else {
+ ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
+ count &= ~0x3;
+ }
+ } else {
+ ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
+ }
+
+ ptr += count;
+ remain -= count;
+ host_remain -= count;
+
+ if (remain == 0)
+ break;
+
+ status = readl(base + MMCISTATUS);
+ } while (status & MCI_RXDATAAVLBL);
+
+ return ptr - buffer;
+}
+
+static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
+{
+ struct variant_data *variant = host->variant;
+ void __iomem *base = host->base;
+ char *ptr = buffer;
+
+ do {
+ unsigned int count, maxcnt;
+
+ maxcnt = status & MCI_TXFIFOEMPTY ?
+ variant->fifosize : variant->fifohalfsize;
+ count = min(remain, maxcnt);
+
+ /*
+ * SDIO especially may want to send something that is
+ * not divisible by 4 (as opposed to card sectors
+		 * etc), and the FIFO only accepts full 32-bit writes.
+		 * So compensate by adding 3 to the count: a single
+		 * byte becomes one 32-bit write, 7 bytes become two
+		 * 32-bit writes, etc.
+ */
+ iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);
+
+ ptr += count;
+ remain -= count;
+
+ if (remain == 0)
+ break;
+
+ status = readl(base + MMCISTATUS);
+ } while (status & MCI_TXFIFOHALFEMPTY);
+
+ return ptr - buffer;
+}
+
+/*
+ * PIO data transfer IRQ handler.
+ */
+static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
+{
+ struct mmci_host *host = dev_id;
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ struct variant_data *variant = host->variant;
+ void __iomem *base = host->base;
+ u32 status;
+
+ status = readl(base + MMCISTATUS);
+
+ dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
+
+ do {
+ unsigned int remain, len;
+ char *buffer;
+
+ /*
+ * For write, we only need to test the half-empty flag
+ * here - if the FIFO is completely empty, then by
+ * definition it is more than half empty.
+ *
+ * For read, check for data available.
+ */
+ if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
+ break;
+
+ if (!sg_miter_next(sg_miter))
+ break;
+
+ buffer = sg_miter->addr;
+ remain = sg_miter->length;
+
+ len = 0;
+ if (status & MCI_RXACTIVE)
+ len = mmci_pio_read(host, buffer, remain);
+ if (status & MCI_TXACTIVE)
+ len = mmci_pio_write(host, buffer, remain, status);
+
+ sg_miter->consumed = len;
+
+ host->size -= len;
+ remain -= len;
+
+ if (remain)
+ break;
+
+ status = readl(base + MMCISTATUS);
+ } while (1);
+
+ sg_miter_stop(sg_miter);
+
+ /*
+ * If we have less than the fifo 'half-full' threshold to transfer,
+ * trigger a PIO interrupt as soon as any data is available.
+ */
+ if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
+ mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
+
+ /*
+ * If we run out of data, disable the data IRQs; this
+ * prevents a race where the FIFO becomes empty before
+ * the chip itself has disabled the data path, and
+ * stops us racing with our data end IRQ.
+ */
+ if (host->size == 0) {
+ mmci_set_mask1(host, 0);
+ writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Handle completion of command and data transfers.
+ */
+static irqreturn_t mmci_irq(int irq, void *dev_id)
+{
+ struct mmci_host *host = dev_id;
+ u32 status;
+ int ret = 0;
+
+ spin_lock(&host->lock);
+
+ do {
+ status = readl(host->base + MMCISTATUS);
+
+ if (host->singleirq) {
+ if (status & readl(host->base + MMCIMASK1))
+ mmci_pio_irq(irq, dev_id);
+
+ status &= ~MCI_IRQ1MASK;
+ }
+
+ /*
+ * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
+ * enabled) since the HW seems to be triggering the IRQ on both
+ * edges while monitoring DAT0 for busy completion.
+ */
+ status &= readl(host->base + MMCIMASK0);
+ writel(status, host->base + MMCICLEAR);
+
+ dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
+
+ if (host->variant->reversed_irq_handling) {
+ mmci_data_irq(host, host->data, status);
+ mmci_cmd_irq(host, host->cmd, status);
+ } else {
+ mmci_cmd_irq(host, host->cmd, status);
+ mmci_data_irq(host, host->data, status);
+ }
+
+ /* Don't poll for busy completion in irq context. */
+ if (host->busy_status)
+ status &= ~MCI_ST_CARDBUSY;
+
+ ret = 1;
+ } while (status);
+
+ spin_unlock(&host->lock);
+
+ return IRQ_RETVAL(ret);
+}
+
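+/*
+ * Request entry point. The data is validated before the runtime PM
+ * reference is taken; for reads the data path is started ahead of the
+ * command so the DPSM is ready when the card starts sending.
+ */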
+static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ WARN_ON(host->mrq != NULL);
+
+ mrq->cmd->error = mmci_validate_data(host, mrq->data);
+ if (mrq->cmd->error) {
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ pm_runtime_get_sync(mmc_dev(mmc));
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->mrq = mrq;
+
+ if (mrq->data)
+ mmci_get_next_data(host, mrq->data);
+
+ if (mrq->data && mrq->data->flags & MMC_DATA_READ)
+ mmci_start_data(host, mrq->data);
+
+ if (mrq->sbc)
+ mmci_start_command(host, mrq->sbc, 0);
+ else
+ mmci_start_command(host, mrq->cmd, 0);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
+ u32 pwr = 0;
+ unsigned long flags;
+ int ret;
+
+ pm_runtime_get_sync(mmc_dev(mmc));
+
+ if (host->plat->ios_handler &&
+ host->plat->ios_handler(mmc_dev(mmc), ios))
+ dev_err(mmc_dev(mmc), "platform ios_handler failed\n");
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+ if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
+ regulator_disable(mmc->supply.vqmmc);
+ host->vqmmc_enabled = false;
+ }
+
+ break;
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
+ /*
+		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
+		 * and instead uses MCI_PWR_ON, so apply whatever value is
+ * configured in the variant data.
+ */
+ pwr |= variant->pwrreg_powerup;
+
+ break;
+ case MMC_POWER_ON:
+ if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (ret < 0)
+ dev_err(mmc_dev(mmc),
+ "failed to enable vqmmc regulator\n");
+ else
+ host->vqmmc_enabled = true;
+ }
+
+ pwr |= MCI_PWR_ON;
+ break;
+ }
+
+ if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
+ /*
+ * The ST Micro variant has some additional bits
+ * indicating signal direction for the signals in
+ * the SD/MMC bus and feedback-clock usage.
+ */
+ pwr |= host->pwr_reg_add;
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ pwr &= ~MCI_ST_DATA74DIREN;
+ else if (ios->bus_width == MMC_BUS_WIDTH_1)
+ pwr &= (~MCI_ST_DATA74DIREN &
+ ~MCI_ST_DATA31DIREN &
+ ~MCI_ST_DATA2DIREN);
+ }
+
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
+ if (host->hw_designer != AMBA_VENDOR_ST)
+ pwr |= MCI_ROD;
+ else {
+ /*
+			 * The ST Micro variant uses the ROD bit for something
+ * else and only has OD (Open Drain).
+ */
+ pwr |= MCI_OD;
+ }
+ }
+
+ /*
+ * If clock = 0 and the variant requires the MMCIPOWER to be used for
+ * gating the clock, the MCI_PWR_ON bit is cleared.
+ */
+ if (!ios->clock && variant->pwrreg_clkgate)
+ pwr &= ~MCI_PWR_ON;
+
+ if (host->variant->explicit_mclk_control &&
+ ios->clock != host->clock_cache) {
+ ret = clk_set_rate(host->clk, ios->clock);
+ if (ret < 0)
+ dev_err(mmc_dev(host->mmc),
+ "Error setting clock rate (%d)\n", ret);
+ else
+ host->mclk = clk_get_rate(host->clk);
+ }
+ host->clock_cache = ios->clock;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ mmci_set_clkreg(host, ios->clock);
+ mmci_write_pwrreg(host, pwr);
+ mmci_reg_delay(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
+}
+
+static int mmci_get_cd(struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmci_platform_data *plat = host->plat;
+ unsigned int status = mmc_gpio_get_cd(mmc);
+
+ if (status == -ENOSYS) {
+ if (!plat->status)
+ return 1; /* Assume always present */
+
+ status = plat->status(mmc_dev(host->mmc));
+ }
+ return status;
+}
+
+static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ int ret = 0;
+
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+
+ pm_runtime_get_sync(mmc_dev(mmc));
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ ret = regulator_set_voltage(mmc->supply.vqmmc,
+ 2700000, 3600000);
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ ret = regulator_set_voltage(mmc->supply.vqmmc,
+ 1700000, 1950000);
+ break;
+ case MMC_SIGNAL_VOLTAGE_120:
+ ret = regulator_set_voltage(mmc->supply.vqmmc,
+ 1100000, 1300000);
+ break;
+ }
+
+ if (ret)
+ dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
+
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
+ }
+
+ return ret;
+}
+
+static struct mmc_host_ops mmci_ops = {
+ .request = mmci_request,
+ .pre_req = mmci_pre_request,
+ .post_req = mmci_post_request,
+ .set_ios = mmci_set_ios,
+ .get_ro = mmc_gpio_get_ro,
+ .get_cd = mmci_get_cd,
+ .start_signal_voltage_switch = mmci_sig_volt_switch,
+};
+
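+/*
+ * Parse the standard MMC DT bindings via mmc_of_parse(), then the
+ * ST Micro signal-direction and feedback-clock properties, which are
+ * accumulated into pwr_reg_add for later writes to MMCIPOWER.
+ */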
+static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ int ret = mmc_of_parse(mmc);
+
+ if (ret)
+ return ret;
+
+ if (of_get_property(np, "st,sig-dir-dat0", NULL))
+ host->pwr_reg_add |= MCI_ST_DATA0DIREN;
+ if (of_get_property(np, "st,sig-dir-dat2", NULL))
+ host->pwr_reg_add |= MCI_ST_DATA2DIREN;
+ if (of_get_property(np, "st,sig-dir-dat31", NULL))
+ host->pwr_reg_add |= MCI_ST_DATA31DIREN;
+ if (of_get_property(np, "st,sig-dir-dat74", NULL))
+ host->pwr_reg_add |= MCI_ST_DATA74DIREN;
+ if (of_get_property(np, "st,sig-dir-cmd", NULL))
+ host->pwr_reg_add |= MCI_ST_CMDDIREN;
+ if (of_get_property(np, "st,sig-pin-fbclk", NULL))
+ host->pwr_reg_add |= MCI_ST_FBCLKEN;
+
+ if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
+ if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+
+ return 0;
+}
+
+static int mmci_probe(struct amba_device *dev,
+ const struct amba_id *id)
+{
+ struct mmci_platform_data *plat = dev->dev.platform_data;
+ struct device_node *np = dev->dev.of_node;
+ struct variant_data *variant = id->data;
+ struct mmci_host *host;
+ struct mmc_host *mmc;
+ int ret;
+
+ /* Must have platform data or Device Tree. */
+ if (!plat && !np) {
+ dev_err(&dev->dev, "No plat data or DT found\n");
+ return -EINVAL;
+ }
+
+ if (!plat) {
+ plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+ }
+
+ mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ ret = mmci_of_parse(np, mmc);
+ if (ret)
+ goto host_free;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ host->hw_designer = amba_manf(dev);
+ host->hw_revision = amba_rev(dev);
+ dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
+ dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
+
+ host->clk = devm_clk_get(&dev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto host_free;
+ }
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ goto host_free;
+
+ if (variant->qcom_fifo)
+ host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
+ else
+ host->get_rx_fifocnt = mmci_get_rx_fifocnt;
+
+ host->plat = plat;
+ host->variant = variant;
+ host->mclk = clk_get_rate(host->clk);
+ /*
+ * According to the spec, mclk is max 100 MHz,
+	 * so we try to adjust the clock down to this
+	 * (if possible).
+ */
+ if (host->mclk > variant->f_max) {
+ ret = clk_set_rate(host->clk, variant->f_max);
+ if (ret < 0)
+ goto clk_disable;
+ host->mclk = clk_get_rate(host->clk);
+ dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
+ host->mclk);
+ }
+
+ host->phybase = dev->res.start;
+ host->base = devm_ioremap_resource(&dev->dev, &dev->res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
+ goto clk_disable;
+ }
+
+ /*
+ * The ARM and ST versions of the block have slightly different
+ * clock divider equations which means that the minimum divider
+ * differs too.
+	 * On Qualcomm-like controllers, get the nearest minimum clock rate
+	 * to 100 kHz.
+ */
+ if (variant->st_clkdiv)
+ mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
+ else if (variant->explicit_mclk_control)
+ mmc->f_min = clk_round_rate(host->clk, 100000);
+ else
+ mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
+ /*
+ * If no maximum operating frequency is supplied, fall back to use
+ * the module parameter, which has a (low) default value in case it
+ * is not specified. Either value must not exceed the clock rate into
+ * the block, of course.
+ */
+ if (mmc->f_max)
+ mmc->f_max = variant->explicit_mclk_control ?
+ min(variant->f_max, mmc->f_max) :
+ min(host->mclk, mmc->f_max);
+ else
+ mmc->f_max = variant->explicit_mclk_control ?
+ fmax : min(host->mclk, fmax);
+
+ dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
+
+ /* Get regulators and the supported OCR mask */
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ goto clk_disable;
+
+ if (!mmc->ocr_avail)
+ mmc->ocr_avail = plat->ocr_mask;
+ else if (plat->ocr_mask)
+ dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+
+ /* DT takes precedence over platform data. */
+ if (!np) {
+ if (!plat->cd_invert)
+ mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+ mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ }
+
+ /* We support these capabilities. */
+ mmc->caps |= MMC_CAP_CMD23;
+
+ if (variant->busy_detect) {
+ mmci_ops.card_busy = mmci_card_busy;
+ mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
+ mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+ mmc->max_busy_timeout = 0;
+ }
+
+ mmc->ops = &mmci_ops;
+
+ /* We support these PM capabilities. */
+ mmc->pm_caps |= MMC_PM_KEEP_POWER;
+
+ /*
+	 * We can do scatter/gather I/O (SGIO).
+ */
+ mmc->max_segs = NR_SG;
+
+ /*
+ * Since only a certain number of bits are valid in the data length
+ * register, we must ensure that we don't exceed 2^num-1 bytes in a
+ * single request.
+ */
+ mmc->max_req_size = (1 << variant->datalength_bits) - 1;
+
+ /*
+ * Set the maximum segment size. Since we aren't doing DMA
+ * (yet) we are only limited by the data length register.
+ */
+ mmc->max_seg_size = mmc->max_req_size;
+
+ /*
+ * Block size can be up to 2048 bytes, but must be a power of two.
+ */
+ mmc->max_blk_size = 1 << 11;
+
+ /*
+ * Limit the number of blocks transferred so that we don't overflow
+ * the maximum request size.
+ */
+ mmc->max_blk_count = mmc->max_req_size >> 11;
+
+ spin_lock_init(&host->lock);
+
+ writel(0, host->base + MMCIMASK0);
+ writel(0, host->base + MMCIMASK1);
+ writel(0xfff, host->base + MMCICLEAR);
+
+ /*
+	 * If we are not using DT but have a descriptor table, or are using
+	 * a table of descriptors alongside DT, look up the descriptors named
+	 * "cd" and "wp" right here; fail silently if they do not exist and
+	 * proceed to try platform data.
+ */
+ if (!np) {
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ if (ret < 0) {
+ if (ret == -EPROBE_DEFER)
+ goto clk_disable;
+ else if (gpio_is_valid(plat->gpio_cd)) {
+ ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
+ if (ret)
+ goto clk_disable;
+ }
+ }
+
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
+ if (ret < 0) {
+ if (ret == -EPROBE_DEFER)
+ goto clk_disable;
+ else if (gpio_is_valid(plat->gpio_wp)) {
+ ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
+ if (ret)
+ goto clk_disable;
+ }
+ }
+ }
+
+ ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
+ DRIVER_NAME " (cmd)", host);
+ if (ret)
+ goto clk_disable;
+
+ if (!dev->irq[1])
+ host->singleirq = true;
+ else {
+ ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
+ IRQF_SHARED, DRIVER_NAME " (pio)", host);
+ if (ret)
+ goto clk_disable;
+ }
+
+ writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+
+ amba_set_drvdata(dev, mmc);
+
+ dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
+ mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
+ amba_rev(dev), (unsigned long long)dev->res.start,
+ dev->irq[0], dev->irq[1]);
+
+ mmci_dma_setup(host);
+
+ pm_runtime_set_autosuspend_delay(&dev->dev, 50);
+ pm_runtime_use_autosuspend(&dev->dev);
+
+ mmc_add_host(mmc);
+
+ pm_runtime_put(&dev->dev);
+ return 0;
+
+ clk_disable:
+ clk_disable_unprepare(host->clk);
+ host_free:
+ mmc_free_host(mmc);
+ return ret;
+}
+
+static int mmci_remove(struct amba_device *dev)
+{
+ struct mmc_host *mmc = amba_get_drvdata(dev);
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+
+ /*
+ * Undo pm_runtime_put() in probe. We use the _sync
+ * version here so that we can access the primecell.
+ */
+ pm_runtime_get_sync(&dev->dev);
+
+ mmc_remove_host(mmc);
+
+ writel(0, host->base + MMCIMASK0);
+ writel(0, host->base + MMCIMASK1);
+
+ writel(0, host->base + MMCICOMMAND);
+ writel(0, host->base + MMCIDATACTRL);
+
+ mmci_dma_release(host);
+ clk_disable_unprepare(host->clk);
+ mmc_free_host(mmc);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
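+/*
+ * Save and restore the interrupt mask and, on variants whose power
+ * domain may be cut (pwrreg_nopower), the clock, data control and power
+ * registers across runtime suspend/resume.
+ */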
+static void mmci_save(struct mmci_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ writel(0, host->base + MMCIMASK0);
+ if (host->variant->pwrreg_nopower) {
+ writel(0, host->base + MMCIDATACTRL);
+ writel(0, host->base + MMCIPOWER);
+ writel(0, host->base + MMCICLOCK);
+ }
+ mmci_reg_delay(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void mmci_restore(struct mmci_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->variant->pwrreg_nopower) {
+ writel(host->clk_reg, host->base + MMCICLOCK);
+ writel(host->datactrl_reg, host->base + MMCIDATACTRL);
+ writel(host->pwr_reg, host->base + MMCIPOWER);
+ }
+ writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ mmci_reg_delay(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int mmci_runtime_suspend(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+ pinctrl_pm_select_sleep_state(dev);
+ mmci_save(host);
+ clk_disable_unprepare(host->clk);
+ }
+
+ return 0;
+}
+
+static int mmci_runtime_resume(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+ clk_prepare_enable(host->clk);
+ mmci_restore(host);
+ pinctrl_pm_select_default_state(dev);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops mmci_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
+};
+
+static struct amba_id mmci_ids[] = {
+ {
+ .id = 0x00041180,
+ .mask = 0xff0fffff,
+ .data = &variant_arm,
+ },
+ {
+ .id = 0x01041180,
+ .mask = 0xff0fffff,
+ .data = &variant_arm_extended_fifo,
+ },
+ {
+ .id = 0x02041180,
+ .mask = 0xff0fffff,
+ .data = &variant_arm_extended_fifo_hwfc,
+ },
+ {
+ .id = 0x00041181,
+ .mask = 0x000fffff,
+ .data = &variant_arm,
+ },
+ /* ST Micro variants */
+ {
+ .id = 0x00180180,
+ .mask = 0x00ffffff,
+ .data = &variant_u300,
+ },
+ {
+ .id = 0x10180180,
+ .mask = 0xf0ffffff,
+ .data = &variant_nomadik,
+ },
+ {
+ .id = 0x00280180,
+ .mask = 0x00ffffff,
+ .data = &variant_u300,
+ },
+ {
+ .id = 0x00480180,
+ .mask = 0xf0ffffff,
+ .data = &variant_ux500,
+ },
+ {
+ .id = 0x10480180,
+ .mask = 0xf0ffffff,
+ .data = &variant_ux500v2,
+ },
+ /* Qualcomm variants */
+ {
+ .id = 0x00051180,
+ .mask = 0x000fffff,
+ .data = &variant_qcom,
+ },
+ { 0, 0 },
+};
+
+MODULE_DEVICE_TABLE(amba, mmci_ids);
+
+static struct amba_driver mmci_driver = {
+ .drv = {
+ .name = DRIVER_NAME,
+ .pm = &mmci_dev_pm_ops,
+ },
+ .probe = mmci_probe,
+ .remove = mmci_remove,
+ .id_table = mmci_ids,
+};
+
+module_amba_driver(mmci_driver);
+
+module_param(fmax, uint, 0444);
+
+MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/mmc/host/mmci.h b/kernel/drivers/mmc/host/mmci.h
new file mode 100644
index 000000000..a1f5e4f49
--- /dev/null
+++ b/kernel/drivers/mmc/host/mmci.h
@@ -0,0 +1,247 @@
+/*
+ * linux/drivers/mmc/host/mmci.h - ARM PrimeCell MMCI PL180/1 driver
+ *
+ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define MMCIPOWER 0x000
+#define MCI_PWR_OFF 0x00
+#define MCI_PWR_UP 0x02
+#define MCI_PWR_ON 0x03
+#define MCI_OD (1 << 6)
+#define MCI_ROD (1 << 7)
+/*
+ * The ST Micro version does not have ROD and reuses the voltage register
+ * bits for direction settings.
+ */
+#define MCI_ST_DATA2DIREN (1 << 2)
+#define MCI_ST_CMDDIREN (1 << 3)
+#define MCI_ST_DATA0DIREN (1 << 4)
+#define MCI_ST_DATA31DIREN (1 << 5)
+#define MCI_ST_FBCLKEN (1 << 7)
+#define MCI_ST_DATA74DIREN (1 << 8)
+
+#define MMCICLOCK 0x004
+#define MCI_CLK_ENABLE (1 << 8)
+#define MCI_CLK_PWRSAVE (1 << 9)
+#define MCI_CLK_BYPASS (1 << 10)
+#define MCI_4BIT_BUS (1 << 11)
+/*
+ * 8-bit wide buses, hardware flow control, negative edges and clock
+ * inversion are supported in the ST Micro U300 and Ux500 versions
+ */
+#define MCI_ST_8BIT_BUS (1 << 12)
+#define MCI_ST_U300_HWFCEN (1 << 13)
+#define MCI_ST_UX500_NEG_EDGE (1 << 13)
+#define MCI_ST_UX500_HWFCEN (1 << 14)
+#define MCI_ST_UX500_CLK_INV (1 << 15)
+/* Modified PL180 on Versatile Express platform */
+#define MCI_ARM_HWFCEN (1 << 12)
+
+/* Modified on Qualcomm Integrations */
+#define MCI_QCOM_CLK_WIDEBUS_8 (BIT(10) | BIT(11))
+#define MCI_QCOM_CLK_FLOWENA BIT(12)
+#define MCI_QCOM_CLK_INVERTOUT BIT(13)
+
+/* select in latch data and command in */
+#define MCI_QCOM_CLK_SELECT_IN_FBCLK BIT(15)
+#define MCI_QCOM_CLK_SELECT_IN_DDR_MODE (BIT(14) | BIT(15))
+
+#define MMCIARGUMENT 0x008
+#define MMCICOMMAND 0x00c
+#define MCI_CPSM_RESPONSE (1 << 6)
+#define MCI_CPSM_LONGRSP (1 << 7)
+#define MCI_CPSM_INTERRUPT (1 << 8)
+#define MCI_CPSM_PENDING (1 << 9)
+#define MCI_CPSM_ENABLE (1 << 10)
+/* Argument flag extensions in the ST Micro versions */
+#define MCI_ST_SDIO_SUSP (1 << 11)
+#define MCI_ST_ENCMD_COMPL (1 << 12)
+#define MCI_ST_NIEN (1 << 13)
+#define MCI_ST_CE_ATACMD (1 << 14)
+
+/* Modified on Qualcomm Integrations */
+#define MCI_QCOM_CSPM_DATCMD BIT(12)
+#define MCI_QCOM_CSPM_MCIABORT BIT(13)
+#define MCI_QCOM_CSPM_CCSENABLE BIT(14)
+#define MCI_QCOM_CSPM_CCSDISABLE BIT(15)
+#define MCI_QCOM_CSPM_AUTO_CMD19 BIT(16)
+#define MCI_QCOM_CSPM_AUTO_CMD21 BIT(21)
+
+#define MMCIRESPCMD 0x010
+#define MMCIRESPONSE0 0x014
+#define MMCIRESPONSE1 0x018
+#define MMCIRESPONSE2 0x01c
+#define MMCIRESPONSE3 0x020
+#define MMCIDATATIMER 0x024
+#define MMCIDATALENGTH 0x028
+#define MMCIDATACTRL 0x02c
+#define MCI_DPSM_ENABLE (1 << 0)
+#define MCI_DPSM_DIRECTION (1 << 1)
+#define MCI_DPSM_MODE (1 << 2)
+#define MCI_DPSM_DMAENABLE (1 << 3)
+#define MCI_DPSM_BLOCKSIZE (1 << 4)
+/* Control register extensions in the ST Micro U300 and Ux500 versions */
+#define MCI_ST_DPSM_RWSTART (1 << 8)
+#define MCI_ST_DPSM_RWSTOP (1 << 9)
+#define MCI_ST_DPSM_RWMOD (1 << 10)
+#define MCI_ST_DPSM_SDIOEN (1 << 11)
+/* Control register extensions in the ST Micro Ux500 versions */
+#define MCI_ST_DPSM_DMAREQCTL (1 << 12)
+#define MCI_ST_DPSM_DBOOTMODEEN (1 << 13)
+#define MCI_ST_DPSM_BUSYMODE (1 << 14)
+#define MCI_ST_DPSM_DDRMODE (1 << 15)
+
+#define MMCIDATACNT 0x030
+#define MMCISTATUS 0x034
+#define MCI_CMDCRCFAIL (1 << 0)
+#define MCI_DATACRCFAIL (1 << 1)
+#define MCI_CMDTIMEOUT (1 << 2)
+#define MCI_DATATIMEOUT (1 << 3)
+#define MCI_TXUNDERRUN (1 << 4)
+#define MCI_RXOVERRUN (1 << 5)
+#define MCI_CMDRESPEND (1 << 6)
+#define MCI_CMDSENT (1 << 7)
+#define MCI_DATAEND (1 << 8)
+#define MCI_STARTBITERR (1 << 9)
+#define MCI_DATABLOCKEND (1 << 10)
+#define MCI_CMDACTIVE (1 << 11)
+#define MCI_TXACTIVE (1 << 12)
+#define MCI_RXACTIVE (1 << 13)
+#define MCI_TXFIFOHALFEMPTY (1 << 14)
+#define MCI_RXFIFOHALFFULL (1 << 15)
+#define MCI_TXFIFOFULL (1 << 16)
+#define MCI_RXFIFOFULL (1 << 17)
+#define MCI_TXFIFOEMPTY (1 << 18)
+#define MCI_RXFIFOEMPTY (1 << 19)
+#define MCI_TXDATAAVLBL (1 << 20)
+#define MCI_RXDATAAVLBL (1 << 21)
+/* Extended status bits for the ST Micro variants */
+#define MCI_ST_SDIOIT (1 << 22)
+#define MCI_ST_CEATAEND (1 << 23)
+#define MCI_ST_CARDBUSY (1 << 24)
+
+#define MMCICLEAR 0x038
+#define MCI_CMDCRCFAILCLR (1 << 0)
+#define MCI_DATACRCFAILCLR (1 << 1)
+#define MCI_CMDTIMEOUTCLR (1 << 2)
+#define MCI_DATATIMEOUTCLR (1 << 3)
+#define MCI_TXUNDERRUNCLR (1 << 4)
+#define MCI_RXOVERRUNCLR (1 << 5)
+#define MCI_CMDRESPENDCLR (1 << 6)
+#define MCI_CMDSENTCLR (1 << 7)
+#define MCI_DATAENDCLR (1 << 8)
+#define MCI_STARTBITERRCLR (1 << 9)
+#define MCI_DATABLOCKENDCLR (1 << 10)
+/* Extended status bits for the ST Micro variants */
+#define MCI_ST_SDIOITC (1 << 22)
+#define MCI_ST_CEATAENDC (1 << 23)
+#define MCI_ST_BUSYENDC (1 << 24)
+
+#define MMCIMASK0 0x03c
+#define MCI_CMDCRCFAILMASK (1 << 0)
+#define MCI_DATACRCFAILMASK (1 << 1)
+#define MCI_CMDTIMEOUTMASK (1 << 2)
+#define MCI_DATATIMEOUTMASK (1 << 3)
+#define MCI_TXUNDERRUNMASK (1 << 4)
+#define MCI_RXOVERRUNMASK (1 << 5)
+#define MCI_CMDRESPENDMASK (1 << 6)
+#define MCI_CMDSENTMASK (1 << 7)
+#define MCI_DATAENDMASK (1 << 8)
+#define MCI_STARTBITERRMASK (1 << 9)
+#define MCI_DATABLOCKENDMASK (1 << 10)
+#define MCI_CMDACTIVEMASK (1 << 11)
+#define MCI_TXACTIVEMASK (1 << 12)
+#define MCI_RXACTIVEMASK (1 << 13)
+#define MCI_TXFIFOHALFEMPTYMASK (1 << 14)
+#define MCI_RXFIFOHALFFULLMASK (1 << 15)
+#define MCI_TXFIFOFULLMASK (1 << 16)
+#define MCI_RXFIFOFULLMASK (1 << 17)
+#define MCI_TXFIFOEMPTYMASK (1 << 18)
+#define MCI_RXFIFOEMPTYMASK (1 << 19)
+#define MCI_TXDATAAVLBLMASK (1 << 20)
+#define MCI_RXDATAAVLBLMASK (1 << 21)
+/* Extended status bits for the ST Micro variants */
+#define MCI_ST_SDIOITMASK (1 << 22)
+#define MCI_ST_CEATAENDMASK (1 << 23)
+#define MCI_ST_BUSYEND (1 << 24)
+
+#define MMCIMASK1 0x040
+#define MMCIFIFOCNT 0x048
+#define MMCIFIFO 0x080 /* to 0x0bc */
+
+#define MCI_IRQENABLE \
+ (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
+ MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
+ MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_STARTBITERRMASK)
+
+/* These interrupts are directed to IRQ1 when two IRQ lines are available */
+#define MCI_IRQ1MASK \
+ (MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \
+ MCI_TXFIFOHALFEMPTYMASK)
+
+#define NR_SG 128
+
+struct clk;
+struct variant_data;
+struct dma_chan;
+
+struct mmci_host_next {
+ struct dma_async_tx_descriptor *dma_desc;
+ struct dma_chan *dma_chan;
+ s32 cookie;
+};
+
+struct mmci_host {
+ phys_addr_t phybase;
+ void __iomem *base;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct mmc_host *mmc;
+ struct clk *clk;
+ bool singleirq;
+
+ spinlock_t lock;
+
+ unsigned int mclk;
+ /* cached value of requested clk in set_ios */
+ unsigned int clock_cache;
+ unsigned int cclk;
+ u32 pwr_reg;
+ u32 pwr_reg_add;
+ u32 clk_reg;
+ u32 datactrl_reg;
+ u32 busy_status;
+ bool vqmmc_enabled;
+ struct mmci_platform_data *plat;
+ struct variant_data *variant;
+
+ u8 hw_designer;
+ u8 hw_revision:4;
+
+ struct timer_list timer;
+ unsigned int oldstat;
+
+ /* pio stuff */
+ struct sg_mapping_iter sg_miter;
+ unsigned int size;
+ int (*get_rx_fifocnt)(struct mmci_host *h, u32 status, int remain);
+
+#ifdef CONFIG_DMA_ENGINE
+ /* DMA stuff */
+ struct dma_chan *dma_current;
+ struct dma_chan *dma_rx_channel;
+ struct dma_chan *dma_tx_channel;
+ struct dma_async_tx_descriptor *dma_desc_current;
+ struct mmci_host_next next_data;
+
+#define dma_inprogress(host) ((host)->dma_current)
+#else
+#define dma_inprogress(host) (0)
+#endif
+};
+
diff --git a/kernel/drivers/mmc/host/mmci_qcom_dml.c b/kernel/drivers/mmc/host/mmci_qcom_dml.c
new file mode 100644
index 000000000..2b7fc3764
--- /dev/null
+++ b/kernel/drivers/mmc/host/mmci_qcom_dml.c
@@ -0,0 +1,177 @@
+/*
+ *
+ * Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include "mmci.h"
+
+/* Registers */
+#define DML_CONFIG 0x00
+#define PRODUCER_CRCI_MSK GENMASK(1, 0)
+#define PRODUCER_CRCI_DISABLE 0
+#define PRODUCER_CRCI_X_SEL BIT(0)
+#define PRODUCER_CRCI_Y_SEL BIT(1)
+#define CONSUMER_CRCI_MSK GENMASK(3, 2)
+#define CONSUMER_CRCI_DISABLE 0
+#define CONSUMER_CRCI_X_SEL BIT(2)
+#define CONSUMER_CRCI_Y_SEL BIT(3)
+#define PRODUCER_TRANS_END_EN BIT(4)
+#define BYPASS BIT(16)
+#define DIRECT_MODE BIT(17)
+#define INFINITE_CONS_TRANS BIT(18)
+
+#define DML_SW_RESET 0x08
+#define DML_PRODUCER_START 0x0c
+#define DML_CONSUMER_START 0x10
+#define DML_PRODUCER_PIPE_LOGICAL_SIZE 0x14
+#define DML_CONSUMER_PIPE_LOGICAL_SIZE 0x18
+#define DML_PIPE_ID 0x1c
+#define PRODUCER_PIPE_ID_SHFT 0
+#define PRODUCER_PIPE_ID_MSK GENMASK(4, 0)
+#define CONSUMER_PIPE_ID_SHFT 16
+#define CONSUMER_PIPE_ID_MSK GENMASK(20, 16)
+
+#define DML_PRODUCER_BAM_BLOCK_SIZE 0x24
+#define DML_PRODUCER_BAM_TRANS_SIZE 0x28
+
+/* other definitions */
+#define PRODUCER_PIPE_LOGICAL_SIZE 4096
+#define CONSUMER_PIPE_LOGICAL_SIZE 4096
+
+#define DML_OFFSET 0x800
+
+void dml_start_xfer(struct mmci_host *host, struct mmc_data *data)
+{
+ u32 config;
+ void __iomem *base = host->base + DML_OFFSET;
+
+ if (data->flags & MMC_DATA_READ) {
+ /* Read operation: configure DML for producer operation */
+ /* Set producer CRCI-x and disable consumer CRCI */
+ config = readl_relaxed(base + DML_CONFIG);
+ config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_X_SEL;
+ config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_DISABLE;
+ writel_relaxed(config, base + DML_CONFIG);
+
+ /* Set the Producer BAM block size */
+ writel_relaxed(data->blksz, base + DML_PRODUCER_BAM_BLOCK_SIZE);
+
+ /* Set Producer BAM Transaction size */
+ writel_relaxed(data->blocks * data->blksz,
+ base + DML_PRODUCER_BAM_TRANS_SIZE);
+ /* Set Producer Transaction End bit */
+ config = readl_relaxed(base + DML_CONFIG);
+ config |= PRODUCER_TRANS_END_EN;
+ writel_relaxed(config, base + DML_CONFIG);
+ /* Trigger producer */
+ writel_relaxed(1, base + DML_PRODUCER_START);
+ } else {
+ /* Write operation: configure DML for consumer operation */
+ /* Set consumer CRCI-x and disable producer CRCI*/
+ config = readl_relaxed(base + DML_CONFIG);
+ config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_X_SEL;
+ config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_DISABLE;
+ writel_relaxed(config, base + DML_CONFIG);
+ /* Clear Producer Transaction End bit */
+ config = readl_relaxed(base + DML_CONFIG);
+ config &= ~PRODUCER_TRANS_END_EN;
+ writel_relaxed(config, base + DML_CONFIG);
+ /* Trigger consumer */
+ writel_relaxed(1, base + DML_CONSUMER_START);
+ }
+
+	/* Make sure the DML is configured before DMA is triggered */
+ wmb();
+}
+
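+/*
+ * Look up the named entry in "dma-names" and return the BAM pipe index
+ * carried in the first cell of the corresponding "dmas" specifier.
+ */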
+static int of_get_dml_pipe_index(struct device_node *np, const char *name)
+{
+ int index;
+ struct of_phandle_args dma_spec;
+
+ index = of_property_match_string(np, "dma-names", name);
+
+ if (index < 0)
+ return -ENODEV;
+
+ if (of_parse_phandle_with_args(np, "dmas", "#dma-cells", index,
+ &dma_spec))
+ return -ENODEV;
+
+ if (dma_spec.args_count)
+ return dma_spec.args[0];
+
+ return -ENODEV;
+}
+
+/* Initialize the dml hardware connected to SD Card controller */
+int dml_hw_init(struct mmci_host *host, struct device_node *np)
+{
+ u32 config;
+ void __iomem *base;
+ int consumer_id, producer_id;
+
+ consumer_id = of_get_dml_pipe_index(np, "tx");
+ producer_id = of_get_dml_pipe_index(np, "rx");
+
+ if (producer_id < 0 || consumer_id < 0)
+ return -ENODEV;
+
+ base = host->base + DML_OFFSET;
+
+ /* Reset the DML block */
+ writel_relaxed(1, base + DML_SW_RESET);
+
+ /* Disable the producer and consumer CRCI */
+ config = (PRODUCER_CRCI_DISABLE | CONSUMER_CRCI_DISABLE);
+ /*
+ * Disable the bypass mode. Bypass mode will only be used
+	 * if the data transfer is to happen in PIO mode and we don't
+ * want the BAM interface to connect with SDCC-DML.
+ */
+ config &= ~BYPASS;
+ /*
+	 * Disable direct mode as we don't want DML to master the AHB bus.
+	 * The BAM connected to the DML should master the AHB bus.
+ */
+ config &= ~DIRECT_MODE;
+ /*
+ * Disable infinite mode transfer as we won't be doing any
+	 * infinite size data transfers. All data transfers will be
+	 * of finite size.
+ */
+ config &= ~INFINITE_CONS_TRANS;
+ writel_relaxed(config, base + DML_CONFIG);
+
+ /*
+ * Initialize the logical BAM pipe size for producer
+ * and consumer.
+ */
+ writel_relaxed(PRODUCER_PIPE_LOGICAL_SIZE,
+ base + DML_PRODUCER_PIPE_LOGICAL_SIZE);
+ writel_relaxed(CONSUMER_PIPE_LOGICAL_SIZE,
+ base + DML_CONSUMER_PIPE_LOGICAL_SIZE);
+
+ /* Initialize Producer/consumer pipe id */
+ writel_relaxed(producer_id | (consumer_id << CONSUMER_PIPE_ID_SHFT),
+ base + DML_PIPE_ID);
+
+	/* Make sure DML initialization is finished */
+ mb();
+
+ return 0;
+}
diff --git a/kernel/drivers/mmc/host/mmci_qcom_dml.h b/kernel/drivers/mmc/host/mmci_qcom_dml.h
new file mode 100644
index 000000000..6e405d09d
--- /dev/null
+++ b/kernel/drivers/mmc/host/mmci_qcom_dml.h
@@ -0,0 +1,31 @@
+/*
+ *
+ * Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __MMC_QCOM_DML_H__
+#define __MMC_QCOM_DML_H__
+
+#ifdef CONFIG_MMC_QCOM_DML
+int dml_hw_init(struct mmci_host *host, struct device_node *np);
+void dml_start_xfer(struct mmci_host *host, struct mmc_data *data);
+#else
+static inline int dml_hw_init(struct mmci_host *host, struct device_node *np)
+{
+ return -ENOSYS;
+}
+static inline void dml_start_xfer(struct mmci_host *host, struct mmc_data *data)
+{
+}
+#endif /* CONFIG_MMC_QCOM_DML */
+
+#endif /* __MMC_QCOM_DML_H__ */
diff --git a/kernel/drivers/mmc/host/moxart-mmc.c b/kernel/drivers/mmc/host/moxart-mmc.c
new file mode 100644
index 000000000..006f18624
--- /dev/null
+++ b/kernel/drivers/mmc/host/moxart-mmc.c
@@ -0,0 +1,728 @@
+/*
+ * MOXA ART MMC host driver.
+ *
+ * Copyright (C) 2014 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technologies Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sd.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/clk.h>
+#include <linux/bitops.h>
+#include <linux/of_dma.h>
+#include <linux/spinlock.h>
+
+#define REG_COMMAND 0
+#define REG_ARGUMENT 4
+#define REG_RESPONSE0 8
+#define REG_RESPONSE1 12
+#define REG_RESPONSE2 16
+#define REG_RESPONSE3 20
+#define REG_RESPONSE_COMMAND 24
+#define REG_DATA_CONTROL 28
+#define REG_DATA_TIMER 32
+#define REG_DATA_LENGTH 36
+#define REG_STATUS 40
+#define REG_CLEAR 44
+#define REG_INTERRUPT_MASK 48
+#define REG_POWER_CONTROL 52
+#define REG_CLOCK_CONTROL 56
+#define REG_BUS_WIDTH 60
+#define REG_DATA_WINDOW 64
+#define REG_FEATURE 68
+#define REG_REVISION 72
+
+/* REG_COMMAND */
+#define CMD_SDC_RESET BIT(10)
+#define CMD_EN BIT(9)
+#define CMD_APP_CMD BIT(8)
+#define CMD_LONG_RSP BIT(7)
+#define CMD_NEED_RSP BIT(6)
+#define CMD_IDX_MASK 0x3f
+
+/* REG_RESPONSE_COMMAND */
+#define RSP_CMD_APP BIT(6)
+#define RSP_CMD_IDX_MASK 0x3f
+
+/* REG_DATA_CONTROL */
+#define DCR_DATA_FIFO_RESET BIT(8)
+#define DCR_DATA_THRES BIT(7)
+#define DCR_DATA_EN BIT(6)
+#define DCR_DMA_EN BIT(5)
+#define DCR_DATA_WRITE BIT(4)
+#define DCR_BLK_SIZE 0x0f
+
+/* REG_DATA_LENGTH */
+#define DATA_LEN_MASK 0xffffff
+
+/* REG_STATUS */
+#define WRITE_PROT BIT(12)
+#define CARD_DETECT BIT(11)
+/* Bits 1-10 below apply to both the interrupt mask and clear registers. */
+#define CARD_CHANGE BIT(10)
+#define FIFO_ORUN BIT(9)
+#define FIFO_URUN BIT(8)
+#define DATA_END BIT(7)
+#define CMD_SENT BIT(6)
+#define DATA_CRC_OK BIT(5)
+#define RSP_CRC_OK BIT(4)
+#define DATA_TIMEOUT BIT(3)
+#define RSP_TIMEOUT BIT(2)
+#define DATA_CRC_FAIL BIT(1)
+#define RSP_CRC_FAIL BIT(0)
+
+#define MASK_RSP (RSP_TIMEOUT | RSP_CRC_FAIL | \
+ RSP_CRC_OK | CARD_DETECT | CMD_SENT)
+
+#define MASK_DATA (DATA_CRC_OK | DATA_END | \
+ DATA_CRC_FAIL | DATA_TIMEOUT)
+
+#define MASK_INTR_PIO (FIFO_URUN | FIFO_ORUN | CARD_CHANGE)
+
+/* REG_POWER_CONTROL */
+#define SD_POWER_ON BIT(4)
+#define SD_POWER_MASK 0x0f
+
+/* REG_CLOCK_CONTROL */
+#define CLK_HISPD BIT(9)
+#define CLK_OFF BIT(8)
+#define CLK_SD BIT(7)
+#define CLK_DIV_MASK 0x7f
+
+/* REG_BUS_WIDTH */
+#define BUS_WIDTH_8 BIT(2)
+#define BUS_WIDTH_4 BIT(1)
+#define BUS_WIDTH_1 BIT(0)
+
+#define MMC_VDD_360 23
+#define MIN_POWER (MMC_VDD_360 - SD_POWER_MASK)
+#define MAX_RETRIES 500000
+
+struct moxart_host {
+ spinlock_t lock;
+
+ void __iomem *base;
+
+ phys_addr_t reg_phys;
+
+ struct dma_chan *dma_chan_tx;
+ struct dma_chan *dma_chan_rx;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct scatterlist *cur_sg;
+ struct completion dma_complete;
+ struct completion pio_complete;
+
+ u32 num_sg;
+ u32 data_remain;
+ u32 data_len;
+ u32 fifo_width;
+ u32 timeout;
+ u32 rate;
+
+ long sysclk;
+
+ bool have_dma;
+ bool is_removed;
+};
+
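+/*
+ * Scatterlist walking helpers: data_remain tracks how much of the
+ * current segment is left, clamped so that the request never transfers
+ * more than data_len bytes in total.
+ */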
+static inline void moxart_init_sg(struct moxart_host *host,
+ struct mmc_data *data)
+{
+ host->cur_sg = data->sg;
+ host->num_sg = data->sg_len;
+ host->data_remain = host->cur_sg->length;
+
+ if (host->data_remain > host->data_len)
+ host->data_remain = host->data_len;
+}
+
+static inline int moxart_next_sg(struct moxart_host *host)
+{
+ int remain;
+ struct mmc_data *data = host->mrq->cmd->data;
+
+ host->cur_sg++;
+ host->num_sg--;
+
+ if (host->num_sg > 0) {
+ host->data_remain = host->cur_sg->length;
+ remain = host->data_len - data->bytes_xfered;
+ if (remain > 0 && remain < host->data_remain)
+ host->data_remain = remain;
+ }
+
+ return host->num_sg;
+}
+
+static int moxart_wait_for_status(struct moxart_host *host,
+ u32 mask, u32 *status)
+{
+ int ret = -ETIMEDOUT;
+ u32 i;
+
+ for (i = 0; i < MAX_RETRIES; i++) {
+ *status = readl(host->base + REG_STATUS);
+ if (!(*status & mask)) {
+ udelay(5);
+ continue;
+ }
+ writel(*status & mask, host->base + REG_CLEAR);
+ ret = 0;
+ break;
+ }
+
+ if (ret)
+ dev_err(mmc_dev(host->mmc), "timed out waiting for status\n");
+
+ return ret;
+}
+
+
+static void moxart_send_command(struct moxart_host *host,
+ struct mmc_command *cmd)
+{
+ u32 status, cmdctrl;
+
+ writel(RSP_TIMEOUT | RSP_CRC_OK |
+ RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR);
+ writel(cmd->arg, host->base + REG_ARGUMENT);
+
+ cmdctrl = cmd->opcode & CMD_IDX_MASK;
+ if (cmdctrl == SD_APP_SET_BUS_WIDTH || cmdctrl == SD_APP_OP_COND ||
+ cmdctrl == SD_APP_SEND_SCR || cmdctrl == SD_APP_SD_STATUS ||
+ cmdctrl == SD_APP_SEND_NUM_WR_BLKS)
+ cmdctrl |= CMD_APP_CMD;
+
+ if (cmd->flags & MMC_RSP_PRESENT)
+ cmdctrl |= CMD_NEED_RSP;
+
+ if (cmd->flags & MMC_RSP_136)
+ cmdctrl |= CMD_LONG_RSP;
+
+ writel(cmdctrl | CMD_EN, host->base + REG_COMMAND);
+
+ if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT)
+ cmd->error = -ETIMEDOUT;
+
+ if (status & RSP_TIMEOUT) {
+ cmd->error = -ETIMEDOUT;
+ return;
+ }
+ if (status & RSP_CRC_FAIL) {
+ cmd->error = -EIO;
+ return;
+ }
+ if (status & RSP_CRC_OK) {
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[3] = readl(host->base + REG_RESPONSE0);
+ cmd->resp[2] = readl(host->base + REG_RESPONSE1);
+ cmd->resp[1] = readl(host->base + REG_RESPONSE2);
+ cmd->resp[0] = readl(host->base + REG_RESPONSE3);
+ } else {
+ cmd->resp[0] = readl(host->base + REG_RESPONSE0);
+ }
+ }
+}
+
+static void moxart_dma_complete(void *param)
+{
+ struct moxart_host *host = param;
+
+ complete(&host->dma_complete);
+}
+
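+/*
+ * Map the scatterlist and hand it to the slave DMA channel, then wait,
+ * interruptibly and with a timeout, for the completion raised by the
+ * DMA callback before unmapping.
+ */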
+static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
+{
+ u32 len, dir_data, dir_slave;
+ unsigned long dma_time;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *dma_chan;
+
+ if (host->data_len == data->bytes_xfered)
+ return;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ dma_chan = host->dma_chan_tx;
+ dir_data = DMA_TO_DEVICE;
+ dir_slave = DMA_MEM_TO_DEV;
+ } else {
+ dma_chan = host->dma_chan_rx;
+ dir_data = DMA_FROM_DEVICE;
+ dir_slave = DMA_DEV_TO_MEM;
+ }
+
+ len = dma_map_sg(dma_chan->device->dev, data->sg,
+ data->sg_len, dir_data);
+
+ if (len > 0) {
+ desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
+ len, dir_slave,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n");
+ }
+
+ if (desc) {
+ host->tx_desc = desc;
+ desc->callback = moxart_dma_complete;
+ desc->callback_param = host;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma_chan);
+ }
+
+ data->bytes_xfered += host->data_remain;
+
+ dma_time = wait_for_completion_interruptible_timeout(
+ &host->dma_complete, host->timeout);
+
+ dma_unmap_sg(dma_chan->device->dev,
+ data->sg, data->sg_len,
+ dir_data);
+}
+
+
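+/*
+ * PIO transfer of the current scatterlist segment through the data
+ * window register, one 32-bit word at a time, waiting on the FIFO
+ * underrun (write) or overrun (read) status bit before each burst.
+ */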
+static void moxart_transfer_pio(struct moxart_host *host)
+{
+ struct mmc_data *data = host->mrq->cmd->data;
+ u32 *sgp, len = 0, remain, status;
+
+ if (host->data_len == data->bytes_xfered)
+ return;
+
+ sgp = sg_virt(host->cur_sg);
+ remain = host->data_remain;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ while (remain > 0) {
+ if (moxart_wait_for_status(host, FIFO_URUN, &status)
+ == -ETIMEDOUT) {
+ data->error = -ETIMEDOUT;
+ complete(&host->pio_complete);
+ return;
+ }
+ for (len = 0; len < remain && len < host->fifo_width;) {
+ iowrite32(*sgp, host->base + REG_DATA_WINDOW);
+ sgp++;
+ len += 4;
+ }
+ remain -= len;
+ }
+
+ } else {
+ while (remain > 0) {
+ if (moxart_wait_for_status(host, FIFO_ORUN, &status)
+ == -ETIMEDOUT) {
+ data->error = -ETIMEDOUT;
+ complete(&host->pio_complete);
+ return;
+ }
+ for (len = 0; len < remain && len < host->fifo_width;) {
+ /* SCR data must be read in big endian. */
+ if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
+ *sgp = ioread32be(host->base +
+ REG_DATA_WINDOW);
+ else
+ *sgp = ioread32(host->base +
+ REG_DATA_WINDOW);
+ sgp++;
+ len += 4;
+ }
+ remain -= len;
+ }
+ }
+
+ data->bytes_xfered += host->data_remain - remain;
+ host->data_remain = remain;
+
+ if (host->data_len != data->bytes_xfered)
+ moxart_next_sg(host);
+ else
+ complete(&host->pio_complete);
+}
+
+static void moxart_prepare_data(struct moxart_host *host)
+{
+ struct mmc_data *data = host->mrq->cmd->data;
+ u32 datactrl;
+ int blksz_bits;
+
+ if (!data)
+ return;
+
+ host->data_len = data->blocks * data->blksz;
+ blksz_bits = ffs(data->blksz) - 1;
+ BUG_ON(1 << blksz_bits != data->blksz);
+
+ moxart_init_sg(host, data);
+
+ datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);
+
+ if (data->flags & MMC_DATA_WRITE)
+ datactrl |= DCR_DATA_WRITE;
+
+ if ((host->data_len > host->fifo_width) && host->have_dma)
+ datactrl |= DCR_DMA_EN;
+
+ writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
+ writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
+ writel(host->rate, host->base + REG_DATA_TIMER);
+ writel(host->data_len, host->base + REG_DATA_LENGTH);
+ writel(datactrl, host->base + REG_DATA_CONTROL);
+}
+
+static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct moxart_host *host = mmc_priv(mmc);
+ unsigned long pio_time, flags;
+ u32 status;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ init_completion(&host->dma_complete);
+ init_completion(&host->pio_complete);
+
+ host->mrq = mrq;
+
+ if (readl(host->base + REG_STATUS) & CARD_DETECT) {
+ mrq->cmd->error = -ETIMEDOUT;
+ goto request_done;
+ }
+
+ moxart_prepare_data(host);
+ moxart_send_command(host, host->mrq->cmd);
+
+ if (mrq->cmd->data) {
+ if ((host->data_len > host->fifo_width) && host->have_dma) {
+
+ writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ moxart_transfer_dma(mrq->cmd->data, host);
+
+ spin_lock_irqsave(&host->lock, flags);
+ } else {
+
+ writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* PIO transfers start from interrupt. */
+ pio_time = wait_for_completion_interruptible_timeout(
+ &host->pio_complete, host->timeout);
+
+ spin_lock_irqsave(&host->lock, flags);
+ }
+
+ if (host->is_removed) {
+ dev_err(mmc_dev(host->mmc), "card removed\n");
+ mrq->cmd->error = -ETIMEDOUT;
+ goto request_done;
+ }
+
+ if (moxart_wait_for_status(host, MASK_DATA, &status)
+ == -ETIMEDOUT) {
+ mrq->cmd->data->error = -ETIMEDOUT;
+ goto request_done;
+ }
+
+ if (status & DATA_CRC_FAIL)
+ mrq->cmd->data->error = -ETIMEDOUT;
+
+ if (mrq->cmd->data->stop)
+ moxart_send_command(host, mrq->cmd->data->stop);
+ }
+
+request_done:
+ spin_unlock_irqrestore(&host->lock, flags);
+ mmc_request_done(host->mmc, mrq);
+}
+
+static irqreturn_t moxart_irq(int irq, void *devid)
+{
+ struct moxart_host *host = (struct moxart_host *)devid;
+ u32 status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ status = readl(host->base + REG_STATUS);
+ if (status & CARD_CHANGE) {
+ host->is_removed = status & CARD_DETECT;
+ if (host->is_removed && host->have_dma) {
+ dmaengine_terminate_all(host->dma_chan_tx);
+ dmaengine_terminate_all(host->dma_chan_rx);
+ }
+ host->mrq = NULL;
+ writel(MASK_INTR_PIO, host->base + REG_CLEAR);
+ writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
+ mmc_detect_change(host->mmc, 0);
+ }
+ if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq)
+ moxart_transfer_pio(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct moxart_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u8 power, div;
+ u32 ctrl;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (ios->clock) {
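+		/*
+		 * Pick the smallest divider whose resulting clock,
+		 * sysclk / (2 * (div + 1)), does not exceed the
+		 * requested rate.
+		 */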
+ for (div = 0; div < CLK_DIV_MASK; ++div) {
+ if (ios->clock >= host->sysclk / (2 * (div + 1)))
+ break;
+ }
+ ctrl = CLK_SD | div;
+ host->rate = host->sysclk / (2 * (div + 1));
+ if (host->rate > host->sysclk)
+ ctrl |= CLK_HISPD;
+ writel(ctrl, host->base + REG_CLOCK_CONTROL);
+ }
+
+ if (ios->power_mode == MMC_POWER_OFF) {
+ writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON,
+ host->base + REG_POWER_CONTROL);
+ } else {
+ if (ios->vdd < MIN_POWER)
+ power = 0;
+ else
+ power = ios->vdd - MIN_POWER;
+
+ writel(SD_POWER_ON | (u32) power,
+ host->base + REG_POWER_CONTROL);
+ }
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_4:
+ writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
+ break;
+ case MMC_BUS_WIDTH_8:
+ writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH);
+ break;
+ default:
+ writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
+ break;
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int moxart_get_ro(struct mmc_host *mmc)
+{
+ struct moxart_host *host = mmc_priv(mmc);
+
+ return !!(readl(host->base + REG_STATUS) & WRITE_PROT);
+}
+
+static struct mmc_host_ops moxart_ops = {
+ .request = moxart_request,
+ .set_ios = moxart_set_ios,
+ .get_ro = moxart_get_ro,
+};
+
+static int moxart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource res_mmc;
+ struct mmc_host *mmc;
+ struct moxart_host *host = NULL;
+ struct dma_slave_config cfg;
+ struct clk *clk;
+ void __iomem *reg_mmc;
+ int irq, ret;
+ u32 i;
+
+ mmc = mmc_alloc_host(sizeof(struct moxart_host), dev);
+ if (!mmc) {
+ dev_err(dev, "mmc_alloc_host failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_address_to_resource(node, 0, &res_mmc);
+ if (ret) {
+ dev_err(dev, "of_address_to_resource failed\n");
+ goto out;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ dev_err(dev, "irq_of_parse_and_map failed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto out;
+ }
+
+ reg_mmc = devm_ioremap_resource(dev, &res_mmc);
+ if (IS_ERR(reg_mmc)) {
+ ret = PTR_ERR(reg_mmc);
+ goto out;
+ }
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto out;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->base = reg_mmc;
+ host->reg_phys = res_mmc.start;
+ host->timeout = msecs_to_jiffies(1000);
+ host->sysclk = clk_get_rate(clk);
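+	/* REG_FEATURE apparently reports the FIFO depth in 32-bit words. */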
+ host->fifo_width = readl(host->base + REG_FEATURE) << 2;
+ host->dma_chan_tx = dma_request_slave_channel_reason(dev, "tx");
+ host->dma_chan_rx = dma_request_slave_channel_reason(dev, "rx");
+
+ spin_lock_init(&host->lock);
+
+ mmc->ops = &moxart_ops;
+ mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2);
+ mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2);
+ mmc->ocr_avail = 0xffff00; /* Support 2.0v - 3.6v power. */
+
+ if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
+ if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER ||
+ PTR_ERR(host->dma_chan_rx) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
+ dev_dbg(dev, "PIO mode transfer enabled\n");
+ host->have_dma = false;
+ } else {
+ dev_dbg(dev, "DMA channels found (%p,%p)\n",
+ host->dma_chan_tx, host->dma_chan_rx);
+ host->have_dma = true;
+
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.src_addr = 0;
+ cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW;
+ dmaengine_slave_config(host->dma_chan_tx, &cfg);
+
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;
+ cfg.dst_addr = 0;
+ dmaengine_slave_config(host->dma_chan_rx, &cfg);
+ }
+
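+	/* Bits [4:3] of REG_BUS_WIDTH report the widths the controller supports. */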
+ switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) {
+ case 1:
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 2:
+ mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
+ break;
+ default:
+ break;
+ }
+
+ writel(0, host->base + REG_INTERRUPT_MASK);
+
+ writel(CMD_SDC_RESET, host->base + REG_COMMAND);
+ for (i = 0; i < MAX_RETRIES; i++) {
+ if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET))
+ break;
+ udelay(5);
+ }
+
+ ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host);
+ if (ret)
+ goto out;
+
+ dev_set_drvdata(dev, mmc);
+ mmc_add_host(mmc);
+
+ dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);
+
+ return 0;
+
+out:
+ if (mmc)
+ mmc_free_host(mmc);
+ return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
+ struct moxart_host *host = mmc_priv(mmc);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (mmc) {
+ if (!IS_ERR(host->dma_chan_tx))
+ dma_release_channel(host->dma_chan_tx);
+ if (!IS_ERR(host->dma_chan_rx))
+ dma_release_channel(host->dma_chan_rx);
+ mmc_remove_host(mmc);
+ mmc_free_host(mmc);
+
+ writel(0, host->base + REG_INTERRUPT_MASK);
+ writel(0, host->base + REG_POWER_CONTROL);
+ writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
+ host->base + REG_CLOCK_CONTROL);
+ }
+ return 0;
+}
+
+static const struct of_device_id moxart_mmc_match[] = {
+ { .compatible = "moxa,moxart-mmc" },
+ { .compatible = "faraday,ftsdc010" },
+ { }
+};
+
+static struct platform_driver moxart_mmc_driver = {
+ .probe = moxart_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "mmc-moxart",
+ .of_match_table = moxart_mmc_match,
+ },
+};
+module_platform_driver(moxart_mmc_driver);
+
+MODULE_ALIAS("platform:mmc-moxart");
+MODULE_DESCRIPTION("MOXA ART MMC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
diff --git a/kernel/drivers/mmc/host/mvsdio.c b/kernel/drivers/mmc/host/mvsdio.c
new file mode 100644
index 000000000..a448498e3
--- /dev/null
+++ b/kernel/drivers/mmc/host/mvsdio.c
@@ -0,0 +1,876 @@
+/*
+ * Marvell MMC/SD/SDIO driver
+ *
+ * Authors: Maen Suleiman, Nicolas Pitre
+ * Copyright (C) 2008-2009 Marvell Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/mbus.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+
+#include <asm/sizes.h>
+#include <asm/unaligned.h>
+#include <linux/platform_data/mmc-mvsdio.h>
+
+#include "mvsdio.h"
+
+#define DRIVER_NAME "mvsdio"
+
+static int maxfreq;
+static int nodma;
+
+struct mvsd_host {
+ void __iomem *base;
+ struct mmc_request *mrq;
+ spinlock_t lock;
+ unsigned int xfer_mode;
+ unsigned int intr_en;
+ unsigned int ctrl;
+ unsigned int pio_size;
+ void *pio_ptr;
+ unsigned int sg_frags;
+ unsigned int ns_per_clk;
+ unsigned int clock;
+ unsigned int base_clock;
+ struct timer_list timer;
+ struct mmc_host *mmc;
+ struct device *dev;
+ struct clk *clk;
+};
+
+#define mvsd_write(offs, val) writel(val, iobase + (offs))
+#define mvsd_read(offs) readl(iobase + (offs))
+
+static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
+{
+ void __iomem *iobase = host->base;
+ unsigned int tmout;
+ int tmout_index;
+
+	/*
+	 * Hardware weirdness. The FIFO_EMPTY bit of the HW_STATE
+	 * register can take a while to be set after some "unusual"
+	 * data block sizes are used (such as with the SWITCH
+	 * command), even though the XFER_DONE interrupt has been
+	 * raised. And if another data transfer starts before this
+	 * bit settles (which eventually happens by itself) then the
+	 * new transfer simply fails with a timeout.
+	 */
+ if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) {
+ unsigned long t = jiffies + HZ;
+ unsigned int hw_state, count = 0;
+ do {
+ hw_state = mvsd_read(MVSD_HW_STATE);
+ if (time_after(jiffies, t)) {
+ dev_warn(host->dev, "FIFO_EMPTY bit missing\n");
+ break;
+ }
+ count++;
+ } while (!(hw_state & (1 << 13)));
+ dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit "
+ "(hw=0x%04x, count=%d, jiffies=%ld)\n",
+ hw_state, count, jiffies - (t - HZ));
+ }
+
+ /* If timeout=0 then maximum timeout index is used. */
+ tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk);
+ tmout += data->timeout_clks;
+ tmout_index = fls(tmout - 1) - 12;
+ if (tmout_index < 0)
+ tmout_index = 0;
+ if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX)
+ tmout_index = MVSD_HOST_CTRL_TMOUT_MAX;
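+	/*
+	 * The timeout index appears to select a power-of-two clock
+	 * count: index i covers transfers of up to 2^(12 + i) clocks.
+	 */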
+
+ dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u (%d)\n",
+ (data->flags & MMC_DATA_READ) ? "read" : "write",
+ (u32)sg_virt(data->sg), data->blocks, data->blksz,
+ tmout, tmout_index);
+
+ host->ctrl &= ~MVSD_HOST_CTRL_TMOUT_MASK;
+ host->ctrl |= MVSD_HOST_CTRL_TMOUT(tmout_index);
+ mvsd_write(MVSD_HOST_CTRL, host->ctrl);
+ mvsd_write(MVSD_BLK_COUNT, data->blocks);
+ mvsd_write(MVSD_BLK_SIZE, data->blksz);
+
+ if (nodma || (data->blksz | data->sg->offset) & 3 ||
+ ((!(data->flags & MMC_DATA_READ) && data->sg->offset & 0x3f))) {
+		/*
+		 * We cannot do DMA on a buffer whose offset or size
+		 * is not aligned on a 4-byte boundary.
+		 *
+		 * It also appears the host-to-card DMA can corrupt
+		 * data when the buffer is not aligned on a 64-byte
+		 * boundary.
+		 */
+ host->pio_size = data->blocks * data->blksz;
+ host->pio_ptr = sg_virt(data->sg);
+ if (!nodma)
+ dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n",
+ host->pio_ptr, host->pio_size);
+ return 1;
+ } else {
+ dma_addr_t phys_addr;
+ int dma_dir = (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, dma_dir);
+ phys_addr = sg_dma_address(data->sg);
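+		/* The 32-bit DMA address is split across two 16-bit registers. */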
+ mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
+ mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16);
+ return 0;
+ }
+}
+
+static void mvsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mvsd_host *host = mmc_priv(mmc);
+ void __iomem *iobase = host->base;
+ struct mmc_command *cmd = mrq->cmd;
+ u32 cmdreg = 0, xfer = 0, intr = 0;
+ unsigned long flags;
+
+ BUG_ON(host->mrq != NULL);
+ host->mrq = mrq;
+
+ dev_dbg(host->dev, "cmd %d (hw state 0x%04x)\n",
+ cmd->opcode, mvsd_read(MVSD_HW_STATE));
+
+ cmdreg = MVSD_CMD_INDEX(cmd->opcode);
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ cmdreg |= MVSD_CMD_RSP_48BUSY;
+ else if (cmd->flags & MMC_RSP_136)
+ cmdreg |= MVSD_CMD_RSP_136;
+ else if (cmd->flags & MMC_RSP_PRESENT)
+ cmdreg |= MVSD_CMD_RSP_48;
+ else
+ cmdreg |= MVSD_CMD_RSP_NONE;
+
+ if (cmd->flags & MMC_RSP_CRC)
+ cmdreg |= MVSD_CMD_CHECK_CMDCRC;
+
+ if (cmd->flags & MMC_RSP_OPCODE)
+ cmdreg |= MVSD_CMD_INDX_CHECK;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ cmdreg |= MVSD_UNEXPECTED_RESP;
+ intr |= MVSD_NOR_UNEXP_RSP;
+ }
+
+ if (mrq->data) {
+ struct mmc_data *data = mrq->data;
+ int pio;
+
+ cmdreg |= MVSD_CMD_DATA_PRESENT | MVSD_CMD_CHECK_DATACRC16;
+ xfer |= MVSD_XFER_MODE_HW_WR_DATA_EN;
+ if (data->flags & MMC_DATA_READ)
+ xfer |= MVSD_XFER_MODE_TO_HOST;
+
+ pio = mvsd_setup_data(host, data);
+ if (pio) {
+ xfer |= MVSD_XFER_MODE_PIO;
+ /* PIO section of mvsd_irq has comments on those bits */
+ if (data->flags & MMC_DATA_WRITE)
+ intr |= MVSD_NOR_TX_AVAIL;
+ else if (host->pio_size > 32)
+ intr |= MVSD_NOR_RX_FIFO_8W;
+ else
+ intr |= MVSD_NOR_RX_READY;
+ }
+
+ if (data->stop) {
+ struct mmc_command *stop = data->stop;
+ u32 cmd12reg = 0;
+
+ mvsd_write(MVSD_AUTOCMD12_ARG_LOW, stop->arg & 0xffff);
+ mvsd_write(MVSD_AUTOCMD12_ARG_HI, stop->arg >> 16);
+
+ if (stop->flags & MMC_RSP_BUSY)
+ cmd12reg |= MVSD_AUTOCMD12_BUSY;
+ if (stop->flags & MMC_RSP_OPCODE)
+ cmd12reg |= MVSD_AUTOCMD12_INDX_CHECK;
+ cmd12reg |= MVSD_AUTOCMD12_INDEX(stop->opcode);
+ mvsd_write(MVSD_AUTOCMD12_CMD, cmd12reg);
+
+ xfer |= MVSD_XFER_MODE_AUTO_CMD12;
+ intr |= MVSD_NOR_AUTOCMD12_DONE;
+ } else {
+ intr |= MVSD_NOR_XFER_DONE;
+ }
+ } else {
+ intr |= MVSD_NOR_CMD_DONE;
+ }
+
+ mvsd_write(MVSD_ARG_LOW, cmd->arg & 0xffff);
+ mvsd_write(MVSD_ARG_HI, cmd->arg >> 16);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
+ host->xfer_mode |= xfer;
+ mvsd_write(MVSD_XFER_MODE, host->xfer_mode);
+
+ mvsd_write(MVSD_NOR_INTR_STATUS, ~MVSD_NOR_CARD_INT);
+ mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
+ mvsd_write(MVSD_CMD, cmdreg);
+
+ host->intr_en &= MVSD_NOR_CARD_INT;
+ host->intr_en |= intr | MVSD_NOR_ERROR;
+ mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
+ mvsd_write(MVSD_ERR_INTR_EN, 0xffff);
+
+ mod_timer(&host->timer, jiffies + 5 * HZ);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static u32 mvsd_finish_cmd(struct mvsd_host *host, struct mmc_command *cmd,
+ u32 err_status)
+{
+ void __iomem *iobase = host->base;
+
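+	/*
+	 * The RSP registers hold 16 bits each; the response words are
+	 * reassembled below from 6-, 10- and 16-bit fields spread
+	 * across consecutive registers.
+	 */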
+ if (cmd->flags & MMC_RSP_136) {
+ unsigned int response[8], i;
+ for (i = 0; i < 8; i++)
+ response[i] = mvsd_read(MVSD_RSP(i));
+ cmd->resp[0] = ((response[0] & 0x03ff) << 22) |
+ ((response[1] & 0xffff) << 6) |
+ ((response[2] & 0xfc00) >> 10);
+ cmd->resp[1] = ((response[2] & 0x03ff) << 22) |
+ ((response[3] & 0xffff) << 6) |
+ ((response[4] & 0xfc00) >> 10);
+ cmd->resp[2] = ((response[4] & 0x03ff) << 22) |
+ ((response[5] & 0xffff) << 6) |
+ ((response[6] & 0xfc00) >> 10);
+ cmd->resp[3] = ((response[6] & 0x03ff) << 22) |
+ ((response[7] & 0x3fff) << 8);
+ } else if (cmd->flags & MMC_RSP_PRESENT) {
+ unsigned int response[3], i;
+ for (i = 0; i < 3; i++)
+ response[i] = mvsd_read(MVSD_RSP(i));
+ cmd->resp[0] = ((response[2] & 0x003f) << (8 - 8)) |
+ ((response[1] & 0xffff) << (14 - 8)) |
+ ((response[0] & 0x03ff) << (30 - 8));
+ cmd->resp[1] = ((response[0] & 0xfc00) >> 10);
+ cmd->resp[2] = 0;
+ cmd->resp[3] = 0;
+ }
+
+ if (err_status & MVSD_ERR_CMD_TIMEOUT) {
+ cmd->error = -ETIMEDOUT;
+ } else if (err_status & (MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT |
+ MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT)) {
+ cmd->error = -EILSEQ;
+ }
+ err_status &= ~(MVSD_ERR_CMD_TIMEOUT | MVSD_ERR_CMD_CRC |
+ MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX |
+ MVSD_ERR_CMD_STARTBIT);
+
+ return err_status;
+}
+
+static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
+ u32 err_status)
+{
+ void __iomem *iobase = host->base;
+
+ if (host->pio_ptr) {
+ host->pio_ptr = NULL;
+ host->pio_size = 0;
+ } else {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+
+ if (err_status & MVSD_ERR_DATA_TIMEOUT)
+ data->error = -ETIMEDOUT;
+ else if (err_status & (MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT))
+ data->error = -EILSEQ;
+ else if (err_status & MVSD_ERR_XFER_SIZE)
+ data->error = -EBADE;
+ err_status &= ~(MVSD_ERR_DATA_TIMEOUT | MVSD_ERR_DATA_CRC |
+ MVSD_ERR_DATA_ENDBIT | MVSD_ERR_XFER_SIZE);
+
+ dev_dbg(host->dev, "data done: blocks_left=%d, bytes_left=%d\n",
+ mvsd_read(MVSD_CURR_BLK_LEFT), mvsd_read(MVSD_CURR_BYTE_LEFT));
+ data->bytes_xfered =
+ (data->blocks - mvsd_read(MVSD_CURR_BLK_LEFT)) * data->blksz;
+ /* We can't be sure about the last block when errors are detected */
+ if (data->bytes_xfered && data->error)
+ data->bytes_xfered -= data->blksz;
+
+ /* Handle Auto cmd 12 response */
+ if (data->stop) {
+ unsigned int response[3], i;
+ for (i = 0; i < 3; i++)
+ response[i] = mvsd_read(MVSD_AUTO_RSP(i));
+ data->stop->resp[0] = ((response[2] & 0x003f) << (8 - 8)) |
+ ((response[1] & 0xffff) << (14 - 8)) |
+ ((response[0] & 0x03ff) << (30 - 8));
+ data->stop->resp[1] = ((response[0] & 0xfc00) >> 10);
+ data->stop->resp[2] = 0;
+ data->stop->resp[3] = 0;
+
+ if (err_status & MVSD_ERR_AUTOCMD12) {
+ u32 err_cmd12 = mvsd_read(MVSD_AUTOCMD12_ERR_STATUS);
+ dev_dbg(host->dev, "c12err 0x%04x\n", err_cmd12);
+ if (err_cmd12 & MVSD_AUTOCMD12_ERR_NOTEXE)
+ data->stop->error = -ENOEXEC;
+ else if (err_cmd12 & MVSD_AUTOCMD12_ERR_TIMEOUT)
+ data->stop->error = -ETIMEDOUT;
+ else if (err_cmd12)
+ data->stop->error = -EILSEQ;
+ err_status &= ~MVSD_ERR_AUTOCMD12;
+ }
+ }
+
+ return err_status;
+}
+
+static irqreturn_t mvsd_irq(int irq, void *dev)
+{
+ struct mvsd_host *host = dev;
+ void __iomem *iobase = host->base;
+ u32 intr_status, intr_done_mask;
+ int irq_handled = 0;
+
+ intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
+ dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n",
+ intr_status, mvsd_read(MVSD_NOR_INTR_EN),
+ mvsd_read(MVSD_HW_STATE));
+
+	/*
+	 * It looks like the SDIO IP can issue one late, spurious irq
+	 * even though all irqs should be disabled. To work around
+	 * this, bail out early if we didn't expect any irqs to occur.
+	 */
+ if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) {
+ dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n",
+ mvsd_read(MVSD_NOR_INTR_STATUS),
+ mvsd_read(MVSD_NOR_INTR_EN),
+ mvsd_read(MVSD_ERR_INTR_STATUS),
+ mvsd_read(MVSD_ERR_INTR_EN));
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(&host->lock);
+
+ /* PIO handling, if needed. Messy business... */
+ if (host->pio_size &&
+ (intr_status & host->intr_en &
+ (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) {
+ u16 *p = host->pio_ptr;
+ int s = host->pio_size;
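+		/*
+		 * RX_FIFO_8W indicates at least 8 words (32 bytes) in
+		 * the FIFO, which can be burst out with readsw();
+		 * RX_READY indicates at least one word.
+		 */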
+ while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) {
+ readsw(iobase + MVSD_FIFO, p, 16);
+ p += 16;
+ s -= 32;
+ intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
+ }
+		/*
+		 * Normally we'd use < 32 here, but the RX_FIFO_8W bit
+		 * doesn't appear to assert when there are exactly
+		 * 32 bytes (8 words) left to fetch in a transfer.
+		 */
+ if (s <= 32) {
+ while (s >= 4 && (intr_status & MVSD_NOR_RX_READY)) {
+ put_unaligned(mvsd_read(MVSD_FIFO), p++);
+ put_unaligned(mvsd_read(MVSD_FIFO), p++);
+ s -= 4;
+ intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
+ }
+ if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) {
+ u16 val[2] = {0, 0};
+ val[0] = mvsd_read(MVSD_FIFO);
+ val[1] = mvsd_read(MVSD_FIFO);
+ memcpy(p, ((void *)&val) + 4 - s, s);
+ s = 0;
+ intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
+ }
+ if (s == 0) {
+ host->intr_en &=
+ ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W);
+ mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
+ } else if (host->intr_en & MVSD_NOR_RX_FIFO_8W) {
+ host->intr_en &= ~MVSD_NOR_RX_FIFO_8W;
+ host->intr_en |= MVSD_NOR_RX_READY;
+ mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
+ }
+ }
+ dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
+ s, intr_status, mvsd_read(MVSD_HW_STATE));
+ host->pio_ptr = p;
+ host->pio_size = s;
+ irq_handled = 1;
+ } else if (host->pio_size &&
+ (intr_status & host->intr_en &
+ (MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) {
+ u16 *p = host->pio_ptr;
+ int s = host->pio_size;
+		/*
+		 * The TX_FIFO_8W bit is unreliable. When set, bursting
+		 * 16 halfwords into the FIFO all at once drops data.
+		 * In practice TX_AVAIL deasserts after only one word is
+		 * pushed even if TX_FIFO_8W remains set.
+		 */
+ while (s >= 4 && (intr_status & MVSD_NOR_TX_AVAIL)) {
+ mvsd_write(MVSD_FIFO, get_unaligned(p++));
+ mvsd_write(MVSD_FIFO, get_unaligned(p++));
+ s -= 4;
+ intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
+ }
+ if (s < 4) {
+ if (s && (intr_status & MVSD_NOR_TX_AVAIL)) {
+ u16 val[2] = {0, 0};
+ memcpy(((void *)&val) + 4 - s, p, s);
+ mvsd_write(MVSD_FIFO, val[0]);
+ mvsd_write(MVSD_FIFO, val[1]);
+ s = 0;
+ intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
+ }
+ if (s == 0) {
+ host->intr_en &=
+ ~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W);
+ mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
+ }
+ }
+ dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
+ s, intr_status, mvsd_read(MVSD_HW_STATE));
+ host->pio_ptr = p;
+ host->pio_size = s;
+ irq_handled = 1;
+ }
+
+ mvsd_write(MVSD_NOR_INTR_STATUS, intr_status);
+
+ intr_done_mask = MVSD_NOR_CARD_INT | MVSD_NOR_RX_READY |
+ MVSD_NOR_RX_FIFO_8W | MVSD_NOR_TX_FIFO_8W;
+ if (intr_status & host->intr_en & ~intr_done_mask) {
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = mrq->cmd;
+ u32 err_status = 0;
+
+ del_timer(&host->timer);
+ host->mrq = NULL;
+
+ host->intr_en &= MVSD_NOR_CARD_INT;
+ mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
+ mvsd_write(MVSD_ERR_INTR_EN, 0);
+
+ spin_unlock(&host->lock);
+
+ if (intr_status & MVSD_NOR_UNEXP_RSP) {
+ cmd->error = -EPROTO;
+ } else if (intr_status & MVSD_NOR_ERROR) {
+ err_status = mvsd_read(MVSD_ERR_INTR_STATUS);
+ dev_dbg(host->dev, "err 0x%04x\n", err_status);
+ }
+
+ err_status = mvsd_finish_cmd(host, cmd, err_status);
+ if (mrq->data)
+ err_status = mvsd_finish_data(host, mrq->data, err_status);
+ if (err_status) {
+ dev_err(host->dev, "unhandled error status %#04x\n",
+ err_status);
+ cmd->error = -ENOMSG;
+ }
+
+ mmc_request_done(host->mmc, mrq);
+ irq_handled = 1;
+ } else
+ spin_unlock(&host->lock);
+
+ if (intr_status & MVSD_NOR_CARD_INT) {
+ mmc_signal_sdio_irq(host->mmc);
+ irq_handled = 1;
+ }
+
+ if (irq_handled)
+ return IRQ_HANDLED;
+
+ dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n",
+ intr_status, host->intr_en, host->pio_size);
+ return IRQ_NONE;
+}
+
+static void mvsd_timeout_timer(unsigned long data)
+{
+ struct mvsd_host *host = (struct mvsd_host *)data;
+ void __iomem *iobase = host->base;
+ struct mmc_request *mrq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ mrq = host->mrq;
+ if (mrq) {
+ dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");
+ dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n",
+ mvsd_read(MVSD_HW_STATE),
+ mvsd_read(MVSD_NOR_INTR_STATUS),
+ mvsd_read(MVSD_NOR_INTR_EN));
+
+ host->mrq = NULL;
+
+ mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
+
+ host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
+ mvsd_write(MVSD_XFER_MODE, host->xfer_mode);
+
+ host->intr_en &= MVSD_NOR_CARD_INT;
+ mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
+ mvsd_write(MVSD_ERR_INTR_EN, 0);
+ mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
+
+ mrq->cmd->error = -ETIMEDOUT;
+ mvsd_finish_cmd(host, mrq->cmd, 0);
+ if (mrq->data) {
+ mrq->data->error = -ETIMEDOUT;
+ mvsd_finish_data(host, mrq->data, 0);
+ }
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (mrq)
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mvsd_host *host = mmc_priv(mmc);
+ void __iomem *iobase = host->base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (enable) {
+ host->xfer_mode |= MVSD_XFER_MODE_INT_CHK_EN;
+ host->intr_en |= MVSD_NOR_CARD_INT;
+ } else {
+ host->xfer_mode &= ~MVSD_XFER_MODE_INT_CHK_EN;
+ host->intr_en &= ~MVSD_NOR_CARD_INT;
+ }
+ mvsd_write(MVSD_XFER_MODE, host->xfer_mode);
+ mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void mvsd_power_up(struct mvsd_host *host)
+{
+ void __iomem *iobase = host->base;
+ dev_dbg(host->dev, "power up\n");
+ mvsd_write(MVSD_NOR_INTR_EN, 0);
+ mvsd_write(MVSD_ERR_INTR_EN, 0);
+ mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
+ mvsd_write(MVSD_XFER_MODE, 0);
+ mvsd_write(MVSD_NOR_STATUS_EN, 0xffff);
+ mvsd_write(MVSD_ERR_STATUS_EN, 0xffff);
+ mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
+ mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
+}
+
+static void mvsd_power_down(struct mvsd_host *host)
+{
+ void __iomem *iobase = host->base;
+ dev_dbg(host->dev, "power down\n");
+ mvsd_write(MVSD_NOR_INTR_EN, 0);
+ mvsd_write(MVSD_ERR_INTR_EN, 0);
+ mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
+ mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
+ mvsd_write(MVSD_NOR_STATUS_EN, 0);
+ mvsd_write(MVSD_ERR_STATUS_EN, 0);
+ mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
+ mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
+}
+
+static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mvsd_host *host = mmc_priv(mmc);
+ void __iomem *iobase = host->base;
+ u32 ctrl_reg = 0;
+
+ if (ios->power_mode == MMC_POWER_UP)
+ mvsd_power_up(host);
+
+ if (ios->clock == 0) {
+ mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
+ mvsd_write(MVSD_CLK_DIV, MVSD_BASE_DIV_MAX);
+ host->clock = 0;
+ dev_dbg(host->dev, "clock off\n");
+ } else if (ios->clock != host->clock) {
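+		/*
+		 * The divider yields f = base_clock / (m + 1); rounding
+		 * up ensures the result never exceeds the requested
+		 * clock.
+		 */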
+ u32 m = DIV_ROUND_UP(host->base_clock, ios->clock) - 1;
+ if (m > MVSD_BASE_DIV_MAX)
+ m = MVSD_BASE_DIV_MAX;
+ mvsd_write(MVSD_CLK_DIV, m);
+ host->clock = ios->clock;
+ host->ns_per_clk = 1000000000 / (host->base_clock / (m+1));
+ dev_dbg(host->dev, "clock=%d (%d), div=0x%04x\n",
+ ios->clock, host->base_clock / (m+1), m);
+ }
+
+ /* default transfer mode */
+ ctrl_reg |= MVSD_HOST_CTRL_BIG_ENDIAN;
+ ctrl_reg &= ~MVSD_HOST_CTRL_LSB_FIRST;
+
+ /* default to maximum timeout */
+ ctrl_reg |= MVSD_HOST_CTRL_TMOUT_MASK;
+ ctrl_reg |= MVSD_HOST_CTRL_TMOUT_EN;
+
+ if (ios->bus_mode == MMC_BUSMODE_PUSHPULL)
+ ctrl_reg |= MVSD_HOST_CTRL_PUSH_PULL_EN;
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS;
+
+	/*
+	 * The HI_SPEED_EN bit is causing trouble with many (but not all)
+	 * high speed SD, SDHC and SDIO cards. Not enabling that bit
+	 * makes all cards work. So let's just leave it disabled for now
+	 * and revisit this issue if problems caused by not enabling it
+	 * are ever reported.
+	 */
+#if 0
+ if (ios->timing == MMC_TIMING_MMC_HS ||
+ ios->timing == MMC_TIMING_SD_HS)
+ ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN;
+#endif
+
+ host->ctrl = ctrl_reg;
+ mvsd_write(MVSD_HOST_CTRL, ctrl_reg);
+ dev_dbg(host->dev, "ctrl 0x%04x: %s %s %s\n", ctrl_reg,
+ (ctrl_reg & MVSD_HOST_CTRL_PUSH_PULL_EN) ?
+ "push-pull" : "open-drain",
+ (ctrl_reg & MVSD_HOST_CTRL_DATA_WIDTH_4_BITS) ?
+ "4bit-width" : "1bit-width",
+ (ctrl_reg & MVSD_HOST_CTRL_HI_SPEED_EN) ?
+ "high-speed" : "");
+
+ if (ios->power_mode == MMC_POWER_OFF)
+ mvsd_power_down(host);
+}
+
+static const struct mmc_host_ops mvsd_ops = {
+ .request = mvsd_request,
+ .get_ro = mmc_gpio_get_ro,
+ .set_ios = mvsd_set_ios,
+ .enable_sdio_irq = mvsd_enable_sdio_irq,
+};
+
+static void
+mv_conf_mbus_windows(struct mvsd_host *host,
+ const struct mbus_dram_target_info *dram)
+{
+ void __iomem *iobase = host->base;
+ int i;
+
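+	/* Disable all four windows, then map one per DRAM chip select. */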
+ for (i = 0; i < 4; i++) {
+ writel(0, iobase + MVSD_WINDOW_CTRL(i));
+ writel(0, iobase + MVSD_WINDOW_BASE(i));
+ }
+
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+ writel(((cs->size - 1) & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ (dram->mbus_dram_target_id << 4) | 1,
+ iobase + MVSD_WINDOW_CTRL(i));
+ writel(cs->base, iobase + MVSD_WINDOW_BASE(i));
+ }
+}
+
+static int mvsd_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mmc_host *mmc = NULL;
+ struct mvsd_host *host = NULL;
+ const struct mbus_dram_target_info *dram;
+ struct resource *r;
+ int ret, irq;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!r || irq < 0)
+ return -ENXIO;
+
+ mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->dev = &pdev->dev;
+
+	/*
+	 * Some non-DT platforms do not pass a clock, and the clock
+	 * frequency is passed through platform_data. On DT platforms,
+	 * a clock must always be passed, even if there is no gatable
+	 * clock associated with the SDIO interface (it can simply be
+	 * a fixed-rate clock).
+	 */
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(host->clk))
+ clk_prepare_enable(host->clk);
+
+ mmc->ops = &mvsd_ops;
+
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+ mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX);
+ mmc->f_max = MVSD_CLOCKRATE_MAX;
+
+ mmc->max_blk_size = 2048;
+ mmc->max_blk_count = 65535;
+
+ mmc->max_segs = 1;
+ mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+
+ if (np) {
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "DT platforms must have a clock associated\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ host->base_clock = clk_get_rate(host->clk) / 2;
+ ret = mmc_of_parse(mmc);
+ if (ret < 0)
+ goto out;
+ } else {
+ const struct mvsdio_platform_data *mvsd_data;
+
+ mvsd_data = pdev->dev.platform_data;
+ if (!mvsd_data) {
+ ret = -ENXIO;
+ goto out;
+ }
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ |
+ MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
+ host->base_clock = mvsd_data->clock / 2;
+ /* GPIO 0 regarded as invalid for backward compatibility */
+ if (mvsd_data->gpio_card_detect &&
+ gpio_is_valid(mvsd_data->gpio_card_detect)) {
+ ret = mmc_gpio_request_cd(mmc,
+ mvsd_data->gpio_card_detect,
+ 0);
+ if (ret)
+ goto out;
+ } else {
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+ }
+
+ if (mvsd_data->gpio_write_protect &&
+ gpio_is_valid(mvsd_data->gpio_write_protect))
+ mmc_gpio_request_ro(mmc, mvsd_data->gpio_write_protect);
+ }
+
+ if (maxfreq)
+ mmc->f_max = maxfreq;
+
+ spin_lock_init(&host->lock);
+
+ host->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
+ goto out;
+ }
+
+ /* (Re-)program MBUS remapping windows if we are asked to. */
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_conf_mbus_windows(host, dram);
+
+ mvsd_power_down(host);
+
+ ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot assign irq %d\n", irq);
+ goto out;
+ }
+
+ setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
+ platform_set_drvdata(pdev, mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto out;
+
+ if (!(mmc->caps & MMC_CAP_NEEDS_POLL))
+ dev_dbg(&pdev->dev, "using GPIO for card detection\n");
+ else
+ dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n");
+
+ return 0;
+
+out:
+ if (mmc) {
+ if (!IS_ERR(host->clk))
+ clk_disable_unprepare(host->clk);
+ mmc_free_host(mmc);
+ }
+
+ return ret;
+}
+
+static int mvsd_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+
+ struct mvsd_host *host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+ del_timer_sync(&host->timer);
+ mvsd_power_down(host);
+
+ if (!IS_ERR(host->clk))
+ clk_disable_unprepare(host->clk);
+ mmc_free_host(mmc);
+
+ return 0;
+}
+
+static const struct of_device_id mvsdio_dt_ids[] = {
+ { .compatible = "marvell,orion-sdio" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);
+
+static struct platform_driver mvsd_driver = {
+ .probe = mvsd_probe,
+ .remove = mvsd_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = mvsdio_dt_ids,
+ },
+};
+
+module_platform_driver(mvsd_driver);
+
+/* maximum card clock frequency (default 50MHz) */
+module_param(maxfreq, int, 0);
+
+/* force PIO transfers all the time */
+module_param(nodma, int, 0);
+
+MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre");
+MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mvsdio");
diff --git a/kernel/drivers/mmc/host/mvsdio.h b/kernel/drivers/mmc/host/mvsdio.h
new file mode 100644
index 000000000..7d9727b9f
--- /dev/null
+++ b/kernel/drivers/mmc/host/mvsdio.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2008 Marvell Semiconductors, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MVSDIO_H
+#define __MVSDIO_H
+
+/*
+ * Clock rates
+ */
+
+#define MVSD_CLOCKRATE_MAX 50000000
+#define MVSD_BASE_DIV_MAX 0x7ff
+
+
+/*
+ * Register offsets
+ */
+
+#define MVSD_SYS_ADDR_LOW 0x000
+#define MVSD_SYS_ADDR_HI 0x004
+#define MVSD_BLK_SIZE 0x008
+#define MVSD_BLK_COUNT 0x00c
+#define MVSD_ARG_LOW 0x010
+#define MVSD_ARG_HI 0x014
+#define MVSD_XFER_MODE 0x018
+#define MVSD_CMD 0x01c
+#define MVSD_RSP(i) (0x020 + ((i)<<2))
+#define MVSD_RSP0 0x020
+#define MVSD_RSP1 0x024
+#define MVSD_RSP2 0x028
+#define MVSD_RSP3 0x02c
+#define MVSD_RSP4 0x030
+#define MVSD_RSP5 0x034
+#define MVSD_RSP6 0x038
+#define MVSD_RSP7 0x03c
+#define MVSD_FIFO 0x040
+#define MVSD_RSP_CRC7 0x044
+#define MVSD_HW_STATE 0x048
+#define MVSD_HOST_CTRL 0x050
+#define MVSD_BLK_GAP_CTRL 0x054
+#define MVSD_CLK_CTRL 0x058
+#define MVSD_SW_RESET 0x05c
+#define MVSD_NOR_INTR_STATUS 0x060
+#define MVSD_ERR_INTR_STATUS 0x064
+#define MVSD_NOR_STATUS_EN 0x068
+#define MVSD_ERR_STATUS_EN 0x06c
+#define MVSD_NOR_INTR_EN 0x070
+#define MVSD_ERR_INTR_EN 0x074
+#define MVSD_AUTOCMD12_ERR_STATUS 0x078
+#define MVSD_CURR_BYTE_LEFT 0x07c
+#define MVSD_CURR_BLK_LEFT 0x080
+#define MVSD_AUTOCMD12_ARG_LOW 0x084
+#define MVSD_AUTOCMD12_ARG_HI 0x088
+#define MVSD_AUTOCMD12_CMD 0x08c
+#define MVSD_AUTO_RSP(i) (0x090 + ((i)<<2))
+#define MVSD_AUTO_RSP0 0x090
+#define MVSD_AUTO_RSP1 0x094
+#define MVSD_AUTO_RSP2 0x098
+#define MVSD_CLK_DIV 0x128
+
+#define MVSD_WINDOW_CTRL(i) (0x108 + ((i) << 3))
+#define MVSD_WINDOW_BASE(i) (0x10c + ((i) << 3))
+
+
+/*
+ * MVSD_CMD
+ */
+
+#define MVSD_CMD_RSP_NONE (0 << 0)
+#define MVSD_CMD_RSP_136 (1 << 0)
+#define MVSD_CMD_RSP_48 (2 << 0)
+#define MVSD_CMD_RSP_48BUSY (3 << 0)
+
+#define MVSD_CMD_CHECK_DATACRC16 (1 << 2)
+#define MVSD_CMD_CHECK_CMDCRC (1 << 3)
+#define MVSD_CMD_INDX_CHECK (1 << 4)
+#define MVSD_CMD_DATA_PRESENT (1 << 5)
+#define MVSD_UNEXPECTED_RESP (1 << 7)
+#define MVSD_CMD_INDEX(x) ((x) << 8)
+
+
+/*
+ * MVSD_AUTOCMD12_CMD
+ */
+
+#define MVSD_AUTOCMD12_BUSY (1 << 0)
+#define MVSD_AUTOCMD12_INDX_CHECK (1 << 1)
+#define MVSD_AUTOCMD12_INDEX(x) ((x) << 8)
+
+/*
+ * MVSD_XFER_MODE
+ */
+
+#define MVSD_XFER_MODE_WR_DATA_START (1 << 0)
+#define MVSD_XFER_MODE_HW_WR_DATA_EN (1 << 1)
+#define MVSD_XFER_MODE_AUTO_CMD12 (1 << 2)
+#define MVSD_XFER_MODE_INT_CHK_EN (1 << 3)
+#define MVSD_XFER_MODE_TO_HOST (1 << 4)
+#define MVSD_XFER_MODE_STOP_CLK (1 << 5)
+#define MVSD_XFER_MODE_PIO (1 << 6)
+
+
+/*
+ * MVSD_HOST_CTRL
+ */
+
+#define MVSD_HOST_CTRL_PUSH_PULL_EN (1 << 0)
+
+#define MVSD_HOST_CTRL_CARD_TYPE_MEM_ONLY (0 << 1)
+#define MVSD_HOST_CTRL_CARD_TYPE_IO_ONLY (1 << 1)
+#define MVSD_HOST_CTRL_CARD_TYPE_IO_MEM_COMBO (2 << 1)
+#define MVSD_HOST_CTRL_CARD_TYPE_IO_MMC (3 << 1)
+#define MVSD_HOST_CTRL_CARD_TYPE_MASK (3 << 1)
+
+#define MVSD_HOST_CTRL_BIG_ENDIAN (1 << 3)
+#define MVSD_HOST_CTRL_LSB_FIRST (1 << 4)
+#define MVSD_HOST_CTRL_DATA_WIDTH_4_BITS (1 << 9)
+#define MVSD_HOST_CTRL_HI_SPEED_EN (1 << 10)
+
+#define MVSD_HOST_CTRL_TMOUT_MAX 0xf
+#define MVSD_HOST_CTRL_TMOUT_MASK (0xf << 11)
+#define MVSD_HOST_CTRL_TMOUT(x) ((x) << 11)
+#define MVSD_HOST_CTRL_TMOUT_EN (1 << 15)
+
+
+/*
+ * MVSD_SW_RESET
+ */
+
+#define MVSD_SW_RESET_NOW (1 << 8)
+
+
+/*
+ * Normal interrupt status bits
+ */
+
+#define MVSD_NOR_CMD_DONE (1 << 0)
+#define MVSD_NOR_XFER_DONE (1 << 1)
+#define MVSD_NOR_BLK_GAP_EVT (1 << 2)
+#define MVSD_NOR_DMA_DONE (1 << 3)
+#define MVSD_NOR_TX_AVAIL (1 << 4)
+#define MVSD_NOR_RX_READY (1 << 5)
+#define MVSD_NOR_CARD_INT (1 << 8)
+#define MVSD_NOR_READ_WAIT_ON (1 << 9)
+#define MVSD_NOR_RX_FIFO_8W (1 << 10)
+#define MVSD_NOR_TX_FIFO_8W (1 << 11)
+#define MVSD_NOR_SUSPEND_ON (1 << 12)
+#define MVSD_NOR_AUTOCMD12_DONE (1 << 13)
+#define MVSD_NOR_UNEXP_RSP (1 << 14)
+#define MVSD_NOR_ERROR (1 << 15)
+
+
+/*
+ * Error status bits
+ */
+
+#define MVSD_ERR_CMD_TIMEOUT (1 << 0)
+#define MVSD_ERR_CMD_CRC (1 << 1)
+#define MVSD_ERR_CMD_ENDBIT (1 << 2)
+#define MVSD_ERR_CMD_INDEX (1 << 3)
+#define MVSD_ERR_DATA_TIMEOUT (1 << 4)
+#define MVSD_ERR_DATA_CRC (1 << 5)
+#define MVSD_ERR_DATA_ENDBIT (1 << 6)
+#define MVSD_ERR_AUTOCMD12 (1 << 8)
+#define MVSD_ERR_CMD_STARTBIT (1 << 9)
+#define MVSD_ERR_XFER_SIZE (1 << 10)
+#define MVSD_ERR_RESP_T_BIT (1 << 11)
+#define MVSD_ERR_CRC_ENDBIT (1 << 12)
+#define MVSD_ERR_CRC_STARTBIT (1 << 13)
+#define MVSD_ERR_CRC_STATUS (1 << 14)
+
+
+/*
+ * CMD12 error status bits
+ */
+
+#define MVSD_AUTOCMD12_ERR_NOTEXE (1 << 0)
+#define MVSD_AUTOCMD12_ERR_TIMEOUT (1 << 1)
+#define MVSD_AUTOCMD12_ERR_CRC (1 << 2)
+#define MVSD_AUTOCMD12_ERR_ENDBIT (1 << 3)
+#define MVSD_AUTOCMD12_ERR_INDEX (1 << 4)
+#define MVSD_AUTOCMD12_ERR_RESP_T_BIT (1 << 5)
+#define MVSD_AUTOCMD12_ERR_RESP_STARTBIT (1 << 6)
+
+#endif
diff --git a/kernel/drivers/mmc/host/mxcmmc.c b/kernel/drivers/mmc/host/mxcmmc.c
new file mode 100644
index 000000000..317d709f7
--- /dev/null
+++ b/kernel/drivers/mmc/host/mxcmmc.c
@@ -0,0 +1,1246 @@
+/*
+ * linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
+ *
+ * This is a driver for the SDHC controller found in Freescale MX2/MX3
+ * SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
+ * Unlike the hardware found on MX1, this hardware just works and does
+ * not need all the quirks found in imxmmc.c, hence the separate driver.
+ *
+ * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
+ *
+ * derived from pxamci.c by Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/of_gpio.h>
+#include <linux/mmc/slot-gpio.h>
+
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <linux/platform_data/mmc-mxcmmc.h>
+
+#include <linux/platform_data/dma-imx.h>
+
+#define DRIVER_NAME "mxc-mmc"
+#define MXCMCI_TIMEOUT_MS 10000
+
+#define MMC_REG_STR_STP_CLK 0x00
+#define MMC_REG_STATUS 0x04
+#define MMC_REG_CLK_RATE 0x08
+#define MMC_REG_CMD_DAT_CONT 0x0C
+#define MMC_REG_RES_TO 0x10
+#define MMC_REG_READ_TO 0x14
+#define MMC_REG_BLK_LEN 0x18
+#define MMC_REG_NOB 0x1C
+#define MMC_REG_REV_NO 0x20
+#define MMC_REG_INT_CNTR 0x24
+#define MMC_REG_CMD 0x28
+#define MMC_REG_ARG 0x2C
+#define MMC_REG_RES_FIFO 0x34
+#define MMC_REG_BUFFER_ACCESS 0x38
+
+#define STR_STP_CLK_RESET (1 << 3)
+#define STR_STP_CLK_START_CLK (1 << 1)
+#define STR_STP_CLK_STOP_CLK (1 << 0)
+
+#define STATUS_CARD_INSERTION (1 << 31)
+#define STATUS_CARD_REMOVAL (1 << 30)
+#define STATUS_YBUF_EMPTY (1 << 29)
+#define STATUS_XBUF_EMPTY (1 << 28)
+#define STATUS_YBUF_FULL (1 << 27)
+#define STATUS_XBUF_FULL (1 << 26)
+#define STATUS_BUF_UND_RUN (1 << 25)
+#define STATUS_BUF_OVFL (1 << 24)
+#define STATUS_SDIO_INT_ACTIVE (1 << 14)
+#define STATUS_END_CMD_RESP (1 << 13)
+#define STATUS_WRITE_OP_DONE (1 << 12)
+#define STATUS_DATA_TRANS_DONE (1 << 11)
+#define STATUS_READ_OP_DONE (1 << 11)
+#define STATUS_WR_CRC_ERROR_CODE_MASK (3 << 10)
+#define STATUS_CARD_BUS_CLK_RUN (1 << 8)
+#define STATUS_BUF_READ_RDY (1 << 7)
+#define STATUS_BUF_WRITE_RDY (1 << 6)
+#define STATUS_RESP_CRC_ERR (1 << 5)
+#define STATUS_CRC_READ_ERR (1 << 3)
+#define STATUS_CRC_WRITE_ERR (1 << 2)
+#define STATUS_TIME_OUT_RESP (1 << 1)
+#define STATUS_TIME_OUT_READ (1 << 0)
+#define STATUS_ERR_MASK 0x2f
+
+#define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1 << 12)
+#define CMD_DAT_CONT_STOP_READWAIT (1 << 11)
+#define CMD_DAT_CONT_START_READWAIT (1 << 10)
+#define CMD_DAT_CONT_BUS_WIDTH_4 (2 << 8)
+#define CMD_DAT_CONT_INIT (1 << 7)
+#define CMD_DAT_CONT_WRITE (1 << 4)
+#define CMD_DAT_CONT_DATA_ENABLE (1 << 3)
+#define CMD_DAT_CONT_RESPONSE_48BIT_CRC (1 << 0)
+#define CMD_DAT_CONT_RESPONSE_136BIT (2 << 0)
+#define CMD_DAT_CONT_RESPONSE_48BIT (3 << 0)
+
+#define INT_SDIO_INT_WKP_EN (1 << 18)
+#define INT_CARD_INSERTION_WKP_EN (1 << 17)
+#define INT_CARD_REMOVAL_WKP_EN (1 << 16)
+#define INT_CARD_INSERTION_EN (1 << 15)
+#define INT_CARD_REMOVAL_EN (1 << 14)
+#define INT_SDIO_IRQ_EN (1 << 13)
+#define INT_DAT0_EN (1 << 12)
+#define INT_BUF_READ_EN (1 << 4)
+#define INT_BUF_WRITE_EN (1 << 3)
+#define INT_END_CMD_RES_EN (1 << 2)
+#define INT_WRITE_OP_DONE_EN (1 << 1)
+#define INT_READ_OP_EN (1 << 0)
+
+enum mxcmci_type {
+ IMX21_MMC,
+ IMX31_MMC,
+ MPC512X_MMC,
+};
+
+struct mxcmci_host {
+ struct mmc_host *mmc;
+ void __iomem *base;
+ dma_addr_t phys_base;
+ int detect_irq;
+ struct dma_chan *dma;
+ struct dma_async_tx_descriptor *desc;
+ int do_dma;
+ int default_irq_mask;
+ int use_sdio;
+ unsigned int power_mode;
+ struct imxmmc_platform_data *pdata;
+
+ struct mmc_request *req;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ unsigned int datasize;
+ unsigned int dma_dir;
+
+ u16 rev_no;
+ unsigned int cmdat;
+
+ struct clk *clk_ipg;
+ struct clk *clk_per;
+
+ int clock;
+
+ struct work_struct datawork;
+ spinlock_t lock;
+
+ int burstlen;
+ int dmareq;
+ struct dma_slave_config dma_slave_config;
+ struct imx_dma_data dma_data;
+
+ struct timer_list watchdog;
+ enum mxcmci_type devtype;
+};
+
+static const struct platform_device_id mxcmci_devtype[] = {
+ {
+ .name = "imx21-mmc",
+ .driver_data = IMX21_MMC,
+ }, {
+ .name = "imx31-mmc",
+ .driver_data = IMX31_MMC,
+ }, {
+ .name = "mpc512x-sdhc",
+ .driver_data = MPC512X_MMC,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxcmci_devtype);
+
+static const struct of_device_id mxcmci_of_match[] = {
+ {
+ .compatible = "fsl,imx21-mmc",
+ .data = &mxcmci_devtype[IMX21_MMC],
+ }, {
+ .compatible = "fsl,imx31-mmc",
+ .data = &mxcmci_devtype[IMX31_MMC],
+ }, {
+ .compatible = "fsl,mpc5121-sdhc",
+ .data = &mxcmci_devtype[MPC512X_MMC],
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, mxcmci_of_match);
+
+static inline int is_imx31_mmc(struct mxcmci_host *host)
+{
+ return host->devtype == IMX31_MMC;
+}
+
+static inline int is_mpc512x_mmc(struct mxcmci_host *host)
+{
+ return host->devtype == MPC512X_MMC;
+}
+
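+/*
+ * On MPC512x the SDHC registers are big-endian; all accessors funnel
+ * through 32-bit big-endian MMIO ops, even for the 16-bit helpers.
+ */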
+static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg)
+{
+ if (IS_ENABLED(CONFIG_PPC_MPC512x))
+ return ioread32be(host->base + reg);
+ else
+ return readl(host->base + reg);
+}
+
+static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg)
+{
+ if (IS_ENABLED(CONFIG_PPC_MPC512x))
+ iowrite32be(val, host->base + reg);
+ else
+ writel(val, host->base + reg);
+}
+
+static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg)
+{
+ if (IS_ENABLED(CONFIG_PPC_MPC512x))
+ return ioread32be(host->base + reg);
+ else
+ return readw(host->base + reg);
+}
+
+static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg)
+{
+ if (IS_ENABLED(CONFIG_PPC_MPC512x))
+ iowrite32be(val, host->base + reg);
+ else
+ writew(val, host->base + reg);
+}
+
+static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
+
+static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd)
+{
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ if (host->power_mode == MMC_POWER_UP)
+ mmc_regulator_set_ocr(host->mmc,
+ host->mmc->supply.vmmc, vdd);
+ else if (host->power_mode == MMC_POWER_OFF)
+ mmc_regulator_set_ocr(host->mmc,
+ host->mmc->supply.vmmc, 0);
+ }
+
+ if (host->pdata && host->pdata->setpower)
+ host->pdata->setpower(mmc_dev(host->mmc), vdd);
+}
+
+static inline int mxcmci_use_dma(struct mxcmci_host *host)
+{
+ return host->do_dma;
+}
+
+static void mxcmci_softreset(struct mxcmci_host *host)
+{
+ int i;
+
+ dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
+
+ /* reset sequence */
+ mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK);
+ mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
+ MMC_REG_STR_STP_CLK);
+
+ for (i = 0; i < 8; i++)
+ mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
+
+ mxcmci_writew(host, 0xff, MMC_REG_RES_TO);
+}
+
+#if IS_ENABLED(CONFIG_PPC_MPC512x)
+static inline void buffer_swap32(u32 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < ((len + 3) / 4); i++) {
+ *buf = swab32(*buf);
+ buf++;
+ }
+}
+
+static void mxcmci_swap_buffers(struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(data->sg, sg, data->sg_len, i)
+ buffer_swap32(sg_virt(sg), sg->length);
+}
+#else
+static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
+#endif
+
+static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
+{
+ unsigned int nob = data->blocks;
+ unsigned int blksz = data->blksz;
+ unsigned int datasize = nob * blksz;
+ struct scatterlist *sg;
+ enum dma_transfer_direction slave_dirn;
+ int i, nents;
+
+ if (data->flags & MMC_DATA_STREAM)
+ nob = 0xffff;
+
+ host->data = data;
+ data->bytes_xfered = 0;
+
+ mxcmci_writew(host, nob, MMC_REG_NOB);
+ mxcmci_writew(host, blksz, MMC_REG_BLK_LEN);
+ host->datasize = datasize;
+
+ if (!mxcmci_use_dma(host))
+ return 0;
+
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (sg->offset & 3 || sg->length & 3 || sg->length < 512) {
+ host->do_dma = 0;
+ return 0;
+ }
+ }
+
+ if (data->flags & MMC_DATA_READ) {
+ host->dma_dir = DMA_FROM_DEVICE;
+ slave_dirn = DMA_DEV_TO_MEM;
+ } else {
+ host->dma_dir = DMA_TO_DEVICE;
+ slave_dirn = DMA_MEM_TO_DEV;
+
+ mxcmci_swap_buffers(data);
+ }
+
+ nents = dma_map_sg(host->dma->device->dev, data->sg,
+ data->sg_len, host->dma_dir);
+ if (nents != data->sg_len)
+ return -EINVAL;
+
+ host->desc = dmaengine_prep_slave_sg(host->dma,
+ data->sg, data->sg_len, slave_dirn,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!host->desc) {
+ dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
+ host->dma_dir);
+ host->do_dma = 0;
+ return 0; /* Fall back to PIO */
+ }
+ wmb();
+
+ dmaengine_submit(host->desc);
+ dma_async_issue_pending(host->dma);
+
+ mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS));
+
+ return 0;
+}
+
+static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat);
+static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
+
+static void mxcmci_dma_callback(void *data)
+{
+ struct mxcmci_host *host = data;
+ u32 stat;
+
+ del_timer(&host->watchdog);
+
+ stat = mxcmci_readl(host, MMC_REG_STATUS);
+
+ dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
+
+ mxcmci_data_done(host, stat);
+}
+
+static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
+ unsigned int cmdat)
+{
+ u32 int_cntr = host->default_irq_mask;
+ unsigned long flags;
+
+ WARN_ON(host->cmd != NULL);
+ host->cmd = cmd;
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1: /* short CRC, OPCODE */
+ case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
+ cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
+ break;
+ case MMC_RSP_R2: /* long 136 bit + CRC */
+ cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
+ break;
+ case MMC_RSP_R3: /* short */
+ cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
+ break;
+ case MMC_RSP_NONE:
+ break;
+ default:
+ dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
+ mmc_resp_type(cmd));
+ cmd->error = -EINVAL;
+ return -EINVAL;
+ }
+
+ int_cntr = INT_END_CMD_RES_EN;
+
+ if (mxcmci_use_dma(host)) {
+ if (host->dma_dir == DMA_FROM_DEVICE) {
+ host->desc->callback = mxcmci_dma_callback;
+ host->desc->callback_param = host;
+ } else {
+ int_cntr |= INT_WRITE_OP_DONE_EN;
+ }
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->use_sdio)
+ int_cntr |= INT_SDIO_IRQ_EN;
+ mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mxcmci_writew(host, cmd->opcode, MMC_REG_CMD);
+ mxcmci_writel(host, cmd->arg, MMC_REG_ARG);
+ mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT);
+
+ return 0;
+}
+
+static void mxcmci_finish_request(struct mxcmci_host *host,
+ struct mmc_request *req)
+{
+ u32 int_cntr = host->default_irq_mask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->use_sdio)
+ int_cntr |= INT_SDIO_IRQ_EN;
+ mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ host->req = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+
+ mmc_request_done(host->mmc, req);
+}
+
+static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
+{
+ struct mmc_data *data = host->data;
+ int data_error;
+
+ if (mxcmci_use_dma(host)) {
+ dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
+ host->dma_dir);
+ mxcmci_swap_buffers(data);
+ }
+
+ if (stat & STATUS_ERR_MASK) {
+ dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
+ stat);
+ if (stat & STATUS_CRC_READ_ERR) {
+ dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
+ data->error = -EILSEQ;
+ } else if (stat & STATUS_CRC_WRITE_ERR) {
+ u32 err_code = (stat >> 9) & 0x3;
+ if (err_code == 2) { /* No CRC response */
+ dev_err(mmc_dev(host->mmc),
+ "%s: No CRC -ETIMEDOUT\n", __func__);
+ data->error = -ETIMEDOUT;
+ } else {
+ dev_err(mmc_dev(host->mmc),
+ "%s: -EILSEQ\n", __func__);
+ data->error = -EILSEQ;
+ }
+ } else if (stat & STATUS_TIME_OUT_READ) {
+ dev_err(mmc_dev(host->mmc),
+ "%s: read -ETIMEDOUT\n", __func__);
+ data->error = -ETIMEDOUT;
+ } else {
+ dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
+ data->error = -EIO;
+ }
+ } else {
+ data->bytes_xfered = host->datasize;
+ }
+
+ data_error = data->error;
+
+ host->data = NULL;
+
+ return data_error;
+}
+
+static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
+{
+ struct mmc_command *cmd = host->cmd;
+ int i;
+ u32 a, b, c;
+
+ if (!cmd)
+ return;
+
+ if (stat & STATUS_TIME_OUT_RESP) {
+ dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
+ cmd->error = -ETIMEDOUT;
+ } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
+ dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
+ cmd->error = -EILSEQ;
+ }
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ for (i = 0; i < 4; i++) {
+ a = mxcmci_readw(host, MMC_REG_RES_FIFO);
+ b = mxcmci_readw(host, MMC_REG_RES_FIFO);
+ cmd->resp[i] = a << 16 | b;
+ }
+ } else {
+ a = mxcmci_readw(host, MMC_REG_RES_FIFO);
+ b = mxcmci_readw(host, MMC_REG_RES_FIFO);
+ c = mxcmci_readw(host, MMC_REG_RES_FIFO);
+ cmd->resp[0] = a << 24 | b << 8 | c >> 8;
+ }
+ }
+}
+
+static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
+{
+ u32 stat;
+ unsigned long timeout = jiffies + HZ;
+
+ do {
+ stat = mxcmci_readl(host, MMC_REG_STATUS);
+ if (stat & STATUS_ERR_MASK)
+ return stat;
+ if (time_after(jiffies, timeout)) {
+ mxcmci_softreset(host);
+ mxcmci_set_clk_rate(host, host->clock);
+ return STATUS_TIME_OUT_READ;
+ }
+ if (stat & mask)
+ return 0;
+ cpu_relax();
+ } while (1);
+}
+
+static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
+{
+ unsigned int stat;
+ u32 *buf = _buf;
+
+ while (bytes > 3) {
+ stat = mxcmci_poll_status(host,
+ STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
+ if (stat)
+ return stat;
+ *buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
+ bytes -= 4;
+ }
+
+ if (bytes) {
+ u8 *b = (u8 *)buf;
+ u32 tmp;
+
+ stat = mxcmci_poll_status(host,
+ STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
+ if (stat)
+ return stat;
+ tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
+ memcpy(b, &tmp, bytes);
+ }
+
+ return 0;
+}
+
+static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
+{
+ unsigned int stat;
+ u32 *buf = _buf;
+
+ while (bytes > 3) {
+ stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
+ if (stat)
+ return stat;
+ mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS);
+ bytes -= 4;
+ }
+
+ if (bytes) {
+ u8 *b = (u8 *)buf;
+ u32 tmp;
+
+ stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
+ if (stat)
+ return stat;
+
+ memcpy(&tmp, b, bytes);
+ mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS);
+ }
+
+ stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
+ if (stat)
+ return stat;
+
+ return 0;
+}
+
+static int mxcmci_transfer_data(struct mxcmci_host *host)
+{
+ struct mmc_data *data = host->req->data;
+ struct scatterlist *sg;
+ int stat, i;
+
+ host->data = data;
+ host->datasize = 0;
+
+ if (data->flags & MMC_DATA_READ) {
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ stat = mxcmci_pull(host, sg_virt(sg), sg->length);
+ if (stat)
+ return stat;
+ host->datasize += sg->length;
+ }
+ } else {
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ stat = mxcmci_push(host, sg_virt(sg), sg->length);
+ if (stat)
+ return stat;
+ host->datasize += sg->length;
+ }
+ stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
+ if (stat)
+ return stat;
+ }
+ return 0;
+}
+
+static void mxcmci_datawork(struct work_struct *work)
+{
+ struct mxcmci_host *host = container_of(work, struct mxcmci_host,
+ datawork);
+ int datastat = mxcmci_transfer_data(host);
+
+ mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
+ MMC_REG_STATUS);
+ mxcmci_finish_data(host, datastat);
+
+ if (host->req->stop) {
+ if (mxcmci_start_cmd(host, host->req->stop, 0)) {
+ mxcmci_finish_request(host, host->req);
+ return;
+ }
+ } else {
+ mxcmci_finish_request(host, host->req);
+ }
+}
+
+static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
+{
+ struct mmc_request *req;
+ int data_error;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (!host->data) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ if (!host->req) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ req = host->req;
+ if (!req->stop)
+ host->req = NULL; /* we will handle finish req below */
+
+ data_error = mxcmci_finish_data(host, stat);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mxcmci_read_response(host, stat);
+ host->cmd = NULL;
+
+ if (req->stop) {
+ if (mxcmci_start_cmd(host, req->stop, 0)) {
+ mxcmci_finish_request(host, req);
+ return;
+ }
+ } else {
+ mxcmci_finish_request(host, req);
+ }
+}
+
+static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
+{
+ mxcmci_read_response(host, stat);
+ host->cmd = NULL;
+
+ if (!host->data && host->req) {
+ mxcmci_finish_request(host, host->req);
+ return;
+ }
+
+	/* For the DMA case the DMA engine handles the data transfer
+	 * automatically. For non-DMA we have to do it ourselves.
+	 * Don't do it in interrupt context though.
+	 */
+ if (!mxcmci_use_dma(host) && host->data)
+ schedule_work(&host->datawork);
+}
+
+static irqreturn_t mxcmci_irq(int irq, void *devid)
+{
+ struct mxcmci_host *host = devid;
+ unsigned long flags;
+ bool sdio_irq;
+ u32 stat;
+
+ stat = mxcmci_readl(host, MMC_REG_STATUS);
+ mxcmci_writel(host,
+ stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
+ STATUS_WRITE_OP_DONE),
+ MMC_REG_STATUS);
+
+ dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
+
+ spin_lock_irqsave(&host->lock, flags);
+ sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE)))
+ mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS);
+
+ if (sdio_irq) {
+ mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS);
+ mmc_signal_sdio_irq(host->mmc);
+ }
+
+ if (stat & STATUS_END_CMD_RESP)
+ mxcmci_cmd_done(host, stat);
+
+ if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) {
+ del_timer(&host->watchdog);
+ mxcmci_data_done(host, stat);
+ }
+
+ if (host->default_irq_mask &&
+ (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
+ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+
+ return IRQ_HANDLED;
+}
+
+static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct mxcmci_host *host = mmc_priv(mmc);
+ unsigned int cmdat = host->cmdat;
+ int error;
+
+ WARN_ON(host->req != NULL);
+
+ host->req = req;
+ host->cmdat &= ~CMD_DAT_CONT_INIT;
+
+ if (host->dma)
+ host->do_dma = 1;
+
+ if (req->data) {
+ error = mxcmci_setup_data(host, req->data);
+ if (error) {
+ req->cmd->error = error;
+ goto out;
+ }
+
+ cmdat |= CMD_DAT_CONT_DATA_ENABLE;
+
+ if (req->data->flags & MMC_DATA_WRITE)
+ cmdat |= CMD_DAT_CONT_WRITE;
+ }
+
+ error = mxcmci_start_cmd(host, req->cmd, cmdat);
+
+out:
+ if (error)
+ mxcmci_finish_request(host, req);
+}
+
+static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
+{
+ unsigned int divider;
+ int prescaler = 0;
+ unsigned int clk_in = clk_get_rate(host->clk_per);
+
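+	/*
+	 * Walk prescaler (powers of two up to 0x800) and divider
+	 * (1..15) until the resulting clock no longer exceeds the
+	 * requested rate.
+	 */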
+ while (prescaler <= 0x800) {
+ for (divider = 1; divider <= 0xF; divider++) {
+ int x;
+
+ x = (clk_in / (divider + 1));
+
+ if (prescaler)
+ x /= (prescaler * 2);
+
+ if (x <= clk_ios)
+ break;
+ }
+ if (divider < 0x10)
+ break;
+
+ if (prescaler == 0)
+ prescaler = 1;
+ else
+ prescaler <<= 1;
+ }
+
+ mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE);
+
+ dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
+ prescaler, divider, clk_in, clk_ios);
+}
+
+static int mxcmci_setup_dma(struct mmc_host *mmc)
+{
+ struct mxcmci_host *host = mmc_priv(mmc);
+ struct dma_slave_config *config = &host->dma_slave_config;
+
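+	/*
+	 * One slave config serves both directions: the DMA engine only
+	 * consumes the src_* or dst_* half matching the transfer direction,
+	 * and both halves point at the same data FIFO register.
+	 */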
+ config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
+ config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
+ config->dst_addr_width = 4;
+ config->src_addr_width = 4;
+ config->dst_maxburst = host->burstlen;
+ config->src_maxburst = host->burstlen;
+ config->device_fc = false;
+
+ return dmaengine_slave_config(host->dma, config);
+}
+
+static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mxcmci_host *host = mmc_priv(mmc);
+ int burstlen, ret;
+
+ /*
+ * use burstlen of 64 (16 words) in 4 bit mode (--> reg value 0)
+ * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
+ */
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ burstlen = 16;
+ else
+ burstlen = 4;
+
+ if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
+ host->burstlen = burstlen;
+ ret = mxcmci_setup_dma(mmc);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to config DMA channel. Falling back to PIO\n");
+ dma_release_channel(host->dma);
+ host->do_dma = 0;
+ host->dma = NULL;
+ }
+ }
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
+ else
+ host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
+
+ if (host->power_mode != ios->power_mode) {
+ host->power_mode = ios->power_mode;
+ mxcmci_set_power(host, ios->vdd);
+
+ if (ios->power_mode == MMC_POWER_ON)
+ host->cmdat |= CMD_DAT_CONT_INIT;
+ }
+
+ if (ios->clock) {
+ mxcmci_set_clk_rate(host, ios->clock);
+ mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
+ } else {
+ mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK);
+ }
+
+ host->clock = ios->clock;
+}
+
+static irqreturn_t mxcmci_detect_irq(int irq, void *data)
+{
+ struct mmc_host *mmc = data;
+
+ dev_dbg(mmc_dev(mmc), "%s\n", __func__);
+
+ mmc_detect_change(mmc, msecs_to_jiffies(250));
+ return IRQ_HANDLED;
+}
+
+static int mxcmci_get_ro(struct mmc_host *mmc)
+{
+ struct mxcmci_host *host = mmc_priv(mmc);
+
+ if (host->pdata && host->pdata->get_ro)
+ return !!host->pdata->get_ro(mmc_dev(mmc));
+ /*
+ * If board doesn't support read only detection (no mmc_gpio
+ * context or gpio is invalid), then let the mmc core decide
+ * what to do.
+ */
+ return mmc_gpio_get_ro(mmc);
+}
+
+static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mxcmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 int_cntr;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->use_sdio = enable;
+ int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR);
+
+ if (enable)
+ int_cntr |= INT_SDIO_IRQ_EN;
+ else
+ int_cntr &= ~INT_SDIO_IRQ_EN;
+
+ mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
+{
+ struct mxcmci_host *mxcmci = mmc_priv(host);
+
+ /*
+ * MX3 SoCs have a silicon bug which corrupts CRC calculation of
+	 * multi-block transfers when the connected SDIO peripheral doesn't
+ * drive the BUSY line as required by the specs.
+ * One way to prevent this is to only allow 1-bit transfers.
+ */
+
+ if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO)
+ host->caps &= ~MMC_CAP_4_BIT_DATA;
+ else
+ host->caps |= MMC_CAP_4_BIT_DATA;
+}
+
+static bool filter(struct dma_chan *chan, void *param)
+{
+ struct mxcmci_host *host = param;
+
+ if (!imx_dma_is_general_purpose(chan))
+ return false;
+
+ chan->private = &host->dma_data;
+
+ return true;
+}
+
+static void mxcmci_watchdog(unsigned long data)
+{
+ struct mmc_host *mmc = (struct mmc_host *)data;
+ struct mxcmci_host *host = mmc_priv(mmc);
+ struct mmc_request *req = host->req;
+ unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
+
+ if (host->dma_dir == DMA_FROM_DEVICE) {
+ dmaengine_terminate_all(host->dma);
+ dev_err(mmc_dev(host->mmc),
+			"%s: read timeout (status = 0x%08x)\n",
+ __func__, stat);
+ } else {
+ dev_err(mmc_dev(host->mmc),
+			"%s: write timeout (status = 0x%08x)\n",
+ __func__, stat);
+ mxcmci_softreset(host);
+ }
+
+	/* Mark the transfer as erroneous and inform the upper layers */
+
+ if (host->data)
+ host->data->error = -ETIMEDOUT;
+ host->req = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+ mmc_request_done(host->mmc, req);
+}
+
+static const struct mmc_host_ops mxcmci_ops = {
+ .request = mxcmci_request,
+ .set_ios = mxcmci_set_ios,
+ .get_ro = mxcmci_get_ro,
+ .enable_sdio_irq = mxcmci_enable_sdio_irq,
+ .init_card = mxcmci_init_card,
+};
+
+static int mxcmci_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct mxcmci_host *host;
+ struct resource *res;
+ int ret = 0, irq;
+ bool dat3_card_detect = false;
+ dma_cap_mask_t mask;
+ const struct of_device_id *of_id;
+ struct imxmmc_platform_data *pdata = pdev->dev.platform_data;
+
+ pr_info("i.MX/MPC512x SDHC driver\n");
+
+ of_id = of_match_device(mxcmci_of_match, &pdev->dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ host = mmc_priv(mmc);
+
+ host->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
+ goto out_free;
+ }
+
+ host->phys_base = res->start;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto out_free;
+ mmc->ops = &mxcmci_ops;
+
+	/* with devicetree, mmc_of_parse() above already set the bus width */
+ if (pdata)
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+ else
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+
+ /* MMC core transfer sizes tunable parameters */
+ mmc->max_blk_size = 2048;
+ mmc->max_blk_count = 65535;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ if (of_id) {
+ const struct platform_device_id *id_entry = of_id->data;
+ host->devtype = id_entry->driver_data;
+ } else {
+ host->devtype = pdev->id_entry->driver_data;
+ }
+
+ /* adjust max_segs after devtype detection */
+ if (!is_mpc512x_mmc(host))
+ mmc->max_segs = 64;
+
+ host->mmc = mmc;
+ host->pdata = pdata;
+ spin_lock_init(&host->lock);
+
+ if (pdata)
+ dat3_card_detect = pdata->dat3_card_detect;
+ else if (!(mmc->caps & MMC_CAP_NONREMOVABLE)
+ && !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
+ dat3_card_detect = true;
+
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ goto out_free;
+
+ if (!mmc->ocr_avail) {
+ if (pdata && pdata->ocr_avail)
+ mmc->ocr_avail = pdata->ocr_avail;
+ else
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ }
+
+ if (dat3_card_detect)
+ host->default_irq_mask =
+ INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
+ else
+ host->default_irq_mask = 0;
+
+ host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(host->clk_ipg)) {
+ ret = PTR_ERR(host->clk_ipg);
+ goto out_free;
+ }
+
+ host->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(host->clk_per)) {
+ ret = PTR_ERR(host->clk_per);
+ goto out_free;
+ }
+
+ clk_prepare_enable(host->clk_per);
+ clk_prepare_enable(host->clk_ipg);
+
+ mxcmci_softreset(host);
+
+ host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO);
+ if (host->rev_no != 0x400) {
+ ret = -ENODEV;
+ dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
+ host->rev_no);
+ goto out_clk_put;
+ }
+
+ mmc->f_min = clk_get_rate(host->clk_per) >> 16;
+ mmc->f_max = clk_get_rate(host->clk_per) >> 1;
+
+ /* recommended in data sheet */
+ mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO);
+
+ mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
+
+ if (!host->pdata) {
+ host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (res) {
+ host->dmareq = res->start;
+ host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
+ host->dma_data.priority = DMA_PRIO_LOW;
+ host->dma_data.dma_request = host->dmareq;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma = dma_request_channel(mask, filter, host);
+ }
+ }
+ if (host->dma)
+ mmc->max_seg_size = dma_get_max_seg_size(
+ host->dma->device->dev);
+ else
+ dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
+
+ INIT_WORK(&host->datawork, mxcmci_datawork);
+
+ ret = devm_request_irq(&pdev->dev, irq, mxcmci_irq, 0,
+ dev_name(&pdev->dev), host);
+ if (ret)
+ goto out_free_dma;
+
+ platform_set_drvdata(pdev, mmc);
+
+ if (host->pdata && host->pdata->init) {
+ ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
+ host->mmc);
+ if (ret)
+ goto out_free_dma;
+ }
+
+ init_timer(&host->watchdog);
+ host->watchdog.function = &mxcmci_watchdog;
+ host->watchdog.data = (unsigned long)mmc;
+
+ mmc_add_host(mmc);
+
+ return 0;
+
+out_free_dma:
+ if (host->dma)
+ dma_release_channel(host->dma);
+
+out_clk_put:
+ clk_disable_unprepare(host->clk_per);
+ clk_disable_unprepare(host->clk_ipg);
+
+out_free:
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+static int mxcmci_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct mxcmci_host *host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+
+ if (host->pdata && host->pdata->exit)
+ host->pdata->exit(&pdev->dev, mmc);
+
+ if (host->dma)
+ dma_release_channel(host->dma);
+
+ clk_disable_unprepare(host->clk_per);
+ clk_disable_unprepare(host->clk_ipg);
+
+ mmc_free_host(mmc);
+
+ return 0;
+}
+
+static int __maybe_unused mxcmci_suspend(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct mxcmci_host *host = mmc_priv(mmc);
+
+ clk_disable_unprepare(host->clk_per);
+ clk_disable_unprepare(host->clk_ipg);
+ return 0;
+}
+
+static int __maybe_unused mxcmci_resume(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct mxcmci_host *host = mmc_priv(mmc);
+
+ clk_prepare_enable(host->clk_per);
+ clk_prepare_enable(host->clk_ipg);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
+
+static struct platform_driver mxcmci_driver = {
+ .probe = mxcmci_probe,
+ .remove = mxcmci_remove,
+ .id_table = mxcmci_devtype,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &mxcmci_pm_ops,
+ .of_match_table = mxcmci_of_match,
+ }
+};
+
+module_platform_driver(mxcmci_driver);
+
+MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxc-mmc");
diff --git a/kernel/drivers/mmc/host/mxs-mmc.c b/kernel/drivers/mmc/host/mxs-mmc.c
new file mode 100644
index 000000000..a82411a2c
--- /dev/null
+++ b/kernel/drivers/mmc/host/mxs-mmc.c
@@ -0,0 +1,745 @@
+/*
+ * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
+ * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
+ *
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/highmem.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <linux/stmp_device.h>
+#include <linux/spi/mxs-spi.h>
+
+#define DRIVER_NAME "mxs-mmc"
+
+#define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \
+ BM_SSP_CTRL1_RESP_ERR_IRQ | \
+ BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
+ BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
+ BM_SSP_CTRL1_DATA_CRC_IRQ | \
+ BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
+ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
+ BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
+
+/* card detect polling timeout */
+#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
+
+struct mxs_mmc_host {
+ struct mxs_ssp ssp;
+
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ unsigned char bus_width;
+ spinlock_t lock;
+ int sdio_irq_en;
+ bool broken_cd;
+};
+
+static int mxs_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ struct mxs_ssp *ssp = &host->ssp;
+ int present, ret;
+
+ if (host->broken_cd)
+ return -ENOSYS;
+
+ ret = mmc_gpio_get_cd(mmc);
+ if (ret >= 0)
+ return ret;
+
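+	/*
+	 * The card-detect bit in the SSP status register is active low;
+	 * when the host relies on polling, the card is assumed present.
+	 */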
+ present = mmc->caps & MMC_CAP_NEEDS_POLL ||
+ !(readl(ssp->base + HW_SSP_STATUS(ssp)) &
+ BM_SSP_STATUS_CARD_DETECT);
+
+ if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
+ present = !present;
+
+ return present;
+}
+
+static int mxs_mmc_reset(struct mxs_mmc_host *host)
+{
+ struct mxs_ssp *ssp = &host->ssp;
+ u32 ctrl0, ctrl1;
+ int ret;
+
+ ret = stmp_reset_block(ssp->base);
+ if (ret)
+ return ret;
+
+ ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
+ ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
+ BF_SSP(0x7, CTRL1_WORD_LENGTH) |
+ BM_SSP_CTRL1_DMA_ENABLE |
+ BM_SSP_CTRL1_POLARITY |
+ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
+ BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
+ BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
+ BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
+ BM_SSP_CTRL1_RESP_ERR_IRQ_EN;
+
+ writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
+ BF_SSP(2, TIMING_CLOCK_DIVIDE) |
+ BF_SSP(0, TIMING_CLOCK_RATE),
+ ssp->base + HW_SSP_TIMING(ssp));
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
+ }
+
+ writel(ctrl0, ssp->base + HW_SSP_CTRL0);
+ writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
+ return 0;
+}
+
+static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
+ struct mmc_command *cmd);
+
+static void mxs_mmc_request_done(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data = host->data;
+ struct mmc_request *mrq = host->mrq;
+ struct mxs_ssp *ssp = &host->ssp;
+
+ if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
+ if (mmc_resp_type(cmd) & MMC_RSP_136) {
+ cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
+ cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp));
+ cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp));
+ cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp));
+ } else {
+ cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
+ }
+ }
+
+ if (data) {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, ssp->dma_dir);
+ /*
+ * If there was an error on any block, we mark all
+ * data blocks as being in error.
+ */
+ if (!data->error)
+ data->bytes_xfered = data->blocks * data->blksz;
+ else
+ data->bytes_xfered = 0;
+
+ host->data = NULL;
+ if (mrq->stop) {
+ mxs_mmc_start_cmd(host, mrq->stop);
+ return;
+ }
+ }
+
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void mxs_mmc_dma_irq_callback(void *param)
+{
+ struct mxs_mmc_host *host = param;
+
+ mxs_mmc_request_done(host);
+}
+
+static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
+{
+ struct mxs_mmc_host *host = dev_id;
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data = host->data;
+ struct mxs_ssp *ssp = &host->ssp;
+ u32 stat;
+
+ spin_lock(&host->lock);
+
+ stat = readl(ssp->base + HW_SSP_CTRL1(ssp));
+ writel(stat & MXS_MMC_IRQ_BITS,
+ ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
+
+ spin_unlock(&host->lock);
+
+ if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
+ mmc_signal_sdio_irq(host->mmc);
+
+ if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
+ cmd->error = -ETIMEDOUT;
+ else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
+ cmd->error = -EIO;
+
+ if (data) {
+ if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
+ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
+ data->error = -ETIMEDOUT;
+ else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
+ data->error = -EILSEQ;
+ else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
+ BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
+ data->error = -EIO;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
+ struct mxs_mmc_host *host, unsigned long flags)
+{
+ struct mxs_ssp *ssp = &host->ssp;
+ struct dma_async_tx_descriptor *desc;
+ struct mmc_data *data = host->data;
+	struct scatterlist *sgl;
+ unsigned int sg_len;
+
+ if (data) {
+ /* data */
+ dma_map_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, ssp->dma_dir);
+ sgl = data->sg;
+ sg_len = data->sg_len;
+ } else {
+		/*
+		 * pio: the mxs-dma driver treats a DMA_TRANS_NONE slave_sg
+		 * request as a list of raw PIO register words, so the words
+		 * below stand in for a real scatterlist.
+		 */
+ sgl = (struct scatterlist *) ssp->ssp_pio_words;
+ sg_len = SSP_PIO_NUM;
+ }
+
+ desc = dmaengine_prep_slave_sg(ssp->dmach,
+ sgl, sg_len, ssp->slave_dirn, flags);
+ if (desc) {
+ desc->callback = mxs_mmc_dma_irq_callback;
+ desc->callback_param = host;
+ } else {
+ if (data)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, ssp->dma_dir);
+ }
+
+ return desc;
+}
+
+static void mxs_mmc_bc(struct mxs_mmc_host *host)
+{
+ struct mxs_ssp *ssp = &host->ssp;
+ struct mmc_command *cmd = host->cmd;
+ struct dma_async_tx_descriptor *desc;
+ u32 ctrl0, cmd0, cmd1;
+
+ ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
+ cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
+ cmd1 = cmd->arg;
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+ }
+
+ ssp->ssp_pio_words[0] = ctrl0;
+ ssp->ssp_pio_words[1] = cmd0;
+ ssp->ssp_pio_words[2] = cmd1;
+ ssp->dma_dir = DMA_NONE;
+ ssp->slave_dirn = DMA_TRANS_NONE;
+ desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+ if (!desc)
+ goto out;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(ssp->dmach);
+ return;
+
+out:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: failed to prep dma\n", __func__);
+}
+
+static void mxs_mmc_ac(struct mxs_mmc_host *host)
+{
+ struct mxs_ssp *ssp = &host->ssp;
+ struct mmc_command *cmd = host->cmd;
+ struct dma_async_tx_descriptor *desc;
+ u32 ignore_crc, get_resp, long_resp;
+ u32 ctrl0, cmd0, cmd1;
+
+ ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
+ 0 : BM_SSP_CTRL0_IGNORE_CRC;
+ get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
+ BM_SSP_CTRL0_GET_RESP : 0;
+ long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
+ BM_SSP_CTRL0_LONG_RESP : 0;
+
+ ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
+ cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
+ cmd1 = cmd->arg;
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+ }
+
+ ssp->ssp_pio_words[0] = ctrl0;
+ ssp->ssp_pio_words[1] = cmd0;
+ ssp->ssp_pio_words[2] = cmd1;
+ ssp->dma_dir = DMA_NONE;
+ ssp->slave_dirn = DMA_TRANS_NONE;
+ desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+ if (!desc)
+ goto out;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(ssp->dmach);
+ return;
+
+out:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: failed to prep dma\n", __func__);
+}
+
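+/* The SSP timeout register field counts in units of 4096 SSP clocks. */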
+static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
+{
+ const unsigned int ssp_timeout_mul = 4096;
+ /*
+ * Calculate ticks in ms since ns are large numbers
+ * and might overflow
+ */
+ const unsigned int clock_per_ms = clock_rate / 1000;
+ const unsigned int ms = ns / 1000;
+ const unsigned int ticks = ms * clock_per_ms;
+ const unsigned int ssp_ticks = ticks / ssp_timeout_mul;
+
+ WARN_ON(ssp_ticks == 0);
+ return ssp_ticks;
+}
+
+static void mxs_mmc_adtc(struct mxs_mmc_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ struct mmc_data *data = cmd->data;
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sgl = data->sg, *sg;
+ unsigned int sg_len = data->sg_len;
+ unsigned int i;
+
+ unsigned short dma_data_dir, timeout;
+ enum dma_transfer_direction slave_dirn;
+ unsigned int data_size = 0, log2_blksz;
+ unsigned int blocks = data->blocks;
+
+ struct mxs_ssp *ssp = &host->ssp;
+
+ u32 ignore_crc, get_resp, long_resp, read;
+ u32 ctrl0, cmd0, cmd1, val;
+
+ ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
+ 0 : BM_SSP_CTRL0_IGNORE_CRC;
+ get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
+ BM_SSP_CTRL0_GET_RESP : 0;
+ long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
+ BM_SSP_CTRL0_LONG_RESP : 0;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ dma_data_dir = DMA_TO_DEVICE;
+ slave_dirn = DMA_MEM_TO_DEV;
+ read = 0;
+ } else {
+ dma_data_dir = DMA_FROM_DEVICE;
+ slave_dirn = DMA_DEV_TO_MEM;
+ read = BM_SSP_CTRL0_READ;
+ }
+
+ ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
+ ignore_crc | get_resp | long_resp |
+ BM_SSP_CTRL0_DATA_XFER | read |
+ BM_SSP_CTRL0_WAIT_FOR_IRQ |
+ BM_SSP_CTRL0_ENABLE;
+
+ cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
+
+	/* get the base-2 logarithm of the block size to program the register */
+ log2_blksz = ilog2(data->blksz);
+
+	/*
+	 * take special care of the case where the total length of data->sg
+	 * is not equal to blocks * blksz
+	 */
+ for_each_sg(sgl, sg, sg_len, i)
+ data_size += sg->length;
+
+ if (data_size != data->blocks * data->blksz)
+ blocks = 1;
+
+	/* xfer count, block size and count are set differently per SSP version */
+ if (ssp_is_old(ssp)) {
+ ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
+ cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
+ BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
+ } else {
+ writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
+ writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
+ BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
+ ssp->base + HW_SSP_BLOCK_SIZE);
+ }
+
+ if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
+ (cmd->opcode == SD_IO_RW_EXTENDED))
+ cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+
+ cmd1 = cmd->arg;
+
+ if (host->sdio_irq_en) {
+ ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+ }
+
+ /* set the timeout count */
+ timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
+ val = readl(ssp->base + HW_SSP_TIMING(ssp));
+ val &= ~(BM_SSP_TIMING_TIMEOUT);
+ val |= BF_SSP(timeout, TIMING_TIMEOUT);
+ writel(val, ssp->base + HW_SSP_TIMING(ssp));
+
+ /* pio */
+ ssp->ssp_pio_words[0] = ctrl0;
+ ssp->ssp_pio_words[1] = cmd0;
+ ssp->ssp_pio_words[2] = cmd1;
+ ssp->dma_dir = DMA_NONE;
+ ssp->slave_dirn = DMA_TRANS_NONE;
+ desc = mxs_mmc_prep_dma(host, 0);
+ if (!desc)
+ goto out;
+
+ /* append data sg */
+ WARN_ON(host->data != NULL);
+ host->data = data;
+ ssp->dma_dir = dma_data_dir;
+ ssp->slave_dirn = slave_dirn;
+ desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ goto out;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(ssp->dmach);
+ return;
+out:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: failed to prep dma\n", __func__);
+}
+
+static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ host->cmd = cmd;
+
+ switch (mmc_cmd_type(cmd)) {
+ case MMC_CMD_BC:
+ mxs_mmc_bc(host);
+ break;
+ case MMC_CMD_BCR:
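+		/* broadcast with response: handled like an addressed command */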
+ mxs_mmc_ac(host);
+ break;
+ case MMC_CMD_AC:
+ mxs_mmc_ac(host);
+ break;
+ case MMC_CMD_ADTC:
+ mxs_mmc_adtc(host);
+ break;
+ default:
+ dev_warn(mmc_dev(host->mmc),
+ "%s: unknown MMC command\n", __func__);
+ break;
+ }
+}
+
+static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+
+ WARN_ON(host->mrq != NULL);
+ host->mrq = mrq;
+ mxs_mmc_start_cmd(host, mrq->cmd);
+}
+
+static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+
+ if (ios->bus_width == MMC_BUS_WIDTH_8)
+ host->bus_width = 2;
+ else if (ios->bus_width == MMC_BUS_WIDTH_4)
+ host->bus_width = 1;
+ else
+ host->bus_width = 0;
+
+ if (ios->clock)
+ mxs_ssp_set_clk_rate(&host->ssp, ios->clock);
+}
+
+static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ struct mxs_ssp *ssp = &host->ssp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->sdio_irq_en = enable;
+
+ if (enable) {
+ writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+ writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
+ ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
+ } else {
+ writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+ writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
+ ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) &
+ BM_SSP_STATUS_SDIO_IRQ)
+ mmc_signal_sdio_irq(host->mmc);
+}
+
+static const struct mmc_host_ops mxs_mmc_ops = {
+ .request = mxs_mmc_request,
+ .get_ro = mmc_gpio_get_ro,
+ .get_cd = mxs_mmc_get_cd,
+ .set_ios = mxs_mmc_set_ios,
+ .enable_sdio_irq = mxs_mmc_enable_sdio_irq,
+};
+
+static struct platform_device_id mxs_ssp_ids[] = {
+ {
+ .name = "imx23-mmc",
+ .driver_data = IMX23_SSP,
+ }, {
+ .name = "imx28-mmc",
+ .driver_data = IMX28_SSP,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxs_ssp_ids);
+
+static const struct of_device_id mxs_mmc_dt_ids[] = {
+ { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, },
+ { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
+
+static int mxs_mmc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(mxs_mmc_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct mxs_mmc_host *host;
+ struct mmc_host *mmc;
+ struct resource *iores;
+ int ret = 0, irq_err;
+ struct regulator *reg_vmmc;
+ struct mxs_ssp *ssp;
+
+ irq_err = platform_get_irq(pdev, 0);
+ if (irq_err < 0)
+ return irq_err;
+
+ mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ host = mmc_priv(mmc);
+ ssp = &host->ssp;
+ ssp->dev = &pdev->dev;
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ssp->base = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(ssp->base)) {
+ ret = PTR_ERR(ssp->base);
+ goto out_mmc_free;
+ }
+
+ ssp->devid = (enum mxs_ssp_id) of_id->data;
+
+ host->mmc = mmc;
+ host->sdio_irq_en = 0;
+
+ reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
+ if (!IS_ERR(reg_vmmc)) {
+ ret = regulator_enable(reg_vmmc);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable vmmc regulator: %d\n", ret);
+ goto out_mmc_free;
+ }
+ }
+
+ ssp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ssp->clk)) {
+ ret = PTR_ERR(ssp->clk);
+ goto out_mmc_free;
+ }
+ ret = clk_prepare_enable(ssp->clk);
+ if (ret)
+ goto out_mmc_free;
+
+ ret = mxs_mmc_reset(host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
+ goto out_clk_disable;
+ }
+
+ ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
+ if (!ssp->dmach) {
+ dev_err(mmc_dev(host->mmc),
+ "%s: failed to request dma\n", __func__);
+ ret = -ENODEV;
+ goto out_clk_disable;
+ }
+
+ /* set mmc core parameters */
+ mmc->ops = &mxs_mmc_ops;
+ mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
+
+ host->broken_cd = of_property_read_bool(np, "broken-cd");
+
+ mmc->f_min = 400000;
+ mmc->f_max = 288000000;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto out_clk_disable;
+
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+ mmc->max_segs = 52;
+ mmc->max_blk_size = 1 << 0xf;
+ mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
+ mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
+ mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);
+
+ platform_set_drvdata(pdev, mmc);
+
+ ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
+ dev_name(&pdev->dev), host);
+ if (ret)
+ goto out_free_dma;
+
+ spin_lock_init(&host->lock);
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto out_free_dma;
+
+ dev_info(mmc_dev(host->mmc), "initialized\n");
+
+ return 0;
+
+out_free_dma:
+ dma_release_channel(ssp->dmach);
+out_clk_disable:
+ clk_disable_unprepare(ssp->clk);
+out_mmc_free:
+ mmc_free_host(mmc);
+ return ret;
+}
+
+static int mxs_mmc_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ struct mxs_ssp *ssp = &host->ssp;
+
+ mmc_remove_host(mmc);
+
+ if (ssp->dmach)
+ dma_release_channel(ssp->dmach);
+
+ clk_disable_unprepare(ssp->clk);
+
+ mmc_free_host(mmc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mxs_mmc_suspend(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ struct mxs_ssp *ssp = &host->ssp;
+
+ clk_disable_unprepare(ssp->clk);
+ return 0;
+}
+
+static int mxs_mmc_resume(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct mxs_mmc_host *host = mmc_priv(mmc);
+ struct mxs_ssp *ssp = &host->ssp;
+
+ return clk_prepare_enable(ssp->clk);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);
+
+static struct platform_driver mxs_mmc_driver = {
+ .probe = mxs_mmc_probe,
+ .remove = mxs_mmc_remove,
+ .id_table = mxs_ssp_ids,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &mxs_mmc_pm_ops,
+ .of_match_table = mxs_mmc_dt_ids,
+ },
+};
+
+module_platform_driver(mxs_mmc_driver);
+
+MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/kernel/drivers/mmc/host/of_mmc_spi.c b/kernel/drivers/mmc/host/of_mmc_spi.c
new file mode 100644
index 000000000..6e218fb1a
--- /dev/null
+++ b/kernel/drivers/mmc/host/of_mmc_spi.c
@@ -0,0 +1,159 @@
+/*
+ * OpenFirmware bindings for the MMC-over-SPI driver
+ *
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/mmc_spi.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+
+/* For archs that don't support NO_IRQ (such as mips), provide a dummy value */
+#ifndef NO_IRQ
+#define NO_IRQ 0
+#endif
+
+MODULE_LICENSE("GPL");
+
+enum {
+ CD_GPIO = 0,
+ WP_GPIO,
+ NUM_GPIOS,
+};
+
+struct of_mmc_spi {
+ int gpios[NUM_GPIOS];
+ bool alow_gpios[NUM_GPIOS];
+ int detect_irq;
+ struct mmc_spi_platform_data pdata;
+};
+
+static struct of_mmc_spi *to_of_mmc_spi(struct device *dev)
+{
+ return container_of(dev->platform_data, struct of_mmc_spi, pdata);
+}
+
+static int of_mmc_spi_init(struct device *dev,
+ irqreturn_t (*irqhandler)(int, void *), void *mmc)
+{
+ struct of_mmc_spi *oms = to_of_mmc_spi(dev);
+
+ return request_threaded_irq(oms->detect_irq, NULL, irqhandler, 0,
+ dev_name(dev), mmc);
+}
+
+static void of_mmc_spi_exit(struct device *dev, void *mmc)
+{
+ struct of_mmc_spi *oms = to_of_mmc_spi(dev);
+
+ free_irq(oms->detect_irq, mmc);
+}
+
+struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct device_node *np = dev->of_node;
+ struct of_mmc_spi *oms;
+ const u32 *voltage_ranges;
+ int num_ranges;
+ int i;
+ int ret = -EINVAL;
+
+ if (dev->platform_data || !np)
+ return dev->platform_data;
+
+ oms = kzalloc(sizeof(*oms), GFP_KERNEL);
+ if (!oms)
+ return NULL;
+
+ voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
+ num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
+ if (!voltage_ranges || !num_ranges) {
+ dev_err(dev, "OF: voltage-ranges unspecified\n");
+ goto err_ocr;
+ }
+
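+	/* each range is a <min max> pair in millivolts, OR-ed into the OCR mask */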
+ for (i = 0; i < num_ranges; i++) {
+ const int j = i * 2;
+ u32 mask;
+
+ mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]),
+ be32_to_cpu(voltage_ranges[j + 1]));
+ if (!mask) {
+ ret = -EINVAL;
+ dev_err(dev, "OF: voltage-range #%d is invalid\n", i);
+ goto err_ocr;
+ }
+ oms->pdata.ocr_mask |= mask;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(oms->gpios); i++) {
+ enum of_gpio_flags gpio_flags;
+
+ oms->gpios[i] = of_get_gpio_flags(np, i, &gpio_flags);
+ if (!gpio_is_valid(oms->gpios[i]))
+ continue;
+
+ if (gpio_flags & OF_GPIO_ACTIVE_LOW)
+ oms->alow_gpios[i] = true;
+ }
+
+ if (gpio_is_valid(oms->gpios[CD_GPIO])) {
+ oms->pdata.cd_gpio = oms->gpios[CD_GPIO];
+ oms->pdata.flags |= MMC_SPI_USE_CD_GPIO;
+ if (!oms->alow_gpios[CD_GPIO])
+ oms->pdata.caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+ }
+ if (gpio_is_valid(oms->gpios[WP_GPIO])) {
+ oms->pdata.ro_gpio = oms->gpios[WP_GPIO];
+ oms->pdata.flags |= MMC_SPI_USE_RO_GPIO;
+ if (!oms->alow_gpios[WP_GPIO])
+ oms->pdata.caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ }
+
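+	/* without a card-detect interrupt, fall back to polling */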
+ oms->detect_irq = irq_of_parse_and_map(np, 0);
+ if (oms->detect_irq != 0) {
+ oms->pdata.init = of_mmc_spi_init;
+ oms->pdata.exit = of_mmc_spi_exit;
+ } else {
+ oms->pdata.caps |= MMC_CAP_NEEDS_POLL;
+ }
+
+ dev->platform_data = &oms->pdata;
+ return dev->platform_data;
+err_ocr:
+ kfree(oms);
+ return NULL;
+}
+EXPORT_SYMBOL(mmc_spi_get_pdata);
+
+void mmc_spi_put_pdata(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct device_node *np = dev->of_node;
+ struct of_mmc_spi *oms = to_of_mmc_spi(dev);
+
+ if (!dev->platform_data || !np)
+ return;
+
+ kfree(oms);
+ dev->platform_data = NULL;
+}
+EXPORT_SYMBOL(mmc_spi_put_pdata);
diff --git a/kernel/drivers/mmc/host/omap.c b/kernel/drivers/mmc/host/omap.c
new file mode 100644
index 000000000..68dd6c79c
--- /dev/null
+++ b/kernel/drivers/mmc/host/omap.c
@@ -0,0 +1,1505 @@
+/*
+ * linux/drivers/mmc/host/omap.c
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
+ * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
+ * Other hacks (DMA, SD, etc) by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/of.h>
+#include <linux/omap-dma.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/platform_data/mmc-omap.h>
+
+
+#define OMAP_MMC_REG_CMD 0x00
+#define OMAP_MMC_REG_ARGL 0x01
+#define OMAP_MMC_REG_ARGH 0x02
+#define OMAP_MMC_REG_CON 0x03
+#define OMAP_MMC_REG_STAT 0x04
+#define OMAP_MMC_REG_IE 0x05
+#define OMAP_MMC_REG_CTO 0x06
+#define OMAP_MMC_REG_DTO 0x07
+#define OMAP_MMC_REG_DATA 0x08
+#define OMAP_MMC_REG_BLEN 0x09
+#define OMAP_MMC_REG_NBLK 0x0a
+#define OMAP_MMC_REG_BUF 0x0b
+#define OMAP_MMC_REG_SDIO 0x0d
+#define OMAP_MMC_REG_REV 0x0f
+#define OMAP_MMC_REG_RSP0 0x10
+#define OMAP_MMC_REG_RSP1 0x11
+#define OMAP_MMC_REG_RSP2 0x12
+#define OMAP_MMC_REG_RSP3 0x13
+#define OMAP_MMC_REG_RSP4 0x14
+#define OMAP_MMC_REG_RSP5 0x15
+#define OMAP_MMC_REG_RSP6 0x16
+#define OMAP_MMC_REG_RSP7 0x17
+#define OMAP_MMC_REG_IOSR 0x18
+#define OMAP_MMC_REG_SYSC 0x19
+#define OMAP_MMC_REG_SYSS 0x1a
+
+#define OMAP_MMC_STAT_CARD_ERR (1 << 14)
+#define OMAP_MMC_STAT_CARD_IRQ (1 << 13)
+#define OMAP_MMC_STAT_OCR_BUSY (1 << 12)
+#define OMAP_MMC_STAT_A_EMPTY (1 << 11)
+#define OMAP_MMC_STAT_A_FULL (1 << 10)
+#define OMAP_MMC_STAT_CMD_CRC (1 << 8)
+#define OMAP_MMC_STAT_CMD_TOUT (1 << 7)
+#define OMAP_MMC_STAT_DATA_CRC (1 << 6)
+#define OMAP_MMC_STAT_DATA_TOUT (1 << 5)
+#define OMAP_MMC_STAT_END_BUSY (1 << 4)
+#define OMAP_MMC_STAT_END_OF_DATA (1 << 3)
+#define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
+#define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
+
+#define mmc_omap7xx() (host->features & MMC_OMAP7XX)
+#define mmc_omap15xx() (host->features & MMC_OMAP15XX)
+#define mmc_omap16xx() (host->features & MMC_OMAP16XX)
+#define MMC_OMAP1_MASK (MMC_OMAP7XX | MMC_OMAP15XX | MMC_OMAP16XX)
+#define mmc_omap1() (host->features & MMC_OMAP1_MASK)
+#define mmc_omap2() (!mmc_omap1())
+
+#define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift)
+#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
+#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
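+
+/*
+ * The OMAP_MMC_REG_* values are register indices; OMAP_MMC_REG() shifts
+ * them left by the per-controller reg_shift to form byte offsets.
+ */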
+
+/*
+ * Command types
+ */
+#define OMAP_MMC_CMDTYPE_BC 0
+#define OMAP_MMC_CMDTYPE_BCR 1
+#define OMAP_MMC_CMDTYPE_AC 2
+#define OMAP_MMC_CMDTYPE_ADTC 3
+
+#define DRIVER_NAME "mmci-omap"
+
+/* Specifies how often in milliseconds to poll for card status changes
+ * when the cover switch is open */
+#define OMAP_MMC_COVER_POLL_DELAY 500
+
+struct mmc_omap_host;
+
+struct mmc_omap_slot {
+ int id;
+ unsigned int vdd;
+ u16 saved_con;
+ u16 bus_mode;
+ unsigned int fclk_freq;
+
+ struct tasklet_struct cover_tasklet;
+ struct timer_list cover_timer;
+ unsigned cover_open;
+
+ struct mmc_request *mrq;
+ struct mmc_omap_host *host;
+ struct mmc_host *mmc;
+ struct omap_mmc_slot_data *pdata;
+};
+
+struct mmc_omap_host {
+ int initialized;
+ struct mmc_request * mrq;
+ struct mmc_command * cmd;
+ struct mmc_data * data;
+ struct mmc_host * mmc;
+ struct device * dev;
+ unsigned char id; /* 16xx chips have 2 MMC blocks */
+ struct clk * iclk;
+ struct clk * fclk;
+ struct dma_chan *dma_rx;
+ u32 dma_rx_burst;
+ struct dma_chan *dma_tx;
+ u32 dma_tx_burst;
+ void __iomem *virt_base;
+ unsigned int phys_base;
+ int irq;
+ unsigned char bus_mode;
+ unsigned int reg_shift;
+
+ struct work_struct cmd_abort_work;
+ unsigned abort:1;
+ struct timer_list cmd_abort_timer;
+
+ struct work_struct slot_release_work;
+ struct mmc_omap_slot *next_slot;
+ struct work_struct send_stop_work;
+ struct mmc_data *stop_data;
+
+ unsigned int sg_len;
+ int sg_idx;
+ u16 * buffer;
+ u32 buffer_bytes_left;
+ u32 total_bytes_left;
+
+ unsigned features;
+ unsigned brs_received:1, dma_done:1;
+ unsigned dma_in_use:1;
+ spinlock_t dma_lock;
+
+ struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS];
+ struct mmc_omap_slot *current_slot;
+ spinlock_t slot_lock;
+ wait_queue_head_t slot_wq;
+ int nr_slots;
+
+ struct timer_list clk_timer;
+ spinlock_t clk_lock; /* for changing enabled state */
+ unsigned int fclk_enabled:1;
+ struct workqueue_struct *mmc_omap_wq;
+
+ struct omap_mmc_platform_data *pdata;
+};
+
+
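+/* Let the controller clock run for at least 8 cycles before it is gated. */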
+static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
+{
+ unsigned long tick_ns;
+
+ if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) {
+ tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, slot->fclk_freq);
+ ndelay(8 * tick_ns);
+ }
+}
+
+static void mmc_omap_fclk_enable(struct mmc_omap_host *host, unsigned int enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (host->fclk_enabled != enable) {
+ host->fclk_enabled = enable;
+ if (enable)
+ clk_enable(host->fclk);
+ else
+ clk_disable(host->fclk);
+ }
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+static void mmc_omap_select_slot(struct mmc_omap_slot *slot, int claimed)
+{
+ struct mmc_omap_host *host = slot->host;
+ unsigned long flags;
+
+ if (claimed)
+ goto no_claim;
+ spin_lock_irqsave(&host->slot_lock, flags);
+ while (host->mmc != NULL) {
+ spin_unlock_irqrestore(&host->slot_lock, flags);
+ wait_event(host->slot_wq, host->mmc == NULL);
+ spin_lock_irqsave(&host->slot_lock, flags);
+ }
+ host->mmc = slot->mmc;
+ spin_unlock_irqrestore(&host->slot_lock, flags);
+no_claim:
+ del_timer(&host->clk_timer);
+ if (host->current_slot != slot || !claimed)
+ mmc_omap_fclk_offdelay(host->current_slot);
+
+ if (host->current_slot != slot) {
+ OMAP_MMC_WRITE(host, CON, slot->saved_con & 0xFC00);
+ if (host->pdata->switch_slot != NULL)
+ host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id);
+ host->current_slot = slot;
+ }
+
+ if (claimed) {
+ mmc_omap_fclk_enable(host, 1);
+
+ /* Doing the dummy read here seems to work around some bug
+ * at least in OMAP24xx silicon where the command would not
+ * start after writing the CMD register. Sigh. */
+ OMAP_MMC_READ(host, CON);
+
+ OMAP_MMC_WRITE(host, CON, slot->saved_con);
+ } else
+ mmc_omap_fclk_enable(host, 0);
+}
+
+static void mmc_omap_start_request(struct mmc_omap_host *host,
+ struct mmc_request *req);
+
+static void mmc_omap_slot_release_work(struct work_struct *work)
+{
+ struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
+ slot_release_work);
+ struct mmc_omap_slot *next_slot = host->next_slot;
+ struct mmc_request *rq;
+
+ host->next_slot = NULL;
+ mmc_omap_select_slot(next_slot, 1);
+
+ rq = next_slot->mrq;
+ next_slot->mrq = NULL;
+ mmc_omap_start_request(host, rq);
+}
+
+static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
+{
+ struct mmc_omap_host *host = slot->host;
+ unsigned long flags;
+ int i;
+
+ BUG_ON(slot == NULL || host->mmc == NULL);
+
+ if (clk_enabled)
+ /* Keeps clock running for at least 8 cycles on valid freq */
+ mod_timer(&host->clk_timer, jiffies + HZ/10);
+ else {
+ del_timer(&host->clk_timer);
+ mmc_omap_fclk_offdelay(slot);
+ mmc_omap_fclk_enable(host, 0);
+ }
+
+ spin_lock_irqsave(&host->slot_lock, flags);
+ /* Check for any pending requests */
+ for (i = 0; i < host->nr_slots; i++) {
+ struct mmc_omap_slot *new_slot;
+
+ if (host->slots[i] == NULL || host->slots[i]->mrq == NULL)
+ continue;
+
+ BUG_ON(host->next_slot != NULL);
+ new_slot = host->slots[i];
+ /* The current slot should not have a request in queue */
+ BUG_ON(new_slot == host->current_slot);
+
+ host->next_slot = new_slot;
+ host->mmc = new_slot->mmc;
+ spin_unlock_irqrestore(&host->slot_lock, flags);
+ queue_work(host->mmc_omap_wq, &host->slot_release_work);
+ return;
+ }
+
+ host->mmc = NULL;
+ wake_up(&host->slot_wq);
+ spin_unlock_irqrestore(&host->slot_lock, flags);
+}
+
+static inline
+int mmc_omap_cover_is_open(struct mmc_omap_slot *slot)
+{
+ if (slot->pdata->get_cover_state)
+ return slot->pdata->get_cover_state(mmc_dev(slot->mmc),
+ slot->id);
+ return 0;
+}
+
+static ssize_t
+mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
+ struct mmc_omap_slot *slot = mmc_priv(mmc);
+
+ return sprintf(buf, "%s\n", mmc_omap_cover_is_open(slot) ? "open" :
+ "closed");
+}
+
+static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
+
+static ssize_t
+mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
+ struct mmc_omap_slot *slot = mmc_priv(mmc);
+
+ return sprintf(buf, "%s\n", slot->pdata->name);
+}
+
+static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);
+
+static void
+mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
+{
+ u32 cmdreg;
+ u32 resptype;
+ u32 cmdtype;
+ u16 irq_mask;
+
+ host->cmd = cmd;
+
+ resptype = 0;
+ cmdtype = 0;
+
+	/* Our hardware needs to know the exact type */
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ break;
+ case MMC_RSP_R1:
+ case MMC_RSP_R1B:
+ /* resp 1, 1b, 6, 7 */
+ resptype = 1;
+ break;
+ case MMC_RSP_R2:
+ resptype = 2;
+ break;
+ case MMC_RSP_R3:
+ resptype = 3;
+ break;
+ default:
+ dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
+ break;
+ }
+
+ if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
+ cmdtype = OMAP_MMC_CMDTYPE_ADTC;
+ } else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
+ cmdtype = OMAP_MMC_CMDTYPE_BC;
+ } else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
+ cmdtype = OMAP_MMC_CMDTYPE_BCR;
+ } else {
+ cmdtype = OMAP_MMC_CMDTYPE_AC;
+ }
+
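+	/* bit 6: open-drain bus mode, bit 11: busy response, bit 15: data read */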
+ cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
+
+ if (host->current_slot->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ cmdreg |= 1 << 6;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ cmdreg |= 1 << 11;
+
+ if (host->data && !(host->data->flags & MMC_DATA_WRITE))
+ cmdreg |= 1 << 15;
+
+ mod_timer(&host->cmd_abort_timer, jiffies + HZ/2);
+
+ OMAP_MMC_WRITE(host, CTO, 200);
+ OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
+ OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
+ irq_mask = OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL |
+ OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT |
+ OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT |
+ OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR |
+ OMAP_MMC_STAT_END_OF_DATA;
+ if (cmd->opcode == MMC_ERASE)
+ irq_mask &= ~OMAP_MMC_STAT_DATA_TOUT;
+ OMAP_MMC_WRITE(host, IE, irq_mask);
+ OMAP_MMC_WRITE(host, CMD, cmdreg);
+}
+
+static void
+mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
+ int abort)
+{
+ enum dma_data_direction dma_data_dir;
+ struct device *dev = mmc_dev(host->mmc);
+ struct dma_chan *c;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ dma_data_dir = DMA_TO_DEVICE;
+ c = host->dma_tx;
+ } else {
+ dma_data_dir = DMA_FROM_DEVICE;
+ c = host->dma_rx;
+ }
+ if (c) {
+ if (data->error) {
+ dmaengine_terminate_all(c);
+ /* Claim nothing transferred on error... */
+ data->bytes_xfered = 0;
+ }
+ dev = c->device->dev;
+ }
+ dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
+}
+
+static void mmc_omap_send_stop_work(struct work_struct *work)
+{
+ struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
+ send_stop_work);
+ struct mmc_omap_slot *slot = host->current_slot;
+ struct mmc_data *data = host->stop_data;
+ unsigned long tick_ns;
+
+ tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, slot->fclk_freq);
+ ndelay(8*tick_ns);
+
+ mmc_omap_start_command(host, data->stop);
+}
+
+static void
+mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
+{
+ if (host->dma_in_use)
+ mmc_omap_release_dma(host, data, data->error);
+
+ host->data = NULL;
+ host->sg_len = 0;
+
+ /* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
+ * dozens of requests until the card finishes writing data.
+ * It'd be cheaper to just wait till an EOFB interrupt arrives...
+ */
+
+ if (!data->stop) {
+ struct mmc_host *mmc;
+
+ host->mrq = NULL;
+ mmc = host->mmc;
+ mmc_omap_release_slot(host->current_slot, 1);
+ mmc_request_done(mmc, data->mrq);
+ return;
+ }
+
+ host->stop_data = data;
+ queue_work(host->mmc_omap_wq, &host->send_stop_work);
+}
+
+static void
+mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops)
+{
+ struct mmc_omap_slot *slot = host->current_slot;
+ unsigned int restarts, passes, timeout;
+ u16 stat = 0;
+
+ /* Sending abort takes 80 clocks. Have some extra and round up */
+ timeout = DIV_ROUND_UP(120 * USEC_PER_SEC, slot->fclk_freq);
+ restarts = 0;
+ while (restarts < maxloops) {
+ OMAP_MMC_WRITE(host, STAT, 0xFFFF);
+ OMAP_MMC_WRITE(host, CMD, (3 << 12) | (1 << 7));
+
+ passes = 0;
+ while (passes < timeout) {
+ stat = OMAP_MMC_READ(host, STAT);
+ if (stat & OMAP_MMC_STAT_END_OF_CMD)
+ goto out;
+ udelay(1);
+ passes++;
+ }
+
+ restarts++;
+ }
+out:
+ OMAP_MMC_WRITE(host, STAT, stat);
+}
+
+static void
+mmc_omap_abort_xfer(struct mmc_omap_host *host, struct mmc_data *data)
+{
+ if (host->dma_in_use)
+ mmc_omap_release_dma(host, data, 1);
+
+ host->data = NULL;
+ host->sg_len = 0;
+
+ mmc_omap_send_abort(host, 10000);
+}
+
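+/*
+ * With DMA in use, a transfer is only complete once both the block-received
+ * status (BRS) interrupt and the DMA completion callback have fired; the
+ * brs_received/dma_done flags record which event came first, and the second
+ * one finishes the request.
+ */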
+static void
+mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
+{
+ unsigned long flags;
+ int done;
+
+ if (!host->dma_in_use) {
+ mmc_omap_xfer_done(host, data);
+ return;
+ }
+ done = 0;
+ spin_lock_irqsave(&host->dma_lock, flags);
+ if (host->dma_done)
+ done = 1;
+ else
+ host->brs_received = 1;
+ spin_unlock_irqrestore(&host->dma_lock, flags);
+ if (done)
+ mmc_omap_xfer_done(host, data);
+}
+
+static void
+mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
+{
+ unsigned long flags;
+ int done;
+
+ done = 0;
+ spin_lock_irqsave(&host->dma_lock, flags);
+ if (host->brs_received)
+ done = 1;
+ else
+ host->dma_done = 1;
+ spin_unlock_irqrestore(&host->dma_lock, flags);
+ if (done)
+ mmc_omap_xfer_done(host, data);
+}
+
+static void
+mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
+{
+ host->cmd = NULL;
+
+ del_timer(&host->cmd_abort_timer);
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ /* response type 2 */
+ cmd->resp[3] =
+ OMAP_MMC_READ(host, RSP0) |
+ (OMAP_MMC_READ(host, RSP1) << 16);
+ cmd->resp[2] =
+ OMAP_MMC_READ(host, RSP2) |
+ (OMAP_MMC_READ(host, RSP3) << 16);
+ cmd->resp[1] =
+ OMAP_MMC_READ(host, RSP4) |
+ (OMAP_MMC_READ(host, RSP5) << 16);
+ cmd->resp[0] =
+ OMAP_MMC_READ(host, RSP6) |
+ (OMAP_MMC_READ(host, RSP7) << 16);
+ } else {
+ /* response types 1, 1b, 3, 4, 5, 6 */
+ cmd->resp[0] =
+ OMAP_MMC_READ(host, RSP6) |
+ (OMAP_MMC_READ(host, RSP7) << 16);
+ }
+ }
+
+ if (host->data == NULL || cmd->error) {
+ struct mmc_host *mmc;
+
+ if (host->data != NULL)
+ mmc_omap_abort_xfer(host, host->data);
+ host->mrq = NULL;
+ mmc = host->mmc;
+ mmc_omap_release_slot(host->current_slot, 1);
+ mmc_request_done(mmc, cmd->mrq);
+ }
+}
+
+/*
+ * Abort stuck command. Can occur when card is removed while it is being
+ * read.
+ */
+static void mmc_omap_abort_command(struct work_struct *work)
+{
+ struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
+ cmd_abort_work);
+ BUG_ON(!host->cmd);
+
+ dev_dbg(mmc_dev(host->mmc), "Aborting stuck command CMD%d\n",
+ host->cmd->opcode);
+
+ if (host->cmd->error == 0)
+ host->cmd->error = -ETIMEDOUT;
+
+ if (host->data == NULL) {
+ struct mmc_command *cmd;
+ struct mmc_host *mmc;
+
+ cmd = host->cmd;
+ host->cmd = NULL;
+ mmc_omap_send_abort(host, 10000);
+
+ host->mrq = NULL;
+ mmc = host->mmc;
+ mmc_omap_release_slot(host->current_slot, 1);
+ mmc_request_done(mmc, cmd->mrq);
+ } else
+ mmc_omap_cmd_done(host, host->cmd);
+
+ host->abort = 0;
+ enable_irq(host->irq);
+}
+
+static void
+mmc_omap_cmd_timer(unsigned long data)
+{
+ struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->slot_lock, flags);
+ if (host->cmd != NULL && !host->abort) {
+ OMAP_MMC_WRITE(host, IE, 0);
+ disable_irq(host->irq);
+ host->abort = 1;
+ queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
+ }
+ spin_unlock_irqrestore(&host->slot_lock, flags);
+}
+
+/* PIO only */
+static void
+mmc_omap_sg_to_buf(struct mmc_omap_host *host)
+{
+ struct scatterlist *sg;
+
+ sg = host->data->sg + host->sg_idx;
+ host->buffer_bytes_left = sg->length;
+ host->buffer = sg_virt(sg);
+ if (host->buffer_bytes_left > host->total_bytes_left)
+ host->buffer_bytes_left = host->total_bytes_left;
+}
+
+static void
+mmc_omap_clk_timer(unsigned long data)
+{
+ struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+
+ mmc_omap_fclk_enable(host, 0);
+}
+
+/* PIO only */
+static void
+mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
+{
+ int n, nwords;
+
+ if (host->buffer_bytes_left == 0) {
+ host->sg_idx++;
+ BUG_ON(host->sg_idx == host->sg_len);
+ mmc_omap_sg_to_buf(host);
+ }
+ n = 64;
+ if (n > host->buffer_bytes_left)
+ n = host->buffer_bytes_left;
+
+ /* Round up to handle odd number of bytes to transfer */
+ nwords = DIV_ROUND_UP(n, 2);
+
+ host->buffer_bytes_left -= n;
+ host->total_bytes_left -= n;
+ host->data->bytes_xfered += n;
+
+ if (write) {
+ __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
+ host->buffer, nwords);
+ } else {
+ __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
+ host->buffer, nwords);
+ }
+
+ host->buffer += nwords;
+}
+
+#ifdef CONFIG_MMC_DEBUG
+static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status)
+{
+ static const char *mmc_omap_status_bits[] = {
+ "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
+ "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
+ };
+ int i;
+ char res[64], *buf = res;
+
+ buf += sprintf(buf, "MMC IRQ 0x%x:", status);
+
+ for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
+ if (status & (1 << i))
+ buf += sprintf(buf, " %s", mmc_omap_status_bits[i]);
+ dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
+}
+#else
+static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status)
+{
+}
+#endif
+
+
+static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
+{
+ struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
+ u16 status;
+ int end_command;
+ int end_transfer;
+ int transfer_error, cmd_error;
+
+ if (host->cmd == NULL && host->data == NULL) {
+ status = OMAP_MMC_READ(host, STAT);
+ dev_info(mmc_dev(host->slots[0]->mmc),
+ "Spurious IRQ 0x%04x\n", status);
+ if (status != 0) {
+ OMAP_MMC_WRITE(host, STAT, status);
+ OMAP_MMC_WRITE(host, IE, 0);
+ }
+ return IRQ_HANDLED;
+ }
+
+ end_command = 0;
+ end_transfer = 0;
+ transfer_error = 0;
+ cmd_error = 0;
+
+ while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
+ int cmd;
+
+ OMAP_MMC_WRITE(host, STAT, status);
+ if (host->cmd != NULL)
+ cmd = host->cmd->opcode;
+ else
+ cmd = -1;
+ dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
+ status, cmd);
+ mmc_omap_report_irq(host, status);
+
+ if (host->total_bytes_left) {
+ if ((status & OMAP_MMC_STAT_A_FULL) ||
+ (status & OMAP_MMC_STAT_END_OF_DATA))
+ mmc_omap_xfer_data(host, 0);
+ if (status & OMAP_MMC_STAT_A_EMPTY)
+ mmc_omap_xfer_data(host, 1);
+ }
+
+ if (status & OMAP_MMC_STAT_END_OF_DATA)
+ end_transfer = 1;
+
+ if (status & OMAP_MMC_STAT_DATA_TOUT) {
+ dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n",
+ cmd);
+ if (host->data) {
+ host->data->error = -ETIMEDOUT;
+ transfer_error = 1;
+ }
+ }
+
+ if (status & OMAP_MMC_STAT_DATA_CRC) {
+ if (host->data) {
+ host->data->error = -EILSEQ;
+ dev_dbg(mmc_dev(host->mmc),
+ "data CRC error, bytes left %d\n",
+ host->total_bytes_left);
+ transfer_error = 1;
+ } else {
+ dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
+ }
+ }
+
+ if (status & OMAP_MMC_STAT_CMD_TOUT) {
+ /* Timeouts are routine with some commands */
+ if (host->cmd) {
+ struct mmc_omap_slot *slot =
+ host->current_slot;
+ if (slot == NULL ||
+ !mmc_omap_cover_is_open(slot))
+ dev_err(mmc_dev(host->mmc),
+ "command timeout (CMD%d)\n",
+ cmd);
+ host->cmd->error = -ETIMEDOUT;
+ end_command = 1;
+ cmd_error = 1;
+ }
+ }
+
+ if (status & OMAP_MMC_STAT_CMD_CRC) {
+ if (host->cmd) {
+ dev_err(mmc_dev(host->mmc),
+ "command CRC error (CMD%d, arg 0x%08x)\n",
+ cmd, host->cmd->arg);
+ host->cmd->error = -EILSEQ;
+ end_command = 1;
+ cmd_error = 1;
+ } else
+ dev_err(mmc_dev(host->mmc),
+ "command CRC error without cmd?\n");
+ }
+
+ if (status & OMAP_MMC_STAT_CARD_ERR) {
+ dev_dbg(mmc_dev(host->mmc),
+ "ignoring card status error (CMD%d)\n",
+ cmd);
+ end_command = 1;
+ }
+
+ /*
+ * NOTE: On 1610 the END_OF_CMD may come too early when
+ * starting a write
+ */
+ if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
+ (!(status & OMAP_MMC_STAT_A_EMPTY))) {
+ end_command = 1;
+ }
+ }
+
+ if (cmd_error && host->data) {
+ del_timer(&host->cmd_abort_timer);
+ host->abort = 1;
+ OMAP_MMC_WRITE(host, IE, 0);
+ disable_irq_nosync(host->irq);
+ queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
+ return IRQ_HANDLED;
+ }
+
+ if (end_command && host->cmd)
+ mmc_omap_cmd_done(host, host->cmd);
+ if (host->data != NULL) {
+ if (transfer_error)
+ mmc_omap_xfer_done(host, host->data);
+ else if (end_transfer)
+ mmc_omap_end_of_data(host, host->data);
+ }
+
+ return IRQ_HANDLED;
+}
+
+void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
+{
+ int cover_open;
+ struct mmc_omap_host *host = dev_get_drvdata(dev);
+ struct mmc_omap_slot *slot = host->slots[num];
+
+ BUG_ON(num >= host->nr_slots);
+
+ /* Other subsystems can call in here before we're initialised. */
+ if (host->nr_slots == 0 || !host->slots[num])
+ return;
+
+ cover_open = mmc_omap_cover_is_open(slot);
+ if (cover_open != slot->cover_open) {
+ slot->cover_open = cover_open;
+ sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch");
+ }
+
+ tasklet_hi_schedule(&slot->cover_tasklet);
+}
+
+static void mmc_omap_cover_timer(unsigned long arg)
+{
+ struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
+ tasklet_schedule(&slot->cover_tasklet);
+}
+
+static void mmc_omap_cover_handler(unsigned long param)
+{
+ struct mmc_omap_slot *slot = (struct mmc_omap_slot *)param;
+ int cover_open = mmc_omap_cover_is_open(slot);
+
+ mmc_detect_change(slot->mmc, 0);
+ if (!cover_open)
+ return;
+
+ /*
+ * If no card is inserted, we postpone polling until
+ * the cover has been closed.
+ */
+ if (slot->mmc->card == NULL || !mmc_card_present(slot->mmc->card))
+ return;
+
+ mod_timer(&slot->cover_timer,
+ jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
+}
+
+static void mmc_omap_dma_callback(void *priv)
+{
+ struct mmc_omap_host *host = priv;
+ struct mmc_data *data = host->data;
+
+ /* If we got to the end of DMA, assume everything went well */
+ data->bytes_xfered += data->blocks * data->blksz;
+
+ mmc_omap_dma_done(host, data);
+}
+
+static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
+{
+ u16 reg;
+
+ reg = OMAP_MMC_READ(host, SDIO);
+ reg &= ~(1 << 5);
+ OMAP_MMC_WRITE(host, SDIO, reg);
+ /* Set maximum timeout */
+ OMAP_MMC_WRITE(host, CTO, 0xff);
+}
+
+static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
+{
+ unsigned int timeout, cycle_ns;
+ u16 reg;
+
+ cycle_ns = 1000000000 / host->current_slot->fclk_freq;
+ timeout = req->data->timeout_ns / cycle_ns;
+ timeout += req->data->timeout_clks;
+
+ /* Check if we need to use timeout multiplier register */
+ reg = OMAP_MMC_READ(host, SDIO);
+ if (timeout > 0xffff) {
+ reg |= (1 << 5);
+ timeout /= 1024;
+ } else
+ reg &= ~(1 << 5);
+ OMAP_MMC_WRITE(host, SDIO, reg);
+ OMAP_MMC_WRITE(host, DTO, timeout);
+}
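+
+/*
+ * Worked example for the arithmetic above (illustrative numbers, not
+ * taken from any particular board): with fclk_freq = 24 MHz,
+ * cycle_ns = 1000000000 / 24000000 = 41. A request with
+ * timeout_ns = 100000000 (100 ms) gives timeout = 2439024 cycles,
+ * which overflows the 16-bit DTO field, so bit 5 of SDIO selects the
+ * x1024 prescaler and DTO is written as 2439024 / 1024 = 2381.
+ */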
+
+static void
+mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
+{
+ struct mmc_data *data = req->data;
+ int i, use_dma = 1, block_size;
+ unsigned sg_len;
+
+ host->data = data;
+ if (data == NULL) {
+ OMAP_MMC_WRITE(host, BLEN, 0);
+ OMAP_MMC_WRITE(host, NBLK, 0);
+ OMAP_MMC_WRITE(host, BUF, 0);
+ host->dma_in_use = 0;
+ set_cmd_timeout(host, req);
+ return;
+ }
+
+ block_size = data->blksz;
+
+ OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
+ OMAP_MMC_WRITE(host, BLEN, block_size - 1);
+ set_data_timeout(host, req);
+
+ /* cope with calling layer confusion; it issues "single
+ * block" writes using multi-block scatterlists.
+ */
+ sg_len = (data->blocks == 1) ? 1 : data->sg_len;
+
+ /* Only do DMA for entire blocks */
+ for (i = 0; i < sg_len; i++) {
+ if ((data->sg[i].length % block_size) != 0) {
+ use_dma = 0;
+ break;
+ }
+ }
+
+ host->sg_idx = 0;
+ if (use_dma) {
+ enum dma_data_direction dma_data_dir;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_chan *c;
+ u32 burst, *bp;
+ u16 buf;
+
+ /*
+ * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
+ * and 24xx. Use 16 or 32 word frames when the
+ * blocksize is at least that large. Blocksize is
+ * usually 512 bytes; but not for some SD reads.
+ */
+ burst = mmc_omap15xx() ? 32 : 64;
+ if (burst > data->blksz)
+ burst = data->blksz;
+
+ burst >>= 1;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ c = host->dma_tx;
+ bp = &host->dma_tx_burst;
+ buf = 0x0f80 | (burst - 1) << 0;
+ dma_data_dir = DMA_TO_DEVICE;
+ } else {
+ c = host->dma_rx;
+ bp = &host->dma_rx_burst;
+ buf = 0x800f | (burst - 1) << 8;
+ dma_data_dir = DMA_FROM_DEVICE;
+ }
+
+ if (!c)
+ goto use_pio;
+
+ /* Only reconfigure if we have a different burst size */
+ if (*bp != burst) {
+ struct dma_slave_config cfg;
+
+ cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
+ cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.src_maxburst = burst;
+ cfg.dst_maxburst = burst;
+
+ if (dmaengine_slave_config(c, &cfg))
+ goto use_pio;
+
+ *bp = burst;
+ }
+
+ host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
+ dma_data_dir);
+ if (host->sg_len == 0)
+ goto use_pio;
+
+ tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
+ data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx)
+ goto use_pio;
+
+ OMAP_MMC_WRITE(host, BUF, buf);
+
+ tx->callback = mmc_omap_dma_callback;
+ tx->callback_param = host;
+ dmaengine_submit(tx);
+ host->brs_received = 0;
+ host->dma_done = 0;
+ host->dma_in_use = 1;
+ return;
+ }
+ use_pio:
+
+ /* Revert to PIO? */
+ OMAP_MMC_WRITE(host, BUF, 0x1f1f);
+ host->total_bytes_left = data->blocks * block_size;
+ host->sg_len = sg_len;
+ mmc_omap_sg_to_buf(host);
+ host->dma_in_use = 0;
+}
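+
+/*
+ * Example of the BUF programming above (assumed values, for
+ * illustration): on a 16xx with blksz = 512, burst starts at 64
+ * bytes, stays below blksz, and becomes 32 16-bit words after the
+ * shift. A write then programs BUF = 0x0f80 | (32 - 1) = 0x0f9f,
+ * while a read programs BUF = 0x800f | ((32 - 1) << 8) = 0x9f0f.
+ */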
+
+static void mmc_omap_start_request(struct mmc_omap_host *host,
+ struct mmc_request *req)
+{
+ BUG_ON(host->mrq != NULL);
+
+ host->mrq = req;
+
+ /* only touch fifo AFTER the controller readies it */
+ mmc_omap_prepare_data(host, req);
+ mmc_omap_start_command(host, req->cmd);
+ if (host->dma_in_use) {
+ struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
+ host->dma_tx : host->dma_rx;
+
+ dma_async_issue_pending(c);
+ }
+}
+
+static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct mmc_omap_slot *slot = mmc_priv(mmc);
+ struct mmc_omap_host *host = slot->host;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->slot_lock, flags);
+ if (host->mmc != NULL) {
+ BUG_ON(slot->mrq != NULL);
+ slot->mrq = req;
+ spin_unlock_irqrestore(&host->slot_lock, flags);
+ return;
+ } else
+ host->mmc = mmc;
+ spin_unlock_irqrestore(&host->slot_lock, flags);
+ mmc_omap_select_slot(slot, 1);
+ mmc_omap_start_request(host, req);
+}
+
+static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
+ int vdd)
+{
+ struct mmc_omap_host *host;
+
+ host = slot->host;
+
+ if (slot->pdata->set_power != NULL)
+ slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
+ vdd);
+ if (mmc_omap2()) {
+ u16 w;
+
+ if (power_on) {
+ w = OMAP_MMC_READ(host, CON);
+ OMAP_MMC_WRITE(host, CON, w | (1 << 11));
+ } else {
+ w = OMAP_MMC_READ(host, CON);
+ OMAP_MMC_WRITE(host, CON, w & ~(1 << 11));
+ }
+ }
+}
+
+static int mmc_omap_calc_divisor(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mmc_omap_slot *slot = mmc_priv(mmc);
+ struct mmc_omap_host *host = slot->host;
+ int func_clk_rate = clk_get_rate(host->fclk);
+ int dsor;
+
+ if (ios->clock == 0)
+ return 0;
+
+ dsor = func_clk_rate / ios->clock;
+ if (dsor < 1)
+ dsor = 1;
+
+ if (func_clk_rate / dsor > ios->clock)
+ dsor++;
+
+ if (dsor > 250)
+ dsor = 250;
+
+ slot->fclk_freq = func_clk_rate / dsor;
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ dsor |= 1 << 15;
+
+ return dsor;
+}
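+
+/*
+ * For illustration (values assumed): with fclk at 48 MHz and
+ * ios->clock = 20 MHz, dsor starts at 48000000 / 20000000 = 2;
+ * 48 MHz / 2 = 24 MHz still exceeds the request, so dsor is bumped
+ * to 3 and the slot runs at 16 MHz. A 4-bit bus then sets bit 15,
+ * so the divisor returned is 0x8003.
+ */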
+
+static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mmc_omap_slot *slot = mmc_priv(mmc);
+ struct mmc_omap_host *host = slot->host;
+ int i, dsor;
+ int clk_enabled;
+
+ mmc_omap_select_slot(slot, 0);
+
+ dsor = mmc_omap_calc_divisor(mmc, ios);
+
+ if (ios->vdd != slot->vdd)
+ slot->vdd = ios->vdd;
+
+ clk_enabled = 0;
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ mmc_omap_set_power(slot, 0, ios->vdd);
+ break;
+ case MMC_POWER_UP:
+ /* Cannot touch dsor yet, just power up MMC */
+ mmc_omap_set_power(slot, 1, ios->vdd);
+ goto exit;
+ case MMC_POWER_ON:
+ mmc_omap_fclk_enable(host, 1);
+ clk_enabled = 1;
+ dsor |= 1 << 11;
+ break;
+ }
+
+ if (slot->bus_mode != ios->bus_mode) {
+ if (slot->pdata->set_bus_mode != NULL)
+ slot->pdata->set_bus_mode(mmc_dev(mmc), slot->id,
+ ios->bus_mode);
+ slot->bus_mode = ios->bus_mode;
+ }
+
+	/* At insanely high arm_per frequencies the controller sometimes
+	 * goes out of sync and the POW bit fails to get set, which
+	 * leaves the while loop below stuck.
+	 * Writing to the CON register twice seems to do the trick. */
+ for (i = 0; i < 2; i++)
+ OMAP_MMC_WRITE(host, CON, dsor);
+ slot->saved_con = dsor;
+ if (ios->power_mode == MMC_POWER_ON) {
+ /* worst case at 400kHz, 80 cycles makes 200 microsecs */
+ int usecs = 250;
+
+ /* Send clock cycles, poll completion */
+ OMAP_MMC_WRITE(host, IE, 0);
+ OMAP_MMC_WRITE(host, STAT, 0xffff);
+ OMAP_MMC_WRITE(host, CMD, 1 << 7);
+ while (usecs > 0 && (OMAP_MMC_READ(host, STAT) & 1) == 0) {
+ udelay(1);
+ usecs--;
+ }
+ OMAP_MMC_WRITE(host, STAT, 1);
+ }
+
+exit:
+ mmc_omap_release_slot(slot, clk_enabled);
+}
+
+static const struct mmc_host_ops mmc_omap_ops = {
+ .request = mmc_omap_request,
+ .set_ios = mmc_omap_set_ios,
+};
+
+static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+{
+ struct mmc_omap_slot *slot = NULL;
+ struct mmc_host *mmc;
+ int r;
+
+ mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
+ if (mmc == NULL)
+ return -ENOMEM;
+
+ slot = mmc_priv(mmc);
+ slot->host = host;
+ slot->mmc = mmc;
+ slot->id = id;
+ slot->pdata = &host->pdata->slots[id];
+
+ host->slots[id] = slot;
+
+ mmc->caps = 0;
+ if (host->pdata->slots[id].wires >= 4)
+ mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_ERASE;
+
+ mmc->ops = &mmc_omap_ops;
+ mmc->f_min = 400000;
+
+ if (mmc_omap2())
+ mmc->f_max = 48000000;
+ else
+ mmc->f_max = 24000000;
+ if (host->pdata->max_freq)
+ mmc->f_max = min(host->pdata->max_freq, mmc->f_max);
+ mmc->ocr_avail = slot->pdata->ocr_mask;
+
+	/* Use scatterlist DMA to reduce per-transfer costs.
+	 * NOTE: max_seg_size assumes that small blocks aren't normally
+	 * used (except e.g. for reading SD registers).
+	 */
+ mmc->max_segs = 32;
+ mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */
+ mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ if (slot->pdata->get_cover_state != NULL) {
+ setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
+ (unsigned long)slot);
+ tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
+ (unsigned long)slot);
+ }
+
+ r = mmc_add_host(mmc);
+ if (r < 0)
+ goto err_remove_host;
+
+ if (slot->pdata->name != NULL) {
+ r = device_create_file(&mmc->class_dev,
+ &dev_attr_slot_name);
+ if (r < 0)
+ goto err_remove_host;
+ }
+
+ if (slot->pdata->get_cover_state != NULL) {
+ r = device_create_file(&mmc->class_dev,
+ &dev_attr_cover_switch);
+ if (r < 0)
+ goto err_remove_slot_name;
+ tasklet_schedule(&slot->cover_tasklet);
+ }
+
+ return 0;
+
+err_remove_slot_name:
+ if (slot->pdata->name != NULL)
+ device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
+err_remove_host:
+ mmc_remove_host(mmc);
+ mmc_free_host(mmc);
+ return r;
+}
+
+static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
+{
+ struct mmc_host *mmc = slot->mmc;
+
+ if (slot->pdata->name != NULL)
+ device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
+ if (slot->pdata->get_cover_state != NULL)
+ device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);
+
+ tasklet_kill(&slot->cover_tasklet);
+ del_timer_sync(&slot->cover_timer);
+ flush_workqueue(slot->host->mmc_omap_wq);
+
+ mmc_remove_host(mmc);
+ mmc_free_host(mmc);
+}
+
+static int mmc_omap_probe(struct platform_device *pdev)
+{
+ struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
+ struct mmc_omap_host *host = NULL;
+ struct resource *res;
+ dma_cap_mask_t mask;
+ unsigned sig = 0;
+ int i, ret = 0;
+ int irq;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -ENXIO;
+ }
+ if (pdata->nr_slots == 0) {
+ dev_err(&pdev->dev, "no slots\n");
+ return -EPROBE_DEFER;
+ }
+
+ host = devm_kzalloc(&pdev->dev, sizeof(struct mmc_omap_host),
+ GFP_KERNEL);
+ if (host == NULL)
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->virt_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->virt_base))
+ return PTR_ERR(host->virt_base);
+
+ INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
+ INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
+
+ INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
+ setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
+ (unsigned long) host);
+
+ spin_lock_init(&host->clk_lock);
+ setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
+
+ spin_lock_init(&host->dma_lock);
+ spin_lock_init(&host->slot_lock);
+ init_waitqueue_head(&host->slot_wq);
+
+ host->pdata = pdata;
+ host->features = host->pdata->slots[0].features;
+ host->dev = &pdev->dev;
+ platform_set_drvdata(pdev, host);
+
+ host->id = pdev->id;
+ host->irq = irq;
+ host->phys_base = res->start;
+ host->iclk = clk_get(&pdev->dev, "ick");
+ if (IS_ERR(host->iclk))
+ return PTR_ERR(host->iclk);
+ clk_enable(host->iclk);
+
+ host->fclk = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(host->fclk)) {
+ ret = PTR_ERR(host->fclk);
+ goto err_free_iclk;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->dma_tx_burst = -1;
+ host->dma_rx_burst = -1;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
+ if (res)
+ sig = res->start;
+ host->dma_tx = dma_request_slave_channel_compat(mask,
+ omap_dma_filter_fn, &sig, &pdev->dev, "tx");
+ if (!host->dma_tx)
+ dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
+ sig);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
+ if (res)
+ sig = res->start;
+ host->dma_rx = dma_request_slave_channel_compat(mask,
+ omap_dma_filter_fn, &sig, &pdev->dev, "rx");
+ if (!host->dma_rx)
+ dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
+ sig);
+
+ ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
+ if (ret)
+ goto err_free_dma;
+
+ if (pdata->init != NULL) {
+ ret = pdata->init(&pdev->dev);
+ if (ret < 0)
+ goto err_free_irq;
+ }
+
+ host->nr_slots = pdata->nr_slots;
+ host->reg_shift = (mmc_omap7xx() ? 1 : 2);
+
+	host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
+	if (!host->mmc_omap_wq) {
+		ret = -ENOMEM;
+		goto err_plat_cleanup;
+	}
+
+ for (i = 0; i < pdata->nr_slots; i++) {
+ ret = mmc_omap_new_slot(host, i);
+ if (ret < 0) {
+ while (--i >= 0)
+ mmc_omap_remove_slot(host->slots[i]);
+
+ goto err_destroy_wq;
+ }
+ }
+
+ return 0;
+
+err_destroy_wq:
+ destroy_workqueue(host->mmc_omap_wq);
+err_plat_cleanup:
+ if (pdata->cleanup)
+ pdata->cleanup(&pdev->dev);
+err_free_irq:
+ free_irq(host->irq, host);
+err_free_dma:
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
+ clk_put(host->fclk);
+err_free_iclk:
+ clk_disable(host->iclk);
+ clk_put(host->iclk);
+ return ret;
+}
+
+static int mmc_omap_remove(struct platform_device *pdev)
+{
+ struct mmc_omap_host *host = platform_get_drvdata(pdev);
+ int i;
+
+ BUG_ON(host == NULL);
+
+ for (i = 0; i < host->nr_slots; i++)
+ mmc_omap_remove_slot(host->slots[i]);
+
+ if (host->pdata->cleanup)
+ host->pdata->cleanup(&pdev->dev);
+
+ mmc_omap_fclk_enable(host, 0);
+ free_irq(host->irq, host);
+ clk_put(host->fclk);
+ clk_disable(host->iclk);
+ clk_put(host->iclk);
+
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
+
+ destroy_workqueue(host->mmc_omap_wq);
+
+ return 0;
+}
+
+#if IS_BUILTIN(CONFIG_OF)
+static const struct of_device_id mmc_omap_match[] = {
+ { .compatible = "ti,omap2420-mmc", },
+ { },
+};
+#endif
+
+static struct platform_driver mmc_omap_driver = {
+ .probe = mmc_omap_probe,
+ .remove = mmc_omap_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(mmc_omap_match),
+ },
+};
+
+module_platform_driver(mmc_omap_driver);
+MODULE_DESCRIPTION("OMAP Multimedia Card driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Juha Yrjölä");
diff --git a/kernel/drivers/mmc/host/omap_hsmmc.c b/kernel/drivers/mmc/host/omap_hsmmc.c
new file mode 100644
index 000000000..9df2b6801
--- /dev/null
+++ b/kernel/drivers/mmc/host/omap_hsmmc.c
@@ -0,0 +1,2344 @@
+/*
+ * drivers/mmc/host/omap_hsmmc.c
+ *
+ * Driver for OMAP2430/3430 MMC controller.
+ *
+ * Copyright (C) 2007 Texas Instruments.
+ *
+ * Authors:
+ * Syed Mohammed Khasim <x0khasim@ti.com>
+ * Madhusudhan <madhu.cr@ti.com>
+ * Mohit Jalori <mjalori@ti.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/dmaengine.h>
+#include <linux/seq_file.h>
+#include <linux/sizes.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/omap-dmaengine.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/hsmmc-omap.h>
+
+/* OMAP HSMMC Host Controller Registers */
+#define OMAP_HSMMC_SYSSTATUS 0x0014
+#define OMAP_HSMMC_CON 0x002C
+#define OMAP_HSMMC_SDMASA 0x0100
+#define OMAP_HSMMC_BLK 0x0104
+#define OMAP_HSMMC_ARG 0x0108
+#define OMAP_HSMMC_CMD 0x010C
+#define OMAP_HSMMC_RSP10 0x0110
+#define OMAP_HSMMC_RSP32 0x0114
+#define OMAP_HSMMC_RSP54 0x0118
+#define OMAP_HSMMC_RSP76 0x011C
+#define OMAP_HSMMC_DATA 0x0120
+#define OMAP_HSMMC_PSTATE 0x0124
+#define OMAP_HSMMC_HCTL 0x0128
+#define OMAP_HSMMC_SYSCTL 0x012C
+#define OMAP_HSMMC_STAT 0x0130
+#define OMAP_HSMMC_IE 0x0134
+#define OMAP_HSMMC_ISE 0x0138
+#define OMAP_HSMMC_AC12 0x013C
+#define OMAP_HSMMC_CAPA 0x0140
+
+#define VS18 (1 << 26)
+#define VS30 (1 << 25)
+#define HSS (1 << 21)
+#define SDVS18 (0x5 << 9)
+#define SDVS30 (0x6 << 9)
+#define SDVS33 (0x7 << 9)
+#define SDVS_MASK 0x00000E00
+#define SDVSCLR 0xFFFFF1FF
+#define SDVSDET 0x00000400
+#define AUTOIDLE 0x1
+#define SDBP (1 << 8)
+#define DTO 0xe
+#define ICE 0x1
+#define ICS 0x2
+#define CEN (1 << 2)
+#define CLKD_MAX 0x3FF /* max clock divisor: 1023 */
+#define CLKD_MASK 0x0000FFC0
+#define CLKD_SHIFT 6
+#define DTO_MASK 0x000F0000
+#define DTO_SHIFT 16
+#define INIT_STREAM (1 << 1)
+#define ACEN_ACMD23 (2 << 2)
+#define DP_SELECT (1 << 21)
+#define DDIR (1 << 4)
+#define DMAE 0x1
+#define MSBS (1 << 5)
+#define BCE (1 << 1)
+#define FOUR_BIT (1 << 1)
+#define HSPE (1 << 2)
+#define IWE (1 << 24)
+#define DDR (1 << 19)
+#define CLKEXTFREE (1 << 16)
+#define CTPL (1 << 11)
+#define DW8 (1 << 5)
+#define OD 0x1
+#define STAT_CLEAR 0xFFFFFFFF
+#define INIT_STREAM_CMD 0x00000000
+#define DUAL_VOLT_OCR_BIT 7
+#define SRC (1 << 25)
+#define SRD (1 << 26)
+#define SOFTRESET (1 << 1)
+
+/* PSTATE */
+#define DLEV_DAT(x) (1 << (20 + (x)))
+
+/* Interrupt masks for IE and ISE register */
+#define CC_EN (1 << 0)
+#define TC_EN (1 << 1)
+#define BWR_EN (1 << 4)
+#define BRR_EN (1 << 5)
+#define CIRQ_EN (1 << 8)
+#define ERR_EN (1 << 15)
+#define CTO_EN (1 << 16)
+#define CCRC_EN (1 << 17)
+#define CEB_EN (1 << 18)
+#define CIE_EN (1 << 19)
+#define DTO_EN (1 << 20)
+#define DCRC_EN (1 << 21)
+#define DEB_EN (1 << 22)
+#define ACE_EN (1 << 24)
+#define CERR_EN (1 << 28)
+#define BADA_EN (1 << 29)
+
+#define INT_EN_MASK (BADA_EN | CERR_EN | ACE_EN | DEB_EN | DCRC_EN |\
+ DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \
+ BRR_EN | BWR_EN | TC_EN | CC_EN)
+
+#define CNI (1 << 7)
+#define ACIE (1 << 4)
+#define ACEB (1 << 3)
+#define ACCE (1 << 2)
+#define ACTO (1 << 1)
+#define ACNE (1 << 0)
+
+#define MMC_AUTOSUSPEND_DELAY 100
+#define MMC_TIMEOUT_MS 20 /* 20 mSec */
+#define MMC_TIMEOUT_US 20000 /* 20000 micro Sec */
+#define OMAP_MMC_MIN_CLOCK 400000
+#define OMAP_MMC_MAX_CLOCK 52000000
+#define DRIVER_NAME "omap_hsmmc"
+
+#define VDD_1V8			1800000		/* 1800000 uV */
+#define VDD_3V0			3000000		/* 3000000 uV */
+#define VDD_165_195 (ffs(MMC_VDD_165_195) - 1)
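+
+/*
+ * VDD_165_195 evaluates to 7: MMC_VDD_165_195 is bit 7 of the OCR mask
+ * and ffs() returns the 1-based bit position, so this is the OCR bit
+ * number that the vdd comparisons below use.
+ */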
+
+/*
+ * One controller can have multiple slots, like on some omap boards using
+ * the omap.c controller driver. Luckily this is not currently done on any
+ * known omap_hsmmc.c device.
+ */
+#define mmc_pdata(host) host->pdata
+
+/*
+ * MMC host controller read/write APIs
+ */
+#define OMAP_HSMMC_READ(base, reg) \
+ __raw_readl((base) + OMAP_HSMMC_##reg)
+
+#define OMAP_HSMMC_WRITE(base, reg, val) \
+ __raw_writel((val), (base) + OMAP_HSMMC_##reg)
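+
+/*
+ * For example, OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR) expands
+ * to __raw_writel(STAT_CLEAR, host->base + 0x0130): the reg argument
+ * is pasted onto the OMAP_HSMMC_ prefix to pick the offset above.
+ */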
+
+struct omap_hsmmc_next {
+ unsigned int dma_len;
+ s32 cookie;
+};
+
+struct omap_hsmmc_host {
+ struct device *dev;
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct clk *fclk;
+ struct clk *dbclk;
+ /*
+ * vcc == configured supply
+ * vcc_aux == optional
+ * - MMC1, supply for DAT4..DAT7
+	 * - MMC2/MMC3, external level shifter voltage supply, for
+	 *   chip (SDIO, eMMC, etc) or transceiver (MMC2 only)
+ */
+ struct regulator *vcc;
+ struct regulator *vcc_aux;
+ struct regulator *pbias;
+ bool pbias_enabled;
+ void __iomem *base;
+ resource_size_t mapbase;
+ spinlock_t irq_lock; /* Prevent races with irq handler */
+ unsigned int dma_len;
+ unsigned int dma_sg_idx;
+ unsigned char bus_mode;
+ unsigned char power_mode;
+ int suspended;
+ u32 con;
+ u32 hctl;
+ u32 sysctl;
+ u32 capa;
+ int irq;
+ int wake_irq;
+ int use_dma, dma_ch;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ int response_busy;
+ int context_loss;
+ int protect_card;
+ int reqs_blocked;
+ int use_reg;
+ int req_in_progress;
+ unsigned long clk_rate;
+ unsigned int flags;
+#define AUTO_CMD23 (1 << 0) /* Auto CMD23 support */
+#define HSMMC_SDIO_IRQ_ENABLED (1 << 1) /* SDIO irq enabled */
+#define HSMMC_WAKE_IRQ_ENABLED (1 << 2)
+ struct omap_hsmmc_next next_data;
+ struct omap_hsmmc_platform_data *pdata;
+
+ /* return MMC cover switch state, can be NULL if not supported.
+ *
+ * possible return values:
+ * 0 - closed
+ * 1 - open
+ */
+ int (*get_cover_state)(struct device *dev);
+
+ int (*card_detect)(struct device *dev);
+};
+
+struct omap_mmc_of_data {
+ u32 reg_offset;
+ u8 controller_flags;
+};
+
+static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);
+
+static int omap_hsmmc_card_detect(struct device *dev)
+{
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+
+ return mmc_gpio_get_cd(host->mmc);
+}
+
+static int omap_hsmmc_get_cover_state(struct device *dev)
+{
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+
+ return mmc_gpio_get_cd(host->mmc);
+}
+
+#ifdef CONFIG_REGULATOR
+
+static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
+{
+ struct omap_hsmmc_host *host =
+ platform_get_drvdata(to_platform_device(dev));
+ int ret = 0;
+
+ /*
+ * If we don't see a Vcc regulator, assume it's a fixed
+ * voltage always-on regulator.
+ */
+ if (!host->vcc)
+ return 0;
+
+ if (mmc_pdata(host)->before_set_reg)
+ mmc_pdata(host)->before_set_reg(dev, power_on, vdd);
+
+ if (host->pbias) {
+ if (host->pbias_enabled == 1) {
+ ret = regulator_disable(host->pbias);
+ if (!ret)
+ host->pbias_enabled = 0;
+ }
+ regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0);
+ }
+
+ /*
+ * Assume Vcc regulator is used only to power the card ... OMAP
+ * VDDS is used to power the pins, optionally with a transceiver to
+ * support cards using voltages other than VDDS (1.8V nominal). When a
+ * transceiver is used, DAT3..7 are muxed as transceiver control pins.
+ *
+ * In some cases this regulator won't support enable/disable;
+ * e.g. it's a fixed rail for a WLAN chip.
+ *
+ * In other cases vcc_aux switches interface power. Example, for
+ * eMMC cards it represents VccQ. Sometimes transceivers or SDIO
+ * chips/cards need an interface voltage rail too.
+ */
+ if (power_on) {
+ if (host->vcc)
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
+ /* Enable interface voltage rail, if needed */
+ if (ret == 0 && host->vcc_aux) {
+ ret = regulator_enable(host->vcc_aux);
+ if (ret < 0 && host->vcc)
+ ret = mmc_regulator_set_ocr(host->mmc,
+ host->vcc, 0);
+ }
+ } else {
+ /* Shut down the rail */
+ if (host->vcc_aux)
+ ret = regulator_disable(host->vcc_aux);
+ if (host->vcc) {
+ /* Then proceed to shut down the local regulator */
+ ret = mmc_regulator_set_ocr(host->mmc,
+ host->vcc, 0);
+ }
+ }
+
+ if (host->pbias) {
+ if (vdd <= VDD_165_195)
+ ret = regulator_set_voltage(host->pbias, VDD_1V8,
+ VDD_1V8);
+ else
+ ret = regulator_set_voltage(host->pbias, VDD_3V0,
+ VDD_3V0);
+ if (ret < 0)
+ goto error_set_power;
+
+ if (host->pbias_enabled == 0) {
+ ret = regulator_enable(host->pbias);
+ if (!ret)
+ host->pbias_enabled = 1;
+ }
+ }
+
+ if (mmc_pdata(host)->after_set_reg)
+ mmc_pdata(host)->after_set_reg(dev, power_on, vdd);
+
+error_set_power:
+ return ret;
+}
+
+static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
+{
+ struct regulator *reg;
+ int ocr_value = 0;
+
+ reg = devm_regulator_get(host->dev, "vmmc");
+ if (IS_ERR(reg)) {
+ dev_err(host->dev, "unable to get vmmc regulator %ld\n",
+ PTR_ERR(reg));
+ return PTR_ERR(reg);
+ } else {
+ host->vcc = reg;
+ ocr_value = mmc_regulator_get_ocrmask(reg);
+ if (!mmc_pdata(host)->ocr_mask) {
+ mmc_pdata(host)->ocr_mask = ocr_value;
+ } else {
+ if (!(mmc_pdata(host)->ocr_mask & ocr_value)) {
+ dev_err(host->dev, "ocrmask %x is not supported\n",
+ mmc_pdata(host)->ocr_mask);
+ mmc_pdata(host)->ocr_mask = 0;
+ return -EINVAL;
+ }
+ }
+ }
+ mmc_pdata(host)->set_power = omap_hsmmc_set_power;
+
+ /* Allow an aux regulator */
+ reg = devm_regulator_get_optional(host->dev, "vmmc_aux");
+ host->vcc_aux = IS_ERR(reg) ? NULL : reg;
+
+ reg = devm_regulator_get_optional(host->dev, "pbias");
+ host->pbias = IS_ERR(reg) ? NULL : reg;
+
+ /* For eMMC do not power off when not in sleep state */
+ if (mmc_pdata(host)->no_regulator_off_init)
+ return 0;
+ /*
+ * To disable boot_on regulator, enable regulator
+ * to increase usecount and then disable it.
+ */
+ if ((host->vcc && regulator_is_enabled(host->vcc) > 0) ||
+ (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) {
+ int vdd = ffs(mmc_pdata(host)->ocr_mask) - 1;
+
+ mmc_pdata(host)->set_power(host->dev, 1, vdd);
+ mmc_pdata(host)->set_power(host->dev, 0, 0);
+ }
+
+ return 0;
+}
+
+static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
+{
+ mmc_pdata(host)->set_power = NULL;
+}
+
+static inline int omap_hsmmc_have_reg(void)
+{
+ return 1;
+}
+
+#else
+
+static inline int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
+{
+ return -EINVAL;
+}
+
+static inline void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
+{
+}
+
+static inline int omap_hsmmc_have_reg(void)
+{
+ return 0;
+}
+
+#endif
+
+static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id);
+
+static int omap_hsmmc_gpio_init(struct mmc_host *mmc,
+ struct omap_hsmmc_host *host,
+ struct omap_hsmmc_platform_data *pdata)
+{
+ int ret;
+
+ if (gpio_is_valid(pdata->gpio_cod)) {
+ ret = mmc_gpio_request_cd(mmc, pdata->gpio_cod, 0);
+ if (ret)
+ return ret;
+
+ host->get_cover_state = omap_hsmmc_get_cover_state;
+ mmc_gpio_set_cd_isr(mmc, omap_hsmmc_cover_irq);
+ } else if (gpio_is_valid(pdata->gpio_cd)) {
+ ret = mmc_gpio_request_cd(mmc, pdata->gpio_cd, 0);
+ if (ret)
+ return ret;
+
+ host->card_detect = omap_hsmmc_card_detect;
+ }
+
+ if (gpio_is_valid(pdata->gpio_wp)) {
+ ret = mmc_gpio_request_ro(mmc, pdata->gpio_wp);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Start clock to the card
+ */
+static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host)
+{
+ OMAP_HSMMC_WRITE(host->base, SYSCTL,
+ OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
+}
+
+/*
+ * Stop clock to the card
+ */
+static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
+{
+ OMAP_HSMMC_WRITE(host->base, SYSCTL,
+ OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
+ if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0)
+ dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
+}
+
+static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,
+ struct mmc_command *cmd)
+{
+ u32 irq_mask = INT_EN_MASK;
+ unsigned long flags;
+
+ if (host->use_dma)
+ irq_mask &= ~(BRR_EN | BWR_EN);
+
+ /* Disable timeout for erases */
+ if (cmd->opcode == MMC_ERASE)
+ irq_mask &= ~DTO_EN;
+
+ spin_lock_irqsave(&host->irq_lock, flags);
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+ OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
+
+ /* latch pending CIRQ, but don't signal MMC core */
+ if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
+ irq_mask |= CIRQ_EN;
+ OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
+}
+
+static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
+{
+ u32 irq_mask = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->irq_lock, flags);
+ /* no transfer running but need to keep cirq if enabled */
+ if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
+ irq_mask |= CIRQ_EN;
+ OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
+ OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
+}
+
+/* Calculate divisor for the given clock frequency */
+static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
+{
+ u16 dsor = 0;
+
+ if (ios->clock) {
+ dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
+ if (dsor > CLKD_MAX)
+ dsor = CLKD_MAX;
+ }
+
+ return dsor;
+}
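+
+/*
+ * For illustration (an assumed 96 MHz fclk): a 400 kHz request gives
+ * DIV_ROUND_UP(96000000, 400000) = 240, well under CLKD_MAX, so the
+ * card clock becomes exactly 96 MHz / 240 = 400 kHz. Rounding up
+ * guarantees the divided clock never exceeds ios->clock.
+ */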
+
+static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
+{
+ struct mmc_ios *ios = &host->mmc->ios;
+ unsigned long regval;
+ unsigned long timeout;
+ unsigned long clkdiv;
+
+ dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);
+
+ omap_hsmmc_stop_clock(host);
+
+ regval = OMAP_HSMMC_READ(host->base, SYSCTL);
+ regval = regval & ~(CLKD_MASK | DTO_MASK);
+ clkdiv = calc_divisor(host, ios);
+ regval = regval | (clkdiv << 6) | (DTO << 16);
+ OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
+ OMAP_HSMMC_WRITE(host->base, SYSCTL,
+ OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
+
+ /* Wait till the ICS bit is set */
+ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
+ while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
+ && time_before(jiffies, timeout))
+ cpu_relax();
+
+ /*
+ * Enable High-Speed Support
+ * Pre-Requisites
+ * - Controller should support High-Speed-Enable Bit
+ * - Controller should not be using DDR Mode
+ * - Controller should advertise that it supports High Speed
+ * in capabilities register
+ * - MMC/SD clock coming out of controller > 25MHz
+ */
+ if ((mmc_pdata(host)->features & HSMMC_HAS_HSPE_SUPPORT) &&
+ (ios->timing != MMC_TIMING_MMC_DDR52) &&
+ (ios->timing != MMC_TIMING_UHS_DDR50) &&
+ ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
+ regval = OMAP_HSMMC_READ(host->base, HCTL);
+ if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
+ regval |= HSPE;
+ else
+ regval &= ~HSPE;
+
+ OMAP_HSMMC_WRITE(host->base, HCTL, regval);
+ }
+
+ omap_hsmmc_start_clock(host);
+}
+
+static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
+{
+ struct mmc_ios *ios = &host->mmc->ios;
+ u32 con;
+
+ con = OMAP_HSMMC_READ(host->base, CON);
+ if (ios->timing == MMC_TIMING_MMC_DDR52 ||
+ ios->timing == MMC_TIMING_UHS_DDR50)
+ con |= DDR; /* configure in DDR mode */
+ else
+ con &= ~DDR;
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_8:
+ OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
+ break;
+ case MMC_BUS_WIDTH_4:
+ OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
+ break;
+ case MMC_BUS_WIDTH_1:
+ OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
+ break;
+ }
+}
+
+static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
+{
+ struct mmc_ios *ios = &host->mmc->ios;
+ u32 con;
+
+ con = OMAP_HSMMC_READ(host->base, CON);
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ OMAP_HSMMC_WRITE(host->base, CON, con | OD);
+ else
+ OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * Restore the MMC host context, if it was lost as a result of a
+ * power state change.
+ */
+static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
+{
+ struct mmc_ios *ios = &host->mmc->ios;
+ u32 hctl, capa;
+ unsigned long timeout;
+
+ if (host->con == OMAP_HSMMC_READ(host->base, CON) &&
+ host->hctl == OMAP_HSMMC_READ(host->base, HCTL) &&
+ host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) &&
+ host->capa == OMAP_HSMMC_READ(host->base, CAPA))
+ return 0;
+
+ host->context_loss++;
+
+ if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
+ if (host->power_mode != MMC_POWER_OFF &&
+ (1 << ios->vdd) <= MMC_VDD_23_24)
+ hctl = SDVS18;
+ else
+ hctl = SDVS30;
+ capa = VS30 | VS18;
+ } else {
+ hctl = SDVS18;
+ capa = VS18;
+ }
+
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
+ hctl |= IWE;
+
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) | hctl);
+
+ OMAP_HSMMC_WRITE(host->base, CAPA,
+ OMAP_HSMMC_READ(host->base, CAPA) | capa);
+
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
+
+ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
+ while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP
+ && time_before(jiffies, timeout))
+ ;
+
+ OMAP_HSMMC_WRITE(host->base, ISE, 0);
+ OMAP_HSMMC_WRITE(host->base, IE, 0);
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+
+ /* Do not initialize card-specific things if the power is off */
+ if (host->power_mode == MMC_POWER_OFF)
+ goto out;
+
+ omap_hsmmc_set_bus_width(host);
+
+ omap_hsmmc_set_clock(host);
+
+ omap_hsmmc_set_bus_mode(host);
+
+out:
+ dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n",
+ host->context_loss);
+ return 0;
+}
+
+/*
+ * Save the MMC host context (store the number of power state changes so far).
+ */
+static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
+{
+ host->con = OMAP_HSMMC_READ(host->base, CON);
+ host->hctl = OMAP_HSMMC_READ(host->base, HCTL);
+ host->sysctl = OMAP_HSMMC_READ(host->base, SYSCTL);
+ host->capa = OMAP_HSMMC_READ(host->base, CAPA);
+}
+
+#else
+
+static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
+{
+ return 0;
+}
+
+static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
+{
+}
+
+#endif
+
+/*
+ * Send the init stream sequence to the card
+ * before sending the IDLE command
+ */
+static void send_init_stream(struct omap_hsmmc_host *host)
+{
+ int reg = 0;
+ unsigned long timeout;
+
+ if (host->protect_card)
+ return;
+
+ disable_irq(host->irq);
+
+ OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+ OMAP_HSMMC_WRITE(host->base, CON,
+ OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
+ OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
+
+ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
+ while ((reg != CC_EN) && time_before(jiffies, timeout))
+ reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN;
+
+ OMAP_HSMMC_WRITE(host->base, CON,
+ OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);
+
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+ OMAP_HSMMC_READ(host->base, STAT);
+
+ enable_irq(host->irq);
+}
+
+static inline
+int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
+{
+ int r = 1;
+
+ if (host->get_cover_state)
+ r = host->get_cover_state(host->dev);
+ return r;
+}
+
+static ssize_t
+omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ return sprintf(buf, "%s\n",
+ omap_hsmmc_cover_is_closed(host) ? "closed" : "open");
+}
+
+static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL);
+
+static ssize_t
+omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ return sprintf(buf, "%s\n", mmc_pdata(host)->name);
+}
+
+static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);
+
+/*
+ * Configure the response type and send the cmd.
+ */
+static void
+omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
+ struct mmc_data *data)
+{
+ int cmdreg = 0, resptype = 0, cmdtype = 0;
+
+ dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
+ mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
+ host->cmd = cmd;
+
+ omap_hsmmc_enable_irq(host, cmd);
+
+ host->response_busy = 0;
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136)
+ resptype = 1;
+ else if (cmd->flags & MMC_RSP_BUSY) {
+ resptype = 3;
+ host->response_busy = 1;
+ } else
+ resptype = 2;
+ }
+
+	/*
+	 * Unlike the OMAP1 controller, the cmdtype does not seem to be based
+	 * on ac, bc, adtc, bcr. Only commands ending an open-ended transfer
+	 * need a value of 0x3; the rest use 0x0.
+	 */
+ if (cmd == host->mrq->stop)
+ cmdtype = 0x3;
+
+ cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
+
+ if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) &&
+ host->mrq->sbc) {
+ cmdreg |= ACEN_ACMD23;
+ OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg);
+ }
+ if (data) {
+ cmdreg |= DP_SELECT | MSBS | BCE;
+ if (data->flags & MMC_DATA_READ)
+ cmdreg |= DDIR;
+ else
+ cmdreg &= ~(DDIR);
+ }
+
+ if (host->use_dma)
+ cmdreg |= DMAE;
+
+ host->req_in_progress = 1;
+
+ OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
+ OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
+}
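+
+/*
+ * Example cmdreg encoding (an assumed request, for illustration): a
+ * single-block read, CMD17 with an R1 response and DMA, yields
+ * (17 << 24) | (2 << 16) = 0x11020000, plus DP_SELECT | MSBS | BCE |
+ * DDIR | DMAE for the data phase, i.e. cmdreg = 0x11220033.
+ */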
+
+static int
+omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
+{
+ if (data->flags & MMC_DATA_WRITE)
+ return DMA_TO_DEVICE;
+ else
+ return DMA_FROM_DEVICE;
+}
+
+static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
+ struct mmc_data *data)
+{
+ return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
+}
+
+static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
+{
+ int dma_ch;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->irq_lock, flags);
+ host->req_in_progress = 0;
+ dma_ch = host->dma_ch;
+ spin_unlock_irqrestore(&host->irq_lock, flags);
+
+ omap_hsmmc_disable_irq(host);
+ /* Do not complete the request if DMA is still in progress */
+ if (mrq->data && host->use_dma && dma_ch != -1)
+ return;
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
+}
+
+/*
+ * Notify the transfer complete to MMC core
+ */
+static void
+omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
+{
+ if (!data) {
+ struct mmc_request *mrq = host->mrq;
+
+ /* TC before CC from CMD6 - don't know why, but it happens */
+ if (host->cmd && host->cmd->opcode == 6 &&
+ host->response_busy) {
+ host->response_busy = 0;
+ return;
+ }
+
+ omap_hsmmc_request_done(host, mrq);
+ return;
+ }
+
+ host->data = NULL;
+
+ if (!data->error)
+ data->bytes_xfered += data->blocks * (data->blksz);
+ else
+ data->bytes_xfered = 0;
+
+ if (data->stop && (data->error || !host->mrq->sbc))
+ omap_hsmmc_start_command(host, data->stop, NULL);
+ else
+ omap_hsmmc_request_done(host, data->mrq);
+}
+
+/*
+ * Notify the core about command completion
+ */
+static void
+omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
+{
+ if (host->mrq->sbc && (host->cmd == host->mrq->sbc) &&
+ !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) {
+ host->cmd = NULL;
+ omap_hsmmc_start_dma_transfer(host);
+ omap_hsmmc_start_command(host, host->mrq->cmd,
+ host->mrq->data);
+ return;
+ }
+
+ host->cmd = NULL;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ /* response type 2 */
+ cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10);
+ cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32);
+ cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54);
+ cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76);
+ } else {
+ /* response types 1, 1b, 3, 4, 5, 6 */
+ cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
+ }
+ }
+ if ((host->data == NULL && !host->response_busy) || cmd->error)
+ omap_hsmmc_request_done(host, host->mrq);
+}
+
+/*
+ * DMA clean up for command errors
+ */
+static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
+{
+ int dma_ch;
+ unsigned long flags;
+
+ host->data->error = errno;
+
+ spin_lock_irqsave(&host->irq_lock, flags);
+ dma_ch = host->dma_ch;
+ host->dma_ch = -1;
+ spin_unlock_irqrestore(&host->irq_lock, flags);
+
+ if (host->use_dma && dma_ch != -1) {
+ struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
+
+ dmaengine_terminate_all(chan);
+ dma_unmap_sg(chan->device->dev,
+ host->data->sg, host->data->sg_len,
+ omap_hsmmc_get_dma_dir(host, host->data));
+
+ host->data->host_cookie = 0;
+ }
+ host->data = NULL;
+}
+
+/*
+ * Readable error output
+ */
+#ifdef CONFIG_MMC_DEBUG
+static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
+{
+	/* --- means a reserved bit with no definition in the documentation */
+ static const char *omap_hsmmc_status_bits[] = {
+ "CC" , "TC" , "BGE", "---", "BWR" , "BRR" , "---" , "---" ,
+ "CIRQ", "OBI" , "---", "---", "---" , "---" , "---" , "ERRI",
+ "CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" ,
+ "ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---"
+ };
+ char res[256];
+ char *buf = res;
+ int len, i;
+
+ len = sprintf(buf, "MMC IRQ 0x%x :", status);
+ buf += len;
+
+ for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++)
+ if (status & (1 << i)) {
+ len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]);
+ buf += len;
+ }
+
+ dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
+}
+#else
+static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
+ u32 status)
+{
+}
+#endif /* CONFIG_MMC_DEBUG */
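+
+/*
+ * Sample of the debug output above (a hypothetical status value): for
+ * status = 0x18001, bits CC (0), ERRI (15) and CTO (16) are set and
+ * the line reads "MMC IRQ 0x18001 : CC ERRI CTO".
+ */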
+
+/*
+ * MMC controller internal state machines reset
+ *
+ * Used to reset command or data internal state machines, using respectively
+ * SRC or SRD bit of SYSCTL register
+ * Can be called from interrupt context
+ */
+static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
+ unsigned long bit)
+{
+ unsigned long i = 0;
+ unsigned long limit = MMC_TIMEOUT_US;
+
+ OMAP_HSMMC_WRITE(host->base, SYSCTL,
+ OMAP_HSMMC_READ(host->base, SYSCTL) | bit);
+
+ /*
+ * OMAP4 ES2 and greater has an updated reset logic.
+ * Monitor a 0->1 transition first
+ */
+ if (mmc_pdata(host)->features & HSMMC_HAS_UPDATED_RESET) {
+ while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
+ && (i++ < limit))
+ udelay(1);
+ }
+ i = 0;
+
+ while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
+ (i++ < limit))
+ udelay(1);
+
+ if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)
+ dev_err(mmc_dev(host->mmc),
+ "Timeout waiting on controller reset in %s\n",
+ __func__);
+}
+
+static void hsmmc_command_incomplete(struct omap_hsmmc_host *host,
+ int err, int end_cmd)
+{
+ if (end_cmd) {
+ omap_hsmmc_reset_controller_fsm(host, SRC);
+ if (host->cmd)
+ host->cmd->error = err;
+ }
+
+ if (host->data) {
+ omap_hsmmc_reset_controller_fsm(host, SRD);
+ omap_hsmmc_dma_cleanup(host, err);
+ } else if (host->mrq && host->mrq->cmd)
+ host->mrq->cmd->error = err;
+}
+
+static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
+{
+ struct mmc_data *data;
+ int end_cmd = 0, end_trans = 0;
+ int error = 0;
+
+ data = host->data;
+ dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
+
+ if (status & ERR_EN) {
+ omap_hsmmc_dbg_report_irq(host, status);
+
+ if (status & (CTO_EN | CCRC_EN))
+ end_cmd = 1;
+ if (status & (CTO_EN | DTO_EN))
+ hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
+ else if (status & (CCRC_EN | DCRC_EN))
+ hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
+
+ if (status & ACE_EN) {
+ u32 ac12;
+ ac12 = OMAP_HSMMC_READ(host->base, AC12);
+ if (!(ac12 & ACNE) && host->mrq->sbc) {
+ end_cmd = 1;
+ if (ac12 & ACTO)
+ error = -ETIMEDOUT;
+ else if (ac12 & (ACCE | ACEB | ACIE))
+ error = -EILSEQ;
+ host->mrq->sbc->error = error;
+ hsmmc_command_incomplete(host, error, end_cmd);
+ }
+ dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
+ }
+ if (host->data || host->response_busy) {
+ end_trans = !end_cmd;
+ host->response_busy = 0;
+ }
+ }
+
+ OMAP_HSMMC_WRITE(host->base, STAT, status);
+ if (end_cmd || ((status & CC_EN) && host->cmd))
+ omap_hsmmc_cmd_done(host, host->cmd);
+ if ((end_trans || (status & TC_EN)) && host->mrq)
+ omap_hsmmc_xfer_done(host, data);
+}
+
+/*
+ * MMC controller IRQ handler
+ */
+static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
+{
+ struct omap_hsmmc_host *host = dev_id;
+ int status;
+
+ status = OMAP_HSMMC_READ(host->base, STAT);
+ while (status & (INT_EN_MASK | CIRQ_EN)) {
+ if (host->req_in_progress)
+ omap_hsmmc_do_irq(host, status);
+
+ if (status & CIRQ_EN)
+ mmc_signal_sdio_irq(host->mmc);
+
+ /* Flush posted write */
+ status = OMAP_HSMMC_READ(host->base, STAT);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t omap_hsmmc_wake_irq(int irq, void *dev_id)
+{
+ struct omap_hsmmc_host *host = dev_id;
+
+ /* cirq is level triggered, disable to avoid infinite loop */
+ spin_lock(&host->irq_lock);
+ if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
+ disable_irq_nosync(host->wake_irq);
+ host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
+ }
+ spin_unlock(&host->irq_lock);
+ pm_request_resume(host->dev); /* no use counter */
+
+ return IRQ_HANDLED;
+}
+
+static void set_sd_bus_power(struct omap_hsmmc_host *host)
+{
+ unsigned long i;
+
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
+ for (i = 0; i < loops_per_jiffy; i++) {
+ if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP)
+ break;
+ cpu_relax();
+ }
+}
+
+/*
+ * Switch MMC interface voltage ... only relevant for MMC1.
+ *
+ * MMC2 and MMC3 use fixed 1.8V levels, and maybe a transceiver.
+ * The MMC2 transceiver controls are used instead of DAT4..DAT7.
+ * Some chips, like eMMC ones, use internal transceivers.
+ */
+static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
+{
+ u32 reg_val = 0;
+ int ret;
+
+ /* Disable the clocks */
+ pm_runtime_put_sync(host->dev);
+ if (host->dbclk)
+ clk_disable_unprepare(host->dbclk);
+
+ /* Turn the power off */
+ ret = mmc_pdata(host)->set_power(host->dev, 0, 0);
+
+ /* Turn the power ON with given VDD 1.8 or 3.0v */
+ if (!ret)
+ ret = mmc_pdata(host)->set_power(host->dev, 1, vdd);
+ pm_runtime_get_sync(host->dev);
+ if (host->dbclk)
+ clk_prepare_enable(host->dbclk);
+
+ if (ret != 0)
+ goto err;
+
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
+ reg_val = OMAP_HSMMC_READ(host->base, HCTL);
+
+ /*
+ * If a MMC dual voltage card is detected, the set_ios fn calls
+ * this fn with VDD bit set for 1.8V. Upon card removal from the
+ * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
+ *
+ * Cope with a bit of slop in the range ... per data sheets:
+ * - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max,
+ * but recommended values are 1.71V to 1.89V
+ * - "3.0V" for vdds_mmc1/vdds_mmc1a can be up to 3.5V max,
+ * but recommended values are 2.7V to 3.3V
+ *
+ * Board setup code shouldn't permit anything very out-of-range.
+ * TWL4030-family VMMC1 and VSIM regulators are fine (avoiding the
+ * middle range) but VSIM can't power DAT4..DAT7 at more than 3V.
+ */
+ if ((1 << vdd) <= MMC_VDD_23_24)
+ reg_val |= SDVS18;
+ else
+ reg_val |= SDVS30;
+
+ OMAP_HSMMC_WRITE(host->base, HCTL, reg_val);
+ set_sd_bus_power(host);
+
+ return 0;
+err:
+ dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n");
+ return ret;
+}
+
+/* Protect the card while the cover is open */
+static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
+{
+ if (!host->get_cover_state)
+ return;
+
+ host->reqs_blocked = 0;
+ if (host->get_cover_state(host->dev)) {
+ if (host->protect_card) {
+ dev_info(host->dev, "%s: cover is closed, "
+ "card is now accessible\n",
+ mmc_hostname(host->mmc));
+ host->protect_card = 0;
+ }
+ } else {
+ if (!host->protect_card) {
+ dev_info(host->dev, "%s: cover is open, "
+ "card is now inaccessible\n",
+ mmc_hostname(host->mmc));
+ host->protect_card = 1;
+ }
+ }
+}
+
+/*
+ * irq handler when (cell-phone) cover is mounted/removed
+ */
+static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id)
+{
+ struct omap_hsmmc_host *host = dev_id;
+
+ sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
+
+ omap_hsmmc_protect_card(host);
+ mmc_detect_change(host->mmc, (HZ * 200) / 1000);
+ return IRQ_HANDLED;
+}
+
+static void omap_hsmmc_dma_callback(void *param)
+{
+ struct omap_hsmmc_host *host = param;
+ struct dma_chan *chan;
+ struct mmc_data *data;
+ int req_in_progress;
+
+ spin_lock_irq(&host->irq_lock);
+ if (host->dma_ch < 0) {
+ spin_unlock_irq(&host->irq_lock);
+ return;
+ }
+
+ data = host->mrq->data;
+ chan = omap_hsmmc_get_dma_chan(host, data);
+ if (!data->host_cookie)
+ dma_unmap_sg(chan->device->dev,
+ data->sg, data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
+
+ req_in_progress = host->req_in_progress;
+ host->dma_ch = -1;
+ spin_unlock_irq(&host->irq_lock);
+
+ /* If DMA has finished after TC, complete the request */
+ if (!req_in_progress) {
+ struct mmc_request *mrq = host->mrq;
+
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
+ }
+}
+
+static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
+ struct mmc_data *data,
+ struct omap_hsmmc_next *next,
+ struct dma_chan *chan)
+{
+ int dma_len;
+
+ if (!next && data->host_cookie &&
+ data->host_cookie != host->next_data.cookie) {
+ dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d"
+ " host->next_data.cookie %d\n",
+ __func__, data->host_cookie, host->next_data.cookie);
+ data->host_cookie = 0;
+ }
+
+ /* Check if next job is already prepared */
+ if (next || data->host_cookie != host->next_data.cookie) {
+ dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
+
+ } else {
+ dma_len = host->next_data.dma_len;
+ host->next_data.dma_len = 0;
+ }
+
+
+ if (dma_len == 0)
+ return -EINVAL;
+
+ if (next) {
+ next->dma_len = dma_len;
+ data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
+ } else
+ host->dma_len = dma_len;
+
+ return 0;
+}
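+
+/*
+ * The cookie handshake above works as follows (a sketch of one round
+ * trip): omap_hsmmc_pre_req() calls this with next set, which maps
+ * the sg list and stamps data->host_cookie with the bumped
+ * next_data.cookie; when the request itself arrives,
+ * omap_hsmmc_setup_dma_transfer() calls this with next == NULL, the
+ * cookies match, and the saved next_data.dma_len is reused instead of
+ * mapping the sg list again.
+ */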
+
+/*
+ * Routine to configure and start DMA for the MMC card
+ */
+static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
+ struct mmc_request *req)
+{
+ struct dma_slave_config cfg;
+ struct dma_async_tx_descriptor *tx;
+ int ret = 0, i;
+ struct mmc_data *data = req->data;
+ struct dma_chan *chan;
+
+ /* Sanity check: all the SG entries must be aligned by block size. */
+ for (i = 0; i < data->sg_len; i++) {
+ struct scatterlist *sgl;
+
+ sgl = data->sg + i;
+ if (sgl->length % data->blksz)
+ return -EINVAL;
+ }
+	if ((data->blksz % 4) != 0)
+		/*
+		 * REVISIT: the MMC buffer increments only when the MSB is
+		 * written. Return an error for a blksz that is not a
+		 * multiple of four.
+		 */
+		return -EINVAL;
+
+ BUG_ON(host->dma_ch != -1);
+
+ chan = omap_hsmmc_get_dma_chan(host, data);
+
+ cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
+ cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = data->blksz / 4;
+ cfg.dst_maxburst = data->blksz / 4;
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret)
+ return ret;
+
+ ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
+ if (ret)
+ return ret;
+
+ tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx) {
+ dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+ /* FIXME: cleanup */
+ return -1;
+ }
+
+ tx->callback = omap_hsmmc_dma_callback;
+ tx->callback_param = host;
+
+ /* Does not fail */
+ dmaengine_submit(tx);
+
+ host->dma_ch = 1;
+
+ return 0;
+}
+
+static void set_data_timeout(struct omap_hsmmc_host *host,
+ unsigned int timeout_ns,
+ unsigned int timeout_clks)
+{
+ unsigned int timeout, cycle_ns;
+ uint32_t reg, clkd, dto = 0;
+
+ reg = OMAP_HSMMC_READ(host->base, SYSCTL);
+ clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
+ if (clkd == 0)
+ clkd = 1;
+
+ cycle_ns = 1000000000 / (host->clk_rate / clkd);
+ timeout = timeout_ns / cycle_ns;
+ timeout += timeout_clks;
+ if (timeout) {
+ while ((timeout & 0x80000000) == 0) {
+ dto += 1;
+ timeout <<= 1;
+ }
+ dto = 31 - dto;
+ timeout <<= 1;
+ if (timeout && dto)
+ dto += 1;
+ if (dto >= 13)
+ dto -= 13;
+ else
+ dto = 0;
+ if (dto > 14)
+ dto = 14;
+ }
+
+ reg &= ~DTO_MASK;
+ reg |= dto << DTO_SHIFT;
+ OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
+}
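+
+/*
+ * Worked example of the DTO encoding above (illustrative numbers):
+ * with clk_rate = 96 MHz and clkd = 2, cycle_ns = 20, so a 100 ms
+ * timeout_ns gives 5000000 cycles. Its highest set bit is 22 and
+ * lower bits remain, so dto rounds up to 23; after the bias of 13
+ * the field is written as 10, i.e. the controller times out after
+ * 2^(10 + 13) = 8388608 clocks, about 175 ms at 48 MHz.
+ */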
+
+static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
+{
+ struct mmc_request *req = host->mrq;
+ struct dma_chan *chan;
+
+ if (!req->data)
+ return;
+ OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
+ | (req->data->blocks << 16));
+ set_data_timeout(host, req->data->timeout_ns,
+ req->data->timeout_clks);
+ chan = omap_hsmmc_get_dma_chan(host, req->data);
+ dma_async_issue_pending(chan);
+}
+
+/*
+ * Configure block length for MMC/SD cards and initiate the transfer.
+ */
+static int
+omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
+{
+ int ret;
+ host->data = req->data;
+
+ if (req->data == NULL) {
+ OMAP_HSMMC_WRITE(host->base, BLK, 0);
+ /*
+ * Set an arbitrary 100ms data timeout for commands with
+ * busy signal.
+ */
+ if (req->cmd->flags & MMC_RSP_BUSY)
+ set_data_timeout(host, 100000000U, 0);
+ return 0;
+ }
+
+ if (host->use_dma) {
+ ret = omap_hsmmc_setup_dma_transfer(host, req);
+ if (ret != 0) {
+ dev_err(mmc_dev(host->mmc), "MMC start dma failure\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (host->use_dma && data->host_cookie) {
+ struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
+
+ dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
+ data->host_cookie = 0;
+ }
+}
+
+static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ if (mrq->data->host_cookie) {
+ mrq->data->host_cookie = 0;
+		return;
+ }
+
+ if (host->use_dma) {
+ struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
+
+ if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
+ &host->next_data, c))
+ mrq->data->host_cookie = 0;
+ }
+}
+
+/*
+ * Request function, for read/write operations
+ */
+static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+ int err;
+
+ BUG_ON(host->req_in_progress);
+ BUG_ON(host->dma_ch != -1);
+ pm_runtime_get_sync(host->dev);
+ if (host->protect_card) {
+ if (host->reqs_blocked < 3) {
+ /*
+ * Ensure the controller is left in a consistent
+ * state by resetting the command and data state
+ * machines.
+ */
+ omap_hsmmc_reset_controller_fsm(host, SRD);
+ omap_hsmmc_reset_controller_fsm(host, SRC);
+ host->reqs_blocked += 1;
+ }
+ req->cmd->error = -EBADF;
+ if (req->data)
+ req->data->error = -EBADF;
+ req->cmd->retries = 0;
+ mmc_request_done(mmc, req);
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
+ return;
+ } else if (host->reqs_blocked)
+ host->reqs_blocked = 0;
+ WARN_ON(host->mrq != NULL);
+ host->mrq = req;
+ host->clk_rate = clk_get_rate(host->fclk);
+ err = omap_hsmmc_prepare_data(host, req);
+ if (err) {
+ req->cmd->error = err;
+ if (req->data)
+ req->data->error = err;
+ host->mrq = NULL;
+ mmc_request_done(mmc, req);
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
+ return;
+ }
+ if (req->sbc && !(host->flags & AUTO_CMD23)) {
+ omap_hsmmc_start_command(host, req->sbc, NULL);
+ return;
+ }
+
+ omap_hsmmc_start_dma_transfer(host);
+ omap_hsmmc_start_command(host, req->cmd, req->data);
+}
+
+/* Routine to configure clock values. Exposed API to core */
+static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+ int do_send_init_stream = 0;
+
+ pm_runtime_get_sync(host->dev);
+
+ if (ios->power_mode != host->power_mode) {
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ mmc_pdata(host)->set_power(host->dev, 0, 0);
+ break;
+ case MMC_POWER_UP:
+ mmc_pdata(host)->set_power(host->dev, 1, ios->vdd);
+ break;
+ case MMC_POWER_ON:
+ do_send_init_stream = 1;
+ break;
+ }
+ host->power_mode = ios->power_mode;
+ }
+
+ /* FIXME: set registers based only on changes to ios */
+
+ omap_hsmmc_set_bus_width(host);
+
+ if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
+ /* Only MMC1 can interface at 3V without some flavor
+ * of external transceiver; but they all handle 1.8V.
+ */
+ if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
+ (ios->vdd == DUAL_VOLT_OCR_BIT)) {
+ /*
+ * The mmc_select_voltage fn of the core does
+ * not seem to set the power_mode to
+ * MMC_POWER_UP upon recalculating the voltage.
+ * vdd 1.8v.
+ */
+ if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0)
+ dev_dbg(mmc_dev(host->mmc),
+ "Switch operation failed\n");
+ }
+ }
+
+ omap_hsmmc_set_clock(host);
+
+ if (do_send_init_stream)
+ send_init_stream(host);
+
+ omap_hsmmc_set_bus_mode(host);
+
+ pm_runtime_put_autosuspend(host->dev);
+}
+
+static int omap_hsmmc_get_cd(struct mmc_host *mmc)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ if (!host->card_detect)
+ return -ENOSYS;
+ return host->card_detect(host->dev);
+}
+
+static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ if (mmc_pdata(host)->init_card)
+ mmc_pdata(host)->init_card(card);
+}
+
+static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+ u32 irq_mask, con;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->irq_lock, flags);
+
+ con = OMAP_HSMMC_READ(host->base, CON);
+ irq_mask = OMAP_HSMMC_READ(host->base, ISE);
+ if (enable) {
+ host->flags |= HSMMC_SDIO_IRQ_ENABLED;
+ irq_mask |= CIRQ_EN;
+ con |= CTPL | CLKEXTFREE;
+ } else {
+ host->flags &= ~HSMMC_SDIO_IRQ_ENABLED;
+ irq_mask &= ~CIRQ_EN;
+ con &= ~(CTPL | CLKEXTFREE);
+ }
+ OMAP_HSMMC_WRITE(host->base, CON, con);
+ OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
+
+	/*
+	 * When enabling with a request in progress, defer the ISE write
+	 * so it piggy-backs on that request; a disable always takes
+	 * effect immediately.
+	 */
+ if (!host->req_in_progress || !enable)
+ OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
+
+ /* flush posted write */
+ OMAP_HSMMC_READ(host->base, IE);
+
+ spin_unlock_irqrestore(&host->irq_lock, flags);
+}
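+
+/*
+ * Note (illustrative): IE selects which events are latched into STAT,
+ * while ISE selects which of those assert the interrupt line.  That is
+ * why only the ISE write is deferred above while IE is always updated.
+ */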
+
+static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ int ret;
+
+	/*
+	 * On omaps with a wake-up path, wakeirq is the irq from pinctrl;
+	 * on other omaps it comes from a GPIO (the DAT1 line remuxed to a
+	 * gpio). wakeirq is needed to detect an sdio irq while in runtime
+	 * suspend with the functional clock disabled.
+	 */
+ if (!host->dev->of_node || !host->wake_irq)
+ return -ENODEV;
+
+ /* Prevent auto-enabling of IRQ */
+ irq_set_status_flags(host->wake_irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(host->dev, host->wake_irq, omap_hsmmc_wake_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ mmc_hostname(mmc), host);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
+ goto err;
+ }
+
+ /*
+ * Some omaps don't have wake-up path from deeper idle states
+ * and need to remux SDIO DAT1 to GPIO for wake-up from idle.
+ */
+ if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
+ struct pinctrl *p = devm_pinctrl_get(host->dev);
+		if (IS_ERR(p)) {
+			ret = PTR_ERR(p);
+ goto err_free_irq;
+ }
+ if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
+ dev_info(host->dev, "missing default pinctrl state\n");
+ devm_pinctrl_put(p);
+ ret = -EINVAL;
+ goto err_free_irq;
+ }
+
+ if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
+ dev_info(host->dev, "missing idle pinctrl state\n");
+ devm_pinctrl_put(p);
+ ret = -EINVAL;
+ goto err_free_irq;
+ }
+ devm_pinctrl_put(p);
+ }
+
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) | IWE);
+ return 0;
+
+err_free_irq:
+ devm_free_irq(host->dev, host->wake_irq, host);
+err:
+ dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
+ host->wake_irq = 0;
+ return ret;
+}
+
+static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
+{
+ u32 hctl, capa, value;
+
+ /* Only MMC1 supports 3.0V */
+ if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
+ hctl = SDVS30;
+ capa = VS30 | VS18;
+ } else {
+ hctl = SDVS18;
+ capa = VS18;
+ }
+
+ value = OMAP_HSMMC_READ(host->base, HCTL) & ~SDVS_MASK;
+ OMAP_HSMMC_WRITE(host->base, HCTL, value | hctl);
+
+ value = OMAP_HSMMC_READ(host->base, CAPA);
+ OMAP_HSMMC_WRITE(host->base, CAPA, value | capa);
+
+ /* Set SD bus power bit */
+ set_sd_bus_power(host);
+}
+
+static int omap_hsmmc_multi_io_quirk(struct mmc_card *card,
+ unsigned int direction, int blk_size)
+{
+ /* This controller can't do multiblock reads due to hw bugs */
+ if (direction == MMC_DATA_READ)
+ return 1;
+
+ return blk_size;
+}
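+
+/*
+ * Illustrative note: the value returned here caps the block count the
+ * core issues per transfer, so returning 1 above degrades affected
+ * reads to single-block commands while writes keep their block count.
+ */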
+
+static struct mmc_host_ops omap_hsmmc_ops = {
+ .post_req = omap_hsmmc_post_req,
+ .pre_req = omap_hsmmc_pre_req,
+ .request = omap_hsmmc_request,
+ .set_ios = omap_hsmmc_set_ios,
+ .get_cd = omap_hsmmc_get_cd,
+ .get_ro = mmc_gpio_get_ro,
+ .init_card = omap_hsmmc_init_card,
+ .enable_sdio_irq = omap_hsmmc_enable_sdio_irq,
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
+{
+ struct mmc_host *mmc = s->private;
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ seq_printf(s, "mmc%d:\n", mmc->index);
+ seq_printf(s, "sdio irq mode\t%s\n",
+ (mmc->caps & MMC_CAP_SDIO_IRQ) ? "interrupt" : "polling");
+
+ if (mmc->caps & MMC_CAP_SDIO_IRQ) {
+ seq_printf(s, "sdio irq \t%s\n",
+ (host->flags & HSMMC_SDIO_IRQ_ENABLED) ? "enabled"
+ : "disabled");
+ }
+ seq_printf(s, "ctx_loss:\t%d\n", host->context_loss);
+
+ pm_runtime_get_sync(host->dev);
+ seq_puts(s, "\nregs:\n");
+ seq_printf(s, "CON:\t\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, CON));
+ seq_printf(s, "PSTATE:\t\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, PSTATE));
+ seq_printf(s, "HCTL:\t\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, HCTL));
+ seq_printf(s, "SYSCTL:\t\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, SYSCTL));
+ seq_printf(s, "IE:\t\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, IE));
+ seq_printf(s, "ISE:\t\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, ISE));
+ seq_printf(s, "CAPA:\t\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, CAPA));
+
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
+
+ return 0;
+}
+
+static int omap_hsmmc_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, omap_hsmmc_regs_show, inode->i_private);
+}
+
+static const struct file_operations mmc_regs_fops = {
+ .open = omap_hsmmc_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void omap_hsmmc_debugfs(struct mmc_host *mmc)
+{
+ if (mmc->debugfs_root)
+ debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root,
+ mmc, &mmc_regs_fops);
+}
+
+#else
+
+static void omap_hsmmc_debugfs(struct mmc_host *mmc)
+{
+}
+
+#endif
+
+#ifdef CONFIG_OF
+static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = {
+ /* See 35xx errata 2.1.1.128 in SPRZ278F */
+ .controller_flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
+};
+
+static const struct omap_mmc_of_data omap4_mmc_of_data = {
+ .reg_offset = 0x100,
+};
+static const struct omap_mmc_of_data am33xx_mmc_of_data = {
+ .reg_offset = 0x100,
+ .controller_flags = OMAP_HSMMC_SWAKEUP_MISSING,
+};
+
+static const struct of_device_id omap_mmc_of_match[] = {
+ {
+ .compatible = "ti,omap2-hsmmc",
+ },
+ {
+ .compatible = "ti,omap3-pre-es3-hsmmc",
+ .data = &omap3_pre_es3_mmc_of_data,
+ },
+ {
+ .compatible = "ti,omap3-hsmmc",
+ },
+ {
+ .compatible = "ti,omap4-hsmmc",
+ .data = &omap4_mmc_of_data,
+ },
+ {
+ .compatible = "ti,am33xx-hsmmc",
+ .data = &am33xx_mmc_of_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_mmc_of_match);
+
+static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
+{
+ struct omap_hsmmc_platform_data *pdata;
+ struct device_node *np = dev->of_node;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+ if (of_find_property(np, "ti,dual-volt", NULL))
+ pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;
+
+ pdata->gpio_cd = -EINVAL;
+ pdata->gpio_cod = -EINVAL;
+ pdata->gpio_wp = -EINVAL;
+
+ if (of_find_property(np, "ti,non-removable", NULL)) {
+ pdata->nonremovable = true;
+ pdata->no_regulator_off_init = true;
+ }
+
+ if (of_find_property(np, "ti,needs-special-reset", NULL))
+ pdata->features |= HSMMC_HAS_UPDATED_RESET;
+
+ if (of_find_property(np, "ti,needs-special-hs-handling", NULL))
+ pdata->features |= HSMMC_HAS_HSPE_SUPPORT;
+
+ return pdata;
+}
+#else
+static inline struct omap_hsmmc_platform_data
+ *of_get_hsmmc_pdata(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int omap_hsmmc_probe(struct platform_device *pdev)
+{
+ struct omap_hsmmc_platform_data *pdata = pdev->dev.platform_data;
+ struct mmc_host *mmc;
+ struct omap_hsmmc_host *host = NULL;
+ struct resource *res;
+ int ret, irq;
+ const struct of_device_id *match;
+ dma_cap_mask_t mask;
+ unsigned tx_req, rx_req;
+ const struct omap_mmc_of_data *data;
+ void __iomem *base;
+
+ match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
+ if (match) {
+ pdata = of_get_hsmmc_pdata(&pdev->dev);
+
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ if (match->data) {
+ data = match->data;
+ pdata->reg_offset = data->reg_offset;
+ pdata->controller_flags |= data->controller_flags;
+ }
+ }
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "Platform Data is missing\n");
+ return -ENXIO;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (res == NULL || irq < 0)
+ return -ENXIO;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err1;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->pdata = pdata;
+ host->dev = &pdev->dev;
+ host->use_dma = 1;
+ host->dma_ch = -1;
+ host->irq = irq;
+ host->mapbase = res->start + pdata->reg_offset;
+ host->base = base + pdata->reg_offset;
+ host->power_mode = MMC_POWER_OFF;
+ host->next_data.cookie = 1;
+ host->pbias_enabled = 0;
+
+ ret = omap_hsmmc_gpio_init(mmc, host, pdata);
+ if (ret)
+ goto err_gpio;
+
+ platform_set_drvdata(pdev, host);
+
+ if (pdev->dev.of_node)
+ host->wake_irq = irq_of_parse_and_map(pdev->dev.of_node, 1);
+
+ mmc->ops = &omap_hsmmc_ops;
+
+ mmc->f_min = OMAP_MMC_MIN_CLOCK;
+
+ if (pdata->max_freq > 0)
+ mmc->f_max = pdata->max_freq;
+ else if (mmc->f_max == 0)
+ mmc->f_max = OMAP_MMC_MAX_CLOCK;
+
+ spin_lock_init(&host->irq_lock);
+
+ host->fclk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(host->fclk)) {
+ ret = PTR_ERR(host->fclk);
+ host->fclk = NULL;
+ goto err1;
+ }
+
+ if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
+ dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
+ omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
+ }
+
+ pm_runtime_enable(host->dev);
+ pm_runtime_get_sync(host->dev);
+ pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(host->dev);
+
+ omap_hsmmc_context_save(host);
+
+ host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck");
+ /*
+ * MMC can still work without debounce clock.
+ */
+ if (IS_ERR(host->dbclk)) {
+ host->dbclk = NULL;
+ } else if (clk_prepare_enable(host->dbclk) != 0) {
+ dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
+ host->dbclk = NULL;
+ }
+
+	/*
+	 * Since we do only SG emulation, we can have as many segs
+	 * as we want.
+	 */
+ mmc->max_segs = 1024;
+
+	mmc->max_blk_size = 512;	/* hardware allows block lengths up to 1024 */
+ mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
+
+ mmc->caps |= mmc_pdata(host)->caps;
+ if (mmc->caps & MMC_CAP_8_BIT_DATA)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ if (mmc_pdata(host)->nonremovable)
+ mmc->caps |= MMC_CAP_NONREMOVABLE;
+
+ mmc->pm_caps |= mmc_pdata(host)->pm_caps;
+
+ omap_hsmmc_conf_bus_power(host);
+
+ if (!pdev->dev.of_node) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
+ if (!res) {
+ dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
+ ret = -ENXIO;
+ goto err_irq;
+ }
+ tx_req = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
+ if (!res) {
+ dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
+ ret = -ENXIO;
+ goto err_irq;
+ }
+ rx_req = res->start;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->rx_chan =
+ dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+ &rx_req, &pdev->dev, "rx");
+
+ if (!host->rx_chan) {
+ dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+ ret = -ENXIO;
+ goto err_irq;
+ }
+
+ host->tx_chan =
+ dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+ &tx_req, &pdev->dev, "tx");
+
+ if (!host->tx_chan) {
+ dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+ ret = -ENXIO;
+ goto err_irq;
+ }
+
+ /* Request IRQ for MMC operations */
+ ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
+ mmc_hostname(mmc), host);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
+ goto err_irq;
+ }
+
+ if (omap_hsmmc_have_reg() && !mmc_pdata(host)->set_power) {
+ ret = omap_hsmmc_reg_get(host);
+ if (ret)
+ goto err_irq;
+ host->use_reg = 1;
+ }
+
+ mmc->ocr_avail = mmc_pdata(host)->ocr_mask;
+
+ omap_hsmmc_disable_irq(host);
+
+ /*
+ * For now, only support SDIO interrupt if we have a separate
+ * wake-up interrupt configured from device tree. This is because
+ * the wake-up interrupt is needed for idle state and some
+ * platforms need special quirks. And we don't want to add new
+ * legacy mux platform init code callbacks any longer as we
+	 * are moving to DT based booting anyway.
+ */
+ ret = omap_hsmmc_configure_wake_irq(host);
+ if (!ret)
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+
+ omap_hsmmc_protect_card(host);
+
+ mmc_add_host(mmc);
+
+ if (mmc_pdata(host)->name != NULL) {
+ ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
+ if (ret < 0)
+ goto err_slot_name;
+ }
+ if (host->get_cover_state) {
+ ret = device_create_file(&mmc->class_dev,
+ &dev_attr_cover_switch);
+ if (ret < 0)
+ goto err_slot_name;
+ }
+
+ omap_hsmmc_debugfs(mmc);
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
+
+ return 0;
+
+err_slot_name:
+ mmc_remove_host(mmc);
+ if (host->use_reg)
+ omap_hsmmc_reg_put(host);
+err_irq:
+ if (host->tx_chan)
+ dma_release_channel(host->tx_chan);
+ if (host->rx_chan)
+ dma_release_channel(host->rx_chan);
+ pm_runtime_put_sync(host->dev);
+ pm_runtime_disable(host->dev);
+ if (host->dbclk)
+ clk_disable_unprepare(host->dbclk);
+err1:
+err_gpio:
+ mmc_free_host(mmc);
+err:
+ return ret;
+}
+
+static int omap_hsmmc_remove(struct platform_device *pdev)
+{
+ struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(host->dev);
+ mmc_remove_host(host->mmc);
+ if (host->use_reg)
+ omap_hsmmc_reg_put(host);
+
+ if (host->tx_chan)
+ dma_release_channel(host->tx_chan);
+ if (host->rx_chan)
+ dma_release_channel(host->rx_chan);
+
+ pm_runtime_put_sync(host->dev);
+ pm_runtime_disable(host->dev);
+ if (host->dbclk)
+ clk_disable_unprepare(host->dbclk);
+
+ mmc_free_host(host->mmc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int omap_hsmmc_suspend(struct device *dev)
+{
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+
+ if (!host)
+ return 0;
+
+ pm_runtime_get_sync(host->dev);
+
+ if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
+ OMAP_HSMMC_WRITE(host->base, ISE, 0);
+ OMAP_HSMMC_WRITE(host->base, IE, 0);
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
+ }
+
+ /* do not wake up due to sdio irq */
+ if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
+ !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
+ disable_irq(host->wake_irq);
+
+ if (host->dbclk)
+ clk_disable_unprepare(host->dbclk);
+
+ pm_runtime_put_sync(host->dev);
+ return 0;
+}
+
+/* Routine to resume the MMC device */
+static int omap_hsmmc_resume(struct device *dev)
+{
+ struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+
+ if (!host)
+ return 0;
+
+ pm_runtime_get_sync(host->dev);
+
+ if (host->dbclk)
+ clk_prepare_enable(host->dbclk);
+
+ if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
+ omap_hsmmc_conf_bus_power(host);
+
+ omap_hsmmc_protect_card(host);
+
+ if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
+ !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
+ enable_irq(host->wake_irq);
+
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
+ return 0;
+}
+#endif
+
+static int omap_hsmmc_runtime_suspend(struct device *dev)
+{
+ struct omap_hsmmc_host *host;
+ unsigned long flags;
+ int ret = 0;
+
+ host = platform_get_drvdata(to_platform_device(dev));
+ omap_hsmmc_context_save(host);
+ dev_dbg(dev, "disabled\n");
+
+ spin_lock_irqsave(&host->irq_lock, flags);
+ if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
+ (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
+ /* disable sdio irq handling to prevent race */
+ OMAP_HSMMC_WRITE(host->base, ISE, 0);
+ OMAP_HSMMC_WRITE(host->base, IE, 0);
+
+ if (!(OMAP_HSMMC_READ(host->base, PSTATE) & DLEV_DAT(1))) {
+ /*
+ * dat1 line low, pending sdio irq
+ * race condition: possible irq handler running on
+ * multi-core, abort
+ */
+ dev_dbg(dev, "pending sdio irq, abort suspend\n");
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+ OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
+ OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
+ pm_runtime_mark_last_busy(dev);
+ ret = -EBUSY;
+ goto abort;
+ }
+
+ pinctrl_pm_select_idle_state(dev);
+
+ WARN_ON(host->flags & HSMMC_WAKE_IRQ_ENABLED);
+ enable_irq(host->wake_irq);
+ host->flags |= HSMMC_WAKE_IRQ_ENABLED;
+ } else {
+ pinctrl_pm_select_idle_state(dev);
+ }
+
+abort:
+ spin_unlock_irqrestore(&host->irq_lock, flags);
+ return ret;
+}
+
+static int omap_hsmmc_runtime_resume(struct device *dev)
+{
+ struct omap_hsmmc_host *host;
+ unsigned long flags;
+
+ host = platform_get_drvdata(to_platform_device(dev));
+ omap_hsmmc_context_restore(host);
+ dev_dbg(dev, "enabled\n");
+
+ spin_lock_irqsave(&host->irq_lock, flags);
+ if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
+ (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
+ /* sdio irq flag can't change while in runtime suspend */
+ if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
+ disable_irq_nosync(host->wake_irq);
+ host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
+ }
+
+ pinctrl_pm_select_default_state(host->dev);
+
+		/* the sdio irq can be lost if the pinmux is incorrect */
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+ OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
+ OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
+ } else {
+ pinctrl_pm_select_default_state(host->dev);
+ }
+ spin_unlock_irqrestore(&host->irq_lock, flags);
+ return 0;
+}
+
+static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume)
+ .runtime_suspend = omap_hsmmc_runtime_suspend,
+ .runtime_resume = omap_hsmmc_runtime_resume,
+};
+
+static struct platform_driver omap_hsmmc_driver = {
+ .probe = omap_hsmmc_probe,
+ .remove = omap_hsmmc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &omap_hsmmc_dev_pm_ops,
+ .of_match_table = of_match_ptr(omap_mmc_of_match),
+ },
+};
+
+module_platform_driver(omap_hsmmc_driver);
+MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/kernel/drivers/mmc/host/pxamci.c b/kernel/drivers/mmc/host/pxamci.c
new file mode 100644
index 000000000..1b6d0bfe3
--- /dev/null
+++ b/kernel/drivers/mmc/host/pxamci.c
@@ -0,0 +1,896 @@
+/*
+ * linux/drivers/mmc/host/pxamci.c - PXA MMCI driver
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This hardware is really sick:
+ * - No way to clear interrupts.
+ * - Have to turn off the clock whenever we touch the device.
+ * - Doesn't tell you how many data blocks were transferred.
+ * Yuck!
+ *
+ *  1- and 3-byte data transfers are not supported
+ *  max block length is limited to 1023 bytes
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mmc/host.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/gfp.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+
+#include <asm/sizes.h>
+
+#include <mach/hardware.h>
+#include <mach/dma.h>
+#include <linux/platform_data/mmc-pxamci.h>
+
+#include "pxamci.h"
+
+#define DRIVER_NAME "pxa2xx-mci"
+
+#define NR_SG 1
+#define CLKRT_OFF (~0)
+
+#define mmc_has_26MHz() (cpu_is_pxa300() || cpu_is_pxa310() \
+ || cpu_is_pxa935())
+
+struct pxamci_host {
+ struct mmc_host *mmc;
+ spinlock_t lock;
+ struct resource *res;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned long clkrate;
+ int irq;
+ int dma;
+ unsigned int clkrt;
+ unsigned int cmdat;
+ unsigned int imask;
+ unsigned int power_mode;
+ struct pxamci_platform_data *pdata;
+
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ dma_addr_t sg_dma;
+ struct pxa_dma_desc *sg_cpu;
+ unsigned int dma_len;
+
+ unsigned int dma_dir;
+ unsigned int dma_drcmrrx;
+ unsigned int dma_drcmrtx;
+
+ struct regulator *vcc;
+};
+
+static inline void pxamci_init_ocr(struct pxamci_host *host)
+{
+#ifdef CONFIG_REGULATOR
+ host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc");
+
+ if (IS_ERR(host->vcc))
+ host->vcc = NULL;
+ else {
+ host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
+ if (host->pdata && host->pdata->ocr_mask)
+ dev_warn(mmc_dev(host->mmc),
+ "ocr_mask/setpower will not be used\n");
+ }
+#endif
+ if (host->vcc == NULL) {
+ /* fall-back to platform data */
+ host->mmc->ocr_avail = host->pdata ?
+ host->pdata->ocr_mask :
+ MMC_VDD_32_33 | MMC_VDD_33_34;
+ }
+}
+
+static inline int pxamci_set_power(struct pxamci_host *host,
+ unsigned char power_mode,
+ unsigned int vdd)
+{
+ int on;
+
+ if (host->vcc) {
+ int ret;
+
+ if (power_mode == MMC_POWER_UP) {
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
+ if (ret)
+ return ret;
+ } else if (power_mode == MMC_POWER_OFF) {
+ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
+ if (ret)
+ return ret;
+ }
+ }
+ if (!host->vcc && host->pdata &&
+ gpio_is_valid(host->pdata->gpio_power)) {
+ on = ((1 << vdd) & host->pdata->ocr_mask);
+ gpio_set_value(host->pdata->gpio_power,
+ !!on ^ host->pdata->gpio_power_invert);
+ }
+ if (!host->vcc && host->pdata && host->pdata->setpower)
+ return host->pdata->setpower(mmc_dev(host->mmc), vdd);
+
+ return 0;
+}
+
+static void pxamci_stop_clock(struct pxamci_host *host)
+{
+ if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
+ unsigned long timeout = 10000;
+ unsigned int v;
+
+ writel(STOP_CLOCK, host->base + MMC_STRPCL);
+
+ do {
+ v = readl(host->base + MMC_STAT);
+ if (!(v & STAT_CLK_EN))
+ break;
+ udelay(1);
+ } while (timeout--);
+
+ if (v & STAT_CLK_EN)
+ dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
+ }
+}
+
+static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->imask &= ~mask;
+ writel(host->imask, host->base + MMC_I_MASK);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->imask |= mask;
+ writel(host->imask, host->base + MMC_I_MASK);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
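+
+/*
+ * Note (illustrative): host->imask holds the set of *masked* sources,
+ * so "enable" clears bits and "disable" sets them before the value is
+ * written back to MMC_I_MASK.
+ */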
+
+static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
+{
+ unsigned int nob = data->blocks;
+ unsigned long long clks;
+ unsigned int timeout;
+	bool dalgn = false;
+ u32 dcmd;
+ int i;
+
+ host->data = data;
+
+ if (data->flags & MMC_DATA_STREAM)
+ nob = 0xffff;
+
+ writel(nob, host->base + MMC_NOB);
+ writel(data->blksz, host->base + MMC_BLKLEN);
+
+ clks = (unsigned long long)data->timeout_ns * host->clkrate;
+ do_div(clks, 1000000000UL);
+ timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
+ writel((timeout + 255) / 256, host->base + MMC_RDTO);
+
+ if (data->flags & MMC_DATA_READ) {
+ host->dma_dir = DMA_FROM_DEVICE;
+ dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
+ DRCMR(host->dma_drcmrtx) = 0;
+ DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
+ } else {
+ host->dma_dir = DMA_TO_DEVICE;
+ dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
+ DRCMR(host->dma_drcmrrx) = 0;
+ DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
+ }
+
+ dcmd |= DCMD_BURST32 | DCMD_WIDTH1;
+
+ host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ host->dma_dir);
+
+ for (i = 0; i < host->dma_len; i++) {
+ unsigned int length = sg_dma_len(&data->sg[i]);
+ host->sg_cpu[i].dcmd = dcmd | length;
+ if (length & 31 && !(data->flags & MMC_DATA_READ))
+ host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
+ /* Not aligned to 8-byte boundary? */
+ if (sg_dma_address(&data->sg[i]) & 0x7)
+			dalgn = true;
+ if (data->flags & MMC_DATA_READ) {
+ host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
+ host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
+ } else {
+ host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
+ host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
+ }
+ host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
+ sizeof(struct pxa_dma_desc);
+ }
+ host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
+ wmb();
+
+ /*
+ * The PXA27x DMA controller encounters overhead when working with
+ * unaligned (to 8-byte boundaries) data, so switch on byte alignment
+ * mode only if we have unaligned data.
+ */
+ if (dalgn)
+ DALGN |= (1 << host->dma);
+ else
+ DALGN &= ~(1 << host->dma);
+ DDADR(host->dma) = host->sg_dma;
+
+ /*
+ * workaround for erratum #91:
+ * only start DMA now if we are doing a read,
+ * otherwise we wait until CMD/RESP has finished
+ * before starting DMA.
+ */
+ if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
+ DCSR(host->dma) = DCSR_RUN;
+}
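+
+/*
+ * Minimal sketch (illustrative, not driver code) of the MMC_RDTO value
+ * computed above: the nanosecond timeout is converted to controller
+ * clocks, the clock-count component is scaled by the divisor exponent,
+ * and the sum is expressed in units of 256 clocks, rounded up.
+ */
+#if 0	/* example only */
+static unsigned int example_rdto(unsigned long long timeout_ns,
+				 unsigned long clkrate,
+				 unsigned int timeout_clks,
+				 unsigned int clkrt)
+{
+	unsigned long long clks = timeout_ns * clkrate;
+
+	do_div(clks, 1000000000UL);		/* ns -> controller clocks */
+	return ((unsigned int)clks + (timeout_clks << clkrt) + 255) / 256;
+}
+#endif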
+
+static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
+{
+ WARN_ON(host->cmd != NULL);
+ host->cmd = cmd;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ cmdat |= CMDAT_BUSY;
+
+#define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
+ switch (RSP_TYPE(mmc_resp_type(cmd))) {
+ case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
+ cmdat |= CMDAT_RESP_SHORT;
+ break;
+ case RSP_TYPE(MMC_RSP_R3):
+ cmdat |= CMDAT_RESP_R3;
+ break;
+ case RSP_TYPE(MMC_RSP_R2):
+ cmdat |= CMDAT_RESP_R2;
+ break;
+ default:
+ break;
+ }
+
+ writel(cmd->opcode, host->base + MMC_CMD);
+ writel(cmd->arg >> 16, host->base + MMC_ARGH);
+ writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
+ writel(cmdat, host->base + MMC_CMDAT);
+ writel(host->clkrt, host->base + MMC_CLKRT);
+
+ writel(START_CLOCK, host->base + MMC_STRPCL);
+
+ pxamci_enable_irq(host, END_CMD_RES);
+}
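+
+/*
+ * Worked example (illustrative only): the 32-bit argument is split
+ * across two 16-bit registers above, so cmd->arg = 0x12345678 is
+ * written as MMC_ARGH = 0x1234 and MMC_ARGL = 0x5678.
+ */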
+
+static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
+{
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
+static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
+{
+ struct mmc_command *cmd = host->cmd;
+ int i;
+ u32 v;
+
+ if (!cmd)
+ return 0;
+
+ host->cmd = NULL;
+
+	/*
+	 * Did I mention this hardware is sick? We always need to
+	 * discard the upper 8 bits of the first 16-bit word.
+	 */
+ v = readl(host->base + MMC_RES) & 0xffff;
+ for (i = 0; i < 4; i++) {
+ u32 w1 = readl(host->base + MMC_RES) & 0xffff;
+ u32 w2 = readl(host->base + MMC_RES) & 0xffff;
+ cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
+ v = w2;
+ }
+
+ if (stat & STAT_TIME_OUT_RESPONSE) {
+ cmd->error = -ETIMEDOUT;
+ } else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
+ /*
+ * workaround for erratum #42:
+ * Intel PXA27x Family Processor Specification Update Rev 001
+ * A bogus CRC error can appear if the msb of a 136 bit
+ * response is a one.
+ */
+ if (cpu_is_pxa27x() &&
+ (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000))
+ pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
+ else
+ cmd->error = -EILSEQ;
+ }
+
+ pxamci_disable_irq(host, END_CMD_RES);
+ if (host->data && !cmd->error) {
+ pxamci_enable_irq(host, DATA_TRAN_DONE);
+ /*
+ * workaround for erratum #91, if doing write
+ * enable DMA late
+ */
+ if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
+ DCSR(host->dma) = DCSR_RUN;
+ } else {
+ pxamci_finish_request(host, host->mrq);
+ }
+
+ return 1;
+}
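+
+/*
+ * Worked example (illustrative only) of the response repacking above:
+ * with successive 16-bit reads v = 0x003f, w1 = 0x0011, w2 = 0x2233,
+ * resp[0] becomes 0x3f001122 (v << 24 | w1 << 8 | w2 >> 8) and w2
+ * carries its low byte into the next response word.
+ */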
+
+static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
+{
+ struct mmc_data *data = host->data;
+
+ if (!data)
+ return 0;
+
+ DCSR(host->dma) = 0;
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ host->dma_dir);
+
+ if (stat & STAT_READ_TIME_OUT)
+ data->error = -ETIMEDOUT;
+ else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
+ data->error = -EILSEQ;
+
+ /*
+ * There appears to be a hardware design bug here. There seems to
+ * be no way to find out how much data was transferred to the card.
+ * This means that if there was an error on any block, we mark all
+ * data blocks as being in error.
+ */
+ if (!data->error)
+ data->bytes_xfered = data->blocks * data->blksz;
+ else
+ data->bytes_xfered = 0;
+
+ pxamci_disable_irq(host, DATA_TRAN_DONE);
+
+ host->data = NULL;
+ if (host->mrq->stop) {
+ pxamci_stop_clock(host);
+ pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
+ } else {
+ pxamci_finish_request(host, host->mrq);
+ }
+
+ return 1;
+}
+
+static irqreturn_t pxamci_irq(int irq, void *devid)
+{
+ struct pxamci_host *host = devid;
+ unsigned int ireg;
+ int handled = 0;
+
+ ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);
+
+ if (ireg) {
+ unsigned stat = readl(host->base + MMC_STAT);
+
+ pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
+
+ if (ireg & END_CMD_RES)
+ handled |= pxamci_cmd_done(host, stat);
+ if (ireg & DATA_TRAN_DONE)
+ handled |= pxamci_data_done(host, stat);
+ if (ireg & SDIO_INT) {
+ mmc_signal_sdio_irq(host->mmc);
+ handled = 1;
+ }
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct pxamci_host *host = mmc_priv(mmc);
+ unsigned int cmdat;
+
+ WARN_ON(host->mrq != NULL);
+
+ host->mrq = mrq;
+
+ pxamci_stop_clock(host);
+
+ cmdat = host->cmdat;
+ host->cmdat &= ~CMDAT_INIT;
+
+ if (mrq->data) {
+ pxamci_setup_data(host, mrq->data);
+
+ cmdat &= ~CMDAT_BUSY;
+ cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
+ if (mrq->data->flags & MMC_DATA_WRITE)
+ cmdat |= CMDAT_WRITE;
+
+ if (mrq->data->flags & MMC_DATA_STREAM)
+ cmdat |= CMDAT_STREAM;
+ }
+
+ pxamci_start_cmd(host, mrq->cmd, cmdat);
+}
+
+static int pxamci_get_ro(struct mmc_host *mmc)
+{
+ struct pxamci_host *host = mmc_priv(mmc);
+
+ if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
+ if (host->pdata->gpio_card_ro_invert)
+ return !gpio_get_value(host->pdata->gpio_card_ro);
+ else
+ return gpio_get_value(host->pdata->gpio_card_ro);
+ }
+ if (host->pdata && host->pdata->get_ro)
+ return !!host->pdata->get_ro(mmc_dev(mmc));
+ /*
+ * Board doesn't support read only detection; let the mmc core
+ * decide what to do.
+ */
+ return -ENOSYS;
+}
+
+static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct pxamci_host *host = mmc_priv(mmc);
+
+ if (ios->clock) {
+ unsigned long rate = host->clkrate;
+ unsigned int clk = rate / ios->clock;
+
+ if (host->clkrt == CLKRT_OFF)
+ clk_prepare_enable(host->clk);
+
+ if (ios->clock == 26000000) {
+ /* to support 26MHz */
+ host->clkrt = 7;
+ } else {
+ /* to handle (19.5MHz, 26MHz) */
+ if (!clk)
+ clk = 1;
+
+			/*
+			 * Integer division may pick a divisor that
+			 * leaves the clock above the requested rate;
+			 * check for that condition and round the
+			 * divisor up.
+			 */
+ if (rate / clk > ios->clock)
+ clk <<= 1;
+ host->clkrt = fls(clk) - 1;
+ }
+
+		/* clkrt is written to the hardware on the next command */
+ } else {
+ pxamci_stop_clock(host);
+ if (host->clkrt != CLKRT_OFF) {
+ host->clkrt = CLKRT_OFF;
+ clk_disable_unprepare(host->clk);
+ }
+ }
+
+ if (host->power_mode != ios->power_mode) {
+ int ret;
+
+ host->power_mode = ios->power_mode;
+
+ ret = pxamci_set_power(host, ios->power_mode, ios->vdd);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "unable to set power\n");
+			/*
+			 * The .set_ios() function in the mmc_host_ops
+			 * struct returns void, and failing to set the
+			 * power should be rare, so we print an error
+			 * and return here.
+			 */
+ return;
+ }
+
+ if (ios->power_mode == MMC_POWER_ON)
+ host->cmdat |= CMDAT_INIT;
+ }
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ host->cmdat |= CMDAT_SD_4DAT;
+ else
+ host->cmdat &= ~CMDAT_SD_4DAT;
+
+ dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n",
+ host->clkrt, host->cmdat);
+}
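+
+/*
+ * Minimal sketch (standalone helper, hypothetical names) of the divisor
+ * selection above: derive a divisor by integer division, double it when
+ * that division overshot the request, and encode the result as the
+ * power-of-two CLKRT exponent.
+ */
+#if 0	/* example only */
+static unsigned int example_clkrt(unsigned long rate, unsigned int target)
+{
+	unsigned int clk = rate / target;
+
+	if (!clk)
+		clk = 1;
+	if (rate / clk > target)
+		clk <<= 1;	/* integer division rounded the divisor down */
+	return fls(clk) - 1;	/* e.g. 19.5 MHz, 5 MHz target -> divide by 4 */
+}
+#endif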
+
+static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
+{
+ struct pxamci_host *pxa_host = mmc_priv(host);
+
+ if (enable)
+ pxamci_enable_irq(pxa_host, SDIO_INT);
+ else
+ pxamci_disable_irq(pxa_host, SDIO_INT);
+}
+
+static const struct mmc_host_ops pxamci_ops = {
+ .request = pxamci_request,
+ .get_ro = pxamci_get_ro,
+ .set_ios = pxamci_set_ios,
+ .enable_sdio_irq = pxamci_enable_sdio_irq,
+};
+
+static void pxamci_dma_irq(int dma, void *devid)
+{
+ struct pxamci_host *host = devid;
+ int dcsr = DCSR(dma);
+ DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;
+
+ if (dcsr & DCSR_ENDINTR) {
+ writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
+ } else {
+ pr_err("%s: DMA error on channel %d (DCSR=%#x)\n",
+ mmc_hostname(host->mmc), dma, dcsr);
+ host->data->error = -EIO;
+ pxamci_data_done(host, 0);
+ }
+}
+
+static irqreturn_t pxamci_detect_irq(int irq, void *devid)
+{
+ struct pxamci_host *host = mmc_priv(devid);
+
+ mmc_detect_change(devid, msecs_to_jiffies(host->pdata->detect_delay_ms));
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id pxa_mmc_dt_ids[] = {
+ { .compatible = "marvell,pxa-mmc" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids);
+
+static int pxamci_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct pxamci_platform_data *pdata;
+ u32 tmp;
+
+ if (!np)
+ return 0;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->gpio_card_detect =
+ of_get_named_gpio(np, "cd-gpios", 0);
+ pdata->gpio_card_ro =
+ of_get_named_gpio(np, "wp-gpios", 0);
+
+ /* pxa-mmc specific */
+ pdata->gpio_power =
+ of_get_named_gpio(np, "pxa-mmc,gpio-power", 0);
+
+ if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0)
+ pdata->detect_delay_ms = tmp;
+
+ pdev->dev.platform_data = pdata;
+
+ return 0;
+}
+#else
+static int pxamci_of_init(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+static int pxamci_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct pxamci_host *host = NULL;
+ struct resource *r, *dmarx, *dmatx;
+ int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
+
+ ret = pxamci_of_init(pdev);
+ if (ret)
+ return ret;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!r || irq < 0)
+ return -ENXIO;
+
+ r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
+ if (!r)
+ return -EBUSY;
+
+ mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mmc->ops = &pxamci_ops;
+
+ /*
+ * We can do SG-DMA, but we don't because we never know how much
+ * data we successfully wrote to the card.
+ */
+ mmc->max_segs = NR_SG;
+
+ /*
+ * Our hardware DMA can handle a maximum of one page per SG entry.
+ */
+ mmc->max_seg_size = PAGE_SIZE;
+
+ /*
+ * Block length register is only 10 bits before PXA27x.
+ */
+ mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;
+
+ /*
+ * Block count register is 16 bits.
+ */
+ mmc->max_blk_count = 65535;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->dma = -1;
+ host->pdata = pdev->dev.platform_data;
+ host->clkrt = CLKRT_OFF;
+
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ host->clk = NULL;
+ goto out;
+ }
+
+ host->clkrate = clk_get_rate(host->clk);
+
+ /*
+ * Calculate minimum clock rate, rounding up.
+ */
+ mmc->f_min = (host->clkrate + 63) / 64;
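+	/* the "+ 63" makes the division round up: DIV_ROUND_UP(clkrate, 64) */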
+ mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate;
+
+ pxamci_init_ocr(host);
+
+ mmc->caps = 0;
+ host->cmdat = 0;
+ if (!cpu_is_pxa25x()) {
+ mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+ host->cmdat |= CMDAT_SDIO_INT_EN;
+ if (mmc_has_26MHz())
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_SD_HIGHSPEED;
+ }
+
+ host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
+ if (!host->sg_cpu) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock_init(&host->lock);
+ host->res = r;
+ host->irq = irq;
+ host->imask = MMC_I_MASK_ALL;
+
+ host->base = ioremap(r->start, SZ_4K);
+ if (!host->base) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Ensure that the host controller is shut down, and setup
+ * with our defaults.
+ */
+ pxamci_stop_clock(host);
+ writel(0, host->base + MMC_SPI);
+ writel(64, host->base + MMC_RESTO);
+ writel(host->imask, host->base + MMC_I_MASK);
+
+ host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
+ pxamci_dma_irq, host);
+ if (host->dma < 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
+ if (ret)
+ goto out;
+
+ platform_set_drvdata(pdev, mmc);
+
+ dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!dmarx) {
+ ret = -ENXIO;
+ goto out;
+ }
+ host->dma_drcmrrx = dmarx->start;
+
+ dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!dmatx) {
+ ret = -ENXIO;
+ goto out;
+ }
+ host->dma_drcmrtx = dmatx->start;
+
+ if (host->pdata) {
+ gpio_cd = host->pdata->gpio_card_detect;
+ gpio_ro = host->pdata->gpio_card_ro;
+ gpio_power = host->pdata->gpio_power;
+ }
+ if (gpio_is_valid(gpio_power)) {
+ ret = gpio_request(gpio_power, "mmc card power");
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
+ goto out;
+ }
+ gpio_direction_output(gpio_power,
+ host->pdata->gpio_power_invert);
+ }
+ if (gpio_is_valid(gpio_ro)) {
+ ret = gpio_request(gpio_ro, "mmc card read only");
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
+ goto err_gpio_ro;
+ }
+ gpio_direction_input(gpio_ro);
+ }
+ if (gpio_is_valid(gpio_cd)) {
+ ret = gpio_request(gpio_cd, "mmc card detect");
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
+ goto err_gpio_cd;
+ }
+ gpio_direction_input(gpio_cd);
+
+ ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "mmc card detect", mmc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request card detect IRQ\n");
+ goto err_request_irq;
+ }
+ }
+
+ if (host->pdata && host->pdata->init)
+ host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);
+
+ if (gpio_is_valid(gpio_power) && host->pdata->setpower)
+ dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n");
+ if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
+ dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n");
+
+ mmc_add_host(mmc);
+
+ return 0;
+
+err_request_irq:
+ gpio_free(gpio_cd);
+err_gpio_cd:
+ gpio_free(gpio_ro);
+err_gpio_ro:
+ gpio_free(gpio_power);
+out:
+ if (host) {
+ if (host->dma >= 0)
+ pxa_free_dma(host->dma);
+ if (host->base)
+ iounmap(host->base);
+ if (host->sg_cpu)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+ if (host->clk)
+ clk_put(host->clk);
+ }
+ if (mmc)
+ mmc_free_host(mmc);
+ release_resource(r);
+ return ret;
+}
+
+static int pxamci_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ int gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
+
+ if (mmc) {
+ struct pxamci_host *host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+
+ if (host->pdata) {
+ gpio_cd = host->pdata->gpio_card_detect;
+ gpio_ro = host->pdata->gpio_card_ro;
+ gpio_power = host->pdata->gpio_power;
+ }
+ if (gpio_is_valid(gpio_cd)) {
+ free_irq(gpio_to_irq(gpio_cd), mmc);
+ gpio_free(gpio_cd);
+ }
+ if (gpio_is_valid(gpio_ro))
+ gpio_free(gpio_ro);
+ if (gpio_is_valid(gpio_power))
+ gpio_free(gpio_power);
+ if (host->vcc)
+ regulator_put(host->vcc);
+
+ if (host->pdata && host->pdata->exit)
+ host->pdata->exit(&pdev->dev, mmc);
+
+ pxamci_stop_clock(host);
+ writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
+ END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
+ host->base + MMC_I_MASK);
+
+ DRCMR(host->dma_drcmrrx) = 0;
+ DRCMR(host->dma_drcmrtx) = 0;
+
+ free_irq(host->irq, host);
+ pxa_free_dma(host->dma);
+ iounmap(host->base);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+
+ clk_put(host->clk);
+
+ release_resource(host->res);
+
+ mmc_free_host(mmc);
+ }
+ return 0;
+}
+
+static struct platform_driver pxamci_driver = {
+ .probe = pxamci_probe,
+ .remove = pxamci_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(pxa_mmc_dt_ids),
+ },
+};
+
+module_platform_driver(pxamci_driver);
+
+MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-mci");
diff --git a/kernel/drivers/mmc/host/pxamci.h b/kernel/drivers/mmc/host/pxamci.h
new file mode 100644
index 000000000..f6c2e2fcc
--- /dev/null
+++ b/kernel/drivers/mmc/host/pxamci.h
@@ -0,0 +1,90 @@
+#define MMC_STRPCL 0x0000
+#define STOP_CLOCK (1 << 0)
+#define START_CLOCK (2 << 0)
+
+#define MMC_STAT 0x0004
+#define STAT_END_CMD_RES (1 << 13)
+#define STAT_PRG_DONE (1 << 12)
+#define STAT_DATA_TRAN_DONE (1 << 11)
+#define STAT_CLK_EN (1 << 8)
+#define STAT_RECV_FIFO_FULL (1 << 7)
+#define STAT_XMIT_FIFO_EMPTY (1 << 6)
+#define STAT_RES_CRC_ERR (1 << 5)
+#define STAT_SPI_READ_ERROR_TOKEN (1 << 4)
+#define STAT_CRC_READ_ERROR (1 << 3)
+#define STAT_CRC_WRITE_ERROR (1 << 2)
+#define STAT_TIME_OUT_RESPONSE (1 << 1)
+#define STAT_READ_TIME_OUT (1 << 0)
+
+#define MMC_CLKRT 0x0008 /* 3 bit */
+
+#define MMC_SPI 0x000c
+#define SPI_CS_ADDRESS (1 << 3)
+#define SPI_CS_EN (1 << 2)
+#define CRC_ON (1 << 1)
+#define SPI_EN (1 << 0)
+
+#define MMC_CMDAT 0x0010
+#define CMDAT_SDIO_INT_EN (1 << 11)
+#define CMDAT_SD_4DAT (1 << 8)
+#define CMDAT_DMAEN (1 << 7)
+#define CMDAT_INIT (1 << 6)
+#define CMDAT_BUSY (1 << 5)
+#define CMDAT_STREAM (1 << 4) /* 1 = stream */
+#define CMDAT_WRITE (1 << 3) /* 1 = write */
+#define CMDAT_DATAEN (1 << 2)
+#define CMDAT_RESP_NONE (0 << 0)
+#define CMDAT_RESP_SHORT (1 << 0)
+#define CMDAT_RESP_R2 (2 << 0)
+#define CMDAT_RESP_R3 (3 << 0)
+
+#define MMC_RESTO 0x0014 /* 7 bit */
+
+#define MMC_RDTO 0x0018 /* 16 bit */
+
+#define MMC_BLKLEN 0x001c /* 10 bit */
+
+#define MMC_NOB 0x0020 /* 16 bit */
+
+#define MMC_PRTBUF 0x0024
+#define BUF_PART_FULL (1 << 0)
+
+#define MMC_I_MASK 0x0028
+
+/* PXA27x MMC interrupts */
+#define SDIO_SUSPEND_ACK (1 << 12)
+#define SDIO_INT (1 << 11)
+#define RD_STALLED (1 << 10)
+#define RES_ERR (1 << 9)
+#define DAT_ERR (1 << 8)
+#define TINT (1 << 7)
+
+/* PXA2xx MMC interrupts */
+#define TXFIFO_WR_REQ (1 << 6)
+#define RXFIFO_RD_REQ (1 << 5)
+#define CLK_IS_OFF (1 << 4)
+#define STOP_CMD (1 << 3)
+#define END_CMD_RES (1 << 2)
+#define PRG_DONE (1 << 1)
+#define DATA_TRAN_DONE (1 << 0)
+
+#if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx)
+#define MMC_I_MASK_ALL 0x00001fff
+#else
+#define MMC_I_MASK_ALL 0x0000007f
+#endif
+
+#define MMC_I_REG 0x002c
+/* same as MMC_I_MASK */
+
+#define MMC_CMD 0x0030
+
+#define MMC_ARGH 0x0034 /* 16 bit */
+
+#define MMC_ARGL 0x0038 /* 16 bit */
+
+#define MMC_RES 0x003c /* 16 bit */
+
+#define MMC_RXFIFO 0x0040 /* 8 bit */
+
+#define MMC_TXFIFO 0x0044 /* 8 bit */
diff --git a/kernel/drivers/mmc/host/rtsx_pci_sdmmc.c b/kernel/drivers/mmc/host/rtsx_pci_sdmmc.c
new file mode 100644
index 000000000..1d3d6c4bf
--- /dev/null
+++ b/kernel/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -0,0 +1,1498 @@
+/* Realtek PCI-Express SD/MMC Card Interface driver
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/card.h>
+#include <linux/mfd/rtsx_pci.h>
+#include <asm/unaligned.h>
+
+struct realtek_pci_sdmmc {
+ struct platform_device *pdev;
+ struct rtsx_pcr *pcr;
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct workqueue_struct *workq;
+#define SDMMC_WORKQ_NAME "rtsx_pci_sdmmc_workq"
+
+ struct work_struct work;
+ struct mutex host_mutex;
+
+ u8 ssc_depth;
+ unsigned int clock;
+ bool vpclk;
+ bool double_clk;
+ bool eject;
+ bool initial_mode;
+ int power_state;
+#define SDMMC_POWER_ON 1
+#define SDMMC_POWER_OFF 0
+
+ int sg_count;
+ s32 cookie;
+ int cookie_sg_count;
+ bool using_cookie;
+};
+
+static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host)
+{
+ return &(host->pdev->dev);
+}
+
+static inline void sd_clear_error(struct realtek_pci_sdmmc *host)
+{
+ rtsx_pci_write_register(host->pcr, CARD_STOP,
+ SD_STOP | SD_CLR_ERR, SD_STOP | SD_CLR_ERR);
+}
+
+#ifdef DEBUG
+static void dump_reg_range(struct realtek_pci_sdmmc *host, u16 start, u16 end)
+{
+ u16 len = end - start + 1;
+ int i;
+ u8 data[8];
+
+ for (i = 0; i < len; i += 8) {
+ int j;
+ int n = min(8, len - i);
+
+ memset(&data, 0, sizeof(data));
+ for (j = 0; j < n; j++)
+ rtsx_pci_read_register(host->pcr, start + i + j,
+ data + j);
+ dev_dbg(sdmmc_dev(host), "0x%04X(%d): %8ph\n",
+ start + i, n, data);
+ }
+}
+
+static void sd_print_debug_regs(struct realtek_pci_sdmmc *host)
+{
+ dump_reg_range(host, 0xFDA0, 0xFDB3);
+ dump_reg_range(host, 0xFD52, 0xFD69);
+}
+#else
+#define sd_print_debug_regs(host)
+#endif /* DEBUG */
+
+static inline int sd_get_cd_int(struct realtek_pci_sdmmc *host)
+{
+ return rtsx_pci_readl(host->pcr, RTSX_BIPR) & SD_EXIST;
+}
+
+static void sd_cmd_set_sd_cmd(struct rtsx_pcr *pcr, struct mmc_command *cmd)
+{
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD0, 0xFF,
+ SD_CMD_START | cmd->opcode);
+ rtsx_pci_write_be32(pcr, SD_CMD1, cmd->arg);
+}
+
+static void sd_cmd_set_data_len(struct rtsx_pcr *pcr, u16 blocks, u16 blksz)
+{
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, blocks);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, blocks >> 8);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, blksz);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, blksz >> 8);
+}
+
+static int sd_response_type(struct mmc_command *cmd)
+{
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ return SD_RSP_TYPE_R0;
+ case MMC_RSP_R1:
+ return SD_RSP_TYPE_R1;
+ case MMC_RSP_R1 & ~MMC_RSP_CRC:
+ return SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
+ case MMC_RSP_R1B:
+ return SD_RSP_TYPE_R1b;
+ case MMC_RSP_R2:
+ return SD_RSP_TYPE_R2;
+ case MMC_RSP_R3:
+ return SD_RSP_TYPE_R3;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sd_status_index(int resp_type)
+{
+ if (resp_type == SD_RSP_TYPE_R0)
+ return 0;
+ else if (resp_type == SD_RSP_TYPE_R2)
+ return 16;
+
+ return 5;
+}
+
+/*
+ * sd_pre_dma_transfer - do dma_map_sg() or reuse the cookie mapping
+ *
+ * @pre: true if called from pre_req()
+ * return:
+ *	0 - a fresh dma_map_sg() was done
+ *	1 - the cached cookie mapping was reused
+ */
+static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host,
+ struct mmc_data *data, bool pre)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int read = data->flags & MMC_DATA_READ;
+ int count = 0;
+ int using_cookie = 0;
+
+ if (!pre && data->host_cookie && data->host_cookie != host->cookie) {
+ dev_err(sdmmc_dev(host),
+ "error: data->host_cookie = %d, host->cookie = %d\n",
+ data->host_cookie, host->cookie);
+ data->host_cookie = 0;
+ }
+
+ if (pre || data->host_cookie != host->cookie) {
+ count = rtsx_pci_dma_map_sg(pcr, data->sg, data->sg_len, read);
+ } else {
+ count = host->cookie_sg_count;
+ using_cookie = 1;
+ }
+
+ if (pre) {
+ host->cookie_sg_count = count;
+ if (++host->cookie < 0)
+ host->cookie = 1;
+ data->host_cookie = host->cookie;
+ } else {
+ host->sg_count = count;
+ }
+
+ return using_cookie;
+}
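+
+/*
+ * Illustrative note: on the pre_req() path a fresh mapping is always
+ * made and cached under a new nonzero cookie; on the request() path a
+ * matching cookie means that mapping is reused, so only a mismatch (or
+ * a missing pre_req()) costs an extra rtsx_pci_dma_map_sg() call.
+ */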
+
+static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (data->host_cookie) {
+ dev_err(sdmmc_dev(host),
+ "error: reset data->host_cookie = %d\n",
+ data->host_cookie);
+ data->host_cookie = 0;
+ }
+
+ sd_pre_dma_transfer(host, data, true);
+ dev_dbg(sdmmc_dev(host), "pre dma sg: %d\n", host->cookie_sg_count);
+}
+
+static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+ struct mmc_data *data = mrq->data;
+ int read = data->flags & MMC_DATA_READ;
+
+ rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, read);
+ data->host_cookie = 0;
+}
+
+static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
+ struct mmc_command *cmd)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ u8 cmd_idx = (u8)cmd->opcode;
+ u32 arg = cmd->arg;
+ int err = 0;
+ int timeout = 100;
+ int i;
+ u8 *ptr;
+ int rsp_type;
+ int stat_idx;
+ bool clock_toggled = false;
+
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
+ __func__, cmd_idx, arg);
+
+ rsp_type = sd_response_type(cmd);
+ if (rsp_type < 0)
+ goto out;
+
+ stat_idx = sd_status_index(rsp_type);
+
+ if (rsp_type == SD_RSP_TYPE_R1b)
+ timeout = 3000;
+
+ if (cmd->opcode == SD_SWITCH_VOLTAGE) {
+ err = rtsx_pci_write_register(pcr, SD_BUS_STAT,
+ 0xFF, SD_CLK_TOGGLE_EN);
+ if (err < 0)
+ goto out;
+
+ clock_toggled = true;
+ }
+
+ rtsx_pci_init_cmd(pcr);
+ sd_cmd_set_sd_cmd(pcr, cmd);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, rsp_type);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER,
+ 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END | SD_STAT_IDLE,
+ SD_TRANSFER_END | SD_STAT_IDLE);
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ /* Read data from ping-pong buffer */
+ for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
+ } else if (rsp_type != SD_RSP_TYPE_R0) {
+ /* Read data from SD_CMDx registers */
+ for (i = SD_CMD0; i <= SD_CMD4; i++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
+ }
+
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0);
+
+ err = rtsx_pci_send_cmd(pcr, timeout);
+ if (err < 0) {
+ sd_print_debug_regs(host);
+ sd_clear_error(host);
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_send_cmd error (err = %d)\n", err);
+ goto out;
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R0) {
+ err = 0;
+ goto out;
+ }
+
+	/* Skip the byte returned by the CHECK_REG_CMD entry */
+ ptr = rtsx_pci_get_cmd_data(pcr) + 1;
+
+ /* Check (Start,Transmission) bit of Response */
+ if ((ptr[0] & 0xC0) != 0) {
+ err = -EILSEQ;
+ dev_dbg(sdmmc_dev(host), "Invalid response bit\n");
+ goto out;
+ }
+
+ /* Check CRC7 */
+ if (!(rsp_type & SD_NO_CHECK_CRC7)) {
+ if (ptr[stat_idx] & SD_CRC7_ERR) {
+ err = -EILSEQ;
+ dev_dbg(sdmmc_dev(host), "CRC7 error\n");
+ goto out;
+ }
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
+		/*
+		 * The controller offloads the last byte {CRC-7, end bit 1'b1}
+		 * of response type R2. Assign a dummy CRC of 0 plus the end
+		 * bit to that byte (ptr[16], which becomes the LSB of resp[3]
+		 * below).
+		 */
+ ptr[16] = 1;
+
+ for (i = 0; i < 4; i++) {
+ cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
+ dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
+ i, cmd->resp[i]);
+ }
+ } else {
+ cmd->resp[0] = get_unaligned_be32(ptr + 1);
+ dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n",
+ cmd->resp[0]);
+ }
+
+out:
+ cmd->error = err;
+
+ if (err && clock_toggled)
+ rtsx_pci_write_register(pcr, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
+}
+
+static int sd_read_data(struct realtek_pci_sdmmc *host, struct mmc_command *cmd,
+ u16 byte_cnt, u8 *buf, int buf_len, int timeout)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+ u8 trans_mode;
+
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
+ __func__, cmd->opcode, cmd->arg);
+
+ if (!buf)
+ buf_len = 0;
+
+ if (cmd->opcode == MMC_SEND_TUNING_BLOCK)
+ trans_mode = SD_TM_AUTO_TUNING;
+ else
+ trans_mode = SD_TM_NORMAL_READ;
+
+ rtsx_pci_init_cmd(pcr);
+ sd_cmd_set_sd_cmd(pcr, cmd);
+ sd_cmd_set_data_len(pcr, 1, byte_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ if (trans_mode != SD_TM_AUTO_TUNING)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER,
+ 0xFF, trans_mode | SD_TRANSFER_START);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ err = rtsx_pci_send_cmd(pcr, timeout);
+ if (err < 0) {
+ sd_print_debug_regs(host);
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_send_cmd fail (err = %d)\n", err);
+ return err;
+ }
+
+ if (buf && buf_len) {
+ err = rtsx_pci_read_ppbuf(pcr, buf, buf_len);
+ if (err < 0) {
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_read_ppbuf fail (err = %d)\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int sd_write_data(struct realtek_pci_sdmmc *host,
+ struct mmc_command *cmd, u16 byte_cnt, u8 *buf, int buf_len,
+ int timeout)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
+ __func__, cmd->opcode, cmd->arg);
+
+ if (!buf)
+ buf_len = 0;
+
+ sd_send_cmd_get_rsp(host, cmd);
+ if (cmd->error)
+ return cmd->error;
+
+ if (buf && buf_len) {
+ err = rtsx_pci_write_ppbuf(pcr, buf, buf_len);
+ if (err < 0) {
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_write_ppbuf fail (err = %d)\n", err);
+ return err;
+ }
+ }
+
+ rtsx_pci_init_cmd(pcr);
+ sd_cmd_set_data_len(pcr, 1, byte_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TRANSFER_START | SD_TM_AUTO_WRITE_3);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ err = rtsx_pci_send_cmd(pcr, timeout);
+ if (err < 0) {
+ sd_print_debug_regs(host);
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_send_cmd fail (err = %d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
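+
+/*
+ * Illustrative note: the byte-count paths above stage data through the
+ * controller's ping-pong buffer (CARD_DATA_SOURCE = PINGPONG_BUFFER),
+ * while the long-data paths below switch to the ring buffer and stream
+ * the payload with rtsx_pci_dma_transfer().
+ */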
+
+static int sd_read_long_data(struct realtek_pci_sdmmc *host,
+ struct mmc_request *mrq)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ struct mmc_host *mmc = host->mmc;
+ struct mmc_card *card = mmc->card;
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ int uhs = mmc_card_uhs(card);
+ u8 cfg2 = 0;
+ int err;
+ int resp_type;
+ size_t data_len = data->blksz * data->blocks;
+
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
+ __func__, cmd->opcode, cmd->arg);
+
+ resp_type = sd_response_type(cmd);
+ if (resp_type < 0)
+ return resp_type;
+
+ if (!uhs)
+ cfg2 |= SD_NO_CHECK_WAIT_CRC_TO;
+
+ rtsx_pci_init_cmd(pcr);
+ sd_cmd_set_sd_cmd(pcr, cmd);
+ sd_cmd_set_data_len(pcr, data->blocks, data->blksz);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
+ DMA_DONE_INT, DMA_DONE_INT);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3,
+ 0xFF, (u8)(data_len >> 24));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2,
+ 0xFF, (u8)(data_len >> 16));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1,
+ 0xFF, (u8)(data_len >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)data_len);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_FROM_CARD | DMA_EN | DMA_512);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2 | resp_type);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TRANSFER_START | SD_TM_AUTO_READ_2);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+ rtsx_pci_send_cmd_no_wait(pcr);
+
+ err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, 1, 10000);
+ if (err < 0) {
+ sd_print_debug_regs(host);
+ sd_clear_error(host);
+ return err;
+ }
+
+ return 0;
+}
+
+static int sd_write_long_data(struct realtek_pci_sdmmc *host,
+ struct mmc_request *mrq)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ struct mmc_host *mmc = host->mmc;
+ struct mmc_card *card = mmc->card;
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ int uhs = mmc_card_uhs(card);
+ u8 cfg2;
+ int err;
+ size_t data_len = data->blksz * data->blocks;
+
+ sd_send_cmd_get_rsp(host, cmd);
+ if (cmd->error)
+ return cmd->error;
+
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
+ __func__, cmd->opcode, cmd->arg);
+
+ cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0;
+
+ if (!uhs)
+ cfg2 |= SD_NO_CHECK_WAIT_CRC_TO;
+
+ rtsx_pci_init_cmd(pcr);
+ sd_cmd_set_data_len(pcr, data->blocks, data->blksz);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
+ DMA_DONE_INT, DMA_DONE_INT);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3,
+ 0xFF, (u8)(data_len >> 24));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2,
+ 0xFF, (u8)(data_len >> 16));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1,
+ 0xFF, (u8)(data_len >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)data_len);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_TO_CARD | DMA_EN | DMA_512);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ SD_TRANSFER_START | SD_TM_AUTO_WRITE_3);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+ rtsx_pci_send_cmd_no_wait(pcr);
+ err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, 0, 10000);
+ if (err < 0) {
+ sd_clear_error(host);
+ return err;
+ }
+
+ return 0;
+}
+
+static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (host->sg_count < 0) {
+ data->error = host->sg_count;
+ dev_dbg(sdmmc_dev(host), "%s: sg_count = %d is invalid\n",
+ __func__, host->sg_count);
+ return data->error;
+ }
+
+ if (data->flags & MMC_DATA_READ)
+ return sd_read_long_data(host, mrq);
+
+ return sd_write_long_data(host, mrq);
+}
+
+static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
+{
+ rtsx_pci_write_register(host->pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
+}
+
+static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
+{
+ rtsx_pci_write_register(host->pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
+}
+
+static void sd_normal_rw(struct realtek_pci_sdmmc *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ u8 *buf;
+
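+	/* Single-block PIO transfer through the ping-pong buffer; bounce the
+	 * scatterlist data through a contiguous temporary buffer (GFP_NOIO,
+	 * since we are on the I/O submission path).
+	 */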
+ buf = kzalloc(data->blksz, GFP_NOIO);
+ if (!buf) {
+ cmd->error = -ENOMEM;
+ return;
+ }
+
+ if (data->flags & MMC_DATA_READ) {
+ if (host->initial_mode)
+ sd_disable_initial_mode(host);
+
+ cmd->error = sd_read_data(host, cmd, (u16)data->blksz, buf,
+ data->blksz, 200);
+
+ if (host->initial_mode)
+ sd_enable_initial_mode(host);
+
+ sg_copy_from_buffer(data->sg, data->sg_len, buf, data->blksz);
+ } else {
+ sg_copy_to_buffer(data->sg, data->sg_len, buf, data->blksz);
+
+ cmd->error = sd_write_data(host, cmd, (u16)data->blksz, buf,
+ data->blksz, 200);
+ }
+
+ kfree(buf);
+}
+
+static int sd_change_phase(struct realtek_pci_sdmmc *host,
+ u8 sample_point, bool rx)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+
+ dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n",
+ __func__, rx ? "RX" : "TX", sample_point);
+
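+	/* Reprogram the sample point while CHANGE_CLK is asserted: set the
+	 * RX or TX variable-phase clock, pulse the phase reset, then release
+	 * CHANGE_CLK and reset the asynchronous FIFO.
+	 */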
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
+ if (rx)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD_VPRX_CTL, 0x1F, sample_point);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD_VPTX_CTL, 0x1F, sample_point);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static inline u32 test_phase_bit(u32 phase_map, unsigned int bit)
+{
+ bit %= RTSX_PHASE_MAX;
+ return phase_map & (1 << bit);
+}
+
+static int sd_get_phase_len(u32 phase_map, unsigned int start_bit)
+{
+ int i;
+
+ for (i = 0; i < RTSX_PHASE_MAX; i++) {
+ if (test_phase_bit(phase_map, start_bit + i) == 0)
+ return i;
+ }
+ return RTSX_PHASE_MAX;
+}
+
+static u8 sd_search_final_phase(struct realtek_pci_sdmmc *host, u32 phase_map)
+{
+ int start = 0, len = 0;
+ int start_final = 0, len_final = 0;
+ u8 final_phase = 0xFF;
+
+ if (phase_map == 0) {
+ dev_err(sdmmc_dev(host), "phase error: [map:%x]\n", phase_map);
+ return final_phase;
+ }
+
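+	/* Find the longest contiguous run of working sample points in the
+	 * circular phase map, then use the midpoint of that run.
+	 */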
+ while (start < RTSX_PHASE_MAX) {
+ len = sd_get_phase_len(phase_map, start);
+ if (len_final < len) {
+ start_final = start;
+ len_final = len;
+ }
+ start += len ? len : 1;
+ }
+
+ final_phase = (start_final + len_final / 2) % RTSX_PHASE_MAX;
+ dev_dbg(sdmmc_dev(host), "phase: [map:%x] [maxlen:%d] [final:%d]\n",
+ phase_map, len_final, final_phase);
+
+ return final_phase;
+}
+
+static void sd_wait_data_idle(struct realtek_pci_sdmmc *host)
+{
+ int err, i;
+ u8 val = 0;
+
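+	/* Poll for up to ~10 ms; register read errors are ignored and the
+	 * read is simply retried.
+	 */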
+ for (i = 0; i < 100; i++) {
+ err = rtsx_pci_read_register(host->pcr, SD_DATA_STATE, &val);
+ if (val & SD_DATA_IDLE)
+ return;
+
+ udelay(100);
+ }
+}
+
+static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
+ u8 opcode, u8 sample_point)
+{
+ int err;
+ struct mmc_command cmd = {0};
+
+ err = sd_change_phase(host, sample_point, true);
+ if (err < 0)
+ return err;
+
+ cmd.opcode = opcode;
+ err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100);
+ if (err < 0) {
+		/* Wait until the SD data lines are idle */
+ sd_wait_data_idle(host);
+ sd_clear_error(host);
+ return err;
+ }
+
+ return 0;
+}
+
+static int sd_tuning_phase(struct realtek_pci_sdmmc *host,
+ u8 opcode, u32 *phase_map)
+{
+ int err, i;
+ u32 raw_phase_map = 0;
+
+ for (i = 0; i < RTSX_PHASE_MAX; i++) {
+ err = sd_tuning_rx_cmd(host, opcode, (u8)i);
+ if (err == 0)
+ raw_phase_map |= 1 << i;
+ }
+
+ if (phase_map)
+ *phase_map = raw_phase_map;
+
+ return 0;
+}
+
+static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode)
+{
+ int err, i;
+ u32 raw_phase_map[RX_TUNING_CNT] = {0}, phase_map;
+ u8 final_phase;
+
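+	/* Run the per-phase tuning RX_TUNING_CNT times and AND the resulting
+	 * maps together, keeping only sample points that passed every round.
+	 */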
+ for (i = 0; i < RX_TUNING_CNT; i++) {
+ err = sd_tuning_phase(host, opcode, &(raw_phase_map[i]));
+ if (err < 0)
+ return err;
+
+ if (raw_phase_map[i] == 0)
+ break;
+ }
+
+ phase_map = 0xFFFFFFFF;
+ for (i = 0; i < RX_TUNING_CNT; i++) {
+ dev_dbg(sdmmc_dev(host), "RX raw_phase_map[%d] = 0x%08x\n",
+ i, raw_phase_map[i]);
+ phase_map &= raw_phase_map[i];
+ }
+ dev_dbg(sdmmc_dev(host), "RX phase_map = 0x%08x\n", phase_map);
+
+ if (phase_map) {
+ final_phase = sd_search_final_phase(host, phase_map);
+ if (final_phase == 0xFF)
+ return -EINVAL;
+
+ err = sd_change_phase(host, final_phase, true);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int sdio_extblock_cmd(struct mmc_command *cmd,
+ struct mmc_data *data)
+{
+ return (cmd->opcode == SD_IO_RW_EXTENDED) && (data->blksz == 512);
+}
+
+static inline int sd_rw_cmd(struct mmc_command *cmd)
+{
+ return mmc_op_multi(cmd->opcode) ||
+ (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
+ (cmd->opcode == MMC_WRITE_BLOCK);
+}
+
+static void sd_request(struct work_struct *work)
+{
+ struct realtek_pci_sdmmc *host = container_of(work,
+ struct realtek_pci_sdmmc, work);
+ struct rtsx_pcr *pcr = host->pcr;
+
+ struct mmc_host *mmc = host->mmc;
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+
+ unsigned int data_size = 0;
+ int err;
+
+ if (host->eject || !sd_get_cd_int(host)) {
+ cmd->error = -ENOMEDIUM;
+ goto finish;
+ }
+
+ err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD);
+ if (err) {
+ cmd->error = err;
+ goto finish;
+ }
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth,
+ host->initial_mode, host->double_clk, host->vpclk);
+ rtsx_pci_write_register(pcr, CARD_SELECT, 0x07, SD_MOD_SEL);
+ rtsx_pci_write_register(pcr, CARD_SHARE_MODE,
+ CARD_SHARE_MASK, CARD_SHARE_48_SD);
+
+ mutex_lock(&host->host_mutex);
+ host->mrq = mrq;
+ mutex_unlock(&host->host_mutex);
+
+ if (mrq->data)
+ data_size = data->blocks * data->blksz;
+
+ if (!data_size) {
+ sd_send_cmd_get_rsp(host, cmd);
+ } else if (sd_rw_cmd(cmd) || sdio_extblock_cmd(cmd, data)) {
+ cmd->error = sd_rw_multi(host, mrq);
+ if (!host->using_cookie)
+ sdmmc_post_req(host->mmc, host->mrq, 0);
+
+ if (mmc_op_multi(cmd->opcode) && mrq->stop)
+ sd_send_cmd_get_rsp(host, mrq->stop);
+ } else {
+ sd_normal_rw(host, mrq);
+ }
+
+ if (mrq->data) {
+ if (cmd->error || data->error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blocks * data->blksz;
+ }
+
+ mutex_unlock(&pcr->pcr_mutex);
+
+finish:
+ if (cmd->error) {
+		dev_dbg(sdmmc_dev(host), "CMD %d 0x%08x error (%d)\n",
+ cmd->opcode, cmd->arg, cmd->error);
+ }
+
+ mutex_lock(&host->host_mutex);
+ host->mrq = NULL;
+ mutex_unlock(&host->host_mutex);
+
+ mmc_request_done(mmc, mrq);
+}
+
+static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ mutex_lock(&host->host_mutex);
+ host->mrq = mrq;
+ mutex_unlock(&host->host_mutex);
+
+ if (sd_rw_cmd(mrq->cmd) || sdio_extblock_cmd(mrq->cmd, data))
+ host->using_cookie = sd_pre_dma_transfer(host, data, false);
+
+ queue_work(host->workq, &host->work);
+}
+
+static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
+ unsigned char bus_width)
+{
+ int err = 0;
+ u8 width[] = {
+ [MMC_BUS_WIDTH_1] = SD_BUS_WIDTH_1BIT,
+ [MMC_BUS_WIDTH_4] = SD_BUS_WIDTH_4BIT,
+ [MMC_BUS_WIDTH_8] = SD_BUS_WIDTH_8BIT,
+ };
+
+ if (bus_width <= MMC_BUS_WIDTH_8)
+ err = rtsx_pci_write_register(host->pcr, SD_CFG1,
+ 0x03, width[bus_width]);
+
+ return err;
+}
+
+static int sd_power_on(struct realtek_pci_sdmmc *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+
+ if (host->power_state == SDMMC_POWER_ON)
+ return 0;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE,
+ CARD_SHARE_MASK, CARD_SHARE_48_SD);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN,
+ SD_CLK_EN, SD_CLK_EN);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_card_pull_ctl_enable(pcr, RTSX_SD_CARD);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_card_power_on(pcr, RTSX_SD_CARD);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+ if (err < 0)
+ return err;
+
+ host->power_state = SDMMC_POWER_ON;
+ return 0;
+}
+
+static int sd_power_off(struct realtek_pci_sdmmc *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+
+ host->power_state = SDMMC_POWER_OFF;
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, 0);
+
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
+ if (err < 0)
+ return err;
+
+ return rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
+}
+
+static int sd_set_power_mode(struct realtek_pci_sdmmc *host,
+ unsigned char power_mode)
+{
+ int err;
+
+ if (power_mode == MMC_POWER_OFF)
+ err = sd_power_off(host);
+ else
+ err = sd_power_on(host);
+
+ return err;
+}
+
+static int sd_set_timing(struct realtek_pci_sdmmc *host, unsigned char timing)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err = 0;
+
+ rtsx_pci_init_cmd(pcr);
+
+ switch (timing) {
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_SDR50:
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1,
+ 0x0C | SD_ASYNC_FIFO_NOT_RST,
+ SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0);
+ break;
+
+ case MMC_TIMING_MMC_DDR52:
+ case MMC_TIMING_UHS_DDR50:
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1,
+ 0x0C | SD_ASYNC_FIFO_NOT_RST,
+ SD_DDR_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_PUSH_POINT_CTL,
+ DDR_VAR_TX_CMD_DAT, DDR_VAR_TX_CMD_DAT);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL,
+ DDR_VAR_RX_DAT | DDR_VAR_RX_CMD,
+ DDR_VAR_RX_DAT | DDR_VAR_RX_CMD);
+ break;
+
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1,
+ 0x0C, SD_20_MODE);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_PUSH_POINT_CTL,
+ SD20_TX_SEL_MASK, SD20_TX_14_AHEAD);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL,
+ SD20_RX_SEL_MASK, SD20_RX_14_DELAY);
+ break;
+
+ default:
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD_CFG1, 0x0C, SD_20_MODE);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL,
+ SD20_RX_SEL_MASK, SD20_RX_POS_EDGE);
+ break;
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 100);
+
+ return err;
+}
+
+static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+
+ if (host->eject)
+ return;
+
+ if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD))
+ return;
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ sd_set_bus_width(host, ios->bus_width);
+ sd_set_power_mode(host, ios->power_mode);
+ sd_set_timing(host, ios->timing);
+
+ host->vpclk = false;
+ host->double_clk = true;
+
+ switch (ios->timing) {
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_SDR50:
+ host->ssc_depth = RTSX_SSC_DEPTH_2M;
+ host->vpclk = true;
+ host->double_clk = false;
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_UHS_SDR25:
+ host->ssc_depth = RTSX_SSC_DEPTH_1M;
+ break;
+ default:
+ host->ssc_depth = RTSX_SSC_DEPTH_500K;
+ break;
+ }
+
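+	/* Clocks at or below 1 MHz use initial mode (SD clock divided by
+	 * 128) for card identification; see sd_enable_initial_mode().
+	 */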
+	host->initial_mode = (ios->clock <= 1000000);
+
+ host->clock = ios->clock;
+ rtsx_pci_switch_clock(pcr, ios->clock, host->ssc_depth,
+ host->initial_mode, host->double_clk, host->vpclk);
+
+ mutex_unlock(&pcr->pcr_mutex);
+}
+
+static int sdmmc_get_ro(struct mmc_host *mmc)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+ int ro = 0;
+ u32 val;
+
+ if (host->eject)
+ return -ENOMEDIUM;
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ /* Check SD mechanical write-protect switch */
+ val = rtsx_pci_readl(pcr, RTSX_BIPR);
+ dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val);
+ if (val & SD_WRITE_PROTECT)
+ ro = 1;
+
+ mutex_unlock(&pcr->pcr_mutex);
+
+ return ro;
+}
+
+static int sdmmc_get_cd(struct mmc_host *mmc)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+ int cd = 0;
+ u32 val;
+
+ if (host->eject)
+ return cd;
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ /* Check SD card detect */
+ val = rtsx_pci_card_exist(pcr);
+ dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val);
+ if (val & SD_EXIST)
+ cd = 1;
+
+ mutex_unlock(&pcr->pcr_mutex);
+
+ return cd;
+}
+
+static int sd_wait_voltage_stable_1(struct realtek_pci_sdmmc *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+ u8 stat;
+
+	/* Refer to the Signal Voltage Switch Sequence in the SD spec.
+	 * Wait for a period of time so that the card can drive SD_CMD and
+	 * SD_DAT[3:0] low after sending back the CMD11 response.
+	 */
+ mdelay(1);
+
+	/* SD_CMD and SD_DAT[3:0] should be driven low by the card.
+	 * If any of them is not low, abort the voltage switch sequence.
+	 */
+ err = rtsx_pci_read_register(pcr, SD_BUS_STAT, &stat);
+ if (err < 0)
+ return err;
+
+ if (stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS))
+ return -EINVAL;
+
+	/* Stop toggling the SD clock */
+ err = rtsx_pci_write_register(pcr, SD_BUS_STAT,
+ 0xFF, SD_CLK_FORCE_STOP);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int sd_wait_voltage_stable_2(struct realtek_pci_sdmmc *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+ u8 stat, mask, val;
+
+	/* Wait for the card's 1.8V regulator output to stabilize */
+ msleep(50);
+
+ /* Toggle SD clock again */
+ err = rtsx_pci_write_register(pcr, SD_BUS_STAT, 0xFF, SD_CLK_TOGGLE_EN);
+ if (err < 0)
+ return err;
+
+	/* Wait for a period of time so that the card can drive
+	 * SD_DAT[3:0] high at 1.8V.
+	 */
+ msleep(20);
+
+ /* SD_CMD, SD_DAT[3:0] should be pulled high by host */
+ err = rtsx_pci_read_register(pcr, SD_BUS_STAT, &stat);
+ if (err < 0)
+ return err;
+
+ mask = SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS;
+ val = SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS;
+ if ((stat & mask) != val) {
+ dev_dbg(sdmmc_dev(host),
+ "%s: SD_BUS_STAT = 0x%x\n", __func__, stat);
+ rtsx_pci_write_register(pcr, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
+ rtsx_pci_write_register(pcr, CARD_CLK_EN, 0xFF, 0);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+ int err = 0;
+ u8 voltage;
+
+ dev_dbg(sdmmc_dev(host), "%s: signal_voltage = %d\n",
+ __func__, ios->signal_voltage);
+
+ if (host->eject)
+ return -ENOMEDIUM;
+
+ err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD);
+ if (err)
+ return err;
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ voltage = OUTPUT_3V3;
+ else
+ voltage = OUTPUT_1V8;
+
+ if (voltage == OUTPUT_1V8) {
+ err = sd_wait_voltage_stable_1(host);
+ if (err < 0)
+ goto out;
+ }
+
+ err = rtsx_pci_switch_output_voltage(pcr, voltage);
+ if (err < 0)
+ goto out;
+
+ if (voltage == OUTPUT_1V8) {
+ err = sd_wait_voltage_stable_2(host);
+ if (err < 0)
+ goto out;
+ }
+
+out:
+	/* Stop toggling the SD clock while idle */
+ err = rtsx_pci_write_register(pcr, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
+
+ mutex_unlock(&pcr->pcr_mutex);
+
+ return err;
+}
+
+static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+ int err = 0;
+
+ if (host->eject)
+ return -ENOMEDIUM;
+
+ err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD);
+ if (err)
+ return err;
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ /* Set initial TX phase */
+ switch (mmc->ios.timing) {
+ case MMC_TIMING_UHS_SDR104:
+ err = sd_change_phase(host, SDR104_TX_PHASE(pcr), false);
+ break;
+
+ case MMC_TIMING_UHS_SDR50:
+ err = sd_change_phase(host, SDR50_TX_PHASE(pcr), false);
+ break;
+
+ case MMC_TIMING_UHS_DDR50:
+ err = sd_change_phase(host, DDR50_TX_PHASE(pcr), false);
+ break;
+
+ default:
+ err = 0;
+ }
+
+ if (err)
+ goto out;
+
+ /* Tuning RX phase */
+ if ((mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
+ (mmc->ios.timing == MMC_TIMING_UHS_SDR50))
+ err = sd_tuning_rx(host, opcode);
+ else if (mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+ err = sd_change_phase(host, DDR50_RX_PHASE(pcr), true);
+
+out:
+ mutex_unlock(&pcr->pcr_mutex);
+
+ return err;
+}
+
+static const struct mmc_host_ops realtek_pci_sdmmc_ops = {
+ .pre_req = sdmmc_pre_req,
+ .post_req = sdmmc_post_req,
+ .request = sdmmc_request,
+ .set_ios = sdmmc_set_ios,
+ .get_ro = sdmmc_get_ro,
+ .get_cd = sdmmc_get_cd,
+ .start_signal_voltage_switch = sdmmc_switch_voltage,
+ .execute_tuning = sdmmc_execute_tuning,
+};
+
+static void init_extra_caps(struct realtek_pci_sdmmc *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ struct rtsx_pcr *pcr = host->pcr;
+
+ dev_dbg(sdmmc_dev(host), "pcr->extra_caps = 0x%x\n", pcr->extra_caps);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50)
+ mmc->caps |= MMC_CAP_UHS_SDR50;
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ mmc->caps |= MMC_CAP_UHS_SDR104;
+ if (pcr->extra_caps & EXTRA_CAPS_SD_DDR50)
+ mmc->caps |= MMC_CAP_UHS_DDR50;
+ if (pcr->extra_caps & EXTRA_CAPS_MMC_HSDDR)
+ mmc->caps |= MMC_CAP_1_8V_DDR;
+ if (pcr->extra_caps & EXTRA_CAPS_MMC_8BIT)
+ mmc->caps |= MMC_CAP_8_BIT_DATA;
+}
+
+static void realtek_init_host(struct realtek_pci_sdmmc *host)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ mmc->f_min = 250000;
+ mmc->f_max = 208000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST |
+ MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
+ mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE;
+ mmc->max_current_330 = 400;
+ mmc->max_current_180 = 800;
+ mmc->ops = &realtek_pci_sdmmc_ops;
+
+ init_extra_caps(host);
+
+ mmc->max_segs = 256;
+ mmc->max_seg_size = 65536;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = 65535;
+ mmc->max_req_size = 524288;
+}
+
+static void rtsx_pci_sdmmc_card_event(struct platform_device *pdev)
+{
+ struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
+
+ host->cookie = -1;
+ mmc_detect_change(host->mmc, 0);
+}
+
+static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct realtek_pci_sdmmc *host;
+ struct rtsx_pcr *pcr;
+ struct pcr_handle *handle = pdev->dev.platform_data;
+
+ if (!handle)
+ return -ENXIO;
+
+ pcr = handle->pcr;
+ if (!pcr)
+ return -ENXIO;
+
+ dev_dbg(&(pdev->dev), ": Realtek PCI-E SDMMC controller found\n");
+
+ mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ host = mmc_priv(mmc);
+ host->workq = create_singlethread_workqueue(SDMMC_WORKQ_NAME);
+ if (!host->workq) {
+ mmc_free_host(mmc);
+ return -ENOMEM;
+ }
+ host->pcr = pcr;
+ host->mmc = mmc;
+ host->pdev = pdev;
+ host->cookie = -1;
+ host->power_state = SDMMC_POWER_OFF;
+ INIT_WORK(&host->work, sd_request);
+ platform_set_drvdata(pdev, host);
+ pcr->slots[RTSX_SD_CARD].p_dev = pdev;
+ pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event;
+
+ mutex_init(&host->host_mutex);
+
+ realtek_init_host(host);
+
+ mmc_add_host(mmc);
+
+ return 0;
+}
+
+static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
+{
+ struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
+ struct rtsx_pcr *pcr;
+ struct mmc_host *mmc;
+
+ if (!host)
+ return 0;
+
+ pcr = host->pcr;
+ pcr->slots[RTSX_SD_CARD].p_dev = NULL;
+ pcr->slots[RTSX_SD_CARD].card_event = NULL;
+ mmc = host->mmc;
+
+ cancel_work_sync(&host->work);
+
+ mutex_lock(&host->host_mutex);
+ if (host->mrq) {
+ dev_dbg(&(pdev->dev),
+ "%s: Controller removed during transfer\n",
+ mmc_hostname(mmc));
+
+ rtsx_pci_complete_unfinished_transfer(pcr);
+
+ host->mrq->cmd->error = -ENOMEDIUM;
+ if (host->mrq->stop)
+ host->mrq->stop->error = -ENOMEDIUM;
+ mmc_request_done(mmc, host->mrq);
+ }
+ mutex_unlock(&host->host_mutex);
+
+ mmc_remove_host(mmc);
+ host->eject = true;
+
+ flush_workqueue(host->workq);
+ destroy_workqueue(host->workq);
+ host->workq = NULL;
+
+ mmc_free_host(mmc);
+
+ dev_dbg(&(pdev->dev),
+ ": Realtek PCI-E SDMMC controller has been removed\n");
+
+ return 0;
+}
+
+static struct platform_device_id rtsx_pci_sdmmc_ids[] = {
+ {
+ .name = DRV_NAME_RTSX_PCI_SDMMC,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, rtsx_pci_sdmmc_ids);
+
+static struct platform_driver rtsx_pci_sdmmc_driver = {
+ .probe = rtsx_pci_sdmmc_drv_probe,
+ .remove = rtsx_pci_sdmmc_drv_remove,
+ .id_table = rtsx_pci_sdmmc_ids,
+ .driver = {
+ .name = DRV_NAME_RTSX_PCI_SDMMC,
+ },
+};
+module_platform_driver(rtsx_pci_sdmmc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
+MODULE_DESCRIPTION("Realtek PCI-E SD/MMC Card Host Driver");
diff --git a/kernel/drivers/mmc/host/rtsx_usb_sdmmc.c b/kernel/drivers/mmc/host/rtsx_usb_sdmmc.c
new file mode 100644
index 000000000..88af827e0
--- /dev/null
+++ b/kernel/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -0,0 +1,1463 @@
+/* Realtek USB SD/MMC Card Interface driver
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Roger Tseng <rogerable@realtek.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/card.h>
+#include <linux/scatterlist.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mfd/rtsx_usb.h>
+#include <asm/unaligned.h>
+
+#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
+ defined(CONFIG_MMC_REALTEK_USB_MODULE))
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#define RTSX_USB_USE_LEDS_CLASS
+#endif
+
+struct rtsx_usb_sdmmc {
+ struct platform_device *pdev;
+ struct rtsx_ucr *ucr;
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+
+ struct mutex host_mutex;
+
+ u8 ssc_depth;
+ unsigned int clock;
+ bool vpclk;
+ bool double_clk;
+ bool host_removal;
+ bool card_exist;
+ bool initial_mode;
+ bool ddr_mode;
+
+ unsigned char power_mode;
+
+#ifdef RTSX_USB_USE_LEDS_CLASS
+ struct led_classdev led;
+ char led_name[32];
+ struct work_struct led_work;
+#endif
+};
+
+static inline struct device *sdmmc_dev(struct rtsx_usb_sdmmc *host)
+{
+ return &(host->pdev->dev);
+}
+
+static inline void sd_clear_error(struct rtsx_usb_sdmmc *host)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ rtsx_usb_ep0_write_register(ucr, CARD_STOP,
+ SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ rtsx_usb_clear_dma_err(ucr);
+ rtsx_usb_clear_fsm_err(ucr);
+}
+
+#ifdef DEBUG
+static void sd_print_debug_regs(struct rtsx_usb_sdmmc *host)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ u8 val = 0;
+
+ rtsx_usb_ep0_read_register(ucr, SD_STAT1, &val);
+ dev_dbg(sdmmc_dev(host), "SD_STAT1: 0x%x\n", val);
+ rtsx_usb_ep0_read_register(ucr, SD_STAT2, &val);
+ dev_dbg(sdmmc_dev(host), "SD_STAT2: 0x%x\n", val);
+ rtsx_usb_ep0_read_register(ucr, SD_BUS_STAT, &val);
+ dev_dbg(sdmmc_dev(host), "SD_BUS_STAT: 0x%x\n", val);
+}
+#else
+#define sd_print_debug_regs(host)
+#endif /* DEBUG */
+
+static int sd_read_data(struct rtsx_usb_sdmmc *host, struct mmc_command *cmd,
+ u16 byte_cnt, u8 *buf, int buf_len, int timeout)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ int err;
+ u8 trans_mode;
+
+ if (!buf)
+ buf_len = 0;
+
+ rtsx_usb_init_cmd(ucr);
+ if (cmd != NULL) {
+		dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD%d\n", __func__,
+			cmd->opcode);
+ if (cmd->opcode == MMC_SEND_TUNING_BLOCK)
+ trans_mode = SD_TM_AUTO_TUNING;
+ else
+ trans_mode = SD_TM_NORMAL_READ;
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ SD_CMD0, 0xFF, (u8)(cmd->opcode) | 0x40);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ SD_CMD1, 0xFF, (u8)(cmd->arg >> 24));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ SD_CMD2, 0xFF, (u8)(cmd->arg >> 16));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ SD_CMD3, 0xFF, (u8)(cmd->arg >> 8));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ SD_CMD4, 0xFF, (u8)cmd->arg);
+ } else {
+ trans_mode = SD_TM_AUTO_READ_3;
+ }
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_H,
+ 0xFF, (u8)(byte_cnt >> 8));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ if (trans_mode != SD_TM_AUTO_TUNING)
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER,
+ 0xFF, trans_mode | SD_TRANSFER_START);
+ rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ if (cmd != NULL) {
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD1, 0, 0);
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD2, 0, 0);
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD3, 0, 0);
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD4, 0, 0);
+ }
+
+ err = rtsx_usb_send_cmd(ucr, MODE_CR, timeout);
+ if (err) {
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_usb_send_cmd failed (err = %d)\n", err);
+ return err;
+ }
+
+ err = rtsx_usb_get_rsp(ucr, !cmd ? 1 : 5, timeout);
+ if (err || (ucr->rsp_buf[0] & SD_TRANSFER_ERR)) {
+ sd_print_debug_regs(host);
+
+ if (!err) {
+ dev_dbg(sdmmc_dev(host),
+ "Transfer failed (SD_TRANSFER = %02x)\n",
+ ucr->rsp_buf[0]);
+ err = -EIO;
+ } else {
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_usb_get_rsp failed (err = %d)\n", err);
+ }
+
+ return err;
+ }
+
+ if (cmd != NULL) {
+ cmd->resp[0] = get_unaligned_be32(ucr->rsp_buf + 1);
+ dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n",
+ cmd->resp[0]);
+ }
+
+ if (buf && buf_len) {
+ /* 2-byte aligned part */
+ err = rtsx_usb_read_ppbuf(ucr, buf, byte_cnt - (byte_cnt % 2));
+ if (err) {
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_usb_read_ppbuf failed (err = %d)\n", err);
+ return err;
+ }
+
+ /* unaligned byte */
+ if (byte_cnt % 2)
+ return rtsx_usb_read_register(ucr,
+ PPBUF_BASE2 + byte_cnt,
+ buf + byte_cnt - 1);
+ }
+
+ return 0;
+}
+
+static int sd_write_data(struct rtsx_usb_sdmmc *host, struct mmc_command *cmd,
+ u16 byte_cnt, u8 *buf, int buf_len, int timeout)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ int err;
+ u8 trans_mode;
+
+ if (!buf)
+ buf_len = 0;
+
+ if (buf && buf_len) {
+ err = rtsx_usb_write_ppbuf(ucr, buf, buf_len);
+ if (err) {
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_usb_write_ppbuf failed (err = %d)\n",
+ err);
+ return err;
+ }
+ }
+
+ trans_mode = (cmd != NULL) ? SD_TM_AUTO_WRITE_2 : SD_TM_AUTO_WRITE_3;
+ rtsx_usb_init_cmd(ucr);
+
+ if (cmd != NULL) {
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD%d\n", __func__,
+ cmd->opcode);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ SD_CMD0, 0xFF, (u8)(cmd->opcode) | 0x40);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ SD_CMD1, 0xFF, (u8)(cmd->arg >> 24));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ SD_CMD2, 0xFF, (u8)(cmd->arg >> 16));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ SD_CMD3, 0xFF, (u8)(cmd->arg >> 8));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ SD_CMD4, 0xFF, (u8)cmd->arg);
+ }
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_H,
+ 0xFF, (u8)(byte_cnt >> 8));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ trans_mode | SD_TRANSFER_START);
+ rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ if (cmd != NULL) {
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD1, 0, 0);
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD2, 0, 0);
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD3, 0, 0);
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD4, 0, 0);
+ }
+
+ err = rtsx_usb_send_cmd(ucr, MODE_CR, timeout);
+ if (err) {
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_usb_send_cmd failed (err = %d)\n", err);
+ return err;
+ }
+
+ err = rtsx_usb_get_rsp(ucr, !cmd ? 1 : 5, timeout);
+ if (err) {
+ sd_print_debug_regs(host);
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_usb_get_rsp failed (err = %d)\n", err);
+ return err;
+ }
+
+ if (cmd != NULL) {
+ cmd->resp[0] = get_unaligned_be32(ucr->rsp_buf + 1);
+ dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n",
+ cmd->resp[0]);
+ }
+
+ return 0;
+}
+
+static void sd_send_cmd_get_rsp(struct rtsx_usb_sdmmc *host,
+ struct mmc_command *cmd)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ u8 cmd_idx = (u8)cmd->opcode;
+ u32 arg = cmd->arg;
+ int err = 0;
+ int timeout = 100;
+ int i;
+ u8 *ptr;
+ int stat_idx = 0;
+ int len = 2;
+ u8 rsp_type;
+
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
+ __func__, cmd_idx, arg);
+
+ /* Response type:
+ * R0
+ * R1, R5, R6, R7
+ * R1b
+ * R2
+ * R3, R4
+ */
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ rsp_type = SD_RSP_TYPE_R0;
+ break;
+ case MMC_RSP_R1:
+ rsp_type = SD_RSP_TYPE_R1;
+ break;
+ case MMC_RSP_R1 & ~MMC_RSP_CRC:
+ rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
+ break;
+ case MMC_RSP_R1B:
+ rsp_type = SD_RSP_TYPE_R1b;
+ break;
+ case MMC_RSP_R2:
+ rsp_type = SD_RSP_TYPE_R2;
+ break;
+ case MMC_RSP_R3:
+ rsp_type = SD_RSP_TYPE_R3;
+ break;
+ default:
+		dev_dbg(sdmmc_dev(host), "cmd->flags is not valid\n");
+ err = -EINVAL;
+ goto out;
+ }
+
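+	/* R1b responses keep the card busy, so allow a longer timeout */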
+ if (rsp_type == SD_RSP_TYPE_R1b)
+ timeout = 3000;
+
+ if (cmd->opcode == SD_SWITCH_VOLTAGE) {
+ err = rtsx_usb_write_register(ucr, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP,
+ SD_CLK_TOGGLE_EN);
+ if (err)
+ goto out;
+ }
+
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD0, 0xFF, 0x40 | cmd_idx);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD1, 0xFF, (u8)(arg >> 24));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD2, 0xFF, (u8)(arg >> 16));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD3, 0xFF, (u8)(arg >> 8));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD4, 0xFF, (u8)arg);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF, rsp_type);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER,
+ 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+ rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END | SD_STAT_IDLE,
+ SD_TRANSFER_END | SD_STAT_IDLE);
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ /* Read data from ping-pong buffer */
+ for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++)
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, (u16)i, 0, 0);
+ stat_idx = 16;
+ } else if (rsp_type != SD_RSP_TYPE_R0) {
+ /* Read data from SD_CMDx registers */
+ for (i = SD_CMD0; i <= SD_CMD4; i++)
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, (u16)i, 0, 0);
+ stat_idx = 5;
+ }
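+	/* Total response length: one byte of CHECK_REG_CMD status, stat_idx
+	 * bytes of response data, and one trailing byte for SD_STAT1.
+	 */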
+ len += stat_idx;
+
+ rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_STAT1, 0, 0);
+
+ err = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
+ if (err) {
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_usb_send_cmd error (err = %d)\n", err);
+ goto out;
+ }
+
+ err = rtsx_usb_get_rsp(ucr, len, timeout);
+ if (err || (ucr->rsp_buf[0] & SD_TRANSFER_ERR)) {
+ sd_print_debug_regs(host);
+ sd_clear_error(host);
+
+ if (!err) {
+ dev_dbg(sdmmc_dev(host),
+ "Transfer failed (SD_TRANSFER = %02x)\n",
+ ucr->rsp_buf[0]);
+ err = -EIO;
+ } else {
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_usb_get_rsp failed (err = %d)\n", err);
+ }
+
+ goto out;
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R0) {
+ err = 0;
+ goto out;
+ }
+
+ /* Skip result of CHECK_REG_CMD */
+ ptr = ucr->rsp_buf + 1;
+
+	/* Check the Start and Transmission bits of the response */
+ if ((ptr[0] & 0xC0) != 0) {
+ err = -EILSEQ;
+ dev_dbg(sdmmc_dev(host), "Invalid response bit\n");
+ goto out;
+ }
+
+ /* Check CRC7 */
+ if (!(rsp_type & SD_NO_CHECK_CRC7)) {
+ if (ptr[stat_idx] & SD_CRC7_ERR) {
+ err = -EILSEQ;
+ dev_dbg(sdmmc_dev(host), "CRC7 error\n");
+ goto out;
+ }
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
+		/*
+		 * The controller drops the last byte {CRC-7, end bit 1'b1} of
+		 * an R2 response. Assign a dummy CRC of 0 plus the end bit to
+		 * that byte (ptr[16], which later becomes the LSB of resp[3]).
+		 */
+ ptr[16] = 1;
+
+ for (i = 0; i < 4; i++) {
+ cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
+ dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
+ i, cmd->resp[i]);
+ }
+ } else {
+ cmd->resp[0] = get_unaligned_be32(ptr + 1);
+ dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n",
+ cmd->resp[0]);
+ }
+
+out:
+ cmd->error = err;
+}
+
+static int sd_rw_multi(struct rtsx_usb_sdmmc *host, struct mmc_request *mrq)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ struct mmc_data *data = mrq->data;
+ int read = (data->flags & MMC_DATA_READ) ? 1 : 0;
+ u8 cfg2, trans_mode;
+ int err;
+ u8 flag;
+ size_t data_len = data->blksz * data->blocks;
+ unsigned int pipe;
+
+ if (read) {
+ dev_dbg(sdmmc_dev(host), "%s: read %zu bytes\n",
+ __func__, data_len);
+ cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0;
+ trans_mode = SD_TM_AUTO_READ_3;
+ } else {
+ dev_dbg(sdmmc_dev(host), "%s: write %zu bytes\n",
+ __func__, data_len);
+ cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0;
+ trans_mode = SD_TM_AUTO_WRITE_3;
+ }
+
+ rtsx_usb_init_cmd(ucr);
+
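+	/* The byte count per block is fixed at 512 (SD_BYTE_CNT = 0x0200) */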
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x00);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 0x02);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_L,
+ 0xFF, (u8)data->blocks);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_H,
+ 0xFF, (u8)(data->blocks >> 8));
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC3,
+ 0xFF, (u8)(data_len >> 24));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC2,
+ 0xFF, (u8)(data_len >> 16));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC1,
+ 0xFF, (u8)(data_len >> 8));
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC0,
+ 0xFF, (u8)data_len);
+ if (read) {
+ flag = MODE_CDIR;
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_CTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_FROM_CARD | DMA_EN | DMA_512);
+ } else {
+ flag = MODE_CDOR;
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_CTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_TO_CARD | DMA_EN | DMA_512);
+ }
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ trans_mode | SD_TRANSFER_START);
+ rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ err = rtsx_usb_send_cmd(ucr, flag, 100);
+ if (err)
+ return err;
+
+ if (read)
+ pipe = usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN);
+ else
+ pipe = usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT);
+
+ err = rtsx_usb_transfer_data(ucr, pipe, data->sg, data_len,
+ data->sg_len, NULL, 10000);
+ if (err) {
+		dev_dbg(sdmmc_dev(host), "rtsx_usb_transfer_data error %d\n",
+			err);
+ sd_clear_error(host);
+ return err;
+ }
+
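+	/* Collect the batched-command status once the bulk data stage is done */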
+ return rtsx_usb_get_rsp(ucr, 1, 2000);
+}
+
+static inline void sd_enable_initial_mode(struct rtsx_usb_sdmmc *host)
+{
+ rtsx_usb_write_register(host->ucr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
+}
+
+static inline void sd_disable_initial_mode(struct rtsx_usb_sdmmc *host)
+{
+ rtsx_usb_write_register(host->ucr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
+}
+
+static void sd_normal_rw(struct rtsx_usb_sdmmc *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ u8 *buf;
+
+ buf = kzalloc(data->blksz, GFP_NOIO);
+ if (!buf) {
+ cmd->error = -ENOMEM;
+ return;
+ }
+
+ if (data->flags & MMC_DATA_READ) {
+ if (host->initial_mode)
+ sd_disable_initial_mode(host);
+
+ cmd->error = sd_read_data(host, cmd, (u16)data->blksz, buf,
+ data->blksz, 200);
+
+ if (host->initial_mode)
+ sd_enable_initial_mode(host);
+
+ sg_copy_from_buffer(data->sg, data->sg_len, buf, data->blksz);
+ } else {
+ sg_copy_to_buffer(data->sg, data->sg_len, buf, data->blksz);
+
+ cmd->error = sd_write_data(host, cmd, (u16)data->blksz, buf,
+ data->blksz, 200);
+ }
+
+ kfree(buf);
+}
+
+static int sd_change_phase(struct rtsx_usb_sdmmc *host, u8 sample_point, int tx)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ int err;
+
+ dev_dbg(sdmmc_dev(host), "%s: %s sample_point = %d\n",
+ __func__, tx ? "TX" : "RX", sample_point);
+
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, CLK_CHANGE);
+
+ if (tx)
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ 0x0F, sample_point);
+ else
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ 0x0F, sample_point);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, 0);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_RST, 0);
+
+ err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static inline u32 get_phase_point(u32 phase_map, unsigned int idx)
+{
+ idx &= MAX_PHASE;
+ return phase_map & (1 << idx);
+}
+
+static int get_phase_len(u32 phase_map, unsigned int idx)
+{
+ int i;
+
+ for (i = 0; i < MAX_PHASE + 1; i++) {
+ if (get_phase_point(phase_map, idx + i) == 0)
+ return i;
+ }
+ return MAX_PHASE + 1;
+}
+
+static u8 sd_search_final_phase(struct rtsx_usb_sdmmc *host, u32 phase_map)
+{
+ int start = 0, len = 0;
+ int start_final = 0, len_final = 0;
+ u8 final_phase = 0xFF;
+
+ if (phase_map == 0) {
+ dev_dbg(sdmmc_dev(host), "Phase: [map:%x]\n", phase_map);
+ return final_phase;
+ }
+
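+	/* As in the PCI variant: pick the midpoint of the longest run of
+	 * working sample points in the circular phase map.
+	 */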
+ while (start < MAX_PHASE + 1) {
+ len = get_phase_len(phase_map, start);
+ if (len_final < len) {
+ start_final = start;
+ len_final = len;
+ }
+ start += len ? len : 1;
+ }
+
+ final_phase = (start_final + len_final / 2) & MAX_PHASE;
+ dev_dbg(sdmmc_dev(host), "Phase: [map:%x] [maxlen:%d] [final:%d]\n",
+ phase_map, len_final, final_phase);
+
+ return final_phase;
+}
+
+static void sd_wait_data_idle(struct rtsx_usb_sdmmc *host)
+{
+ int err, i;
+ u8 val = 0;
+
+ for (i = 0; i < 100; i++) {
+ err = rtsx_usb_ep0_read_register(host->ucr,
+ SD_DATA_STATE, &val);
+ if (val & SD_DATA_IDLE)
+ return;
+
+ usleep_range(100, 1000);
+ }
+}
+
+static int sd_tuning_rx_cmd(struct rtsx_usb_sdmmc *host,
+ u8 opcode, u8 sample_point)
+{
+ int err;
+ struct mmc_command cmd = {0};
+
+ err = sd_change_phase(host, sample_point, 0);
+ if (err)
+ return err;
+
+ cmd.opcode = MMC_SEND_TUNING_BLOCK;
+ err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100);
+ if (err) {
+		/* Wait until the SD data lines are idle */
+ sd_wait_data_idle(host);
+ sd_clear_error(host);
+ return err;
+ }
+
+ return 0;
+}
+
+static void sd_tuning_phase(struct rtsx_usb_sdmmc *host,
+ u8 opcode, u16 *phase_map)
+{
+ int err, i;
+ u16 raw_phase_map = 0;
+
+ for (i = MAX_PHASE; i >= 0; i--) {
+ err = sd_tuning_rx_cmd(host, opcode, (u8)i);
+ if (!err)
+ raw_phase_map |= 1 << i;
+ }
+
+ if (phase_map)
+ *phase_map = raw_phase_map;
+}
+
+static int sd_tuning_rx(struct rtsx_usb_sdmmc *host, u8 opcode)
+{
+ int err, i;
+ u16 raw_phase_map[RX_TUNING_CNT] = {0}, phase_map;
+ u8 final_phase;
+
+	/* Set a fixed default TX phase */
+ err = sd_change_phase(host, 0x01, 1);
+ if (err) {
+ dev_dbg(sdmmc_dev(host), "TX phase setting failed\n");
+ return err;
+ }
+
+	/* Tune the RX phase */
+ for (i = 0; i < RX_TUNING_CNT; i++) {
+ sd_tuning_phase(host, opcode, &(raw_phase_map[i]));
+
+ if (raw_phase_map[i] == 0)
+ break;
+ }
+
+ phase_map = 0xFFFF;
+ for (i = 0; i < RX_TUNING_CNT; i++) {
+ dev_dbg(sdmmc_dev(host), "RX raw_phase_map[%d] = 0x%04x\n",
+ i, raw_phase_map[i]);
+ phase_map &= raw_phase_map[i];
+ }
+ dev_dbg(sdmmc_dev(host), "RX phase_map = 0x%04x\n", phase_map);
+
+ if (phase_map) {
+ final_phase = sd_search_final_phase(host, phase_map);
+ if (final_phase == 0xFF)
+ return -EINVAL;
+
+ err = sd_change_phase(host, final_phase, 0);
+ if (err)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sdmmc_get_ro(struct mmc_host *mmc)
+{
+ struct rtsx_usb_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_ucr *ucr = host->ucr;
+ int err;
+ u16 val;
+
+ if (host->host_removal)
+ return -ENOMEDIUM;
+
+ mutex_lock(&ucr->dev_mutex);
+
+	/* Read the card status to check the write-protect switch */
+ err = rtsx_usb_get_card_status(ucr, &val);
+
+ mutex_unlock(&ucr->dev_mutex);
+
+	/* Treat a failed status read as not write-protected */
+ if (err)
+ return 0;
+
+ if (val & SD_WP)
+ return 1;
+
+ return 0;
+}
+
+static int sdmmc_get_cd(struct mmc_host *mmc)
+{
+ struct rtsx_usb_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_ucr *ucr = host->ucr;
+ int err;
+ u16 val;
+
+ if (host->host_removal)
+ return -ENOMEDIUM;
+
+ mutex_lock(&ucr->dev_mutex);
+
+ /* Check SD card detect */
+ err = rtsx_usb_get_card_status(ucr, &val);
+
+ mutex_unlock(&ucr->dev_mutex);
+
+	/* Treat a failed status read as card absent */
+ if (err)
+ goto no_card;
+
+ if (val & SD_CD) {
+ host->card_exist = true;
+ return 1;
+ }
+
+no_card:
+ host->card_exist = false;
+ return 0;
+}
+
+static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct rtsx_usb_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_ucr *ucr = host->ucr;
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ unsigned int data_size = 0;
+
+ dev_dbg(sdmmc_dev(host), "%s\n", __func__);
+
+ if (host->host_removal) {
+ cmd->error = -ENOMEDIUM;
+ goto finish;
+ }
+
+	if (!host->card_exist) {
+ cmd->error = -ENOMEDIUM;
+ goto finish_detect_card;
+ }
+
+	/*
+	 * SDIO commands are not supported; reject them early to speed up
+	 * card identification.
+	 */
+ if (cmd->opcode == SD_IO_SEND_OP_COND ||
+ cmd->opcode == SD_IO_RW_DIRECT ||
+ cmd->opcode == SD_IO_RW_EXTENDED) {
+ cmd->error = -EINVAL;
+ goto finish;
+ }
+
+ mutex_lock(&ucr->dev_mutex);
+
+ mutex_lock(&host->host_mutex);
+ host->mrq = mrq;
+ mutex_unlock(&host->host_mutex);
+
+ if (mrq->data)
+ data_size = data->blocks * data->blksz;
+
+ if (!data_size) {
+ sd_send_cmd_get_rsp(host, cmd);
+ } else if ((!(data_size % 512) && cmd->opcode != MMC_SEND_EXT_CSD) ||
+ mmc_op_multi(cmd->opcode)) {
+ sd_send_cmd_get_rsp(host, cmd);
+
+ if (!cmd->error) {
+ sd_rw_multi(host, mrq);
+
+ if (mmc_op_multi(cmd->opcode) && mrq->stop) {
+ sd_send_cmd_get_rsp(host, mrq->stop);
+ rtsx_usb_write_register(ucr, MC_FIFO_CTL,
+ FIFO_FLUSH, FIFO_FLUSH);
+ }
+ }
+ } else {
+ sd_normal_rw(host, mrq);
+ }
+
+ if (mrq->data) {
+ if (cmd->error || data->error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blocks * data->blksz;
+ }
+
+ mutex_unlock(&ucr->dev_mutex);
+
+finish_detect_card:
+ if (cmd->error) {
+		/*
+		 * On error, re-detect the card to refresh the card-existence
+		 * state and to speed up removal handling on retries.
+		 */
+ sdmmc_get_cd(mmc);
+ dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error);
+ }
+
+finish:
+ mutex_lock(&host->host_mutex);
+ host->mrq = NULL;
+ mutex_unlock(&host->host_mutex);
+
+ mmc_request_done(mmc, mrq);
+}
+
+static int sd_set_bus_width(struct rtsx_usb_sdmmc *host,
+ unsigned char bus_width)
+{
+ int err = 0;
+ u8 width[] = {
+ [MMC_BUS_WIDTH_1] = SD_BUS_WIDTH_1BIT,
+ [MMC_BUS_WIDTH_4] = SD_BUS_WIDTH_4BIT,
+ [MMC_BUS_WIDTH_8] = SD_BUS_WIDTH_8BIT,
+ };
+
+ if (bus_width <= MMC_BUS_WIDTH_8)
+ err = rtsx_usb_write_register(host->ucr, SD_CFG1,
+ 0x03, width[bus_width]);
+
+ return err;
+}
+
+static int sd_pull_ctl_disable_lqfp48(struct rtsx_ucr *ucr)
+{
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5);
+
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+
+static int sd_pull_ctl_disable_qfn24(struct rtsx_ucr *ucr)
+{
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x56);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59);
+
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+
+static int sd_pull_ctl_enable_lqfp48(struct rtsx_ucr *ucr)
+{
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xAA);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0xAA);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0xA9);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5);
+
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+
+static int sd_pull_ctl_enable_qfn24(struct rtsx_ucr *ucr)
+{
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xA5);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x9A);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0xA5);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x9A);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x65);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x5A);
+
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+
+static int sd_power_on(struct rtsx_usb_sdmmc *host)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ int err;
+
+ dev_dbg(sdmmc_dev(host), "%s\n", __func__);
+ rtsx_usb_init_cmd(ucr);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SHARE_MODE,
+ CARD_SHARE_MASK, CARD_SHARE_SD);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_EN,
+ SD_CLK_EN, SD_CLK_EN);
+ err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
+ if (err)
+ return err;
+
+ if (CHECK_PKG(ucr, LQFP48))
+ err = sd_pull_ctl_enable_lqfp48(ucr);
+ else
+ err = sd_pull_ctl_enable_qfn24(ucr);
+ if (err)
+ return err;
+
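+	/* Two-stage power-on: apply partial power, let the rail ramp, then
+	 * switch to full power with the LDO on and enable the SD output.
+	 */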
+ err = rtsx_usb_write_register(ucr, CARD_PWR_CTL,
+ POWER_MASK, PARTIAL_POWER_ON);
+ if (err)
+ return err;
+
+ usleep_range(800, 1000);
+
+ rtsx_usb_init_cmd(ucr);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
+ POWER_MASK|LDO3318_PWR_MASK, POWER_ON|LDO_ON);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE,
+ SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
+}
+
+static int sd_power_off(struct rtsx_usb_sdmmc *host)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ int err;
+
+ dev_dbg(sdmmc_dev(host), "%s\n", __func__);
+ rtsx_usb_init_cmd(ucr);
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, 0);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, 0);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
+ POWER_MASK, POWER_OFF);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
+ POWER_MASK|LDO3318_PWR_MASK, POWER_OFF|LDO_SUSPEND);
+
+ err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
+ if (err)
+ return err;
+
+ if (CHECK_PKG(ucr, LQFP48))
+ return sd_pull_ctl_disable_lqfp48(ucr);
+ return sd_pull_ctl_disable_qfn24(ucr);
+}
+
+static int sd_set_power_mode(struct rtsx_usb_sdmmc *host,
+ unsigned char power_mode)
+{
+ int err;
+
+ if (power_mode != MMC_POWER_OFF)
+ power_mode = MMC_POWER_ON;
+
+ if (power_mode == host->power_mode)
+ return 0;
+
+ if (power_mode == MMC_POWER_OFF) {
+ err = sd_power_off(host);
+ pm_runtime_put(sdmmc_dev(host));
+ } else {
+ pm_runtime_get_sync(sdmmc_dev(host));
+ err = sd_power_on(host);
+ }
+
+ if (!err)
+ host->power_mode = power_mode;
+
+ return err;
+}
+
+static int sd_set_timing(struct rtsx_usb_sdmmc *host,
+ unsigned char timing, bool *ddr_mode)
+{
+ struct rtsx_ucr *ucr = host->ucr;
+ int err;
+
+ *ddr_mode = false;
+
+ rtsx_usb_init_cmd(ucr);
+
+ switch (timing) {
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_SDR50:
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1,
+ 0x0C | SD_ASYNC_FIFO_RST,
+ SD_30_MODE | SD_ASYNC_FIFO_RST);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ break;
+
+ case MMC_TIMING_UHS_DDR50:
+ *ddr_mode = true;
+
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1,
+ 0x0C | SD_ASYNC_FIFO_RST,
+ SD_DDR_MODE | SD_ASYNC_FIFO_RST);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PUSH_POINT_CTL,
+ DDR_VAR_TX_CMD_DAT, DDR_VAR_TX_CMD_DAT);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL,
+ DDR_VAR_RX_DAT | DDR_VAR_RX_CMD,
+ DDR_VAR_RX_DAT | DDR_VAR_RX_CMD);
+ break;
+
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1,
+ 0x0C, SD_20_MODE);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PUSH_POINT_CTL,
+ SD20_TX_SEL_MASK, SD20_TX_14_AHEAD);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL,
+ SD20_RX_SEL_MASK, SD20_RX_14_DELAY);
+ break;
+
+ default:
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ SD_CFG1, 0x0C, SD_20_MODE);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
+ SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL,
+ SD20_RX_SEL_MASK, SD20_RX_POS_EDGE);
+ break;
+ }
+
+ err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
+
+ return err;
+}
+
+static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct rtsx_usb_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_ucr *ucr = host->ucr;
+
+ dev_dbg(sdmmc_dev(host), "%s\n", __func__);
+ mutex_lock(&ucr->dev_mutex);
+
+ if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) {
+ mutex_unlock(&ucr->dev_mutex);
+ return;
+ }
+
+ sd_set_power_mode(host, ios->power_mode);
+ sd_set_bus_width(host, ios->bus_width);
+ sd_set_timing(host, ios->timing, &host->ddr_mode);
+
+ host->vpclk = false;
+ host->double_clk = true;
+
+ switch (ios->timing) {
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_SDR50:
+ host->ssc_depth = SSC_DEPTH_2M;
+ host->vpclk = true;
+ host->double_clk = false;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_UHS_SDR25:
+ host->ssc_depth = SSC_DEPTH_1M;
+ break;
+ default:
+ host->ssc_depth = SSC_DEPTH_512K;
+ break;
+ }
+
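+	/* At or below 1 MHz, use initial (divide-by-128) mode */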
+	host->initial_mode = (ios->clock <= 1000000);
+ host->clock = ios->clock;
+
+ rtsx_usb_switch_clock(host->ucr, host->clock, host->ssc_depth,
+ host->initial_mode, host->double_clk, host->vpclk);
+
+ mutex_unlock(&ucr->dev_mutex);
+ dev_dbg(sdmmc_dev(host), "%s end\n", __func__);
+}
+
+static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct rtsx_usb_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_ucr *ucr = host->ucr;
+ int err = 0;
+
+ dev_dbg(sdmmc_dev(host), "%s: signal_voltage = %d\n",
+ __func__, ios->signal_voltage);
+
+ if (host->host_removal)
+ return -ENOMEDIUM;
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_120)
+ return -EPERM;
+
+ mutex_lock(&ucr->dev_mutex);
+
+ err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD);
+ if (err) {
+ mutex_unlock(&ucr->dev_mutex);
+ return err;
+ }
+
+	/* Let the mmc core do the busy checking; simply stop the forced-toggle
+	 * clock (used while issuing CMD11) and switch the voltage.
+	 */
+ rtsx_usb_init_cmd(ucr);
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PAD_CTL,
+ SD_IO_USING_1V8, SD_IO_USING_3V3);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, LDO_POWER_CFG,
+ TUNE_SD18_MASK, TUNE_SD18_3V3);
+ } else {
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP,
+ SD_CLK_FORCE_STOP);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PAD_CTL,
+ SD_IO_USING_1V8, SD_IO_USING_1V8);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, LDO_POWER_CFG,
+ TUNE_SD18_MASK, TUNE_SD18_1V8);
+ }
+
+ err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
+ mutex_unlock(&ucr->dev_mutex);
+
+ return err;
+}
+
+static int sdmmc_card_busy(struct mmc_host *mmc)
+{
+ struct rtsx_usb_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_ucr *ucr = host->ucr;
+ int err;
+ u8 stat;
+ u8 mask = SD_DAT3_STATUS | SD_DAT2_STATUS | SD_DAT1_STATUS
+ | SD_DAT0_STATUS;
+
+ dev_dbg(sdmmc_dev(host), "%s\n", __func__);
+
+ mutex_lock(&ucr->dev_mutex);
+
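+	/* Let the clock toggle briefly before sampling the DAT lines. */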
+ err = rtsx_usb_write_register(ucr, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP,
+ SD_CLK_TOGGLE_EN);
+ if (err)
+ goto out;
+
+ mdelay(1);
+
+ err = rtsx_usb_read_register(ucr, SD_BUS_STAT, &stat);
+ if (err)
+ goto out;
+
+ err = rtsx_usb_write_register(ucr, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
+out:
+ mutex_unlock(&ucr->dev_mutex);
+
+ if (err)
+ return err;
+
+	/* report busy (1) if any of DAT[0:3] is still held low */
+ if ((stat & mask) != mask)
+ return 1;
+ else
+ return 0;
+}
+
+static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct rtsx_usb_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_ucr *ucr = host->ucr;
+ int err = 0;
+
+ if (host->host_removal)
+ return -ENOMEDIUM;
+
+ mutex_lock(&ucr->dev_mutex);
+
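+	/* Only the SDR timings need the tuning procedure; DDR mode is
+	 * excluded here.
+	 */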
+ if (!host->ddr_mode)
+ err = sd_tuning_rx(host, MMC_SEND_TUNING_BLOCK);
+
+ mutex_unlock(&ucr->dev_mutex);
+
+ return err;
+}
+
+static const struct mmc_host_ops rtsx_usb_sdmmc_ops = {
+ .request = sdmmc_request,
+ .set_ios = sdmmc_set_ios,
+ .get_ro = sdmmc_get_ro,
+ .get_cd = sdmmc_get_cd,
+ .start_signal_voltage_switch = sdmmc_switch_voltage,
+ .card_busy = sdmmc_card_busy,
+ .execute_tuning = sdmmc_execute_tuning,
+};
+
+#ifdef RTSX_USB_USE_LEDS_CLASS
+static void rtsx_usb_led_control(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtsx_usb_sdmmc *host = container_of(led,
+ struct rtsx_usb_sdmmc, led);
+
+ if (host->host_removal)
+ return;
+
+ host->led.brightness = brightness;
+ schedule_work(&host->led_work);
+}
+
+static void rtsx_usb_update_led(struct work_struct *work)
+{
+ struct rtsx_usb_sdmmc *host =
+ container_of(work, struct rtsx_usb_sdmmc, led_work);
+ struct rtsx_ucr *ucr = host->ucr;
+
+ mutex_lock(&ucr->dev_mutex);
+
+ if (host->led.brightness == LED_OFF)
+ rtsx_usb_turn_off_led(ucr);
+ else
+ rtsx_usb_turn_on_led(ucr);
+
+ mutex_unlock(&ucr->dev_mutex);
+}
+#endif
+
+static void rtsx_usb_init_host(struct rtsx_usb_sdmmc *host)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ mmc->f_min = 250000;
+ mmc->f_max = 208000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST |
+ MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 |
+ MMC_CAP_NEEDS_POLL;
+ mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE;
+
+ mmc->max_current_330 = 400;
+ mmc->max_current_180 = 800;
+ mmc->ops = &rtsx_usb_sdmmc_ops;
+ mmc->max_segs = 256;
+ mmc->max_seg_size = 65536;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = 65535;
+ mmc->max_req_size = 524288;
+
+ host->power_mode = MMC_POWER_OFF;
+}
+
+static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct rtsx_usb_sdmmc *host;
+ struct rtsx_ucr *ucr;
+#ifdef RTSX_USB_USE_LEDS_CLASS
+ int err;
+#endif
+
+ ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent));
+ if (!ucr)
+ return -ENXIO;
+
+ dev_dbg(&(pdev->dev), ": Realtek USB SD/MMC controller found\n");
+
+ mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ host = mmc_priv(mmc);
+ host->ucr = ucr;
+ host->mmc = mmc;
+ host->pdev = pdev;
+ platform_set_drvdata(pdev, host);
+
+ mutex_init(&host->host_mutex);
+ rtsx_usb_init_host(host);
+ pm_runtime_enable(&pdev->dev);
+
+#ifdef RTSX_USB_USE_LEDS_CLASS
+ snprintf(host->led_name, sizeof(host->led_name),
+ "%s::", mmc_hostname(mmc));
+ host->led.name = host->led_name;
+ host->led.brightness = LED_OFF;
+ host->led.default_trigger = mmc_hostname(mmc);
+ host->led.brightness_set = rtsx_usb_led_control;
+
+ err = led_classdev_register(mmc_dev(mmc), &host->led);
+ if (err)
+ dev_err(&(pdev->dev),
+ "Failed to register LED device: %d\n", err);
+ INIT_WORK(&host->led_work, rtsx_usb_update_led);
+
+#endif
+ mmc_add_host(mmc);
+
+ return 0;
+}
+
+static int rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev)
+{
+ struct rtsx_usb_sdmmc *host = platform_get_drvdata(pdev);
+ struct mmc_host *mmc;
+
+ if (!host)
+ return 0;
+
+ mmc = host->mmc;
+ host->host_removal = true;
+
+ mutex_lock(&host->host_mutex);
+ if (host->mrq) {
+ dev_dbg(&(pdev->dev),
+ "%s: Controller removed during transfer\n",
+ mmc_hostname(mmc));
+ host->mrq->cmd->error = -ENOMEDIUM;
+ if (host->mrq->stop)
+ host->mrq->stop->error = -ENOMEDIUM;
+ mmc_request_done(mmc, host->mrq);
+ }
+ mutex_unlock(&host->host_mutex);
+
+ mmc_remove_host(mmc);
+
+#ifdef RTSX_USB_USE_LEDS_CLASS
+ cancel_work_sync(&host->led_work);
+ led_classdev_unregister(&host->led);
+#endif
+
+ mmc_free_host(mmc);
+ pm_runtime_disable(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+
+ dev_dbg(&(pdev->dev),
+ ": Realtek USB SD/MMC module has been removed\n");
+
+ return 0;
+}
+
+static struct platform_device_id rtsx_usb_sdmmc_ids[] = {
+ {
+ .name = "rtsx_usb_sdmmc",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, rtsx_usb_sdmmc_ids);
+
+static struct platform_driver rtsx_usb_sdmmc_driver = {
+ .probe = rtsx_usb_sdmmc_drv_probe,
+ .remove = rtsx_usb_sdmmc_drv_remove,
+ .id_table = rtsx_usb_sdmmc_ids,
+ .driver = {
+ .name = "rtsx_usb_sdmmc",
+ },
+};
+module_platform_driver(rtsx_usb_sdmmc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Roger Tseng <rogerable@realtek.com>");
+MODULE_DESCRIPTION("Realtek USB SD/MMC Card Host Driver");
diff --git a/kernel/drivers/mmc/host/s3cmci.c b/kernel/drivers/mmc/host/s3cmci.c
new file mode 100644
index 000000000..94cddf381
--- /dev/null
+++ b/kernel/drivers/mmc/host/s3cmci.c
@@ -0,0 +1,1889 @@
+/*
+ * linux/drivers/mmc/host/s3cmci.c - Samsung S3C MCI driver
+ *
+ * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de>
+ *
+ * Current driver maintained by Ben Dooks and Simtec Electronics
+ * Copyright (C) 2008 Simtec Electronics <ben-linux@fluff.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_device.h>
+#include <linux/cpufreq.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <plat/gpio-cfg.h>
+#include <mach/dma.h>
+#include <mach/gpio-samsung.h>
+
+#include <linux/platform_data/dma-s3c24xx.h>
+#include <linux/platform_data/mmc-s3cmci.h>
+
+#include "s3cmci.h"
+
+#define DRIVER_NAME "s3c-mci"
+
+#define S3C2410_SDICON (0x00)
+#define S3C2410_SDIPRE (0x04)
+#define S3C2410_SDICMDARG (0x08)
+#define S3C2410_SDICMDCON (0x0C)
+#define S3C2410_SDICMDSTAT (0x10)
+#define S3C2410_SDIRSP0 (0x14)
+#define S3C2410_SDIRSP1 (0x18)
+#define S3C2410_SDIRSP2 (0x1C)
+#define S3C2410_SDIRSP3 (0x20)
+#define S3C2410_SDITIMER (0x24)
+#define S3C2410_SDIBSIZE (0x28)
+#define S3C2410_SDIDCON (0x2C)
+#define S3C2410_SDIDCNT (0x30)
+#define S3C2410_SDIDSTA (0x34)
+#define S3C2410_SDIFSTA (0x38)
+
+#define S3C2410_SDIDATA (0x3C)
+#define S3C2410_SDIIMSK (0x40)
+
+#define S3C2440_SDIDATA (0x40)
+#define S3C2440_SDIIMSK (0x3C)
+
+#define S3C2440_SDICON_SDRESET (1 << 8)
+#define S3C2410_SDICON_SDIOIRQ (1 << 3)
+#define S3C2410_SDICON_FIFORESET (1 << 1)
+#define S3C2410_SDICON_CLOCKTYPE (1 << 0)
+
+#define S3C2410_SDICMDCON_LONGRSP (1 << 10)
+#define S3C2410_SDICMDCON_WAITRSP (1 << 9)
+#define S3C2410_SDICMDCON_CMDSTART (1 << 8)
+#define S3C2410_SDICMDCON_SENDERHOST (1 << 6)
+#define S3C2410_SDICMDCON_INDEX (0x3f)
+
+#define S3C2410_SDICMDSTAT_CRCFAIL (1 << 12)
+#define S3C2410_SDICMDSTAT_CMDSENT (1 << 11)
+#define S3C2410_SDICMDSTAT_CMDTIMEOUT (1 << 10)
+#define S3C2410_SDICMDSTAT_RSPFIN (1 << 9)
+
+#define S3C2440_SDIDCON_DS_WORD (2 << 22)
+#define S3C2410_SDIDCON_TXAFTERRESP (1 << 20)
+#define S3C2410_SDIDCON_RXAFTERCMD (1 << 19)
+#define S3C2410_SDIDCON_BLOCKMODE (1 << 17)
+#define S3C2410_SDIDCON_WIDEBUS (1 << 16)
+#define S3C2410_SDIDCON_DMAEN (1 << 15)
+#define S3C2410_SDIDCON_STOP (1 << 14)
+#define S3C2440_SDIDCON_DATSTART (1 << 14)
+
+#define S3C2410_SDIDCON_XFER_RXSTART (2 << 12)
+#define S3C2410_SDIDCON_XFER_TXSTART (3 << 12)
+
+#define S3C2410_SDIDCON_BLKNUM_MASK (0xFFF)
+
+#define S3C2410_SDIDSTA_SDIOIRQDETECT (1 << 9)
+#define S3C2410_SDIDSTA_FIFOFAIL (1 << 8)
+#define S3C2410_SDIDSTA_CRCFAIL (1 << 7)
+#define S3C2410_SDIDSTA_RXCRCFAIL (1 << 6)
+#define S3C2410_SDIDSTA_DATATIMEOUT (1 << 5)
+#define S3C2410_SDIDSTA_XFERFINISH (1 << 4)
+#define S3C2410_SDIDSTA_TXDATAON (1 << 1)
+#define S3C2410_SDIDSTA_RXDATAON (1 << 0)
+
+#define S3C2440_SDIFSTA_FIFORESET (1 << 16)
+#define S3C2440_SDIFSTA_FIFOFAIL (3 << 14)
+#define S3C2410_SDIFSTA_TFDET (1 << 13)
+#define S3C2410_SDIFSTA_RFDET (1 << 12)
+#define S3C2410_SDIFSTA_COUNTMASK (0x7f)
+
+#define S3C2410_SDIIMSK_RESPONSECRC (1 << 17)
+#define S3C2410_SDIIMSK_CMDSENT (1 << 16)
+#define S3C2410_SDIIMSK_CMDTIMEOUT (1 << 15)
+#define S3C2410_SDIIMSK_RESPONSEND (1 << 14)
+#define S3C2410_SDIIMSK_SDIOIRQ (1 << 12)
+#define S3C2410_SDIIMSK_FIFOFAIL (1 << 11)
+#define S3C2410_SDIIMSK_CRCSTATUS (1 << 10)
+#define S3C2410_SDIIMSK_DATACRC (1 << 9)
+#define S3C2410_SDIIMSK_DATATIMEOUT (1 << 8)
+#define S3C2410_SDIIMSK_DATAFINISH (1 << 7)
+#define S3C2410_SDIIMSK_TXFIFOHALF (1 << 4)
+#define S3C2410_SDIIMSK_RXFIFOLAST (1 << 2)
+#define S3C2410_SDIIMSK_RXFIFOHALF (1 << 0)
+
+enum dbg_channels {
+ dbg_err = (1 << 0),
+ dbg_debug = (1 << 1),
+ dbg_info = (1 << 2),
+ dbg_irq = (1 << 3),
+ dbg_sg = (1 << 4),
+ dbg_dma = (1 << 5),
+ dbg_pio = (1 << 6),
+ dbg_fail = (1 << 7),
+ dbg_conf = (1 << 8),
+};
+
+static const int dbgmap_err = dbg_fail;
+static const int dbgmap_info = dbg_info | dbg_conf;
+static const int dbgmap_debug = dbg_err | dbg_debug;
+
+#define dbg(host, channels, args...) \
+ do { \
+ if (dbgmap_err & channels) \
+ dev_err(&host->pdev->dev, args); \
+ else if (dbgmap_info & channels) \
+ dev_info(&host->pdev->dev, args); \
+ else if (dbgmap_debug & channels) \
+ dev_dbg(&host->pdev->dev, args); \
+ } while (0)
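+
+/*
+ * Usage sketch: dbg(host, dbg_irq, "csta:0x%08x\n", mci_csta) is routed to
+ * dev_err()/dev_info()/dev_dbg() depending on which dbgmap_* set contains
+ * the channel; channels in none of the sets are silently dropped.
+ */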
+
+static void finalize_request(struct s3cmci_host *host);
+static void s3cmci_send_request(struct mmc_host *mmc);
+static void s3cmci_reset(struct s3cmci_host *host);
+
+#ifdef CONFIG_MMC_DEBUG
+
+static void dbg_dumpregs(struct s3cmci_host *host, char *prefix)
+{
+ u32 con, pre, cmdarg, cmdcon, cmdsta, r0, r1, r2, r3, timer, bsize;
+ u32 datcon, datcnt, datsta, fsta, imask;
+
+ con = readl(host->base + S3C2410_SDICON);
+ pre = readl(host->base + S3C2410_SDIPRE);
+ cmdarg = readl(host->base + S3C2410_SDICMDARG);
+ cmdcon = readl(host->base + S3C2410_SDICMDCON);
+ cmdsta = readl(host->base + S3C2410_SDICMDSTAT);
+ r0 = readl(host->base + S3C2410_SDIRSP0);
+ r1 = readl(host->base + S3C2410_SDIRSP1);
+ r2 = readl(host->base + S3C2410_SDIRSP2);
+ r3 = readl(host->base + S3C2410_SDIRSP3);
+ timer = readl(host->base + S3C2410_SDITIMER);
+ bsize = readl(host->base + S3C2410_SDIBSIZE);
+ datcon = readl(host->base + S3C2410_SDIDCON);
+ datcnt = readl(host->base + S3C2410_SDIDCNT);
+ datsta = readl(host->base + S3C2410_SDIDSTA);
+ fsta = readl(host->base + S3C2410_SDIFSTA);
+ imask = readl(host->base + host->sdiimsk);
+
+ dbg(host, dbg_debug, "%s CON:[%08x] PRE:[%08x] TMR:[%08x]\n",
+ prefix, con, pre, timer);
+
+ dbg(host, dbg_debug, "%s CCON:[%08x] CARG:[%08x] CSTA:[%08x]\n",
+ prefix, cmdcon, cmdarg, cmdsta);
+
+ dbg(host, dbg_debug, "%s DCON:[%08x] FSTA:[%08x]"
+ " DSTA:[%08x] DCNT:[%08x]\n",
+ prefix, datcon, fsta, datsta, datcnt);
+
+ dbg(host, dbg_debug, "%s R0:[%08x] R1:[%08x]"
+ " R2:[%08x] R3:[%08x]\n",
+ prefix, r0, r1, r2, r3);
+}
+
+static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd,
+ int stop)
+{
+	snprintf(host->dbgmsg_cmd, 300,
+		 "#%u%s op:%i arg:0x%08x flags:0x%08x retries:%u",
+		 host->ccnt, (stop ? " (STOP)" : ""),
+		 cmd->opcode, cmd->arg, cmd->flags, cmd->retries);
+
+ if (cmd->data) {
+ snprintf(host->dbgmsg_dat, 300,
+ "#%u bsize:%u blocks:%u bytes:%u",
+ host->dcnt, cmd->data->blksz,
+ cmd->data->blocks,
+ cmd->data->blocks * cmd->data->blksz);
+ } else {
+ host->dbgmsg_dat[0] = '\0';
+ }
+}
+
+static void dbg_dumpcmd(struct s3cmci_host *host, struct mmc_command *cmd,
+ int fail)
+{
+ unsigned int dbglvl = fail ? dbg_fail : dbg_debug;
+
+ if (!cmd)
+ return;
+
+ if (cmd->error == 0) {
+ dbg(host, dbglvl, "CMD[OK] %s R0:0x%08x\n",
+ host->dbgmsg_cmd, cmd->resp[0]);
+ } else {
+ dbg(host, dbglvl, "CMD[ERR %i] %s Status:%s\n",
+ cmd->error, host->dbgmsg_cmd, host->status);
+ }
+
+ if (!cmd->data)
+ return;
+
+ if (cmd->data->error == 0) {
+ dbg(host, dbglvl, "DAT[OK] %s\n", host->dbgmsg_dat);
+ } else {
+ dbg(host, dbglvl, "DAT[ERR %i] %s DCNT:0x%08x\n",
+ cmd->data->error, host->dbgmsg_dat,
+ readl(host->base + S3C2410_SDIDCNT));
+ }
+}
+#else
+static void dbg_dumpcmd(struct s3cmci_host *host,
+ struct mmc_command *cmd, int fail) { }
+
+static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd,
+ int stop) { }
+
+static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { }
+
+#endif /* CONFIG_MMC_DEBUG */
+
+/**
+ * s3cmci_host_usedma - return whether the host is using dma or pio
+ * @host: The host state
+ *
+ * Return true if the host is using DMA to transfer data, or false if the
+ * PIO path is used instead. The result is a compile-time constant chosen
+ * by the driver configuration.
+ */
+static inline bool s3cmci_host_usedma(struct s3cmci_host *host)
+{
+#ifdef CONFIG_MMC_S3C_PIO
+ return false;
+#else /* CONFIG_MMC_S3C_DMA */
+ return true;
+#endif
+}
+
+static inline u32 enable_imask(struct s3cmci_host *host, u32 imask)
+{
+ u32 newmask;
+
+ newmask = readl(host->base + host->sdiimsk);
+ newmask |= imask;
+
+ writel(newmask, host->base + host->sdiimsk);
+
+ return newmask;
+}
+
+static inline u32 disable_imask(struct s3cmci_host *host, u32 imask)
+{
+ u32 newmask;
+
+ newmask = readl(host->base + host->sdiimsk);
+ newmask &= ~imask;
+
+ writel(newmask, host->base + host->sdiimsk);
+
+ return newmask;
+}
+
+static inline void clear_imask(struct s3cmci_host *host)
+{
+ u32 mask = readl(host->base + host->sdiimsk);
+
+ /* preserve the SDIO IRQ mask state */
+ mask &= S3C2410_SDIIMSK_SDIOIRQ;
+ writel(mask, host->base + host->sdiimsk);
+}
+
+/**
+ * s3cmci_check_sdio_irq - test whether the SDIO IRQ is being signalled
+ * @host: The host to check.
+ *
+ * Test to see if the SDIO interrupt is being signalled in case the
+ * controller has failed to re-detect a card interrupt. Read GPE8 and,
+ * if it is low, signal an SDIO interrupt.
+ *
+ * This is currently called if a request is finished (we assume that the
+ * bus is now idle) and when the SDIO IRQ is enabled in case the IRQ is
+ * already being indicated.
+*/
+static void s3cmci_check_sdio_irq(struct s3cmci_host *host)
+{
+ if (host->sdio_irqen) {
+ if (gpio_get_value(S3C2410_GPE(8)) == 0) {
+ pr_debug("%s: signalling irq\n", __func__);
+ mmc_signal_sdio_irq(host->mmc);
+ }
+ }
+}
+
+static inline int get_data_buffer(struct s3cmci_host *host,
+ u32 *bytes, u32 **pointer)
+{
+ struct scatterlist *sg;
+
+ if (host->pio_active == XFER_NONE)
+ return -EINVAL;
+
+ if ((!host->mrq) || (!host->mrq->data))
+ return -EINVAL;
+
+ if (host->pio_sgptr >= host->mrq->data->sg_len) {
+ dbg(host, dbg_debug, "no more buffers (%i/%i)\n",
+ host->pio_sgptr, host->mrq->data->sg_len);
+ return -EBUSY;
+ }
+ sg = &host->mrq->data->sg[host->pio_sgptr];
+
+ *bytes = sg->length;
+ *pointer = sg_virt(sg);
+
+ host->pio_sgptr++;
+
+ dbg(host, dbg_sg, "new buffer (%i/%i)\n",
+ host->pio_sgptr, host->mrq->data->sg_len);
+
+ return 0;
+}
+
+static inline u32 fifo_count(struct s3cmci_host *host)
+{
+ u32 fifostat = readl(host->base + S3C2410_SDIFSTA);
+
+ fifostat &= S3C2410_SDIFSTA_COUNTMASK;
+ return fifostat;
+}
+
+static inline u32 fifo_free(struct s3cmci_host *host)
+{
+ u32 fifostat = readl(host->base + S3C2410_SDIFSTA);
+
+ fifostat &= S3C2410_SDIFSTA_COUNTMASK;
+ return 63 - fifostat;
+}
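+
+/*
+ * Note: the SDI data FIFO is 64 bytes deep; fifo_free() reports
+ * 63 - count rather than 64 - count, presumably so the PIO write path
+ * never fills the FIFO to the brim.
+ */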
+
+/**
+ * s3cmci_enable_irq - enable IRQ, after having disabled it.
+ * @host: The device state.
+ * @more: True if more IRQs are expected from transfer.
+ *
+ * Enable the main IRQ if needed after it has been disabled.
+ *
+ * The IRQ can be one of the following states:
+ * - disabled during IDLE
+ * - disabled whilst processing data
+ * - enabled during transfer
+ * - enabled whilst awaiting SDIO interrupt detection
+ */
+static void s3cmci_enable_irq(struct s3cmci_host *host, bool more)
+{
+ unsigned long flags;
+ bool enable = false;
+
+ local_irq_save(flags);
+
+ host->irq_enabled = more;
+ host->irq_disabled = false;
+
+ enable = more | host->sdio_irqen;
+
+ if (host->irq_state != enable) {
+ host->irq_state = enable;
+
+ if (enable)
+ enable_irq(host->irq);
+ else
+ disable_irq(host->irq);
+ }
+
+ local_irq_restore(flags);
+}
+
+/**
+ * s3cmci_disable_irq - disable the main IRQ whilst processing a transfer
+ * @host: The device state.
+ * @transfer: True if the IRQ is being disabled for transfer processing.
+ *
+ * Disable the main IRQ when the PIO transfer logic takes over, so that
+ * the handler does not run concurrently with the tasklet.
+ */
+static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* pr_debug("%s: transfer %d\n", __func__, transfer); */
+
+ host->irq_disabled = transfer;
+
+ if (transfer && host->irq_state) {
+ host->irq_state = false;
+ disable_irq(host->irq);
+ }
+
+ local_irq_restore(flags);
+}
+
+static void do_pio_read(struct s3cmci_host *host)
+{
+ int res;
+ u32 fifo;
+ u32 *ptr;
+ u32 fifo_words;
+ void __iomem *from_ptr;
+
+	/* Restore the real prescaler; it may have been slowed down in
+	 * s3cmci_setup_data() to avoid read timeouts.
+	 */
+ writel(host->prescaler, host->base + S3C2410_SDIPRE);
+
+ from_ptr = host->base + host->sdidata;
+
+ while ((fifo = fifo_count(host))) {
+ if (!host->pio_bytes) {
+ res = get_data_buffer(host, &host->pio_bytes,
+ &host->pio_ptr);
+ if (res) {
+ host->pio_active = XFER_NONE;
+ host->complete_what = COMPLETION_FINALIZE;
+
+ dbg(host, dbg_pio, "pio_read(): "
+ "complete (no more data).\n");
+ return;
+ }
+
+ dbg(host, dbg_pio,
+ "pio_read(): new target: [%i]@[%p]\n",
+ host->pio_bytes, host->pio_ptr);
+ }
+
+ dbg(host, dbg_pio,
+ "pio_read(): fifo:[%02i] buffer:[%03i] dcnt:[%08X]\n",
+ fifo, host->pio_bytes,
+ readl(host->base + S3C2410_SDIDCNT));
+
+		/* If we have reached the end of the block, we can
+		 * read a word and get 1 to 3 bytes. If we are in the
+		 * middle of the block, we have to read full words,
+		 * otherwise we will write garbage, so round down to
+		 * an even multiple of 4. */
+ if (fifo >= host->pio_bytes)
+ fifo = host->pio_bytes;
+ else
+ fifo -= fifo & 3;
+
+ host->pio_bytes -= fifo;
+ host->pio_count += fifo;
+
+ fifo_words = fifo >> 2;
+ ptr = host->pio_ptr;
+ while (fifo_words--)
+ *ptr++ = readl(from_ptr);
+ host->pio_ptr = ptr;
+
+ if (fifo & 3) {
+ u32 n = fifo & 3;
+ u32 data = readl(from_ptr);
+ u8 *p = (u8 *)host->pio_ptr;
+
+ while (n--) {
+ *p++ = data;
+ data >>= 8;
+ }
+ }
+ }
+
+ if (!host->pio_bytes) {
+ res = get_data_buffer(host, &host->pio_bytes, &host->pio_ptr);
+ if (res) {
+ dbg(host, dbg_pio,
+ "pio_read(): complete (no more buffers).\n");
+ host->pio_active = XFER_NONE;
+ host->complete_what = COMPLETION_FINALIZE;
+
+ return;
+ }
+ }
+
+ enable_imask(host,
+ S3C2410_SDIIMSK_RXFIFOHALF | S3C2410_SDIIMSK_RXFIFOLAST);
+}
+
+static void do_pio_write(struct s3cmci_host *host)
+{
+ void __iomem *to_ptr;
+ int res;
+ u32 fifo;
+ u32 *ptr;
+
+ to_ptr = host->base + host->sdidata;
+
+ while ((fifo = fifo_free(host)) > 3) {
+ if (!host->pio_bytes) {
+ res = get_data_buffer(host, &host->pio_bytes,
+ &host->pio_ptr);
+ if (res) {
+ dbg(host, dbg_pio,
+ "pio_write(): complete (no more data).\n");
+ host->pio_active = XFER_NONE;
+
+ return;
+ }
+
+ dbg(host, dbg_pio,
+ "pio_write(): new source: [%i]@[%p]\n",
+ host->pio_bytes, host->pio_ptr);
+
+ }
+
+		/* If we have reached the end of the block, we have to
+		 * write exactly the remaining number of bytes. If we
+		 * are in the middle of the block, we have to write full
+		 * words, so round down to an even multiple of 4. */
+ if (fifo >= host->pio_bytes)
+ fifo = host->pio_bytes;
+ else
+ fifo -= fifo & 3;
+
+ host->pio_bytes -= fifo;
+ host->pio_count += fifo;
+
+ fifo = (fifo + 3) >> 2;
+ ptr = host->pio_ptr;
+ while (fifo--)
+ writel(*ptr++, to_ptr);
+ host->pio_ptr = ptr;
+ }
+
+ enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
+}
+
+static void pio_tasklet(unsigned long data)
+{
+ struct s3cmci_host *host = (struct s3cmci_host *) data;
+
+ s3cmci_disable_irq(host, true);
+
+ if (host->pio_active == XFER_WRITE)
+ do_pio_write(host);
+
+ if (host->pio_active == XFER_READ)
+ do_pio_read(host);
+
+ if (host->complete_what == COMPLETION_FINALIZE) {
+ clear_imask(host);
+ if (host->pio_active != XFER_NONE) {
+ dbg(host, dbg_err, "unfinished %s "
+ "- pio_count:[%u] pio_bytes:[%u]\n",
+ (host->pio_active == XFER_READ) ? "read" : "write",
+ host->pio_count, host->pio_bytes);
+
+ if (host->mrq->data)
+ host->mrq->data->error = -EINVAL;
+ }
+
+ s3cmci_enable_irq(host, false);
+ finalize_request(host);
+ } else
+ s3cmci_enable_irq(host, true);
+}
+
+/*
+ * ISR for SDI Interface IRQ
+ * Communication between driver and ISR works as follows:
+ * host->mrq points to current request
+ * host->complete_what Indicates when the request is considered done
+ * COMPLETION_CMDSENT when the command was sent
+ * COMPLETION_RSPFIN when a response was received
+ * COMPLETION_XFERFINISH when the data transfer is finished
+ * COMPLETION_XFERFINISH_RSPFIN both of the above.
+ * host->complete_request is the completion-object the driver waits for
+ *
+ * 1) Driver sets up host->mrq and host->complete_what
+ * 2) Driver prepares the transfer
+ * 3) Driver enables interrupts
+ * 4) Driver starts transfer
+ * 5) Driver waits for host->complete_request
+ * 6) ISR checks for request status (errors and success)
+ * 7) ISR sets host->mrq->cmd->error and host->mrq->data->error
+ * 8) ISR completes host->complete_request
+ * 9) ISR disables interrupts
+ * 10) Driver wakes up and takes care of the request
+ *
+ * Note: "->error"-fields are expected to be set to 0 before the request
+ * is issued by mmc.c - therefore they are only set when an error
+ * condition comes up
+ */
+
+static irqreturn_t s3cmci_irq(int irq, void *dev_id)
+{
+ struct s3cmci_host *host = dev_id;
+ struct mmc_command *cmd;
+ u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk;
+ u32 mci_cclear = 0, mci_dclear;
+ unsigned long iflags;
+
+ mci_dsta = readl(host->base + S3C2410_SDIDSTA);
+ mci_imsk = readl(host->base + host->sdiimsk);
+
+ if (mci_dsta & S3C2410_SDIDSTA_SDIOIRQDETECT) {
+ if (mci_imsk & S3C2410_SDIIMSK_SDIOIRQ) {
+ mci_dclear = S3C2410_SDIDSTA_SDIOIRQDETECT;
+ writel(mci_dclear, host->base + S3C2410_SDIDSTA);
+
+ mmc_signal_sdio_irq(host->mmc);
+ return IRQ_HANDLED;
+ }
+ }
+
+ spin_lock_irqsave(&host->complete_lock, iflags);
+
+ mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
+ mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
+ mci_fsta = readl(host->base + S3C2410_SDIFSTA);
+ mci_dclear = 0;
+
+ if ((host->complete_what == COMPLETION_NONE) ||
+ (host->complete_what == COMPLETION_FINALIZE)) {
+ host->status = "nothing to complete";
+ clear_imask(host);
+ goto irq_out;
+ }
+
+ if (!host->mrq) {
+ host->status = "no active mrq";
+ clear_imask(host);
+ goto irq_out;
+ }
+
+ cmd = host->cmd_is_stop ? host->mrq->stop : host->mrq->cmd;
+
+ if (!cmd) {
+ host->status = "no active cmd";
+ clear_imask(host);
+ goto irq_out;
+ }
+
+ if (!s3cmci_host_usedma(host)) {
+ if ((host->pio_active == XFER_WRITE) &&
+ (mci_fsta & S3C2410_SDIFSTA_TFDET)) {
+
+ disable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
+ tasklet_schedule(&host->pio_tasklet);
+ host->status = "pio tx";
+ }
+
+ if ((host->pio_active == XFER_READ) &&
+ (mci_fsta & S3C2410_SDIFSTA_RFDET)) {
+
+ disable_imask(host,
+ S3C2410_SDIIMSK_RXFIFOHALF |
+ S3C2410_SDIIMSK_RXFIFOLAST);
+
+ tasklet_schedule(&host->pio_tasklet);
+ host->status = "pio rx";
+ }
+ }
+
+ if (mci_csta & S3C2410_SDICMDSTAT_CMDTIMEOUT) {
+ dbg(host, dbg_err, "CMDSTAT: error CMDTIMEOUT\n");
+ cmd->error = -ETIMEDOUT;
+ host->status = "error: command timeout";
+ goto fail_transfer;
+ }
+
+ if (mci_csta & S3C2410_SDICMDSTAT_CMDSENT) {
+ if (host->complete_what == COMPLETION_CMDSENT) {
+ host->status = "ok: command sent";
+ goto close_transfer;
+ }
+
+ mci_cclear |= S3C2410_SDICMDSTAT_CMDSENT;
+ }
+
+ if (mci_csta & S3C2410_SDICMDSTAT_CRCFAIL) {
+ if (cmd->flags & MMC_RSP_CRC) {
+ if (host->mrq->cmd->flags & MMC_RSP_136) {
+ dbg(host, dbg_irq,
+ "fixup: ignore CRC fail with long rsp\n");
+ } else {
+ /* note, we used to fail the transfer
+ * here, but it seems that this is just
+ * the hardware getting it wrong.
+ *
+ * cmd->error = -EILSEQ;
+ * host->status = "error: bad command crc";
+ * goto fail_transfer;
+ */
+ }
+ }
+
+ mci_cclear |= S3C2410_SDICMDSTAT_CRCFAIL;
+ }
+
+ if (mci_csta & S3C2410_SDICMDSTAT_RSPFIN) {
+ if (host->complete_what == COMPLETION_RSPFIN) {
+ host->status = "ok: command response received";
+ goto close_transfer;
+ }
+
+ if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
+ host->complete_what = COMPLETION_XFERFINISH;
+
+ mci_cclear |= S3C2410_SDICMDSTAT_RSPFIN;
+ }
+
+ /* errors handled after this point are only relevant
+ when a data transfer is in progress */
+
+ if (!cmd->data)
+ goto clear_status_bits;
+
+ /* Check for FIFO failure */
+ if (host->is2440) {
+ if (mci_fsta & S3C2440_SDIFSTA_FIFOFAIL) {
+ dbg(host, dbg_err, "FIFO failure\n");
+ host->mrq->data->error = -EILSEQ;
+ host->status = "error: 2440 fifo failure";
+ goto fail_transfer;
+ }
+ } else {
+ if (mci_dsta & S3C2410_SDIDSTA_FIFOFAIL) {
+ dbg(host, dbg_err, "FIFO failure\n");
+ cmd->data->error = -EILSEQ;
+ host->status = "error: fifo failure";
+ goto fail_transfer;
+ }
+ }
+
+ if (mci_dsta & S3C2410_SDIDSTA_RXCRCFAIL) {
+ dbg(host, dbg_err, "bad data crc (outgoing)\n");
+ cmd->data->error = -EILSEQ;
+ host->status = "error: bad data crc (outgoing)";
+ goto fail_transfer;
+ }
+
+ if (mci_dsta & S3C2410_SDIDSTA_CRCFAIL) {
+ dbg(host, dbg_err, "bad data crc (incoming)\n");
+ cmd->data->error = -EILSEQ;
+ host->status = "error: bad data crc (incoming)";
+ goto fail_transfer;
+ }
+
+ if (mci_dsta & S3C2410_SDIDSTA_DATATIMEOUT) {
+ dbg(host, dbg_err, "data timeout\n");
+ cmd->data->error = -ETIMEDOUT;
+ host->status = "error: data timeout";
+ goto fail_transfer;
+ }
+
+ if (mci_dsta & S3C2410_SDIDSTA_XFERFINISH) {
+ if (host->complete_what == COMPLETION_XFERFINISH) {
+ host->status = "ok: data transfer completed";
+ goto close_transfer;
+ }
+
+ if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
+ host->complete_what = COMPLETION_RSPFIN;
+
+ mci_dclear |= S3C2410_SDIDSTA_XFERFINISH;
+ }
+
+clear_status_bits:
+ writel(mci_cclear, host->base + S3C2410_SDICMDSTAT);
+ writel(mci_dclear, host->base + S3C2410_SDIDSTA);
+
+ goto irq_out;
+
+fail_transfer:
+ host->pio_active = XFER_NONE;
+
+close_transfer:
+ host->complete_what = COMPLETION_FINALIZE;
+
+ clear_imask(host);
+ tasklet_schedule(&host->pio_tasklet);
+
+ goto irq_out;
+
+irq_out:
+ dbg(host, dbg_irq,
+ "csta:0x%08x dsta:0x%08x fsta:0x%08x dcnt:0x%08x status:%s.\n",
+ mci_csta, mci_dsta, mci_fsta, mci_dcnt, host->status);
+
+ spin_unlock_irqrestore(&host->complete_lock, iflags);
+ return IRQ_HANDLED;
+}
+
+/*
+ * ISR for the CardDetect Pin
+*/
+
+static irqreturn_t s3cmci_irq_cd(int irq, void *dev_id)
+{
+ struct s3cmci_host *host = (struct s3cmci_host *)dev_id;
+
+ dbg(host, dbg_irq, "card detect\n");
+
+ mmc_detect_change(host->mmc, msecs_to_jiffies(500));
+
+ return IRQ_HANDLED;
+}
+
+static void s3cmci_dma_done_callback(void *arg)
+{
+ struct s3cmci_host *host = arg;
+ unsigned long iflags;
+
+ BUG_ON(!host->mrq);
+ BUG_ON(!host->mrq->data);
+
+ spin_lock_irqsave(&host->complete_lock, iflags);
+
+ dbg(host, dbg_dma, "DMA FINISHED\n");
+
+ host->dma_complete = 1;
+ host->complete_what = COMPLETION_FINALIZE;
+
+ tasklet_schedule(&host->pio_tasklet);
+ spin_unlock_irqrestore(&host->complete_lock, iflags);
+}
+
+static void finalize_request(struct s3cmci_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd;
+ int debug_as_failure = 0;
+
+ if (host->complete_what != COMPLETION_FINALIZE)
+ return;
+
+ if (!mrq)
+ return;
+ cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
+
+ if (cmd->data && (cmd->error == 0) &&
+ (cmd->data->error == 0)) {
+ if (s3cmci_host_usedma(host) && (!host->dma_complete)) {
+ dbg(host, dbg_dma, "DMA Missing (%d)!\n",
+ host->dma_complete);
+ return;
+ }
+ }
+
+ /* Read response from controller. */
+ cmd->resp[0] = readl(host->base + S3C2410_SDIRSP0);
+ cmd->resp[1] = readl(host->base + S3C2410_SDIRSP1);
+ cmd->resp[2] = readl(host->base + S3C2410_SDIRSP2);
+ cmd->resp[3] = readl(host->base + S3C2410_SDIRSP3);
+
+ writel(host->prescaler, host->base + S3C2410_SDIPRE);
+
+ if (cmd->error)
+ debug_as_failure = 1;
+
+ if (cmd->data && cmd->data->error)
+ debug_as_failure = 1;
+
+ dbg_dumpcmd(host, cmd, debug_as_failure);
+
+ /* Cleanup controller */
+ writel(0, host->base + S3C2410_SDICMDARG);
+ writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
+ writel(0, host->base + S3C2410_SDICMDCON);
+ clear_imask(host);
+
+ if (cmd->data && cmd->error)
+ cmd->data->error = cmd->error;
+
+ if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) {
+ host->cmd_is_stop = 1;
+ s3cmci_send_request(host->mmc);
+ return;
+ }
+
+ /* If we have no data transfer we are finished here */
+ if (!mrq->data)
+ goto request_done;
+
+	/* Calculate the amount of bytes transferred if there was no error */
+ if (mrq->data->error == 0) {
+ mrq->data->bytes_xfered =
+ (mrq->data->blocks * mrq->data->blksz);
+ } else {
+ mrq->data->bytes_xfered = 0;
+ }
+
+ /* If we had an error while transferring data we flush the
+ * DMA channel and the fifo to clear out any garbage. */
+ if (mrq->data->error != 0) {
+ if (s3cmci_host_usedma(host))
+ dmaengine_terminate_all(host->dma);
+
+ if (host->is2440) {
+ /* Clear failure register and reset fifo. */
+ writel(S3C2440_SDIFSTA_FIFORESET |
+ S3C2440_SDIFSTA_FIFOFAIL,
+ host->base + S3C2410_SDIFSTA);
+ } else {
+ u32 mci_con;
+
+ /* reset fifo */
+ mci_con = readl(host->base + S3C2410_SDICON);
+ mci_con |= S3C2410_SDICON_FIFORESET;
+
+ writel(mci_con, host->base + S3C2410_SDICON);
+ }
+ }
+
+request_done:
+ host->complete_what = COMPLETION_NONE;
+ host->mrq = NULL;
+
+ s3cmci_check_sdio_irq(host);
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void s3cmci_send_command(struct s3cmci_host *host,
+ struct mmc_command *cmd)
+{
+ u32 ccon, imsk;
+
+ imsk = S3C2410_SDIIMSK_CRCSTATUS | S3C2410_SDIIMSK_CMDTIMEOUT |
+ S3C2410_SDIIMSK_RESPONSEND | S3C2410_SDIIMSK_CMDSENT |
+ S3C2410_SDIIMSK_RESPONSECRC;
+
+ enable_imask(host, imsk);
+
+ if (cmd->data)
+ host->complete_what = COMPLETION_XFERFINISH_RSPFIN;
+ else if (cmd->flags & MMC_RSP_PRESENT)
+ host->complete_what = COMPLETION_RSPFIN;
+ else
+ host->complete_what = COMPLETION_CMDSENT;
+
+ writel(cmd->arg, host->base + S3C2410_SDICMDARG);
+
+ ccon = cmd->opcode & S3C2410_SDICMDCON_INDEX;
+ ccon |= S3C2410_SDICMDCON_SENDERHOST | S3C2410_SDICMDCON_CMDSTART;
+
+ if (cmd->flags & MMC_RSP_PRESENT)
+ ccon |= S3C2410_SDICMDCON_WAITRSP;
+
+ if (cmd->flags & MMC_RSP_136)
+ ccon |= S3C2410_SDICMDCON_LONGRSP;
+
+ writel(ccon, host->base + S3C2410_SDICMDCON);
+}
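+
+/*
+ * For illustration: a READ_SINGLE_BLOCK (CMD17, R1 response) ends up with
+ * ccon = 17 | SENDERHOST | CMDSTART | WAITRSP (no LONGRSP, since R1 is a
+ * short response), and complete_what = COMPLETION_XFERFINISH_RSPFIN as
+ * data follows the response.
+ */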
+
+static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
+{
+ u32 dcon, imsk, stoptries = 3;
+
+ /* write DCON register */
+
+ if (!data) {
+ writel(0, host->base + S3C2410_SDIDCON);
+ return 0;
+ }
+
+ if ((data->blksz & 3) != 0) {
+ /* We cannot deal with unaligned blocks with more than
+ * one block being transferred. */
+
+ if (data->blocks > 1) {
+ pr_warn("%s: can't do non-word sized block transfers (blksz %d)\n",
+ __func__, data->blksz);
+ return -EINVAL;
+ }
+ }
+
+ while (readl(host->base + S3C2410_SDIDSTA) &
+ (S3C2410_SDIDSTA_TXDATAON | S3C2410_SDIDSTA_RXDATAON)) {
+
+		dbg(host, dbg_err,
+			"mci_setup_data() transfer still in progress.\n");
+
+ writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
+ s3cmci_reset(host);
+
+ if ((stoptries--) == 0) {
+ dbg_dumpregs(host, "DRF");
+ return -EINVAL;
+ }
+ }
+
+ dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK;
+
+ if (s3cmci_host_usedma(host))
+ dcon |= S3C2410_SDIDCON_DMAEN;
+
+ if (host->bus_width == MMC_BUS_WIDTH_4)
+ dcon |= S3C2410_SDIDCON_WIDEBUS;
+
+ if (!(data->flags & MMC_DATA_STREAM))
+ dcon |= S3C2410_SDIDCON_BLOCKMODE;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ dcon |= S3C2410_SDIDCON_TXAFTERRESP;
+ dcon |= S3C2410_SDIDCON_XFER_TXSTART;
+ }
+
+ if (data->flags & MMC_DATA_READ) {
+ dcon |= S3C2410_SDIDCON_RXAFTERCMD;
+ dcon |= S3C2410_SDIDCON_XFER_RXSTART;
+ }
+
+ if (host->is2440) {
+ dcon |= S3C2440_SDIDCON_DS_WORD;
+ dcon |= S3C2440_SDIDCON_DATSTART;
+ }
+
+ writel(dcon, host->base + S3C2410_SDIDCON);
+
+ /* write BSIZE register */
+
+ writel(data->blksz, host->base + S3C2410_SDIBSIZE);
+
+ /* add to IMASK register */
+ imsk = S3C2410_SDIIMSK_FIFOFAIL | S3C2410_SDIIMSK_DATACRC |
+ S3C2410_SDIIMSK_DATATIMEOUT | S3C2410_SDIIMSK_DATAFINISH;
+
+ enable_imask(host, imsk);
+
+ /* write TIMER register */
+
+ if (host->is2440) {
+ writel(0x007FFFFF, host->base + S3C2410_SDITIMER);
+ } else {
+ writel(0x0000FFFF, host->base + S3C2410_SDITIMER);
+
+ /* FIX: set slow clock to prevent timeouts on read */
+ if (data->flags & MMC_DATA_READ)
+ writel(0xFF, host->base + S3C2410_SDIPRE);
+ }
+
+ return 0;
+}
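+
+/*
+ * Example DCON value (illustrative): a 4-bit block-mode DMA read on a
+ * 2440 combines the block count with DMAEN | WIDEBUS | BLOCKMODE |
+ * RXAFTERCMD | XFER_RXSTART | DS_WORD | DATSTART.
+ */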
+
+#define BOTH_DIR (MMC_DATA_WRITE | MMC_DATA_READ)
+
+static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data)
+{
+ int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
+
+ BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
+
+ host->pio_sgptr = 0;
+ host->pio_bytes = 0;
+ host->pio_count = 0;
+ host->pio_active = rw ? XFER_WRITE : XFER_READ;
+
+ if (rw) {
+ do_pio_write(host);
+ enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
+ } else {
+ enable_imask(host, S3C2410_SDIIMSK_RXFIFOHALF
+ | S3C2410_SDIIMSK_RXFIFOLAST);
+ }
+
+ return 0;
+}
+
+static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
+{
+ int rw = data->flags & MMC_DATA_WRITE;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config conf = {
+ .src_addr = host->mem->start + host->sdidata,
+ .dst_addr = host->mem->start + host->sdidata,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
+
+ BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
+
+ /* Restore prescaler value */
+ writel(host->prescaler, host->base + S3C2410_SDIPRE);
+
+ if (!rw)
+ conf.direction = DMA_DEV_TO_MEM;
+ else
+ conf.direction = DMA_MEM_TO_DEV;
+
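+	/* Standard dmaengine one-shot sequence: map the scatterlist,
+	 * configure the channel, then prepare, submit and issue the
+	 * descriptor. The FIFO address is set as both src and dst above;
+	 * only the field matching conf.direction is used.
+	 */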
+ dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ dmaengine_slave_config(host->dma, &conf);
+ desc = dmaengine_prep_slave_sg(host->dma, data->sg, data->sg_len,
+ conf.direction,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc)
+ goto unmap_exit;
+ desc->callback = s3cmci_dma_done_callback;
+ desc->callback_param = host;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(host->dma);
+
+ return 0;
+
+unmap_exit:
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ return -ENOMEM;
+}
+
+static void s3cmci_send_request(struct mmc_host *mmc)
+{
+ struct s3cmci_host *host = mmc_priv(mmc);
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
+
+ host->ccnt++;
+ prepare_dbgmsg(host, cmd, host->cmd_is_stop);
+
+ /* Clear command, data and fifo status registers
+ Fifo clear only necessary on 2440, but doesn't hurt on 2410
+ */
+ writel(0xFFFFFFFF, host->base + S3C2410_SDICMDSTAT);
+ writel(0xFFFFFFFF, host->base + S3C2410_SDIDSTA);
+ writel(0xFFFFFFFF, host->base + S3C2410_SDIFSTA);
+
+ if (cmd->data) {
+ int res = s3cmci_setup_data(host, cmd->data);
+
+ host->dcnt++;
+
+ if (res) {
+ dbg(host, dbg_err, "setup data error %d\n", res);
+ cmd->error = res;
+ cmd->data->error = res;
+
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ if (s3cmci_host_usedma(host))
+ res = s3cmci_prepare_dma(host, cmd->data);
+ else
+ res = s3cmci_prepare_pio(host, cmd->data);
+
+ if (res) {
+ dbg(host, dbg_err, "data prepare error %d\n", res);
+ cmd->error = res;
+ cmd->data->error = res;
+
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ }
+
+ /* Send command */
+ s3cmci_send_command(host, cmd);
+
+ /* Enable Interrupt */
+ s3cmci_enable_irq(host, true);
+}
+
+static int s3cmci_card_present(struct mmc_host *mmc)
+{
+ struct s3cmci_host *host = mmc_priv(mmc);
+ struct s3c24xx_mci_pdata *pdata = host->pdata;
+ int ret;
+
+ if (pdata->no_detect)
+ return -ENOSYS;
+
+ ret = gpio_get_value(pdata->gpio_detect) ? 0 : 1;
+ return ret ^ pdata->detect_invert;
+}
+
+static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct s3cmci_host *host = mmc_priv(mmc);
+
+ host->status = "mmc request";
+ host->cmd_is_stop = 0;
+ host->mrq = mrq;
+
+ if (s3cmci_card_present(mmc) == 0) {
+ dbg(host, dbg_err, "%s: no medium present\n", __func__);
+ host->mrq->cmd->error = -ENOMEDIUM;
+ mmc_request_done(mmc, mrq);
+ } else
+ s3cmci_send_request(mmc);
+}
+
+static void s3cmci_set_clk(struct s3cmci_host *host, struct mmc_ios *ios)
+{
+ u32 mci_psc;
+
+ /* Set clock */
+ for (mci_psc = 0; mci_psc < 255; mci_psc++) {
+ host->real_rate = host->clk_rate / (host->clk_div*(mci_psc+1));
+
+ if (host->real_rate <= ios->clock)
+ break;
+ }
+
+ if (mci_psc > 255)
+ mci_psc = 255;
+
+ host->prescaler = mci_psc;
+ writel(host->prescaler, host->base + S3C2410_SDIPRE);
+
+ /* If requested clock is 0, real_rate will be 0, too */
+ if (ios->clock == 0)
+ host->real_rate = 0;
+}
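+
+/*
+ * Worked example (illustrative numbers): with clk_rate = 50 MHz and
+ * clk_div = 2, a request for 400 kHz selects mci_psc = 62, giving
+ * real_rate = 50 MHz / (2 * 63) ~= 397 kHz - the first rate at or
+ * below the requested one.
+ */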
+
+static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct s3cmci_host *host = mmc_priv(mmc);
+ u32 mci_con;
+
+ /* Set the power state */
+
+ mci_con = readl(host->base + S3C2410_SDICON);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_ON:
+ case MMC_POWER_UP:
+ /* Configure GPE5...GPE10 pins in SD mode */
+ s3c_gpio_cfgall_range(S3C2410_GPE(5), 6, S3C_GPIO_SFN(2),
+ S3C_GPIO_PULL_NONE);
+
+ if (host->pdata->set_power)
+ host->pdata->set_power(ios->power_mode, ios->vdd);
+
+ if (!host->is2440)
+ mci_con |= S3C2410_SDICON_FIFORESET;
+
+ break;
+
+ case MMC_POWER_OFF:
+ default:
+ gpio_direction_output(S3C2410_GPE(5), 0);
+
+ if (host->is2440)
+ mci_con |= S3C2440_SDICON_SDRESET;
+
+ if (host->pdata->set_power)
+ host->pdata->set_power(ios->power_mode, ios->vdd);
+
+ break;
+ }
+
+ s3cmci_set_clk(host, ios);
+
+ /* Set CLOCK_ENABLE */
+ if (ios->clock)
+ mci_con |= S3C2410_SDICON_CLOCKTYPE;
+ else
+ mci_con &= ~S3C2410_SDICON_CLOCKTYPE;
+
+ writel(mci_con, host->base + S3C2410_SDICON);
+
+ if ((ios->power_mode == MMC_POWER_ON) ||
+ (ios->power_mode == MMC_POWER_UP)) {
+ dbg(host, dbg_conf, "running at %lukHz (requested: %ukHz).\n",
+ host->real_rate/1000, ios->clock/1000);
+ } else {
+ dbg(host, dbg_conf, "powered down.\n");
+ }
+
+ host->bus_width = ios->bus_width;
+}
+
+static void s3cmci_reset(struct s3cmci_host *host)
+{
+ u32 con = readl(host->base + S3C2410_SDICON);
+
+ con |= S3C2440_SDICON_SDRESET;
+ writel(con, host->base + S3C2410_SDICON);
+}
+
+static int s3cmci_get_ro(struct mmc_host *mmc)
+{
+ struct s3cmci_host *host = mmc_priv(mmc);
+ struct s3c24xx_mci_pdata *pdata = host->pdata;
+ int ret;
+
+ if (pdata->no_wprotect)
+ return 0;
+
+ ret = gpio_get_value(pdata->gpio_wprotect) ? 1 : 0;
+ ret ^= pdata->wprotect_invert;
+
+ return ret;
+}
+
+static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct s3cmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 con;
+
+ local_irq_save(flags);
+
+ con = readl(host->base + S3C2410_SDICON);
+
+	if (enable == host->sdio_irqen)
+		goto same_state;
+
+	host->sdio_irqen = enable;
+
+ if (enable) {
+ con |= S3C2410_SDICON_SDIOIRQ;
+ enable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
+
+ if (!host->irq_state && !host->irq_disabled) {
+ host->irq_state = true;
+ enable_irq(host->irq);
+ }
+ } else {
+ disable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
+ con &= ~S3C2410_SDICON_SDIOIRQ;
+
+ if (!host->irq_enabled && host->irq_state) {
+ disable_irq_nosync(host->irq);
+ host->irq_state = false;
+ }
+ }
+
+ writel(con, host->base + S3C2410_SDICON);
+
+ same_state:
+ local_irq_restore(flags);
+
+ s3cmci_check_sdio_irq(host);
+}
+
+static struct mmc_host_ops s3cmci_ops = {
+ .request = s3cmci_request,
+ .set_ios = s3cmci_set_ios,
+ .get_ro = s3cmci_get_ro,
+ .get_cd = s3cmci_card_present,
+ .enable_sdio_irq = s3cmci_enable_sdio_irq,
+};
+
+static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
+	/* This is currently here to avoid a number of if (host->pdata)
+	 * checks. Fields left zero make the code pick reasonable
+	 * defaults. */
+ .no_wprotect = 1,
+ .no_detect = 1,
+};
+
+#ifdef CONFIG_CPU_FREQ
+
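+/*
+ * The SDI block clock follows the bus clock, so the prescaler is
+ * re-evaluated around cpufreq transitions: before the rate goes up and
+ * after it goes down, keeping the card clock at or below the requested
+ * rate on both sides of the change.
+ */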
+static int s3cmci_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct s3cmci_host *host;
+ struct mmc_host *mmc;
+ unsigned long newclk;
+ unsigned long flags;
+
+ host = container_of(nb, struct s3cmci_host, freq_transition);
+ newclk = clk_get_rate(host->clk);
+ mmc = host->mmc;
+
+ if ((val == CPUFREQ_PRECHANGE && newclk > host->clk_rate) ||
+ (val == CPUFREQ_POSTCHANGE && newclk < host->clk_rate)) {
+ spin_lock_irqsave(&mmc->lock, flags);
+
+ host->clk_rate = newclk;
+
+ if (mmc->ios.power_mode != MMC_POWER_OFF &&
+ mmc->ios.clock != 0)
+ s3cmci_set_clk(host, &mmc->ios);
+
+ spin_unlock_irqrestore(&mmc->lock, flags);
+ }
+
+ return 0;
+}
+
+static inline int s3cmci_cpufreq_register(struct s3cmci_host *host)
+{
+ host->freq_transition.notifier_call = s3cmci_cpufreq_transition;
+
+ return cpufreq_register_notifier(&host->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
+{
+ cpufreq_unregister_notifier(&host->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+#else
+static inline int s3cmci_cpufreq_register(struct s3cmci_host *host)
+{
+ return 0;
+}
+
+static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
+{
+}
+#endif
+
+
+#ifdef CONFIG_DEBUG_FS
+
+static int s3cmci_state_show(struct seq_file *seq, void *v)
+{
+ struct s3cmci_host *host = seq->private;
+
+ seq_printf(seq, "Register base = 0x%08x\n", (u32)host->base);
+ seq_printf(seq, "Clock rate = %ld\n", host->clk_rate);
+ seq_printf(seq, "Prescale = %d\n", host->prescaler);
+ seq_printf(seq, "is2440 = %d\n", host->is2440);
+ seq_printf(seq, "IRQ = %d\n", host->irq);
+ seq_printf(seq, "IRQ enabled = %d\n", host->irq_enabled);
+ seq_printf(seq, "IRQ disabled = %d\n", host->irq_disabled);
+ seq_printf(seq, "IRQ state = %d\n", host->irq_state);
+ seq_printf(seq, "CD IRQ = %d\n", host->irq_cd);
+ seq_printf(seq, "Do DMA = %d\n", s3cmci_host_usedma(host));
+ seq_printf(seq, "SDIIMSK at %d\n", host->sdiimsk);
+ seq_printf(seq, "SDIDATA at %d\n", host->sdidata);
+
+ return 0;
+}
+
+static int s3cmci_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, s3cmci_state_show, inode->i_private);
+}
+
+static const struct file_operations s3cmci_fops_state = {
+ .owner = THIS_MODULE,
+ .open = s3cmci_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#define DBG_REG(_r) { .addr = S3C2410_SDI##_r, .name = #_r }
+
+struct s3cmci_reg {
+ unsigned short addr;
+ unsigned char *name;
+} debug_regs[] = {
+ DBG_REG(CON),
+ DBG_REG(PRE),
+ DBG_REG(CMDARG),
+ DBG_REG(CMDCON),
+ DBG_REG(CMDSTAT),
+ DBG_REG(RSP0),
+ DBG_REG(RSP1),
+ DBG_REG(RSP2),
+ DBG_REG(RSP3),
+ DBG_REG(TIMER),
+ DBG_REG(BSIZE),
+ DBG_REG(DCON),
+ DBG_REG(DCNT),
+ DBG_REG(DSTA),
+ DBG_REG(FSTA),
+ {}
+};
+
+static int s3cmci_regs_show(struct seq_file *seq, void *v)
+{
+ struct s3cmci_host *host = seq->private;
+ struct s3cmci_reg *rptr = debug_regs;
+
+ for (; rptr->name; rptr++)
+ seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
+ readl(host->base + rptr->addr));
+
+ seq_printf(seq, "SDIIMSK\t=0x%08x\n", readl(host->base + host->sdiimsk));
+
+ return 0;
+}
+
+static int s3cmci_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, s3cmci_regs_show, inode->i_private);
+}
+
+static const struct file_operations s3cmci_fops_regs = {
+ .owner = THIS_MODULE,
+ .open = s3cmci_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void s3cmci_debugfs_attach(struct s3cmci_host *host)
+{
+ struct device *dev = &host->pdev->dev;
+
+ host->debug_root = debugfs_create_dir(dev_name(dev), NULL);
+ if (IS_ERR(host->debug_root)) {
+ dev_err(dev, "failed to create debugfs root\n");
+ return;
+ }
+
+ host->debug_state = debugfs_create_file("state", 0444,
+ host->debug_root, host,
+ &s3cmci_fops_state);
+
+ if (IS_ERR(host->debug_state))
+ dev_err(dev, "failed to create debug state file\n");
+
+ host->debug_regs = debugfs_create_file("regs", 0444,
+ host->debug_root, host,
+ &s3cmci_fops_regs);
+
+ if (IS_ERR(host->debug_regs))
+ dev_err(dev, "failed to create debug regs file\n");
+}
+
+static void s3cmci_debugfs_remove(struct s3cmci_host *host)
+{
+ debugfs_remove(host->debug_regs);
+ debugfs_remove(host->debug_state);
+ debugfs_remove(host->debug_root);
+}
+
+#else
+static inline void s3cmci_debugfs_attach(struct s3cmci_host *host) { }
+static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { }
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int s3cmci_probe(struct platform_device *pdev)
+{
+ struct s3cmci_host *host;
+ struct mmc_host *mmc;
+ int ret;
+ int is2440;
+ int i;
+
+ is2440 = platform_get_device_id(pdev)->driver_data;
+
+ mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto probe_out;
+ }
+
+ for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) {
+ ret = gpio_request(i, dev_name(&pdev->dev));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get gpio %d\n", i);
+
+ for (i--; i >= S3C2410_GPE(5); i--)
+ gpio_free(i);
+
+ goto probe_free_host;
+ }
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->pdev = pdev;
+ host->is2440 = is2440;
+
+ host->pdata = pdev->dev.platform_data;
+ if (!host->pdata) {
+ pdev->dev.platform_data = &s3cmci_def_pdata;
+ host->pdata = &s3cmci_def_pdata;
+ }
+
+ spin_lock_init(&host->complete_lock);
+ tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host);
+
+ if (is2440) {
+ host->sdiimsk = S3C2440_SDIIMSK;
+ host->sdidata = S3C2440_SDIDATA;
+ host->clk_div = 1;
+ } else {
+ host->sdiimsk = S3C2410_SDIIMSK;
+ host->sdidata = S3C2410_SDIDATA;
+ host->clk_div = 2;
+ }
+
+ host->complete_what = COMPLETION_NONE;
+ host->pio_active = XFER_NONE;
+
+ host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!host->mem) {
+ dev_err(&pdev->dev,
+ "failed to get io memory region resource.\n");
+
+ ret = -ENOENT;
+ goto probe_free_gpio;
+ }
+
+ host->mem = request_mem_region(host->mem->start,
+ resource_size(host->mem), pdev->name);
+
+ if (!host->mem) {
+ dev_err(&pdev->dev, "failed to request io memory region.\n");
+ ret = -ENOENT;
+ goto probe_free_gpio;
+ }
+
+ host->base = ioremap(host->mem->start, resource_size(host->mem));
+ if (!host->base) {
+ dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
+ ret = -EINVAL;
+ goto probe_free_mem_region;
+ }
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq == 0) {
+ dev_err(&pdev->dev, "failed to get interrupt resource.\n");
+ ret = -EINVAL;
+ goto probe_iounmap;
+ }
+
+ if (request_irq(host->irq, s3cmci_irq, 0, DRIVER_NAME, host)) {
+ dev_err(&pdev->dev, "failed to request mci interrupt.\n");
+ ret = -ENOENT;
+ goto probe_iounmap;
+ }
+
+	/* We get spurious interrupts even when we have set the IMSK
+	 * register to ignore everything, so use disable_irq() to
+	 * ensure we don't lock the system with un-serviceable requests. */
+
+ disable_irq(host->irq);
+ host->irq_state = false;
+
+ if (!host->pdata->no_detect) {
+ ret = gpio_request(host->pdata->gpio_detect, "s3cmci detect");
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get detect gpio\n");
+ goto probe_free_irq;
+ }
+
+ host->irq_cd = gpio_to_irq(host->pdata->gpio_detect);
+
+ if (host->irq_cd >= 0) {
+ if (request_irq(host->irq_cd, s3cmci_irq_cd,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ DRIVER_NAME, host)) {
+ dev_err(&pdev->dev,
+ "can't get card detect irq.\n");
+ ret = -ENOENT;
+ goto probe_free_gpio_cd;
+ }
+ } else {
+ dev_warn(&pdev->dev,
+ "host detect has no irq available\n");
+ gpio_direction_input(host->pdata->gpio_detect);
+ }
+ } else
+ host->irq_cd = -1;
+
+ if (!host->pdata->no_wprotect) {
+ ret = gpio_request(host->pdata->gpio_wprotect, "s3cmci wp");
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get writeprotect\n");
+ goto probe_free_irq_cd;
+ }
+
+ gpio_direction_input(host->pdata->gpio_wprotect);
+ }
+
+ /* depending on the dma state, get a dma channel to use. */
+
+ if (s3cmci_host_usedma(host)) {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->dma = dma_request_slave_channel_compat(mask,
+ s3c24xx_dma_filter, (void *)DMACH_SDI, &pdev->dev, "rx-tx");
+ if (!host->dma) {
+ dev_err(&pdev->dev, "cannot get DMA channel.\n");
+ ret = -EBUSY;
+ goto probe_free_gpio_wp;
+ }
+ }
+
+ host->clk = clk_get(&pdev->dev, "sdi");
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "failed to find clock source.\n");
+ ret = PTR_ERR(host->clk);
+ host->clk = NULL;
+ goto probe_free_dma;
+ }
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock source.\n");
+ goto clk_free;
+ }
+
+ host->clk_rate = clk_get_rate(host->clk);
+
+ mmc->ops = &s3cmci_ops;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+#ifdef CONFIG_MMC_S3C_HW_SDIO_IRQ
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+#else
+ mmc->caps = MMC_CAP_4_BIT_DATA;
+#endif
+ mmc->f_min = host->clk_rate / (host->clk_div * 256);
+ mmc->f_max = host->clk_rate / host->clk_div;
+
+ if (host->pdata->ocr_avail)
+ mmc->ocr_avail = host->pdata->ocr_avail;
+
+ mmc->max_blk_count = 4095;
+ mmc->max_blk_size = 4095;
+ mmc->max_req_size = 4095 * 512;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ mmc->max_segs = 128;
+
+ dbg(host, dbg_debug,
+ "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%p.\n",
+ (host->is2440?"2440":""),
+ host->base, host->irq, host->irq_cd, host->dma);
+
+ ret = s3cmci_cpufreq_register(host);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register cpufreq\n");
+ goto free_dmabuf;
+ }
+
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add mmc host.\n");
+ goto free_cpufreq;
+ }
+
+ s3cmci_debugfs_attach(host);
+
+ platform_set_drvdata(pdev, mmc);
+ dev_info(&pdev->dev, "%s - using %s, %s SDIO IRQ\n", mmc_hostname(mmc),
+ s3cmci_host_usedma(host) ? "dma" : "pio",
+ mmc->caps & MMC_CAP_SDIO_IRQ ? "hw" : "sw");
+
+ return 0;
+
+ free_cpufreq:
+ s3cmci_cpufreq_deregister(host);
+
+ free_dmabuf:
+ clk_disable_unprepare(host->clk);
+
+ clk_free:
+ clk_put(host->clk);
+
+ probe_free_dma:
+ if (s3cmci_host_usedma(host))
+ dma_release_channel(host->dma);
+
+ probe_free_gpio_wp:
+ if (!host->pdata->no_wprotect)
+ gpio_free(host->pdata->gpio_wprotect);
+
+ probe_free_gpio_cd:
+ if (!host->pdata->no_detect)
+ gpio_free(host->pdata->gpio_detect);
+
+ probe_free_irq_cd:
+ if (host->irq_cd >= 0)
+ free_irq(host->irq_cd, host);
+
+ probe_free_irq:
+ free_irq(host->irq, host);
+
+ probe_iounmap:
+ iounmap(host->base);
+
+ probe_free_mem_region:
+ release_mem_region(host->mem->start, resource_size(host->mem));
+
+ probe_free_gpio:
+ for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
+ gpio_free(i);
+
+ probe_free_host:
+ mmc_free_host(mmc);
+
+ probe_out:
+ return ret;
+}
+
+static void s3cmci_shutdown(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct s3cmci_host *host = mmc_priv(mmc);
+
+ if (host->irq_cd >= 0)
+ free_irq(host->irq_cd, host);
+
+ s3cmci_debugfs_remove(host);
+ s3cmci_cpufreq_deregister(host);
+ mmc_remove_host(mmc);
+ clk_disable_unprepare(host->clk);
+}
+
+static int s3cmci_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct s3cmci_host *host = mmc_priv(mmc);
+ struct s3c24xx_mci_pdata *pd = host->pdata;
+ int i;
+
+ s3cmci_shutdown(pdev);
+
+ clk_put(host->clk);
+
+ tasklet_disable(&host->pio_tasklet);
+
+ if (s3cmci_host_usedma(host))
+ dma_release_channel(host->dma);
+
+ free_irq(host->irq, host);
+
+ if (!pd->no_wprotect)
+ gpio_free(pd->gpio_wprotect);
+
+ if (!pd->no_detect)
+ gpio_free(pd->gpio_detect);
+
+ for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
+ gpio_free(i);
+
+ iounmap(host->base);
+ release_mem_region(host->mem->start, resource_size(host->mem));
+
+ mmc_free_host(mmc);
+ return 0;
+}
+
+static struct platform_device_id s3cmci_driver_ids[] = {
+ {
+ .name = "s3c2410-sdi",
+ .driver_data = 0,
+ }, {
+ .name = "s3c2412-sdi",
+ .driver_data = 1,
+ }, {
+ .name = "s3c2440-sdi",
+ .driver_data = 1,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
+
+static struct platform_driver s3cmci_driver = {
+ .driver = {
+ .name = "s3c-sdi",
+ },
+ .id_table = s3cmci_driver_ids,
+ .probe = s3cmci_probe,
+ .remove = s3cmci_remove,
+ .shutdown = s3cmci_shutdown,
+};
+
+module_platform_driver(s3cmci_driver);
+
+MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>");
diff --git a/kernel/drivers/mmc/host/s3cmci.h b/kernel/drivers/mmc/host/s3cmci.h
new file mode 100644
index 000000000..cc2e46cb5
--- /dev/null
+++ b/kernel/drivers/mmc/host/s3cmci.h
@@ -0,0 +1,80 @@
+/*
+ * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver
+ *
+ * Copyright (C) 2004-2006 Thomas Kleffel, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+enum s3cmci_waitfor {
+ COMPLETION_NONE,
+ COMPLETION_FINALIZE,
+ COMPLETION_CMDSENT,
+ COMPLETION_RSPFIN,
+ COMPLETION_XFERFINISH,
+ COMPLETION_XFERFINISH_RSPFIN,
+};
+
+struct s3cmci_host {
+ struct platform_device *pdev;
+ struct s3c24xx_mci_pdata *pdata;
+ struct mmc_host *mmc;
+ struct resource *mem;
+ struct clk *clk;
+ void __iomem *base;
+ int irq;
+ int irq_cd;
+ struct dma_chan *dma;
+
+ unsigned long clk_rate;
+ unsigned long clk_div;
+ unsigned long real_rate;
+ u8 prescaler;
+
+ int is2440;
+ unsigned sdiimsk;
+ unsigned sdidata;
+
+ bool irq_disabled;
+ bool irq_enabled;
+ bool irq_state;
+ int sdio_irqen;
+
+ struct mmc_request *mrq;
+ int cmd_is_stop;
+
+ spinlock_t complete_lock;
+ enum s3cmci_waitfor complete_what;
+
+ int dma_complete;
+
+ u32 pio_sgptr;
+ u32 pio_bytes;
+ u32 pio_count;
+ u32 *pio_ptr;
+#define XFER_NONE 0
+#define XFER_READ 1
+#define XFER_WRITE 2
+ u32 pio_active;
+
+ int bus_width;
+
+ char dbgmsg_cmd[301];
+ char dbgmsg_dat[301];
+ char *status;
+
+ unsigned int ccnt, dcnt;
+ struct tasklet_struct pio_tasklet;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debug_root;
+ struct dentry *debug_state;
+ struct dentry *debug_regs;
+#endif
+
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+#endif
+};
diff --git a/kernel/drivers/mmc/host/sdhci-acpi.c b/kernel/drivers/mmc/host/sdhci-acpi.c
new file mode 100644
index 000000000..22d929fa3
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-acpi.c
@@ -0,0 +1,473 @@
+/*
+ * Secure Digital Host Controller Interface ACPI driver.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/compiler.h>
+#include <linux/stddef.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/acpi.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/pm.h>
+#include <linux/mmc/slot-gpio.h>
+
+#include "sdhci.h"
+
+enum {
+ SDHCI_ACPI_SD_CD = BIT(0),
+ SDHCI_ACPI_RUNTIME_PM = BIT(1),
+ SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL = BIT(2),
+};
+
+struct sdhci_acpi_chip {
+ const struct sdhci_ops *ops;
+ unsigned int quirks;
+ unsigned int quirks2;
+ unsigned long caps;
+ unsigned int caps2;
+ mmc_pm_flag_t pm_caps;
+};
+
+struct sdhci_acpi_slot {
+ const struct sdhci_acpi_chip *chip;
+ unsigned int quirks;
+ unsigned int quirks2;
+ unsigned long caps;
+ unsigned int caps2;
+ mmc_pm_flag_t pm_caps;
+ unsigned int flags;
+ int (*probe_slot)(struct platform_device *, const char *, const char *);
+ int (*remove_slot)(struct platform_device *);
+};
+
+struct sdhci_acpi_host {
+ struct sdhci_host *host;
+ const struct sdhci_acpi_slot *slot;
+ struct platform_device *pdev;
+ bool use_runtime_pm;
+ bool dma_setup;
+};
+
+static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
+{
+ return c->slot && (c->slot->flags & flag);
+}
+
+static int sdhci_acpi_enable_dma(struct sdhci_host *host)
+{
+ struct sdhci_acpi_host *c = sdhci_priv(host);
+ struct device *dev = &c->pdev->dev;
+ int err = -1;
+
+ if (c->dma_setup)
+ return 0;
+
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
+ if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) {
+ host->flags &= ~SDHCI_USE_64_BIT_DMA;
+ } else {
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (err)
+ dev_warn(dev, "Failed to set 64-bit DMA mask\n");
+ }
+ }
+
+ if (err)
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+ c->dma_setup = !err;
+
+ return err;
+}
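+
+/*
+ * For illustration (a sketch, not part of the driver): this is the usual
+ * "try wide, fall back narrow" DMA-mask idiom. Because err starts at -1,
+ * the 32-bit fallback also runs when the host never set
+ * SDHCI_USE_64_BIT_DMA, i.e. the effective sequence is:
+ *
+ *	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ *	if (err)
+ *		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ */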
+
+static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
+{
+ u8 reg;
+
+ reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ reg |= 0x10;
+ sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
+ /* For eMMC, minimum is 1us but give it 9us for good measure */
+ udelay(9);
+ reg &= ~0x10;
+ sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
+ /* For eMMC, minimum is 200us but give it 300us for good measure */
+ usleep_range(300, 1000);
+}
+
+static const struct sdhci_ops sdhci_acpi_ops_dflt = {
+ .set_clock = sdhci_set_clock,
+ .enable_dma = sdhci_acpi_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_ops sdhci_acpi_ops_int = {
+ .set_clock = sdhci_set_clock,
+ .enable_dma = sdhci_acpi_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_acpi_int_hw_reset,
+};
+
+static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
+ .ops = &sdhci_acpi_ops_int,
+};
+
+static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
+ const char *hid, const char *uid)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct sdhci_host *host;
+
+ if (!c || !c->host)
+ return 0;
+
+ host = c->host;
+
+ /* Platform specific code during emmc probe slot goes here */
+
+ if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") &&
+ sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
+ sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807)
+ host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
+
+ return 0;
+}
+
+static int sdhci_acpi_sdio_probe_slot(struct platform_device *pdev,
+ const char *hid, const char *uid)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct sdhci_host *host;
+
+ if (!c || !c->host)
+ return 0;
+
+ host = c->host;
+
+ /* Platform specific code during sdio probe slot goes here */
+
+ return 0;
+}
+
+static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
+ const char *hid, const char *uid)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct sdhci_host *host;
+
+ if (!c || !c->host || !c->slot)
+ return 0;
+
+ host = c->host;
+
+ /* Platform specific code during sd probe slot goes here */
+
+ return 0;
+}
+
+static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
+ .chip = &sdhci_acpi_chip_int,
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
+ MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
+ MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
+ .caps2 = MMC_CAP2_HC_ERASE_SZ,
+ .flags = SDHCI_ACPI_RUNTIME_PM,
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_STOP_WITH_TC,
+ .probe_slot = sdhci_acpi_emmc_probe_slot,
+};
+
+static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+ .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
+ MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
+ .flags = SDHCI_ACPI_RUNTIME_PM,
+ .pm_caps = MMC_PM_KEEP_POWER,
+ .probe_slot = sdhci_acpi_sdio_probe_slot,
+};
+
+static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
+ .flags = SDHCI_ACPI_SD_CD | SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL |
+ SDHCI_ACPI_RUNTIME_PM,
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
+ SDHCI_QUIRK2_STOP_WITH_TC,
+ .caps = MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
+ .probe_slot = sdhci_acpi_sd_probe_slot,
+};
+
+struct sdhci_acpi_uid_slot {
+ const char *hid;
+ const char *uid;
+ const struct sdhci_acpi_slot *slot;
+};
+
+static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
+ { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
+ { "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
+ { "80860F16" , NULL, &sdhci_acpi_slot_int_sd },
+ { "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio },
+ { "INT33BB" , "3" , &sdhci_acpi_slot_int_sd },
+ { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
+ { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
+ { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio },
+ { "PNP0D40" },
+ { },
+};
+
+static const struct acpi_device_id sdhci_acpi_ids[] = {
+ { "80860F14" },
+ { "80860F16" },
+ { "INT33BB" },
+ { "INT33C6" },
+ { "INT3436" },
+ { "INT344D" },
+ { "PNP0D40" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
+
+static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid,
+ const char *uid)
+{
+ const struct sdhci_acpi_uid_slot *u;
+
+ for (u = sdhci_acpi_uids; u->hid; u++) {
+ if (strcmp(u->hid, hid))
+ continue;
+ if (!u->uid)
+ return u->slot;
+ if (uid && !strcmp(u->uid, uid))
+ return u->slot;
+ }
+ return NULL;
+}
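+
+/*
+ * For illustration (example lookups, not part of the driver): matching is
+ * first-hit on HID, and a NULL uid in the table acts as a wildcard, e.g.:
+ *
+ *	sdhci_acpi_get_slot("80860F14", "1");  returns &sdhci_acpi_slot_int_emmc
+ *	sdhci_acpi_get_slot("80860F16", "7");  returns &sdhci_acpi_slot_int_sd
+ *	sdhci_acpi_get_slot("PNP0D40", NULL);  returns NULL (no special slot data)
+ */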
+
+static int sdhci_acpi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ struct acpi_device *device;
+ struct sdhci_acpi_host *c;
+ struct sdhci_host *host;
+ struct resource *iomem;
+ resource_size_t len;
+ const char *hid;
+ const char *uid;
+ int err;
+
+ if (acpi_bus_get_device(handle, &device))
+ return -ENODEV;
+
+ if (acpi_bus_get_status(device) || !device->status.present)
+ return -ENODEV;
+
+ hid = acpi_device_hid(device);
+ uid = device->pnp.unique_id;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem)
+ return -ENOMEM;
+
+ len = resource_size(iomem);
+ if (len < 0x100)
+ dev_err(dev, "Invalid iomem size!\n");
+
+ if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
+ return -ENOMEM;
+
+ host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ c = sdhci_priv(host);
+ c->host = host;
+ c->slot = sdhci_acpi_get_slot(hid, uid);
+ c->pdev = pdev;
+ c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
+
+ platform_set_drvdata(pdev, c);
+
+ host->hw_name = "ACPI";
+ host->ops = &sdhci_acpi_ops_dflt;
+ host->irq = platform_get_irq(pdev, 0);
+
+ host->ioaddr = devm_ioremap_nocache(dev, iomem->start,
+ resource_size(iomem));
+ if (host->ioaddr == NULL) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ if (c->slot) {
+ if (c->slot->probe_slot) {
+ err = c->slot->probe_slot(pdev, hid, uid);
+ if (err)
+ goto err_free;
+ }
+ if (c->slot->chip) {
+ host->ops = c->slot->chip->ops;
+ host->quirks |= c->slot->chip->quirks;
+ host->quirks2 |= c->slot->chip->quirks2;
+ host->mmc->caps |= c->slot->chip->caps;
+ host->mmc->caps2 |= c->slot->chip->caps2;
+ host->mmc->pm_caps |= c->slot->chip->pm_caps;
+ }
+ host->quirks |= c->slot->quirks;
+ host->quirks2 |= c->slot->quirks2;
+ host->mmc->caps |= c->slot->caps;
+ host->mmc->caps2 |= c->slot->caps2;
+ host->mmc->pm_caps |= c->slot->pm_caps;
+ }
+
+ host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
+
+ if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
+ bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
+
+ if (mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL)) {
+ dev_warn(dev, "failed to setup card detect gpio\n");
+ c->use_runtime_pm = false;
+ }
+ }
+
+ err = sdhci_add_host(host);
+ if (err)
+ goto err_free;
+
+ if (c->use_runtime_pm) {
+ pm_runtime_set_active(dev);
+ pm_suspend_ignore_children(dev, 1);
+ pm_runtime_set_autosuspend_delay(dev, 50);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return 0;
+
+err_free:
+ sdhci_free_host(c->host);
+ return err;
+}
+
+static int sdhci_acpi_remove(struct platform_device *pdev)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int dead;
+
+ if (c->use_runtime_pm) {
+ pm_runtime_get_sync(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ }
+
+ if (c->slot && c->slot->remove_slot)
+ c->slot->remove_slot(pdev);
+
+ dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0);
+ sdhci_remove_host(c->host, dead);
+ sdhci_free_host(c->host);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int sdhci_acpi_suspend(struct device *dev)
+{
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+
+ return sdhci_suspend_host(c->host);
+}
+
+static int sdhci_acpi_resume(struct device *dev)
+{
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+
+ return sdhci_resume_host(c->host);
+}
+
+#else
+
+#define sdhci_acpi_suspend NULL
+#define sdhci_acpi_resume NULL
+
+#endif
+
+#ifdef CONFIG_PM
+
+static int sdhci_acpi_runtime_suspend(struct device *dev)
+{
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+
+ return sdhci_runtime_suspend_host(c->host);
+}
+
+static int sdhci_acpi_runtime_resume(struct device *dev)
+{
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+
+ return sdhci_runtime_resume_host(c->host);
+}
+
+#endif
+
+static const struct dev_pm_ops sdhci_acpi_pm_ops = {
+ .suspend = sdhci_acpi_suspend,
+ .resume = sdhci_acpi_resume,
+ SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend,
+ sdhci_acpi_runtime_resume, NULL)
+};
+
+static struct platform_driver sdhci_acpi_driver = {
+ .driver = {
+ .name = "sdhci-acpi",
+ .acpi_match_table = sdhci_acpi_ids,
+ .pm = &sdhci_acpi_pm_ops,
+ },
+ .probe = sdhci_acpi_probe,
+ .remove = sdhci_acpi_remove,
+};
+
+module_platform_driver(sdhci_acpi_driver);
+
+MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-bcm-kona.c b/kernel/drivers/mmc/host/sdhci-bcm-kona.c
new file mode 100644
index 000000000..2bd90fb35
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-bcm-kona.c
@@ -0,0 +1,339 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/platform_device.h>
+#include <linux/mmc/host.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/mmc/slot-gpio.h>
+
+#include "sdhci-pltfm.h"
+#include "sdhci.h"
+
+#define SDHCI_SOFT_RESET 0x01000000
+#define KONA_SDHOST_CORECTRL 0x8000
+#define KONA_SDHOST_CD_PINCTRL 0x00000008
+#define KONA_SDHOST_STOP_HCLK 0x00000004
+#define KONA_SDHOST_RESET 0x00000002
+#define KONA_SDHOST_EN 0x00000001
+
+#define KONA_SDHOST_CORESTAT 0x8004
+#define KONA_SDHOST_WP 0x00000002
+#define KONA_SDHOST_CD_SW 0x00000001
+
+#define KONA_SDHOST_COREIMR 0x8008
+#define KONA_SDHOST_IP 0x00000001
+
+#define KONA_SDHOST_COREISR 0x800C
+#define KONA_SDHOST_COREIMSR 0x8010
+#define KONA_SDHOST_COREDBG1 0x8014
+#define KONA_SDHOST_COREGPO_MASK 0x8018
+
+#define SD_DETECT_GPIO_DEBOUNCE_128MS 128
+
+#define KONA_MMC_AUTOSUSPEND_DELAY (50)
+
+struct sdhci_bcm_kona_dev {
+ struct mutex write_lock; /* protect back to back writes */
+};
+
+
+static int sdhci_bcm_kona_sd_reset(struct sdhci_host *host)
+{
+ unsigned int val;
+ unsigned long timeout;
+
+ /* This timeout should be sufficient for the core to reset */
+ timeout = jiffies + msecs_to_jiffies(100);
+
+ /* reset the host using the top level reset */
+ val = sdhci_readl(host, KONA_SDHOST_CORECTRL);
+ val |= KONA_SDHOST_RESET;
+ sdhci_writel(host, val, KONA_SDHOST_CORECTRL);
+
+ while (!(sdhci_readl(host, KONA_SDHOST_CORECTRL) & KONA_SDHOST_RESET)) {
+ if (time_is_before_jiffies(timeout)) {
+ pr_err("Error: sd host is stuck in reset!!!\n");
+ return -EFAULT;
+ }
+ }
+
+ /* bring the host out of reset */
+ val = sdhci_readl(host, KONA_SDHOST_CORECTRL);
+ val &= ~KONA_SDHOST_RESET;
+
+ /*
+ * Back-to-Back register write needs a delay of 1ms at bootup (min 10uS)
+ * Back-to-Back writes to same register needs delay when SD bus clock
+ * is very low w.r.t AHB clock, mainly during boot-time and during card
+ * insert-removal.
+ */
+ usleep_range(1000, 5000);
+ sdhci_writel(host, val, KONA_SDHOST_CORECTRL);
+
+ return 0;
+}
+
+static void sdhci_bcm_kona_sd_init(struct sdhci_host *host)
+{
+ unsigned int val;
+
+ /* enable the interrupt from the IP core */
+ val = sdhci_readl(host, KONA_SDHOST_COREIMR);
+ val |= KONA_SDHOST_IP;
+ sdhci_writel(host, val, KONA_SDHOST_COREIMR);
+
+ /* Enable the AHB clock gating module to the host */
+ val = sdhci_readl(host, KONA_SDHOST_CORECTRL);
+ val |= KONA_SDHOST_EN;
+
+ /*
+ * Back-to-Back register write needs a delay of 1ms at bootup (min 10uS)
+ * Back-to-Back writes to same register needs delay when SD bus clock
+ * is very low w.r.t AHB clock, mainly during boot-time and during card
+ * insert-removal.
+ */
+ usleep_range(1000, 5000);
+ sdhci_writel(host, val, KONA_SDHOST_CORECTRL);
+}
+
+/*
+ * Software emulation of the SD card insertion/removal. Set insert=1 for insert
+ * and insert=0 for removal. The card detection is done by GPIO. For Broadcom
+ * IP to function properly the bit 0 of CORESTAT register needs to be set/reset
+ * to generate the CD IRQ handled in sdhci.c which schedules card_tasklet.
+ */
+static int sdhci_bcm_kona_sd_card_emulate(struct sdhci_host *host, int insert)
+{
+ struct sdhci_pltfm_host *pltfm_priv = sdhci_priv(host);
+ struct sdhci_bcm_kona_dev *kona_dev = sdhci_pltfm_priv(pltfm_priv);
+ u32 val;
+
+ /*
+ * Back-to-Back register write needs a delay of min 10uS.
+ * Back-to-Back writes to same register needs delay when SD bus clock
+ * is very low w.r.t AHB clock, mainly during boot-time and during card
+ * insert-removal.
+ * We keep 20uS
+ */
+ mutex_lock(&kona_dev->write_lock);
+ udelay(20);
+ val = sdhci_readl(host, KONA_SDHOST_CORESTAT);
+
+ if (insert) {
+ int ret;
+
+ ret = mmc_gpio_get_ro(host->mmc);
+ if (ret >= 0)
+ val = (val & ~KONA_SDHOST_WP) |
+ ((ret) ? KONA_SDHOST_WP : 0);
+
+ val |= KONA_SDHOST_CD_SW;
+ sdhci_writel(host, val, KONA_SDHOST_CORESTAT);
+ } else {
+ val &= ~KONA_SDHOST_CD_SW;
+ sdhci_writel(host, val, KONA_SDHOST_CORESTAT);
+ }
+ mutex_unlock(&kona_dev->write_lock);
+
+ return 0;
+}
+
+/*
+ * SD card interrupt event callback
+ */
+static void sdhci_bcm_kona_card_event(struct sdhci_host *host)
+{
+ if (mmc_gpio_get_cd(host->mmc) > 0) {
+ dev_dbg(mmc_dev(host->mmc),
+ "card inserted\n");
+ sdhci_bcm_kona_sd_card_emulate(host, 1);
+ } else {
+ dev_dbg(mmc_dev(host->mmc),
+ "card removed\n");
+ sdhci_bcm_kona_sd_card_emulate(host, 0);
+ }
+}
+
+static void sdhci_bcm_kona_init_74_clocks(struct sdhci_host *host,
+ u8 power_mode)
+{
+ /*
+ * JEDEC and SD spec specify supplying 74 continuous clocks to
+ * device after power up. With the minimum bus clock (100 kHz),
+ * that translates to 740us
+ */
+ if (power_mode != MMC_POWER_OFF)
+ udelay(740);
+}
+
+static struct sdhci_ops sdhci_bcm_kona_ops = {
+ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
+ .platform_send_init_74_clocks = sdhci_bcm_kona_init_74_clocks,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .card_event = sdhci_bcm_kona_card_event,
+};
+
+static struct sdhci_pltfm_data sdhci_pltfm_data_kona = {
+ .ops = &sdhci_bcm_kona_ops,
+ .quirks = SDHCI_QUIRK_NO_CARD_NO_RESET |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE | SDHCI_QUIRK_32BIT_ADMA_SIZE |
+ SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+};
+
+static const struct of_device_id sdhci_bcm_kona_of_match[] = {
+ { .compatible = "brcm,kona-sdhci"},
+ { .compatible = "bcm,kona-sdhci"}, /* deprecated name */
+ {}
+};
+MODULE_DEVICE_TABLE(of, sdhci_bcm_kona_of_match);
+
+static int sdhci_bcm_kona_probe(struct platform_device *pdev)
+{
+ struct sdhci_bcm_kona_dev *kona_dev = NULL;
+ struct sdhci_pltfm_host *pltfm_priv;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host;
+ int ret;
+
+ ret = 0;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_pltfm_data_kona,
+ sizeof(*kona_dev));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ dev_dbg(dev, "%s: inited. IOADDR=%p\n", __func__, host->ioaddr);
+
+ pltfm_priv = sdhci_priv(host);
+
+ kona_dev = sdhci_pltfm_priv(pltfm_priv);
+ mutex_init(&kona_dev->write_lock);
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err_pltfm_free;
+
+ if (!host->mmc->f_max) {
+ dev_err(&pdev->dev, "Missing max-freq for SDHCI cfg\n");
+ ret = -ENXIO;
+ goto err_pltfm_free;
+ }
+
+ /* Get and enable the core clock */
+ pltfm_priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pltfm_priv->clk)) {
+ dev_err(dev, "Failed to get core clock\n");
+ ret = PTR_ERR(pltfm_priv->clk);
+ goto err_pltfm_free;
+ }
+
+ if (clk_set_rate(pltfm_priv->clk, host->mmc->f_max) != 0) {
+ dev_err(dev, "Failed to set rate core clock\n");
+ goto err_pltfm_free;
+ }
+
+ if (clk_prepare_enable(pltfm_priv->clk) != 0) {
+ dev_err(dev, "Failed to enable core clock\n");
+ goto err_pltfm_free;
+ }
+
+ dev_dbg(dev, "non-removable=%c\n",
+ (host->mmc->caps & MMC_CAP_NONREMOVABLE) ? 'Y' : 'N');
+ dev_dbg(dev, "cd_gpio %c, wp_gpio %c\n",
+ (mmc_gpio_get_cd(host->mmc) != -ENOSYS) ? 'Y' : 'N',
+ (mmc_gpio_get_ro(host->mmc) != -ENOSYS) ? 'Y' : 'N');
+
+ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
+ dev_dbg(dev, "is_8bit=%c\n",
+ (host->mmc->caps & MMC_CAP_8_BIT_DATA) ? 'Y' : 'N');
+
+ ret = sdhci_bcm_kona_sd_reset(host);
+ if (ret)
+ goto err_clk_disable;
+
+ sdhci_bcm_kona_sd_init(host);
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(dev, "Failed sdhci_add_host\n");
+ goto err_reset;
+ }
+
+ /* if device is eMMC, emulate card insert right here */
+ if (host->mmc->caps & MMC_CAP_NONREMOVABLE) {
+ ret = sdhci_bcm_kona_sd_card_emulate(host, 1);
+ if (ret) {
+ dev_err(dev,
+ "unable to emulate card insertion\n");
+ goto err_remove_host;
+ }
+ }
+ /*
+ * Since the card detection GPIO interrupt is configured to be
+ * edge sensitive, check the initial GPIO value here, emulate
+ * only if the card is present
+ */
+ if (mmc_gpio_get_cd(host->mmc) > 0)
+ sdhci_bcm_kona_sd_card_emulate(host, 1);
+
+ dev_dbg(dev, "initialized properly\n");
+ return 0;
+
+err_remove_host:
+ sdhci_remove_host(host, 0);
+
+err_reset:
+ sdhci_bcm_kona_sd_reset(host);
+
+err_clk_disable:
+ clk_disable_unprepare(pltfm_priv->clk);
+
+err_pltfm_free:
+ sdhci_pltfm_free(pdev);
+
+ dev_err(dev, "Probing of sdhci-pltfm failed: %d\n", ret);
+ return ret;
+}
+
+static struct platform_driver sdhci_bcm_kona_driver = {
+ .driver = {
+ .name = "sdhci-kona",
+ .pm = SDHCI_PLTFM_PMOPS,
+ .of_match_table = sdhci_bcm_kona_of_match,
+ },
+ .probe = sdhci_bcm_kona_probe,
+ .remove = sdhci_pltfm_unregister,
+};
+module_platform_driver(sdhci_bcm_kona_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for Broadcom Kona platform");
+MODULE_AUTHOR("Broadcom");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-bcm2835.c b/kernel/drivers/mmc/host/sdhci-bcm2835.c
new file mode 100644
index 000000000..0ef0343c6
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-bcm2835.c
@@ -0,0 +1,202 @@
+/*
+ * BCM2835 SDHCI
+ * Copyright (C) 2012 Stephen Warren
+ * Based on U-Boot's MMC driver for the BCM2835 by Oleksandr Tymoshenko & me
+ * Portions of the code there were obviously based on the Linux kernel at:
+ * git://github.com/raspberrypi/linux.git rpi-3.6.y
+ * commit f5b930b "Main bcm2708 linux port" signed-off-by Dom Cobley.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mmc/host.h>
+#include "sdhci-pltfm.h"
+
+/*
+ * 400KHz is max freq for card ID etc. Use that as min card clock. We need to
+ * know the min to enable static calculation of max BCM2835_SDHCI_WRITE_DELAY.
+ */
+#define MIN_FREQ 400000
+
+/*
+ * The Arasan has a bugette whereby it may lose the content of successive
+ * writes to registers that are within two SD-card clock cycles of each other
+ * (a clock domain crossing problem). It seems, however, that the data
+ * register does not have this problem, which is just as well - otherwise we'd
+ * have to nobble the DMA engine too.
+ *
+ * This should probably be dynamically calculated based on the actual card
+ * frequency. However, this is the longest we'll have to wait, and doesn't
+ * seem to slow access down too much, so the added complexity doesn't seem
+ * worth it for now.
+ *
+ * 1/MIN_FREQ is (max) time per tick of eMMC clock.
+ * 2/MIN_FREQ is time for two ticks.
+ * Multiply by 1000000 to get uS per two ticks.
+ * +1 for hack rounding.
+ */
+#define BCM2835_SDHCI_WRITE_DELAY (((2 * 1000000) / MIN_FREQ) + 1)
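+
+/*
+ * Worked example: with MIN_FREQ = 400000 as defined above,
+ * BCM2835_SDHCI_WRITE_DELAY = ((2 * 1000000) / 400000) + 1 = 6, so every
+ * register write is padded by 6us, enough to cover two ticks of the
+ * slowest supported card clock.
+ */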
+
+struct bcm2835_sdhci {
+ u32 shadow;
+};
+
+static void bcm2835_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ writel(val, host->ioaddr + reg);
+
+ udelay(BCM2835_SDHCI_WRITE_DELAY);
+}
+
+static inline u32 bcm2835_sdhci_readl(struct sdhci_host *host, int reg)
+{
+ u32 val = readl(host->ioaddr + reg);
+
+ if (reg == SDHCI_CAPABILITIES)
+ val |= SDHCI_CAN_VDD_330;
+
+ return val;
+}
+
+static void bcm2835_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv;
+ u32 oldval = (reg == SDHCI_COMMAND) ? bcm2835_host->shadow :
+ bcm2835_sdhci_readl(host, reg & ~3);
+ u32 word_num = (reg >> 1) & 1;
+ u32 word_shift = word_num * 16;
+ u32 mask = 0xffff << word_shift;
+ u32 newval = (oldval & ~mask) | (val << word_shift);
+
+ if (reg == SDHCI_TRANSFER_MODE)
+ bcm2835_host->shadow = newval;
+ else
+ bcm2835_sdhci_writel(host, newval, reg & ~3);
+}
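+
+/*
+ * For illustration (a sketch, not part of the driver): SDHCI_TRANSFER_MODE
+ * (0x0c) and SDHCI_COMMAND (0x0e) share one 32-bit word, so the
+ * TRANSFER_MODE half is only latched in ->shadow and reaches the hardware
+ * fused with the later COMMAND write:
+ *
+ *	bcm2835_sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); // shadow only
+ *	bcm2835_sdhci_writew(host, cmd, SDHCI_COMMAND);        // one writel
+ */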
+
+static u16 bcm2835_sdhci_readw(struct sdhci_host *host, int reg)
+{
+ u32 val = bcm2835_sdhci_readl(host, (reg & ~3));
+ u32 word_num = (reg >> 1) & 1;
+ u32 word_shift = word_num * 16;
+ u32 word = (val >> word_shift) & 0xffff;
+
+ return word;
+}
+
+static void bcm2835_sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ u32 oldval = bcm2835_sdhci_readl(host, reg & ~3);
+ u32 byte_num = reg & 3;
+ u32 byte_shift = byte_num * 8;
+ u32 mask = 0xff << byte_shift;
+ u32 newval = (oldval & ~mask) | (val << byte_shift);
+
+ bcm2835_sdhci_writel(host, newval, reg & ~3);
+}
+
+static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg)
+{
+ u32 val = bcm2835_sdhci_readl(host, (reg & ~3));
+ u32 byte_num = reg & 3;
+ u32 byte_shift = byte_num * 8;
+ u32 byte = (val >> byte_shift) & 0xff;
+
+ return byte;
+}
+
+static unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
+{
+ return MIN_FREQ;
+}
+
+static const struct sdhci_ops bcm2835_sdhci_ops = {
+ .write_l = bcm2835_sdhci_writel,
+ .write_w = bcm2835_sdhci_writew,
+ .write_b = bcm2835_sdhci_writeb,
+ .read_l = bcm2835_sdhci_readl,
+ .read_w = bcm2835_sdhci_readw,
+ .read_b = bcm2835_sdhci_readb,
+ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_min_clock = bcm2835_sdhci_get_min_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+ .ops = &bcm2835_sdhci_ops,
+};
+
+static int bcm2835_sdhci_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct bcm2835_sdhci *bcm2835_host;
+ struct sdhci_pltfm_host *pltfm_host;
+ int ret;
+
+ host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata, 0);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ bcm2835_host = devm_kzalloc(&pdev->dev, sizeof(*bcm2835_host),
+ GFP_KERNEL);
+ if (!bcm2835_host) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to allocate bcm2835_sdhci\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = bcm2835_host;
+
+ pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pltfm_host->clk)) {
+ ret = PTR_ERR(pltfm_host->clk);
+ goto err;
+ }
+
+ return sdhci_add_host(host);
+
+err:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static const struct of_device_id bcm2835_sdhci_of_match[] = {
+ { .compatible = "brcm,bcm2835-sdhci" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bcm2835_sdhci_of_match);
+
+static struct platform_driver bcm2835_sdhci_driver = {
+ .driver = {
+ .name = "sdhci-bcm2835",
+ .of_match_table = bcm2835_sdhci_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
+ },
+ .probe = bcm2835_sdhci_probe,
+ .remove = sdhci_pltfm_unregister,
+};
+module_platform_driver(bcm2835_sdhci_driver);
+
+MODULE_DESCRIPTION("BCM2835 SDHCI driver");
+MODULE_AUTHOR("Stephen Warren");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-cns3xxx.c b/kernel/drivers/mmc/host/sdhci-cns3xxx.c
new file mode 100644
index 000000000..59f2923f8
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-cns3xxx.c
@@ -0,0 +1,115 @@
+/*
+ * SDHCI support for CNS3xxx SoC
+ *
+ * Copyright 2008 Cavium Networks
+ * Copyright 2010 MontaVista Software, LLC.
+ *
+ * Authors: Scott Shu
+ * Anton Vorontsov <avorontsov@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include "sdhci-pltfm.h"
+
+static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host)
+{
+ return 150000000;
+}
+
+static void sdhci_cns3xxx_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ int div = 1;
+ u16 clk;
+ unsigned long timeout;
+
+ host->mmc->actual_clock = 0;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ while (host->max_clk / div > clock) {
+ /*
+ * On CNS3xxx divider grows linearly up to 4, and then
+ * exponentially up to 256.
+ */
+ if (div < 4)
+ div += 1;
+ else if (div < 256)
+ div *= 2;
+ else
+ break;
+ }
+
+ dev_dbg(dev, "desired SD clock: %d, actual: %d\n",
+ clock, host->max_clk / div);
+
+ /* Divide by 3 is special. */
+ if (div != 3)
+ div >>= 1;
+
+ clk = div << SDHCI_DIVIDER_SHIFT;
+ clk |= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ timeout = 20;
+ while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+ & SDHCI_CLOCK_INT_STABLE)) {
+ if (timeout == 0) {
+ dev_warn(dev, "clock is unstable");
+ break;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+}
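+
+/*
+ * Worked example: with max_clk = 150 MHz and a 25 MHz request, div steps
+ * through 1, 2, 3, 4, 8 and stops at 150 / 8 = 18.75 MHz, the fastest
+ * setting that does not exceed the request; it is then halved (div >>= 1)
+ * to match the SDHCI divider encoding, except for the divide-by-3 case.
+ */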
+
+static const struct sdhci_ops sdhci_cns3xxx_ops = {
+ .get_max_clock = sdhci_cns3xxx_get_max_clk,
+ .set_clock = sdhci_cns3xxx_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
+ .ops = &sdhci_cns3xxx_ops,
+ .quirks = SDHCI_QUIRK_BROKEN_DMA |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+};
+
+static int sdhci_cns3xxx_probe(struct platform_device *pdev)
+{
+ return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata, 0);
+}
+
+static struct platform_driver sdhci_cns3xxx_driver = {
+ .driver = {
+ .name = "sdhci-cns3xxx",
+ .pm = SDHCI_PLTFM_PMOPS,
+ },
+ .probe = sdhci_cns3xxx_probe,
+ .remove = sdhci_pltfm_unregister,
+};
+
+module_platform_driver(sdhci_cns3xxx_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for CNS3xxx");
+MODULE_AUTHOR("Scott Shu, "
+ "Anton Vorontsov <avorontsov@mvista.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-dove.c b/kernel/drivers/mmc/host/sdhci-dove.c
new file mode 100644
index 000000000..407c21f15
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-dove.c
@@ -0,0 +1,132 @@
+/*
+ * sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
+ *
+ * Author: Saeed Bishara <saeed@marvell.com>
+ * Mike Rapoport <mike@compulab.co.il>
+ * Based on sdhci-cns3xxx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "sdhci-pltfm.h"
+
+static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
+{
+ u16 ret;
+
+ switch (reg) {
+ case SDHCI_HOST_VERSION:
+ case SDHCI_SLOT_INT_STATUS:
+ /* those registers don't exist */
+ return 0;
+ default:
+ ret = readw(host->ioaddr + reg);
+ }
+ return ret;
+}
+
+static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
+{
+ u32 ret;
+
+ ret = readl(host->ioaddr + reg);
+
+ switch (reg) {
+ case SDHCI_CAPABILITIES:
+ /* Mask the support for 3.0V */
+ ret &= ~SDHCI_CAN_VDD_300;
+ break;
+ }
+ return ret;
+}
+
+static const struct sdhci_ops sdhci_dove_ops = {
+ .read_w = sdhci_dove_readw,
+ .read_l = sdhci_dove_readl,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_dove_pdata = {
+ .ops = &sdhci_dove_ops,
+ .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
+ SDHCI_QUIRK_NO_BUSY_IRQ |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_FORCE_DMA |
+ SDHCI_QUIRK_NO_HISPD_BIT,
+};
+
+static int sdhci_dove_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ int ret;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_dove_pdata, 0);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
+
+ if (!IS_ERR(pltfm_host->clk))
+ clk_prepare_enable(pltfm_host->clk);
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err_sdhci_add;
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_sdhci_add;
+
+ return 0;
+
+err_sdhci_add:
+ clk_disable_unprepare(pltfm_host->clk);
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static const struct of_device_id sdhci_dove_of_match_table[] = {
+ { .compatible = "marvell,dove-sdhci", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table);
+
+static struct platform_driver sdhci_dove_driver = {
+ .driver = {
+ .name = "sdhci-dove",
+ .pm = SDHCI_PLTFM_PMOPS,
+ .of_match_table = sdhci_dove_of_match_table,
+ },
+ .probe = sdhci_dove_probe,
+ .remove = sdhci_pltfm_unregister,
+};
+
+module_platform_driver(sdhci_dove_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for Dove");
+MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, "
+ "Mike Rapoport <mike@compulab.co.il>");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-esdhc-imx.c b/kernel/drivers/mmc/host/sdhci-esdhc-imx.c
new file mode 100644
index 000000000..82f512d87
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -0,0 +1,1155 @@
+/*
+ * Freescale eSDHC i.MX controller driver for the platform bus.
+ *
+ * derived from the OF-version.
+ *
+ * Copyright (c) 2010 Pengutronix e.K.
+ * Author: Wolfram Sang <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_data/mmc-esdhc-imx.h>
+#include <linux/pm_runtime.h>
+#include "sdhci-pltfm.h"
+#include "sdhci-esdhc.h"
+
+#define ESDHC_CTRL_D3CD 0x08
+/* VENDOR SPEC register */
+#define ESDHC_VENDOR_SPEC 0xc0
+#define ESDHC_VENDOR_SPEC_SDIO_QUIRK (1 << 1)
+#define ESDHC_VENDOR_SPEC_VSELECT (1 << 1)
+#define ESDHC_VENDOR_SPEC_FRC_SDCLK_ON (1 << 8)
+#define ESDHC_WTMK_LVL 0x44
+#define ESDHC_MIX_CTRL 0x48
+#define ESDHC_MIX_CTRL_DDREN (1 << 3)
+#define ESDHC_MIX_CTRL_AC23EN (1 << 7)
+#define ESDHC_MIX_CTRL_EXE_TUNE (1 << 22)
+#define ESDHC_MIX_CTRL_SMPCLK_SEL (1 << 23)
+#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25)
+/* Bits 3 and 6 are not SDHCI standard definitions */
+#define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7
+/* Tuning bits */
+#define ESDHC_MIX_CTRL_TUNING_MASK 0x03c00000
+
+/* dll control register */
+#define ESDHC_DLL_CTRL 0x60
+#define ESDHC_DLL_OVERRIDE_VAL_SHIFT 9
+#define ESDHC_DLL_OVERRIDE_EN_SHIFT 8
+
+/* tune control register */
+#define ESDHC_TUNE_CTRL_STATUS 0x68
+#define ESDHC_TUNE_CTRL_STEP 1
+#define ESDHC_TUNE_CTRL_MIN 0
+#define ESDHC_TUNE_CTRL_MAX ((1 << 7) - 1)
+
+#define ESDHC_TUNING_CTRL 0xcc
+#define ESDHC_STD_TUNING_EN (1 << 24)
+/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
+#define ESDHC_TUNING_START_TAP 0x1
+
+/* pinctrl state */
+#define ESDHC_PINCTRL_STATE_100MHZ "state_100mhz"
+#define ESDHC_PINCTRL_STATE_200MHZ "state_200mhz"
+
+/*
+ * Our interpretation of the SDHCI_HOST_CONTROL register
+ */
+#define ESDHC_CTRL_4BITBUS (0x1 << 1)
+#define ESDHC_CTRL_8BITBUS (0x2 << 1)
+#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
+
+/*
+ * There is an INT DMA ERR mis-match between eSDHC and STD SDHC SPEC:
+ * Bit25 is used in STD SPEC, and is reserved in fsl eSDHC design,
+ * but bit28 is used as the INT DMA ERR in fsl eSDHC design.
+ * Define this macro DMA error INT for fsl eSDHC
+ */
+#define ESDHC_INT_VENDOR_SPEC_DMA_ERR (1 << 28)
+
+/*
+ * The CMDTYPE of the CMD register (offset 0xE) should be set to
+ * "11" when the STOP CMD12 is issued on imx53 to abort one
+ * open ended multi-blk IO. Otherwise the TC INT wouldn't
+ * be generated.
+ * In exact block transfer, the controller doesn't complete the
+ * operations automatically as required at the end of the
+ * transfer and remains on hold if the abort command is not sent.
+ * As a result, the TC flag is not asserted and SW receives a timeout
+ * exception. Bit1 of the Vendor Spec register is used to fix it.
+ */
+#define ESDHC_FLAG_MULTIBLK_NO_INT BIT(1)
+/*
+ * The flag enables the workaround for ESDHC errata ENGcm07207 which
+ * affects i.MX25 and i.MX35.
+ */
+#define ESDHC_FLAG_ENGCM07207 BIT(2)
+/*
+ * The flag indicates that the ESDHC controller is a uSDHC block
+ * integrated on the i.MX6 series.
+ */
+#define ESDHC_FLAG_USDHC BIT(3)
+/* The IP supports manual tuning process */
+#define ESDHC_FLAG_MAN_TUNING BIT(4)
+/* The IP supports standard tuning process */
+#define ESDHC_FLAG_STD_TUNING BIT(5)
+/* The IP has SDHCI_CAPABILITIES_1 register */
+#define ESDHC_FLAG_HAVE_CAP1 BIT(6)
+
+struct esdhc_soc_data {
+ u32 flags;
+};
+
+static struct esdhc_soc_data esdhc_imx25_data = {
+ .flags = ESDHC_FLAG_ENGCM07207,
+};
+
+static struct esdhc_soc_data esdhc_imx35_data = {
+ .flags = ESDHC_FLAG_ENGCM07207,
+};
+
+static struct esdhc_soc_data esdhc_imx51_data = {
+ .flags = 0,
+};
+
+static struct esdhc_soc_data esdhc_imx53_data = {
+ .flags = ESDHC_FLAG_MULTIBLK_NO_INT,
+};
+
+static struct esdhc_soc_data usdhc_imx6q_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING,
+};
+
+static struct esdhc_soc_data usdhc_imx6sl_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1,
+};
+
+struct pltfm_imx_data {
+ u32 scratchpad;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_100mhz;
+ struct pinctrl_state *pins_200mhz;
+ const struct esdhc_soc_data *socdata;
+ struct esdhc_platform_data boarddata;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ struct clk *clk_per;
+ enum {
+ NO_CMD_PENDING, /* no multiblock command pending */
+ MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
+ WAIT_FOR_INT, /* sent CMD12, waiting for response INT */
+ } multiblock_status;
+ u32 is_ddr;
+};
+
+static struct platform_device_id imx_esdhc_devtype[] = {
+ {
+ .name = "sdhci-esdhc-imx25",
+ .driver_data = (kernel_ulong_t) &esdhc_imx25_data,
+ }, {
+ .name = "sdhci-esdhc-imx35",
+ .driver_data = (kernel_ulong_t) &esdhc_imx35_data,
+ }, {
+ .name = "sdhci-esdhc-imx51",
+ .driver_data = (kernel_ulong_t) &esdhc_imx51_data,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype);
+
+static const struct of_device_id imx_esdhc_dt_ids[] = {
+ { .compatible = "fsl,imx25-esdhc", .data = &esdhc_imx25_data, },
+ { .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, },
+ { .compatible = "fsl,imx51-esdhc", .data = &esdhc_imx51_data, },
+ { .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, },
+ { .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, },
+ { .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
+
+static inline int is_imx25_esdhc(struct pltfm_imx_data *data)
+{
+ return data->socdata == &esdhc_imx25_data;
+}
+
+static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
+{
+ return data->socdata == &esdhc_imx53_data;
+}
+
+static inline int is_imx6q_usdhc(struct pltfm_imx_data *data)
+{
+ return data->socdata == &usdhc_imx6q_data;
+}
+
+static inline int esdhc_is_usdhc(struct pltfm_imx_data *data)
+{
+ return !!(data->socdata->flags & ESDHC_FLAG_USDHC);
+}
+
+static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
+{
+ void __iomem *base = host->ioaddr + (reg & ~0x3);
+ u32 shift = (reg & 0x3) * 8;
+
+ writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);
+}
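+
+/*
+ * Worked example (illustrative values): the eSDHC only tolerates 32-bit
+ * accesses, so a byte write such as
+ *
+ *	esdhc_clrset_le(host, 0xff, 0xe, SDHCI_TIMEOUT_CONTROL);
+ *
+ * with SDHCI_TIMEOUT_CONTROL = 0x2e becomes a read-modify-write of the
+ * 32-bit word at 0x2c with shift = (0x2e & 0x3) * 8 = 16.
+ */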
+
+static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ u32 val = readl(host->ioaddr + reg);
+
+ if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+ u32 fsl_prss = val;
+ /* keep the least significant 20 bits */
+ val = fsl_prss & 0x000FFFFF;
+ /* move dat[0-3] bits */
+ val |= (fsl_prss & 0x0F000000) >> 4;
+ /* move cmd line bit */
+ val |= (fsl_prss & 0x00800000) << 1;
+ }
+
+ if (unlikely(reg == SDHCI_CAPABILITIES)) {
+ /* ignore bit[0-15] as it stores cap_1 register val for mx6sl */
+ if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1)
+ val &= 0xffff0000;
+
+ /* In FSL esdhc IC module, only bit20 is used to indicate the
+ * ADMA2 capability of esdhc, but this bit is messed up on
+ * some SOCs (e.g. on MX25, MX35 this bit is set, but they
+ * don't actually support ADMA2). So set the BROKEN_ADMA
+ * quirk on MX25/35 platforms.
+ */
+
+ if (val & SDHCI_CAN_DO_ADMA1) {
+ val &= ~SDHCI_CAN_DO_ADMA1;
+ val |= SDHCI_CAN_DO_ADMA2;
+ }
+ }
+
+ if (unlikely(reg == SDHCI_CAPABILITIES_1)) {
+ if (esdhc_is_usdhc(imx_data)) {
+ if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1)
+ val = readl(host->ioaddr + SDHCI_CAPABILITIES) & 0xFFFF;
+ else
+ /* imx6q/dl does not have cap_1 register, fake one */
+ val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104
+ | SDHCI_SUPPORT_SDR50
+ | SDHCI_USE_SDR50_TUNING;
+ }
+ }
+
+ if (unlikely(reg == SDHCI_MAX_CURRENT) && esdhc_is_usdhc(imx_data)) {
+ val = 0;
+ val |= 0xFF << SDHCI_MAX_CURRENT_330_SHIFT;
+ val |= 0xFF << SDHCI_MAX_CURRENT_300_SHIFT;
+ val |= 0xFF << SDHCI_MAX_CURRENT_180_SHIFT;
+ }
+
+ if (unlikely(reg == SDHCI_INT_STATUS)) {
+ if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) {
+ val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
+ val |= SDHCI_INT_ADMA_ERROR;
+ }
+
+ /*
+ * mask off the interrupt we get in response to the manually
+ * sent CMD12
+ */
+ if ((imx_data->multiblock_status == WAIT_FOR_INT) &&
+ ((val & SDHCI_INT_RESPONSE) == SDHCI_INT_RESPONSE)) {
+ val &= ~SDHCI_INT_RESPONSE;
+ writel(SDHCI_INT_RESPONSE, host->ioaddr +
+ SDHCI_INT_STATUS);
+ imx_data->multiblock_status = NO_CMD_PENDING;
+ }
+ }
+
+ return val;
+}
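+
+/*
+ * Worked example for the PRESENT_STATE remap above: the FSL DAT[3:0]
+ * level bits at 27:24 are shifted down four to the standard SDHCI
+ * positions 23:20, and the FSL CMD line level at bit 23 is shifted up
+ * one to the standard bit 24.
+ */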
+
+static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ u32 data;
+
+ if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
+ if (val & SDHCI_INT_CARD_INT) {
+ /*
+ * Clear and then set D3CD bit to avoid missing the
+ * card interrupt. This is an eSDHC controller problem,
+ * so we apply the following workaround: clearing and
+ * then setting the D3CD bit makes the eSDHC re-sample
+ * the card interrupt, so a card interrupt that was lost
+ * is picked up again.
+ */
+ data = readl(host->ioaddr + SDHCI_HOST_CONTROL);
+ data &= ~ESDHC_CTRL_D3CD;
+ writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
+ data |= ESDHC_CTRL_D3CD;
+ writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
+ }
+ }
+
+ if (unlikely((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
+ && (reg == SDHCI_INT_STATUS)
+ && (val & SDHCI_INT_DATA_END))) {
+ u32 v;
+ v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK;
+ writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
+
+ if (imx_data->multiblock_status == MULTIBLK_IN_PROCESS) {
+ /* send a manual CMD12 with RESPTYP=none */
+ data = MMC_STOP_TRANSMISSION << 24 |
+ SDHCI_CMD_ABORTCMD << 16;
+ writel(data, host->ioaddr + SDHCI_TRANSFER_MODE);
+ imx_data->multiblock_status = WAIT_FOR_INT;
+ }
+ }
+
+ if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
+ if (val & SDHCI_INT_ADMA_ERROR) {
+ val &= ~SDHCI_INT_ADMA_ERROR;
+ val |= ESDHC_INT_VENDOR_SPEC_DMA_ERR;
+ }
+ }
+
+ writel(val, host->ioaddr + reg);
+}
+
+static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ u16 ret = 0;
+ u32 val;
+
+ if (unlikely(reg == SDHCI_HOST_VERSION)) {
+ reg ^= 2;
+ if (esdhc_is_usdhc(imx_data)) {
+ /*
+ * The usdhc register returns a wrong host version.
+ * Correct it here.
+ */
+ return SDHCI_SPEC_300;
+ }
+ }
+
+ if (unlikely(reg == SDHCI_HOST_CONTROL2)) {
+ val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ if (val & ESDHC_VENDOR_SPEC_VSELECT)
+ ret |= SDHCI_CTRL_VDD_180;
+
+ if (esdhc_is_usdhc(imx_data)) {
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
+ val = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
+ /* the std tuning bits are in ACMD12_ERR for imx6sl */
+ val = readl(host->ioaddr + SDHCI_ACMD12_ERR);
+ }
+
+ if (val & ESDHC_MIX_CTRL_EXE_TUNE)
+ ret |= SDHCI_CTRL_EXEC_TUNING;
+ if (val & ESDHC_MIX_CTRL_SMPCLK_SEL)
+ ret |= SDHCI_CTRL_TUNED_CLK;
+
+ ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
+
+ return ret;
+ }
+
+ if (unlikely(reg == SDHCI_TRANSFER_MODE)) {
+ if (esdhc_is_usdhc(imx_data)) {
+ u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ ret = m & ESDHC_MIX_CTRL_SDHCI_MASK;
+ /* Swap AC23 bit */
+ if (m & ESDHC_MIX_CTRL_AC23EN) {
+ ret &= ~ESDHC_MIX_CTRL_AC23EN;
+ ret |= SDHCI_TRNS_AUTO_CMD23;
+ }
+ } else {
+ ret = readw(host->ioaddr + SDHCI_TRANSFER_MODE);
+ }
+
+ return ret;
+ }
+
+ return readw(host->ioaddr + reg);
+}
+
+static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ u32 new_val = 0;
+
+ switch (reg) {
+ case SDHCI_CLOCK_CONTROL:
+ new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ if (val & SDHCI_CLOCK_CARD_EN)
+ new_val |= ESDHC_VENDOR_SPEC_FRC_SDCLK_ON;
+ else
+ new_val &= ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON;
+ writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
+ return;
+ case SDHCI_HOST_CONTROL2:
+ new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ if (val & SDHCI_CTRL_VDD_180)
+ new_val |= ESDHC_VENDOR_SPEC_VSELECT;
+ else
+ new_val &= ~ESDHC_VENDOR_SPEC_VSELECT;
+ writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
+ new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ if (val & SDHCI_CTRL_TUNED_CLK)
+ new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL;
+ else
+ new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+ writel(new_val, host->ioaddr + ESDHC_MIX_CTRL);
+ } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+ u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
+ u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ if (val & SDHCI_CTRL_TUNED_CLK) {
+ v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
+ } else {
+ v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+ m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
+ }
+
+ if (val & SDHCI_CTRL_EXEC_TUNING) {
+ v |= ESDHC_MIX_CTRL_EXE_TUNE;
+ m |= ESDHC_MIX_CTRL_FBCLK_SEL;
+ } else {
+ v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+ }
+
+ writel(v, host->ioaddr + SDHCI_ACMD12_ERR);
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+ }
+ return;
+ case SDHCI_TRANSFER_MODE:
+ if ((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
+ && (host->cmd->opcode == SD_IO_RW_EXTENDED)
+ && (host->cmd->data->blocks > 1)
+ && (host->cmd->data->flags & MMC_DATA_READ)) {
+ u32 v;
+ v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ v |= ESDHC_VENDOR_SPEC_SDIO_QUIRK;
+ writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
+ }
+
+ if (esdhc_is_usdhc(imx_data)) {
+ u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ /* Swap AC23 bit */
+ if (val & SDHCI_TRNS_AUTO_CMD23) {
+ val &= ~SDHCI_TRNS_AUTO_CMD23;
+ val |= ESDHC_MIX_CTRL_AC23EN;
+ }
+ m = val | (m & ~ESDHC_MIX_CTRL_SDHCI_MASK);
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+ } else {
+ /*
+ * Postpone this write; it must go out together with the
+ * command write just below.
+ */
+ imx_data->scratchpad = val;
+ }
+ return;
+ case SDHCI_COMMAND:
+ if (host->cmd->opcode == MMC_STOP_TRANSMISSION)
+ val |= SDHCI_CMD_ABORTCMD;
+
+ if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
+ (imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+ imx_data->multiblock_status = MULTIBLK_IN_PROCESS;
+
+ if (esdhc_is_usdhc(imx_data))
+ writel(val << 16,
+ host->ioaddr + SDHCI_TRANSFER_MODE);
+ else
+ writel(val << 16 | imx_data->scratchpad,
+ host->ioaddr + SDHCI_TRANSFER_MODE);
+ return;
+ case SDHCI_BLOCK_SIZE:
+ val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
+ break;
+ }
+ esdhc_clrset_le(host, 0xffff, val, reg);
+}
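+
+/*
+ * For illustration (a sketch, not part of the driver): on non-uSDHC
+ * parts the 16-bit TRANSFER_MODE value is parked in the scratchpad and
+ * only reaches the hardware fused with the command:
+ *
+ *	esdhc_writew_le(host, mode, SDHCI_TRANSFER_MODE); // scratchpad only
+ *	esdhc_writew_le(host, cmd, SDHCI_COMMAND);        // cmd << 16 | mode
+ */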
+
+static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ u32 new_val;
+ u32 mask;
+
+ switch (reg) {
+ case SDHCI_POWER_CONTROL:
+ /*
+ * FSL put some DMA bits here
+ * If your board has a regulator, code should be here
+ */
+ return;
+ case SDHCI_HOST_CONTROL:
+ /* FSL messed up here, so we need to manually compose it. */
+ new_val = val & SDHCI_CTRL_LED;
+ /* ensure the endianness */
+ new_val |= ESDHC_HOST_CONTROL_LE;
+ /* bits 8&9 are reserved on mx25 */
+ if (!is_imx25_esdhc(imx_data)) {
+ /* DMA mode bits are shifted */
+ new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5;
+ }
+
+ /*
+ * Do not touch buswidth bits here. This is done in
+ * esdhc_pltfm_bus_width.
+ * Do not touch the D3CD bit either which is used for the
+ * SDIO interrupt errata workaround.
+ */
+ mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD);
+
+ esdhc_clrset_le(host, mask, new_val, reg);
+ return;
+ }
+ esdhc_clrset_le(host, 0xff, val, reg);
+
+ /*
+ * The esdhc violates the SDHC spec, which says that a software
+ * reset should not affect the card detection circuit. The esdhc
+ * nevertheless clears its SYSCTL register bits [0..2] during the
+ * software reset, which stops the clocks the card detection
+ * circuit relies on. To work around it, we turn those clocks
+ * back on to keep the card detection circuit functional.
+ */
+ if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1)) {
+ esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL);
+ /*
+ * The reset on usdhc fails to clear MIX_CTRL register.
+ * Do it manually here.
+ */
+ if (esdhc_is_usdhc(imx_data)) {
+ /* the tuning bits should be kept during reset */
+ new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ writel(new_val & ESDHC_MIX_CTRL_TUNING_MASK,
+ host->ioaddr + ESDHC_MIX_CTRL);
+ imx_data->is_ddr = 0;
+ }
+ }
+}
+
+static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+
+ if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock))
+ return boarddata->f_max;
+ else
+ return pltfm_host->clock;
+}
+
+static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return pltfm_host->clock / 256 / 16;
+}
+
+static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
+ unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ unsigned int host_clock = pltfm_host->clock;
+ int pre_div = 2;
+ int div = 1;
+ u32 temp, val;
+
+ if (clock == 0) {
+ host->mmc->actual_clock = 0;
+
+ if (esdhc_is_usdhc(imx_data)) {
+ val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+ host->ioaddr + ESDHC_VENDOR_SPEC);
+ }
+ return;
+ }
+
+ if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr)
+ pre_div = 1;
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | ESDHC_CLOCK_MASK);
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+ while (host_clock / pre_div / 16 > clock && pre_div < 256)
+ pre_div *= 2;
+
+ while (host_clock / pre_div / div > clock && div < 16)
+ div++;
+
+ host->mmc->actual_clock = host_clock / pre_div / div;
+ dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+ clock, host->mmc->actual_clock);
+
+ if (imx_data->is_ddr)
+ pre_div >>= 2;
+ else
+ pre_div >>= 1;
+ div--;
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | (div << ESDHC_DIVIDER_SHIFT)
+ | (pre_div << ESDHC_PREDIV_SHIFT));
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+ if (esdhc_is_usdhc(imx_data)) {
+ val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
+ writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+ host->ioaddr + ESDHC_VENDOR_SPEC);
+ }
+
+ mdelay(1);
+}
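+
+/*
+ * Worked example (illustrative values): on a uSDHC in SDR mode with a
+ * 200 MHz base clock and a 50 MHz request, pre_div stays 1 and div
+ * settles at 4 (200 / 1 / 4 = 50 MHz); the register fields are then
+ * written as pre_div >> 1 = 0 and div - 1 = 3, per the eSDHC encoding.
+ */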
+
+static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+
+ switch (boarddata->wp_type) {
+ case ESDHC_WP_GPIO:
+ return mmc_gpio_get_ro(host->mmc);
+ case ESDHC_WP_CONTROLLER:
+ return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
+ SDHCI_WRITE_PROTECT);
+ case ESDHC_WP_NONE:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
+{
+ u32 ctrl;
+
+ switch (width) {
+ case MMC_BUS_WIDTH_8:
+ ctrl = ESDHC_CTRL_8BITBUS;
+ break;
+ case MMC_BUS_WIDTH_4:
+ ctrl = ESDHC_CTRL_4BITBUS;
+ break;
+ default:
+ ctrl = 0;
+ break;
+ }
+
+ esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl,
+ SDHCI_HOST_CONTROL);
+}
+
+static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
+{
+ u32 reg;
+
+ /* FIXME: delay a bit for card to be ready for next tuning due to errors */
+ mdelay(1);
+
+ reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
+ ESDHC_MIX_CTRL_FBCLK_SEL;
+ writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
+ writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ dev_dbg(mmc_dev(host->mmc),
+ "tunning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n",
+ val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS));
+}
+
+static void esdhc_post_tuning(struct sdhci_host *host)
+{
+ u32 reg;
+
+ reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ reg &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+ writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
+}
+
+static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
+{
+ int min, max, avg, ret;
+
+ /* first find the minimum delay which can pass tuning */
+ min = ESDHC_TUNE_CTRL_MIN;
+ while (min < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, min);
+ if (!mmc_send_tuning(host->mmc))
+ break;
+ min += ESDHC_TUNE_CTRL_STEP;
+ }
+
+ /* then find the maximum delay which can still pass tuning */
+ max = min + ESDHC_TUNE_CTRL_STEP;
+ while (max < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, max);
+ if (mmc_send_tuning(host->mmc)) {
+ max -= ESDHC_TUNE_CTRL_STEP;
+ break;
+ }
+ max += ESDHC_TUNE_CTRL_STEP;
+ }
+
+ /* use average delay to get the best timing */
+ avg = (min + max) / 2;
+ esdhc_prepare_tuning(host, avg);
+ ret = mmc_send_tuning(host->mmc);
+ esdhc_post_tuning(host);
+
+ dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n",
+ ret ? "failed" : "passed", avg, ret);
+
+ return ret;
+}
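+
+/*
+ * Worked example (illustrative values): if tuning first passes at delay
+ * 40 and last passes at 90, the two scans leave min = 40 and max = 90,
+ * and the host is finally programmed with the midpoint (40 + 90) / 2 = 65.
+ */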
+
+static int esdhc_change_pinstate(struct sdhci_host *host,
+ unsigned int uhs)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pinctrl_state *pinctrl;
+
+ dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);
+
+ if (IS_ERR(imx_data->pinctrl) ||
+ IS_ERR(imx_data->pins_default) ||
+ IS_ERR(imx_data->pins_100mhz) ||
+ IS_ERR(imx_data->pins_200mhz))
+ return -EINVAL;
+
+ switch (uhs) {
+ case MMC_TIMING_UHS_SDR50:
+ pinctrl = imx_data->pins_100mhz;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ pinctrl = imx_data->pins_200mhz;
+ break;
+ default:
+ /* back to default state for other legacy timing */
+ pinctrl = imx_data->pins_default;
+ }
+
+ return pinctrl_select_state(imx_data->pinctrl, pinctrl);
+}
+
+static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+
+ switch (timing) {
+ case MMC_TIMING_UHS_SDR12:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ writel(readl(host->ioaddr + ESDHC_MIX_CTRL) |
+ ESDHC_MIX_CTRL_DDREN,
+ host->ioaddr + ESDHC_MIX_CTRL);
+ imx_data->is_ddr = 1;
+ if (boarddata->delay_line) {
+ u32 v;
+ v = boarddata->delay_line <<
+ ESDHC_DLL_OVERRIDE_VAL_SHIFT |
+ (1 << ESDHC_DLL_OVERRIDE_EN_SHIFT);
+ if (is_imx53_esdhc(imx_data))
+ v <<= 1;
+ writel(v, host->ioaddr + ESDHC_DLL_CTRL);
+ }
+ break;
+ }
+
+ esdhc_change_pinstate(host, timing);
+}
+
+static void esdhc_reset(struct sdhci_host *host, u8 mask)
+{
+ sdhci_reset(host, mask);
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+}
+
+static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
+ return esdhc_is_usdhc(imx_data) ? 1 << 28 : 1 << 27;
+}
+
+static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
+ /* use maximum timeout counter */
+ sdhci_writeb(host, esdhc_is_usdhc(imx_data) ? 0xF : 0xE,
+ SDHCI_TIMEOUT_CONTROL);
+}
+
+static struct sdhci_ops sdhci_esdhc_ops = {
+ .read_l = esdhc_readl_le,
+ .read_w = esdhc_readw_le,
+ .write_l = esdhc_writel_le,
+ .write_w = esdhc_writew_le,
+ .write_b = esdhc_writeb_le,
+ .set_clock = esdhc_pltfm_set_clock,
+ .get_max_clock = esdhc_pltfm_get_max_clock,
+ .get_min_clock = esdhc_pltfm_get_min_clock,
+ .get_max_timeout_count = esdhc_get_max_timeout_count,
+ .get_ro = esdhc_pltfm_get_ro,
+ .set_timeout = esdhc_set_timeout,
+ .set_bus_width = esdhc_pltfm_set_bus_width,
+ .set_uhs_signaling = esdhc_set_uhs_signaling,
+ .reset = esdhc_reset,
+};
+
+static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_HISPD_BIT
+ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
+ | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC
+ | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
+ .ops = &sdhci_esdhc_ops,
+};
+
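+/*
+ * Illustrative (not from the original source) device-tree fragment using
+ * the bindings parsed by sdhci_esdhc_imx_probe_dt() below; the node name,
+ * compatible string and GPIO phandles are assumptions:
+ *
+ *	usdhc1: usdhc@02190000 {
+ *		compatible = "fsl,imx6q-usdhc";
+ *		bus-width = <4>;
+ *		cd-gpios = <&gpio2 2 0>;
+ *		wp-gpios = <&gpio2 3 0>;
+ *		no-1-8-v;
+ *		fsl,delay-line = <0>;
+ *	};
+ */
+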
+#ifdef CONFIG_OF
+static int
+sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
+ struct sdhci_host *host,
+ struct esdhc_platform_data *boarddata)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ if (of_get_property(np, "non-removable", NULL))
+ boarddata->cd_type = ESDHC_CD_PERMANENT;
+
+ if (of_get_property(np, "fsl,cd-controller", NULL))
+ boarddata->cd_type = ESDHC_CD_CONTROLLER;
+
+ if (of_get_property(np, "fsl,wp-controller", NULL))
+ boarddata->wp_type = ESDHC_WP_CONTROLLER;
+
+ boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
+ if (gpio_is_valid(boarddata->cd_gpio))
+ boarddata->cd_type = ESDHC_CD_GPIO;
+
+ boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+ if (gpio_is_valid(boarddata->wp_gpio))
+ boarddata->wp_type = ESDHC_WP_GPIO;
+
+ of_property_read_u32(np, "bus-width", &boarddata->max_bus_width);
+
+ of_property_read_u32(np, "max-frequency", &boarddata->f_max);
+
+ if (of_find_property(np, "no-1-8-v", NULL))
+ boarddata->support_vsel = false;
+ else
+ boarddata->support_vsel = true;
+
+ if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line))
+ boarddata->delay_line = 0;
+
+ mmc_of_parse_voltage(np, &host->ocr_mask);
+
+ return 0;
+}
+#else
+static inline int
+sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
+ struct sdhci_host *host,
+ struct esdhc_platform_data *boarddata)
+{
+ return -ENODEV;
+}
+#endif
+
+static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(imx_esdhc_dt_ids, &pdev->dev);
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_host *host;
+ struct esdhc_platform_data *boarddata;
+ int err;
+ struct pltfm_imx_data *imx_data;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+
+ imx_data = devm_kzalloc(&pdev->dev, sizeof(*imx_data), GFP_KERNEL);
+ if (!imx_data) {
+ err = -ENOMEM;
+ goto free_sdhci;
+ }
+
+ imx_data->socdata = of_id ? of_id->data : (struct esdhc_soc_data *)
+ pdev->id_entry->driver_data;
+ pltfm_host->priv = imx_data;
+
+ imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(imx_data->clk_ipg)) {
+ err = PTR_ERR(imx_data->clk_ipg);
+ goto free_sdhci;
+ }
+
+ imx_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(imx_data->clk_ahb)) {
+ err = PTR_ERR(imx_data->clk_ahb);
+ goto free_sdhci;
+ }
+
+ imx_data->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(imx_data->clk_per)) {
+ err = PTR_ERR(imx_data->clk_per);
+ goto free_sdhci;
+ }
+
+ pltfm_host->clk = imx_data->clk_per;
+ pltfm_host->clock = clk_get_rate(pltfm_host->clk);
+ clk_prepare_enable(imx_data->clk_per);
+ clk_prepare_enable(imx_data->clk_ipg);
+ clk_prepare_enable(imx_data->clk_ahb);
+
+ imx_data->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(imx_data->pinctrl)) {
+ err = PTR_ERR(imx_data->pinctrl);
+ goto disable_clk;
+ }
+
+ imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(imx_data->pins_default))
+ dev_warn(mmc_dev(host->mmc), "could not get default state\n");
+
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_ENGCM07207)
+ /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
+ host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK
+ | SDHCI_QUIRK_BROKEN_ADMA;
+
+ /*
+ * The imx6q ROM code will change the default watermark level setting
+ * to something insane. Change it back here.
+ */
+ if (esdhc_is_usdhc(imx_data)) {
+ writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL);
+ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+ host->mmc->caps |= MMC_CAP_1_8V_DDR;
+ }
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
+ sdhci_esdhc_ops.platform_execute_tuning =
+ esdhc_executing_tuning;
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
+ writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
+ ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP,
+ host->ioaddr + ESDHC_TUNING_CTRL);
+
+ boarddata = &imx_data->boarddata;
+ if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) {
+ if (!host->mmc->parent->platform_data) {
+ dev_err(mmc_dev(host->mmc), "no board data!\n");
+ err = -EINVAL;
+ goto disable_clk;
+ }
+ imx_data->boarddata = *((struct esdhc_platform_data *)
+ host->mmc->parent->platform_data);
+ }
+
+ /* card_detect */
+ if (boarddata->cd_type == ESDHC_CD_CONTROLLER)
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
+ switch (boarddata->max_bus_width) {
+ case 8:
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
+ break;
+ case 4:
+ host->mmc->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 1:
+ default:
+ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+ break;
+ }
+
+	/* SDR50 and SDR104 need to work at 1.8V signal voltage */
+ if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
+ !IS_ERR(imx_data->pins_default)) {
+ imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_100MHZ);
+ imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_200MHZ);
+ if (IS_ERR(imx_data->pins_100mhz) ||
+ IS_ERR(imx_data->pins_200mhz)) {
+ dev_warn(mmc_dev(host->mmc),
+				"could not get ultra high speed state, working in normal mode\n");
+			/* fall back to no UHS support by setting the no-1.8V quirk */
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+ } else {
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+
+ /* call to generic mmc_of_parse to support additional capabilities */
+ err = mmc_of_parse(host->mmc);
+ if (err)
+ goto disable_clk;
+
+ err = sdhci_add_host(host);
+ if (err)
+ goto disable_clk;
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_suspend_ignore_children(&pdev->dev, 1);
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ahb);
+free_sdhci:
+ sdhci_pltfm_free(pdev);
+ return err;
+}
+
+static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ sdhci_remove_host(host, dead);
+
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ahb);
+
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sdhci_esdhc_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ int ret;
+
+ ret = sdhci_runtime_suspend_host(host);
+
+ if (!sdhci_sdio_irq_enabled(host)) {
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ }
+ clk_disable_unprepare(imx_data->clk_ahb);
+
+ return ret;
+}
+
+static int sdhci_esdhc_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
+ if (!sdhci_sdio_irq_enabled(host)) {
+ clk_prepare_enable(imx_data->clk_per);
+ clk_prepare_enable(imx_data->clk_ipg);
+ }
+ clk_prepare_enable(imx_data->clk_ahb);
+
+ return sdhci_runtime_resume_host(host);
+}
+#endif
+
+static const struct dev_pm_ops sdhci_esdhc_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume)
+ SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend,
+ sdhci_esdhc_runtime_resume, NULL)
+};
+
+static struct platform_driver sdhci_esdhc_imx_driver = {
+ .driver = {
+ .name = "sdhci-esdhc-imx",
+ .of_match_table = imx_esdhc_dt_ids,
+ .pm = &sdhci_esdhc_pmops,
+ },
+ .id_table = imx_esdhc_devtype,
+ .probe = sdhci_esdhc_imx_probe,
+ .remove = sdhci_esdhc_imx_remove,
+};
+
+module_platform_driver(sdhci_esdhc_imx_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC");
+MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-esdhc.h b/kernel/drivers/mmc/host/sdhci-esdhc.h
new file mode 100644
index 000000000..3497cfaf6
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-esdhc.h
@@ -0,0 +1,50 @@
+/*
+ * Freescale eSDHC controller driver generics for OF and pltfm.
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Copyright (c) 2010 Pengutronix e.K.
+ * Author: Wolfram Sang <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+
+#ifndef _DRIVERS_MMC_SDHCI_ESDHC_H
+#define _DRIVERS_MMC_SDHCI_ESDHC_H
+
+/*
+ * Ops and quirks for the Freescale eSDHC controller.
+ */
+
+#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
+ SDHCI_QUIRK_NO_BUSY_IRQ | \
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
+ SDHCI_QUIRK_PIO_NEEDS_DELAY)
+
+#define ESDHC_SYSTEM_CONTROL 0x2c
+#define ESDHC_CLOCK_MASK 0x0000fff0
+#define ESDHC_PREDIV_SHIFT 8
+#define ESDHC_DIVIDER_SHIFT 4
+#define ESDHC_CLOCK_PEREN 0x00000004
+#define ESDHC_CLOCK_HCKEN 0x00000002
+#define ESDHC_CLOCK_IPGEN 0x00000001
+
+/* pltfm-specific */
+#define ESDHC_HOST_CONTROL_LE 0x20
+
+/*
+ * P2020 interpretation of the SDHCI_HOST_CONTROL register
+ */
+#define ESDHC_CTRL_4BITBUS (0x1 << 1)
+#define ESDHC_CTRL_8BITBUS (0x2 << 1)
+#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1)
+
+/* OF-specific */
+#define ESDHC_DMA_SYSCTL 0x40c
+#define ESDHC_DMA_SNOOP 0x00000040
+
+#define ESDHC_HOST_CONTROL_RES 0x05
+
+#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/kernel/drivers/mmc/host/sdhci-iproc.c b/kernel/drivers/mmc/host/sdhci-iproc.c
new file mode 100644
index 000000000..3b423b0ad
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-iproc.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * iProc SDHCI platform driver
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mmc/host.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include "sdhci-pltfm.h"
+
+struct sdhci_iproc_data {
+ const struct sdhci_pltfm_data *pdata;
+ u32 caps;
+ u32 caps1;
+};
+
+struct sdhci_iproc_host {
+ const struct sdhci_iproc_data *data;
+ u32 shadow_cmd;
+ u32 shadow_blk;
+};
+
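+/* Bit offset (0, 8, 16 or 24) of a sub-word register within its 32-bit word */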
+#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
+
+static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
+{
+ u32 val = readl(host->ioaddr + reg);
+
+ pr_debug("%s: readl [0x%02x] 0x%08x\n",
+ mmc_hostname(host->mmc), reg, val);
+ return val;
+}
+
+static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
+{
+ u32 val = sdhci_iproc_readl(host, (reg & ~3));
+ u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
+ return word;
+}
+
+static u8 sdhci_iproc_readb(struct sdhci_host *host, int reg)
+{
+ u32 val = sdhci_iproc_readl(host, (reg & ~3));
+ u8 byte = val >> REG_OFFSET_IN_BITS(reg) & 0xff;
+ return byte;
+}
+
+static inline void sdhci_iproc_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ pr_debug("%s: writel [0x%02x] 0x%08x\n",
+ mmc_hostname(host->mmc), reg, val);
+
+ writel(val, host->ioaddr + reg);
+
+ if (host->clock <= 400000) {
+		/* Delay four SD clock cycles, rounded up to whole microseconds */
+ if (host->clock)
+ udelay((4 * 1000000 + host->clock - 1) / host->clock);
+ else
+ udelay(10);
+ }
+}
+
+/*
+ * The Arasan has a bugette whereby it may lose the content of successive
+ * writes to the same register that are within two SD-card clock cycles of
+ * each other (a clock domain crossing problem). The data
+ * register does not have this problem, which is just as well - otherwise we'd
+ * have to nobble the DMA engine too.
+ *
+ * This wouldn't be a problem with the code except that we can only write
+ * the controller with 32-bit writes, so writing two different 16-bit
+ * registers back to back creates the problem.
+ *
+ * In reality, this only happens when SDHCI_BLOCK_SIZE and SDHCI_BLOCK_COUNT
+ * are written followed by SDHCI_TRANSFER_MODE and SDHCI_COMMAND.
+ * The BLOCK_SIZE and BLOCK_COUNT are meaningless until a command is issued,
+ * so the workaround can be further optimized: we keep shadow values of
+ * BLOCK_SIZE, BLOCK_COUNT, and TRANSFER_MODE until a COMMAND is issued.
+ * Then, write the BLOCK_SIZE+BLOCK_COUNT in a single 32-bit write followed
+ * by the TRANSFER+COMMAND in another 32-bit write.
+ */
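+/*
+ * Illustrative sequence (values are hypothetical): a 16-bit write of
+ * 0x0200 to SDHCI_BLOCK_SIZE is held in shadow_blk; the later 16-bit
+ * write to SDHCI_COMMAND first flushes shadow_blk as one 32-bit write
+ * covering BLOCK_SIZE+BLOCK_COUNT, then merges the shadowed
+ * TRANSFER_MODE with the command value into a second 32-bit write.
+ */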
+static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
+ u32 word_shift = REG_OFFSET_IN_BITS(reg);
+ u32 mask = 0xffff << word_shift;
+ u32 oldval, newval;
+
+ if (reg == SDHCI_COMMAND) {
+ /* Write the block now as we are issuing a command */
+ if (iproc_host->shadow_blk != 0) {
+ sdhci_iproc_writel(host, iproc_host->shadow_blk,
+ SDHCI_BLOCK_SIZE);
+ iproc_host->shadow_blk = 0;
+ }
+ oldval = iproc_host->shadow_cmd;
+ } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
+ /* Block size and count are stored in shadow reg */
+ oldval = iproc_host->shadow_blk;
+ } else {
+ /* Read reg, all other registers are not shadowed */
+ oldval = sdhci_iproc_readl(host, (reg & ~3));
+ }
+ newval = (oldval & ~mask) | (val << word_shift);
+
+ if (reg == SDHCI_TRANSFER_MODE) {
+ /* Save the transfer mode until the command is issued */
+ iproc_host->shadow_cmd = newval;
+ } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
+ /* Save the block info until the command is issued */
+ iproc_host->shadow_blk = newval;
+ } else {
+ /* Command or other regular 32-bit write */
+ sdhci_iproc_writel(host, newval, reg & ~3);
+ }
+}
+
+static void sdhci_iproc_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ u32 oldval = sdhci_iproc_readl(host, (reg & ~3));
+ u32 byte_shift = REG_OFFSET_IN_BITS(reg);
+ u32 mask = 0xff << byte_shift;
+ u32 newval = (oldval & ~mask) | (val << byte_shift);
+
+ sdhci_iproc_writel(host, newval, reg & ~3);
+}
+
+static const struct sdhci_ops sdhci_iproc_ops = {
+ .read_l = sdhci_iproc_readl,
+ .read_w = sdhci_iproc_readw,
+ .read_b = sdhci_iproc_readb,
+ .write_l = sdhci_iproc_writel,
+ .write_w = sdhci_iproc_writew,
+ .write_b = sdhci_iproc_writeb,
+ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
+ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+ .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
+ .ops = &sdhci_iproc_ops,
+};
+
+static const struct sdhci_iproc_data iproc_data = {
+ .pdata = &sdhci_iproc_pltfm_data,
+ .caps = 0x05E90000,
+ .caps1 = 0x00000064,
+};
+
+static const struct of_device_id sdhci_iproc_of_match[] = {
+ { .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_iproc_of_match);
+
+static int sdhci_iproc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct sdhci_iproc_data *iproc_data;
+ struct sdhci_host *host;
+ struct sdhci_iproc_host *iproc_host;
+ struct sdhci_pltfm_host *pltfm_host;
+ int ret;
+
+ match = of_match_device(sdhci_iproc_of_match, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+ iproc_data = match->data;
+
+ host = sdhci_pltfm_init(pdev, iproc_data->pdata, sizeof(*iproc_host));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ iproc_host = sdhci_pltfm_priv(pltfm_host);
+
+ iproc_host->data = iproc_data;
+
+ mmc_of_parse(host->mmc);
+ sdhci_get_of_property(pdev);
+
+	/* Enable eMMC 1.8V DDR capability */
+ host->mmc->caps |= MMC_CAP_1_8V_DDR;
+
+ pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pltfm_host->clk)) {
+ ret = PTR_ERR(pltfm_host->clk);
+ goto err;
+ }
+
+ if (iproc_host->data->pdata->quirks & SDHCI_QUIRK_MISSING_CAPS) {
+ host->caps = iproc_host->data->caps;
+ host->caps1 = iproc_host->data->caps1;
+ }
+
+ return sdhci_add_host(host);
+
+err:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_iproc_remove(struct platform_device *pdev)
+{
+ return sdhci_pltfm_unregister(pdev);
+}
+
+static struct platform_driver sdhci_iproc_driver = {
+ .driver = {
+ .name = "sdhci-iproc",
+ .of_match_table = sdhci_iproc_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
+ },
+ .probe = sdhci_iproc_probe,
+ .remove = sdhci_iproc_remove,
+};
+module_platform_driver(sdhci_iproc_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("IPROC SDHCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-msm.c b/kernel/drivers/mmc/host/sdhci-msm.c
new file mode 100644
index 000000000..4a09f7608
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-msm.c
@@ -0,0 +1,593 @@
+/*
+ * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/mmc/mmc.h>
+#include <linux/slab.h>
+
+#include "sdhci-pltfm.h"
+
+#define CORE_MCI_VERSION 0x50
+#define CORE_VERSION_MAJOR_SHIFT 28
+#define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT)
+#define CORE_VERSION_MINOR_MASK 0xff
+
+#define CORE_HC_MODE 0x78
+#define HC_MODE_EN 0x1
+#define CORE_POWER 0x0
+#define CORE_SW_RST BIT(7)
+
+#define MAX_PHASES 16
+#define CORE_DLL_LOCK BIT(7)
+#define CORE_DLL_EN BIT(16)
+#define CORE_CDR_EN BIT(17)
+#define CORE_CK_OUT_EN BIT(18)
+#define CORE_CDR_EXT_EN BIT(19)
+#define CORE_DLL_PDN BIT(29)
+#define CORE_DLL_RST BIT(30)
+#define CORE_DLL_CONFIG 0x100
+#define CORE_DLL_STATUS 0x108
+
+#define CORE_VENDOR_SPEC 0x10c
+#define CORE_CLK_PWRSAVE BIT(1)
+
+#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11c
+
+#define CDR_SELEXT_SHIFT 20
+#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
+#define CMUX_SHIFT_PHASE_SHIFT 24
+#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
+
+struct sdhci_msm_host {
+ struct platform_device *pdev;
+ void __iomem *core_mem; /* MSM SDCC mapped address */
+ struct clk *clk; /* main SD/MMC bus clock */
+ struct clk *pclk; /* SDHC peripheral bus clock */
+ struct clk *bus_clk; /* SDHC bus voter clock */
+ struct mmc_host *mmc;
+ struct sdhci_pltfm_data sdhci_msm_pdata;
+};
+
+/* Platform specific tuning */
+static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
+{
+ u32 wait_cnt = 50;
+ u8 ck_out_en;
+ struct mmc_host *mmc = host->mmc;
+
+ /* Poll for CK_OUT_EN bit. max. poll time = 50us */
+ ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
+ CORE_CK_OUT_EN);
+
+ while (ck_out_en != poll) {
+ if (--wait_cnt == 0) {
+ dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
+ mmc_hostname(mmc), poll);
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+
+ ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
+ CORE_CK_OUT_EN);
+ }
+
+ return 0;
+}
+
+static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
+{
+ int rc;
+ static const u8 grey_coded_phase_table[] = {
+ 0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
+ 0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
+ };
+ unsigned long flags;
+ u32 config;
+ struct mmc_host *mmc = host->mmc;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
+ config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
+ rc = msm_dll_poll_ck_out_en(host, 0);
+ if (rc)
+ goto err_out;
+
+ /*
+ * Write the selected DLL clock output phase (0 ... 15)
+ * to CDR_SELEXT bit field of DLL_CONFIG register.
+ */
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CDR_SELEXT_MASK;
+ config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
+ rc = msm_dll_poll_ck_out_en(host, 1);
+ if (rc)
+ goto err_out;
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config |= CORE_CDR_EN;
+ config &= ~CORE_CDR_EXT_EN;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ goto out;
+
+err_out:
+ dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
+ mmc_hostname(mmc), phase);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+ return rc;
+}
+
+/*
+ * Find the greatest range of consecutive selected DLL clock output phases
+ * that can be used as the sampling setting for SD3.0 UHS-I card read
+ * operation (in SDR104 timing mode) or for eMMC4.5 card read operation
+ * (in HS200 timing mode). Select the phase at 3/4 of that range and
+ * configure the DLL with the selected DLL clock output phase.
+ */
+
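+/*
+ * Worked example (phases are made up): with tuned phases
+ * {0, 1, 2, 13, 14, 15}, the two windows wrap around 15 -> 0 and are
+ * merged into the single window {13, 14, 15, 0, 1, 2}; 3/4 of its
+ * length selects index 3, i.e. phase 0.
+ */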
+static int msm_find_most_appropriate_phase(struct sdhci_host *host,
+ u8 *phase_table, u8 total_phases)
+{
+ int ret;
+ u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
+ u8 phases_per_row[MAX_PHASES] = { 0 };
+ int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
+ int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
+ bool phase_0_found = false, phase_15_found = false;
+ struct mmc_host *mmc = host->mmc;
+
+ if (!total_phases || (total_phases > MAX_PHASES)) {
+ dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
+ mmc_hostname(mmc), total_phases);
+ return -EINVAL;
+ }
+
+ for (cnt = 0; cnt < total_phases; cnt++) {
+ ranges[row_index][col_index] = phase_table[cnt];
+ phases_per_row[row_index] += 1;
+ col_index++;
+
+ if ((cnt + 1) == total_phases) {
+ continue;
+ /* check if next phase in phase_table is consecutive or not */
+ } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
+ row_index++;
+ col_index = 0;
+ }
+ }
+
+ if (row_index >= MAX_PHASES)
+ return -EINVAL;
+
+	/* Check if phase-0 is present in the first valid window */
+ if (!ranges[0][0]) {
+ phase_0_found = true;
+ phase_0_raw_index = 0;
+		/* Check if a cycle exists between two valid windows */
+ for (cnt = 1; cnt <= row_index; cnt++) {
+ if (phases_per_row[cnt]) {
+ for (i = 0; i < phases_per_row[cnt]; i++) {
+ if (ranges[cnt][i] == 15) {
+ phase_15_found = true;
+ phase_15_raw_index = cnt;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+	/* If two valid windows form a cycle, merge them into a single window */
+ if (phase_0_found && phase_15_found) {
+		/* number of phases in the row where phase 0 is present */
+ u8 phases_0 = phases_per_row[phase_0_raw_index];
+		/* number of phases in the row where phase 15 is present */
+ u8 phases_15 = phases_per_row[phase_15_raw_index];
+
+ if (phases_0 + phases_15 >= MAX_PHASES)
+			/*
+			 * If there is more than one phase window, the total
+			 * number of phases in both windows must be less than
+			 * MAX_PHASES.
+			 */
+ return -EINVAL;
+
+ /* Merge 2 cyclic windows */
+ i = phases_15;
+ for (cnt = 0; cnt < phases_0; cnt++) {
+ ranges[phase_15_raw_index][i] =
+ ranges[phase_0_raw_index][cnt];
+ if (++i >= MAX_PHASES)
+ break;
+ }
+
+ phases_per_row[phase_0_raw_index] = 0;
+ phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
+ }
+
+ for (cnt = 0; cnt <= row_index; cnt++) {
+ if (phases_per_row[cnt] > curr_max) {
+ curr_max = phases_per_row[cnt];
+ selected_row_index = cnt;
+ }
+ }
+
+ i = (curr_max * 3) / 4;
+ if (i)
+ i--;
+
+ ret = ranges[selected_row_index][i];
+
+ if (ret >= MAX_PHASES) {
+ ret = -EINVAL;
+ dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
+ mmc_hostname(mmc), ret);
+ }
+
+ return ret;
+}
+
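+/*
+ * Illustrative example (values assumed for the sketch): a 150 MHz host
+ * clock falls into the "<= 150000000" bucket below, so MCLK_FREQ code 3
+ * is programmed into the CMUX_SHIFT_PHASE field of DLL_CONFIG.
+ */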
+static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
+{
+ u32 mclk_freq = 0, config;
+
+ /* Program the MCLK value to MCLK_FREQ bit field */
+ if (host->clock <= 112000000)
+ mclk_freq = 0;
+ else if (host->clock <= 125000000)
+ mclk_freq = 1;
+ else if (host->clock <= 137000000)
+ mclk_freq = 2;
+ else if (host->clock <= 150000000)
+ mclk_freq = 3;
+ else if (host->clock <= 162000000)
+ mclk_freq = 4;
+ else if (host->clock <= 175000000)
+ mclk_freq = 5;
+ else if (host->clock <= 187000000)
+ mclk_freq = 6;
+ else if (host->clock <= 200000000)
+ mclk_freq = 7;
+
+ config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config &= ~CMUX_SHIFT_PHASE_MASK;
+ config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
+ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+}
+
+/* Initialize the DLL (Programmable Delay Line) */
+static int msm_init_cm_dll(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ int wait_cnt = 50;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /*
+ * Make sure that clock is always enabled when DLL
+ * tuning is in progress. Keeping PWRSAVE ON may
+ * turn off the clock.
+ */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
+ & ~CORE_CLK_PWRSAVE), host->ioaddr + CORE_VENDOR_SPEC);
+
+ /* Write 1 to DLL_RST bit of DLL_CONFIG register */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ | CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ | CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+ msm_cm_dll_set_freq(host);
+
+ /* Write 0 to DLL_RST bit of DLL_CONFIG register */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ & ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Write 0 to DLL_PDN bit of DLL_CONFIG register */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ & ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Set DLL_EN bit to 1. */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ | CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Set CK_OUT_EN bit to 1. */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+
+ /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
+ while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
+ CORE_DLL_LOCK)) {
+		/* wait at most 50 us for the LOCK bit to be set */
+ if (--wait_cnt == 0) {
+ dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
+ mmc_hostname(mmc));
+ spin_unlock_irqrestore(&host->lock, flags);
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ return 0;
+}
+
+static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ int tuning_seq_cnt = 3;
+ u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
+ int rc;
+ struct mmc_host *mmc = host->mmc;
+ struct mmc_ios ios = host->mmc->ios;
+
+ /*
+	 * Tuning is required for SDR104, HS200 and HS400 modes, and only
+	 * if the clock frequency is greater than 100 MHz in those modes.
+ */
+ if (host->clock <= 100 * 1000 * 1000 ||
+ !((ios.timing == MMC_TIMING_MMC_HS200) ||
+ (ios.timing == MMC_TIMING_UHS_SDR104)))
+ return 0;
+
+retry:
+ /* First of all reset the tuning block */
+ rc = msm_init_cm_dll(host);
+ if (rc)
+ return rc;
+
+ phase = 0;
+ do {
+ /* Set the phase in delay line hw block */
+ rc = msm_config_cm_dll_phase(host, phase);
+ if (rc)
+ return rc;
+
+ rc = mmc_send_tuning(mmc);
+ if (!rc) {
+ /* Tuning is successful at this tuning point */
+ tuned_phases[tuned_phase_cnt++] = phase;
+ dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
+ mmc_hostname(mmc), phase);
+ }
+ } while (++phase < ARRAY_SIZE(tuned_phases));
+
+ if (tuned_phase_cnt) {
+ rc = msm_find_most_appropriate_phase(host, tuned_phases,
+ tuned_phase_cnt);
+ if (rc < 0)
+ return rc;
+ else
+ phase = rc;
+
+ /*
+ * Finally set the selected phase in delay
+ * line hw block.
+ */
+ rc = msm_config_cm_dll_phase(host, phase);
+ if (rc)
+ return rc;
+ dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
+ mmc_hostname(mmc), phase);
+ } else {
+ if (--tuning_seq_cnt)
+ goto retry;
+ /* Tuning failed */
+ dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
+ mmc_hostname(mmc));
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+static const struct of_device_id sdhci_msm_dt_match[] = {
+ { .compatible = "qcom,sdhci-msm-v4" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
+
+static struct sdhci_ops sdhci_msm_ops = {
+ .platform_execute_tuning = sdhci_msm_execute_tuning,
+ .reset = sdhci_reset,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static int sdhci_msm_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_msm_host *msm_host;
+ struct resource *core_memres;
+ int ret;
+ u16 host_version, core_minor;
+ u32 core_version, caps;
+ u8 core_major;
+
+ msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
+ if (!msm_host)
+ return -ENOMEM;
+
+ msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
+ host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = msm_host;
+ msm_host->mmc = host->mmc;
+ msm_host->pdev = pdev;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto pltfm_free;
+
+ sdhci_get_of_property(pdev);
+
+ /* Setup SDCC bus voter clock. */
+ msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (!IS_ERR(msm_host->bus_clk)) {
+ /* Vote for max. clk rate for max. performance */
+ ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
+ if (ret)
+ goto pltfm_free;
+ ret = clk_prepare_enable(msm_host->bus_clk);
+ if (ret)
+ goto pltfm_free;
+ }
+
+ /* Setup main peripheral bus clock */
+ msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(msm_host->pclk)) {
+ ret = PTR_ERR(msm_host->pclk);
+		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
+ goto bus_clk_disable;
+ }
+
+ ret = clk_prepare_enable(msm_host->pclk);
+ if (ret)
+ goto bus_clk_disable;
+
+ /* Setup SDC MMC clock */
+ msm_host->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(msm_host->clk)) {
+ ret = PTR_ERR(msm_host->clk);
+ dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
+ goto pclk_disable;
+ }
+
+ ret = clk_prepare_enable(msm_host->clk);
+ if (ret)
+ goto pclk_disable;
+
+ core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);
+
+ if (IS_ERR(msm_host->core_mem)) {
+ dev_err(&pdev->dev, "Failed to remap registers\n");
+ ret = PTR_ERR(msm_host->core_mem);
+ goto clk_disable;
+ }
+
+ /* Reset the core and Enable SDHC mode */
+ writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) |
+ CORE_SW_RST, msm_host->core_mem + CORE_POWER);
+
+	/* SW reset can take up to 10 HCLK + 15 MCLK cycles (min. 40 us) */
+ usleep_range(1000, 5000);
+ if (readl(msm_host->core_mem + CORE_POWER) & CORE_SW_RST) {
+ dev_err(&pdev->dev, "Stuck in reset\n");
+ ret = -ETIMEDOUT;
+ goto clk_disable;
+ }
+
+ /* Set HC_MODE_EN bit in HC_MODE register */
+ writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
+
+ host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
+ dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
+ host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
+ SDHCI_VENDOR_VER_SHIFT));
+
+ core_version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
+ core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
+ CORE_VERSION_MAJOR_SHIFT;
+ core_minor = core_version & CORE_VERSION_MINOR_MASK;
+ dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
+ core_version, core_major, core_minor);
+
+ /*
+ * Support for some capabilities is not advertised by newer
+ * controller versions and must be explicitly enabled.
+ */
+ if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
+ caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
+ caps |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
+ writel_relaxed(caps, host->ioaddr +
+ CORE_VENDOR_SPEC_CAPABILITIES0);
+ }
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto clk_disable;
+
+ return 0;
+
+clk_disable:
+ clk_disable_unprepare(msm_host->clk);
+pclk_disable:
+ clk_disable_unprepare(msm_host->pclk);
+bus_clk_disable:
+ if (!IS_ERR(msm_host->bus_clk))
+ clk_disable_unprepare(msm_host->bus_clk);
+pltfm_free:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_msm_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
+ 0xffffffff);
+
+ sdhci_remove_host(host, dead);
+ sdhci_pltfm_free(pdev);
+ clk_disable_unprepare(msm_host->clk);
+ clk_disable_unprepare(msm_host->pclk);
+ if (!IS_ERR(msm_host->bus_clk))
+ clk_disable_unprepare(msm_host->bus_clk);
+ return 0;
+}
+
+static struct platform_driver sdhci_msm_driver = {
+ .probe = sdhci_msm_probe,
+ .remove = sdhci_msm_remove,
+ .driver = {
+ .name = "sdhci_msm",
+ .of_match_table = sdhci_msm_dt_match,
+ },
+};
+
+module_platform_driver(sdhci_msm_driver);
+
+MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-of-arasan.c b/kernel/drivers/mmc/host/sdhci-of-arasan.c
new file mode 100644
index 000000000..6287d426c
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-of-arasan.c
@@ -0,0 +1,229 @@
+/*
+ * Arasan Secure Digital Host Controller Interface.
+ * Copyright (C) 2011 - 2012 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2012 Wind River Systems, Inc.
+ * Copyright (C) 2013 Pengutronix e.K.
+ * Copyright (C) 2013 Xilinx Inc.
+ *
+ * Based on sdhci-of-esdhc.c
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/module.h>
+#include "sdhci-pltfm.h"
+
+#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
+
+#define CLK_CTRL_TIMEOUT_SHIFT 16
+#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
+#define CLK_CTRL_TIMEOUT_MIN_EXP 13
+
+/**
+ * struct sdhci_arasan_data
+ * @clk_ahb: Pointer to the AHB clock
+ */
+struct sdhci_arasan_data {
+ struct clk *clk_ahb;
+};
+
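+/*
+ * Worked example (illustrative): with a 100 MHz controller clock and a
+ * CLK_CTRL_TIMEOUT field of 0, the timeout clock below evaluates to
+ * 100000000 / 2^13, i.e. roughly 12.2 kHz.
+ */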
+static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
+{
+ u32 div;
+ unsigned long freq;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET);
+ div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT;
+
+ freq = clk_get_rate(pltfm_host->clk);
+ freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div);
+
+ return freq;
+}
+
+static struct sdhci_ops sdhci_arasan_ops = {
+ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_arasan_get_timeout_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static struct sdhci_pltfm_data sdhci_arasan_pdata = {
+ .ops = &sdhci_arasan_ops,
+};
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * sdhci_arasan_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ * Returns 0 on success and error value on error
+ *
+ * Put the device in a low power state.
+ */
+static int sdhci_arasan_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+ int ret;
+
+ ret = sdhci_suspend_host(host);
+ if (ret)
+ return ret;
+
+ clk_disable(pltfm_host->clk);
+ clk_disable(sdhci_arasan->clk_ahb);
+
+ return 0;
+}
+
+/**
+ * sdhci_arasan_resume - Resume method for the driver
+ * @dev: Address of the device structure
+ * Returns 0 on success and error value on error
+ *
+ * Resume operation after suspend
+ */
+static int sdhci_arasan_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+ int ret;
+
+ ret = clk_enable(sdhci_arasan->clk_ahb);
+ if (ret) {
+ dev_err(dev, "Cannot enable AHB clock.\n");
+ return ret;
+ }
+
+ ret = clk_enable(pltfm_host->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable SD clock.\n");
+ clk_disable(sdhci_arasan->clk_ahb);
+ return ret;
+ }
+
+ return sdhci_resume_host(host);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
+ sdhci_arasan_resume);
+
+static int sdhci_arasan_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct clk *clk_xin;
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_arasan_data *sdhci_arasan;
+
+ sdhci_arasan = devm_kzalloc(&pdev->dev, sizeof(*sdhci_arasan),
+ GFP_KERNEL);
+ if (!sdhci_arasan)
+ return -ENOMEM;
+
+ sdhci_arasan->clk_ahb = devm_clk_get(&pdev->dev, "clk_ahb");
+ if (IS_ERR(sdhci_arasan->clk_ahb)) {
+ dev_err(&pdev->dev, "clk_ahb clock not found.\n");
+ return PTR_ERR(sdhci_arasan->clk_ahb);
+ }
+
+ clk_xin = devm_clk_get(&pdev->dev, "clk_xin");
+ if (IS_ERR(clk_xin)) {
+ dev_err(&pdev->dev, "clk_xin clock not found.\n");
+ return PTR_ERR(clk_xin);
+ }
+
+ ret = clk_prepare_enable(sdhci_arasan->clk_ahb);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable AHB clock.\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(clk_xin);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable SD clock.\n");
+ goto clk_dis_ahb;
+ }
+
+ host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata, 0);
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ goto clk_disable_all;
+ }
+
+ sdhci_get_of_property(pdev);
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = sdhci_arasan;
+ pltfm_host->clk = clk_xin;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret) {
+		dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
+ goto clk_disable_all;
+ }
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_pltfm_free;
+
+ return 0;
+
+err_pltfm_free:
+ sdhci_pltfm_free(pdev);
+clk_disable_all:
+ clk_disable_unprepare(clk_xin);
+clk_dis_ahb:
+ clk_disable_unprepare(sdhci_arasan->clk_ahb);
+
+ return ret;
+}
+
+static int sdhci_arasan_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+
+ clk_disable_unprepare(sdhci_arasan->clk_ahb);
+
+ return sdhci_pltfm_unregister(pdev);
+}
+
+static const struct of_device_id sdhci_arasan_of_match[] = {
+ { .compatible = "arasan,sdhci-8.9a" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
+
+static struct platform_driver sdhci_arasan_driver = {
+ .driver = {
+ .name = "sdhci-arasan",
+ .of_match_table = sdhci_arasan_of_match,
+ .pm = &sdhci_arasan_dev_pm_ops,
+ },
+ .probe = sdhci_arasan_probe,
+ .remove = sdhci_arasan_remove,
+};
+
+module_platform_driver(sdhci_arasan_driver);
+
+MODULE_DESCRIPTION("Driver for the Arasan SDHCI Controller");
+MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/mmc/host/sdhci-of-esdhc.c b/kernel/drivers/mmc/host/sdhci-of-esdhc.c
new file mode 100644
index 000000000..22e9111b1
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-of-esdhc.c
@@ -0,0 +1,412 @@
+/*
+ * Freescale eSDHC controller driver.
+ *
+ * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mmc/host.h>
+#include "sdhci-pltfm.h"
+#include "sdhci-esdhc.h"
+
+#define VENDOR_V_22 0x12
+#define VENDOR_V_23 0x13
+static u32 esdhc_readl(struct sdhci_host *host, int reg)
+{
+ u32 ret;
+
+ ret = in_be32(host->ioaddr + reg);
+	/*
+	 * The ADMA flag bit in the eSDHC is not compatible with the standard
+	 * SDHC register, so set the fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
+	 * supported by the eSDHC.
+	 * On many FSL eSDHC controllers the reset value of the
+	 * SDHCI_CAN_DO_ADMA1 field is one, but some of them cannot support
+	 * ADMA; only those with a vendor version greater than 2.2 (0x12)
+	 * support it. FSL eSDHC accesses must be 4-byte aligned, so read the
+	 * vendor version number at 0xFC (SDHCI_SLOT_INT_STATUS); 0xFE is
+	 * SDHCI_HOST_VERSION.
+	 */
+ if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) {
+ u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
+ tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
+ if (tmp > VENDOR_V_22)
+ ret |= SDHCI_CAN_DO_ADMA2;
+ }
+
+ return ret;
+}
+
+static u16 esdhc_readw(struct sdhci_host *host, int reg)
+{
+ u16 ret;
+ int base = reg & ~0x3;
+ int shift = (reg & 0x2) * 8;
+
+ if (unlikely(reg == SDHCI_HOST_VERSION))
+ ret = in_be32(host->ioaddr + base) & 0xffff;
+ else
+ ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff;
+ return ret;
+}
+
+static u8 esdhc_readb(struct sdhci_host *host, int reg)
+{
+ int base = reg & ~0x3;
+ int shift = (reg & 0x3) * 8;
+ u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff;
+
+ /*
+	 * "DMA select" is located at offset 0x28 in the SD specification,
+	 * but on P5020 or P3041 it is located at 0x29.
+ */
+ if (reg == SDHCI_HOST_CONTROL) {
+ u32 dma_bits;
+
+ dma_bits = in_be32(host->ioaddr + reg);
+ /* DMA select is 22,23 bits in Protocol Control Register */
+ dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK;
+
+ /* fixup the result */
+ ret &= ~SDHCI_CTRL_DMA_MASK;
+ ret |= dma_bits;
+ }
+
+ return ret;
+}
+
+static void esdhc_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ /*
+	 * Enabling IRQSTATEN[BGESEN] only makes IRQSTAT[BGE] get set
+	 * when SYSCTL[RSTD] is asserted for some special operations;
+	 * it has no impact on other operations.
+ */
+ if (reg == SDHCI_INT_ENABLE)
+ val |= SDHCI_INT_BLK_GAP;
+ sdhci_be32bs_writel(host, val, reg);
+}
+
+static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ if (reg == SDHCI_BLOCK_SIZE) {
+ /*
+ * Two last DMA bits are reserved, and first one is used for
+		 * The last two DMA boundary bits are reserved, and the first
+		 * one is used for the non-standard blksz of 4096 bytes that
+		 * we don't support yet. So clear the DMA boundary bits.
+ val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
+ }
+ sdhci_be32bs_writew(host, val, reg);
+}
+
+static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ /*
+ * "DMA select" location is offset 0x28 in SD specification, but on
+ * P5020 or P3041, it's located at 0x29.
+ */
+ if (reg == SDHCI_HOST_CONTROL) {
+ u32 dma_bits;
+
+ /*
+		 * If the host control register is not standard, exit
+		 * this function.
+ */
+ if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
+ return;
+
+ /* DMA select is 22,23 bits in Protocol Control Register */
+ dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5;
+ clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5,
+ dma_bits);
+ val &= ~SDHCI_CTRL_DMA_MASK;
+ val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK;
+ }
+
+ /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
+ if (reg == SDHCI_HOST_CONTROL)
+ val &= ~ESDHC_HOST_CONTROL_RES;
+ sdhci_be32bs_writeb(host, val, reg);
+}
+
+/*
+ * For Abort or Suspend after Stop at Block Gap, ignore the ADMA error
+ * (IRQSTAT[ADMAE]) if both Transfer Complete (IRQSTAT[TC]) and Block Gap
+ * Event (IRQSTAT[BGE]) are also set.
+ * For Continue, apply a soft reset for data (SYSCTL[RSTD]) and re-issue
+ * the entire read transaction from the beginning.
+ */
+static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask)
+{
+ u32 tmp;
+ bool applicable;
+ dma_addr_t dmastart;
+ dma_addr_t dmanow;
+
+ tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
+ tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
+
+ applicable = (intmask & SDHCI_INT_DATA_END) &&
+ (intmask & SDHCI_INT_BLK_GAP) &&
+ (tmp == VENDOR_V_23);
+ if (!applicable)
+ return;
+
+ host->data->error = 0;
+ dmastart = sg_dma_address(host->data->sg);
+ dmanow = dmastart + host->data->bytes_xfered;
+ /*
+ * Force update to the next DMA block boundary.
+ */
+ dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
+ SDHCI_DEFAULT_BOUNDARY_SIZE;
+ host->data->bytes_xfered = dmanow - dmastart;
+ sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
+}
+
+static int esdhc_of_enable_dma(struct sdhci_host *host)
+{
+ setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
+ return 0;
+}
+
+static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return pltfm_host->clock;
+}
+
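+/* Minimum clock: base clock divided by the largest divider (256 * 16) */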
+static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return pltfm_host->clock / 256 / 16;
+}
+
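+/*
+ * Worked example (illustrative): with host->max_clk = 200 MHz and a
+ * requested clock of 25 MHz, the loops below settle on pre_div = 2 and
+ * div = 4 (200 / 2 / 4 = 25 MHz); the register is then programmed with
+ * pre_div >> 1 = 1 and div - 1 = 3.
+ */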
+static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ int pre_div = 2;
+ int div = 1;
+ u32 temp;
+
+ host->mmc->actual_clock = 0;
+
+ if (clock == 0)
+ return;
+
+ /* Workaround to reduce the clock frequency for p1010 esdhc */
+ if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
+ if (clock > 20000000)
+ clock -= 5000000;
+ if (clock > 40000000)
+ clock -= 5000000;
+ }
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | ESDHC_CLOCK_MASK);
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+ while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ pre_div *= 2;
+
+ while (host->max_clk / pre_div / div > clock && div < 16)
+ div++;
+
+ dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+ clock, host->max_clk / pre_div / div);
+
+ pre_div >>= 1;
+ div--;
+
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | (div << ESDHC_DIVIDER_SHIFT)
+ | (pre_div << ESDHC_PREDIV_SHIFT));
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ mdelay(1);
+}
+
+static void esdhc_of_platform_init(struct sdhci_host *host)
+{
+ u32 vvn;
+
+ vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
+ vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
+ if (vvn == VENDOR_V_22)
+ host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
+
+ if (vvn > VENDOR_V_22)
+ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
+}
+
+static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
+{
+ u32 ctrl;
+
+ switch (width) {
+ case MMC_BUS_WIDTH_8:
+ ctrl = ESDHC_CTRL_8BITBUS;
+ break;
+
+ case MMC_BUS_WIDTH_4:
+ ctrl = ESDHC_CTRL_4BITBUS;
+ break;
+
+ default:
+ ctrl = 0;
+ break;
+ }
+
+ clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL,
+ ESDHC_CTRL_BUSWIDTH_MASK, ctrl);
+}
+
+static void esdhc_reset(struct sdhci_host *host, u8 mask)
+{
+ sdhci_reset(host, mask);
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+}
+
+static const struct sdhci_ops sdhci_esdhc_ops = {
+ .read_l = esdhc_readl,
+ .read_w = esdhc_readw,
+ .read_b = esdhc_readb,
+ .write_l = esdhc_writel,
+ .write_w = esdhc_writew,
+ .write_b = esdhc_writeb,
+ .set_clock = esdhc_of_set_clock,
+ .enable_dma = esdhc_of_enable_dma,
+ .get_max_clock = esdhc_of_get_max_clock,
+ .get_min_clock = esdhc_of_get_min_clock,
+ .platform_init = esdhc_of_platform_init,
+ .adma_workaround = esdhci_of_adma_workaround,
+ .set_bus_width = esdhc_pltfm_set_bus_width,
+ .reset = esdhc_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+#ifdef CONFIG_PM
+
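+/*
+ * Protocol control register, saved across suspend/resume; note this is a
+ * single static copy shared by all hosts.
+ */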
+static u32 esdhc_proctl;
+static int esdhc_of_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
+
+ return sdhci_suspend_host(host);
+}
+
+static int esdhc_of_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ int ret = sdhci_resume_host(host);
+
+ if (ret == 0) {
+ /* Isn't this already done by sdhci_resume_host() ? --rmk */
+ esdhc_of_enable_dma(host);
+ sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
+ }
+
+ return ret;
+}
+
+static const struct dev_pm_ops esdhc_pmops = {
+ .suspend = esdhc_of_suspend,
+ .resume = esdhc_of_resume,
+};
+#define ESDHC_PMOPS (&esdhc_pmops)
+#else
+#define ESDHC_PMOPS NULL
+#endif
+
+static const struct sdhci_pltfm_data sdhci_esdhc_pdata = {
+ /*
+	 * Card detection could be handled via GPIO.
+	 * eSDHC cannot support the End Attribute in the NOP ADMA descriptor.
+ */
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
+ | SDHCI_QUIRK_NO_CARD_NO_RESET
+ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .ops = &sdhci_esdhc_ops,
+};
+
+static int sdhci_esdhc_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct device_node *np;
+ int ret;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_pdata, 0);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ sdhci_get_of_property(pdev);
+
+ np = pdev->dev.of_node;
+ if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
+ /*
+ * Freescale messed up with P2020 as it has a non-standard
+ * host control register
+ */
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
+ }
+
+ /* call to generic mmc_of_parse to support additional capabilities */
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err;
+
+ mmc_of_parse_voltage(np, &host->ocr_mask);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err;
+
+ return 0;
+ err:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static const struct of_device_id sdhci_esdhc_of_match[] = {
+ { .compatible = "fsl,mpc8379-esdhc" },
+ { .compatible = "fsl,mpc8536-esdhc" },
+ { .compatible = "fsl,esdhc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
+
+static struct platform_driver sdhci_esdhc_driver = {
+ .driver = {
+ .name = "sdhci-esdhc",
+ .of_match_table = sdhci_esdhc_of_match,
+ .pm = ESDHC_PMOPS,
+ },
+ .probe = sdhci_esdhc_probe,
+ .remove = sdhci_pltfm_unregister,
+};
+
+module_platform_driver(sdhci_esdhc_driver);
+
+MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
+MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
+ "Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-of-hlwd.c b/kernel/drivers/mmc/host/sdhci-of-hlwd.c
new file mode 100644
index 000000000..4079a96ad
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-of-hlwd.c
@@ -0,0 +1,98 @@
+/*
+ * drivers/mmc/host/sdhci-of-hlwd.c
+ *
+ * Nintendo Wii Secure Digital Host Controller Interface.
+ * Copyright (C) 2009 The GameCube Linux Team
+ * Copyright (C) 2009 Albert Herranz
+ *
+ * Based on sdhci-of-esdhc.c
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mmc/host.h>
+#include "sdhci-pltfm.h"
+
+/*
+ * Ops and quirks for the Nintendo Wii SDHCI controllers.
+ */
+
+/*
+ * We need a small delay after each write, or things go horribly wrong.
+ */
+#define SDHCI_HLWD_WRITE_DELAY 5 /* usecs */
+
+static void sdhci_hlwd_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ sdhci_be32bs_writel(host, val, reg);
+ udelay(SDHCI_HLWD_WRITE_DELAY);
+}
+
+static void sdhci_hlwd_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ sdhci_be32bs_writew(host, val, reg);
+ udelay(SDHCI_HLWD_WRITE_DELAY);
+}
+
+static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ sdhci_be32bs_writeb(host, val, reg);
+ udelay(SDHCI_HLWD_WRITE_DELAY);
+}
+
+static const struct sdhci_ops sdhci_hlwd_ops = {
+ .read_l = sdhci_be32bs_readl,
+ .read_w = sdhci_be32bs_readw,
+ .read_b = sdhci_be32bs_readb,
+ .write_l = sdhci_hlwd_writel,
+ .write_w = sdhci_hlwd_writew,
+ .write_b = sdhci_hlwd_writeb,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_hlwd_pdata = {
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE,
+ .ops = &sdhci_hlwd_ops,
+};
+
+static int sdhci_hlwd_probe(struct platform_device *pdev)
+{
+ return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata, 0);
+}
+
+static const struct of_device_id sdhci_hlwd_of_match[] = {
+ { .compatible = "nintendo,hollywood-sdhci" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_hlwd_of_match);
+
+static struct platform_driver sdhci_hlwd_driver = {
+ .driver = {
+ .name = "sdhci-hlwd",
+ .of_match_table = sdhci_hlwd_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
+ },
+ .probe = sdhci_hlwd_probe,
+ .remove = sdhci_pltfm_unregister,
+};
+
+module_platform_driver(sdhci_hlwd_driver);
+
+MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver");
+MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-pci-data.c b/kernel/drivers/mmc/host/sdhci-pci-data.c
new file mode 100644
index 000000000..a61121776
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-pci-data.c
@@ -0,0 +1,5 @@
+#include <linux/module.h>
+#include <linux/mmc/sdhci-pci-data.h>
+
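+/*
+ * Optional hook: platform code may install this before the sdhci-pci
+ * driver probes, to supply per-slot platform data (it is consumed in
+ * sdhci_pci_probe_slot()).  A hypothetical board file might do:
+ *
+ *	static struct sdhci_pci_data *my_get_data(struct pci_dev *pdev,
+ *						  int slotno)
+ *	{
+ *		return &my_board_pdata;
+ *	}
+ *	sdhci_pci_get_data = my_get_data;
+ *
+ * (my_get_data and my_board_pdata are illustrative names only.)
+ */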
+struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno);
+EXPORT_SYMBOL_GPL(sdhci_pci_get_data);
diff --git a/kernel/drivers/mmc/host/sdhci-pci-o2micro.c b/kernel/drivers/mmc/host/sdhci-pci-o2micro.c
new file mode 100644
index 000000000..e2ec108db
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2013 BayHub Technology Ltd.
+ *
+ * Authors: Peter Guo <peter.guo@bayhubtech.com>
+ * Adam Lee <adam.lee@canonical.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include "sdhci.h"
+#include "sdhci-pci.h"
+#include "sdhci-pci-o2micro.h"
+
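+/*
+ * The base clock setting lives in the upper 16 bits of O2_SD_PLL_SETTING;
+ * this helper preserves the lower half and installs the new value.
+ */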
+static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value)
+{
+ u32 scratch_32;
+ pci_read_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, &scratch_32);
+
+ scratch_32 &= 0x0000FFFF;
+ scratch_32 |= value;
+
+ pci_write_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, scratch_32);
+}
+
+static void o2_pci_led_enable(struct sdhci_pci_chip *chip)
+{
+ int ret;
+ u32 scratch_32;
+
+	/* Enable the LED of the SD host function */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_FUNC_REG0, &scratch_32);
+ if (ret)
+ return;
+
+ scratch_32 &= ~O2_SD_FREG0_LEDOFF;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_FUNC_REG0, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_TEST_REG, &scratch_32);
+ if (ret)
+ return;
+
+ scratch_32 |= O2_SD_LED_ENABLE;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_TEST_REG, scratch_32);
+}
+
+void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
+{
+ u32 scratch_32;
+ int ret;
+ /* Improve write performance for SD3.0 */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_DEV_CTRL, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~((1 << 12) | (1 << 13) | (1 << 14));
+ pci_write_config_dword(chip->pdev, O2_SD_DEV_CTRL, scratch_32);
+
+	/* Enable reset generation on abnormal link conditions */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_MISC_REG5, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~((1 << 19) | (1 << 11));
+ scratch_32 |= (1 << 10);
+ pci_write_config_dword(chip->pdev, O2_SD_MISC_REG5, scratch_32);
+
+	/* Set card power over-current protection */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_TEST_REG, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 |= (1 << 4);
+ pci_write_config_dword(chip->pdev, O2_SD_TEST_REG, scratch_32);
+
+ /* adjust the output delay for SD mode */
+ pci_write_config_dword(chip->pdev, O2_SD_DELAY_CTRL, 0x00002492);
+
+ /* Set the output voltage setting of Aux 1.2v LDO */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_LD0_CTRL, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(3 << 12);
+ pci_write_config_dword(chip->pdev, O2_SD_LD0_CTRL, scratch_32);
+
+ /* Set Max power supply capability of SD host */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_CAP_REG0, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0x01FE);
+ scratch_32 |= 0x00CC;
+ pci_write_config_dword(chip->pdev, O2_SD_CAP_REG0, scratch_32);
+ /* Set DLL Tuning Window */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_TUNING_CTRL, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0x000000FF);
+ scratch_32 |= 0x00000066;
+ pci_write_config_dword(chip->pdev, O2_SD_TUNING_CTRL, scratch_32);
+
+ /* Set UHS2 T_EIDLE */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_UHS2_L1_CTRL, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0x000000FC);
+ scratch_32 |= 0x00000084;
+ pci_write_config_dword(chip->pdev, O2_SD_UHS2_L1_CTRL, scratch_32);
+
+ /* Set UHS2 Termination */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_FUNC_REG3, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~((1 << 21) | (1 << 30));
+
+ pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32);
+
+ /* Set L1 Entrance Timer */
+ ret = pci_read_config_dword(chip->pdev, O2_SD_CAPS, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0xf0000000);
+ scratch_32 |= 0x30000000;
+ pci_write_config_dword(chip->pdev, O2_SD_CAPS, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_MISC_CTRL4, &scratch_32);
+ if (ret)
+ return;
+ scratch_32 &= ~(0x000f0000);
+ scratch_32 |= 0x00080000;
+ pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32);
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_o2_fujin2_pci_init);
+
+int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
+{
+ struct sdhci_pci_chip *chip;
+ struct sdhci_host *host;
+ u32 reg;
+
+ chip = slot->chip;
+ host = slot->host;
+ switch (chip->pdev->device) {
+ case PCI_DEVICE_ID_O2_SDS0:
+ case PCI_DEVICE_ID_O2_SEABIRD0:
+ case PCI_DEVICE_ID_O2_SEABIRD1:
+ case PCI_DEVICE_ID_O2_SDS1:
+ case PCI_DEVICE_ID_O2_FUJIN2:
+ reg = sdhci_readl(host, O2_SD_VENDOR_SETTING);
+ if (reg & 0x1)
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+
+ if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2)
+ break;
+		/* Set the DLL watchdog timer */
+ reg = sdhci_readl(host, O2_SD_VENDOR_SETTING2);
+ reg |= (1 << 12);
+ sdhci_writel(host, reg, O2_SD_VENDOR_SETTING2);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe_slot);
+
+int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+{
+ int ret;
+ u8 scratch;
+ u32 scratch_32;
+
+ switch (chip->pdev->device) {
+ case PCI_DEVICE_ID_O2_8220:
+ case PCI_DEVICE_ID_O2_8221:
+ case PCI_DEVICE_ID_O2_8320:
+ case PCI_DEVICE_ID_O2_8321:
+ /* This extra setup is required due to broken ADMA. */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+ /* Set Multi 3 to VCC3V# */
+ pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
+
+ /* Disable CLK_REQ# support after media DET */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_CLKREQ, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x20;
+ pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
+
+ /* Choose capabilities, enable SDMA. We have to write 0x01
+ * to the capabilities register first to unlock it.
+ */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x01;
+ pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+ pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+
+ /* Disable ADMA1/2 */
+ pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
+ pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
+
+ /* Disable the infinite transfer mode */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_INF_MOD, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x08;
+ pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+ case PCI_DEVICE_ID_O2_SDS0:
+ case PCI_DEVICE_ID_O2_SDS1:
+ case PCI_DEVICE_ID_O2_FUJIN2:
+ /* UnLock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+		/* FUJIN2 (DevId 0x8520), subId 0x11 or 0x12, needs extra setup */
+ if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2) {
+			ret = pci_read_config_dword(chip->pdev,
+						    O2_SD_FUNC_REG0,
+						    &scratch_32);
+			if (ret)
+				return ret;
+			scratch_32 = ((scratch_32 & 0xFF000000) >> 24);
+
+			/* Check whether subId is 0x11 or 0x12 */
+ if ((scratch_32 == 0x11) || (scratch_32 == 0x12)) {
+ scratch_32 = 0x2c280000;
+
+				/* Set base clock to 208 MHz */
+ o2_pci_set_baseclk(chip, scratch_32);
+				ret = pci_read_config_dword(chip->pdev,
+						O2_SD_FUNC_REG4,
+						&scratch_32);
+				if (ret)
+					return ret;
+
+ /* Enable Base Clk setting change */
+ scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_FUNC_REG4,
+ scratch_32);
+
+ /* Set Tuning Window to 4 */
+ pci_write_config_byte(chip->pdev,
+ O2_SD_TUNING_CTRL, 0x44);
+
+ break;
+ }
+ }
+
+ /* Enable 8520 led function */
+ o2_pci_led_enable(chip);
+
+ /* Set timeout CLK */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_CLK_SETTING, &scratch_32);
+ if (ret)
+ return ret;
+
+ scratch_32 &= ~(0xFF00);
+ scratch_32 |= 0x07E0C800;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_CLK_SETTING, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_CLKREQ, &scratch_32);
+ if (ret)
+ return ret;
+ scratch_32 |= 0x3;
+ pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32);
+
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, &scratch_32);
+ if (ret)
+ return ret;
+
+ scratch_32 &= ~(0x1F3F070E);
+ scratch_32 |= 0x18270106;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, scratch_32);
+
+		/* Disable UHS1 function */
+ ret = pci_read_config_dword(chip->pdev,
+ O2_SD_CAP_REG2, &scratch_32);
+ if (ret)
+ return ret;
+ scratch_32 &= ~(0xE0);
+ pci_write_config_dword(chip->pdev,
+ O2_SD_CAP_REG2, scratch_32);
+
+ if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2)
+ sdhci_pci_o2_fujin2_pci_init(chip);
+
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+ case PCI_DEVICE_ID_O2_SEABIRD0:
+ case PCI_DEVICE_ID_O2_SEABIRD1:
+ /* UnLock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+		ret = pci_read_config_dword(chip->pdev,
+				O2_SD_PLL_SETTING, &scratch_32);
+		if (ret)
+			return ret;
+
+ if ((scratch_32 & 0xff000000) == 0x01000000) {
+ scratch_32 &= 0x0000FFFF;
+ scratch_32 |= 0x1F340000;
+
+ pci_write_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, scratch_32);
+ } else {
+ scratch_32 &= 0x0000FFFF;
+ scratch_32 |= 0x2c280000;
+
+ pci_write_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, scratch_32);
+
+			ret = pci_read_config_dword(chip->pdev,
+					O2_SD_FUNC_REG4,
+					&scratch_32);
+			if (ret)
+				return ret;
+ scratch_32 |= (1 << 22);
+ pci_write_config_dword(chip->pdev,
+ O2_SD_FUNC_REG4, scratch_32);
+ }
+
+ /* Set Tuning Windows to 5 */
+ pci_write_config_byte(chip->pdev,
+ O2_SD_TUNING_CTRL, 0x55);
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe);
+
+int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip)
+{
+ sdhci_pci_o2_probe(chip);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_o2_resume);
diff --git a/kernel/drivers/mmc/host/sdhci-pci-o2micro.h b/kernel/drivers/mmc/host/sdhci-pci-o2micro.h
new file mode 100644
index 000000000..f7ffc908d
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-pci-o2micro.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2013 BayHub Technology Ltd.
+ *
+ * Authors: Peter Guo <peter.guo@bayhubtech.com>
+ * Adam Lee <adam.lee@canonical.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDHCI_PCI_O2MICRO_H
+#define __SDHCI_PCI_O2MICRO_H
+
+#include "sdhci-pci.h"
+
+/*
+ * O2Micro device IDs
+ */
+
+#define PCI_DEVICE_ID_O2_SDS0 0x8420
+#define PCI_DEVICE_ID_O2_SDS1 0x8421
+#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
+#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
+#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
+
+/*
+ * O2Micro device registers
+ */
+
+#define O2_SD_MISC_REG5 0x64
+#define O2_SD_LD0_CTRL 0x68
+#define O2_SD_DEV_CTRL 0x88
+#define O2_SD_LOCK_WP 0xD3
+#define O2_SD_TEST_REG 0xD4
+#define O2_SD_FUNC_REG0 0xDC
+#define O2_SD_MULTI_VCC3V 0xEE
+#define O2_SD_CLKREQ 0xEC
+#define O2_SD_CAPS 0xE0
+#define O2_SD_ADMA1 0xE2
+#define O2_SD_ADMA2 0xE7
+#define O2_SD_INF_MOD 0xF1
+#define O2_SD_MISC_CTRL4 0xFC
+#define O2_SD_TUNING_CTRL 0x300
+#define O2_SD_PLL_SETTING 0x304
+#define O2_SD_CLK_SETTING 0x328
+#define O2_SD_CAP_REG2 0x330
+#define O2_SD_CAP_REG0 0x334
+#define O2_SD_UHS1_CAP_SETTING 0x33C
+#define O2_SD_DELAY_CTRL 0x350
+#define O2_SD_UHS2_L1_CTRL 0x35C
+#define O2_SD_FUNC_REG3 0x3E0
+#define O2_SD_FUNC_REG4 0x3E4
+#define O2_SD_LED_ENABLE BIT(6)
+#define O2_SD_FREG0_LEDOFF BIT(13)
+#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
+
+#define O2_SD_VENDOR_SETTING 0x110
+#define O2_SD_VENDOR_SETTING2 0x1C8
+
+extern void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip);
+
+extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
+
+extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
+
+extern int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
+
+#endif /* __SDHCI_PCI_O2MICRO_H */
diff --git a/kernel/drivers/mmc/host/sdhci-pci.c b/kernel/drivers/mmc/host/sdhci-pci.c
new file mode 100644
index 000000000..7a3fc16d0
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-pci.c
@@ -0,0 +1,1708 @@
+/* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface
+ *
+ * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * Thanks to the following companies for their support:
+ *
+ * - JMicron (hardware and technical support)
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mmc/host.h>
+#include <linux/scatterlist.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/mmc/sdhci-pci-data.h>
+
+#include "sdhci.h"
+#include "sdhci-pci.h"
+#include "sdhci-pci-o2micro.h"
+
+/*****************************************************************************\
+ * *
+ * Hardware specific quirk handling *
+ * *
+\*****************************************************************************/
+
+static int ricoh_probe(struct sdhci_pci_chip *chip)
+{
+ if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
+ chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
+ chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
+ return 0;
+}
+
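+/*
+ * The capability register is not implemented on this controller
+ * (hence SDHCI_QUIRK_MISSING_CAPS below), so supply a hand-built set.
+ */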
+static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->caps =
+ ((0x21 << SDHCI_TIMEOUT_CLK_SHIFT)
+ & SDHCI_TIMEOUT_CLK_MASK) |
+
+ ((0x21 << SDHCI_CLOCK_BASE_SHIFT)
+ & SDHCI_CLOCK_BASE_MASK) |
+
+ SDHCI_TIMEOUT_CLK_UNIT |
+ SDHCI_CAN_VDD_330 |
+ SDHCI_CAN_DO_HISPD |
+ SDHCI_CAN_DO_SDMA;
+ return 0;
+}
+
+static int ricoh_mmc_resume(struct sdhci_pci_chip *chip)
+{
+	/*
+	 * Apply a delay to allow the controller to settle; otherwise it
+	 * becomes confused if the card state changed during suspend.
+	 */
+ msleep(500);
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_ricoh = {
+ .probe = ricoh_probe,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_FORCE_DMA |
+ SDHCI_QUIRK_CLOCK_BEFORE_RESET,
+};
+
+static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
+ .probe_slot = ricoh_mmc_probe_slot,
+ .resume = ricoh_mmc_resume,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_CLOCK_BEFORE_RESET |
+ SDHCI_QUIRK_NO_CARD_NO_RESET |
+ SDHCI_QUIRK_MISSING_CAPS
+};
+
+static const struct sdhci_pci_fixes sdhci_ene_712 = {
+ .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_BROKEN_DMA,
+};
+
+static const struct sdhci_pci_fixes sdhci_ene_714 = {
+ .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
+ SDHCI_QUIRK_BROKEN_DMA,
+};
+
+static const struct sdhci_pci_fixes sdhci_cafe = {
+ .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
+ SDHCI_QUIRK_NO_BUSY_IRQ |
+ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_qrk = {
+ .quirks = SDHCI_QUIRK_NO_HISPD_BIT,
+};
+
+static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+ return 0;
+}
+
+/*
+ * ADMA operation is disabled for Moorestown platform due to
+ * hardware bugs.
+ */
+static int mrst_hc_probe(struct sdhci_pci_chip *chip)
+{
+ /*
+	 * The number of slots is fixed to one here for MRST, as SDIO3/5 are
+	 * never used and have hardware bugs.
+ */
+ chip->num_slots = 1;
+ return 0;
+}
+
+static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id)
+{
+ struct sdhci_pci_slot *slot = dev_id;
+ struct sdhci_host *host = slot->host;
+
+ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+ return IRQ_HANDLED;
+}
+
+static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
+{
+ int err, irq, gpio = slot->cd_gpio;
+
+ slot->cd_gpio = -EINVAL;
+ slot->cd_irq = -EINVAL;
+
+ if (!gpio_is_valid(gpio))
+ return;
+
+ err = gpio_request(gpio, "sd_cd");
+ if (err < 0)
+ goto out;
+
+ err = gpio_direction_input(gpio);
+ if (err < 0)
+ goto out_free;
+
+ irq = gpio_to_irq(gpio);
+ if (irq < 0)
+ goto out_free;
+
+ err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING, "sd_cd", slot);
+ if (err)
+ goto out_free;
+
+ slot->cd_gpio = gpio;
+ slot->cd_irq = irq;
+
+ return;
+
+out_free:
+ gpio_free(gpio);
+out:
+ dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n");
+}
+
+static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
+{
+ if (slot->cd_irq >= 0)
+ free_irq(slot->cd_irq, slot);
+ if (gpio_is_valid(slot->cd_gpio))
+ gpio_free(slot->cd_gpio);
+}
+
+#else
+
+static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
+{
+}
+
+static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
+{
+}
+
+#endif
+
+static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
+ slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC |
+ MMC_CAP2_HC_ERASE_SZ;
+ return 0;
+}
+
+static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
+ .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
+ .probe_slot = mrst_hc_probe_slot,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
+ .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
+ .probe = mrst_hc_probe,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .allow_runtime_pm = true,
+ .own_cd_for_runtime_pm = true,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+ .allow_runtime_pm = true,
+ .probe_slot = mfd_sdio_probe_slot,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .allow_runtime_pm = true,
+ .probe_slot = mfd_emmc_probe_slot,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
+ .quirks = SDHCI_QUIRK_BROKEN_ADMA,
+ .probe_slot = pch_hc_probe_slot,
+};
+
+static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
+{
+ u8 reg;
+
+ reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ reg |= 0x10;
+ sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
+ /* For eMMC, minimum is 1us but give it 9us for good measure */
+ udelay(9);
+ reg &= ~0x10;
+ sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
+ /* For eMMC, minimum is 200us but give it 300us for good measure */
+ usleep_range(300, 1000);
+}
+
+static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
+ MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
+ MMC_CAP_BUS_WIDTH_TEST |
+ MMC_CAP_WAIT_WHILE_BUSY;
+ slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
+ slot->hw_reset = sdhci_pci_int_hw_reset;
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
+ slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
+ return 0;
+}
+
+static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
+ MMC_CAP_BUS_WIDTH_TEST |
+ MMC_CAP_WAIT_WHILE_BUSY;
+ return 0;
+}
+
+static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST |
+ MMC_CAP_WAIT_WHILE_BUSY;
+ slot->cd_con_id = NULL;
+ slot->cd_idx = 0;
+ slot->cd_override_level = true;
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
+ .allow_runtime_pm = true,
+ .probe_slot = byt_emmc_probe_slot,
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_STOP_WITH_TC,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .allow_runtime_pm = true,
+ .probe_slot = byt_sdio_probe_slot,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_STOP_WITH_TC,
+ .allow_runtime_pm = true,
+ .own_cd_for_runtime_pm = true,
+ .probe_slot = byt_sd_probe_slot,
+};
+
+/* Define Host controllers for Intel Merrifield platform */
+#define INTEL_MRFL_EMMC_0 0
+#define INTEL_MRFL_EMMC_1 1
+
+static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_0) &&
+ (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_1))
+ /* SD support is not ready yet */
+ return -ENODEV;
+
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
+ MMC_CAP_1_8V_DDR;
+
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .allow_runtime_pm = true,
+ .probe_slot = intel_mrfl_mmc_probe_slot,
+};
+
+/* O2Micro extra registers */
+#define O2_SD_LOCK_WP 0xD3
+#define O2_SD_MULTI_VCC3V 0xEE
+#define O2_SD_CLKREQ 0xEC
+#define O2_SD_CAPS 0xE0
+#define O2_SD_ADMA1 0xE2
+#define O2_SD_ADMA2 0xE7
+#define O2_SD_INF_MOD 0xF1
+
+static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
+{
+ u8 scratch;
+ int ret;
+
+ ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
+ if (ret)
+ return ret;
+
+ /*
+ * Turn PMOS on [bit 0], set over current detection to 2.4 V
+ * [bit 1:2] and enable over current debouncing [bit 6].
+ */
+ if (on)
+ scratch |= 0x47;
+ else
+ scratch &= ~0x47;
+
+ ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int jmicron_probe(struct sdhci_pci_chip *chip)
+{
+ int ret;
+ u16 mmcdev = 0;
+
+ if (chip->pdev->revision == 0) {
+ chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE |
+ SDHCI_QUIRK_RESET_AFTER_REQUEST |
+ SDHCI_QUIRK_BROKEN_SMALL_PIO;
+ }
+
+ /*
+ * JMicron chips can have two interfaces to the same hardware
+ * in order to work around limitations in Microsoft's driver.
+ * We need to make sure we only bind to one of them.
+ *
+ * This code assumes two things:
+ *
+ * 1. The PCI code adds subfunctions in order.
+ *
+ * 2. The MMC interface has a lower subfunction number
+ * than the SD interface.
+ */
+ if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
+ mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
+ else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
+ mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
+
+ if (mmcdev) {
+ struct pci_dev *sd_dev;
+
+ sd_dev = NULL;
+ while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
+ mmcdev, sd_dev)) != NULL) {
+ if ((PCI_SLOT(chip->pdev->devfn) ==
+ PCI_SLOT(sd_dev->devfn)) &&
+ (chip->pdev->bus == sd_dev->bus))
+ break;
+ }
+
+ if (sd_dev) {
+ pci_dev_put(sd_dev);
+ dev_info(&chip->pdev->dev, "Refusing to bind to "
+ "secondary interface.\n");
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * JMicron chips need a bit of a nudge to enable the power
+ * output pins.
+ */
+ ret = jmicron_pmos(chip, 1);
+ if (ret) {
+ dev_err(&chip->pdev->dev, "Failure enabling card power\n");
+ return ret;
+ }
+
+	/* quirk for unstable RO-detection on JM388 chips */
+ if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
+ chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
+ chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;
+
+ return 0;
+}
+
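+/*
+ * Bit 0 of the vendor-specific register at offset 0xC0 gates the MMC
+ * interface on these chips.
+ */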
+static void jmicron_enable_mmc(struct sdhci_host *host, int on)
+{
+ u8 scratch;
+
+ scratch = readb(host->ioaddr + 0xC0);
+
+ if (on)
+ scratch |= 0x01;
+ else
+ scratch &= ~0x01;
+
+ writeb(scratch, host->ioaddr + 0xC0);
+}
+
+static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
+{
+ if (slot->chip->pdev->revision == 0) {
+ u16 version;
+
+ version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
+ version = (version & SDHCI_VENDOR_VER_MASK) >>
+ SDHCI_VENDOR_VER_SHIFT;
+
+ /*
+ * Older versions of the chip have lots of nasty glitches
+ * in the ADMA engine. It's best just to avoid it
+ * completely.
+ */
+ if (version < 0xAC)
+ slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
+ }
+
+ /* JM388 MMC doesn't support 1.8V while SD supports it */
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
+ slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
+ MMC_VDD_29_30 | MMC_VDD_30_31 |
+ MMC_VDD_165_195; /* allow 1.8V */
+ slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
+ MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
+ }
+
+ /*
+ * The secondary interface requires a bit set to get the
+ * interrupts.
+ */
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
+ jmicron_enable_mmc(slot->host, 1);
+
+ slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
+
+ return 0;
+}
+
+static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+ if (dead)
+ return;
+
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
+ jmicron_enable_mmc(slot->host, 0);
+}
+
+static int jmicron_suspend(struct sdhci_pci_chip *chip)
+{
+ int i;
+
+ if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+ chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
+ for (i = 0; i < chip->num_slots; i++)
+ jmicron_enable_mmc(chip->slots[i]->host, 0);
+ }
+
+ return 0;
+}
+
+static int jmicron_resume(struct sdhci_pci_chip *chip)
+{
+ int ret, i;
+
+ if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+ chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
+ for (i = 0; i < chip->num_slots; i++)
+ jmicron_enable_mmc(chip->slots[i]->host, 1);
+ }
+
+ ret = jmicron_pmos(chip, 1);
+ if (ret) {
+ dev_err(&chip->pdev->dev, "Failure enabling card power\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_o2 = {
+ .probe = sdhci_pci_o2_probe,
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .probe_slot = sdhci_pci_o2_probe_slot,
+ .resume = sdhci_pci_o2_resume,
+};
+
+static const struct sdhci_pci_fixes sdhci_jmicron = {
+ .probe = jmicron_probe,
+
+ .probe_slot = jmicron_probe_slot,
+ .remove_slot = jmicron_remove_slot,
+
+ .suspend = jmicron_suspend,
+ .resume = jmicron_resume,
+};
+
+/* SysKonnect CardBus2SDIO extra registers */
+#define SYSKT_CTRL 0x200
+#define SYSKT_RDFIFO_STAT 0x204
+#define SYSKT_WRFIFO_STAT 0x208
+#define SYSKT_POWER_DATA 0x20c
+#define SYSKT_POWER_330 0xef
+#define SYSKT_POWER_300 0xf8
+#define SYSKT_POWER_184 0xcc
+#define SYSKT_POWER_CMD 0x20d
+#define SYSKT_POWER_START (1 << 7)
+#define SYSKT_POWER_STATUS 0x20e
+#define SYSKT_POWER_STATUS_OK (1 << 0)
+#define SYSKT_BOARD_REV 0x210
+#define SYSKT_CHIP_REV 0x211
+#define SYSKT_CONF_DATA 0x212
+#define SYSKT_CONF_DATA_1V8 (1 << 2)
+#define SYSKT_CONF_DATA_2V5 (1 << 1)
+#define SYSKT_CONF_DATA_3V3 (1 << 0)
+
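+/*
+ * The generic slot probe rejects vendor-specific interfaces, so advertise
+ * the DMA interface class instead.
+ */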
+static int syskt_probe(struct sdhci_pci_chip *chip)
+{
+ if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
+ chip->pdev->class &= ~0x0000FF;
+ chip->pdev->class |= PCI_SDHCI_IFDMA;
+ }
+ return 0;
+}
+
+static int syskt_probe_slot(struct sdhci_pci_slot *slot)
+{
+ int tm, ps;
+
+ u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
+ u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
+ dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
+ "board rev %d.%d, chip rev %d.%d\n",
+ board_rev >> 4, board_rev & 0xf,
+ chip_rev >> 4, chip_rev & 0xf);
+ if (chip_rev >= 0x20)
+ slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
+
+ writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
+ writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
+ udelay(50);
+ tm = 10; /* Wait max 1 ms */
+ do {
+ ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
+ if (ps & SYSKT_POWER_STATUS_OK)
+ break;
+ udelay(100);
+ } while (--tm);
+ if (!tm) {
+ dev_err(&slot->chip->pdev->dev,
+ "power regulator never stabilized");
+ writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_syskt = {
+ .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
+ .probe = syskt_probe,
+ .probe_slot = syskt_probe_slot,
+};
+
+static int via_probe(struct sdhci_pci_chip *chip)
+{
+ if (chip->pdev->revision == 0x10)
+ chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
+
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_via = {
+ .probe = via_probe,
+};
+
+static int rtsx_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps2 |= MMC_CAP2_HS200;
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_rtsx = {
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_BROKEN_64_BIT_DMA |
+ SDHCI_QUIRK2_BROKEN_DDR50,
+ .probe_slot = rtsx_probe_slot,
+};
+
+static int amd_probe(struct sdhci_pci_chip *chip)
+{
+ struct pci_dev *smbus_dev;
+
+ smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
+
+ if (smbus_dev && (smbus_dev->revision < 0x51)) {
+ chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
+ chip->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
+ }
+
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_amd = {
+ .probe = amd_probe,
+};
+
+static const struct pci_device_id pci_ids[] = {
+ {
+ .vendor = PCI_VENDOR_ID_RICOH,
+ .device = PCI_DEVICE_ID_RICOH_R5C822,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_ricoh,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_RICOH,
+ .device = 0x843,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_RICOH,
+ .device = 0xe822,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_RICOH,
+ .device = 0xe823,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_ENE,
+ .device = PCI_DEVICE_ID_ENE_CB712_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_ene_712,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_ENE,
+ .device = PCI_DEVICE_ID_ENE_CB712_SD_2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_ene_712,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_ENE,
+ .device = PCI_DEVICE_ID_ENE_CB714_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_ene_714,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_ENE,
+ .device = PCI_DEVICE_ID_ENE_CB714_SD_2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_ene_714,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_MARVELL,
+ .device = PCI_DEVICE_ID_MARVELL_88ALP01_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_cafe,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_JMICRON,
+ .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_jmicron,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_JMICRON,
+ .device = PCI_DEVICE_ID_JMICRON_JMB38X_MMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_jmicron,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_JMICRON,
+ .device = PCI_DEVICE_ID_JMICRON_JMB388_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_jmicron,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_JMICRON,
+ .device = PCI_DEVICE_ID_JMICRON_JMB388_ESD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_jmicron,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_SYSKONNECT,
+ .device = 0x8000,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_syskt,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = 0x95d0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_via,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_REALTEK,
+ .device = 0x5250,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_rtsx,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_QRK_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_qrk,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MRST_SD0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc0,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MRST_SD1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MRST_SD2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_SDIO1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_SDIO2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_EMMC0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MFD_EMMC1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_PCH_SDIO0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_PCH_SDIO1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+		/* Set Tuning Window to 5 */
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BYT_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BYT_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BYT_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BYT_EMMC2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BSW_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BSW_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BSW_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_EMMC0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CLV_EMMC1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MRFL_MMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8120,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8220,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8221,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8320,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_8321,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_FUJIN2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_SDS0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_SDS1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_SEABIRD0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_O2,
+ .device = PCI_DEVICE_ID_O2_SEABIRD1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_o2,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_ANY_ID,
+ .class = PCI_CLASS_SYSTEM_SDHCI << 8,
+ .class_mask = 0xFFFF00,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_amd,
+ },
+ { /* Generic SD host controller */
+ PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
+ },
+
+ { /* end: all zeroes */ },
+};
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+/*****************************************************************************\
+ * *
+ * SDHCI core callbacks *
+ * *
+\*****************************************************************************/
+
+static int sdhci_pci_enable_dma(struct sdhci_host *host)
+{
+ struct sdhci_pci_slot *slot;
+ struct pci_dev *pdev;
+ int ret = -1;
+
+ slot = sdhci_priv(host);
+ pdev = slot->chip->pdev;
+
+ if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
+ ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
+ (host->flags & SDHCI_USE_SDMA)) {
+ dev_warn(&pdev->dev, "Will use DMA mode even though HW "
+ "doesn't fully claim to support it.\n");
+ }
+
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
+ if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) {
+ host->flags &= ~SDHCI_USE_64_BIT_DMA;
+ } else {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ dev_warn(&pdev->dev, "Failed to set 64-bit DMA mask\n");
+ }
+ }
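+	/* ret is still -1 here unless a 64-bit mask was attempted, so fall
+	 * back to (or start with) the 32-bit DMA mask. */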
+ if (ret)
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
+static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+ switch (width) {
+ case MMC_BUS_WIDTH_8:
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ break;
+ case MMC_BUS_WIDTH_4:
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ break;
+ default:
+ ctrl &= ~(SDHCI_CTRL_8BITBUS | SDHCI_CTRL_4BITBUS);
+ break;
+ }
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ int rst_n_gpio = slot->rst_n_gpio;
+
+ if (!gpio_is_valid(rst_n_gpio))
+ return;
+ gpio_set_value_cansleep(rst_n_gpio, 0);
+ /* For eMMC, minimum is 1us but give it 10us for good measure */
+ udelay(10);
+ gpio_set_value_cansleep(rst_n_gpio, 1);
+ /* For eMMC, minimum is 200us but give it 300us for good measure */
+ usleep_range(300, 1000);
+}
+
+static void sdhci_pci_hw_reset(struct sdhci_host *host)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+
+ if (slot->hw_reset)
+ slot->hw_reset(host);
+}
+
+static const struct sdhci_ops sdhci_pci_ops = {
+ .set_clock = sdhci_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_pci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_pci_hw_reset,
+};
+
+/*****************************************************************************\
+ * *
+ * Suspend/resume *
+ * *
+\*****************************************************************************/
+
+#ifdef CONFIG_PM
+
+static int sdhci_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct sdhci_pci_chip *chip;
+ struct sdhci_pci_slot *slot;
+ mmc_pm_flag_t slot_pm_flags;
+ mmc_pm_flag_t pm_flags = 0;
+ int i, ret;
+
+ chip = pci_get_drvdata(pdev);
+ if (!chip)
+ return 0;
+
+ for (i = 0; i < chip->num_slots; i++) {
+ slot = chip->slots[i];
+ if (!slot)
+ continue;
+
+ ret = sdhci_suspend_host(slot->host);
+
+ if (ret)
+ goto err_pci_suspend;
+
+ slot_pm_flags = slot->host->mmc->pm_flags;
+ if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ)
+ sdhci_enable_irq_wakeups(slot->host);
+
+ pm_flags |= slot_pm_flags;
+ }
+
+ if (chip->fixes && chip->fixes->suspend) {
+ ret = chip->fixes->suspend(chip);
+ if (ret)
+ goto err_pci_suspend;
+ }
+
+ if (pm_flags & MMC_PM_KEEP_POWER) {
+ if (pm_flags & MMC_PM_WAKE_SDIO_IRQ)
+ device_init_wakeup(dev, true);
+ else
+ device_init_wakeup(dev, false);
+ } else
+ device_init_wakeup(dev, false);
+
+ return 0;
+
+err_pci_suspend:
+ while (--i >= 0)
+ sdhci_resume_host(chip->slots[i]->host);
+ return ret;
+}
+
+static int sdhci_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct sdhci_pci_chip *chip;
+ struct sdhci_pci_slot *slot;
+ int i, ret;
+
+ chip = pci_get_drvdata(pdev);
+ if (!chip)
+ return 0;
+
+ if (chip->fixes && chip->fixes->resume) {
+ ret = chip->fixes->resume(chip);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < chip->num_slots; i++) {
+ slot = chip->slots[i];
+ if (!slot)
+ continue;
+
+ ret = sdhci_resume_host(slot->host);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sdhci_pci_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+ struct sdhci_pci_chip *chip;
+ struct sdhci_pci_slot *slot;
+ int i, ret;
+
+ chip = pci_get_drvdata(pdev);
+ if (!chip)
+ return 0;
+
+ for (i = 0; i < chip->num_slots; i++) {
+ slot = chip->slots[i];
+ if (!slot)
+ continue;
+
+ ret = sdhci_runtime_suspend_host(slot->host);
+
+ if (ret)
+ goto err_pci_runtime_suspend;
+ }
+
+ if (chip->fixes && chip->fixes->suspend) {
+ ret = chip->fixes->suspend(chip);
+ if (ret)
+ goto err_pci_runtime_suspend;
+ }
+
+ return 0;
+
+err_pci_runtime_suspend:
+ while (--i >= 0)
+ sdhci_runtime_resume_host(chip->slots[i]->host);
+ return ret;
+}
+
+static int sdhci_pci_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+ struct sdhci_pci_chip *chip;
+ struct sdhci_pci_slot *slot;
+ int i, ret;
+
+ chip = pci_get_drvdata(pdev);
+ if (!chip)
+ return 0;
+
+ if (chip->fixes && chip->fixes->resume) {
+ ret = chip->fixes->resume(chip);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < chip->num_slots; i++) {
+ slot = chip->slots[i];
+ if (!slot)
+ continue;
+
+ ret = sdhci_runtime_resume_host(slot->host);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define sdhci_pci_suspend NULL
+#define sdhci_pci_resume NULL
+
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops sdhci_pci_pm_ops = {
+ .suspend = sdhci_pci_suspend,
+ .resume = sdhci_pci_resume,
+ SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
+ sdhci_pci_runtime_resume, NULL)
+};
+
+/*****************************************************************************\
+ * *
+ * Device probing/removal *
+ * *
+\*****************************************************************************/
+
+static struct sdhci_pci_slot *sdhci_pci_probe_slot(
+ struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
+ int slotno)
+{
+ struct sdhci_pci_slot *slot;
+ struct sdhci_host *host;
+ int ret, bar = first_bar + slotno;
+
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (pci_resource_len(pdev, bar) < 0x100) {
+ dev_err(&pdev->dev, "Invalid iomem size. You may "
+ "experience problems.\n");
+ }
+
+ if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
+ dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
+ dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot));
+ if (IS_ERR(host)) {
+ dev_err(&pdev->dev, "cannot allocate host\n");
+ return ERR_CAST(host);
+ }
+
+ slot = sdhci_priv(host);
+
+ slot->chip = chip;
+ slot->host = host;
+ slot->pci_bar = bar;
+ slot->rst_n_gpio = -EINVAL;
+ slot->cd_gpio = -EINVAL;
+ slot->cd_idx = -1;
+
+ /* Retrieve platform data if there is any */
+ if (*sdhci_pci_get_data)
+ slot->data = sdhci_pci_get_data(pdev, slotno);
+
+ if (slot->data) {
+ if (slot->data->setup) {
+ ret = slot->data->setup(slot->data);
+ if (ret) {
+ dev_err(&pdev->dev, "platform setup failed\n");
+ goto free;
+ }
+ }
+ slot->rst_n_gpio = slot->data->rst_n_gpio;
+ slot->cd_gpio = slot->data->cd_gpio;
+ }
+
+ host->hw_name = "PCI";
+ host->ops = &sdhci_pci_ops;
+ host->quirks = chip->quirks;
+ host->quirks2 = chip->quirks2;
+
+ host->irq = pdev->irq;
+
+ ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc));
+ if (ret) {
+ dev_err(&pdev->dev, "cannot request region\n");
+ goto cleanup;
+ }
+
+ host->ioaddr = pci_ioremap_bar(pdev, bar);
+ if (!host->ioaddr) {
+ dev_err(&pdev->dev, "failed to remap registers\n");
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ if (chip->fixes && chip->fixes->probe_slot) {
+ ret = chip->fixes->probe_slot(slot);
+ if (ret)
+ goto unmap;
+ }
+
+ if (gpio_is_valid(slot->rst_n_gpio)) {
+ if (!gpio_request(slot->rst_n_gpio, "eMMC_reset")) {
+ gpio_direction_output(slot->rst_n_gpio, 1);
+ slot->host->mmc->caps |= MMC_CAP_HW_RESET;
+ slot->hw_reset = sdhci_pci_gpio_hw_reset;
+ } else {
+ dev_warn(&pdev->dev, "failed to request rst_n_gpio\n");
+ slot->rst_n_gpio = -EINVAL;
+ }
+ }
+
+ host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+ host->mmc->slotno = slotno;
+ host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
+
+ if (slot->cd_idx >= 0 &&
+ mmc_gpiod_request_cd(host->mmc, slot->cd_con_id, slot->cd_idx,
+ slot->cd_override_level, 0, NULL)) {
+ dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
+ slot->cd_idx = -1;
+ }
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto remove;
+
+ sdhci_pci_add_own_cd(slot);
+
+ /*
+ * Check if the chip needs a separate GPIO for card detect to wake up
+ * from runtime suspend. If it is not there, don't allow runtime PM.
+ * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure.
+ */
+ if (chip->fixes && chip->fixes->own_cd_for_runtime_pm &&
+ !gpio_is_valid(slot->cd_gpio) && slot->cd_idx < 0)
+ chip->allow_runtime_pm = false;
+
+ return slot;
+
+remove:
+ if (gpio_is_valid(slot->rst_n_gpio))
+ gpio_free(slot->rst_n_gpio);
+
+ if (chip->fixes && chip->fixes->remove_slot)
+ chip->fixes->remove_slot(slot, 0);
+
+unmap:
+ iounmap(host->ioaddr);
+
+release:
+ pci_release_region(pdev, bar);
+
+cleanup:
+ if (slot->data && slot->data->cleanup)
+ slot->data->cleanup(slot->data);
+
+free:
+ sdhci_free_host(host);
+
+ return ERR_PTR(ret);
+}
+
+static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
+{
+ int dead;
+ u32 scratch;
+
+ sdhci_pci_remove_own_cd(slot);
+
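+	/* An all-ones read from the interrupt status register usually means
+	 * the controller is gone (e.g. surprise removal). */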
+ dead = 0;
+ scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
+ if (scratch == (u32)-1)
+ dead = 1;
+
+ sdhci_remove_host(slot->host, dead);
+
+ if (gpio_is_valid(slot->rst_n_gpio))
+ gpio_free(slot->rst_n_gpio);
+
+ if (slot->chip->fixes && slot->chip->fixes->remove_slot)
+ slot->chip->fixes->remove_slot(slot, dead);
+
+ if (slot->data && slot->data->cleanup)
+ slot->data->cleanup(slot->data);
+
+ pci_release_region(slot->chip->pdev, slot->pci_bar);
+
+ sdhci_free_host(slot->host);
+}
+
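+/*
+ * Drop the usage count taken at probe time and let the device autosuspend
+ * after 50 ms of inactivity.
+ */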
+static void sdhci_pci_runtime_pm_allow(struct device *dev)
+{
+ pm_runtime_put_noidle(dev);
+ pm_runtime_allow(dev);
+ pm_runtime_set_autosuspend_delay(dev, 50);
+ pm_runtime_use_autosuspend(dev);
+ pm_suspend_ignore_children(dev, 1);
+}
+
+static void sdhci_pci_runtime_pm_forbid(struct device *dev)
+{
+ pm_runtime_forbid(dev);
+ pm_runtime_get_noresume(dev);
+}
+
+static int sdhci_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct sdhci_pci_chip *chip;
+ struct sdhci_pci_slot *slot;
+
+ u8 slots, first_bar;
+ int ret, i;
+
+ BUG_ON(pdev == NULL);
+ BUG_ON(ent == NULL);
+
+ dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
+ (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
+
+ ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
+ if (ret)
+ return ret;
+
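+	/* The slot-info register encodes the number of slots minus one. */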
+ slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
+ dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
+ if (slots == 0)
+ return -ENODEV;
+
+ BUG_ON(slots > MAX_SLOTS);
+
+ ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
+ if (ret)
+ return ret;
+
+ first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
+
+ if (first_bar > 5) {
+ dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
+ return -ENODEV;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ chip = kzalloc(sizeof(struct sdhci_pci_chip), GFP_KERNEL);
+ if (!chip) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ chip->pdev = pdev;
+ chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
+ if (chip->fixes) {
+ chip->quirks = chip->fixes->quirks;
+ chip->quirks2 = chip->fixes->quirks2;
+ chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
+ }
+ chip->num_slots = slots;
+
+ pci_set_drvdata(pdev, chip);
+
+ if (chip->fixes && chip->fixes->probe) {
+ ret = chip->fixes->probe(chip);
+ if (ret)
+ goto free;
+ }
+
+ slots = chip->num_slots; /* Quirk may have changed this */
+
+ for (i = 0; i < slots; i++) {
+ slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
+ if (IS_ERR(slot)) {
+ for (i--; i >= 0; i--)
+ sdhci_pci_remove_slot(chip->slots[i]);
+ ret = PTR_ERR(slot);
+ goto free;
+ }
+
+ chip->slots[i] = slot;
+ }
+
+ if (chip->allow_runtime_pm)
+ sdhci_pci_runtime_pm_allow(&pdev->dev);
+
+ return 0;
+
+free:
+ pci_set_drvdata(pdev, NULL);
+ kfree(chip);
+
+err:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void sdhci_pci_remove(struct pci_dev *pdev)
+{
+ int i;
+ struct sdhci_pci_chip *chip;
+
+ chip = pci_get_drvdata(pdev);
+
+ if (chip) {
+ if (chip->allow_runtime_pm)
+ sdhci_pci_runtime_pm_forbid(&pdev->dev);
+
+ for (i = 0; i < chip->num_slots; i++)
+ sdhci_pci_remove_slot(chip->slots[i]);
+
+ pci_set_drvdata(pdev, NULL);
+ kfree(chip);
+ }
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver sdhci_driver = {
+ .name = "sdhci-pci",
+ .id_table = pci_ids,
+ .probe = sdhci_pci_probe,
+ .remove = sdhci_pci_remove,
+ .driver = {
+ .pm = &sdhci_pci_pm_ops
+ },
+};
+
+module_pci_driver(sdhci_driver);
+
+MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
+MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/mmc/host/sdhci-pci.h b/kernel/drivers/mmc/host/sdhci-pci.h
new file mode 100644
index 000000000..1ec684d06
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-pci.h
@@ -0,0 +1,89 @@
+#ifndef __SDHCI_PCI_H
+#define __SDHCI_PCI_H
+
+/*
+ * PCI device IDs
+ */
+
+#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
+#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
+#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
+#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15
+#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16
+#define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50
+#define PCI_DEVICE_ID_INTEL_BSW_EMMC 0x2294
+#define PCI_DEVICE_ID_INTEL_BSW_SDIO 0x2295
+#define PCI_DEVICE_ID_INTEL_BSW_SD 0x2296
+#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
+#define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7
+#define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b
+#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
+#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
+
+/*
+ * PCI registers
+ */
+
+#define PCI_SDHCI_IFPIO 0x00
+#define PCI_SDHCI_IFDMA 0x01
+#define PCI_SDHCI_IFVENDOR 0x02
+
+#define PCI_SLOT_INFO 0x40 /* 8 bits */
+#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7)
+#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07
+
+#define MAX_SLOTS 8
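+
+/*
+ * Illustrative decode of the slot information register using the macros
+ * above -- a minimal sketch for clarity, not part of the original
+ * driver; the helper name is hypothetical. Bits 6:4 carry the slot
+ * count and bits 2:0 the index of the first BAR.
+ */
+static inline void sdhci_pci_decode_slot_info(u8 slot_info,
+					      int *slots, int *first_bar)
+{
+	*slots = PCI_SLOT_INFO_SLOTS(slot_info);
+	*first_bar = slot_info & PCI_SLOT_INFO_FIRST_BAR_MASK;
+}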
+
+struct sdhci_pci_chip;
+struct sdhci_pci_slot;
+
+struct sdhci_pci_fixes {
+ unsigned int quirks;
+ unsigned int quirks2;
+ bool allow_runtime_pm;
+ bool own_cd_for_runtime_pm;
+
+ int (*probe) (struct sdhci_pci_chip *);
+
+ int (*probe_slot) (struct sdhci_pci_slot *);
+ void (*remove_slot) (struct sdhci_pci_slot *, int);
+
+ int (*suspend) (struct sdhci_pci_chip *);
+ int (*resume) (struct sdhci_pci_chip *);
+};
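+
+/*
+ * Usage sketch (hypothetical entry, for illustration only): a fixes
+ * table like the one below is referenced from a pci_device_id's
+ * driver_data field, which the probe path casts back to a
+ * const struct sdhci_pci_fixes pointer.
+ *
+ *	static const struct sdhci_pci_fixes sdhci_foo = {
+ *		.quirks		  = SDHCI_QUIRK_BROKEN_DMA,
+ *		.allow_runtime_pm = true,
+ *	};
+ */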
+
+struct sdhci_pci_slot {
+ struct sdhci_pci_chip *chip;
+ struct sdhci_host *host;
+ struct sdhci_pci_data *data;
+
+ int pci_bar;
+ int rst_n_gpio;
+ int cd_gpio;
+ int cd_irq;
+
+ char *cd_con_id;
+ int cd_idx;
+ bool cd_override_level;
+
+ void (*hw_reset)(struct sdhci_host *host);
+};
+
+struct sdhci_pci_chip {
+ struct pci_dev *pdev;
+
+ unsigned int quirks;
+ unsigned int quirks2;
+ bool allow_runtime_pm;
+ const struct sdhci_pci_fixes *fixes;
+
+ int num_slots; /* Slots on controller */
+ struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */
+};
+
+#endif /* __SDHCI_PCI_H */
diff --git a/kernel/drivers/mmc/host/sdhci-pltfm.c b/kernel/drivers/mmc/host/sdhci-pltfm.c
new file mode 100644
index 000000000..a207f5aaf
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-pltfm.c
@@ -0,0 +1,276 @@
+/*
+ * sdhci-pltfm.c Support for SDHCI platform devices
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * Copyright (c) 2007, 2011 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * SDHCI platform devices
+ *
+ * Inspired by sdhci-pci.c, by Pierre Ossman
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#ifdef CONFIG_PPC
+#include <asm/machdep.h>
+#endif
+#include "sdhci-pltfm.h"
+
+unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk);
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock);
+
+static const struct sdhci_ops sdhci_pltfm_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+#ifdef CONFIG_OF
+static bool sdhci_of_wp_inverted(struct device_node *np)
+{
+ if (of_get_property(np, "sdhci,wp-inverted", NULL) ||
+ of_get_property(np, "wp-inverted", NULL))
+ return true;
+
+ /* Old device trees don't have the wp-inverted property. */
+#ifdef CONFIG_PPC
+ return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
+#else
+ return false;
+#endif /* CONFIG_PPC */
+}
+
+void sdhci_get_of_property(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ const __be32 *clk;
+ u32 bus_width;
+ int size;
+
+ if (of_get_property(np, "sdhci,auto-cmd12", NULL))
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+
+ if (of_get_property(np, "sdhci,1-bit-only", NULL) ||
+ (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
+ bus_width == 1))
+ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+
+ if (sdhci_of_wp_inverted(np))
+ host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
+
+ if (of_get_property(np, "broken-cd", NULL))
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
+ if (of_get_property(np, "no-1-8-v", NULL))
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+
+ if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc"))
+ host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
+
+ if (of_device_is_compatible(np, "fsl,p2020-esdhc") ||
+ of_device_is_compatible(np, "fsl,p1010-esdhc") ||
+ of_device_is_compatible(np, "fsl,t4240-esdhc") ||
+ of_device_is_compatible(np, "fsl,mpc8536-esdhc"))
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
+ clk = of_get_property(np, "clock-frequency", &size);
+ if (clk && size == sizeof(*clk) && *clk)
+ pltfm_host->clock = be32_to_cpup(clk);
+
+ if (of_find_property(np, "keep-power-in-suspend", NULL))
+ host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
+
+ if (of_find_property(np, "enable-sdio-wakeup", NULL))
+ host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+}
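+
+/*
+ * Illustrative device-tree fragment (node name and values hypothetical)
+ * exercising the properties parsed above:
+ *
+ *	sdhci@2e000 {
+ *		compatible = "fsl,p2020-esdhc";
+ *		bus-width = <4>;
+ *		clock-frequency = <100000000>;
+ *		sdhci,wp-inverted;
+ *	};
+ */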
+#else
+void sdhci_get_of_property(struct platform_device *pdev) {}
+#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(sdhci_get_of_property);
+
+struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
+ const struct sdhci_pltfm_data *pdata,
+ size_t priv_size)
+{
+ struct sdhci_host *host;
+ struct resource *iomem;
+ int ret;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (resource_size(iomem) < 0x100)
+ dev_err(&pdev->dev, "Invalid iomem size!\n");
+
+ host = sdhci_alloc_host(&pdev->dev,
+ sizeof(struct sdhci_pltfm_host) + priv_size);
+
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ goto err;
+ }
+
+ host->hw_name = dev_name(&pdev->dev);
+ if (pdata && pdata->ops)
+ host->ops = pdata->ops;
+ else
+ host->ops = &sdhci_pltfm_ops;
+ if (pdata) {
+ host->quirks = pdata->quirks;
+ host->quirks2 = pdata->quirks2;
+ }
+
+ host->irq = platform_get_irq(pdev, 0);
+
+ if (!request_mem_region(iomem->start, resource_size(iomem),
+ mmc_hostname(host->mmc))) {
+ dev_err(&pdev->dev, "cannot request region\n");
+ ret = -EBUSY;
+ goto err_request;
+ }
+
+ host->ioaddr = ioremap(iomem->start, resource_size(iomem));
+ if (!host->ioaddr) {
+ dev_err(&pdev->dev, "failed to remap registers\n");
+ ret = -ENOMEM;
+ goto err_remap;
+ }
+
+ /*
+ * Some platforms need to probe the controller to be able to
+ * determine which caps should be used.
+ */
+ if (host->ops && host->ops->platform_init)
+ host->ops->platform_init(host);
+
+ platform_set_drvdata(pdev, host);
+
+ return host;
+
+err_remap:
+ release_mem_region(iomem->start, resource_size(iomem));
+err_request:
+ sdhci_free_host(host);
+err:
+ dev_err(&pdev->dev, "%s failed %d\n", __func__, ret);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_init);
+
+void sdhci_pltfm_free(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ iounmap(host->ioaddr);
+ release_mem_region(iomem->start, resource_size(iomem));
+ sdhci_free_host(host);
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_free);
+
+int sdhci_pltfm_register(struct platform_device *pdev,
+ const struct sdhci_pltfm_data *pdata,
+ size_t priv_size)
+{
+ struct sdhci_host *host;
+ int ret = 0;
+
+ host = sdhci_pltfm_init(pdev, pdata, priv_size);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ sdhci_get_of_property(pdev);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ sdhci_pltfm_free(pdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_register);
+
+int sdhci_pltfm_unregister(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+
+ sdhci_remove_host(host, dead);
+ clk_disable_unprepare(pltfm_host->clk);
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
+
+#ifdef CONFIG_PM
+int sdhci_pltfm_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ return sdhci_suspend_host(host);
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
+
+int sdhci_pltfm_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ return sdhci_resume_host(host);
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
+
+const struct dev_pm_ops sdhci_pltfm_pmops = {
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+};
+EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
+#endif /* CONFIG_PM */
+
+static int __init sdhci_pltfm_drv_init(void)
+{
+ pr_info("sdhci-pltfm: SDHCI platform and OF driver helper\n");
+
+ return 0;
+}
+module_init(sdhci_pltfm_drv_init);
+
+static void __exit sdhci_pltfm_drv_exit(void)
+{
+}
+module_exit(sdhci_pltfm_drv_exit);
+
+MODULE_DESCRIPTION("SDHCI platform and OF driver helper");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-pltfm.h b/kernel/drivers/mmc/host/sdhci-pltfm.h
new file mode 100644
index 000000000..04bc2481e
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-pltfm.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2010 MontaVista Software, LLC.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DRIVERS_MMC_SDHCI_PLTFM_H
+#define _DRIVERS_MMC_SDHCI_PLTFM_H
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include "sdhci.h"
+
+struct sdhci_pltfm_data {
+ const struct sdhci_ops *ops;
+ unsigned int quirks;
+ unsigned int quirks2;
+};
+
+struct sdhci_pltfm_host {
+ struct clk *clk;
+ void *priv; /* to handle quirks across io-accessor calls */
+
+ /* migrate from sdhci_of_host */
+ unsigned int clock;
+ u16 xfer_mode_shadow;
+
+ unsigned long private[0] ____cacheline_aligned;
+};
+
+#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+/*
+ * These accessors are designed for big endian hosts doing I/O to
+ * little endian controllers incorporating a 32-bit hardware byte swapper.
+ */
+static inline u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg)
+{
+ return in_be32(host->ioaddr + reg);
+}
+
+static inline u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg)
+{
+ return in_be16(host->ioaddr + (reg ^ 0x2));
+}
+
+static inline u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg)
+{
+ return in_8(host->ioaddr + (reg ^ 0x3));
+}
+
+static inline void sdhci_be32bs_writel(struct sdhci_host *host,
+ u32 val, int reg)
+{
+ out_be32(host->ioaddr + reg, val);
+}
+
+static inline void sdhci_be32bs_writew(struct sdhci_host *host,
+ u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int base = reg & ~0x3;
+ int shift = (reg & 0x2) * 8;
+
+ switch (reg) {
+ case SDHCI_TRANSFER_MODE:
+ /*
+		 * Postpone this write; it must be done together with
+		 * the command write below.
+ */
+ pltfm_host->xfer_mode_shadow = val;
+ return;
+ case SDHCI_COMMAND:
+ sdhci_be32bs_writel(host,
+ val << 16 | pltfm_host->xfer_mode_shadow,
+ SDHCI_TRANSFER_MODE);
+ return;
+ }
+ clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift);
+}
+
+static inline void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ int base = reg & ~0x3;
+ int shift = (reg & 0x3) * 8;
+
+	clrsetbits_be32(host->ioaddr + base, 0xff << shift, val << shift);
+}
+#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */
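+
+/*
+ * Worked example of the address munging above (illustration only): with
+ * a 32-bit hardware byte swapper, the 16-bit register at offset 0x2E is
+ * accessed at 0x2E ^ 0x2 = 0x2C, the other halfword of the swapped
+ * 32-bit word, and the 8-bit register at offset 0x29 is accessed at
+ * 0x29 ^ 0x3 = 0x2A.
+ */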
+
+extern void sdhci_get_of_property(struct platform_device *pdev);
+
+extern struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
+ const struct sdhci_pltfm_data *pdata,
+ size_t priv_size);
+extern void sdhci_pltfm_free(struct platform_device *pdev);
+
+extern int sdhci_pltfm_register(struct platform_device *pdev,
+ const struct sdhci_pltfm_data *pdata,
+ size_t priv_size);
+extern int sdhci_pltfm_unregister(struct platform_device *pdev);
+
+extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host);
+
+static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
+{
+ return (void *)host->private;
+}
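+
+/*
+ * Usage sketch (hypothetical driver, for illustration only): glue code
+ * asks sdhci_pltfm_init() to reserve priv_size bytes and later fetches
+ * that area with sdhci_pltfm_priv(). "struct foo_priv" and foo_probe()
+ * are made-up names.
+ *
+ *	struct foo_priv { struct clk *clk; };
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct sdhci_host *host;
+ *		struct foo_priv *priv;
+ *
+ *		host = sdhci_pltfm_init(pdev, NULL, sizeof(*priv));
+ *		if (IS_ERR(host))
+ *			return PTR_ERR(host);
+ *		priv = sdhci_pltfm_priv(sdhci_priv(host));
+ *		...
+ *		return sdhci_add_host(host);
+ *	}
+ */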
+
+#ifdef CONFIG_PM
+extern int sdhci_pltfm_suspend(struct device *dev);
+extern int sdhci_pltfm_resume(struct device *dev);
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
+#else
+#define SDHCI_PLTFM_PMOPS NULL
+#endif
+
+#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/kernel/drivers/mmc/host/sdhci-pxav2.c b/kernel/drivers/mmc/host/sdhci-pxav2.c
new file mode 100644
index 000000000..f98008b5e
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-pxav2.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ * Kevin Wang <dwang4@marvell.com>
+ * Jun Nie <njun@marvell.com>
+ * Qiming Wu <wuqm@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_data/pxa_sdhci.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+#define SD_FIFO_PARAM 0xe0
+#define DIS_PAD_SD_CLK_GATE 0x0400 /* Turn on/off Dynamic SD Clock Gating */
+#define CLK_GATE_ON 0x0200 /* Disable/enable Clock Gate */
+#define CLK_GATE_CTL 0x0100 /* Clock Gate Control */
+#define CLK_GATE_SETTING_BITS (DIS_PAD_SD_CLK_GATE | \
+ CLK_GATE_ON | CLK_GATE_CTL)
+
+#define SD_CLOCK_BURST_SIZE_SETUP 0xe6
+#define SDCLK_SEL_SHIFT 8
+#define SDCLK_SEL_MASK 0x3
+#define SDCLK_DELAY_SHIFT 10
+#define SDCLK_DELAY_MASK 0x3c
+
+#define SD_CE_ATA_2 0xea
+#define MMC_CARD 0x1000
+#define MMC_WIDTH 0x0100
+
+static void pxav2_reset(struct sdhci_host *host, u8 mask)
+{
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+
+ sdhci_reset(host, mask);
+
+ if (mask == SDHCI_RESET_ALL) {
+ u16 tmp = 0;
+
+ /*
+			 * Tune the timing of read data/command when a CRC
+			 * error happens; there is no performance impact.
+ */
+ if (pdata && pdata->clk_delay_sel == 1) {
+ tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+
+ tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT);
+ tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK)
+ << SDCLK_DELAY_SHIFT;
+ tmp &= ~(SDCLK_SEL_MASK << SDCLK_SEL_SHIFT);
+ tmp |= (1 & SDCLK_SEL_MASK) << SDCLK_SEL_SHIFT;
+
+ writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+ }
+
+ if (pdata && (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING)) {
+ tmp = readw(host->ioaddr + SD_FIFO_PARAM);
+ tmp &= ~CLK_GATE_SETTING_BITS;
+ writew(tmp, host->ioaddr + SD_FIFO_PARAM);
+ } else {
+ tmp = readw(host->ioaddr + SD_FIFO_PARAM);
+			tmp |= CLK_GATE_SETTING_BITS;
+ writew(tmp, host->ioaddr + SD_FIFO_PARAM);
+ }
+ }
+}
+
+static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+ u16 tmp;
+
+ ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
+ tmp = readw(host->ioaddr + SD_CE_ATA_2);
+ if (width == MMC_BUS_WIDTH_8) {
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ tmp |= MMC_CARD | MMC_WIDTH;
+ } else {
+ tmp &= ~(MMC_CARD | MMC_WIDTH);
+ if (width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ else
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ }
+ writew(tmp, host->ioaddr + SD_CE_ATA_2);
+ writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
+}
+
+static const struct sdhci_ops pxav2_sdhci_ops = {
+ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = pxav2_mmc_set_bus_width,
+ .reset = pxav2_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id sdhci_pxav2_of_match[] = {
+ {
+ .compatible = "mrvl,pxav2-mmc",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdhci_pxav2_of_match);
+
+static struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev)
+{
+ struct sdhci_pxa_platdata *pdata;
+ struct device_node *np = dev->of_node;
+	u32 bus_width = 0;
+	u32 clk_delay_cycles = 0;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (of_find_property(np, "non-removable", NULL))
+ pdata->flags |= PXA_FLAG_CARD_PERMANENT;
+
+ of_property_read_u32(np, "bus-width", &bus_width);
+ if (bus_width == 8)
+ pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT;
+
+ of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
+ if (clk_delay_cycles > 0) {
+ pdata->clk_delay_sel = 1;
+ pdata->clk_delay_cycles = clk_delay_cycles;
+ }
+
+ return pdata;
+}
+#else
+static inline struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
+static int sdhci_pxav2_probe(struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host = NULL;
+ const struct of_device_id *match;
+
+ int ret;
+ struct clk *clk;
+
+ host = sdhci_pltfm_init(pdev, NULL, 0);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = NULL;
+
+ clk = clk_get(dev, "PXA-SDHCLK");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get io clock\n");
+ ret = PTR_ERR(clk);
+ goto err_clk_get;
+ }
+ pltfm_host->clk = clk;
+ clk_prepare_enable(clk);
+
+ host->quirks = SDHCI_QUIRK_BROKEN_ADMA
+ | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
+ | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
+
+ match = of_match_device(of_match_ptr(sdhci_pxav2_of_match), &pdev->dev);
+	if (match)
+		pdata = pxav2_get_mmc_pdata(dev);
+ if (pdata) {
+ if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
+ /* on-chip device */
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+ }
+
+ /* If slot design supports 8 bit data, indicate this to MMC. */
+ if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
+ if (pdata->quirks)
+ host->quirks |= pdata->quirks;
+ if (pdata->host_caps)
+ host->mmc->caps |= pdata->host_caps;
+ if (pdata->pm_caps)
+ host->mmc->pm_caps |= pdata->pm_caps;
+ }
+
+ host->ops = &pxav2_sdhci_ops;
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add host\n");
+ goto err_add_host;
+ }
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+
+err_add_host:
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+err_clk_get:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_pxav2_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ sdhci_remove_host(host, 1);
+
+ clk_disable_unprepare(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_pxav2_driver = {
+ .driver = {
+ .name = "sdhci-pxav2",
+#ifdef CONFIG_OF
+ .of_match_table = sdhci_pxav2_of_match,
+#endif
+ .pm = SDHCI_PLTFM_PMOPS,
+ },
+ .probe = sdhci_pxav2_probe,
+ .remove = sdhci_pxav2_remove,
+};
+
+module_platform_driver(sdhci_pxav2_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for pxav2");
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
+
diff --git a/kernel/drivers/mmc/host/sdhci-pxav3.c b/kernel/drivers/mmc/host/sdhci-pxav3.c
new file mode 100644
index 000000000..b5103a247
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-pxav3.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ * Kevin Wang <dwang4@marvell.com>
+ * Mingwei Wang <mwwang@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ * Mark Brown <markb@marvell.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/platform_data/pxa_sdhci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/mbus.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+#define PXAV3_RPM_DELAY_MS 50
+
+#define SD_CLOCK_BURST_SIZE_SETUP 0x10A
+#define SDCLK_SEL 0x100
+#define SDCLK_DELAY_SHIFT 9
+#define SDCLK_DELAY_MASK 0x1f
+
+#define SD_CFG_FIFO_PARAM 0x100
+#define SDCFG_GEN_PAD_CLK_ON (1<<6)
+#define SDCFG_GEN_PAD_CLK_CNT_MASK 0xFF
+#define SDCFG_GEN_PAD_CLK_CNT_SHIFT 24
+
+#define SD_SPI_MODE 0x108
+#define SD_CE_ATA_1 0x10C
+
+#define SD_CE_ATA_2 0x10E
+#define SDCE_MISC_INT (1<<2)
+#define SDCE_MISC_INT_EN (1<<1)
+
+struct sdhci_pxa {
+ struct clk *clk_core;
+ struct clk *clk_io;
+ u8 power_mode;
+ void __iomem *sdio3_conf_reg;
+};
+
+/*
+ * These registers are relative to the second register region, for the
+ * MBus bridge.
+ */
+#define SDHCI_WINDOW_CTRL(i) (0x80 + ((i) << 3))
+#define SDHCI_WINDOW_BASE(i) (0x84 + ((i) << 3))
+#define SDHCI_MAX_WIN_NUM 8
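+
+/*
+ * Layout of the window control word programmed by
+ * mv_conf_mbus_windows() below (a reading aid, derived from that
+ * function): bits 31:16 hold (cs->size - 1), bits 15:8 the mbus
+ * attributes, bits 7:4 the DRAM target id and bit 0 the window enable.
+ */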
+
+/*
+ * Fields below belong to SDIO3 Configuration Register (third register
+ * region for the Armada 38x flavor)
+ */
+
+#define SDIO3_CONF_CLK_INV BIT(0)
+#define SDIO3_CONF_SD_FB_CLK BIT(2)
+
+static int mv_conf_mbus_windows(struct platform_device *pdev,
+ const struct mbus_dram_target_info *dram)
+{
+ int i;
+ void __iomem *regs;
+ struct resource *res;
+
+ if (!dram) {
+ dev_err(&pdev->dev, "no mbus dram info\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot get mbus registers\n");
+ return -EINVAL;
+ }
+
+ regs = ioremap(res->start, resource_size(res));
+ if (!regs) {
+ dev_err(&pdev->dev, "cannot map mbus registers\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SDHCI_MAX_WIN_NUM; i++) {
+ writel(0, regs + SDHCI_WINDOW_CTRL(i));
+ writel(0, regs + SDHCI_WINDOW_BASE(i));
+ }
+
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ /* Write size, attributes and target id to control register */
+ writel(((cs->size - 1) & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ (dram->mbus_dram_target_id << 4) | 1,
+ regs + SDHCI_WINDOW_CTRL(i));
+ /* Write base address to base register */
+ writel(cs->base, regs + SDHCI_WINDOW_BASE(i));
+ }
+
+ iounmap(regs);
+
+ return 0;
+}
+
+static int armada_38x_quirks(struct platform_device *pdev,
+ struct sdhci_host *host)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct resource *res;
+
+ host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "conf-sdio3");
+ if (res) {
+ pxa->sdio3_conf_reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pxa->sdio3_conf_reg))
+ return PTR_ERR(pxa->sdio3_conf_reg);
+ } else {
+ /*
+		 * According to erratum 'FE-2946959', both SDR50 and DDR50
+		 * modes require specific clock adjustments in the SDIO3
+		 * Configuration register; if the adjustment is not done,
+		 * remove them from the capabilities.
+ */
+ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
+
+ dev_warn(&pdev->dev, "conf-sdio3 register not found: disabling SDR50 and DDR50 modes.\nConsider updating your dtb\n");
+ }
+
+ /*
+	 * According to erratum 'ERR-7878951', the Armada 38x SDHCI
+	 * controller has different capabilities than the ones shown
+	 * in its registers.
+ */
+ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ if (of_property_read_bool(np, "no-1-8-v")) {
+ host->caps &= ~SDHCI_CAN_VDD_180;
+ host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
+ } else {
+ host->caps &= ~SDHCI_CAN_VDD_330;
+ }
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_USE_SDR50_TUNING);
+
+ return 0;
+}
+
+static void pxav3_reset(struct sdhci_host *host, u8 mask)
+{
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+
+ sdhci_reset(host, mask);
+
+ if (mask == SDHCI_RESET_ALL) {
+ /*
+		 * Tune the timing of read data/command when a CRC error
+		 * happens; there is no performance impact.
+ */
+ if (pdata && 0 != pdata->clk_delay_cycles) {
+ u16 tmp;
+
+ tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+ tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK)
+ << SDCLK_DELAY_SHIFT;
+ tmp |= SDCLK_SEL;
+ writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+ }
+ }
+}
+
+#define MAX_WAIT_COUNT 5
+static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
+ u16 tmp;
+ int count;
+
+ if (pxa->power_mode == MMC_POWER_UP
+ && power_mode == MMC_POWER_ON) {
+
+ dev_dbg(mmc_dev(host->mmc),
+			"%s: slot->power_mode = %d, "
+			"ios->power_mode = %d\n",
+ __func__,
+ pxa->power_mode,
+ power_mode);
+
+		/* request notification when the 74 clocks have been sent */
+ tmp = readw(host->ioaddr + SD_CE_ATA_2);
+ tmp |= SDCE_MISC_INT_EN;
+ writew(tmp, host->ioaddr + SD_CE_ATA_2);
+
+ /* start sending the 74 clocks */
+ tmp = readw(host->ioaddr + SD_CFG_FIFO_PARAM);
+ tmp |= SDCFG_GEN_PAD_CLK_ON;
+ writew(tmp, host->ioaddr + SD_CFG_FIFO_PARAM);
+
+		/*
+		 * slowest speed is about 100 kHz, or 10 usec per clock;
+		 * the 74 clocks therefore take at most ~740 usec
+		 */
+ udelay(740);
+ count = 0;
+
+ while (count++ < MAX_WAIT_COUNT) {
+ if ((readw(host->ioaddr + SD_CE_ATA_2)
+ & SDCE_MISC_INT) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (count == MAX_WAIT_COUNT)
+ dev_warn(mmc_dev(host->mmc), "74 clock interrupt not cleared\n");
+
+ /* clear the interrupt bit if posted */
+ tmp = readw(host->ioaddr + SD_CE_ATA_2);
+ tmp |= SDCE_MISC_INT;
+ writew(tmp, host->ioaddr + SD_CE_ATA_2);
+ }
+ pxa->power_mode = power_mode;
+}
+
+static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
+ u16 ctrl_2;
+
+ /*
+	 * Set V18_EN -- UHS modes do not work without it.
+	 * This does not change the signaling voltage.
+ */
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ switch (uhs) {
+ case MMC_TIMING_UHS_SDR12:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ case MMC_TIMING_UHS_DDR50:
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
+ break;
+ }
+
+ /*
+ * Update SDIO3 Configuration register according to erratum
+ * FE-2946959
+ */
+ if (pxa->sdio3_conf_reg) {
+ u8 reg_val = readb(pxa->sdio3_conf_reg);
+
+ if (uhs == MMC_TIMING_UHS_SDR50 ||
+ uhs == MMC_TIMING_UHS_DDR50) {
+ reg_val &= ~SDIO3_CONF_CLK_INV;
+ reg_val |= SDIO3_CONF_SD_FB_CLK;
+ } else {
+ reg_val |= SDIO3_CONF_CLK_INV;
+ reg_val &= ~SDIO3_CONF_SD_FB_CLK;
+ }
+ writeb(reg_val, pxa->sdio3_conf_reg);
+ }
+
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+ dev_dbg(mmc_dev(host->mmc),
+ "%s uhs = %d, ctrl_2 = %04X\n",
+ __func__, uhs, ctrl_2);
+}
+
+static const struct sdhci_ops pxav3_sdhci_ops = {
+ .set_clock = sdhci_set_clock,
+ .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = pxav3_reset,
+ .set_uhs_signaling = pxav3_set_uhs_signaling,
+};
+
+static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
+ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
+ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
+ | SDHCI_QUIRK_32BIT_ADMA_SIZE
+ | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .ops = &pxav3_sdhci_ops,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id sdhci_pxav3_of_match[] = {
+ {
+ .compatible = "mrvl,pxav3-mmc",
+ },
+ {
+ .compatible = "marvell,armada-380-sdhci",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdhci_pxav3_of_match);
+
+static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
+{
+ struct sdhci_pxa_platdata *pdata;
+ struct device_node *np = dev->of_node;
+ u32 clk_delay_cycles;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (!of_property_read_u32(np, "mrvl,clk-delay-cycles",
+ &clk_delay_cycles))
+ pdata->clk_delay_cycles = clk_delay_cycles;
+
+ return pdata;
+}
+#else
+static inline struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
+static int sdhci_pxav3_probe(struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct sdhci_host *host = NULL;
+ struct sdhci_pxa *pxa = NULL;
+ const struct of_device_id *match;
+ int ret;
+
+ pxa = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_pxa), GFP_KERNEL);
+ if (!pxa)
+ return -ENOMEM;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata, 0);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = pxa;
+
+ pxa->clk_io = devm_clk_get(dev, "io");
+ if (IS_ERR(pxa->clk_io))
+ pxa->clk_io = devm_clk_get(dev, NULL);
+ if (IS_ERR(pxa->clk_io)) {
+ dev_err(dev, "failed to get io clock\n");
+ ret = PTR_ERR(pxa->clk_io);
+ goto err_clk_get;
+ }
+ pltfm_host->clk = pxa->clk_io;
+ clk_prepare_enable(pxa->clk_io);
+
+ pxa->clk_core = devm_clk_get(dev, "core");
+ if (!IS_ERR(pxa->clk_core))
+ clk_prepare_enable(pxa->clk_core);
+
+ /* enable 1/8V DDR capable */
+ host->mmc->caps |= MMC_CAP_1_8V_DDR;
+
+ if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
+ ret = armada_38x_quirks(pdev, host);
+ if (ret < 0)
+ goto err_clk_get;
+ ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
+ if (ret < 0)
+ goto err_mbus_win;
+ }
+
+ match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
+ if (match) {
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err_of_parse;
+ sdhci_get_of_property(pdev);
+ pdata = pxav3_get_mmc_pdata(dev);
+ } else if (pdata) {
+ /* on-chip device */
+ if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+
+ /* If slot design supports 8 bit data, indicate this to MMC. */
+ if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
+ if (pdata->quirks)
+ host->quirks |= pdata->quirks;
+ if (pdata->quirks2)
+ host->quirks2 |= pdata->quirks2;
+ if (pdata->host_caps)
+ host->mmc->caps |= pdata->host_caps;
+ if (pdata->host_caps2)
+ host->mmc->caps2 |= pdata->host_caps2;
+ if (pdata->pm_caps)
+ host->mmc->pm_caps |= pdata->pm_caps;
+
+ if (gpio_is_valid(pdata->ext_cd_gpio)) {
+ ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio,
+ 0);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to allocate card detect gpio\n");
+ goto err_cd_req;
+ }
+ }
+ }
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_suspend_ignore_children(&pdev->dev, 1);
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add host\n");
+ goto err_add_host;
+ }
+
+ platform_set_drvdata(pdev, host);
+
+ if (host->mmc->pm_caps & MMC_PM_KEEP_POWER) {
+ device_init_wakeup(&pdev->dev, 1);
+ host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ } else {
+ device_init_wakeup(&pdev->dev, 0);
+ }
+
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+
+err_add_host:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+err_of_parse:
+err_cd_req:
+err_mbus_win:
+ clk_disable_unprepare(pxa->clk_io);
+ clk_disable_unprepare(pxa->clk_core);
+err_clk_get:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_pxav3_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
+
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ sdhci_remove_host(host, 1);
+
+ clk_disable_unprepare(pxa->clk_io);
+ clk_disable_unprepare(pxa->clk_core);
+
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_pxav3_suspend(struct device *dev)
+{
+ int ret;
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ ret = sdhci_suspend_host(host);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int sdhci_pxav3_resume(struct device *dev)
+{
+ int ret;
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ ret = sdhci_resume_host(host);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int sdhci_pxav3_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
+ int ret;
+
+ ret = sdhci_runtime_suspend_host(host);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(pxa->clk_io);
+ if (!IS_ERR(pxa->clk_core))
+ clk_disable_unprepare(pxa->clk_core);
+
+ return 0;
+}
+
+static int sdhci_pxav3_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
+
+ clk_prepare_enable(pxa->clk_io);
+ if (!IS_ERR(pxa->clk_core))
+ clk_prepare_enable(pxa->clk_core);
+
+ return sdhci_runtime_resume_host(host);
+}
+#endif
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops sdhci_pxav3_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume)
+ SET_RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend,
+ sdhci_pxav3_runtime_resume, NULL)
+};
+
+#define SDHCI_PXAV3_PMOPS (&sdhci_pxav3_pmops)
+
+#else
+#define SDHCI_PXAV3_PMOPS NULL
+#endif
+
+static struct platform_driver sdhci_pxav3_driver = {
+ .driver = {
+ .name = "sdhci-pxav3",
+#ifdef CONFIG_OF
+ .of_match_table = sdhci_pxav3_of_match,
+#endif
+ .pm = SDHCI_PXAV3_PMOPS,
+ },
+ .probe = sdhci_pxav3_probe,
+ .remove = sdhci_pxav3_remove,
+};
+
+module_platform_driver(sdhci_pxav3_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for pxav3");
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
+
diff --git a/kernel/drivers/mmc/host/sdhci-s3c-regs.h b/kernel/drivers/mmc/host/sdhci-s3c-regs.h
new file mode 100644
index 000000000..e34049ad4
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-s3c-regs.h
@@ -0,0 +1,87 @@
+/* linux/arch/arm/plat-s3c/include/plat/regs-sdhci.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C Platform - SDHCI (HSMMC) register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __PLAT_S3C_SDHCI_REGS_H
+#define __PLAT_S3C_SDHCI_REGS_H __FILE__
+
+#define S3C_SDHCI_CONTROL2 (0x80)
+#define S3C_SDHCI_CONTROL3 (0x84)
+#define S3C64XX_SDHCI_CONTROL4 (0x8C)
+
+#define S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR (1 << 31)
+#define S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK (1 << 30)
+#define S3C_SDHCI_CTRL2_CDINVRXD3 (1 << 29)
+#define S3C_SDHCI_CTRL2_SLCARDOUT (1 << 28)
+
+#define S3C_SDHCI_CTRL2_FLTCLKSEL_MASK (0xf << 24)
+#define S3C_SDHCI_CTRL2_FLTCLKSEL_SHIFT (24)
+#define S3C_SDHCI_CTRL2_FLTCLKSEL(_x) ((_x) << 24)
+
+#define S3C_SDHCI_CTRL2_LVLDAT_MASK (0xff << 16)
+#define S3C_SDHCI_CTRL2_LVLDAT_SHIFT (16)
+#define S3C_SDHCI_CTRL2_LVLDAT(_x) ((_x) << 16)
+
+#define S3C_SDHCI_CTRL2_ENFBCLKTX (1 << 15)
+#define S3C_SDHCI_CTRL2_ENFBCLKRX (1 << 14)
+#define S3C_SDHCI_CTRL2_SDCDSEL (1 << 13)
+#define S3C_SDHCI_CTRL2_SDSIGPC (1 << 12)
+#define S3C_SDHCI_CTRL2_ENBUSYCHKTXSTART (1 << 11)
+
+#define S3C_SDHCI_CTRL2_DFCNT_MASK (0x3 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_SHIFT (9)
+#define S3C_SDHCI_CTRL2_DFCNT_NONE (0x0 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_4SDCLK (0x1 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_16SDCLK (0x2 << 9)
+#define S3C_SDHCI_CTRL2_DFCNT_64SDCLK (0x3 << 9)
+
+#define S3C_SDHCI_CTRL2_ENCLKOUTHOLD (1 << 8)
+#define S3C_SDHCI_CTRL2_RWAITMODE (1 << 7)
+#define S3C_SDHCI_CTRL2_DISBUFRD (1 << 6)
+#define S3C_SDHCI_CTRL2_SELBASECLK_MASK (0x3 << 4)
+#define S3C_SDHCI_CTRL2_SELBASECLK_SHIFT (4)
+#define S3C_SDHCI_CTRL2_PWRSYNC (1 << 3)
+#define S3C_SDHCI_CTRL2_ENCLKOUTMSKCON (1 << 1)
+#define S3C_SDHCI_CTRL2_HWINITFIN (1 << 0)
+
+#define S3C_SDHCI_CTRL3_FCSEL3 (1 << 31)
+#define S3C_SDHCI_CTRL3_FCSEL2 (1 << 23)
+#define S3C_SDHCI_CTRL3_FCSEL1 (1 << 15)
+#define S3C_SDHCI_CTRL3_FCSEL0 (1 << 7)
+
+#define S3C_SDHCI_CTRL3_FIA3_MASK (0x7f << 24)
+#define S3C_SDHCI_CTRL3_FIA3_SHIFT (24)
+#define S3C_SDHCI_CTRL3_FIA3(_x) ((_x) << 24)
+
+#define S3C_SDHCI_CTRL3_FIA2_MASK (0x7f << 16)
+#define S3C_SDHCI_CTRL3_FIA2_SHIFT (16)
+#define S3C_SDHCI_CTRL3_FIA2(_x) ((_x) << 16)
+
+#define S3C_SDHCI_CTRL3_FIA1_MASK (0x7f << 8)
+#define S3C_SDHCI_CTRL3_FIA1_SHIFT (8)
+#define S3C_SDHCI_CTRL3_FIA1(_x) ((_x) << 8)
+
+#define S3C_SDHCI_CTRL3_FIA0_MASK (0x7f << 0)
+#define S3C_SDHCI_CTRL3_FIA0_SHIFT (0)
+#define S3C_SDHCI_CTRL3_FIA0(_x) ((_x) << 0)
+
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_MASK (0x3 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_SHIFT (16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_2mA (0x0 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_4mA (0x1 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_7mA (0x2 << 16)
+#define S3C64XX_SDHCI_CONTROL4_DRIVE_9mA (0x3 << 16)
+
+#define S3C64XX_SDHCI_CONTROL4_BUSY (1)
+
+#endif /* __PLAT_S3C_SDHCI_REGS_H */
diff --git a/kernel/drivers/mmc/host/sdhci-s3c.c b/kernel/drivers/mmc/host/sdhci-s3c.c
new file mode 100644
index 000000000..c6d2dd731
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-s3c.c
@@ -0,0 +1,777 @@
+/* linux/drivers/mmc/host/sdhci-s3c.c
+ *
+ * Copyright 2008 Openmoko Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * SDHCI (HSMMC) support for Samsung SoC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/mmc-sdhci-s3c.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mmc/host.h>
+
+#include "sdhci-s3c-regs.h"
+#include "sdhci.h"
+
+#define MAX_BUS_CLK (4)
+
+/**
+ * struct sdhci_s3c - S3C SDHCI instance
+ * @host: The SDHCI host created
+ * @pdev:	The platform device we were created from.
+ * @ioarea: The resource created when we claimed the IO area.
+ * @pdata: The platform data for this controller.
+ * @cur_clk: The index of the current bus clock.
+ * @clk_io: The clock for the internal bus interface.
+ * @clk_bus: The clocks that are available for the SD/MMC bus clock.
+ */
+struct sdhci_s3c {
+ struct sdhci_host *host;
+ struct platform_device *pdev;
+ struct resource *ioarea;
+ struct s3c_sdhci_platdata *pdata;
+ int cur_clk;
+ int ext_cd_irq;
+ int ext_cd_gpio;
+
+ struct clk *clk_io;
+ struct clk *clk_bus[MAX_BUS_CLK];
+ unsigned long clk_rates[MAX_BUS_CLK];
+
+ bool no_divider;
+};
+
+/**
+ * struct sdhci_s3c_driver_data - S3C SDHCI platform specific driver data
+ * @sdhci_quirks: sdhci host specific quirks.
+ *
+ * Specifies platform specific configuration of sdhci controller.
+ * Note: a dedicated structure is used for driver-specific platform data
+ * to allow future expansion.
+ */
+struct sdhci_s3c_drv_data {
+ unsigned int sdhci_quirks;
+ bool no_divider;
+};
+
+static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
+{
+ return sdhci_priv(host);
+}
+
+/**
+ * sdhci_s3c_get_max_clk - callback to get maximum clock frequency.
+ * @host: The SDHCI host instance.
+ *
+ * Callback to return the maximum clock rate achievable by the controller.
+*/
+static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ unsigned long rate, max = 0;
+ int src;
+
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ rate = ourhost->clk_rates[src];
+ if (rate > max)
+ max = rate;
+ }
+
+ return max;
+}
+
+/**
+ * sdhci_s3c_consider_clock - consider one of the bus clocks for the current setting
+ * @ourhost: Our SDHCI instance.
+ * @src: The source clock index.
+ * @wanted: The clock frequency wanted.
+ */
+static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
+ unsigned int src,
+ unsigned int wanted)
+{
+ unsigned long rate;
+ struct clk *clksrc = ourhost->clk_bus[src];
+ int shift;
+
+ if (IS_ERR(clksrc))
+ return UINT_MAX;
+
+ /*
+	 * If the controller uses non-standard clock division, find the
+	 * best clock speed possible with the selected clock source and
+	 * skip the divider.
+ */
+ if (ourhost->no_divider) {
+ rate = clk_round_rate(clksrc, wanted);
+ return wanted - rate;
+ }
+
+ rate = ourhost->clk_rates[src];
+
+ for (shift = 0; shift <= 8; ++shift) {
+ if ((rate >> shift) <= wanted)
+ break;
+ }
+
+ if (shift > 8) {
+ dev_dbg(&ourhost->pdev->dev,
+ "clk %d: rate %ld, min rate %lu > wanted %u\n",
+ src, rate, rate / 256, wanted);
+ return UINT_MAX;
+ }
+
+ dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
+ src, rate, wanted, rate >> shift);
+
+ return wanted - (rate >> shift);
+}
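+
+/*
+ * Worked example (numbers hypothetical): for a 133 MHz source and a
+ * wanted clock of 25 MHz, the loop above stops at shift = 3 because
+ * 133 MHz >> 3 = 16.625 MHz is the first rate not above 25 MHz, so the
+ * reported deviation is 25 - 16.625 = 8.375 MHz.
+ */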
+
+/**
+ * sdhci_s3c_set_clock - callback on clock change
+ * @host: The SDHCI host being changed
+ * @clock: The clock rate being requested.
+ *
+ * When the card's clock is going to be changed, look at the new frequency
+ * and find the best clock source to go with it.
+*/
+static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ unsigned int best = UINT_MAX;
+ unsigned int delta;
+ int best_src = 0;
+ int src;
+ u32 ctrl;
+
+ host->mmc->actual_clock = 0;
+
+ /* don't bother if the clock is going off. */
+ if (clock == 0) {
+ sdhci_set_clock(host, clock);
+ return;
+ }
+
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ delta = sdhci_s3c_consider_clock(ourhost, src, clock);
+ if (delta < best) {
+ best = delta;
+ best_src = src;
+ }
+ }
+
+ dev_dbg(&ourhost->pdev->dev,
+ "selected source %d, clock %d, delta %d\n",
+ best_src, clock, best);
+
+ /* select the new clock source */
+ if (ourhost->cur_clk != best_src) {
+ struct clk *clk = ourhost->clk_bus[best_src];
+
+ clk_prepare_enable(clk);
+ if (ourhost->cur_clk >= 0)
+ clk_disable_unprepare(
+ ourhost->clk_bus[ourhost->cur_clk]);
+
+ ourhost->cur_clk = best_src;
+ host->max_clk = ourhost->clk_rates[best_src];
+ }
+
+ /* turn clock off to card before changing clock source */
+ writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
+
+ ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
+ ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+ ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
+ writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
+
+ /* reprogram default hardware configuration */
+ writel(S3C64XX_SDHCI_CONTROL4_DRIVE_9mA,
+ host->ioaddr + S3C64XX_SDHCI_CONTROL4);
+
+ ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
+ ctrl |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR |
+ S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK |
+ S3C_SDHCI_CTRL2_ENFBCLKRX |
+ S3C_SDHCI_CTRL2_DFCNT_NONE |
+ S3C_SDHCI_CTRL2_ENCLKOUTHOLD);
+ writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
+
+ /* reconfigure the controller for new clock rate */
+ ctrl = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
+ if (clock < 25 * 1000000)
+ ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2);
+ writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3);
+
+ sdhci_set_clock(host, clock);
+}
+
+/**
+ * sdhci_s3c_get_min_clock - callback to get minimal supported clock value
+ * @host: The SDHCI host being queried
+ *
+ * To initialize the mmc host properly, a minimal clock value is needed.
+ * For high system bus clock values the standard formula (base clock
+ * divided by at most 256) gives values outside the allowed range. The
+ * clock can still be set to lower values if a clock source other than
+ * the system bus is selected.
+ */
+static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ unsigned long rate, min = ULONG_MAX;
+ int src;
+
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ rate = ourhost->clk_rates[src] / 256;
+ if (!rate)
+ continue;
+ if (rate < min)
+ min = rate;
+ }
+
+ return min;
+}
+
+/* sdhci_cmu_get_max_clk - callback to get maximum clock frequency.*/
+static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ unsigned long rate, max = 0;
+ int src;
+
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ struct clk *clk;
+
+ clk = ourhost->clk_bus[src];
+ if (IS_ERR(clk))
+ continue;
+
+ rate = clk_round_rate(clk, ULONG_MAX);
+ if (rate > max)
+ max = rate;
+ }
+
+ return max;
+}
+
+/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
+static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ unsigned long rate, min = ULONG_MAX;
+ int src;
+
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ struct clk *clk;
+
+ clk = ourhost->clk_bus[src];
+ if (IS_ERR(clk))
+ continue;
+
+ rate = clk_round_rate(clk, 0);
+ if (rate < min)
+ min = rate;
+ }
+
+ return min;
+}
+
+/* sdhci_cmu_set_clock - callback on clock change.*/
+static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ struct device *dev = &ourhost->pdev->dev;
+ unsigned long timeout;
+ u16 clk = 0;
+ int ret;
+
+ host->mmc->actual_clock = 0;
+
+ /* If the clock is going off, set to 0 at clock control register */
+ if (clock == 0) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ return;
+ }
+
+ sdhci_s3c_set_clock(host, clock);
+
+ /* Reset SD Clock Enable */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ spin_unlock_irq(&host->lock);
+ ret = clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
+ spin_lock_irq(&host->lock);
+ if (ret != 0) {
+ dev_err(dev, "%s: failed to set clock rate %uHz\n",
+ mmc_hostname(host->mmc), clock);
+ return;
+ }
+
+ clk = SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ /* Wait max 20 ms */
+ timeout = 20;
+ while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+ & SDHCI_CLOCK_INT_STABLE)) {
+ if (timeout == 0) {
+ dev_err(dev, "%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+}
+
+/**
+ * sdhci_s3c_set_bus_width - support 8bit buswidth
+ * @host: The SDHCI host being queried
+ * @width: MMC_BUS_WIDTH_ macro for the bus width being requested
+ *
+ * The controller supports an 8-bit bus width but is not a v3
+ * controller, so we provide our own bus-width callback to enable it.
+ */
+static void sdhci_s3c_set_bus_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+ switch (width) {
+ case MMC_BUS_WIDTH_8:
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ break;
+ case MMC_BUS_WIDTH_4:
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ break;
+ default:
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ break;
+ }
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static struct sdhci_ops sdhci_s3c_ops = {
+ .get_max_clock = sdhci_s3c_get_max_clk,
+ .set_clock = sdhci_s3c_set_clock,
+ .get_min_clock = sdhci_s3c_get_min_clock,
+ .set_bus_width = sdhci_s3c_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+#ifdef CONFIG_OF
+static int sdhci_s3c_parse_dt(struct device *dev,
+ struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)
+{
+ struct device_node *node = dev->of_node;
+ u32 max_width;
+
+ /* if the bus-width property is not specified, assume width as 1 */
+ if (of_property_read_u32(node, "bus-width", &max_width))
+ max_width = 1;
+ pdata->max_width = max_width;
+
+ /* get the card detection method */
+ if (of_get_property(node, "broken-cd", NULL)) {
+ pdata->cd_type = S3C_SDHCI_CD_NONE;
+ return 0;
+ }
+
+ if (of_get_property(node, "non-removable", NULL)) {
+ pdata->cd_type = S3C_SDHCI_CD_PERMANENT;
+ return 0;
+ }
+
+ if (of_get_named_gpio(node, "cd-gpios", 0))
+ return 0;
+
+ /* assuming internal card detect that will be configured by pinctrl */
+ pdata->cd_type = S3C_SDHCI_CD_INTERNAL;
+ return 0;
+}
+#else
+static int sdhci_s3c_parse_dt(struct device *dev,
+ struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)
+{
+ return -EINVAL;
+}
+#endif
+
+static const struct of_device_id sdhci_s3c_dt_match[];
+
+static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data(
+ struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(sdhci_s3c_dt_match, pdev->dev.of_node);
+ return (struct sdhci_s3c_drv_data *)match->data;
+ }
+#endif
+ return (struct sdhci_s3c_drv_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
+static int sdhci_s3c_probe(struct platform_device *pdev)
+{
+ struct s3c_sdhci_platdata *pdata;
+ struct sdhci_s3c_drv_data *drv_data;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host;
+ struct sdhci_s3c *sc;
+ struct resource *res;
+ int ret, irq, ptr, clks;
+
+ if (!pdev->dev.platform_data && !pdev->dev.of_node) {
+ dev_err(dev, "no device data specified\n");
+ return -ENOENT;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no irq specified\n");
+ return irq;
+ }
+
+ host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
+ if (IS_ERR(host)) {
+ dev_err(dev, "sdhci_alloc_host() failed\n");
+ return PTR_ERR(host);
+ }
+ sc = sdhci_priv(host);
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto err_pdata_io_clk;
+ }
+
+ if (pdev->dev.of_node) {
+ ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata);
+ if (ret)
+ goto err_pdata_io_clk;
+ } else {
+ memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
+ sc->ext_cd_gpio = -1; /* invalid gpio number */
+ }
+
+ drv_data = sdhci_s3c_get_driver_data(pdev);
+
+ sc->host = host;
+ sc->pdev = pdev;
+ sc->pdata = pdata;
+ sc->cur_clk = -1;
+
+ platform_set_drvdata(pdev, host);
+
+ sc->clk_io = devm_clk_get(dev, "hsmmc");
+ if (IS_ERR(sc->clk_io)) {
+ dev_err(dev, "failed to get io clock\n");
+ ret = PTR_ERR(sc->clk_io);
+ goto err_pdata_io_clk;
+ }
+
+ /* enable the local io clock and keep it running for the moment. */
+ clk_prepare_enable(sc->clk_io);
+
+ for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
+ char name[14];
+
+ snprintf(name, 14, "mmc_busclk.%d", ptr);
+ sc->clk_bus[ptr] = devm_clk_get(dev, name);
+ if (IS_ERR(sc->clk_bus[ptr]))
+ continue;
+
+ clks++;
+ sc->clk_rates[ptr] = clk_get_rate(sc->clk_bus[ptr]);
+
+ dev_info(dev, "clock source %d: %s (%ld Hz)\n",
+ ptr, name, sc->clk_rates[ptr]);
+ }
+
+ if (clks == 0) {
+ dev_err(dev, "failed to find any bus clocks\n");
+ ret = -ENOENT;
+ goto err_no_busclks;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->ioaddr)) {
+ ret = PTR_ERR(host->ioaddr);
+ goto err_req_regs;
+ }
+
+ /* Ensure we have minimal gpio selected CMD/CLK/Detect */
+ if (pdata->cfg_gpio)
+ pdata->cfg_gpio(pdev, pdata->max_width);
+
+ host->hw_name = "samsung-hsmmc";
+ host->ops = &sdhci_s3c_ops;
+ host->quirks = 0;
+ host->quirks2 = 0;
+ host->irq = irq;
+
+ /* Setup quirks for the controller */
+ host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
+ host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
+ if (drv_data) {
+ host->quirks |= drv_data->sdhci_quirks;
+ sc->no_divider = drv_data->no_divider;
+ }
+
+#ifndef CONFIG_MMC_SDHCI_S3C_DMA
+
+ /* we currently see overruns on errors, so disable the SDMA
+ * support as well. */
+ host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
+
+#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
+
+	/* It seems we do not get a DATA transfer complete on non-busy
+ * transfers, not sure if this is a problem with this specific
+ * SDHCI block, or a missing configuration that needs to be set. */
+ host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
+
+ /* This host supports the Auto CMD12 */
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+
+ /* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */
+ host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC;
+
+ if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
+ pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
+ if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
+ host->mmc->caps = MMC_CAP_NONREMOVABLE;
+
+ switch (pdata->max_width) {
+ case 8:
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
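+		/* fall through: an 8-bit capable slot also supports 4-bit */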
+ case 4:
+ host->mmc->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ }
+
+ if (pdata->pm_caps)
+ host->mmc->pm_caps |= pdata->pm_caps;
+
+ host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE);
+
+ /* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
+ host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
+
+ /*
+ * If controller does not have internal clock divider,
+ * we can use overriding functions instead of default.
+ */
+ if (sc->no_divider) {
+ sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+ sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+ sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+ }
+
+	/* Add any additional host capabilities, if specified */
+ if (pdata->host_caps)
+ host->mmc->caps |= pdata->host_caps;
+
+ if (pdata->host_caps2)
+ host->mmc->caps2 |= pdata->host_caps2;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_suspend_ignore_children(&pdev->dev, 1);
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err_req_regs;
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(dev, "sdhci_add_host() failed\n");
+ goto err_req_regs;
+ }
+
+#ifdef CONFIG_PM
+ if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
+ clk_disable_unprepare(sc->clk_io);
+#endif
+ return 0;
+
+ err_req_regs:
+ pm_runtime_disable(&pdev->dev);
+
+ err_no_busclks:
+ clk_disable_unprepare(sc->clk_io);
+
+ err_pdata_io_clk:
+ sdhci_free_host(host);
+
+ return ret;
+}
+
+static int sdhci_s3c_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_s3c *sc = sdhci_priv(host);
+
+ if (sc->ext_cd_irq)
+ free_irq(sc->ext_cd_irq, sc);
+
+#ifdef CONFIG_PM
+ if (sc->pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
+ clk_prepare_enable(sc->clk_io);
+#endif
+ sdhci_remove_host(host, 1);
+
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(sc->clk_io);
+
+ sdhci_free_host(host);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_s3c_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ return sdhci_suspend_host(host);
+}
+
+static int sdhci_s3c_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ return sdhci_resume_host(host);
+}
+#endif
+
+#ifdef CONFIG_PM
+static int sdhci_s3c_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ struct clk *busclk = ourhost->clk_io;
+ int ret;
+
+ ret = sdhci_runtime_suspend_host(host);
+
+ if (ourhost->cur_clk >= 0)
+ clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]);
+ clk_disable_unprepare(busclk);
+ return ret;
+}
+
+static int sdhci_s3c_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ struct clk *busclk = ourhost->clk_io;
+ int ret;
+
+ clk_prepare_enable(busclk);
+ if (ourhost->cur_clk >= 0)
+ clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]);
+ ret = sdhci_runtime_resume_host(host);
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops sdhci_s3c_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume)
+ SET_RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume,
+ NULL)
+};
+
+#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
+
+#else
+#define SDHCI_S3C_PMOPS NULL
+#endif
+
+#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
+static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
+ .no_divider = true,
+};
+#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)
+#else
+#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL)
+#endif
+
+static struct platform_device_id sdhci_s3c_driver_ids[] = {
+ {
+ .name = "s3c-sdhci",
+ .driver_data = (kernel_ulong_t)NULL,
+ }, {
+ .name = "exynos4-sdhci",
+ .driver_data = EXYNOS4_SDHCI_DRV_DATA,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id sdhci_s3c_dt_match[] = {
+ { .compatible = "samsung,s3c6410-sdhci", },
+ { .compatible = "samsung,exynos4210-sdhci",
+ .data = (void *)EXYNOS4_SDHCI_DRV_DATA },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match);
+#endif
+
+static struct platform_driver sdhci_s3c_driver = {
+ .probe = sdhci_s3c_probe,
+ .remove = sdhci_s3c_remove,
+ .id_table = sdhci_s3c_driver_ids,
+ .driver = {
+ .name = "s3c-sdhci",
+ .of_match_table = of_match_ptr(sdhci_s3c_dt_match),
+ .pm = SDHCI_S3C_PMOPS,
+ },
+};
+
+module_platform_driver(sdhci_s3c_driver);
+
+MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
+MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:s3c-sdhci");
diff --git a/kernel/drivers/mmc/host/sdhci-sirf.c b/kernel/drivers/mmc/host/sdhci-sirf.c
new file mode 100644
index 000000000..32848eb7a
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-sirf.c
@@ -0,0 +1,252 @@
+/*
+ * SDHCI support for SiRF primaII and marco SoCs
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/mmc/slot-gpio.h>
+#include "sdhci-pltfm.h"
+
+#define SDHCI_CLK_DELAY_SETTING 0x4C
+#define SDHCI_SIRF_8BITBUS BIT(3)
+#define SIRF_TUNING_COUNT 128
+
+struct sdhci_sirf_priv {
+ int gpio_cd;
+};
+
+static void sdhci_sirf_set_bus_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl &= ~(SDHCI_CTRL_4BITBUS | SDHCI_SIRF_8BITBUS);
+
+ /*
+	 * The CSR atlas7 and prima2 SD host version is not 3.0:
+	 * the 8-bit bus width enable bit of CSR SD hosts is bit 3,
+	 * while standard hosts use bit 5.
+ */
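+	/*
+	 * E.g. an 8-bit request sets bit 3 (0x08) in HOST_CONTROL here,
+	 * where a standard SDHCI 3.0 host would set SDHCI_CTRL_8BITBUS
+	 * (bit 5, 0x20).
+	 */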
+ if (width == MMC_BUS_WIDTH_8)
+ ctrl |= SDHCI_SIRF_8BITBUS;
+ else if (width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static int sdhci_sirf_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ int tuning_seq_cnt = 3;
+ u8 phase, tuned_phases[SIRF_TUNING_COUNT];
+ u8 tuned_phase_cnt = 0;
+ int rc = 0, longest_range = 0;
+ int start = -1, end = 0, tuning_value = -1, range = 0;
+ u16 clock_setting;
+ struct mmc_host *mmc = host->mmc;
+
+ clock_setting = sdhci_readw(host, SDHCI_CLK_DELAY_SETTING);
+ clock_setting &= ~0x3fff;
+
+retry:
+ phase = 0;
+ do {
+ sdhci_writel(host,
+ clock_setting | phase,
+ SDHCI_CLK_DELAY_SETTING);
+
+ if (!mmc_send_tuning(mmc)) {
+ /* Tuning is successful at this tuning point */
+ tuned_phases[tuned_phase_cnt++] = phase;
+ dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
+ mmc_hostname(mmc), phase);
+ if (start == -1)
+ start = phase;
+ end = phase;
+ range++;
+ if (phase == (SIRF_TUNING_COUNT - 1)
+ && range > longest_range)
+ tuning_value = (start + end) / 2;
+ } else {
+ dev_dbg(mmc_dev(mmc), "%s: Found bad phase = %d\n",
+ mmc_hostname(mmc), phase);
+ if (range > longest_range) {
+ tuning_value = (start + end) / 2;
+ longest_range = range;
+ }
+ start = -1;
+ end = range = 0;
+ }
+ } while (++phase < ARRAY_SIZE(tuned_phases));
+
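+	/*
+	 * Example: if phases 40..80 pass and form the longest passing
+	 * window, tuning_value is (40 + 80) / 2 = 60, the midpoint of
+	 * that window.
+	 */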
+ if (tuned_phase_cnt && tuning_value > 0) {
+ /*
+ * Finally set the selected phase in delay
+ * line hw block.
+ */
+ phase = tuning_value;
+ sdhci_writel(host,
+ clock_setting | phase,
+ SDHCI_CLK_DELAY_SETTING);
+
+ dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
+ mmc_hostname(mmc), phase);
+ } else {
+ if (--tuning_seq_cnt)
+ goto retry;
+ /* Tuning failed */
+ dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
+ mmc_hostname(mmc));
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+static struct sdhci_ops sdhci_sirf_ops = {
+ .platform_execute_tuning = sdhci_sirf_execute_tuning,
+ .set_clock = sdhci_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_sirf_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static struct sdhci_pltfm_data sdhci_sirf_pdata = {
+ .ops = &sdhci_sirf_ops,
+ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+ SDHCI_QUIRK_DELAY_AFTER_POWER,
+};
+
+static int sdhci_sirf_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_sirf_priv *priv;
+ struct clk *clk;
+ int gpio_cd;
+ int ret;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "unable to get clock");
+ return PTR_ERR(clk);
+ }
+
+ if (pdev->dev.of_node)
+ gpio_cd = of_get_named_gpio(pdev->dev.of_node, "cd-gpios", 0);
+ else
+ gpio_cd = -EINVAL;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_sirf_pdata, sizeof(struct sdhci_sirf_priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->clk = clk;
+ priv = sdhci_pltfm_priv(pltfm_host);
+ priv->gpio_cd = gpio_cd;
+
+ sdhci_get_of_property(pdev);
+
+ ret = clk_prepare_enable(pltfm_host->clk);
+ if (ret)
+ goto err_clk_prepare;
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_sdhci_add;
+
+ /*
+	 * We must request the IRQ after sdhci_add_host(), as the tasklet
+	 * only gets set up in sdhci_add_host(); requesting it earlier
+	 * results in an oops.
+ */
+ if (gpio_is_valid(priv->gpio_cd)) {
+ ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "card detect irq request failed: %d\n",
+ ret);
+ goto err_request_cd;
+ }
+ mmc_gpiod_request_cd_irq(host->mmc);
+ }
+
+ return 0;
+
+err_request_cd:
+ sdhci_remove_host(host, 0);
+err_sdhci_add:
+ clk_disable_unprepare(pltfm_host->clk);
+err_clk_prepare:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_sirf_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int ret;
+
+ ret = sdhci_suspend_host(host);
+ if (ret)
+ return ret;
+
+ clk_disable(pltfm_host->clk);
+
+ return 0;
+}
+
+static int sdhci_sirf_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int ret;
+
+ ret = clk_enable(pltfm_host->clk);
+ if (ret) {
+ dev_dbg(dev, "Resume: Error enabling clock\n");
+ return ret;
+ }
+
+ return sdhci_resume_host(host);
+}
+
+static SIMPLE_DEV_PM_OPS(sdhci_sirf_pm_ops, sdhci_sirf_suspend, sdhci_sirf_resume);
+#endif
+
+static const struct of_device_id sdhci_sirf_of_match[] = {
+ { .compatible = "sirf,prima2-sdhc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_sirf_of_match);
+
+static struct platform_driver sdhci_sirf_driver = {
+ .driver = {
+ .name = "sdhci-sirf",
+ .of_match_table = sdhci_sirf_of_match,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &sdhci_sirf_pm_ops,
+#endif
+ },
+ .probe = sdhci_sirf_probe,
+ .remove = sdhci_pltfm_unregister,
+};
+
+module_platform_driver(sdhci_sirf_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for SiRFprimaII/SiRFmarco");
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-spear.c b/kernel/drivers/mmc/host/sdhci-spear.c
new file mode 100644
index 000000000..df088343d
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-spear.c
@@ -0,0 +1,215 @@
+/*
+ * drivers/mmc/host/sdhci-spear.c
+ *
+ * Support for SDHCI platform devices for the SPEAr SoC family
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * Inspired by sdhci-pltfm.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/io.h>
+#include "sdhci.h"
+
+struct spear_sdhci {
+ struct clk *clk;
+ int card_int_gpio;
+};
+
+/* sdhci ops */
+static const struct sdhci_ops sdhci_pltfm_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static void sdhci_probe_config_dt(struct device_node *np,
+ struct spear_sdhci *host)
+{
+ int cd_gpio;
+
+ cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
+ if (!gpio_is_valid(cd_gpio))
+ cd_gpio = -1;
+
+ host->card_int_gpio = cd_gpio;
+}
+
+static int sdhci_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct resource *iomem;
+ struct spear_sdhci *sdhci;
+ struct device *dev;
+ int ret;
+
+ dev = pdev->dev.parent ? pdev->dev.parent : &pdev->dev;
+ host = sdhci_alloc_host(dev, sizeof(*sdhci));
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
+ goto err;
+ }
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(host->ioaddr)) {
+ ret = PTR_ERR(host->ioaddr);
+ dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret);
+ goto err_host;
+ }
+
+ host->hw_name = "sdhci";
+ host->ops = &sdhci_pltfm_ops;
+ host->irq = platform_get_irq(pdev, 0);
+ host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
+
+ sdhci = sdhci_priv(host);
+
+ /* clk enable */
+ sdhci->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sdhci->clk)) {
+ ret = PTR_ERR(sdhci->clk);
+ dev_dbg(&pdev->dev, "Error getting clock\n");
+ goto err_host;
+ }
+
+ ret = clk_prepare_enable(sdhci->clk);
+ if (ret) {
+ dev_dbg(&pdev->dev, "Error enabling clock\n");
+ goto err_host;
+ }
+
+ ret = clk_set_rate(sdhci->clk, 50000000);
+ if (ret)
+ dev_dbg(&pdev->dev, "Error setting desired clk, clk=%lu\n",
+ clk_get_rate(sdhci->clk));
+
+ sdhci_probe_config_dt(pdev->dev.of_node, sdhci);
+ /*
+	 * Using a GPIO for sdhci card detection is optional. If
+	 * sdhci->card_int_gpio < 0, the native sdhci card-detect line is
+	 * used; otherwise the GPIO line is, via the built-in GPIO support.
+ */
+ if (sdhci->card_int_gpio >= 0) {
+ ret = mmc_gpio_request_cd(host->mmc, sdhci->card_int_gpio, 0);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev,
+ "failed to request card-detect gpio%d\n",
+ sdhci->card_int_gpio);
+ goto disable_clk;
+ }
+ }
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_dbg(&pdev->dev, "error adding host\n");
+ goto disable_clk;
+ }
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(sdhci->clk);
+err_host:
+ sdhci_free_host(host);
+err:
+ dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
+ return ret;
+}
+
+static int sdhci_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct spear_sdhci *sdhci = sdhci_priv(host);
+ int dead = 0;
+ u32 scratch;
+
+ scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
+ if (scratch == (u32)-1)
+ dead = 1;
+
+ sdhci_remove_host(host, dead);
+ clk_disable_unprepare(sdhci->clk);
+ sdhci_free_host(host);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct spear_sdhci *sdhci = sdhci_priv(host);
+ int ret;
+
+ ret = sdhci_suspend_host(host);
+ if (!ret)
+ clk_disable(sdhci->clk);
+
+ return ret;
+}
+
+static int sdhci_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct spear_sdhci *sdhci = sdhci_priv(host);
+ int ret;
+
+ ret = clk_enable(sdhci->clk);
+ if (ret) {
+ dev_dbg(dev, "Resume: Error enabling clock\n");
+ return ret;
+ }
+
+ return sdhci_resume_host(host);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id sdhci_spear_id_table[] = {
+ { .compatible = "st,spear300-sdhci" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sdhci_spear_id_table);
+#endif
+
+static struct platform_driver sdhci_driver = {
+ .driver = {
+ .name = "sdhci",
+ .pm = &sdhci_pm_ops,
+ .of_match_table = of_match_ptr(sdhci_spear_id_table),
+ },
+ .probe = sdhci_probe,
+ .remove = sdhci_remove,
+};
+
+module_platform_driver(sdhci_driver);
+
+MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-st.c b/kernel/drivers/mmc/host/sdhci-st.c
new file mode 100644
index 000000000..682f2bb0f
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-st.c
@@ -0,0 +1,512 @@
+/*
+ * Support for SDHCI on STMicroelectronics SoCs
+ *
+ * Copyright (C) 2014 STMicroelectronics Ltd
+ * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ * Contributors: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * Based on sdhci-cns3xxx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/mmc/host.h>
+#include <linux/reset.h>
+#include "sdhci-pltfm.h"
+
+struct st_mmc_platform_data {
+ struct reset_control *rstc;
+ void __iomem *top_ioaddr;
+};
+
+/* MMCSS glue logic to set up the HC on some ST SoCs (e.g. STiH407 family) */
+
+#define ST_MMC_CCONFIG_REG_1 0x400
+#define ST_MMC_CCONFIG_TIMEOUT_CLK_UNIT BIT(24)
+#define ST_MMC_CCONFIG_TIMEOUT_CLK_FREQ BIT(12)
+#define ST_MMC_CCONFIG_TUNING_COUNT_DEFAULT BIT(8)
+#define ST_MMC_CCONFIG_ASYNC_WAKEUP BIT(0)
+#define ST_MMC_CCONFIG_1_DEFAULT \
+ ((ST_MMC_CCONFIG_TIMEOUT_CLK_UNIT) | \
+ (ST_MMC_CCONFIG_TIMEOUT_CLK_FREQ) | \
+ (ST_MMC_CCONFIG_TUNING_COUNT_DEFAULT))
+
+#define ST_MMC_CCONFIG_REG_2 0x404
+#define ST_MMC_CCONFIG_HIGH_SPEED BIT(28)
+#define ST_MMC_CCONFIG_ADMA2 BIT(24)
+#define ST_MMC_CCONFIG_8BIT BIT(20)
+#define ST_MMC_CCONFIG_MAX_BLK_LEN 16
+#define MAX_BLK_LEN_1024 1
+#define MAX_BLK_LEN_2048 2
+#define BASE_CLK_FREQ_200 0xc8
+#define BASE_CLK_FREQ_100 0x64
+#define BASE_CLK_FREQ_50 0x32
+#define ST_MMC_CCONFIG_2_DEFAULT \
+ (ST_MMC_CCONFIG_HIGH_SPEED | ST_MMC_CCONFIG_ADMA2 | \
+ ST_MMC_CCONFIG_8BIT | \
+ (MAX_BLK_LEN_1024 << ST_MMC_CCONFIG_MAX_BLK_LEN))
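+/*
+ * e.g. (MAX_BLK_LEN_1024 << ST_MMC_CCONFIG_MAX_BLK_LEN) places the
+ * encoded value 1 in the max-block-length field at bit 16, selecting a
+ * 1024-byte maximum block length.
+ */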
+
+#define ST_MMC_CCONFIG_REG_3 0x408
+#define ST_MMC_CCONFIG_EMMC_SLOT_TYPE BIT(28)
+#define ST_MMC_CCONFIG_64BIT BIT(24)
+#define ST_MMC_CCONFIG_ASYNCH_INTR_SUPPORT BIT(20)
+#define ST_MMC_CCONFIG_1P8_VOLT BIT(16)
+#define ST_MMC_CCONFIG_3P0_VOLT BIT(12)
+#define ST_MMC_CCONFIG_3P3_VOLT BIT(8)
+#define ST_MMC_CCONFIG_SUSP_RES_SUPPORT BIT(4)
+#define ST_MMC_CCONFIG_SDMA BIT(0)
+#define ST_MMC_CCONFIG_3_DEFAULT \
+ (ST_MMC_CCONFIG_ASYNCH_INTR_SUPPORT | \
+ ST_MMC_CCONFIG_3P3_VOLT | \
+ ST_MMC_CCONFIG_SUSP_RES_SUPPORT | \
+ ST_MMC_CCONFIG_SDMA)
+
+#define ST_MMC_CCONFIG_REG_4 0x40c
+#define ST_MMC_CCONFIG_D_DRIVER BIT(20)
+#define ST_MMC_CCONFIG_C_DRIVER BIT(16)
+#define ST_MMC_CCONFIG_A_DRIVER BIT(12)
+#define ST_MMC_CCONFIG_DDR50 BIT(8)
+#define ST_MMC_CCONFIG_SDR104 BIT(4)
+#define ST_MMC_CCONFIG_SDR50 BIT(0)
+#define ST_MMC_CCONFIG_4_DEFAULT 0
+
+#define ST_MMC_CCONFIG_REG_5 0x410
+#define ST_MMC_CCONFIG_TUNING_FOR_SDR50 BIT(8)
+#define RETUNING_TIMER_CNT_MAX 0xf
+#define ST_MMC_CCONFIG_5_DEFAULT 0
+
+/* I/O configuration for Arasan IP */
+#define ST_MMC_GP_OUTPUT 0x450
+#define ST_MMC_GP_OUTPUT_CD BIT(12)
+
+#define ST_MMC_STATUS_R 0x460
+
+#define ST_TOP_MMC_DLY_FIX_OFF(x)	((x) - 0x8)
+
+/* TOP config registers to manage static and dynamic delay */
+#define ST_TOP_MMC_TX_CLK_DLY ST_TOP_MMC_DLY_FIX_OFF(0x8)
+#define ST_TOP_MMC_RX_CLK_DLY ST_TOP_MMC_DLY_FIX_OFF(0xc)
+/* MMC delay control register */
+#define ST_TOP_MMC_DLY_CTRL ST_TOP_MMC_DLY_FIX_OFF(0x18)
+#define ST_TOP_MMC_DLY_CTRL_DLL_BYPASS_CMD BIT(0)
+#define ST_TOP_MMC_DLY_CTRL_DLL_BYPASS_PH_SEL BIT(1)
+#define ST_TOP_MMC_DLY_CTRL_TX_DLL_ENABLE BIT(8)
+#define ST_TOP_MMC_DLY_CTRL_RX_DLL_ENABLE BIT(9)
+#define ST_TOP_MMC_DLY_CTRL_ATUNE_NOT_CFG_DLY BIT(10)
+#define ST_TOP_MMC_START_DLL_LOCK BIT(11)
+
+/* register to provide the phase-shift value for DLL */
+#define ST_TOP_MMC_TX_DLL_STEP_DLY ST_TOP_MMC_DLY_FIX_OFF(0x1c)
+#define ST_TOP_MMC_RX_DLL_STEP_DLY ST_TOP_MMC_DLY_FIX_OFF(0x20)
+#define ST_TOP_MMC_RX_CMD_STEP_DLY ST_TOP_MMC_DLY_FIX_OFF(0x24)
+
+/* phase shift delay on the tx clk 2.188ns */
+#define ST_TOP_MMC_TX_DLL_STEP_DLY_VALID 0x6
+
+#define ST_TOP_MMC_DLY_MAX 0xf
+
+#define ST_TOP_MMC_DYN_DLY_CONF \
+ (ST_TOP_MMC_DLY_CTRL_TX_DLL_ENABLE | \
+ ST_TOP_MMC_DLY_CTRL_ATUNE_NOT_CFG_DLY | \
+ ST_TOP_MMC_START_DLL_LOCK)
+
+/*
+ * For clock speeds greater than 90MHz, we need to check that the
+ * DLL procedure has finished before switching to ultra-speed modes.
+ */
+#define CLK_TO_CHECK_DLL_LOCK 90000000
+
+static inline void st_mmcss_set_static_delay(void __iomem *ioaddr)
+{
+ if (!ioaddr)
+ return;
+
+ writel_relaxed(0x0, ioaddr + ST_TOP_MMC_DLY_CTRL);
+ writel_relaxed(ST_TOP_MMC_DLY_MAX,
+ ioaddr + ST_TOP_MMC_TX_CLK_DLY);
+}
+
+/**
+ * st_mmcss_cconfig: configure the Arasan HC inside the flashSS.
+ * @np: dt device node.
+ * @host: sdhci host
+ * Description: this function configures the Arasan host controller.
+ * On some ST SoCs, i.e. the STiH407 family, the MMC devices sit inside a
+ * dedicated flashSS sub-system which needs to be configured to be compliant
+ * with eMMC 4.5 or eMMC 4.3. This has to be done before registering the
+ * sdhci host.
+ */
+static void st_mmcss_cconfig(struct device_node *np, struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct mmc_host *mhost = host->mmc;
+ u32 cconf2, cconf3, cconf4, cconf5;
+
+ if (!of_device_is_compatible(np, "st,sdhci-stih407"))
+ return;
+
+ cconf2 = ST_MMC_CCONFIG_2_DEFAULT;
+ cconf3 = ST_MMC_CCONFIG_3_DEFAULT;
+ cconf4 = ST_MMC_CCONFIG_4_DEFAULT;
+ cconf5 = ST_MMC_CCONFIG_5_DEFAULT;
+
+ writel_relaxed(ST_MMC_CCONFIG_1_DEFAULT,
+ host->ioaddr + ST_MMC_CCONFIG_REG_1);
+
+	/*
+	 * Set the clock frequency; default to 50 MHz if max-frequency is
+	 * not provided.
+	 */
+
+ switch (mhost->f_max) {
+ case 200000000:
+ clk_set_rate(pltfm_host->clk, mhost->f_max);
+ cconf2 |= BASE_CLK_FREQ_200;
+ break;
+ case 100000000:
+ clk_set_rate(pltfm_host->clk, mhost->f_max);
+ cconf2 |= BASE_CLK_FREQ_100;
+ break;
+ default:
+ clk_set_rate(pltfm_host->clk, 50000000);
+ cconf2 |= BASE_CLK_FREQ_50;
+ }
+
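+	/*
+	 * Note: the BASE_CLK_FREQ_* values selected above encode the base
+	 * clock in MHz (0xc8 = 200, 0x64 = 100, 0x32 = 50).
+	 */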
+ writel_relaxed(cconf2, host->ioaddr + ST_MMC_CCONFIG_REG_2);
+
+ if (mhost->caps & MMC_CAP_NONREMOVABLE)
+ cconf3 |= ST_MMC_CCONFIG_EMMC_SLOT_TYPE;
+ else
+		/* CARD_DET_CTRL */
+ writel_relaxed(ST_MMC_GP_OUTPUT_CD,
+ host->ioaddr + ST_MMC_GP_OUTPUT);
+
+ if (mhost->caps & MMC_CAP_UHS_SDR50) {
+ /* use 1.8V */
+ cconf3 |= ST_MMC_CCONFIG_1P8_VOLT;
+ cconf4 |= ST_MMC_CCONFIG_SDR50;
+ /* Use tuning */
+ cconf5 |= ST_MMC_CCONFIG_TUNING_FOR_SDR50;
+ /* Max timeout for retuning */
+ cconf5 |= RETUNING_TIMER_CNT_MAX;
+ }
+
+ if (mhost->caps & MMC_CAP_UHS_SDR104) {
+ /*
+ * SDR104 implies the HC can support HS200 mode, so
+ * it's mandatory to use 1.8V
+ */
+ cconf3 |= ST_MMC_CCONFIG_1P8_VOLT;
+ cconf4 |= ST_MMC_CCONFIG_SDR104;
+ /* Max timeout for retuning */
+ cconf5 |= RETUNING_TIMER_CNT_MAX;
+ }
+
+ if (mhost->caps & MMC_CAP_UHS_DDR50)
+ cconf4 |= ST_MMC_CCONFIG_DDR50;
+
+ writel_relaxed(cconf3, host->ioaddr + ST_MMC_CCONFIG_REG_3);
+ writel_relaxed(cconf4, host->ioaddr + ST_MMC_CCONFIG_REG_4);
+ writel_relaxed(cconf5, host->ioaddr + ST_MMC_CCONFIG_REG_5);
+}
+
+static inline void st_mmcss_set_dll(void __iomem *ioaddr)
+{
+ if (!ioaddr)
+ return;
+
+ writel_relaxed(ST_TOP_MMC_DYN_DLY_CONF, ioaddr + ST_TOP_MMC_DLY_CTRL);
+ writel_relaxed(ST_TOP_MMC_TX_DLL_STEP_DLY_VALID,
+ ioaddr + ST_TOP_MMC_TX_DLL_STEP_DLY);
+}
+
+static int st_mmcss_lock_dll(void __iomem *ioaddr)
+{
+ unsigned long curr, value;
+ unsigned long finish = jiffies + HZ;
+
+	/* Check whether the DLL procedure has finished */
+ do {
+ curr = jiffies;
+ value = readl(ioaddr + ST_MMC_STATUS_R);
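+		/* bit 0 of ST_MMC_STATUS_R reports that the DLL has locked */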
+ if (value & 0x1)
+ return 0;
+
+ cpu_relax();
+ } while (!time_after_eq(curr, finish));
+
+ return -EBUSY;
+}
+
+static int sdhci_st_set_dll_for_clock(struct sdhci_host *host)
+{
+ int ret = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct st_mmc_platform_data *pdata = pltfm_host->priv;
+
+ if (host->clock > CLK_TO_CHECK_DLL_LOCK) {
+ st_mmcss_set_dll(pdata->top_ioaddr);
+ ret = st_mmcss_lock_dll(host->ioaddr);
+ }
+
+ return ret;
+}
+
+static void sdhci_st_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int uhs)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ u16 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ int ret = 0;
+
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ switch (uhs) {
+ /*
+	 * Set V18_EN -- UHS modes do not work without it. This does
+	 * not change the signaling voltage.
+ */
+
+ case MMC_TIMING_UHS_SDR12:
+ st_mmcss_set_static_delay(pdata->top_ioaddr);
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12 | SDHCI_CTRL_VDD_180;
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ st_mmcss_set_static_delay(pdata->top_ioaddr);
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25 | SDHCI_CTRL_VDD_180;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ st_mmcss_set_static_delay(pdata->top_ioaddr);
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180;
+ ret = sdhci_st_set_dll_for_clock(host);
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ st_mmcss_set_static_delay(pdata->top_ioaddr);
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
+ ret = sdhci_st_set_dll_for_clock(host);
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ st_mmcss_set_static_delay(pdata->top_ioaddr);
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
+ break;
+ }
+
+ if (ret)
+		dev_warn(mmc_dev(host->mmc),
+			 "Error setting dll for clock (uhs %d)\n", uhs);
+
+ dev_dbg(mmc_dev(host->mmc), "uhs %d, ctrl_2 %04X\n", uhs, ctrl_2);
+
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
+static u32 sdhci_st_readl(struct sdhci_host *host, int reg)
+{
+ u32 ret;
+
+ switch (reg) {
+ case SDHCI_CAPABILITIES:
+ ret = readl_relaxed(host->ioaddr + reg);
+ /* Support 3.3V and 1.8V */
+ ret &= ~SDHCI_CAN_VDD_300;
+ break;
+ default:
+ ret = readl_relaxed(host->ioaddr + reg);
+ }
+ return ret;
+}
+
+static const struct sdhci_ops sdhci_st_ops = {
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .read_l = sdhci_st_readl,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_st_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_st_pdata = {
+ .ops = &sdhci_st_ops,
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_NO_HISPD_BIT,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_STOP_WITH_TC,
+};
+
+
+static int sdhci_st_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sdhci_host *host;
+ struct st_mmc_platform_data *pdata;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct clk *clk;
+ int ret = 0;
+ u16 host_version;
+ struct resource *res;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ clk = devm_clk_get(&pdev->dev, "mmc");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Peripheral clk not found\n");
+ return PTR_ERR(clk);
+ }
+
+ pdata->rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(pdata->rstc))
+ pdata->rstc = NULL;
+ else
+ reset_control_deassert(pdata->rstc);
+
+ host = sdhci_pltfm_init(pdev, &sdhci_st_pdata, 0);
+ if (IS_ERR(host)) {
+ dev_err(&pdev->dev, "Failed sdhci_pltfm_init\n");
+ ret = PTR_ERR(host);
+ goto err_pltfm_init;
+ }
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed mmc_of_parse\n");
+ goto err_of;
+ }
+
+ clk_prepare_enable(clk);
+
+ /* Configure the FlashSS Top registers for setting eMMC TX/RX delay */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "top-mmc-delay");
+ pdata->top_ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->top_ioaddr)) {
+ dev_warn(&pdev->dev, "FlashSS Top Dly registers not available");
+ pdata->top_ioaddr = NULL;
+ }
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = pdata;
+ pltfm_host->clk = clk;
+
+ /* Configure the Arasan HC inside the flashSS */
+ st_mmcss_cconfig(np, host);
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed sdhci_add_host\n");
+ goto err_out;
+ }
+
+ platform_set_drvdata(pdev, host);
+
+ host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
+
+ dev_info(&pdev->dev, "SDHCI ST Initialised: Host Version: 0x%x Vendor Version 0x%x\n",
+ ((host_version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT),
+ ((host_version & SDHCI_VENDOR_VER_MASK) >>
+ SDHCI_VENDOR_VER_SHIFT));
+
+ return 0;
+
+err_out:
+ clk_disable_unprepare(clk);
+err_of:
+ sdhci_pltfm_free(pdev);
+err_pltfm_init:
+ if (pdata->rstc)
+ reset_control_assert(pdata->rstc);
+
+ return ret;
+}
+
+static int sdhci_st_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ int ret;
+
+ ret = sdhci_pltfm_unregister(pdev);
+
+ if (pdata->rstc)
+ reset_control_assert(pdata->rstc);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_st_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ int ret = sdhci_suspend_host(host);
+
+ if (ret)
+ goto out;
+
+ if (pdata->rstc)
+ reset_control_assert(pdata->rstc);
+
+ clk_disable_unprepare(pltfm_host->clk);
+out:
+ return ret;
+}
+
+static int sdhci_st_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ struct device_node *np = dev->of_node;
+
+ clk_prepare_enable(pltfm_host->clk);
+
+ if (pdata->rstc)
+ reset_control_deassert(pdata->rstc);
+
+ st_mmcss_cconfig(np, host);
+
+ return sdhci_resume_host(host);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(sdhci_st_pmops, sdhci_st_suspend, sdhci_st_resume);
+
+static const struct of_device_id st_sdhci_match[] = {
+ { .compatible = "st,sdhci" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, st_sdhci_match);
+
+static struct platform_driver sdhci_st_driver = {
+ .probe = sdhci_st_probe,
+ .remove = sdhci_st_remove,
+ .driver = {
+ .name = "sdhci-st",
+ .pm = &sdhci_st_pmops,
+ .of_match_table = of_match_ptr(st_sdhci_match),
+ },
+};
+
+module_platform_driver(sdhci_st_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for STMicroelectronics SoCs");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:st-sdhci");
diff --git a/kernel/drivers/mmc/host/sdhci-tegra.c b/kernel/drivers/mmc/host/sdhci-tegra.c
new file mode 100644
index 000000000..ad28b49f0
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-tegra.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/gpio/consumer.h>
+
+#include "sdhci-pltfm.h"
+
+/* Tegra SDHOST controller vendor register definitions */
+#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
+#define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8
+#define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10
+#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
+#define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200
+
+#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
+#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
+#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
+#define NVQUIRK_DISABLE_SDR50 BIT(3)
+#define NVQUIRK_DISABLE_SDR104 BIT(4)
+#define NVQUIRK_DISABLE_DDR50 BIT(5)
+
+struct sdhci_tegra_soc_data {
+ const struct sdhci_pltfm_data *pdata;
+ u32 nvquirks;
+};
+
+struct sdhci_tegra {
+ const struct sdhci_tegra_soc_data *soc_data;
+ struct gpio_desc *power_gpio;
+};
+
+static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+
+ if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
+ (reg == SDHCI_HOST_VERSION))) {
+ /* Erratum: Version register is invalid in HW. */
+ return SDHCI_SPEC_200;
+ }
+
+ return readw(host->ioaddr + reg);
+}
+
+static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ switch (reg) {
+ case SDHCI_TRANSFER_MODE:
+ /*
+		 * Postpone this write; it must be done together with the
+		 * command write handled below.
+ */
+ pltfm_host->xfer_mode_shadow = val;
+ return;
+ case SDHCI_COMMAND:
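+		/*
+		 * The 16-bit command register occupies the upper half of
+		 * the 32-bit word at SDHCI_TRANSFER_MODE, so release the
+		 * shadowed transfer mode and the command in one 32-bit
+		 * write.
+		 */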
+ writel((val << 16) | pltfm_host->xfer_mode_shadow,
+ host->ioaddr + SDHCI_TRANSFER_MODE);
+ return;
+ }
+
+ writew(val, host->ioaddr + reg);
+}
+
+static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+
+	/*
+	 * It seems we are getting spurious timeout and CRC errors, so
+	 * disable signalling of them. In case of real errors, software
+	 * timers should take care of eventually detecting them.
+	 */
+ if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
+ val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
+
+ writel(val, host->ioaddr + reg);
+
+ if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
+ (reg == SDHCI_INT_ENABLE))) {
+ /* Erratum: Must enable block gap interrupt detection */
+ u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+ if (val & SDHCI_INT_CARD_INT)
+ gap_ctrl |= 0x8;
+ else
+ gap_ctrl &= ~0x8;
+ writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+ }
+}
+
+static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
+{
+ return mmc_gpio_get_ro(host->mmc);
+}
+
+static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+ u32 misc_ctrl;
+
+ sdhci_reset(host, mask);
+
+ if (!(mask & SDHCI_RESET_ALL))
+ return;
+
+ misc_ctrl = sdhci_readw(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+ /* Erratum: Enable SDHCI spec v3.00 support */
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
+ /* Don't advertise UHS modes which aren't supported yet */
+ if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR50)
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50;
+ if (soc_data->nvquirks & NVQUIRK_DISABLE_DDR50)
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50;
+ if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR104)
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104;
+ sdhci_writew(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+}
+
+static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width)
+{
+ u32 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ if ((host->mmc->caps & MMC_CAP_8_BIT_DATA) &&
+ (bus_width == MMC_BUS_WIDTH_8)) {
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ } else {
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ if (bus_width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ else
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ }
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static const struct sdhci_ops tegra_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
+ .read_w = tegra_sdhci_readw,
+ .write_l = tegra_sdhci_writel,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = tegra_sdhci_set_bus_width,
+ .reset = tegra_sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+};
+
+static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .ops = &tegra_sdhci_ops,
+};
+
+static struct sdhci_tegra_soc_data soc_data_tegra20 = {
+ .pdata = &sdhci_tegra20_pdata,
+ .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
+ NVQUIRK_ENABLE_BLOCK_GAP_DET,
+};
+
+static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .ops = &tegra_sdhci_ops,
+};
+
+static struct sdhci_tegra_soc_data soc_data_tegra30 = {
+ .pdata = &sdhci_tegra30_pdata,
+ .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
+ NVQUIRK_DISABLE_SDR50 |
+ NVQUIRK_DISABLE_SDR104,
+};
+
+static const struct sdhci_ops tegra114_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
+ .read_w = tegra_sdhci_readw,
+ .write_w = tegra_sdhci_writew,
+ .write_l = tegra_sdhci_writel,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = tegra_sdhci_set_bus_width,
+ .reset = tegra_sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+};
+
+static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .ops = &tegra114_sdhci_ops,
+};
+
+static struct sdhci_tegra_soc_data soc_data_tegra114 = {
+ .pdata = &sdhci_tegra114_pdata,
+ .nvquirks = NVQUIRK_DISABLE_SDR50 |
+ NVQUIRK_DISABLE_DDR50 |
+ NVQUIRK_DISABLE_SDR104,
+};
+
+static const struct of_device_id sdhci_tegra_dt_match[] = {
+ { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
+ { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
+ { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
+ { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
+
+static int sdhci_tegra_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct sdhci_tegra_soc_data *soc_data;
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_tegra *tegra_host;
+ struct clk *clk;
+ int rc;
+
+ match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+ soc_data = match->data;
+
+ host = sdhci_pltfm_init(pdev, soc_data->pdata, 0);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+ pltfm_host = sdhci_priv(host);
+
+ tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL);
+ if (!tegra_host) {
+ dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n");
+ rc = -ENOMEM;
+ goto err_alloc_tegra_host;
+ }
+ tegra_host->soc_data = soc_data;
+ pltfm_host->priv = tegra_host;
+
+ rc = mmc_of_parse(host->mmc);
+ if (rc)
+ goto err_parse_dt;
+
+ tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(tegra_host->power_gpio)) {
+ rc = PTR_ERR(tegra_host->power_gpio);
+ goto err_power_req;
+ }
+
+ clk = devm_clk_get(mmc_dev(host->mmc), NULL);
+ if (IS_ERR(clk)) {
+ dev_err(mmc_dev(host->mmc), "clk err\n");
+ rc = PTR_ERR(clk);
+ goto err_clk_get;
+ }
+ clk_prepare_enable(clk);
+ pltfm_host->clk = clk;
+
+ rc = sdhci_add_host(host);
+ if (rc)
+ goto err_add_host;
+
+ return 0;
+
+err_add_host:
+ clk_disable_unprepare(pltfm_host->clk);
+err_clk_get:
+err_power_req:
+err_parse_dt:
+err_alloc_tegra_host:
+ sdhci_pltfm_free(pdev);
+ return rc;
+}
+
+static struct platform_driver sdhci_tegra_driver = {
+ .driver = {
+ .name = "sdhci-tegra",
+ .of_match_table = sdhci_tegra_dt_match,
+ .pm = SDHCI_PLTFM_PMOPS,
+ },
+ .probe = sdhci_tegra_probe,
+ .remove = sdhci_pltfm_unregister,
+};
+
+module_platform_driver(sdhci_tegra_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for Tegra");
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci.c b/kernel/drivers/mmc/host/sdhci.c
new file mode 100644
index 000000000..f09bc10c9
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci.c
@@ -0,0 +1,3590 @@
+/*
+ * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
+ *
+ * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * Thanks to the following companies for their support:
+ *
+ * - JMicron (hardware and technical support)
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/leds.h>
+
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+
+#include "sdhci.h"
+
+#define DRIVER_NAME "sdhci"
+
+#define DBG(f, x...) \
+ pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
+
+#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
+ defined(CONFIG_MMC_SDHCI_MODULE))
+#define SDHCI_USE_LEDS_CLASS
+#endif
+
+#define MAX_TUNING_LOOP 40
+
+static unsigned int debug_quirks = 0;
+static unsigned int debug_quirks2;
+
+static void sdhci_finish_data(struct sdhci_host *);
+
+static void sdhci_finish_command(struct sdhci_host *);
+static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
+static void sdhci_tuning_timer(unsigned long data);
+static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
+static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+ struct mmc_data *data,
+ struct sdhci_host_next *next);
+static int sdhci_do_get_cd(struct sdhci_host *host);
+
+#ifdef CONFIG_PM
+static int sdhci_runtime_pm_get(struct sdhci_host *host);
+static int sdhci_runtime_pm_put(struct sdhci_host *host);
+static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
+static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
+#else
+static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
+{
+ return 0;
+}
+static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
+{
+ return 0;
+}
+static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+{
+}
+static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
+{
+}
+#endif
+
+static void sdhci_dumpregs(struct sdhci_host *host)
+{
+ pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+ mmc_hostname(host->mmc));
+
+ pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
+ sdhci_readl(host, SDHCI_DMA_ADDRESS),
+ sdhci_readw(host, SDHCI_HOST_VERSION));
+ pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
+ sdhci_readw(host, SDHCI_BLOCK_SIZE),
+ sdhci_readw(host, SDHCI_BLOCK_COUNT));
+ pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
+ sdhci_readl(host, SDHCI_ARGUMENT),
+ sdhci_readw(host, SDHCI_TRANSFER_MODE));
+ pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
+ sdhci_readl(host, SDHCI_PRESENT_STATE),
+ sdhci_readb(host, SDHCI_HOST_CONTROL));
+ pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
+ sdhci_readb(host, SDHCI_POWER_CONTROL),
+ sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
+ pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
+ sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
+ sdhci_readw(host, SDHCI_CLOCK_CONTROL));
+ pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
+ sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
+ sdhci_readl(host, SDHCI_INT_STATUS));
+ pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
+ sdhci_readl(host, SDHCI_INT_ENABLE),
+ sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
+ pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
+ sdhci_readw(host, SDHCI_ACMD12_ERR),
+ sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
+ pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
+ sdhci_readl(host, SDHCI_CAPABILITIES),
+ sdhci_readl(host, SDHCI_CAPABILITIES_1));
+ pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
+ sdhci_readw(host, SDHCI_COMMAND),
+ sdhci_readl(host, SDHCI_MAX_CURRENT));
+ pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
+ sdhci_readw(host, SDHCI_HOST_CONTROL2));
+
+ if (host->flags & SDHCI_USE_ADMA) {
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
+ readl(host->ioaddr + SDHCI_ADMA_ERROR),
+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
+ else
+ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
+ readl(host->ioaddr + SDHCI_ADMA_ERROR),
+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
+ }
+
+ pr_debug(DRIVER_NAME ": ===========================================\n");
+}
+
+/*****************************************************************************\
+ * *
+ * Low level functions *
+ * *
+\*****************************************************************************/
+
+static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
+{
+ u32 present;
+
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
+ (host->mmc->caps & MMC_CAP_NONREMOVABLE))
+ return;
+
+ if (enable) {
+ present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
+
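+		/*
+		 * Arm the interrupt for the event that can happen next:
+		 * removal if a card is present, insertion otherwise.
+		 */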
+ host->ier |= present ? SDHCI_INT_CARD_REMOVE :
+ SDHCI_INT_CARD_INSERT;
+ } else {
+ host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+ }
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+}
+
+static void sdhci_enable_card_detection(struct sdhci_host *host)
+{
+ sdhci_set_card_detection(host, true);
+}
+
+static void sdhci_disable_card_detection(struct sdhci_host *host)
+{
+ sdhci_set_card_detection(host, false);
+}
+
+void sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ unsigned long timeout;
+
+ sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
+
+ if (mask & SDHCI_RESET_ALL) {
+ host->clock = 0;
+ /* Reset-all turns off SD Bus Power */
+ if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
+ sdhci_runtime_pm_bus_off(host);
+ }
+
+ /* Wait max 100 ms */
+ timeout = 100;
+
+ /* hw clears the bit when it's done */
+ while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
+ if (timeout == 0) {
+ pr_err("%s: Reset 0x%x never completed.\n",
+ mmc_hostname(host->mmc), (int)mask);
+ sdhci_dumpregs(host);
+ return;
+ }
+ timeout--;
+ mdelay(1);
+ }
+}
+EXPORT_SYMBOL_GPL(sdhci_reset);
+
+static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
+{
+ if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
+ if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT))
+ return;
+ }
+
+ host->ops->reset(host, mask);
+
+ if (mask & SDHCI_RESET_ALL) {
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+ if (host->ops->enable_dma)
+ host->ops->enable_dma(host);
+ }
+
+		/* Resetting the controller clears many settings, among
+		 * them the preset value enable */
+		host->preset_enabled = false;
+ }
+}
+
+static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
+
+static void sdhci_init(struct sdhci_host *host, int soft)
+{
+ if (soft)
+ sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
+ else
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
+ host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
+ SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
+ SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
+ SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
+ SDHCI_INT_RESPONSE;
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+ if (soft) {
+ /* force clock reconfiguration */
+ host->clock = 0;
+ sdhci_set_ios(host->mmc, &host->mmc->ios);
+ }
+}
+
+static void sdhci_reinit(struct sdhci_host *host)
+{
+ sdhci_init(host, 0);
+ /*
+	 * Retuning state is affected by the card inserted and is only
+	 * applicable to UHS-I cards, so reset these fields to their
+	 * initial values when the card is removed.
+ */
+ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
+ host->flags &= ~SDHCI_USING_RETUNING_TIMER;
+
+ del_timer_sync(&host->tuning_timer);
+ host->flags &= ~SDHCI_NEEDS_RETUNING;
+ }
+ sdhci_enable_card_detection(host);
+}
+
+static void sdhci_activate_led(struct sdhci_host *host)
+{
+ u8 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl |= SDHCI_CTRL_LED;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static void sdhci_deactivate_led(struct sdhci_host *host)
+{
+ u8 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl &= ~SDHCI_CTRL_LED;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+#ifdef SDHCI_USE_LEDS_CLASS
+static void sdhci_led_control(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct sdhci_host *host = container_of(led, struct sdhci_host, led);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->runtime_suspended)
+ goto out;
+
+ if (brightness == LED_OFF)
+ sdhci_deactivate_led(host);
+ else
+ sdhci_activate_led(host);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+#endif
+
+/*****************************************************************************\
+ * *
+ * Core functions *
+ * *
+\*****************************************************************************/
+
+static void sdhci_read_block_pio(struct sdhci_host *host)
+{
+ unsigned long flags;
+ size_t blksize, len, chunk;
+ u32 uninitialized_var(scratch);
+ u8 *buf;
+
+ DBG("PIO reading\n");
+
+ blksize = host->data->blksz;
+ chunk = 0;
+
+ local_irq_save(flags);
+
+ while (blksize) {
+ if (!sg_miter_next(&host->sg_miter))
+ BUG();
+
+ len = min(host->sg_miter.length, blksize);
+
+ blksize -= len;
+ host->sg_miter.consumed = len;
+
+ buf = host->sg_miter.addr;
+
+ while (len) {
+ if (chunk == 0) {
+ scratch = sdhci_readl(host, SDHCI_BUFFER);
+ chunk = 4;
+ }
+
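+			/* unpack the FIFO word one byte at a time, LSB first */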
+ *buf = scratch & 0xFF;
+
+ buf++;
+ scratch >>= 8;
+ chunk--;
+ len--;
+ }
+ }
+
+ sg_miter_stop(&host->sg_miter);
+
+ local_irq_restore(flags);
+}
+
+static void sdhci_write_block_pio(struct sdhci_host *host)
+{
+ unsigned long flags;
+ size_t blksize, len, chunk;
+ u32 scratch;
+ u8 *buf;
+
+ DBG("PIO writing\n");
+
+ blksize = host->data->blksz;
+ chunk = 0;
+ scratch = 0;
+
+ local_irq_save(flags);
+
+ while (blksize) {
+ if (!sg_miter_next(&host->sg_miter))
+ BUG();
+
+ len = min(host->sg_miter.length, blksize);
+
+ blksize -= len;
+ host->sg_miter.consumed = len;
+
+ buf = host->sg_miter.addr;
+
+ while (len) {
+ scratch |= (u32)*buf << (chunk * 8);
+
+ buf++;
+ chunk++;
+ len--;
+
+ if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
+ sdhci_writel(host, scratch, SDHCI_BUFFER);
+ chunk = 0;
+ scratch = 0;
+ }
+ }
+ }
+
+ sg_miter_stop(&host->sg_miter);
+
+ local_irq_restore(flags);
+}
+
+static void sdhci_transfer_pio(struct sdhci_host *host)
+{
+ u32 mask;
+
+ BUG_ON(!host->data);
+
+ if (host->blocks == 0)
+ return;
+
+ if (host->data->flags & MMC_DATA_READ)
+ mask = SDHCI_DATA_AVAILABLE;
+ else
+ mask = SDHCI_SPACE_AVAILABLE;
+
+ /*
+ * Some controllers (JMicron JMB38x) mess up the buffer bits
+ * for transfers < 4 bytes. As long as it is just one block,
+ * we can ignore the bits.
+ */
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
+ (host->data->blocks == 1))
+ mask = ~0;
+
+ while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
+ if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
+ udelay(100);
+
+ if (host->data->flags & MMC_DATA_READ)
+ sdhci_read_block_pio(host);
+ else
+ sdhci_write_block_pio(host);
+
+ host->blocks--;
+ if (host->blocks == 0)
+ break;
+ }
+
+ DBG("PIO transfer complete.\n");
+}
+
+static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
+{
+ local_irq_save(*flags);
+ return kmap_atomic(sg_page(sg)) + sg->offset;
+}
+
+static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
+{
+ kunmap_atomic(buffer);
+ local_irq_restore(*flags);
+}
+
+static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
+ dma_addr_t addr, int len, unsigned cmd)
+{
+ struct sdhci_adma2_64_desc *dma_desc = desc;
+
+ /* 32-bit and 64-bit descriptors have these members in same position */
+ dma_desc->cmd = cpu_to_le16(cmd);
+ dma_desc->len = cpu_to_le16(len);
+ dma_desc->addr_lo = cpu_to_le32((u32)addr);
+
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
+}
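+
+/*
+ * For example, a 512-byte data segment is described by
+ *
+ *	sdhci_adma_write_desc(host, desc, addr, 512, ADMA2_TRAN_VALID);
+ *
+ * which stores cmd = 0x21 (transfer, valid; from sdhci.h) and len = 512;
+ * sdhci_adma_mark_end() below then ORs ADMA2_END (0x2) into the cmd
+ * field of the last descriptor.
+ */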
+
+static void sdhci_adma_mark_end(void *desc)
+{
+ struct sdhci_adma2_64_desc *dma_desc = desc;
+
+ /* 32-bit and 64-bit descriptors have 'cmd' in same position */
+ dma_desc->cmd |= cpu_to_le16(ADMA2_END);
+}
+
+static int sdhci_adma_table_pre(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ int direction;
+
+ void *desc;
+ void *align;
+ dma_addr_t addr;
+ dma_addr_t align_addr;
+ int len, offset;
+
+ struct scatterlist *sg;
+ int i;
+ char *buffer;
+ unsigned long flags;
+
+ /*
+	 * The spec does not specify the endianness of the descriptor
+	 * table. We currently guess that it is little-endian.
+ */
+
+ if (data->flags & MMC_DATA_READ)
+ direction = DMA_FROM_DEVICE;
+ else
+ direction = DMA_TO_DEVICE;
+
+ host->align_addr = dma_map_single(mmc_dev(host->mmc),
+ host->align_buffer, host->align_buffer_sz, direction);
+ if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
+ goto fail;
+ BUG_ON(host->align_addr & host->align_mask);
+
+ host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
+ if (host->sg_count < 0)
+ goto unmap_align;
+
+ desc = host->adma_table;
+ align = host->align_buffer;
+
+ align_addr = host->align_addr;
+
+ for_each_sg(data->sg, sg, host->sg_count, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ /*
+ * The SDHCI specification states that ADMA
+ * addresses must be 32-bit aligned. If they
+ * aren't, then we use a bounce buffer for
+ * the (up to three) bytes that screw up the
+ * alignment.
+ */
+ offset = (host->align_sz - (addr & host->align_mask)) &
+ host->align_mask;
+ if (offset) {
+ if (data->flags & MMC_DATA_WRITE) {
+ buffer = sdhci_kmap_atomic(sg, &flags);
+ memcpy(align, buffer, offset);
+ sdhci_kunmap_atomic(buffer, &flags);
+ }
+
+ /* tran, valid */
+ sdhci_adma_write_desc(host, desc, align_addr, offset,
+ ADMA2_TRAN_VALID);
+
+ BUG_ON(offset > 65536);
+
+ align += host->align_sz;
+ align_addr += host->align_sz;
+
+ desc += host->desc_sz;
+
+ addr += offset;
+ len -= offset;
+ }
+
+ BUG_ON(len > 65536);
+
+ /* tran, valid */
+ sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
+ desc += host->desc_sz;
+
+ /*
+ * If this triggers then we have a calculation bug
+ * somewhere. :/
+ */
+ WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
+ }
+
+ if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
+ /*
+ * Mark the last descriptor as the terminating descriptor
+ */
+ if (desc != host->adma_table) {
+ desc -= host->desc_sz;
+ sdhci_adma_mark_end(desc);
+ }
+ } else {
+ /*
+ * Add a terminating entry.
+ */
+
+ /* nop, end, valid */
+ sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
+ }
+
+ /*
+ * Resync align buffer as we might have changed it.
+ */
+ if (data->flags & MMC_DATA_WRITE) {
+ dma_sync_single_for_device(mmc_dev(host->mmc),
+ host->align_addr, host->align_buffer_sz, direction);
+ }
+
+ return 0;
+
+unmap_align:
+ dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+ host->align_buffer_sz, direction);
+fail:
+ return -EINVAL;
+}
+
+static void sdhci_adma_table_post(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ int direction;
+
+ struct scatterlist *sg;
+ int i, size;
+ void *align;
+ char *buffer;
+ unsigned long flags;
+ bool has_unaligned;
+
+ if (data->flags & MMC_DATA_READ)
+ direction = DMA_FROM_DEVICE;
+ else
+ direction = DMA_TO_DEVICE;
+
+ dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+ host->align_buffer_sz, direction);
+
+ /* Do a quick scan of the SG list for any unaligned mappings */
+ has_unaligned = false;
+ for_each_sg(data->sg, sg, host->sg_count, i)
+ if (sg_dma_address(sg) & host->align_mask) {
+ has_unaligned = true;
+ break;
+ }
+
+ if (has_unaligned && data->flags & MMC_DATA_READ) {
+ dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
+ data->sg_len, direction);
+
+ align = host->align_buffer;
+
+ for_each_sg(data->sg, sg, host->sg_count, i) {
+ if (sg_dma_address(sg) & host->align_mask) {
+ size = host->align_sz -
+ (sg_dma_address(sg) & host->align_mask);
+
+ buffer = sdhci_kmap_atomic(sg, &flags);
+ memcpy(buffer, align, size);
+ sdhci_kunmap_atomic(buffer, &flags);
+
+ align += host->align_sz;
+ }
+ }
+ }
+
+ if (!data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, direction);
+}
+
+static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ u8 count;
+ struct mmc_data *data = cmd->data;
+ unsigned target_timeout, current_timeout;
+
+ /*
+ * If the host controller provides us with an incorrect timeout
+ * value, just skip the check and use 0xE. The hardware may take
+ * longer to time out, but that's much better than having a too-short
+ * timeout value.
+ */
+ if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
+ return 0xE;
+
+ /* Unspecified timeout, assume max */
+ if (!data && !cmd->busy_timeout)
+ return 0xE;
+
+ /* timeout in us */
+ if (!data)
+ target_timeout = cmd->busy_timeout * 1000;
+ else {
+ target_timeout = data->timeout_ns / 1000;
+ if (host->clock)
+ target_timeout += data->timeout_clks / host->clock;
+ }
+
+ /*
+ * Figure out needed cycles.
+ * We do this in steps in order to fit inside a 32 bit int.
+ * The first step is the minimum timeout, which will have a
+ * minimum resolution of 6 bits:
+ * (1) 2^13*1000 > 2^22,
+ * (2) host->timeout_clk < 2^16
+ * =>
+ * (1) / (2) > 2^6
+ */
+ count = 0;
+ current_timeout = (1 << 13) * 1000 / host->timeout_clk;
+ while (current_timeout < target_timeout) {
+ count++;
+ current_timeout <<= 1;
+ if (count >= 0xF)
+ break;
+ }
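+	/*
+	 * Worked example, assuming host->timeout_clk is kept in kHz: for
+	 * a 50 MHz timeout clock, current_timeout starts at
+	 * 2^13 * 1000 / 50000 = 163 us, and a 100 ms target needs ten
+	 * doublings (163 us << 10 is ~167 ms), so count = 10.
+	 */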
+
+ if (count >= 0xF) {
+ DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+ mmc_hostname(host->mmc), count, cmd->opcode);
+ count = 0xE;
+ }
+
+ return count;
+}
+
+static void sdhci_set_transfer_irqs(struct sdhci_host *host)
+{
+ u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
+ u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
+
+ if (host->flags & SDHCI_REQ_USE_DMA)
+ host->ier = (host->ier & ~pio_irqs) | dma_irqs;
+ else
+ host->ier = (host->ier & ~dma_irqs) | pio_irqs;
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+}
+
+static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ u8 count;
+
+ if (host->ops->set_timeout) {
+ host->ops->set_timeout(host, cmd);
+ } else {
+ count = sdhci_calc_timeout(host, cmd);
+ sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+ }
+}
+
+static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ u8 ctrl;
+ struct mmc_data *data = cmd->data;
+ int ret;
+
+ WARN_ON(host->data);
+
+ if (data || (cmd->flags & MMC_RSP_BUSY))
+ sdhci_set_timeout(host, cmd);
+
+ if (!data)
+ return;
+
+ /* Sanity checks */
+ BUG_ON(data->blksz * data->blocks > 524288);
+ BUG_ON(data->blksz > host->mmc->max_blk_size);
+ BUG_ON(data->blocks > 65535);
+
+ host->data = data;
+ host->data_early = 0;
+ host->data->bytes_xfered = 0;
+
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
+ host->flags |= SDHCI_REQ_USE_DMA;
+
+ /*
+ * FIXME: This doesn't account for merging when mapping the
+ * scatterlist.
+ */
+ if (host->flags & SDHCI_REQ_USE_DMA) {
+ int broken, i;
+ struct scatterlist *sg;
+
+ broken = 0;
+ if (host->flags & SDHCI_USE_ADMA) {
+ if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
+ broken = 1;
+ } else {
+ if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
+ broken = 1;
+ }
+
+ if (unlikely(broken)) {
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (sg->length & 0x3) {
+ DBG("Reverting to PIO because of "
+ "transfer size (%d)\n",
+ sg->length);
+ host->flags &= ~SDHCI_REQ_USE_DMA;
+ break;
+ }
+ }
+ }
+ }
+
+	/*
+	 * The assumption here is that alignment stays the same after
+	 * translation to device address space.
+	 */
+ if (host->flags & SDHCI_REQ_USE_DMA) {
+ int broken, i;
+ struct scatterlist *sg;
+
+ broken = 0;
+ if (host->flags & SDHCI_USE_ADMA) {
+ /*
+ * As we use 3 byte chunks to work around
+ * alignment problems, we need to check this
+ * quirk.
+ */
+ if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
+ broken = 1;
+ } else {
+ if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
+ broken = 1;
+ }
+
+ if (unlikely(broken)) {
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (sg->offset & 0x3) {
+ DBG("Reverting to PIO because of "
+ "bad alignment\n");
+ host->flags &= ~SDHCI_REQ_USE_DMA;
+ break;
+ }
+ }
+ }
+ }
+
+ if (host->flags & SDHCI_REQ_USE_DMA) {
+ if (host->flags & SDHCI_USE_ADMA) {
+ ret = sdhci_adma_table_pre(host, data);
+ if (ret) {
+ /*
+ * This only happens when someone fed
+ * us an invalid request.
+ */
+ WARN_ON(1);
+ host->flags &= ~SDHCI_REQ_USE_DMA;
+ } else {
+ sdhci_writel(host, host->adma_addr,
+ SDHCI_ADMA_ADDRESS);
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ sdhci_writel(host,
+ (u64)host->adma_addr >> 32,
+ SDHCI_ADMA_ADDRESS_HI);
+ }
+ } else {
+ int sg_cnt;
+
+ sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
+ if (sg_cnt <= 0) {
+ /*
+ * This only happens when someone fed
+ * us an invalid request.
+ */
+ WARN_ON(1);
+ host->flags &= ~SDHCI_REQ_USE_DMA;
+ } else {
+ WARN_ON(sg_cnt != 1);
+ sdhci_writel(host, sg_dma_address(data->sg),
+ SDHCI_DMA_ADDRESS);
+ }
+ }
+ }
+
+ /*
+ * Always adjust the DMA selection as some controllers
+ * (e.g. JMicron) can't do PIO properly when the selection
+ * is ADMA.
+ */
+ if (host->version >= SDHCI_SPEC_200) {
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl &= ~SDHCI_CTRL_DMA_MASK;
+ if ((host->flags & SDHCI_REQ_USE_DMA) &&
+ (host->flags & SDHCI_USE_ADMA)) {
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ ctrl |= SDHCI_CTRL_ADMA64;
+ else
+ ctrl |= SDHCI_CTRL_ADMA32;
+ } else {
+ ctrl |= SDHCI_CTRL_SDMA;
+ }
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ }
+
+ if (!(host->flags & SDHCI_REQ_USE_DMA)) {
+ int flags;
+
+ flags = SG_MITER_ATOMIC;
+ if (host->data->flags & MMC_DATA_READ)
+ flags |= SG_MITER_TO_SG;
+ else
+ flags |= SG_MITER_FROM_SG;
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
+ host->blocks = data->blocks;
+ }
+
+ sdhci_set_transfer_irqs(host);
+
+ /* Set the DMA boundary value and block size */
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
+ data->blksz), SDHCI_BLOCK_SIZE);
+ sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+}
+
+static void sdhci_set_transfer_mode(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ u16 mode = 0;
+ struct mmc_data *data = cmd->data;
+
+ if (data == NULL) {
+ if (host->quirks2 &
+ SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
+ sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
+ } else {
+			/* Clear Auto CMD settings for non-data CMDs */
+ mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
+ sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
+ SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
+ }
+ return;
+ }
+
+ WARN_ON(!host->data);
+
+ if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
+ mode = SDHCI_TRNS_BLK_CNT_EN;
+
+ if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
+ mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
+ /*
+ * If we are sending CMD23, CMD12 never gets sent
+ * on successful completion (so no Auto-CMD12).
+ */
+ if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
+ (cmd->opcode != SD_IO_RW_EXTENDED))
+ mode |= SDHCI_TRNS_AUTO_CMD12;
+ else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
+ mode |= SDHCI_TRNS_AUTO_CMD23;
+ sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
+ }
+ }
+
+ if (data->flags & MMC_DATA_READ)
+ mode |= SDHCI_TRNS_READ;
+ if (host->flags & SDHCI_REQ_USE_DMA)
+ mode |= SDHCI_TRNS_DMA;
+
+ sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
+}
+
+static void sdhci_finish_data(struct sdhci_host *host)
+{
+ struct mmc_data *data;
+
+ BUG_ON(!host->data);
+
+ data = host->data;
+ host->data = NULL;
+
+ if (host->flags & SDHCI_REQ_USE_DMA) {
+ if (host->flags & SDHCI_USE_ADMA)
+ sdhci_adma_table_post(host, data);
+ else {
+ if (!data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc),
+ data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+ }
+
+ /*
+ * The specification states that the block count register must
+ * be updated, but it does not specify at what point in the
+ * data flow. That makes the register entirely useless to read
+ * back so we have to assume that nothing made it to the card
+ * in the event of an error.
+ */
+ if (data->error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blksz * data->blocks;
+
+ /*
+ * Need to send CMD12 if -
+ * a) open-ended multiblock transfer (no CMD23)
+ * b) error in multiblock transfer
+ */
+ if (data->stop &&
+ (data->error ||
+ !host->mrq->sbc)) {
+
+ /*
+ * The controller needs a reset of internal state machines
+ * upon error conditions.
+ */
+ if (data->error) {
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ }
+
+ sdhci_send_command(host, data->stop);
+ } else
+ tasklet_schedule(&host->finish_tasklet);
+}
+
+void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ int flags;
+ u32 mask;
+ unsigned long timeout;
+
+ WARN_ON(host->cmd);
+
+ /* Wait max 10 ms */
+ timeout = 10;
+
+ mask = SDHCI_CMD_INHIBIT;
+ if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
+ mask |= SDHCI_DATA_INHIBIT;
+
+	/* We shouldn't wait for data inhibit for stop commands, even
+	   though they might use busy signaling */
+ if (host->mrq->data && (cmd == host->mrq->data->stop))
+ mask &= ~SDHCI_DATA_INHIBIT;
+
+ while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
+ if (timeout == 0) {
+ pr_err("%s: Controller never released "
+ "inhibit bit(s).\n", mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ cmd->error = -EIO;
+ tasklet_schedule(&host->finish_tasklet);
+ return;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ timeout = jiffies;
+ if (!cmd->data && cmd->busy_timeout > 9000)
+ timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
+ else
+ timeout += 10 * HZ;
+ mod_timer(&host->timer, timeout);
+
+ host->cmd = cmd;
+ host->busy_handle = 0;
+
+ sdhci_prepare_data(host, cmd);
+
+ sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
+
+ sdhci_set_transfer_mode(host, cmd);
+
+ if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
+ pr_err("%s: Unsupported response type!\n",
+ mmc_hostname(host->mmc));
+ cmd->error = -EINVAL;
+ tasklet_schedule(&host->finish_tasklet);
+ return;
+ }
+
+ if (!(cmd->flags & MMC_RSP_PRESENT))
+ flags = SDHCI_CMD_RESP_NONE;
+ else if (cmd->flags & MMC_RSP_136)
+ flags = SDHCI_CMD_RESP_LONG;
+ else if (cmd->flags & MMC_RSP_BUSY)
+ flags = SDHCI_CMD_RESP_SHORT_BUSY;
+ else
+ flags = SDHCI_CMD_RESP_SHORT;
+
+ if (cmd->flags & MMC_RSP_CRC)
+ flags |= SDHCI_CMD_CRC;
+ if (cmd->flags & MMC_RSP_OPCODE)
+ flags |= SDHCI_CMD_INDEX;
+
+ /* CMD19 is special in that the Data Present Select should be set */
+ if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
+ cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
+ flags |= SDHCI_CMD_DATA;
+
+ sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
+}
+EXPORT_SYMBOL_GPL(sdhci_send_command);
+
+static void sdhci_finish_command(struct sdhci_host *host)
+{
+ int i;
+
+ BUG_ON(host->cmd == NULL);
+
+ if (host->cmd->flags & MMC_RSP_PRESENT) {
+ if (host->cmd->flags & MMC_RSP_136) {
+ /* CRC is stripped so we need to do some shifting. */
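+			/*
+			 * Layout sketch (per the SD Host Controller spec):
+			 * the response bytes land in the RESPONSE register
+			 * with the CRC already stripped, so each 32-bit
+			 * word must be shifted left by 8 and topped up with
+			 * a byte from the neighbouring word to rebuild the
+			 * R2 words the MMC core expects.
+			 */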
+			for (i = 0; i < 4; i++) {
+ host->cmd->resp[i] = sdhci_readl(host,
+ SDHCI_RESPONSE + (3-i)*4) << 8;
+ if (i != 3)
+ host->cmd->resp[i] |=
+ sdhci_readb(host,
+ SDHCI_RESPONSE + (3-i)*4-1);
+ }
+ } else {
+ host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+ }
+ }
+
+ host->cmd->error = 0;
+
+ /* Finished CMD23, now send actual command. */
+ if (host->cmd == host->mrq->sbc) {
+ host->cmd = NULL;
+ sdhci_send_command(host, host->mrq->cmd);
+ } else {
+
+ /* Processed actual command. */
+ if (host->data && host->data_early)
+ sdhci_finish_data(host);
+
+ if (!host->cmd->data)
+ tasklet_schedule(&host->finish_tasklet);
+
+ host->cmd = NULL;
+ }
+}
+
+static u16 sdhci_get_preset_value(struct sdhci_host *host)
+{
+ u16 preset = 0;
+
+ switch (host->timing) {
+ case MMC_TIMING_UHS_SDR12:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
+ break;
+ case MMC_TIMING_MMC_HS400:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
+ break;
+ default:
+ pr_warn("%s: Invalid UHS-I mode selected\n",
+ mmc_hostname(host->mmc));
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
+ break;
+ }
+ return preset;
+}
+
+void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ int div = 0; /* Initialized for compiler warning */
+ int real_div = div, clk_mul = 1;
+ u16 clk = 0;
+ unsigned long timeout;
+
+ host->mmc->actual_clock = 0;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ if (host->version >= SDHCI_SPEC_300) {
+ if (host->preset_enabled) {
+ u16 pre_val;
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ pre_val = sdhci_get_preset_value(host);
+ div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
+ >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
+ if (host->clk_mul &&
+ (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
+ clk = SDHCI_PROG_CLOCK_MODE;
+ real_div = div + 1;
+ clk_mul = host->clk_mul;
+ } else {
+ real_div = max_t(int, 1, div << 1);
+ }
+ goto clock_set;
+ }
+
+ /*
+ * Check if the Host Controller supports Programmable Clock
+ * Mode.
+ */
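+		/*
+		 * Worked example (hypothetical numbers): with max_clk =
+		 * 100 MHz and clk_mul = 8, a 50 MHz request selects the
+		 * smallest div with 100 MHz * 8 / div <= 50 MHz, i.e.
+		 * div = 16, and the register is programmed with div - 1.
+		 */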
+ if (host->clk_mul) {
+ for (div = 1; div <= 1024; div++) {
+ if ((host->max_clk * host->clk_mul / div)
+ <= clock)
+ break;
+ }
+ /*
+ * Set Programmable Clock Mode in the Clock
+ * Control register.
+ */
+ clk = SDHCI_PROG_CLOCK_MODE;
+ real_div = div;
+ clk_mul = host->clk_mul;
+ div--;
+ } else {
+ /* Version 3.00 divisors must be a multiple of 2. */
+ if (host->max_clk <= clock)
+ div = 1;
+ else {
+ for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
+ div += 2) {
+ if ((host->max_clk / div) <= clock)
+ break;
+ }
+ }
+ real_div = div;
+ div >>= 1;
+ }
+ } else {
+ /* Version 2.00 divisors must be a power of 2. */
+ for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
+ if ((host->max_clk / div) <= clock)
+ break;
+ }
+ real_div = div;
+ div >>= 1;
+ }
+
+clock_set:
+ if (real_div)
+ host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
+ clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
+ clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
+ << SDHCI_DIVIDER_HI_SHIFT;
+ clk |= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ /* Wait max 20 ms */
+ timeout = 20;
+ while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+ & SDHCI_CLOCK_INT_STABLE)) {
+ if (timeout == 0) {
+ pr_err("%s: Internal clock never "
+ "stabilised.\n", mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ return;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+}
+EXPORT_SYMBOL_GPL(sdhci_set_clock);
+
+static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+ u8 pwr = 0;
+
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ spin_unlock_irq(&host->lock);
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ spin_lock_irq(&host->lock);
+
+ if (mode != MMC_POWER_OFF)
+ sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+ else
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
+ return;
+ }
+
+ if (mode != MMC_POWER_OFF) {
+ switch (1 << vdd) {
+ case MMC_VDD_165_195:
+ pwr = SDHCI_POWER_180;
+ break;
+ case MMC_VDD_29_30:
+ case MMC_VDD_30_31:
+ pwr = SDHCI_POWER_300;
+ break;
+ case MMC_VDD_32_33:
+ case MMC_VDD_33_34:
+ pwr = SDHCI_POWER_330;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ if (host->pwr == pwr)
+ return;
+
+ host->pwr = pwr;
+
+ if (pwr == 0) {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
+ sdhci_runtime_pm_bus_off(host);
+ vdd = 0;
+ } else {
+ /*
+ * Spec says that we should clear the power reg before setting
+ * a new value. Some controllers don't seem to like this though.
+ */
+ if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
+ /*
+		 * At least the Marvell CaFe chip gets confused if we set the
+		 * voltage and turn on the power at the same time, so set the
+		 * voltage first.
+ */
+ if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+
+ pwr |= SDHCI_POWER_ON;
+
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+
+ if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
+ sdhci_runtime_pm_bus_on(host);
+
+ /*
+		 * Some controllers need an extra 10ms delay before they
+		 * can apply clock after applying power
+ */
+ if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
+ mdelay(10);
+ }
+}
+
+/*****************************************************************************\
+ * *
+ * MMC callbacks *
+ * *
+\*****************************************************************************/
+
+static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sdhci_host *host;
+ int present;
+ unsigned long flags;
+ u32 tuning_opcode;
+
+ host = mmc_priv(mmc);
+
+ sdhci_runtime_pm_get(host);
+
+ /* Firstly check card presence */
+ present = sdhci_do_get_cd(host);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ WARN_ON(host->mrq != NULL);
+
+#ifndef SDHCI_USE_LEDS_CLASS
+ sdhci_activate_led(host);
+#endif
+
+ /*
+ * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
+ * requests if Auto-CMD12 is enabled.
+ */
+ if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
+ if (mrq->stop) {
+ mrq->data->stop = NULL;
+ mrq->stop = NULL;
+ }
+ }
+
+ host->mrq = mrq;
+
+ if (!present || host->flags & SDHCI_DEVICE_DEAD) {
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+ } else {
+ u32 present_state;
+
+ present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ /*
+		 * Check if the re-tuning timer has already expired and there
+		 * is no ongoing data transfer and DAT0 is not busy. If so,
+		 * we need to execute the tuning procedure before sending the
+		 * command.
+ */
+ if ((host->flags & SDHCI_NEEDS_RETUNING) &&
+ !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) &&
+ (present_state & SDHCI_DATA_0_LVL_MASK)) {
+ if (mmc->card) {
+				/* eMMC uses CMD21 but SD and SDIO use CMD19 */
+ tuning_opcode =
+ mmc->card->type == MMC_TYPE_MMC ?
+ MMC_SEND_TUNING_BLOCK_HS200 :
+ MMC_SEND_TUNING_BLOCK;
+
+ /* Here we need to set the host->mrq to NULL,
+ * in case the pending finish_tasklet
+ * finishes it incorrectly.
+ */
+ host->mrq = NULL;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_execute_tuning(mmc, tuning_opcode);
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Restore original mmc_request structure */
+ host->mrq = mrq;
+ }
+ }
+
+ if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
+ sdhci_send_command(host, mrq->sbc);
+ else
+ sdhci_send_command(host, mrq->cmd);
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+void sdhci_set_bus_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ if (width == MMC_BUS_WIDTH_8) {
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ if (host->version >= SDHCI_SPEC_300)
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ } else {
+ if (host->version >= SDHCI_SPEC_300)
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ if (width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ else
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ }
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
+
+void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
+{
+ u16 ctrl_2;
+
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ if ((timing == MMC_TIMING_MMC_HS200) ||
+ (timing == MMC_TIMING_UHS_SDR104))
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ else if (timing == MMC_TIMING_UHS_SDR12)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ else if (timing == MMC_TIMING_UHS_SDR25)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ else if (timing == MMC_TIMING_UHS_SDR50)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ else if ((timing == MMC_TIMING_UHS_DDR50) ||
+ (timing == MMC_TIMING_MMC_DDR52))
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ else if (timing == MMC_TIMING_MMC_HS400)
+ ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
+
+static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
+{
+ unsigned long flags;
+ u8 ctrl;
+ struct mmc_host *mmc = host->mmc;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->flags & SDHCI_DEVICE_DEAD) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ if (!IS_ERR(mmc->supply.vmmc) &&
+ ios->power_mode == MMC_POWER_OFF)
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ return;
+ }
+
+ /*
+ * Reset the chip on each power off.
+ * Should clear out any weird states.
+ */
+ if (ios->power_mode == MMC_POWER_OFF) {
+ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ sdhci_reinit(host);
+ }
+
+ if (host->version >= SDHCI_SPEC_300 &&
+ (ios->power_mode == MMC_POWER_UP) &&
+ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
+ sdhci_enable_preset_value(host, false);
+
+ if (!ios->clock || ios->clock != host->clock) {
+ host->ops->set_clock(host, ios->clock);
+ host->clock = ios->clock;
+
+ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
+ host->clock) {
+ host->timeout_clk = host->mmc->actual_clock ?
+ host->mmc->actual_clock / 1000 :
+ host->clock / 1000;
+ host->mmc->max_busy_timeout =
+ host->ops->get_max_timeout_count ?
+ host->ops->get_max_timeout_count(host) :
+ 1 << 27;
+ host->mmc->max_busy_timeout /= host->timeout_clk;
+ }
+ }
+
+ sdhci_set_power(host, ios->power_mode, ios->vdd);
+
+ if (host->ops->platform_send_init_74_clocks)
+ host->ops->platform_send_init_74_clocks(host, ios->power_mode);
+
+ host->ops->set_bus_width(host, ios->bus_width);
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+ if ((ios->timing == MMC_TIMING_SD_HS ||
+ ios->timing == MMC_TIMING_MMC_HS)
+ && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
+ ctrl |= SDHCI_CTRL_HISPD;
+ else
+ ctrl &= ~SDHCI_CTRL_HISPD;
+
+ if (host->version >= SDHCI_SPEC_300) {
+ u16 clk, ctrl_2;
+
+ /* In case of UHS-I modes, set High Speed Enable */
+ if ((ios->timing == MMC_TIMING_MMC_HS400) ||
+ (ios->timing == MMC_TIMING_MMC_HS200) ||
+ (ios->timing == MMC_TIMING_MMC_DDR52) ||
+ (ios->timing == MMC_TIMING_UHS_SDR50) ||
+ (ios->timing == MMC_TIMING_UHS_SDR104) ||
+ (ios->timing == MMC_TIMING_UHS_DDR50) ||
+ (ios->timing == MMC_TIMING_UHS_SDR25))
+ ctrl |= SDHCI_CTRL_HISPD;
+
+ if (!host->preset_enabled) {
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ /*
+ * We only need to set Driver Strength if the
+ * preset value enable is not set.
+ */
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
+ if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
+ ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
+ else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
+ ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
+
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+ } else {
+ /*
+ * According to SDHC Spec v3.00, if the Preset Value
+ * Enable in the Host Control 2 register is set, we
+ * need to reset SD Clock Enable before changing High
+			 * Speed Enable to avoid generating clock glitches.
+ */
+
+ /* Reset SD Clock Enable */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+ /* Re-enable SD Clock */
+ host->ops->set_clock(host, host->clock);
+ }
+
+ /* Reset SD Clock Enable */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ host->ops->set_uhs_signaling(host, ios->timing);
+ host->timing = ios->timing;
+
+ if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
+ ((ios->timing == MMC_TIMING_UHS_SDR12) ||
+ (ios->timing == MMC_TIMING_UHS_SDR25) ||
+ (ios->timing == MMC_TIMING_UHS_SDR50) ||
+ (ios->timing == MMC_TIMING_UHS_SDR104) ||
+ (ios->timing == MMC_TIMING_UHS_DDR50))) {
+ u16 preset;
+
+ sdhci_enable_preset_value(host, true);
+ preset = sdhci_get_preset_value(host);
+ ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
+ >> SDHCI_PRESET_DRV_SHIFT;
+ }
+
+ /* Re-enable SD Clock */
+ host->ops->set_clock(host, host->clock);
+ } else
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+ /*
+ * Some (ENE) controllers go apeshit on some ios operation,
+ * signalling timeout and CRC errors even on CMD0. Resetting
+ * it on each ios seems to solve the problem.
+ */
+ if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
+ sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_runtime_pm_get(host);
+ sdhci_do_set_ios(host, ios);
+ sdhci_runtime_pm_put(host);
+}
+
+static int sdhci_do_get_cd(struct sdhci_host *host)
+{
+ int gpio_cd = mmc_gpio_get_cd(host->mmc);
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ return 0;
+
+ /* If polling/nonremovable, assume that the card is always present. */
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
+ (host->mmc->caps & MMC_CAP_NONREMOVABLE))
+ return 1;
+
+ /* Try slot gpio detect */
+ if (!IS_ERR_VALUE(gpio_cd))
+ return !!gpio_cd;
+
+ /* Host native card detect */
+ return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+}
+
+static int sdhci_get_cd(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int ret;
+
+ sdhci_runtime_pm_get(host);
+ ret = sdhci_do_get_cd(host);
+ sdhci_runtime_pm_put(host);
+ return ret;
+}
+
+static int sdhci_check_ro(struct sdhci_host *host)
+{
+ unsigned long flags;
+ int is_readonly;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ is_readonly = 0;
+ else if (host->ops->get_ro)
+ is_readonly = host->ops->get_ro(host);
+ else
+ is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
+ & SDHCI_WRITE_PROTECT);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+	/* This quirk needs to be replaced by a callback function later */
+ return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
+ !is_readonly : is_readonly;
+}
+
+#define SAMPLE_COUNT 5
+
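+/*
+ * For hosts with SDHCI_QUIRK_UNSTABLE_RO_DETECT, sample the write-protect
+ * state SAMPLE_COUNT times, 30 ms apart, and report read-only only once a
+ * majority (3 of 5) of the reads agree.
+ */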
+static int sdhci_do_get_ro(struct sdhci_host *host)
+{
+ int i, ro_count;
+
+ if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
+ return sdhci_check_ro(host);
+
+ ro_count = 0;
+ for (i = 0; i < SAMPLE_COUNT; i++) {
+ if (sdhci_check_ro(host)) {
+ if (++ro_count > SAMPLE_COUNT / 2)
+ return 1;
+ }
+ msleep(30);
+ }
+ return 0;
+}
+
+static void sdhci_hw_reset(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->ops && host->ops->hw_reset)
+ host->ops->hw_reset(host);
+}
+
+static int sdhci_get_ro(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int ret;
+
+ sdhci_runtime_pm_get(host);
+ ret = sdhci_do_get_ro(host);
+ sdhci_runtime_pm_put(host);
+ return ret;
+}
+
+static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
+{
+ if (!(host->flags & SDHCI_DEVICE_DEAD)) {
+ if (enable)
+ host->ier |= SDHCI_INT_CARD_INT;
+ else
+ host->ier &= ~SDHCI_INT_CARD_INT;
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ mmiowb();
+ }
+}
+
+static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ sdhci_runtime_pm_get(host);
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (enable)
+ host->flags |= SDHCI_SDIO_IRQ_ENABLED;
+ else
+ host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
+
+ sdhci_enable_sdio_irq_nolock(host, enable);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ sdhci_runtime_pm_put(host);
+}
+
+static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
+ struct mmc_ios *ios)
+{
+ struct mmc_host *mmc = host->mmc;
+ u16 ctrl;
+ int ret;
+
+ /*
+ * Signal Voltage Switching is only applicable for Host Controllers
+ * v3.00 and above.
+ */
+ if (host->version < SDHCI_SPEC_300)
+ return 0;
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
+ ctrl &= ~SDHCI_CTRL_VDD_180;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
+ 3600000);
+ if (ret) {
+ pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
+ mmc_hostname(mmc));
+ return -EIO;
+ }
+ }
+ /* Wait for 5ms */
+ usleep_range(5000, 5500);
+
+ /* 3.3V regulator output should be stable within 5 ms */
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (!(ctrl & SDHCI_CTRL_VDD_180))
+ return 0;
+
+		pr_warn("%s: 3.3V regulator output did not become stable\n",
+ mmc_hostname(mmc));
+
+ return -EAGAIN;
+ case MMC_SIGNAL_VOLTAGE_180:
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = regulator_set_voltage(mmc->supply.vqmmc,
+ 1700000, 1950000);
+ if (ret) {
+ pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
+ mmc_hostname(mmc));
+ return -EIO;
+ }
+ }
+
+ /*
+ * Enable 1.8V Signal Enable in the Host Control2
+ * register
+ */
+ ctrl |= SDHCI_CTRL_VDD_180;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+		/* Some controllers need to do more when switching */
+ if (host->ops->voltage_switch)
+ host->ops->voltage_switch(host);
+
+ /* 1.8V regulator output should be stable within 5 ms */
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (ctrl & SDHCI_CTRL_VDD_180)
+ return 0;
+
+		pr_warn("%s: 1.8V regulator output did not become stable\n",
+ mmc_hostname(mmc));
+
+ return -EAGAIN;
+ case MMC_SIGNAL_VOLTAGE_120:
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
+ 1300000);
+ if (ret) {
+ pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
+ mmc_hostname(mmc));
+ return -EIO;
+ }
+ }
+ return 0;
+ default:
+ /* No signal voltage switch required */
+ return 0;
+ }
+}
+
+static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int err;
+
+ if (host->version < SDHCI_SPEC_300)
+ return 0;
+ sdhci_runtime_pm_get(host);
+ err = sdhci_do_start_signal_voltage_switch(host, ios);
+ sdhci_runtime_pm_put(host);
+ return err;
+}
+
+static int sdhci_card_busy(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 present_state;
+
+ sdhci_runtime_pm_get(host);
+ /* Check whether DAT[3:0] is 0000 */
+ present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ sdhci_runtime_pm_put(host);
+
+ return !(present_state & SDHCI_DATA_LVL_MASK);
+}
+
+static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->flags |= SDHCI_HS400_TUNING;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return 0;
+}
+
+static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u16 ctrl;
+ int tuning_loop_counter = MAX_TUNING_LOOP;
+ int err = 0;
+ unsigned long flags;
+ unsigned int tuning_count = 0;
+ bool hs400_tuning;
+
+ sdhci_runtime_pm_get(host);
+ spin_lock_irqsave(&host->lock, flags);
+
+ hs400_tuning = host->flags & SDHCI_HS400_TUNING;
+ host->flags &= ~SDHCI_HS400_TUNING;
+
+ if (host->tuning_mode == SDHCI_TUNING_MODE_1)
+ tuning_count = host->tuning_count;
+
+ /*
+ * The Host Controller needs tuning only in case of SDR104 mode
+ * and for SDR50 mode when Use Tuning for SDR50 is set in the
+ * Capabilities register.
+ * If the Host Controller supports the HS200 mode then the
+ * tuning function has to be executed.
+ */
+ switch (host->timing) {
+ /* HS400 tuning is done in HS200 mode */
+ case MMC_TIMING_MMC_HS400:
+ err = -EINVAL;
+ goto out_unlock;
+
+ case MMC_TIMING_MMC_HS200:
+ /*
+ * Periodic re-tuning for HS400 is not expected to be needed, so
+ * disable it here.
+ */
+ if (hs400_tuning)
+ tuning_count = 0;
+ break;
+
+ case MMC_TIMING_UHS_SDR104:
+ break;
+
+ case MMC_TIMING_UHS_SDR50:
+ if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
+ host->flags & SDHCI_SDR104_NEEDS_TUNING)
+ break;
+ /* FALLTHROUGH */
+
+ default:
+ goto out_unlock;
+ }
+
+ if (host->ops->platform_execute_tuning) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ err = host->ops->platform_execute_tuning(host, opcode);
+ sdhci_runtime_pm_put(host);
+ return err;
+ }
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl |= SDHCI_CTRL_EXEC_TUNING;
+ if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
+ ctrl |= SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+ /*
+ * As per the Host Controller spec v3.00, tuning command
+ * generates Buffer Read Ready interrupt, so enable that.
+ *
+ * Note: The spec clearly says that when tuning sequence
+ * is being performed, the controller does not generate
+ * interrupts other than Buffer Read Ready interrupt. But
+ * to make sure we don't hit a controller bug, we _only_
+ * enable Buffer Read Ready interrupt here.
+ */
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
+
+ /*
+ * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
+ * of loops reaches 40 times or a timeout of 150ms occurs.
+ */
+ do {
+ struct mmc_command cmd = {0};
+ struct mmc_request mrq = {NULL};
+
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+ cmd.retries = 0;
+ cmd.data = NULL;
+ cmd.error = 0;
+
+ if (tuning_loop_counter-- == 0)
+ break;
+
+ mrq.cmd = &cmd;
+ host->mrq = &mrq;
+
+ /*
+ * In response to CMD19, the card sends 64 bytes of tuning
+ * block to the Host Controller. So we set the block size
+ * to 64 here.
+ */
+ if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
+ SDHCI_BLOCK_SIZE);
+ else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
+ SDHCI_BLOCK_SIZE);
+ } else {
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
+ SDHCI_BLOCK_SIZE);
+ }
+
+ /*
+ * The tuning block is sent by the card to the host controller.
+ * So we set the TRNS_READ bit in the Transfer Mode register.
+ * This also takes care of setting DMA Enable and Multi Block
+ * Select in the same register to 0.
+ */
+ sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
+
+ sdhci_send_command(host, &cmd);
+
+ host->cmd = NULL;
+ host->mrq = NULL;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ /* Wait for Buffer Read Ready interrupt */
+ wait_event_interruptible_timeout(host->buf_ready_int,
+ (host->tuning_done == 1),
+ msecs_to_jiffies(50));
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (!host->tuning_done) {
+ pr_info(DRIVER_NAME ": Timeout waiting for "
+ "Buffer Read Ready interrupt during tuning "
+ "procedure, falling back to fixed sampling "
+ "clock\n");
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl &= ~SDHCI_CTRL_TUNED_CLK;
+ ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+ err = -EIO;
+ goto out;
+ }
+
+ host->tuning_done = 0;
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ /* eMMC spec does not require a delay between tuning cycles */
+ if (opcode == MMC_SEND_TUNING_BLOCK)
+ mdelay(1);
+ } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
+
+ /*
+ * The Host Driver has exhausted the maximum number of loops allowed,
+ * so use fixed sampling frequency.
+ */
+ if (tuning_loop_counter < 0) {
+ ctrl &= ~SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ }
+ if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
+ pr_info(DRIVER_NAME ": Tuning procedure"
+ " failed, falling back to fixed sampling"
+ " clock\n");
+ err = -EIO;
+ }
+
+out:
+ host->flags &= ~SDHCI_NEEDS_RETUNING;
+
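+	/*
+	 * tuning_count is expressed in seconds (per the Capabilities
+	 * register encoding), so re-arm the re-tuning timer that many
+	 * seconds from now.
+	 */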
+ if (tuning_count) {
+ host->flags |= SDHCI_USING_RETUNING_TIMER;
+ mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
+ }
+
+ /*
+ * In case tuning fails, host controllers which support re-tuning can
+ * try tuning again at a later time, when the re-tuning timer expires.
+ * So for these controllers, we return 0. Since there might be other
+ * controllers who do not have this capability, we return error for
+ * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
+ * a retuning timer to do the retuning for the card.
+ */
+ if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
+ err = 0;
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+out_unlock:
+ spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_runtime_pm_put(host);
+
+ return err;
+}
+
+static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
+{
+ /* Host Controller v3.00 defines preset value registers */
+ if (host->version < SDHCI_SPEC_300)
+ return;
+
+ /*
+	 * We only enable or disable Preset Value if it is not already
+	 * enabled or disabled, respectively. Otherwise, we bail out.
+ */
+ if (host->preset_enabled != enable) {
+ u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ if (enable)
+ ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
+ else
+ ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
+
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+ if (enable)
+ host->flags |= SDHCI_PV_ENABLED;
+ else
+ host->flags &= ~SDHCI_PV_ENABLED;
+
+ host->preset_enabled = enable;
+ }
+}
+
+static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (host->flags & SDHCI_REQ_USE_DMA) {
+ if (data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ mrq->data->host_cookie = 0;
+ }
+}
+
+static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+ struct mmc_data *data,
+ struct sdhci_host_next *next)
+{
+ int sg_count;
+
+ if (!next && data->host_cookie &&
+ data->host_cookie != host->next_data.cookie) {
+ pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n",
+ __func__, data->host_cookie, host->next_data.cookie);
+ data->host_cookie = 0;
+ }
+
+ /* Check if next job is already prepared */
+ if (next ||
+ (!next && data->host_cookie != host->next_data.cookie)) {
+ sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ data->flags & MMC_DATA_WRITE ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ } else {
+ sg_count = host->next_data.sg_count;
+ host->next_data.sg_count = 0;
+ }
+
+ if (sg_count == 0)
+ return -EINVAL;
+
+ if (next) {
+ next->sg_count = sg_count;
+ data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
+ } else
+ host->sg_count = sg_count;
+
+ return sg_count;
+}
+
+static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (mrq->data->host_cookie) {
+ mrq->data->host_cookie = 0;
+ return;
+ }
+
+ if (host->flags & SDHCI_REQ_USE_DMA)
+ if (sdhci_pre_dma_transfer(host,
+ mrq->data,
+ &host->next_data) < 0)
+ mrq->data->host_cookie = 0;
+}
+
+static void sdhci_card_event(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int present;
+
+	/* First check if the client has provided its own card event callback */
+ if (host->ops->card_event)
+ host->ops->card_event(host);
+
+ present = sdhci_do_get_cd(host);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Check host->mrq first in case we are runtime suspended */
+ if (host->mrq && !present) {
+ pr_err("%s: Card removed during transfer!\n",
+ mmc_hostname(host->mmc));
+ pr_err("%s: Resetting controller.\n",
+ mmc_hostname(host->mmc));
+
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static const struct mmc_host_ops sdhci_ops = {
+ .request = sdhci_request,
+ .post_req = sdhci_post_req,
+ .pre_req = sdhci_pre_req,
+ .set_ios = sdhci_set_ios,
+ .get_cd = sdhci_get_cd,
+ .get_ro = sdhci_get_ro,
+ .hw_reset = sdhci_hw_reset,
+ .enable_sdio_irq = sdhci_enable_sdio_irq,
+ .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
+ .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
+ .execute_tuning = sdhci_execute_tuning,
+ .card_event = sdhci_card_event,
+ .card_busy = sdhci_card_busy,
+};
+
+/*****************************************************************************\
+ * *
+ * Tasklets *
+ * *
+\*****************************************************************************/
+
+static void sdhci_tasklet_finish(unsigned long param)
+{
+ struct sdhci_host *host;
+ unsigned long flags;
+ struct mmc_request *mrq;
+
+ host = (struct sdhci_host*)param;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /*
+ * If this tasklet gets rescheduled while running, it will
+ * be run again afterwards but without any active request.
+ */
+ if (!host->mrq) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ del_timer(&host->timer);
+
+ mrq = host->mrq;
+
+ /*
+ * The controller needs a reset of internal state machines
+ * upon error conditions.
+ */
+ if (!(host->flags & SDHCI_DEVICE_DEAD) &&
+ ((mrq->cmd && mrq->cmd->error) ||
+ (mrq->sbc && mrq->sbc->error) ||
+ (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
+ (mrq->data->stop && mrq->data->stop->error))) ||
+ (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
+
+ /* Some controllers need this kick or reset won't work here */
+ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
+ /* This is to force an update */
+ host->ops->set_clock(host, host->clock);
+
+ /* Spec says we should do both at the same time, but Ricoh
+ controllers do not like that. */
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ }
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+
+#ifndef SDHCI_USE_LEDS_CLASS
+ sdhci_deactivate_led(host);
+#endif
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mmc_request_done(host->mmc, mrq);
+ sdhci_runtime_pm_put(host);
+}
+
+static void sdhci_timeout_timer(unsigned long data)
+{
+ struct sdhci_host *host;
+ unsigned long flags;
+
+ host = (struct sdhci_host*)data;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->mrq) {
+ pr_err("%s: Timeout waiting for hardware "
+ "interrupt.\n", mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+
+ if (host->data) {
+ host->data->error = -ETIMEDOUT;
+ sdhci_finish_data(host);
+ } else {
+ if (host->cmd)
+ host->cmd->error = -ETIMEDOUT;
+ else
+ host->mrq->cmd->error = -ETIMEDOUT;
+
+ tasklet_schedule(&host->finish_tasklet);
+ }
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void sdhci_tuning_timer(unsigned long data)
+{
+ struct sdhci_host *host;
+ unsigned long flags;
+
+ host = (struct sdhci_host *)data;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->flags |= SDHCI_NEEDS_RETUNING;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*****************************************************************************\
+ * *
+ * Interrupt handling *
+ * *
+\*****************************************************************************/
+
+static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
+{
+ BUG_ON(intmask == 0);
+
+ if (!host->cmd) {
+ pr_err("%s: Got command interrupt 0x%08x even "
+ "though no command operation was in progress.\n",
+ mmc_hostname(host->mmc), (unsigned)intmask);
+ sdhci_dumpregs(host);
+ return;
+ }
+
+ if (intmask & SDHCI_INT_TIMEOUT)
+ host->cmd->error = -ETIMEDOUT;
+ else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
+ SDHCI_INT_INDEX))
+ host->cmd->error = -EILSEQ;
+
+ if (host->cmd->error) {
+ tasklet_schedule(&host->finish_tasklet);
+ return;
+ }
+
+ /*
+	 * The host can send an interrupt when the busy state has
+ * ended, allowing us to wait without wasting CPU cycles.
+ * Unfortunately this is overloaded on the "data complete"
+ * interrupt, so we need to take some care when handling
+ * it.
+ *
+ * Note: The 1.0 specification is a bit ambiguous about this
+ * feature so there might be some problems with older
+ * controllers.
+ */
+ if (host->cmd->flags & MMC_RSP_BUSY) {
+ if (host->cmd->data)
+ DBG("Cannot wait for busy signal when also "
+ "doing a data transfer");
+ else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
+ && !host->busy_handle) {
+			/* Mark that the command completed before busy ended */
+ host->busy_handle = 1;
+ return;
+ }
+
+ /* The controller does not support the end-of-busy IRQ,
+ * fall through and take the SDHCI_INT_RESPONSE */
+ } else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
+ host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
+ *mask &= ~SDHCI_INT_DATA_END;
+ }
+
+ if (intmask & SDHCI_INT_RESPONSE)
+ sdhci_finish_command(host);
+}
+
+#ifdef CONFIG_MMC_DEBUG
+static void sdhci_adma_show_error(struct sdhci_host *host)
+{
+ const char *name = mmc_hostname(host->mmc);
+ void *desc = host->adma_table;
+
+ sdhci_dumpregs(host);
+
+ while (true) {
+ struct sdhci_adma2_64_desc *dma_desc = desc;
+
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
+ name, desc, le32_to_cpu(dma_desc->addr_hi),
+ le32_to_cpu(dma_desc->addr_lo),
+ le16_to_cpu(dma_desc->len),
+ le16_to_cpu(dma_desc->cmd));
+ else
+ DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+ name, desc, le32_to_cpu(dma_desc->addr_lo),
+ le16_to_cpu(dma_desc->len),
+ le16_to_cpu(dma_desc->cmd));
+
+ desc += host->desc_sz;
+
+ if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
+ break;
+ }
+}
+#else
+static void sdhci_adma_show_error(struct sdhci_host *host) { }
+#endif
+
+static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
+{
+ u32 command;
+ BUG_ON(intmask == 0);
+
+ /* CMD19 generates _only_ Buffer Read Ready interrupt */
+ if (intmask & SDHCI_INT_DATA_AVAIL) {
+ command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
+ if (command == MMC_SEND_TUNING_BLOCK ||
+ command == MMC_SEND_TUNING_BLOCK_HS200) {
+ host->tuning_done = 1;
+ wake_up(&host->buf_ready_int);
+ return;
+ }
+ }
+
+ if (!host->data) {
+ /*
+ * The "data complete" interrupt is also used to
+ * indicate that a busy state has ended. See comment
+ * above in sdhci_cmd_irq().
+ */
+ if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
+ if (intmask & SDHCI_INT_DATA_TIMEOUT) {
+ host->cmd->error = -ETIMEDOUT;
+ tasklet_schedule(&host->finish_tasklet);
+ return;
+ }
+ if (intmask & SDHCI_INT_DATA_END) {
+ /*
+				 * Some cards deliver the busy-end interrupt
+				 * before the command has completed, so make
+				 * sure we do things in the proper order.
+ */
+ if (host->busy_handle)
+ sdhci_finish_command(host);
+ else
+ host->busy_handle = 1;
+ return;
+ }
+ }
+
+ pr_err("%s: Got data interrupt 0x%08x even "
+ "though no data operation was in progress.\n",
+ mmc_hostname(host->mmc), (unsigned)intmask);
+ sdhci_dumpregs(host);
+
+ return;
+ }
+
+ if (intmask & SDHCI_INT_DATA_TIMEOUT)
+ host->data->error = -ETIMEDOUT;
+ else if (intmask & SDHCI_INT_DATA_END_BIT)
+ host->data->error = -EILSEQ;
+ else if ((intmask & SDHCI_INT_DATA_CRC) &&
+ SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
+ != MMC_BUS_TEST_R)
+ host->data->error = -EILSEQ;
+ else if (intmask & SDHCI_INT_ADMA_ERROR) {
+ pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
+ sdhci_adma_show_error(host);
+ host->data->error = -EIO;
+ if (host->ops->adma_workaround)
+ host->ops->adma_workaround(host, intmask);
+ }
+
+ if (host->data->error)
+ sdhci_finish_data(host);
+ else {
+ if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
+ sdhci_transfer_pio(host);
+
+ /*
+ * We currently don't do anything fancy with DMA
+ * boundaries, but as we can't disable the feature
+ * we need to at least restart the transfer.
+ *
+ * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
+ * should return a valid address to continue from, but as
+ * some controllers are faulty, don't trust them.
+ */
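+		/*
+		 * Worked example (hypothetical addresses): with the default
+		 * 512 KiB boundary, dmastart = 0x10000000 and 0x2345 bytes
+		 * transferred give dmanow = 0x10002345, which is rounded up
+		 * to the next boundary at 0x10080000 before restarting.
+		 */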
+ if (intmask & SDHCI_INT_DMA_END) {
+ u32 dmastart, dmanow;
+ dmastart = sg_dma_address(host->data->sg);
+ dmanow = dmastart + host->data->bytes_xfered;
+ /*
+ * Force update to the next DMA block boundary.
+ */
+ dmanow = (dmanow &
+ ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
+ SDHCI_DEFAULT_BOUNDARY_SIZE;
+ host->data->bytes_xfered = dmanow - dmastart;
+ DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
+ " next 0x%08x\n",
+ mmc_hostname(host->mmc), dmastart,
+ host->data->bytes_xfered, dmanow);
+ sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
+ }
+
+ if (intmask & SDHCI_INT_DATA_END) {
+ if (host->cmd) {
+ /*
+ * Data managed to finish before the
+ * command completed. Make sure we do
+ * things in the proper order.
+ */
+ host->data_early = 1;
+ } else {
+ sdhci_finish_data(host);
+ }
+ }
+ }
+}
+
+static irqreturn_t sdhci_irq(int irq, void *dev_id)
+{
+ irqreturn_t result = IRQ_NONE;
+ struct sdhci_host *host = dev_id;
+ u32 intmask, mask, unexpected = 0;
+ int max_loops = 16;
+
+ spin_lock(&host->lock);
+
+ if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
+ spin_unlock(&host->lock);
+ return IRQ_NONE;
+ }
+
+ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+ if (!intmask || intmask == 0xffffffff) {
+ result = IRQ_NONE;
+ goto out;
+ }
+
+ do {
+ /* Clear selected interrupts. */
+ mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
+ SDHCI_INT_BUS_POWER);
+ sdhci_writel(host, mask, SDHCI_INT_STATUS);
+
+ DBG("*** %s got interrupt: 0x%08x\n",
+ mmc_hostname(host->mmc), intmask);
+
+ if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+ u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
+
+ /*
+			 * There is an observation on i.MX eSDHC: the INSERT
+			 * bit will be immediately set again when it gets
+			 * cleared if a card is inserted. We have to mask
+			 * the irq to prevent an interrupt storm that would
+			 * freeze the system. The REMOVE bit runs into the
+			 * same situation.
+			 *
+			 * More testing is needed here to ensure it works
+ * for other platforms though.
+ */
+ host->ier &= ~(SDHCI_INT_CARD_INSERT |
+ SDHCI_INT_CARD_REMOVE);
+ host->ier |= present ? SDHCI_INT_CARD_REMOVE :
+ SDHCI_INT_CARD_INSERT;
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+ sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
+ SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+
+ host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
+ SDHCI_INT_CARD_REMOVE);
+ result = IRQ_WAKE_THREAD;
+ }
+
+ if (intmask & SDHCI_INT_CMD_MASK)
+ sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
+ &intmask);
+
+ if (intmask & SDHCI_INT_DATA_MASK)
+ sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+
+ if (intmask & SDHCI_INT_BUS_POWER)
+ pr_err("%s: Card is consuming too much power!\n",
+ mmc_hostname(host->mmc));
+
+ if (intmask & SDHCI_INT_CARD_INT) {
+ sdhci_enable_sdio_irq_nolock(host, false);
+ host->thread_isr |= SDHCI_INT_CARD_INT;
+ result = IRQ_WAKE_THREAD;
+ }
+
+ intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
+ SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
+ SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
+ SDHCI_INT_CARD_INT);
+
+ if (intmask) {
+ unexpected |= intmask;
+ sdhci_writel(host, intmask, SDHCI_INT_STATUS);
+ }
+
+ if (result == IRQ_NONE)
+ result = IRQ_HANDLED;
+
+ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+ } while (intmask && --max_loops);
+out:
+ spin_unlock(&host->lock);
+
+ if (unexpected) {
+ pr_err("%s: Unexpected interrupt 0x%08x.\n",
+ mmc_hostname(host->mmc), unexpected);
+ sdhci_dumpregs(host);
+ }
+
+ return result;
+}
+
+static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
+{
+ struct sdhci_host *host = dev_id;
+ unsigned long flags;
+ u32 isr;
+
+ spin_lock_irqsave(&host->lock, flags);
+ isr = host->thread_isr;
+ host->thread_isr = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+ sdhci_card_event(host->mmc);
+ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+ }
+
+ if (isr & SDHCI_INT_CARD_INT) {
+ sdio_run_irqs(host->mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
+ sdhci_enable_sdio_irq_nolock(host, true);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
+ return isr ? IRQ_HANDLED : IRQ_NONE;
+}
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+static irqreturn_t sdhci_rt_irq(int irq, void *dev_id)
+{
+ irqreturn_t ret;
+
+ local_bh_disable();
+ ret = sdhci_irq(irq, dev_id);
+ local_bh_enable();
+ if (ret == IRQ_WAKE_THREAD)
+ ret = sdhci_thread_irq(irq, dev_id);
+ return ret;
+}
+#endif
+
+static int sdhci_req_irq(struct sdhci_host *host)
+{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ return request_threaded_irq(host->irq, NULL, sdhci_rt_irq,
+ IRQF_SHARED, mmc_hostname(host->mmc), host);
+#else
+ return request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
+ IRQF_SHARED, mmc_hostname(host->mmc), host);
+#endif
+}
+
+/*****************************************************************************\
+ * *
+ * Suspend/resume *
+ * *
+\*****************************************************************************/
+
+#ifdef CONFIG_PM
+void sdhci_enable_irq_wakeups(struct sdhci_host *host)
+{
+ u8 val;
+ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
+ | SDHCI_WAKE_ON_INT;
+
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+	val |= mask;
+	/* Avoid spurious wakeups */
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
+ sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+}
+EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
+
+static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
+{
+ u8 val;
+ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
+ | SDHCI_WAKE_ON_INT;
+
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+ val &= ~mask;
+ sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+}
+
+int sdhci_suspend_host(struct sdhci_host *host)
+{
+ sdhci_disable_card_detection(host);
+
+ /* Disable tuning since we are suspending */
+ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
+ del_timer_sync(&host->tuning_timer);
+ host->flags &= ~SDHCI_NEEDS_RETUNING;
+ }
+
+ if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ host->ier = 0;
+ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
+ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ free_irq(host->irq, host);
+ } else {
+ sdhci_enable_irq_wakeups(host);
+ enable_irq_wake(host->irq);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_suspend_host);
+
+int sdhci_resume_host(struct sdhci_host *host)
+{
+ int ret = 0;
+
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+ if (host->ops->enable_dma)
+ host->ops->enable_dma(host);
+ }
+
+ if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ ret = sdhci_req_irq(host);
+ if (ret)
+ return ret;
+ } else {
+ sdhci_disable_irq_wakeups(host);
+ disable_irq_wake(host->irq);
+ }
+
+ if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
+ (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
+ /* Card keeps power but host controller does not */
+ sdhci_init(host, 0);
+ host->pwr = 0;
+ host->clock = 0;
+ sdhci_do_set_ios(host, &host->mmc->ios);
+ } else {
+ sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
+ mmiowb();
+ }
+
+ sdhci_enable_card_detection(host);
+
+ /* Set the re-tuning expiration flag */
+ if (host->flags & SDHCI_USING_RETUNING_TIMER)
+ host->flags |= SDHCI_NEEDS_RETUNING;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdhci_resume_host);
+
+static int sdhci_runtime_pm_get(struct sdhci_host *host)
+{
+ return pm_runtime_get_sync(host->mmc->parent);
+}
+
+static int sdhci_runtime_pm_put(struct sdhci_host *host)
+{
+ pm_runtime_mark_last_busy(host->mmc->parent);
+ return pm_runtime_put_autosuspend(host->mmc->parent);
+}
+
+static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+{
+ if (host->runtime_suspended || host->bus_on)
+ return;
+ host->bus_on = true;
+ pm_runtime_get_noresume(host->mmc->parent);
+}
+
+static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
+{
+ if (host->runtime_suspended || !host->bus_on)
+ return;
+ host->bus_on = false;
+ pm_runtime_put_noidle(host->mmc->parent);
+}
+
+int sdhci_runtime_suspend_host(struct sdhci_host *host)
+{
+ unsigned long flags;
+
+ /* Disable tuning since we are suspending */
+ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
+ del_timer_sync(&host->tuning_timer);
+ host->flags &= ~SDHCI_NEEDS_RETUNING;
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->ier &= SDHCI_INT_CARD_INT;
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ synchronize_hardirq(host->irq);
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
+
+int sdhci_runtime_resume_host(struct sdhci_host *host)
+{
+ unsigned long flags;
+ int host_flags = host->flags;
+
+ if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+ if (host->ops->enable_dma)
+ host->ops->enable_dma(host);
+ }
+
+ sdhci_init(host, 0);
+
+ /* Force clock and power re-program */
+ host->pwr = 0;
+ host->clock = 0;
+ sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
+ sdhci_do_set_ios(host, &host->mmc->ios);
+
+ if ((host_flags & SDHCI_PV_ENABLED) &&
+ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_enable_preset_value(host, true);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
+ /* Set the re-tuning expiration flag */
+ if (host->flags & SDHCI_USING_RETUNING_TIMER)
+ host->flags |= SDHCI_NEEDS_RETUNING;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->runtime_suspended = false;
+
+ /* Enable SDIO IRQ */
+ if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
+ sdhci_enable_sdio_irq_nolock(host, true);
+
+ /* Enable Card Detection */
+ sdhci_enable_card_detection(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
+
+#endif /* CONFIG_PM */
+
+/*****************************************************************************\
+ * *
+ * Device allocation/registration *
+ * *
+\*****************************************************************************/
+
+struct sdhci_host *sdhci_alloc_host(struct device *dev,
+ size_t priv_size)
+{
+ struct mmc_host *mmc;
+ struct sdhci_host *host;
+
+ WARN_ON(dev == NULL);
+
+ mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
+ if (!mmc)
+ return ERR_PTR(-ENOMEM);
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ return host;
+}
+EXPORT_SYMBOL_GPL(sdhci_alloc_host);
+
+int sdhci_add_host(struct sdhci_host *host)
+{
+ struct mmc_host *mmc;
+ u32 caps[2] = {0, 0};
+ u32 max_current_caps;
+ unsigned int ocr_avail;
+ unsigned int override_timeout_clk;
+ int ret;
+
+ WARN_ON(host == NULL);
+ if (host == NULL)
+ return -EINVAL;
+
+ mmc = host->mmc;
+
+ if (debug_quirks)
+ host->quirks = debug_quirks;
+ if (debug_quirks2)
+ host->quirks2 = debug_quirks2;
+
+ override_timeout_clk = host->timeout_clk;
+
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
+ host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
+ host->version = (host->version & SDHCI_SPEC_VER_MASK)
+ >> SDHCI_SPEC_VER_SHIFT;
+ if (host->version > SDHCI_SPEC_300) {
+		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
+		       mmc_hostname(mmc), host->version);
+ }
+
+ caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
+ sdhci_readl(host, SDHCI_CAPABILITIES);
+
+ if (host->version >= SDHCI_SPEC_300)
+ caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
+ host->caps1 :
+ sdhci_readl(host, SDHCI_CAPABILITIES_1);
+
+ if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
+ host->flags |= SDHCI_USE_SDMA;
+ else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
+ DBG("Controller doesn't have SDMA capability\n");
+ else
+ host->flags |= SDHCI_USE_SDMA;
+
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
+ (host->flags & SDHCI_USE_SDMA)) {
+ DBG("Disabling DMA as it is marked broken\n");
+ host->flags &= ~SDHCI_USE_SDMA;
+ }
+
+ if ((host->version >= SDHCI_SPEC_200) &&
+ (caps[0] & SDHCI_CAN_DO_ADMA2))
+ host->flags |= SDHCI_USE_ADMA;
+
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
+ (host->flags & SDHCI_USE_ADMA)) {
+ DBG("Disabling ADMA as it is marked broken\n");
+ host->flags &= ~SDHCI_USE_ADMA;
+ }
+
+ /*
+ * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
+ * and *must* do 64-bit DMA. A driver has the opportunity to change
+ * that during the first call to ->enable_dma(). Similarly
+ * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
+ * implement.
+ */
+ if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT)
+ host->flags |= SDHCI_USE_64_BIT_DMA;
+
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+ if (host->ops->enable_dma) {
+ if (host->ops->enable_dma(host)) {
+ pr_warn("%s: No suitable DMA available - falling back to PIO\n",
+ mmc_hostname(mmc));
+ host->flags &=
+ ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
+ }
+ }
+ }
+
+ /* SDMA does not support 64-bit DMA */
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ host->flags &= ~SDHCI_USE_SDMA;
+
+ if (host->flags & SDHCI_USE_ADMA) {
+ /*
+ * The DMA descriptor table size is calculated as the maximum
+ * number of segments times 2, to allow for an alignment
+ * descriptor for each segment, plus 1 for a nop end descriptor,
+		 * all multiplied by the descriptor size.
+ */
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
+ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
+ SDHCI_ADMA2_64_DESC_SZ;
+ host->align_buffer_sz = SDHCI_MAX_SEGS *
+ SDHCI_ADMA2_64_ALIGN;
+ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
+ host->align_sz = SDHCI_ADMA2_64_ALIGN;
+ host->align_mask = SDHCI_ADMA2_64_ALIGN - 1;
+ } else {
+ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
+ SDHCI_ADMA2_32_DESC_SZ;
+ host->align_buffer_sz = SDHCI_MAX_SEGS *
+ SDHCI_ADMA2_32_ALIGN;
+ host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
+ host->align_sz = SDHCI_ADMA2_32_ALIGN;
+ host->align_mask = SDHCI_ADMA2_32_ALIGN - 1;
+ }
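+		/*
+		 * Worked example (illustrative): with SDHCI_MAX_SEGS == 128,
+		 * the 64-bit case gives adma_table_sz = (128 * 2 + 1) * 12 =
+		 * 3084 bytes and align_buffer_sz = 128 * 8 = 1024 bytes.
+		 */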
+ host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
+ host->adma_table_sz,
+ &host->adma_addr,
+ GFP_KERNEL);
+ host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
+ if (!host->adma_table || !host->align_buffer) {
+ dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
+ host->adma_table, host->adma_addr);
+ kfree(host->align_buffer);
+ pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
+ mmc_hostname(mmc));
+ host->flags &= ~SDHCI_USE_ADMA;
+ host->adma_table = NULL;
+ host->align_buffer = NULL;
+ } else if (host->adma_addr & host->align_mask) {
+ pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
+ mmc_hostname(mmc));
+ host->flags &= ~SDHCI_USE_ADMA;
+ dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
+ host->adma_table, host->adma_addr);
+ kfree(host->align_buffer);
+ host->adma_table = NULL;
+ host->align_buffer = NULL;
+ }
+ }
+
+ /*
+ * If we use DMA, then it's up to the caller to set the DMA
+ * mask, but PIO does not need the hw shim so we set a new
+ * mask here in that case.
+ */
+ if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
+ host->dma_mask = DMA_BIT_MASK(64);
+ mmc_dev(mmc)->dma_mask = &host->dma_mask;
+ }
+
+ if (host->version >= SDHCI_SPEC_300)
+ host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
+ >> SDHCI_CLOCK_BASE_SHIFT;
+ else
+ host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
+ >> SDHCI_CLOCK_BASE_SHIFT;
+
+ host->max_clk *= 1000000;
+ if (host->max_clk == 0 || host->quirks &
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
+ if (!host->ops->get_max_clock) {
+			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
+			       mmc_hostname(mmc));
+ return -ENODEV;
+ }
+ host->max_clk = host->ops->get_max_clock(host);
+ }
+
+ host->next_data.cookie = 1;
+ /*
+ * In case of Host Controller v3.00, find out whether clock
+ * multiplier is supported.
+ */
+ host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
+ SDHCI_CLOCK_MUL_SHIFT;
+
+ /*
+ * In case the value in Clock Multiplier is 0, then programmable
+ * clock mode is not supported, otherwise the actual clock
+ * multiplier is one more than the value of Clock Multiplier
+ * in the Capabilities Register.
+ */
+ if (host->clk_mul)
+ host->clk_mul += 1;
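+	/*
+	 * For example (illustrative): a Clock Multiplier field of 0x03
+	 * yields an actual multiplier of 4, while a field of 0 leaves
+	 * clk_mul at 0 and programmable clock mode unsupported.
+	 */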
+
+ /*
+ * Set host parameters.
+ */
+ mmc->ops = &sdhci_ops;
+ mmc->f_max = host->max_clk;
+ if (host->ops->get_min_clock)
+ mmc->f_min = host->ops->get_min_clock(host);
+ else if (host->version >= SDHCI_SPEC_300) {
+ if (host->clk_mul) {
+ mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
+ mmc->f_max = host->max_clk * host->clk_mul;
+ } else
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
+ } else
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
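+	/*
+	 * Worked example (illustrative): a v3.00 host with a 200 MHz base
+	 * clock and no multiplier gets f_max = 200 MHz and f_min =
+	 * 200000000 / 2046 ~= 97.8 kHz; a v2.00 host divides by at most 256.
+	 */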
+
+ if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
+ host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
+ SDHCI_TIMEOUT_CLK_SHIFT;
+ if (host->timeout_clk == 0) {
+ if (host->ops->get_timeout_clock) {
+ host->timeout_clk =
+ host->ops->get_timeout_clock(host);
+ } else {
+ pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
+ mmc_hostname(mmc));
+ return -ENODEV;
+ }
+ }
+
+ if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
+ host->timeout_clk *= 1000;
+
+ mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
+ host->ops->get_max_timeout_count(host) : 1 << 27;
+ mmc->max_busy_timeout /= host->timeout_clk;
+ }
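+	/*
+	 * Illustrative example: a 48 MHz timeout clock (timeout_clk ==
+	 * 48000 after the kHz conversion above) with the default 1 << 27
+	 * count gives max_busy_timeout = 134217728 / 48000 ~= 2796 ms.
+	 */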
+
+ if (override_timeout_clk)
+ host->timeout_clk = override_timeout_clk;
+
+ mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
+ if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
+ host->flags |= SDHCI_AUTO_CMD12;
+
+ /* Auto-CMD23 stuff only works in ADMA or PIO. */
+ if ((host->version >= SDHCI_SPEC_300) &&
+ ((host->flags & SDHCI_USE_ADMA) ||
+ !(host->flags & SDHCI_USE_SDMA)) &&
+ !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
+ host->flags |= SDHCI_AUTO_CMD23;
+ DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
+ } else {
+ DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
+ }
+
+ /*
+ * A controller may support 8-bit width, but the board itself
+ * might not have the pins brought out. Boards that support
+ * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
+ * their platform code before calling sdhci_add_host(), and we
+ * won't assume 8-bit width for hosts without that CAP.
+ */
+ if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
+ mmc->caps &= ~MMC_CAP_CMD23;
+
+ if (caps[0] & SDHCI_CAN_DO_HISPD)
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
+
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
+ !(mmc->caps & MMC_CAP_NONREMOVABLE))
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+
+ /* If there are external regulators, get them */
+ if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
+ 1950000))
+ caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
+ SDHCI_SUPPORT_SDR50 |
+ SDHCI_SUPPORT_DDR50);
+ if (ret) {
+ pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
+ mmc_hostname(mmc), ret);
+ mmc->supply.vqmmc = ERR_PTR(-EINVAL);
+ }
+ }
+
+ if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
+ caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
+ SDHCI_SUPPORT_DDR50);
+
+ /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
+ if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
+ SDHCI_SUPPORT_DDR50))
+ mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
+
+	/* SDR104 support also implies SDR50 support */
+ if (caps[1] & SDHCI_SUPPORT_SDR104) {
+ mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
+ /* SD3.0: SDR104 is supported so (for eMMC) the caps2
+ * field can be promoted to support HS200.
+ */
+ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
+ mmc->caps2 |= MMC_CAP2_HS200;
+ } else if (caps[1] & SDHCI_SUPPORT_SDR50)
+ mmc->caps |= MMC_CAP_UHS_SDR50;
+
+ if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
+ (caps[1] & SDHCI_SUPPORT_HS400))
+ mmc->caps2 |= MMC_CAP2_HS400;
+
+ if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
+ (IS_ERR(mmc->supply.vqmmc) ||
+ !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
+ 1300000)))
+ mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
+
+ if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
+ !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
+ mmc->caps |= MMC_CAP_UHS_DDR50;
+
+ /* Does the host need tuning for SDR50? */
+ if (caps[1] & SDHCI_USE_SDR50_TUNING)
+ host->flags |= SDHCI_SDR50_NEEDS_TUNING;
+
+ /* Does the host need tuning for SDR104 / HS200? */
+ if (mmc->caps2 & MMC_CAP2_HS200)
+ host->flags |= SDHCI_SDR104_NEEDS_TUNING;
+
+ /* Driver Type(s) (A, C, D) supported by the host */
+ if (caps[1] & SDHCI_DRIVER_TYPE_A)
+ mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
+ if (caps[1] & SDHCI_DRIVER_TYPE_C)
+ mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
+ if (caps[1] & SDHCI_DRIVER_TYPE_D)
+ mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
+
+ /* Initial value for re-tuning timer count */
+ host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
+ SDHCI_RETUNING_TIMER_COUNT_SHIFT;
+
+ /*
+ * In case Re-tuning Timer is not disabled, the actual value of
+ * re-tuning timer will be 2 ^ (n - 1).
+ */
+ if (host->tuning_count)
+ host->tuning_count = 1 << (host->tuning_count - 1);
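+	/*
+	 * Illustrative example: a Re-tuning Timer Count field of 4 gives
+	 * 1 << (4 - 1) = 8, i.e. an 8 second re-tuning interval per the
+	 * SD Host Controller spec.
+	 */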
+
+ /* Re-tuning mode supported by the Host Controller */
+ host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
+ SDHCI_RETUNING_MODE_SHIFT;
+
+ ocr_avail = 0;
+
+ /*
+ * According to SD Host Controller spec v3.00, if the Host System
+ * can afford more than 150mA, Host Driver should set XPC to 1. Also
+ * the value is meaningful only if Voltage Support in the Capabilities
+ * register is set. The actual current value is 4 times the register
+ * value.
+ */
+ max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
+ if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
+		int curr = regulator_get_current_limit(mmc->supply.vmmc);
+
+		if (curr > 0) {
+ /* convert to SDHCI_MAX_CURRENT format */
+ curr = curr/1000; /* convert to mA */
+ curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
+
+ curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
+ max_current_caps =
+ (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
+ (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
+ (curr << SDHCI_MAX_CURRENT_180_SHIFT);
+ }
+ }
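+	/*
+	 * Worked example (illustrative): a regulator limit of 800000 uA
+	 * becomes 800 mA, then 800 / 4 = 200 register units (capped at
+	 * SDHCI_MAX_CURRENT_LIMIT), replicated into the 3.3V, 3.0V and
+	 * 1.8V fields.
+	 */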
+
+ if (caps[0] & SDHCI_CAN_VDD_330) {
+ ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
+
+ mmc->max_current_330 = ((max_current_caps &
+ SDHCI_MAX_CURRENT_330_MASK) >>
+ SDHCI_MAX_CURRENT_330_SHIFT) *
+ SDHCI_MAX_CURRENT_MULTIPLIER;
+ }
+ if (caps[0] & SDHCI_CAN_VDD_300) {
+ ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
+
+ mmc->max_current_300 = ((max_current_caps &
+ SDHCI_MAX_CURRENT_300_MASK) >>
+ SDHCI_MAX_CURRENT_300_SHIFT) *
+ SDHCI_MAX_CURRENT_MULTIPLIER;
+ }
+ if (caps[0] & SDHCI_CAN_VDD_180) {
+ ocr_avail |= MMC_VDD_165_195;
+
+ mmc->max_current_180 = ((max_current_caps &
+ SDHCI_MAX_CURRENT_180_MASK) >>
+ SDHCI_MAX_CURRENT_180_SHIFT) *
+ SDHCI_MAX_CURRENT_MULTIPLIER;
+ }
+
+ /* If OCR set by external regulators, use it instead */
+ if (mmc->ocr_avail)
+ ocr_avail = mmc->ocr_avail;
+
+ if (host->ocr_mask)
+ ocr_avail &= host->ocr_mask;
+
+ mmc->ocr_avail = ocr_avail;
+ mmc->ocr_avail_sdio = ocr_avail;
+ if (host->ocr_avail_sdio)
+ mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
+ mmc->ocr_avail_sd = ocr_avail;
+ if (host->ocr_avail_sd)
+ mmc->ocr_avail_sd &= host->ocr_avail_sd;
+ else /* normal SD controllers don't support 1.8V */
+ mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
+ mmc->ocr_avail_mmc = ocr_avail;
+ if (host->ocr_avail_mmc)
+ mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
+
+ if (mmc->ocr_avail == 0) {
+		pr_err("%s: Hardware doesn't report any supported voltages.\n",
+		       mmc_hostname(mmc));
+ return -ENODEV;
+ }
+
+ spin_lock_init(&host->lock);
+
+ /*
+ * Maximum number of segments. Depends on if the hardware
+ * can do scatter/gather or not.
+ */
+ if (host->flags & SDHCI_USE_ADMA)
+ mmc->max_segs = SDHCI_MAX_SEGS;
+ else if (host->flags & SDHCI_USE_SDMA)
+ mmc->max_segs = 1;
+ else /* PIO */
+ mmc->max_segs = SDHCI_MAX_SEGS;
+
+	/*
+	 * Maximum request size in bytes. Limited by the SDMA boundary
+	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but
+	 * this is less anyway.
+	 */
+ mmc->max_req_size = 524288;
+
+ /*
+ * Maximum segment size. Could be one segment with the maximum number
+ * of bytes. When doing hardware scatter/gather, each entry cannot
+ * be larger than 64 KiB though.
+ */
+ if (host->flags & SDHCI_USE_ADMA) {
+ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
+ mmc->max_seg_size = 65535;
+ else
+ mmc->max_seg_size = 65536;
+ } else {
+ mmc->max_seg_size = mmc->max_req_size;
+ }
+
+ /*
+ * Maximum block size. This varies from controller to controller and
+ * is specified in the capabilities register.
+ */
+ if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
+ mmc->max_blk_size = 2;
+ } else {
+ mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
+ SDHCI_MAX_BLOCK_SHIFT;
+ if (mmc->max_blk_size >= 3) {
+ pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
+ mmc_hostname(mmc));
+ mmc->max_blk_size = 0;
+ }
+ }
+
+ mmc->max_blk_size = 512 << mmc->max_blk_size;
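+	/*
+	 * Illustrative example: a Max Block Length field of 2 yields
+	 * 512 << 2 = 2048 bytes; the quirk above forces the same result.
+	 */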
+
+ /*
+ * Maximum block count.
+ */
+ mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
+
+ /*
+ * Init tasklets.
+ */
+ tasklet_init(&host->finish_tasklet,
+ sdhci_tasklet_finish, (unsigned long)host);
+
+ setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
+
+ init_waitqueue_head(&host->buf_ready_int);
+
+ if (host->version >= SDHCI_SPEC_300) {
+ /* Initialize re-tuning timer */
+ init_timer(&host->tuning_timer);
+ host->tuning_timer.data = (unsigned long)host;
+ host->tuning_timer.function = sdhci_tuning_timer;
+ }
+
+ sdhci_init(host, 0);
+
+ ret = sdhci_req_irq(host);
+ if (ret) {
+ pr_err("%s: Failed to request IRQ %d: %d\n",
+ mmc_hostname(mmc), host->irq, ret);
+ goto untasklet;
+ }
+
+#ifdef CONFIG_MMC_DEBUG
+ sdhci_dumpregs(host);
+#endif
+
+#ifdef SDHCI_USE_LEDS_CLASS
+ snprintf(host->led_name, sizeof(host->led_name),
+ "%s::", mmc_hostname(mmc));
+ host->led.name = host->led_name;
+ host->led.brightness = LED_OFF;
+ host->led.default_trigger = mmc_hostname(mmc);
+ host->led.brightness_set = sdhci_led_control;
+
+ ret = led_classdev_register(mmc_dev(mmc), &host->led);
+ if (ret) {
+ pr_err("%s: Failed to register LED device: %d\n",
+ mmc_hostname(mmc), ret);
+ goto reset;
+ }
+#endif
+
+ mmiowb();
+
+ mmc_add_host(mmc);
+
+ pr_info("%s: SDHCI controller on %s [%s] using %s\n",
+ mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+ (host->flags & SDHCI_USE_ADMA) ?
+ (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
+ (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
+
+ sdhci_enable_card_detection(host);
+
+ return 0;
+
+#ifdef SDHCI_USE_LEDS_CLASS
+reset:
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
+ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ free_irq(host->irq, host);
+#endif
+untasklet:
+ tasklet_kill(&host->finish_tasklet);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdhci_add_host);
+
+void sdhci_remove_host(struct sdhci_host *host, int dead)
+{
+ struct mmc_host *mmc = host->mmc;
+ unsigned long flags;
+
+ if (dead) {
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->flags |= SDHCI_DEVICE_DEAD;
+
+ if (host->mrq) {
+			pr_err("%s: Controller removed during transfer!\n",
+			       mmc_hostname(mmc));
+
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
+ sdhci_disable_card_detection(host);
+
+ mmc_remove_host(mmc);
+
+#ifdef SDHCI_USE_LEDS_CLASS
+ led_classdev_unregister(&host->led);
+#endif
+
+ if (!dead)
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
+ sdhci_writel(host, 0, SDHCI_INT_ENABLE);
+ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ free_irq(host->irq, host);
+
+ del_timer_sync(&host->timer);
+
+ tasklet_kill(&host->finish_tasklet);
+
+ if (!IS_ERR(mmc->supply.vqmmc))
+ regulator_disable(mmc->supply.vqmmc);
+
+ if (host->adma_table)
+ dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
+ host->adma_table, host->adma_addr);
+ kfree(host->align_buffer);
+
+ host->adma_table = NULL;
+ host->align_buffer = NULL;
+}
+EXPORT_SYMBOL_GPL(sdhci_remove_host);
+
+void sdhci_free_host(struct sdhci_host *host)
+{
+ mmc_free_host(host->mmc);
+}
+EXPORT_SYMBOL_GPL(sdhci_free_host);
+
+/*****************************************************************************\
+ * *
+ * Driver init/exit *
+ * *
+\*****************************************************************************/
+
+static int __init sdhci_drv_init(void)
+{
+ pr_info(DRIVER_NAME
+ ": Secure Digital Host Controller Interface driver\n");
+ pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
+
+ return 0;
+}
+
+static void __exit sdhci_drv_exit(void)
+{
+}
+
+module_init(sdhci_drv_init);
+module_exit(sdhci_drv_exit);
+
+module_param(debug_quirks, uint, 0444);
+module_param(debug_quirks2, uint, 0444);
+
+MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
+MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
+MODULE_LICENSE("GPL");
+
+MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
+MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
diff --git a/kernel/drivers/mmc/host/sdhci.h b/kernel/drivers/mmc/host/sdhci.h
new file mode 100644
index 000000000..e639b7f43
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci.h
@@ -0,0 +1,663 @@
+/*
+ * linux/drivers/mmc/host/sdhci.h - Secure Digital Host Controller Interface driver
+ *
+ * Header file for Host Controller registers and I/O accessors.
+ *
+ * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+#ifndef __SDHCI_HW_H
+#define __SDHCI_HW_H
+
+#include <linux/scatterlist.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+#include <linux/mmc/host.h>
+
+/*
+ * Controller registers
+ */
+
+#define SDHCI_DMA_ADDRESS 0x00
+#define SDHCI_ARGUMENT2 SDHCI_DMA_ADDRESS
+
+#define SDHCI_BLOCK_SIZE 0x04
+#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
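+/*
+ * e.g. (illustrative) SDHCI_MAKE_BLKSZ(7, 512) == 0x7200: 512-byte blocks
+ * with the largest (512 KiB) SDMA buffer boundary.
+ */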
+
+#define SDHCI_BLOCK_COUNT 0x06
+
+#define SDHCI_ARGUMENT 0x08
+
+#define SDHCI_TRANSFER_MODE 0x0C
+#define SDHCI_TRNS_DMA 0x01
+#define SDHCI_TRNS_BLK_CNT_EN 0x02
+#define SDHCI_TRNS_AUTO_CMD12 0x04
+#define SDHCI_TRNS_AUTO_CMD23 0x08
+#define SDHCI_TRNS_READ 0x10
+#define SDHCI_TRNS_MULTI 0x20
+
+#define SDHCI_COMMAND 0x0E
+#define SDHCI_CMD_RESP_MASK 0x03
+#define SDHCI_CMD_CRC 0x08
+#define SDHCI_CMD_INDEX 0x10
+#define SDHCI_CMD_DATA 0x20
+#define SDHCI_CMD_ABORTCMD 0xC0
+
+#define SDHCI_CMD_RESP_NONE 0x00
+#define SDHCI_CMD_RESP_LONG 0x01
+#define SDHCI_CMD_RESP_SHORT 0x02
+#define SDHCI_CMD_RESP_SHORT_BUSY 0x03
+
+#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
+#define SDHCI_GET_CMD(c) ((c>>8) & 0x3f)
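+/*
+ * e.g. (illustrative) SDHCI_MAKE_CMD(17, SDHCI_CMD_RESP_SHORT |
+ * SDHCI_CMD_CRC | SDHCI_CMD_INDEX | SDHCI_CMD_DATA) == 0x113A for a
+ * single-block read (CMD17), and SDHCI_GET_CMD(0x113A) == 17.
+ */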
+
+#define SDHCI_RESPONSE 0x10
+
+#define SDHCI_BUFFER 0x20
+
+#define SDHCI_PRESENT_STATE 0x24
+#define SDHCI_CMD_INHIBIT 0x00000001
+#define SDHCI_DATA_INHIBIT 0x00000002
+#define SDHCI_DOING_WRITE 0x00000100
+#define SDHCI_DOING_READ 0x00000200
+#define SDHCI_SPACE_AVAILABLE 0x00000400
+#define SDHCI_DATA_AVAILABLE 0x00000800
+#define SDHCI_CARD_PRESENT 0x00010000
+#define SDHCI_WRITE_PROTECT 0x00080000
+#define SDHCI_DATA_LVL_MASK 0x00F00000
+#define SDHCI_DATA_LVL_SHIFT 20
+#define SDHCI_DATA_0_LVL_MASK 0x00100000
+
+#define SDHCI_HOST_CONTROL 0x28
+#define SDHCI_CTRL_LED 0x01
+#define SDHCI_CTRL_4BITBUS 0x02
+#define SDHCI_CTRL_HISPD 0x04
+#define SDHCI_CTRL_DMA_MASK 0x18
+#define SDHCI_CTRL_SDMA 0x00
+#define SDHCI_CTRL_ADMA1 0x08
+#define SDHCI_CTRL_ADMA32 0x10
+#define SDHCI_CTRL_ADMA64 0x18
+#define SDHCI_CTRL_8BITBUS 0x20
+
+#define SDHCI_POWER_CONTROL 0x29
+#define SDHCI_POWER_ON 0x01
+#define SDHCI_POWER_180 0x0A
+#define SDHCI_POWER_300 0x0C
+#define SDHCI_POWER_330 0x0E
+
+#define SDHCI_BLOCK_GAP_CONTROL 0x2A
+
+#define SDHCI_WAKE_UP_CONTROL 0x2B
+#define SDHCI_WAKE_ON_INT 0x01
+#define SDHCI_WAKE_ON_INSERT 0x02
+#define SDHCI_WAKE_ON_REMOVE 0x04
+
+#define SDHCI_CLOCK_CONTROL 0x2C
+#define SDHCI_DIVIDER_SHIFT 8
+#define SDHCI_DIVIDER_HI_SHIFT 6
+#define SDHCI_DIV_MASK 0xFF
+#define SDHCI_DIV_MASK_LEN 8
+#define SDHCI_DIV_HI_MASK 0x300
+#define SDHCI_PROG_CLOCK_MODE 0x0020
+#define SDHCI_CLOCK_CARD_EN 0x0004
+#define SDHCI_CLOCK_INT_STABLE 0x0002
+#define SDHCI_CLOCK_INT_EN 0x0001
+
+#define SDHCI_TIMEOUT_CONTROL 0x2E
+
+#define SDHCI_SOFTWARE_RESET 0x2F
+#define SDHCI_RESET_ALL 0x01
+#define SDHCI_RESET_CMD 0x02
+#define SDHCI_RESET_DATA 0x04
+
+#define SDHCI_INT_STATUS 0x30
+#define SDHCI_INT_ENABLE 0x34
+#define SDHCI_SIGNAL_ENABLE 0x38
+#define SDHCI_INT_RESPONSE 0x00000001
+#define SDHCI_INT_DATA_END 0x00000002
+#define SDHCI_INT_BLK_GAP 0x00000004
+#define SDHCI_INT_DMA_END 0x00000008
+#define SDHCI_INT_SPACE_AVAIL 0x00000010
+#define SDHCI_INT_DATA_AVAIL 0x00000020
+#define SDHCI_INT_CARD_INSERT 0x00000040
+#define SDHCI_INT_CARD_REMOVE 0x00000080
+#define SDHCI_INT_CARD_INT 0x00000100
+#define SDHCI_INT_ERROR 0x00008000
+#define SDHCI_INT_TIMEOUT 0x00010000
+#define SDHCI_INT_CRC 0x00020000
+#define SDHCI_INT_END_BIT 0x00040000
+#define SDHCI_INT_INDEX 0x00080000
+#define SDHCI_INT_DATA_TIMEOUT 0x00100000
+#define SDHCI_INT_DATA_CRC 0x00200000
+#define SDHCI_INT_DATA_END_BIT 0x00400000
+#define SDHCI_INT_BUS_POWER 0x00800000
+#define SDHCI_INT_ACMD12ERR 0x01000000
+#define SDHCI_INT_ADMA_ERROR 0x02000000
+
+#define SDHCI_INT_NORMAL_MASK 0x00007FFF
+#define SDHCI_INT_ERROR_MASK 0xFFFF8000
+
+#define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
+ SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
+#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
+ SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
+ SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
+ SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \
+ SDHCI_INT_BLK_GAP)
+#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
+
+#define SDHCI_ACMD12_ERR 0x3C
+
+#define SDHCI_HOST_CONTROL2 0x3E
+#define SDHCI_CTRL_UHS_MASK 0x0007
+#define SDHCI_CTRL_UHS_SDR12 0x0000
+#define SDHCI_CTRL_UHS_SDR25 0x0001
+#define SDHCI_CTRL_UHS_SDR50 0x0002
+#define SDHCI_CTRL_UHS_SDR104 0x0003
+#define SDHCI_CTRL_UHS_DDR50 0x0004
+#define SDHCI_CTRL_HS400 0x0005 /* Non-standard */
+#define SDHCI_CTRL_VDD_180 0x0008
+#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030
+#define SDHCI_CTRL_DRV_TYPE_B 0x0000
+#define SDHCI_CTRL_DRV_TYPE_A 0x0010
+#define SDHCI_CTRL_DRV_TYPE_C 0x0020
+#define SDHCI_CTRL_DRV_TYPE_D 0x0030
+#define SDHCI_CTRL_EXEC_TUNING 0x0040
+#define SDHCI_CTRL_TUNED_CLK 0x0080
+#define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
+
+#define SDHCI_CAPABILITIES 0x40
+#define SDHCI_TIMEOUT_CLK_MASK 0x0000003F
+#define SDHCI_TIMEOUT_CLK_SHIFT 0
+#define SDHCI_TIMEOUT_CLK_UNIT 0x00000080
+#define SDHCI_CLOCK_BASE_MASK 0x00003F00
+#define SDHCI_CLOCK_V3_BASE_MASK 0x0000FF00
+#define SDHCI_CLOCK_BASE_SHIFT 8
+#define SDHCI_MAX_BLOCK_MASK 0x00030000
+#define SDHCI_MAX_BLOCK_SHIFT 16
+#define SDHCI_CAN_DO_8BIT 0x00040000
+#define SDHCI_CAN_DO_ADMA2 0x00080000
+#define SDHCI_CAN_DO_ADMA1 0x00100000
+#define SDHCI_CAN_DO_HISPD 0x00200000
+#define SDHCI_CAN_DO_SDMA 0x00400000
+#define SDHCI_CAN_VDD_330 0x01000000
+#define SDHCI_CAN_VDD_300 0x02000000
+#define SDHCI_CAN_VDD_180 0x04000000
+#define SDHCI_CAN_64BIT 0x10000000
+
+#define SDHCI_SUPPORT_SDR50 0x00000001
+#define SDHCI_SUPPORT_SDR104 0x00000002
+#define SDHCI_SUPPORT_DDR50 0x00000004
+#define SDHCI_DRIVER_TYPE_A 0x00000010
+#define SDHCI_DRIVER_TYPE_C 0x00000020
+#define SDHCI_DRIVER_TYPE_D 0x00000040
+#define SDHCI_RETUNING_TIMER_COUNT_MASK 0x00000F00
+#define SDHCI_RETUNING_TIMER_COUNT_SHIFT 8
+#define SDHCI_USE_SDR50_TUNING 0x00002000
+#define SDHCI_RETUNING_MODE_MASK 0x0000C000
+#define SDHCI_RETUNING_MODE_SHIFT 14
+#define SDHCI_CLOCK_MUL_MASK 0x00FF0000
+#define SDHCI_CLOCK_MUL_SHIFT 16
+#define SDHCI_SUPPORT_HS400 0x80000000 /* Non-standard */
+
+#define SDHCI_CAPABILITIES_1 0x44
+
+#define SDHCI_MAX_CURRENT 0x48
+#define SDHCI_MAX_CURRENT_LIMIT 0xFF
+#define SDHCI_MAX_CURRENT_330_MASK 0x0000FF
+#define SDHCI_MAX_CURRENT_330_SHIFT 0
+#define SDHCI_MAX_CURRENT_300_MASK 0x00FF00
+#define SDHCI_MAX_CURRENT_300_SHIFT 8
+#define SDHCI_MAX_CURRENT_180_MASK 0xFF0000
+#define SDHCI_MAX_CURRENT_180_SHIFT 16
+#define SDHCI_MAX_CURRENT_MULTIPLIER 4
+
+/* 4C-4F reserved for more max current */
+
+#define SDHCI_SET_ACMD12_ERROR 0x50
+#define SDHCI_SET_INT_ERROR 0x52
+
+#define SDHCI_ADMA_ERROR 0x54
+
+/* 55-57 reserved */
+
+#define SDHCI_ADMA_ADDRESS 0x58
+#define SDHCI_ADMA_ADDRESS_HI 0x5C
+
+/* 60-FB reserved */
+
+#define SDHCI_PRESET_FOR_SDR12 0x66
+#define SDHCI_PRESET_FOR_SDR25 0x68
+#define SDHCI_PRESET_FOR_SDR50 0x6A
+#define SDHCI_PRESET_FOR_SDR104 0x6C
+#define SDHCI_PRESET_FOR_DDR50 0x6E
+#define SDHCI_PRESET_FOR_HS400 0x74 /* Non-standard */
+#define SDHCI_PRESET_DRV_MASK 0xC000
+#define SDHCI_PRESET_DRV_SHIFT 14
+#define SDHCI_PRESET_CLKGEN_SEL_MASK 0x400
+#define SDHCI_PRESET_CLKGEN_SEL_SHIFT 10
+#define SDHCI_PRESET_SDCLK_FREQ_MASK 0x3FF
+#define SDHCI_PRESET_SDCLK_FREQ_SHIFT 0
+
+#define SDHCI_SLOT_INT_STATUS 0xFC
+
+#define SDHCI_HOST_VERSION 0xFE
+#define SDHCI_VENDOR_VER_MASK 0xFF00
+#define SDHCI_VENDOR_VER_SHIFT 8
+#define SDHCI_SPEC_VER_MASK 0x00FF
+#define SDHCI_SPEC_VER_SHIFT 0
+#define SDHCI_SPEC_100 0
+#define SDHCI_SPEC_200 1
+#define SDHCI_SPEC_300 2
+
+/*
+ * End of controller registers.
+ */
+
+#define SDHCI_MAX_DIV_SPEC_200 256
+#define SDHCI_MAX_DIV_SPEC_300 2046
+
+/*
+ * Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2.
+ */
+#define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024)
+#define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12)
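+/*
+ * e.g. the default works out to ilog2(524288) - 12 = 19 - 12 = 7, the
+ * boundary field value passed to SDHCI_MAKE_BLKSZ (illustrative).
+ */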
+
+/* ADMA2 32-bit DMA descriptor size */
+#define SDHCI_ADMA2_32_DESC_SZ 8
+
+/* ADMA2 32-bit DMA alignment */
+#define SDHCI_ADMA2_32_ALIGN 4
+
+/* ADMA2 32-bit descriptor */
+struct sdhci_adma2_32_desc {
+ __le16 cmd;
+ __le16 len;
+ __le32 addr;
+} __packed __aligned(SDHCI_ADMA2_32_ALIGN);
+
+/* ADMA2 64-bit DMA descriptor size */
+#define SDHCI_ADMA2_64_DESC_SZ 12
+
+/* ADMA2 64-bit DMA alignment */
+#define SDHCI_ADMA2_64_ALIGN 8
+
+/*
+ * ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte
+ * aligned.
+ */
+struct sdhci_adma2_64_desc {
+ __le16 cmd;
+ __le16 len;
+ __le32 addr_lo;
+ __le32 addr_hi;
+} __packed __aligned(4);
+
+#define ADMA2_TRAN_VALID 0x21
+#define ADMA2_NOP_END_VALID 0x3
+#define ADMA2_END 0x2
+
+/*
+ * Maximum segments assuming a 512KiB maximum request size and a minimum
+ * 4KiB page size.
+ */
+#define SDHCI_MAX_SEGS 128
+
+struct sdhci_host_next {
+ unsigned int sg_count;
+ s32 cookie;
+};
+
+struct sdhci_host {
+ /* Data set by hardware interface driver */
+ const char *hw_name; /* Hardware bus name */
+
+ unsigned int quirks; /* Deviations from spec. */
+
+/* Controller doesn't honor resets unless we touch the clock register */
+#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
+/* Controller has bad caps bits, but really supports DMA */
+#define SDHCI_QUIRK_FORCE_DMA (1<<1)
+/* Controller doesn't like to be reset when there is no card inserted. */
+#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
+/* Controller doesn't like clearing the power reg before a change */
+#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
+/* Controller has flaky internal state so reset it on each ios change */
+#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
+/* Controller has an unusable DMA engine */
+#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
+/* Controller has an unusable ADMA engine */
+#define SDHCI_QUIRK_BROKEN_ADMA (1<<6)
+/* Controller can only DMA from 32-bit aligned addresses */
+#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7)
+/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
+#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8)
+/* Controller can only ADMA chunks that are a multiple of 32 bits */
+#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9)
+/* Controller needs to be reset after each request to stay stable */
+#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10)
+/* Controller needs voltage and power writes to happen separately */
+#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11)
+/* Controller provides an incorrect timeout value for transfers */
+#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
+/* Controller has an issue with buffer bits for small transfers */
+#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)
+/* Controller does not provide transfer-complete interrupt when not busy */
+#define SDHCI_QUIRK_NO_BUSY_IRQ (1<<14)
+/* Controller has unreliable card detection */
+#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
+/* Controller reports inverted write-protect state */
+#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
+/* Controller does not like fast PIO transfers */
+#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
+/* Controller has to be forced to use block size of 2048 bytes */
+#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
+/* Controller cannot do multi-block transfers */
+#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21)
+/* Controller can only handle 1-bit data transfers */
+#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
+/* Controller needs 10ms delay between applying power and clock */
+#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
+/* Controller uses SDCLK instead of TMCLK for data timeouts */
+#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
+/* Controller reports wrong base clock capability */
+#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
+/* Controller cannot support End Attribute in NOP ADMA descriptor */
+#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
+/* Controller is missing device caps. Use caps provided by host */
+#define SDHCI_QUIRK_MISSING_CAPS (1<<27)
+/* Controller uses Auto CMD12 command to stop the transfer */
+#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
+/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
+#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
+/* Controller treats ADMA descriptors with length 0000h incorrectly */
+#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1<<30)
+/* The read-only detection via SDHCI_PRESENT_STATE register is unstable */
+#define SDHCI_QUIRK_UNSTABLE_RO_DETECT (1<<31)
+
+ unsigned int quirks2; /* More deviations from spec. */
+
+#define SDHCI_QUIRK2_HOST_OFF_CARD_ON (1<<0)
+#define SDHCI_QUIRK2_HOST_NO_CMD23 (1<<1)
+/* The system physically doesn't support 1.8v, even if the host does */
+#define SDHCI_QUIRK2_NO_1_8_V (1<<2)
+#define SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1<<3)
+#define SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON (1<<4)
+/* Controller has a non-standard host control register */
+#define SDHCI_QUIRK2_BROKEN_HOST_CONTROL (1<<5)
+/* Controller does not support HS200 */
+#define SDHCI_QUIRK2_BROKEN_HS200 (1<<6)
+/* Controller does not support DDR50 */
+#define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7)
+/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */
+#define SDHCI_QUIRK2_STOP_WITH_TC (1<<8)
+/* Controller does not support 64-bit DMA */
+#define SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1<<9)
+/* Need to clear the transfer mode register before sending a command */
+#define SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1<<10)
+/* Capability register bit-63 indicates HS400 support */
+#define SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1<<11)
+/* forced tuned clock */
+#define SDHCI_QUIRK2_TUNING_WORK_AROUND (1<<12)
+/* disable the block count for single block transactions */
+#define SDHCI_QUIRK2_SUPPORT_SINGLE (1<<13)
+/* Controller broken with using ACMD23 */
+#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14)
+
+ int irq; /* Device IRQ */
+ void __iomem *ioaddr; /* Mapped address */
+
+ const struct sdhci_ops *ops; /* Low level hw interface */
+
+ /* Internal data */
+ struct mmc_host *mmc; /* MMC structure */
+ u64 dma_mask; /* custom DMA mask */
+
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+ struct led_classdev led; /* LED control */
+ char led_name[32];
+#endif
+
+ spinlock_t lock; /* Mutex */
+
+ int flags; /* Host attributes */
+#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */
+#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
+#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
+#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
+#define SDHCI_SDR50_NEEDS_TUNING (1<<4) /* SDR50 needs tuning */
+#define SDHCI_NEEDS_RETUNING (1<<5) /* Host needs retuning */
+#define SDHCI_AUTO_CMD12 (1<<6) /* Auto CMD12 support */
+#define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */
+#define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */
+#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
+#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
+#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
+#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
+#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
+
+ unsigned int version; /* SDHCI spec. version */
+
+ unsigned int max_clk; /* Max possible freq (MHz) */
+ unsigned int timeout_clk; /* Timeout freq (KHz) */
+	unsigned int clk_mul;	/* Clock Multiplier value */
+
+ unsigned int clock; /* Current clock (MHz) */
+ u8 pwr; /* Current voltage */
+
+ bool runtime_suspended; /* Host is runtime suspended */
+ bool bus_on; /* Bus power prevents runtime suspend */
+ bool preset_enabled; /* Preset is enabled */
+
+ struct mmc_request *mrq; /* Current request */
+ struct mmc_command *cmd; /* Current command */
+ struct mmc_data *data; /* Current data request */
+ unsigned int data_early:1; /* Data finished before cmd */
+ unsigned int busy_handle:1; /* Handling the order of Busy-end */
+
+ struct sg_mapping_iter sg_miter; /* SG state for PIO */
+ unsigned int blocks; /* remaining PIO blocks */
+
+ int sg_count; /* Mapped sg entries */
+
+ void *adma_table; /* ADMA descriptor table */
+ void *align_buffer; /* Bounce buffer */
+
+ size_t adma_table_sz; /* ADMA descriptor table size */
+ size_t align_buffer_sz; /* Bounce buffer size */
+
+ dma_addr_t adma_addr; /* Mapped ADMA descr. table */
+ dma_addr_t align_addr; /* Mapped bounce buffer */
+
+ unsigned int desc_sz; /* ADMA descriptor size */
+ unsigned int align_sz; /* ADMA alignment */
+ unsigned int align_mask; /* ADMA alignment mask */
+
+ struct tasklet_struct finish_tasklet; /* Tasklet structures */
+
+ struct timer_list timer; /* Timer for timeouts */
+
+ u32 caps; /* Alternative CAPABILITY_0 */
+ u32 caps1; /* Alternative CAPABILITY_1 */
+
+ unsigned int ocr_avail_sdio; /* OCR bit masks */
+ unsigned int ocr_avail_sd;
+ unsigned int ocr_avail_mmc;
+ u32 ocr_mask; /* available voltages */
+
+ unsigned timing; /* Current timing */
+
+ u32 thread_isr;
+
+ /* cached registers */
+ u32 ier;
+
+ wait_queue_head_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */
+ unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */
+
+ unsigned int tuning_count; /* Timer count for re-tuning */
+ unsigned int tuning_mode; /* Re-tuning mode supported by host */
+#define SDHCI_TUNING_MODE_1 0
+ struct timer_list tuning_timer; /* Timer for tuning */
+
+ struct sdhci_host_next next_data;
+ unsigned long private[0] ____cacheline_aligned;
+};
+
+struct sdhci_ops {
+#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
+ u32 (*read_l)(struct sdhci_host *host, int reg);
+ u16 (*read_w)(struct sdhci_host *host, int reg);
+ u8 (*read_b)(struct sdhci_host *host, int reg);
+ void (*write_l)(struct sdhci_host *host, u32 val, int reg);
+ void (*write_w)(struct sdhci_host *host, u16 val, int reg);
+ void (*write_b)(struct sdhci_host *host, u8 val, int reg);
+#endif
+
+ void (*set_clock)(struct sdhci_host *host, unsigned int clock);
+
+ int (*enable_dma)(struct sdhci_host *host);
+ unsigned int (*get_max_clock)(struct sdhci_host *host);
+ unsigned int (*get_min_clock)(struct sdhci_host *host);
+ unsigned int (*get_timeout_clock)(struct sdhci_host *host);
+ unsigned int (*get_max_timeout_count)(struct sdhci_host *host);
+ void (*set_timeout)(struct sdhci_host *host,
+ struct mmc_command *cmd);
+ void (*set_bus_width)(struct sdhci_host *host, int width);
+ void (*platform_send_init_74_clocks)(struct sdhci_host *host,
+ u8 power_mode);
+ unsigned int (*get_ro)(struct sdhci_host *host);
+ void (*reset)(struct sdhci_host *host, u8 mask);
+ int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
+ void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
+ void (*hw_reset)(struct sdhci_host *host);
+ void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+ void (*platform_init)(struct sdhci_host *host);
+ void (*card_event)(struct sdhci_host *host);
+ void (*voltage_switch)(struct sdhci_host *host);
+};
+
+#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
+
+static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ if (unlikely(host->ops->write_l))
+ host->ops->write_l(host, val, reg);
+ else
+ writel(val, host->ioaddr + reg);
+}
+
+static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ if (unlikely(host->ops->write_w))
+ host->ops->write_w(host, val, reg);
+ else
+ writew(val, host->ioaddr + reg);
+}
+
+static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ if (unlikely(host->ops->write_b))
+ host->ops->write_b(host, val, reg);
+ else
+ writeb(val, host->ioaddr + reg);
+}
+
+static inline u32 sdhci_readl(struct sdhci_host *host, int reg)
+{
+ if (unlikely(host->ops->read_l))
+ return host->ops->read_l(host, reg);
+ else
+ return readl(host->ioaddr + reg);
+}
+
+static inline u16 sdhci_readw(struct sdhci_host *host, int reg)
+{
+ if (unlikely(host->ops->read_w))
+ return host->ops->read_w(host, reg);
+ else
+ return readw(host->ioaddr + reg);
+}
+
+static inline u8 sdhci_readb(struct sdhci_host *host, int reg)
+{
+ if (unlikely(host->ops->read_b))
+ return host->ops->read_b(host, reg);
+ else
+ return readb(host->ioaddr + reg);
+}
+
+#else
+
+static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ writel(val, host->ioaddr + reg);
+}
+
+static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ writew(val, host->ioaddr + reg);
+}
+
+static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ writeb(val, host->ioaddr + reg);
+}
+
+static inline u32 sdhci_readl(struct sdhci_host *host, int reg)
+{
+ return readl(host->ioaddr + reg);
+}
+
+static inline u16 sdhci_readw(struct sdhci_host *host, int reg)
+{
+ return readw(host->ioaddr + reg);
+}
+
+static inline u8 sdhci_readb(struct sdhci_host *host, int reg)
+{
+ return readb(host->ioaddr + reg);
+}
+
+#endif /* CONFIG_MMC_SDHCI_IO_ACCESSORS */
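+/*
+ * Drivers with non-standard register behaviour override the accessors
+ * above through sdhci_ops. A minimal sketch (illustrative only; my_readw
+ * and my_ops are hypothetical names, not part of this header):
+ *
+ *	static u16 my_readw(struct sdhci_host *host, int reg)
+ *	{
+ *		u16 val = readw(host->ioaddr + reg);
+ *
+ *		if (reg == SDHCI_HOST_VERSION)
+ *			val = (val & ~SDHCI_SPEC_VER_MASK) | SDHCI_SPEC_200;
+ *		return val;
+ *	}
+ *
+ *	static const struct sdhci_ops my_ops = {
+ *		.read_w	= my_readw,
+ *	};
+ */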
+
+extern struct sdhci_host *sdhci_alloc_host(struct device *dev,
+ size_t priv_size);
+extern void sdhci_free_host(struct sdhci_host *host);
+
+static inline void *sdhci_priv(struct sdhci_host *host)
+{
+ return (void *)host->private;
+}
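+/*
+ * Typical usage (illustrative sketch; struct my_priv is hypothetical):
+ *
+ *	host = sdhci_alloc_host(dev, sizeof(struct my_priv));
+ *	priv = sdhci_priv(host);
+ *
+ * The private area lives directly after struct sdhci_host, as declared
+ * by the private[] member above.
+ */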
+
+extern void sdhci_card_detect(struct sdhci_host *host);
+extern int sdhci_add_host(struct sdhci_host *host);
+extern void sdhci_remove_host(struct sdhci_host *host, int dead);
+extern void sdhci_send_command(struct sdhci_host *host,
+ struct mmc_command *cmd);
+
+static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
+{
+ return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED);
+}
+
+void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
+void sdhci_set_bus_width(struct sdhci_host *host, int width);
+void sdhci_reset(struct sdhci_host *host, u8 mask);
+void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
+
+#ifdef CONFIG_PM
+extern int sdhci_suspend_host(struct sdhci_host *host);
+extern int sdhci_resume_host(struct sdhci_host *host);
+extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
+extern int sdhci_runtime_suspend_host(struct sdhci_host *host);
+extern int sdhci_runtime_resume_host(struct sdhci_host *host);
+#endif
+
+#endif /* __SDHCI_HW_H */
diff --git a/kernel/drivers/mmc/host/sdhci_f_sdh30.c b/kernel/drivers/mmc/host/sdhci_f_sdh30.c
new file mode 100644
index 000000000..2fe8b9148
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci_f_sdh30.c
@@ -0,0 +1,237 @@
+/*
+ * linux/drivers/mmc/host/sdhci_f_sdh30.c
+ *
+ * Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd
+ * Vincent Yang <vincent.yang@tw.fujitsu.com>
+ * Copyright (C) 2015 Linaro Ltd Andy Green <andy.green@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+
+#include "sdhci-pltfm.h"
+
+/* F_SDH30 extended Controller registers */
+#define F_SDH30_AHB_CONFIG 0x100
+#define F_SDH30_AHB_BIGED 0x00000040
+#define F_SDH30_BUSLOCK_DMA 0x00000020
+#define F_SDH30_BUSLOCK_EN 0x00000010
+#define F_SDH30_SIN 0x00000008
+#define F_SDH30_AHB_INCR_16 0x00000004
+#define F_SDH30_AHB_INCR_8 0x00000002
+#define F_SDH30_AHB_INCR_4 0x00000001
+
+#define F_SDH30_TUNING_SETTING 0x108
+#define F_SDH30_CMD_CHK_DIS 0x00010000
+
+#define F_SDH30_IO_CONTROL2 0x114
+#define F_SDH30_CRES_O_DN 0x00080000
+#define F_SDH30_MSEL_O_1_8 0x00040000
+
+#define F_SDH30_ESD_CONTROL 0x124
+#define F_SDH30_EMMC_RST 0x00000002
+#define F_SDH30_EMMC_HS200 0x01000000
+
+#define F_SDH30_CMD_DAT_DELAY 0x200
+
+#define F_SDH30_MIN_CLOCK 400000
+
+struct f_sdhost_priv {
+ struct clk *clk_iface;
+ struct clk *clk;
+ u32 vendor_hs200;
+ struct device *dev;
+};
+
+void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u32 ctrl = 0;
+
+ usleep_range(2500, 3000);
+ ctrl = sdhci_readl(host, F_SDH30_IO_CONTROL2);
+ ctrl |= F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+ ctrl |= F_SDH30_MSEL_O_1_8;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+
+ ctrl &= ~F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+ usleep_range(2500, 3000);
+
+ if (priv->vendor_hs200) {
+ dev_info(priv->dev, "%s: setting hs200\n", __func__);
+ ctrl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctrl |= priv->vendor_hs200;
+ sdhci_writel(host, ctrl, F_SDH30_ESD_CONTROL);
+ }
+
+ ctrl = sdhci_readl(host, F_SDH30_TUNING_SETTING);
+ ctrl |= F_SDH30_CMD_CHK_DIS;
+ sdhci_writel(host, ctrl, F_SDH30_TUNING_SETTING);
+}
+
+unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
+{
+ return F_SDH30_MIN_CLOCK;
+}
+
+void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
+{
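+	/*
+	 * If the clock register was left at zero, seed it before resetting.
+	 * Decoding 0xBC01 against the SDHCI_CLOCK_CONTROL layout (our
+	 * reading, not vendor-documented): divider field 0xBC with
+	 * SDHCI_CLOCK_INT_EN set.
+	 */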
+ if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
+ sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL);
+
+ sdhci_reset(host, mask);
+}
+
+static const struct sdhci_ops sdhci_f_sdh30_ops = {
+ .voltage_switch = sdhci_f_sdh30_soft_voltage_switch,
+ .get_min_clock = sdhci_f_sdh30_get_min_clock,
+ .reset = sdhci_f_sdh30_reset,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static int sdhci_f_sdh30_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq, ctrl = 0, ret = 0;
+ struct f_sdhost_priv *priv;
+ u32 reg = 0;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "%s: no irq specified\n", __func__);
+ return irq;
+ }
+
+ host = sdhci_alloc_host(dev, sizeof(struct sdhci_host) +
+ sizeof(struct f_sdhost_priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ priv = sdhci_priv(host);
+ priv->dev = dev;
+
+ host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+ SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
+ host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
+ SDHCI_QUIRK2_TUNING_WORK_AROUND;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, host);
+
+ sdhci_get_of_property(pdev);
+ host->hw_name = "f_sdh30";
+ host->ops = &sdhci_f_sdh30_ops;
+ host->irq = irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->ioaddr)) {
+ ret = PTR_ERR(host->ioaddr);
+ goto err;
+ }
+
+ priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(priv->clk_iface)) {
+ ret = PTR_ERR(priv->clk_iface);
+ goto err;
+ }
+
+ ret = clk_prepare_enable(priv->clk_iface);
+ if (ret)
+ goto err;
+
+ priv->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto err_clk;
+
+ /* init vendor specific regs */
+ ctrl = sdhci_readw(host, F_SDH30_AHB_CONFIG);
+ ctrl |= F_SDH30_SIN | F_SDH30_AHB_INCR_16 | F_SDH30_AHB_INCR_8 |
+ F_SDH30_AHB_INCR_4;
+ ctrl &= ~(F_SDH30_AHB_BIGED | F_SDH30_BUSLOCK_EN);
+ sdhci_writew(host, ctrl, F_SDH30_AHB_CONFIG);
+
+ reg = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ sdhci_writel(host, reg & ~F_SDH30_EMMC_RST, F_SDH30_ESD_CONTROL);
+ msleep(20);
+ sdhci_writel(host, reg | F_SDH30_EMMC_RST, F_SDH30_ESD_CONTROL);
+
+ reg = sdhci_readl(host, SDHCI_CAPABILITIES);
+ if (reg & SDHCI_CAN_DO_8BIT)
+ priv->vendor_hs200 = F_SDH30_EMMC_HS200;
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_add_host;
+
+ return 0;
+
+err_add_host:
+ clk_disable_unprepare(priv->clk);
+err_clk:
+ clk_disable_unprepare(priv->clk_iface);
+err:
+ sdhci_free_host(host);
+ return ret;
+}
+
+static int sdhci_f_sdh30_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+
+ sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) ==
+ 0xffffffff);
+
+ clk_disable_unprepare(priv->clk_iface);
+ clk_disable_unprepare(priv->clk);
+
+ sdhci_free_host(host);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id f_sdh30_dt_ids[] = {
+ { .compatible = "fujitsu,mb86s70-sdhci-3.0" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, f_sdh30_dt_ids);
+
+static struct platform_driver sdhci_f_sdh30_driver = {
+ .driver = {
+ .name = "f_sdh30",
+ .of_match_table = f_sdh30_dt_ids,
+ .pm = SDHCI_PLTFM_PMOPS,
+ },
+ .probe = sdhci_f_sdh30_probe,
+ .remove = sdhci_f_sdh30_remove,
+};
+
+module_platform_driver(sdhci_f_sdh30_driver);
+
+MODULE_DESCRIPTION("F_SDH30 SD Card Controller driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("FUJITSU SEMICONDUCTOR LTD.");
+MODULE_ALIAS("platform:f_sdh30");
diff --git a/kernel/drivers/mmc/host/sdricoh_cs.c b/kernel/drivers/mmc/host/sdricoh_cs.c
new file mode 100644
index 000000000..b7e305775
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdricoh_cs.c
@@ -0,0 +1,552 @@
+/*
+ * sdricoh_cs.c - driver for Ricoh Secure Digital Card Readers that can be
+ * found on some Ricoh RL5c476 II cardbus bridges
+ *
+ * Copyright (C) 2006 - 2008 Sascha Sommer <saschasommer@freenet.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+/*
+#define DEBUG
+#define VERBOSE_DEBUG
+*/
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/scatterlist.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <linux/io.h>
+
+#include <linux/mmc/host.h>
+
+#define DRIVER_NAME "sdricoh_cs"
+
+static unsigned int switchlocked;
+
+/* i/o region */
+#define SDRICOH_PCI_REGION 0
+#define SDRICOH_PCI_REGION_SIZE 0x1000
+
+/* registers */
+#define R104_VERSION 0x104
+#define R200_CMD 0x200
+#define R204_CMD_ARG 0x204
+#define R208_DATAIO 0x208
+#define R20C_RESP 0x20c
+#define R21C_STATUS 0x21c
+#define R2E0_INIT 0x2e0
+#define R2E4_STATUS_RESP 0x2e4
+#define R2F0_RESET 0x2f0
+#define R224_MODE 0x224
+#define R226_BLOCKSIZE 0x226
+#define R228_POWER 0x228
+#define R230_DATA 0x230
+
+/* flags for the R21C_STATUS register */
+#define STATUS_CMD_FINISHED 0x00000001
+#define STATUS_TRANSFER_FINISHED 0x00000004
+#define STATUS_CARD_INSERTED 0x00000020
+#define STATUS_CARD_LOCKED 0x00000080
+#define STATUS_CMD_TIMEOUT 0x00400000
+#define STATUS_READY_TO_READ 0x01000000
+#define STATUS_READY_TO_WRITE 0x02000000
+#define STATUS_BUSY 0x40000000
+
+/* timeouts */
+#define INIT_TIMEOUT 100
+#define CMD_TIMEOUT 100000
+#define TRANSFER_TIMEOUT 100000
+#define BUSY_TIMEOUT 32767
+
+/* list of supported pcmcia devices */
+static const struct pcmcia_device_id pcmcia_ids[] = {
+ /* vendor and device strings followed by their crc32 hashes */
+ PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed,
+ 0xc3901202),
+ PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay Controller", 0xd9f522ed,
+ 0xace80909),
+ PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, pcmcia_ids);
+
+/* mmc privdata */
+struct sdricoh_host {
+ struct device *dev;
+ struct mmc_host *mmc; /* MMC structure */
+ unsigned char __iomem *iobase;
+ struct pci_dev *pci_dev;
+ int app_cmd;
+};
+
+/***************** register i/o helper functions *****************************/
+
+static inline unsigned int sdricoh_readl(struct sdricoh_host *host,
+ unsigned int reg)
+{
+ unsigned int value = readl(host->iobase + reg);
+ dev_vdbg(host->dev, "rl %x 0x%x\n", reg, value);
+ return value;
+}
+
+static inline void sdricoh_writel(struct sdricoh_host *host, unsigned int reg,
+ unsigned int value)
+{
+ writel(value, host->iobase + reg);
+ dev_vdbg(host->dev, "wl %x 0x%x\n", reg, value);
+}
+
+static inline unsigned int sdricoh_readw(struct sdricoh_host *host,
+ unsigned int reg)
+{
+ unsigned int value = readw(host->iobase + reg);
+	dev_vdbg(host->dev, "rw %x 0x%x\n", reg, value);
+ return value;
+}
+
+static inline void sdricoh_writew(struct sdricoh_host *host, unsigned int reg,
+ unsigned short value)
+{
+ writew(value, host->iobase + reg);
+ dev_vdbg(host->dev, "ww %x 0x%x\n", reg, value);
+}
+
+static inline unsigned int sdricoh_readb(struct sdricoh_host *host,
+ unsigned int reg)
+{
+ unsigned int value = readb(host->iobase + reg);
+ dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value);
+ return value;
+}
+
+static int sdricoh_query_status(struct sdricoh_host *host, unsigned int wanted,
+				unsigned int timeout)
+{
+ unsigned int loop;
+ unsigned int status = 0;
+ struct device *dev = host->dev;
+ for (loop = 0; loop < timeout; loop++) {
+ status = sdricoh_readl(host, R21C_STATUS);
+ sdricoh_writel(host, R2E4_STATUS_RESP, status);
+ if (status & wanted)
+ break;
+ }
+
+ if (loop == timeout) {
+ dev_err(dev, "query_status: timeout waiting for %x\n", wanted);
+ return -ETIMEDOUT;
+ }
+
+ /* do not do this check in the loop as some commands fail otherwise */
+ if (status & 0x7F0000) {
+ dev_err(dev, "waiting for status bit %x failed\n", wanted);
+ return -EINVAL;
+ }
+	return 0;
+}
+
+static int sdricoh_mmc_cmd(struct sdricoh_host *host, unsigned char opcode,
+ unsigned int arg)
+{
+ unsigned int status;
+ int result = 0;
+ unsigned int loop = 0;
+ /* reset status reg? */
+ sdricoh_writel(host, R21C_STATUS, 0x18);
+ /* fill parameters */
+ sdricoh_writel(host, R204_CMD_ARG, arg);
+ sdricoh_writel(host, R200_CMD, (0x10000 << 8) | opcode);
+ /* wait for command completion */
+ if (opcode) {
+ for (loop = 0; loop < CMD_TIMEOUT; loop++) {
+ status = sdricoh_readl(host, R21C_STATUS);
+ sdricoh_writel(host, R2E4_STATUS_RESP, status);
+ if (status & STATUS_CMD_FINISHED)
+ break;
+ }
+		/*
+		 * Don't check for a timeout inside the loop; the timeout
+		 * bit is not always reset correctly.
+		 */
+ if (loop == CMD_TIMEOUT || status & STATUS_CMD_TIMEOUT)
+ result = -ETIMEDOUT;
+
+ }
+
+	return result;
+}
+
+static int sdricoh_reset(struct sdricoh_host *host)
+{
+ dev_dbg(host->dev, "reset\n");
+ sdricoh_writel(host, R2F0_RESET, 0x10001);
+ sdricoh_writel(host, R2E0_INIT, 0x10000);
+ if (sdricoh_readl(host, R2E0_INIT) != 0x10000)
+ return -EIO;
+ sdricoh_writel(host, R2E0_INIT, 0x10007);
+
+ sdricoh_writel(host, R224_MODE, 0x2000000);
+ sdricoh_writel(host, R228_POWER, 0xe0);
+
+ /* status register ? */
+ sdricoh_writel(host, R21C_STATUS, 0x18);
+
+ return 0;
+}
+
+static int sdricoh_blockio(struct sdricoh_host *host, int read,
+ u8 *buf, int len)
+{
+ int size;
+ u32 data = 0;
+ /* wait until the data is available */
+ if (read) {
+ if (sdricoh_query_status(host, STATUS_READY_TO_READ,
+ TRANSFER_TIMEOUT))
+ return -ETIMEDOUT;
+ sdricoh_writel(host, R21C_STATUS, 0x18);
+ /* read data */
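+		/*
+		 * Each 32-bit register read is unpacked LSB first: e.g.
+		 * (illustrative) data == 0x44332211 produces the buffer
+		 * bytes 0x11 0x22 0x33 0x44.
+		 */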
+ while (len) {
+ data = sdricoh_readl(host, R230_DATA);
+ size = min(len, 4);
+ len -= size;
+ while (size) {
+ *buf = data & 0xFF;
+ buf++;
+ data >>= 8;
+ size--;
+ }
+ }
+ } else {
+ if (sdricoh_query_status(host, STATUS_READY_TO_WRITE,
+ TRANSFER_TIMEOUT))
+ return -ETIMEDOUT;
+ sdricoh_writel(host, R21C_STATUS, 0x18);
+ /* write data */
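+		/*
+		 * Bytes are packed little-endian into a 32-bit word before
+		 * each register write: e.g. (illustrative) buffer bytes
+		 * 0x11 0x22 0x33 0x44 are written as 0x44332211.
+		 */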
+ while (len) {
+ size = min(len, 4);
+ len -= size;
+ while (size) {
+ data >>= 8;
+ data |= (u32)*buf << 24;
+ buf++;
+ size--;
+ }
+ sdricoh_writel(host, R230_DATA, data);
+ }
+ }
+
+ if (len)
+ return -EIO;
+
+ return 0;
+}
+
+static void sdricoh_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sdricoh_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = cmd->data;
+ struct device *dev = host->dev;
+ unsigned char opcode = cmd->opcode;
+ int i;
+
+ dev_dbg(dev, "=============================\n");
+ dev_dbg(dev, "sdricoh_request opcode=%i\n", opcode);
+
+ sdricoh_writel(host, R21C_STATUS, 0x18);
+
+ /* MMC_APP_CMDs need some special handling */
+ if (host->app_cmd) {
+ opcode |= 64;
+ host->app_cmd = 0;
+ } else if (opcode == 55)
+ host->app_cmd = 1;
+
+ /* read/write commands seem to require this */
+ if (data) {
+ sdricoh_writew(host, R226_BLOCKSIZE, data->blksz);
+ sdricoh_writel(host, R208_DATAIO, 0);
+ }
+
+ cmd->error = sdricoh_mmc_cmd(host, opcode, cmd->arg);
+
+ /* read response buffer */
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ /* CRC is stripped so we need to do some shifting. */
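+			/*
+			 * Each 32-bit response word is shifted left by 8
+			 * and its low byte refilled from the preceding
+			 * register byte, reassembling the 128-bit payload.
+			 */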
+ for (i = 0; i < 4; i++) {
+ cmd->resp[i] =
+ sdricoh_readl(host,
+ R20C_RESP + (3 - i) * 4) << 8;
+ if (i != 3)
+ cmd->resp[i] |=
+ sdricoh_readb(host, R20C_RESP +
+ (3 - i) * 4 - 1);
+ }
+ } else
+ cmd->resp[0] = sdricoh_readl(host, R20C_RESP);
+ }
+
+ /* transfer data */
+ if (data && cmd->error == 0) {
+ dev_dbg(dev, "transfer: blksz %i blocks %i sg_len %i "
+ "sg length %i\n", data->blksz, data->blocks,
+ data->sg_len, data->sg->length);
+
+ /* enter data reading mode */
+ sdricoh_writel(host, R21C_STATUS, 0x837f031e);
+ for (i = 0; i < data->blocks; i++) {
+ size_t len = data->blksz;
+ u8 *buf;
+ struct page *page;
+ int result;
+ page = sg_page(data->sg);
+
+ buf = kmap(page) + data->sg->offset + (len * i);
+ result =
+ sdricoh_blockio(host,
+ data->flags & MMC_DATA_READ, buf, len);
+ kunmap(page);
+ flush_dcache_page(page);
+ if (result) {
+ dev_err(dev, "sdricoh_request: cmd %i "
+ "block transfer failed\n", cmd->opcode);
+ cmd->error = result;
+ break;
+ } else
+ data->bytes_xfered += len;
+ }
+
+ sdricoh_writel(host, R208_DATAIO, 1);
+
+ if (sdricoh_query_status(host, STATUS_TRANSFER_FINISHED,
+ TRANSFER_TIMEOUT)) {
+ dev_err(dev, "sdricoh_request: transfer end error\n");
+ cmd->error = -EINVAL;
+ }
+ }
+ /* FIXME check busy flag */
+
+ mmc_request_done(mmc, mrq);
+ dev_dbg(dev, "=============================\n");
+}
+
+static void sdricoh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdricoh_host *host = mmc_priv(mmc);
+ dev_dbg(host->dev, "set_ios\n");
+
+ if (ios->power_mode == MMC_POWER_ON) {
+ sdricoh_writel(host, R228_POWER, 0xc0e0);
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4) {
+ sdricoh_writel(host, R224_MODE, 0x2000300);
+ sdricoh_writel(host, R228_POWER, 0x40e0);
+ } else {
+ sdricoh_writel(host, R224_MODE, 0x2000340);
+ }
+
+ } else if (ios->power_mode == MMC_POWER_UP) {
+ sdricoh_writel(host, R224_MODE, 0x2000320);
+ sdricoh_writel(host, R228_POWER, 0xe0);
+ }
+}
+
+static int sdricoh_get_ro(struct mmc_host *mmc)
+{
+ struct sdricoh_host *host = mmc_priv(mmc);
+ unsigned int status;
+
+ status = sdricoh_readl(host, R21C_STATUS);
+ sdricoh_writel(host, R2E4_STATUS_RESP, status);
+
+ /* some notebooks seem to have the locked flag switched */
+ if (switchlocked)
+ return !(status & STATUS_CARD_LOCKED);
+
+ return (status & STATUS_CARD_LOCKED);
+}
+
+static struct mmc_host_ops sdricoh_ops = {
+ .request = sdricoh_request,
+ .set_ios = sdricoh_set_ios,
+ .get_ro = sdricoh_get_ro,
+};
+
+/* initialize the controller and register it with the mmc framework */
+static int sdricoh_init_mmc(struct pci_dev *pci_dev,
+ struct pcmcia_device *pcmcia_dev)
+{
+ int result = 0;
+ void __iomem *iobase = NULL;
+ struct mmc_host *mmc = NULL;
+ struct sdricoh_host *host = NULL;
+ struct device *dev = &pcmcia_dev->dev;
+ /* map iomem */
+ if (pci_resource_len(pci_dev, SDRICOH_PCI_REGION) !=
+ SDRICOH_PCI_REGION_SIZE) {
+ dev_dbg(dev, "unexpected pci resource len\n");
+ return -ENODEV;
+ }
+ iobase =
+ pci_iomap(pci_dev, SDRICOH_PCI_REGION, SDRICOH_PCI_REGION_SIZE);
+ if (!iobase) {
+ dev_err(dev, "unable to map iobase\n");
+ return -ENODEV;
+ }
+ /* check version? */
+ if (readl(iobase + R104_VERSION) != 0x4000) {
+ dev_dbg(dev, "no supported mmc controller found\n");
+ result = -ENODEV;
+ goto err;
+ }
+ /* allocate privdata */
+ mmc = pcmcia_dev->priv =
+ mmc_alloc_host(sizeof(struct sdricoh_host), &pcmcia_dev->dev);
+ if (!mmc) {
+ dev_err(dev, "mmc_alloc_host failed\n");
+ result = -ENOMEM;
+ goto err;
+ }
+ host = mmc_priv(mmc);
+
+ host->iobase = iobase;
+ host->dev = dev;
+ host->pci_dev = pci_dev;
+
+ mmc->ops = &sdricoh_ops;
+
+ /* FIXME: frequency and voltage handling is done by the controller */
+ mmc->f_min = 450000;
+ mmc->f_max = 24000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ mmc->max_seg_size = 1024 * 512;
+ mmc->max_blk_size = 512;
+
+ /* reset the controller */
+ if (sdricoh_reset(host)) {
+ dev_dbg(dev, "could not reset\n");
+ result = -EIO;
+ goto err;
+ }
+
+ result = mmc_add_host(mmc);
+
+ if (!result) {
+ dev_dbg(dev, "mmc host registered\n");
+ return 0;
+ }
+
+err:
+ if (iobase)
+ pci_iounmap(pci_dev, iobase);
+ if (mmc)
+ mmc_free_host(mmc);
+
+ return result;
+}
+
+/* search for supported mmc controllers */
+static int sdricoh_pcmcia_probe(struct pcmcia_device *pcmcia_dev)
+{
+ struct pci_dev *pci_dev = NULL;
+
+ dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device"
+ " %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]);
+
+ /* search pci cardbus bridge that contains the mmc controller */
+ /* the io region is already claimed by yenta_socket... */
+ while ((pci_dev =
+ pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
+ pci_dev))) {
+ /* try to init the device */
+ if (!sdricoh_init_mmc(pci_dev, pcmcia_dev)) {
+ dev_info(&pcmcia_dev->dev, "MMC controller found\n");
+ return 0;
+ }
+
+ }
+ dev_err(&pcmcia_dev->dev, "No MMC controller was found.\n");
+ return -ENODEV;
+}
+
+static void sdricoh_pcmcia_detach(struct pcmcia_device *link)
+{
+ struct mmc_host *mmc = link->priv;
+
+ dev_dbg(&link->dev, "detach\n");
+
+ /* remove mmc host */
+ if (mmc) {
+ struct sdricoh_host *host = mmc_priv(mmc);
+ mmc_remove_host(mmc);
+ pci_iounmap(host->pci_dev, host->iobase);
+ pci_dev_put(host->pci_dev);
+ mmc_free_host(mmc);
+ }
+ pcmcia_disable_device(link);
+}
+
+#ifdef CONFIG_PM
+static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "suspend\n");
+ return 0;
+}
+
+static int sdricoh_pcmcia_resume(struct pcmcia_device *link)
+{
+ struct mmc_host *mmc = link->priv;
+ dev_dbg(&link->dev, "resume\n");
+ sdricoh_reset(mmc_priv(mmc));
+ return 0;
+}
+#else
+#define sdricoh_pcmcia_suspend NULL
+#define sdricoh_pcmcia_resume NULL
+#endif
+
+static struct pcmcia_driver sdricoh_driver = {
+ .name = DRIVER_NAME,
+ .probe = sdricoh_pcmcia_probe,
+ .remove = sdricoh_pcmcia_detach,
+ .id_table = pcmcia_ids,
+ .suspend = sdricoh_pcmcia_suspend,
+ .resume = sdricoh_pcmcia_resume,
+};
+module_pcmcia_driver(sdricoh_driver);
+
+module_param(switchlocked, uint, 0444);
+
+MODULE_AUTHOR("Sascha Sommer <saschasommer@freenet.de>");
+MODULE_DESCRIPTION("Ricoh PCMCIA Secure Digital Interface driver");
+MODULE_LICENSE("GPL");
+
+MODULE_PARM_DESC(switchlocked, "Switch the card's locked status. "
+ "Use this when unlocked cards are shown read-only (default 0)");
diff --git a/kernel/drivers/mmc/host/sh_mmcif.c b/kernel/drivers/mmc/host/sh_mmcif.c
new file mode 100644
index 000000000..7eff087cf
--- /dev/null
+++ b/kernel/drivers/mmc/host/sh_mmcif.c
@@ -0,0 +1,1571 @@
+/*
+ * MMCIF eMMC driver.
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Yusuke Goda <yusuke.goda.sx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ *
+ * TODO
+ * 1. DMA
+ * 2. Power management
+ * 3. Handle MMC errors better
+ *
+ */
+
+/*
+ * The MMCIF driver is now processing MMC requests asynchronously, according
+ * to the Linux MMC API requirement.
+ *
+ * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
+ * data, and optional stop. To achieve asynchronous processing each of these
+ * stages is split into two halves: a top and a bottom half. The top half
+ * initialises the hardware, installs a timeout handler to handle completion
+ * timeouts, and returns. In case of the command stage this immediately returns
+ * control to the caller, leaving all further processing to run asynchronously.
+ * All further request processing is performed by the bottom halves.
+ *
+ * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
+ * thread, a DMA completion callback, if DMA is used, a timeout work, and
+ * request- and stage-specific handler methods.
+ *
+ * Each bottom half run begins with either a hardware interrupt, a DMA callback
+ * invocation, or a timeout work run. In case of an error or a successful
+ * processing completion, the MMC core is informed and the request processing is
+ * finished. In case processing has to continue, i.e., if data has to be read
+ * from or written to the card, or if a stop command has to be sent, the next
+ * top half is called, which performs the necessary hardware handling and
+ * reschedules the timeout work. This returns the driver state machine into the
+ * bottom half waiting state.
+ */
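+
+/*
+ * A minimal sketch of the typical flow for a multi-block read with a stop
+ * command, using the function names defined below (hard IRQ handling and
+ * timeout work omitted):
+ *
+ *   sh_mmcif_request() -> sh_mmcif_start_cmd()        top half, arms timeout
+ *   IRQ -> sh_mmcif_irqt() -> sh_mmcif_end_cmd()      bottom half
+ *                          -> sh_mmcif_multi_read()   next top half
+ *   IRQ -> sh_mmcif_irqt() -> sh_mmcif_mread_block()  repeated per block
+ *   IRQ -> sh_mmcif_irqt() -> sh_mmcif_stop_cmd()
+ *   IRQ -> sh_mmcif_irqt() -> mmc_request_done()
+ */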
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sh_mmcif.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#define DRIVER_NAME "sh_mmcif"
+#define DRIVER_VERSION "2010-04-28"
+
+/* CE_CMD_SET */
+#define CMD_MASK 0x3f000000
+#define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22))
+#define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
+#define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */
+#define CMD_SET_RBSY (1 << 21) /* R1b */
+#define CMD_SET_CCSEN (1 << 20)
+#define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */
+#define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */
+#define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */
+#define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */
+#define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */
+#define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */
+#define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */
+#define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check */
+#define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check */
+#define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check */
+#define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check */
+#define CMD_SET_CRCSTE (1 << 8) /* 1: not receive CRC status */
+#define CMD_SET_TBIT (1 << 7) /* 1: transmission bit "Low" */
+#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
+#define CMD_SET_CCSH (1 << 5)
+#define CMD_SET_DARS (1 << 2) /* Dual Data Rate */
+#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
+#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
+#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
+
+/* CE_CMD_CTRL */
+#define CMD_CTRL_BREAK (1 << 0)
+
+/* CE_BLOCK_SET */
+#define BLOCK_SIZE_MASK 0x0000ffff
+
+/* CE_INT */
+#define INT_CCSDE (1 << 29)
+#define INT_CMD12DRE (1 << 26)
+#define INT_CMD12RBE (1 << 25)
+#define INT_CMD12CRE (1 << 24)
+#define INT_DTRANE (1 << 23)
+#define INT_BUFRE (1 << 22)
+#define INT_BUFWEN (1 << 21)
+#define INT_BUFREN (1 << 20)
+#define INT_CCSRCV (1 << 19)
+#define INT_RBSYE (1 << 17)
+#define INT_CRSPE (1 << 16)
+#define INT_CMDVIO (1 << 15)
+#define INT_BUFVIO (1 << 14)
+#define INT_WDATERR (1 << 11)
+#define INT_RDATERR (1 << 10)
+#define INT_RIDXERR (1 << 9)
+#define INT_RSPERR (1 << 8)
+#define INT_CCSTO (1 << 5)
+#define INT_CRCSTO (1 << 4)
+#define INT_WDATTO (1 << 3)
+#define INT_RDATTO (1 << 2)
+#define INT_RBSYTO (1 << 1)
+#define INT_RSPTO (1 << 0)
+#define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \
+ INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
+ INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
+ INT_RDATTO | INT_RBSYTO | INT_RSPTO)
+
+#define INT_ALL (INT_RBSYE | INT_CRSPE | INT_BUFREN | \
+ INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
+ INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
+
+#define INT_CCS (INT_CCSTO | INT_CCSRCV | INT_CCSDE)
+
+/* CE_INT_MASK */
+#define MASK_ALL 0x00000000
+#define MASK_MCCSDE (1 << 29)
+#define MASK_MCMD12DRE (1 << 26)
+#define MASK_MCMD12RBE (1 << 25)
+#define MASK_MCMD12CRE (1 << 24)
+#define MASK_MDTRANE (1 << 23)
+#define MASK_MBUFRE (1 << 22)
+#define MASK_MBUFWEN (1 << 21)
+#define MASK_MBUFREN (1 << 20)
+#define MASK_MCCSRCV (1 << 19)
+#define MASK_MRBSYE (1 << 17)
+#define MASK_MCRSPE (1 << 16)
+#define MASK_MCMDVIO (1 << 15)
+#define MASK_MBUFVIO (1 << 14)
+#define MASK_MWDATERR (1 << 11)
+#define MASK_MRDATERR (1 << 10)
+#define MASK_MRIDXERR (1 << 9)
+#define MASK_MRSPERR (1 << 8)
+#define MASK_MCCSTO (1 << 5)
+#define MASK_MCRCSTO (1 << 4)
+#define MASK_MWDATTO (1 << 3)
+#define MASK_MRDATTO (1 << 2)
+#define MASK_MRBSYTO (1 << 1)
+#define MASK_MRSPTO (1 << 0)
+
+#define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
+ MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
+ MASK_MCRCSTO | MASK_MWDATTO | \
+ MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
+
+#define MASK_CLEAN (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE | \
+ MASK_MBUFREN | MASK_MBUFWEN | \
+ MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE | \
+ MASK_MCMD12RBE | MASK_MCMD12CRE)
+
+/* CE_HOST_STS1 */
+#define STS1_CMDSEQ (1 << 31)
+
+/* CE_HOST_STS2 */
+#define STS2_CRCSTE (1 << 31)
+#define STS2_CRC16E (1 << 30)
+#define STS2_AC12CRCE (1 << 29)
+#define STS2_RSPCRC7E (1 << 28)
+#define STS2_CRCSTEBE (1 << 27)
+#define STS2_RDATEBE (1 << 26)
+#define STS2_AC12REBE (1 << 25)
+#define STS2_RSPEBE (1 << 24)
+#define STS2_AC12IDXE (1 << 23)
+#define STS2_RSPIDXE (1 << 22)
+#define STS2_CCSTO (1 << 15)
+#define STS2_RDATTO (1 << 14)
+#define STS2_DATBSYTO (1 << 13)
+#define STS2_CRCSTTO (1 << 12)
+#define STS2_AC12BSYTO (1 << 11)
+#define STS2_RSPBSYTO (1 << 10)
+#define STS2_AC12RSPTO (1 << 9)
+#define STS2_RSPTO (1 << 8)
+#define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \
+ STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
+#define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \
+ STS2_DATBSYTO | STS2_CRCSTTO | \
+ STS2_AC12BSYTO | STS2_RSPBSYTO | \
+ STS2_AC12RSPTO | STS2_RSPTO)
+
+#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */
+#define CLKDEV_MMC_DATA 20000000 /* 20MHz */
+#define CLKDEV_INIT 400000 /* 400 KHz */
+
+enum mmcif_state {
+ STATE_IDLE,
+ STATE_REQUEST,
+ STATE_IOS,
+ STATE_TIMEOUT,
+};
+
+enum mmcif_wait_for {
+ MMCIF_WAIT_FOR_REQUEST,
+ MMCIF_WAIT_FOR_CMD,
+ MMCIF_WAIT_FOR_MREAD,
+ MMCIF_WAIT_FOR_MWRITE,
+ MMCIF_WAIT_FOR_READ,
+ MMCIF_WAIT_FOR_WRITE,
+ MMCIF_WAIT_FOR_READ_END,
+ MMCIF_WAIT_FOR_WRITE_END,
+ MMCIF_WAIT_FOR_STOP,
+};
+
+struct sh_mmcif_host {
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct platform_device *pd;
+ struct clk *hclk;
+ unsigned int clk;
+ int bus_width;
+ unsigned char timing;
+ bool sd_error;
+ bool dying;
+ long timeout;
+ void __iomem *addr;
+ u32 *pio_ptr;
+ spinlock_t lock; /* protect sh_mmcif_host::state */
+ enum mmcif_state state;
+ enum mmcif_wait_for wait_for;
+ struct delayed_work timeout_work;
+ size_t blocksize;
+ int sg_idx;
+ int sg_blkidx;
+ bool power;
+ bool card_present;
+ bool ccs_enable; /* Command Completion Signal support */
+ bool clk_ctrl2_enable;
+ struct mutex thread_lock;
+
+ /* DMA support */
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct completion dma_complete;
+ bool dma_active;
+};
+
+static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
+ unsigned int reg, u32 val)
+{
+ writel(val | readl(host->addr + reg), host->addr + reg);
+}
+
+static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
+ unsigned int reg, u32 val)
+{
+ writel(~val & readl(host->addr + reg), host->addr + reg);
+}
+
+static void mmcif_dma_complete(void *arg)
+{
+ struct sh_mmcif_host *host = arg;
+ struct mmc_request *mrq = host->mrq;
+
+ dev_dbg(&host->pd->dev, "Command completed\n");
+
+ if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
+ dev_name(&host->pd->dev)))
+ return;
+
+ complete(&host->dma_complete);
+}
+
+static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ struct scatterlist *sg = data->sg;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan = host->chan_rx;
+ dma_cookie_t cookie = -EINVAL;
+ int ret;
+
+ ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
+ DMA_FROM_DEVICE);
+ if (ret > 0) {
+ host->dma_active = true;
+ desc = dmaengine_prep_slave_sg(chan, sg, ret,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ }
+
+ if (desc) {
+ desc->callback = mmcif_dma_complete;
+ desc->callback_param = host;
+ cookie = dmaengine_submit(desc);
+ sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
+ dma_async_issue_pending(chan);
+ }
+ dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
+ __func__, data->sg_len, ret, cookie);
+
+ if (!desc) {
+ /* DMA failed, fall back to PIO */
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_rx = NULL;
+ host->dma_active = false;
+ dma_release_channel(chan);
+ /* Free the Tx channel too */
+ chan = host->chan_tx;
+ if (chan) {
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ }
+ dev_warn(&host->pd->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+ sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+ }
+
+ dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+ desc, cookie, data->sg_len);
+}
+
+static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ struct scatterlist *sg = data->sg;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan = host->chan_tx;
+ dma_cookie_t cookie = -EINVAL;
+ int ret;
+
+ ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
+ DMA_TO_DEVICE);
+ if (ret > 0) {
+ host->dma_active = true;
+ desc = dmaengine_prep_slave_sg(chan, sg, ret,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ }
+
+ if (desc) {
+ desc->callback = mmcif_dma_complete;
+ desc->callback_param = host;
+ cookie = dmaengine_submit(desc);
+ sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
+ dma_async_issue_pending(chan);
+ }
+ dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
+ __func__, data->sg_len, ret, cookie);
+
+ if (!desc) {
+ /* DMA failed, fall back to PIO */
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_tx = NULL;
+ host->dma_active = false;
+ dma_release_channel(chan);
+ /* Free the Rx channel too */
+ chan = host->chan_rx;
+ if (chan) {
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ }
+ dev_warn(&host->pd->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+ sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+ }
+
+ dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
+ desc, cookie);
+}
+
+static struct dma_chan *
+sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
+ struct sh_mmcif_plat_data *pdata,
+ enum dma_transfer_direction direction)
+{
+ struct dma_slave_config cfg = { 0, };
+ struct dma_chan *chan;
+ void *slave_data = NULL;
+ struct resource *res;
+ dma_cap_mask_t mask;
+ int ret;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ if (pdata)
+ slave_data = direction == DMA_MEM_TO_DEV ?
+ (void *)pdata->slave_id_tx :
+ (void *)pdata->slave_id_rx;
+
+ chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ slave_data, &host->pd->dev,
+ direction == DMA_MEM_TO_DEV ? "tx" : "rx");
+
+ dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
+ direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan);
+
+ if (!chan)
+ return NULL;
+
+ res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
+
+ cfg.direction = direction;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ cfg.src_addr = res->start + MMCIF_CE_DATA;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ } else {
+ cfg.dst_addr = res->start + MMCIF_CE_DATA;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ }
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret < 0) {
+ dma_release_channel(chan);
+ return NULL;
+ }
+
+ return chan;
+}
+
+static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
+ struct sh_mmcif_plat_data *pdata)
+{
+ host->dma_active = false;
+
+ if (pdata) {
+ if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
+ return;
+ } else if (!host->pd->dev.of_node) {
+ return;
+ }
+
+ /* We can only either use DMA for both Tx and Rx or not use it at all */
+ host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV);
+ if (!host->chan_tx)
+ return;
+
+ host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM);
+ if (!host->chan_rx) {
+ dma_release_channel(host->chan_tx);
+ host->chan_tx = NULL;
+ }
+}
+
+static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
+{
+ sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+ /* Descriptors are freed automatically */
+ if (host->chan_tx) {
+ struct dma_chan *chan = host->chan_tx;
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ }
+ if (host->chan_rx) {
+ struct dma_chan *chan = host->chan_rx;
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ }
+
+ host->dma_active = false;
+}
+
+static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
+{
+ struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+ bool sup_pclk = p ? p->sup_pclk : false;
+
+ sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
+ sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
+
+ if (!clk)
+ return;
+ if (sup_pclk && clk == host->clk)
+ sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
+ else
+ sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
+ ((fls(DIV_ROUND_UP(host->clk,
+ clk) - 1) - 1) << 16));
+
+ sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
+}
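+
+/*
+ * Worked example for the divider computation above, assuming the CLK_CTRL
+ * divider field encodes a division by 2^(value + 1): with host->clk at
+ * 104 MHz and a requested clock of 25 MHz, DIV_ROUND_UP(104, 25) = 5 and
+ * fls(5 - 1) - 1 = 2, so the bus clock becomes 104 MHz / 2^3 = 13 MHz.
+ * The divider is rounded up to the next power of two so the requested
+ * rate is never exceeded.
+ */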
+
+static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
+{
+ u32 tmp;
+
+ tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);
+
+ sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
+ sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
+ if (host->ccs_enable)
+ tmp |= SCCSTO_29;
+ if (host->clk_ctrl2_enable)
+ sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
+ sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
+ SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
+ /* byte swap on */
+ sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
+}
+
+static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
+{
+ u32 state1, state2;
+ int ret, timeout;
+
+ host->sd_error = false;
+
+ state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
+ state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
+ dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
+ dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);
+
+ if (state1 & STS1_CMDSEQ) {
+ sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
+ sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
+ for (timeout = 10000000; timeout; timeout--) {
+ if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
+ & STS1_CMDSEQ))
+ break;
+ mdelay(1);
+ }
+ if (!timeout) {
+ dev_err(&host->pd->dev,
+ "Forced end of command sequence timeout err\n");
+ return -EIO;
+ }
+ sh_mmcif_sync_reset(host);
+ dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
+ return -EIO;
+ }
+
+ if (state2 & STS2_CRC_ERR) {
+ dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
+ host->state, host->wait_for);
+ ret = -EIO;
+ } else if (state2 & STS2_TIMEOUT_ERR) {
+ dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
+ host->state, host->wait_for);
+ ret = -ETIMEDOUT;
+ } else {
+ dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
+ host->state, host->wait_for);
+ ret = -EIO;
+ }
+ return ret;
+}
+
+static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
+{
+ struct mmc_data *data = host->mrq->data;
+
+ host->sg_blkidx += host->blocksize;
+
+ /* data->sg->length must be a multiple of host->blocksize? */
+ BUG_ON(host->sg_blkidx > data->sg->length);
+
+ if (host->sg_blkidx == data->sg->length) {
+ host->sg_blkidx = 0;
+ if (++host->sg_idx < data->sg_len)
+ host->pio_ptr = sg_virt(++data->sg);
+ } else {
+ host->pio_ptr = p;
+ }
+
+ return host->sg_idx != data->sg_len;
+}
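+
+/*
+ * Example: with a 512-byte block size and a 4096-byte scatterlist entry,
+ * sg_blkidx advances by 512 per block and wraps to 0 after the eighth
+ * block, at which point pio_ptr moves on to the next sg entry; the
+ * function returns false once sg_idx reaches sg_len.
+ */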
+
+static void sh_mmcif_single_read(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+ BLOCK_SIZE_MASK) + 3;
+
+ host->wait_for = MMCIF_WAIT_FOR_READ;
+
+ /* buf read enable */
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+}
+
+static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p = sg_virt(data->sg);
+ int i;
+
+ if (host->sd_error) {
+ data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
+ return false;
+ }
+
+ for (i = 0; i < host->blocksize / 4; i++)
+ *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+
+ /* buffer read end */
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
+ host->wait_for = MMCIF_WAIT_FOR_READ_END;
+
+ return true;
+}
+
+static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (!data->sg_len || !data->sg->length)
+ return;
+
+ host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+ BLOCK_SIZE_MASK;
+
+ host->wait_for = MMCIF_WAIT_FOR_MREAD;
+ host->sg_idx = 0;
+ host->sg_blkidx = 0;
+ host->pio_ptr = sg_virt(data->sg);
+
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+}
+
+static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p = host->pio_ptr;
+ int i;
+
+ if (host->sd_error) {
+ data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
+ return false;
+ }
+
+ BUG_ON(!data->sg->length);
+
+ for (i = 0; i < host->blocksize / 4; i++)
+ *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+
+ if (!sh_mmcif_next_block(host, p))
+ return false;
+
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+
+ return true;
+}
+
+static void sh_mmcif_single_write(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+ BLOCK_SIZE_MASK) + 3;
+
+ host->wait_for = MMCIF_WAIT_FOR_WRITE;
+
+ /* buf write enable */
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
+
+static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p = sg_virt(data->sg);
+ int i;
+
+ if (host->sd_error) {
+ data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
+ return false;
+ }
+
+ for (i = 0; i < host->blocksize / 4; i++)
+ sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+
+ /* buffer write end */
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+ host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
+
+ return true;
+}
+
+static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (!data->sg_len || !data->sg->length)
+ return;
+
+ host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+ BLOCK_SIZE_MASK;
+
+ host->wait_for = MMCIF_WAIT_FOR_MWRITE;
+ host->sg_idx = 0;
+ host->sg_blkidx = 0;
+ host->pio_ptr = sg_virt(data->sg);
+
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
+
+static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p = host->pio_ptr;
+ int i;
+
+ if (host->sd_error) {
+ data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
+ return false;
+ }
+
+ BUG_ON(!data->sg->length);
+
+ for (i = 0; i < host->blocksize / 4; i++)
+ sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+
+ if (!sh_mmcif_next_block(host, p))
+ return false;
+
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+
+ return true;
+}
+
+static void sh_mmcif_get_response(struct sh_mmcif_host *host,
+ struct mmc_command *cmd)
+{
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
+ cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
+ cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
+ cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
+ } else
+ cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
+}
+
+static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
+ struct mmc_command *cmd)
+{
+ cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
+}
+
+static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ struct mmc_command *cmd = mrq->cmd;
+ u32 opc = cmd->opcode;
+ u32 tmp = 0;
+
+ /* Response Type check */
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ tmp |= CMD_SET_RTYP_NO;
+ break;
+ case MMC_RSP_R1:
+ case MMC_RSP_R1B:
+ case MMC_RSP_R3:
+ tmp |= CMD_SET_RTYP_6B;
+ break;
+ case MMC_RSP_R2:
+ tmp |= CMD_SET_RTYP_17B;
+ break;
+ default:
+ dev_err(&host->pd->dev, "Unsupported response type.\n");
+ break;
+ }
+ switch (opc) {
+ /* RBSY */
+ case MMC_SLEEP_AWAKE:
+ case MMC_SWITCH:
+ case MMC_STOP_TRANSMISSION:
+ case MMC_SET_WRITE_PROT:
+ case MMC_CLR_WRITE_PROT:
+ case MMC_ERASE:
+ tmp |= CMD_SET_RBSY;
+ break;
+ }
+ /* WDAT / DATW */
+ if (data) {
+ tmp |= CMD_SET_WDAT;
+ switch (host->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ tmp |= CMD_SET_DATW_1;
+ break;
+ case MMC_BUS_WIDTH_4:
+ tmp |= CMD_SET_DATW_4;
+ break;
+ case MMC_BUS_WIDTH_8:
+ tmp |= CMD_SET_DATW_8;
+ break;
+ default:
+ dev_err(&host->pd->dev, "Unsupported bus width.\n");
+ break;
+ }
+ switch (host->timing) {
+ case MMC_TIMING_MMC_DDR52:
+ /*
+ * The MMC core will only set this timing if the host
+ * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
+ * capability. MMCIF implementations with this
+ * capability, e.g. sh73a0, have to set it in their
+ * platform data.
+ */
+ tmp |= CMD_SET_DARS;
+ break;
+ }
+ }
+ /* DWEN */
+ if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
+ tmp |= CMD_SET_DWEN;
+ /* CMLTE/CMD12EN */
+ if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
+ tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
+ sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
+ data->blocks << 16);
+ }
+ /* RIDXC[1:0] check bits */
+ if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
+ opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
+ tmp |= CMD_SET_RIDXC_BITS;
+ /* RCRC7C[1:0] check bits */
+ if (opc == MMC_SEND_OP_COND)
+ tmp |= CMD_SET_CRC7C_BITS;
+ /* RCRC7C[1:0] internal CRC7 */
+ if (opc == MMC_ALL_SEND_CID ||
+ opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
+ tmp |= CMD_SET_CRC7C_INTERNAL;
+
+ return (opc << 24) | tmp;
+}
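+
+/*
+ * Example: CMD25 (MMC_WRITE_MULTIPLE_BLOCK, R1 response) on a 4-bit bus
+ * yields (25 << 24) | CMD_SET_RTYP_6B | CMD_SET_WDAT | CMD_SET_DATW_4 |
+ * CMD_SET_DWEN | CMD_SET_CMLTE | CMD_SET_CMD12EN, i.e. response type,
+ * data direction, bus width and auto-CMD12 issue are all encoded in a
+ * single CE_CMD_SET word.
+ */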
+
+static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
+ struct mmc_request *mrq, u32 opc)
+{
+ switch (opc) {
+ case MMC_READ_MULTIPLE_BLOCK:
+ sh_mmcif_multi_read(host, mrq);
+ return 0;
+ case MMC_WRITE_MULTIPLE_BLOCK:
+ sh_mmcif_multi_write(host, mrq);
+ return 0;
+ case MMC_WRITE_BLOCK:
+ sh_mmcif_single_write(host, mrq);
+ return 0;
+ case MMC_READ_SINGLE_BLOCK:
+ case MMC_SEND_EXT_CSD:
+ sh_mmcif_single_read(host, mrq);
+ return 0;
+ default:
+ dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
+ return -EINVAL;
+ }
+}
+
+static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_command *cmd = mrq->cmd;
+ u32 opc = cmd->opcode;
+ u32 mask;
+ unsigned long flags;
+
+ switch (opc) {
+ /* response busy check */
+ case MMC_SLEEP_AWAKE:
+ case MMC_SWITCH:
+ case MMC_STOP_TRANSMISSION:
+ case MMC_SET_WRITE_PROT:
+ case MMC_CLR_WRITE_PROT:
+ case MMC_ERASE:
+ mask = MASK_START_CMD | MASK_MRBSYE;
+ break;
+ default:
+ mask = MASK_START_CMD | MASK_MCRSPE;
+ break;
+ }
+
+ if (host->ccs_enable)
+ mask |= MASK_MCCSTO;
+
+ if (mrq->data) {
+ sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
+ sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
+ mrq->data->blksz);
+ }
+ opc = sh_mmcif_set_cmd(host, mrq);
+
+ if (host->ccs_enable)
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
+ else
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
+ /* set arg */
+ sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
+ /* set cmd */
+ spin_lock_irqsave(&host->lock, flags);
+ sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
+
+ host->wait_for = MMCIF_WAIT_FOR_CMD;
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ switch (mrq->cmd->opcode) {
+ case MMC_READ_MULTIPLE_BLOCK:
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
+ break;
+ case MMC_WRITE_MULTIPLE_BLOCK:
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
+ break;
+ default:
+ dev_err(&host->pd->dev, "unsupported stop cmd\n");
+ mrq->stop->error = sh_mmcif_error_manage(host);
+ return;
+ }
+
+ host->wait_for = MMCIF_WAIT_FOR_STOP;
+}
+
+static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sh_mmcif_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->state != STATE_IDLE) {
+ dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
+ spin_unlock_irqrestore(&host->lock, flags);
+ mrq->cmd->error = -EAGAIN;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ host->state = STATE_REQUEST;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ switch (mrq->cmd->opcode) {
+ /* MMCIF does not support SD/SDIO commands */
+ case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */
+ case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
+ if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
+ break;
+ case MMC_APP_CMD:
+ case SD_IO_RW_DIRECT:
+ host->state = STATE_IDLE;
+ mrq->cmd->error = -ETIMEDOUT;
+ mmc_request_done(mmc, mrq);
+ return;
+ default:
+ break;
+ }
+
+ host->mrq = mrq;
+
+ sh_mmcif_start_cmd(host, mrq);
+}
+
+static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
+{
+ int ret = clk_prepare_enable(host->hclk);
+
+ if (!ret) {
+ host->clk = clk_get_rate(host->hclk);
+ host->mmc->f_max = host->clk / 2;
+ host->mmc->f_min = host->clk / 512;
+ }
+
+ return ret;
+}
+
+static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ if (!IS_ERR(mmc->supply.vmmc))
+ /* Errors ignored... */
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+ ios->power_mode ? ios->vdd : 0);
+}
+
+static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sh_mmcif_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->state != STATE_IDLE) {
+ dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ host->state = STATE_IOS;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (ios->power_mode == MMC_POWER_UP) {
+ if (!host->card_present) {
+ /* See if we also get DMA */
+ sh_mmcif_request_dma(host, host->pd->dev.platform_data);
+ host->card_present = true;
+ }
+ sh_mmcif_set_power(host, ios);
+ } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
+ /* clock stop */
+ sh_mmcif_clock_control(host, 0);
+ if (ios->power_mode == MMC_POWER_OFF) {
+ if (host->card_present) {
+ sh_mmcif_release_dma(host);
+ host->card_present = false;
+ }
+ }
+ if (host->power) {
+ pm_runtime_put_sync(&host->pd->dev);
+ clk_disable_unprepare(host->hclk);
+ host->power = false;
+ if (ios->power_mode == MMC_POWER_OFF)
+ sh_mmcif_set_power(host, ios);
+ }
+ host->state = STATE_IDLE;
+ return;
+ }
+
+ if (ios->clock) {
+ if (!host->power) {
+ sh_mmcif_clk_update(host);
+ pm_runtime_get_sync(&host->pd->dev);
+ host->power = true;
+ sh_mmcif_sync_reset(host);
+ }
+ sh_mmcif_clock_control(host, ios->clock);
+ }
+
+ host->timing = ios->timing;
+ host->bus_width = ios->bus_width;
+ host->state = STATE_IDLE;
+}
+
+static int sh_mmcif_get_cd(struct mmc_host *mmc)
+{
+ struct sh_mmcif_host *host = mmc_priv(mmc);
+ struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+ int ret = mmc_gpio_get_cd(mmc);
+
+ if (ret >= 0)
+ return ret;
+
+ if (!p || !p->get_cd)
+ return -ENOSYS;
+ else
+ return p->get_cd(host->pd);
+}
+
+static struct mmc_host_ops sh_mmcif_ops = {
+ .request = sh_mmcif_request,
+ .set_ios = sh_mmcif_set_ios,
+ .get_cd = sh_mmcif_get_cd,
+};
+
+static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
+{
+ struct mmc_command *cmd = host->mrq->cmd;
+ struct mmc_data *data = host->mrq->data;
+ long time;
+
+ if (host->sd_error) {
+ switch (cmd->opcode) {
+ case MMC_ALL_SEND_CID:
+ case MMC_SELECT_CARD:
+ case MMC_APP_CMD:
+ cmd->error = -ETIMEDOUT;
+ break;
+ default:
+ cmd->error = sh_mmcif_error_manage(host);
+ break;
+ }
+ dev_dbg(&host->pd->dev, "CMD%d error %d\n",
+ cmd->opcode, cmd->error);
+ host->sd_error = false;
+ return false;
+ }
+ if (!(cmd->flags & MMC_RSP_PRESENT)) {
+ cmd->error = 0;
+ return false;
+ }
+
+ sh_mmcif_get_response(host, cmd);
+
+ if (!data)
+ return false;
+
+ /*
+ * Completion can be signalled from the DMA callback and from an error
+ * IRQ, so it has to be reset here, before setting .dma_active.
+ */
+ init_completion(&host->dma_complete);
+
+ if (data->flags & MMC_DATA_READ) {
+ if (host->chan_rx)
+ sh_mmcif_start_dma_rx(host);
+ } else {
+ if (host->chan_tx)
+ sh_mmcif_start_dma_tx(host);
+ }
+
+ if (!host->dma_active) {
+ data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
+ return !data->error;
+ }
+
+ /* Running in the IRQ thread, can sleep */
+ time = wait_for_completion_interruptible_timeout(&host->dma_complete,
+ host->timeout);
+
+ if (data->flags & MMC_DATA_READ)
+ dma_unmap_sg(host->chan_rx->device->dev,
+ data->sg, data->sg_len,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(host->chan_tx->device->dev,
+ data->sg, data->sg_len,
+ DMA_TO_DEVICE);
+
+ if (host->sd_error) {
+ dev_err(host->mmc->parent,
+ "Error IRQ while waiting for DMA completion!\n");
+ /* Woken up by an error IRQ: abort DMA */
+ data->error = sh_mmcif_error_manage(host);
+ } else if (!time) {
+ dev_err(host->mmc->parent, "DMA timeout!\n");
+ data->error = -ETIMEDOUT;
+ } else if (time < 0) {
+ dev_err(host->mmc->parent,
+ "wait_for_completion_...() error %ld!\n", time);
+ data->error = time;
+ }
+ sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
+ BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+ host->dma_active = false;
+
+ if (data->error) {
+ data->bytes_xfered = 0;
+ /* Abort DMA */
+ if (data->flags & MMC_DATA_READ)
+ dmaengine_terminate_all(host->chan_rx);
+ else
+ dmaengine_terminate_all(host->chan_tx);
+ }
+
+ return false;
+}
+
+static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
+{
+ struct sh_mmcif_host *host = dev_id;
+ struct mmc_request *mrq;
+ bool wait = false;
+ unsigned long flags;
+ int wait_work;
+
+ spin_lock_irqsave(&host->lock, flags);
+ wait_work = host->wait_for;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ cancel_delayed_work_sync(&host->timeout_work);
+
+ mutex_lock(&host->thread_lock);
+
+ mrq = host->mrq;
+ if (!mrq) {
+ dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
+ host->state, host->wait_for);
+ mutex_unlock(&host->thread_lock);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * All handlers return true if processing continues, and false if the
+ * request has to be completed - successfully or not.
+ */
+ switch (wait_work) {
+ case MMCIF_WAIT_FOR_REQUEST:
+ /* We're too late, the timeout has already kicked in */
+ mutex_unlock(&host->thread_lock);
+ return IRQ_HANDLED;
+ case MMCIF_WAIT_FOR_CMD:
+ /* Wait for data? */
+ wait = sh_mmcif_end_cmd(host);
+ break;
+ case MMCIF_WAIT_FOR_MREAD:
+ /* Wait for more data? */
+ wait = sh_mmcif_mread_block(host);
+ break;
+ case MMCIF_WAIT_FOR_READ:
+ /* Wait for data end? */
+ wait = sh_mmcif_read_block(host);
+ break;
+ case MMCIF_WAIT_FOR_MWRITE:
+ /* Wait for more data to write? */
+ wait = sh_mmcif_mwrite_block(host);
+ break;
+ case MMCIF_WAIT_FOR_WRITE:
+ /* Wait for data end? */
+ wait = sh_mmcif_write_block(host);
+ break;
+ case MMCIF_WAIT_FOR_STOP:
+ if (host->sd_error) {
+ mrq->stop->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
+ break;
+ }
+ sh_mmcif_get_cmd12response(host, mrq->stop);
+ mrq->stop->error = 0;
+ break;
+ case MMCIF_WAIT_FOR_READ_END:
+ case MMCIF_WAIT_FOR_WRITE_END:
+ if (host->sd_error) {
+ mrq->data->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ if (wait) {
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ /* Wait for more data */
+ mutex_unlock(&host->thread_lock);
+ return IRQ_HANDLED;
+ }
+
+ if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
+ struct mmc_data *data = mrq->data;
+ if (!mrq->cmd->error && data && !data->error)
+ data->bytes_xfered =
+ data->blocks * data->blksz;
+
+ if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
+ sh_mmcif_stop_cmd(host, mrq);
+ if (!mrq->stop->error) {
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ mutex_unlock(&host->thread_lock);
+ return IRQ_HANDLED;
+ }
+ }
+ }
+
+ host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+ host->state = STATE_IDLE;
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+
+ mutex_unlock(&host->thread_lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
+{
+ struct sh_mmcif_host *host = dev_id;
+ u32 state, mask;
+
+ state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
+ mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
+ if (host->ccs_enable)
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
+ else
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
+
+ if (state & ~MASK_CLEAN)
+ dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
+ state);
+
+ if (state & INT_ERR_STS || state & ~INT_ALL) {
+ host->sd_error = true;
+ dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
+ }
+ if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
+ if (!host->mrq)
+ dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
+ if (!host->dma_active)
+ return IRQ_WAKE_THREAD;
+ else if (host->sd_error)
+ mmcif_dma_complete(host);
+ } else {
+ dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void mmcif_timeout_work(struct work_struct *work)
+{
+ struct delayed_work *d = container_of(work, struct delayed_work, work);
+ struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
+ struct mmc_request *mrq = host->mrq;
+ unsigned long flags;
+
+ if (host->dying)
+ /* Don't run after mmc_remove_host() */
+ return;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->state == STATE_IDLE) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
+ host->wait_for, mrq->cmd->opcode);
+
+ host->state = STATE_TIMEOUT;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /*
+ * Handle races with cancel_delayed_work(), unless
+ * cancel_delayed_work_sync() is used
+ */
+ switch (host->wait_for) {
+ case MMCIF_WAIT_FOR_CMD:
+ mrq->cmd->error = sh_mmcif_error_manage(host);
+ break;
+ case MMCIF_WAIT_FOR_STOP:
+ mrq->stop->error = sh_mmcif_error_manage(host);
+ break;
+ case MMCIF_WAIT_FOR_MREAD:
+ case MMCIF_WAIT_FOR_MWRITE:
+ case MMCIF_WAIT_FOR_READ:
+ case MMCIF_WAIT_FOR_WRITE:
+ case MMCIF_WAIT_FOR_READ_END:
+ case MMCIF_WAIT_FOR_WRITE_END:
+ mrq->data->error = sh_mmcif_error_manage(host);
+ break;
+ default:
+ BUG();
+ }
+
+ host->state = STATE_IDLE;
+ host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
+{
+ struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_get_supply(mmc);
+
+ if (!pd)
+ return;
+
+ if (!mmc->ocr_avail)
+ mmc->ocr_avail = pd->ocr;
+ else if (pd->ocr)
+ dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+}
+
+static int sh_mmcif_probe(struct platform_device *pdev)
+{
+ int ret = 0, irq[2];
+ struct mmc_host *mmc;
+ struct sh_mmcif_host *host;
+ struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
+ struct resource *res;
+ void __iomem *reg;
+ const char *name;
+
+ irq[0] = platform_get_irq(pdev, 0);
+ irq[1] = platform_get_irq(pdev, 1);
+ if (irq[0] < 0) {
+ dev_err(&pdev->dev, "Get irq error\n");
+ return -ENXIO;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ ret = mmc_of_parse(mmc);
+ if (ret < 0)
+ goto err_host;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->addr = reg;
+ host->timeout = msecs_to_jiffies(10000);
+ host->ccs_enable = !pd || !pd->ccs_unsupported;
+ host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
+
+ host->pd = pdev;
+
+ spin_lock_init(&host->lock);
+
+ mmc->ops = &sh_mmcif_ops;
+ sh_mmcif_init_ocr(host);
+
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
+ if (pd && pd->caps)
+ mmc->caps |= pd->caps;
+ mmc->max_segs = 32;
+ mmc->max_blk_size = 512;
+ mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+ mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ platform_set_drvdata(pdev, host);
+
+ pm_runtime_enable(&pdev->dev);
+ host->power = false;
+
+ host->hclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->hclk)) {
+ ret = PTR_ERR(host->hclk);
+ dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
+ goto err_pm;
+ }
+ ret = sh_mmcif_clk_update(host);
+ if (ret < 0)
+ goto err_pm;
+
+ ret = pm_runtime_resume(&pdev->dev);
+ if (ret < 0)
+ goto err_clk;
+
+ INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
+
+ sh_mmcif_sync_reset(host);
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
+
+ name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
+ ret = devm_request_threaded_irq(&pdev->dev, irq[0], sh_mmcif_intr,
+ sh_mmcif_irqt, 0, name, host);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq error (%s)\n", name);
+ goto err_clk;
+ }
+ if (irq[1] >= 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq[1],
+ sh_mmcif_intr, sh_mmcif_irqt,
+ 0, "sh_mmc:int", host);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
+ goto err_clk;
+ }
+ }
+
+ if (pd && pd->use_cd_gpio) {
+ ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
+ if (ret < 0)
+ goto err_clk;
+ }
+
+ mutex_init(&host->thread_lock);
+
+ ret = mmc_add_host(mmc);
+ if (ret < 0)
+ goto err_clk;
+
+ dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
+
+ dev_info(&pdev->dev, "Chip version 0x%04x, clock rate %lu MHz\n",
+ sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
+ clk_get_rate(host->hclk) / 1000000UL);
+
+ clk_disable_unprepare(host->hclk);
+ return ret;
+
+err_clk:
+ clk_disable_unprepare(host->hclk);
+err_pm:
+ pm_runtime_disable(&pdev->dev);
+err_host:
+ mmc_free_host(mmc);
+ return ret;
+}
+
+static int sh_mmcif_remove(struct platform_device *pdev)
+{
+ struct sh_mmcif_host *host = platform_get_drvdata(pdev);
+
+ host->dying = true;
+ clk_prepare_enable(host->hclk);
+ pm_runtime_get_sync(&pdev->dev);
+
+ dev_pm_qos_hide_latency_limit(&pdev->dev);
+
+ mmc_remove_host(host->mmc);
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
+
+ /*
+ * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
+ * mmc_remove_host() call above. But swapping order doesn't help either
+ * (a query on the linux-mmc mailing list didn't bring any replies).
+ */
+ cancel_delayed_work_sync(&host->timeout_work);
+
+ clk_disable_unprepare(host->hclk);
+ mmc_free_host(host->mmc);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sh_mmcif_suspend(struct device *dev)
+{
+ struct sh_mmcif_host *host = dev_get_drvdata(dev);
+
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
+
+ return 0;
+}
+
+static int sh_mmcif_resume(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static const struct of_device_id mmcif_of_match[] = {
+ { .compatible = "renesas,sh-mmcif" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmcif_of_match);
+
+static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
+};
+
+static struct platform_driver sh_mmcif_driver = {
+ .probe = sh_mmcif_probe,
+ .remove = sh_mmcif_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &sh_mmcif_dev_pm_ops,
+ .of_match_table = mmcif_of_match,
+ },
+};
+
+module_platform_driver(sh_mmcif_driver);
+
+MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
diff --git a/kernel/drivers/mmc/host/sh_mobile_sdhi.c b/kernel/drivers/mmc/host/sh_mobile_sdhi.c
new file mode 100644
index 000000000..354f4f335
--- /dev/null
+++ b/kernel/drivers/mmc/host/sh_mobile_sdhi.c
@@ -0,0 +1,395 @@
+/*
+ * SuperH Mobile SDHI
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on "Compaq ASIC3 support":
+ *
+ * Copyright 2001 Compaq Computer Corporation.
+ * Copyright 2004-2005 Phil Blundell
+ * Copyright 2007-2008 OpenedHand Ltd.
+ *
+ * Authors: Phil Blundell <pb@handhelds.org>,
+ * Samuel Ortiz <sameo@openedhand.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
+#include <linux/sh_dma.h>
+#include <linux/delay.h>
+
+#include "tmio_mmc.h"
+
+#define EXT_ACC 0xe4
+
+#define host_to_priv(host) container_of((host)->pdata, struct sh_mobile_sdhi, mmc_data)
+
+struct sh_mobile_sdhi_of_data {
+ unsigned long tmio_flags;
+ unsigned long capabilities;
+ unsigned long capabilities2;
+ enum dma_slave_buswidth dma_buswidth;
+ dma_addr_t dma_rx_offset;
+};
+
+static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = {
+ {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
+ },
+};
+
+static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
+ TMIO_MMC_CLK_ACTUAL,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
+};
+
+static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
+ TMIO_MMC_CLK_ACTUAL,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
+ .dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dma_rx_offset = 0x2000,
+};
+
+static const struct of_device_id sh_mobile_sdhi_of_match[] = {
+ { .compatible = "renesas,sdhi-shmobile" },
+ { .compatible = "renesas,sdhi-sh7372" },
+ { .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
+ { .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
+ { .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
+
+struct sh_mobile_sdhi {
+ struct clk *clk;
+ struct tmio_mmc_data mmc_data;
+ struct tmio_mmc_dma dma_priv;
+};
+
+static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
+{
+ u32 val;
+
+ /*
+ * see also
+ * sh_mobile_sdhi_of_data :: dma_buswidth
+ */
+ switch (sd_ctrl_read16(host, CTL_VERSION)) {
+ case 0x490C:
+ val = (width == 32) ? 0x0001 : 0x0000;
+ break;
+ case 0xCB0D:
+ val = (width == 32) ? 0x0000 : 0x0001;
+ break;
+ default:
+ /* nothing to do */
+ return;
+ }
+
+ sd_ctrl_write16(host, EXT_ACC, val);
+}
+
+static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int *f)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
+ int ret = clk_prepare_enable(priv->clk);
+ if (ret < 0)
+ return ret;
+
+ *f = clk_get_rate(priv->clk);
+
+ /* enable 16-bit data access on SDBUF by default */
+ sh_mobile_sdhi_sdbuf_width(host, 16);
+
+ return 0;
+}
+
+static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
+ clk_disable_unprepare(priv->clk);
+}
+
+static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
+{
+ int timeout = 1000;
+
+ while (--timeout && !(sd_ctrl_read16(host, CTL_STATUS2) & (1 << 13)))
+ udelay(1);
+
+ if (!timeout) {
+ dev_warn(&host->pdev->dev, "timeout waiting for SD bus idle\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
+{
+ switch (addr)
+ {
+ case CTL_SD_CMD:
+ case CTL_STOP_INTERNAL_ACTION:
+ case CTL_XFER_BLK_COUNT:
+ case CTL_SD_CARD_CLK_CTL:
+ case CTL_SD_XFER_LEN:
+ case CTL_SD_MEM_CARD_OPT:
+ case CTL_TRANSACTION_CTL:
+ case CTL_DMA_ENABLE:
+ return sh_mobile_sdhi_wait_idle(host);
+ }
+
+ return 0;
+}
+
+static int sh_mobile_sdhi_multi_io_quirk(struct mmc_card *card,
+ unsigned int direction, int blk_size)
+{
+ /*
+ * In Renesas controllers, when performing a multiple block read
+ * of one or two blocks, the response value may not be read
+ * properly depending on the timing with which the response
+ * register is read. Use single block reads to work around this
+ * HW bug.
+ */
+ if ((direction == MMC_DATA_READ) &&
+ blk_size == 2)
+ return 1;
+
+ return blk_size;
+}
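+
+/*
+ * Example: for a 2-block read the hook above returns 1, so the core
+ * issues single-block reads instead; reads of three or more blocks and
+ * all writes pass through with blk_size unchanged.
+ */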
+
+static void sh_mobile_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
+{
+ sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
+
+ /* enable 32-bit access in DMA mode if possible */
+ sh_mobile_sdhi_sdbuf_width(host, enable ? 32 : 16);
+}
+
+static int sh_mobile_sdhi_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(sh_mobile_sdhi_of_match, &pdev->dev);
+ struct sh_mobile_sdhi *priv;
+ struct tmio_mmc_data *mmc_data;
+ struct tmio_mmc_data *mmd = pdev->dev.platform_data;
+ struct tmio_mmc_host *host;
+ struct resource *res;
+ int irq, ret, i = 0;
+ bool multiplexed_isr = true;
+ struct tmio_mmc_dma *dma_priv;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
+ if (priv == NULL) {
+ dev_err(&pdev->dev, "kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ mmc_data = &priv->mmc_data;
+ dma_priv = &priv->dma_priv;
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
+ goto eprobe;
+ }
+
+ host = tmio_mmc_host_alloc(pdev);
+ if (!host) {
+ ret = -ENOMEM;
+ goto eprobe;
+ }
+
+ host->dma = dma_priv;
+ host->write16_hook = sh_mobile_sdhi_write16_hook;
+ host->clk_enable = sh_mobile_sdhi_clk_enable;
+ host->clk_disable = sh_mobile_sdhi_clk_disable;
+ host->multi_io_quirk = sh_mobile_sdhi_multi_io_quirk;
+ /* SD control register space size is 0x100, 0x200 for bus_shift=1 */
+ if (resource_size(res) > 0x100)
+ host->bus_shift = 1;
+ else
+ host->bus_shift = 0;
+
+ if (mmd)
+ *mmc_data = *mmd;
+
+ dma_priv->filter = shdma_chan_filter;
+ dma_priv->enable = sh_mobile_sdhi_enable_dma;
+
+ mmc_data->alignment_shift = 1; /* 2-byte alignment */
+ mmc_data->capabilities |= MMC_CAP_MMC_HIGHSPEED;
+
+ /*
+ * All SDHI blocks support 2-byte and larger block sizes in 4-bit
+ * bus width mode.
+ */
+ mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
+
+ /*
+ * All SDHI blocks support SDIO IRQ signalling.
+ */
+ mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
+
+ /*
+ * All SDHI blocks have a CMD12 control bit.
+ */
+ mmc_data->flags |= TMIO_MMC_HAVE_CMD12_CTRL;
+
+ /*
+ * All SDHI blocks need the SDIO_INFO1 reserved bit.
+ */
+ mmc_data->flags |= TMIO_MMC_SDIO_STATUS_QUIRK;
+
+ if (of_id && of_id->data) {
+ const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
+ mmc_data->flags |= of_data->tmio_flags;
+ mmc_data->capabilities |= of_data->capabilities;
+ mmc_data->capabilities2 |= of_data->capabilities2;
+ mmc_data->dma_rx_offset = of_data->dma_rx_offset;
+ dma_priv->dma_buswidth = of_data->dma_buswidth;
+ }
+
+ ret = tmio_mmc_host_probe(host, mmc_data);
+ if (ret < 0)
+ goto efree;
+
+ /*
+ * Allow one or more specific (named) ISRs or
+ * one or more multiplexed (un-named) ISRs.
+ */
+
+ irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT);
+ if (irq >= 0) {
+ multiplexed_isr = false;
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_card_detect_irq, 0,
+ dev_name(&pdev->dev), host);
+ if (ret)
+ goto eirq;
+ }
+
+ irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO);
+ if (irq >= 0) {
+ multiplexed_isr = false;
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdio_irq, 0,
+ dev_name(&pdev->dev), host);
+ if (ret)
+ goto eirq;
+ }
+
+ irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD);
+ if (irq >= 0) {
+ multiplexed_isr = false;
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdcard_irq, 0,
+ dev_name(&pdev->dev), host);
+ if (ret)
+ goto eirq;
+ } else if (!multiplexed_isr) {
+ dev_err(&pdev->dev,
+ "Principal SD-card IRQ is missing among named interrupts\n");
+ ret = irq;
+ goto eirq;
+ }
+
+ if (multiplexed_isr) {
+ while (1) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ break;
+ i++;
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
+ dev_name(&pdev->dev), host);
+ if (ret)
+ goto eirq;
+ }
+
+ /* There must be at least one IRQ source */
+ if (!i) {
+ ret = irq;
+ goto eirq;
+ }
+ }
+
+ dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n",
+ mmc_hostname(host->mmc), (unsigned long)
+ (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start),
+ host->mmc->f_max / 1000000);
+
+ return ret;
+
+eirq:
+ tmio_mmc_host_remove(host);
+efree:
+ tmio_mmc_host_free(host);
+eprobe:
+ return ret;
+}
+
+static int sh_mobile_sdhi_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ tmio_mmc_host_remove(host);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
+ tmio_mmc_host_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver sh_mobile_sdhi_driver = {
+ .driver = {
+ .name = "sh_mobile_sdhi",
+ .pm = &tmio_mmc_dev_pm_ops,
+ .of_match_table = sh_mobile_sdhi_of_match,
+ },
+ .probe = sh_mobile_sdhi_probe,
+ .remove = sh_mobile_sdhi_remove,
+};
+
+module_platform_driver(sh_mobile_sdhi_driver);
+
+MODULE_DESCRIPTION("SuperH Mobile SDHI driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sh_mobile_sdhi");
diff --git a/kernel/drivers/mmc/host/sunxi-mmc.c b/kernel/drivers/mmc/host/sunxi-mmc.c
new file mode 100644
index 000000000..4d3e1ffe5
--- /dev/null
+++ b/kernel/drivers/mmc/host/sunxi-mmc.c
@@ -0,0 +1,1089 @@
+/*
+ * Driver for sunxi SD/MMC host controllers
+ * (C) Copyright 2007-2011 Reuuimlla Technology Co., Ltd.
+ * (C) Copyright 2007-2011 Aaron Maoye <leafy.myeh@reuuimllatech.com>
+ * (C) Copyright 2013-2014 O2S GmbH <www.o2s.ch>
+ * (C) Copyright 2013-2014 David Lanzendörfer <david.lanzendoerfer@o2s.ch>
+ * (C) Copyright 2013-2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/reset.h>
+
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/slot-gpio.h>
+
+/* register offset definitions */
+#define SDXC_REG_GCTRL (0x00) /* SMC Global Control Register */
+#define SDXC_REG_CLKCR (0x04) /* SMC Clock Control Register */
+#define SDXC_REG_TMOUT (0x08) /* SMC Time Out Register */
+#define SDXC_REG_WIDTH (0x0C) /* SMC Bus Width Register */
+#define SDXC_REG_BLKSZ (0x10) /* SMC Block Size Register */
+#define SDXC_REG_BCNTR (0x14) /* SMC Byte Count Register */
+#define SDXC_REG_CMDR (0x18) /* SMC Command Register */
+#define SDXC_REG_CARG (0x1C) /* SMC Argument Register */
+#define SDXC_REG_RESP0 (0x20) /* SMC Response Register 0 */
+#define SDXC_REG_RESP1 (0x24) /* SMC Response Register 1 */
+#define SDXC_REG_RESP2 (0x28) /* SMC Response Register 2 */
+#define SDXC_REG_RESP3 (0x2C) /* SMC Response Register 3 */
+#define SDXC_REG_IMASK (0x30) /* SMC Interrupt Mask Register */
+#define SDXC_REG_MISTA (0x34) /* SMC Masked Interrupt Status Register */
+#define SDXC_REG_RINTR (0x38) /* SMC Raw Interrupt Status Register */
+#define SDXC_REG_STAS (0x3C) /* SMC Status Register */
+#define SDXC_REG_FTRGL (0x40) /* SMC FIFO Threshold Watermark Register */
+#define SDXC_REG_FUNS (0x44) /* SMC Function Select Register */
+#define SDXC_REG_CBCR (0x48) /* SMC CIU Byte Count Register */
+#define SDXC_REG_BBCR (0x4C) /* SMC BIU Byte Count Register */
+#define SDXC_REG_DBGC (0x50) /* SMC Debug Enable Register */
+#define SDXC_REG_HWRST (0x78) /* SMC Card Hardware Reset Register */
+#define SDXC_REG_DMAC (0x80) /* SMC IDMAC Control Register */
+#define SDXC_REG_DLBA (0x84) /* SMC IDMAC Descriptor List Base Address */
+#define SDXC_REG_IDST (0x88) /* SMC IDMAC Status Register */
+#define SDXC_REG_IDIE (0x8C) /* SMC IDMAC Interrupt Enable Register */
+#define SDXC_REG_CHDA (0x90)
+#define SDXC_REG_CBDA (0x94)
+
+#define mmc_readl(host, reg) \
+ readl((host)->reg_base + SDXC_##reg)
+#define mmc_writel(host, reg, value) \
+ writel((value), (host)->reg_base + SDXC_##reg)
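+/* e.g. mmc_readl(host, REG_GCTRL) expands to readl(host->reg_base + SDXC_REG_GCTRL) */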
+
+/* global control register bits */
+#define SDXC_SOFT_RESET BIT(0)
+#define SDXC_FIFO_RESET BIT(1)
+#define SDXC_DMA_RESET BIT(2)
+#define SDXC_INTERRUPT_ENABLE_BIT BIT(4)
+#define SDXC_DMA_ENABLE_BIT BIT(5)
+#define SDXC_DEBOUNCE_ENABLE_BIT BIT(8)
+#define SDXC_POSEDGE_LATCH_DATA BIT(9)
+#define SDXC_DDR_MODE BIT(10)
+#define SDXC_MEMORY_ACCESS_DONE BIT(29)
+#define SDXC_ACCESS_DONE_DIRECT BIT(30)
+#define SDXC_ACCESS_BY_AHB BIT(31)
+#define SDXC_ACCESS_BY_DMA (0 << 31)
+#define SDXC_HARDWARE_RESET \
+ (SDXC_SOFT_RESET | SDXC_FIFO_RESET | SDXC_DMA_RESET)
+
+/* clock control bits */
+#define SDXC_CARD_CLOCK_ON BIT(16)
+#define SDXC_LOW_POWER_ON BIT(17)
+
+/* bus width */
+#define SDXC_WIDTH1 0
+#define SDXC_WIDTH4 1
+#define SDXC_WIDTH8 2
+
+/* smc command bits */
+#define SDXC_RESP_EXPIRE BIT(6)
+#define SDXC_LONG_RESPONSE BIT(7)
+#define SDXC_CHECK_RESPONSE_CRC BIT(8)
+#define SDXC_DATA_EXPIRE BIT(9)
+#define SDXC_WRITE BIT(10)
+#define SDXC_SEQUENCE_MODE BIT(11)
+#define SDXC_SEND_AUTO_STOP BIT(12)
+#define SDXC_WAIT_PRE_OVER BIT(13)
+#define SDXC_STOP_ABORT_CMD BIT(14)
+#define SDXC_SEND_INIT_SEQUENCE BIT(15)
+#define SDXC_UPCLK_ONLY BIT(21)
+#define SDXC_READ_CEATA_DEV BIT(22)
+#define SDXC_CCS_EXPIRE BIT(23)
+#define SDXC_ENABLE_BIT_BOOT BIT(24)
+#define SDXC_ALT_BOOT_OPTIONS BIT(25)
+#define SDXC_BOOT_ACK_EXPIRE BIT(26)
+#define SDXC_BOOT_ABORT BIT(27)
+#define SDXC_VOLTAGE_SWITCH BIT(28)
+#define SDXC_USE_HOLD_REGISTER BIT(29)
+#define SDXC_START BIT(31)
+
+/* interrupt bits */
+#define SDXC_RESP_ERROR BIT(1)
+#define SDXC_COMMAND_DONE BIT(2)
+#define SDXC_DATA_OVER BIT(3)
+#define SDXC_TX_DATA_REQUEST BIT(4)
+#define SDXC_RX_DATA_REQUEST BIT(5)
+#define SDXC_RESP_CRC_ERROR BIT(6)
+#define SDXC_DATA_CRC_ERROR BIT(7)
+#define SDXC_RESP_TIMEOUT BIT(8)
+#define SDXC_DATA_TIMEOUT BIT(9)
+#define SDXC_VOLTAGE_CHANGE_DONE BIT(10)
+#define SDXC_FIFO_RUN_ERROR BIT(11)
+#define SDXC_HARD_WARE_LOCKED BIT(12)
+#define SDXC_START_BIT_ERROR BIT(13)
+#define SDXC_AUTO_COMMAND_DONE BIT(14)
+#define SDXC_END_BIT_ERROR BIT(15)
+#define SDXC_SDIO_INTERRUPT BIT(16)
+#define SDXC_CARD_INSERT BIT(30)
+#define SDXC_CARD_REMOVE BIT(31)
+#define SDXC_INTERRUPT_ERROR_BIT \
+ (SDXC_RESP_ERROR | SDXC_RESP_CRC_ERROR | SDXC_DATA_CRC_ERROR | \
+ SDXC_RESP_TIMEOUT | SDXC_DATA_TIMEOUT | SDXC_FIFO_RUN_ERROR | \
+ SDXC_HARD_WARE_LOCKED | SDXC_START_BIT_ERROR | SDXC_END_BIT_ERROR)
+#define SDXC_INTERRUPT_DONE_BIT \
+ (SDXC_AUTO_COMMAND_DONE | SDXC_DATA_OVER | \
+ SDXC_COMMAND_DONE | SDXC_VOLTAGE_CHANGE_DONE)
+
+/* status */
+#define SDXC_RXWL_FLAG BIT(0)
+#define SDXC_TXWL_FLAG BIT(1)
+#define SDXC_FIFO_EMPTY BIT(2)
+#define SDXC_FIFO_FULL BIT(3)
+#define SDXC_CARD_PRESENT BIT(8)
+#define SDXC_CARD_DATA_BUSY BIT(9)
+#define SDXC_DATA_FSM_BUSY BIT(10)
+#define SDXC_DMA_REQUEST BIT(31)
+#define SDXC_FIFO_SIZE 16
+
+/* Function select */
+#define SDXC_CEATA_ON (0xceaa << 16)
+#define SDXC_SEND_IRQ_RESPONSE BIT(0)
+#define SDXC_SDIO_READ_WAIT BIT(1)
+#define SDXC_ABORT_READ_DATA BIT(2)
+#define SDXC_SEND_CCSD BIT(8)
+#define SDXC_SEND_AUTO_STOPCCSD BIT(9)
+#define SDXC_CEATA_DEV_IRQ_ENABLE BIT(10)
+
+/* IDMA controller bus mode bit field */
+#define SDXC_IDMAC_SOFT_RESET BIT(0)
+#define SDXC_IDMAC_FIX_BURST BIT(1)
+#define SDXC_IDMAC_IDMA_ON BIT(7)
+#define SDXC_IDMAC_REFETCH_DES BIT(31)
+
+/* IDMA status bit field */
+#define SDXC_IDMAC_TRANSMIT_INTERRUPT BIT(0)
+#define SDXC_IDMAC_RECEIVE_INTERRUPT BIT(1)
+#define SDXC_IDMAC_FATAL_BUS_ERROR BIT(2)
+#define SDXC_IDMAC_DESTINATION_INVALID BIT(4)
+#define SDXC_IDMAC_CARD_ERROR_SUM BIT(5)
+#define SDXC_IDMAC_NORMAL_INTERRUPT_SUM BIT(8)
+#define SDXC_IDMAC_ABNORMAL_INTERRUPT_SUM BIT(9)
+#define SDXC_IDMAC_HOST_ABORT_INTERRUPT BIT(10)
+#define SDXC_IDMAC_IDLE (0 << 13)
+#define SDXC_IDMAC_SUSPEND (1 << 13)
+#define SDXC_IDMAC_DESC_READ (2 << 13)
+#define SDXC_IDMAC_DESC_CHECK (3 << 13)
+#define SDXC_IDMAC_READ_REQUEST_WAIT (4 << 13)
+#define SDXC_IDMAC_WRITE_REQUEST_WAIT (5 << 13)
+#define SDXC_IDMAC_READ (6 << 13)
+#define SDXC_IDMAC_WRITE (7 << 13)
+#define SDXC_IDMAC_DESC_CLOSE (8 << 13)
+
+/*
+* If the idma-des-size-bits property is e.g. 13, the bufsize bits are:
+* Bits 0-12: buf1 size
+* Bits 13-25: buf2 size
+* Bits 26-31: not used
+* Since we only ever set buf1 size, we can simply store it directly.
+*/
+#define SDXC_IDMAC_DES0_DIC BIT(1) /* disable interrupt on completion */
+#define SDXC_IDMAC_DES0_LD BIT(2) /* last descriptor */
+#define SDXC_IDMAC_DES0_FD BIT(3) /* first descriptor */
+#define SDXC_IDMAC_DES0_CH BIT(4) /* chain mode */
+#define SDXC_IDMAC_DES0_ER BIT(5) /* end of ring */
+#define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */
+#define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */
+
+struct sunxi_idma_des {
+ u32 config;
+ u32 buf_size;
+ u32 buf_addr_ptr1;
+ u32 buf_addr_ptr2;
+};
+
+struct sunxi_mmc_host {
+ struct mmc_host *mmc;
+ struct reset_control *reset;
+
+ /* IO mapping base */
+ void __iomem *reg_base;
+
+ /* clock management */
+ struct clk *clk_ahb;
+ struct clk *clk_mmc;
+ struct clk *clk_sample;
+ struct clk *clk_output;
+
+ /* irq */
+ spinlock_t lock;
+ int irq;
+ u32 int_sum;
+ u32 sdio_imask;
+
+ /* dma */
+ u32 idma_des_size_bits;
+ dma_addr_t sg_dma;
+ void *sg_cpu;
+ bool wait_dma;
+
+ struct mmc_request *mrq;
+ struct mmc_request *manual_stop_mrq;
+ int ferror;
+};
+
+static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host)
+{
+ unsigned long expire = jiffies + msecs_to_jiffies(250);
+ u32 rval;
+
+ mmc_writel(host, REG_GCTRL, SDXC_HARDWARE_RESET);
+ do {
+ rval = mmc_readl(host, REG_GCTRL);
+ } while (time_before(jiffies, expire) && (rval & SDXC_HARDWARE_RESET));
+
+ if (rval & SDXC_HARDWARE_RESET) {
+ dev_err(mmc_dev(host->mmc), "fatal err reset timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int sunxi_mmc_init_host(struct mmc_host *mmc)
+{
+ u32 rval;
+ struct sunxi_mmc_host *host = mmc_priv(mmc);
+
+ if (sunxi_mmc_reset_host(host))
+ return -EIO;
+
+ mmc_writel(host, REG_FTRGL, 0x20070008);
+ mmc_writel(host, REG_TMOUT, 0xffffffff);
+ mmc_writel(host, REG_IMASK, host->sdio_imask);
+ mmc_writel(host, REG_RINTR, 0xffffffff);
+ mmc_writel(host, REG_DBGC, 0xdeb);
+ mmc_writel(host, REG_FUNS, SDXC_CEATA_ON);
+ mmc_writel(host, REG_DLBA, host->sg_dma);
+
+ rval = mmc_readl(host, REG_GCTRL);
+ rval |= SDXC_INTERRUPT_ENABLE_BIT;
+ rval &= ~SDXC_ACCESS_DONE_DIRECT;
+ mmc_writel(host, REG_GCTRL, rval);
+
+ return 0;
+}
+
+static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
+ struct mmc_data *data)
+{
+ struct sunxi_idma_des *pdes = (struct sunxi_idma_des *)host->sg_cpu;
+ dma_addr_t next_desc = host->sg_dma;
+ int i, max_len = (1 << host->idma_des_size_bits);
+
+ for (i = 0; i < data->sg_len; i++) {
+ pdes[i].config = SDXC_IDMAC_DES0_CH | SDXC_IDMAC_DES0_OWN |
+ SDXC_IDMAC_DES0_DIC;
+
+ if (data->sg[i].length == max_len)
+ pdes[i].buf_size = 0; /* 0 == max_len */
+ else
+ pdes[i].buf_size = data->sg[i].length;
+
+ next_desc += sizeof(struct sunxi_idma_des);
+ pdes[i].buf_addr_ptr1 = sg_dma_address(&data->sg[i]);
+ pdes[i].buf_addr_ptr2 = (u32)next_desc;
+ }
+
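+ /*
+ * Mark the first and last descriptors, terminate the chain, and
+ * clear DIC on the last descriptor so that only the final transfer
+ * raises a completion interrupt.
+ */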
+ pdes[0].config |= SDXC_IDMAC_DES0_FD;
+ pdes[i - 1].config |= SDXC_IDMAC_DES0_LD | SDXC_IDMAC_DES0_ER;
+ pdes[i - 1].config &= ~SDXC_IDMAC_DES0_DIC;
+ pdes[i - 1].buf_addr_ptr2 = 0;
+
+ /*
+ * Avoid the io-store starting the idmac hitting io-mem before the
+ * descriptors hit the main-mem.
+ */
+ wmb();
+}
+
+static enum dma_data_direction sunxi_mmc_get_dma_dir(struct mmc_data *data)
+{
+ if (data->flags & MMC_DATA_WRITE)
+ return DMA_TO_DEVICE;
+ else
+ return DMA_FROM_DEVICE;
+}
+
+static int sunxi_mmc_map_dma(struct sunxi_mmc_host *host,
+ struct mmc_data *data)
+{
+ u32 i, dma_len;
+ struct scatterlist *sg;
+
+ dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ sunxi_mmc_get_dma_dir(data));
+ if (dma_len == 0) {
+ dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
+ return -ENOMEM;
+ }
+
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (sg->offset & 3 || sg->length & 3) {
+ dev_err(mmc_dev(host->mmc),
+ "unaligned scatterlist: os %x length %d\n",
+ sg->offset, sg->length);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void sunxi_mmc_start_dma(struct sunxi_mmc_host *host,
+ struct mmc_data *data)
+{
+ u32 rval;
+
+ sunxi_mmc_init_idma_des(host, data);
+
+ rval = mmc_readl(host, REG_GCTRL);
+ rval |= SDXC_DMA_ENABLE_BIT;
+ mmc_writel(host, REG_GCTRL, rval);
+ rval |= SDXC_DMA_RESET;
+ mmc_writel(host, REG_GCTRL, rval);
+
+ mmc_writel(host, REG_DMAC, SDXC_IDMAC_SOFT_RESET);
+
+ if (!(data->flags & MMC_DATA_WRITE))
+ mmc_writel(host, REG_IDIE, SDXC_IDMAC_RECEIVE_INTERRUPT);
+
+ mmc_writel(host, REG_DMAC,
+ SDXC_IDMAC_FIX_BURST | SDXC_IDMAC_IDMA_ON);
+}
+
+static void sunxi_mmc_send_manual_stop(struct sunxi_mmc_host *host,
+ struct mmc_request *req)
+{
+ u32 arg, cmd_val, ri;
+ unsigned long expire = jiffies + msecs_to_jiffies(1000);
+
+ cmd_val = SDXC_START | SDXC_RESP_EXPIRE |
+ SDXC_STOP_ABORT_CMD | SDXC_CHECK_RESPONSE_CRC;
+
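+ /*
+ * For SDIO transfers abort with a CMD52 write of the aborted
+ * function number to the CCCR ABORT register; for SD/MMC cards
+ * fall back to CMD12 (STOP_TRANSMISSION).
+ */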
+ if (req->cmd->opcode == SD_IO_RW_EXTENDED) {
+ cmd_val |= SD_IO_RW_DIRECT;
+ arg = (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
+ ((req->cmd->arg >> 28) & 0x7);
+ } else {
+ cmd_val |= MMC_STOP_TRANSMISSION;
+ arg = 0;
+ }
+
+ mmc_writel(host, REG_CARG, arg);
+ mmc_writel(host, REG_CMDR, cmd_val);
+
+ do {
+ ri = mmc_readl(host, REG_RINTR);
+ } while (!(ri & (SDXC_COMMAND_DONE | SDXC_INTERRUPT_ERROR_BIT)) &&
+ time_before(jiffies, expire));
+
+ if (!(ri & SDXC_COMMAND_DONE) || (ri & SDXC_INTERRUPT_ERROR_BIT)) {
+ dev_err(mmc_dev(host->mmc), "send stop command failed\n");
+ if (req->stop)
+ req->stop->resp[0] = -ETIMEDOUT;
+ } else {
+ if (req->stop)
+ req->stop->resp[0] = mmc_readl(host, REG_RESP0);
+ }
+
+ mmc_writel(host, REG_RINTR, 0xffff);
+}
+
+static void sunxi_mmc_dump_errinfo(struct sunxi_mmc_host *host)
+{
+ struct mmc_command *cmd = host->mrq->cmd;
+ struct mmc_data *data = host->mrq->data;
+
+ /* For some cmds timeout is normal with sd/mmc cards */
+ if ((host->int_sum & SDXC_INTERRUPT_ERROR_BIT) ==
+ SDXC_RESP_TIMEOUT && (cmd->opcode == SD_IO_SEND_OP_COND ||
+ cmd->opcode == SD_IO_RW_DIRECT))
+ return;
+
+ dev_err(mmc_dev(host->mmc),
+ "smc %d err, cmd %d,%s%s%s%s%s%s%s%s%s%s !!\n",
+ host->mmc->index, cmd->opcode,
+ data ? (data->flags & MMC_DATA_WRITE ? " WR" : " RD") : "",
+ host->int_sum & SDXC_RESP_ERROR ? " RE" : "",
+ host->int_sum & SDXC_RESP_CRC_ERROR ? " RCE" : "",
+ host->int_sum & SDXC_DATA_CRC_ERROR ? " DCE" : "",
+ host->int_sum & SDXC_RESP_TIMEOUT ? " RTO" : "",
+ host->int_sum & SDXC_DATA_TIMEOUT ? " DTO" : "",
+ host->int_sum & SDXC_FIFO_RUN_ERROR ? " FE" : "",
+ host->int_sum & SDXC_HARD_WARE_LOCKED ? " HL" : "",
+ host->int_sum & SDXC_START_BIT_ERROR ? " SBE" : "",
+ host->int_sum & SDXC_END_BIT_ERROR ? " EBE" : ""
+ );
+}
+
+/* Called in interrupt context! */
+static irqreturn_t sunxi_mmc_finalize_request(struct sunxi_mmc_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_data *data = mrq->data;
+ u32 rval;
+
+ mmc_writel(host, REG_IMASK, host->sdio_imask);
+ mmc_writel(host, REG_IDIE, 0);
+
+ if (host->int_sum & SDXC_INTERRUPT_ERROR_BIT) {
+ sunxi_mmc_dump_errinfo(host);
+ mrq->cmd->error = -ETIMEDOUT;
+
+ if (data) {
+ data->error = -ETIMEDOUT;
+ host->manual_stop_mrq = mrq;
+ }
+
+ if (mrq->stop)
+ mrq->stop->error = -ETIMEDOUT;
+ } else {
+ if (mrq->cmd->flags & MMC_RSP_136) {
+ mrq->cmd->resp[0] = mmc_readl(host, REG_RESP3);
+ mrq->cmd->resp[1] = mmc_readl(host, REG_RESP2);
+ mrq->cmd->resp[2] = mmc_readl(host, REG_RESP1);
+ mrq->cmd->resp[3] = mmc_readl(host, REG_RESP0);
+ } else {
+ mrq->cmd->resp[0] = mmc_readl(host, REG_RESP0);
+ }
+
+ if (data)
+ data->bytes_xfered = data->blocks * data->blksz;
+ }
+
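+ /* quiesce the IDMAC and reset the DMA engine and FIFO before unmapping */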
+ if (data) {
+ mmc_writel(host, REG_IDST, 0x337);
+ mmc_writel(host, REG_DMAC, 0);
+ rval = mmc_readl(host, REG_GCTRL);
+ rval |= SDXC_DMA_RESET;
+ mmc_writel(host, REG_GCTRL, rval);
+ rval &= ~SDXC_DMA_ENABLE_BIT;
+ mmc_writel(host, REG_GCTRL, rval);
+ rval |= SDXC_FIFO_RESET;
+ mmc_writel(host, REG_GCTRL, rval);
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ sunxi_mmc_get_dma_dir(data));
+ }
+
+ mmc_writel(host, REG_RINTR, 0xffff);
+
+ host->mrq = NULL;
+ host->int_sum = 0;
+ host->wait_dma = false;
+
+ return host->manual_stop_mrq ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
+static irqreturn_t sunxi_mmc_irq(int irq, void *dev_id)
+{
+ struct sunxi_mmc_host *host = dev_id;
+ struct mmc_request *mrq;
+ u32 msk_int, idma_int;
+ bool finalize = false;
+ bool sdio_int = false;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ spin_lock(&host->lock);
+
+ idma_int = mmc_readl(host, REG_IDST);
+ msk_int = mmc_readl(host, REG_MISTA);
+
+ dev_dbg(mmc_dev(host->mmc), "irq: rq %p mi %08x idi %08x\n",
+ host->mrq, msk_int, idma_int);
+
+ mrq = host->mrq;
+ if (mrq) {
+ if (idma_int & SDXC_IDMAC_RECEIVE_INTERRUPT)
+ host->wait_dma = false;
+
+ host->int_sum |= msk_int;
+
+ /* Wait for COMMAND_DONE on RESPONSE_TIMEOUT before finalize */
+ if ((host->int_sum & SDXC_RESP_TIMEOUT) &&
+ !(host->int_sum & SDXC_COMMAND_DONE))
+ mmc_writel(host, REG_IMASK,
+ host->sdio_imask | SDXC_COMMAND_DONE);
+ /* Don't wait for dma on error */
+ else if (host->int_sum & SDXC_INTERRUPT_ERROR_BIT)
+ finalize = true;
+ else if ((host->int_sum & SDXC_INTERRUPT_DONE_BIT) &&
+ !host->wait_dma)
+ finalize = true;
+ }
+
+ if (msk_int & SDXC_SDIO_INTERRUPT)
+ sdio_int = true;
+
+ mmc_writel(host, REG_RINTR, msk_int);
+ mmc_writel(host, REG_IDST, idma_int);
+
+ if (finalize)
+ ret = sunxi_mmc_finalize_request(host);
+
+ spin_unlock(&host->lock);
+
+ if (finalize && ret == IRQ_HANDLED)
+ mmc_request_done(host->mmc, mrq);
+
+ if (sdio_int)
+ mmc_signal_sdio_irq(host->mmc);
+
+ return ret;
+}
+
+static irqreturn_t sunxi_mmc_handle_manual_stop(int irq, void *dev_id)
+{
+ struct sunxi_mmc_host *host = dev_id;
+ struct mmc_request *mrq;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&host->lock, iflags);
+ mrq = host->manual_stop_mrq;
+ spin_unlock_irqrestore(&host->lock, iflags);
+
+ if (!mrq) {
+ dev_err(mmc_dev(host->mmc), "no request for manual stop\n");
+ return IRQ_HANDLED;
+ }
+
+ dev_err(mmc_dev(host->mmc), "data error, sending stop command\n");
+
+ /*
+ * We will never have more than one outstanding request,
+ * and we do not complete the request until after
+ * we've cleared host->manual_stop_mrq so we do not need to
+ * spin lock this function.
+ * Additionally we have wait states within this function
+ * so having it in a lock is a very bad idea.
+ */
+ sunxi_mmc_send_manual_stop(host, mrq);
+
+ spin_lock_irqsave(&host->lock, iflags);
+ host->manual_stop_mrq = NULL;
+ spin_unlock_irqrestore(&host->lock, iflags);
+
+ mmc_request_done(host->mmc, mrq);
+
+ return IRQ_HANDLED;
+}
+
+static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
+{
+ unsigned long expire = jiffies + msecs_to_jiffies(250);
+ u32 rval;
+
+ rval = mmc_readl(host, REG_CLKCR);
+ rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON);
+
+ if (oclk_en)
+ rval |= SDXC_CARD_CLOCK_ON;
+
+ mmc_writel(host, REG_CLKCR, rval);
+
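+ /* have the controller load the new setting via an update-clock-only command */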
+ rval = SDXC_START | SDXC_UPCLK_ONLY | SDXC_WAIT_PRE_OVER;
+ mmc_writel(host, REG_CMDR, rval);
+
+ do {
+ rval = mmc_readl(host, REG_CMDR);
+ } while (time_before(jiffies, expire) && (rval & SDXC_START));
+
+ /* clear irq status bits set by the command */
+ mmc_writel(host, REG_RINTR,
+ mmc_readl(host, REG_RINTR) & ~SDXC_SDIO_INTERRUPT);
+
+ if (rval & SDXC_START) {
+ dev_err(mmc_dev(host->mmc), "fatal err update clk timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
+ struct mmc_ios *ios)
+{
+ u32 rate, oclk_dly, rval, sclk_dly;
+ int ret;
+
+ rate = clk_round_rate(host->clk_mmc, ios->clock);
+ dev_dbg(mmc_dev(host->mmc), "setting clk to %d, rounded %d\n",
+ ios->clock, rate);
+
+ /* setting clock rate */
+ ret = clk_set_rate(host->clk_mmc, rate);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc), "error setting clk to %d: %d\n",
+ rate, ret);
+ return ret;
+ }
+
+ ret = sunxi_mmc_oclk_onoff(host, 0);
+ if (ret)
+ return ret;
+
+ /* clear internal divider */
+ rval = mmc_readl(host, REG_CLKCR);
+ rval &= ~0xff;
+ mmc_writel(host, REG_CLKCR, rval);
+
+ /* determine delays */
+ if (rate <= 400000) {
+ oclk_dly = 180;
+ sclk_dly = 42;
+ } else if (rate <= 25000000) {
+ oclk_dly = 180;
+ sclk_dly = 75;
+ } else if (rate <= 50000000) {
+ if (ios->timing == MMC_TIMING_UHS_DDR50) {
+ oclk_dly = 60;
+ sclk_dly = 120;
+ } else {
+ oclk_dly = 90;
+ sclk_dly = 150;
+ }
+ } else if (rate <= 100000000) {
+ oclk_dly = 6;
+ sclk_dly = 24;
+ } else if (rate <= 200000000) {
+ oclk_dly = 3;
+ sclk_dly = 12;
+ } else {
+ return -EINVAL;
+ }
+
+ clk_set_phase(host->clk_sample, sclk_dly);
+ clk_set_phase(host->clk_output, oclk_dly);
+
+ return sunxi_mmc_oclk_onoff(host, 1);
+}
+
+static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sunxi_mmc_host *host = mmc_priv(mmc);
+ u32 rval;
+
+ /* Set the power state */
+ switch (ios->power_mode) {
+ case MMC_POWER_ON:
+ break;
+
+ case MMC_POWER_UP:
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
+ host->ferror = sunxi_mmc_init_host(mmc);
+ if (host->ferror)
+ return;
+
+ dev_dbg(mmc_dev(mmc), "power on!\n");
+ break;
+
+ case MMC_POWER_OFF:
+ dev_dbg(mmc_dev(mmc), "power off!\n");
+ sunxi_mmc_reset_host(host);
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ break;
+ }
+
+ /* set bus width */
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ mmc_writel(host, REG_WIDTH, SDXC_WIDTH1);
+ break;
+ case MMC_BUS_WIDTH_4:
+ mmc_writel(host, REG_WIDTH, SDXC_WIDTH4);
+ break;
+ case MMC_BUS_WIDTH_8:
+ mmc_writel(host, REG_WIDTH, SDXC_WIDTH8);
+ break;
+ }
+
+ /* set ddr mode */
+ rval = mmc_readl(host, REG_GCTRL);
+ if (ios->timing == MMC_TIMING_UHS_DDR50)
+ rval |= SDXC_DDR_MODE;
+ else
+ rval &= ~SDXC_DDR_MODE;
+ mmc_writel(host, REG_GCTRL, rval);
+
+ /* set up clock */
+ if (ios->clock && ios->power_mode) {
+ host->ferror = sunxi_mmc_clk_set_rate(host, ios);
+ /* The Android code had a usleep_range(50000, 55000) here */
+ }
+}
+
+static void sunxi_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct sunxi_mmc_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 imask;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ imask = mmc_readl(host, REG_IMASK);
+ if (enable) {
+ host->sdio_imask = SDXC_SDIO_INTERRUPT;
+ imask |= SDXC_SDIO_INTERRUPT;
+ } else {
+ host->sdio_imask = 0;
+ imask &= ~SDXC_SDIO_INTERRUPT;
+ }
+ mmc_writel(host, REG_IMASK, imask);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void sunxi_mmc_hw_reset(struct mmc_host *mmc)
+{
+ struct sunxi_mmc_host *host = mmc_priv(mmc);
+ mmc_writel(host, REG_HWRST, 0);
+ udelay(10);
+ mmc_writel(host, REG_HWRST, 1);
+ udelay(300);
+}
+
+static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sunxi_mmc_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ unsigned long iflags;
+ u32 imask = SDXC_INTERRUPT_ERROR_BIT;
+ u32 cmd_val = SDXC_START | (cmd->opcode & 0x3f);
+ bool wait_dma = host->wait_dma;
+ int ret;
+
+ /* Check for set_ios errors (should never happen) */
+ if (host->ferror) {
+ mrq->cmd->error = host->ferror;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ if (data) {
+ ret = sunxi_mmc_map_dma(host, data);
+ if (ret < 0) {
+ dev_err(mmc_dev(mmc), "map DMA failed\n");
+ cmd->error = ret;
+ data->error = ret;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ }
+
+ if (cmd->opcode == MMC_GO_IDLE_STATE) {
+ cmd_val |= SDXC_SEND_INIT_SEQUENCE;
+ imask |= SDXC_COMMAND_DONE;
+ }
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ cmd_val |= SDXC_RESP_EXPIRE;
+ if (cmd->flags & MMC_RSP_136)
+ cmd_val |= SDXC_LONG_RESPONSE;
+ if (cmd->flags & MMC_RSP_CRC)
+ cmd_val |= SDXC_CHECK_RESPONSE_CRC;
+
+ if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) {
+ cmd_val |= SDXC_DATA_EXPIRE | SDXC_WAIT_PRE_OVER;
+ if (cmd->data->flags & MMC_DATA_STREAM) {
+ imask |= SDXC_AUTO_COMMAND_DONE;
+ cmd_val |= SDXC_SEQUENCE_MODE |
+ SDXC_SEND_AUTO_STOP;
+ }
+
+ if (cmd->data->stop) {
+ imask |= SDXC_AUTO_COMMAND_DONE;
+ cmd_val |= SDXC_SEND_AUTO_STOP;
+ } else {
+ imask |= SDXC_DATA_OVER;
+ }
+
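+ /*
+ * For reads we must also wait for the IDMAC receive
+ * interrupt before finalizing (see sunxi_mmc_irq).
+ */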
+ if (cmd->data->flags & MMC_DATA_WRITE)
+ cmd_val |= SDXC_WRITE;
+ else
+ wait_dma = true;
+ } else {
+ imask |= SDXC_COMMAND_DONE;
+ }
+ } else {
+ imask |= SDXC_COMMAND_DONE;
+ }
+
+ dev_dbg(mmc_dev(mmc), "cmd %d(%08x) arg %x ie 0x%08x len %d\n",
+ cmd_val & 0x3f, cmd_val, cmd->arg, imask,
+ mrq->data ? mrq->data->blksz * mrq->data->blocks : 0);
+
+ spin_lock_irqsave(&host->lock, iflags);
+
+ if (host->mrq || host->manual_stop_mrq) {
+ spin_unlock_irqrestore(&host->lock, iflags);
+
+ if (data)
+ dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
+ sunxi_mmc_get_dma_dir(data));
+
+ dev_err(mmc_dev(mmc), "request already pending\n");
+ mrq->cmd->error = -EBUSY;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ if (data) {
+ mmc_writel(host, REG_BLKSZ, data->blksz);
+ mmc_writel(host, REG_BCNTR, data->blksz * data->blocks);
+ sunxi_mmc_start_dma(host, data);
+ }
+
+ host->mrq = mrq;
+ host->wait_dma = wait_dma;
+ mmc_writel(host, REG_IMASK, host->sdio_imask | imask);
+ mmc_writel(host, REG_CARG, cmd->arg);
+ mmc_writel(host, REG_CMDR, cmd_val);
+
+ spin_unlock_irqrestore(&host->lock, iflags);
+}
+
+static const struct of_device_id sunxi_mmc_of_match[] = {
+ { .compatible = "allwinner,sun4i-a10-mmc", },
+ { .compatible = "allwinner,sun5i-a13-mmc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
+
+static struct mmc_host_ops sunxi_mmc_ops = {
+ .request = sunxi_mmc_request,
+ .set_ios = sunxi_mmc_set_ios,
+ .get_ro = mmc_gpio_get_ro,
+ .get_cd = mmc_gpio_get_cd,
+ .enable_sdio_irq = sunxi_mmc_enable_sdio_irq,
+ .hw_reset = sunxi_mmc_hw_reset,
+};
+
+static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ if (of_device_is_compatible(np, "allwinner,sun4i-a10-mmc"))
+ host->idma_des_size_bits = 13;
+ else
+ host->idma_des_size_bits = 16;
+
+ ret = mmc_regulator_get_supply(host->mmc);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get vmmc supply\n");
+ return ret;
+ }
+
+ host->reg_base = devm_ioremap_resource(&pdev->dev,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ if (IS_ERR(host->reg_base))
+ return PTR_ERR(host->reg_base);
+
+ host->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(host->clk_ahb)) {
+ dev_err(&pdev->dev, "Could not get ahb clock\n");
+ return PTR_ERR(host->clk_ahb);
+ }
+
+ host->clk_mmc = devm_clk_get(&pdev->dev, "mmc");
+ if (IS_ERR(host->clk_mmc)) {
+ dev_err(&pdev->dev, "Could not get mmc clock\n");
+ return PTR_ERR(host->clk_mmc);
+ }
+
+ host->clk_output = devm_clk_get(&pdev->dev, "output");
+ if (IS_ERR(host->clk_output)) {
+ dev_err(&pdev->dev, "Could not get output clock\n");
+ return PTR_ERR(host->clk_output);
+ }
+
+ host->clk_sample = devm_clk_get(&pdev->dev, "sample");
+ if (IS_ERR(host->clk_sample)) {
+ dev_err(&pdev->dev, "Could not get sample clock\n");
+ return PTR_ERR(host->clk_sample);
+ }
+
+ host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
+ if (PTR_ERR(host->reset) == -EPROBE_DEFER)
+ return PTR_ERR(host->reset);
+
+ ret = clk_prepare_enable(host->clk_ahb);
+ if (ret) {
+ dev_err(&pdev->dev, "Enable ahb clk err %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(host->clk_mmc);
+ if (ret) {
+ dev_err(&pdev->dev, "Enable mmc clk err %d\n", ret);
+ goto error_disable_clk_ahb;
+ }
+
+ ret = clk_prepare_enable(host->clk_output);
+ if (ret) {
+ dev_err(&pdev->dev, "Enable output clk err %d\n", ret);
+ goto error_disable_clk_mmc;
+ }
+
+ ret = clk_prepare_enable(host->clk_sample);
+ if (ret) {
+ dev_err(&pdev->dev, "Enable sample clk err %d\n", ret);
+ goto error_disable_clk_output;
+ }
+
+ if (!IS_ERR(host->reset)) {
+ ret = reset_control_deassert(host->reset);
+ if (ret) {
+ dev_err(&pdev->dev, "reset err %d\n", ret);
+ goto error_disable_clk_sample;
+ }
+ }
+
+ /*
+ * Sometimes the controller asserts the irq on boot for some reason,
+ * make sure the controller is in a sane state before enabling irqs.
+ */
+ ret = sunxi_mmc_reset_host(host);
+ if (ret)
+ goto error_assert_reset;
+
+ host->irq = platform_get_irq(pdev, 0);
+ return devm_request_threaded_irq(&pdev->dev, host->irq, sunxi_mmc_irq,
+ sunxi_mmc_handle_manual_stop, 0, "sunxi-mmc", host);
+
+error_assert_reset:
+ if (!IS_ERR(host->reset))
+ reset_control_assert(host->reset);
+error_disable_clk_sample:
+ clk_disable_unprepare(host->clk_sample);
+error_disable_clk_output:
+ clk_disable_unprepare(host->clk_output);
+error_disable_clk_mmc:
+ clk_disable_unprepare(host->clk_mmc);
+error_disable_clk_ahb:
+ clk_disable_unprepare(host->clk_ahb);
+ return ret;
+}
+
+static int sunxi_mmc_probe(struct platform_device *pdev)
+{
+ struct sunxi_mmc_host *host;
+ struct mmc_host *mmc;
+ int ret;
+
+ mmc = mmc_alloc_host(sizeof(struct sunxi_mmc_host), &pdev->dev);
+ if (!mmc) {
+ dev_err(&pdev->dev, "mmc alloc host failed\n");
+ return -ENOMEM;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ spin_lock_init(&host->lock);
+
+ ret = sunxi_mmc_resource_request(host, pdev);
+ if (ret)
+ goto error_free_host;
+
+ host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &host->sg_dma, GFP_KERNEL);
+ if (!host->sg_cpu) {
+ dev_err(&pdev->dev, "Failed to allocate DMA descriptor mem\n");
+ ret = -ENOMEM;
+ goto error_free_host;
+ }
+
+ mmc->ops = &sunxi_mmc_ops;
+ mmc->max_blk_count = 8192;
+ mmc->max_blk_size = 4096;
+ mmc->max_segs = PAGE_SIZE / sizeof(struct sunxi_idma_des);
+ mmc->max_seg_size = (1 << host->idma_des_size_bits);
+ mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
+ /* 400kHz ~ 50MHz */
+ mmc->f_min = 400000;
+ mmc->f_max = 50000000;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto error_free_dma;
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto error_free_dma;
+
+ dev_info(&pdev->dev, "base:0x%p irq:%u\n", host->reg_base, host->irq);
+ platform_set_drvdata(pdev, mmc);
+ return 0;
+
+error_free_dma:
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+error_free_host:
+ mmc_free_host(mmc);
+ return ret;
+}
+
+static int sunxi_mmc_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct sunxi_mmc_host *host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+ disable_irq(host->irq);
+ sunxi_mmc_reset_host(host);
+
+ if (!IS_ERR(host->reset))
+ reset_control_assert(host->reset);
+
+ clk_disable_unprepare(host->clk_mmc);
+ clk_disable_unprepare(host->clk_ahb);
+
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+ mmc_free_host(mmc);
+
+ return 0;
+}
+
+static struct platform_driver sunxi_mmc_driver = {
+ .driver = {
+ .name = "sunxi-mmc",
+ .of_match_table = of_match_ptr(sunxi_mmc_of_match),
+ },
+ .probe = sunxi_mmc_probe,
+ .remove = sunxi_mmc_remove,
+};
+module_platform_driver(sunxi_mmc_driver);
+
+MODULE_DESCRIPTION("Allwinner's SD/MMC Card Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("David Lanzend�rfer <david.lanzendoerfer@o2s.ch>");
+MODULE_ALIAS("platform:sunxi-mmc");
diff --git a/kernel/drivers/mmc/host/tifm_sd.c b/kernel/drivers/mmc/host/tifm_sd.c
new file mode 100644
index 000000000..93c4b40df
--- /dev/null
+++ b/kernel/drivers/mmc/host/tifm_sd.c
@@ -0,0 +1,1091 @@
+/*
+ * tifm_sd.c - TI FlashMedia driver
+ *
+ * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Special thanks to Brad Campbell for extensive testing of this driver.
+ *
+ */
+
+
+#include <linux/tifm.h>
+#include <linux/mmc/host.h>
+#include <linux/highmem.h>
+#include <linux/scatterlist.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+#define DRIVER_NAME "tifm_sd"
+#define DRIVER_VERSION "0.8"
+
+static bool no_dma = 0;
+static bool fixed_timeout = 0;
+module_param(no_dma, bool, 0644);
+module_param(fixed_timeout, bool, 0644);
+
+/* Constants here are mostly from OMAP5912 datasheet */
+#define TIFM_MMCSD_RESET 0x0002
+#define TIFM_MMCSD_CLKMASK 0x03ff
+#define TIFM_MMCSD_POWER 0x0800
+#define TIFM_MMCSD_4BBUS 0x8000
+#define TIFM_MMCSD_RXDE 0x8000 /* rx dma enable */
+#define TIFM_MMCSD_TXDE 0x0080 /* tx dma enable */
+#define TIFM_MMCSD_BUFINT 0x0c00 /* set bits: AE, AF */
+#define TIFM_MMCSD_DPE 0x0020 /* data timeout counted in kilocycles */
+#define TIFM_MMCSD_INAB 0x0080 /* abort / initialize command */
+#define TIFM_MMCSD_READ 0x8000
+
+#define TIFM_MMCSD_ERRMASK 0x01e0 /* set bits: CCRC, CTO, DCRC, DTO */
+#define TIFM_MMCSD_EOC 0x0001 /* end of command phase */
+#define TIFM_MMCSD_CD 0x0002 /* card detect */
+#define TIFM_MMCSD_CB 0x0004 /* card enter busy state */
+#define TIFM_MMCSD_BRS 0x0008 /* block received/sent */
+#define TIFM_MMCSD_EOFB 0x0010 /* card exit busy state */
+#define TIFM_MMCSD_DTO 0x0020 /* data time-out */
+#define TIFM_MMCSD_DCRC 0x0040 /* data crc error */
+#define TIFM_MMCSD_CTO 0x0080 /* command time-out */
+#define TIFM_MMCSD_CCRC 0x0100 /* command crc error */
+#define TIFM_MMCSD_AF 0x0400 /* fifo almost full */
+#define TIFM_MMCSD_AE 0x0800 /* fifo almost empty */
+#define TIFM_MMCSD_OCRB 0x1000 /* OCR busy */
+#define TIFM_MMCSD_CIRQ 0x2000 /* card irq (cmd40/sdio) */
+#define TIFM_MMCSD_CERR 0x4000 /* card status error */
+
+#define TIFM_MMCSD_ODTO 0x0040 /* open drain / extended timeout */
+#define TIFM_MMCSD_CARD_RO 0x0200 /* card is read-only */
+
+#define TIFM_MMCSD_FIFO_SIZE 0x0020
+
+#define TIFM_MMCSD_RSP_R0 0x0000
+#define TIFM_MMCSD_RSP_R1 0x0100
+#define TIFM_MMCSD_RSP_R2 0x0200
+#define TIFM_MMCSD_RSP_R3 0x0300
+#define TIFM_MMCSD_RSP_R4 0x0400
+#define TIFM_MMCSD_RSP_R5 0x0500
+#define TIFM_MMCSD_RSP_R6 0x0600
+
+#define TIFM_MMCSD_RSP_BUSY 0x0800
+
+#define TIFM_MMCSD_CMD_BC 0x0000
+#define TIFM_MMCSD_CMD_BCR 0x1000
+#define TIFM_MMCSD_CMD_AC 0x2000
+#define TIFM_MMCSD_CMD_ADTC 0x3000
+
+#define TIFM_MMCSD_MAX_BLOCK_SIZE 0x0800UL
+
+enum {
+ CMD_READY = 0x0001,
+ FIFO_READY = 0x0002,
+ BRS_READY = 0x0004,
+ SCMD_ACTIVE = 0x0008,
+ SCMD_READY = 0x0010,
+ CARD_BUSY = 0x0020,
+ DATA_CARRY = 0x0040
+};
+
+struct tifm_sd {
+ struct tifm_dev *dev;
+
+ unsigned short eject:1,
+ open_drain:1,
+ no_dma:1;
+ unsigned short cmd_flags;
+
+ unsigned int clk_freq;
+ unsigned int clk_div;
+ unsigned long timeout_jiffies;
+
+ struct tasklet_struct finish_tasklet;
+ struct timer_list timer;
+ struct mmc_request *req;
+
+ int sg_len;
+ int sg_pos;
+ unsigned int block_pos;
+ struct scatterlist bounce_buf;
+ unsigned char bounce_buf_data[TIFM_MMCSD_MAX_BLOCK_SIZE];
+};
+
+/* for some reason, host won't respond correctly to readw/writew */
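+/*
+ * Data moves through a 32-bit register two bytes at a time; an odd
+ * trailing byte is parked in bounce_buf_data[0] and flagged with
+ * DATA_CARRY until the next transfer picks it up.
+ */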
+static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
+ unsigned int off, unsigned int cnt)
+{
+ struct tifm_dev *sock = host->dev;
+ unsigned char *buf;
+ unsigned int pos = 0, val;
+
+ buf = kmap_atomic(pg) + off;
+ if (host->cmd_flags & DATA_CARRY) {
+ buf[pos++] = host->bounce_buf_data[0];
+ host->cmd_flags &= ~DATA_CARRY;
+ }
+
+ while (pos < cnt) {
+ val = readl(sock->addr + SOCK_MMCSD_DATA);
+ buf[pos++] = val & 0xff;
+ if (pos == cnt) {
+ host->bounce_buf_data[0] = (val >> 8) & 0xff;
+ host->cmd_flags |= DATA_CARRY;
+ break;
+ }
+ buf[pos++] = (val >> 8) & 0xff;
+ }
+ kunmap_atomic(buf - off);
+}
+
+static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
+ unsigned int off, unsigned int cnt)
+{
+ struct tifm_dev *sock = host->dev;
+ unsigned char *buf;
+ unsigned int pos = 0, val;
+
+ buf = kmap_atomic(pg) + off;
+ if (host->cmd_flags & DATA_CARRY) {
+ val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00);
+ writel(val, sock->addr + SOCK_MMCSD_DATA);
+ host->cmd_flags &= ~DATA_CARRY;
+ }
+
+ while (pos < cnt) {
+ val = buf[pos++];
+ if (pos == cnt) {
+ host->bounce_buf_data[0] = val & 0xff;
+ host->cmd_flags |= DATA_CARRY;
+ break;
+ }
+ val |= (buf[pos++] << 8) & 0xff00;
+ writel(val, sock->addr + SOCK_MMCSD_DATA);
+ }
+ kunmap_atomic(buf - off);
+}
+
+static void tifm_sd_transfer_data(struct tifm_sd *host)
+{
+ struct mmc_data *r_data = host->req->cmd->data;
+ struct scatterlist *sg = r_data->sg;
+ unsigned int off, cnt, t_size = TIFM_MMCSD_FIFO_SIZE * 2;
+ unsigned int p_off, p_cnt;
+ struct page *pg;
+
+ if (host->sg_pos == host->sg_len)
+ return;
+ while (t_size) {
+ cnt = sg[host->sg_pos].length - host->block_pos;
+ if (!cnt) {
+ host->block_pos = 0;
+ host->sg_pos++;
+ if (host->sg_pos == host->sg_len) {
+ if ((r_data->flags & MMC_DATA_WRITE)
+ && (host->cmd_flags & DATA_CARRY))
+ writel(host->bounce_buf_data[0],
+ host->dev->addr
+ + SOCK_MMCSD_DATA);
+
+ return;
+ }
+ cnt = sg[host->sg_pos].length;
+ }
+ off = sg[host->sg_pos].offset + host->block_pos;
+
+ pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
+ p_off = offset_in_page(off);
+ p_cnt = PAGE_SIZE - p_off;
+ p_cnt = min(p_cnt, cnt);
+ p_cnt = min(p_cnt, t_size);
+
+ if (r_data->flags & MMC_DATA_READ)
+ tifm_sd_read_fifo(host, pg, p_off, p_cnt);
+ else if (r_data->flags & MMC_DATA_WRITE)
+ tifm_sd_write_fifo(host, pg, p_off, p_cnt);
+
+ t_size -= p_cnt;
+ host->block_pos += p_cnt;
+ }
+}
+
+static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off,
+ struct page *src, unsigned int src_off,
+ unsigned int count)
+{
+ unsigned char *src_buf = kmap_atomic(src) + src_off;
+ unsigned char *dst_buf = kmap_atomic(dst) + dst_off;
+
+ memcpy(dst_buf, src_buf, count);
+
+ kunmap_atomic(dst_buf - dst_off);
+ kunmap_atomic(src_buf - src_off);
+}
+
+static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
+{
+ struct scatterlist *sg = r_data->sg;
+ unsigned int t_size = r_data->blksz;
+ unsigned int off, cnt;
+ unsigned int p_off, p_cnt;
+ struct page *pg;
+
+ dev_dbg(&host->dev->dev, "bouncing block\n");
+ while (t_size) {
+ cnt = sg[host->sg_pos].length - host->block_pos;
+ if (!cnt) {
+ host->block_pos = 0;
+ host->sg_pos++;
+ if (host->sg_pos == host->sg_len)
+ return;
+ cnt = sg[host->sg_pos].length;
+ }
+ off = sg[host->sg_pos].offset + host->block_pos;
+
+ pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
+ p_off = offset_in_page(off);
+ p_cnt = PAGE_SIZE - p_off;
+ p_cnt = min(p_cnt, cnt);
+ p_cnt = min(p_cnt, t_size);
+
+ if (r_data->flags & MMC_DATA_WRITE)
+ tifm_sd_copy_page(sg_page(&host->bounce_buf),
+ r_data->blksz - t_size,
+ pg, p_off, p_cnt);
+ else if (r_data->flags & MMC_DATA_READ)
+ tifm_sd_copy_page(pg, p_off, sg_page(&host->bounce_buf),
+ r_data->blksz - t_size, p_cnt);
+
+ t_size -= p_cnt;
+ host->block_pos += p_cnt;
+ }
+}
+
+static int tifm_sd_set_dma_data(struct tifm_sd *host, struct mmc_data *r_data)
+{
+ struct tifm_dev *sock = host->dev;
+ unsigned int t_size = TIFM_DMA_TSIZE * r_data->blksz;
+ unsigned int dma_len, dma_blk_cnt, dma_off;
+ struct scatterlist *sg = NULL;
+ unsigned long flags;
+
+ if (host->sg_pos == host->sg_len)
+ return 1;
+
+ if (host->cmd_flags & DATA_CARRY) {
+ host->cmd_flags &= ~DATA_CARRY;
+ local_irq_save(flags);
+ tifm_sd_bounce_block(host, r_data);
+ local_irq_restore(flags);
+ if (host->sg_pos == host->sg_len)
+ return 1;
+ }
+
+ dma_len = sg_dma_len(&r_data->sg[host->sg_pos]) - host->block_pos;
+ if (!dma_len) {
+ host->block_pos = 0;
+ host->sg_pos++;
+ if (host->sg_pos == host->sg_len)
+ return 1;
+ dma_len = sg_dma_len(&r_data->sg[host->sg_pos]);
+ }
+
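+ /*
+ * Transfer as many whole blocks as possible by DMA; a partial
+ * tail block is staged through the bounce buffer instead.
+ */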
+ if (dma_len < t_size) {
+ dma_blk_cnt = dma_len / r_data->blksz;
+ dma_off = host->block_pos;
+ host->block_pos += dma_blk_cnt * r_data->blksz;
+ } else {
+ dma_blk_cnt = TIFM_DMA_TSIZE;
+ dma_off = host->block_pos;
+ host->block_pos += t_size;
+ }
+
+ if (dma_blk_cnt)
+ sg = &r_data->sg[host->sg_pos];
+ else if (dma_len) {
+ if (r_data->flags & MMC_DATA_WRITE) {
+ local_irq_save(flags);
+ tifm_sd_bounce_block(host, r_data);
+ local_irq_restore(flags);
+ } else
+ host->cmd_flags |= DATA_CARRY;
+
+ sg = &host->bounce_buf;
+ dma_off = 0;
+ dma_blk_cnt = 1;
+ } else
+ return 1;
+
+ dev_dbg(&sock->dev, "setting dma for %d blocks\n", dma_blk_cnt);
+ writel(sg_dma_address(sg) + dma_off, sock->addr + SOCK_DMA_ADDRESS);
+ if (r_data->flags & MMC_DATA_WRITE)
+ writel((dma_blk_cnt << 8) | TIFM_DMA_TX | TIFM_DMA_EN,
+ sock->addr + SOCK_DMA_CONTROL);
+ else
+ writel((dma_blk_cnt << 8) | TIFM_DMA_EN,
+ sock->addr + SOCK_DMA_CONTROL);
+
+ return 0;
+}
+
+static unsigned int tifm_sd_op_flags(struct mmc_command *cmd)
+{
+ unsigned int rc = 0;
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ rc |= TIFM_MMCSD_RSP_R0;
+ break;
+ case MMC_RSP_R1B:
+ rc |= TIFM_MMCSD_RSP_BUSY; // deliberate fall-through
+ case MMC_RSP_R1:
+ rc |= TIFM_MMCSD_RSP_R1;
+ break;
+ case MMC_RSP_R2:
+ rc |= TIFM_MMCSD_RSP_R2;
+ break;
+ case MMC_RSP_R3:
+ rc |= TIFM_MMCSD_RSP_R3;
+ break;
+ default:
+ BUG();
+ }
+
+ switch (mmc_cmd_type(cmd)) {
+ case MMC_CMD_BC:
+ rc |= TIFM_MMCSD_CMD_BC;
+ break;
+ case MMC_CMD_BCR:
+ rc |= TIFM_MMCSD_CMD_BCR;
+ break;
+ case MMC_CMD_AC:
+ rc |= TIFM_MMCSD_CMD_AC;
+ break;
+ case MMC_CMD_ADTC:
+ rc |= TIFM_MMCSD_CMD_ADTC;
+ break;
+ default:
+ BUG();
+ }
+ return rc;
+}
+
+static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd)
+{
+ struct tifm_dev *sock = host->dev;
+ unsigned int cmd_mask = tifm_sd_op_flags(cmd);
+
+ if (host->open_drain)
+ cmd_mask |= TIFM_MMCSD_ODTO;
+
+ if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
+ cmd_mask |= TIFM_MMCSD_READ;
+
+ dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n",
+ cmd->opcode, cmd->arg, cmd_mask);
+
+ writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH);
+ writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW);
+ writel(cmd->opcode | cmd_mask, sock->addr + SOCK_MMCSD_COMMAND);
+}
+
+static void tifm_sd_fetch_resp(struct mmc_command *cmd, struct tifm_dev *sock)
+{
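+ /* the 128-bit response is spread as 16-bit halves across eight registers */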
+ cmd->resp[0] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x1c) << 16)
+ | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x18);
+ cmd->resp[1] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x14) << 16)
+ | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x10);
+ cmd->resp[2] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x0c) << 16)
+ | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x08);
+ cmd->resp[3] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x04) << 16)
+ | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x00);
+}
+
+static void tifm_sd_check_status(struct tifm_sd *host)
+{
+ struct tifm_dev *sock = host->dev;
+ struct mmc_command *cmd = host->req->cmd;
+
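+ /*
+ * Finish the request only once the command, block transfer, FIFO
+ * and any stop command have all completed; otherwise keep waiting
+ * for further events.
+ */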
+ if (cmd->error)
+ goto finish_request;
+
+ if (!(host->cmd_flags & CMD_READY))
+ return;
+
+ if (cmd->data) {
+ if (cmd->data->error) {
+ if ((host->cmd_flags & SCMD_ACTIVE)
+ && !(host->cmd_flags & SCMD_READY))
+ return;
+
+ goto finish_request;
+ }
+
+ if (!(host->cmd_flags & BRS_READY))
+ return;
+
+ if (!(host->no_dma || (host->cmd_flags & FIFO_READY)))
+ return;
+
+ if (cmd->data->flags & MMC_DATA_WRITE) {
+ if (host->req->stop) {
+ if (!(host->cmd_flags & SCMD_ACTIVE)) {
+ host->cmd_flags |= SCMD_ACTIVE;
+ writel(TIFM_MMCSD_EOFB
+ | readl(sock->addr
+ + SOCK_MMCSD_INT_ENABLE),
+ sock->addr
+ + SOCK_MMCSD_INT_ENABLE);
+ tifm_sd_exec(host, host->req->stop);
+ return;
+ } else {
+ if (!(host->cmd_flags & SCMD_READY)
+ || (host->cmd_flags & CARD_BUSY))
+ return;
+ writel((~TIFM_MMCSD_EOFB)
+ & readl(sock->addr
+ + SOCK_MMCSD_INT_ENABLE),
+ sock->addr
+ + SOCK_MMCSD_INT_ENABLE);
+ }
+ } else {
+ if (host->cmd_flags & CARD_BUSY)
+ return;
+ writel((~TIFM_MMCSD_EOFB)
+ & readl(sock->addr
+ + SOCK_MMCSD_INT_ENABLE),
+ sock->addr + SOCK_MMCSD_INT_ENABLE);
+ }
+ } else {
+ if (host->req->stop) {
+ if (!(host->cmd_flags & SCMD_ACTIVE)) {
+ host->cmd_flags |= SCMD_ACTIVE;
+ tifm_sd_exec(host, host->req->stop);
+ return;
+ } else {
+ if (!(host->cmd_flags & SCMD_READY))
+ return;
+ }
+ }
+ }
+ }
+finish_request:
+ tasklet_schedule(&host->finish_tasklet);
+}
+
+/* Called from interrupt handler */
+static void tifm_sd_data_event(struct tifm_dev *sock)
+{
+ struct tifm_sd *host;
+ unsigned int fifo_status = 0;
+ struct mmc_data *r_data = NULL;
+
+ spin_lock(&sock->lock);
+ host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
+ fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS);
+ dev_dbg(&sock->dev, "data event: fifo_status %x, flags %x\n",
+ fifo_status, host->cmd_flags);
+
+ if (host->req) {
+ r_data = host->req->cmd->data;
+
+ if (r_data && (fifo_status & TIFM_FIFO_READY)) {
+ if (tifm_sd_set_dma_data(host, r_data)) {
+ host->cmd_flags |= FIFO_READY;
+ tifm_sd_check_status(host);
+ }
+ }
+ }
+
+ writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS);
+ spin_unlock(&sock->lock);
+}
+
+/* Called from interrupt handler */
+static void tifm_sd_card_event(struct tifm_dev *sock)
+{
+ struct tifm_sd *host;
+ unsigned int host_status = 0;
+ int cmd_error = 0;
+ struct mmc_command *cmd = NULL;
+ unsigned long flags;
+
+ spin_lock(&sock->lock);
+ host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
+ host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
+ dev_dbg(&sock->dev, "host event: host_status %x, flags %x\n",
+ host_status, host->cmd_flags);
+
+ if (host->req) {
+ cmd = host->req->cmd;
+
+ if (host_status & TIFM_MMCSD_ERRMASK) {
+ writel(host_status & TIFM_MMCSD_ERRMASK,
+ sock->addr + SOCK_MMCSD_STATUS);
+ if (host_status & TIFM_MMCSD_CTO)
+ cmd_error = -ETIMEDOUT;
+ else if (host_status & TIFM_MMCSD_CCRC)
+ cmd_error = -EILSEQ;
+
+ if (cmd->data) {
+ if (host_status & TIFM_MMCSD_DTO)
+ cmd->data->error = -ETIMEDOUT;
+ else if (host_status & TIFM_MMCSD_DCRC)
+ cmd->data->error = -EILSEQ;
+ }
+
+ writel(TIFM_FIFO_INT_SETALL,
+ sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
+ writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);
+
+ if (host->req->stop) {
+ if (host->cmd_flags & SCMD_ACTIVE) {
+ host->req->stop->error = cmd_error;
+ host->cmd_flags |= SCMD_READY;
+ } else {
+ cmd->error = cmd_error;
+ host->cmd_flags |= SCMD_ACTIVE;
+ tifm_sd_exec(host, host->req->stop);
+ goto done;
+ }
+ } else
+ cmd->error = cmd_error;
+ } else {
+ if (host_status & (TIFM_MMCSD_EOC | TIFM_MMCSD_CERR)) {
+ if (!(host->cmd_flags & CMD_READY)) {
+ host->cmd_flags |= CMD_READY;
+ tifm_sd_fetch_resp(cmd, sock);
+ } else if (host->cmd_flags & SCMD_ACTIVE) {
+ host->cmd_flags |= SCMD_READY;
+ tifm_sd_fetch_resp(host->req->stop,
+ sock);
+ }
+ }
+ if (host_status & TIFM_MMCSD_BRS)
+ host->cmd_flags |= BRS_READY;
+ }
+
+ if (host->no_dma && cmd->data) {
+ if (host_status & TIFM_MMCSD_AE)
+ writel(host_status & TIFM_MMCSD_AE,
+ sock->addr + SOCK_MMCSD_STATUS);
+
+ if (host_status & (TIFM_MMCSD_AE | TIFM_MMCSD_AF
+ | TIFM_MMCSD_BRS)) {
+ local_irq_save(flags);
+ tifm_sd_transfer_data(host);
+ local_irq_restore(flags);
+ host_status &= ~TIFM_MMCSD_AE;
+ }
+ }
+
+ if (host_status & TIFM_MMCSD_EOFB)
+ host->cmd_flags &= ~CARD_BUSY;
+ else if (host_status & TIFM_MMCSD_CB)
+ host->cmd_flags |= CARD_BUSY;
+
+ tifm_sd_check_status(host);
+ }
+done:
+ writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
+ spin_unlock(&sock->lock);
+}
+
+static void tifm_sd_set_data_timeout(struct tifm_sd *host,
+ struct mmc_data *data)
+{
+ struct tifm_dev *sock = host->dev;
+ unsigned int data_timeout = data->timeout_clks;
+
+ if (fixed_timeout)
+ return;
+
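+ /* convert the ns part of the timeout into cycles of the current card clock */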
+ data_timeout += data->timeout_ns /
+ ((1000000000UL / host->clk_freq) * host->clk_div);
+
+ if (data_timeout < 0xffff) {
+ writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
+ writel((~TIFM_MMCSD_DPE)
+ & readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
+ sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
+ } else {
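+ /* too large for 16 bits: count the timeout in kilocycles (DPE) */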
+ data_timeout = (data_timeout >> 10) + 1;
+ if (data_timeout > 0xffff)
+ data_timeout = 0; /* set to unlimited */
+ writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
+ writel(TIFM_MMCSD_DPE
+ | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
+ sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
+ }
+}
+
+static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct tifm_sd *host = mmc_priv(mmc);
+ struct tifm_dev *sock = host->dev;
+ unsigned long flags;
+ struct mmc_data *r_data = mrq->cmd->data;
+
+ spin_lock_irqsave(&sock->lock, flags);
+ if (host->eject) {
+ mrq->cmd->error = -ENOMEDIUM;
+ goto err_out;
+ }
+
+ if (host->req) {
+ pr_err("%s : unfinished request detected\n",
+ dev_name(&sock->dev));
+ mrq->cmd->error = -ETIMEDOUT;
+ goto err_out;
+ }
+
+ host->cmd_flags = 0;
+ host->block_pos = 0;
+ host->sg_pos = 0;
+
+ if (mrq->data && !is_power_of_2(mrq->data->blksz))
+ host->no_dma = 1;
+ else
+ host->no_dma = no_dma ? 1 : 0;
+
+ if (r_data) {
+ tifm_sd_set_data_timeout(host, r_data);
+
+ if ((r_data->flags & MMC_DATA_WRITE) && !mrq->stop)
+ writel(TIFM_MMCSD_EOFB
+ | readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
+ sock->addr + SOCK_MMCSD_INT_ENABLE);
+
+ if (host->no_dma) {
+ writel(TIFM_MMCSD_BUFINT
+ | readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
+ sock->addr + SOCK_MMCSD_INT_ENABLE);
+ writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8)
+ | (TIFM_MMCSD_FIFO_SIZE - 1),
+ sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
+
+ host->sg_len = r_data->sg_len;
+ } else {
+ sg_init_one(&host->bounce_buf, host->bounce_buf_data,
+ r_data->blksz);
+
+ if (1 != tifm_map_sg(sock, &host->bounce_buf, 1,
+ r_data->flags & MMC_DATA_WRITE
+ ? PCI_DMA_TODEVICE
+ : PCI_DMA_FROMDEVICE)) {
+ pr_err("%s : scatterlist map failed\n",
+ dev_name(&sock->dev));
+ mrq->cmd->error = -ENOMEM;
+ goto err_out;
+ }
+ host->sg_len = tifm_map_sg(sock, r_data->sg,
+ r_data->sg_len,
+ r_data->flags
+ & MMC_DATA_WRITE
+ ? PCI_DMA_TODEVICE
+ : PCI_DMA_FROMDEVICE);
+ if (host->sg_len < 1) {
+ pr_err("%s : scatterlist map failed\n",
+ dev_name(&sock->dev));
+ tifm_unmap_sg(sock, &host->bounce_buf, 1,
+ r_data->flags & MMC_DATA_WRITE
+ ? PCI_DMA_TODEVICE
+ : PCI_DMA_FROMDEVICE);
+ mrq->cmd->error = -ENOMEM;
+ goto err_out;
+ }
+
+ writel(TIFM_FIFO_INT_SETALL,
+ sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
+ writel(ilog2(r_data->blksz) - 2,
+ sock->addr + SOCK_FIFO_PAGE_SIZE);
+ writel(TIFM_FIFO_ENABLE,
+ sock->addr + SOCK_FIFO_CONTROL);
+ writel(TIFM_FIFO_INTMASK,
+ sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
+
+ if (r_data->flags & MMC_DATA_WRITE)
+ writel(TIFM_MMCSD_TXDE,
+ sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
+ else
+ writel(TIFM_MMCSD_RXDE,
+ sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
+
+ tifm_sd_set_dma_data(host, r_data);
+ }
+
+ writel(r_data->blocks - 1,
+ sock->addr + SOCK_MMCSD_NUM_BLOCKS);
+ writel(r_data->blksz - 1,
+ sock->addr + SOCK_MMCSD_BLOCK_LEN);
+ }
+
+ host->req = mrq;
+ mod_timer(&host->timer, jiffies + host->timeout_jiffies);
+ writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
+ sock->addr + SOCK_CONTROL);
+ tifm_sd_exec(host, mrq->cmd);
+ spin_unlock_irqrestore(&sock->lock, flags);
+ return;
+
+err_out:
+ spin_unlock_irqrestore(&sock->lock, flags);
+ mmc_request_done(mmc, mrq);
+}
+
+static void tifm_sd_end_cmd(unsigned long data)
+{
+ struct tifm_sd *host = (struct tifm_sd*)data;
+ struct tifm_dev *sock = host->dev;
+ struct mmc_host *mmc = tifm_get_drvdata(sock);
+ struct mmc_request *mrq;
+ struct mmc_data *r_data = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sock->lock, flags);
+
+ del_timer(&host->timer);
+ mrq = host->req;
+ host->req = NULL;
+
+ if (!mrq) {
+ pr_err(" %s : no request to complete?\n",
+ dev_name(&sock->dev));
+ spin_unlock_irqrestore(&sock->lock, flags);
+ return;
+ }
+
+ r_data = mrq->cmd->data;
+ if (r_data) {
+ if (host->no_dma) {
+ writel((~TIFM_MMCSD_BUFINT)
+ & readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
+ sock->addr + SOCK_MMCSD_INT_ENABLE);
+ } else {
+ tifm_unmap_sg(sock, &host->bounce_buf, 1,
+ (r_data->flags & MMC_DATA_WRITE)
+ ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+ tifm_unmap_sg(sock, r_data->sg, r_data->sg_len,
+ (r_data->flags & MMC_DATA_WRITE)
+ ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+ }
+
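+ /* derive bytes_xfered from the controller's remaining block/byte counters */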
+ r_data->bytes_xfered = r_data->blocks
+ - readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
+ r_data->bytes_xfered *= r_data->blksz;
+ r_data->bytes_xfered += r_data->blksz
+ - readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1;
+ }
+
+ writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
+ sock->addr + SOCK_CONTROL);
+
+ spin_unlock_irqrestore(&sock->lock, flags);
+ mmc_request_done(mmc, mrq);
+}
+
+static void tifm_sd_abort(unsigned long data)
+{
+ struct tifm_sd *host = (struct tifm_sd*)data;
+
+ pr_err("%s : card failed to respond for a long period of time "
+ "(%x, %x)\n",
+ dev_name(&host->dev->dev), host->req->cmd->opcode, host->cmd_flags);
+
+ tifm_eject(host->dev);
+}
+
+static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct tifm_sd *host = mmc_priv(mmc);
+ struct tifm_dev *sock = host->dev;
+ unsigned int clk_div1, clk_div2;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sock->lock, flags);
+
+ dev_dbg(&sock->dev, "ios: clock = %u, vdd = %x, bus_mode = %x, "
+ "chip_select = %x, power_mode = %x, bus_width = %x\n",
+ ios->clock, ios->vdd, ios->bus_mode, ios->chip_select,
+ ios->power_mode, ios->bus_width);
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4) {
+ writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG),
+ sock->addr + SOCK_MMCSD_CONFIG);
+ } else {
+ writel((~TIFM_MMCSD_4BBUS)
+ & readl(sock->addr + SOCK_MMCSD_CONFIG),
+ sock->addr + SOCK_MMCSD_CONFIG);
+ }
+
+ if (ios->clock) {
+ clk_div1 = 20000000 / ios->clock;
+ if (!clk_div1)
+ clk_div1 = 1;
+
+ clk_div2 = 24000000 / ios->clock;
+ if (!clk_div2)
+ clk_div2 = 1;
+
+ if ((20000000 / clk_div1) > ios->clock)
+ clk_div1++;
+ if ((24000000 / clk_div2) > ios->clock)
+ clk_div2++;
+ if ((20000000 / clk_div1) > (24000000 / clk_div2)) {
+ host->clk_freq = 20000000;
+ host->clk_div = clk_div1;
+ writel((~TIFM_CTRL_FAST_CLK)
+ & readl(sock->addr + SOCK_CONTROL),
+ sock->addr + SOCK_CONTROL);
+ } else {
+ host->clk_freq = 24000000;
+ host->clk_div = clk_div2;
+ writel(TIFM_CTRL_FAST_CLK
+ | readl(sock->addr + SOCK_CONTROL),
+ sock->addr + SOCK_CONTROL);
+ }
+ } else {
+ host->clk_div = 0;
+ }
+ host->clk_div &= TIFM_MMCSD_CLKMASK;
+ writel(host->clk_div
+ | ((~TIFM_MMCSD_CLKMASK)
+ & readl(sock->addr + SOCK_MMCSD_CONFIG)),
+ sock->addr + SOCK_MMCSD_CONFIG);
+
+ host->open_drain = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN);
+
+ /* chip_select: maybe later */
+ /* vdd */
+ /* power is set before probe / after remove */
+
+ spin_unlock_irqrestore(&sock->lock, flags);
+}
+
+static int tifm_sd_ro(struct mmc_host *mmc)
+{
+ int rc = 0;
+ struct tifm_sd *host = mmc_priv(mmc);
+ struct tifm_dev *sock = host->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sock->lock, flags);
+ if (TIFM_MMCSD_CARD_RO & readl(sock->addr + SOCK_PRESENT_STATE))
+ rc = 1;
+ spin_unlock_irqrestore(&sock->lock, flags);
+ return rc;
+}
+
+static const struct mmc_host_ops tifm_sd_ops = {
+ .request = tifm_sd_request,
+ .set_ios = tifm_sd_ios,
+ .get_ro = tifm_sd_ro
+};
+
+static int tifm_sd_initialize_host(struct tifm_sd *host)
+{
+ int rc;
+ unsigned int host_status = 0;
+ struct tifm_dev *sock = host->dev;
+
+ writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
+ mmiowb();
+ host->clk_div = 61;
+ host->clk_freq = 20000000;
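+	/* 20 MHz / 61 is about 328 kHz, under the 400 kHz identification limit */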
+ writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
+ writel(host->clk_div | TIFM_MMCSD_POWER,
+ sock->addr + SOCK_MMCSD_CONFIG);
+
+ /* wait up to 0.51 sec for reset */
+ for (rc = 32; rc <= 256; rc <<= 1) {
+ if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) {
+ rc = 0;
+ break;
+ }
+ msleep(rc);
+ }
+
+ if (rc) {
+ pr_err("%s : controller failed to reset\n",
+ dev_name(&sock->dev));
+ return -ENODEV;
+ }
+
+ writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS);
+ writel(host->clk_div | TIFM_MMCSD_POWER,
+ sock->addr + SOCK_MMCSD_CONFIG);
+ writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
+
+	/* command timeout fixed to 64 clocks for now */
+ writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO);
+ writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND);
+
+ for (rc = 16; rc <= 64; rc <<= 1) {
+ host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
+ writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
+ if (!(host_status & TIFM_MMCSD_ERRMASK)
+ && (host_status & TIFM_MMCSD_EOC)) {
+ rc = 0;
+ break;
+ }
+ msleep(rc);
+ }
+
+ if (rc) {
+ pr_err("%s : card not ready - probe failed on initialization\n",
+ dev_name(&sock->dev));
+ return -ENODEV;
+ }
+
+ writel(TIFM_MMCSD_CERR | TIFM_MMCSD_BRS | TIFM_MMCSD_EOC
+ | TIFM_MMCSD_ERRMASK,
+ sock->addr + SOCK_MMCSD_INT_ENABLE);
+ mmiowb();
+
+ return 0;
+}
+
+static int tifm_sd_probe(struct tifm_dev *sock)
+{
+ struct mmc_host *mmc;
+ struct tifm_sd *host;
+ int rc = -EIO;
+
+ if (!(TIFM_SOCK_STATE_OCCUPIED
+ & readl(sock->addr + SOCK_PRESENT_STATE))) {
+		pr_warn("%s : card gone unexpectedly\n",
+ dev_name(&sock->dev));
+ return rc;
+ }
+
+ mmc = mmc_alloc_host(sizeof(struct tifm_sd), &sock->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ host = mmc_priv(mmc);
+ tifm_set_drvdata(sock, mmc);
+ host->dev = sock;
+ host->timeout_jiffies = msecs_to_jiffies(1000);
+
+ tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd,
+ (unsigned long)host);
+ setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host);
+
+ mmc->ops = &tifm_sd_ops;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->caps = MMC_CAP_4_BIT_DATA;
+ mmc->f_min = 20000000 / 60;
+ mmc->f_max = 24000000;
+
+ mmc->max_blk_count = 2048;
+ mmc->max_segs = mmc->max_blk_count;
+ mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE);
+ mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size;
+ mmc->max_req_size = mmc->max_seg_size;
+
+ sock->card_event = tifm_sd_card_event;
+ sock->data_event = tifm_sd_data_event;
+ rc = tifm_sd_initialize_host(host);
+
+ if (!rc)
+ rc = mmc_add_host(mmc);
+ if (!rc)
+ return 0;
+
+ mmc_free_host(mmc);
+ return rc;
+}
+
+static void tifm_sd_remove(struct tifm_dev *sock)
+{
+ struct mmc_host *mmc = tifm_get_drvdata(sock);
+ struct tifm_sd *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sock->lock, flags);
+ host->eject = 1;
+ writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
+ mmiowb();
+ spin_unlock_irqrestore(&sock->lock, flags);
+
+ tasklet_kill(&host->finish_tasklet);
+
+ spin_lock_irqsave(&sock->lock, flags);
+ if (host->req) {
+ writel(TIFM_FIFO_INT_SETALL,
+ sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
+ writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
+ host->req->cmd->error = -ENOMEDIUM;
+ if (host->req->stop)
+ host->req->stop->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+ }
+ spin_unlock_irqrestore(&sock->lock, flags);
+ mmc_remove_host(mmc);
+ dev_dbg(&sock->dev, "after remove\n");
+
+ mmc_free_host(mmc);
+}
+
+#ifdef CONFIG_PM
+
+static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
+{
+ return 0;
+}
+
+static int tifm_sd_resume(struct tifm_dev *sock)
+{
+ struct mmc_host *mmc = tifm_get_drvdata(sock);
+ struct tifm_sd *host = mmc_priv(mmc);
+ int rc;
+
+ rc = tifm_sd_initialize_host(host);
+ dev_dbg(&sock->dev, "resume initialize %d\n", rc);
+
+ if (rc)
+ host->eject = 1;
+
+ return rc;
+}
+
+#else
+
+#define tifm_sd_suspend NULL
+#define tifm_sd_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct tifm_device_id tifm_sd_id_tbl[] = {
+ { TIFM_TYPE_SD }, { }
+};
+
+static struct tifm_driver tifm_sd_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE
+ },
+ .id_table = tifm_sd_id_tbl,
+ .probe = tifm_sd_probe,
+ .remove = tifm_sd_remove,
+ .suspend = tifm_sd_suspend,
+ .resume = tifm_sd_resume
+};
+
+static int __init tifm_sd_init(void)
+{
+ return tifm_register_driver(&tifm_sd_driver);
+}
+
+static void __exit tifm_sd_exit(void)
+{
+ tifm_unregister_driver(&tifm_sd_driver);
+}
+
+MODULE_AUTHOR("Alex Dubov");
+MODULE_DESCRIPTION("TI FlashMedia SD driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(tifm, tifm_sd_id_tbl);
+MODULE_VERSION(DRIVER_VERSION);
+
+module_init(tifm_sd_init);
+module_exit(tifm_sd_exit);
diff --git a/kernel/drivers/mmc/host/tmio_mmc.c b/kernel/drivers/mmc/host/tmio_mmc.c
new file mode 100644
index 000000000..f746df493
--- /dev/null
+++ b/kernel/drivers/mmc/host/tmio_mmc.c
@@ -0,0 +1,164 @@
+/*
+ * linux/drivers/mmc/host/tmio_mmc.c
+ *
+ * Copyright (C) 2007 Ian Molton
+ * Copyright (C) 2004 Ian Molton
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for the MMC / SD / SDIO cell found in:
+ *
+ * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
+ */
+
+#include <linux/device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+
+#include "tmio_mmc.h"
+
+#ifdef CONFIG_PM_SLEEP
+static int tmio_mmc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ int ret;
+
+ ret = pm_runtime_force_suspend(dev);
+
+	/* Tell the MFD core it can disable us now. */
+ if (!ret && cell->disable)
+ cell->disable(pdev);
+
+ return ret;
+}
+
+static int tmio_mmc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ int ret = 0;
+
+ /* Tell the MFD core we are ready to be enabled */
+ if (cell->resume)
+ ret = cell->resume(pdev);
+
+ if (!ret)
+ ret = pm_runtime_force_resume(dev);
+
+ return ret;
+}
+#endif
+
+static int tmio_mmc_probe(struct platform_device *pdev)
+{
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ struct tmio_mmc_data *pdata;
+ struct tmio_mmc_host *host;
+ struct resource *res;
+ int ret = -EINVAL, irq;
+
+ if (pdev->num_resources != 2)
+ goto out;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata || !pdata->hclk)
+ goto out;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out;
+ }
+
+ /* Tell the MFD core we are ready to be enabled */
+ if (cell->enable) {
+ ret = cell->enable(pdev);
+ if (ret)
+ goto out;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ pdata->flags |= TMIO_MMC_HAVE_HIGH_REG;
+
+ host = tmio_mmc_host_alloc(pdev);
+ if (!host)
+ goto cell_disable;
+
+ /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
+ host->bus_shift = resource_size(res) >> 10;
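+	/* e.g. a 0x200 window gives bus_shift 0, a 0x400 window gives bus_shift 1 */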
+
+ ret = tmio_mmc_host_probe(host, pdata);
+ if (ret)
+ goto host_free;
+
+ ret = request_irq(irq, tmio_mmc_irq, IRQF_TRIGGER_FALLING,
+ dev_name(&pdev->dev), host);
+ if (ret)
+ goto host_remove;
+
+ pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
+ (unsigned long)host->ctl, irq);
+
+ return 0;
+
+host_remove:
+ tmio_mmc_host_remove(host);
+host_free:
+ tmio_mmc_host_free(host);
+cell_disable:
+ if (cell->disable)
+ cell->disable(pdev);
+out:
+ return ret;
+}
+
+static int tmio_mmc_remove(struct platform_device *pdev)
+{
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+
+ if (mmc) {
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ free_irq(platform_get_irq(pdev, 0), host);
+ tmio_mmc_host_remove(host);
+ if (cell->disable)
+ cell->disable(pdev);
+ }
+
+ return 0;
+}
+
+/* ------------------- device registration ----------------------- */
+
+static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_suspend, tmio_mmc_resume)
+ SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
+ tmio_mmc_host_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver tmio_mmc_driver = {
+ .driver = {
+ .name = "tmio-mmc",
+ .pm = &tmio_mmc_dev_pm_ops,
+ },
+ .probe = tmio_mmc_probe,
+ .remove = tmio_mmc_remove,
+};
+
+module_platform_driver(tmio_mmc_driver);
+
+MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
+MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tmio-mmc");
diff --git a/kernel/drivers/mmc/host/tmio_mmc.h b/kernel/drivers/mmc/host/tmio_mmc.h
new file mode 100644
index 000000000..4a597f5a5
--- /dev/null
+++ b/kernel/drivers/mmc/host/tmio_mmc.h
@@ -0,0 +1,209 @@
+/*
+ * linux/drivers/mmc/host/tmio_mmc.h
+ *
+ * Copyright (C) 2007 Ian Molton
+ * Copyright (C) 2004 Ian Molton
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for the MMC / SD / SDIO cell found in:
+ *
+ * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
+ */
+
+#ifndef TMIO_MMC_H
+#define TMIO_MMC_H
+
+#include <linux/dmaengine.h>
+#include <linux/highmem.h>
+#include <linux/mmc/tmio.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+
+/* Definitions for values the CTRL_SDIO_STATUS register can take. */
+#define TMIO_SDIO_STAT_IOIRQ 0x0001
+#define TMIO_SDIO_STAT_EXPUB52 0x4000
+#define TMIO_SDIO_STAT_EXWT 0x8000
+#define TMIO_SDIO_MASK_ALL 0xc007
+
+/* Define some IRQ masks */
+/* This is the mask used at reset by the chip */
+#define TMIO_MASK_ALL 0x837f031d
+#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
+#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
+#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
+ TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
+#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
+
+struct tmio_mmc_data;
+struct tmio_mmc_host;
+
+struct tmio_mmc_dma {
+ enum dma_slave_buswidth dma_buswidth;
+ bool (*filter)(struct dma_chan *chan, void *arg);
+ void (*enable)(struct tmio_mmc_host *host, bool enable);
+};
+
+struct tmio_mmc_host {
+ void __iomem *ctl;
+ struct mmc_command *cmd;
+ struct mmc_request *mrq;
+ struct mmc_data *data;
+ struct mmc_host *mmc;
+
+ /* Callbacks for clock / power control */
+ void (*set_pwr)(struct platform_device *host, int state);
+ void (*set_clk_div)(struct platform_device *host, int state);
+
+ /* pio related stuff */
+ struct scatterlist *sg_ptr;
+ struct scatterlist *sg_orig;
+ unsigned int sg_len;
+ unsigned int sg_off;
+ unsigned long bus_shift;
+
+ struct platform_device *pdev;
+ struct tmio_mmc_data *pdata;
+ struct tmio_mmc_dma *dma;
+
+ /* DMA support */
+ bool force_pio;
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct tasklet_struct dma_complete;
+ struct tasklet_struct dma_issue;
+ struct scatterlist bounce_sg;
+ u8 *bounce_buf;
+
+ /* Track lost interrupts */
+ struct delayed_work delayed_reset_work;
+ struct work_struct done;
+
+ /* Cache */
+ u32 sdcard_irq_mask;
+ u32 sdio_irq_mask;
+ unsigned int clk_cache;
+
+ spinlock_t lock; /* protect host private data */
+ unsigned long last_req_ts;
+ struct mutex ios_lock; /* protect set_ios() context */
+ bool native_hotplug;
+ bool sdio_irq_enabled;
+
+ int (*write16_hook)(struct tmio_mmc_host *host, int addr);
+ int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
+ void (*clk_disable)(struct platform_device *pdev);
+ int (*multi_io_quirk)(struct mmc_card *card,
+ unsigned int direction, int blk_size);
+};
+
+struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
+void tmio_mmc_host_free(struct tmio_mmc_host *host);
+int tmio_mmc_host_probe(struct tmio_mmc_host *host,
+ struct tmio_mmc_data *pdata);
+void tmio_mmc_host_remove(struct tmio_mmc_host *host);
+void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
+
+void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
+void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
+irqreturn_t tmio_mmc_irq(int irq, void *devid);
+irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid);
+irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid);
+irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid);
+
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
+ unsigned long *flags)
+{
+ local_irq_save(*flags);
+ return kmap_atomic(sg_page(sg)) + sg->offset;
+}
+
+static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
+ unsigned long *flags, void *virt)
+{
+ kunmap_atomic(virt - sg->offset);
+ local_irq_restore(*flags);
+}
+
+#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
+void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
+void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable);
+void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
+void tmio_mmc_release_dma(struct tmio_mmc_host *host);
+void tmio_mmc_abort_dma(struct tmio_mmc_host *host);
+#else
+static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
+ struct mmc_data *data)
+{
+}
+
+static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+{
+}
+
+static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+ struct tmio_mmc_data *pdata)
+{
+ host->chan_tx = NULL;
+ host->chan_rx = NULL;
+}
+
+static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+}
+
+static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
+{
+}
+#endif
+
+#ifdef CONFIG_PM
+int tmio_mmc_host_runtime_suspend(struct device *dev);
+int tmio_mmc_host_runtime_resume(struct device *dev);
+#endif
+
+static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift));
+}
+
+static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ readsw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
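+/*
+ * 32-bit registers are accessed as two consecutive 16-bit halves, low half
+ * first; the register bus is presumably only 16 bits wide.
+ */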
+static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift)) |
+ readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+}
+
+static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
+{
+	/* If there is a hook and it returns non-zero, then there
+	 * is an error and the write should be skipped.
+	 */
+ if (host->write16_hook && host->write16_hook(host, addr))
+ return;
+ writew(val, host->ctl + (addr << host->bus_shift));
+}
+
+static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ writesw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+ writew(val, host->ctl + (addr << host->bus_shift));
+ writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+}
+
+
+#endif
diff --git a/kernel/drivers/mmc/host/tmio_mmc_dma.c b/kernel/drivers/mmc/host/tmio_mmc_dma.c
new file mode 100644
index 000000000..e4b05dbb9
--- /dev/null
+++ b/kernel/drivers/mmc/host/tmio_mmc_dma.c
@@ -0,0 +1,356 @@
+/*
+ * linux/drivers/mmc/tmio_mmc_dma.c
+ *
+ * Copyright (C) 2010-2011 Guennadi Liakhovetski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * DMA function for TMIO MMC implementations
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/tmio.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+
+#include "tmio_mmc.h"
+
+#define TMIO_MMC_MIN_DMA_LEN 8
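+/* below this length, setting up DMA presumably costs more than plain PIO */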
+
+void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+{
+ if (!host->chan_tx || !host->chan_rx)
+ return;
+
+ if (host->dma->enable)
+ host->dma->enable(host, enable);
+}
+
+void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
+{
+ tmio_mmc_enable_dma(host, false);
+
+ if (host->chan_rx)
+ dmaengine_terminate_all(host->chan_rx);
+ if (host->chan_tx)
+ dmaengine_terminate_all(host->chan_tx);
+
+ tmio_mmc_enable_dma(host, true);
+}
+
+static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+{
+ struct scatterlist *sg = host->sg_ptr, *sg_tmp;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan = host->chan_rx;
+ dma_cookie_t cookie;
+ int ret, i;
+ bool aligned = true, multiple = true;
+ unsigned int align = (1 << host->pdata->alignment_shift) - 1;
+
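+	/*
+	 * DMA can only be used when every sg element starts at an aligned
+	 * offset and every length is a multiple of the alignment; a single
+	 * unaligned element can still be routed through the bounce buffer.
+	 */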
+ for_each_sg(sg, sg_tmp, host->sg_len, i) {
+ if (sg_tmp->offset & align)
+ aligned = false;
+ if (sg_tmp->length & align) {
+ multiple = false;
+ break;
+ }
+ }
+
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ (align & PAGE_MASK))) || !multiple) {
+ ret = -EINVAL;
+ goto pio;
+ }
+
+ if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
+ host->force_pio = true;
+ return;
+ }
+
+ tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);
+
+	/* Only a lone sg element may be unaligned; use the bounce buffer in that case */
+ if (!aligned) {
+ sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+ host->sg_ptr = &host->bounce_sg;
+ sg = host->sg_ptr;
+ }
+
+ ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
+ if (ret > 0)
+ desc = dmaengine_prep_slave_sg(chan, sg, ret,
+ DMA_DEV_TO_MEM, DMA_CTRL_ACK);
+
+ if (desc) {
+ cookie = dmaengine_submit(desc);
+ if (cookie < 0) {
+ desc = NULL;
+ ret = cookie;
+ }
+ }
+ dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+ __func__, host->sg_len, ret, cookie, host->mrq);
+
+pio:
+ if (!desc) {
+ /* DMA failed, fall back to PIO */
+ tmio_mmc_enable_dma(host, false);
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ /* Free the Tx channel too */
+ chan = host->chan_tx;
+ if (chan) {
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ }
+ dev_warn(&host->pdev->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+ }
+
+ dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+ desc, cookie, host->sg_len);
+}
+
+static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+{
+ struct scatterlist *sg = host->sg_ptr, *sg_tmp;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan = host->chan_tx;
+ dma_cookie_t cookie;
+ int ret, i;
+ bool aligned = true, multiple = true;
+ unsigned int align = (1 << host->pdata->alignment_shift) - 1;
+
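+	/* same alignment constraints as in tmio_mmc_start_dma_rx() above */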
+ for_each_sg(sg, sg_tmp, host->sg_len, i) {
+ if (sg_tmp->offset & align)
+ aligned = false;
+ if (sg_tmp->length & align) {
+ multiple = false;
+ break;
+ }
+ }
+
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ (align & PAGE_MASK))) || !multiple) {
+ ret = -EINVAL;
+ goto pio;
+ }
+
+ if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
+ host->force_pio = true;
+ return;
+ }
+
+ tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);
+
+	/* Only a lone sg element may be unaligned; use the bounce buffer in that case */
+ if (!aligned) {
+ unsigned long flags;
+ void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
+ sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+ memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
+ tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
+ host->sg_ptr = &host->bounce_sg;
+ sg = host->sg_ptr;
+ }
+
+ ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
+ if (ret > 0)
+ desc = dmaengine_prep_slave_sg(chan, sg, ret,
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK);
+
+ if (desc) {
+ cookie = dmaengine_submit(desc);
+ if (cookie < 0) {
+ desc = NULL;
+ ret = cookie;
+ }
+ }
+ dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+ __func__, host->sg_len, ret, cookie, host->mrq);
+
+pio:
+ if (!desc) {
+ /* DMA failed, fall back to PIO */
+ tmio_mmc_enable_dma(host, false);
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ /* Free the Rx channel too */
+ chan = host->chan_rx;
+ if (chan) {
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ }
+ dev_warn(&host->pdev->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+ }
+
+ dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
+ desc, cookie);
+}
+
+void tmio_mmc_start_dma(struct tmio_mmc_host *host,
+ struct mmc_data *data)
+{
+ if (data->flags & MMC_DATA_READ) {
+ if (host->chan_rx)
+ tmio_mmc_start_dma_rx(host);
+ } else {
+ if (host->chan_tx)
+ tmio_mmc_start_dma_tx(host);
+ }
+}
+
+static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
+{
+ struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
+ struct dma_chan *chan = NULL;
+
+ spin_lock_irq(&host->lock);
+
+ if (host && host->data) {
+ if (host->data->flags & MMC_DATA_READ)
+ chan = host->chan_rx;
+ else
+ chan = host->chan_tx;
+ }
+
+ spin_unlock_irq(&host->lock);
+
+ tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+
+ if (chan)
+ dma_async_issue_pending(chan);
+}
+
+static void tmio_mmc_tasklet_fn(unsigned long arg)
+{
+ struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+
+ spin_lock_irq(&host->lock);
+
+ if (!host->data)
+ goto out;
+
+ if (host->data->flags & MMC_DATA_READ)
+ dma_unmap_sg(host->chan_rx->device->dev,
+ host->sg_ptr, host->sg_len,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(host->chan_tx->device->dev,
+ host->sg_ptr, host->sg_len,
+ DMA_TO_DEVICE);
+
+ tmio_mmc_do_data_irq(host);
+out:
+ spin_unlock_irq(&host->lock);
+}
+
+void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
+{
+	/* We can either use DMA for both Tx and Rx or not use it at all */
+ if (!host->dma || (!host->pdev->dev.of_node &&
+ (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
+ return;
+
+ if (!host->chan_tx && !host->chan_rx) {
+ struct resource *res = platform_get_resource(host->pdev,
+ IORESOURCE_MEM, 0);
+ struct dma_slave_config cfg = {};
+ dma_cap_mask_t mask;
+ int ret;
+
+ if (!res)
+ return;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->chan_tx = dma_request_slave_channel_compat(mask,
+ host->dma->filter, pdata->chan_priv_tx,
+ &host->pdev->dev, "tx");
+ dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
+ host->chan_tx);
+
+ if (!host->chan_tx)
+ return;
+
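+		/* point the slave config at the SD data port, scaled by bus_shift */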
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
+ cfg.dst_addr_width = host->dma->dma_buswidth;
+ if (!cfg.dst_addr_width)
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.src_addr = 0;
+ ret = dmaengine_slave_config(host->chan_tx, &cfg);
+ if (ret < 0)
+ goto ecfgtx;
+
+ host->chan_rx = dma_request_slave_channel_compat(mask,
+ host->dma->filter, pdata->chan_priv_rx,
+ &host->pdev->dev, "rx");
+ dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
+ host->chan_rx);
+
+ if (!host->chan_rx)
+ goto ereqrx;
+
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
+ cfg.src_addr_width = host->dma->dma_buswidth;
+ if (!cfg.src_addr_width)
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.dst_addr = 0;
+ ret = dmaengine_slave_config(host->chan_rx, &cfg);
+ if (ret < 0)
+ goto ecfgrx;
+
+ host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!host->bounce_buf)
+ goto ebouncebuf;
+
+ tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
+ tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
+ }
+
+ tmio_mmc_enable_dma(host, true);
+
+ return;
+
+ebouncebuf:
+ecfgrx:
+ dma_release_channel(host->chan_rx);
+ host->chan_rx = NULL;
+ereqrx:
+ecfgtx:
+ dma_release_channel(host->chan_tx);
+ host->chan_tx = NULL;
+}
+
+void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+ if (host->chan_tx) {
+ struct dma_chan *chan = host->chan_tx;
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ }
+ if (host->chan_rx) {
+ struct dma_chan *chan = host->chan_rx;
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ }
+ if (host->bounce_buf) {
+ free_pages((unsigned long)host->bounce_buf, 0);
+ host->bounce_buf = NULL;
+ }
+}
diff --git a/kernel/drivers/mmc/host/tmio_mmc_pio.c b/kernel/drivers/mmc/host/tmio_mmc_pio.c
new file mode 100644
index 000000000..dba7e1c19
--- /dev/null
+++ b/kernel/drivers/mmc/host/tmio_mmc_pio.c
@@ -0,0 +1,1276 @@
+/*
+ * linux/drivers/mmc/host/tmio_mmc_pio.c
+ *
+ * Copyright (C) 2011 Guennadi Liakhovetski
+ * Copyright (C) 2007 Ian Molton
+ * Copyright (C) 2004 Ian Molton
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for the MMC / SD / SDIO IP found in:
+ *
+ * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
+ *
+ * This driver draws mainly on scattered spec sheets, Reverse engineering
+ * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
+ * support). (Further 4 bit support from a later datasheet).
+ *
+ * TODO:
+ * Investigate using a workqueue for PIO transfers
+ * Eliminate FIXMEs
+ * SDIO support
+ * Better Power management
+ * Handle MMC errors better
+ * double buffer support
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/mmc/tmio.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mmc/sdio.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "tmio_mmc.h"
+
+void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
+{
+ host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
+ sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
+}
+
+void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
+{
+ host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
+ sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
+}
+
+static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
+{
+ sd_ctrl_write32(host, CTL_STATUS, ~i);
+}
+
+static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
+{
+ host->sg_len = data->sg_len;
+ host->sg_ptr = data->sg;
+ host->sg_orig = data->sg;
+ host->sg_off = 0;
+}
+
+static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
+{
+ host->sg_ptr = sg_next(host->sg_ptr);
+ host->sg_off = 0;
+ return --host->sg_len;
+}
+
+#ifdef CONFIG_MMC_DEBUG
+
+#define STATUS_TO_TEXT(a, status, i) \
+ do { \
+ if (status & TMIO_STAT_##a) { \
+ if (i++) \
+ printk(" | "); \
+ printk(#a); \
+ } \
+ } while (0)
+
+static void pr_debug_status(u32 status)
+{
+ int i = 0;
+ pr_debug("status: %08x = ", status);
+ STATUS_TO_TEXT(CARD_REMOVE, status, i);
+ STATUS_TO_TEXT(CARD_INSERT, status, i);
+ STATUS_TO_TEXT(SIGSTATE, status, i);
+ STATUS_TO_TEXT(WRPROTECT, status, i);
+ STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
+ STATUS_TO_TEXT(CARD_INSERT_A, status, i);
+ STATUS_TO_TEXT(SIGSTATE_A, status, i);
+ STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
+ STATUS_TO_TEXT(STOPBIT_ERR, status, i);
+ STATUS_TO_TEXT(ILL_FUNC, status, i);
+ STATUS_TO_TEXT(CMD_BUSY, status, i);
+ STATUS_TO_TEXT(CMDRESPEND, status, i);
+ STATUS_TO_TEXT(DATAEND, status, i);
+ STATUS_TO_TEXT(CRCFAIL, status, i);
+ STATUS_TO_TEXT(DATATIMEOUT, status, i);
+ STATUS_TO_TEXT(CMDTIMEOUT, status, i);
+ STATUS_TO_TEXT(RXOVERFLOW, status, i);
+ STATUS_TO_TEXT(TXUNDERRUN, status, i);
+ STATUS_TO_TEXT(RXRDY, status, i);
+ STATUS_TO_TEXT(TXRQ, status, i);
+ STATUS_TO_TEXT(ILL_ACCESS, status, i);
+ printk("\n");
+}
+
+#else
+#define pr_debug_status(s) do { } while (0)
+#endif
+
+static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ if (enable && !host->sdio_irq_enabled) {
+ /* Keep device active while SDIO irq is enabled */
+ pm_runtime_get_sync(mmc_dev(mmc));
+ host->sdio_irq_enabled = true;
+
+ host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
+ ~TMIO_SDIO_STAT_IOIRQ;
+ sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
+ sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
+ } else if (!enable && host->sdio_irq_enabled) {
+ host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
+ sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
+ sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
+
+ host->sdio_irq_enabled = false;
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
+ }
+}
+
+static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
+ unsigned int new_clock)
+{
+ u32 clk = 0, clock;
+
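+	/*
+	 * Find the divider: clk starts at the largest division (low byte
+	 * 0x80) and is shifted right, halving the divider, while the
+	 * candidate clock can still be doubled without exceeding the
+	 * requested rate (a reading of the loop below, not datasheet
+	 * knowledge).
+	 */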
+ if (new_clock) {
+ for (clock = host->mmc->f_min, clk = 0x80000080;
+ new_clock >= (clock<<1); clk >>= 1)
+ clock <<= 1;
+
+		/* a 1/1 (undivided) clock is an option */
+ if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
+ ((clk >> 22) & 0x1))
+ clk |= 0xff;
+ }
+
+ if (host->set_clk_div)
+ host->set_clk_div(host->pdev, (clk>>22) & 1);
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
+ msleep(10);
+}
+
+static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
+{
+ /* implicit BUG_ON(!res) */
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
+ sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
+ msleep(10);
+ }
+
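+	/* bit 8 of CTL_SD_CARD_CLK_CTL apparently gates the SD clock */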
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+ msleep(10);
+}
+
+static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
+{
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+ msleep(10);
+
+ /* implicit BUG_ON(!res) */
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
+ sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
+ msleep(10);
+ }
+}
+
+static void tmio_mmc_reset(struct tmio_mmc_host *host)
+{
+ /* FIXME - should we set stop clock reg here */
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
+ /* implicit BUG_ON(!res) */
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
+ sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
+ msleep(10);
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
+ sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
+ msleep(10);
+}
+
+static void tmio_mmc_reset_work(struct work_struct *work)
+{
+ struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+ delayed_reset_work.work);
+ struct mmc_request *mrq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ mrq = host->mrq;
+
+ /*
+	 * Is the request already finished? Since we use a non-blocking
+	 * cancel_delayed_work(), a .set_ios() call can preempt us, so we
+	 * have to check for IS_ERR(host->mrq).
+ */
+ if (IS_ERR_OR_NULL(mrq)
+ || time_is_after_jiffies(host->last_req_ts +
+ msecs_to_jiffies(2000))) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ dev_warn(&host->pdev->dev,
+ "timeout waiting for hardware interrupt (CMD%u)\n",
+ mrq->cmd->opcode);
+
+ if (host->data)
+ host->data->error = -ETIMEDOUT;
+ else if (host->cmd)
+ host->cmd->error = -ETIMEDOUT;
+ else
+ mrq->cmd->error = -ETIMEDOUT;
+
+ host->cmd = NULL;
+ host->data = NULL;
+ host->force_pio = false;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ tmio_mmc_reset(host);
+
+ /* Ready for new calls */
+ host->mrq = NULL;
+
+ tmio_mmc_abort_dma(host);
+ mmc_request_done(host->mmc, mrq);
+
+ pm_runtime_mark_last_busy(mmc_dev(host->mmc));
+ pm_runtime_put_autosuspend(mmc_dev(host->mmc));
+}
+
+/* called with host->lock held, interrupts disabled */
+static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
+{
+ struct mmc_request *mrq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ mrq = host->mrq;
+ if (IS_ERR_OR_NULL(mrq)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ host->cmd = NULL;
+ host->data = NULL;
+ host->force_pio = false;
+
+ cancel_delayed_work(&host->delayed_reset_work);
+
+ host->mrq = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (mrq->cmd->error || (mrq->data && mrq->data->error))
+ tmio_mmc_abort_dma(host);
+
+ mmc_request_done(host->mmc, mrq);
+
+ pm_runtime_mark_last_busy(mmc_dev(host->mmc));
+ pm_runtime_put_autosuspend(mmc_dev(host->mmc));
+}
+
+static void tmio_mmc_done_work(struct work_struct *work)
+{
+ struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+ done);
+ tmio_mmc_finish_request(host);
+}
+
+/* These are the bitmasks the tmio chip requires to implement the MMC response
+ * types. Note that R1 and R6 are the same in this scheme. */
+#define APP_CMD 0x0040
+#define RESP_NONE 0x0300
+#define RESP_R1 0x0400
+#define RESP_R1B 0x0500
+#define RESP_R2 0x0600
+#define RESP_R3 0x0700
+#define DATA_PRESENT 0x0800
+#define TRANSFER_READ 0x1000
+#define TRANSFER_MULTI 0x2000
+#define SECURITY_CMD 0x4000
+#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
+
+static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
+{
+ struct mmc_data *data = host->data;
+ int c = cmd->opcode;
+ u32 irq_mask = TMIO_MASK_CMD;
+
+ /* CMD12 is handled by hardware */
+ if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
+ sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
+ return 0;
+ }
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE: c |= RESP_NONE; break;
+ case MMC_RSP_R1: c |= RESP_R1; break;
+ case MMC_RSP_R1B: c |= RESP_R1B; break;
+ case MMC_RSP_R2: c |= RESP_R2; break;
+ case MMC_RSP_R3: c |= RESP_R3; break;
+ default:
+ pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
+ return -EINVAL;
+ }
+
+ host->cmd = cmd;
+
+/* FIXME - this seems to be ok commented out but the spec suggests this bit
+ * should be set when issuing app commands.
+ * if(cmd->flags & MMC_FLAG_ACMD)
+ * c |= APP_CMD;
+ */
+ if (data) {
+ c |= DATA_PRESENT;
+ if (data->blocks > 1) {
+ sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
+ c |= TRANSFER_MULTI;
+
+ /*
+ * Disable auto CMD12 at IO_RW_EXTENDED when
+ * multiple block transfer
+ */
+ if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
+ (cmd->opcode == SD_IO_RW_EXTENDED))
+ c |= NO_CMD12_ISSUE;
+ }
+ if (data->flags & MMC_DATA_READ)
+ c |= TRANSFER_READ;
+ }
+
+ if (!host->native_hotplug)
+ irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
+ tmio_mmc_enable_mmc_irqs(host, irq_mask);
+
+ /* Fire off the command */
+ sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
+ sd_ctrl_write16(host, CTL_SD_CMD, c);
+
+ return 0;
+}
+
+static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
+ unsigned short *buf,
+ unsigned int count)
+{
+ int is_read = host->data->flags & MMC_DATA_READ;
+ u8 *buf8;
+
+ /*
+ * Transfer the data
+ */
+ if (is_read)
+ sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
+ else
+ sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
+
+	/* if count was an even number */
+ if (!(count & 0x1))
+ return;
+
+	/* if count was an odd number */
+ buf8 = (u8 *)(buf + (count >> 1));
+
+ /*
+ * FIXME
+ *
+	 * the driver and this function assume that
+	 * they are used as little endian
+ */
+ if (is_read)
+ *buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
+ else
+ sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
+}
+
+/*
+ * This chip always returns (at least?) as much data as you ask for.
+ * I'm unsure what happens if you ask for less than a block. This should be
+ * looked into to ensure that a funny length read doesn't hose the controller.
+ */
+static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
+{
+ struct mmc_data *data = host->data;
+ void *sg_virt;
+ unsigned short *buf;
+ unsigned int count;
+ unsigned long flags;
+
+ if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
+ pr_err("PIO IRQ in DMA mode!\n");
+ return;
+ } else if (!data) {
+ pr_debug("Spurious PIO IRQ\n");
+ return;
+ }
+
+ sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+ buf = (unsigned short *)(sg_virt + host->sg_off);
+
+ count = host->sg_ptr->length - host->sg_off;
+ if (count > data->blksz)
+ count = data->blksz;
+
+ pr_debug("count: %08x offset: %08x flags %08x\n",
+ count, host->sg_off, data->flags);
+
+ /* Transfer the data */
+ tmio_mmc_transfer_data(host, buf, count);
+
+ host->sg_off += count;
+
+ tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
+
+ if (host->sg_off == host->sg_ptr->length)
+ tmio_mmc_next_sg(host);
+
+ return;
+}
+
+static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+ if (host->sg_ptr == &host->bounce_sg) {
+ unsigned long flags;
+ void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
+ memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
+ tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
+ }
+}
+
+/* needs to be called with host->lock held */
+void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
+{
+ struct mmc_data *data = host->data;
+ struct mmc_command *stop;
+
+ host->data = NULL;
+
+ if (!data) {
+ dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
+ return;
+ }
+ stop = data->stop;
+
+ /* FIXME - return correct transfer count on errors */
+ if (!data->error)
+ data->bytes_xfered = data->blocks * data->blksz;
+ else
+ data->bytes_xfered = 0;
+
+ pr_debug("Completed data request\n");
+
+ /*
+ * FIXME: other drivers allow an optional stop command of any given type
+	 * which we don't do, as the chip can auto-generate them.
+ * Perhaps we can be smarter about when to use auto CMD12 and
+ * only issue the auto request when we know this is the desired
+ * stop command, allowing fallback to the stop command the
+ * upper layers expect. For now, we do what works.
+ */
+
+ if (data->flags & MMC_DATA_READ) {
+ if (host->chan_rx && !host->force_pio)
+ tmio_mmc_check_bounce_buffer(host);
+ dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
+ host->mrq);
+ } else {
+ dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
+ host->mrq);
+ }
+
+ if (stop) {
+ if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
+ sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
+ else
+ BUG();
+ }
+
+ schedule_work(&host->done);
+}
+
+static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+{
+ struct mmc_data *data;
+ spin_lock(&host->lock);
+ data = host->data;
+
+ if (!data)
+ goto out;
+
+ if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
+ u32 status = sd_ctrl_read32(host, CTL_STATUS);
+ bool done = false;
+
+ /*
+		 * Has all data been written out yet? Testing on SuperH showed
+		 * that in most cases the first interrupt already comes with the
+		 * BUSY status bit clear, but on some operations, like mount or
+		 * at the beginning of a write / sync / umount, there is one
+		 * DATAEND interrupt with the BUSY bit set; in these cases
+		 * waiting for one more interrupt fixes the problem.
+ */
+ if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
+ if (status & TMIO_STAT_ILL_FUNC)
+ done = true;
+ } else {
+ if (!(status & TMIO_STAT_CMD_BUSY))
+ done = true;
+ }
+
+ if (done) {
+ tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+ tasklet_schedule(&host->dma_complete);
+ }
+ } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
+ tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+ tasklet_schedule(&host->dma_complete);
+ } else {
+ tmio_mmc_do_data_irq(host);
+ tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
+ }
+out:
+ spin_unlock(&host->lock);
+}
+
+static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
+ unsigned int stat)
+{
+ struct mmc_command *cmd = host->cmd;
+ int i, addr;
+
+ spin_lock(&host->lock);
+
+ if (!host->cmd) {
+ pr_debug("Spurious CMD irq\n");
+ goto out;
+ }
+
+ host->cmd = NULL;
+
+ /* This controller is sicker than the PXA one. Not only do we need to
+ * drop the top 8 bits of the first response word, we also need to
+ * modify the order of the response for short response command types.
+ */
+
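+	/*
+	 * For a 136-bit response each word below is shifted left by 8 and
+	 * takes the top byte of the following word, so the useful response
+	 * bits end up contiguous (resp[3] is simply shifted up).
+	 */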
+ for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
+ cmd->resp[i] = sd_ctrl_read32(host, addr);
+
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
+ cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
+ cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
+ cmd->resp[3] <<= 8;
+ } else if (cmd->flags & MMC_RSP_R3) {
+ cmd->resp[0] = cmd->resp[3];
+ }
+
+ if (stat & TMIO_STAT_CMDTIMEOUT)
+ cmd->error = -ETIMEDOUT;
+ else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
+ cmd->error = -EILSEQ;
+
+ /* If there is data to handle we enable data IRQs here, and
+	 * we will ultimately finish the request in the data_end handler.
+	 * If there's no data or we encountered an error, finish now.
+ */
+ if (host->data && !cmd->error) {
+ if (host->data->flags & MMC_DATA_READ) {
+ if (host->force_pio || !host->chan_rx)
+ tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
+ else
+ tasklet_schedule(&host->dma_issue);
+ } else {
+ if (host->force_pio || !host->chan_tx)
+ tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+ else
+ tasklet_schedule(&host->dma_issue);
+ }
+ } else {
+ schedule_work(&host->done);
+ }
+
+out:
+ spin_unlock(&host->lock);
+}
+
+static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
+ int *ireg, int *status)
+{
+ *status = sd_ctrl_read32(host, CTL_STATUS);
+ *ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;
+
+ pr_debug_status(*status);
+ pr_debug_status(*ireg);
+
+ /* Clear the status except the interrupt status */
+ sd_ctrl_write32(host, CTL_STATUS, TMIO_MASK_IRQ);
+}
+
+static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
+ int ireg, int status)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ /* Card insert / remove attempts */
+ if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
+ tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
+ TMIO_STAT_CARD_REMOVE);
+ if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
+ ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
+ !work_pending(&mmc->detect.work))
+ mmc_detect_change(host->mmc, msecs_to_jiffies(100));
+ return true;
+ }
+
+ return false;
+}
+
+irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
+{
+ unsigned int ireg, status;
+ struct tmio_mmc_host *host = devid;
+
+ tmio_mmc_card_irq_status(host, &ireg, &status);
+ __tmio_mmc_card_detect_irq(host, ireg, status);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(tmio_mmc_card_detect_irq);
+
+static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
+ int ireg, int status)
+{
+ /* Command completion */
+ if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
+ tmio_mmc_ack_mmc_irqs(host,
+ TMIO_STAT_CMDRESPEND |
+ TMIO_STAT_CMDTIMEOUT);
+ tmio_mmc_cmd_irq(host, status);
+ return true;
+ }
+
+ /* Data transfer */
+ if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
+ tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
+ tmio_mmc_pio_irq(host);
+ return true;
+ }
+
+ /* Data transfer completion */
+ if (ireg & TMIO_STAT_DATAEND) {
+ tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
+ tmio_mmc_data_irq(host);
+ return true;
+ }
+
+ return false;
+}
+
+irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
+{
+ unsigned int ireg, status;
+ struct tmio_mmc_host *host = devid;
+
+ tmio_mmc_card_irq_status(host, &ireg, &status);
+ __tmio_mmc_sdcard_irq(host, ireg, status);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(tmio_mmc_sdcard_irq);
+
+irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
+{
+ struct tmio_mmc_host *host = devid;
+ struct mmc_host *mmc = host->mmc;
+ struct tmio_mmc_data *pdata = host->pdata;
+ unsigned int ireg, status;
+ unsigned int sdio_status;
+
+ if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
+ return IRQ_HANDLED;
+
+ status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
+ ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;
+
+ sdio_status = status & ~TMIO_SDIO_MASK_ALL;
+ if (pdata->flags & TMIO_MMC_SDIO_STATUS_QUIRK)
+ sdio_status |= 6;
+
+ sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);
+
+ if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
+ mmc_signal_sdio_irq(mmc);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(tmio_mmc_sdio_irq);
+
+irqreturn_t tmio_mmc_irq(int irq, void *devid)
+{
+ struct tmio_mmc_host *host = devid;
+ unsigned int ireg, status;
+
+ pr_debug("MMC IRQ begin\n");
+
+ tmio_mmc_card_irq_status(host, &ireg, &status);
+ if (__tmio_mmc_card_detect_irq(host, ireg, status))
+ return IRQ_HANDLED;
+ if (__tmio_mmc_sdcard_irq(host, ireg, status))
+ return IRQ_HANDLED;
+
+ tmio_mmc_sdio_irq(irq, devid);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(tmio_mmc_irq);
+
+static int tmio_mmc_start_data(struct tmio_mmc_host *host,
+ struct mmc_data *data)
+{
+ struct tmio_mmc_data *pdata = host->pdata;
+
+ pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
+ data->blksz, data->blocks);
+
+ /* Some hardware cannot perform 2 byte requests in 4 bit mode */
+ if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
+ int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;
+
+ if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
+ pr_err("%s: %d byte block unsupported in 4 bit mode\n",
+ mmc_hostname(host->mmc), data->blksz);
+ return -EINVAL;
+ }
+ }
+
+ tmio_mmc_init_sg(host, data);
+ host->data = data;
+
+ /* Set transfer length / blocksize */
+ sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
+ sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
+
+ tmio_mmc_start_dma(host, data);
+
+ return 0;
+}
+
+/* Process requests from the MMC layer */
+static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->mrq) {
+ pr_debug("request not null\n");
+ if (IS_ERR(host->mrq)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ mrq->cmd->error = -EAGAIN;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ }
+
+ host->last_req_ts = jiffies;
+ wmb();
+ host->mrq = mrq;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ pm_runtime_get_sync(mmc_dev(mmc));
+
+ if (mrq->data) {
+ ret = tmio_mmc_start_data(host, mrq->data);
+ if (ret)
+ goto fail;
+ }
+
+ ret = tmio_mmc_start_command(host, mrq->cmd);
+ if (!ret) {
+ schedule_delayed_work(&host->delayed_reset_work,
+ msecs_to_jiffies(2000));
+ return;
+ }
+
+fail:
+ host->force_pio = false;
+ host->mrq = NULL;
+ mrq->cmd->error = ret;
+ mmc_request_done(mmc, mrq);
+
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
+}
+
+static int tmio_mmc_clk_update(struct tmio_mmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ int ret;
+
+ if (!host->clk_enable)
+ return -ENOTSUPP;
+
+ ret = host->clk_enable(host->pdev, &mmc->f_max);
+ if (!ret)
+ mmc->f_min = mmc->f_max / 512;
+
+ return ret;
+}
+
+static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+ int ret = 0;
+
+ /* .set_ios() is returning void, so, no chance to report an error */
+
+ if (host->set_pwr)
+ host->set_pwr(host->pdev, 1);
+
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ /*
+		 * Attention: empirical value. With a b43 WiFi SDIO card this
+		 * delay proved necessary for reliable card-insertion probing.
+		 * 100us was not enough. Is this the same 140us delay as in
+ * tmio_mmc_set_ios()?
+ */
+ udelay(200);
+ }
+ /*
+	 * It seems VccQ should be switched on after Vcc; this is also what the
+ * omap_hsmmc.c driver does.
+ */
+ if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ udelay(200);
+ }
+
+ if (ret < 0)
+ dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
+ ret);
+}
+
+static void tmio_mmc_power_off(struct tmio_mmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ if (!IS_ERR(mmc->supply.vqmmc))
+ regulator_disable(mmc->supply.vqmmc);
+
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+ if (host->set_pwr)
+ host->set_pwr(host->pdev, 0);
+}
+
+static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
+ unsigned char bus_width)
+{
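+	/* the option-register values are opaque; bit 15 apparently selects 1-bit mode */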
+ switch (bus_width) {
+ case MMC_BUS_WIDTH_1:
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
+ break;
+ case MMC_BUS_WIDTH_4:
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
+ break;
+ }
+}
+
+/* Set MMC clock / power.
+ * Note: This controller uses a simple divider scheme, therefore it cannot
+ * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
+ * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
+ * slowest setting.
+ */
+static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct device *dev = &host->pdev->dev;
+ unsigned long flags;
+
+ pm_runtime_get_sync(mmc_dev(mmc));
+
+ mutex_lock(&host->ios_lock);
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->mrq) {
+ if (IS_ERR(host->mrq)) {
+ dev_dbg(dev,
+ "%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
+ current->comm, task_pid_nr(current),
+ ios->clock, ios->power_mode);
+ host->mrq = ERR_PTR(-EINTR);
+ } else {
+ dev_dbg(dev,
+ "%s.%d: CMD%u active since %lu, now %lu!\n",
+ current->comm, task_pid_nr(current),
+ host->mrq->cmd->opcode, host->last_req_ts, jiffies);
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mutex_unlock(&host->ios_lock);
+ return;
+ }
+
+ host->mrq = ERR_PTR(-EBUSY);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ tmio_mmc_power_off(host);
+ tmio_mmc_clk_stop(host);
+ break;
+ case MMC_POWER_UP:
+ tmio_mmc_set_clock(host, ios->clock);
+ tmio_mmc_power_on(host, ios->vdd);
+ tmio_mmc_clk_start(host);
+ tmio_mmc_set_bus_width(host, ios->bus_width);
+ break;
+ case MMC_POWER_ON:
+ tmio_mmc_set_clock(host, ios->clock);
+ tmio_mmc_clk_start(host);
+ tmio_mmc_set_bus_width(host, ios->bus_width);
+ break;
+ }
+
+	/* Let things settle. Delay taken from the WinCE driver. */
+ udelay(140);
+ if (PTR_ERR(host->mrq) == -EINTR)
+ dev_dbg(&host->pdev->dev,
+ "%s.%d: IOS interrupted: clk %u, mode %u",
+ current->comm, task_pid_nr(current),
+ ios->clock, ios->power_mode);
+ host->mrq = NULL;
+
+ host->clk_cache = ios->clock;
+
+ mutex_unlock(&host->ios_lock);
+
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
+}
+
+static int tmio_mmc_get_ro(struct mmc_host *mmc)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_data *pdata = host->pdata;
+ int ret = mmc_gpio_get_ro(mmc);
+ if (ret >= 0)
+ return ret;
+
+ pm_runtime_get_sync(mmc_dev(mmc));
+ ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+ (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
+
+ return ret;
+}
+
+static int tmio_multi_io_quirk(struct mmc_card *card,
+ unsigned int direction, int blk_size)
+{
+ struct tmio_mmc_host *host = mmc_priv(card->host);
+
+ if (host->multi_io_quirk)
+ return host->multi_io_quirk(card, direction, blk_size);
+
+ return blk_size;
+}
+
+static const struct mmc_host_ops tmio_mmc_ops = {
+ .request = tmio_mmc_request,
+ .set_ios = tmio_mmc_set_ios,
+ .get_ro = tmio_mmc_get_ro,
+ .get_cd = mmc_gpio_get_cd,
+ .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
+ .multi_io_quirk = tmio_multi_io_quirk,
+};
+
+static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
+{
+ struct tmio_mmc_data *pdata = host->pdata;
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_get_supply(mmc);
+
+ /* use ocr_mask if no regulator */
+ if (!mmc->ocr_avail)
+ mmc->ocr_avail = pdata->ocr_mask;
+
+ /*
+	 * Try again.
+	 * There is a possibility that the regulator has not been probed yet.
+ */
+ if (!mmc->ocr_avail)
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+
+static void tmio_mmc_of_parse(struct platform_device *pdev,
+ struct tmio_mmc_data *pdata)
+{
+ const struct device_node *np = pdev->dev.of_node;
+ if (!np)
+ return;
+
+ if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
+ pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
+}
+
+struct tmio_mmc_host*
+tmio_mmc_host_alloc(struct platform_device *pdev)
+{
+ struct tmio_mmc_host *host;
+ struct mmc_host *mmc;
+
+ mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
+ if (!mmc)
+ return NULL;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->pdev = pdev;
+
+ return host;
+}
+EXPORT_SYMBOL(tmio_mmc_host_alloc);
+
+void tmio_mmc_host_free(struct tmio_mmc_host *host)
+{
+ mmc_free_host(host->mmc);
+}
+EXPORT_SYMBOL(tmio_mmc_host_free);
+
+int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
+ struct tmio_mmc_data *pdata)
+{
+ struct platform_device *pdev = _host->pdev;
+ struct mmc_host *mmc = _host->mmc;
+ struct resource *res_ctl;
+ int ret;
+ u32 irq_mask = TMIO_MASK_CMD;
+
+ tmio_mmc_of_parse(pdev, pdata);
+
+ if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
+ _host->write16_hook = NULL;
+
+ res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_ctl)
+ return -EINVAL;
+
+ ret = mmc_of_parse(mmc);
+ if (ret < 0)
+ goto host_free;
+
+ _host->pdata = pdata;
+ platform_set_drvdata(pdev, mmc);
+
+ _host->set_pwr = pdata->set_pwr;
+ _host->set_clk_div = pdata->set_clk_div;
+
+ ret = tmio_mmc_init_ocr(_host);
+ if (ret < 0)
+ goto host_free;
+
+ _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
+ if (!_host->ctl) {
+ ret = -ENOMEM;
+ goto host_free;
+ }
+
+ mmc->ops = &tmio_mmc_ops;
+ mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
+ mmc->caps2 |= pdata->capabilities2;
+ mmc->max_segs = 32;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+ mmc->max_segs;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
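+	/* with 4 KiB pages this works out to 256 blocks / 128 KiB per request */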
+
+ _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
+ mmc->caps & MMC_CAP_NEEDS_POLL ||
+ mmc->caps & MMC_CAP_NONREMOVABLE ||
+ mmc->slot.cd_irq >= 0);
+
+ if (tmio_mmc_clk_update(_host) < 0) {
+ mmc->f_max = pdata->hclk;
+ mmc->f_min = mmc->f_max / 512;
+ }
+
+ /*
+ * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
+ * looping forever...
+ */
+ if (mmc->f_min == 0) {
+ ret = -EINVAL;
+ goto host_free;
+ }
+
+ /*
+ * While using internal tmio hardware logic for card detection, we need
+ * to ensure it stays powered for it to work.
+ */
+ if (_host->native_hotplug)
+ pm_runtime_get_noresume(&pdev->dev);
+
+ tmio_mmc_clk_stop(_host);
+ tmio_mmc_reset(_host);
+
+ _host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
+ tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
+
+ /* Unmask the IRQs we want to know about */
+ if (!_host->chan_rx)
+ irq_mask |= TMIO_MASK_READOP;
+ if (!_host->chan_tx)
+ irq_mask |= TMIO_MASK_WRITEOP;
+ if (!_host->native_hotplug)
+ irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
+
+ _host->sdcard_irq_mask &= ~irq_mask;
+
+ _host->sdio_irq_enabled = false;
+ if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
+ _host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
+ sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
+ sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0000);
+ }
+
+ spin_lock_init(&_host->lock);
+ mutex_init(&_host->ios_lock);
+
+ /* Init delayed work for request timeouts */
+ INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
+ INIT_WORK(&_host->done, tmio_mmc_done_work);
+
+ /* See if we also get DMA */
+ tmio_mmc_request_dma(_host, pdata);
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = mmc_add_host(mmc);
+ if (ret < 0) {
+ tmio_mmc_host_remove(_host);
+ return ret;
+ }
+
+ dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
+
+ if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
+ ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
+ if (ret < 0) {
+ tmio_mmc_host_remove(_host);
+ return ret;
+ }
+ mmc_gpiod_request_cd_irq(mmc);
+ }
+
+ return 0;
+
+host_free:
+
+ return ret;
+}
+EXPORT_SYMBOL(tmio_mmc_host_probe);
+
+void tmio_mmc_host_remove(struct tmio_mmc_host *host)
+{
+ struct platform_device *pdev = host->pdev;
+ struct mmc_host *mmc = host->mmc;
+
+ if (!host->native_hotplug)
+ pm_runtime_get_sync(&pdev->dev);
+
+ dev_pm_qos_hide_latency_limit(&pdev->dev);
+
+ mmc_remove_host(mmc);
+ cancel_work_sync(&host->done);
+ cancel_delayed_work_sync(&host->delayed_reset_work);
+ tmio_mmc_release_dma(host);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ iounmap(host->ctl);
+}
+EXPORT_SYMBOL(tmio_mmc_host_remove);
+
+#ifdef CONFIG_PM
+int tmio_mmc_host_runtime_suspend(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
+
+ if (host->clk_cache)
+ tmio_mmc_clk_stop(host);
+
+ if (host->clk_disable)
+ host->clk_disable(host->pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
+
+int tmio_mmc_host_runtime_resume(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ tmio_mmc_reset(host);
+ tmio_mmc_clk_update(host);
+
+ if (host->clk_cache) {
+ tmio_mmc_set_clock(host, host->clk_cache);
+ tmio_mmc_clk_start(host);
+ }
+
+ tmio_mmc_enable_dma(host, true);
+
+ return 0;
+}
+EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
+#endif
+
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/toshsd.c b/kernel/drivers/mmc/host/toshsd.c
new file mode 100644
index 000000000..e2cdd5fb1
--- /dev/null
+++ b/kernel/drivers/mmc/host/toshsd.c
@@ -0,0 +1,708 @@
+/*
+ * Toshiba PCI Secure Digital Host Controller Interface driver
+ *
+ * Copyright (C) 2014 Ondrej Zary
+ * Copyright (C) 2007 Richard Betts, All Rights Reserved.
+ *
+ * Based on asic3_mmc.c, copyright (c) 2005 SDG Systems, LLC and,
+ * sdhci.c, copyright (C) 2005-2006 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+
+#include "toshsd.h"
+
+#define DRIVER_NAME "toshsd"
+
+static const struct pci_device_id pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA, 0x0805) },
+ { /* end: all zeroes */ },
+};
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static void toshsd_init(struct toshsd_host *host)
+{
+ /* enable clock */
+ pci_write_config_byte(host->pdev, SD_PCICFG_CLKSTOP,
+ SD_PCICFG_CLKSTOP_ENABLE_ALL);
+ pci_write_config_byte(host->pdev, SD_PCICFG_CARDDETECT, 2);
+
+ /* reset */
+ iowrite16(0, host->ioaddr + SD_SOFTWARERESET); /* assert */
+ mdelay(2);
+ iowrite16(1, host->ioaddr + SD_SOFTWARERESET); /* deassert */
+ mdelay(2);
+
+ /* Clear card registers */
+ iowrite16(0, host->ioaddr + SD_CARDCLOCKCTRL);
+ iowrite32(0, host->ioaddr + SD_CARDSTATUS);
+ iowrite32(0, host->ioaddr + SD_ERRORSTATUS0);
+ iowrite16(0, host->ioaddr + SD_STOPINTERNAL);
+
+ /* SDIO clock? */
+ iowrite16(0x100, host->ioaddr + SDIO_BASE + SDIO_CLOCKNWAITCTRL);
+
+ /* enable LED */
+ pci_write_config_byte(host->pdev, SD_PCICFG_SDLED_ENABLE1,
+ SD_PCICFG_LED_ENABLE1_START);
+ pci_write_config_byte(host->pdev, SD_PCICFG_SDLED_ENABLE2,
+ SD_PCICFG_LED_ENABLE2_START);
+
+ /* set interrupt masks */
+ iowrite32(~(u32)(SD_CARD_RESP_END | SD_CARD_RW_END
+ | SD_CARD_CARD_REMOVED_0 | SD_CARD_CARD_INSERTED_0
+ | SD_BUF_READ_ENABLE | SD_BUF_WRITE_ENABLE
+ | SD_BUF_CMD_TIMEOUT),
+ host->ioaddr + SD_INTMASKCARD);
+
+ iowrite16(0x1000, host->ioaddr + SD_TRANSACTIONCTRL);
+}
+
+/* Set MMC clock / power.
+ * Note: this controller uses a simple divider scheme, so it cannot run
+ * SD/MMC cards at full speed (24/20 MHz): HCLK (the 33 MHz PCI clock?) is
+ * too high, and the next slowest rate is 16 MHz (div=2).
+ */
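+/*
+ * Worked example (illustrative numbers, not from a datasheet): asking for
+ * a 400 kHz card clock with HCLK = 33 MHz makes the loop below settle on
+ * div = 128, the first power of two with HCLK / div <= 400 kHz. The
+ * register encoding is then simply div >> 2 = 0x20, which matches
+ * SD_CARDCLK_CLK_DIV_128 (BIT(5)) in toshsd.h.
+ */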
+static void __toshsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct toshsd_host *host = mmc_priv(mmc);
+
+ if (ios->clock) {
+ u16 clk;
+ int div = 1;
+
+ while (ios->clock < HCLK / div)
+ div *= 2;
+
+ clk = div >> 2;
+
+ if (div == 1) { /* disable the divider */
+ pci_write_config_byte(host->pdev, SD_PCICFG_CLKMODE,
+ SD_PCICFG_CLKMODE_DIV_DISABLE);
+ clk |= SD_CARDCLK_DIV_DISABLE;
+ } else
+ pci_write_config_byte(host->pdev, SD_PCICFG_CLKMODE, 0);
+
+ clk |= SD_CARDCLK_ENABLE_CLOCK;
+ iowrite16(clk, host->ioaddr + SD_CARDCLOCKCTRL);
+
+ mdelay(10);
+ } else
+ iowrite16(0, host->ioaddr + SD_CARDCLOCKCTRL);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ pci_write_config_byte(host->pdev, SD_PCICFG_POWER1,
+ SD_PCICFG_PWR1_OFF);
+ mdelay(1);
+ break;
+ case MMC_POWER_UP:
+ break;
+ case MMC_POWER_ON:
+ pci_write_config_byte(host->pdev, SD_PCICFG_POWER1,
+ SD_PCICFG_PWR1_33V);
+ pci_write_config_byte(host->pdev, SD_PCICFG_POWER2,
+ SD_PCICFG_PWR2_AUTO);
+ mdelay(20);
+ break;
+ }
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ iowrite16(SD_CARDOPT_REQUIRED | SD_CARDOPT_DATA_RESP_TIMEOUT(14)
+ | SD_CARDOPT_C2_MODULE_ABSENT
+ | SD_CARDOPT_DATA_XFR_WIDTH_1,
+ host->ioaddr + SD_CARDOPTIONSETUP);
+ break;
+ case MMC_BUS_WIDTH_4:
+ iowrite16(SD_CARDOPT_REQUIRED | SD_CARDOPT_DATA_RESP_TIMEOUT(14)
+ | SD_CARDOPT_C2_MODULE_ABSENT
+ | SD_CARDOPT_DATA_XFR_WIDTH_4,
+ host->ioaddr + SD_CARDOPTIONSETUP);
+ break;
+ }
+}
+
+static void toshsd_set_led(struct toshsd_host *host, unsigned char state)
+{
+ iowrite16(state, host->ioaddr + SDIO_BASE + SDIO_LEDCTRL);
+}
+
+static void toshsd_finish_request(struct toshsd_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+
+ /* Drop our references to the request before completing it */
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+
+ toshsd_set_led(host, 0);
+ mmc_request_done(host->mmc, mrq);
+}
+
+static irqreturn_t toshsd_thread_irq(int irq, void *dev_id)
+{
+ struct toshsd_host *host = dev_id;
+ struct mmc_data *data = host->data;
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ unsigned short *buf;
+ int count;
+ unsigned long flags;
+
+ if (!data) {
+ dev_warn(&host->pdev->dev, "Spurious Data IRQ\n");
+ if (host->cmd) {
+ host->cmd->error = -EIO;
+ toshsd_finish_request(host);
+ }
+ return IRQ_NONE;
+ }
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (!sg_miter_next(sg_miter))
+ goto done;
+
+ buf = sg_miter->addr;
+
+ /* Ensure we don't read more than one block. The chip will interrupt us
+ * when the next block is available.
+ */
+ count = sg_miter->length;
+ if (count > data->blksz)
+ count = data->blksz;
+
+ dev_dbg(&host->pdev->dev, "count: %08x, flags %08x\n", count,
+ data->flags);
+
+ /* Transfer the data */
+ if (data->flags & MMC_DATA_READ)
+ ioread32_rep(host->ioaddr + SD_DATAPORT, buf, count >> 2);
+ else
+ iowrite32_rep(host->ioaddr + SD_DATAPORT, buf, count >> 2);
+
+ sg_miter->consumed = count;
+ sg_miter_stop(sg_miter);
+
+done:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void toshsd_cmd_irq(struct toshsd_host *host)
+{
+ struct mmc_command *cmd = host->cmd;
+ u8 *buf;
+ u16 data;
+
+ if (!host->cmd) {
+ dev_warn(&host->pdev->dev, "Spurious CMD irq\n");
+ return;
+ }
+ buf = (u8 *)cmd->resp;
+ host->cmd = NULL;
+
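+ /*
+ * For an R2 (136-bit) response the payload arrives split across the
+ * eight 16-bit response registers. The byte shuffling below reassembles
+ * it into the cmd->resp[0..3] layout the MMC core expects, via a
+ * little-endian byte view; the low CRC byte is not read from the
+ * hardware and is filled in as 0xff (buf[12]).
+ */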
+ if (cmd->flags & MMC_RSP_PRESENT && cmd->flags & MMC_RSP_136) {
+ /* R2 */
+ buf[12] = 0xff;
+ data = ioread16(host->ioaddr + SD_RESPONSE0);
+ buf[13] = data & 0xff;
+ buf[14] = data >> 8;
+ data = ioread16(host->ioaddr + SD_RESPONSE1);
+ buf[15] = data & 0xff;
+ buf[8] = data >> 8;
+ data = ioread16(host->ioaddr + SD_RESPONSE2);
+ buf[9] = data & 0xff;
+ buf[10] = data >> 8;
+ data = ioread16(host->ioaddr + SD_RESPONSE3);
+ buf[11] = data & 0xff;
+ buf[4] = data >> 8;
+ data = ioread16(host->ioaddr + SD_RESPONSE4);
+ buf[5] = data & 0xff;
+ buf[6] = data >> 8;
+ data = ioread16(host->ioaddr + SD_RESPONSE5);
+ buf[7] = data & 0xff;
+ buf[0] = data >> 8;
+ data = ioread16(host->ioaddr + SD_RESPONSE6);
+ buf[1] = data & 0xff;
+ buf[2] = data >> 8;
+ data = ioread16(host->ioaddr + SD_RESPONSE7);
+ buf[3] = data & 0xff;
+ } else if (cmd->flags & MMC_RSP_PRESENT) {
+ /* R1, R1B, R3, R6, R7 */
+ data = ioread16(host->ioaddr + SD_RESPONSE0);
+ buf[0] = data & 0xff;
+ buf[1] = data >> 8;
+ data = ioread16(host->ioaddr + SD_RESPONSE1);
+ buf[2] = data & 0xff;
+ buf[3] = data >> 8;
+ }
+
+ dev_dbg(&host->pdev->dev, "Command IRQ complete %d %d %x\n",
+ cmd->opcode, cmd->error, cmd->flags);
+
+ /* If there is data to handle, we will
+ * finish the request in the toshsd_data_end_irq() handler. */
+ if (host->data)
+ return;
+
+ toshsd_finish_request(host);
+}
+
+static void toshsd_data_end_irq(struct toshsd_host *host)
+{
+ struct mmc_data *data = host->data;
+
+ host->data = NULL;
+
+ if (!data) {
+ dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
+ return;
+ }
+
+ if (data->error == 0)
+ data->bytes_xfered = data->blocks * data->blksz;
+ else
+ data->bytes_xfered = 0;
+
+ dev_dbg(&host->pdev->dev, "Completed data request xfr=%d\n",
+ data->bytes_xfered);
+
+ iowrite16(0, host->ioaddr + SD_STOPINTERNAL);
+
+ toshsd_finish_request(host);
+}
+
+static irqreturn_t toshsd_irq(int irq, void *dev_id)
+{
+ struct toshsd_host *host = dev_id;
+ u32 int_reg, int_mask, int_status, detail;
+ int error = 0, ret = IRQ_HANDLED;
+
+ spin_lock(&host->lock);
+ int_status = ioread32(host->ioaddr + SD_CARDSTATUS);
+ int_mask = ioread32(host->ioaddr + SD_INTMASKCARD);
+ int_reg = int_status & ~int_mask & ~IRQ_DONT_CARE_BITS;
+
+ dev_dbg(&host->pdev->dev, "IRQ status:%x mask:%x\n",
+ int_status, int_mask);
+
+ /* nothing to do: it's not our IRQ */
+ if (!int_reg) {
+ ret = IRQ_NONE;
+ goto irq_end;
+ }
+
+ if (int_reg & SD_BUF_CMD_TIMEOUT) {
+ error = -ETIMEDOUT;
+ dev_dbg(&host->pdev->dev, "Timeout\n");
+ } else if (int_reg & SD_BUF_CRC_ERR) {
+ error = -EILSEQ;
+ dev_err(&host->pdev->dev, "BadCRC\n");
+ } else if (int_reg & (SD_BUF_ILLEGAL_ACCESS
+ | SD_BUF_CMD_INDEX_ERR
+ | SD_BUF_STOP_BIT_END_ERR
+ | SD_BUF_OVERFLOW
+ | SD_BUF_UNDERFLOW
+ | SD_BUF_DATA_TIMEOUT)) {
+ dev_err(&host->pdev->dev, "Buffer status error: { %s%s%s%s%s%s}\n",
+ int_reg & SD_BUF_ILLEGAL_ACCESS ? "ILLEGAL_ACC " : "",
+ int_reg & SD_BUF_CMD_INDEX_ERR ? "CMD_INDEX " : "",
+ int_reg & SD_BUF_STOP_BIT_END_ERR ? "STOPBIT_END " : "",
+ int_reg & SD_BUF_OVERFLOW ? "OVERFLOW " : "",
+ int_reg & SD_BUF_UNDERFLOW ? "UNDERFLOW " : "",
+ int_reg & SD_BUF_DATA_TIMEOUT ? "DATA_TIMEOUT " : "");
+
+ detail = ioread32(host->ioaddr + SD_ERRORSTATUS0);
+ dev_err(&host->pdev->dev, "detail error status { %s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
+ detail & SD_ERR0_RESP_CMD_ERR ? "RESP_CMD " : "",
+ detail & SD_ERR0_RESP_NON_CMD12_END_BIT_ERR ? "RESP_END_BIT " : "",
+ detail & SD_ERR0_RESP_CMD12_END_BIT_ERR ? "RESP_END_BIT " : "",
+ detail & SD_ERR0_READ_DATA_END_BIT_ERR ? "READ_DATA_END_BIT " : "",
+ detail & SD_ERR0_WRITE_CRC_STATUS_END_BIT_ERR ? "WRITE_CMD_END_BIT " : "",
+ detail & SD_ERR0_RESP_NON_CMD12_CRC_ERR ? "RESP_CRC " : "",
+ detail & SD_ERR0_RESP_CMD12_CRC_ERR ? "RESP_CRC " : "",
+ detail & SD_ERR0_READ_DATA_CRC_ERR ? "READ_DATA_CRC " : "",
+ detail & SD_ERR0_WRITE_CMD_CRC_ERR ? "WRITE_CMD_CRC " : "",
+ detail & SD_ERR1_NO_CMD_RESP ? "NO_CMD_RESP " : "",
+ detail & SD_ERR1_TIMEOUT_READ_DATA ? "READ_DATA_TIMEOUT " : "",
+ detail & SD_ERR1_TIMEOUT_CRS_STATUS ? "CRS_STATUS_TIMEOUT " : "",
+ detail & SD_ERR1_TIMEOUT_CRC_BUSY ? "CRC_BUSY_TIMEOUT " : "");
+ error = -EIO;
+ }
+
+ if (error) {
+ if (host->cmd)
+ host->cmd->error = error;
+
+ if (error == -ETIMEDOUT) {
+ iowrite32(int_status &
+ ~(SD_BUF_CMD_TIMEOUT | SD_CARD_RESP_END),
+ host->ioaddr + SD_CARDSTATUS);
+ } else {
+ toshsd_init(host);
+ __toshsd_set_ios(host->mmc, &host->mmc->ios);
+ goto irq_end;
+ }
+ }
+
+ /* Card insert/remove. The mmc controlling code is stateless. */
+ if (int_reg & (SD_CARD_CARD_INSERTED_0 | SD_CARD_CARD_REMOVED_0)) {
+ iowrite32(int_status &
+ ~(SD_CARD_CARD_REMOVED_0 | SD_CARD_CARD_INSERTED_0),
+ host->ioaddr + SD_CARDSTATUS);
+
+ if (int_reg & SD_CARD_CARD_INSERTED_0)
+ toshsd_init(host);
+
+ mmc_detect_change(host->mmc, 1);
+ }
+
+ /* Data transfer */
+ if (int_reg & (SD_BUF_READ_ENABLE | SD_BUF_WRITE_ENABLE)) {
+ iowrite32(int_status &
+ ~(SD_BUF_WRITE_ENABLE | SD_BUF_READ_ENABLE),
+ host->ioaddr + SD_CARDSTATUS);
+
+ ret = IRQ_WAKE_THREAD;
+ goto irq_end;
+ }
+
+ /* Command completion */
+ if (int_reg & SD_CARD_RESP_END) {
+ iowrite32(int_status & ~(SD_CARD_RESP_END),
+ host->ioaddr + SD_CARDSTATUS);
+ toshsd_cmd_irq(host);
+ }
+
+ /* Data transfer completion */
+ if (int_reg & SD_CARD_RW_END) {
+ iowrite32(int_status & ~(SD_CARD_RW_END),
+ host->ioaddr + SD_CARDSTATUS);
+ toshsd_data_end_irq(host);
+ }
+irq_end:
+ spin_unlock(&host->lock);
+ return ret;
+}
+
+static void toshsd_start_cmd(struct toshsd_host *host, struct mmc_command *cmd)
+{
+ struct mmc_data *data = host->data;
+ int c = cmd->opcode;
+
+ dev_dbg(&host->pdev->dev, "Command opcode: %d\n", cmd->opcode);
+
+ if (cmd->opcode == MMC_STOP_TRANSMISSION) {
+ iowrite16(SD_STOPINT_ISSUE_CMD12,
+ host->ioaddr + SD_STOPINTERNAL);
+
+ cmd->resp[0] = cmd->opcode;
+ cmd->resp[1] = 0;
+ cmd->resp[2] = 0;
+ cmd->resp[3] = 0;
+
+ toshsd_finish_request(host);
+ return;
+ }
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ c |= SD_CMD_RESP_TYPE_NONE;
+ break;
+
+ case MMC_RSP_R1:
+ c |= SD_CMD_RESP_TYPE_EXT_R1;
+ break;
+ case MMC_RSP_R1B:
+ c |= SD_CMD_RESP_TYPE_EXT_R1B;
+ break;
+ case MMC_RSP_R2:
+ c |= SD_CMD_RESP_TYPE_EXT_R2;
+ break;
+ case MMC_RSP_R3:
+ c |= SD_CMD_RESP_TYPE_EXT_R3;
+ break;
+
+ default:
+ dev_err(&host->pdev->dev, "Unknown response type %d\n",
+ mmc_resp_type(cmd));
+ break;
+ }
+
+ host->cmd = cmd;
+
+ if (cmd->opcode == MMC_APP_CMD)
+ c |= SD_CMD_TYPE_ACMD;
+
+ if (cmd->opcode == MMC_GO_IDLE_STATE)
+ c |= (3 << 8); /* removed from ipaq-asic3.h for some reason */
+
+ if (data) {
+ c |= SD_CMD_DATA_PRESENT;
+
+ if (data->blocks > 1) {
+ iowrite16(SD_STOPINT_AUTO_ISSUE_CMD12,
+ host->ioaddr + SD_STOPINTERNAL);
+ c |= SD_CMD_MULTI_BLOCK;
+ }
+
+ if (data->flags & MMC_DATA_READ)
+ c |= SD_CMD_TRANSFER_READ;
+
+ /* MMC_DATA_WRITE does not require a bit to be set */
+ }
+
+ /* Send the command */
+ iowrite32(cmd->arg, host->ioaddr + SD_ARG0);
+ iowrite16(c, host->ioaddr + SD_CMD);
+}
+
+static void toshsd_start_data(struct toshsd_host *host, struct mmc_data *data)
+{
+ unsigned int flags = SG_MITER_ATOMIC;
+
+ dev_dbg(&host->pdev->dev, "setup data transfer: blocksize %08x nr_blocks %d, offset: %08x\n",
+ data->blksz, data->blocks, data->sg->offset);
+
+ host->data = data;
+
+ if (data->flags & MMC_DATA_READ)
+ flags |= SG_MITER_TO_SG;
+ else
+ flags |= SG_MITER_FROM_SG;
+
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
+
+ /* Set transfer length and blocksize */
+ iowrite16(data->blocks, host->ioaddr + SD_BLOCKCOUNT);
+ iowrite16(data->blksz, host->ioaddr + SD_CARDXFERDATALEN);
+}
+
+/* Process requests from the MMC layer */
+static void toshsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct toshsd_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ /* abort if card not present */
+ if (!(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_PRESENT_0)) {
+ mrq->cmd->error = -ENOMEDIUM;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ WARN_ON(host->mrq != NULL);
+
+ host->mrq = mrq;
+
+ if (mrq->data)
+ toshsd_start_data(host, mrq->data);
+
+ toshsd_set_led(host, 1);
+
+ toshsd_start_cmd(host, mrq->cmd);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void toshsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct toshsd_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ __toshsd_set_ios(mmc, ios);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int toshsd_get_ro(struct mmc_host *mmc)
+{
+ struct toshsd_host *host = mmc_priv(mmc);
+
+ /* active low */
+ return !(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_WRITE_PROTECT);
+}
+
+static int toshsd_get_cd(struct mmc_host *mmc)
+{
+ struct toshsd_host *host = mmc_priv(mmc);
+
+ return !!(ioread16(host->ioaddr + SD_CARDSTATUS) & SD_CARD_PRESENT_0);
+}
+
+static struct mmc_host_ops toshsd_ops = {
+ .request = toshsd_request,
+ .set_ios = toshsd_set_ios,
+ .get_ro = toshsd_get_ro,
+ .get_cd = toshsd_get_cd,
+};
+
+
+static void toshsd_powerdown(struct toshsd_host *host)
+{
+ /* mask all interrupts */
+ iowrite32(0xffffffff, host->ioaddr + SD_INTMASKCARD);
+ /* disable card clock */
+ iowrite16(0x000, host->ioaddr + SDIO_BASE + SDIO_CLOCKNWAITCTRL);
+ iowrite16(0, host->ioaddr + SD_CARDCLOCKCTRL);
+ /* power down card */
+ pci_write_config_byte(host->pdev, SD_PCICFG_POWER1, SD_PCICFG_PWR1_OFF);
+ /* disable clock */
+ pci_write_config_byte(host->pdev, SD_PCICFG_CLKSTOP, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int toshsd_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct toshsd_host *host = pci_get_drvdata(pdev);
+
+ toshsd_powerdown(host);
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int toshsd_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct toshsd_host *host = pci_get_drvdata(pdev);
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ toshsd_init(host);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+ struct toshsd_host *host;
+ struct mmc_host *mmc;
+ resource_size_t base;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ mmc = mmc_alloc_host(sizeof(struct toshsd_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ host->pdev = pdev;
+ pci_set_drvdata(pdev, host);
+
+ ret = pci_request_regions(pdev, DRIVER_NAME);
+ if (ret)
+ goto free;
+
+ host->ioaddr = pci_iomap(pdev, 0, 0);
+ if (!host->ioaddr) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ /* Set MMC host parameters */
+ mmc->ops = &toshsd_ops;
+ mmc->caps = MMC_CAP_4_BIT_DATA;
+ mmc->ocr_avail = MMC_VDD_32_33;
+
+ mmc->f_min = HCLK / 512;
+ mmc->f_max = HCLK;
+
+ spin_lock_init(&host->lock);
+
+ toshsd_init(host);
+
+ ret = request_threaded_irq(pdev->irq, toshsd_irq, toshsd_thread_irq,
+ IRQF_SHARED, DRIVER_NAME, host);
+ if (ret)
+ goto unmap;
+
+ mmc_add_host(mmc);
+
+ base = pci_resource_start(pdev, 0);
+ dev_dbg(&pdev->dev, "MMIO %pa, IRQ %d\n", &base, pdev->irq);
+
+ pm_suspend_ignore_children(&pdev->dev, 1);
+
+ return 0;
+
+unmap:
+ pci_iounmap(pdev, host->ioaddr);
+release:
+ pci_release_regions(pdev);
+free:
+ mmc_free_host(mmc);
+ pci_set_drvdata(pdev, NULL);
+err:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void toshsd_remove(struct pci_dev *pdev)
+{
+ struct toshsd_host *host = pci_get_drvdata(pdev);
+
+ mmc_remove_host(host->mmc);
+ toshsd_powerdown(host);
+ free_irq(pdev->irq, host);
+ pci_iounmap(pdev, host->ioaddr);
+ pci_release_regions(pdev);
+ mmc_free_host(host->mmc);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+}
+
+static const struct dev_pm_ops toshsd_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(toshsd_pm_suspend, toshsd_pm_resume)
+};
+
+static struct pci_driver toshsd_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pci_ids,
+ .probe = toshsd_probe,
+ .remove = toshsd_remove,
+ .driver.pm = &toshsd_pm_ops,
+};
+
+module_pci_driver(toshsd_driver);
+
+MODULE_AUTHOR("Ondrej Zary, Richard Betts");
+MODULE_DESCRIPTION("Toshiba PCI Secure Digital Host Controller Interface driver");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/mmc/host/toshsd.h b/kernel/drivers/mmc/host/toshsd.h
new file mode 100644
index 000000000..b6c0d89e5
--- /dev/null
+++ b/kernel/drivers/mmc/host/toshsd.h
@@ -0,0 +1,176 @@
+/*
+ * Toshiba PCI Secure Digital Host Controller Interface driver
+ *
+ * Copyright (C) 2014 Ondrej Zary
+ * Copyright (C) 2007 Richard Betts, All Rights Reserved.
+ *
+ * Based on asic3_mmc.c Copyright (c) 2005 SDG Systems, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#define HCLK 33000000 /* 33 MHz (PCI clock) */
+
+#define SD_PCICFG_CLKSTOP 0x40 /* 0x1f = clock controller, 0 = stop */
+#define SD_PCICFG_GATEDCLK 0x41 /* Gated clock */
+#define SD_PCICFG_CLKMODE 0x42 /* Control clock of SD controller */
+#define SD_PCICFG_PINSTATUS 0x44 /* R/O: read status of SD pins */
+#define SD_PCICFG_POWER1 0x48
+#define SD_PCICFG_POWER2 0x49
+#define SD_PCICFG_POWER3 0x4a
+#define SD_PCICFG_CARDDETECT 0x4c
+#define SD_PCICFG_SLOTS 0x50 /* R/O: define support slot number */
+#define SD_PCICFG_EXTGATECLK1 0xf0 /* Could be used for gated clock */
+#define SD_PCICFG_EXTGATECLK2 0xf1 /* Could be used for gated clock */
+#define SD_PCICFG_EXTGATECLK3 0xf9 /* Bit 1: double buffer/single buffer */
+#define SD_PCICFG_SDLED_ENABLE1 0xfa
+#define SD_PCICFG_SDLED_ENABLE2 0xfe
+
+#define SD_PCICFG_CLKMODE_DIV_DISABLE BIT(0)
+#define SD_PCICFG_CLKSTOP_ENABLE_ALL 0x1f
+#define SD_PCICFG_LED_ENABLE1_START 0x12
+#define SD_PCICFG_LED_ENABLE2_START 0x80
+
+#define SD_PCICFG_PWR1_33V 0x08 /* Set for 3.3 volts */
+#define SD_PCICFG_PWR1_OFF 0x00 /* Turn off power */
+#define SD_PCICFG_PWR2_AUTO 0x02
+
+#define SD_CMD 0x00 /* also for SDIO */
+#define SD_ARG0 0x04 /* also for SDIO */
+#define SD_ARG1 0x06 /* also for SDIO */
+#define SD_STOPINTERNAL 0x08
+#define SD_BLOCKCOUNT 0x0a /* also for SDIO */
+#define SD_RESPONSE0 0x0c /* also for SDIO */
+#define SD_RESPONSE1 0x0e /* also for SDIO */
+#define SD_RESPONSE2 0x10 /* also for SDIO */
+#define SD_RESPONSE3 0x12 /* also for SDIO */
+#define SD_RESPONSE4 0x14 /* also for SDIO */
+#define SD_RESPONSE5 0x16 /* also for SDIO */
+#define SD_RESPONSE6 0x18 /* also for SDIO */
+#define SD_RESPONSE7 0x1a /* also for SDIO */
+#define SD_CARDSTATUS 0x1c /* also for SDIO */
+#define SD_BUFFERCTRL 0x1e /* also for SDIO */
+#define SD_INTMASKCARD 0x20 /* also for SDIO */
+#define SD_INTMASKBUFFER 0x22 /* also for SDIO */
+#define SD_CARDCLOCKCTRL 0x24
+#define SD_CARDXFERDATALEN 0x26 /* also for SDIO */
+#define SD_CARDOPTIONSETUP 0x28 /* also for SDIO */
+#define SD_ERRORSTATUS0 0x2c /* also for SDIO */
+#define SD_ERRORSTATUS1 0x2e /* also for SDIO */
+#define SD_DATAPORT 0x30 /* also for SDIO */
+#define SD_TRANSACTIONCTRL 0x34 /* also for SDIO */
+#define SD_SOFTWARERESET 0xe0 /* also for SDIO */
+
+/* registers above marked "also for SDIO" and all SDIO registers below can be
+ * accessed at SDIO_BASE + reg address */
+#define SDIO_BASE 0x100
+
+#define SDIO_CARDPORTSEL 0x02
+#define SDIO_CARDINTCTRL 0x36
+#define SDIO_CLOCKNWAITCTRL 0x38
+#define SDIO_HOSTINFORMATION 0x3a
+#define SDIO_ERRORCTRL 0x3c
+#define SDIO_LEDCTRL 0x3e
+
+#define SD_TRANSCTL_SET BIT(8)
+
+#define SD_CARDCLK_DIV_DISABLE BIT(15)
+#define SD_CARDCLK_ENABLE_CLOCK BIT(8)
+#define SD_CARDCLK_CLK_DIV_512 BIT(7)
+#define SD_CARDCLK_CLK_DIV_256 BIT(6)
+#define SD_CARDCLK_CLK_DIV_128 BIT(5)
+#define SD_CARDCLK_CLK_DIV_64 BIT(4)
+#define SD_CARDCLK_CLK_DIV_32 BIT(3)
+#define SD_CARDCLK_CLK_DIV_16 BIT(2)
+#define SD_CARDCLK_CLK_DIV_8 BIT(1)
+#define SD_CARDCLK_CLK_DIV_4 BIT(0)
+#define SD_CARDCLK_CLK_DIV_2 0
+
+#define SD_CARDOPT_REQUIRED 0x000e
+#define SD_CARDOPT_DATA_RESP_TIMEOUT(x) (((x) & 0x0f) << 4) /* 4 bits */
+#define SD_CARDOPT_C2_MODULE_ABSENT BIT(14)
+#define SD_CARDOPT_DATA_XFR_WIDTH_1 (1 << 15)
+#define SD_CARDOPT_DATA_XFR_WIDTH_4 (0 << 15)
+
+#define SD_CMD_TYPE_CMD (0 << 6)
+#define SD_CMD_TYPE_ACMD (1 << 6)
+#define SD_CMD_TYPE_AUTHEN (2 << 6)
+#define SD_CMD_RESP_TYPE_NONE (3 << 8)
+#define SD_CMD_RESP_TYPE_EXT_R1 (4 << 8)
+#define SD_CMD_RESP_TYPE_EXT_R1B (5 << 8)
+#define SD_CMD_RESP_TYPE_EXT_R2 (6 << 8)
+#define SD_CMD_RESP_TYPE_EXT_R3 (7 << 8)
+#define SD_CMD_RESP_TYPE_EXT_R6 (4 << 8)
+#define SD_CMD_RESP_TYPE_EXT_R7 (4 << 8)
+#define SD_CMD_DATA_PRESENT BIT(11)
+#define SD_CMD_TRANSFER_READ BIT(12)
+#define SD_CMD_MULTI_BLOCK BIT(13)
+#define SD_CMD_SECURITY_CMD BIT(14)
+
+#define SD_STOPINT_ISSUE_CMD12 BIT(0)
+#define SD_STOPINT_AUTO_ISSUE_CMD12 BIT(8)
+
+#define SD_CARD_RESP_END BIT(0)
+#define SD_CARD_RW_END BIT(2)
+#define SD_CARD_CARD_REMOVED_0 BIT(3)
+#define SD_CARD_CARD_INSERTED_0 BIT(4)
+#define SD_CARD_PRESENT_0 BIT(5)
+#define SD_CARD_UNK6 BIT(6)
+#define SD_CARD_WRITE_PROTECT BIT(7)
+#define SD_CARD_CARD_REMOVED_3 BIT(8)
+#define SD_CARD_CARD_INSERTED_3 BIT(9)
+#define SD_CARD_PRESENT_3 BIT(10)
+
+#define SD_BUF_CMD_INDEX_ERR BIT(16)
+#define SD_BUF_CRC_ERR BIT(17)
+#define SD_BUF_STOP_BIT_END_ERR BIT(18)
+#define SD_BUF_DATA_TIMEOUT BIT(19)
+#define SD_BUF_OVERFLOW BIT(20)
+#define SD_BUF_UNDERFLOW BIT(21)
+#define SD_BUF_CMD_TIMEOUT BIT(22)
+#define SD_BUF_UNK7 BIT(23)
+#define SD_BUF_READ_ENABLE BIT(24)
+#define SD_BUF_WRITE_ENABLE BIT(25)
+#define SD_BUF_ILLEGAL_FUNCTION BIT(29)
+#define SD_BUF_CMD_BUSY BIT(30)
+#define SD_BUF_ILLEGAL_ACCESS BIT(31)
+
+#define SD_ERR0_RESP_CMD_ERR BIT(0)
+#define SD_ERR0_RESP_NON_CMD12_END_BIT_ERR BIT(2)
+#define SD_ERR0_RESP_CMD12_END_BIT_ERR BIT(3)
+#define SD_ERR0_READ_DATA_END_BIT_ERR BIT(4)
+#define SD_ERR0_WRITE_CRC_STATUS_END_BIT_ERR BIT(5)
+#define SD_ERR0_RESP_NON_CMD12_CRC_ERR BIT(8)
+#define SD_ERR0_RESP_CMD12_CRC_ERR BIT(9)
+#define SD_ERR0_READ_DATA_CRC_ERR BIT(10)
+#define SD_ERR0_WRITE_CMD_CRC_ERR BIT(11)
+
+#define SD_ERR1_NO_CMD_RESP BIT(16)
+#define SD_ERR1_TIMEOUT_READ_DATA BIT(20)
+#define SD_ERR1_TIMEOUT_CRS_STATUS BIT(21)
+#define SD_ERR1_TIMEOUT_CRC_BUSY BIT(22)
+
+#define IRQ_DONT_CARE_BITS (SD_CARD_PRESENT_3 \
+ | SD_CARD_WRITE_PROTECT \
+ | SD_CARD_UNK6 \
+ | SD_CARD_PRESENT_0 \
+ | SD_BUF_UNK7 \
+ | SD_BUF_CMD_BUSY)
+
+struct toshsd_host {
+ struct pci_dev *pdev;
+ struct mmc_host *mmc;
+
+ spinlock_t lock;
+
+ struct mmc_request *mrq;/* Current request */
+ struct mmc_command *cmd;/* Current command */
+ struct mmc_data *data; /* Current data request */
+
+ struct sg_mapping_iter sg_miter; /* for PIO */
+
+ void __iomem *ioaddr; /* mapped address */
+};
diff --git a/kernel/drivers/mmc/host/usdhi6rol0.c b/kernel/drivers/mmc/host/usdhi6rol0.c
new file mode 100644
index 000000000..54b082b18
--- /dev/null
+++ b/kernel/drivers/mmc/host/usdhi6rol0.c
@@ -0,0 +1,1846 @@
+/*
+ * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/virtio.h>
+#include <linux/workqueue.h>
+
+#define USDHI6_SD_CMD 0x0000
+#define USDHI6_SD_PORT_SEL 0x0004
+#define USDHI6_SD_ARG 0x0008
+#define USDHI6_SD_STOP 0x0010
+#define USDHI6_SD_SECCNT 0x0014
+#define USDHI6_SD_RSP10 0x0018
+#define USDHI6_SD_RSP32 0x0020
+#define USDHI6_SD_RSP54 0x0028
+#define USDHI6_SD_RSP76 0x0030
+#define USDHI6_SD_INFO1 0x0038
+#define USDHI6_SD_INFO2 0x003c
+#define USDHI6_SD_INFO1_MASK 0x0040
+#define USDHI6_SD_INFO2_MASK 0x0044
+#define USDHI6_SD_CLK_CTRL 0x0048
+#define USDHI6_SD_SIZE 0x004c
+#define USDHI6_SD_OPTION 0x0050
+#define USDHI6_SD_ERR_STS1 0x0058
+#define USDHI6_SD_ERR_STS2 0x005c
+#define USDHI6_SD_BUF0 0x0060
+#define USDHI6_SDIO_MODE 0x0068
+#define USDHI6_SDIO_INFO1 0x006c
+#define USDHI6_SDIO_INFO1_MASK 0x0070
+#define USDHI6_CC_EXT_MODE 0x01b0
+#define USDHI6_SOFT_RST 0x01c0
+#define USDHI6_VERSION 0x01c4
+#define USDHI6_HOST_MODE 0x01c8
+#define USDHI6_SDIF_MODE 0x01cc
+
+#define USDHI6_SD_CMD_APP 0x0040
+#define USDHI6_SD_CMD_MODE_RSP_AUTO 0x0000
+#define USDHI6_SD_CMD_MODE_RSP_NONE 0x0300
+#define USDHI6_SD_CMD_MODE_RSP_R1 0x0400 /* Also R5, R6, R7 */
+#define USDHI6_SD_CMD_MODE_RSP_R1B 0x0500 /* R1b */
+#define USDHI6_SD_CMD_MODE_RSP_R2 0x0600
+#define USDHI6_SD_CMD_MODE_RSP_R3 0x0700 /* Also R4 */
+#define USDHI6_SD_CMD_DATA 0x0800
+#define USDHI6_SD_CMD_READ 0x1000
+#define USDHI6_SD_CMD_MULTI 0x2000
+#define USDHI6_SD_CMD_CMD12_AUTO_OFF 0x4000
+
+#define USDHI6_CC_EXT_MODE_SDRW BIT(1)
+
+#define USDHI6_SD_INFO1_RSP_END BIT(0)
+#define USDHI6_SD_INFO1_ACCESS_END BIT(2)
+#define USDHI6_SD_INFO1_CARD_OUT BIT(3)
+#define USDHI6_SD_INFO1_CARD_IN BIT(4)
+#define USDHI6_SD_INFO1_CD BIT(5)
+#define USDHI6_SD_INFO1_WP BIT(7)
+#define USDHI6_SD_INFO1_D3_CARD_OUT BIT(8)
+#define USDHI6_SD_INFO1_D3_CARD_IN BIT(9)
+
+#define USDHI6_SD_INFO2_CMD_ERR BIT(0)
+#define USDHI6_SD_INFO2_CRC_ERR BIT(1)
+#define USDHI6_SD_INFO2_END_ERR BIT(2)
+#define USDHI6_SD_INFO2_TOUT BIT(3)
+#define USDHI6_SD_INFO2_IWA_ERR BIT(4)
+#define USDHI6_SD_INFO2_IRA_ERR BIT(5)
+#define USDHI6_SD_INFO2_RSP_TOUT BIT(6)
+#define USDHI6_SD_INFO2_SDDAT0 BIT(7)
+#define USDHI6_SD_INFO2_BRE BIT(8)
+#define USDHI6_SD_INFO2_BWE BIT(9)
+#define USDHI6_SD_INFO2_SCLKDIVEN BIT(13)
+#define USDHI6_SD_INFO2_CBSY BIT(14)
+#define USDHI6_SD_INFO2_ILA BIT(15)
+
+#define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN)
+#define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT)
+#define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT)
+#define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT)
+
+#define USDHI6_SD_INFO2_ERR (USDHI6_SD_INFO2_CMD_ERR | \
+ USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR | \
+ USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR | \
+ USDHI6_SD_INFO2_IRA_ERR | USDHI6_SD_INFO2_RSP_TOUT | \
+ USDHI6_SD_INFO2_ILA)
+
+#define USDHI6_SD_INFO1_IRQ (USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END | \
+ USDHI6_SD_INFO1_CARD)
+
+#define USDHI6_SD_INFO2_IRQ (USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \
+ USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA)
+
+#define USDHI6_SD_CLK_CTRL_SCLKEN BIT(8)
+
+#define USDHI6_SD_STOP_STP BIT(0)
+#define USDHI6_SD_STOP_SEC BIT(8)
+
+#define USDHI6_SDIO_INFO1_IOIRQ BIT(0)
+#define USDHI6_SDIO_INFO1_EXPUB52 BIT(14)
+#define USDHI6_SDIO_INFO1_EXWT BIT(15)
+
+#define USDHI6_SD_ERR_STS1_CRC_NO_ERROR BIT(13)
+
+#define USDHI6_SOFT_RST_RESERVED (BIT(1) | BIT(2))
+#define USDHI6_SOFT_RST_RESET BIT(0)
+
+#define USDHI6_SD_OPTION_TIMEOUT_SHIFT 4
+#define USDHI6_SD_OPTION_TIMEOUT_MASK (0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT)
+#define USDHI6_SD_OPTION_WIDTH_1 BIT(15)
+
+#define USDHI6_SD_PORT_SEL_PORTS_SHIFT 8
+
+#define USDHI6_SD_CLK_CTRL_DIV_MASK 0xff
+
+#define USDHI6_SDIO_INFO1_IRQ (USDHI6_SDIO_INFO1_IOIRQ | 3 | \
+ USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT)
+
+#define USDHI6_MIN_DMA 64
+
+enum usdhi6_wait_for {
+ USDHI6_WAIT_FOR_REQUEST,
+ USDHI6_WAIT_FOR_CMD,
+ USDHI6_WAIT_FOR_MREAD,
+ USDHI6_WAIT_FOR_MWRITE,
+ USDHI6_WAIT_FOR_READ,
+ USDHI6_WAIT_FOR_WRITE,
+ USDHI6_WAIT_FOR_DATA_END,
+ USDHI6_WAIT_FOR_STOP,
+ USDHI6_WAIT_FOR_DMA,
+};
+
+struct usdhi6_page {
+ struct page *page;
+ void *mapped; /* mapped page */
+};
+
+struct usdhi6_host {
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ void __iomem *base;
+ struct clk *clk;
+
+ /* SG memory handling */
+
+ /* Common for multiple and single block requests */
+ struct usdhi6_page pg; /* current page from an SG */
+ void *blk_page; /* either a mapped page, or the bounce buffer */
+ size_t offset; /* offset within a page, including sg->offset */
+
+ /* Blocks, crossing a page boundary */
+ size_t head_len;
+ struct usdhi6_page head_pg;
+
+ /* A bounce buffer for unaligned blocks or blocks crossing a page boundary */
+ struct scatterlist bounce_sg;
+ u8 bounce_buf[512];
+
+ /* Multiple block requests only */
+ struct scatterlist *sg; /* current SG segment */
+ int page_idx; /* page index within an SG segment */
+
+ enum usdhi6_wait_for wait;
+ u32 status_mask;
+ u32 status2_mask;
+ u32 sdio_mask;
+ u32 io_error;
+ u32 irq_status;
+ unsigned long imclk;
+ unsigned long rate;
+ bool app_cmd;
+
+ /* Timeout handling */
+ struct delayed_work timeout_work;
+ unsigned long timeout;
+
+ /* DMA support */
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ bool dma_active;
+};
+
+/* I/O primitives */
+
+static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
+{
+ iowrite32(data, host->base + reg);
+ dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
+ host->base, reg, data);
+}
+
+static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
+{
+ iowrite16(data, host->base + reg);
+ dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
+ host->base, reg, data);
+}
+
+static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
+{
+ u32 data = ioread32(host->base + reg);
+ dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
+ host->base, reg, data);
+ return data;
+}
+
+static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg)
+{
+ u16 data = ioread16(host->base + reg);
+ dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
+ host->base, reg, data);
+ return data;
+}
+
+static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2)
+{
+ host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1;
+ host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2;
+ usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask);
+ usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask);
+}
+
+static void usdhi6_wait_for_resp(struct usdhi6_host *host)
+{
+ usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END |
+ USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD,
+ USDHI6_SD_INFO2_ERR);
+}
+
+static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read)
+{
+ usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END |
+ USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR |
+ (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE));
+}
+
+static void usdhi6_only_cd(struct usdhi6_host *host)
+{
+ /* Mask all except card hotplug */
+ usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0);
+}
+
+static void usdhi6_mask_all(struct usdhi6_host *host)
+{
+ usdhi6_irq_enable(host, 0, 0);
+}
+
+static int usdhi6_error_code(struct usdhi6_host *host)
+{
+ u32 err;
+
+ usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP);
+
+ if (host->io_error &
+ (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) {
+ u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54);
+ int opc = host->mrq ? host->mrq->cmd->opcode : -1;
+
+ err = usdhi6_read(host, USDHI6_SD_ERR_STS2);
+ /* Response timeout is often normal, don't spam the log */
+ if (host->wait == USDHI6_WAIT_FOR_CMD)
+ dev_dbg(mmc_dev(host->mmc),
+ "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
+ err, rsp54, host->wait, opc);
+ else
+ dev_warn(mmc_dev(host->mmc),
+ "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
+ err, rsp54, host->wait, opc);
+ return -ETIMEDOUT;
+ }
+
+ err = usdhi6_read(host, USDHI6_SD_ERR_STS1);
+ if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR)
+ dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n",
+ err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1);
+ if (host->io_error & USDHI6_SD_INFO2_ILA)
+ return -EILSEQ;
+
+ return -EIO;
+}
+
+/* Scatter-Gather management */
+
+/*
+ * In PIO mode we have to map each page separately, using kmap(). That way
+ * adjacent pages are mapped to non-adjacent virtual addresses. That's why we
+ * have to use a bounce buffer for blocks crossing page boundaries. Such blocks
+ * have been observed with an SDIO WiFi card (b43 driver).
+ */
+static void usdhi6_blk_bounce(struct usdhi6_host *host,
+ struct scatterlist *sg)
+{
+ struct mmc_data *data = host->mrq->data;
+ size_t blk_head = host->head_len;
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n",
+ __func__, host->mrq->cmd->opcode, data->sg_len,
+ data->blksz, data->blocks, sg->offset);
+
+ host->head_pg.page = host->pg.page;
+ host->head_pg.mapped = host->pg.mapped;
+ host->pg.page = nth_page(host->pg.page, 1);
+ host->pg.mapped = kmap(host->pg.page);
+
+ host->blk_page = host->bounce_buf;
+ host->offset = 0;
+
+ if (data->flags & MMC_DATA_READ)
+ return;
+
+ memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
+ blk_head);
+ memcpy(host->bounce_buf + blk_head, host->pg.mapped,
+ data->blksz - blk_head);
+}
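+
+/*
+ * Example (illustrative numbers): with 4 KiB pages, a 512-byte block in
+ * an SG entry at sg->offset = 3840 leaves head_len = 256 bytes at the end
+ * of the first page and 256 bytes at the start of the next. Both halves
+ * are staged in bounce_buf: writes are assembled above, reads are copied
+ * back in usdhi6_sg_unmap().
+ */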
+
+/* Only called for multiple block IO */
+static void usdhi6_sg_prep(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_data *data = mrq->data;
+
+ usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);
+
+ host->sg = data->sg;
+ /* TODO: if we always map, this is redundant */
+ host->offset = host->sg->offset;
+}
+
+/* Map the first page in an SG segment: common for multiple and single block IO */
+static void *usdhi6_sg_map(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
+ size_t head = PAGE_SIZE - sg->offset;
+ size_t blk_head = head % data->blksz;
+
+ WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
+ if (WARN(sg_dma_len(sg) % data->blksz,
+ "SG size %u isn't a multiple of block size %u\n",
+ sg_dma_len(sg), data->blksz))
+ return NULL;
+
+ host->pg.page = sg_page(sg);
+ host->pg.mapped = kmap(host->pg.page);
+ host->offset = sg->offset;
+
+ /*
+ * Block size must be a power of 2 for multi-block transfers,
+ * therefore blk_head is equal for all pages in this SG
+ */
+ host->head_len = blk_head;
+
+ if (head < data->blksz)
+ /*
+ * The first block in the SG crosses a page boundary.
+ * Max blksz = 512, so blocks can only span 2 pages
+ */
+ usdhi6_blk_bounce(host, sg);
+ else
+ host->blk_page = host->pg.mapped;
+
+ dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
+ host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
+ sg->offset, host->mrq->cmd->opcode, host->mrq);
+
+ return host->blk_page + host->offset;
+}
+
+/* Unmap the current page: common for multiple and single block IO */
+static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
+{
+ struct mmc_data *data = host->mrq->data;
+ struct page *page = host->head_pg.page;
+
+ if (page) {
+ /* Previous block was cross-page boundary */
+ struct scatterlist *sg = data->sg_len > 1 ?
+ host->sg : data->sg;
+ size_t blk_head = host->head_len;
+
+ if (!data->error && data->flags & MMC_DATA_READ) {
+ memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
+ host->bounce_buf, blk_head);
+ memcpy(host->pg.mapped, host->bounce_buf + blk_head,
+ data->blksz - blk_head);
+ }
+
+ flush_dcache_page(page);
+ kunmap(page);
+
+ host->head_pg.page = NULL;
+
+ if (!force && sg_dma_len(sg) + sg->offset >
+ (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
+ /* More blocks in this SG, don't unmap the next page */
+ return;
+ }
+
+ page = host->pg.page;
+ if (!page)
+ return;
+
+ flush_dcache_page(page);
+ kunmap(page);
+
+ host->pg.page = NULL;
+}
+
+/* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */
+static void usdhi6_sg_advance(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ size_t done, total;
+
+ /* New offset: set at the end of the previous block */
+ if (host->head_pg.page) {
+ /* Finished a cross-page block, jump to the new page */
+ host->page_idx++;
+ host->offset = data->blksz - host->head_len;
+ host->blk_page = host->pg.mapped;
+ usdhi6_sg_unmap(host, false);
+ } else {
+ host->offset += data->blksz;
+ /* The completed block didn't cross a page boundary */
+ if (host->offset == PAGE_SIZE) {
+ /* If required, we'll map the page below */
+ host->offset = 0;
+ host->page_idx++;
+ }
+ }
+
+ /*
+ * Now host->blk_page + host->offset points at the end of our last block,
+ * and host->page_idx is the index of the page in which our new block
+ * is located, if any
+ */
+
+ done = (host->page_idx << PAGE_SHIFT) + host->offset;
+ total = host->sg->offset + sg_dma_len(host->sg);
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__,
+ done, total, host->offset);
+
+ if (done < total && host->offset) {
+ /* More blocks in this page */
+ if (host->offset + data->blksz > PAGE_SIZE)
+ /* We have reached a block that spans 2 pages */
+ usdhi6_blk_bounce(host, host->sg);
+
+ return;
+ }
+
+ /* Finished current page or an SG segment */
+ usdhi6_sg_unmap(host, false);
+
+ if (done == total) {
+ /*
+ * End of an SG segment or the complete SG: jump to the next
+ * segment, we'll map it later in usdhi6_blk_read() or
+ * usdhi6_blk_write()
+ */
+ struct scatterlist *next = sg_next(host->sg);
+
+ host->page_idx = 0;
+
+ if (!next)
+ host->wait = USDHI6_WAIT_FOR_DATA_END;
+ host->sg = next;
+
+ if (WARN(next && sg_dma_len(next) % data->blksz,
+ "SG size %u isn't a multiple of block size %u\n",
+ sg_dma_len(next), data->blksz))
+ data->error = -EINVAL;
+
+ return;
+ }
+
+ /* We cannot get here after crossing a page border */
+
+ /* Next page in the same SG */
+ host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
+ host->pg.mapped = kmap(host->pg.page);
+ host->blk_page = host->pg.mapped;
+
+ dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n",
+ host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
+ host->mrq->cmd->opcode, host->mrq);
+}
+
+/* DMA handling */
+
+static void usdhi6_dma_release(struct usdhi6_host *host)
+{
+ host->dma_active = false;
+ if (host->chan_tx) {
+ struct dma_chan *chan = host->chan_tx;
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ }
+ if (host->chan_rx) {
+ struct dma_chan *chan = host->chan_rx;
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ }
+}
+
+static void usdhi6_dma_stop_unmap(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+
+ if (!host->dma_active)
+ return;
+
+ usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
+ host->dma_active = false;
+
+ if (data->flags & MMC_DATA_READ)
+ dma_unmap_sg(host->chan_rx->device->dev, data->sg,
+ data->sg_len, DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(host->chan_tx->device->dev, data->sg,
+ data->sg_len, DMA_TO_DEVICE);
+}
+
+static void usdhi6_dma_complete(void *arg)
+{
+ struct usdhi6_host *host = arg;
+ struct mmc_request *mrq = host->mrq;
+
+ if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n",
+ dev_name(mmc_dev(host->mmc)), mrq))
+ return;
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__,
+ mrq->cmd->opcode);
+
+ usdhi6_dma_stop_unmap(host);
+ usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
+}
+
+static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan,
+ enum dma_transfer_direction dir)
+{
+ struct mmc_data *data = host->mrq->data;
+ struct scatterlist *sg = data->sg;
+ struct dma_async_tx_descriptor *desc = NULL;
+ dma_cookie_t cookie = -EINVAL;
+ enum dma_data_direction data_dir;
+ int ret;
+
+ switch (dir) {
+ case DMA_MEM_TO_DEV:
+ data_dir = DMA_TO_DEVICE;
+ break;
+ case DMA_DEV_TO_MEM:
+ data_dir = DMA_FROM_DEVICE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir);
+ if (ret > 0) {
+ host->dma_active = true;
+ desc = dmaengine_prep_slave_sg(chan, sg, ret, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ }
+
+ if (desc) {
+ desc->callback = usdhi6_dma_complete;
+ desc->callback_param = host;
+ cookie = dmaengine_submit(desc);
+ }
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n",
+ __func__, data->sg_len, ret, cookie, desc);
+
+ if (cookie < 0) {
+ /* DMA failed, fall back to PIO */
+ if (ret >= 0)
+ ret = cookie;
+ usdhi6_dma_release(host);
+ dev_warn(mmc_dev(host->mmc),
+ "DMA failed: %d, falling back to PIO\n", ret);
+ }
+
+ return cookie;
+}
+
+static int usdhi6_dma_start(struct usdhi6_host *host)
+{
+ if (!host->chan_rx || !host->chan_tx)
+ return -ENODEV;
+
+ if (host->mrq->data->flags & MMC_DATA_READ)
+ return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM);
+
+ return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV);
+}
+
+static void usdhi6_dma_kill(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n",
+ __func__, data->sg_len, data->blocks, data->blksz);
+ /* Abort DMA */
+ if (data->flags & MMC_DATA_READ)
+ dmaengine_terminate_all(host->chan_rx);
+ else
+ dmaengine_terminate_all(host->chan_tx);
+}
+
+static void usdhi6_dma_check_error(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n",
+ __func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1));
+
+ if (host->io_error) {
+ data->error = usdhi6_error_code(host);
+ data->bytes_xfered = 0;
+ usdhi6_dma_kill(host);
+ usdhi6_dma_release(host);
+ dev_warn(mmc_dev(host->mmc),
+ "DMA failed: %d, falling back to PIO\n", data->error);
+ return;
+ }
+
+ /*
+ * The datasheet tells us to check a response from the card, whereas
+ * responses only come after the command phase, not after the data
+ * phase. Let's check anyway.
+ */
+ if (host->irq_status & USDHI6_SD_INFO1_RSP_END)
+ dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n");
+}
+
+static void usdhi6_dma_kick(struct usdhi6_host *host)
+{
+ if (host->mrq->data->flags & MMC_DATA_READ)
+ dma_async_issue_pending(host->chan_rx);
+ else
+ dma_async_issue_pending(host->chan_tx);
+}
+
+static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
+{
+ struct dma_slave_config cfg = {
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
+ int ret;
+
+ host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+ dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
+ host->chan_tx);
+
+ if (!host->chan_tx)
+ return;
+
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.dst_addr = start + USDHI6_SD_BUF0;
+ cfg.dst_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */
+ cfg.src_addr = 0;
+ ret = dmaengine_slave_config(host->chan_tx, &cfg);
+ if (ret < 0)
+ goto e_release_tx;
+
+ host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
+ dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
+ host->chan_rx);
+
+ if (!host->chan_rx)
+ goto e_release_tx;
+
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.src_addr = cfg.dst_addr;
+ cfg.src_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */
+ cfg.dst_addr = 0;
+ ret = dmaengine_slave_config(host->chan_rx, &cfg);
+ if (ret < 0)
+ goto e_release_rx;
+
+ return;
+
+e_release_rx:
+ dma_release_channel(host->chan_rx);
+ host->chan_rx = NULL;
+e_release_tx:
+ dma_release_channel(host->chan_tx);
+ host->chan_tx = NULL;
+}
+
+/* API helpers */
+
+static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios)
+{
+ unsigned long rate = ios->clock;
+ u32 val;
+ unsigned int i;
+
+ for (i = 1000; i; i--) {
+ if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN)
+ break;
+ usleep_range(10, 100);
+ }
+
+ if (!i) {
+ dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n");
+ return;
+ }
+
+ val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK;
+
+ if (rate) {
+ unsigned long new_rate;
+
+ if (host->imclk <= rate) {
+ if (ios->timing != MMC_TIMING_UHS_DDR50) {
+ /* Cannot have 1-to-1 clock in DDR mode */
+ new_rate = host->imclk;
+ val |= 0xff;
+ } else {
+ new_rate = host->imclk / 2;
+ }
+ } else {
+ unsigned long div =
+ roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate));
+ val |= div >> 2;
+ new_rate = host->imclk / div;
+ }
+
+ if (host->rate == new_rate)
+ return;
+
+ host->rate = new_rate;
+
+ dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n",
+ rate, (val & 0xff) << 2, new_rate);
+ }
+
+ /*
+ * If the old or the new rate equals the input clock rate, the clock has
+ * to be switched off before the change and switched back on afterwards
+ */
+ if (host->imclk == rate || host->imclk == host->rate || !rate)
+ usdhi6_write(host, USDHI6_SD_CLK_CTRL,
+ val & ~USDHI6_SD_CLK_CTRL_SCLKEN);
+
+ if (!rate) {
+ host->rate = 0;
+ return;
+ }
+
+ usdhi6_write(host, USDHI6_SD_CLK_CTRL, val);
+
+ if (host->imclk == rate || host->imclk == host->rate ||
+ !(val & USDHI6_SD_CLK_CTRL_SCLKEN))
+ usdhi6_write(host, USDHI6_SD_CLK_CTRL,
+ val | USDHI6_SD_CLK_CTRL_SCLKEN);
+}
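+
+/*
+ * Divider example (illustrative values): with imclk = 100 MHz and a
+ * target rate of 400 kHz, DIV_ROUND_UP() yields 250 and
+ * roundup_pow_of_two() rounds that to div = 256, so val |= 256 >> 2 =
+ * 0x40 and the card clock becomes 100 MHz / 256 ~= 390 kHz, the fastest
+ * rate not exceeding the target.
+ */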
+
+static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ if (!IS_ERR(mmc->supply.vmmc))
+ /* Errors ignored... */
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+ ios->power_mode ? ios->vdd : 0);
+}
+
+static int usdhi6_reset(struct usdhi6_host *host)
+{
+ int i;
+
+ usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED);
+ cpu_relax();
+ usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET);
+ for (i = 1000; i; i--)
+ if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET)
+ break;
+
+ return i ? 0 : -ETIMEDOUT;
+}
+
+static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct usdhi6_host *host = mmc_priv(mmc);
+ u32 option, mode;
+ int ret;
+
+ dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n",
+ ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ usdhi6_set_power(host, ios);
+ usdhi6_only_cd(host);
+ break;
+ case MMC_POWER_UP:
+ /*
+ * USDHI6_SD_OPTION is otherwise only touched from .request(), which
+ * cannot race with MMC_POWER_UP
+ */
+ ret = usdhi6_reset(host);
+ if (ret < 0) {
+ dev_err(mmc_dev(mmc), "Cannot reset the interface!\n");
+ } else {
+ usdhi6_set_power(host, ios);
+ usdhi6_only_cd(host);
+ }
+ break;
+ case MMC_POWER_ON:
+ option = usdhi6_read(host, USDHI6_SD_OPTION);
+ /*
+ * The eMMC standard only allows 4 or 8 bits in the DDR mode,
+ * the same probably holds for SD cards. We check here anyway,
+ * since the datasheet explicitly requires 4 bits for DDR.
+ */
+ if (ios->bus_width == MMC_BUS_WIDTH_1) {
+ if (ios->timing == MMC_TIMING_UHS_DDR50)
+ dev_err(mmc_dev(mmc),
+ "4 bits are required for DDR\n");
+ option |= USDHI6_SD_OPTION_WIDTH_1;
+ mode = 0;
+ } else {
+ option &= ~USDHI6_SD_OPTION_WIDTH_1;
+ mode = ios->timing == MMC_TIMING_UHS_DDR50;
+ }
+ usdhi6_write(host, USDHI6_SD_OPTION, option);
+ usdhi6_write(host, USDHI6_SDIF_MODE, mode);
+ break;
+ }
+
+ if (host->rate != ios->clock)
+ usdhi6_clk_set(host, ios);
+}
+
+/* This is the data timeout; the response timeout is fixed at 640 clock cycles */
+static void usdhi6_timeout_set(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ u32 val;
+ unsigned long ticks;
+
+ if (!mrq->data)
+ ticks = host->rate / 1000 * mrq->cmd->busy_timeout;
+ else
+ ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
+ mrq->data->timeout_clks;
+
+ if (!ticks || ticks > 1 << 27)
+ /* Max timeout */
+ val = 14;
+ else if (ticks < 1 << 13)
+ /* Min timeout */
+ val = 0;
+ else
+ val = order_base_2(ticks) - 13;
+
+ dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n",
+ mrq->data ? "data" : "cmd", ticks, host->rate);
+
+ /* Timeout Counter mask: 0xf0 */
+ usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) |
+ (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK));
+}
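+
+/*
+ * Example (illustrative, assuming the counter expires after 2^(13 + val)
+ * clocks, as the min/max clamps above suggest): at host->rate = 25 MHz,
+ * timeout_ns = 100 ms and timeout_clks = 0 give ticks = 25 * 100000 =
+ * 2500000; order_base_2() returns 22, so val = 9 and the timeout fires
+ * after 2^22 clocks, roughly 168 ms.
+ */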
+
+static void usdhi6_request_done(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_data *data = mrq->data;
+
+ if (WARN(host->pg.page || host->head_pg.page,
+ "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%zx %ux%u in SG%u!\n",
+ host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode,
+ data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-',
+ data ? host->offset : 0, data ? data->blocks : 0,
+ data ? data->blksz : 0, data ? data->sg_len : 0))
+ usdhi6_sg_unmap(host, true);
+
+ if (mrq->cmd->error ||
+ (data && data->error) ||
+ (mrq->stop && mrq->stop->error))
+ dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n",
+ __func__, mrq->cmd->opcode, data ? data->blocks : 0,
+ data ? data->blksz : 0,
+ mrq->cmd->error,
+ data ? data->error : 1,
+ mrq->stop ? mrq->stop->error : 1);
+
+ /* Disable DMA */
+ usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
+ host->wait = USDHI6_WAIT_FOR_REQUEST;
+ host->mrq = NULL;
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static int usdhi6_cmd_flags(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = mrq->cmd;
+ u16 opc = cmd->opcode;
+
+ if (host->app_cmd) {
+ host->app_cmd = false;
+ opc |= USDHI6_SD_CMD_APP;
+ }
+
+ if (mrq->data) {
+ opc |= USDHI6_SD_CMD_DATA;
+
+ if (mrq->data->flags & MMC_DATA_READ)
+ opc |= USDHI6_SD_CMD_READ;
+
+ if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+ cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
+ (cmd->opcode == SD_IO_RW_EXTENDED &&
+ mrq->data->blocks > 1)) {
+ opc |= USDHI6_SD_CMD_MULTI;
+ if (!mrq->stop)
+ opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF;
+ }
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ opc |= USDHI6_SD_CMD_MODE_RSP_NONE;
+ break;
+ case MMC_RSP_R1:
+ opc |= USDHI6_SD_CMD_MODE_RSP_R1;
+ break;
+ case MMC_RSP_R1B:
+ opc |= USDHI6_SD_CMD_MODE_RSP_R1B;
+ break;
+ case MMC_RSP_R2:
+ opc |= USDHI6_SD_CMD_MODE_RSP_R2;
+ break;
+ case MMC_RSP_R3:
+ opc |= USDHI6_SD_CMD_MODE_RSP_R3;
+ break;
+ default:
+ dev_warn(mmc_dev(host->mmc),
+ "Unknown response type %d\n",
+ mmc_resp_type(cmd));
+ return -EINVAL;
+ }
+ }
+
+ return opc;
+}
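+
+/*
+ * Example encoding (illustrative): an open-ended CMD18
+ * (MMC_READ_MULTIPLE_BLOCK, R1 response, with a stop command) becomes
+ * 18 | USDHI6_SD_CMD_DATA | USDHI6_SD_CMD_READ | USDHI6_SD_CMD_MULTI |
+ * USDHI6_SD_CMD_MODE_RSP_R1 = 0x3c12. CMD12_AUTO_OFF stays clear, which,
+ * judging by the flag name, lets the controller terminate the transfer
+ * with an automatic CMD12.
+ */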
+
+static int usdhi6_rq_start(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ int opc = usdhi6_cmd_flags(host);
+ int i;
+
+ if (opc < 0)
+ return opc;
+
+ for (i = 1000; i; i--) {
+ if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY))
+ break;
+ usleep_range(10, 100);
+ }
+
+ if (!i) {
+ dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n");
+ return -EAGAIN;
+ }
+
+ if (data) {
+ bool use_dma;
+ int ret = 0;
+
+ host->page_idx = 0;
+
+ if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) {
+ switch (data->blksz) {
+ case 512:
+ break;
+ case 32:
+ case 64:
+ case 128:
+ case 256:
+ if (mrq->stop)
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ } else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+ cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) &&
+ data->blksz != 512) {
+ ret = -EINVAL;
+ }
+
+ if (ret < 0) {
+ dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n",
+ __func__, data->blocks, data->blksz);
+ return -EINVAL;
+ }
+
+ if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+ cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
+ (cmd->opcode == SD_IO_RW_EXTENDED &&
+ data->blocks > 1))
+ usdhi6_sg_prep(host);
+
+ usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);
+
+ if ((data->blksz >= USDHI6_MIN_DMA ||
+ data->blocks > 1) &&
+ (data->blksz % 4 ||
+ data->sg->offset % 4))
+ dev_dbg(mmc_dev(host->mmc),
+ "Bad SG of %u: %ux%u @ %u\n", data->sg_len,
+ data->blksz, data->blocks, data->sg->offset);
+
+ /* Enable DMA for USDHI6_MIN_DMA bytes or more */
+ use_dma = data->blksz >= USDHI6_MIN_DMA &&
+ !(data->blksz % 4) &&
+ usdhi6_dma_start(host) >= DMA_MIN_COOKIE;
+
+ if (use_dma)
+ usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW);
+
+ dev_dbg(mmc_dev(host->mmc),
+ "%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n",
+ __func__, cmd->opcode, data->blocks, data->blksz,
+ data->sg_len, use_dma ? "DMA" : "PIO",
+ data->flags & MMC_DATA_READ ? "read" : "write",
+ data->sg->offset, mrq->stop ? " + stop" : "");
+ } else {
+ dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n",
+ __func__, cmd->opcode);
+ }
+
+ /* We have to get a command completion interrupt with DMA too */
+ usdhi6_wait_for_resp(host);
+
+ host->wait = USDHI6_WAIT_FOR_CMD;
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+
+ /* SEC bit is required to enable block counting by the core */
+ usdhi6_write(host, USDHI6_SD_STOP,
+ data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0);
+ usdhi6_write(host, USDHI6_SD_ARG, cmd->arg);
+
+ /* Kick command execution */
+ usdhi6_write(host, USDHI6_SD_CMD, opc);
+
+ return 0;
+}
+
+static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct usdhi6_host *host = mmc_priv(mmc);
+ int ret;
+
+ cancel_delayed_work_sync(&host->timeout_work);
+
+ host->mrq = mrq;
+ host->sg = NULL;
+
+ usdhi6_timeout_set(host);
+ ret = usdhi6_rq_start(host);
+ if (ret < 0) {
+ mrq->cmd->error = ret;
+ usdhi6_request_done(host);
+ }
+}
+
+static int usdhi6_get_cd(struct mmc_host *mmc)
+{
+ struct usdhi6_host *host = mmc_priv(mmc);
+ /* Read is atomic, no need to lock */
+ u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD;
+
+/*
+ * level status.CD CD_ACTIVE_HIGH card present
+ * 1 0 0 0
+ * 1 0 1 1
+ * 0 1 0 1
+ * 0 1 1 0
+ */
+ return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
+}
+
+static int usdhi6_get_ro(struct mmc_host *mmc)
+{
+ struct usdhi6_host *host = mmc_priv(mmc);
+ /* No locking as above */
+ u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP;
+
+/*
+ * level status.WP RO_ACTIVE_HIGH card read-only
+ * 1 0 0 0
+ * 1 0 1 1
+ * 0 1 0 1
+ * 0 1 1 0
+ */
+ return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
+}
+
+static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct usdhi6_host *host = mmc_priv(mmc);
+
+ dev_dbg(mmc_dev(mmc), "%s(): %sable\n", __func__, enable ? "en" : "dis");
+
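+	/* A set bit in sdio_mask disables the corresponding SDIO interrupt source */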
+ if (enable) {
+ host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ;
+ usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask);
+ usdhi6_write(host, USDHI6_SDIO_MODE, 1);
+ } else {
+ usdhi6_write(host, USDHI6_SDIO_MODE, 0);
+ usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ);
+ host->sdio_mask = USDHI6_SDIO_INFO1_IRQ;
+ }
+}
+
+static struct mmc_host_ops usdhi6_ops = {
+ .request = usdhi6_request,
+ .set_ios = usdhi6_set_ios,
+ .get_cd = usdhi6_get_cd,
+ .get_ro = usdhi6_get_ro,
+ .enable_sdio_irq = usdhi6_enable_sdio_irq,
+};
+
+/* State machine handlers */
+
+static void usdhi6_resp_cmd12(struct usdhi6_host *host)
+{
+ struct mmc_command *cmd = host->mrq->stop;
+ cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
+}
+
+static void usdhi6_resp_read(struct usdhi6_host *host)
+{
+ struct mmc_command *cmd = host->mrq->cmd;
+ u32 *rsp = cmd->resp, tmp = 0;
+ int i;
+
+/*
+ * RSP10 39-8
+ * RSP32 71-40
+ * RSP54 103-72
+ * RSP76 127-104
+ * R2-type response:
+ * resp[0] = r[127..96]
+ * resp[1] = r[95..64]
+ * resp[2] = r[63..32]
+ * resp[3] = r[31..0]
+ * Other responses:
+ * resp[0] = r[39..8]
+ */
+
+ if (mmc_resp_type(cmd) == MMC_RSP_NONE)
+ return;
+
+ if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) {
+ dev_err(mmc_dev(host->mmc),
+ "CMD%d: response expected but is missing!\n", cmd->opcode);
+ return;
+ }
+
+ if (mmc_resp_type(cmd) & MMC_RSP_136)
+ for (i = 0; i < 4; i++) {
+ if (i)
+ rsp[3 - i] = tmp >> 24;
+ tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8);
+ rsp[3 - i] |= tmp << 8;
+ }
+ else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+ cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
+ /* Read RSP54 to avoid conflict with auto CMD12 */
+ rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54);
+ else
+ rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
+
+ dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]);
+}
+
+static int usdhi6_blk_read(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p;
+ int i, rest;
+
+ if (host->io_error) {
+ data->error = usdhi6_error_code(host);
+ goto error;
+ }
+
+ if (host->pg.page) {
+ p = host->blk_page + host->offset;
+ } else {
+ p = usdhi6_sg_map(host);
+ if (!p) {
+ data->error = -ENOMEM;
+ goto error;
+ }
+ }
+
+ for (i = 0; i < data->blksz / 4; i++, p++)
+ *p = usdhi6_read(host, USDHI6_SD_BUF0);
+
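+	/* Blocks need not be a multiple of 4 bytes: read the 1-3 byte tail in 16-bit chunks */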
+ rest = data->blksz % 4;
+ for (i = 0; i < (rest + 1) / 2; i++) {
+ u16 d = usdhi6_read16(host, USDHI6_SD_BUF0);
+ ((u8 *)p)[2 * i] = ((u8 *)&d)[0];
+ if (rest > 1 && !i)
+ ((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1];
+ }
+
+ return 0;
+
+error:
+ dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
+ host->wait = USDHI6_WAIT_FOR_REQUEST;
+ return data->error;
+}
+
+static int usdhi6_blk_write(struct usdhi6_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p;
+ int i, rest;
+
+ if (host->io_error) {
+ data->error = usdhi6_error_code(host);
+ goto error;
+ }
+
+ if (host->pg.page) {
+ p = host->blk_page + host->offset;
+ } else {
+ p = usdhi6_sg_map(host);
+ if (!p) {
+ data->error = -ENOMEM;
+ goto error;
+ }
+ }
+
+ for (i = 0; i < data->blksz / 4; i++, p++)
+ usdhi6_write(host, USDHI6_SD_BUF0, *p);
+
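+	/* Write the 1-3 byte tail via the 16-bit port, zero-padding the unused byte */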
+ rest = data->blksz % 4;
+ for (i = 0; i < (rest + 1) / 2; i++) {
+ u16 d;
+ ((u8 *)&d)[0] = ((u8 *)p)[2 * i];
+ if (rest > 1 && !i)
+ ((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1];
+ else
+ ((u8 *)&d)[1] = 0;
+ usdhi6_write16(host, USDHI6_SD_BUF0, d);
+ }
+
+ return 0;
+
+error:
+ dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
+ host->wait = USDHI6_WAIT_FOR_REQUEST;
+ return data->error;
+}
+
+static int usdhi6_stop_cmd(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+
+ switch (mrq->cmd->opcode) {
+ case MMC_READ_MULTIPLE_BLOCK:
+ case MMC_WRITE_MULTIPLE_BLOCK:
+ if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) {
+ host->wait = USDHI6_WAIT_FOR_STOP;
+ return 0;
+ }
+ /* Unsupported STOP command */
+ default:
+ dev_err(mmc_dev(host->mmc),
+ "unsupported stop CMD%d for CMD%d\n",
+ mrq->stop->opcode, mrq->cmd->opcode);
+ mrq->stop->error = -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static bool usdhi6_end_cmd(struct usdhi6_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_command *cmd = mrq->cmd;
+
+ if (host->io_error) {
+ cmd->error = usdhi6_error_code(host);
+ return false;
+ }
+
+ usdhi6_resp_read(host);
+
+ if (!mrq->data)
+ return false;
+
+ if (host->dma_active) {
+ usdhi6_dma_kick(host);
+ if (!mrq->stop)
+ host->wait = USDHI6_WAIT_FOR_DMA;
+ else if (usdhi6_stop_cmd(host) < 0)
+ return false;
+ } else if (mrq->data->flags & MMC_DATA_READ) {
+ if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+ (cmd->opcode == SD_IO_RW_EXTENDED &&
+ mrq->data->blocks > 1))
+ host->wait = USDHI6_WAIT_FOR_MREAD;
+ else
+ host->wait = USDHI6_WAIT_FOR_READ;
+ } else {
+ if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
+ (cmd->opcode == SD_IO_RW_EXTENDED &&
+ mrq->data->blocks > 1))
+ host->wait = USDHI6_WAIT_FOR_MWRITE;
+ else
+ host->wait = USDHI6_WAIT_FOR_WRITE;
+ }
+
+ return true;
+}
+
+static bool usdhi6_read_block(struct usdhi6_host *host)
+{
+ /* ACCESS_END IRQ is already unmasked */
+ int ret = usdhi6_blk_read(host);
+
+ /*
+ * Have to force unmapping both pages: the single block could have been
+ * cross-page, in which case for single-block IO host->page_idx == 0.
+ * So, if we don't force, the second page won't be unmapped.
+ */
+ usdhi6_sg_unmap(host, true);
+
+ if (ret < 0)
+ return false;
+
+ host->wait = USDHI6_WAIT_FOR_DATA_END;
+ return true;
+}
+
+static bool usdhi6_mread_block(struct usdhi6_host *host)
+{
+ int ret = usdhi6_blk_read(host);
+
+ if (ret < 0)
+ return false;
+
+ usdhi6_sg_advance(host);
+
+ return !host->mrq->data->error &&
+ (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
+}
+
+static bool usdhi6_write_block(struct usdhi6_host *host)
+{
+ int ret = usdhi6_blk_write(host);
+
+ /* See comment in usdhi6_read_block() */
+ usdhi6_sg_unmap(host, true);
+
+ if (ret < 0)
+ return false;
+
+ host->wait = USDHI6_WAIT_FOR_DATA_END;
+ return true;
+}
+
+static bool usdhi6_mwrite_block(struct usdhi6_host *host)
+{
+ int ret = usdhi6_blk_write(host);
+
+ if (ret < 0)
+ return false;
+
+ usdhi6_sg_advance(host);
+
+ return !host->mrq->data->error &&
+ (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
+}
+
+/* Interrupt & timeout handlers */
+
+static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id)
+{
+ struct usdhi6_host *host = dev_id;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ bool io_wait = false;
+
+ cancel_delayed_work_sync(&host->timeout_work);
+
+ mrq = host->mrq;
+ if (!mrq)
+ return IRQ_HANDLED;
+
+ cmd = mrq->cmd;
+ data = mrq->data;
+
+ switch (host->wait) {
+ case USDHI6_WAIT_FOR_REQUEST:
+ /* We're too late, the timeout has already kicked in */
+ return IRQ_HANDLED;
+ case USDHI6_WAIT_FOR_CMD:
+ /* Wait for data? */
+ io_wait = usdhi6_end_cmd(host);
+ break;
+ case USDHI6_WAIT_FOR_MREAD:
+ /* Wait for more data? */
+ io_wait = usdhi6_mread_block(host);
+ break;
+ case USDHI6_WAIT_FOR_READ:
+ /* Wait for data end? */
+ io_wait = usdhi6_read_block(host);
+ break;
+ case USDHI6_WAIT_FOR_MWRITE:
+		/* Wait for more data to write? */
+ io_wait = usdhi6_mwrite_block(host);
+ break;
+ case USDHI6_WAIT_FOR_WRITE:
+ /* Wait for data end? */
+ io_wait = usdhi6_write_block(host);
+ break;
+ case USDHI6_WAIT_FOR_DMA:
+ usdhi6_dma_check_error(host);
+ break;
+ case USDHI6_WAIT_FOR_STOP:
+ usdhi6_write(host, USDHI6_SD_STOP, 0);
+ if (host->io_error) {
+ int ret = usdhi6_error_code(host);
+ if (mrq->stop)
+ mrq->stop->error = ret;
+ else
+ mrq->data->error = ret;
+ dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret);
+ break;
+ }
+ usdhi6_resp_cmd12(host);
+ mrq->stop->error = 0;
+ break;
+ case USDHI6_WAIT_FOR_DATA_END:
+ if (host->io_error) {
+ mrq->data->error = usdhi6_error_code(host);
+ dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__,
+ mrq->data->error);
+ }
+ break;
+ default:
+ cmd->error = -EFAULT;
+ dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
+ usdhi6_request_done(host);
+ return IRQ_HANDLED;
+ }
+
+ if (io_wait) {
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ /* Wait for more data or ACCESS_END */
+ if (!host->dma_active)
+ usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
+ return IRQ_HANDLED;
+ }
+
+ if (!cmd->error) {
+ if (data) {
+ if (!data->error) {
+ if (host->wait != USDHI6_WAIT_FOR_STOP &&
+ host->mrq->stop &&
+ !host->mrq->stop->error &&
+ !usdhi6_stop_cmd(host)) {
+ /* Sending STOP */
+ usdhi6_wait_for_resp(host);
+
+ schedule_delayed_work(&host->timeout_work,
+ host->timeout);
+
+ return IRQ_HANDLED;
+ }
+
+ data->bytes_xfered = data->blocks * data->blksz;
+ } else {
+ /* Data error: might need to unmap the last page */
+ dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
+ __func__, data->error);
+ usdhi6_sg_unmap(host, true);
+ }
+ } else if (cmd->opcode == MMC_APP_CMD) {
+ host->app_cmd = true;
+ }
+ }
+
+ usdhi6_request_done(host);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t usdhi6_sd(int irq, void *dev_id)
+{
+ struct usdhi6_host *host = dev_id;
+ u16 status, status2, error;
+
+ status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
+ ~USDHI6_SD_INFO1_CARD;
+ status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask;
+
+ usdhi6_only_cd(host);
+
+ dev_dbg(mmc_dev(host->mmc),
+ "IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2);
+
+ if (!status && !status2)
+ return IRQ_NONE;
+
+ error = status2 & USDHI6_SD_INFO2_ERR;
+
+ /* Ack / clear interrupts */
+ if (USDHI6_SD_INFO1_IRQ & status)
+ usdhi6_write(host, USDHI6_SD_INFO1,
+ 0xffff & ~(USDHI6_SD_INFO1_IRQ & status));
+
+ if (USDHI6_SD_INFO2_IRQ & status2) {
+ if (error)
+ /* In error cases BWE and BRE aren't cleared automatically */
+ status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE;
+
+ usdhi6_write(host, USDHI6_SD_INFO2,
+ 0xffff & ~(USDHI6_SD_INFO2_IRQ & status2));
+ }
+
+ host->io_error = error;
+ host->irq_status = status;
+
+ if (error) {
+ /* Don't pollute the log with unsupported command timeouts */
+ if (host->wait != USDHI6_WAIT_FOR_CMD ||
+ error != USDHI6_SD_INFO2_RSP_TOUT)
+ dev_warn(mmc_dev(host->mmc),
+ "%s(): INFO2 error bits 0x%08x\n",
+ __func__, error);
+ else
+ dev_dbg(mmc_dev(host->mmc),
+ "%s(): INFO2 error bits 0x%08x\n",
+ __func__, error);
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t usdhi6_sdio(int irq, void *dev_id)
+{
+ struct usdhi6_host *host = dev_id;
+ u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask;
+
+ dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status);
+
+ if (!status)
+ return IRQ_NONE;
+
+ usdhi6_write(host, USDHI6_SDIO_INFO1, ~status);
+
+ mmc_signal_sdio_irq(host->mmc);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t usdhi6_cd(int irq, void *dev_id)
+{
+ struct usdhi6_host *host = dev_id;
+ struct mmc_host *mmc = host->mmc;
+ u16 status;
+
+ /* We're only interested in hotplug events here */
+ status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
+ USDHI6_SD_INFO1_CARD;
+
+ if (!status)
+ return IRQ_NONE;
+
+	/* Ack: write 0 only to the handled hotplug bits, leave others pending */
+	usdhi6_write(host, USDHI6_SD_INFO1, ~status);
+
+ if (!work_pending(&mmc->detect.work) &&
+ (((status & USDHI6_SD_INFO1_CARD_INSERT) &&
+ !mmc->card) ||
+ ((status & USDHI6_SD_INFO1_CARD_EJECT) &&
+ mmc->card)))
+ mmc_detect_change(mmc, msecs_to_jiffies(100));
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Actually this should not be needed, if the built-in timeout works reliably
+ * in both PIO cases and DMA never fails. But if DMA does fail, a timeout
+ * handler might be the only way to catch the error.
+ */
+static void usdhi6_timeout_work(struct work_struct *work)
+{
+ struct delayed_work *d = container_of(work, struct delayed_work, work);
+ struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_data *data = mrq ? mrq->data : NULL;
+
+ dev_warn(mmc_dev(host->mmc),
+ "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
+ host->dma_active ? "DMA" : "PIO",
+ host->wait, mrq ? mrq->cmd->opcode : -1,
+ usdhi6_read(host, USDHI6_SD_INFO1),
+ usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status);
+
+ if (host->dma_active) {
+ usdhi6_dma_kill(host);
+ usdhi6_dma_stop_unmap(host);
+ }
+
+ switch (host->wait) {
+ default:
+ dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
+ /* mrq can be NULL in this actually impossible case */
+ case USDHI6_WAIT_FOR_CMD:
+ usdhi6_error_code(host);
+ if (mrq)
+ mrq->cmd->error = -ETIMEDOUT;
+ break;
+ case USDHI6_WAIT_FOR_STOP:
+ usdhi6_error_code(host);
+ mrq->stop->error = -ETIMEDOUT;
+ break;
+ case USDHI6_WAIT_FOR_DMA:
+ case USDHI6_WAIT_FOR_MREAD:
+ case USDHI6_WAIT_FOR_MWRITE:
+ case USDHI6_WAIT_FOR_READ:
+ case USDHI6_WAIT_FOR_WRITE:
+ dev_dbg(mmc_dev(host->mmc),
+ "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
+ data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
+ host->offset, data->blocks, data->blksz, data->sg_len,
+ sg_dma_len(host->sg), host->sg->offset);
+ usdhi6_sg_unmap(host, true);
+ /*
+ * If USDHI6_WAIT_FOR_DATA_END times out, we have already unmapped
+ * the page
+ */
+ case USDHI6_WAIT_FOR_DATA_END:
+ usdhi6_error_code(host);
+ data->error = -ETIMEDOUT;
+ }
+
+ if (mrq)
+ usdhi6_request_done(host);
+}
+
+/* Probe / release */
+
+static const struct of_device_id usdhi6_of_match[] = {
+ {.compatible = "renesas,usdhi6rol0"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, usdhi6_of_match);
+
+static int usdhi6_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mmc_host *mmc;
+ struct usdhi6_host *host;
+ struct resource *res;
+ int irq_cd, irq_sd, irq_sdio;
+ u32 version;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ irq_cd = platform_get_irq_byname(pdev, "card detect");
+ irq_sd = platform_get_irq_byname(pdev, "data");
+ irq_sdio = platform_get_irq_byname(pdev, "SDIO");
+ if (irq_sd < 0 || irq_sdio < 0)
+ return -ENODEV;
+
+ mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ ret = mmc_of_parse(mmc);
+ if (ret < 0)
+ goto e_free_mmc;
+
+ mmc_regulator_get_supply(mmc);
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->wait = USDHI6_WAIT_FOR_REQUEST;
+ host->timeout = msecs_to_jiffies(4000);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
+ goto e_free_mmc;
+ }
+
+ host->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(host->clk)) {
+		ret = PTR_ERR(host->clk);
+		goto e_free_mmc;
+	}
+
+ host->imclk = clk_get_rate(host->clk);
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret < 0)
+ goto e_free_mmc;
+
+ version = usdhi6_read(host, USDHI6_VERSION);
+	if ((version & 0xfff) != 0xa0d) {
+		ret = -EPERM;
+		dev_err(dev, "Version not recognized %x\n", version);
+		goto e_clk_off;
+	}
+
+ dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n",
+ usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT);
+
+ usdhi6_mask_all(host);
+
+ if (irq_cd >= 0) {
+ ret = devm_request_irq(dev, irq_cd, usdhi6_cd, 0,
+ dev_name(dev), host);
+ if (ret < 0)
+ goto e_clk_off;
+ } else {
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq_sd, usdhi6_sd, usdhi6_sd_bh, 0,
+ dev_name(dev), host);
+ if (ret < 0)
+ goto e_clk_off;
+
+ ret = devm_request_irq(dev, irq_sdio, usdhi6_sdio, 0,
+ dev_name(dev), host);
+ if (ret < 0)
+ goto e_clk_off;
+
+ INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work);
+
+ usdhi6_dma_request(host, res->start);
+
+ mmc->ops = &usdhi6_ops;
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_SDIO_IRQ;
+	/* .max_segs is an arbitrary software choice; feel free to adjust */
+ mmc->max_segs = 32;
+ mmc->max_blk_size = 512;
+ mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+ mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
+ /*
+ * Setting .max_seg_size to 1 page would simplify our page-mapping code,
+ * But OTOH, having large segments makes DMA more efficient. We could
+ * check, whether we managed to get DMA and fall back to 1 page
+ * segments, but if we do manage to obtain DMA and then it fails at
+ * run-time and we fall back to PIO, we will continue getting large
+ * segments. So, we wouldn't be able to get rid of the code anyway.
+ */
+ mmc->max_seg_size = mmc->max_req_size;
+ if (!mmc->f_max)
+ mmc->f_max = host->imclk;
+ mmc->f_min = host->imclk / 512;
+
+ platform_set_drvdata(pdev, host);
+
+ ret = mmc_add_host(mmc);
+ if (ret < 0)
+ goto e_clk_off;
+
+ return 0;
+
+e_clk_off:
+ clk_disable_unprepare(host->clk);
+e_free_mmc:
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+static int usdhi6_remove(struct platform_device *pdev)
+{
+ struct usdhi6_host *host = platform_get_drvdata(pdev);
+
+ mmc_remove_host(host->mmc);
+
+ usdhi6_mask_all(host);
+ cancel_delayed_work_sync(&host->timeout_work);
+ usdhi6_dma_release(host);
+ clk_disable_unprepare(host->clk);
+ mmc_free_host(host->mmc);
+
+ return 0;
+}
+
+static struct platform_driver usdhi6_driver = {
+ .probe = usdhi6_probe,
+ .remove = usdhi6_remove,
+ .driver = {
+ .name = "usdhi6rol0",
+ .of_match_table = usdhi6_of_match,
+ },
+};
+
+module_platform_driver(usdhi6_driver);
+
+MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:usdhi6rol0");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/kernel/drivers/mmc/host/ushc.c b/kernel/drivers/mmc/host/ushc.c
new file mode 100644
index 000000000..d2c386f09
--- /dev/null
+++ b/kernel/drivers/mmc/host/ushc.c
@@ -0,0 +1,569 @@
+/*
+ * USB SD Host Controller (USHC) controller driver.
+ *
+ * Copyright (C) 2010 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * Notes:
+ * - Only version 2 devices are supported.
+ * - Version 2 devices only support SDIO cards/devices (R2 response is
+ * unsupported).
+ *
+ * References:
+ * [USHC] USB SD Host Controller specification (CS-118793-SP)
+ */
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+
+enum ushc_request {
+ USHC_GET_CAPS = 0x00,
+ USHC_HOST_CTRL = 0x01,
+ USHC_PWR_CTRL = 0x02,
+ USHC_CLK_FREQ = 0x03,
+ USHC_EXEC_CMD = 0x04,
+ USHC_READ_RESP = 0x05,
+ USHC_RESET = 0x06,
+};
+
+enum ushc_request_type {
+ USHC_GET_CAPS_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_HOST_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_PWR_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_CLK_FREQ_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_EXEC_CMD_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_READ_RESP_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ USHC_RESET_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+};
+
+#define USHC_GET_CAPS_VERSION_MASK 0xff
+#define USHC_GET_CAPS_3V3 (1 << 8)
+#define USHC_GET_CAPS_3V0 (1 << 9)
+#define USHC_GET_CAPS_1V8 (1 << 10)
+#define USHC_GET_CAPS_HIGH_SPD (1 << 16)
+
+#define USHC_HOST_CTRL_4BIT (1 << 1)
+#define USHC_HOST_CTRL_HIGH_SPD (1 << 0)
+
+#define USHC_PWR_CTRL_OFF 0x00
+#define USHC_PWR_CTRL_3V3 0x01
+#define USHC_PWR_CTRL_3V0 0x02
+#define USHC_PWR_CTRL_1V8 0x03
+
+#define USHC_READ_RESP_BUSY (1 << 4)
+#define USHC_READ_RESP_ERR_TIMEOUT (1 << 3)
+#define USHC_READ_RESP_ERR_CRC (1 << 2)
+#define USHC_READ_RESP_ERR_DAT (1 << 1)
+#define USHC_READ_RESP_ERR_CMD (1 << 0)
+#define USHC_READ_RESP_ERR_MASK 0x0f
+
+struct ushc_cbw {
+ __u8 signature;
+ __u8 cmd_idx;
+ __le16 block_size;
+ __le32 arg;
+} __attribute__((packed));
+
+#define USHC_CBW_SIGNATURE 'C'
+
+struct ushc_csw {
+ __u8 signature;
+ __u8 status;
+ __le32 response;
+} __attribute__((packed));
+
+#define USHC_CSW_SIGNATURE 'S'
+
+struct ushc_int_data {
+ u8 status;
+ u8 reserved[3];
+};
+
+#define USHC_INT_STATUS_SDIO_INT (1 << 1)
+#define USHC_INT_STATUS_CARD_PRESENT (1 << 0)
+
+
+struct ushc_data {
+ struct usb_device *usb_dev;
+ struct mmc_host *mmc;
+
+ struct urb *int_urb;
+ struct ushc_int_data *int_data;
+
+ struct urb *cbw_urb;
+ struct ushc_cbw *cbw;
+
+ struct urb *data_urb;
+
+ struct urb *csw_urb;
+ struct ushc_csw *csw;
+
+ spinlock_t lock;
+ struct mmc_request *current_req;
+ u32 caps;
+ u16 host_ctrl;
+ unsigned long flags;
+ u8 last_status;
+ int clock_freq;
+};
+
+#define DISCONNECTED 0
+#define INT_EN 1
+#define IGNORE_NEXT_INT 2
+
+static void data_callback(struct urb *urb);
+
+static int ushc_hw_reset(struct ushc_data *ushc)
+{
+ return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
+ USHC_RESET, USHC_RESET_TYPE,
+ 0, 0, NULL, 0, 100);
+}
+
+static int ushc_hw_get_caps(struct ushc_data *ushc)
+{
+ int ret;
+ int version;
+
+ ret = usb_control_msg(ushc->usb_dev, usb_rcvctrlpipe(ushc->usb_dev, 0),
+ USHC_GET_CAPS, USHC_GET_CAPS_TYPE,
+ 0, 0, &ushc->caps, sizeof(ushc->caps), 100);
+ if (ret < 0)
+ return ret;
+
+ ushc->caps = le32_to_cpu(ushc->caps);
+
+ version = ushc->caps & USHC_GET_CAPS_VERSION_MASK;
+ if (version != 0x02) {
+ dev_err(&ushc->usb_dev->dev, "controller version %d is not supported\n", version);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ushc_hw_set_host_ctrl(struct ushc_data *ushc, u16 mask, u16 val)
+{
+ u16 host_ctrl;
+ int ret;
+
+ host_ctrl = (ushc->host_ctrl & ~mask) | val;
+ ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
+ USHC_HOST_CTRL, USHC_HOST_CTRL_TYPE,
+ host_ctrl, 0, NULL, 0, 100);
+ if (ret < 0)
+ return ret;
+ ushc->host_ctrl = host_ctrl;
+ return 0;
+}
+
+static void int_callback(struct urb *urb)
+{
+ struct ushc_data *ushc = urb->context;
+ u8 status, last_status;
+
+ if (urb->status < 0)
+ return;
+
+ status = ushc->int_data->status;
+ last_status = ushc->last_status;
+ ushc->last_status = status;
+
+ /*
+ * Ignore the card interrupt status on interrupt transfers that
+	 * were submitted while card interrupts were disabled.
+ *
+	 * This avoids occasional spurious interrupts when enabling
+ * interrupts immediately after clearing the source on the card.
+ */
+
+ if (!test_and_clear_bit(IGNORE_NEXT_INT, &ushc->flags)
+ && test_bit(INT_EN, &ushc->flags)
+ && status & USHC_INT_STATUS_SDIO_INT) {
+ mmc_signal_sdio_irq(ushc->mmc);
+ }
+
+ if ((status ^ last_status) & USHC_INT_STATUS_CARD_PRESENT)
+ mmc_detect_change(ushc->mmc, msecs_to_jiffies(100));
+
+ if (!test_bit(INT_EN, &ushc->flags))
+ set_bit(IGNORE_NEXT_INT, &ushc->flags);
+ usb_submit_urb(ushc->int_urb, GFP_ATOMIC);
+}
+
+static void cbw_callback(struct urb *urb)
+{
+ struct ushc_data *ushc = urb->context;
+
+ if (urb->status != 0) {
+ usb_unlink_urb(ushc->data_urb);
+ usb_unlink_urb(ushc->csw_urb);
+ }
+}
+
+static void data_callback(struct urb *urb)
+{
+ struct ushc_data *ushc = urb->context;
+
+ if (urb->status != 0)
+ usb_unlink_urb(ushc->csw_urb);
+}
+
+static void csw_callback(struct urb *urb)
+{
+ struct ushc_data *ushc = urb->context;
+ struct mmc_request *req = ushc->current_req;
+ int status;
+
+ status = ushc->csw->status;
+
+ if (urb->status != 0) {
+ req->cmd->error = urb->status;
+ } else if (status & USHC_READ_RESP_ERR_CMD) {
+ if (status & USHC_READ_RESP_ERR_CRC)
+ req->cmd->error = -EIO;
+ else
+ req->cmd->error = -ETIMEDOUT;
+ }
+ if (req->data) {
+ if (status & USHC_READ_RESP_ERR_DAT) {
+ if (status & USHC_READ_RESP_ERR_CRC)
+ req->data->error = -EIO;
+ else
+ req->data->error = -ETIMEDOUT;
+ req->data->bytes_xfered = 0;
+ } else {
+ req->data->bytes_xfered = req->data->blksz * req->data->blocks;
+ }
+ }
+
+ req->cmd->resp[0] = le32_to_cpu(ushc->csw->response);
+
+ mmc_request_done(ushc->mmc, req);
+}
+
+static void ushc_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct ushc_data *ushc = mmc_priv(mmc);
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ushc->lock, flags);
+
+ if (test_bit(DISCONNECTED, &ushc->flags)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Version 2 firmware doesn't support the R2 response format. */
+ if (req->cmd->flags & MMC_RSP_136) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/*
+	 * The Astoria's data FIFOs don't work with clock speeds < 5 MHz, so
+	 * limit commands with data to 6 MHz or more.
+	 */
+ if (req->data && ushc->clock_freq < 6000000) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ushc->current_req = req;
+
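+	/*
+	 * A request is a three-stage USB transaction: a Command Block
+	 * Wrapper (CBW), an optional bulk data stage, and a Command Status
+	 * Wrapper (CSW) carrying the command response and error flags.
+	 */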
+ /* Start cmd with CBW. */
+	ushc->cbw->cmd_idx = req->cmd->opcode;
+ if (req->data)
+ ushc->cbw->block_size = cpu_to_le16(req->data->blksz);
+ else
+ ushc->cbw->block_size = 0;
+ ushc->cbw->arg = cpu_to_le32(req->cmd->arg);
+
+ ret = usb_submit_urb(ushc->cbw_urb, GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
+
+ /* Submit data (if any). */
+ if (req->data) {
+ struct mmc_data *data = req->data;
+ int pipe;
+
+ if (data->flags & MMC_DATA_READ)
+ pipe = usb_rcvbulkpipe(ushc->usb_dev, 6);
+ else
+ pipe = usb_sndbulkpipe(ushc->usb_dev, 2);
+
+ usb_fill_bulk_urb(ushc->data_urb, ushc->usb_dev, pipe,
+ sg_virt(data->sg), data->sg->length,
+ data_callback, ushc);
+ ret = usb_submit_urb(ushc->data_urb, GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* Submit CSW. */
+ ret = usb_submit_urb(ushc->csw_urb, GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
+
+out:
+ spin_unlock_irqrestore(&ushc->lock, flags);
+ if (ret < 0) {
+ usb_unlink_urb(ushc->cbw_urb);
+ usb_unlink_urb(ushc->data_urb);
+ req->cmd->error = ret;
+ mmc_request_done(mmc, req);
+ }
+}
+
+static int ushc_set_power(struct ushc_data *ushc, unsigned char power_mode)
+{
+ u16 voltage;
+
+ switch (power_mode) {
+ case MMC_POWER_OFF:
+ voltage = USHC_PWR_CTRL_OFF;
+ break;
+ case MMC_POWER_UP:
+ case MMC_POWER_ON:
+ voltage = USHC_PWR_CTRL_3V3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
+ USHC_PWR_CTRL, USHC_PWR_CTRL_TYPE,
+ voltage, 0, NULL, 0, 100);
+}
+
+static int ushc_set_bus_width(struct ushc_data *ushc, int bus_width)
+{
+ return ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_4BIT,
+ bus_width == 4 ? USHC_HOST_CTRL_4BIT : 0);
+}
+
+static int ushc_set_bus_freq(struct ushc_data *ushc, int clk, bool enable_hs)
+{
+ int ret;
+
+ /* Hardware can't detect interrupts while the clock is off. */
+ if (clk == 0)
+ clk = 400000;
+
+ ret = ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_HIGH_SPD,
+ enable_hs ? USHC_HOST_CTRL_HIGH_SPD : 0);
+ if (ret < 0)
+ return ret;
+
+ ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
+ USHC_CLK_FREQ, USHC_CLK_FREQ_TYPE,
+ clk & 0xffff, (clk >> 16) & 0xffff, NULL, 0, 100);
+ if (ret < 0)
+ return ret;
+
+ ushc->clock_freq = clk;
+ return 0;
+}
+
+static void ushc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct ushc_data *ushc = mmc_priv(mmc);
+
+ ushc_set_power(ushc, ios->power_mode);
+ ushc_set_bus_width(ushc, 1 << ios->bus_width);
+ ushc_set_bus_freq(ushc, ios->clock, ios->timing == MMC_TIMING_SD_HS);
+}
+
+static int ushc_get_cd(struct mmc_host *mmc)
+{
+ struct ushc_data *ushc = mmc_priv(mmc);
+
+ return !!(ushc->last_status & USHC_INT_STATUS_CARD_PRESENT);
+}
+
+static void ushc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct ushc_data *ushc = mmc_priv(mmc);
+
+ if (enable)
+ set_bit(INT_EN, &ushc->flags);
+ else
+ clear_bit(INT_EN, &ushc->flags);
+}
+
+static void ushc_clean_up(struct ushc_data *ushc)
+{
+ usb_free_urb(ushc->int_urb);
+ usb_free_urb(ushc->csw_urb);
+ usb_free_urb(ushc->data_urb);
+ usb_free_urb(ushc->cbw_urb);
+
+ kfree(ushc->int_data);
+ kfree(ushc->cbw);
+ kfree(ushc->csw);
+
+ mmc_free_host(ushc->mmc);
+}
+
+static const struct mmc_host_ops ushc_ops = {
+ .request = ushc_request,
+ .set_ios = ushc_set_ios,
+ .get_cd = ushc_get_cd,
+ .enable_sdio_irq = ushc_enable_sdio_irq,
+};
+
+static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct mmc_host *mmc;
+ struct ushc_data *ushc;
+ int ret;
+
+ mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
+ if (mmc == NULL)
+ return -ENOMEM;
+ ushc = mmc_priv(mmc);
+ usb_set_intfdata(intf, ushc);
+
+ ushc->usb_dev = usb_dev;
+ ushc->mmc = mmc;
+
+ spin_lock_init(&ushc->lock);
+
+ ret = ushc_hw_reset(ushc);
+ if (ret < 0)
+ goto err;
+
+ /* Read capabilities. */
+ ret = ushc_hw_get_caps(ushc);
+ if (ret < 0)
+ goto err;
+
+ mmc->ops = &ushc_ops;
+
+ mmc->f_min = 400000;
+ mmc->f_max = 50000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+ mmc->caps |= (ushc->caps & USHC_GET_CAPS_HIGH_SPD) ? MMC_CAP_SD_HIGHSPEED : 0;
+
+ mmc->max_seg_size = 512*511;
+ mmc->max_segs = 1;
+ mmc->max_req_size = 512*511;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = 511;
+
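+	/* The interrupt endpoint reports card-detect and SDIO interrupt status */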
+ ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (ushc->int_urb == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL);
+ if (ushc->int_data == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ usb_fill_int_urb(ushc->int_urb, ushc->usb_dev,
+ usb_rcvintpipe(usb_dev,
+ intf->cur_altsetting->endpoint[0].desc.bEndpointAddress),
+ ushc->int_data, sizeof(struct ushc_int_data),
+ int_callback, ushc,
+ intf->cur_altsetting->endpoint[0].desc.bInterval);
+
+ ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (ushc->cbw_urb == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL);
+ if (ushc->cbw == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ushc->cbw->signature = USHC_CBW_SIGNATURE;
+
+ usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2),
+ ushc->cbw, sizeof(struct ushc_cbw),
+ cbw_callback, ushc);
+
+ ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (ushc->data_urb == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (ushc->csw_urb == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ushc->csw = kzalloc(sizeof(struct ushc_csw), GFP_KERNEL);
+ if (ushc->csw == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6),
+ ushc->csw, sizeof(struct ushc_csw),
+ csw_callback, ushc);
+
+ ret = mmc_add_host(ushc->mmc);
+ if (ret)
+ goto err;
+
+ ret = usb_submit_urb(ushc->int_urb, GFP_KERNEL);
+ if (ret < 0) {
+ mmc_remove_host(ushc->mmc);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ushc_clean_up(ushc);
+ return ret;
+}
+
+static void ushc_disconnect(struct usb_interface *intf)
+{
+ struct ushc_data *ushc = usb_get_intfdata(intf);
+
+ spin_lock_irq(&ushc->lock);
+ set_bit(DISCONNECTED, &ushc->flags);
+ spin_unlock_irq(&ushc->lock);
+
+ usb_kill_urb(ushc->int_urb);
+ usb_kill_urb(ushc->cbw_urb);
+ usb_kill_urb(ushc->data_urb);
+ usb_kill_urb(ushc->csw_urb);
+
+ mmc_remove_host(ushc->mmc);
+
+ ushc_clean_up(ushc);
+}
+
+static struct usb_device_id ushc_id_table[] = {
+ /* CSR USB SD Host Controller */
+ { USB_DEVICE(0x0a12, 0x5d10) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, ushc_id_table);
+
+static struct usb_driver ushc_driver = {
+ .name = "ushc",
+ .id_table = ushc_id_table,
+ .probe = ushc_probe,
+ .disconnect = ushc_disconnect,
+};
+
+module_usb_driver(ushc_driver);
+
+MODULE_DESCRIPTION("USB SD Host Controller driver");
+MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/mmc/host/via-sdmmc.c b/kernel/drivers/mmc/host/via-sdmmc.c
new file mode 100644
index 000000000..63fac78b3
--- /dev/null
+++ b/kernel/drivers/mmc/host/via-sdmmc.c
@@ -0,0 +1,1339 @@
+/*
+ * drivers/mmc/host/via-sdmmc.c - VIA SD/MMC Card Reader driver
+ * Copyright (c) 2008, VIA Technologies Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+
+#include <linux/mmc/host.h>
+
+#define DRV_NAME "via_sdmmc"
+
+#define PCI_DEVICE_ID_VIA_9530 0x9530
+
+#define VIA_CRDR_SDC_OFF 0x200
+#define VIA_CRDR_DDMA_OFF 0x400
+#define VIA_CRDR_PCICTRL_OFF 0x600
+
+#define VIA_CRDR_MIN_CLOCK 375000
+#define VIA_CRDR_MAX_CLOCK 48000000
+
+/*
+ * PCI registers
+ */
+
+#define VIA_CRDR_PCI_WORK_MODE 0x40
+#define VIA_CRDR_PCI_DBG_MODE 0x41
+
+/*
+ * SDC MMIO Registers
+ */
+
+#define VIA_CRDR_SDCTRL 0x0
+#define VIA_CRDR_SDCTRL_START 0x01
+#define VIA_CRDR_SDCTRL_WRITE 0x04
+#define VIA_CRDR_SDCTRL_SINGLE_WR 0x10
+#define VIA_CRDR_SDCTRL_SINGLE_RD 0x20
+#define VIA_CRDR_SDCTRL_MULTI_WR 0x30
+#define VIA_CRDR_SDCTRL_MULTI_RD 0x40
+#define VIA_CRDR_SDCTRL_STOP 0x70
+
+#define VIA_CRDR_SDCTRL_RSP_NONE 0x0
+#define VIA_CRDR_SDCTRL_RSP_R1 0x10000
+#define VIA_CRDR_SDCTRL_RSP_R2 0x20000
+#define VIA_CRDR_SDCTRL_RSP_R3 0x30000
+#define VIA_CRDR_SDCTRL_RSP_R1B 0x90000
+
+#define VIA_CRDR_SDCARG 0x4
+
+#define VIA_CRDR_SDBUSMODE 0x8
+#define VIA_CRDR_SDMODE_4BIT 0x02
+#define VIA_CRDR_SDMODE_CLK_ON 0x40
+
+#define VIA_CRDR_SDBLKLEN 0xc
+/*
 * Bit 0 - Bit 10 : Block length. So, the maximum block length should be 2048.
+ * Bit 11 - Bit 13 : Reserved.
+ * GPIDET : Select GPI pin to detect card, GPI means CR_CD# in top design.
+ * INTEN : Enable SD host interrupt.
 * Bit 16 - Bit 31 : Block count. So, the maximum block count should be 65536.
+ */
+#define VIA_CRDR_SDBLKLEN_GPIDET 0x2000
+#define VIA_CRDR_SDBLKLEN_INTEN 0x8000
+#define VIA_CRDR_MAX_BLOCK_COUNT 65536
+#define VIA_CRDR_MAX_BLOCK_LENGTH 2048
+
+#define VIA_CRDR_SDRESP0 0x10
+#define VIA_CRDR_SDRESP1 0x14
+#define VIA_CRDR_SDRESP2 0x18
+#define VIA_CRDR_SDRESP3 0x1c
+
+#define VIA_CRDR_SDCURBLKCNT 0x20
+
+#define VIA_CRDR_SDINTMASK 0x24
+/*
+ * MBDIE : Multiple Blocks transfer Done Interrupt Enable
+ * BDDIE : Block Data transfer Done Interrupt Enable
+ * CIRIE : Card Insertion or Removal Interrupt Enable
+ * CRDIE : Command-Response transfer Done Interrupt Enable
+ * CRTOIE : Command-Response response TimeOut Interrupt Enable
+ * ASCRDIE : Auto Stop Command-Response transfer Done Interrupt Enable
+ * DTIE : Data access Timeout Interrupt Enable
+ * SCIE : reSponse CRC error Interrupt Enable
+ * RCIE : Read data CRC error Interrupt Enable
+ * WCIE : Write data CRC error Interrupt Enable
+ */
+#define VIA_CRDR_SDINTMASK_MBDIE 0x10
+#define VIA_CRDR_SDINTMASK_BDDIE 0x20
+#define VIA_CRDR_SDINTMASK_CIRIE 0x80
+#define VIA_CRDR_SDINTMASK_CRDIE 0x200
+#define VIA_CRDR_SDINTMASK_CRTOIE 0x400
+#define VIA_CRDR_SDINTMASK_ASCRDIE 0x800
+#define VIA_CRDR_SDINTMASK_DTIE 0x1000
+#define VIA_CRDR_SDINTMASK_SCIE 0x2000
+#define VIA_CRDR_SDINTMASK_RCIE 0x4000
+#define VIA_CRDR_SDINTMASK_WCIE 0x8000
+
+#define VIA_CRDR_SDACTIVE_INTMASK \
+ (VIA_CRDR_SDINTMASK_MBDIE | VIA_CRDR_SDINTMASK_CIRIE \
+ | VIA_CRDR_SDINTMASK_CRDIE | VIA_CRDR_SDINTMASK_CRTOIE \
+ | VIA_CRDR_SDINTMASK_DTIE | VIA_CRDR_SDINTMASK_SCIE \
+ | VIA_CRDR_SDINTMASK_RCIE | VIA_CRDR_SDINTMASK_WCIE)
+
+#define VIA_CRDR_SDSTATUS 0x28
+/*
+ * CECC : Reserved
+ * WP : SD card Write Protect status
+ * SLOTD : Reserved
 * SLOTG : SD SLOT status (GPI pin status)
+ * MBD : Multiple Blocks transfer Done interrupt status
+ * BDD : Block Data transfer Done interrupt status
+ * CD : Reserved
+ * CIR : Card Insertion or Removal interrupt detected on GPI pin
+ * IO : Reserved
+ * CRD : Command-Response transfer Done interrupt status
+ * CRTO : Command-Response response TimeOut interrupt status
+ * ASCRDIE : Auto Stop Command-Response transfer Done interrupt status
+ * DT : Data access Timeout interrupt status
+ * SC : reSponse CRC error interrupt status
+ * RC : Read data CRC error interrupt status
+ * WC : Write data CRC error interrupt status
+ */
+#define VIA_CRDR_SDSTS_CECC 0x01
+#define VIA_CRDR_SDSTS_WP 0x02
+#define VIA_CRDR_SDSTS_SLOTD 0x04
+#define VIA_CRDR_SDSTS_SLOTG 0x08
+#define VIA_CRDR_SDSTS_MBD 0x10
+#define VIA_CRDR_SDSTS_BDD 0x20
+#define VIA_CRDR_SDSTS_CD 0x40
+#define VIA_CRDR_SDSTS_CIR 0x80
+#define VIA_CRDR_SDSTS_IO 0x100
+#define VIA_CRDR_SDSTS_CRD 0x200
+#define VIA_CRDR_SDSTS_CRTO 0x400
+#define VIA_CRDR_SDSTS_ASCRDIE 0x800
+#define VIA_CRDR_SDSTS_DT 0x1000
+#define VIA_CRDR_SDSTS_SC 0x2000
+#define VIA_CRDR_SDSTS_RC 0x4000
+#define VIA_CRDR_SDSTS_WC 0x8000
+
+#define VIA_CRDR_SDSTS_IGN_MASK\
+ (VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_IO)
+#define VIA_CRDR_SDSTS_INT_MASK \
+ (VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_CD \
+ | VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_IO | VIA_CRDR_SDSTS_CRD \
+ | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \
+ | VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
+#define VIA_CRDR_SDSTS_W1C_MASK \
+ (VIA_CRDR_SDSTS_CECC | VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD \
+ | VIA_CRDR_SDSTS_CD | VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_CRD \
+ | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \
+ | VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
+#define VIA_CRDR_SDSTS_CMD_MASK \
+ (VIA_CRDR_SDSTS_CRD | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_SC)
+#define VIA_CRDR_SDSTS_DATA_MASK\
+ (VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_DT \
+ | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
+
+#define VIA_CRDR_SDSTATUS2 0x2a
+/*
+ * CFE : Enable SD host automatic Clock FReezing
+ */
+#define VIA_CRDR_SDSTS_CFE 0x80
+
+#define VIA_CRDR_SDRSPTMO 0x2C
+
+#define VIA_CRDR_SDCLKSEL 0x30
+
+#define VIA_CRDR_SDEXTCTRL 0x34
+#define VIS_CRDR_SDEXTCTRL_AUTOSTOP_SD 0x01
+#define VIS_CRDR_SDEXTCTRL_SHIFT_9 0x02
+#define VIS_CRDR_SDEXTCTRL_MMC_8BIT 0x04
+#define VIS_CRDR_SDEXTCTRL_RELD_BLK 0x08
+#define VIS_CRDR_SDEXTCTRL_BAD_CMDA 0x10
+#define VIS_CRDR_SDEXTCTRL_BAD_DATA 0x20
+#define VIS_CRDR_SDEXTCTRL_AUTOSTOP_SPI 0x40
+#define VIA_CRDR_SDEXTCTRL_HISPD 0x80
+/* 0x38-0xFF reserved */
+
+/*
+ * Data DMA Control Registers
+ */
+
+#define VIA_CRDR_DMABASEADD 0x0
+#define VIA_CRDR_DMACOUNTER 0x4
+
+#define VIA_CRDR_DMACTRL 0x8
+/*
+ * DIR :Transaction Direction
+ * 0 : From card to memory
+ * 1 : From memory to card
+ */
+#define VIA_CRDR_DMACTRL_DIR 0x100
+#define VIA_CRDR_DMACTRL_ENIRQ 0x10000
+#define VIA_CRDR_DMACTRL_SFTRST 0x1000000
+
+#define VIA_CRDR_DMASTS 0xc
+
+#define VIA_CRDR_DMASTART 0x10
+/*0x14-0xFF reserved*/
+
+/*
+ * PCI Control Registers
+ */
+
+/*0x0 - 0x1 reserved*/
+#define VIA_CRDR_PCICLKGATT 0x2
+/*
+ * SFTRST :
 * 0 : Soft reset the whole controller; the reset is de-asserted automatically
+ * 1 : Soft reset is de-asserted
+ */
+#define VIA_CRDR_PCICLKGATT_SFTRST 0x01
+/*
+ * 3V3 : Pad power select
+ * 0 : 1.8V
+ * 1 : 3.3V
 * NOTE : No matter what the actual value is, this bit always
 * reads as 0. This is a hardware bug.
+ */
+#define VIA_CRDR_PCICLKGATT_3V3 0x10
+/*
+ * PAD_PWRON : Pad Power on/off select
+ * 0 : Power off
+ * 1 : Power on
 * NOTE : No matter what the actual value is, this bit always
 * reads as 0. This is a hardware bug.
+ */
+#define VIA_CRDR_PCICLKGATT_PAD_PWRON 0x20
+
+#define VIA_CRDR_PCISDCCLK 0x5
+
+#define VIA_CRDR_PCIDMACLK 0x7
+#define VIA_CRDR_PCIDMACLK_SDC 0x2
+
+#define VIA_CRDR_PCIINTCTRL 0x8
+#define VIA_CRDR_PCIINTCTRL_SDCIRQEN 0x04
+
+#define VIA_CRDR_PCIINTSTATUS 0x9
+#define VIA_CRDR_PCIINTSTATUS_SDC 0x04
+
+#define VIA_CRDR_PCITMOCTRL 0xa
+#define VIA_CRDR_PCITMOCTRL_NO 0x0
+#define VIA_CRDR_PCITMOCTRL_32US 0x1
+#define VIA_CRDR_PCITMOCTRL_256US 0x2
+#define VIA_CRDR_PCITMOCTRL_1024US 0x3
+#define VIA_CRDR_PCITMOCTRL_256MS 0x4
+#define VIA_CRDR_PCITMOCTRL_512MS 0x5
+#define VIA_CRDR_PCITMOCTRL_1024MS 0x6
+
+/*0xB-0xFF reserved*/
+
+enum PCI_HOST_CLK_CONTROL {
+ PCI_CLK_375K = 0x03,
+ PCI_CLK_8M = 0x04,
+ PCI_CLK_12M = 0x00,
+ PCI_CLK_16M = 0x05,
+ PCI_CLK_24M = 0x01,
+ PCI_CLK_33M = 0x06,
+ PCI_CLK_48M = 0x02
+};
+
+struct sdhcreg {
+ u32 sdcontrol_reg;
+ u32 sdcmdarg_reg;
+ u32 sdbusmode_reg;
+ u32 sdblklen_reg;
+ u32 sdresp_reg[4];
+ u32 sdcurblkcnt_reg;
+ u32 sdintmask_reg;
+ u32 sdstatus_reg;
+ u32 sdrsptmo_reg;
+ u32 sdclksel_reg;
+ u32 sdextctrl_reg;
+};
+
+struct pcictrlreg {
+ u8 reserve[2];
+ u8 pciclkgat_reg;
+ u8 pcinfcclk_reg;
+ u8 pcimscclk_reg;
+ u8 pcisdclk_reg;
+ u8 pcicaclk_reg;
+ u8 pcidmaclk_reg;
+ u8 pciintctrl_reg;
+ u8 pciintstatus_reg;
+ u8 pcitmoctrl_reg;
+ u8 Resv;
+};
+
+struct via_crdr_mmc_host {
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ void __iomem *mmiobase;
+ void __iomem *sdhc_mmiobase;
+ void __iomem *ddma_mmiobase;
+ void __iomem *pcictrl_mmiobase;
+
+ struct pcictrlreg pm_pcictrl_reg;
+ struct sdhcreg pm_sdhc_reg;
+
+ struct work_struct carddet_work;
+ struct tasklet_struct finish_tasklet;
+
+ struct timer_list timer;
+ spinlock_t lock;
+ u8 power;
+ int reject;
+ unsigned int quirks;
+};
+
+/* some devices need a very long delay for power to stabilize */
+#define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001
+
+static struct pci_device_id via_ids[] = {
+ {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, via_ids);
+
+static void via_print_sdchc(struct via_crdr_mmc_host *host)
+{
+ void __iomem *addrbase = host->sdhc_mmiobase;
+
+ pr_debug("SDC MMIO Registers:\n");
+ pr_debug("SDCONTROL=%08x, SDCMDARG=%08x, SDBUSMODE=%08x\n",
+ readl(addrbase + VIA_CRDR_SDCTRL),
+ readl(addrbase + VIA_CRDR_SDCARG),
+ readl(addrbase + VIA_CRDR_SDBUSMODE));
+ pr_debug("SDBLKLEN=%08x, SDCURBLKCNT=%08x, SDINTMASK=%08x\n",
+ readl(addrbase + VIA_CRDR_SDBLKLEN),
+ readl(addrbase + VIA_CRDR_SDCURBLKCNT),
+ readl(addrbase + VIA_CRDR_SDINTMASK));
+ pr_debug("SDSTATUS=%08x, SDCLKSEL=%08x, SDEXTCTRL=%08x\n",
+ readl(addrbase + VIA_CRDR_SDSTATUS),
+ readl(addrbase + VIA_CRDR_SDCLKSEL),
+ readl(addrbase + VIA_CRDR_SDEXTCTRL));
+}
+
+static void via_print_pcictrl(struct via_crdr_mmc_host *host)
+{
+ void __iomem *addrbase = host->pcictrl_mmiobase;
+
+ pr_debug("PCI Control Registers:\n");
+ pr_debug("PCICLKGATT=%02x, PCISDCCLK=%02x, PCIDMACLK=%02x\n",
+ readb(addrbase + VIA_CRDR_PCICLKGATT),
+ readb(addrbase + VIA_CRDR_PCISDCCLK),
+ readb(addrbase + VIA_CRDR_PCIDMACLK));
+ pr_debug("PCIINTCTRL=%02x, PCIINTSTATUS=%02x\n",
+ readb(addrbase + VIA_CRDR_PCIINTCTRL),
+ readb(addrbase + VIA_CRDR_PCIINTSTATUS));
+}
+
+static void via_save_pcictrlreg(struct via_crdr_mmc_host *host)
+{
+ struct pcictrlreg *pm_pcictrl_reg;
+ void __iomem *addrbase;
+
+ pm_pcictrl_reg = &(host->pm_pcictrl_reg);
+ addrbase = host->pcictrl_mmiobase;
+
+ pm_pcictrl_reg->pciclkgat_reg = readb(addrbase + VIA_CRDR_PCICLKGATT);
+ pm_pcictrl_reg->pciclkgat_reg |=
+ VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ pm_pcictrl_reg->pcisdclk_reg = readb(addrbase + VIA_CRDR_PCISDCCLK);
+ pm_pcictrl_reg->pcidmaclk_reg = readb(addrbase + VIA_CRDR_PCIDMACLK);
+ pm_pcictrl_reg->pciintctrl_reg = readb(addrbase + VIA_CRDR_PCIINTCTRL);
+ pm_pcictrl_reg->pciintstatus_reg =
+ readb(addrbase + VIA_CRDR_PCIINTSTATUS);
+ pm_pcictrl_reg->pcitmoctrl_reg = readb(addrbase + VIA_CRDR_PCITMOCTRL);
+}
+
+static void via_restore_pcictrlreg(struct via_crdr_mmc_host *host)
+{
+ struct pcictrlreg *pm_pcictrl_reg;
+ void __iomem *addrbase;
+
+ pm_pcictrl_reg = &(host->pm_pcictrl_reg);
+ addrbase = host->pcictrl_mmiobase;
+
+ writeb(pm_pcictrl_reg->pciclkgat_reg, addrbase + VIA_CRDR_PCICLKGATT);
+ writeb(pm_pcictrl_reg->pcisdclk_reg, addrbase + VIA_CRDR_PCISDCCLK);
+ writeb(pm_pcictrl_reg->pcidmaclk_reg, addrbase + VIA_CRDR_PCIDMACLK);
+ writeb(pm_pcictrl_reg->pciintctrl_reg, addrbase + VIA_CRDR_PCIINTCTRL);
+ writeb(pm_pcictrl_reg->pciintstatus_reg,
+ addrbase + VIA_CRDR_PCIINTSTATUS);
+ writeb(pm_pcictrl_reg->pcitmoctrl_reg, addrbase + VIA_CRDR_PCITMOCTRL);
+}
+
+static void via_save_sdcreg(struct via_crdr_mmc_host *host)
+{
+ struct sdhcreg *pm_sdhc_reg;
+ void __iomem *addrbase;
+
+ pm_sdhc_reg = &(host->pm_sdhc_reg);
+ addrbase = host->sdhc_mmiobase;
+
+ pm_sdhc_reg->sdcontrol_reg = readl(addrbase + VIA_CRDR_SDCTRL);
+ pm_sdhc_reg->sdcmdarg_reg = readl(addrbase + VIA_CRDR_SDCARG);
+ pm_sdhc_reg->sdbusmode_reg = readl(addrbase + VIA_CRDR_SDBUSMODE);
+ pm_sdhc_reg->sdblklen_reg = readl(addrbase + VIA_CRDR_SDBLKLEN);
+ pm_sdhc_reg->sdcurblkcnt_reg = readl(addrbase + VIA_CRDR_SDCURBLKCNT);
+ pm_sdhc_reg->sdintmask_reg = readl(addrbase + VIA_CRDR_SDINTMASK);
+ pm_sdhc_reg->sdstatus_reg = readl(addrbase + VIA_CRDR_SDSTATUS);
+ pm_sdhc_reg->sdrsptmo_reg = readl(addrbase + VIA_CRDR_SDRSPTMO);
+ pm_sdhc_reg->sdclksel_reg = readl(addrbase + VIA_CRDR_SDCLKSEL);
+ pm_sdhc_reg->sdextctrl_reg = readl(addrbase + VIA_CRDR_SDEXTCTRL);
+}
+
+static void via_restore_sdcreg(struct via_crdr_mmc_host *host)
+{
+ struct sdhcreg *pm_sdhc_reg;
+ void __iomem *addrbase;
+
+ pm_sdhc_reg = &(host->pm_sdhc_reg);
+ addrbase = host->sdhc_mmiobase;
+
+ writel(pm_sdhc_reg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL);
+ writel(pm_sdhc_reg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG);
+ writel(pm_sdhc_reg->sdbusmode_reg, addrbase + VIA_CRDR_SDBUSMODE);
+ writel(pm_sdhc_reg->sdblklen_reg, addrbase + VIA_CRDR_SDBLKLEN);
+ writel(pm_sdhc_reg->sdcurblkcnt_reg, addrbase + VIA_CRDR_SDCURBLKCNT);
+ writel(pm_sdhc_reg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK);
+ writel(pm_sdhc_reg->sdstatus_reg, addrbase + VIA_CRDR_SDSTATUS);
+ writel(pm_sdhc_reg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO);
+ writel(pm_sdhc_reg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL);
+ writel(pm_sdhc_reg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL);
+}
+
+static void via_pwron_sleep(struct via_crdr_mmc_host *sdhost)
+{
+ if (sdhost->quirks & VIA_CRDR_QUIRK_300MS_PWRDELAY)
+ msleep(300);
+ else
+ msleep(3);
+}
+
+static void via_set_ddma(struct via_crdr_mmc_host *host,
+ dma_addr_t dmaaddr, u32 count, int dir, int enirq)
+{
+ void __iomem *addrbase;
+ u32 ctrl_data = 0;
+
+ if (enirq)
+ ctrl_data |= VIA_CRDR_DMACTRL_ENIRQ;
+
+ if (dir)
+ ctrl_data |= VIA_CRDR_DMACTRL_DIR;
+
+ addrbase = host->ddma_mmiobase;
+
+ writel(dmaaddr, addrbase + VIA_CRDR_DMABASEADD);
+ writel(count, addrbase + VIA_CRDR_DMACOUNTER);
+ writel(ctrl_data, addrbase + VIA_CRDR_DMACTRL);
+ writel(0x01, addrbase + VIA_CRDR_DMASTART);
+
+	/*
+	 * It seems that our DMA cannot work normally with a 375 kHz clock.
+	 * FIXME: don't brute-force 8 MHz, use PIO at 375 kHz instead!
+	 */
+ addrbase = host->pcictrl_mmiobase;
+ if (readb(addrbase + VIA_CRDR_PCISDCCLK) == PCI_CLK_375K) {
+ dev_info(host->mmc->parent, "forcing card speed to 8MHz\n");
+ writeb(PCI_CLK_8M, addrbase + VIA_CRDR_PCISDCCLK);
+ }
+}
+
+static void via_sdc_preparedata(struct via_crdr_mmc_host *host,
+ struct mmc_data *data)
+{
+ void __iomem *addrbase;
+ u32 blk_reg;
+ int count;
+
+ WARN_ON(host->data);
+
+ /* Sanity checks */
+ BUG_ON(data->blksz > host->mmc->max_blk_size);
+ BUG_ON(data->blocks > host->mmc->max_blk_count);
+
+ host->data = data;
+
+ count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_READ) ?
+ PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+ BUG_ON(count != 1);
+
+ via_set_ddma(host, sg_dma_address(data->sg), sg_dma_len(data->sg),
+ (data->flags & MMC_DATA_WRITE) ? 1 : 0, 1);
+
+ addrbase = host->sdhc_mmiobase;
+
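+	/*
+	 * SDBLKLEN: bits 0-10 hold the block length minus one, bits 16-31 the
+	 * block count; also enable GPI card detect and the host interrupt.
+	 */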
+ blk_reg = data->blksz - 1;
+ blk_reg |= VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
+ blk_reg |= (data->blocks) << 16;
+
+ writel(blk_reg, addrbase + VIA_CRDR_SDBLKLEN);
+}
+
+static void via_sdc_get_response(struct via_crdr_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ void __iomem *addrbase = host->sdhc_mmiobase;
+ u32 dwdata0 = readl(addrbase + VIA_CRDR_SDRESP0);
+ u32 dwdata1 = readl(addrbase + VIA_CRDR_SDRESP1);
+ u32 dwdata2 = readl(addrbase + VIA_CRDR_SDRESP2);
+ u32 dwdata3 = readl(addrbase + VIA_CRDR_SDRESP3);
+
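+	/*
+	 * The hardware returns the response bytes shifted by one byte;
+	 * reassemble them into the resp[] word layout the MMC core expects.
+	 */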
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[0] = ((u8) (dwdata1)) |
+ (((u8) (dwdata0 >> 24)) << 8) |
+ (((u8) (dwdata0 >> 16)) << 16) |
+ (((u8) (dwdata0 >> 8)) << 24);
+
+ cmd->resp[1] = ((u8) (dwdata2)) |
+ (((u8) (dwdata1 >> 24)) << 8) |
+ (((u8) (dwdata1 >> 16)) << 16) |
+ (((u8) (dwdata1 >> 8)) << 24);
+
+ cmd->resp[2] = ((u8) (dwdata3)) |
+ (((u8) (dwdata2 >> 24)) << 8) |
+ (((u8) (dwdata2 >> 16)) << 16) |
+ (((u8) (dwdata2 >> 8)) << 24);
+
+ cmd->resp[3] = 0xff |
+ ((((u8) (dwdata3 >> 24))) << 8) |
+ (((u8) (dwdata3 >> 16)) << 16) |
+ (((u8) (dwdata3 >> 8)) << 24);
+ } else {
+ dwdata0 >>= 8;
+ cmd->resp[0] = ((dwdata0 & 0xff) << 24) |
+ (((dwdata0 >> 8) & 0xff) << 16) |
+ (((dwdata0 >> 16) & 0xff) << 8) | (dwdata1 & 0xff);
+
+ dwdata1 >>= 8;
+ cmd->resp[1] = ((dwdata1 & 0xff) << 24) |
+ (((dwdata1 >> 8) & 0xff) << 16) |
+ (((dwdata1 >> 16) & 0xff) << 8);
+ }
+}
+
+static void via_sdc_send_command(struct via_crdr_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ void __iomem *addrbase;
+ struct mmc_data *data;
+ u32 cmdctrl = 0;
+
+ WARN_ON(host->cmd);
+
+ data = cmd->data;
+ mod_timer(&host->timer, jiffies + HZ);
+ host->cmd = cmd;
+
+ /*Command index*/
+ cmdctrl = cmd->opcode << 8;
+
+ /*Response type*/
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_NONE;
+ break;
+ case MMC_RSP_R1:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1;
+ break;
+ case MMC_RSP_R1B:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1B;
+ break;
+ case MMC_RSP_R2:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_R2;
+ break;
+ case MMC_RSP_R3:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_R3;
+ break;
+ default:
+ pr_err("%s: cmd->flag is not valid\n", mmc_hostname(host->mmc));
+ break;
+ }
+
+ if (!(cmd->data))
+ goto nodata;
+
+ via_sdc_preparedata(host, data);
+
+ /*Command control*/
+ if (data->blocks > 1) {
+ if (data->flags & MMC_DATA_WRITE) {
+ cmdctrl |= VIA_CRDR_SDCTRL_WRITE;
+ cmdctrl |= VIA_CRDR_SDCTRL_MULTI_WR;
+ } else {
+ cmdctrl |= VIA_CRDR_SDCTRL_MULTI_RD;
+ }
+ } else {
+ if (data->flags & MMC_DATA_WRITE) {
+ cmdctrl |= VIA_CRDR_SDCTRL_WRITE;
+ cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_WR;
+ } else {
+ cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_RD;
+ }
+ }
+
+nodata:
+ if (cmd == host->mrq->stop)
+ cmdctrl |= VIA_CRDR_SDCTRL_STOP;
+
+ cmdctrl |= VIA_CRDR_SDCTRL_START;
+
+ addrbase = host->sdhc_mmiobase;
+ writel(cmd->arg, addrbase + VIA_CRDR_SDCARG);
+ writel(cmdctrl, addrbase + VIA_CRDR_SDCTRL);
+}
+
+static void via_sdc_finish_data(struct via_crdr_mmc_host *host)
+{
+ struct mmc_data *data;
+
+ BUG_ON(!host->data);
+
+ data = host->data;
+ host->data = NULL;
+
+ if (data->error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blocks * data->blksz;
+
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_READ) ?
+ PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+
+ if (data->stop)
+ via_sdc_send_command(host, data->stop);
+ else
+ tasklet_schedule(&host->finish_tasklet);
+}
+
+static void via_sdc_finish_command(struct via_crdr_mmc_host *host)
+{
+ via_sdc_get_response(host, host->cmd);
+
+ host->cmd->error = 0;
+
+ if (!host->cmd->data)
+ tasklet_schedule(&host->finish_tasklet);
+
+ host->cmd = NULL;
+}
+
+static void via_sdc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ void __iomem *addrbase;
+ struct via_crdr_mmc_host *host;
+ unsigned long flags;
+ u16 status;
+
+ host = mmc_priv(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ addrbase = host->pcictrl_mmiobase;
+ writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK);
+
+ status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
+ status &= VIA_CRDR_SDSTS_W1C_MASK;
+ writew(status, host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
+
+ WARN_ON(host->mrq != NULL);
+ host->mrq = mrq;
+
+ status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
+ if (!(status & VIA_CRDR_SDSTS_SLOTG) || host->reject) {
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+ } else {
+ via_sdc_send_command(host, mrq->cmd);
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void via_sdc_set_power(struct via_crdr_mmc_host *host,
+ unsigned short power, unsigned int on)
+{
+ unsigned long flags;
+ u8 gatt;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->power = (1 << power);
+
+ gatt = readb(host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ if (host->power == MMC_VDD_165_195)
+ gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
+ else
+ gatt |= VIA_CRDR_PCICLKGATT_3V3;
+ if (on)
+ gatt |= VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ else
+ gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ via_pwron_sleep(host);
+}
+
+static void via_sdc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct via_crdr_mmc_host *host;
+ unsigned long flags;
+ void __iomem *addrbase;
+ u32 org_data, sdextctrl;
+ u8 clock;
+
+ host = mmc_priv(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ addrbase = host->sdhc_mmiobase;
+ org_data = readl(addrbase + VIA_CRDR_SDBUSMODE);
+ sdextctrl = readl(addrbase + VIA_CRDR_SDEXTCTRL);
+
+ if (ios->bus_width == MMC_BUS_WIDTH_1)
+ org_data &= ~VIA_CRDR_SDMODE_4BIT;
+ else
+ org_data |= VIA_CRDR_SDMODE_4BIT;
+
+ if (ios->power_mode == MMC_POWER_OFF)
+ org_data &= ~VIA_CRDR_SDMODE_CLK_ON;
+ else
+ org_data |= VIA_CRDR_SDMODE_CLK_ON;
+
+ if (ios->timing == MMC_TIMING_SD_HS)
+ sdextctrl |= VIA_CRDR_SDEXTCTRL_HISPD;
+ else
+ sdextctrl &= ~VIA_CRDR_SDEXTCTRL_HISPD;
+
+ writel(org_data, addrbase + VIA_CRDR_SDBUSMODE);
+ writel(sdextctrl, addrbase + VIA_CRDR_SDEXTCTRL);
+
+ if (ios->clock >= 48000000)
+ clock = PCI_CLK_48M;
+ else if (ios->clock >= 33000000)
+ clock = PCI_CLK_33M;
+ else if (ios->clock >= 24000000)
+ clock = PCI_CLK_24M;
+ else if (ios->clock >= 16000000)
+ clock = PCI_CLK_16M;
+ else if (ios->clock >= 12000000)
+ clock = PCI_CLK_12M;
+ else if (ios->clock >= 8000000)
+ clock = PCI_CLK_8M;
+ else
+ clock = PCI_CLK_375K;
+
+ addrbase = host->pcictrl_mmiobase;
+ if (readb(addrbase + VIA_CRDR_PCISDCCLK) != clock)
+ writeb(clock, addrbase + VIA_CRDR_PCISDCCLK);
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (ios->power_mode != MMC_POWER_OFF)
+ via_sdc_set_power(host, ios->vdd, 1);
+ else
+ via_sdc_set_power(host, ios->vdd, 0);
+}
+
+static int via_sdc_get_ro(struct mmc_host *mmc)
+{
+ struct via_crdr_mmc_host *host;
+ unsigned long flags;
+ u16 status;
+
+ host = mmc_priv(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return !(status & VIA_CRDR_SDSTS_WP);
+}
+
+static const struct mmc_host_ops via_sdc_ops = {
+ .request = via_sdc_request,
+ .set_ios = via_sdc_set_ios,
+ .get_ro = via_sdc_get_ro,
+};
+
+static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
+{
+ unsigned long flags;
+ u8 gatt;
+
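+	/* Save the register state, power-cycle the pads via soft reset, then restore */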
+ spin_lock_irqsave(&host->lock, flags);
+
+ via_save_pcictrlreg(host);
+ via_save_sdcreg(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ if (host->power == MMC_VDD_165_195)
+ gatt &= VIA_CRDR_PCICLKGATT_3V3;
+ else
+ gatt |= VIA_CRDR_PCICLKGATT_3V3;
+ writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ via_pwron_sleep(host);
+ gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
+ writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ msleep(3);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ via_restore_pcictrlreg(host);
+ via_restore_sdcreg(host);
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void via_sdc_cmd_isr(struct via_crdr_mmc_host *host, u16 intmask)
+{
+ BUG_ON(intmask == 0);
+
+ if (!host->cmd) {
+ pr_err("%s: Got command interrupt 0x%x even "
+ "though no command operation was in progress.\n",
+ mmc_hostname(host->mmc), intmask);
+ return;
+ }
+
+ if (intmask & VIA_CRDR_SDSTS_CRTO)
+ host->cmd->error = -ETIMEDOUT;
+ else if (intmask & VIA_CRDR_SDSTS_SC)
+ host->cmd->error = -EILSEQ;
+
+ if (host->cmd->error)
+ tasklet_schedule(&host->finish_tasklet);
+ else if (intmask & VIA_CRDR_SDSTS_CRD)
+ via_sdc_finish_command(host);
+}
+
+static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask)
+{
+ BUG_ON(intmask == 0);
+
+ if (intmask & VIA_CRDR_SDSTS_DT)
+ host->data->error = -ETIMEDOUT;
+ else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC))
+ host->data->error = -EILSEQ;
+
+ via_sdc_finish_data(host);
+}
+
+static irqreturn_t via_sdc_isr(int irq, void *dev_id)
+{
+ struct via_crdr_mmc_host *sdhost = dev_id;
+ void __iomem *addrbase;
+ u8 pci_status;
+ u16 sd_status;
+ irqreturn_t result;
+
+ if (!sdhost)
+ return IRQ_NONE;
+
+ spin_lock(&sdhost->lock);
+
+ addrbase = sdhost->pcictrl_mmiobase;
+ pci_status = readb(addrbase + VIA_CRDR_PCIINTSTATUS);
+ if (!(pci_status & VIA_CRDR_PCIINTSTATUS_SDC)) {
+ result = IRQ_NONE;
+ goto out;
+ }
+
+ addrbase = sdhost->sdhc_mmiobase;
+ sd_status = readw(addrbase + VIA_CRDR_SDSTATUS);
+ sd_status &= VIA_CRDR_SDSTS_INT_MASK;
+ sd_status &= ~VIA_CRDR_SDSTS_IGN_MASK;
+ if (!sd_status) {
+ result = IRQ_NONE;
+ goto out;
+ }
+
+ if (sd_status & VIA_CRDR_SDSTS_CIR) {
+ writew(sd_status & VIA_CRDR_SDSTS_CIR,
+ addrbase + VIA_CRDR_SDSTATUS);
+
+ schedule_work(&sdhost->carddet_work);
+ }
+
+ sd_status &= ~VIA_CRDR_SDSTS_CIR;
+ if (sd_status & VIA_CRDR_SDSTS_CMD_MASK) {
+ writew(sd_status & VIA_CRDR_SDSTS_CMD_MASK,
+ addrbase + VIA_CRDR_SDSTATUS);
+ via_sdc_cmd_isr(sdhost, sd_status & VIA_CRDR_SDSTS_CMD_MASK);
+ }
+ if (sd_status & VIA_CRDR_SDSTS_DATA_MASK) {
+ writew(sd_status & VIA_CRDR_SDSTS_DATA_MASK,
+ addrbase + VIA_CRDR_SDSTATUS);
+ via_sdc_data_isr(sdhost, sd_status & VIA_CRDR_SDSTS_DATA_MASK);
+ }
+
+ sd_status &= ~(VIA_CRDR_SDSTS_CMD_MASK | VIA_CRDR_SDSTS_DATA_MASK);
+ if (sd_status) {
+ pr_err("%s: Unexpected interrupt 0x%x\n",
+ mmc_hostname(sdhost->mmc), sd_status);
+ writew(sd_status, addrbase + VIA_CRDR_SDSTATUS);
+ }
+
+ result = IRQ_HANDLED;
+
+ mmiowb();
+out:
+ spin_unlock(&sdhost->lock);
+
+ return result;
+}
+
+static void via_sdc_timeout(unsigned long ulongdata)
+{
+ struct via_crdr_mmc_host *sdhost;
+ unsigned long flags;
+
+ sdhost = (struct via_crdr_mmc_host *)ulongdata;
+
+ spin_lock_irqsave(&sdhost->lock, flags);
+
+ if (sdhost->mrq) {
+		pr_err("%s: Timeout waiting for hardware interrupt. "
+			"cmd:0x%x\n", mmc_hostname(sdhost->mmc),
+ sdhost->mrq->cmd->opcode);
+
+ if (sdhost->data) {
+ writel(VIA_CRDR_DMACTRL_SFTRST,
+ sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
+ sdhost->data->error = -ETIMEDOUT;
+ via_sdc_finish_data(sdhost);
+ } else {
+ if (sdhost->cmd)
+ sdhost->cmd->error = -ETIMEDOUT;
+ else
+ sdhost->mrq->cmd->error = -ETIMEDOUT;
+ tasklet_schedule(&sdhost->finish_tasklet);
+ }
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&sdhost->lock, flags);
+}
+
+static void via_sdc_tasklet_finish(unsigned long param)
+{
+ struct via_crdr_mmc_host *host;
+ unsigned long flags;
+ struct mmc_request *mrq;
+
+ host = (struct via_crdr_mmc_host *)param;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ del_timer(&host->timer);
+ mrq = host->mrq;
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void via_sdc_card_detect(struct work_struct *work)
+{
+ struct via_crdr_mmc_host *host;
+ void __iomem *addrbase;
+ unsigned long flags;
+ u16 status;
+
+ host = container_of(work, struct via_crdr_mmc_host, carddet_work);
+
+ addrbase = host->ddma_mmiobase;
+ writel(VIA_CRDR_DMACTRL_SFTRST, addrbase + VIA_CRDR_DMACTRL);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ addrbase = host->pcictrl_mmiobase;
+ writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK);
+
+ addrbase = host->sdhc_mmiobase;
+ status = readw(addrbase + VIA_CRDR_SDSTATUS);
+ if (!(status & VIA_CRDR_SDSTS_SLOTG)) {
+ if (host->mrq) {
+ pr_err("%s: Card removed during transfer!\n",
+ mmc_hostname(host->mmc));
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ via_reset_pcictrl(host);
+
+ spin_lock_irqsave(&host->lock, flags);
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ via_print_pcictrl(host);
+ via_print_sdchc(host);
+
+ mmc_detect_change(host->mmc, msecs_to_jiffies(500));
+}
+
+static void via_init_mmc_host(struct via_crdr_mmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ void __iomem *addrbase;
+ u32 lenreg;
+ u32 status;
+
+ init_timer(&host->timer);
+ host->timer.data = (unsigned long)host;
+ host->timer.function = via_sdc_timeout;
+
+ spin_lock_init(&host->lock);
+
+ mmc->f_min = VIA_CRDR_MIN_CLOCK;
+ mmc->f_max = VIA_CRDR_MAX_CLOCK;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED;
+ mmc->ops = &via_sdc_ops;
+
+	/* Hardware cannot do scatter lists */
+ mmc->max_segs = 1;
+
+ mmc->max_blk_size = VIA_CRDR_MAX_BLOCK_LENGTH;
+ mmc->max_blk_count = VIA_CRDR_MAX_BLOCK_COUNT;
+
+ mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_req_size = mmc->max_seg_size;
+
+ INIT_WORK(&host->carddet_work, via_sdc_card_detect);
+
+ tasklet_init(&host->finish_tasklet, via_sdc_tasklet_finish,
+ (unsigned long)host);
+
+ addrbase = host->sdhc_mmiobase;
+ writel(0x0, addrbase + VIA_CRDR_SDINTMASK);
+ msleep(1);
+
+ lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
+ writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN);
+
+ status = readw(addrbase + VIA_CRDR_SDSTATUS);
+ status &= VIA_CRDR_SDSTS_W1C_MASK;
+ writew(status, addrbase + VIA_CRDR_SDSTATUS);
+
+ status = readw(addrbase + VIA_CRDR_SDSTATUS2);
+ status |= VIA_CRDR_SDSTS_CFE;
+ writew(status, addrbase + VIA_CRDR_SDSTATUS2);
+
+ writeb(0x0, addrbase + VIA_CRDR_SDEXTCTRL);
+
+ writel(VIA_CRDR_SDACTIVE_INTMASK, addrbase + VIA_CRDR_SDINTMASK);
+ msleep(1);
+}
+
+static int via_sd_probe(struct pci_dev *pcidev,
+ const struct pci_device_id *id)
+{
+ struct mmc_host *mmc;
+ struct via_crdr_mmc_host *sdhost;
+ u32 base, len;
+ u8 gatt;
+ int ret;
+
+ pr_info(DRV_NAME
+ ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n",
+ pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
+ (int)pcidev->revision);
+
+ ret = pci_enable_device(pcidev);
+ if (ret)
+ return ret;
+
+ ret = pci_request_regions(pcidev, DRV_NAME);
+ if (ret)
+ goto disable;
+
+ pci_write_config_byte(pcidev, VIA_CRDR_PCI_WORK_MODE, 0);
+ pci_write_config_byte(pcidev, VIA_CRDR_PCI_DBG_MODE, 0);
+
+ mmc = mmc_alloc_host(sizeof(struct via_crdr_mmc_host), &pcidev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ sdhost = mmc_priv(mmc);
+ sdhost->mmc = mmc;
+ dev_set_drvdata(&pcidev->dev, sdhost);
+
+ len = pci_resource_len(pcidev, 0);
+ base = pci_resource_start(pcidev, 0);
+ sdhost->mmiobase = ioremap_nocache(base, len);
+ if (!sdhost->mmiobase) {
+ ret = -ENOMEM;
+ goto free_mmc_host;
+ }
+
+ sdhost->sdhc_mmiobase =
+ sdhost->mmiobase + VIA_CRDR_SDC_OFF;
+ sdhost->ddma_mmiobase =
+ sdhost->mmiobase + VIA_CRDR_DDMA_OFF;
+ sdhost->pcictrl_mmiobase =
+ sdhost->mmiobase + VIA_CRDR_PCICTRL_OFF;
+
+ sdhost->power = MMC_VDD_165_195;
+
+ gatt = VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ via_pwron_sleep(sdhost);
+ gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ msleep(3);
+
+ via_init_mmc_host(sdhost);
+
+ ret =
+ request_irq(pcidev->irq, via_sdc_isr, IRQF_SHARED, DRV_NAME,
+ sdhost);
+ if (ret)
+ goto unmap;
+
+ writeb(VIA_CRDR_PCIINTCTRL_SDCIRQEN,
+ sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
+ writeb(VIA_CRDR_PCITMOCTRL_1024MS,
+ sdhost->pcictrl_mmiobase + VIA_CRDR_PCITMOCTRL);
+
+ /* device-specific quirks */
+ if (pcidev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
+ pcidev->subsystem_device == 0x3891)
+ sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY;
+
+ mmc_add_host(mmc);
+
+ return 0;
+
+unmap:
+ iounmap(sdhost->mmiobase);
+free_mmc_host:
+ dev_set_drvdata(&pcidev->dev, NULL);
+ mmc_free_host(mmc);
+release:
+ pci_release_regions(pcidev);
+disable:
+ pci_disable_device(pcidev);
+
+ return ret;
+}
+
+static void via_sd_remove(struct pci_dev *pcidev)
+{
+ struct via_crdr_mmc_host *sdhost = pci_get_drvdata(pcidev);
+ unsigned long flags;
+ u8 gatt;
+
+ spin_lock_irqsave(&sdhost->lock, flags);
+
+ /* Ensure we don't accept more commands from mmc layer */
+ sdhost->reject = 1;
+
+ /* Disable generating further interrupts */
+ writeb(0x0, sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
+ mmiowb();
+
+ if (sdhost->mrq) {
+ pr_err("%s: Controller removed during "
+ "transfer\n", mmc_hostname(sdhost->mmc));
+
+ /* make sure all DMA is stopped */
+ writel(VIA_CRDR_DMACTRL_SFTRST,
+ sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
+ mmiowb();
+ sdhost->mrq->cmd->error = -ENOMEDIUM;
+ if (sdhost->mrq->stop)
+ sdhost->mrq->stop->error = -ENOMEDIUM;
+ tasklet_schedule(&sdhost->finish_tasklet);
+ }
+ spin_unlock_irqrestore(&sdhost->lock, flags);
+
+ mmc_remove_host(sdhost->mmc);
+
+ free_irq(pcidev->irq, sdhost);
+
+ del_timer_sync(&sdhost->timer);
+
+ tasklet_kill(&sdhost->finish_tasklet);
+
+ /* switch off power */
+ gatt = readb(sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+
+ iounmap(sdhost->mmiobase);
+ dev_set_drvdata(&pcidev->dev, NULL);
+ mmc_free_host(sdhost->mmc);
+ pci_release_regions(pcidev);
+ pci_disable_device(pcidev);
+
+ pr_info(DRV_NAME
+ ": VIA SDMMC controller at %s [%04x:%04x] has been removed\n",
+ pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
+}
+
+#ifdef CONFIG_PM
+
+static void via_init_sdc_pm(struct via_crdr_mmc_host *host)
+{
+ struct sdhcreg *pm_sdhcreg;
+ void __iomem *addrbase;
+ u32 lenreg;
+ u16 status;
+
+ pm_sdhcreg = &(host->pm_sdhc_reg);
+ addrbase = host->sdhc_mmiobase;
+
+ writel(0x0, addrbase + VIA_CRDR_SDINTMASK);
+
+ lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
+ writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN);
+
+ status = readw(addrbase + VIA_CRDR_SDSTATUS);
+ status &= VIA_CRDR_SDSTS_W1C_MASK;
+ writew(status, addrbase + VIA_CRDR_SDSTATUS);
+
+ status = readw(addrbase + VIA_CRDR_SDSTATUS2);
+ status |= VIA_CRDR_SDSTS_CFE;
+ writew(status, addrbase + VIA_CRDR_SDSTATUS2);
+
+ writel(pm_sdhcreg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL);
+ writel(pm_sdhcreg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG);
+ writel(pm_sdhcreg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK);
+ writel(pm_sdhcreg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO);
+ writel(pm_sdhcreg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL);
+ writel(pm_sdhcreg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL);
+
+ via_print_pcictrl(host);
+ via_print_sdchc(host);
+}
+
+static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
+{
+ struct via_crdr_mmc_host *host;
+
+ host = pci_get_drvdata(pcidev);
+
+ via_save_pcictrlreg(host);
+ via_save_sdcreg(host);
+
+ pci_save_state(pcidev);
+ pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
+ pci_disable_device(pcidev);
+ pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
+
+ return 0;
+}
+
+static int via_sd_resume(struct pci_dev *pcidev)
+{
+ struct via_crdr_mmc_host *sdhost;
+ int ret = 0;
+ u8 gatt;
+
+ sdhost = pci_get_drvdata(pcidev);
+
+ gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ if (sdhost->power == MMC_VDD_165_195)
+ gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
+ else
+ gatt |= VIA_CRDR_PCICLKGATT_3V3;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ via_pwron_sleep(sdhost);
+ gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ msleep(3);
+
+ msleep(100);
+
+ pci_set_power_state(pcidev, PCI_D0);
+ pci_restore_state(pcidev);
+ ret = pci_enable_device(pcidev);
+ if (ret)
+ return ret;
+
+ via_restore_pcictrlreg(sdhost);
+ via_init_sdc_pm(sdhost);
+
+ return ret;
+}
+
+#else /* CONFIG_PM */
+
+#define via_sd_suspend NULL
+#define via_sd_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct pci_driver via_sd_driver = {
+ .name = DRV_NAME,
+ .id_table = via_ids,
+ .probe = via_sd_probe,
+ .remove = via_sd_remove,
+ .suspend = via_sd_suspend,
+ .resume = via_sd_resume,
+};
+
+module_pci_driver(via_sd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("VIA Technologies Inc.");
+MODULE_DESCRIPTION("VIA SD/MMC Card Interface driver");
diff --git a/kernel/drivers/mmc/host/vub300.c b/kernel/drivers/mmc/host/vub300.c
new file mode 100644
index 000000000..fbabbb82b
--- /dev/null
+++ b/kernel/drivers/mmc/host/vub300.c
@@ -0,0 +1,2488 @@
+/*
+ * Remote VUB300 SDIO/SDmem Host Controller Driver
+ *
+ * Copyright (C) 2010 Elan Digital Systems Limited
+ *
+ * based on USB Skeleton driver - 2.2
+ *
+ * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2
+ *
+ * VUB300: is a USB 2.0 client device with a single SDIO/SDmem/MMC slot
+ * Any SDIO/SDmem/MMC device plugged into the VUB300 will appear,
+ * by virtue of this driver, to have been plugged into a local
+ * SDIO host controller, similar to, say, a PCI Ricoh controller
+ * This is because this kernel device driver is both a USB 2.0
+ * client device driver AND an MMC host controller driver. Thus
+ * if there is an existing driver for the inserted SDIO/SDmem/MMC
+ * device then that driver will be used by the kernel to manage
+ * the device in exactly the same fashion as if it had been
+ * directly plugged into, say, a local pci bus Ricoh controller
+ *
+ * RANT: this driver was written using a display 128x48 - converting it
+ * to a line width of 80 makes it very difficult to support. In
+ * particular functions have been broken down into sub functions
+ * and the original meaningful names have been shortened into
+ * cryptic ones.
+ * The problem is that executing a fragment of code subject to
+ * two conditions means an indentation of 24, thus leaving only
+ * 56 characters for a C statement. And that is quite ridiculous!
+ *
+ * Data types: data passed to/from the VUB300 is fixed to a number of
+ * bits and driver data fields reflect that limit by using
+ * u8, u16, u32
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+#include <linux/mutex.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/workqueue.h>
+#include <linux/ctype.h>
+#include <linux/firmware.h>
+#include <linux/scatterlist.h>
+
+struct host_controller_info {
+ u8 info_size;
+ u16 firmware_version;
+ u8 number_of_ports;
+} __packed;
+
+#define FIRMWARE_BLOCK_BOUNDARY 1024
+struct sd_command_header {
+ u8 header_size;
+ u8 header_type;
+ u8 port_number;
+ u8 command_type; /* Bit7 - Rd/Wr */
+ u8 command_index;
+ u8 transfer_size[4]; /* ReadSize + ReadSize */
+ u8 response_type;
+ u8 arguments[4];
+ u8 block_count[2];
+ u8 block_size[2];
+ u8 block_boundary[2];
+ u8 reserved[44]; /* to pad out to 64 bytes */
+} __packed;
+
+struct sd_irqpoll_header {
+ u8 header_size;
+ u8 header_type;
+ u8 port_number;
+ u8 command_type; /* Bit7 - Rd/Wr */
+ u8 padding[16]; /* don't ask why !! */
+ u8 poll_timeout_msb;
+ u8 poll_timeout_lsb;
+ u8 reserved[42]; /* to pad out to 64 bytes */
+} __packed;
+
+struct sd_common_header {
+ u8 header_size;
+ u8 header_type;
+ u8 port_number;
+} __packed;
+
+struct sd_response_header {
+ u8 header_size;
+ u8 header_type;
+ u8 port_number;
+ u8 command_type;
+ u8 command_index;
+ u8 command_response[0];
+} __packed;
+
+struct sd_status_header {
+ u8 header_size;
+ u8 header_type;
+ u8 port_number;
+ u16 port_flags;
+ u32 sdio_clock;
+ u16 host_header_size;
+ u16 func_header_size;
+ u16 ctrl_header_size;
+} __packed;
+
+struct sd_error_header {
+ u8 header_size;
+ u8 header_type;
+ u8 port_number;
+ u8 error_code;
+} __packed;
+
+struct sd_interrupt_header {
+ u8 header_size;
+ u8 header_type;
+ u8 port_number;
+} __packed;
+
+struct offload_registers_access {
+ u8 command_byte[4];
+ u8 Respond_Byte[4];
+} __packed;
+
+#define INTERRUPT_REGISTER_ACCESSES 15
+struct sd_offloaded_interrupt {
+ u8 header_size;
+ u8 header_type;
+ u8 port_number;
+ struct offload_registers_access reg[INTERRUPT_REGISTER_ACCESSES];
+} __packed;
+
+struct sd_register_header {
+ u8 header_size;
+ u8 header_type;
+ u8 port_number;
+ u8 command_type;
+ u8 command_index;
+ u8 command_response[6];
+} __packed;
+
+#define PIGGYBACK_REGISTER_ACCESSES 14
+struct sd_offloaded_piggyback {
+ struct sd_register_header sdio;
+ struct offload_registers_access reg[PIGGYBACK_REGISTER_ACCESSES];
+} __packed;
+
+union sd_response {
+ struct sd_common_header common;
+ struct sd_status_header status;
+ struct sd_error_header error;
+ struct sd_interrupt_header interrupt;
+ struct sd_response_header response;
+ struct sd_offloaded_interrupt irq;
+ struct sd_offloaded_piggyback pig;
+} __packed;
+
+union sd_command {
+ struct sd_command_header head;
+ struct sd_irqpoll_header poll;
+} __packed;
+
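+/*
+ * SDRT_* mirrors the response types defined by the SD/SDIO
+ * specifications (R1, R1b, R2 ... R7), with zero left as an
+ * explicit "unspecified" value.
+ */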
+enum SD_RESPONSE_TYPE {
+ SDRT_UNSPECIFIED = 0,
+ SDRT_NONE,
+ SDRT_1,
+ SDRT_1B,
+ SDRT_2,
+ SDRT_3,
+ SDRT_4,
+ SDRT_5,
+ SDRT_5B,
+ SDRT_6,
+ SDRT_7,
+};
+
+#define RESPONSE_INTERRUPT 0x01
+#define RESPONSE_ERROR 0x02
+#define RESPONSE_STATUS 0x03
+#define RESPONSE_IRQ_DISABLED 0x05
+#define RESPONSE_IRQ_ENABLED 0x06
+#define RESPONSE_PIGGYBACKED 0x07
+#define RESPONSE_NO_INTERRUPT 0x08
+#define RESPONSE_PIG_DISABLED 0x09
+#define RESPONSE_PIG_ENABLED 0x0A
+#define SD_ERROR_1BIT_TIMEOUT 0x01
+#define SD_ERROR_4BIT_TIMEOUT 0x02
+#define SD_ERROR_1BIT_CRC_WRONG 0x03
+#define SD_ERROR_4BIT_CRC_WRONG 0x04
+#define SD_ERROR_1BIT_CRC_ERROR 0x05
+#define SD_ERROR_4BIT_CRC_ERROR 0x06
+#define SD_ERROR_NO_CMD_ENDBIT 0x07
+#define SD_ERROR_NO_1BIT_DATEND 0x08
+#define SD_ERROR_NO_4BIT_DATEND 0x09
+#define SD_ERROR_1BIT_UNEXPECTED_TIMEOUT 0x0A
+#define SD_ERROR_4BIT_UNEXPECTED_TIMEOUT 0x0B
+#define SD_ERROR_ILLEGAL_COMMAND 0x0C
+#define SD_ERROR_NO_DEVICE 0x0D
+#define SD_ERROR_TRANSFER_LENGTH 0x0E
+#define SD_ERROR_1BIT_DATA_TIMEOUT 0x0F
+#define SD_ERROR_4BIT_DATA_TIMEOUT 0x10
+#define SD_ERROR_ILLEGAL_STATE 0x11
+#define SD_ERROR_UNKNOWN_ERROR 0x12
+#define SD_ERROR_RESERVED_ERROR 0x13
+#define SD_ERROR_INVALID_FUNCTION 0x14
+#define SD_ERROR_OUT_OF_RANGE 0x15
+#define SD_ERROR_STAT_CMD 0x16
+#define SD_ERROR_STAT_DATA 0x17
+#define SD_ERROR_STAT_CMD_TIMEOUT 0x18
+#define SD_ERROR_SDCRDY_STUCK 0x19
+#define SD_ERROR_UNHANDLED 0x1A
+#define SD_ERROR_OVERRUN 0x1B
+#define SD_ERROR_PIO_TIMEOUT 0x1C
+
+#define FUN(c) (0x000007 & (c->arg>>28))
+#define REG(c) (0x01FFFF & (c->arg>>9))
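+/*
+ * per the SDIO specification, CMD52/CMD53 carry the function number
+ * in bits 30:28 of the argument and the 17-bit register address in
+ * bits 25:9, which is what FUN() and REG() extract
+ */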
+
+static bool limit_speed_to_24_MHz;
+module_param(limit_speed_to_24_MHz, bool, 0644);
+MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz");
+
+static bool pad_input_to_usb_pkt;
+module_param(pad_input_to_usb_pkt, bool, 0644);
+MODULE_PARM_DESC(pad_input_to_usb_pkt,
+ "Pad USB data input transfers to whole USB Packet");
+
+static bool disable_offload_processing;
+module_param(disable_offload_processing, bool, 0644);
+MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing");
+
+static bool force_1_bit_data_xfers;
+module_param(force_1_bit_data_xfers, bool, 0644);
+MODULE_PARM_DESC(force_1_bit_data_xfers,
+ "Force SDIO Data Transfers to 1-bit Mode");
+
+static bool force_polling_for_irqs;
+module_param(force_polling_for_irqs, bool, 0644);
+MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts");
+
+static int firmware_irqpoll_timeout = 1024;
+module_param(firmware_irqpoll_timeout, int, 0644);
+MODULE_PARM_DESC(firmware_irqpoll_timeout, "VUB300 firmware irqpoll timeout");
+
+static int force_max_req_size = 128;
+module_param(force_max_req_size, int, 0644);
+MODULE_PARM_DESC(force_max_req_size, "set max request size in kBytes");
+
+#ifdef SMSC_DEVELOPMENT_BOARD
+static int firmware_rom_wait_states = 0x04;
+#else
+static int firmware_rom_wait_states = 0x1C;
+#endif
+
+module_param(firmware_rom_wait_states, int, 0644);
+MODULE_PARM_DESC(firmware_rom_wait_states,
+ "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
+
+#define ELAN_VENDOR_ID 0x2201
+#define VUB300_VENDOR_ID 0x0424
+#define VUB300_PRODUCT_ID 0x012C
+static struct usb_device_id vub300_table[] = {
+ {USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)},
+ {USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)},
+ {} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, vub300_table);
+
+static struct workqueue_struct *cmndworkqueue;
+static struct workqueue_struct *pollworkqueue;
+static struct workqueue_struct *deadworkqueue;
+
+static inline int interface_to_InterfaceNumber(struct usb_interface *interface)
+{
+ if (!interface)
+ return -1;
+ if (!interface->cur_altsetting)
+ return -1;
+ return interface->cur_altsetting->desc.bInterfaceNumber;
+}
+
+struct sdio_register {
+ unsigned func_num:3;
+ unsigned sdio_reg:17;
+ unsigned activate:1;
+ unsigned prepared:1;
+ unsigned regvalue:8;
+ unsigned response:8;
+ unsigned sparebit:26;
+};
+
+struct vub300_mmc_host {
+ struct usb_device *udev;
+ struct usb_interface *interface;
+ struct kref kref;
+ struct mutex cmd_mutex;
+ struct mutex irq_mutex;
+ char vub_name[3 + (9 * 8) + 4 + 1]; /* max of 7 sdio fn's */
+ u8 cmnd_out_ep; /* EndPoint for commands */
+ u8 cmnd_res_ep; /* EndPoint for responses */
+ u8 data_out_ep; /* EndPoint for out data */
+ u8 data_inp_ep; /* EndPoint for inp data */
+ bool card_powered;
+ bool card_present;
+ bool read_only;
+ bool large_usb_packets;
+ bool app_spec; /* ApplicationSpecific */
+ bool irq_enabled; /* by the MMC CORE */
+ bool irq_disabled; /* in the firmware */
+ unsigned bus_width:4;
+ u8 total_offload_count;
+ u8 dynamic_register_count;
+ u8 resp_len;
+ u32 datasize;
+ int errors;
+ int usb_transport_fail;
+ int usb_timed_out;
+ int irqs_queued;
+ struct sdio_register sdio_register[16];
+ struct offload_interrupt_function_register {
+#define MAXREGBITS 4
+#define MAXREGS (1<<MAXREGBITS)
+#define MAXREGMASK (MAXREGS-1)
+ u8 offload_count;
+ u32 offload_point;
+ struct offload_registers_access reg[MAXREGS];
+ } fn[8];
+ u16 fbs[8]; /* Function Block Size */
+ struct mmc_command *cmd;
+ struct mmc_request *req;
+ struct mmc_data *data;
+ struct mmc_host *mmc;
+ struct urb *urb;
+ struct urb *command_out_urb;
+ struct urb *command_res_urb;
+ struct completion command_complete;
+ struct completion irqpoll_complete;
+ union sd_command cmnd;
+ union sd_response resp;
+ struct timer_list sg_transfer_timer;
+ struct usb_sg_request sg_request;
+ struct timer_list inactivity_timer;
+ struct work_struct deadwork;
+ struct work_struct cmndwork;
+ struct delayed_work pollwork;
+ struct host_controller_info hc_info;
+ struct sd_status_header system_port_status;
+ u8 padded_buffer[64];
+};
+
+#define kref_to_vub300_mmc_host(d) container_of(d, struct vub300_mmc_host, kref)
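+/*
+ * bRequest codes for the vendor-specific USB control transfers
+ * understood by the VUB300 firmware (used with usb_control_msg)
+ */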
+#define SET_TRANSFER_PSEUDOCODE 21
+#define SET_INTERRUPT_PSEUDOCODE 20
+#define SET_FAILURE_MODE 18
+#define SET_ROM_WAIT_STATES 16
+#define SET_IRQ_ENABLE 13
+#define SET_CLOCK_SPEED 11
+#define SET_FUNCTION_BLOCK_SIZE 9
+#define SET_SD_DATA_MODE 6
+#define SET_SD_POWER 4
+#define ENTER_DFU_MODE 3
+#define GET_HC_INF0 1
+#define GET_SYSTEM_PORT_STATUS 0
+
+static void vub300_delete(struct kref *kref)
+{ /* kref callback - softirq */
+ struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref);
+ struct mmc_host *mmc = vub300->mmc;
+ usb_free_urb(vub300->command_out_urb);
+ vub300->command_out_urb = NULL;
+ usb_free_urb(vub300->command_res_urb);
+ vub300->command_res_urb = NULL;
+ usb_put_dev(vub300->udev);
+ mmc_free_host(mmc);
+ /*
+ * and hence also frees vub300
+	 * which is contained at the end of struct mmc_host
+ */
+}
+
+static void vub300_queue_cmnd_work(struct vub300_mmc_host *vub300)
+{
+ kref_get(&vub300->kref);
+ if (queue_work(cmndworkqueue, &vub300->cmndwork)) {
+ /*
+ * then the cmndworkqueue was not previously
+		 * running and the above get ref is obviously
+		 * required and will be put when the thread
+ * terminates by a specific call
+ */
+ } else {
+ /*
+ * the cmndworkqueue was already running from
+ * a previous invocation and thus to keep the
+ * kref counts correct we must undo the get
+ */
+ kref_put(&vub300->kref, vub300_delete);
+ }
+}
+
+static void vub300_queue_poll_work(struct vub300_mmc_host *vub300, int delay)
+{
+ kref_get(&vub300->kref);
+ if (queue_delayed_work(pollworkqueue, &vub300->pollwork, delay)) {
+ /*
+ * then the pollworkqueue was not previously
+		 * running and the above get ref is obviously
+		 * required and will be put when the thread
+ * terminates by a specific call
+ */
+ } else {
+ /*
+ * the pollworkqueue was already running from
+ * a previous invocation and thus to keep the
+ * kref counts correct we must undo the get
+ */
+ kref_put(&vub300->kref, vub300_delete);
+ }
+}
+
+static void vub300_queue_dead_work(struct vub300_mmc_host *vub300)
+{
+ kref_get(&vub300->kref);
+ if (queue_work(deadworkqueue, &vub300->deadwork)) {
+ /*
+ * then the deadworkqueue was not previously
+		 * running and the above get ref is obviously
+		 * required and will be put when the thread
+ * terminates by a specific call
+ */
+ } else {
+ /*
+ * the deadworkqueue was already running from
+ * a previous invocation and thus to keep the
+ * kref counts correct we must undo the get
+ */
+ kref_put(&vub300->kref, vub300_delete);
+ }
+}
+
+static void irqpoll_res_completed(struct urb *urb)
+{ /* urb completion handler - hardirq */
+ struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
+ if (urb->status)
+ vub300->usb_transport_fail = urb->status;
+ complete(&vub300->irqpoll_complete);
+}
+
+static void irqpoll_out_completed(struct urb *urb)
+{ /* urb completion handler - hardirq */
+ struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
+ if (urb->status) {
+ vub300->usb_transport_fail = urb->status;
+ complete(&vub300->irqpoll_complete);
+ return;
+ } else {
+ int ret;
+ unsigned int pipe =
+ usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
+ usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
+ &vub300->resp, sizeof(vub300->resp),
+ irqpoll_res_completed, vub300);
+ vub300->command_res_urb->actual_length = 0;
+ ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
+ if (ret) {
+ vub300->usb_transport_fail = ret;
+ complete(&vub300->irqpoll_complete);
+ }
+ return;
+ }
+}
+
+static void send_irqpoll(struct vub300_mmc_host *vub300)
+{
+ /* cmd_mutex is held by vub300_pollwork_thread */
+ int retval;
+ int timeout = 0xFFFF & (0x0001FFFF - firmware_irqpoll_timeout);
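+	/*
+	 * the value sent works out to 0xFFFF - firmware_irqpoll_timeout,
+	 * i.e. the firmware appears to count this 16-bit register up
+	 * towards 0xFFFF before giving up the poll
+	 */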
+ vub300->cmnd.poll.header_size = 22;
+ vub300->cmnd.poll.header_type = 1;
+ vub300->cmnd.poll.port_number = 0;
+ vub300->cmnd.poll.command_type = 2;
+ vub300->cmnd.poll.poll_timeout_lsb = 0xFF & (unsigned)timeout;
+ vub300->cmnd.poll.poll_timeout_msb = 0xFF & (unsigned)(timeout >> 8);
+ usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
+ usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep)
+ , &vub300->cmnd, sizeof(vub300->cmnd)
+ , irqpoll_out_completed, vub300);
+ retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
+ if (0 > retval) {
+ vub300->usb_transport_fail = retval;
+ vub300_queue_poll_work(vub300, 1);
+ complete(&vub300->irqpoll_complete);
+ return;
+ } else {
+ return;
+ }
+}
+
+static void new_system_port_status(struct vub300_mmc_host *vub300)
+{
+ int old_card_present = vub300->card_present;
+ int new_card_present =
+ (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
+ vub300->read_only =
+ (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
+ if (new_card_present && !old_card_present) {
+ dev_info(&vub300->udev->dev, "card just inserted\n");
+ vub300->card_present = 1;
+ vub300->bus_width = 0;
+ if (disable_offload_processing)
+ strncpy(vub300->vub_name, "EMPTY Processing Disabled",
+ sizeof(vub300->vub_name));
+ else
+ vub300->vub_name[0] = 0;
+ mmc_detect_change(vub300->mmc, 1);
+ } else if (!new_card_present && old_card_present) {
+ dev_info(&vub300->udev->dev, "card just ejected\n");
+ vub300->card_present = 0;
+ mmc_detect_change(vub300->mmc, 0);
+ } else {
+ /* no change */
+ }
+}
+
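+/*
+ * each SDIO function keeps a small ring of offloaded register
+ * accesses: offload_point appears to index the oldest entry,
+ * offload_count entries follow it, and MAXREGMASK wraps the index
+ * within the MAXREGS slots
+ */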
+static void __add_offloaded_reg_to_fifo(struct vub300_mmc_host *vub300,
+ struct offload_registers_access
+ *register_access, u8 func)
+{
+ u8 r = vub300->fn[func].offload_point + vub300->fn[func].offload_count;
+ memcpy(&vub300->fn[func].reg[MAXREGMASK & r], register_access,
+ sizeof(struct offload_registers_access));
+ vub300->fn[func].offload_count += 1;
+ vub300->total_offload_count += 1;
+}
+
+static void add_offloaded_reg(struct vub300_mmc_host *vub300,
+ struct offload_registers_access *register_access)
+{
+ u32 Register = ((0x03 & register_access->command_byte[0]) << 15)
+ | ((0xFF & register_access->command_byte[1]) << 7)
+ | ((0xFE & register_access->command_byte[2]) >> 1);
+ u8 func = ((0x70 & register_access->command_byte[0]) >> 4);
+ u8 regs = vub300->dynamic_register_count;
+ u8 i = 0;
+ while (0 < regs-- && 1 == vub300->sdio_register[i].activate) {
+ if (vub300->sdio_register[i].func_num == func &&
+ vub300->sdio_register[i].sdio_reg == Register) {
+ if (vub300->sdio_register[i].prepared == 0)
+ vub300->sdio_register[i].prepared = 1;
+ vub300->sdio_register[i].response =
+ register_access->Respond_Byte[2];
+ vub300->sdio_register[i].regvalue =
+ register_access->Respond_Byte[3];
+ return;
+ } else {
+ i += 1;
+ continue;
+ }
+	}
+ __add_offloaded_reg_to_fifo(vub300, register_access, func);
+}
+
+static void check_vub300_port_status(struct vub300_mmc_host *vub300)
+{
+ /*
+ * cmd_mutex is held by vub300_pollwork_thread,
+ * vub300_deadwork_thread or vub300_cmndwork_thread
+ */
+ int retval;
+ retval =
+ usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
+ GET_SYSTEM_PORT_STATUS,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0x0000, 0x0000, &vub300->system_port_status,
+ sizeof(vub300->system_port_status), HZ);
+ if (sizeof(vub300->system_port_status) == retval)
+ new_system_port_status(vub300);
+}
+
+static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300)
+{
+ /* cmd_mutex is held by vub300_pollwork_thread */
+ if (vub300->command_res_urb->actual_length == 0)
+ return;
+
+ switch (vub300->resp.common.header_type) {
+ case RESPONSE_INTERRUPT:
+ mutex_lock(&vub300->irq_mutex);
+ if (vub300->irq_enabled)
+ mmc_signal_sdio_irq(vub300->mmc);
+ else
+ vub300->irqs_queued += 1;
+ vub300->irq_disabled = 1;
+ mutex_unlock(&vub300->irq_mutex);
+ break;
+ case RESPONSE_ERROR:
+ if (vub300->resp.error.error_code == SD_ERROR_NO_DEVICE)
+ check_vub300_port_status(vub300);
+ break;
+ case RESPONSE_STATUS:
+ vub300->system_port_status = vub300->resp.status;
+ new_system_port_status(vub300);
+ if (!vub300->card_present)
+ vub300_queue_poll_work(vub300, HZ / 5);
+ break;
+ case RESPONSE_IRQ_DISABLED:
+ {
+ int offloaded_data_length = vub300->resp.common.header_size - 3;
+ int register_count = offloaded_data_length >> 3;
+ int ri = 0;
+ while (register_count--) {
+ add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
+ ri += 1;
+ }
+ mutex_lock(&vub300->irq_mutex);
+ if (vub300->irq_enabled)
+ mmc_signal_sdio_irq(vub300->mmc);
+ else
+ vub300->irqs_queued += 1;
+ vub300->irq_disabled = 1;
+ mutex_unlock(&vub300->irq_mutex);
+ break;
+ }
+ case RESPONSE_IRQ_ENABLED:
+ {
+ int offloaded_data_length = vub300->resp.common.header_size - 3;
+ int register_count = offloaded_data_length >> 3;
+ int ri = 0;
+ while (register_count--) {
+ add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
+ ri += 1;
+ }
+ mutex_lock(&vub300->irq_mutex);
+ if (vub300->irq_enabled)
+ mmc_signal_sdio_irq(vub300->mmc);
+		else
+			vub300->irqs_queued += 1;
+ vub300->irq_disabled = 0;
+ mutex_unlock(&vub300->irq_mutex);
+ break;
+ }
+ case RESPONSE_NO_INTERRUPT:
+ vub300_queue_poll_work(vub300, 1);
+ break;
+ default:
+ break;
+ }
+}
+
+static void __do_poll(struct vub300_mmc_host *vub300)
+{
+ /* cmd_mutex is held by vub300_pollwork_thread */
+ unsigned long commretval;
+ mod_timer(&vub300->inactivity_timer, jiffies + HZ);
+ init_completion(&vub300->irqpoll_complete);
+ send_irqpoll(vub300);
+ commretval = wait_for_completion_timeout(&vub300->irqpoll_complete,
+ msecs_to_jiffies(500));
+ if (vub300->usb_transport_fail) {
+ /* no need to do anything */
+ } else if (commretval == 0) {
+ vub300->usb_timed_out = 1;
+ usb_kill_urb(vub300->command_out_urb);
+ usb_kill_urb(vub300->command_res_urb);
+ } else { /* commretval > 0 */
+ __vub300_irqpoll_response(vub300);
+ }
+}
+
+/* this thread runs only when the driver
+ * is trying to poll the device for an IRQ
+ */
+static void vub300_pollwork_thread(struct work_struct *work)
+{ /* NOT irq */
+ struct vub300_mmc_host *vub300 = container_of(work,
+ struct vub300_mmc_host, pollwork.work);
+ if (!vub300->interface) {
+ kref_put(&vub300->kref, vub300_delete);
+ return;
+ }
+ mutex_lock(&vub300->cmd_mutex);
+ if (vub300->cmd) {
+ vub300_queue_poll_work(vub300, 1);
+ } else if (!vub300->card_present) {
+ /* no need to do anything */
+ } else { /* vub300->card_present */
+ mutex_lock(&vub300->irq_mutex);
+ if (!vub300->irq_enabled) {
+ mutex_unlock(&vub300->irq_mutex);
+ } else if (vub300->irqs_queued) {
+ vub300->irqs_queued -= 1;
+ mmc_signal_sdio_irq(vub300->mmc);
+ mod_timer(&vub300->inactivity_timer, jiffies + HZ);
+ mutex_unlock(&vub300->irq_mutex);
+ } else { /* NOT vub300->irqs_queued */
+ mutex_unlock(&vub300->irq_mutex);
+ __do_poll(vub300);
+ }
+ }
+ mutex_unlock(&vub300->cmd_mutex);
+ kref_put(&vub300->kref, vub300_delete);
+}
+
+static void vub300_deadwork_thread(struct work_struct *work)
+{ /* NOT irq */
+ struct vub300_mmc_host *vub300 =
+ container_of(work, struct vub300_mmc_host, deadwork);
+ if (!vub300->interface) {
+ kref_put(&vub300->kref, vub300_delete);
+ return;
+ }
+ mutex_lock(&vub300->cmd_mutex);
+ if (vub300->cmd) {
+ /*
+ * a command got in as the inactivity
+ * timer expired - so we just let the
+ * processing of the command show if
+ * the device is dead
+ */
+ } else if (vub300->card_present) {
+ check_vub300_port_status(vub300);
+ } else if (vub300->mmc && vub300->mmc->card &&
+ mmc_card_present(vub300->mmc->card)) {
+ /*
+ * the MMC core must not have responded
+ * to the previous indication - lets
+ * hope that it eventually does so we
+ * will just ignore this for now
+ */
+ } else {
+ check_vub300_port_status(vub300);
+ }
+ mod_timer(&vub300->inactivity_timer, jiffies + HZ);
+ mutex_unlock(&vub300->cmd_mutex);
+ kref_put(&vub300->kref, vub300_delete);
+}
+
+static void vub300_inactivity_timer_expired(unsigned long data)
+{ /* softirq */
+ struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+ if (!vub300->interface) {
+ kref_put(&vub300->kref, vub300_delete);
+ } else if (vub300->cmd) {
+ mod_timer(&vub300->inactivity_timer, jiffies + HZ);
+ } else {
+ vub300_queue_dead_work(vub300);
+ mod_timer(&vub300->inactivity_timer, jiffies + HZ);
+ }
+}
+
+static int vub300_response_error(u8 error_code)
+{
+ switch (error_code) {
+ case SD_ERROR_PIO_TIMEOUT:
+ case SD_ERROR_1BIT_TIMEOUT:
+ case SD_ERROR_4BIT_TIMEOUT:
+ return -ETIMEDOUT;
+ case SD_ERROR_STAT_DATA:
+ case SD_ERROR_OVERRUN:
+ case SD_ERROR_STAT_CMD:
+ case SD_ERROR_STAT_CMD_TIMEOUT:
+ case SD_ERROR_SDCRDY_STUCK:
+ case SD_ERROR_UNHANDLED:
+ case SD_ERROR_1BIT_CRC_WRONG:
+ case SD_ERROR_4BIT_CRC_WRONG:
+ case SD_ERROR_1BIT_CRC_ERROR:
+ case SD_ERROR_4BIT_CRC_ERROR:
+ case SD_ERROR_NO_CMD_ENDBIT:
+ case SD_ERROR_NO_1BIT_DATEND:
+ case SD_ERROR_NO_4BIT_DATEND:
+ case SD_ERROR_1BIT_DATA_TIMEOUT:
+ case SD_ERROR_4BIT_DATA_TIMEOUT:
+ case SD_ERROR_1BIT_UNEXPECTED_TIMEOUT:
+ case SD_ERROR_4BIT_UNEXPECTED_TIMEOUT:
+ return -EILSEQ;
+ case 33:
+ return -EILSEQ;
+ case SD_ERROR_ILLEGAL_COMMAND:
+ return -EINVAL;
+ case SD_ERROR_NO_DEVICE:
+ return -ENOMEDIUM;
+ default:
+ return -ENODEV;
+ }
+}
+
+static void command_res_completed(struct urb *urb)
+{ /* urb completion handler - hardirq */
+ struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
+ if (urb->status) {
+ /* we have to let the initiator handle the error */
+ } else if (vub300->command_res_urb->actual_length == 0) {
+ /*
+ * we have seen this happen once or twice and
+ * we suspect a buggy USB host controller
+ */
+ } else if (!vub300->data) {
+ /* this means that the command (typically CMD52) succeeded */
+ } else if (vub300->resp.common.header_type != 0x02) {
+ /*
+ * this is an error response from the VUB300 chip
+ * and we let the initiator handle it
+ */
+ } else if (vub300->urb) {
+ vub300->cmd->error =
+ vub300_response_error(vub300->resp.error.error_code);
+ usb_unlink_urb(vub300->urb);
+ } else {
+ vub300->cmd->error =
+ vub300_response_error(vub300->resp.error.error_code);
+ usb_sg_cancel(&vub300->sg_request);
+ }
+ complete(&vub300->command_complete); /* got_response_in */
+}
+
+static void command_out_completed(struct urb *urb)
+{ /* urb completion handler - hardirq */
+ struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
+ if (urb->status) {
+ complete(&vub300->command_complete);
+ } else {
+ int ret;
+ unsigned int pipe =
+ usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
+ usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
+ &vub300->resp, sizeof(vub300->resp),
+ command_res_completed, vub300);
+ vub300->command_res_urb->actual_length = 0;
+ ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
+ if (ret == 0) {
+ /*
+ * the urb completion handler will call
+ * our completion handler
+ */
+ } else {
+ /*
+ * and thus we only call it directly
+ * when it will not be called
+ */
+ complete(&vub300->command_complete);
+ }
+ }
+}
+
+/*
+ * the STUFF bits are masked out for the comparisons
+ */
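+/*
+ * a CMD52 argument is laid out as R/W flag (bit 31), function number
+ * (bits 30:28), RAW flag (bit 27), register address (bits 25:9) and
+ * write data (bits 7:0), with stuff bits at 26 and 8; the masks
+ * below therefore match writes to each function's FBR block size
+ * registers (0xN10/0xN11) and to the CCCR bus interface control
+ * register (0x07), where the bus width is programmed
+ */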
+static void snoop_block_size_and_bus_width(struct vub300_mmc_host *vub300,
+ u32 cmd_arg)
+{
+ if ((0xFBFFFE00 & cmd_arg) == 0x80022200)
+ vub300->fbs[1] = (cmd_arg << 8) | (0x00FF & vub300->fbs[1]);
+ else if ((0xFBFFFE00 & cmd_arg) == 0x80022000)
+ vub300->fbs[1] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[1]);
+ else if ((0xFBFFFE00 & cmd_arg) == 0x80042200)
+ vub300->fbs[2] = (cmd_arg << 8) | (0x00FF & vub300->fbs[2]);
+ else if ((0xFBFFFE00 & cmd_arg) == 0x80042000)
+ vub300->fbs[2] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[2]);
+ else if ((0xFBFFFE00 & cmd_arg) == 0x80062200)
+ vub300->fbs[3] = (cmd_arg << 8) | (0x00FF & vub300->fbs[3]);
+ else if ((0xFBFFFE00 & cmd_arg) == 0x80062000)
+ vub300->fbs[3] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[3]);
+ else if ((0xFBFFFE00 & cmd_arg) == 0x80082200)
+ vub300->fbs[4] = (cmd_arg << 8) | (0x00FF & vub300->fbs[4]);
+ else if ((0xFBFFFE00 & cmd_arg) == 0x80082000)
+ vub300->fbs[4] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[4]);
+ else if ((0xFBFFFE00 & cmd_arg) == 0x800A2200)
+ vub300->fbs[5] = (cmd_arg << 8) | (0x00FF & vub300->fbs[5]);
+ else if ((0xFBFFFE00 & cmd_arg) == 0x800A2000)
+ vub300->fbs[5] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[5]);
+ else if ((0xFBFFFE00 & cmd_arg) == 0x800C2200)
+ vub300->fbs[6] = (cmd_arg << 8) | (0x00FF & vub300->fbs[6]);
+ else if ((0xFBFFFE00 & cmd_arg) == 0x800C2000)
+ vub300->fbs[6] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[6]);
+ else if ((0xFBFFFE00 & cmd_arg) == 0x800E2200)
+ vub300->fbs[7] = (cmd_arg << 8) | (0x00FF & vub300->fbs[7]);
+ else if ((0xFBFFFE00 & cmd_arg) == 0x800E2000)
+ vub300->fbs[7] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[7]);
+ else if ((0xFBFFFE03 & cmd_arg) == 0x80000E00)
+ vub300->bus_width = 1;
+ else if ((0xFBFFFE03 & cmd_arg) == 0x80000E02)
+ vub300->bus_width = 4;
+}
+
+static void send_command(struct vub300_mmc_host *vub300)
+{
+ /* cmd_mutex is held by vub300_cmndwork_thread */
+ struct mmc_command *cmd = vub300->cmd;
+ struct mmc_data *data = vub300->data;
+ int retval;
+ int i;
+ u8 response_type;
+ if (vub300->app_spec) {
+ switch (cmd->opcode) {
+ case 6:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ if (0x00000000 == (0x00000003 & cmd->arg))
+ vub300->bus_width = 1;
+ else if (0x00000002 == (0x00000003 & cmd->arg))
+ vub300->bus_width = 4;
+ else
+ dev_err(&vub300->udev->dev,
+ "unexpected ACMD6 bus_width=%d\n",
+ 0x00000003 & cmd->arg);
+ break;
+ case 13:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ break;
+ case 22:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ break;
+ case 23:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ break;
+ case 41:
+ response_type = SDRT_3;
+ vub300->resp_len = 6;
+ break;
+ case 42:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ break;
+ case 51:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ break;
+ case 55:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ break;
+ default:
+ vub300->resp_len = 0;
+ cmd->error = -EINVAL;
+ complete(&vub300->command_complete);
+ return;
+ }
+ vub300->app_spec = 0;
+ } else {
+ switch (cmd->opcode) {
+ case 0:
+ response_type = SDRT_NONE;
+ vub300->resp_len = 0;
+ break;
+ case 1:
+ response_type = SDRT_3;
+ vub300->resp_len = 6;
+ break;
+ case 2:
+ response_type = SDRT_2;
+ vub300->resp_len = 17;
+ break;
+ case 3:
+ response_type = SDRT_6;
+ vub300->resp_len = 6;
+ break;
+ case 4:
+ response_type = SDRT_NONE;
+ vub300->resp_len = 0;
+ break;
+ case 5:
+ response_type = SDRT_4;
+ vub300->resp_len = 6;
+ break;
+ case 6:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ break;
+ case 7:
+ response_type = SDRT_1B;
+ vub300->resp_len = 6;
+ break;
+ case 8:
+ response_type = SDRT_7;
+ vub300->resp_len = 6;
+ break;
+ case 9:
+ response_type = SDRT_2;
+ vub300->resp_len = 17;
+ break;
+ case 10:
+ response_type = SDRT_2;
+ vub300->resp_len = 17;
+ break;
+ case 12:
+ response_type = SDRT_1B;
+ vub300->resp_len = 6;
+ break;
+ case 13:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ break;
+ case 15:
+ response_type = SDRT_NONE;
+ vub300->resp_len = 0;
+ break;
+ case 16:
+ for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
+ vub300->fbs[i] = 0xFFFF & cmd->arg;
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ break;
+ case 17:
+ case 18:
+ case 24:
+ case 25:
+ case 27:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ break;
+ case 28:
+ case 29:
+ response_type = SDRT_1B;
+ vub300->resp_len = 6;
+ break;
+ case 30:
+ case 32:
+ case 33:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ break;
+ case 38:
+ response_type = SDRT_1B;
+ vub300->resp_len = 6;
+ break;
+ case 42:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ break;
+ case 52:
+ response_type = SDRT_5;
+ vub300->resp_len = 6;
+ snoop_block_size_and_bus_width(vub300, cmd->arg);
+ break;
+ case 53:
+ response_type = SDRT_5;
+ vub300->resp_len = 6;
+ break;
+ case 55:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ vub300->app_spec = 1;
+ break;
+ case 56:
+ response_type = SDRT_1;
+ vub300->resp_len = 6;
+ break;
+ default:
+ vub300->resp_len = 0;
+ cmd->error = -EINVAL;
+ complete(&vub300->command_complete);
+ return;
+ }
+ }
+ /*
+ * it is a shame that we can not use "sizeof(struct sd_command_header)"
+ * this is because the packet _must_ be padded to 64 bytes
+ */
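+	/*
+	 * header_size counts only the meaningful leading bytes; the
+	 * reserved tail of the header pads the USB packet out to the
+	 * full 64 bytes the firmware expects
+	 */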
+ vub300->cmnd.head.header_size = 20;
+ vub300->cmnd.head.header_type = 0x00;
+ vub300->cmnd.head.port_number = 0; /* "0" means port 1 */
+ vub300->cmnd.head.command_type = 0x00; /* standard read command */
+ vub300->cmnd.head.response_type = response_type;
+ vub300->cmnd.head.command_index = cmd->opcode;
+ vub300->cmnd.head.arguments[0] = cmd->arg >> 24;
+ vub300->cmnd.head.arguments[1] = cmd->arg >> 16;
+ vub300->cmnd.head.arguments[2] = cmd->arg >> 8;
+ vub300->cmnd.head.arguments[3] = cmd->arg >> 0;
+ if (cmd->opcode == 52) {
+ int fn = 0x7 & (cmd->arg >> 28);
+ vub300->cmnd.head.block_count[0] = 0;
+ vub300->cmnd.head.block_count[1] = 0;
+ vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF;
+ vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF;
+ vub300->cmnd.head.command_type = 0x00;
+ vub300->cmnd.head.transfer_size[0] = 0;
+ vub300->cmnd.head.transfer_size[1] = 0;
+ vub300->cmnd.head.transfer_size[2] = 0;
+ vub300->cmnd.head.transfer_size[3] = 0;
+ } else if (!data) {
+ vub300->cmnd.head.block_count[0] = 0;
+ vub300->cmnd.head.block_count[1] = 0;
+ vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF;
+ vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF;
+ vub300->cmnd.head.command_type = 0x00;
+ vub300->cmnd.head.transfer_size[0] = 0;
+ vub300->cmnd.head.transfer_size[1] = 0;
+ vub300->cmnd.head.transfer_size[2] = 0;
+ vub300->cmnd.head.transfer_size[3] = 0;
+ } else if (cmd->opcode == 53) {
+ int fn = 0x7 & (cmd->arg >> 28);
+ if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK MODE */
+ vub300->cmnd.head.block_count[0] =
+ (data->blocks >> 8) & 0xFF;
+ vub300->cmnd.head.block_count[1] =
+ (data->blocks >> 0) & 0xFF;
+ vub300->cmnd.head.block_size[0] =
+ (data->blksz >> 8) & 0xFF;
+ vub300->cmnd.head.block_size[1] =
+ (data->blksz >> 0) & 0xFF;
+ } else { /* BYTE MODE */
+ vub300->cmnd.head.block_count[0] = 0;
+ vub300->cmnd.head.block_count[1] = 0;
+ vub300->cmnd.head.block_size[0] =
+ (vub300->datasize >> 8) & 0xFF;
+ vub300->cmnd.head.block_size[1] =
+ (vub300->datasize >> 0) & 0xFF;
+ }
+ vub300->cmnd.head.command_type =
+ (MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
+ vub300->cmnd.head.transfer_size[0] =
+ (vub300->datasize >> 24) & 0xFF;
+ vub300->cmnd.head.transfer_size[1] =
+ (vub300->datasize >> 16) & 0xFF;
+ vub300->cmnd.head.transfer_size[2] =
+ (vub300->datasize >> 8) & 0xFF;
+ vub300->cmnd.head.transfer_size[3] =
+ (vub300->datasize >> 0) & 0xFF;
+ if (vub300->datasize < vub300->fbs[fn]) {
+ vub300->cmnd.head.block_count[0] = 0;
+ vub300->cmnd.head.block_count[1] = 0;
+ }
+ } else {
+ vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF;
+ vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF;
+ vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF;
+ vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF;
+ vub300->cmnd.head.command_type =
+ (MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
+ vub300->cmnd.head.transfer_size[0] =
+ (vub300->datasize >> 24) & 0xFF;
+ vub300->cmnd.head.transfer_size[1] =
+ (vub300->datasize >> 16) & 0xFF;
+ vub300->cmnd.head.transfer_size[2] =
+ (vub300->datasize >> 8) & 0xFF;
+ vub300->cmnd.head.transfer_size[3] =
+ (vub300->datasize >> 0) & 0xFF;
+ if (vub300->datasize < vub300->fbs[0]) {
+ vub300->cmnd.head.block_count[0] = 0;
+ vub300->cmnd.head.block_count[1] = 0;
+ }
+ }
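+	/*
+	 * block_boundary is the largest multiple of the block size
+	 * that fits in FIRMWARE_BLOCK_BOUNDARY bytes; the firmware
+	 * presumably splits its internal transfers at this boundary,
+	 * so computing it this way keeps a split from landing inside
+	 * a block
+	 */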
+ if (vub300->cmnd.head.block_size[0] || vub300->cmnd.head.block_size[1]) {
+ u16 block_size = vub300->cmnd.head.block_size[1] |
+ (vub300->cmnd.head.block_size[0] << 8);
+ u16 block_boundary = FIRMWARE_BLOCK_BOUNDARY -
+ (FIRMWARE_BLOCK_BOUNDARY % block_size);
+ vub300->cmnd.head.block_boundary[0] =
+ (block_boundary >> 8) & 0xFF;
+ vub300->cmnd.head.block_boundary[1] =
+ (block_boundary >> 0) & 0xFF;
+ } else {
+ vub300->cmnd.head.block_boundary[0] = 0;
+ vub300->cmnd.head.block_boundary[1] = 0;
+ }
+ usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
+ usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep),
+ &vub300->cmnd, sizeof(vub300->cmnd),
+ command_out_completed, vub300);
+ retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
+ if (retval < 0) {
+ cmd->error = retval;
+ complete(&vub300->command_complete);
+ return;
+ } else {
+ return;
+ }
+}
+
+/*
+ * timer callback runs in atomic mode
+ * so it cannot call usb_kill_urb()
+ */
+static void vub300_sg_timed_out(unsigned long data)
+{
+ struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+ vub300->usb_timed_out = 1;
+ usb_sg_cancel(&vub300->sg_request);
+ usb_unlink_urb(vub300->command_out_urb);
+ usb_unlink_urb(vub300->command_res_urb);
+}
+
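+/*
+ * adding 0x3F and masking off the low six bits rounds up to the
+ * next multiple of 64, e.g. 1..64 -> 64 and 65..128 -> 128
+ * (0 stays 0)
+ */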
+static u16 roundup_to_multiple_of_64(u16 number)
+{
+ return 0xFFC0 & (0x3F + number);
+}
+
+/*
+ * this is a separate function to solve the 80 column width restriction
+ */
+static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
+ const struct firmware *fw)
+{
+ u8 register_count = 0;
+ u16 ts = 0;
+ u16 interrupt_size = 0;
+ const u8 *data = fw->data;
+ int size = fw->size;
+ u8 c;
+ dev_info(&vub300->udev->dev, "using %s for SDIO offload processing\n",
+ vub300->vub_name);
+ do {
+ c = *data++;
+ } while (size-- && c); /* skip comment */
+ dev_info(&vub300->udev->dev, "using offload firmware %s %s\n", fw->data,
+ vub300->vub_name);
+ if (size < 4) {
+ dev_err(&vub300->udev->dev,
+ "corrupt offload pseudocode in firmware %s\n",
+ vub300->vub_name);
+ strncpy(vub300->vub_name, "corrupt offload pseudocode",
+ sizeof(vub300->vub_name));
+ return;
+ }
+ interrupt_size += *data++;
+ size -= 1;
+ interrupt_size <<= 8;
+ interrupt_size += *data++;
+ size -= 1;
+ if (interrupt_size < size) {
+ u16 xfer_length = roundup_to_multiple_of_64(interrupt_size);
+ u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
+ if (xfer_buffer) {
+ int retval;
+ memcpy(xfer_buffer, data, interrupt_size);
+ memset(xfer_buffer + interrupt_size, 0,
+ xfer_length - interrupt_size);
+ size -= interrupt_size;
+ data += interrupt_size;
+ retval =
+ usb_control_msg(vub300->udev,
+ usb_sndctrlpipe(vub300->udev, 0),
+ SET_INTERRUPT_PSEUDOCODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, 0x0000, 0x0000,
+ xfer_buffer, xfer_length, HZ);
+ kfree(xfer_buffer);
+ if (retval < 0) {
+ strncpy(vub300->vub_name,
+ "SDIO pseudocode download failed",
+ sizeof(vub300->vub_name));
+ return;
+ }
+ } else {
+ dev_err(&vub300->udev->dev,
+ "not enough memory for xfer buffer to send"
+ " INTERRUPT_PSEUDOCODE for %s %s\n", fw->data,
+ vub300->vub_name);
+ strncpy(vub300->vub_name,
+ "SDIO interrupt pseudocode download failed",
+ sizeof(vub300->vub_name));
+ return;
+ }
+ } else {
+ dev_err(&vub300->udev->dev,
+ "corrupt interrupt pseudocode in firmware %s %s\n",
+ fw->data, vub300->vub_name);
+ strncpy(vub300->vub_name, "corrupt interrupt pseudocode",
+ sizeof(vub300->vub_name));
+ return;
+ }
+ ts += *data++;
+ size -= 1;
+ ts <<= 8;
+ ts += *data++;
+ size -= 1;
+ if (ts < size) {
+ u16 xfer_length = roundup_to_multiple_of_64(ts);
+ u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
+ if (xfer_buffer) {
+ int retval;
+ memcpy(xfer_buffer, data, ts);
+ memset(xfer_buffer + ts, 0,
+ xfer_length - ts);
+ size -= ts;
+ data += ts;
+ retval =
+ usb_control_msg(vub300->udev,
+ usb_sndctrlpipe(vub300->udev, 0),
+ SET_TRANSFER_PSEUDOCODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, 0x0000, 0x0000,
+ xfer_buffer, xfer_length, HZ);
+ kfree(xfer_buffer);
+ if (retval < 0) {
+ strncpy(vub300->vub_name,
+ "SDIO pseudocode download failed",
+ sizeof(vub300->vub_name));
+ return;
+ }
+ } else {
+ dev_err(&vub300->udev->dev,
+ "not enough memory for xfer buffer to send"
+ " TRANSFER_PSEUDOCODE for %s %s\n", fw->data,
+ vub300->vub_name);
+ strncpy(vub300->vub_name,
+ "SDIO transfer pseudocode download failed",
+ sizeof(vub300->vub_name));
+ return;
+ }
+ } else {
+ dev_err(&vub300->udev->dev,
+ "corrupt transfer pseudocode in firmware %s %s\n",
+ fw->data, vub300->vub_name);
+ strncpy(vub300->vub_name, "corrupt transfer pseudocode",
+ sizeof(vub300->vub_name));
+ return;
+ }
+ register_count += *data++;
+ size -= 1;
+ if (register_count * 4 == size) {
+ int I = vub300->dynamic_register_count = register_count;
+ int i = 0;
+ while (I--) {
+ unsigned int func_num = 0;
+ vub300->sdio_register[i].func_num = *data++;
+ size -= 1;
+ func_num += *data++;
+ size -= 1;
+ func_num <<= 8;
+ func_num += *data++;
+ size -= 1;
+ func_num <<= 8;
+ func_num += *data++;
+ size -= 1;
+ vub300->sdio_register[i].sdio_reg = func_num;
+ vub300->sdio_register[i].activate = 1;
+ vub300->sdio_register[i].prepared = 0;
+ i += 1;
+ }
+ dev_info(&vub300->udev->dev,
+ "initialized %d dynamic pseudocode registers\n",
+ vub300->dynamic_register_count);
+ return;
+ } else {
+ dev_err(&vub300->udev->dev,
+ "corrupt dynamic registers in firmware %s\n",
+ vub300->vub_name);
+ strncpy(vub300->vub_name, "corrupt dynamic registers",
+ sizeof(vub300->vub_name));
+ return;
+ }
+}
+
+/*
+ * if the binary containing the EMPTY PseudoCode can not be found
+ * vub300->vub_name is set anyway in order to prevent an automatic retry
+ */
+static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
+{
+ struct mmc_card *card = vub300->mmc->card;
+ int sdio_funcs = card->sdio_funcs;
+ const struct firmware *fw = NULL;
+ int l = snprintf(vub300->vub_name, sizeof(vub300->vub_name),
+ "vub_%04X%04X", card->cis.vendor, card->cis.device);
+ int n = 0;
+ int retval;
+ for (n = 0; n < sdio_funcs; n++) {
+ struct sdio_func *sf = card->sdio_func[n];
+ l += snprintf(vub300->vub_name + l,
+ sizeof(vub300->vub_name) - l, "_%04X%04X",
+ sf->vendor, sf->device);
+	}
+ snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin");
+ dev_info(&vub300->udev->dev, "requesting offload firmware %s\n",
+ vub300->vub_name);
+ retval = request_firmware(&fw, vub300->vub_name, &card->dev);
+ if (retval < 0) {
+ strncpy(vub300->vub_name, "vub_default.bin",
+ sizeof(vub300->vub_name));
+ retval = request_firmware(&fw, vub300->vub_name, &card->dev);
+ if (retval < 0) {
+ strncpy(vub300->vub_name,
+ "no SDIO offload firmware found",
+ sizeof(vub300->vub_name));
+ } else {
+ __download_offload_pseudocode(vub300, fw);
+ release_firmware(fw);
+ }
+ } else {
+ __download_offload_pseudocode(vub300, fw);
+ release_firmware(fw);
+ }
+}
+
+static void vub300_usb_bulk_msg_completion(struct urb *urb)
+{ /* urb completion handler - hardirq */
+ complete((struct completion *)urb->context);
+}
+
+static int vub300_usb_bulk_msg(struct vub300_mmc_host *vub300,
+ unsigned int pipe, void *data, int len,
+ int *actual_length, int timeout_msecs)
+{
+ /* cmd_mutex is held by vub300_cmndwork_thread */
+ struct usb_device *usb_dev = vub300->udev;
+ struct completion done;
+ int retval;
+ vub300->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!vub300->urb)
+ return -ENOMEM;
+ usb_fill_bulk_urb(vub300->urb, usb_dev, pipe, data, len,
+ vub300_usb_bulk_msg_completion, NULL);
+ init_completion(&done);
+ vub300->urb->context = &done;
+ vub300->urb->actual_length = 0;
+ retval = usb_submit_urb(vub300->urb, GFP_KERNEL);
+ if (unlikely(retval))
+ goto out;
+ if (!wait_for_completion_timeout
+ (&done, msecs_to_jiffies(timeout_msecs))) {
+ retval = -ETIMEDOUT;
+ usb_kill_urb(vub300->urb);
+ } else {
+ retval = vub300->urb->status;
+ }
+out:
+ *actual_length = vub300->urb->actual_length;
+ usb_free_urb(vub300->urb);
+ vub300->urb = NULL;
+ return retval;
+}
+
+static int __command_read_data(struct vub300_mmc_host *vub300,
+ struct mmc_command *cmd, struct mmc_data *data)
+{
+ /* cmd_mutex is held by vub300_cmndwork_thread */
+ int linear_length = vub300->datasize;
+ int padded_length = vub300->large_usb_packets ?
+ ((511 + linear_length) >> 9) << 9 :
+ ((63 + linear_length) >> 6) << 6;
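+	/*
+	 * bulk-in data arrives in whole USB packets: 512 bytes at high
+	 * speed ("large_usb_packets") and 64 bytes at full speed, so
+	 * the read length is padded up to the packet size on request
+	 */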
+ if ((padded_length == linear_length) || !pad_input_to_usb_pkt) {
+ int result;
+ unsigned pipe;
+ pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep);
+ result = usb_sg_init(&vub300->sg_request, vub300->udev,
+ pipe, 0, data->sg,
+ data->sg_len, 0, GFP_KERNEL);
+ if (result < 0) {
+ usb_unlink_urb(vub300->command_out_urb);
+ usb_unlink_urb(vub300->command_res_urb);
+ cmd->error = result;
+ data->bytes_xfered = 0;
+ return 0;
+ } else {
+ vub300->sg_transfer_timer.expires =
+ jiffies + msecs_to_jiffies(2000 +
+ (linear_length / 16384));
+ add_timer(&vub300->sg_transfer_timer);
+ usb_sg_wait(&vub300->sg_request);
+ del_timer(&vub300->sg_transfer_timer);
+ if (vub300->sg_request.status < 0) {
+ cmd->error = vub300->sg_request.status;
+ data->bytes_xfered = 0;
+ return 0;
+ } else {
+ data->bytes_xfered = vub300->datasize;
+ return linear_length;
+ }
+ }
+ } else {
+ u8 *buf = kmalloc(padded_length, GFP_KERNEL);
+ if (buf) {
+ int result;
+ unsigned pipe = usb_rcvbulkpipe(vub300->udev,
+ vub300->data_inp_ep);
+ int actual_length = 0;
+ result = vub300_usb_bulk_msg(vub300, pipe, buf,
+ padded_length, &actual_length,
+ 2000 + (padded_length / 16384));
+ if (result < 0) {
+ cmd->error = result;
+ data->bytes_xfered = 0;
+ kfree(buf);
+ return 0;
+ } else if (actual_length < linear_length) {
+ cmd->error = -EREMOTEIO;
+ data->bytes_xfered = 0;
+ kfree(buf);
+ return 0;
+ } else {
+ sg_copy_from_buffer(data->sg, data->sg_len, buf,
+ linear_length);
+ kfree(buf);
+ data->bytes_xfered = vub300->datasize;
+ return linear_length;
+ }
+ } else {
+ cmd->error = -ENOMEM;
+ data->bytes_xfered = 0;
+ return 0;
+ }
+ }
+}
+
+static int __command_write_data(struct vub300_mmc_host *vub300,
+ struct mmc_command *cmd, struct mmc_data *data)
+{
+ /* cmd_mutex is held by vub300_cmndwork_thread */
+ unsigned pipe = usb_sndbulkpipe(vub300->udev, vub300->data_out_ep);
+ int linear_length = vub300->datasize;
+ int modulo_64_length = linear_length & 0x003F;
+ int modulo_512_length = linear_length & 0x01FF;
+ if (linear_length < 64) {
+ int result;
+ int actual_length;
+ sg_copy_to_buffer(data->sg, data->sg_len,
+ vub300->padded_buffer,
+ sizeof(vub300->padded_buffer));
+ memset(vub300->padded_buffer + linear_length, 0,
+ sizeof(vub300->padded_buffer) - linear_length);
+ result = vub300_usb_bulk_msg(vub300, pipe, vub300->padded_buffer,
+ sizeof(vub300->padded_buffer),
+ &actual_length, 2000 +
+ (sizeof(vub300->padded_buffer) /
+ 16384));
+ if (result < 0) {
+ cmd->error = result;
+ data->bytes_xfered = 0;
+ } else {
+ data->bytes_xfered = vub300->datasize;
+ }
+ } else if ((!vub300->large_usb_packets && (0 < modulo_64_length)) ||
+ (vub300->large_usb_packets && (64 > modulo_512_length))
+		   ) { /* don't you just love these workarounds */
+ int padded_length = ((63 + linear_length) >> 6) << 6;
+ u8 *buf = kmalloc(padded_length, GFP_KERNEL);
+ if (buf) {
+ int result;
+ int actual_length;
+ sg_copy_to_buffer(data->sg, data->sg_len, buf,
+ padded_length);
+ memset(buf + linear_length, 0,
+ padded_length - linear_length);
+ result =
+ vub300_usb_bulk_msg(vub300, pipe, buf,
+ padded_length, &actual_length,
+ 2000 + padded_length / 16384);
+ kfree(buf);
+ if (result < 0) {
+ cmd->error = result;
+ data->bytes_xfered = 0;
+ } else {
+ data->bytes_xfered = vub300->datasize;
+ }
+ } else {
+ cmd->error = -ENOMEM;
+ data->bytes_xfered = 0;
+ }
+ } else { /* no data padding required */
+ int result;
+ unsigned char buf[64 * 4];
+ sg_copy_to_buffer(data->sg, data->sg_len, buf, sizeof(buf));
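+		/* note: buf is filled but never read afterwards; apparently leftover debug code */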
+ result = usb_sg_init(&vub300->sg_request, vub300->udev,
+ pipe, 0, data->sg,
+ data->sg_len, 0, GFP_KERNEL);
+ if (result < 0) {
+ usb_unlink_urb(vub300->command_out_urb);
+ usb_unlink_urb(vub300->command_res_urb);
+ cmd->error = result;
+ data->bytes_xfered = 0;
+ } else {
+ vub300->sg_transfer_timer.expires =
+ jiffies + msecs_to_jiffies(2000 +
+ linear_length / 16384);
+ add_timer(&vub300->sg_transfer_timer);
+ usb_sg_wait(&vub300->sg_request);
+ if (cmd->error) {
+ data->bytes_xfered = 0;
+ } else {
+ del_timer(&vub300->sg_transfer_timer);
+ if (vub300->sg_request.status < 0) {
+ cmd->error = vub300->sg_request.status;
+ data->bytes_xfered = 0;
+ } else {
+ data->bytes_xfered = vub300->datasize;
+ }
+ }
+ }
+ }
+ return linear_length;
+}
+
+static void __vub300_command_response(struct vub300_mmc_host *vub300,
+ struct mmc_command *cmd,
+ struct mmc_data *data, int data_length)
+{
+ /* cmd_mutex is held by vub300_cmndwork_thread */
+ long respretval;
+ int msec_timeout = 1000 + data_length / 4;
+ respretval =
+ wait_for_completion_timeout(&vub300->command_complete,
+ msecs_to_jiffies(msec_timeout));
+ if (respretval == 0) { /* TIMED OUT */
+		/* we don't know which of "out" and "res", if any, failed */
+ int result;
+ vub300->usb_timed_out = 1;
+ usb_kill_urb(vub300->command_out_urb);
+ usb_kill_urb(vub300->command_res_urb);
+ cmd->error = -ETIMEDOUT;
+ result = usb_lock_device_for_reset(vub300->udev,
+ vub300->interface);
+ if (result == 0) {
+ result = usb_reset_device(vub300->udev);
+ usb_unlock_device(vub300->udev);
+ }
+ } else if (respretval < 0) {
+		/* we don't know which of "out" and "res", if any, failed */
+ usb_kill_urb(vub300->command_out_urb);
+ usb_kill_urb(vub300->command_res_urb);
+ cmd->error = respretval;
+ } else if (cmd->error) {
+ /*
+ * the error occurred sending the command
+ * or receiving the response
+ */
+ } else if (vub300->command_out_urb->status) {
+ vub300->usb_transport_fail = vub300->command_out_urb->status;
+ cmd->error = -EPROTO == vub300->command_out_urb->status ?
+ -ESHUTDOWN : vub300->command_out_urb->status;
+ } else if (vub300->command_res_urb->status) {
+ vub300->usb_transport_fail = vub300->command_res_urb->status;
+ cmd->error = -EPROTO == vub300->command_res_urb->status ?
+ -ESHUTDOWN : vub300->command_res_urb->status;
+ } else if (vub300->resp.common.header_type == 0x00) {
+ /*
+ * the command completed successfully
+ * and there was no piggybacked data
+ */
+ } else if (vub300->resp.common.header_type == RESPONSE_ERROR) {
+ cmd->error =
+ vub300_response_error(vub300->resp.error.error_code);
+ if (vub300->data)
+ usb_sg_cancel(&vub300->sg_request);
+ } else if (vub300->resp.common.header_type == RESPONSE_PIGGYBACKED) {
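+		/*
+		 * the device piggybacked a batch of offloaded register
+		 * reads (8 bytes each) onto the response; queue them and
+		 * strip the extra header so the caller sees a plain reply
+		 */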
+ int offloaded_data_length =
+ vub300->resp.common.header_size -
+ sizeof(struct sd_register_header);
+ int register_count = offloaded_data_length >> 3;
+ int ri = 0;
+ while (register_count--) {
+ add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
+ ri += 1;
+ }
+ vub300->resp.common.header_size =
+ sizeof(struct sd_register_header);
+ vub300->resp.common.header_type = 0x00;
+ cmd->error = 0;
+ } else if (vub300->resp.common.header_type == RESPONSE_PIG_DISABLED) {
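+		/*
+		 * same register offload as above; additionally the device
+		 * reports piggybacking disabled, so latch an interrupt and
+		 * note irq_disabled before clearing the header
+		 */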
+ int offloaded_data_length =
+ vub300->resp.common.header_size -
+ sizeof(struct sd_register_header);
+ int register_count = offloaded_data_length >> 3;
+ int ri = 0;
+ while (register_count--) {
+ add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
+ ri += 1;
+ }
+ mutex_lock(&vub300->irq_mutex);
+ if (vub300->irqs_queued) {
+ vub300->irqs_queued += 1;
+ } else if (vub300->irq_enabled) {
+ vub300->irqs_queued += 1;
+ vub300_queue_poll_work(vub300, 0);
+ } else {
+ vub300->irqs_queued += 1;
+ }
+ vub300->irq_disabled = 1;
+ mutex_unlock(&vub300->irq_mutex);
+ vub300->resp.common.header_size =
+ sizeof(struct sd_register_header);
+ vub300->resp.common.header_type = 0x00;
+ cmd->error = 0;
+ } else if (vub300->resp.common.header_type == RESPONSE_PIG_ENABLED) {
+ int offloaded_data_length =
+ vub300->resp.common.header_size -
+ sizeof(struct sd_register_header);
+ int register_count = offloaded_data_length >> 3;
+ int ri = 0;
+ while (register_count--) {
+ add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
+ ri += 1;
+ }
+ mutex_lock(&vub300->irq_mutex);
+ if (vub300->irqs_queued) {
+ vub300->irqs_queued += 1;
+ } else if (vub300->irq_enabled) {
+ vub300->irqs_queued += 1;
+ vub300_queue_poll_work(vub300, 0);
+ } else {
+ vub300->irqs_queued += 1;
+ }
+ vub300->irq_disabled = 0;
+ mutex_unlock(&vub300->irq_mutex);
+ vub300->resp.common.header_size =
+ sizeof(struct sd_register_header);
+ vub300->resp.common.header_type = 0x00;
+ cmd->error = 0;
+ } else {
+ cmd->error = -EINVAL;
+ }
+}
+
+static void construct_request_response(struct vub300_mmc_host *vub300,
+ struct mmc_command *cmd)
+{
+ int resp_len = vub300->resp_len;
+ int less_cmd = (17 == resp_len) ? resp_len : resp_len - 1;
+ int bytes = 3 & less_cmd;
+ int words = less_cmd >> 2;
+ u8 *r = vub300->resp.response.command_response;
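+	/*
+	 * reassemble the big-endian response bytes, starting at r[1],
+	 * into 32-bit resp words: any partial trailing word (1-3 bytes)
+	 * is stored first, then the full words
+	 */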
+ if (bytes == 3) {
+ cmd->resp[words] = (r[1 + (words << 2)] << 24)
+ | (r[2 + (words << 2)] << 16)
+ | (r[3 + (words << 2)] << 8);
+ } else if (bytes == 2) {
+ cmd->resp[words] = (r[1 + (words << 2)] << 24)
+ | (r[2 + (words << 2)] << 16);
+ } else if (bytes == 1) {
+ cmd->resp[words] = (r[1 + (words << 2)] << 24);
+ }
+ while (words-- > 0) {
+ cmd->resp[words] = (r[1 + (words << 2)] << 24)
+ | (r[2 + (words << 2)] << 16)
+ | (r[3 + (words << 2)] << 8)
+ | (r[4 + (words << 2)] << 0);
+ }
+ if ((cmd->opcode == 53) && (0x000000FF & cmd->resp[0]))
+ cmd->resp[0] &= 0xFFFFFF00;
+}
+
+/* this thread runs only when there is an upper level command req outstanding */
+static void vub300_cmndwork_thread(struct work_struct *work)
+{
+ struct vub300_mmc_host *vub300 =
+ container_of(work, struct vub300_mmc_host, cmndwork);
+ if (!vub300->interface) {
+ kref_put(&vub300->kref, vub300_delete);
+ return;
+ } else {
+ struct mmc_request *req = vub300->req;
+ struct mmc_command *cmd = vub300->cmd;
+ struct mmc_data *data = vub300->data;
+ int data_length;
+ mutex_lock(&vub300->cmd_mutex);
+ init_completion(&vub300->command_complete);
+ if (likely(vub300->vub_name[0]) || !vub300->mmc->card ||
+ !mmc_card_present(vub300->mmc->card)) {
+ /*
+ * the name of the EMPTY Pseudo firmware file
+ * is used as a flag to indicate that the file
+ * has been already downloaded to the VUB300 chip
+ */
+ } else if (0 == vub300->mmc->card->sdio_funcs) {
+ strncpy(vub300->vub_name, "SD memory device",
+ sizeof(vub300->vub_name));
+ } else {
+ download_offload_pseudocode(vub300);
+ }
+ send_command(vub300);
+ if (!data)
+ data_length = 0;
+ else if (MMC_DATA_READ & data->flags)
+ data_length = __command_read_data(vub300, cmd, data);
+ else
+ data_length = __command_write_data(vub300, cmd, data);
+ __vub300_command_response(vub300, cmd, data, data_length);
+ vub300->req = NULL;
+ vub300->cmd = NULL;
+ vub300->data = NULL;
+ if (cmd->error) {
+ if (cmd->error == -ENOMEDIUM)
+ check_vub300_port_status(vub300);
+ mutex_unlock(&vub300->cmd_mutex);
+ mmc_request_done(vub300->mmc, req);
+ kref_put(&vub300->kref, vub300_delete);
+ return;
+ } else {
+ construct_request_response(vub300, cmd);
+ vub300->resp_len = 0;
+ mutex_unlock(&vub300->cmd_mutex);
+ kref_put(&vub300->kref, vub300_delete);
+ mmc_request_done(vub300->mmc, req);
+ return;
+ }
+ }
+}
+
+static int examine_cyclic_buffer(struct vub300_mmc_host *vub300,
+ struct mmc_command *cmd, u8 Function)
+{
+ /* cmd_mutex is held by vub300_mmc_request */
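+	/*
+	 * search the per-function ring of offloaded register reads for
+	 * an entry matching this CMD52; a hit consumes the matching
+	 * entry together with every entry queued before it
+	 */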
+ u8 cmd0 = 0xFF & (cmd->arg >> 24);
+ u8 cmd1 = 0xFF & (cmd->arg >> 16);
+ u8 cmd2 = 0xFF & (cmd->arg >> 8);
+ u8 cmd3 = 0xFF & (cmd->arg >> 0);
+ int first = MAXREGMASK & vub300->fn[Function].offload_point;
+ struct offload_registers_access *rf = &vub300->fn[Function].reg[first];
+ if (cmd0 == rf->command_byte[0] &&
+ cmd1 == rf->command_byte[1] &&
+ cmd2 == rf->command_byte[2] &&
+ cmd3 == rf->command_byte[3]) {
+ u8 checksum = 0x00;
+ cmd->resp[1] = checksum << 24;
+ cmd->resp[0] = (rf->Respond_Byte[0] << 24)
+ | (rf->Respond_Byte[1] << 16)
+ | (rf->Respond_Byte[2] << 8)
+ | (rf->Respond_Byte[3] << 0);
+ vub300->fn[Function].offload_point += 1;
+ vub300->fn[Function].offload_count -= 1;
+ vub300->total_offload_count -= 1;
+ return 1;
+ } else {
+ int delta = 1; /* because it does not match the first one */
+ u8 register_count = vub300->fn[Function].offload_count - 1;
+ u32 register_point = vub300->fn[Function].offload_point + 1;
+ while (0 < register_count) {
+ int point = MAXREGMASK & register_point;
+ struct offload_registers_access *r =
+ &vub300->fn[Function].reg[point];
+ if (cmd0 == r->command_byte[0] &&
+ cmd1 == r->command_byte[1] &&
+ cmd2 == r->command_byte[2] &&
+ cmd3 == r->command_byte[3]) {
+ u8 checksum = 0x00;
+ cmd->resp[1] = checksum << 24;
+ cmd->resp[0] = (r->Respond_Byte[0] << 24)
+ | (r->Respond_Byte[1] << 16)
+ | (r->Respond_Byte[2] << 8)
+ | (r->Respond_Byte[3] << 0);
+ vub300->fn[Function].offload_point += delta;
+ vub300->fn[Function].offload_count -= delta;
+ vub300->total_offload_count -= delta;
+ return 1;
+ } else {
+ register_point += 1;
+ register_count -= 1;
+ delta += 1;
+ continue;
+ }
+ }
+ return 0;
+ }
+}
+
+static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300,
+ struct mmc_command *cmd)
+{
+ /* cmd_mutex is held by vub300_mmc_request */
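+	/*
+	 * first try the table of prepared "dynamic" registers; only if
+	 * none matches do we fall back to the cyclic offload buffer
+	 */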
+ u8 regs = vub300->dynamic_register_count;
+ u8 i = 0;
+ u8 func = FUN(cmd);
+ u32 reg = REG(cmd);
+ while (0 < regs--) {
+ if ((vub300->sdio_register[i].func_num == func) &&
+ (vub300->sdio_register[i].sdio_reg == reg)) {
+ if (!vub300->sdio_register[i].prepared) {
+ return 0;
+ } else if ((0x80000000 & cmd->arg) == 0x80000000) {
+ /*
+ * a write to a dynamic register
+ * nullifies our offloaded value
+ */
+ vub300->sdio_register[i].prepared = 0;
+ return 0;
+ } else {
+ u8 checksum = 0x00;
+ u8 rsp0 = 0x00;
+ u8 rsp1 = 0x00;
+ u8 rsp2 = vub300->sdio_register[i].response;
+ u8 rsp3 = vub300->sdio_register[i].regvalue;
+ vub300->sdio_register[i].prepared = 0;
+ cmd->resp[1] = checksum << 24;
+ cmd->resp[0] = (rsp0 << 24)
+ | (rsp1 << 16)
+ | (rsp2 << 8)
+ | (rsp3 << 0);
+ return 1;
+ }
+ } else {
+ i += 1;
+ continue;
+ }
+	}
+ if (vub300->total_offload_count == 0)
+ return 0;
+ else if (vub300->fn[func].offload_count == 0)
+ return 0;
+ else
+ return examine_cyclic_buffer(vub300, cmd, func);
+}
+
+static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
+{ /* NOT irq */
+ struct mmc_command *cmd = req->cmd;
+ struct vub300_mmc_host *vub300 = mmc_priv(mmc);
+ if (!vub300->interface) {
+ cmd->error = -ESHUTDOWN;
+ mmc_request_done(mmc, req);
+ return;
+ } else {
+ struct mmc_data *data = req->data;
+ if (!vub300->card_powered) {
+ cmd->error = -ENOMEDIUM;
+ mmc_request_done(mmc, req);
+ return;
+ }
+ if (!vub300->card_present) {
+ cmd->error = -ENOMEDIUM;
+ mmc_request_done(mmc, req);
+ return;
+ }
+ if (vub300->usb_transport_fail) {
+ cmd->error = vub300->usb_transport_fail;
+ mmc_request_done(mmc, req);
+ return;
+ }
+ if (!vub300->interface) {
+ cmd->error = -ENODEV;
+ mmc_request_done(mmc, req);
+ return;
+ }
+ kref_get(&vub300->kref);
+ mutex_lock(&vub300->cmd_mutex);
+ mod_timer(&vub300->inactivity_timer, jiffies + HZ);
+ /*
+ * for performance we have to return immediately
+ * if the requested data has been offloaded
+ */
+ if (cmd->opcode == 52 &&
+ satisfy_request_from_offloaded_data(vub300, cmd)) {
+ cmd->error = 0;
+ mutex_unlock(&vub300->cmd_mutex);
+ kref_put(&vub300->kref, vub300_delete);
+ mmc_request_done(mmc, req);
+ return;
+ } else {
+ vub300->cmd = cmd;
+ vub300->req = req;
+ vub300->data = data;
+ if (data)
+ vub300->datasize = data->blksz * data->blocks;
+ else
+ vub300->datasize = 0;
+ vub300_queue_cmnd_work(vub300);
+ mutex_unlock(&vub300->cmd_mutex);
+ kref_put(&vub300->kref, vub300_delete);
+ /*
+ * the kernel lock diagnostics complain
+			 * if the cmd_mutex is "passed on"
+ * to the cmndwork thread,
+ * so we must release it now
+ * and re-acquire it in the cmndwork thread
+ */
+ }
+ }
+}
+
+static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
+ struct mmc_ios *ios)
+{
+	int buf_array_size = 8; /* ARRAY_SIZE(buf) does not work: buf decayed to a pointer */
+ int retval;
+ u32 kHzClock;
+ if (ios->clock >= 48000000)
+ kHzClock = 48000;
+ else if (ios->clock >= 24000000)
+ kHzClock = 24000;
+ else if (ios->clock >= 20000000)
+ kHzClock = 20000;
+ else if (ios->clock >= 15000000)
+ kHzClock = 15000;
+ else if (ios->clock >= 200000)
+ kHzClock = 200;
+ else
+ kHzClock = 0;
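+	/* serialize the clock value LSB-first into the 8-byte buffer */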
+ {
+ int i;
+ u64 c = kHzClock;
+ for (i = 0; i < buf_array_size; i++) {
+ buf[i] = c;
+ c >>= 8;
+ }
+ }
+ retval =
+ usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
+ SET_CLOCK_SPEED,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			0x00, 0x00, buf, buf_array_size, 1000); /* timeout in ms, not jiffies */
+ if (retval != 8) {
+ dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
+ " %dkHz failed with retval=%d\n", kHzClock, retval);
+ } else {
+ dev_dbg(&vub300->udev->dev, "SET_CLOCK_SPEED"
+ " %dkHz\n", kHzClock);
+ }
+}
+
+static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{ /* NOT irq */
+ struct vub300_mmc_host *vub300 = mmc_priv(mmc);
+ if (!vub300->interface)
+ return;
+ kref_get(&vub300->kref);
+ mutex_lock(&vub300->cmd_mutex);
+ if ((ios->power_mode == MMC_POWER_OFF) && vub300->card_powered) {
+ vub300->card_powered = 0;
+ usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
+ SET_SD_POWER,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				0x0000, 0x0000, NULL, 0, 1000);
+ /* must wait for the VUB300 u-proc to boot up */
+ msleep(600);
+ } else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) {
+ usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
+ SET_SD_POWER,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				0x0001, 0x0000, NULL, 0, 1000);
+ msleep(600);
+ vub300->card_powered = 1;
+ } else if (ios->power_mode == MMC_POWER_ON) {
+ u8 *buf = kmalloc(8, GFP_KERNEL);
+ if (buf) {
+ __set_clock_speed(vub300, buf, ios);
+ kfree(buf);
+ }
+ } else {
+ /* this should mean no change of state */
+ }
+ mutex_unlock(&vub300->cmd_mutex);
+ kref_put(&vub300->kref, vub300_delete);
+}
+
+static int vub300_mmc_get_ro(struct mmc_host *mmc)
+{
+ struct vub300_mmc_host *vub300 = mmc_priv(mmc);
+ return vub300->read_only;
+}
+
+static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{ /* NOT irq */
+ struct vub300_mmc_host *vub300 = mmc_priv(mmc);
+ if (!vub300->interface)
+ return;
+ kref_get(&vub300->kref);
+ if (enable) {
+ mutex_lock(&vub300->irq_mutex);
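+		/*
+		 * deliver any interrupt latched while reporting was off
+		 * before re-arming the poll loop
+		 */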
+ if (vub300->irqs_queued) {
+ vub300->irqs_queued -= 1;
+ mmc_signal_sdio_irq(vub300->mmc);
+ } else if (vub300->irq_disabled) {
+ vub300->irq_disabled = 0;
+ vub300->irq_enabled = 1;
+ vub300_queue_poll_work(vub300, 0);
+ } else if (vub300->irq_enabled) {
+ /* this should not happen, so we will just ignore it */
+ } else {
+ vub300->irq_enabled = 1;
+ vub300_queue_poll_work(vub300, 0);
+ }
+ mutex_unlock(&vub300->irq_mutex);
+ } else {
+ vub300->irq_enabled = 0;
+ }
+ kref_put(&vub300->kref, vub300_delete);
+}
+
+static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
+{ /* NOT irq */
+ struct vub300_mmc_host *vub300 = mmc_priv(mmc);
+ dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n");
+}
+
+static const struct mmc_host_ops vub300_mmc_ops = {
+ .request = vub300_mmc_request,
+ .set_ios = vub300_mmc_set_ios,
+ .get_ro = vub300_mmc_get_ro,
+ .enable_sdio_irq = vub300_enable_sdio_irq,
+ .init_card = vub300_init_card,
+};
+
+static int vub300_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{ /* NOT irq */
+ struct vub300_mmc_host *vub300;
+ struct usb_host_interface *iface_desc;
+ struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
+ int i;
+ int retval = -ENOMEM;
+ struct urb *command_out_urb;
+ struct urb *command_res_urb;
+ struct mmc_host *mmc;
+ char manufacturer[48];
+ char product[32];
+ char serial_number[32];
+ usb_string(udev, udev->descriptor.iManufacturer, manufacturer,
+ sizeof(manufacturer));
+ usb_string(udev, udev->descriptor.iProduct, product, sizeof(product));
+ usb_string(udev, udev->descriptor.iSerialNumber, serial_number,
+ sizeof(serial_number));
+ dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n",
+ udev->descriptor.idVendor, udev->descriptor.idProduct,
+ manufacturer, product, serial_number);
+ command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!command_out_urb) {
+ retval = -ENOMEM;
+ dev_err(&udev->dev, "not enough memory for command_out_urb\n");
+ goto error0;
+ }
+ command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!command_res_urb) {
+ retval = -ENOMEM;
+ dev_err(&udev->dev, "not enough memory for command_res_urb\n");
+ goto error1;
+ }
+ /* this also allocates memory for our VUB300 mmc host device */
+ mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
+ if (!mmc) {
+ retval = -ENOMEM;
+ dev_err(&udev->dev, "not enough memory for the mmc_host\n");
+ goto error4;
+ }
+ /* MMC core transfer sizes tunable parameters */
+ mmc->caps = 0;
+ if (!force_1_bit_data_xfers)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+ if (!force_polling_for_irqs)
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+ mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+ /*
+ * MMC_CAP_NEEDS_POLL causes core.c:mmc_rescan() to poll
+ * for devices which results in spurious CMD7's being
+ * issued which stops some SDIO cards from working
+ */
+ if (limit_speed_to_24_MHz) {
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+ mmc->f_max = 24000000;
+		dev_info(&udev->dev, "limiting SDIO speed to 24 MHz\n");
+ } else {
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+ mmc->f_max = 48000000;
+ }
+ mmc->f_min = 200000;
+ mmc->max_blk_count = 511;
+ mmc->max_blk_size = 512;
+ mmc->max_segs = 128;
+ if (force_max_req_size)
+ mmc->max_req_size = force_max_req_size * 1024;
+ else
+ mmc->max_req_size = 64 * 1024;
+ mmc->max_seg_size = mmc->max_req_size;
+ mmc->ocr_avail = 0;
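+	/* advertise every voltage window from 1.65 V to 3.6 V */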
+ mmc->ocr_avail |= MMC_VDD_165_195;
+ mmc->ocr_avail |= MMC_VDD_20_21;
+ mmc->ocr_avail |= MMC_VDD_21_22;
+ mmc->ocr_avail |= MMC_VDD_22_23;
+ mmc->ocr_avail |= MMC_VDD_23_24;
+ mmc->ocr_avail |= MMC_VDD_24_25;
+ mmc->ocr_avail |= MMC_VDD_25_26;
+ mmc->ocr_avail |= MMC_VDD_26_27;
+ mmc->ocr_avail |= MMC_VDD_27_28;
+ mmc->ocr_avail |= MMC_VDD_28_29;
+ mmc->ocr_avail |= MMC_VDD_29_30;
+ mmc->ocr_avail |= MMC_VDD_30_31;
+ mmc->ocr_avail |= MMC_VDD_31_32;
+ mmc->ocr_avail |= MMC_VDD_32_33;
+ mmc->ocr_avail |= MMC_VDD_33_34;
+ mmc->ocr_avail |= MMC_VDD_34_35;
+ mmc->ocr_avail |= MMC_VDD_35_36;
+ mmc->ops = &vub300_mmc_ops;
+ vub300 = mmc_priv(mmc);
+ vub300->mmc = mmc;
+ vub300->card_powered = 0;
+ vub300->bus_width = 0;
+ vub300->cmnd.head.block_size[0] = 0x00;
+ vub300->cmnd.head.block_size[1] = 0x00;
+ vub300->app_spec = 0;
+ mutex_init(&vub300->cmd_mutex);
+ mutex_init(&vub300->irq_mutex);
+ vub300->command_out_urb = command_out_urb;
+ vub300->command_res_urb = command_res_urb;
+ vub300->usb_timed_out = 0;
+ vub300->dynamic_register_count = 0;
+
+ for (i = 0; i < ARRAY_SIZE(vub300->fn); i++) {
+ vub300->fn[i].offload_point = 0;
+ vub300->fn[i].offload_count = 0;
+ }
+
+ vub300->total_offload_count = 0;
+ vub300->irq_enabled = 0;
+ vub300->irq_disabled = 0;
+ vub300->irqs_queued = 0;
+
+ for (i = 0; i < ARRAY_SIZE(vub300->sdio_register); i++)
+		vub300->sdio_register[i].activate = 0;
+
+ vub300->udev = udev;
+ vub300->interface = interface;
+ vub300->cmnd_res_ep = 0;
+ vub300->cmnd_out_ep = 0;
+ vub300->data_inp_ep = 0;
+ vub300->data_out_ep = 0;
+
+ for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
+ vub300->fbs[i] = 512;
+
+ /*
+ * set up the endpoint information
+ *
+ * use the first pair of bulk-in and bulk-out
+ * endpoints for Command/Response+Interrupt
+ *
+ * use the second pair of bulk-in and bulk-out
+ * endpoints for Data In/Out
+ */
+ vub300->large_usb_packets = 0;
+ iface_desc = interface->cur_altsetting;
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ struct usb_endpoint_descriptor *endpoint =
+ &iface_desc->endpoint[i].desc;
+ dev_info(&vub300->udev->dev,
+ "vub300 testing %s EndPoint(%d) %02X\n",
+ usb_endpoint_is_bulk_in(endpoint) ? "BULK IN" :
+ usb_endpoint_is_bulk_out(endpoint) ? "BULK OUT" :
+ "UNKNOWN", i, endpoint->bEndpointAddress);
+		if (usb_endpoint_maxp(endpoint) > 64)
+ vub300->large_usb_packets = 1;
+ if (usb_endpoint_is_bulk_in(endpoint)) {
+ if (!vub300->cmnd_res_ep) {
+ vub300->cmnd_res_ep =
+ endpoint->bEndpointAddress;
+ } else if (!vub300->data_inp_ep) {
+ vub300->data_inp_ep =
+ endpoint->bEndpointAddress;
+ } else {
+				dev_warn(&vub300->udev->dev,
+					 "ignoring unexpected bulk_in endpoint\n");
+ }
+ } else if (usb_endpoint_is_bulk_out(endpoint)) {
+ if (!vub300->cmnd_out_ep) {
+ vub300->cmnd_out_ep =
+ endpoint->bEndpointAddress;
+ } else if (!vub300->data_out_ep) {
+ vub300->data_out_ep =
+ endpoint->bEndpointAddress;
+ } else {
+				dev_warn(&vub300->udev->dev,
+					 "ignoring unexpected bulk_out endpoint\n");
+ }
+ } else {
+ dev_warn(&vub300->udev->dev,
+				 "vub300 ignoring EndPoint(%d) %02X\n", i,
+ endpoint->bEndpointAddress);
+ }
+ }
+ if (vub300->cmnd_res_ep && vub300->cmnd_out_ep &&
+ vub300->data_inp_ep && vub300->data_out_ep) {
+ dev_info(&vub300->udev->dev,
+ "vub300 %s packets"
+ " using EndPoints %02X %02X %02X %02X\n",
+ vub300->large_usb_packets ? "LARGE" : "SMALL",
+ vub300->cmnd_out_ep, vub300->cmnd_res_ep,
+ vub300->data_out_ep, vub300->data_inp_ep);
+ /* we have the expected EndPoints */
+ } else {
+ dev_err(&vub300->udev->dev,
+ "Could not find two sets of bulk-in/out endpoint pairs\n");
+ retval = -EINVAL;
+ goto error5;
+ }
+ retval =
+ usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
+ GET_HC_INF0,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0x0000, 0x0000, &vub300->hc_info,
+				sizeof(vub300->hc_info), 1000);
+ if (retval < 0)
+ goto error5;
+ retval =
+ usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
+ SET_ROM_WAIT_STATES,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				firmware_rom_wait_states, 0x0000, NULL, 0, 1000);
+ if (retval < 0)
+ goto error5;
+ dev_info(&vub300->udev->dev,
+ "operating_mode = %s %s %d MHz %s %d byte USB packets\n",
+ (mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL",
+ (mmc->caps & MMC_CAP_4_BIT_DATA) ? "4-bit" : "1-bit",
+ mmc->f_max / 1000000,
+ pad_input_to_usb_pkt ? "padding input data to" : "with",
+ vub300->large_usb_packets ? 512 : 64);
+ retval =
+ usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
+ GET_SYSTEM_PORT_STATUS,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0x0000, 0x0000, &vub300->system_port_status,
+				sizeof(vub300->system_port_status), 1000);
+ if (retval < 0) {
+		goto error5;
+ } else if (sizeof(vub300->system_port_status) == retval) {
+ vub300->card_present =
+ (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
+ vub300->read_only =
+ (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
+ } else {
+		goto error5;
+ }
+ usb_set_intfdata(interface, vub300);
+ INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread);
+ INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread);
+ INIT_WORK(&vub300->deadwork, vub300_deadwork_thread);
+ kref_init(&vub300->kref);
+ init_timer(&vub300->sg_transfer_timer);
+ vub300->sg_transfer_timer.data = (unsigned long)vub300;
+ vub300->sg_transfer_timer.function = vub300_sg_timed_out;
+ kref_get(&vub300->kref);
+ init_timer(&vub300->inactivity_timer);
+ vub300->inactivity_timer.data = (unsigned long)vub300;
+ vub300->inactivity_timer.function = vub300_inactivity_timer_expired;
+ vub300->inactivity_timer.expires = jiffies + HZ;
+ add_timer(&vub300->inactivity_timer);
+ if (vub300->card_present)
+ dev_info(&vub300->udev->dev,
+			 "USB vub300 remote SDIO host controller[%d] "
+ "connected with SD/SDIO card inserted\n",
+ interface_to_InterfaceNumber(interface));
+ else
+ dev_info(&vub300->udev->dev,
+			 "USB vub300 remote SDIO host controller[%d] "
+ "connected with no SD/SDIO card inserted\n",
+ interface_to_InterfaceNumber(interface));
+ mmc_add_host(mmc);
+ return 0;
+error5:
+ mmc_free_host(mmc);
+ /*
+ * and hence also frees vub300
+ * which is contained at the end of struct mmc
+ */
+error4:
+ usb_free_urb(command_res_urb);
+error1:
+ usb_free_urb(command_out_urb);
+error0:
+ usb_put_dev(udev);
+ return retval;
+}
+
+static void vub300_disconnect(struct usb_interface *interface)
+{ /* NOT irq */
+ struct vub300_mmc_host *vub300 = usb_get_intfdata(interface);
+ if (!vub300 || !vub300->mmc) {
+ return;
+ } else {
+ struct mmc_host *mmc = vub300->mmc;
+ if (!vub300->mmc) {
+ return;
+ } else {
+ int ifnum = interface_to_InterfaceNumber(interface);
+ usb_set_intfdata(interface, NULL);
+ /* prevent more I/O from starting */
+ vub300->interface = NULL;
+ kref_put(&vub300->kref, vub300_delete);
+ mmc_remove_host(mmc);
+ pr_info("USB vub300 remote SDIO host controller[%d]"
+				" now disconnected\n", ifnum);
+ return;
+ }
+ }
+}
+
+#ifdef CONFIG_PM
+static int vub300_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ return 0;
+}
+
+static int vub300_resume(struct usb_interface *intf)
+{
+ return 0;
+}
+#else
+#define vub300_suspend NULL
+#define vub300_resume NULL
+#endif
+static int vub300_pre_reset(struct usb_interface *intf)
+{ /* NOT irq */
+ struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
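+	/*
+	 * deliberately return with cmd_mutex held; it is released in
+	 * vub300_post_reset() after the device has been reset
+	 */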
+ mutex_lock(&vub300->cmd_mutex);
+ return 0;
+}
+
+static int vub300_post_reset(struct usb_interface *intf)
+{ /* NOT irq */
+ struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
+ /* we are sure no URBs are active - no locking needed */
+ vub300->errors = -EPIPE;
+ mutex_unlock(&vub300->cmd_mutex);
+ return 0;
+}
+
+static struct usb_driver vub300_driver = {
+ .name = "vub300",
+ .probe = vub300_probe,
+ .disconnect = vub300_disconnect,
+ .suspend = vub300_suspend,
+ .resume = vub300_resume,
+ .pre_reset = vub300_pre_reset,
+ .post_reset = vub300_post_reset,
+ .id_table = vub300_table,
+ .supports_autosuspend = 1,
+};
+
+static int __init vub300_init(void)
+{ /* NOT irq */
+ int result;
+
+	pr_info("VUB300 Driver rom wait states = %02X irqpoll timeout = %04X\n",
+ firmware_rom_wait_states, 0x0FFFF & firmware_irqpoll_timeout);
+ cmndworkqueue = create_singlethread_workqueue("kvub300c");
+ if (!cmndworkqueue) {
+ pr_err("not enough memory for the REQUEST workqueue");
+ result = -ENOMEM;
+ goto out1;
+ }
+ pollworkqueue = create_singlethread_workqueue("kvub300p");
+ if (!pollworkqueue) {
+ pr_err("not enough memory for the IRQPOLL workqueue");
+ result = -ENOMEM;
+ goto out2;
+ }
+ deadworkqueue = create_singlethread_workqueue("kvub300d");
+ if (!deadworkqueue) {
+ pr_err("not enough memory for the EXPIRED workqueue");
+ result = -ENOMEM;
+ goto out3;
+ }
+ result = usb_register(&vub300_driver);
+ if (result) {
+ pr_err("usb_register failed. Error number %d", result);
+ goto out4;
+ }
+ return 0;
+out4:
+ destroy_workqueue(deadworkqueue);
+out3:
+ destroy_workqueue(pollworkqueue);
+out2:
+ destroy_workqueue(cmndworkqueue);
+out1:
+ return result;
+}
+
+static void __exit vub300_exit(void)
+{
+ usb_deregister(&vub300_driver);
+ flush_workqueue(cmndworkqueue);
+ flush_workqueue(pollworkqueue);
+ flush_workqueue(deadworkqueue);
+ destroy_workqueue(cmndworkqueue);
+ destroy_workqueue(pollworkqueue);
+ destroy_workqueue(deadworkqueue);
+}
+
+module_init(vub300_init);
+module_exit(vub300_exit);
+
+MODULE_AUTHOR("Tony Olech <tony.olech@elandigitalsystems.com>");
+MODULE_DESCRIPTION("VUB300 USB to SD/MMC/SDIO adapter driver");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/mmc/host/wbsd.c b/kernel/drivers/mmc/host/wbsd.c
new file mode 100644
index 000000000..ca183ea76
--- /dev/null
+++ b/kernel/drivers/mmc/host/wbsd.c
@@ -0,0 +1,2013 @@
+/*
+ * linux/drivers/mmc/host/wbsd.c - Winbond W83L51xD SD/MMC driver
+ *
+ * Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ *
+ * Warning!
+ *
+ * Changes to the FIFO system should be done with extreme care since
+ * the hardware is full of bugs related to the FIFO. Known issues are:
+ *
+ * - FIFO size field in FSR is always zero.
+ *
+ * - FIFO interrupts tend not to work as they should. Interrupts are
+ * triggered only for full/empty events, not for threshold values.
+ *
+ * - On APIC systems the FIFO empty interrupt is sometimes lost.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/pnp.h>
+#include <linux/highmem.h>
+#include <linux/mmc/host.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "wbsd.h"
+
+#define DRIVER_NAME "wbsd"
+
+#define DBG(x...) \
+ pr_debug(DRIVER_NAME ": " x)
+#define DBGF(f, x...) \
+ pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
+
+/*
+ * Device resources
+ */
+
+#ifdef CONFIG_PNP
+
+static const struct pnp_device_id pnp_dev_table[] = {
+ { "WEC0517", 0 },
+ { "WEC0518", 0 },
+ { "", 0 },
+};
+
+MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
+
+#endif /* CONFIG_PNP */
+
+static const int config_ports[] = { 0x2E, 0x4E };
+static const int unlock_codes[] = { 0x83, 0x87 };
+
+static const int valid_ids[] = {
+ 0x7112,
+};
+
+#ifdef CONFIG_PNP
+static unsigned int param_nopnp = 0;
+#else
+static const unsigned int param_nopnp = 1;
+#endif
+static unsigned int param_io = 0x248;
+static unsigned int param_irq = 6;
+static int param_dma = 2;
+
+/*
+ * Basic functions
+ */
+
+static inline void wbsd_unlock_config(struct wbsd_host *host)
+{
+ BUG_ON(host->config == 0);
+
+ outb(host->unlock_code, host->config);
+ outb(host->unlock_code, host->config);
+}
+
+static inline void wbsd_lock_config(struct wbsd_host *host)
+{
+ BUG_ON(host->config == 0);
+
+ outb(LOCK_CODE, host->config);
+}
+
+static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value)
+{
+ BUG_ON(host->config == 0);
+
+ outb(reg, host->config);
+ outb(value, host->config + 1);
+}
+
+static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg)
+{
+ BUG_ON(host->config == 0);
+
+ outb(reg, host->config);
+ return inb(host->config + 1);
+}
+
+static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value)
+{
+ outb(index, host->base + WBSD_IDXR);
+ outb(value, host->base + WBSD_DATAR);
+}
+
+static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index)
+{
+ outb(index, host->base + WBSD_IDXR);
+ return inb(host->base + WBSD_DATAR);
+}
+
+/*
+ * Common routines
+ */
+
+static void wbsd_init_device(struct wbsd_host *host)
+{
+ u8 setup, ier;
+
+ /*
+ * Reset chip (SD/MMC part) and fifo.
+ */
+ setup = wbsd_read_index(host, WBSD_IDX_SETUP);
+ setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
+ wbsd_write_index(host, WBSD_IDX_SETUP, setup);
+
+ /*
+ * Set DAT3 to input
+ */
+ setup &= ~WBSD_DAT3_H;
+ wbsd_write_index(host, WBSD_IDX_SETUP, setup);
+ host->flags &= ~WBSD_FIGNORE_DETECT;
+
+ /*
+ * Read back default clock.
+ */
+ host->clk = wbsd_read_index(host, WBSD_IDX_CLK);
+
+ /*
+ * Power down port.
+ */
+ outb(WBSD_POWER_N, host->base + WBSD_CSR);
+
+ /*
+ * Set maximum timeout.
+ */
+ wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);
+
+ /*
+ * Test for card presence
+ */
+ if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
+ host->flags |= WBSD_FCARD_PRESENT;
+ else
+ host->flags &= ~WBSD_FCARD_PRESENT;
+
+ /*
+ * Enable interesting interrupts.
+ */
+ ier = 0;
+ ier |= WBSD_EINT_CARD;
+ ier |= WBSD_EINT_FIFO_THRE;
+ ier |= WBSD_EINT_CRC;
+ ier |= WBSD_EINT_TIMEOUT;
+ ier |= WBSD_EINT_TC;
+
+ outb(ier, host->base + WBSD_EIR);
+
+ /*
+ * Clear interrupts.
+ */
+ inb(host->base + WBSD_ISR);
+}
+
+static void wbsd_reset(struct wbsd_host *host)
+{
+ u8 setup;
+
+ pr_err("%s: Resetting chip\n", mmc_hostname(host->mmc));
+
+ /*
+ * Soft reset of chip (SD/MMC part).
+ */
+ setup = wbsd_read_index(host, WBSD_IDX_SETUP);
+ setup |= WBSD_SOFT_RESET;
+ wbsd_write_index(host, WBSD_IDX_SETUP, setup);
+}
+
+static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
+{
+ unsigned long dmaflags;
+
+ if (host->dma >= 0) {
+ /*
+ * Release ISA DMA controller.
+ */
+ dmaflags = claim_dma_lock();
+ disable_dma(host->dma);
+ clear_dma_ff(host->dma);
+ release_dma_lock(dmaflags);
+
+ /*
+ * Disable DMA on host.
+ */
+ wbsd_write_index(host, WBSD_IDX_DMA, 0);
+ }
+
+ host->mrq = NULL;
+
+ /*
+ * MMC layer might call back into the driver so first unlock.
+ */
+ spin_unlock(&host->lock);
+ mmc_request_done(host->mmc, mrq);
+ spin_lock(&host->lock);
+}
+
+/*
+ * Scatter/gather functions
+ */
+
+static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data)
+{
+ /*
+ * Get info. about SG list from data structure.
+ */
+ host->cur_sg = data->sg;
+ host->num_sg = data->sg_len;
+
+ host->offset = 0;
+ host->remain = host->cur_sg->length;
+}
+
+static inline int wbsd_next_sg(struct wbsd_host *host)
+{
+ /*
+ * Skip to next SG entry.
+ */
+ host->cur_sg++;
+ host->num_sg--;
+
+ /*
+ * Any entries left?
+ */
+ if (host->num_sg > 0) {
+ host->offset = 0;
+ host->remain = host->cur_sg->length;
+ }
+
+ return host->num_sg;
+}
+
+static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
+{
+ return sg_virt(host->cur_sg);
+}
+
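+/*
+ * Copy the scatterlist into the contiguous ISA DMA bounce buffer
+ * ahead of a write transfer.
+ */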
+static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
+{
+ unsigned int len, i;
+ struct scatterlist *sg;
+ char *dmabuf = host->dma_buffer;
+ char *sgbuf;
+
+ sg = data->sg;
+ len = data->sg_len;
+
+ for (i = 0; i < len; i++) {
+ sgbuf = sg_virt(&sg[i]);
+ memcpy(dmabuf, sgbuf, sg[i].length);
+ dmabuf += sg[i].length;
+ }
+}
+
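+/*
+ * Copy a completed read transfer back from the ISA DMA bounce
+ * buffer into the scatterlist.
+ */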
+static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
+{
+ unsigned int len, i;
+ struct scatterlist *sg;
+ char *dmabuf = host->dma_buffer;
+ char *sgbuf;
+
+ sg = data->sg;
+ len = data->sg_len;
+
+ for (i = 0; i < len; i++) {
+ sgbuf = sg_virt(&sg[i]);
+ memcpy(sgbuf, dmabuf, sg[i].length);
+ dmabuf += sg[i].length;
+ }
+}
+
+/*
+ * Command handling
+ */
+
+static inline void wbsd_get_short_reply(struct wbsd_host *host,
+ struct mmc_command *cmd)
+{
+ /*
+ * Correct response type?
+ */
+ if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) {
+ cmd->error = -EILSEQ;
+ return;
+ }
+
+ cmd->resp[0] = wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
+ cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
+ cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
+ cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
+ cmd->resp[1] = wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
+}
+
+static inline void wbsd_get_long_reply(struct wbsd_host *host,
+ struct mmc_command *cmd)
+{
+ int i;
+
+ /*
+ * Correct response type?
+ */
+ if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) {
+ cmd->error = -EILSEQ;
+ return;
+ }
+
+ for (i = 0; i < 4; i++) {
+ cmd->resp[i] =
+ wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
+ cmd->resp[i] |=
+ wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
+ cmd->resp[i] |=
+ wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
+ cmd->resp[i] |=
+ wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
+ }
+}
+
+static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
+{
+ int i;
+ u8 status, isr;
+
+ /*
+ * Clear accumulated ISR. The interrupt routine
+ * will fill this one with events that occur during
+ * transfer.
+ */
+ host->isr = 0;
+
+ /*
+ * Send the command (CRC calculated by host).
+ */
+ outb(cmd->opcode, host->base + WBSD_CMDR);
+ for (i = 3; i >= 0; i--)
+ outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);
+
+ cmd->error = 0;
+
+ /*
+ * Wait for the request to complete.
+ */
+ do {
+ status = wbsd_read_index(host, WBSD_IDX_STATUS);
+ } while (status & WBSD_CARDTRAFFIC);
+
+ /*
+ * Do we expect a reply?
+ */
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ /*
+ * Read back status.
+ */
+ isr = host->isr;
+
+ /* Card removed? */
+ if (isr & WBSD_INT_CARD)
+ cmd->error = -ENOMEDIUM;
+ /* Timeout? */
+ else if (isr & WBSD_INT_TIMEOUT)
+ cmd->error = -ETIMEDOUT;
+ /* CRC? */
+ else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
+ cmd->error = -EILSEQ;
+ /* All ok */
+ else {
+ if (cmd->flags & MMC_RSP_136)
+ wbsd_get_long_reply(host, cmd);
+ else
+ wbsd_get_short_reply(host, cmd);
+ }
+ }
+}
+
+/*
+ * Data functions
+ */
+
+static void wbsd_empty_fifo(struct wbsd_host *host)
+{
+ struct mmc_data *data = host->mrq->cmd->data;
+ char *buffer;
+ int i, fsr, fifo;
+
+ /*
+ * Handle excessive data.
+ */
+ if (host->num_sg == 0)
+ return;
+
+ buffer = wbsd_sg_to_buffer(host) + host->offset;
+
+ /*
+ * Drain the fifo. This has a tendency to loop longer
+ * than the FIFO length (usually one block).
+ */
+ while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) {
+ /*
+ * The size field in the FSR is broken so we have to
+ * do some guessing.
+ */
+ if (fsr & WBSD_FIFO_FULL)
+ fifo = 16;
+ else if (fsr & WBSD_FIFO_FUTHRE)
+ fifo = 8;
+ else
+ fifo = 1;
+
+ for (i = 0; i < fifo; i++) {
+ *buffer = inb(host->base + WBSD_DFR);
+ buffer++;
+ host->offset++;
+ host->remain--;
+
+ data->bytes_xfered++;
+
+ /*
+ * End of scatter list entry?
+ */
+ if (host->remain == 0) {
+ /*
+ * Get next entry. Check if last.
+ */
+ if (!wbsd_next_sg(host))
+ return;
+
+ buffer = wbsd_sg_to_buffer(host);
+ }
+ }
+ }
+
+ /*
+ * This is a very dirty hack to solve a
+ * hardware problem. The chip doesn't trigger
+ * FIFO threshold interrupts properly.
+ */
+ if ((data->blocks * data->blksz - data->bytes_xfered) < 16)
+ tasklet_schedule(&host->fifo_tasklet);
+}
+
+static void wbsd_fill_fifo(struct wbsd_host *host)
+{
+ struct mmc_data *data = host->mrq->cmd->data;
+ char *buffer;
+ int i, fsr, fifo;
+
+ /*
+ * Check that we aren't being called after the
+ * entire buffer has been transferred.
+ */
+ if (host->num_sg == 0)
+ return;
+
+ buffer = wbsd_sg_to_buffer(host) + host->offset;
+
+ /*
+ * Fill the fifo. This has a tendency to loop longer
+ * than the FIFO length (usually one block).
+ */
+ while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) {
+ /*
+ * The size field in the FSR is broken so we have to
+ * do some guessing.
+ */
+ if (fsr & WBSD_FIFO_EMPTY)
+ fifo = 0;
+ else if (fsr & WBSD_FIFO_EMTHRE)
+ fifo = 8;
+ else
+ fifo = 15;
+
+ for (i = 16; i > fifo; i--) {
+ outb(*buffer, host->base + WBSD_DFR);
+ buffer++;
+ host->offset++;
+ host->remain--;
+
+ data->bytes_xfered++;
+
+ /*
+ * End of scatter list entry?
+ */
+ if (host->remain == 0) {
+ /*
+ * Get next entry. Check if last.
+ */
+ if (!wbsd_next_sg(host))
+ return;
+
+ buffer = wbsd_sg_to_buffer(host);
+ }
+ }
+ }
+
+ /*
+ * The controller stops sending interrupts for
+ * 'FIFO empty' under certain conditions. So we
+ * need to be a bit more pro-active.
+ */
+ tasklet_schedule(&host->fifo_tasklet);
+}
+
+static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
+{
+ u16 blksize;
+ u8 setup;
+ unsigned long dmaflags;
+ unsigned int size;
+
+ /*
+ * Calculate size.
+ */
+ size = data->blocks * data->blksz;
+
+ /*
+ * Check timeout values for overflow.
+ * (Yes, some cards cause this value to overflow).
+ */
+ if (data->timeout_ns > 127000000)
+ wbsd_write_index(host, WBSD_IDX_TAAC, 127);
+ else {
+ wbsd_write_index(host, WBSD_IDX_TAAC,
+ data->timeout_ns / 1000000);
+ }
+
+ if (data->timeout_clks > 255)
+ wbsd_write_index(host, WBSD_IDX_NSAC, 255);
+ else
+ wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
+
+ /*
+ * Inform the chip of how large blocks will be
+ * sent. It needs this to determine when to
+ * calculate CRC.
+ *
+ * Space for CRC must be included in the size.
+ * Two bytes are needed for each data line.
+ */
+ if (host->bus_width == MMC_BUS_WIDTH_1) {
+ blksize = data->blksz + 2;
+
+ wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
+ wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
+ } else if (host->bus_width == MMC_BUS_WIDTH_4) {
+ blksize = data->blksz + 2 * 4;
+
+ wbsd_write_index(host, WBSD_IDX_PBSMSB,
+ ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
+ wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
+ } else {
+ data->error = -EINVAL;
+ return;
+ }
+
+ /*
+ * Clear the FIFO. This is needed even for DMA
+ * transfers since the chip still uses the FIFO
+ * internally.
+ */
+ setup = wbsd_read_index(host, WBSD_IDX_SETUP);
+ setup |= WBSD_FIFO_RESET;
+ wbsd_write_index(host, WBSD_IDX_SETUP, setup);
+
+ /*
+ * DMA transfer?
+ */
+ if (host->dma >= 0) {
+ /*
+ * The buffer for DMA is only 64 kB.
+ */
+		if (size > 0x10000) {
+ data->error = -EINVAL;
+ return;
+ }
+
+ /*
+ * Transfer data from the SG list to
+ * the DMA buffer.
+ */
+ if (data->flags & MMC_DATA_WRITE)
+ wbsd_sg_to_dma(host, data);
+
+ /*
+ * Initialise the ISA DMA controller.
+ */
+ dmaflags = claim_dma_lock();
+ disable_dma(host->dma);
+ clear_dma_ff(host->dma);
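+		/* DMA_MODE_* & ~0x40 clears the 8237 single-mode bit: demand mode */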
+ if (data->flags & MMC_DATA_READ)
+ set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
+ else
+ set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
+ set_dma_addr(host->dma, host->dma_addr);
+ set_dma_count(host->dma, size);
+
+ enable_dma(host->dma);
+ release_dma_lock(dmaflags);
+
+ /*
+ * Enable DMA on the host.
+ */
+ wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
+ } else {
+ /*
+ * This flag is used to keep printk
+ * output to a minimum.
+ */
+ host->firsterr = 1;
+
+ /*
+ * Initialise the SG list.
+ */
+ wbsd_init_sg(host, data);
+
+ /*
+ * Turn off DMA.
+ */
+ wbsd_write_index(host, WBSD_IDX_DMA, 0);
+
+ /*
+ * Set up FIFO threshold levels (and fill
+ * buffer if doing a write).
+ */
+ if (data->flags & MMC_DATA_READ) {
+ wbsd_write_index(host, WBSD_IDX_FIFOEN,
+ WBSD_FIFOEN_FULL | 8);
+ } else {
+ wbsd_write_index(host, WBSD_IDX_FIFOEN,
+ WBSD_FIFOEN_EMPTY | 8);
+ wbsd_fill_fifo(host);
+ }
+ }
+
+ data->error = 0;
+}
+
+static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
+{
+ unsigned long dmaflags;
+ int count;
+ u8 status;
+
+ WARN_ON(host->mrq == NULL);
+
+ /*
+ * Send a stop command if needed.
+ */
+ if (data->stop)
+ wbsd_send_command(host, data->stop);
+
+ /*
+ * Wait for the controller to leave data
+ * transfer state.
+ */
+ do {
+ status = wbsd_read_index(host, WBSD_IDX_STATUS);
+ } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));
+
+ /*
+ * DMA transfer?
+ */
+ if (host->dma >= 0) {
+ /*
+ * Disable DMA on the host.
+ */
+ wbsd_write_index(host, WBSD_IDX_DMA, 0);
+
+ /*
+		 * Turn off the ISA DMA controller.
+ */
+ dmaflags = claim_dma_lock();
+ disable_dma(host->dma);
+ clear_dma_ff(host->dma);
+ count = get_dma_residue(host->dma);
+ release_dma_lock(dmaflags);
+
+ data->bytes_xfered = host->mrq->data->blocks *
+ host->mrq->data->blksz - count;
+ data->bytes_xfered -= data->bytes_xfered % data->blksz;
+
+ /*
+ * Any leftover data?
+ */
+ if (count) {
+ pr_err("%s: Incomplete DMA transfer. "
+ "%d bytes left.\n",
+ mmc_hostname(host->mmc), count);
+
+ if (!data->error)
+ data->error = -EIO;
+ } else {
+ /*
+ * Transfer data from DMA buffer to
+ * SG list.
+ */
+ if (data->flags & MMC_DATA_READ)
+ wbsd_dma_to_sg(host, data);
+ }
+
+ if (data->error) {
+ if (data->bytes_xfered)
+ data->bytes_xfered -= data->blksz;
+ }
+ }
+
+ wbsd_request_end(host, host->mrq);
+}
+
+/*****************************************************************************\
+ * *
+ * MMC layer callbacks *
+ * *
+\*****************************************************************************/
+
+static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct wbsd_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd;
+
+ /*
+ * Disable tasklets to avoid a deadlock.
+ */
+ spin_lock_bh(&host->lock);
+
+ BUG_ON(host->mrq != NULL);
+
+ cmd = mrq->cmd;
+
+ host->mrq = mrq;
+
+ /*
+ * Check that there is actually a card in the slot.
+ */
+ if (!(host->flags & WBSD_FCARD_PRESENT)) {
+ cmd->error = -ENOMEDIUM;
+ goto done;
+ }
+
+ if (cmd->data) {
+ /*
+ * The hardware is so delightfully stupid that it has a list
+ * of "data" commands. If a command isn't on this list, it'll
+ * just go back to the idle state and won't send any data
+ * interrupts.
+ */
+ switch (cmd->opcode) {
+ case 11:
+ case 17:
+ case 18:
+ case 20:
+ case 24:
+ case 25:
+ case 26:
+ case 27:
+ case 30:
+ case 42:
+ case 56:
+ break;
+
+ /* ACMDs. We don't keep track of state, so we just treat them
+ * like any other command. */
+ case 51:
+ break;
+
+ default:
+#ifdef CONFIG_MMC_DEBUG
+ pr_warn("%s: Data command %d is not supported by this controller\n",
+ mmc_hostname(host->mmc), cmd->opcode);
+#endif
+ cmd->error = -EINVAL;
+
+ goto done;
+		}
+ }
+
+ /*
+ * Does the request include data?
+ */
+ if (cmd->data) {
+ wbsd_prepare_data(host, cmd->data);
+
+ if (cmd->data->error)
+ goto done;
+ }
+
+ wbsd_send_command(host, cmd);
+
+ /*
+ * If this is a data transfer the request
+ * will be finished after the data has
+ * transferred.
+ */
+ if (cmd->data && !cmd->error) {
+ /*
+ * Dirty fix for hardware bug.
+ */
+ if (host->dma == -1)
+ tasklet_schedule(&host->fifo_tasklet);
+
+ spin_unlock_bh(&host->lock);
+
+ return;
+ }
+
+done:
+ wbsd_request_end(host, mrq);
+
+ spin_unlock_bh(&host->lock);
+}
+
+static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct wbsd_host *host = mmc_priv(mmc);
+ u8 clk, setup, pwr;
+
+ spin_lock_bh(&host->lock);
+
+ /*
+ * Reset the chip on each power off.
+ * Should clear out any weird states.
+ */
+ if (ios->power_mode == MMC_POWER_OFF)
+ wbsd_init_device(host);
+
+ if (ios->clock >= 24000000)
+ clk = WBSD_CLK_24M;
+ else if (ios->clock >= 16000000)
+ clk = WBSD_CLK_16M;
+ else if (ios->clock >= 12000000)
+ clk = WBSD_CLK_12M;
+ else
+ clk = WBSD_CLK_375K;
+
+ /*
+ * Only write to the clock register when
+ * there is an actual change.
+ */
+ if (clk != host->clk) {
+ wbsd_write_index(host, WBSD_IDX_CLK, clk);
+ host->clk = clk;
+ }
+
+ /*
+ * Power up card.
+ */
+ if (ios->power_mode != MMC_POWER_OFF) {
+ pwr = inb(host->base + WBSD_CSR);
+ pwr &= ~WBSD_POWER_N;
+ outb(pwr, host->base + WBSD_CSR);
+ }
+
+ /*
+ * MMC cards need to have pin 1 high during init.
+ * It wreaks havoc with the card detection though so
+ * that needs to be disabled.
+ */
+ setup = wbsd_read_index(host, WBSD_IDX_SETUP);
+ if (ios->chip_select == MMC_CS_HIGH) {
+ BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
+ setup |= WBSD_DAT3_H;
+ host->flags |= WBSD_FIGNORE_DETECT;
+ } else {
+ if (setup & WBSD_DAT3_H) {
+ setup &= ~WBSD_DAT3_H;
+
+ /*
+ * We cannot resume card detection immediately
+ * because of capacitance and delays in the chip.
+ */
+ mod_timer(&host->ignore_timer, jiffies + HZ / 100);
+ }
+ }
+ wbsd_write_index(host, WBSD_IDX_SETUP, setup);
+
+ /*
+ * Store bus width for later. Will be used when
+ * setting up the data transfer.
+ */
+ host->bus_width = ios->bus_width;
+
+ spin_unlock_bh(&host->lock);
+}
+
+static int wbsd_get_ro(struct mmc_host *mmc)
+{
+ struct wbsd_host *host = mmc_priv(mmc);
+ u8 csr;
+
+ spin_lock_bh(&host->lock);
+
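+	/*
+	 * pulse the MSLED bit and sample the CSR while it is set; the
+	 * write-protect state (WBSD_WRPT) is read back from the same
+	 * register
+	 */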
+ csr = inb(host->base + WBSD_CSR);
+ csr |= WBSD_MSLED;
+ outb(csr, host->base + WBSD_CSR);
+
+ mdelay(1);
+
+ csr = inb(host->base + WBSD_CSR);
+ csr &= ~WBSD_MSLED;
+ outb(csr, host->base + WBSD_CSR);
+
+ spin_unlock_bh(&host->lock);
+
+ return !!(csr & WBSD_WRPT);
+}
+
+static const struct mmc_host_ops wbsd_ops = {
+ .request = wbsd_request,
+ .set_ios = wbsd_set_ios,
+ .get_ro = wbsd_get_ro,
+};
+
+/*****************************************************************************\
+ * *
+ * Interrupt handling *
+ * *
+\*****************************************************************************/
+
+/*
+ * Helper function to reset detection ignore
+ */
+
+static void wbsd_reset_ignore(unsigned long data)
+{
+ struct wbsd_host *host = (struct wbsd_host *)data;
+
+ BUG_ON(host == NULL);
+
+ DBG("Resetting card detection ignore\n");
+
+ spin_lock_bh(&host->lock);
+
+ host->flags &= ~WBSD_FIGNORE_DETECT;
+
+ /*
+ * Card status might have changed during the
+ * blackout.
+ */
+ tasklet_schedule(&host->card_tasklet);
+
+ spin_unlock_bh(&host->lock);
+}
+
+/*
+ * Tasklets
+ */
+
+static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
+{
+ WARN_ON(!host->mrq);
+ if (!host->mrq)
+ return NULL;
+
+ WARN_ON(!host->mrq->cmd);
+ if (!host->mrq->cmd)
+ return NULL;
+
+ WARN_ON(!host->mrq->cmd->data);
+ if (!host->mrq->cmd->data)
+ return NULL;
+
+ return host->mrq->cmd->data;
+}
+
+static void wbsd_tasklet_card(unsigned long param)
+{
+ struct wbsd_host *host = (struct wbsd_host *)param;
+ u8 csr;
+ int delay = -1;
+
+ spin_lock(&host->lock);
+
+ if (host->flags & WBSD_FIGNORE_DETECT) {
+ spin_unlock(&host->lock);
+ return;
+ }
+
+ csr = inb(host->base + WBSD_CSR);
+ WARN_ON(csr == 0xff);
+
+ if (csr & WBSD_CARDPRESENT) {
+ if (!(host->flags & WBSD_FCARD_PRESENT)) {
+ DBG("Card inserted\n");
+ host->flags |= WBSD_FCARD_PRESENT;
+
+ delay = 500;
+ }
+ } else if (host->flags & WBSD_FCARD_PRESENT) {
+ DBG("Card removed\n");
+ host->flags &= ~WBSD_FCARD_PRESENT;
+
+ if (host->mrq) {
+ pr_err("%s: Card removed during transfer!\n",
+ mmc_hostname(host->mmc));
+ wbsd_reset(host);
+
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+ }
+
+ delay = 0;
+ }
+
+ /*
+ * Unlock first since we might get a call back.
+ */
+
+ spin_unlock(&host->lock);
+
+ if (delay != -1)
+ mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
+}
+
+static void wbsd_tasklet_fifo(unsigned long param)
+{
+ struct wbsd_host *host = (struct wbsd_host *)param;
+ struct mmc_data *data;
+
+ spin_lock(&host->lock);
+
+ if (!host->mrq)
+ goto end;
+
+ data = wbsd_get_data(host);
+ if (!data)
+ goto end;
+
+ if (data->flags & MMC_DATA_WRITE)
+ wbsd_fill_fifo(host);
+ else
+ wbsd_empty_fifo(host);
+
+ /*
+ * Done?
+ */
+ if (host->num_sg == 0) {
+ wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
+ tasklet_schedule(&host->finish_tasklet);
+ }
+
+end:
+ spin_unlock(&host->lock);
+}
+
+static void wbsd_tasklet_crc(unsigned long param)
+{
+ struct wbsd_host *host = (struct wbsd_host *)param;
+ struct mmc_data *data;
+
+ spin_lock(&host->lock);
+
+ if (!host->mrq)
+ goto end;
+
+ data = wbsd_get_data(host);
+ if (!data)
+ goto end;
+
+ DBGF("CRC error\n");
+
+ data->error = -EILSEQ;
+
+ tasklet_schedule(&host->finish_tasklet);
+
+end:
+ spin_unlock(&host->lock);
+}
+
+static void wbsd_tasklet_timeout(unsigned long param)
+{
+ struct wbsd_host *host = (struct wbsd_host *)param;
+ struct mmc_data *data;
+
+ spin_lock(&host->lock);
+
+ if (!host->mrq)
+ goto end;
+
+ data = wbsd_get_data(host);
+ if (!data)
+ goto end;
+
+ DBGF("Timeout\n");
+
+ data->error = -ETIMEDOUT;
+
+ tasklet_schedule(&host->finish_tasklet);
+
+end:
+ spin_unlock(&host->lock);
+}
+
+static void wbsd_tasklet_finish(unsigned long param)
+{
+ struct wbsd_host *host = (struct wbsd_host *)param;
+ struct mmc_data *data;
+
+ spin_lock(&host->lock);
+
+ WARN_ON(!host->mrq);
+ if (!host->mrq)
+ goto end;
+
+ data = wbsd_get_data(host);
+ if (!data)
+ goto end;
+
+ wbsd_finish_data(host, data);
+
+end:
+ spin_unlock(&host->lock);
+}
+
+/*
+ * Interrupt handling
+ */
+
+static irqreturn_t wbsd_irq(int irq, void *dev_id)
+{
+ struct wbsd_host *host = dev_id;
+ int isr;
+
+ isr = inb(host->base + WBSD_ISR);
+
+ /*
+ * Was it actually our hardware that caused the interrupt?
+ */
+ if (isr == 0xff || isr == 0x00)
+ return IRQ_NONE;
+
+ host->isr |= isr;
+
+ /*
+ * Schedule tasklets as needed.
+ */
+ if (isr & WBSD_INT_CARD)
+ tasklet_schedule(&host->card_tasklet);
+ if (isr & WBSD_INT_FIFO_THRE)
+ tasklet_schedule(&host->fifo_tasklet);
+ if (isr & WBSD_INT_CRC)
+ tasklet_hi_schedule(&host->crc_tasklet);
+ if (isr & WBSD_INT_TIMEOUT)
+ tasklet_hi_schedule(&host->timeout_tasklet);
+ if (isr & WBSD_INT_TC)
+ tasklet_schedule(&host->finish_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/*****************************************************************************\
+ * *
+ * Device initialisation and shutdown *
+ * *
+\*****************************************************************************/
+
+/*
+ * Allocate/free MMC structure.
+ */
+
+static int wbsd_alloc_mmc(struct device *dev)
+{
+ struct mmc_host *mmc;
+ struct wbsd_host *host;
+
+ /*
+ * Allocate MMC structure.
+ */
+ mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ host->dma = -1;
+
+ /*
+ * Set host parameters.
+ */
+ mmc->ops = &wbsd_ops;
+ mmc->f_min = 375000;
+ mmc->f_max = 24000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->caps = MMC_CAP_4_BIT_DATA;
+
+ spin_lock_init(&host->lock);
+
+ /*
+ * Set up timers
+ */
+ init_timer(&host->ignore_timer);
+ host->ignore_timer.data = (unsigned long)host;
+ host->ignore_timer.function = wbsd_reset_ignore;
+
+ /*
+ * Maximum number of segments. Worst case is one sector per segment
+ * so this will be 64kB/512.
+ */
+ mmc->max_segs = 128;
+
+ /*
+ * Maximum request size. Also limited by 64KiB buffer.
+ */
+ mmc->max_req_size = 65536;
+
+ /*
+ * Maximum segment size. Could be one segment with the maximum number
+ * of bytes.
+ */
+ mmc->max_seg_size = mmc->max_req_size;
+
+ /*
+ * Maximum block size. We have 12 bits (= 4095) but have to subtract
+ * space for CRC. So the maximum is 4095 - 4*2 = 4087.
+ */
+ mmc->max_blk_size = 4087;
+
+ /*
+ * Maximum block count. There is no real limit so the maximum
+ * request size will be the only restriction.
+ */
+ mmc->max_blk_count = mmc->max_req_size;
+
+ dev_set_drvdata(dev, mmc);
+
+ return 0;
+}
+
+static void wbsd_free_mmc(struct device *dev)
+{
+ struct mmc_host *mmc;
+ struct wbsd_host *host;
+
+ mmc = dev_get_drvdata(dev);
+ if (!mmc)
+ return;
+
+ host = mmc_priv(mmc);
+ BUG_ON(host == NULL);
+
+ del_timer_sync(&host->ignore_timer);
+
+ mmc_free_host(mmc);
+
+ dev_set_drvdata(dev, NULL);
+}
+
+/*
+ * Scan for known chip IDs
+ */
+
+static int wbsd_scan(struct wbsd_host *host)
+{
+ int i, j, k;
+ int id;
+
+ /*
+ * Iterate through all ports, all codes to
+ * find hardware that is in our known list.
+ */
+ for (i = 0; i < ARRAY_SIZE(config_ports); i++) {
+ if (!request_region(config_ports[i], 2, DRIVER_NAME))
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
+ id = 0xFFFF;
+
+ host->config = config_ports[i];
+ host->unlock_code = unlock_codes[j];
+
+ wbsd_unlock_config(host);
+
+ outb(WBSD_CONF_ID_HI, config_ports[i]);
+ id = inb(config_ports[i] + 1) << 8;
+
+ outb(WBSD_CONF_ID_LO, config_ports[i]);
+ id |= inb(config_ports[i] + 1);
+
+ wbsd_lock_config(host);
+
+ for (k = 0; k < ARRAY_SIZE(valid_ids); k++) {
+ if (id == valid_ids[k]) {
+ host->chip_id = id;
+
+ return 0;
+ }
+ }
+
+ if (id != 0xFFFF) {
+ DBG("Unknown hardware (id %x) found at %x\n",
+ id, config_ports[i]);
+ }
+ }
+
+ release_region(config_ports[i], 2);
+ }
+
+ host->config = 0;
+ host->unlock_code = 0;
+
+ return -ENODEV;
+}
+
+/*
+ * Allocate/free io port ranges
+ */
+
+static int wbsd_request_region(struct wbsd_host *host, int base)
+{
+ if (base & 0x7)
+ return -EINVAL;
+
+ if (!request_region(base, 8, DRIVER_NAME))
+ return -EIO;
+
+ host->base = base;
+
+ return 0;
+}
+
+static void wbsd_release_regions(struct wbsd_host *host)
+{
+ if (host->base)
+ release_region(host->base, 8);
+
+ host->base = 0;
+
+ if (host->config)
+ release_region(host->config, 2);
+
+ host->config = 0;
+}
+
+/*
+ * Allocate/free DMA port and buffer
+ */
+
+static void wbsd_request_dma(struct wbsd_host *host, int dma)
+{
+ if (dma < 0)
+ return;
+
+ if (request_dma(dma, DRIVER_NAME))
+ goto err;
+
+ /*
+ * We need to allocate a special buffer in
+ * order for ISA to be able to DMA to it.
+ */
+ host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
+ GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
+ if (!host->dma_buffer)
+ goto free;
+
+ /*
+ * Translate the address to a physical address.
+ */
+ host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer,
+ WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
+
+ /*
+ * ISA DMA must be aligned on a 64k basis.
+ */
+ if ((host->dma_addr & 0xffff) != 0)
+ goto kfree;
+ /*
+ * ISA cannot access memory above 16 MB.
+ */
+ else if (host->dma_addr >= 0x1000000)
+ goto kfree;
+
+ host->dma = dma;
+
+ return;
+
+kfree:
+ /*
+ * If we've gotten here, then there is some kind of alignment bug.
+ */
+ BUG_ON(1);
+
+ dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
+ WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
+ host->dma_addr = 0;
+
+ kfree(host->dma_buffer);
+ host->dma_buffer = NULL;
+
+free:
+ free_dma(dma);
+
+err:
+ pr_warn(DRIVER_NAME ": Unable to allocate DMA %d - falling back on FIFO\n",
+ dma);
+}
+
+static void wbsd_release_dma(struct wbsd_host *host)
+{
+ if (host->dma_addr) {
+ dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
+ WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
+ }
+ kfree(host->dma_buffer);
+ if (host->dma >= 0)
+ free_dma(host->dma);
+
+ host->dma = -1;
+ host->dma_buffer = NULL;
+ host->dma_addr = 0;
+}
+
+/*
+ * Allocate/free IRQ.
+ */
+
+static int wbsd_request_irq(struct wbsd_host *host, int irq)
+{
+ int ret;
+
+ /*
+ * Set up tasklets. Must be done before requesting interrupt.
+ */
+ tasklet_init(&host->card_tasklet, wbsd_tasklet_card,
+ (unsigned long)host);
+ tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo,
+ (unsigned long)host);
+ tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc,
+ (unsigned long)host);
+ tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout,
+ (unsigned long)host);
+ tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish,
+ (unsigned long)host);
+
+ /*
+ * Allocate interrupt.
+ */
+ ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host);
+ if (ret)
+ return ret;
+
+ host->irq = irq;
+
+ return 0;
+}
+
+static void wbsd_release_irq(struct wbsd_host *host)
+{
+ if (!host->irq)
+ return;
+
+ free_irq(host->irq, host);
+
+ host->irq = 0;
+
+ tasklet_kill(&host->card_tasklet);
+ tasklet_kill(&host->fifo_tasklet);
+ tasklet_kill(&host->crc_tasklet);
+ tasklet_kill(&host->timeout_tasklet);
+ tasklet_kill(&host->finish_tasklet);
+}
+
+/*
+ * Allocate all resources for the host.
+ */
+
+static int wbsd_request_resources(struct wbsd_host *host,
+ int base, int irq, int dma)
+{
+ int ret;
+
+ /*
+ * Allocate I/O ports.
+ */
+ ret = wbsd_request_region(host, base);
+ if (ret)
+ return ret;
+
+ /*
+ * Allocate interrupt.
+ */
+ ret = wbsd_request_irq(host, irq);
+ if (ret)
+ return ret;
+
+ /*
+ * Allocate DMA.
+ */
+ wbsd_request_dma(host, dma);
+
+ return 0;
+}
+
+/*
+ * Release all resources for the host.
+ */
+
+static void wbsd_release_resources(struct wbsd_host *host)
+{
+ wbsd_release_dma(host);
+ wbsd_release_irq(host);
+ wbsd_release_regions(host);
+}
+
+/*
+ * Configure the resources the chip should use.
+ */
+
+static void wbsd_chip_config(struct wbsd_host *host)
+{
+ wbsd_unlock_config(host);
+
+ /*
+ * Reset the chip.
+ */
+ wbsd_write_config(host, WBSD_CONF_SWRST, 1);
+ wbsd_write_config(host, WBSD_CONF_SWRST, 0);
+
+ /*
+ * Select SD/MMC function.
+ */
+ wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
+
+ /*
+ * Set up card detection.
+ */
+ wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);
+
+ /*
+ * Configure chip
+ */
+ wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
+ wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);
+
+ wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);
+
+ if (host->dma >= 0)
+ wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);
+
+ /*
+ * Enable and power up chip.
+ */
+ wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
+ wbsd_write_config(host, WBSD_CONF_POWER, 0x20);
+
+ wbsd_lock_config(host);
+}
+
+/*
+ * Check that configured resources are correct.
+ */
+
+static int wbsd_chip_validate(struct wbsd_host *host)
+{
+ int base, irq, dma;
+
+ wbsd_unlock_config(host);
+
+ /*
+ * Select SD/MMC function.
+ */
+ wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
+
+ /*
+ * Read configuration.
+ */
+ base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
+ base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);
+
+ irq = wbsd_read_config(host, WBSD_CONF_IRQ);
+
+ dma = wbsd_read_config(host, WBSD_CONF_DRQ);
+
+ wbsd_lock_config(host);
+
+ /*
+ * Validate against given configuration.
+ */
+ if (base != host->base)
+ return 0;
+ if (irq != host->irq)
+ return 0;
+ if ((dma != host->dma) && (host->dma != -1))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Powers down the SD function
+ */
+
+static void wbsd_chip_poweroff(struct wbsd_host *host)
+{
+ wbsd_unlock_config(host);
+
+ wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
+ wbsd_write_config(host, WBSD_CONF_ENABLE, 0);
+
+ wbsd_lock_config(host);
+}
+
+/*****************************************************************************\
+ * *
+ * Devices setup and shutdown *
+ * *
+\*****************************************************************************/
+
+static int wbsd_init(struct device *dev, int base, int irq, int dma,
+ int pnp)
+{
+ struct wbsd_host *host = NULL;
+ struct mmc_host *mmc = NULL;
+ int ret;
+
+ ret = wbsd_alloc_mmc(dev);
+ if (ret)
+ return ret;
+
+ mmc = dev_get_drvdata(dev);
+ host = mmc_priv(mmc);
+
+ /*
+ * Scan for hardware.
+ */
+ ret = wbsd_scan(host);
+ if (ret) {
+ if (pnp && (ret == -ENODEV)) {
+ pr_warn(DRIVER_NAME ": Unable to confirm device presence - you may experience lock-ups\n");
+ } else {
+ wbsd_free_mmc(dev);
+ return ret;
+ }
+ }
+
+ /*
+ * Request resources.
+ */
+ ret = wbsd_request_resources(host, base, irq, dma);
+ if (ret) {
+ wbsd_release_resources(host);
+ wbsd_free_mmc(dev);
+ return ret;
+ }
+
+ /*
+ * See if chip needs to be configured.
+ */
+ if (pnp) {
+ if ((host->config != 0) && !wbsd_chip_validate(host)) {
+ pr_warn(DRIVER_NAME ": PnP active but chip not configured! You probably have a buggy BIOS. Configuring chip manually.\n");
+ wbsd_chip_config(host);
+ }
+ } else
+ wbsd_chip_config(host);
+
+ /*
+ * Power management setup. Exactly how this works is unclear,
+ * and it has not been tested.
+ */
+#ifdef CONFIG_PM
+ if (host->config) {
+ wbsd_unlock_config(host);
+ wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
+ wbsd_lock_config(host);
+ }
+#endif
+ /*
+ * Allow device to initialise itself properly.
+ */
+ mdelay(5);
+
+ /*
+ * Reset the chip into a known state.
+ */
+ wbsd_init_device(host);
+
+ mmc_add_host(mmc);
+
+ pr_info("%s: W83L51xD", mmc_hostname(mmc));
+ if (host->chip_id != 0)
+ printk(" id %x", (int)host->chip_id);
+ printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
+ if (host->dma >= 0)
+ printk(" dma %d", (int)host->dma);
+ else
+ printk(" FIFO");
+ if (pnp)
+ printk(" PnP");
+ printk("\n");
+
+ return 0;
+}
+
+static void wbsd_shutdown(struct device *dev, int pnp)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct wbsd_host *host;
+
+ if (!mmc)
+ return;
+
+ host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+
+ /*
+ * Power down the SD/MMC function.
+ */
+ if (!pnp)
+ wbsd_chip_poweroff(host);
+
+ wbsd_release_resources(host);
+
+ wbsd_free_mmc(dev);
+}
+
+/*
+ * Non-PnP
+ */
+
+static int wbsd_probe(struct platform_device *dev)
+{
+ /* Use the module parameters for resources */
+ return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0);
+}
+
+static int wbsd_remove(struct platform_device *dev)
+{
+ wbsd_shutdown(&dev->dev, 0);
+
+ return 0;
+}
+
+/*
+ * PnP
+ */
+
+#ifdef CONFIG_PNP
+
+static int
+wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)
+{
+ int io, irq, dma;
+
+ /*
+ * Get resources from PnP layer.
+ */
+ io = pnp_port_start(pnpdev, 0);
+ irq = pnp_irq(pnpdev, 0);
+ if (pnp_dma_valid(pnpdev, 0))
+ dma = pnp_dma(pnpdev, 0);
+ else
+ dma = -1;
+
+ DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);
+
+ return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
+}
+
+static void wbsd_pnp_remove(struct pnp_dev *dev)
+{
+ wbsd_shutdown(&dev->dev, 1);
+}
+
+#endif /* CONFIG_PNP */
+
+/*
+ * Power management
+ */
+
+#ifdef CONFIG_PM
+
+static int wbsd_platform_suspend(struct platform_device *dev,
+ pm_message_t state)
+{
+ struct mmc_host *mmc = platform_get_drvdata(dev);
+ struct wbsd_host *host;
+
+ if (mmc == NULL)
+ return 0;
+
+ DBGF("Suspending...\n");
+
+ host = mmc_priv(mmc);
+
+ wbsd_chip_poweroff(host);
+ return 0;
+}
+
+static int wbsd_platform_resume(struct platform_device *dev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(dev);
+ struct wbsd_host *host;
+
+ if (mmc == NULL)
+ return 0;
+
+ DBGF("Resuming...\n");
+
+ host = mmc_priv(mmc);
+
+ wbsd_chip_config(host);
+
+ /*
+ * Allow device to initialise itself properly.
+ */
+ mdelay(5);
+
+ wbsd_init_device(host);
+ return 0;
+}
+
+#ifdef CONFIG_PNP
+
+static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
+{
+ struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
+
+ if (mmc == NULL)
+ return 0;
+
+ DBGF("Suspending...\n");
+ return 0;
+}
+
+static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
+ struct wbsd_host *host;
+
+ if (mmc == NULL)
+ return 0;
+
+ DBGF("Resuming...\n");
+
+ host = mmc_priv(mmc);
+
+ /*
+ * See if chip needs to be configured.
+ */
+ if (host->config != 0) {
+ if (!wbsd_chip_validate(host)) {
+ pr_warn(DRIVER_NAME ": PnP active but chip not configured! You probably have a buggy BIOS. Configuring chip manually.\n");
+ wbsd_chip_config(host);
+ }
+ }
+
+ /*
+ * Allow device to initialise itself properly.
+ */
+ mdelay(5);
+
+ wbsd_init_device(host);
+ return 0;
+}
+
+#endif /* CONFIG_PNP */
+
+#else /* CONFIG_PM */
+
+#define wbsd_platform_suspend NULL
+#define wbsd_platform_resume NULL
+
+#define wbsd_pnp_suspend NULL
+#define wbsd_pnp_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct platform_device *wbsd_device;
+
+static struct platform_driver wbsd_driver = {
+ .probe = wbsd_probe,
+ .remove = wbsd_remove,
+
+ .suspend = wbsd_platform_suspend,
+ .resume = wbsd_platform_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+#ifdef CONFIG_PNP
+
+static struct pnp_driver wbsd_pnp_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pnp_dev_table,
+ .probe = wbsd_pnp_probe,
+ .remove = wbsd_pnp_remove,
+
+ .suspend = wbsd_pnp_suspend,
+ .resume = wbsd_pnp_resume,
+};
+
+#endif /* CONFIG_PNP */
+
+/*
+ * Module loading/unloading
+ */
+
+static int __init wbsd_drv_init(void)
+{
+ int result;
+
+ pr_info(DRIVER_NAME
+ ": Winbond W83L51xD SD/MMC card interface driver\n");
+ pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
+
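+ /*
+ * With PnP support compiled in and nopnp unset, register the PnP
+ * driver; with nopnp set, fall back to a platform device that is
+ * probed from the io/irq/dma module parameters.
+ */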
+#ifdef CONFIG_PNP
+
+ if (!param_nopnp) {
+ result = pnp_register_driver(&wbsd_pnp_driver);
+ if (result < 0)
+ return result;
+ }
+#endif /* CONFIG_PNP */
+
+ if (param_nopnp) {
+ result = platform_driver_register(&wbsd_driver);
+ if (result < 0)
+ return result;
+
+ wbsd_device = platform_device_alloc(DRIVER_NAME, -1);
+ if (!wbsd_device) {
+ platform_driver_unregister(&wbsd_driver);
+ return -ENOMEM;
+ }
+
+ result = platform_device_add(wbsd_device);
+ if (result) {
+ platform_device_put(wbsd_device);
+ platform_driver_unregister(&wbsd_driver);
+ return result;
+ }
+ }
+
+ return 0;
+}
+
+static void __exit wbsd_drv_exit(void)
+{
+#ifdef CONFIG_PNP
+
+ if (!param_nopnp)
+ pnp_unregister_driver(&wbsd_pnp_driver);
+
+#endif /* CONFIG_PNP */
+
+ if (param_nopnp) {
+ platform_device_unregister(wbsd_device);
+
+ platform_driver_unregister(&wbsd_driver);
+ }
+
+ DBG("unloaded\n");
+}
+
+module_init(wbsd_drv_init);
+module_exit(wbsd_drv_exit);
+#ifdef CONFIG_PNP
+module_param_named(nopnp, param_nopnp, uint, 0444);
+#endif
+module_param_named(io, param_io, uint, 0444);
+module_param_named(irq, param_irq, uint, 0444);
+module_param_named(dma, param_dma, int, 0444);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
+MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");
+
+#ifdef CONFIG_PNP
+MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PnP. (default 0)");
+#endif
+MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
+MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
+MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");
diff --git a/kernel/drivers/mmc/host/wbsd.h b/kernel/drivers/mmc/host/wbsd.h
new file mode 100644
index 000000000..0877866f8
--- /dev/null
+++ b/kernel/drivers/mmc/host/wbsd.h
@@ -0,0 +1,185 @@
+/*
+ * linux/drivers/mmc/host/wbsd.h - Winbond W83L51xD SD/MMC driver
+ *
+ * Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#define LOCK_CODE 0xAA
+
+#define WBSD_CONF_SWRST 0x02
+#define WBSD_CONF_DEVICE 0x07
+#define WBSD_CONF_ID_HI 0x20
+#define WBSD_CONF_ID_LO 0x21
+#define WBSD_CONF_POWER 0x22
+#define WBSD_CONF_PME 0x23
+#define WBSD_CONF_PMES 0x24
+
+#define WBSD_CONF_ENABLE 0x30
+#define WBSD_CONF_PORT_HI 0x60
+#define WBSD_CONF_PORT_LO 0x61
+#define WBSD_CONF_IRQ 0x70
+#define WBSD_CONF_DRQ 0x74
+
+#define WBSD_CONF_PINS 0xF0
+
+#define DEVICE_SD 0x03
+
+#define WBSD_PINS_DAT3_HI 0x20
+#define WBSD_PINS_DAT3_OUT 0x10
+#define WBSD_PINS_GP11_HI 0x04
+#define WBSD_PINS_DETECT_GP11 0x02
+#define WBSD_PINS_DETECT_DAT3 0x01
+
+#define WBSD_CMDR 0x00
+#define WBSD_DFR 0x01
+#define WBSD_EIR 0x02
+#define WBSD_ISR 0x03
+#define WBSD_FSR 0x04
+#define WBSD_IDXR 0x05
+#define WBSD_DATAR 0x06
+#define WBSD_CSR 0x07
+
+#define WBSD_EINT_CARD 0x40
+#define WBSD_EINT_FIFO_THRE 0x20
+#define WBSD_EINT_CRC 0x10
+#define WBSD_EINT_TIMEOUT 0x08
+#define WBSD_EINT_PROGEND 0x04
+#define WBSD_EINT_BUSYEND 0x02
+#define WBSD_EINT_TC 0x01
+
+#define WBSD_INT_PENDING 0x80
+#define WBSD_INT_CARD 0x40
+#define WBSD_INT_FIFO_THRE 0x20
+#define WBSD_INT_CRC 0x10
+#define WBSD_INT_TIMEOUT 0x08
+#define WBSD_INT_PROGEND 0x04
+#define WBSD_INT_BUSYEND 0x02
+#define WBSD_INT_TC 0x01
+
+#define WBSD_FIFO_EMPTY 0x80
+#define WBSD_FIFO_FULL 0x40
+#define WBSD_FIFO_EMTHRE 0x20
+#define WBSD_FIFO_FUTHRE 0x10
+#define WBSD_FIFO_SZMASK 0x0F
+
+#define WBSD_MSLED 0x20
+#define WBSD_POWER_N 0x10
+#define WBSD_WRPT 0x04
+#define WBSD_CARDPRESENT 0x01
+
+#define WBSD_IDX_CLK 0x01
+#define WBSD_IDX_PBSMSB 0x02
+#define WBSD_IDX_TAAC 0x03
+#define WBSD_IDX_NSAC 0x04
+#define WBSD_IDX_PBSLSB 0x05
+#define WBSD_IDX_SETUP 0x06
+#define WBSD_IDX_DMA 0x07
+#define WBSD_IDX_FIFOEN 0x08
+#define WBSD_IDX_STATUS 0x10
+#define WBSD_IDX_RSPLEN 0x1E
+#define WBSD_IDX_RESP0 0x1F
+#define WBSD_IDX_RESP1 0x20
+#define WBSD_IDX_RESP2 0x21
+#define WBSD_IDX_RESP3 0x22
+#define WBSD_IDX_RESP4 0x23
+#define WBSD_IDX_RESP5 0x24
+#define WBSD_IDX_RESP6 0x25
+#define WBSD_IDX_RESP7 0x26
+#define WBSD_IDX_RESP8 0x27
+#define WBSD_IDX_RESP9 0x28
+#define WBSD_IDX_RESP10 0x29
+#define WBSD_IDX_RESP11 0x2A
+#define WBSD_IDX_RESP12 0x2B
+#define WBSD_IDX_RESP13 0x2C
+#define WBSD_IDX_RESP14 0x2D
+#define WBSD_IDX_RESP15 0x2E
+#define WBSD_IDX_RESP16 0x2F
+#define WBSD_IDX_CRCSTATUS 0x30
+#define WBSD_IDX_ISR 0x3F
+
+#define WBSD_CLK_375K 0x00
+#define WBSD_CLK_12M 0x01
+#define WBSD_CLK_16M 0x02
+#define WBSD_CLK_24M 0x03
+
+#define WBSD_DATA_WIDTH 0x01
+
+#define WBSD_DAT3_H 0x08
+#define WBSD_FIFO_RESET 0x04
+#define WBSD_SOFT_RESET 0x02
+#define WBSD_INC_INDEX 0x01
+
+#define WBSD_DMA_SINGLE 0x02
+#define WBSD_DMA_ENABLE 0x01
+
+#define WBSD_FIFOEN_EMPTY 0x20
+#define WBSD_FIFOEN_FULL 0x10
+#define WBSD_FIFO_THREMASK 0x0F
+
+#define WBSD_BLOCK_READ 0x80
+#define WBSD_BLOCK_WRITE 0x40
+#define WBSD_BUSY 0x20
+#define WBSD_CARDTRAFFIC 0x04
+#define WBSD_SENDCMD 0x02
+#define WBSD_RECVRES 0x01
+
+#define WBSD_RSP_SHORT 0x00
+#define WBSD_RSP_LONG 0x01
+
+#define WBSD_CRC_MASK 0x1F
+#define WBSD_CRC_OK 0x05 /* S010E (00101) */
+#define WBSD_CRC_FAIL 0x0B /* S101E (01011) */
+
+#define WBSD_DMA_SIZE 65536
+
+struct wbsd_host {
+ struct mmc_host *mmc; /* MMC structure */
+
+ spinlock_t lock; /* Mutex */
+
+ int flags; /* Driver states */
+
+#define WBSD_FCARD_PRESENT (1<<0) /* Card is present */
+#define WBSD_FIGNORE_DETECT (1<<1) /* Ignore card detection */
+
+ struct mmc_request *mrq; /* Current request */
+
+ u8 isr; /* Accumulated ISR */
+
+ struct scatterlist *cur_sg; /* Current SG entry */
+ unsigned int num_sg; /* Number of entries left */
+
+ unsigned int offset; /* Offset into current entry */
+ unsigned int remain; /* Data left in current entry */
+
+ char *dma_buffer; /* ISA DMA buffer */
+ dma_addr_t dma_addr; /* Physical address for same */
+
+ int firsterr; /* See fifo functions */
+
+ u8 clk; /* Current clock speed */
+ unsigned char bus_width; /* Current bus width */
+
+ int config; /* Config port */
+ u8 unlock_code; /* Code to unlock config */
+
+ int chip_id; /* ID of controller */
+
+ int base; /* I/O port base */
+ int irq; /* Interrupt */
+ int dma; /* DMA channel */
+
+ struct tasklet_struct card_tasklet; /* Tasklet structures */
+ struct tasklet_struct fifo_tasklet;
+ struct tasklet_struct crc_tasklet;
+ struct tasklet_struct timeout_tasklet;
+ struct tasklet_struct finish_tasklet;
+
+ struct timer_list ignore_timer; /* Ignore detection timer */
+};
diff --git a/kernel/drivers/mmc/host/wmt-sdmmc.c b/kernel/drivers/mmc/host/wmt-sdmmc.c
new file mode 100644
index 000000000..5af00559e
--- /dev/null
+++ b/kernel/drivers/mmc/host/wmt-sdmmc.c
@@ -0,0 +1,1004 @@
+/*
+ * WM8505/WM8650 SD/MMC Host Controller
+ *
+ * Copyright (C) 2010 Tony Prisk
+ * Copyright (C) 2008 WonderMedia Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#include <asm/byteorder.h>
+
+
+#define DRIVER_NAME "wmt-sdhc"
+
+
+/* MMC/SD controller registers */
+#define SDMMC_CTLR 0x00
+#define SDMMC_CMD 0x01
+#define SDMMC_RSPTYPE 0x02
+#define SDMMC_ARG 0x04
+#define SDMMC_BUSMODE 0x08
+#define SDMMC_BLKLEN 0x0C
+#define SDMMC_BLKCNT 0x0E
+#define SDMMC_RSP 0x10
+#define SDMMC_CBCR 0x20
+#define SDMMC_INTMASK0 0x24
+#define SDMMC_INTMASK1 0x25
+#define SDMMC_STS0 0x28
+#define SDMMC_STS1 0x29
+#define SDMMC_STS2 0x2A
+#define SDMMC_STS3 0x2B
+#define SDMMC_RSPTIMEOUT 0x2C
+#define SDMMC_CLK 0x30 /* VT8500 only */
+#define SDMMC_EXTCTRL 0x34
+#define SDMMC_SBLKLEN 0x38
+#define SDMMC_DMATIMEOUT 0x3C
+
+
+/* SDMMC_CTLR bit fields */
+#define CTLR_CMD_START 0x01
+#define CTLR_CMD_WRITE 0x04
+#define CTLR_FIFO_RESET 0x08
+
+/* SDMMC_BUSMODE bit fields */
+#define BM_SPI_MODE 0x01
+#define BM_FOURBIT_MODE 0x02
+#define BM_EIGHTBIT_MODE 0x04
+#define BM_SD_OFF 0x10
+#define BM_SPI_CS 0x20
+#define BM_SD_POWER 0x40
+#define BM_SOFT_RESET 0x80
+
+/* SDMMC_BLKLEN bit fields */
+#define BLKL_CRCERR_ABORT 0x0800
+#define BLKL_CD_POL_HIGH 0x1000
+#define BLKL_GPI_CD 0x2000
+#define BLKL_DATA3_CD 0x4000
+#define BLKL_INT_ENABLE 0x8000
+
+/* SDMMC_INTMASK0 bit fields */
+#define INT0_MBLK_TRAN_DONE_INT_EN 0x10
+#define INT0_BLK_TRAN_DONE_INT_EN 0x20
+#define INT0_CD_INT_EN 0x40
+#define INT0_DI_INT_EN 0x80
+
+/* SDMMC_INTMASK1 bit fields */
+#define INT1_CMD_RES_TRAN_DONE_INT_EN 0x02
+#define INT1_CMD_RES_TOUT_INT_EN 0x04
+#define INT1_MBLK_AUTO_STOP_INT_EN 0x08
+#define INT1_DATA_TOUT_INT_EN 0x10
+#define INT1_RESCRC_ERR_INT_EN 0x20
+#define INT1_RCRC_ERR_INT_EN 0x40
+#define INT1_WCRC_ERR_INT_EN 0x80
+
+/* SDMMC_STS0 bit fields */
+#define STS0_WRITE_PROTECT 0x02
+#define STS0_CD_DATA3 0x04
+#define STS0_CD_GPI 0x08
+#define STS0_MBLK_DONE 0x10
+#define STS0_BLK_DONE 0x20
+#define STS0_CARD_DETECT 0x40
+#define STS0_DEVICE_INS 0x80
+
+/* SDMMC_STS1 bit fields */
+#define STS1_SDIO_INT 0x01
+#define STS1_CMDRSP_DONE 0x02
+#define STS1_RSP_TIMEOUT 0x04
+#define STS1_AUTOSTOP_DONE 0x08
+#define STS1_DATA_TIMEOUT 0x10
+#define STS1_RSP_CRC_ERR 0x20
+#define STS1_RCRC_ERR 0x40
+#define STS1_WCRC_ERR 0x80
+
+/* SDMMC_STS2 bit fields */
+#define STS2_CMD_RES_BUSY 0x10
+#define STS2_DATARSP_BUSY 0x20
+#define STS2_DIS_FORCECLK 0x80
+
+/* SDMMC_EXTCTRL bit fields */
+#define EXT_EIGHTBIT 0x04
+
+/* MMC/SD DMA Controller Registers */
+#define SDDMA_GCR 0x100
+#define SDDMA_IER 0x104
+#define SDDMA_ISR 0x108
+#define SDDMA_DESPR 0x10C
+#define SDDMA_RBR 0x110
+#define SDDMA_DAR 0x114
+#define SDDMA_BAR 0x118
+#define SDDMA_CPR 0x11C
+#define SDDMA_CCR 0x120
+
+
+/* SDDMA_GCR bit fields */
+#define DMA_GCR_DMA_EN 0x00000001
+#define DMA_GCR_SOFT_RESET 0x00000100
+
+/* SDDMA_IER bit fields */
+#define DMA_IER_INT_EN 0x00000001
+
+/* SDDMA_ISR bit fields */
+#define DMA_ISR_INT_STS 0x00000001
+
+/* SDDMA_RBR bit fields */
+#define DMA_RBR_FORMAT 0x40000000
+#define DMA_RBR_END 0x80000000
+
+/* SDDMA_CCR bit fields */
+#define DMA_CCR_RUN 0x00000080
+#define DMA_CCR_IF_TO_PERIPHERAL 0x00000000
+#define DMA_CCR_PERIPHERAL_TO_IF 0x00400000
+
+/* SDDMA_CCR event status */
+#define DMA_CCR_EVT_NO_STATUS 0x00000000
+#define DMA_CCR_EVT_UNDERRUN 0x00000001
+#define DMA_CCR_EVT_OVERRUN 0x00000002
+#define DMA_CCR_EVT_DESP_READ 0x00000003
+#define DMA_CCR_EVT_DATA_RW 0x00000004
+#define DMA_CCR_EVT_EARLY_END 0x00000005
+#define DMA_CCR_EVT_SUCCESS 0x0000000F
+
+#define PDMA_READ 0x00
+#define PDMA_WRITE 0x01
+
+#define WMT_SD_POWER_OFF 0
+#define WMT_SD_POWER_ON 1
+
+struct wmt_dma_descriptor {
+ u32 flags;
+ u32 data_buffer_addr;
+ u32 branch_addr;
+ u32 reserved1;
+};
+
+struct wmt_mci_caps {
+ unsigned int f_min;
+ unsigned int f_max;
+ u32 ocr_avail;
+ u32 caps;
+ u32 max_seg_size;
+ u32 max_segs;
+ u32 max_blk_size;
+};
+
+struct wmt_mci_priv {
+ struct mmc_host *mmc;
+ void __iomem *sdmmc_base;
+
+ int irq_regular;
+ int irq_dma;
+
+ void *dma_desc_buffer;
+ dma_addr_t dma_desc_device_addr;
+
+ struct completion cmdcomp;
+ struct completion datacomp;
+
+ struct completion *comp_cmd;
+ struct completion *comp_dma;
+
+ struct mmc_request *req;
+ struct mmc_command *cmd;
+
+ struct clk *clk_sdmmc;
+ struct device *dev;
+
+ u8 power_inverted;
+ u8 cd_inverted;
+};
+
+static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable)
+{
+ u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
+
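+ /*
+ * BM_SD_OFF gates the card power; the XOR honours boards where the
+ * power line is inverted (the "sdon-inverted" devicetree property).
+ */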
+ if (enable ^ priv->power_inverted)
+ reg_tmp &= ~BM_SD_OFF;
+ else
+ reg_tmp |= BM_SD_OFF;
+
+ writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE);
+}
+
+static void wmt_mci_read_response(struct mmc_host *mmc)
+{
+ struct wmt_mci_priv *priv;
+ int idx1, idx2;
+ u8 tmp_resp;
+ u32 response;
+
+ priv = mmc_priv(mmc);
+
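+ /*
+ * Assemble four 32-bit response words from the byte-wide response
+ * register. Bytes are read starting at offset 1; the final byte of
+ * the last word wraps around to offset 0.
+ */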
+ for (idx1 = 0; idx1 < 4; idx1++) {
+ response = 0;
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if ((idx1 == 3) && (idx2 == 3))
+ tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP);
+ else
+ tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP +
+ (idx1*4) + idx2 + 1);
+ response |= (tmp_resp << (idx2 * 8));
+ }
+ priv->cmd->resp[idx1] = cpu_to_be32(response);
+ }
+}
+
+static void wmt_mci_start_command(struct wmt_mci_priv *priv)
+{
+ u32 reg_tmp;
+
+ reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
+ writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR);
+}
+
+static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype,
+ u32 arg, u8 rsptype)
+{
+ struct wmt_mci_priv *priv;
+ u32 reg_tmp;
+
+ priv = mmc_priv(mmc);
+
+ /* write command, arg, resptype registers */
+ writeb(command, priv->sdmmc_base + SDMMC_CMD);
+ writel(arg, priv->sdmmc_base + SDMMC_ARG);
+ writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE);
+
+ /* reset response FIFO */
+ reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
+ writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);
+
+ /* ensure clock enabled - VT3465 */
+ wmt_set_sd_power(priv, WMT_SD_POWER_ON);
+
+ /* clear status bits */
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS2);
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS3);
+
+ /* set command type */
+ reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
+ writeb((reg_tmp & 0x0F) | (cmdtype << 4),
+ priv->sdmmc_base + SDMMC_CTLR);
+
+ return 0;
+}
+
+static void wmt_mci_disable_dma(struct wmt_mci_priv *priv)
+{
+ writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR);
+ writel(0, priv->sdmmc_base + SDDMA_IER);
+}
+
+static void wmt_complete_data_request(struct wmt_mci_priv *priv)
+{
+ struct mmc_request *req;
+ req = priv->req;
+
+ req->data->bytes_xfered = req->data->blksz * req->data->blocks;
+
+ /* unmap the DMA pages used for write data */
+ if (req->data->flags & MMC_DATA_WRITE)
+ dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
+ req->data->sg_len, DMA_TO_DEVICE);
+ else
+ dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
+ req->data->sg_len, DMA_FROM_DEVICE);
+
+ /* Check if the DMA ISR returned a data error */
+ if ((req->cmd->error) || (req->data->error))
+ mmc_request_done(priv->mmc, req);
+ else {
+ wmt_mci_read_response(priv->mmc);
+ if (!req->data->stop) {
+ /* single-block read/write requests end here */
+ mmc_request_done(priv->mmc, req);
+ } else {
+ /*
+ * we change the priv->cmd variable so the response is
+ * stored in the stop struct rather than the original
+ * calling command struct
+ */
+ priv->comp_cmd = &priv->cmdcomp;
+ init_completion(priv->comp_cmd);
+ priv->cmd = req->data->stop;
+ wmt_mci_send_command(priv->mmc, req->data->stop->opcode,
+ 7, req->data->stop->arg, 9);
+ wmt_mci_start_command(priv);
+ }
+ }
+}
+
+static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data)
+{
+ struct wmt_mci_priv *priv;
+
+ int status;
+
+ priv = (struct wmt_mci_priv *)data;
+
+ status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F;
+
+ if (status != DMA_CCR_EVT_SUCCESS) {
+ dev_err(priv->dev, "DMA Error: Status = %d\n", status);
+ priv->req->data->error = -ETIMEDOUT;
+ complete(priv->comp_dma);
+ return IRQ_HANDLED;
+ }
+
+ priv->req->data->error = 0;
+
+ wmt_mci_disable_dma(priv);
+
+ complete(priv->comp_dma);
+
+ if (priv->comp_cmd) {
+ if (completion_done(priv->comp_cmd)) {
+ /*
+ * if the command (regular) interrupt has already
+ * completed, finish off the request otherwise we wait
+ * for the command interrupt and finish from there.
+ */
+ wmt_complete_data_request(priv);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data)
+{
+ struct wmt_mci_priv *priv;
+ u32 status0;
+ u32 status1;
+ u32 status2;
+ u32 reg_tmp;
+ int cmd_done;
+
+ priv = (struct wmt_mci_priv *)data;
+ cmd_done = 0;
+ status0 = readb(priv->sdmmc_base + SDMMC_STS0);
+ status1 = readb(priv->sdmmc_base + SDMMC_STS1);
+ status2 = readb(priv->sdmmc_base + SDMMC_STS2);
+
+ /* Check for card insertion */
+ reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
+ if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) {
+ mmc_detect_change(priv->mmc, 0);
+ if (priv->cmd)
+ priv->cmd->error = -ETIMEDOUT;
+ if (priv->comp_cmd)
+ complete(priv->comp_cmd);
+ if (priv->comp_dma) {
+ wmt_mci_disable_dma(priv);
+ complete(priv->comp_dma);
+ }
+ writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0);
+ return IRQ_HANDLED;
+ }
+
+ if ((!priv->req->data) ||
+ ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) {
+ /* handle non-data & stop_transmission requests */
+ if (status1 & STS1_CMDRSP_DONE) {
+ priv->cmd->error = 0;
+ cmd_done = 1;
+ } else if ((status1 & STS1_RSP_TIMEOUT) ||
+ (status1 & STS1_DATA_TIMEOUT)) {
+ priv->cmd->error = -ETIMEDOUT;
+ cmd_done = 1;
+ }
+
+ if (cmd_done) {
+ priv->comp_cmd = NULL;
+
+ if (!priv->cmd->error)
+ wmt_mci_read_response(priv->mmc);
+
+ priv->cmd = NULL;
+
+ mmc_request_done(priv->mmc, priv->req);
+ }
+ } else {
+ /* handle data requests */
+ if (status1 & STS1_CMDRSP_DONE) {
+ if (priv->cmd)
+ priv->cmd->error = 0;
+ if (priv->comp_cmd)
+ complete(priv->comp_cmd);
+ }
+
+ if ((status1 & STS1_RSP_TIMEOUT) ||
+ (status1 & STS1_DATA_TIMEOUT)) {
+ if (priv->cmd)
+ priv->cmd->error = -ETIMEDOUT;
+ if (priv->comp_cmd)
+ complete(priv->comp_cmd);
+ if (priv->comp_dma) {
+ wmt_mci_disable_dma(priv);
+ complete(priv->comp_dma);
+ }
+ }
+
+ if (priv->comp_dma) {
+ /*
+ * If the dma interrupt has already completed, finish
+ * off the request; otherwise we wait for the DMA
+ * interrupt and finish from there.
+ */
+ if (completion_done(priv->comp_dma))
+ wmt_complete_data_request(priv);
+ }
+ }
+
+ writeb(status0, priv->sdmmc_base + SDMMC_STS0);
+ writeb(status1, priv->sdmmc_base + SDMMC_STS1);
+ writeb(status2, priv->sdmmc_base + SDMMC_STS2);
+
+ return IRQ_HANDLED;
+}
+
+static void wmt_reset_hardware(struct mmc_host *mmc)
+{
+ struct wmt_mci_priv *priv;
+ u32 reg_tmp;
+
+ priv = mmc_priv(mmc);
+
+ /* reset controller */
+ reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
+ writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
+
+ /* reset response FIFO */
+ reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
+ writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);
+
+ /* enable GPI pin to detect card */
+ writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN);
+
+ /* clear interrupt status */
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
+
+ /* setup interrupts */
+ writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base +
+ SDMMC_INTMASK0);
+ writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN |
+ INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1);
+
+ /* set the DMA timeout */
+ writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT);
+
+ /* auto clock freezing enable */
+ reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2);
+ writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2);
+
+ /* set a default clock rate of 400 kHz */
+ clk_set_rate(priv->clk_sdmmc, 400000);
+}
+
+static int wmt_dma_init(struct mmc_host *mmc)
+{
+ struct wmt_mci_priv *priv;
+
+ priv = mmc_priv(mmc);
+
+ writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR);
+ writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR);
+ /* report 0 on success, 1 if the DMA engine failed to enable */
+ return !(readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN);
+}
+
+static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc,
+ u16 req_count, u32 buffer_addr, u32 branch_addr, int end)
+{
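+ /*
+ * Bits 30 and 31 of the flags word appear to correspond to
+ * DMA_RBR_FORMAT and DMA_RBR_END above: a format-1 descriptor and
+ * the end-of-chain marker. The low 16 bits hold the request byte
+ * count.
+ */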
+ desc->flags = 0x40000000 | req_count;
+ if (end)
+ desc->flags |= 0x80000000;
+ desc->data_buffer_addr = buffer_addr;
+ desc->branch_addr = branch_addr;
+}
+
+static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir)
+{
+ struct wmt_mci_priv *priv;
+ u32 reg_tmp;
+
+ priv = mmc_priv(mmc);
+
+ /* Enable DMA Interrupts */
+ writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER);
+
+ /* Write DMA Descriptor Pointer Register */
+ writel(descaddr, priv->sdmmc_base + SDDMA_DESPR);
+
+ writel(0x00, priv->sdmmc_base + SDDMA_CCR);
+
+ if (dir == PDMA_WRITE) {
+ reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
+ writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base +
+ SDDMA_CCR);
+ } else {
+ reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
+ writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base +
+ SDDMA_CCR);
+ }
+}
+
+static void wmt_dma_start(struct wmt_mci_priv *priv)
+{
+ u32 reg_tmp;
+
+ reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
+ writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR);
+}
+
+static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct wmt_mci_priv *priv;
+ struct wmt_dma_descriptor *desc;
+ u8 command;
+ u8 cmdtype;
+ u32 arg;
+ u8 rsptype;
+ u32 reg_tmp;
+
+ struct scatterlist *sg;
+ int i;
+ int sg_cnt;
+ int offset;
+ u32 dma_address;
+ int desc_cnt;
+
+ priv = mmc_priv(mmc);
+ priv->req = req;
+
+ /*
+ * Use the cmd variable to pass a pointer to the resp[] structure.
+ * This is required on multi-block requests to pass the pointer to
+ * the stop command.
+ */
+ priv->cmd = req->cmd;
+
+ command = req->cmd->opcode;
+ arg = req->cmd->arg;
+ rsptype = mmc_resp_type(req->cmd);
+ cmdtype = 0;
+
+ /* rsptype=7 only valid for SPI commands - should be =2 for SD */
+ if (rsptype == 7)
+ rsptype = 2;
+ /* rsptype=21 is R1B, convert for controller */
+ if (rsptype == 21)
+ rsptype = 9;
+
+ if (!req->data) {
+ wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
+ wmt_mci_start_command(priv);
+ /* completion is now handled in the regular_isr() */
+ }
+ if (req->data) {
+ priv->comp_cmd = &priv->cmdcomp;
+ init_completion(priv->comp_cmd);
+
+ wmt_dma_init(mmc);
+
+ /* set controller data length */
+ reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
+ writew((reg_tmp & 0xF800) | (req->data->blksz - 1),
+ priv->sdmmc_base + SDMMC_BLKLEN);
+
+ /* set controller block count */
+ writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT);
+
+ desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer;
+
+ if (req->data->flags & MMC_DATA_WRITE) {
+ sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
+ req->data->sg_len, DMA_TO_DEVICE);
+ cmdtype = 1;
+ if (req->data->blocks > 1)
+ cmdtype = 3;
+ } else {
+ sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
+ req->data->sg_len, DMA_FROM_DEVICE);
+ cmdtype = 2;
+ if (req->data->blocks > 1)
+ cmdtype = 4;
+ }
+
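+ /*
+ * Each descriptor is 16 bytes (struct wmt_dma_descriptor); build
+ * one per block, each branching to the next.
+ */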
+ dma_address = priv->dma_desc_device_addr + 16;
+ desc_cnt = 0;
+
+ for_each_sg(req->data->sg, sg, sg_cnt, i) {
+ offset = 0;
+ while (offset < sg_dma_len(sg)) {
+ wmt_dma_init_descriptor(desc, req->data->blksz,
+ sg_dma_address(sg)+offset,
+ dma_address, 0);
+ desc++;
+ desc_cnt++;
+ offset += req->data->blksz;
+ dma_address += 16;
+ if (desc_cnt == req->data->blocks)
+ break;
+ }
+ }
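+ /* Mark the final descriptor as the end of the chain. */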
+ desc--;
+ desc->flags |= 0x80000000;
+
+ if (req->data->flags & MMC_DATA_WRITE)
+ wmt_dma_config(mmc, priv->dma_desc_device_addr,
+ PDMA_WRITE);
+ else
+ wmt_dma_config(mmc, priv->dma_desc_device_addr,
+ PDMA_READ);
+
+ wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
+
+ priv->comp_dma = &priv->datacomp;
+ init_completion(priv->comp_dma);
+
+ wmt_dma_start(priv);
+ wmt_mci_start_command(priv);
+ }
+}
+
+static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct wmt_mci_priv *priv;
+ u32 busmode, extctrl;
+
+ priv = mmc_priv(mmc);
+
+ if (ios->power_mode == MMC_POWER_UP) {
+ wmt_reset_hardware(mmc);
+
+ wmt_set_sd_power(priv, WMT_SD_POWER_ON);
+ }
+ if (ios->power_mode == MMC_POWER_OFF)
+ wmt_set_sd_power(priv, WMT_SD_POWER_OFF);
+
+ if (ios->clock != 0)
+ clk_set_rate(priv->clk_sdmmc, ios->clock);
+
+ busmode = readb(priv->sdmmc_base + SDMMC_BUSMODE);
+ extctrl = readb(priv->sdmmc_base + SDMMC_EXTCTRL);
+
+ busmode &= ~(BM_EIGHTBIT_MODE | BM_FOURBIT_MODE);
+ extctrl &= ~EXT_EIGHTBIT;
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_8:
+ busmode |= BM_EIGHTBIT_MODE;
+ extctrl |= EXT_EIGHTBIT;
+ break;
+ case MMC_BUS_WIDTH_4:
+ busmode |= BM_FOURBIT_MODE;
+ break;
+ case MMC_BUS_WIDTH_1:
+ break;
+ }
+
+ writeb(busmode, priv->sdmmc_base + SDMMC_BUSMODE);
+ writeb(extctrl, priv->sdmmc_base + SDMMC_EXTCTRL);
+}
+
+static int wmt_mci_get_ro(struct mmc_host *mmc)
+{
+ struct wmt_mci_priv *priv = mmc_priv(mmc);
+
+ return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT);
+}
+
+static int wmt_mci_get_cd(struct mmc_host *mmc)
+{
+ struct wmt_mci_priv *priv = mmc_priv(mmc);
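+ /* STS0_CD_GPI is bit 3, hence the shift; cd_inverted flips the sense. */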
+ u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3;
+
+ return !(cd ^ priv->cd_inverted);
+}
+
+static struct mmc_host_ops wmt_mci_ops = {
+ .request = wmt_mci_request,
+ .set_ios = wmt_mci_set_ios,
+ .get_ro = wmt_mci_get_ro,
+ .get_cd = wmt_mci_get_cd,
+};
+
+/* Controller capabilities */
+static struct wmt_mci_caps wm8505_caps = {
+ .f_min = 390425,
+ .f_max = 50000000,
+ .ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_SD_HIGHSPEED,
+ .max_seg_size = 65024,
+ .max_segs = 128,
+ .max_blk_size = 2048,
+};
+
+static const struct of_device_id wmt_mci_dt_ids[] = {
+ { .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps },
+ { /* Sentinel */ },
+};
+
+static int wmt_mci_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct wmt_mci_priv *priv;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(wmt_mci_dt_ids, &pdev->dev);
+ const struct wmt_mci_caps *wmt_caps;
+ int ret;
+ int regular_irq, dma_irq;
+
+ if (!of_id || !of_id->data) {
+ dev_err(&pdev->dev, "Controller capabilities data missing\n");
+ return -EFAULT;
+ }
+
+ wmt_caps = of_id->data;
+
+ if (!np) {
+ dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
+ return -EFAULT;
+ }
+
+ regular_irq = irq_of_parse_and_map(np, 0);
+ dma_irq = irq_of_parse_and_map(np, 1);
+
+ if (!regular_irq || !dma_irq) {
+ dev_err(&pdev->dev, "Getting IRQs failed!\n");
+ ret = -ENXIO;
+ goto fail1;
+ }
+
+ mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev);
+ if (!mmc) {
+ dev_err(&pdev->dev, "Failed to allocate mmc_host\n");
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ mmc->ops = &wmt_mci_ops;
+ mmc->f_min = wmt_caps->f_min;
+ mmc->f_max = wmt_caps->f_max;
+ mmc->ocr_avail = wmt_caps->ocr_avail;
+ mmc->caps = wmt_caps->caps;
+
+ mmc->max_seg_size = wmt_caps->max_seg_size;
+ mmc->max_segs = wmt_caps->max_segs;
+ mmc->max_blk_size = wmt_caps->max_blk_size;
+
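+ /*
+ * The request size is presumably bounded by the DMA descriptor
+ * buffer allocated at probe time (16 bytes per block).
+ */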
+ mmc->max_req_size = (16*512*mmc->max_segs);
+ mmc->max_blk_count = mmc->max_req_size / 512;
+
+ priv = mmc_priv(mmc);
+ priv->mmc = mmc;
+ priv->dev = &pdev->dev;
+
+ priv->power_inverted = 0;
+ priv->cd_inverted = 0;
+
+ if (of_get_property(np, "sdon-inverted", NULL))
+ priv->power_inverted = 1;
+ if (of_get_property(np, "cd-inverted", NULL))
+ priv->cd_inverted = 1;
+
+ priv->sdmmc_base = of_iomap(np, 0);
+ if (!priv->sdmmc_base) {
+ dev_err(&pdev->dev, "Failed to map IO space\n");
+ ret = -ENOMEM;
+ goto fail2;
+ }
+
+ priv->irq_regular = regular_irq;
+ priv->irq_dma = dma_irq;
+
+ ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv);
+ if (ret) {
+ dev_err(&pdev->dev, "Register regular IRQ fail\n");
+ goto fail3;
+ }
+
+ ret = request_irq(dma_irq, wmt_mci_dma_isr, 0, "sdmmc", priv);
+ if (ret) {
+ dev_err(&pdev->dev, "Register DMA IRQ fail\n");
+ goto fail4;
+ }
+
+ /* alloc some DMA buffers for descriptors/transfers */
+ priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev,
+ mmc->max_blk_count * 16,
+ &priv->dma_desc_device_addr,
+ GFP_KERNEL);
+ if (!priv->dma_desc_buffer) {
+ dev_err(&pdev->dev, "DMA alloc fail\n");
+ ret = -EPERM;
+ goto fail5;
+ }
+
+ platform_set_drvdata(pdev, mmc);
+
+ priv->clk_sdmmc = of_clk_get(np, 0);
+ if (IS_ERR(priv->clk_sdmmc)) {
+ dev_err(&pdev->dev, "Error getting clock\n");
+ ret = PTR_ERR(priv->clk_sdmmc);
+ goto fail5;
+ }
+
+ clk_prepare_enable(priv->clk_sdmmc);
+
+ /* configure the controller to a known 'ready' state */
+ wmt_reset_hardware(mmc);
+
+ mmc_add_host(mmc);
+
+ dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");
+
+ return 0;
+fail5:
+ free_irq(dma_irq, priv);
+fail4:
+ free_irq(regular_irq, priv);
+fail3:
+ iounmap(priv->sdmmc_base);
+fail2:
+ mmc_free_host(mmc);
+fail1:
+ return ret;
+}
+
+static int wmt_mci_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct wmt_mci_priv *priv;
+ struct resource *res;
+ u32 reg_tmp;
+
+ mmc = platform_get_drvdata(pdev);
+ priv = mmc_priv(mmc);
+
+ /* reset SD controller */
+ reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
+ writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
+ reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
+ writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN);
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
+
+ /* release the dma buffers */
+ dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16,
+ priv->dma_desc_buffer, priv->dma_desc_device_addr);
+
+ mmc_remove_host(mmc);
+
+ free_irq(priv->irq_regular, priv);
+ free_irq(priv->irq_dma, priv);
+
+ iounmap(priv->sdmmc_base);
+
+ clk_disable_unprepare(priv->clk_sdmmc);
+ clk_put(priv->clk_sdmmc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ mmc_free_host(mmc);
+
+ dev_info(&pdev->dev, "WMT MCI device removed\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int wmt_mci_suspend(struct device *dev)
+{
+ u32 reg_tmp;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct wmt_mci_priv *priv;
+
+ if (!mmc)
+ return 0;
+
+ priv = mmc_priv(mmc);
+ reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
+ writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
+ SDMMC_BUSMODE);
+
+ reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
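+ /* Clear BLKL_INT_ENABLE and BLKL_GPI_CD (mask 0xA000) while suspended. */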
+ writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);
+
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
+ writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
+
+ clk_disable(priv->clk_sdmmc);
+ return 0;
+}
+
+static int wmt_mci_resume(struct device *dev)
+{
+ u32 reg_tmp;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct wmt_mci_priv *priv;
+
+ if (mmc) {
+ priv = mmc_priv(mmc);
+ clk_enable(priv->clk_sdmmc);
+
+ reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
+ writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
+ SDMMC_BUSMODE);
+
+ reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
+ writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE),
+ priv->sdmmc_base + SDMMC_BLKLEN);
+
+ reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
+ writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base +
+ SDMMC_INTMASK0);
+
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops wmt_mci_pm = {
+ .suspend = wmt_mci_suspend,
+ .resume = wmt_mci_resume,
+};
+
+#define wmt_mci_pm_ops (&wmt_mci_pm)
+
+#else /* !CONFIG_PM */
+
+#define wmt_mci_pm_ops NULL
+
+#endif
+
+static struct platform_driver wmt_mci_driver = {
+ .probe = wmt_mci_probe,
+ .remove = wmt_mci_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = wmt_mci_pm_ops,
+ .of_match_table = wmt_mci_dt_ids,
+ },
+};
+
+module_platform_driver(wmt_mci_driver);
+
+MODULE_DESCRIPTION("Wondermedia MMC/SD Driver");
+MODULE_AUTHOR("Tony Prisk");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids);