author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/mmc
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14.

Kernel sources are taken from kernel.org, and rt patch from the rt wiki
download page. During the rebasing, the following patch collided: Force tick
interrupt and get rid of softirq magic(I70131fb85). Collisions have been
removed because its logic was found on the source already.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/mmc')
-rw-r--r-- kernel/drivers/mmc/card/block.c | 244
-rw-r--r-- kernel/drivers/mmc/card/mmc_test.c | 113
-rw-r--r-- kernel/drivers/mmc/card/queue.c | 12
-rw-r--r-- kernel/drivers/mmc/card/queue.h | 1
-rw-r--r-- kernel/drivers/mmc/core/Kconfig | 10
-rw-r--r-- kernel/drivers/mmc/core/core.c | 415
-rw-r--r-- kernel/drivers/mmc/core/core.h | 7
-rw-r--r-- kernel/drivers/mmc/core/debugfs.c | 30
-rw-r--r-- kernel/drivers/mmc/core/host.c | 323
-rw-r--r-- kernel/drivers/mmc/core/host.h | 6
-rw-r--r-- kernel/drivers/mmc/core/mmc.c | 233
-rw-r--r-- kernel/drivers/mmc/core/mmc_ops.c | 53
-rw-r--r-- kernel/drivers/mmc/core/mmc_ops.h | 4
-rw-r--r-- kernel/drivers/mmc/core/pwrseq_emmc.c | 8
-rw-r--r-- kernel/drivers/mmc/core/pwrseq_simple.c | 45
-rw-r--r-- kernel/drivers/mmc/core/quirks.c | 18
-rw-r--r-- kernel/drivers/mmc/core/sd.c | 136
-rw-r--r-- kernel/drivers/mmc/core/sdio.c | 121
-rw-r--r-- kernel/drivers/mmc/core/sdio_bus.c | 12
-rw-r--r-- kernel/drivers/mmc/core/sdio_irq.c | 14
-rw-r--r-- kernel/drivers/mmc/core/sdio_ops.h | 7
-rw-r--r-- kernel/drivers/mmc/host/Kconfig | 40
-rw-r--r-- kernel/drivers/mmc/host/Makefile | 4
-rw-r--r-- kernel/drivers/mmc/host/android-goldfish.c | 4
-rw-r--r-- kernel/drivers/mmc/host/atmel-mci.c | 1
-rw-r--r-- kernel/drivers/mmc/host/davinci_mmc.c | 2
-rw-r--r-- kernel/drivers/mmc/host/dw_mmc-exynos.c | 6
-rw-r--r-- kernel/drivers/mmc/host/dw_mmc-k3.c | 105
-rw-r--r-- kernel/drivers/mmc/host/dw_mmc-pltfm.c | 2
-rw-r--r-- kernel/drivers/mmc/host/dw_mmc-rockchip.c | 167
-rw-r--r-- kernel/drivers/mmc/host/dw_mmc.c | 635
-rw-r--r-- kernel/drivers/mmc/host/dw_mmc.h | 18
-rw-r--r-- kernel/drivers/mmc/host/mmc_spi.c | 2
-rw-r--r-- kernel/drivers/mmc/host/mmci.c | 2
-rw-r--r-- kernel/drivers/mmc/host/moxart-mmc.c | 1
-rw-r--r-- kernel/drivers/mmc/host/mtk-sd.c | 1708
-rw-r--r-- kernel/drivers/mmc/host/mxcmmc.c | 6
-rw-r--r-- kernel/drivers/mmc/host/mxs-mmc.c | 2
-rw-r--r-- kernel/drivers/mmc/host/omap.c | 10
-rw-r--r-- kernel/drivers/mmc/host/omap_hsmmc.c | 399
-rw-r--r-- kernel/drivers/mmc/host/pxamci.c | 266
-rw-r--r-- kernel/drivers/mmc/host/rtsx_pci_sdmmc.c | 2
-rw-r--r-- kernel/drivers/mmc/host/rtsx_usb_sdmmc.c | 2
-rw-r--r-- kernel/drivers/mmc/host/s3cmci.c | 2
-rw-r--r-- kernel/drivers/mmc/host/sdhci-acpi.c | 41
-rw-r--r-- kernel/drivers/mmc/host/sdhci-bcm-kona.c | 2
-rw-r--r-- kernel/drivers/mmc/host/sdhci-bcm2835.c | 12
-rw-r--r-- kernel/drivers/mmc/host/sdhci-esdhc-imx.c | 340
-rw-r--r-- kernel/drivers/mmc/host/sdhci-esdhc.h | 5
-rw-r--r-- kernel/drivers/mmc/host/sdhci-msm.c | 7
-rw-r--r-- kernel/drivers/mmc/host/sdhci-of-arasan.c | 11
-rw-r--r-- kernel/drivers/mmc/host/sdhci-of-at91.c | 191
-rw-r--r-- kernel/drivers/mmc/host/sdhci-of-esdhc.c | 472
-rw-r--r-- kernel/drivers/mmc/host/sdhci-pci-core.c (renamed from kernel/drivers/mmc/host/sdhci-pci.c) | 202
-rw-r--r-- kernel/drivers/mmc/host/sdhci-pci-data.c | 3
-rw-r--r-- kernel/drivers/mmc/host/sdhci-pci-o2micro.c | 6
-rw-r--r-- kernel/drivers/mmc/host/sdhci-pci-o2micro.h | 2
-rw-r--r-- kernel/drivers/mmc/host/sdhci-pci.h | 11
-rw-r--r-- kernel/drivers/mmc/host/sdhci-pltfm.c | 6
-rw-r--r-- kernel/drivers/mmc/host/sdhci-pxav2.c | 4
-rw-r--r-- kernel/drivers/mmc/host/sdhci-pxav3.c | 16
-rw-r--r-- kernel/drivers/mmc/host/sdhci-s3c.c | 2
-rw-r--r-- kernel/drivers/mmc/host/sdhci-sirf.c | 51
-rw-r--r-- kernel/drivers/mmc/host/sdhci-spear.c | 4
-rw-r--r-- kernel/drivers/mmc/host/sdhci-st.c | 2
-rw-r--r-- kernel/drivers/mmc/host/sdhci.c | 317
-rw-r--r-- kernel/drivers/mmc/host/sdhci.h | 23
-rw-r--r-- kernel/drivers/mmc/host/sdhci_f_sdh30.c | 9
-rw-r--r-- kernel/drivers/mmc/host/sh_mmcif.c | 298
-rw-r--r-- kernel/drivers/mmc/host/sunxi-mmc.c | 63
-rw-r--r-- kernel/drivers/mmc/host/tmio_mmc.c | 10
-rw-r--r-- kernel/drivers/mmc/host/tmio_mmc_pio.c | 11
-rw-r--r-- kernel/drivers/mmc/host/usdhi6rol0.c | 16
-rw-r--r-- kernel/drivers/mmc/host/vub300.c | 6
-rw-r--r-- kernel/drivers/mmc/host/wbsd.c | 2
75 files changed, 5385 insertions, 1958 deletions
diff --git a/kernel/drivers/mmc/card/block.c b/kernel/drivers/mmc/card/block.c
index 31d2627d9..d84861684 100644
--- a/kernel/drivers/mmc/card/block.c
+++ b/kernel/drivers/mmc/card/block.c
@@ -47,10 +47,13 @@
#include "queue.h"
MODULE_ALIAS("mmc:block");
+
+#ifdef KERNEL
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
+#endif
#define INAND_CMD38_ARG_EXT_CSD 113
#define INAND_CMD38_ARG_ERASE 0x00
@@ -62,8 +65,7 @@ MODULE_ALIAS("mmc:block");
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
-#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
- (req->cmd_flags & REQ_META)) && \
+#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
(rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER 0x01
#define PACKED_CMD_WR 0x02
@@ -384,6 +386,24 @@ out:
return ERR_PTR(err);
}
+static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
+ struct mmc_blk_ioc_data *idata)
+{
+ struct mmc_ioc_cmd *ic = &idata->ic;
+
+ if (copy_to_user(&(ic_ptr->response), ic->response,
+ sizeof(ic->response)))
+ return -EFAULT;
+
+ if (!idata->ic.write_flag) {
+ if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
+ idata->buf, idata->buf_bytes))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
u32 retries_max)
{
@@ -444,12 +464,9 @@ out:
return err;
}
-static int mmc_blk_ioctl_cmd(struct block_device *bdev,
- struct mmc_ioc_cmd __user *ic_ptr)
+static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ struct mmc_blk_ioc_data *idata)
{
- struct mmc_blk_ioc_data *idata;
- struct mmc_blk_data *md;
- struct mmc_card *card;
struct mmc_command cmd = {0};
struct mmc_data data = {0};
struct mmc_request mrq = {NULL};
@@ -458,33 +475,12 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
int is_rpmb = false;
u32 status = 0;
- /*
- * The caller must have CAP_SYS_RAWIO, and must be calling this on the
- * whole block device, not on a partition. This prevents overspray
- * between sibling partitions.
- */
- if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
- return -EPERM;
-
- idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
- if (IS_ERR(idata))
- return PTR_ERR(idata);
-
- md = mmc_blk_get(bdev->bd_disk);
- if (!md) {
- err = -EINVAL;
- goto cmd_err;
- }
+ if (!card || !md || !idata)
+ return -EINVAL;
if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
is_rpmb = true;
- card = md->queue.card;
- if (IS_ERR(card)) {
- err = PTR_ERR(card);
- goto cmd_done;
- }
-
cmd.opcode = idata->ic.opcode;
cmd.arg = idata->ic.arg;
cmd.flags = idata->ic.flags;
@@ -527,23 +523,21 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
mrq.cmd = &cmd;
- mmc_get_card(card);
-
err = mmc_blk_part_switch(card, md);
if (err)
- goto cmd_rel_host;
+ return err;
if (idata->ic.is_acmd) {
err = mmc_app_cmd(card->host, card);
if (err)
- goto cmd_rel_host;
+ return err;
}
if (is_rpmb) {
err = mmc_set_blockcount(card, data.blocks,
idata->ic.write_flag & (1 << 31));
if (err)
- goto cmd_rel_host;
+ return err;
}
if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
@@ -554,7 +548,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
pr_err("%s: ioctl_do_sanitize() failed. err = %d",
__func__, err);
- goto cmd_rel_host;
+ return err;
}
mmc_wait_for_req(card->host, &mrq);
@@ -562,14 +556,12 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
if (cmd.error) {
dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
__func__, cmd.error);
- err = cmd.error;
- goto cmd_rel_host;
+ return cmd.error;
}
if (data.error) {
dev_err(mmc_dev(card->host), "%s: data error %d\n",
__func__, data.error);
- err = data.error;
- goto cmd_rel_host;
+ return data.error;
}
/*
@@ -579,18 +571,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
if (idata->ic.postsleep_min_us)
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
- err = -EFAULT;
- goto cmd_rel_host;
- }
-
- if (!idata->ic.write_flag) {
- if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
- idata->buf, idata->buf_bytes)) {
- err = -EFAULT;
- goto cmd_rel_host;
- }
- }
+ memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
if (is_rpmb) {
/*
@@ -604,24 +585,132 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
__func__, status, err);
}
-cmd_rel_host:
+ return err;
+}
+
+static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+ struct mmc_ioc_cmd __user *ic_ptr)
+{
+ struct mmc_blk_ioc_data *idata;
+ struct mmc_blk_data *md;
+ struct mmc_card *card;
+ int err = 0, ioc_err = 0;
+
+ idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+ if (IS_ERR(idata))
+ return PTR_ERR(idata);
+
+ md = mmc_blk_get(bdev->bd_disk);
+ if (!md) {
+ err = -EINVAL;
+ goto cmd_err;
+ }
+
+ card = md->queue.card;
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto cmd_done;
+ }
+
+ mmc_get_card(card);
+
+ ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
+
mmc_put_card(card);
+ err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+
cmd_done:
mmc_blk_put(md);
cmd_err:
kfree(idata->buf);
kfree(idata);
- return err;
+ return ioc_err ? ioc_err : err;
+}
+
+static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
+ struct mmc_ioc_multi_cmd __user *user)
+{
+ struct mmc_blk_ioc_data **idata = NULL;
+ struct mmc_ioc_cmd __user *cmds = user->cmds;
+ struct mmc_card *card;
+ struct mmc_blk_data *md;
+ int i, err = 0, ioc_err = 0;
+ __u64 num_of_cmds;
+
+ if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
+ sizeof(num_of_cmds)))
+ return -EFAULT;
+
+ if (num_of_cmds > MMC_IOC_MAX_CMDS)
+ return -EINVAL;
+
+ idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
+ if (!idata)
+ return -ENOMEM;
+
+ for (i = 0; i < num_of_cmds; i++) {
+ idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
+ if (IS_ERR(idata[i])) {
+ err = PTR_ERR(idata[i]);
+ num_of_cmds = i;
+ goto cmd_err;
+ }
+ }
+
+ md = mmc_blk_get(bdev->bd_disk);
+ if (!md)
+ goto cmd_err;
+
+ card = md->queue.card;
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto cmd_done;
+ }
+
+ mmc_get_card(card);
+
+ for (i = 0; i < num_of_cmds && !ioc_err; i++)
+ ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
+
+ mmc_put_card(card);
+
+ /* copy to user if data and response */
+ for (i = 0; i < num_of_cmds && !err; i++)
+ err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
+
+cmd_done:
+ mmc_blk_put(md);
+cmd_err:
+ for (i = 0; i < num_of_cmds; i++) {
+ kfree(idata[i]->buf);
+ kfree(idata[i]);
+ }
+ kfree(idata);
+ return ioc_err ? ioc_err : err;
}
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- int ret = -EINVAL;
- if (cmd == MMC_IOC_CMD)
- ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
- return ret;
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ return -EPERM;
+
+ switch (cmd) {
+ case MMC_IOC_CMD:
+ return mmc_blk_ioctl_cmd(bdev,
+ (struct mmc_ioc_cmd __user *)arg);
+ case MMC_IOC_MULTI_CMD:
+ return mmc_blk_ioctl_multi_cmd(bdev,
+ (struct mmc_ioc_multi_cmd __user *)arg);
+ default:
+ return -EINVAL;
+ }
}
#ifdef CONFIG_COMPAT
@@ -915,6 +1004,9 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
if (!err)
break;
+ /* Re-tune if needed */
+ mmc_retune_recheck(card->host);
+
prev_cmd_status_valid = false;
pr_err("%s: error %d sending status command, %sing\n",
req->rq_disk->disk_name, err, retry ? "retry" : "abort");
@@ -1206,6 +1298,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
mmc_active);
struct mmc_blk_request *brq = &mq_mrq->brq;
struct request *req = mq_mrq->req;
+ int need_retune = card->host->need_retune;
int ecc_err = 0, gen_err = 0;
/*
@@ -1273,6 +1366,12 @@ static int mmc_blk_err_check(struct mmc_card *card,
}
if (brq->data.error) {
+ if (need_retune && !brq->retune_retry_done) {
+ pr_info("%s: retrying because a re-tune was needed\n",
+ req->rq_disk->disk_name);
+ brq->retune_retry_done = 1;
+ return MMC_BLK_RETRY;
+ }
pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
req->rq_disk->disk_name, brq->data.error,
(unsigned)blk_rq_pos(req),
@@ -1367,13 +1466,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
/*
* Reliable writes are used to implement Forced Unit Access and
- * REQ_META accesses, and are supported only on MMCs.
- *
- * XXX: this really needs a good explanation of why REQ_META
- * is treated special.
+ * are supported only on MMCs.
*/
- bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
- (req->cmd_flags & REQ_META)) &&
+ bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
(rq_data_dir(req) == WRITE) &&
(md->flags & MMC_BLK_REL_WR);
@@ -1832,7 +1927,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
- int ret = 1, disable_multi = 0, retry = 0, type;
+ int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
struct request *req = rqc;
@@ -1918,6 +2013,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
goto start_new_req;
break;
case MMC_BLK_RETRY:
+ retune_retry_done = brq->retune_retry_done;
if (retry++ < 5)
break;
/* Fall through */
@@ -1980,6 +2076,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
mmc_start_req(card->host,
&mq_rq->mmc_active, NULL);
}
+ mq_rq->brq.retune_retry_done = retune_retry_done;
}
} while (ret);
@@ -2221,7 +2318,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
* The CSD capacity field is in units of read_blkbits.
* set_capacity takes units of 512 bytes.
*/
- size = card->csd.capacity << (card->csd.read_blkbits - 9);
+ size = (typeof(sector_t))card->csd.capacity
+ << (card->csd.read_blkbits - 9);
}
return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
@@ -2373,6 +2471,7 @@ force_ro_fail:
#define CID_MANFID_TOSHIBA 0x11
#define CID_MANFID_MICRON 0x13
#define CID_MANFID_SAMSUNG 0x15
+#define CID_MANFID_KINGSTON 0x70
static const struct mmc_fixup blk_fixups[] =
{
@@ -2395,6 +2494,10 @@ static const struct mmc_fixup blk_fixups[] =
*
* N.B. This doesn't affect SD cards.
*/
+ MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
@@ -2431,6 +2534,15 @@ static const struct mmc_fixup blk_fixups[] =
MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ /*
+ * On Some Kingston eMMCs, performing trim can result in
+ * unrecoverable data corruption occasionally due to a firmware bug.
+ */
+ MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+ MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
END_FIXUP
};
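
The refactoring above splits the ioctl body into __mmc_blk_ioctl_cmd() so that the new MMC_IOC_MULTI_CMD path can issue several commands under one mmc_get_card()/mmc_put_card() claim, copying responses and read data back per command afterwards. A hypothetical userspace sketch of the new ioctl follows; it is not part of this patch, the device path is illustrative, and the command/flag constants mirror kernel-internal values, since only the ioctl structures are exported through <linux/mmc/ioctl.h>.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/major.h>
#include <linux/mmc/ioctl.h>

#define MMC_SEND_EXT_CSD	8	/* CMD8 on eMMC: read the 512-byte EXT_CSD */
#define MMC_RSP_R1		0x15	/* response present | CRC | opcode check */
#define MMC_CMD_ADTC		0x20	/* command with an associated data transfer */

int main(void)
{
	__u8 ext_csd[2][512];
	struct mmc_ioc_multi_cmd *multi;
	unsigned int i;
	int fd, ret;

	/* Must be the whole block device, and needs CAP_SYS_RAWIO
	 * (both checks are now made centrally in mmc_blk_ioctl() above). */
	fd = open("/dev/mmcblk0", O_RDWR);
	if (fd < 0)
		return 1;

	multi = calloc(1, sizeof(*multi) + 2 * sizeof(struct mmc_ioc_cmd));
	if (!multi) {
		close(fd);
		return 1;
	}
	multi->num_of_cmds = 2;			/* must not exceed MMC_IOC_MAX_CMDS */

	for (i = 0; i < 2; i++) {
		struct mmc_ioc_cmd *ic = &multi->cmds[i];

		ic->opcode = MMC_SEND_EXT_CSD;
		ic->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		ic->write_flag = 0;		/* read: the kernel copies the data back */
		ic->blksz = 512;
		ic->blocks = 1;
		mmc_ioc_cmd_set_data((*ic), ext_csd[i]);
	}

	/* Both commands run back to back under a single card claim. */
	ret = ioctl(fd, MMC_IOC_MULTI_CMD, multi);
	if (!ret)
		printf("EXT_CSD revision: %u\n", (unsigned int)ext_csd[0][192]);

	free(multi);
	close(fd);
	return ret ? 1 : 0;
}
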
diff --git a/kernel/drivers/mmc/card/mmc_test.c b/kernel/drivers/mmc/card/mmc_test.c
index 53b741398..7fc9174d4 100644
--- a/kernel/drivers/mmc/card/mmc_test.c
+++ b/kernel/drivers/mmc/card/mmc_test.c
@@ -268,8 +268,6 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
u8 *buffer, unsigned addr, unsigned blksz, int write)
{
- int ret;
-
struct mmc_request mrq = {0};
struct mmc_command cmd = {0};
struct mmc_command stop = {0};
@@ -292,11 +290,7 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
if (data.error)
return data.error;
- ret = mmc_test_wait_busy(test);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_wait_busy(test);
}
static void mmc_test_free_mem(struct mmc_test_mem *mem)
@@ -826,9 +820,7 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
mmc_test_nonblock_reset(&mrq1, &cmd1,
&stop1, &data1);
}
- done_areq = cur_areq;
- cur_areq = other_areq;
- other_areq = done_areq;
+ swap(cur_areq, other_areq);
dev_addr += blocks;
}
@@ -994,11 +986,7 @@ static int mmc_test_basic_write(struct mmc_test_card *test)
sg_init_one(&sg, test->buffer, 512);
- ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
}
static int mmc_test_basic_read(struct mmc_test_card *test)
@@ -1012,44 +1000,29 @@ static int mmc_test_basic_read(struct mmc_test_card *test)
sg_init_one(&sg, test->buffer, 512);
- ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
}
static int mmc_test_verify_write(struct mmc_test_card *test)
{
- int ret;
struct scatterlist sg;
sg_init_one(&sg, test->buffer, 512);
- ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}
static int mmc_test_verify_read(struct mmc_test_card *test)
{
- int ret;
struct scatterlist sg;
sg_init_one(&sg, test->buffer, 512);
- ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}
static int mmc_test_multi_write(struct mmc_test_card *test)
{
- int ret;
unsigned int size;
struct scatterlist sg;
@@ -1066,16 +1039,11 @@ static int mmc_test_multi_write(struct mmc_test_card *test)
sg_init_one(&sg, test->buffer, size);
- ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
}
static int mmc_test_multi_read(struct mmc_test_card *test)
{
- int ret;
unsigned int size;
struct scatterlist sg;
@@ -1092,11 +1060,7 @@ static int mmc_test_multi_read(struct mmc_test_card *test)
sg_init_one(&sg, test->buffer, size);
- ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
}
static int mmc_test_pow2_write(struct mmc_test_card *test)
@@ -1263,11 +1227,7 @@ static int mmc_test_xfersize_write(struct mmc_test_card *test)
if (ret)
return ret;
- ret = mmc_test_broken_transfer(test, 1, 512, 1);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_broken_transfer(test, 1, 512, 1);
}
static int mmc_test_xfersize_read(struct mmc_test_card *test)
@@ -1278,11 +1238,7 @@ static int mmc_test_xfersize_read(struct mmc_test_card *test)
if (ret)
return ret;
- ret = mmc_test_broken_transfer(test, 1, 512, 0);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_broken_transfer(test, 1, 512, 0);
}
static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
@@ -1296,11 +1252,7 @@ static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
if (ret)
return ret;
- ret = mmc_test_broken_transfer(test, 2, 512, 1);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_broken_transfer(test, 2, 512, 1);
}
static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
@@ -1314,48 +1266,33 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
if (ret)
return ret;
- ret = mmc_test_broken_transfer(test, 2, 512, 0);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_broken_transfer(test, 2, 512, 0);
}
#ifdef CONFIG_HIGHMEM
static int mmc_test_write_high(struct mmc_test_card *test)
{
- int ret;
struct scatterlist sg;
sg_init_table(&sg, 1);
sg_set_page(&sg, test->highmem, 512, 0);
- ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}
static int mmc_test_read_high(struct mmc_test_card *test)
{
- int ret;
struct scatterlist sg;
sg_init_table(&sg, 1);
sg_set_page(&sg, test->highmem, 512, 0);
- ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}
static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
- int ret;
unsigned int size;
struct scatterlist sg;
@@ -1373,16 +1310,11 @@ static int mmc_test_multi_write_high(struct mmc_test_card *test)
sg_init_table(&sg, 1);
sg_set_page(&sg, test->highmem, size, 0);
- ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
}
static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
- int ret;
unsigned int size;
struct scatterlist sg;
@@ -1400,11 +1332,7 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test)
sg_init_table(&sg, 1);
sg_set_page(&sg, test->highmem, size, 0);
- ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
- if (ret)
- return ret;
-
- return 0;
+ return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
}
#else
@@ -2335,15 +2263,12 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
/*
* eMMC hardware reset.
*/
-static int mmc_test_hw_reset(struct mmc_test_card *test)
+static int mmc_test_reset(struct mmc_test_card *test)
{
struct mmc_card *card = test->card;
struct mmc_host *host = card->host;
int err;
- if (!mmc_card_mmc(card) || !mmc_can_reset(card))
- return RESULT_UNSUP_CARD;
-
err = mmc_hw_reset(host);
if (!err)
return RESULT_OK;
@@ -2677,8 +2602,8 @@ static const struct mmc_test_case mmc_test_cases[] = {
},
{
- .name = "eMMC hardware reset",
- .run = mmc_test_hw_reset,
+ .name = "Reset test",
+ .run = mmc_test_reset,
},
};
diff --git a/kernel/drivers/mmc/card/queue.c b/kernel/drivers/mmc/card/queue.c
index 8efa3684a..6f4323c6d 100644
--- a/kernel/drivers/mmc/card/queue.c
+++ b/kernel/drivers/mmc/card/queue.c
@@ -56,7 +56,6 @@ static int mmc_queue_thread(void *d)
down(&mq->thread_sem);
do {
struct request *req = NULL;
- struct mmc_queue_req *tmp;
unsigned int cmd_flags = 0;
spin_lock_irq(q->queue_lock);
@@ -69,6 +68,7 @@ static int mmc_queue_thread(void *d)
set_current_state(TASK_RUNNING);
cmd_flags = req ? req->cmd_flags : 0;
mq->issue_fn(mq, req);
+ cond_resched();
if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
continue; /* fetch again */
@@ -86,9 +86,7 @@ static int mmc_queue_thread(void *d)
mq->mqrq_prev->brq.mrq.data = NULL;
mq->mqrq_prev->req = NULL;
- tmp = mq->mqrq_prev;
- mq->mqrq_prev = mq->mqrq_cur;
- mq->mqrq_cur = tmp;
+ swap(mq->mqrq_prev, mq->mqrq_cur);
} else {
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
@@ -167,7 +165,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
return;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
- q->limits.max_discard_sectors = max_discard;
+ blk_queue_max_discard_sectors(q, max_discard);
if (card->erased_byte == 0 && !mmc_can_discard(card))
q->limits.discard_zeroes_data = 1;
q->limits.discard_granularity = card->pref_erase << 9;
@@ -469,7 +467,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
sg_set_buf(__sg, buf + offset, len);
offset += len;
remain -= len;
- (__sg++)->page_link &= ~0x02;
+ sg_unmark_end(__sg++);
sg_len++;
} while (remain);
}
@@ -477,7 +475,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
list_for_each_entry(req, &packed->list, queuelist) {
sg_len += blk_rq_map_sg(mq->queue, req, __sg);
__sg = sg + (sg_len - 1);
- (__sg++)->page_link &= ~0x02;
+ sg_unmark_end(__sg++);
}
sg_mark_end(sg + (sg_len - 1));
return sg_len;
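
For reference, the sg_unmark_end() calls introduced above are the accessor form of the open-coded page_link &= ~0x02 they replace: bit 0x02 of page_link is the end-of-table marker that sg_init_table() puts on the last entry and that sg_mark_end() sets explicitly. A minimal kernel-style sketch of the append idiom used by mmc_queue_packed_map_sg() (hypothetical helper, not part of this patch):

#include <linux/scatterlist.h>

static unsigned int example_pack_bufs(struct scatterlist *sg,
				      void *bufs[], unsigned int lens[],
				      unsigned int nbufs)
{
	unsigned int i;

	sg_init_table(sg, nbufs);	/* zeroes the table, marks sg[nbufs - 1] as end */
	for (i = 0; i < nbufs; i++) {
		sg_set_buf(&sg[i], bufs[i], lens[i]);
		sg_unmark_end(&sg[i]);	/* more entries may still be appended */
	}
	sg_mark_end(&sg[nbufs - 1]);	/* terminate at the actual tail */
	return nbufs;
}
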
diff --git a/kernel/drivers/mmc/card/queue.h b/kernel/drivers/mmc/card/queue.h
index 99e6521e6..36cddab57 100644
--- a/kernel/drivers/mmc/card/queue.h
+++ b/kernel/drivers/mmc/card/queue.h
@@ -12,6 +12,7 @@ struct mmc_blk_request {
struct mmc_command cmd;
struct mmc_command stop;
struct mmc_data data;
+ int retune_retry_done;
};
enum mmc_packed_type {
diff --git a/kernel/drivers/mmc/core/Kconfig b/kernel/drivers/mmc/core/Kconfig
index 9ebee72d9..4c33d7690 100644
--- a/kernel/drivers/mmc/core/Kconfig
+++ b/kernel/drivers/mmc/core/Kconfig
@@ -1,13 +1,3 @@
#
# MMC core configuration
#
-
-config MMC_CLKGATE
- bool "MMC host clock gating"
- help
- This will attempt to aggressively gate the clock to the MMC card.
- This is done to save power due to gating off the logic and bus
- noise when the MMC card is not in use. Your host driver has to
- support handling this in order for it to be of any use.
-
- If unsure, say N.
diff --git a/kernel/drivers/mmc/core/core.c b/kernel/drivers/mmc/core/core.c
index 588fb7908..5ae89e48f 100644
--- a/kernel/drivers/mmc/core/core.c
+++ b/kernel/drivers/mmc/core/core.c
@@ -133,6 +133,14 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
struct mmc_command *cmd = mrq->cmd;
int err = cmd->error;
+ /* Flag re-tuning needed on CRC errors */
+ if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+ cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
+ (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
+ (mrq->data && mrq->data->error == -EILSEQ) ||
+ (mrq->stop && mrq->stop->error == -EILSEQ)))
+ mmc_retune_needed(host);
+
if (err && cmd->retries && mmc_host_is_spi(host)) {
if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
cmd->retries = 0;
@@ -179,19 +187,51 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
if (mrq->done)
mrq->done(mrq);
-
- mmc_host_clk_release(host);
}
}
EXPORT_SYMBOL(mmc_request_done);
+static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int err;
+
+ /* Assumes host controller has been runtime resumed by mmc_claim_host */
+ err = mmc_retune(host);
+ if (err) {
+ mrq->cmd->error = err;
+ mmc_request_done(host, mrq);
+ return;
+ }
+
+ /*
+ * For sdio rw commands we must wait for card busy otherwise some
+ * sdio devices won't work properly.
+ */
+ if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
+ int tries = 500; /* Wait aprox 500ms at maximum */
+
+ while (host->ops->card_busy(host) && --tries)
+ mmc_delay(1);
+
+ if (tries == 0) {
+ mrq->cmd->error = -EBUSY;
+ mmc_request_done(host, mrq);
+ return;
+ }
+ }
+
+ host->ops->request(host, mrq);
+}
+
static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
unsigned int i, sz;
struct scatterlist *sg;
#endif
+ mmc_retune_hold(host);
+
if (mmc_card_removed(host->card))
return -ENOMEDIUM;
@@ -250,9 +290,8 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
mrq->stop->mrq = mrq;
}
}
- mmc_host_clk_hold(host);
led_trigger_event(host->led, LED_FULL);
- host->ops->request(host, mrq);
+ __mmc_start_request(host, mrq);
return 0;
}
@@ -301,12 +340,15 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
use_busy_signal = false;
}
+ mmc_retune_hold(card->host);
+
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BKOPS_START, 1, timeout,
use_busy_signal, true, false);
if (err) {
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
+ mmc_retune_release(card->host);
goto out;
}
@@ -317,6 +359,8 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
*/
if (!use_busy_signal)
mmc_card_set_doing_bkops(card);
+ else
+ mmc_retune_release(card->host);
out:
mmc_release_host(card->host);
}
@@ -419,22 +463,22 @@ static int mmc_wait_for_data_req_done(struct mmc_host *host,
host->areq);
break; /* return err */
} else {
+ mmc_retune_recheck(host);
pr_info("%s: req failed (CMD%u): %d, retrying...\n",
mmc_hostname(host),
cmd->opcode, cmd->error);
cmd->retries--;
cmd->error = 0;
- host->ops->request(host, mrq);
+ __mmc_start_request(host, mrq);
continue; /* wait for done/new event again */
}
} else if (context_info->is_new_req) {
context_info->is_new_req = false;
- if (!next_req) {
- err = MMC_BLK_NEW_REQUEST;
- break; /* return err */
- }
+ if (!next_req)
+ return MMC_BLK_NEW_REQUEST;
}
}
+ mmc_retune_release(host);
return err;
}
@@ -469,12 +513,16 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
mmc_card_removed(host->card))
break;
+ mmc_retune_recheck(host);
+
pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
mmc_hostname(host), cmd->opcode, cmd->error);
cmd->retries--;
cmd->error = 0;
- host->ops->request(host, mrq);
+ __mmc_start_request(host, mrq);
}
+
+ mmc_retune_release(host);
}
/**
@@ -491,11 +539,8 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
bool is_first_req)
{
- if (host->ops->pre_req) {
- mmc_host_clk_hold(host);
+ if (host->ops->pre_req)
host->ops->pre_req(host, mrq, is_first_req);
- mmc_host_clk_release(host);
- }
}
/**
@@ -510,11 +555,8 @@ static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
int err)
{
- if (host->ops->post_req) {
- mmc_host_clk_hold(host);
+ if (host->ops->post_req)
host->ops->post_req(host, mrq, err);
- mmc_host_clk_release(host);
- }
}
/**
@@ -730,6 +772,7 @@ int mmc_stop_bkops(struct mmc_card *card)
*/
if (!err || (err == -EINVAL)) {
mmc_card_clr_doing_bkops(card);
+ mmc_retune_release(card->host);
err = 0;
}
@@ -798,9 +841,9 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
unsigned int timeout_us, limit_us;
timeout_us = data->timeout_ns / 1000;
- if (mmc_host_clk_rate(card->host))
+ if (card->host->ios.clock)
timeout_us += data->timeout_clks * 1000 /
- (mmc_host_clk_rate(card->host) / 1000);
+ (card->host->ios.clock / 1000);
if (data->flags & MMC_DATA_WRITE)
/*
@@ -998,8 +1041,6 @@ static inline void mmc_set_ios(struct mmc_host *host)
ios->power_mode, ios->chip_select, ios->vdd,
ios->bus_width, ios->timing);
- if (ios->clock > 0)
- mmc_set_ungated(host);
host->ops->set_ios(host, ios);
}
@@ -1008,17 +1049,15 @@ static inline void mmc_set_ios(struct mmc_host *host)
*/
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
- mmc_host_clk_hold(host);
host->ios.chip_select = mode;
mmc_set_ios(host);
- mmc_host_clk_release(host);
}
/*
* Sets the host clock to the highest possible frequency that
* is below "hz".
*/
-static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
WARN_ON(hz && hz < host->f_min);
@@ -1029,68 +1068,6 @@ static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
mmc_set_ios(host);
}
-void mmc_set_clock(struct mmc_host *host, unsigned int hz)
-{
- mmc_host_clk_hold(host);
- __mmc_set_clock(host, hz);
- mmc_host_clk_release(host);
-}
-
-#ifdef CONFIG_MMC_CLKGATE
-/*
- * This gates the clock by setting it to 0 Hz.
- */
-void mmc_gate_clock(struct mmc_host *host)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&host->clk_lock, flags);
- host->clk_old = host->ios.clock;
- host->ios.clock = 0;
- host->clk_gated = true;
- spin_unlock_irqrestore(&host->clk_lock, flags);
- mmc_set_ios(host);
-}
-
-/*
- * This restores the clock from gating by using the cached
- * clock value.
- */
-void mmc_ungate_clock(struct mmc_host *host)
-{
- /*
- * We should previously have gated the clock, so the clock shall
- * be 0 here! The clock may however be 0 during initialization,
- * when some request operations are performed before setting
- * the frequency. When ungate is requested in that situation
- * we just ignore the call.
- */
- if (host->clk_old) {
- BUG_ON(host->ios.clock);
- /* This call will also set host->clk_gated to false */
- __mmc_set_clock(host, host->clk_old);
- }
-}
-
-void mmc_set_ungated(struct mmc_host *host)
-{
- unsigned long flags;
-
- /*
- * We've been given a new frequency while the clock is gated,
- * so make sure we regard this as ungating it.
- */
- spin_lock_irqsave(&host->clk_lock, flags);
- host->clk_gated = false;
- spin_unlock_irqrestore(&host->clk_lock, flags);
-}
-
-#else
-void mmc_set_ungated(struct mmc_host *host)
-{
-}
-#endif
-
int mmc_execute_tuning(struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -1105,12 +1082,12 @@ int mmc_execute_tuning(struct mmc_card *card)
else
opcode = MMC_SEND_TUNING_BLOCK;
- mmc_host_clk_hold(host);
err = host->ops->execute_tuning(host, opcode);
- mmc_host_clk_release(host);
if (err)
pr_err("%s: tuning execution failed\n", mmc_hostname(host));
+ else
+ mmc_retune_enable(host);
return err;
}
@@ -1120,10 +1097,8 @@ int mmc_execute_tuning(struct mmc_card *card)
*/
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
- mmc_host_clk_hold(host);
host->ios.bus_mode = mode;
mmc_set_ios(host);
- mmc_host_clk_release(host);
}
/*
@@ -1131,10 +1106,8 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
*/
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
- mmc_host_clk_hold(host);
host->ios.bus_width = width;
mmc_set_ios(host);
- mmc_host_clk_release(host);
}
/*
@@ -1142,6 +1115,8 @@ void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
*/
void mmc_set_initial_state(struct mmc_host *host)
{
+ mmc_retune_disable(host);
+
if (mmc_host_is_spi(host))
host->ios.chip_select = MMC_CS_HIGH;
else
@@ -1149,6 +1124,7 @@ void mmc_set_initial_state(struct mmc_host *host)
host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ios.timing = MMC_TIMING_LEGACY;
+ host->ios.drv_type = 0;
mmc_set_ios(host);
}
@@ -1301,6 +1277,40 @@ struct device_node *mmc_of_find_child_device(struct mmc_host *host,
#ifdef CONFIG_REGULATOR
/**
+ * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
+ * @vdd_bit: OCR bit number
+ * @min_uV: minimum voltage value (mV)
+ * @max_uV: maximum voltage value (mV)
+ *
+ * This function returns the voltage range according to the provided OCR
+ * bit number. If conversion is not possible a negative errno value returned.
+ */
+static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
+{
+ int tmp;
+
+ if (!vdd_bit)
+ return -EINVAL;
+
+ /*
+ * REVISIT mmc_vddrange_to_ocrmask() may have set some
+ * bits this regulator doesn't quite support ... don't
+ * be too picky, most cards and regulators are OK with
+ * a 0.1V range goof (it's a small error percentage).
+ */
+ tmp = vdd_bit - ilog2(MMC_VDD_165_195);
+ if (tmp == 0) {
+ *min_uV = 1650 * 1000;
+ *max_uV = 1950 * 1000;
+ } else {
+ *min_uV = 1900 * 1000 + tmp * 100 * 1000;
+ *max_uV = *min_uV + 100 * 1000;
+ }
+
+ return 0;
+}
+
+/**
* mmc_regulator_get_ocrmask - return mask of supported voltages
* @supply: regulator to use
*
@@ -1363,22 +1373,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
int min_uV, max_uV;
if (vdd_bit) {
- int tmp;
-
- /*
- * REVISIT mmc_vddrange_to_ocrmask() may have set some
- * bits this regulator doesn't quite support ... don't
- * be too picky, most cards and regulators are OK with
- * a 0.1V range goof (it's a small error percentage).
- */
- tmp = vdd_bit - ilog2(MMC_VDD_165_195);
- if (tmp == 0) {
- min_uV = 1650 * 1000;
- max_uV = 1950 * 1000;
- } else {
- min_uV = 1900 * 1000 + tmp * 100 * 1000;
- max_uV = min_uV + 100 * 1000;
- }
+ mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
result = regulator_set_voltage(supply, min_uV, max_uV);
if (result == 0 && !mmc->regulator_enabled) {
@@ -1399,6 +1394,84 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
+static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
+ int min_uV, int target_uV,
+ int max_uV)
+{
+ /*
+ * Check if supported first to avoid errors since we may try several
+ * signal levels during power up and don't want to show errors.
+ */
+ if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
+ return -EINVAL;
+
+ return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
+ max_uV);
+}
+
+/**
+ * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
+ *
+ * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
+ * That will match the behavior of old boards where VQMMC and VMMC were supplied
+ * by the same supply. The Bus Operating conditions for 3.3V signaling in the
+ * SD card spec also define VQMMC in terms of VMMC.
+ * If this is not possible we'll try the full 2.7-3.6V of the spec.
+ *
+ * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
+ * requested voltage. This is definitely a good idea for UHS where there's a
+ * separate regulator on the card that's trying to make 1.8V and it's best if
+ * we match.
+ *
+ * This function is expected to be used by a controller's
+ * start_signal_voltage_switch() function.
+ */
+int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct device *dev = mmc_dev(mmc);
+ int ret, volt, min_uV, max_uV;
+
+ /* If no vqmmc supply then we can't change the voltage */
+ if (IS_ERR(mmc->supply.vqmmc))
+ return -EINVAL;
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_120:
+ return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ 1100000, 1200000, 1300000);
+ case MMC_SIGNAL_VOLTAGE_180:
+ return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ 1700000, 1800000, 1950000);
+ case MMC_SIGNAL_VOLTAGE_330:
+ ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
+ __func__, volt, max_uV);
+
+ min_uV = max(volt - 300000, 2700000);
+ max_uV = min(max_uV + 200000, 3600000);
+
+ /*
+ * Due to a limitation in the current implementation of
+ * regulator_set_voltage_triplet() which is taking the lowest
+ * voltage possible if below the target, search for a suitable
+ * voltage in two steps and try to stay close to vmmc
+ * with a 0.3V tolerance at first.
+ */
+ if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ min_uV, volt, max_uV))
+ return 0;
+
+ return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ 2700000, volt, 3600000);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
+
#endif /* CONFIG_REGULATOR */
int mmc_regulator_get_supply(struct mmc_host *mmc)
@@ -1475,11 +1548,8 @@ int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
int old_signal_voltage = host->ios.signal_voltage;
host->ios.signal_voltage = signal_voltage;
- if (host->ops->start_signal_voltage_switch) {
- mmc_host_clk_hold(host);
+ if (host->ops->start_signal_voltage_switch)
err = host->ops->start_signal_voltage_switch(host, &host->ios);
- mmc_host_clk_release(host);
- }
if (err)
host->ios.signal_voltage = old_signal_voltage;
@@ -1513,20 +1583,17 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
pr_warn("%s: cannot verify signal voltage switch\n",
mmc_hostname(host));
- mmc_host_clk_hold(host);
-
cmd.opcode = SD_SWITCH_VOLTAGE;
cmd.arg = 0;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
- goto err_command;
+ return err;
+
+ if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
+ return -EIO;
- if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
- err = -EIO;
- goto err_command;
- }
/*
* The card should drive cmd and dat[0:3] low immediately
* after the response of cmd11, but wait 1 ms to be sure
@@ -1553,8 +1620,8 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
goto power_cycle;
}
- /* Keep clock gated for at least 5 ms */
- mmc_delay(5);
+ /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
+ mmc_delay(10);
host->ios.clock = clock;
mmc_set_ios(host);
@@ -1575,9 +1642,6 @@ power_cycle:
mmc_power_cycle(host, ocr);
}
-err_command:
- mmc_host_clk_release(host);
-
return err;
}
@@ -1586,10 +1650,8 @@ err_command:
*/
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
- mmc_host_clk_hold(host);
host->ios.timing = timing;
mmc_set_ios(host);
- mmc_host_clk_release(host);
}
/*
@@ -1597,10 +1659,41 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
*/
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
- mmc_host_clk_hold(host);
host->ios.drv_type = drv_type;
mmc_set_ios(host);
- mmc_host_clk_release(host);
+}
+
+int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
+ int card_drv_type, int *drv_type)
+{
+ struct mmc_host *host = card->host;
+ int host_drv_type = SD_DRIVER_TYPE_B;
+
+ *drv_type = 0;
+
+ if (!host->ops->select_drive_strength)
+ return 0;
+
+ /* Use SD definition of driver strength for hosts */
+ if (host->caps & MMC_CAP_DRIVER_TYPE_A)
+ host_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (host->caps & MMC_CAP_DRIVER_TYPE_C)
+ host_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (host->caps & MMC_CAP_DRIVER_TYPE_D)
+ host_drv_type |= SD_DRIVER_TYPE_D;
+
+ /*
+ * The drive strength that the hardware can support
+ * depends on the board design. Pass the appropriate
+ * information and let the hardware specific code
+ * return what is possible given the options
+ */
+ return host->ops->select_drive_strength(card, max_dtr,
+ host_drv_type,
+ card_drv_type,
+ drv_type);
}
/*
@@ -1619,8 +1712,6 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
if (host->ios.power_mode == MMC_POWER_ON)
return;
- mmc_host_clk_hold(host);
-
mmc_pwrseq_pre_power_on(host);
host->ios.vdd = fls(ocr) - 1;
@@ -1654,8 +1745,6 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
* time required to reach a stable voltage.
*/
mmc_delay(10);
-
- mmc_host_clk_release(host);
}
void mmc_power_off(struct mmc_host *host)
@@ -1663,8 +1752,6 @@ void mmc_power_off(struct mmc_host *host)
if (host->ios.power_mode == MMC_POWER_OFF)
return;
- mmc_host_clk_hold(host);
-
mmc_pwrseq_power_off(host);
host->ios.clock = 0;
@@ -1680,8 +1767,6 @@ void mmc_power_off(struct mmc_host *host)
* can be successfully turned on again.
*/
mmc_delay(1);
-
- mmc_host_clk_release(host);
}
void mmc_power_cycle(struct mmc_host *host, u32 ocr)
@@ -1897,7 +1982,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
*/
timeout_clks <<= 1;
timeout_us += (timeout_clks * 1000) /
- (mmc_host_clk_rate(card->host) / 1000);
+ (card->host->ios.clock / 1000);
erase_timeout = timeout_us / 1000;
@@ -1972,6 +2057,8 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
unsigned long timeout;
int err;
+ mmc_retune_hold(card->host);
+
/*
* qty is used to calculate the erase timeout which depends on how many
* erase groups (or allocation units in SD terminology) are affected.
@@ -2075,6 +2162,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
(R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
+ mmc_retune_release(card->host);
return err;
}
@@ -2091,6 +2179,7 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
unsigned int arg)
{
unsigned int rem, to = from + nr;
+ int err;
if (!(card->host->caps & MMC_CAP_ERASE) ||
!(card->csd.cmdclass & CCC_ERASE))
@@ -2141,6 +2230,22 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
/* 'from' and 'to' are inclusive */
to -= 1;
+ /*
+ * Special case where only one erase-group fits in the timeout budget:
+ * If the region crosses an erase-group boundary on this particular
+ * case, we will be trimming more than one erase-group which, does not
+ * fit in the timeout budget of the controller, so we need to split it
+ * and call mmc_do_erase() twice if necessary. This special case is
+ * identified by the card->eg_boundary flag.
+ */
+ rem = card->erase_size - (from % card->erase_size);
+ if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
+ err = mmc_do_erase(card, from, from + rem - 1, arg);
+ from += rem;
+ if ((err) || (to <= from))
+ return err;
+ }
+
return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
@@ -2156,7 +2261,8 @@ EXPORT_SYMBOL(mmc_can_erase);
int mmc_can_trim(struct mmc_card *card)
{
- if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
+ if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
+ (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
return 1;
return 0;
}
@@ -2236,16 +2342,28 @@ static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
if (!qty)
return 0;
+ /*
+ * When specifying a sector range to trim, chances are we might cross
+ * an erase-group boundary even if the amount of sectors is less than
+ * one erase-group.
+ * If we can only fit one erase-group in the controller timeout budget,
+ * we have to care that erase-group boundaries are not crossed by a
+ * single trim operation. We flag that special case with "eg_boundary".
+ * In all other cases we can just decrement qty and pretend that we
+ * always touch (qty + 1) erase-groups as a simple optimization.
+ */
if (qty == 1)
- return 1;
+ card->eg_boundary = 1;
+ else
+ qty--;
/* Convert qty to sectors */
if (card->erase_shift)
- max_discard = --qty << card->erase_shift;
+ max_discard = qty << card->erase_shift;
else if (mmc_card_sd(card))
- max_discard = qty;
+ max_discard = qty + 1;
else
- max_discard = --qty * card->erase_size;
+ max_discard = qty * card->erase_size;
return max_discard;
}
@@ -2312,9 +2430,7 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
{
if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
return;
- mmc_host_clk_hold(host);
host->ops->hw_reset(host);
- mmc_host_clk_release(host);
}
int mmc_hw_reset(struct mmc_host *host)
@@ -2333,7 +2449,8 @@ int mmc_hw_reset(struct mmc_host *host)
ret = host->bus_ops->reset(host);
mmc_bus_put(host);
- pr_warn("%s: tried to reset card\n", mmc_hostname(host));
+ if (ret != -EOPNOTSUPP)
+ pr_warn("%s: tried to reset card\n", mmc_hostname(host));
return ret;
}
@@ -2521,10 +2638,14 @@ void mmc_start_host(struct mmc_host *host)
host->f_init = max(freqs[0], host->f_min);
host->rescan_disable = 0;
host->ios.power_mode = MMC_POWER_UNDEFINED;
+
+ mmc_claim_host(host);
if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
mmc_power_off(host);
else
mmc_power_up(host, host->ocr_avail);
+ mmc_release_host(host);
+
mmc_gpiod_request_cd_irq(host);
_mmc_detect_change(host, 0, false);
}
@@ -2562,7 +2683,9 @@ void mmc_stop_host(struct mmc_host *host)
BUG_ON(host->card);
+ mmc_claim_host(host);
mmc_power_off(host);
+ mmc_release_host(host);
}
int mmc_power_save_host(struct mmc_host *host)
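
The eg_boundary handling added to mmc_do_calc_max_discard() and mmc_erase() above splits a trim that would otherwise straddle an erase-group boundary when only a single group fits in the controller's timeout budget. The following standalone sketch (ordinary userspace C, mirroring only the arithmetic rather than the kernel code, with made-up numbers) works through one such split.

#include <stdio.h>

static void split_trim(unsigned int erase_size, unsigned int from,
		       unsigned int nr)
{
	unsigned int to = from + nr - 1;		/* inclusive end sector */
	unsigned int rem = erase_size - (from % erase_size);

	if (nr > rem) {			/* range crosses an erase-group boundary */
		printf("trim %u..%u\n", from, from + rem - 1);
		from += rem;
	}
	printf("trim %u..%u\n", from, to);
}

int main(void)
{
	/* 1024-sector erase groups, 200 sectors starting at sector 4000:
	 * prints "trim 4000..4095" followed by "trim 4096..4199". */
	split_trim(1024, 4000, 200);
	return 0;
}
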
diff --git a/kernel/drivers/mmc/core/core.h b/kernel/drivers/mmc/core/core.h
index cfba3c05a..09241e56d 100644
--- a/kernel/drivers/mmc/core/core.h
+++ b/kernel/drivers/mmc/core/core.h
@@ -40,9 +40,6 @@ void mmc_init_erase(struct mmc_card *card);
void mmc_set_chip_select(struct mmc_host *host, int mode);
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
-void mmc_gate_clock(struct mmc_host *host);
-void mmc_ungate_clock(struct mmc_host *host);
-void mmc_set_ungated(struct mmc_host *host);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
@@ -50,6 +47,8 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr);
int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
+int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
+ int card_drv_type, int *drv_type);
void mmc_power_up(struct mmc_host *host, u32 ocr);
void mmc_power_off(struct mmc_host *host);
void mmc_power_cycle(struct mmc_host *host, u32 ocr);
@@ -88,6 +87,8 @@ void mmc_remove_card_debugfs(struct mmc_card *card);
void mmc_init_context_info(struct mmc_host *host);
int mmc_execute_tuning(struct mmc_card *card);
+int mmc_hs200_to_hs400(struct mmc_card *card);
+int mmc_hs400_to_hs200(struct mmc_card *card);
#endif
diff --git a/kernel/drivers/mmc/core/debugfs.c b/kernel/drivers/mmc/core/debugfs.c
index e9142108a..154aced0b 100644
--- a/kernel/drivers/mmc/core/debugfs.c
+++ b/kernel/drivers/mmc/core/debugfs.c
@@ -126,6 +126,12 @@ static int mmc_ios_show(struct seq_file *s, void *data)
case MMC_TIMING_SD_HS:
str = "sd high-speed";
break;
+ case MMC_TIMING_UHS_SDR12:
+ str = "sd uhs SDR12";
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ str = "sd uhs SDR25";
+ break;
case MMC_TIMING_UHS_SDR50:
str = "sd uhs SDR50";
break;
@@ -166,6 +172,25 @@ static int mmc_ios_show(struct seq_file *s, void *data)
}
seq_printf(s, "signal voltage:\t%u (%s)\n", ios->chip_select, str);
+ switch (ios->drv_type) {
+ case MMC_SET_DRIVER_TYPE_A:
+ str = "driver type A";
+ break;
+ case MMC_SET_DRIVER_TYPE_B:
+ str = "driver type B";
+ break;
+ case MMC_SET_DRIVER_TYPE_C:
+ str = "driver type C";
+ break;
+ case MMC_SET_DRIVER_TYPE_D:
+ str = "driver type D";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "driver type:\t%u (%s)\n", ios->drv_type, str);
+
return 0;
}
@@ -230,11 +255,6 @@ void mmc_add_host_debugfs(struct mmc_host *host)
&mmc_clock_fops))
goto err_node;
-#ifdef CONFIG_MMC_CLKGATE
- if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
- root, &host->clk_delay))
- goto err_node;
-#endif
#ifdef CONFIG_FAIL_MMC_REQUEST
if (fail_request)
setup_fault_attr(&fail_default_attr, fail_request);
diff --git a/kernel/drivers/mmc/core/host.c b/kernel/drivers/mmc/core/host.c
index 8be0df758..da950c442 100644
--- a/kernel/drivers/mmc/core/host.c
+++ b/kernel/drivers/mmc/core/host.c
@@ -61,245 +61,89 @@ void mmc_unregister_host_class(void)
class_unregister(&mmc_host_class);
}
-#ifdef CONFIG_MMC_CLKGATE
-static ssize_t clkgate_delay_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+void mmc_retune_enable(struct mmc_host *host)
{
- struct mmc_host *host = cls_dev_to_mmc_host(dev);
- return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
+ host->can_retune = 1;
+ if (host->retune_period)
+ mod_timer(&host->retune_timer,
+ jiffies + host->retune_period * HZ);
}
-static ssize_t clkgate_delay_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+void mmc_retune_disable(struct mmc_host *host)
{
- struct mmc_host *host = cls_dev_to_mmc_host(dev);
- unsigned long flags, value;
-
- if (kstrtoul(buf, 0, &value))
- return -EINVAL;
-
- spin_lock_irqsave(&host->clk_lock, flags);
- host->clkgate_delay = value;
- spin_unlock_irqrestore(&host->clk_lock, flags);
- return count;
+ host->can_retune = 0;
+ del_timer_sync(&host->retune_timer);
+ host->retune_now = 0;
+ host->need_retune = 0;
}
-/*
- * Enabling clock gating will make the core call out to the host
- * once up and once down when it performs a request or card operation
- * intermingled in any fashion. The driver will see this through
- * set_ios() operations with ios.clock field set to 0 to gate (disable)
- * the block clock, and to the old frequency to enable it again.
- */
-static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+void mmc_retune_timer_stop(struct mmc_host *host)
{
- unsigned long tick_ns;
- unsigned long freq = host->ios.clock;
- unsigned long flags;
-
- if (!freq) {
- pr_debug("%s: frequency set to 0 in disable function, "
- "this means the clock is already disabled.\n",
- mmc_hostname(host));
- return;
- }
- /*
- * New requests may have appeared while we were scheduling,
- * then there is no reason to delay the check before
- * clk_disable().
- */
- spin_lock_irqsave(&host->clk_lock, flags);
-
- /*
- * Delay n bus cycles (at least 8 from MMC spec) before attempting
- * to disable the MCI block clock. The reference count may have
- * gone up again after this delay due to rescheduling!
- */
- if (!host->clk_requests) {
- spin_unlock_irqrestore(&host->clk_lock, flags);
- tick_ns = DIV_ROUND_UP(1000000000, freq);
- ndelay(host->clk_delay * tick_ns);
- } else {
- /* New users appeared while waiting for this work */
- spin_unlock_irqrestore(&host->clk_lock, flags);
- return;
- }
- mutex_lock(&host->clk_gate_mutex);
- spin_lock_irqsave(&host->clk_lock, flags);
- if (!host->clk_requests) {
- spin_unlock_irqrestore(&host->clk_lock, flags);
- /* This will set host->ios.clock to 0 */
- mmc_gate_clock(host);
- spin_lock_irqsave(&host->clk_lock, flags);
- pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
- }
- spin_unlock_irqrestore(&host->clk_lock, flags);
- mutex_unlock(&host->clk_gate_mutex);
+ del_timer_sync(&host->retune_timer);
}
+EXPORT_SYMBOL(mmc_retune_timer_stop);
-/*
- * Internal work. Work to disable the clock at some later point.
- */
-static void mmc_host_clk_gate_work(struct work_struct *work)
+void mmc_retune_hold(struct mmc_host *host)
{
- struct mmc_host *host = container_of(work, struct mmc_host,
- clk_gate_work.work);
-
- mmc_host_clk_gate_delayed(host);
+ if (!host->hold_retune)
+ host->retune_now = 1;
+ host->hold_retune += 1;
}
-/**
- * mmc_host_clk_hold - ungate hardware MCI clocks
- * @host: host to ungate.
- *
- * Makes sure the host ios.clock is restored to a non-zero value
- * past this call. Increase clock reference count and ungate clock
- * if we're the first user.
- */
-void mmc_host_clk_hold(struct mmc_host *host)
+void mmc_retune_release(struct mmc_host *host)
{
- unsigned long flags;
-
- /* cancel any clock gating work scheduled by mmc_host_clk_release() */
- cancel_delayed_work_sync(&host->clk_gate_work);
- mutex_lock(&host->clk_gate_mutex);
- spin_lock_irqsave(&host->clk_lock, flags);
- if (host->clk_gated) {
- spin_unlock_irqrestore(&host->clk_lock, flags);
- mmc_ungate_clock(host);
- spin_lock_irqsave(&host->clk_lock, flags);
- pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
- }
- host->clk_requests++;
- spin_unlock_irqrestore(&host->clk_lock, flags);
- mutex_unlock(&host->clk_gate_mutex);
+ if (host->hold_retune)
+ host->hold_retune -= 1;
+ else
+ WARN_ON(1);
}
-/**
- * mmc_host_may_gate_card - check if this card may be gated
- * @card: card to check.
- */
-static bool mmc_host_may_gate_card(struct mmc_card *card)
+int mmc_retune(struct mmc_host *host)
{
- /* If there is no card we may gate it */
- if (!card)
- return true;
- /*
- * Don't gate SDIO cards! These need to be clocked at all times
- * since they may be independent systems generating interrupts
- * and other events. The clock requests counter from the core will
- * go down to zero since the core does not need it, but we will not
- * gate the clock, because there is somebody out there that may still
- * be using it.
- */
- return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
-}
+ bool return_to_hs400 = false;
+ int err;
-/**
- * mmc_host_clk_release - gate off hardware MCI clocks
- * @host: host to gate.
- *
- * Calls the host driver with ios.clock set to zero as often as possible
- * in order to gate off hardware MCI clocks. Decrease clock reference
- * count and schedule disabling of clock.
- */
-void mmc_host_clk_release(struct mmc_host *host)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&host->clk_lock, flags);
- host->clk_requests--;
- if (mmc_host_may_gate_card(host->card) &&
- !host->clk_requests)
- schedule_delayed_work(&host->clk_gate_work,
- msecs_to_jiffies(host->clkgate_delay));
- spin_unlock_irqrestore(&host->clk_lock, flags);
-}
+ if (host->retune_now)
+ host->retune_now = 0;
+ else
+ return 0;
-/**
- * mmc_host_clk_rate - get current clock frequency setting
- * @host: host to get the clock frequency for.
- *
- * Returns current clock frequency regardless of gating.
- */
-unsigned int mmc_host_clk_rate(struct mmc_host *host)
-{
- unsigned long freq;
- unsigned long flags;
+ if (!host->need_retune || host->doing_retune || !host->card)
+ return 0;
- spin_lock_irqsave(&host->clk_lock, flags);
- if (host->clk_gated)
- freq = host->clk_old;
- else
- freq = host->ios.clock;
- spin_unlock_irqrestore(&host->clk_lock, flags);
- return freq;
-}
+ host->need_retune = 0;
-/**
- * mmc_host_clk_init - set up clock gating code
- * @host: host with potential clock to control
- */
-static inline void mmc_host_clk_init(struct mmc_host *host)
-{
- host->clk_requests = 0;
- /* Hold MCI clock for 8 cycles by default */
- host->clk_delay = 8;
- /*
- * Default clock gating delay is 0ms to avoid wasting power.
- * This value can be tuned by writing into sysfs entry.
- */
- host->clkgate_delay = 0;
- host->clk_gated = false;
- INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
- spin_lock_init(&host->clk_lock);
- mutex_init(&host->clk_gate_mutex);
-}
+ host->doing_retune = 1;
-/**
- * mmc_host_clk_exit - shut down clock gating code
- * @host: host with potential clock to control
- */
-static inline void mmc_host_clk_exit(struct mmc_host *host)
-{
- /*
- * Wait for any outstanding gate and then make sure we're
- * ungated before exiting.
- */
- if (cancel_delayed_work_sync(&host->clk_gate_work))
- mmc_host_clk_gate_delayed(host);
- if (host->clk_gated)
- mmc_host_clk_hold(host);
- /* There should be only one user now */
- WARN_ON(host->clk_requests > 1);
-}
+ if (host->ios.timing == MMC_TIMING_MMC_HS400) {
+ err = mmc_hs400_to_hs200(host->card);
+ if (err)
+ goto out;
-static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
-{
- host->clkgate_delay_attr.show = clkgate_delay_show;
- host->clkgate_delay_attr.store = clkgate_delay_store;
- sysfs_attr_init(&host->clkgate_delay_attr.attr);
- host->clkgate_delay_attr.attr.name = "clkgate_delay";
- host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
- if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
- pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
- mmc_hostname(host));
-}
-#else
+ return_to_hs400 = true;
-static inline void mmc_host_clk_init(struct mmc_host *host)
-{
-}
+ if (host->ops->prepare_hs400_tuning)
+ host->ops->prepare_hs400_tuning(host, &host->ios);
+ }
-static inline void mmc_host_clk_exit(struct mmc_host *host)
-{
+ err = mmc_execute_tuning(host->card);
+ if (err)
+ goto out;
+
+ if (return_to_hs400)
+ err = mmc_hs200_to_hs400(host->card);
+out:
+ host->doing_retune = 0;
+
+ return err;
}
-static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+static void mmc_retune_timer(unsigned long data)
{
-}
+ struct mmc_host *host = (struct mmc_host *)data;
-#endif
+ mmc_retune_needed(host);
+}
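The hold/release pair above is reference counted: the first mmc_retune_hold() also latches retune_now, so a deferred re-tune fires once the last holder calls mmc_retune_release(). A minimal sketch of how core code brackets an operation that must not be interrupted by re-tuning (do_card_operation() is a made-up placeholder; the real call sites are the __mmc_switch() and mmc_sleep() hunks later in this patch):

static int some_core_path(struct mmc_host *host)
{
	int err;

	mmc_retune_hold(host);		/* block re-tuning for the duration */
	err = do_card_operation(host);	/* placeholder for e.g. a CMD6 switch */
	mmc_retune_release(host);	/* drop the hold; a pending re-tune may now run */

	return err;
}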
/**
* mmc_of_parse() - parse host's device-tree node
@@ -314,7 +158,7 @@ int mmc_of_parse(struct mmc_host *host)
{
struct device_node *np;
u32 bus_width;
- int len, ret;
+ int ret;
bool cd_cap_invert, cd_gpio_invert = false;
bool ro_cap_invert, ro_gpio_invert = false;
@@ -361,19 +205,19 @@ int mmc_of_parse(struct mmc_host *host)
*/
/* Parse Card Detection */
- if (of_find_property(np, "non-removable", &len)) {
+ if (of_property_read_bool(np, "non-removable")) {
host->caps |= MMC_CAP_NONREMOVABLE;
} else {
cd_cap_invert = of_property_read_bool(np, "cd-inverted");
- if (of_find_property(np, "broken-cd", &len))
+ if (of_property_read_bool(np, "broken-cd"))
host->caps |= MMC_CAP_NEEDS_POLL;
ret = mmc_gpiod_request_cd(host, "cd", 0, true,
0, &cd_gpio_invert);
if (!ret)
dev_info(host->parent, "Got CD GPIO\n");
- else if (ret != -ENOENT)
+ else if (ret != -ENOENT && ret != -ENOSYS)
return ret;
/*
@@ -397,48 +241,53 @@ int mmc_of_parse(struct mmc_host *host)
ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
if (!ret)
dev_info(host->parent, "Got WP GPIO\n");
- else if (ret != -ENOENT)
+ else if (ret != -ENOENT && ret != -ENOSYS)
return ret;
+ if (of_property_read_bool(np, "disable-wp"))
+ host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+
/* See the comment on CD inversion above */
if (ro_cap_invert ^ ro_gpio_invert)
host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
- if (of_find_property(np, "cap-sd-highspeed", &len))
+ if (of_property_read_bool(np, "cap-sd-highspeed"))
host->caps |= MMC_CAP_SD_HIGHSPEED;
- if (of_find_property(np, "cap-mmc-highspeed", &len))
+ if (of_property_read_bool(np, "cap-mmc-highspeed"))
host->caps |= MMC_CAP_MMC_HIGHSPEED;
- if (of_find_property(np, "sd-uhs-sdr12", &len))
+ if (of_property_read_bool(np, "sd-uhs-sdr12"))
host->caps |= MMC_CAP_UHS_SDR12;
- if (of_find_property(np, "sd-uhs-sdr25", &len))
+ if (of_property_read_bool(np, "sd-uhs-sdr25"))
host->caps |= MMC_CAP_UHS_SDR25;
- if (of_find_property(np, "sd-uhs-sdr50", &len))
+ if (of_property_read_bool(np, "sd-uhs-sdr50"))
host->caps |= MMC_CAP_UHS_SDR50;
- if (of_find_property(np, "sd-uhs-sdr104", &len))
+ if (of_property_read_bool(np, "sd-uhs-sdr104"))
host->caps |= MMC_CAP_UHS_SDR104;
- if (of_find_property(np, "sd-uhs-ddr50", &len))
+ if (of_property_read_bool(np, "sd-uhs-ddr50"))
host->caps |= MMC_CAP_UHS_DDR50;
- if (of_find_property(np, "cap-power-off-card", &len))
+ if (of_property_read_bool(np, "cap-power-off-card"))
host->caps |= MMC_CAP_POWER_OFF_CARD;
- if (of_find_property(np, "cap-sdio-irq", &len))
+ if (of_property_read_bool(np, "cap-mmc-hw-reset"))
+ host->caps |= MMC_CAP_HW_RESET;
+ if (of_property_read_bool(np, "cap-sdio-irq"))
host->caps |= MMC_CAP_SDIO_IRQ;
- if (of_find_property(np, "full-pwr-cycle", &len))
+ if (of_property_read_bool(np, "full-pwr-cycle"))
host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
- if (of_find_property(np, "keep-power-in-suspend", &len))
+ if (of_property_read_bool(np, "keep-power-in-suspend"))
host->pm_caps |= MMC_PM_KEEP_POWER;
- if (of_find_property(np, "enable-sdio-wakeup", &len))
+ if (of_property_read_bool(np, "enable-sdio-wakeup"))
host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
- if (of_find_property(np, "mmc-ddr-1_8v", &len))
+ if (of_property_read_bool(np, "mmc-ddr-1_8v"))
host->caps |= MMC_CAP_1_8V_DDR;
- if (of_find_property(np, "mmc-ddr-1_2v", &len))
+ if (of_property_read_bool(np, "mmc-ddr-1_2v"))
host->caps |= MMC_CAP_1_2V_DDR;
- if (of_find_property(np, "mmc-hs200-1_8v", &len))
+ if (of_property_read_bool(np, "mmc-hs200-1_8v"))
host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
- if (of_find_property(np, "mmc-hs200-1_2v", &len))
+ if (of_property_read_bool(np, "mmc-hs200-1_2v"))
host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
- if (of_find_property(np, "mmc-hs400-1_8v", &len))
+ if (of_property_read_bool(np, "mmc-hs400-1_8v"))
host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
- if (of_find_property(np, "mmc-hs400-1_2v", &len))
+ if (of_property_read_bool(np, "mmc-hs400-1_2v"))
host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
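The conversions above are mechanical: of_find_property() was only being used as a presence test, so of_property_read_bool() expresses the same thing without the unused length argument. A sketch of the pattern (the property name here is hypothetical):

/* Presence-only test of a boolean DT flag; "vendor,some-flag" is made up. */
if (of_property_read_bool(np, "vendor,some-flag"))
	host->caps |= MMC_CAP_SD_HIGHSPEED;	/* whichever capability the flag maps to */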
@@ -496,14 +345,13 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
return NULL;
}
- mmc_host_clk_init(host);
-
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
#ifdef CONFIG_PM
host->pm_notify.notifier_call = mmc_pm_notify;
#endif
+ setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);
/*
* By default, hosts do not support SGIO or large requests.
@@ -545,7 +393,6 @@ int mmc_add_host(struct mmc_host *host)
#ifdef CONFIG_DEBUG_FS
mmc_add_host_debugfs(host);
#endif
- mmc_host_clk_sysfs_init(host);
mmc_start_host(host);
register_pm_notifier(&host->pm_notify);
@@ -575,8 +422,6 @@ void mmc_remove_host(struct mmc_host *host)
device_del(&host->class_dev);
led_trigger_unregister_simple(host->led);
-
- mmc_host_clk_exit(host);
}
EXPORT_SYMBOL(mmc_remove_host);
diff --git a/kernel/drivers/mmc/core/host.h b/kernel/drivers/mmc/core/host.h
index f2ab9e578..992bf5397 100644
--- a/kernel/drivers/mmc/core/host.h
+++ b/kernel/drivers/mmc/core/host.h
@@ -15,5 +15,11 @@
int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
+void mmc_retune_enable(struct mmc_host *host);
+void mmc_retune_disable(struct mmc_host *host);
+void mmc_retune_hold(struct mmc_host *host);
+void mmc_retune_release(struct mmc_host *host);
+int mmc_retune(struct mmc_host *host);
+
#endif
diff --git a/kernel/drivers/mmc/core/mmc.c b/kernel/drivers/mmc/core/mmc.c
index f36c76f8b..3d5087b03 100644
--- a/kernel/drivers/mmc/core/mmc.c
+++ b/kernel/drivers/mmc/core/mmc.c
@@ -21,6 +21,7 @@
#include <linux/mmc/mmc.h>
#include "core.h"
+#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"
@@ -266,8 +267,10 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
* calculate the enhanced data area offset, in bytes
*/
card->ext_csd.enhanced_area_offset =
- (ext_csd[139] << 24) + (ext_csd[138] << 16) +
- (ext_csd[137] << 8) + ext_csd[136];
+ (((unsigned long long)ext_csd[139]) << 24) +
+ (((unsigned long long)ext_csd[138]) << 16) +
+ (((unsigned long long)ext_csd[137]) << 8) +
+ (((unsigned long long)ext_csd[136]));
if (mmc_card_blockaddr(card))
card->ext_csd.enhanced_area_offset <<= 9;
/*
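Casting each EXT_CSD byte to unsigned long long before shifting avoids evaluating ext_csd[139] << 24 in plain int, where a byte value of 0x80 or more would overflow (and sign-extend) before the assignment to the 64-bit field. A self-contained illustration with made-up register values:

/* Illustrative values only, not taken from a real card. */
u8 ext_csd[512] = { [136] = 0x00, [137] = 0x00, [138] = 0x00, [139] = 0x80 };
unsigned long long off;

off = ((unsigned long long)ext_csd[139] << 24) +
      ((unsigned long long)ext_csd[138] << 16) +
      ((unsigned long long)ext_csd[137] << 8) +
      ext_csd[136];		/* 0x80000000: with plain int arithmetic this shift
				 * would overflow/sign-extend before the u64 assignment */
off <<= 9;			/* block-addressed card: 0x10000000000 bytes (1 TiB) */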
@@ -434,6 +437,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.raw_trim_mult =
ext_csd[EXT_CSD_TRIM_MULT];
card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
+ card->ext_csd.raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
if (card->ext_csd.rev >= 4) {
if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
EXT_CSD_PART_SETTING_COMPLETED)
@@ -1036,10 +1040,26 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
return err;
}
+/* Caller must hold re-tuning */
+static int mmc_switch_status(struct mmc_card *card)
+{
+ u32 status;
+ int err;
+
+ err = mmc_send_status(card, &status);
+ if (err)
+ return err;
+
+ return mmc_switch_status_error(card->host, status);
+}
+
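mmc_switch_status() exists so the timing helpers below can confirm a CMD6 switch with CMD13 only after the host side has been reprogrammed, which is why several call sites pass send_status=false to __mmc_switch() and poll afterwards. The recurring pattern, condensed from the hunks that follow:

err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
		   EXT_CSD_TIMING_HS, card->ext_csd.generic_cmd6_time,
		   true, send_status, true);
if (!err) {
	mmc_set_timing(host, MMC_TIMING_MMC_HS);	/* reprogram the host first */
	if (!send_status)				/* then check the card with CMD13 */
		err = mmc_switch_status(card);
}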
static int mmc_select_hs400(struct mmc_card *card)
{
struct mmc_host *host = card->host;
+ bool send_status = true;
+ unsigned int max_dtr;
int err = 0;
+ u8 val;
/*
* HS400 mode requires 8-bit bus width
@@ -1048,23 +1068,35 @@ static int mmc_select_hs400(struct mmc_card *card)
host->ios.bus_width == MMC_BUS_WIDTH_8))
return 0;
- /*
- * Before switching to dual data rate operation for HS400,
- * it is required to convert from HS200 mode to HS mode.
- */
- mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
- mmc_set_bus_speed(card);
+ if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+ send_status = false;
+
+ /* Reduce frequency to HS frequency */
+ max_dtr = card->ext_csd.hs_max_dtr;
+ mmc_set_clock(host, max_dtr);
+ /* Switch card to HS mode */
+ val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
+ EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time,
- true, true, true);
+ true, send_status, true);
if (err) {
pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
mmc_hostname(host), err);
return err;
}
+ /* Set host controller to HS timing */
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+
+ if (!send_status) {
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+ }
+
+ /* Switch card to DDR */
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
EXT_CSD_DDR_BUS_WIDTH_8,
@@ -1075,20 +1107,130 @@ static int mmc_select_hs400(struct mmc_card *card)
return err;
}
+ /* Switch card to HS400 */
+ val = EXT_CSD_TIMING_HS400 |
+ card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400,
+ EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time,
- true, true, true);
+ true, send_status, true);
if (err) {
pr_err("%s: switch to hs400 failed, err:%d\n",
mmc_hostname(host), err);
return err;
}
+ /* Set host controller to HS400 timing and frequency */
mmc_set_timing(host, MMC_TIMING_MMC_HS400);
mmc_set_bus_speed(card);
+ if (!send_status) {
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+ }
+
return 0;
+
+out_err:
+ pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+ __func__, err);
+ return err;
+}
+
+int mmc_hs200_to_hs400(struct mmc_card *card)
+{
+ return mmc_select_hs400(card);
+}
+
+int mmc_hs400_to_hs200(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ bool send_status = true;
+ unsigned int max_dtr;
+ int err;
+ u8 val;
+
+ if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+ send_status = false;
+
+ /* Reduce frequency to HS */
+ max_dtr = card->ext_csd.hs_max_dtr;
+ mmc_set_clock(host, max_dtr);
+
+ /* Switch HS400 to HS DDR */
+ val = EXT_CSD_TIMING_HS;
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
+ val, card->ext_csd.generic_cmd6_time,
+ true, send_status, true);
+ if (err)
+ goto out_err;
+
+ mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+
+ if (!send_status) {
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+ }
+
+ /* Switch HS DDR to HS */
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
+ EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
+ true, send_status, true);
+ if (err)
+ goto out_err;
+
+ mmc_set_timing(host, MMC_TIMING_MMC_HS);
+
+ if (!send_status) {
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+ }
+
+ /* Switch HS to HS200 */
+ val = EXT_CSD_TIMING_HS200 |
+ card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
+ val, card->ext_csd.generic_cmd6_time, true,
+ send_status, true);
+ if (err)
+ goto out_err;
+
+ mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+
+ if (!send_status) {
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+ }
+
+ mmc_set_bus_speed(card);
+
+ return 0;
+
+out_err:
+ pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+ __func__, err);
+ return err;
+}
+
+static void mmc_select_driver_type(struct mmc_card *card)
+{
+ int card_drv_type, drive_strength, drv_type;
+
+ card_drv_type = card->ext_csd.raw_driver_strength |
+ mmc_driver_type_mask(0);
+
+ drive_strength = mmc_select_drive_strength(card,
+ card->ext_csd.hs200_max_dtr,
+ card_drv_type, &drv_type);
+
+ card->drive_strength = drive_strength;
+
+ if (drv_type)
+ mmc_set_driver_type(card->host, drv_type);
}
/*
@@ -1101,7 +1243,10 @@ static int mmc_select_hs400(struct mmc_card *card)
static int mmc_select_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
+ bool send_status = true;
+ unsigned int old_timing;
int err = -EINVAL;
+ u8 val;
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
@@ -1113,20 +1258,41 @@ static int mmc_select_hs200(struct mmc_card *card)
if (err)
goto err;
+ mmc_select_driver_type(card);
+
+ if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+ send_status = false;
+
/*
* Set the bus width(4 or 8) with host's support and
* switch to HS200 mode if bus width is set successfully.
*/
err = mmc_select_bus_width(card);
if (!IS_ERR_VALUE(err)) {
+ val = EXT_CSD_TIMING_HS200 |
+ card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS200,
+ EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time,
- true, true, true);
- if (!err)
- mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+ true, send_status, true);
+ if (err)
+ goto err;
+ old_timing = host->ios.timing;
+ mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+ if (!send_status) {
+ err = mmc_switch_status(card);
+ /*
+ * mmc_select_timing() assumes timing has not changed if
+ * it is a switch error.
+ */
+ if (err == -EBADMSG)
+ mmc_set_timing(host, old_timing);
+ }
}
err:
+ if (err)
+ pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+ __func__, err);
return err;
}
@@ -1511,9 +1677,12 @@ static int mmc_sleep(struct mmc_host *host)
unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
int err;
+ /* Re-tuning can't be done once the card is deselected */
+ mmc_retune_hold(host);
+
err = mmc_deselect_cards(host);
if (err)
- return err;
+ goto out_release;
cmd.opcode = MMC_SLEEP_AWAKE;
cmd.arg = card->rca << 16;
@@ -1534,7 +1703,7 @@ static int mmc_sleep(struct mmc_host *host)
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
- return err;
+ goto out_release;
/*
* If the host does not wait while the card signals busy, then we will
@@ -1545,6 +1714,8 @@ static int mmc_sleep(struct mmc_host *host)
if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
mmc_delay(timeout_ms);
+out_release:
+ mmc_retune_release(host);
return err;
}
@@ -1782,17 +1953,6 @@ static int mmc_runtime_resume(struct mmc_host *host)
return 0;
}
-static int mmc_power_restore(struct mmc_host *host)
-{
- int ret;
-
- mmc_claim_host(host);
- ret = mmc_init_card(host, host->card->ocr, host->card);
- mmc_release_host(host);
-
- return ret;
-}
-
int mmc_can_reset(struct mmc_card *card)
{
u8 rst_n_function;
@@ -1807,7 +1967,6 @@ EXPORT_SYMBOL(mmc_can_reset);
static int mmc_reset(struct mmc_host *host)
{
struct mmc_card *card = host->card;
- u32 status;
if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
return -EOPNOTSUPP;
@@ -1815,22 +1974,14 @@ static int mmc_reset(struct mmc_host *host)
if (!mmc_can_reset(card))
return -EOPNOTSUPP;
- mmc_host_clk_hold(host);
mmc_set_clock(host, host->f_init);
host->ops->hw_reset(host);
- /* If the reset has happened, then a status command will fail */
- if (!mmc_send_status(card, &status)) {
- mmc_host_clk_release(host);
- return -ENOSYS;
- }
-
/* Set initial state and call mmc_set_ios */
mmc_set_initial_state(host);
- mmc_host_clk_release(host);
- return mmc_power_restore(host);
+ return mmc_init_card(host, card->ocr, card);
}
static const struct mmc_bus_ops mmc_ops = {
@@ -1840,7 +1991,6 @@ static const struct mmc_bus_ops mmc_ops = {
.resume = mmc_resume,
.runtime_suspend = mmc_runtime_suspend,
.runtime_resume = mmc_runtime_resume,
- .power_restore = mmc_power_restore,
.alive = mmc_alive,
.shutdown = mmc_shutdown,
.reset = mmc_reset,
@@ -1897,14 +2047,13 @@ int mmc_attach_mmc(struct mmc_host *host)
mmc_release_host(host);
err = mmc_add_card(host->card);
- mmc_claim_host(host);
if (err)
goto remove_card;
+ mmc_claim_host(host);
return 0;
remove_card:
- mmc_release_host(host);
mmc_remove_card(host->card);
mmc_claim_host(host);
host->card = NULL;
diff --git a/kernel/drivers/mmc/core/mmc_ops.c b/kernel/drivers/mmc/core/mmc_ops.c
index 0ea042dc7..1f444269e 100644
--- a/kernel/drivers/mmc/core/mmc_ops.c
+++ b/kernel/drivers/mmc/core/mmc_ops.c
@@ -19,6 +19,7 @@
#include <linux/mmc/mmc.h>
#include "core.h"
+#include "host.h"
#include "mmc_ops.h"
#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
@@ -449,6 +450,21 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
return err;
}
+int mmc_switch_status_error(struct mmc_host *host, u32 status)
+{
+ if (mmc_host_is_spi(host)) {
+ if (status & R1_SPI_ILLEGAL_COMMAND)
+ return -EBADMSG;
+ } else {
+ if (status & 0xFDFFA000)
+ pr_warn("%s: unexpected status %#x after switch\n",
+ mmc_hostname(host), status);
+ if (status & R1_SWITCH_ERROR)
+ return -EBADMSG;
+ }
+ return 0;
+}
+
/**
* __mmc_switch - modify EXT_CSD register
* @card: the MMC card associated with the data transfer
@@ -474,6 +490,8 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
u32 status = 0;
bool use_r1b_resp = use_busy_signal;
+ mmc_retune_hold(host);
+
/*
* If the cmd timeout and the max_busy_timeout of the host are both
* specified, let's validate them. A failure means we need to prevent
@@ -506,11 +524,11 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
- return err;
+ goto out;
/* No need to check card status in case of unblocking command */
if (!use_busy_signal)
- return 0;
+ goto out;
/*
* CRC errors shall only be ignored in cases were CMD13 is used to poll
@@ -529,7 +547,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (send_status) {
err = __mmc_send_status(card, &status, ignore_crc);
if (err)
- return err;
+ goto out;
}
if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
break;
@@ -543,31 +561,24 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
*/
if (!send_status) {
mmc_delay(timeout_ms);
- return 0;
+ goto out;
}
/* Timeout if the device never leaves the program state. */
if (time_after(jiffies, timeout)) {
pr_err("%s: Card stuck in programming state! %s\n",
mmc_hostname(host), __func__);
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto out;
}
} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
- if (mmc_host_is_spi(host)) {
- if (status & R1_SPI_ILLEGAL_COMMAND)
- return -EBADMSG;
- } else {
- if (status & 0xFDFFA000)
- pr_warn("%s: unexpected status %#x after switch\n",
- mmc_hostname(host), status);
- if (status & R1_SWITCH_ERROR)
- return -EBADMSG;
- }
+ err = mmc_switch_status_error(host, status);
+out:
+ mmc_retune_release(host);
- return 0;
+ return err;
}
-EXPORT_SYMBOL_GPL(__mmc_switch);
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms)
@@ -577,7 +588,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
}
EXPORT_SYMBOL_GPL(mmc_switch);
-int mmc_send_tuning(struct mmc_host *host)
+int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
@@ -587,16 +598,13 @@ int mmc_send_tuning(struct mmc_host *host)
const u8 *tuning_block_pattern;
int size, err = 0;
u8 *data_buf;
- u32 opcode;
if (ios->bus_width == MMC_BUS_WIDTH_8) {
tuning_block_pattern = tuning_blk_pattern_8bit;
size = sizeof(tuning_blk_pattern_8bit);
- opcode = MMC_SEND_TUNING_BLOCK_HS200;
} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
tuning_block_pattern = tuning_blk_pattern_4bit;
size = sizeof(tuning_blk_pattern_4bit);
- opcode = MMC_SEND_TUNING_BLOCK;
} else
return -EINVAL;
@@ -627,6 +635,9 @@ int mmc_send_tuning(struct mmc_host *host)
mmc_wait_for_req(host, &mrq);
+ if (cmd_error)
+ *cmd_error = cmd.error;
+
if (cmd.error) {
err = cmd.error;
goto out;
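With the tuning opcode now supplied by the caller (and the raw command error optionally returned), a host driver's ->execute_tuning() can sweep its sample phases with mmc_send_tuning() directly; the dw_mmc changes later in this patch do exactly that. A hypothetical, minimal sweep (my_set_sample_phase() and the phase count are made up):

static int my_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	int phase, cmd_error;

	for (phase = 0; phase < 8; phase++) {		/* 8 phases: illustrative only */
		my_set_sample_phase(mmc, phase);	/* hypothetical controller hook */
		if (!mmc_send_tuning(mmc, opcode, &cmd_error))
			return 0;			/* tuning block matched at this phase */
	}

	return -EIO;					/* no phase produced a clean transfer */
}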
diff --git a/kernel/drivers/mmc/core/mmc_ops.h b/kernel/drivers/mmc/core/mmc_ops.h
index 6f4b00ed9..f1b8e81aa 100644
--- a/kernel/drivers/mmc/core/mmc_ops.h
+++ b/kernel/drivers/mmc/core/mmc_ops.h
@@ -27,6 +27,10 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
int mmc_can_ext_csd(struct mmc_card *card);
+int mmc_switch_status_error(struct mmc_host *host, u32 status);
+int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+ unsigned int timeout_ms, bool use_busy_signal, bool send_status,
+ bool ignore_crc);
#endif
diff --git a/kernel/drivers/mmc/core/pwrseq_emmc.c b/kernel/drivers/mmc/core/pwrseq_emmc.c
index 9d6d2fb21..ad4f94ec7 100644
--- a/kernel/drivers/mmc/core/pwrseq_emmc.c
+++ b/kernel/drivers/mmc/core/pwrseq_emmc.c
@@ -76,7 +76,7 @@ struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
if (!pwrseq)
return ERR_PTR(-ENOMEM);
- pwrseq->reset_gpio = gpiod_get_index(dev, "reset", 0, GPIOD_OUT_LOW);
+ pwrseq->reset_gpio = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(pwrseq->reset_gpio)) {
ret = PTR_ERR(pwrseq->reset_gpio);
goto free;
@@ -84,11 +84,11 @@ struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
/*
* register reset handler to ensure emmc reset also from
- * emergency_reboot(), priority 129 schedules it just before
- * system reboot
+ * emergency_reboot(); priority 255 is the highest priority,
+ * so it will be executed before any system reboot handler.
*/
pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
- pwrseq->reset_nb.priority = 129;
+ pwrseq->reset_nb.priority = 255;
register_restart_handler(&pwrseq->reset_nb);
pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
diff --git a/kernel/drivers/mmc/core/pwrseq_simple.c b/kernel/drivers/mmc/core/pwrseq_simple.c
index 0b14b83a5..d10538bb5 100644
--- a/kernel/drivers/mmc/core/pwrseq_simple.c
+++ b/kernel/drivers/mmc/core/pwrseq_simple.c
@@ -23,18 +23,21 @@ struct mmc_pwrseq_simple {
struct mmc_pwrseq pwrseq;
bool clk_enabled;
struct clk *ext_clk;
- int nr_gpios;
- struct gpio_desc *reset_gpios[0];
+ struct gpio_descs *reset_gpios;
};
static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
int value)
{
int i;
+ struct gpio_descs *reset_gpios = pwrseq->reset_gpios;
+ int values[reset_gpios->ndescs];
- for (i = 0; i < pwrseq->nr_gpios; i++)
- if (!IS_ERR(pwrseq->reset_gpios[i]))
- gpiod_set_value_cansleep(pwrseq->reset_gpios[i], value);
+ for (i = 0; i < reset_gpios->ndescs; i++)
+ values[i] = value;
+
+ gpiod_set_array_value_cansleep(reset_gpios->ndescs, reset_gpios->desc,
+ values);
}
static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
@@ -75,11 +78,8 @@ static void mmc_pwrseq_simple_free(struct mmc_host *host)
{
struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
struct mmc_pwrseq_simple, pwrseq);
- int i;
- for (i = 0; i < pwrseq->nr_gpios; i++)
- if (!IS_ERR(pwrseq->reset_gpios[i]))
- gpiod_put(pwrseq->reset_gpios[i]);
+ gpiod_put_array(pwrseq->reset_gpios);
if (!IS_ERR(pwrseq->ext_clk))
clk_put(pwrseq->ext_clk);
@@ -98,14 +98,9 @@ struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
struct device *dev)
{
struct mmc_pwrseq_simple *pwrseq;
- int i, nr_gpios, ret = 0;
-
- nr_gpios = of_gpio_named_count(dev->of_node, "reset-gpios");
- if (nr_gpios < 0)
- nr_gpios = 0;
+ int ret = 0;
- pwrseq = kzalloc(sizeof(struct mmc_pwrseq_simple) + nr_gpios *
- sizeof(struct gpio_desc *), GFP_KERNEL);
+ pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
if (!pwrseq)
return ERR_PTR(-ENOMEM);
@@ -116,22 +111,12 @@ struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
goto free;
}
- for (i = 0; i < nr_gpios; i++) {
- pwrseq->reset_gpios[i] = gpiod_get_index(dev, "reset", i,
- GPIOD_OUT_HIGH);
- if (IS_ERR(pwrseq->reset_gpios[i]) &&
- PTR_ERR(pwrseq->reset_gpios[i]) != -ENOENT &&
- PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) {
- ret = PTR_ERR(pwrseq->reset_gpios[i]);
-
- while (i--)
- gpiod_put(pwrseq->reset_gpios[i]);
-
- goto clk_put;
- }
+ pwrseq->reset_gpios = gpiod_get_array(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pwrseq->reset_gpios)) {
+ ret = PTR_ERR(pwrseq->reset_gpios);
+ goto clk_put;
}
- pwrseq->nr_gpios = nr_gpios;
pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
return &pwrseq->pwrseq;
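The driver now relies on the gpiod bulk API instead of tracking a count and per-index descriptors itself. A compact, hypothetical helper showing the calls involved (the fixed bound on values[] is only for the sketch; the driver sizes its array from ndescs at run time):

/* Hypothetical helper driving all "reset" GPIOs of a device to one level. */
static int set_all_reset_gpios(struct device *dev, int value)
{
	struct gpio_descs *descs = gpiod_get_array(dev, "reset", GPIOD_OUT_HIGH);
	int i, n, values[8];	/* illustrative fixed bound, see note above */

	if (IS_ERR(descs))
		return PTR_ERR(descs);

	n = min_t(unsigned int, descs->ndescs, ARRAY_SIZE(values));
	for (i = 0; i < n; i++)
		values[i] = value;

	gpiod_set_array_value_cansleep(n, descs->desc, values);
	gpiod_put_array(descs);
	return 0;
}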
diff --git a/kernel/drivers/mmc/core/quirks.c b/kernel/drivers/mmc/core/quirks.c
index dd1d1e0fe..fad660b95 100644
--- a/kernel/drivers/mmc/core/quirks.c
+++ b/kernel/drivers/mmc/core/quirks.c
@@ -35,25 +35,7 @@
#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
#endif
-/*
- * This hook just adds a quirk for all sdio devices
- */
-static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
-{
- if (mmc_card_sdio(card))
- card->quirks |= data;
-}
-
static const struct mmc_fixup mmc_fixup_methods[] = {
- /* by default sdio devices are considered CLK_GATING broken */
- /* good cards will be whitelisted as they are tested */
- SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID,
- add_quirk_for_sdio_devices,
- MMC_QUIRK_BROKEN_CLK_GATING),
-
- SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
- remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
-
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
diff --git a/kernel/drivers/mmc/core/sd.c b/kernel/drivers/mmc/core/sd.c
index 31a9ef256..967535d76 100644
--- a/kernel/drivers/mmc/core/sd.c
+++ b/kernel/drivers/mmc/core/sd.c
@@ -357,8 +357,6 @@ int mmc_sd_switch_hs(struct mmc_card *card)
if (card->sw_caps.hs_max_dtr == 0)
return 0;
- err = -EIO;
-
status = kmalloc(64, GFP_KERNEL);
if (!status) {
pr_err("%s: could not allocate a buffer for "
@@ -386,64 +384,31 @@ out:
static int sd_select_driver_type(struct mmc_card *card, u8 *status)
{
- int host_drv_type = SD_DRIVER_TYPE_B;
- int card_drv_type = SD_DRIVER_TYPE_B;
- int drive_strength;
+ int card_drv_type, drive_strength, drv_type;
int err;
- /*
- * If the host doesn't support any of the Driver Types A,C or D,
- * or there is no board specific handler then default Driver
- * Type B is used.
- */
- if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C
- | MMC_CAP_DRIVER_TYPE_D)))
- return 0;
-
- if (!card->host->ops->select_drive_strength)
- return 0;
-
- if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
- host_drv_type |= SD_DRIVER_TYPE_A;
-
- if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
- host_drv_type |= SD_DRIVER_TYPE_C;
-
- if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
- host_drv_type |= SD_DRIVER_TYPE_D;
-
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
- card_drv_type |= SD_DRIVER_TYPE_A;
-
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
- card_drv_type |= SD_DRIVER_TYPE_C;
-
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
- card_drv_type |= SD_DRIVER_TYPE_D;
+ card->drive_strength = 0;
- /*
- * The drive strength that the hardware can support
- * depends on the board design. Pass the appropriate
- * information and let the hardware specific code
- * return what is possible given the options
- */
- mmc_host_clk_hold(card->host);
- drive_strength = card->host->ops->select_drive_strength(
- card->sw_caps.uhs_max_dtr,
- host_drv_type, card_drv_type);
- mmc_host_clk_release(card->host);
+ card_drv_type = card->sw_caps.sd3_drv_type | SD_DRIVER_TYPE_B;
- err = mmc_sd_switch(card, 1, 2, drive_strength, status);
- if (err)
- return err;
+ drive_strength = mmc_select_drive_strength(card,
+ card->sw_caps.uhs_max_dtr,
+ card_drv_type, &drv_type);
- if ((status[15] & 0xF) != drive_strength) {
- pr_warn("%s: Problem setting drive strength!\n",
- mmc_hostname(card->host));
- return 0;
+ if (drive_strength) {
+ err = mmc_sd_switch(card, 1, 2, drive_strength, status);
+ if (err)
+ return err;
+ if ((status[15] & 0xF) != drive_strength) {
+ pr_warn("%s: Problem setting drive strength!\n",
+ mmc_hostname(card->host));
+ return 0;
+ }
+ card->drive_strength = drive_strength;
}
- mmc_set_driver_type(card->host, drive_strength);
+ if (drv_type)
+ mmc_set_driver_type(card->host, drv_type);
return 0;
}
@@ -661,9 +626,25 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
* SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
*/
if (!mmc_host_is_spi(card->host) &&
- (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
- card->sd_bus_speed == UHS_SDR104_BUS_SPEED))
+ (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
+ card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
+ card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
err = mmc_execute_tuning(card);
+
+ /*
+ * As SD Specifications Part1 Physical Layer Specification
+ * Version 3.01 says, CMD19 tuning is available for unlocked
+ * cards in transfer state of 1.8V signaling mode. The small
+ * difference between v3.00 and 3.01 spec means that CMD19
+ * tuning is also available for DDR50 mode.
+ */
+ if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
+ pr_warn("%s: ddr50 tuning failed\n",
+ mmc_hostname(card->host));
+ err = 0;
+ }
+ }
+
out:
kfree(status);
@@ -804,6 +785,26 @@ int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card)
return 0;
}
+static int mmc_sd_get_ro(struct mmc_host *host)
+{
+ int ro;
+
+ /*
+ * Some systems don't feature a write-protect pin and don't need one.
+ * E.g. because they only have a micro-SD card slot. For those systems
+ * assume that the SD card is always read-write.
+ */
+ if (host->caps2 & MMC_CAP2_NO_WRITE_PROTECT)
+ return 0;
+
+ if (!host->ops->get_ro)
+ return -1;
+
+ ro = host->ops->get_ro(host);
+
+ return ro;
+}
+
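This helper pairs with the new "disable-wp" handling in mmc_of_parse() earlier in the patch: boards without a write-protect line set MMC_CAP2_NO_WRITE_PROTECT, and the core then reports the card as writable without calling ->get_ro(). The relevant wiring, collected in one place for reference:

/* In mmc_of_parse() (see the host.c hunk above): */
if (of_property_read_bool(np, "disable-wp"))
	host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;

/* Consequence in mmc_sd_get_ro(): the switch is treated as "not write protected". */
if (host->caps2 & MMC_CAP2_NO_WRITE_PROTECT)
	return 0;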
int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
bool reinit)
{
@@ -855,13 +856,7 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
* Check if read-only switch is active.
*/
if (!reinit) {
- int ro = -1;
-
- if (host->ops->get_ro) {
- mmc_host_clk_hold(card->host);
- ro = host->ops->get_ro(host);
- mmc_host_clk_release(card->host);
- }
+ int ro = mmc_sd_get_ro(host);
if (ro < 0) {
pr_warn("%s: host does not support reading read-only switch, assuming write-enable\n",
@@ -1181,21 +1176,10 @@ static int mmc_sd_runtime_resume(struct mmc_host *host)
return 0;
}
-static int mmc_sd_power_restore(struct mmc_host *host)
-{
- int ret;
-
- mmc_claim_host(host);
- ret = mmc_sd_init_card(host, host->card->ocr, host->card);
- mmc_release_host(host);
-
- return ret;
-}
-
static int mmc_sd_reset(struct mmc_host *host)
{
mmc_power_cycle(host, host->card->ocr);
- return mmc_sd_power_restore(host);
+ return mmc_sd_init_card(host, host->card->ocr, host->card);
}
static const struct mmc_bus_ops mmc_sd_ops = {
@@ -1205,7 +1189,6 @@ static const struct mmc_bus_ops mmc_sd_ops = {
.runtime_resume = mmc_sd_runtime_resume,
.suspend = mmc_sd_suspend,
.resume = mmc_sd_resume,
- .power_restore = mmc_sd_power_restore,
.alive = mmc_sd_alive,
.shutdown = mmc_sd_suspend,
.reset = mmc_sd_reset,
@@ -1260,14 +1243,13 @@ int mmc_attach_sd(struct mmc_host *host)
mmc_release_host(host);
err = mmc_add_card(host->card);
- mmc_claim_host(host);
if (err)
goto remove_card;
+ mmc_claim_host(host);
return 0;
remove_card:
- mmc_release_host(host);
mmc_remove_card(host->card);
host->card = NULL;
mmc_claim_host(host);
diff --git a/kernel/drivers/mmc/core/sdio.c b/kernel/drivers/mmc/core/sdio.c
index 5bc6c7dbb..467b3cf80 100644
--- a/kernel/drivers/mmc/core/sdio.c
+++ b/kernel/drivers/mmc/core/sdio.c
@@ -402,69 +402,38 @@ static unsigned char host_drive_to_sdio_drive(int host_strength)
static void sdio_select_driver_type(struct mmc_card *card)
{
- int host_drv_type = SD_DRIVER_TYPE_B;
- int card_drv_type = SD_DRIVER_TYPE_B;
- int drive_strength;
+ int card_drv_type, drive_strength, drv_type;
unsigned char card_strength;
int err;
- /*
- * If the host doesn't support any of the Driver Types A,C or D,
- * or there is no board specific handler then default Driver
- * Type B is used.
- */
- if (!(card->host->caps &
- (MMC_CAP_DRIVER_TYPE_A |
- MMC_CAP_DRIVER_TYPE_C |
- MMC_CAP_DRIVER_TYPE_D)))
- return;
-
- if (!card->host->ops->select_drive_strength)
- return;
-
- if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
- host_drv_type |= SD_DRIVER_TYPE_A;
-
- if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
- host_drv_type |= SD_DRIVER_TYPE_C;
+ card->drive_strength = 0;
- if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
- host_drv_type |= SD_DRIVER_TYPE_D;
+ card_drv_type = card->sw_caps.sd3_drv_type | SD_DRIVER_TYPE_B;
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
- card_drv_type |= SD_DRIVER_TYPE_A;
-
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
- card_drv_type |= SD_DRIVER_TYPE_C;
-
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
- card_drv_type |= SD_DRIVER_TYPE_D;
-
- /*
- * The drive strength that the hardware can support
- * depends on the board design. Pass the appropriate
- * information and let the hardware specific code
- * return what is possible given the options
- */
- drive_strength = card->host->ops->select_drive_strength(
- card->sw_caps.uhs_max_dtr,
- host_drv_type, card_drv_type);
+ drive_strength = mmc_select_drive_strength(card,
+ card->sw_caps.uhs_max_dtr,
+ card_drv_type, &drv_type);
- /* if error just use default for drive strength B */
- err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0,
- &card_strength);
- if (err)
- return;
+ if (drive_strength) {
+ /* if error just use default for drive strength B */
+ err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0,
+ &card_strength);
+ if (err)
+ return;
- card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT);
- card_strength |= host_drive_to_sdio_drive(drive_strength);
+ card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT);
+ card_strength |= host_drive_to_sdio_drive(drive_strength);
- err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH,
- card_strength, NULL);
+ /* if error default to drive strength B */
+ err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH,
+ card_strength, NULL);
+ if (err)
+ return;
+ card->drive_strength = drive_strength;
+ }
- /* if error default to drive strength B */
- if (!err)
- mmc_set_driver_type(card->host, drive_strength);
+ if (drv_type)
+ mmc_set_driver_type(card->host, drv_type);
}
@@ -566,8 +535,8 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card)
* SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
*/
if (!mmc_host_is_spi(card->host) &&
- ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)))
+ ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) ||
+ (card->host->ios.timing == MMC_TIMING_UHS_SDR104)))
err = mmc_execute_tuning(card);
out:
return err;
@@ -661,7 +630,7 @@ try_again:
*/
if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
- ocr);
+ ocr_card);
if (err == -EAGAIN) {
sdio_reset(host);
mmc_go_idle(host);
@@ -928,14 +897,19 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host)
*/
static int mmc_sdio_suspend(struct mmc_host *host)
{
- if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
- mmc_claim_host(host);
+ mmc_claim_host(host);
+
+ if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
sdio_disable_wide(host->card);
- mmc_release_host(host);
- }
- if (!mmc_card_keep_power(host))
+ if (!mmc_card_keep_power(host)) {
mmc_power_off(host);
+ } else if (host->retune_period) {
+ mmc_retune_timer_stop(host);
+ mmc_retune_needed(host);
+ }
+
+ mmc_release_host(host);
return 0;
}
@@ -982,13 +956,10 @@ static int mmc_sdio_resume(struct mmc_host *host)
}
if (!err && host->sdio_irqs) {
- if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
+ if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
wake_up_process(host->sdio_irq_thread);
- } else if (host->caps & MMC_CAP_SDIO_IRQ) {
- mmc_host_clk_hold(host);
+ else if (host->caps & MMC_CAP_SDIO_IRQ)
host->ops->enable_sdio_irq(host, 1);
- mmc_host_clk_release(host);
- }
}
mmc_release_host(host);
@@ -1045,14 +1016,29 @@ out:
static int mmc_sdio_runtime_suspend(struct mmc_host *host)
{
/* No references to the card, cut the power to it. */
+ mmc_claim_host(host);
mmc_power_off(host);
+ mmc_release_host(host);
+
return 0;
}
static int mmc_sdio_runtime_resume(struct mmc_host *host)
{
+ int ret;
+
/* Restore power and re-initialize. */
+ mmc_claim_host(host);
mmc_power_up(host, host->card->ocr);
+ ret = mmc_sdio_power_restore(host);
+ mmc_release_host(host);
+
+ return ret;
+}
+
+static int mmc_sdio_reset(struct mmc_host *host)
+{
+ mmc_power_cycle(host, host->card->ocr);
return mmc_sdio_power_restore(host);
}
@@ -1066,6 +1052,7 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
.runtime_resume = mmc_sdio_runtime_resume,
.power_restore = mmc_sdio_power_restore,
.alive = mmc_sdio_alive,
+ .reset = mmc_sdio_reset,
};
diff --git a/kernel/drivers/mmc/core/sdio_bus.c b/kernel/drivers/mmc/core/sdio_bus.c
index bee02e644..7e327a6dd 100644
--- a/kernel/drivers/mmc/core/sdio_bus.c
+++ b/kernel/drivers/mmc/core/sdio_bus.c
@@ -137,6 +137,10 @@ static int sdio_bus_probe(struct device *dev)
if (!id)
return -ENODEV;
+ ret = dev_pm_domain_attach(dev, false);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
/* Unbound SDIO functions are always suspended.
* During probe, the function is set active and the usage count
* is incremented. If the driver supports runtime PM,
@@ -166,6 +170,7 @@ static int sdio_bus_probe(struct device *dev)
disable_runtimepm:
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_noidle(dev);
+ dev_pm_domain_detach(dev, false);
return ret;
}
@@ -197,6 +202,8 @@ static int sdio_bus_remove(struct device *dev)
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_sync(dev);
+ dev_pm_domain_detach(dev, false);
+
return ret;
}
@@ -316,10 +323,8 @@ int sdio_add_func(struct sdio_func *func)
sdio_set_of_node(func);
sdio_acpi_set_handle(func);
ret = device_add(&func->dev);
- if (ret == 0) {
+ if (ret == 0)
sdio_func_set_present(func);
- dev_pm_domain_attach(&func->dev, false);
- }
return ret;
}
@@ -335,7 +340,6 @@ void sdio_remove_func(struct sdio_func *func)
if (!sdio_func_present(func))
return;
- dev_pm_domain_detach(&func->dev, false);
device_del(&func->dev);
of_node_put(func->dev.of_node);
put_device(&func->dev);
diff --git a/kernel/drivers/mmc/core/sdio_irq.c b/kernel/drivers/mmc/core/sdio_irq.c
index 09cc67d02..91bbbfb29 100644
--- a/kernel/drivers/mmc/core/sdio_irq.c
+++ b/kernel/drivers/mmc/core/sdio_irq.c
@@ -168,21 +168,15 @@ static int sdio_irq_thread(void *_host)
}
set_current_state(TASK_INTERRUPTIBLE);
- if (host->caps & MMC_CAP_SDIO_IRQ) {
- mmc_host_clk_hold(host);
+ if (host->caps & MMC_CAP_SDIO_IRQ)
host->ops->enable_sdio_irq(host, 1);
- mmc_host_clk_release(host);
- }
if (!kthread_should_stop())
schedule_timeout(period);
set_current_state(TASK_RUNNING);
} while (!kthread_should_stop());
- if (host->caps & MMC_CAP_SDIO_IRQ) {
- mmc_host_clk_hold(host);
+ if (host->caps & MMC_CAP_SDIO_IRQ)
host->ops->enable_sdio_irq(host, 0);
- mmc_host_clk_release(host);
- }
pr_debug("%s: IRQ thread exiting with code %d\n",
mmc_hostname(host), ret);
@@ -208,9 +202,7 @@ static int sdio_card_irq_get(struct mmc_card *card)
return err;
}
} else if (host->caps & MMC_CAP_SDIO_IRQ) {
- mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 1);
- mmc_host_clk_release(host);
}
}
@@ -229,9 +221,7 @@ static int sdio_card_irq_put(struct mmc_card *card)
atomic_set(&host->sdio_irq_thread_abort, 1);
kthread_stop(host->sdio_irq_thread);
} else if (host->caps & MMC_CAP_SDIO_IRQ) {
- mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 0);
- mmc_host_clk_release(host);
}
}
diff --git a/kernel/drivers/mmc/core/sdio_ops.h b/kernel/drivers/mmc/core/sdio_ops.h
index 12a4d3ab1..5660c7f45 100644
--- a/kernel/drivers/mmc/core/sdio_ops.h
+++ b/kernel/drivers/mmc/core/sdio_ops.h
@@ -12,6 +12,8 @@
#ifndef _MMC_SDIO_OPS_H
#define _MMC_SDIO_OPS_H
+#include <linux/mmc/sdio.h>
+
int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
unsigned addr, u8 in, u8* out);
@@ -19,5 +21,10 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);
int sdio_reset(struct mmc_host *host);
+static inline bool mmc_is_io_op(u32 opcode)
+{
+ return opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED;
+}
+
#endif
diff --git a/kernel/drivers/mmc/host/Kconfig b/kernel/drivers/mmc/host/Kconfig
index b1f837e74..1dee53363 100644
--- a/kernel/drivers/mmc/host/Kconfig
+++ b/kernel/drivers/mmc/host/Kconfig
@@ -67,7 +67,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
has the effect of scrambling the addresses and formats of data
accessed in sizes other than the datum size.
- This is the case for the Freescale eSDHC and Nintendo Wii SDHCI.
+ This is the case for the Nintendo Wii SDHCI.
config MMC_SDHCI_PCI
tristate "SDHCI support on PCI bus"
@@ -129,11 +129,19 @@ config MMC_SDHCI_OF_ARASAN
If unsure, say N.
+config MMC_SDHCI_OF_AT91
+ tristate "SDHCI OF support for the Atmel SDMMC controller"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the Atmel SDMMC driver
+
config MMC_SDHCI_OF_ESDHC
tristate "SDHCI OF support for the Freescale eSDHC controller"
depends on MMC_SDHCI_PLTFM
- depends on PPC
- select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+ depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the Freescale eSDHC controller support.
@@ -219,6 +227,7 @@ config MMC_SDHCI_SIRF
tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs"
depends on ARCH_SIRF
depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the SDHCI support for SiRF System-on-Chip devices.
@@ -357,7 +366,7 @@ config MMC_OMAP
config MMC_OMAP_HS
tristate "TI OMAP High Speed Multimedia Card Interface support"
depends on HAS_DMA
- depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
help
This selects the TI OMAP High Speed Multimedia card Interface.
If you have an omap2plus board with a Multimedia Card slot,
@@ -464,7 +473,8 @@ config MMC_DAVINCI
config MMC_GOLDFISH
tristate "goldfish qemu Multimedia Card Interface support"
- depends on GOLDFISH
+ depends on HAS_DMA
+ depends on GOLDFISH || COMPILE_TEST
help
This selects the Goldfish Multimedia card Interface emulation
found on the Goldfish Android virtual device emulation.
@@ -606,15 +616,7 @@ config MMC_DW
help
This selects support for the Synopsys DesignWare Mobile Storage IP
block, this provides host support for SD and MMC interfaces, in both
- PIO and external DMA modes.
-
-config MMC_DW_IDMAC
- bool "Internal DMAC interface"
- depends on MMC_DW
- help
- This selects support for the internal DMAC block within the Synopsys
- Designware Mobile Storage IP block. This disables the external DMA
- interface.
+ PIO, internal DMA mode and external DMA mode.
config MMC_DW_PLTFM
tristate "Synopsys Designware MCI Support as platform device"
@@ -643,7 +645,6 @@ config MMC_DW_K3
tristate "K3 specific extensions for Synopsys DW Memory Card Interface"
depends on MMC_DW
select MMC_DW_PLTFM
- select MMC_DW_IDMAC
help
This selects support for Hisilicon K3 SoC specific extensions to the
Synopsys DesignWare Memory Card Interface driver. Select this option
@@ -775,3 +776,12 @@ config MMC_TOSHIBA_PCI
tristate "Toshiba Type A SD/MMC Card Interface Driver"
depends on PCI
help
+
+config MMC_MTK
+ tristate "MediaTek SD/MMC Card Interface support"
+ depends on HAS_DMA
+ help
+ This selects the MediaTek(R) Secure Digital and MultiMedia Card Interface.
+ If you have a machine with an integrated SD/MMC card reader, say Y or M here.
+ This is needed if support for any SD/SDIO/MMC devices is required.
+ If unsure, say N.
diff --git a/kernel/drivers/mmc/host/Makefile b/kernel/drivers/mmc/host/Makefile
index e3ab5b968..3595f83e8 100644
--- a/kernel/drivers/mmc/host/Makefile
+++ b/kernel/drivers/mmc/host/Makefile
@@ -9,8 +9,8 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
+sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
-obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-o2micro.o
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
@@ -20,6 +20,7 @@ obj-$(CONFIG_MMC_SDHCI_F_SDH30) += sdhci_f_sdh30.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
+obj-$(CONFIG_MMC_MTK) += mtk-sd.o
obj-$(CONFIG_MMC_OMAP) += omap.o
obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o
obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
@@ -66,6 +67,7 @@ obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
obj-$(CONFIG_MMC_SDHCI_OF_ARASAN) += sdhci-of-arasan.o
+obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
diff --git a/kernel/drivers/mmc/host/android-goldfish.c b/kernel/drivers/mmc/host/android-goldfish.c
index 8b4e20a3f..dca5518b0 100644
--- a/kernel/drivers/mmc/host/android-goldfish.c
+++ b/kernel/drivers/mmc/host/android-goldfish.c
@@ -42,10 +42,10 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/clk.h>
+#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/scatterlist.h>
#include <asm/types.h>
#include <asm/io.h>
@@ -118,7 +118,7 @@ struct goldfish_mmc_host {
struct mmc_host *mmc;
struct device *dev;
unsigned char id; /* 16xx chips have 2 MMC blocks */
- void __iomem *virt_base;
+ void *virt_base;
unsigned int phys_base;
int irq;
unsigned char bus_mode;
diff --git a/kernel/drivers/mmc/host/atmel-mci.c b/kernel/drivers/mmc/host/atmel-mci.c
index 9a39e0b7e..bf62e429f 100644
--- a/kernel/drivers/mmc/host/atmel-mci.c
+++ b/kernel/drivers/mmc/host/atmel-mci.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/types.h>
-#include <linux/platform_data/atmel.h>
#include <linux/platform_data/mmc-atmel-mci.h>
#include <linux/mmc/host.h>
diff --git a/kernel/drivers/mmc/host/davinci_mmc.c b/kernel/drivers/mmc/host/davinci_mmc.c
index 1625f908d..ea2a2ebc6 100644
--- a/kernel/drivers/mmc/host/davinci_mmc.c
+++ b/kernel/drivers/mmc/host/davinci_mmc.c
@@ -1161,7 +1161,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host)
mmc_davinci_reset_ctrl(host, 0);
}
-static struct platform_device_id davinci_mmc_devtype[] = {
+static const struct platform_device_id davinci_mmc_devtype[] = {
{
.name = "dm6441-mmc",
.driver_data = MMC_CTLR_VERSION_1,
diff --git a/kernel/drivers/mmc/host/dw_mmc-exynos.c b/kernel/drivers/mmc/host/dw_mmc-exynos.c
index e761eb1b1..3a7e835a0 100644
--- a/kernel/drivers/mmc/host/dw_mmc-exynos.c
+++ b/kernel/drivers/mmc/host/dw_mmc-exynos.c
@@ -446,7 +446,7 @@ out:
return loc;
}
-static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot)
+static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
{
struct dw_mci *host = slot->host;
struct dw_mci_exynos_priv_data *priv = host->priv;
@@ -461,7 +461,7 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot)
mci_writel(host, TMOUT, ~0);
smpl = dw_mci_exynos_move_next_clksmpl(host);
- if (!mmc_send_tuning(mmc))
+ if (!mmc_send_tuning(mmc, opcode, NULL))
candiates |= (1 << smpl);
} while (start_smpl != smpl);
@@ -556,4 +556,4 @@ module_platform_driver(dw_mci_exynos_pltfm_driver);
MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension");
MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:dwmmc-exynos");
+MODULE_ALIAS("platform:dwmmc_exynos");
diff --git a/kernel/drivers/mmc/host/dw_mmc-k3.c b/kernel/drivers/mmc/host/dw_mmc-k3.c
index 650f9cc3f..63c2e2ed1 100644
--- a/kernel/drivers/mmc/host/dw_mmc-k3.c
+++ b/kernel/drivers/mmc/host/dw_mmc-k3.c
@@ -8,16 +8,30 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
#include <linux/mmc/host.h>
#include <linux/mmc/dw_mmc.h>
+#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
+/*
+ * The hi6220 SD controller only supports I/O voltages of 1.8V and 3V;
+ * AO_SCTRL_SEL18 also needs to be configured accordingly.
+ */
+#define AO_SCTRL_SEL18 BIT(10)
+#define AO_SCTRL_CTRL3 0x40C
+
+struct k3_priv {
+ struct regmap *reg;
+};
+
static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
int ret;
@@ -33,8 +47,93 @@ static const struct dw_mci_drv_data k3_drv_data = {
.set_ios = dw_mci_k3_set_ios,
};
+static int dw_mci_hi6220_parse_dt(struct dw_mci *host)
+{
+ struct k3_priv *priv;
+
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->reg = syscon_regmap_lookup_by_phandle(host->dev->of_node,
+ "hisilicon,peripheral-syscon");
+ if (IS_ERR(priv->reg))
+ priv->reg = NULL;
+
+ host->priv = priv;
+ return 0;
+}
+
+static int dw_mci_hi6220_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct k3_priv *priv;
+ struct dw_mci *host;
+ int min_uv, max_uv;
+ int ret;
+
+ host = slot->host;
+ priv = host->priv;
+
+ if (!priv || !priv->reg)
+ return 0;
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ ret = regmap_update_bits(priv->reg, AO_SCTRL_CTRL3,
+ AO_SCTRL_SEL18, 0);
+ min_uv = 3000000;
+ max_uv = 3000000;
+ } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ ret = regmap_update_bits(priv->reg, AO_SCTRL_CTRL3,
+ AO_SCTRL_SEL18, AO_SCTRL_SEL18);
+ min_uv = 1800000;
+ max_uv = 1800000;
+ } else {
+ dev_dbg(host->dev, "voltage not supported\n");
+ return -EINVAL;
+ }
+
+ if (ret) {
+ dev_dbg(host->dev, "switch voltage failed\n");
+ return ret;
+ }
+
+ if (IS_ERR_OR_NULL(mmc->supply.vqmmc))
+ return 0;
+
+ ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
+ if (ret) {
+ dev_dbg(host->dev, "Regulator set error %d: %d - %d\n",
+ ret, min_uv, max_uv);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dw_mci_hi6220_set_ios(struct dw_mci *host, struct mmc_ios *ios)
+{
+ int ret;
+ unsigned int clock;
+
+ clock = (ios->clock <= 25000000) ? 25000000 : ios->clock;
+
+ ret = clk_set_rate(host->biu_clk, clock);
+ if (ret)
+ dev_warn(host->dev, "failed to set rate %uHz\n", clock);
+
+ host->bus_hz = clk_get_rate(host->biu_clk);
+}
+
+static const struct dw_mci_drv_data hi6220_data = {
+ .switch_voltage = dw_mci_hi6220_switch_voltage,
+ .set_ios = dw_mci_hi6220_set_ios,
+ .parse_dt = dw_mci_hi6220_parse_dt,
+};
+
static const struct of_device_id dw_mci_k3_match[] = {
{ .compatible = "hisilicon,hi4511-dw-mshc", .data = &k3_drv_data, },
+ { .compatible = "hisilicon,hi6220-dw-mshc", .data = &hi6220_data, },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_k3_match);
@@ -94,4 +193,4 @@ module_platform_driver(dw_mci_k3_pltfm_driver);
MODULE_DESCRIPTION("K3 Specific DW-MSHC Driver Extension");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:dwmmc-k3");
+MODULE_ALIAS("platform:dwmmc_k3");
diff --git a/kernel/drivers/mmc/host/dw_mmc-pltfm.c b/kernel/drivers/mmc/host/dw_mmc-pltfm.c
index ec6dbcdec..7e1d13b68 100644
--- a/kernel/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/kernel/drivers/mmc/host/dw_mmc-pltfm.c
@@ -59,6 +59,8 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
host->pdata = pdev->dev.platform_data;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* Get registers' physical base address */
+ host->phy_regs = (void *)(regs->start);
host->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
diff --git a/kernel/drivers/mmc/host/dw_mmc-rockchip.c b/kernel/drivers/mmc/host/dw_mmc-rockchip.c
index dbf166f94..9becebeec 100644
--- a/kernel/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/kernel/drivers/mmc/host/dw_mmc-rockchip.c
@@ -13,12 +13,19 @@
#include <linux/mmc/host.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/of_address.h>
+#include <linux/slab.h>
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
#define RK3288_CLKGEN_DIV 2
+struct dw_mci_rockchip_priv_data {
+ struct clk *drv_clk;
+ struct clk *sample_clk;
+ int default_sample_phase;
+};
+
static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
{
*cmdr |= SDMMC_CMD_USE_HOLD_REG;
@@ -33,6 +40,7 @@ static int dw_mci_rk3288_setup_clock(struct dw_mci *host)
static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
+ struct dw_mci_rockchip_priv_data *priv = host->priv;
int ret;
unsigned int cclkin;
u32 bus_hz;
@@ -66,6 +74,158 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
/* force dw_mci_setup_bus() */
host->current_speed = 0;
}
+
+ /* Make sure we use phases which we can enumerate with */
+ if (!IS_ERR(priv->sample_clk))
+ clk_set_phase(priv->sample_clk, priv->default_sample_phase);
+}
+
+#define NUM_PHASES 360
+#define TUNING_ITERATION_TO_PHASE(i) (DIV_ROUND_UP((i) * 360, NUM_PHASES))
+
+static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
+{
+ struct dw_mci *host = slot->host;
+ struct dw_mci_rockchip_priv_data *priv = host->priv;
+ struct mmc_host *mmc = slot->mmc;
+ int ret = 0;
+ int i;
+ bool v, prev_v = 0, first_v;
+ struct range_t {
+ int start;
+ int end; /* inclusive */
+ };
+ struct range_t *ranges;
+ unsigned int range_count = 0;
+ int longest_range_len = -1;
+ int longest_range = -1;
+ int middle_phase;
+
+ if (IS_ERR(priv->sample_clk)) {
+ dev_err(host->dev, "Tuning clock (sample_clk) not defined.\n");
+ return -EIO;
+ }
+
+ ranges = kmalloc_array(NUM_PHASES / 2 + 1, sizeof(*ranges), GFP_KERNEL);
+ if (!ranges)
+ return -ENOMEM;
+
+ /* Try each phase and extract good ranges */
+ for (i = 0; i < NUM_PHASES; ) {
+ clk_set_phase(priv->sample_clk, TUNING_ITERATION_TO_PHASE(i));
+
+ v = !mmc_send_tuning(mmc, opcode, NULL);
+
+ if (i == 0)
+ first_v = v;
+
+ if ((!prev_v) && v) {
+ range_count++;
+ ranges[range_count-1].start = i;
+ }
+ if (v) {
+ ranges[range_count-1].end = i;
+ i++;
+ } else if (i == NUM_PHASES - 1) {
+ /* No extra skipping rules if we're at the end */
+ i++;
+ } else {
+ /*
+ * No need to check too close to an invalid
+ * one since testing bad phases is slow. Skip
+ * 20 degrees.
+ */
+ i += DIV_ROUND_UP(20 * NUM_PHASES, 360);
+
+ /* Always test the last one */
+ if (i >= NUM_PHASES)
+ i = NUM_PHASES - 1;
+ }
+
+ prev_v = v;
+ }
+
+ if (range_count == 0) {
+ dev_warn(host->dev, "All phases bad!");
+ ret = -EIO;
+ goto free;
+ }
+
+ /* wrap around case, merge the end points */
+ if ((range_count > 1) && first_v && v) {
+ ranges[0].start = ranges[range_count-1].start;
+ range_count--;
+ }
+
+ if (ranges[0].start == 0 && ranges[0].end == NUM_PHASES - 1) {
+ clk_set_phase(priv->sample_clk, priv->default_sample_phase);
+ dev_info(host->dev, "All phases work, using default phase %d.",
+ priv->default_sample_phase);
+ goto free;
+ }
+
+ /* Find the longest range */
+ for (i = 0; i < range_count; i++) {
+ int len = (ranges[i].end - ranges[i].start + 1);
+
+ if (len < 0)
+ len += NUM_PHASES;
+
+ if (longest_range_len < len) {
+ longest_range_len = len;
+ longest_range = i;
+ }
+
+ dev_dbg(host->dev, "Good phase range %d-%d (%d len)\n",
+ TUNING_ITERATION_TO_PHASE(ranges[i].start),
+ TUNING_ITERATION_TO_PHASE(ranges[i].end),
+ len
+ );
+ }
+
+ dev_dbg(host->dev, "Best phase range %d-%d (%d len)\n",
+ TUNING_ITERATION_TO_PHASE(ranges[longest_range].start),
+ TUNING_ITERATION_TO_PHASE(ranges[longest_range].end),
+ longest_range_len
+ );
+
+ middle_phase = ranges[longest_range].start + longest_range_len / 2;
+ middle_phase %= NUM_PHASES;
+ dev_info(host->dev, "Successfully tuned phase to %d\n",
+ TUNING_ITERATION_TO_PHASE(middle_phase));
+
+ clk_set_phase(priv->sample_clk,
+ TUNING_ITERATION_TO_PHASE(middle_phase));
+
+free:
+ kfree(ranges);
+ return ret;
+}
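/*
 * Editor's sketch (not part of the patch): how the tuning loop above turns an
 * iteration index into a phase and picks the midpoint of the longest good
 * range.  Assumes NUM_PHASES == 360 as defined above; DIV_ROUND_UP is
 * open-coded and all names are illustrative.
 */
#define SKETCH_NUM_PHASES	360
#define SKETCH_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SKETCH_ITER_TO_PHASE(i)	SKETCH_DIV_ROUND_UP((i) * 360, SKETCH_NUM_PHASES)

static int sketch_middle_phase(int start, int len)
{
	/* e.g. a good range starting at 140 with length 81 yields phase 180 */
	int middle = (start + len / 2) % SKETCH_NUM_PHASES;

	return SKETCH_ITER_TO_PHASE(middle);
}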
+
+static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
+{
+ struct device_node *np = host->dev->of_node;
+ struct dw_mci_rockchip_priv_data *priv;
+
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (of_property_read_u32(np, "rockchip,default-sample-phase",
+ &priv->default_sample_phase))
+ priv->default_sample_phase = 0;
+
+ priv->drv_clk = devm_clk_get(host->dev, "ciu-drive");
+ if (IS_ERR(priv->drv_clk))
+ dev_dbg(host->dev, "ciu_drv not available\n");
+
+ priv->sample_clk = devm_clk_get(host->dev, "ciu-sample");
+ if (IS_ERR(priv->sample_clk))
+ dev_dbg(host->dev, "ciu_sample not available\n");
+
+ host->priv = priv;
+
+ return 0;
}
static int dw_mci_rockchip_init(struct dw_mci *host)
@@ -73,6 +233,9 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
/* It is slot 8 on Rockchip SoCs */
host->sdio_id0 = 8;
+ /* It needs this quirk on all Rockchip SoCs */
+ host->pdata->quirks |= DW_MCI_QUIRK_BROKEN_DTO;
+
return 0;
}
@@ -92,6 +255,8 @@ static const struct dw_mci_drv_data rk3288_drv_data = {
.caps = dw_mci_rk3288_dwmmc_caps,
.prepare_command = dw_mci_rockchip_prepare_command,
.set_ios = dw_mci_rk3288_set_ios,
+ .execute_tuning = dw_mci_rk3288_execute_tuning,
+ .parse_dt = dw_mci_rk3288_parse_dt,
.setup_clock = dw_mci_rk3288_setup_clock,
.init = dw_mci_rockchip_init,
};
@@ -153,5 +318,5 @@ module_platform_driver(dw_mci_rockchip_pltfm_driver);
MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip Specific DW-MSHC Driver Extension");
-MODULE_ALIAS("platform:dwmmc-rockchip");
+MODULE_ALIAS("platform:dwmmc_rockchip");
MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/dw_mmc.c b/kernel/drivers/mmc/host/dw_mmc.c
index 5f5adafb2..7a6cedbe4 100644
--- a/kernel/drivers/mmc/host/dw_mmc.c
+++ b/kernel/drivers/mmc/host/dw_mmc.c
@@ -56,7 +56,6 @@
#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
-#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
@@ -99,7 +98,9 @@ struct idmac_desc {
__le32 des3; /* buffer 2 physical address */
};
-#endif /* CONFIG_MMC_DW_IDMAC */
+
+/* Each descriptor can transfer up to 4KB of data in chained mode */
+#define DW_MCI_DESC_DATA_LENGTH 0x1000
static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
@@ -235,8 +236,8 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
u32 cmdr;
- cmd->error = -EINPROGRESS;
+ cmd->error = -EINPROGRESS;
cmdr = cmd->opcode;
if (cmd->opcode == MMC_STOP_TRANSMISSION ||
@@ -371,7 +372,7 @@ static void dw_mci_start_command(struct dw_mci *host,
cmd->arg, cmd_flags);
mci_writel(host, CMDARG, cmd->arg);
- wmb();
+ wmb(); /* drain writebuffer */
dw_mci_wait_while_busy(host, cmd_flags);
mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
@@ -380,6 +381,7 @@ static void dw_mci_start_command(struct dw_mci *host,
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
+
dw_mci_start_command(host, stop, host->stop_cmdr);
}
@@ -403,7 +405,6 @@ static int dw_mci_get_dma_dir(struct mmc_data *data)
return DMA_FROM_DEVICE;
}
-#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
struct mmc_data *data = host->data;
@@ -441,12 +442,21 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host)
mci_writel(host, BMOD, temp);
}
-static void dw_mci_idmac_complete_dma(struct dw_mci *host)
+static void dw_mci_dmac_complete_dma(void *arg)
{
+ struct dw_mci *host = arg;
struct mmc_data *data = host->data;
dev_vdbg(host->dev, "DMA complete\n");
+ if ((host->use_dma == TRANS_MODE_EDMAC) &&
+ data && (data->flags & MMC_DATA_READ))
+ /* Invalidate cache after read */
+ dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
+ data->sg,
+ data->sg_len,
+ DMA_FROM_DEVICE);
+
host->dma_ops->cleanup(host);
/*
@@ -462,72 +472,105 @@ static void dw_mci_idmac_complete_dma(struct dw_mci *host)
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
unsigned int sg_len)
{
+ unsigned int desc_len;
int i;
+
if (host->dma_64bit_address == 1) {
- struct idmac_desc_64addr *desc = host->sg_cpu;
+ struct idmac_desc_64addr *desc_first, *desc_last, *desc;
+
+ desc_first = desc_last = desc = host->sg_cpu;
- for (i = 0; i < sg_len; i++, desc++) {
+ for (i = 0; i < sg_len; i++) {
unsigned int length = sg_dma_len(&data->sg[i]);
+
u64 mem_addr = sg_dma_address(&data->sg[i]);
- /*
- * Set the OWN bit and disable interrupts for this
- * descriptor
- */
- desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
- IDMAC_DES0_CH;
- /* Buffer length */
- IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);
-
- /* Physical address to DMA to/from */
- desc->des4 = mem_addr & 0xffffffff;
- desc->des5 = mem_addr >> 32;
+ for ( ; length ; desc++) {
+ desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
+ length : DW_MCI_DESC_DATA_LENGTH;
+
+ length -= desc_len;
+
+ /*
+ * Set the OWN bit and disable interrupts
+ * for this descriptor
+ */
+ desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
+ IDMAC_DES0_CH;
+
+ /* Buffer length */
+ IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
+
+ /* Physical address to DMA to/from */
+ desc->des4 = mem_addr & 0xffffffff;
+ desc->des5 = mem_addr >> 32;
+
+ /* Update physical address for the next desc */
+ mem_addr += desc_len;
+
+ /* Save pointer to the last descriptor */
+ desc_last = desc;
+ }
}
/* Set first descriptor */
- desc = host->sg_cpu;
- desc->des0 |= IDMAC_DES0_FD;
+ desc_first->des0 |= IDMAC_DES0_FD;
/* Set last descriptor */
- desc = host->sg_cpu + (i - 1) *
- sizeof(struct idmac_desc_64addr);
- desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
- desc->des0 |= IDMAC_DES0_LD;
+ desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
+ desc_last->des0 |= IDMAC_DES0_LD;
} else {
- struct idmac_desc *desc = host->sg_cpu;
+ struct idmac_desc *desc_first, *desc_last, *desc;
+
+ desc_first = desc_last = desc = host->sg_cpu;
- for (i = 0; i < sg_len; i++, desc++) {
+ for (i = 0; i < sg_len; i++) {
unsigned int length = sg_dma_len(&data->sg[i]);
+
u32 mem_addr = sg_dma_address(&data->sg[i]);
- /*
- * Set the OWN bit and disable interrupts for this
- * descriptor
- */
- desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
- IDMAC_DES0_DIC | IDMAC_DES0_CH);
- /* Buffer length */
- IDMAC_SET_BUFFER1_SIZE(desc, length);
+ for ( ; length ; desc++) {
+ desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
+ length : DW_MCI_DESC_DATA_LENGTH;
+
+ length -= desc_len;
+
+ /*
+ * Set the OWN bit and disable interrupts
+ * for this descriptor
+ */
+ desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
+ IDMAC_DES0_DIC |
+ IDMAC_DES0_CH);
+
+ /* Buffer length */
+ IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
- /* Physical address to DMA to/from */
- desc->des2 = cpu_to_le32(mem_addr);
+ /* Physical address to DMA to/from */
+ desc->des2 = cpu_to_le32(mem_addr);
+
+ /* Update physical address for the next desc */
+ mem_addr += desc_len;
+
+ /* Save pointer to the last descriptor */
+ desc_last = desc;
+ }
}
/* Set first descriptor */
- desc = host->sg_cpu;
- desc->des0 |= cpu_to_le32(IDMAC_DES0_FD);
+ desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
/* Set last descriptor */
- desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
- desc->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC));
- desc->des0 |= cpu_to_le32(IDMAC_DES0_LD);
+ desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
+ IDMAC_DES0_DIC));
+ desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
}
- wmb();
+ wmb(); /* drain writebuffer */
}
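/*
 * Editor's sketch (not part of the patch): the loops above now split every
 * scatterlist segment into IDMAC descriptors of at most
 * DW_MCI_DESC_DATA_LENGTH (4KB) each.  This standalone helper only shows the
 * resulting descriptor count; the name is illustrative.
 */
static unsigned int sketch_idmac_desc_count(unsigned int seg_bytes)
{
	/* e.g. a 9000-byte segment needs 3 descriptors: 4096 + 4096 + 808 */
	return (seg_bytes + 0x1000 - 1) / 0x1000;
}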
-static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
+static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
u32 temp;
@@ -542,6 +585,7 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
temp |= SDMMC_CTRL_USE_IDMAC;
mci_writel(host, CTRL, temp);
+ /* drain writebuffer */
wmb();
/* Enable the IDMAC */
@@ -551,6 +595,8 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
/* Start it running */
mci_writel(host, PLDMND, 1);
+
+ return 0;
}
static int dw_mci_idmac_init(struct dw_mci *host)
@@ -589,7 +635,9 @@ static int dw_mci_idmac_init(struct dw_mci *host)
host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
/* Forward link the descriptor list */
- for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) {
+ for (i = 0, p = host->sg_cpu;
+ i < host->ring_size - 1;
+ i++, p++) {
p->des3 = cpu_to_le32(host->sg_dma +
(sizeof(struct idmac_desc) * (i + 1)));
p->des1 = 0;
@@ -629,10 +677,110 @@ static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
.init = dw_mci_idmac_init,
.start = dw_mci_idmac_start_dma,
.stop = dw_mci_idmac_stop_dma,
- .complete = dw_mci_idmac_complete_dma,
+ .complete = dw_mci_dmac_complete_dma,
+ .cleanup = dw_mci_dma_cleanup,
+};
+
+static void dw_mci_edmac_stop_dma(struct dw_mci *host)
+{
+ dmaengine_terminate_all(host->dms->ch);
+}
+
+static int dw_mci_edmac_start_dma(struct dw_mci *host,
+ unsigned int sg_len)
+{
+ struct dma_slave_config cfg;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct scatterlist *sgl = host->data->sg;
+ const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ u32 sg_elems = host->data->sg_len;
+ u32 fifoth_val;
+ u32 fifo_offset = host->fifo_reg - host->regs;
+ int ret = 0;
+
+ /* Set external dma config: burst size, burst width */
+ cfg.dst_addr = (dma_addr_t)(host->phy_regs + fifo_offset);
+ cfg.src_addr = cfg.dst_addr;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ /* Match burst msize with external dma config */
+ fifoth_val = mci_readl(host, FIFOTH);
+ cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
+ cfg.src_maxburst = cfg.dst_maxburst;
+
+ if (host->data->flags & MMC_DATA_WRITE)
+ cfg.direction = DMA_MEM_TO_DEV;
+ else
+ cfg.direction = DMA_DEV_TO_MEM;
+
+ ret = dmaengine_slave_config(host->dms->ch, &cfg);
+ if (ret) {
+ dev_err(host->dev, "Failed to config edmac.\n");
+ return -EBUSY;
+ }
+
+ desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
+ sg_len, cfg.direction,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(host->dev, "Can't prepare slave sg.\n");
+ return -EBUSY;
+ }
+
+ /* Set dw_mci_dmac_complete_dma as callback */
+ desc->callback = dw_mci_dmac_complete_dma;
+ desc->callback_param = (void *)host;
+ dmaengine_submit(desc);
+
+ /* Flush cache before write */
+ if (host->data->flags & MMC_DATA_WRITE)
+ dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
+ sg_elems, DMA_TO_DEVICE);
+
+ dma_async_issue_pending(host->dms->ch);
+
+ return 0;
+}
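/*
 * Editor's sketch (not part of the patch): how dw_mci_edmac_start_dma() above
 * derives the dmaengine burst size from the MSIZE field in FIFOTH[30:28].
 * Only the mszs[] table comes from the driver; the helper name is
 * illustrative.
 */
static unsigned int sketch_edmac_maxburst(unsigned int fifoth_val)
{
	static const unsigned int mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};

	/* e.g. MSIZE == 2 in FIFOTH selects a burst of 8 transfers */
	return mszs[(fifoth_val >> 28) & 0x7];
}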
+
+static int dw_mci_edmac_init(struct dw_mci *host)
+{
+ /* Request external dma channel */
+ host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
+ if (!host->dms)
+ return -ENOMEM;
+
+ host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
+ if (!host->dms->ch) {
+ dev_err(host->dev, "Failed to get external DMA channel.\n");
+ kfree(host->dms);
+ host->dms = NULL;
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void dw_mci_edmac_exit(struct dw_mci *host)
+{
+ if (host->dms) {
+ if (host->dms->ch) {
+ dma_release_channel(host->dms->ch);
+ host->dms->ch = NULL;
+ }
+ kfree(host->dms);
+ host->dms = NULL;
+ }
+}
+
+static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
+ .init = dw_mci_edmac_init,
+ .exit = dw_mci_edmac_exit,
+ .start = dw_mci_edmac_start_dma,
+ .stop = dw_mci_edmac_stop_dma,
+ .complete = dw_mci_dmac_complete_dma,
.cleanup = dw_mci_dma_cleanup,
};
-#endif /* CONFIG_MMC_DW_IDMAC */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
struct mmc_data *data,
@@ -712,13 +860,16 @@ static void dw_mci_post_req(struct mmc_host *mmc,
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
-#ifdef CONFIG_MMC_DW_IDMAC
unsigned int blksz = data->blksz;
const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
u32 fifo_width = 1 << host->data_shift;
u32 blksz_depth = blksz / fifo_width, fifoth_val;
u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
- int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
+ int idx = ARRAY_SIZE(mszs) - 1;
+
+ /* pio should skip this scenario */
+ if (!host->use_dma)
+ return;
tx_wmark = (host->fifo_depth) / 2;
tx_wmark_invers = host->fifo_depth - tx_wmark;
@@ -748,7 +899,6 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
done:
fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
mci_writel(host, FIFOTH, fifoth_val);
-#endif
}
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
@@ -810,10 +960,12 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
host->using_dma = 1;
- dev_vdbg(host->dev,
- "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
- (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
- sg_len);
+ if (host->use_dma == TRANS_MODE_IDMAC)
+ dev_vdbg(host->dev,
+ "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
+ (unsigned long)host->sg_cpu,
+ (unsigned long)host->sg_dma,
+ sg_len);
/*
* Decide the MSIZE and RX/TX Watermark.
@@ -835,7 +987,11 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
mci_writel(host, INTMASK, temp);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
- host->dma_ops->start(host, sg_len);
+ if (host->dma_ops->start(host, sg_len)) {
+ /* We can't do DMA */
+ dev_err(host->dev, "%s: failed to start DMA.\n", __func__);
+ return -ENODEV;
+ }
return 0;
}
@@ -843,6 +999,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
unsigned long irqflags;
+ int flags = SG_MITER_ATOMIC;
u32 temp;
data->error = -EINPROGRESS;
@@ -859,7 +1016,6 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
}
if (dw_mci_submit_data_dma(host, data)) {
- int flags = SG_MITER_ATOMIC;
if (host->data->flags & MMC_DATA_READ)
flags |= SG_MITER_TO_SG;
else
@@ -906,7 +1062,7 @@ static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
unsigned int cmd_status = 0;
mci_writel(host, CMDARG, arg);
- wmb();
+ wmb(); /* drain writebuffer */
dw_mci_wait_while_busy(host, cmd);
mci_writel(host, CMD, SDMMC_CMD_START | cmd);
@@ -1019,7 +1175,7 @@ static void __dw_mci_start_request(struct dw_mci *host,
if (data) {
dw_mci_submit_data(host, data);
- wmb();
+ wmb(); /* drain writebuffer */
}
dw_mci_start_command(host, cmd, cmdflags);
@@ -1137,6 +1293,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* DDR mode set */
if (ios->timing == MMC_TIMING_MMC_DDR52 ||
+ ios->timing == MMC_TIMING_UHS_DDR50 ||
ios->timing == MMC_TIMING_MMC_HS400)
regs |= ((0x1 << slot->id) << 16);
else
@@ -1236,33 +1393,32 @@ static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
u32 uhs;
u32 v18 = SDMMC_UHS_18V << slot->id;
- int min_uv, max_uv;
int ret;
+ if (drv_data && drv_data->switch_voltage)
+ return drv_data->switch_voltage(mmc, ios);
+
/*
* Program the voltage. Note that some instances of dw_mmc may use
* the UHS_REG for this. For other instances (like exynos) the UHS_REG
* does no harm but you need to set the regulator directly. Try both.
*/
uhs = mci_readl(host, UHS_REG);
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
- min_uv = 2700000;
- max_uv = 3600000;
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
uhs &= ~v18;
- } else {
- min_uv = 1700000;
- max_uv = 1950000;
+ else
uhs |= v18;
- }
+
if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
if (ret) {
dev_dbg(&mmc->class_dev,
- "Regulator set error %d: %d - %d\n",
- ret, min_uv, max_uv);
+ "Regulator set error %d - %s V\n",
+ ret, uhs & v18 ? "1.8" : "3.3");
return ret;
}
}
@@ -1278,10 +1434,7 @@ static int dw_mci_get_ro(struct mmc_host *mmc)
int gpio_ro = mmc_gpio_get_ro(mmc);
/* Use platform get_ro function, else try on board write protect */
- if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
- (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
- read_only = 0;
- else if (!IS_ERR_VALUE(gpio_ro))
+ if (!IS_ERR_VALUE(gpio_ro))
read_only = gpio_ro;
else
read_only =
@@ -1383,14 +1536,15 @@ static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = host->drv_data;
- int err = -ENOSYS;
+ int err = -EINVAL;
if (drv_data && drv_data->execute_tuning)
- err = drv_data->execute_tuning(slot);
+ err = drv_data->execute_tuning(slot, opcode);
return err;
}
-static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
+ struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
@@ -1532,6 +1686,20 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
return data->error;
}
+static void dw_mci_set_drto(struct dw_mci *host)
+{
+ unsigned int drto_clks;
+ unsigned int drto_ms;
+
+ drto_clks = mci_readl(host, TMOUT) >> 8;
+ drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);
+
+ /* add a bit of spare time */
+ drto_ms += 10;
+
+ mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
+}
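/*
 * Editor's note (not part of the patch): worked example of the timeout that
 * dw_mci_set_drto() computes.  Assuming TMOUT[31:8] holds 0xFFFFFF data read
 * timeout clocks and bus_hz is 50 MHz:
 *
 *	drto_ms = DIV_ROUND_UP(0xFFFFFF, 50000000 / 1000)
 *	        = DIV_ROUND_UP(16777215, 50000) = 336 ms
 *
 * plus the 10 ms of slack, so the software timer fires about 346 ms after
 * being armed.
 */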
+
static void dw_mci_tasklet_func(unsigned long priv)
{
struct dw_mci *host = (struct dw_mci *)priv;
@@ -1609,8 +1777,16 @@ static void dw_mci_tasklet_func(unsigned long priv)
}
if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
- &host->pending_events))
+ &host->pending_events)) {
+ /*
+ * If all data-related interrupts don't arrive within the
+ * given time while reading data, set the data read timeout.
+ */
+ if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
+ (host->dir_status == DW_MCI_RECV_STATUS))
+ dw_mci_set_drto(host);
break;
+ }
set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
@@ -1643,8 +1819,17 @@ static void dw_mci_tasklet_func(unsigned long priv)
case STATE_DATA_BUSY:
if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
- &host->pending_events))
+ &host->pending_events)) {
+ /*
+ * If the data error interrupt arrives but the data over
+ * interrupt doesn't arrive within the given time while
+ * reading data, set the data read timeout.
+ */
+ if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
+ (host->dir_status == DW_MCI_RECV_STATUS))
+ dw_mci_set_drto(host);
break;
+ }
host->data = NULL;
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
@@ -1742,7 +1927,7 @@ static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
- cnt = min(cnt, (int)host->part_buf_count);
+ cnt = min_t(int, cnt, host->part_buf_count);
if (cnt) {
memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
cnt);
@@ -1768,6 +1953,7 @@ static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
+
buf += len;
cnt -= len;
if (host->part_buf_count == 2) {
@@ -1794,6 +1980,7 @@ static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
#endif
{
u16 *pdata = buf;
+
for (; cnt >= 2; cnt -= 2)
mci_fifo_writew(host->fifo_reg, *pdata++);
buf = pdata;
@@ -1818,6 +2005,7 @@ static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
int len = min(cnt & -2, (int)sizeof(aligned_buf));
int items = len >> 1;
int i;
+
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
/* memcpy from aligned buffer into output buffer */
@@ -1829,6 +2017,7 @@ static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
#endif
{
u16 *pdata = buf;
+
for (; cnt >= 2; cnt -= 2)
*pdata++ = mci_fifo_readw(host->fifo_reg);
buf = pdata;
@@ -1847,6 +2036,7 @@ static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
+
buf += len;
cnt -= len;
if (host->part_buf_count == 4) {
@@ -1873,6 +2063,7 @@ static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
#endif
{
u32 *pdata = buf;
+
for (; cnt >= 4; cnt -= 4)
mci_fifo_writel(host->fifo_reg, *pdata++);
buf = pdata;
@@ -1897,6 +2088,7 @@ static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
int len = min(cnt & -4, (int)sizeof(aligned_buf));
int items = len >> 2;
int i;
+
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
/* memcpy from aligned buffer into output buffer */
@@ -1908,6 +2100,7 @@ static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
#endif
{
u32 *pdata = buf;
+
for (; cnt >= 4; cnt -= 4)
*pdata++ = mci_fifo_readl(host->fifo_reg);
buf = pdata;
@@ -1926,6 +2119,7 @@ static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
+
buf += len;
cnt -= len;
@@ -1953,6 +2147,7 @@ static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
#endif
{
u64 *pdata = buf;
+
for (; cnt >= 8; cnt -= 8)
mci_fifo_writeq(host->fifo_reg, *pdata++);
buf = pdata;
@@ -1977,6 +2172,7 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
int len = min(cnt & -8, (int)sizeof(aligned_buf));
int items = len >> 3;
int i;
+
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
@@ -1989,6 +2185,7 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
#endif
{
u64 *pdata = buf;
+
for (; cnt >= 8; cnt -= 8)
*pdata++ = mci_fifo_readq(host->fifo_reg);
buf = pdata;
@@ -2064,7 +2261,7 @@ static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
done:
sg_miter_stop(sg_miter);
host->sg = NULL;
- smp_wmb();
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
@@ -2118,7 +2315,7 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
done:
sg_miter_stop(sg_miter);
host->sg = NULL;
- smp_wmb();
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
@@ -2127,7 +2324,7 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
if (!host->cmd_status)
host->cmd_status = status;
- smp_wmb();
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
@@ -2191,7 +2388,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = pending;
- smp_wmb();
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
}
@@ -2199,16 +2396,19 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
/* if there is an error report DATA_ERROR */
mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
host->data_status = pending;
- smp_wmb();
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_DATA_ERROR, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
if (pending & SDMMC_INT_DATA_OVER) {
+ if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
+ del_timer(&host->dto_timer);
+
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
if (!host->data_status)
host->data_status = pending;
- smp_wmb();
+ smp_wmb(); /* drain writebuffer */
if (host->dir_status == DW_MCI_RECV_STATUS) {
if (host->sg != NULL)
dw_mci_read_data_pio(host, true);
@@ -2255,15 +2455,17 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
-#ifdef CONFIG_MMC_DW_IDMAC
- /* Handle DMA interrupts */
+ if (host->use_dma != TRANS_MODE_IDMAC)
+ return IRQ_HANDLED;
+
+ /* Handle IDMA interrupts */
if (host->dma_64bit_address == 1) {
pending = mci_readl(host, IDSTS64);
if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
- host->dma_ops->complete(host);
+ host->dma_ops->complete((void *)host);
}
} else {
pending = mci_readl(host, IDSTS);
@@ -2271,18 +2473,18 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
- host->dma_ops->complete(host);
+ host->dma_ops->complete((void *)host);
}
}
-#endif
return IRQ_HANDLED;
}
#ifdef CONFIG_OF
-/* given a slot id, find out the device node representing that slot */
-static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
+/* given a slot, find out the device node representing that slot */
+static struct device_node *dw_mci_of_find_slot_node(struct dw_mci_slot *slot)
{
+ struct device *dev = slot->mmc->parent;
struct device_node *np;
const __be32 *addr;
int len;
@@ -2294,42 +2496,28 @@ static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
addr = of_get_property(np, "reg", &len);
if (!addr || (len < sizeof(int)))
continue;
- if (be32_to_cpup(addr) == slot)
+ if (be32_to_cpup(addr) == slot->id)
return np;
}
return NULL;
}
-static struct dw_mci_of_slot_quirks {
- char *quirk;
- int id;
-} of_slot_quirks[] = {
- {
- .quirk = "disable-wp",
- .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
- },
-};
-
-static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
+static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
{
- struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
- int quirks = 0;
- int idx;
+ struct device_node *np = dw_mci_of_find_slot_node(slot);
- /* get quirks */
- for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
- if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
- dev_warn(dev, "Slot quirk %s is deprecated\n",
- of_slot_quirks[idx].quirk);
- quirks |= of_slot_quirks[idx].id;
- }
+ if (!np)
+ return;
- return quirks;
+ if (of_property_read_bool(np, "disable-wp")) {
+ slot->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+ dev_warn(slot->mmc->parent,
+ "Slot quirk 'disable-wp' is deprecated\n");
+ }
}
#else /* CONFIG_OF */
-static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
+static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
{
- return 0;
}
#endif /* CONFIG_OF */
@@ -2352,8 +2540,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
slot->host = host;
host->slot[id] = slot;
- slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
-
mmc->ops = &dw_mci_ops;
if (of_property_read_u32_array(host->dev->of_node,
"clock-freq-min-max", freq, 2)) {
@@ -2391,31 +2577,34 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
if (host->pdata->caps2)
mmc->caps2 = host->pdata->caps2;
+ dw_mci_slot_of_parse(slot);
+
ret = mmc_of_parse(mmc);
if (ret)
goto err_host_allocated;
- if (host->pdata->blk_settings) {
- mmc->max_segs = host->pdata->blk_settings->max_segs;
- mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
- mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
- mmc->max_req_size = host->pdata->blk_settings->max_req_size;
- mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
- } else {
- /* Useful defaults if platform data is unset. */
-#ifdef CONFIG_MMC_DW_IDMAC
+ /* Useful defaults if platform data is unset. */
+ if (host->use_dma == TRANS_MODE_IDMAC) {
mmc->max_segs = host->ring_size;
mmc->max_blk_size = 65536;
mmc->max_seg_size = 0x1000;
mmc->max_req_size = mmc->max_seg_size * host->ring_size;
mmc->max_blk_count = mmc->max_req_size / 512;
-#else
+ } else if (host->use_dma == TRANS_MODE_EDMAC) {
+ mmc->max_segs = 64;
+ mmc->max_blk_size = 65536;
+ mmc->max_blk_count = 65535;
+ mmc->max_req_size =
+ mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
+ } else {
+ /* TRANS_MODE_PIO */
mmc->max_segs = 64;
mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
mmc->max_blk_count = 512;
- mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_req_size = mmc->max_blk_size *
+ mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
-#endif /* CONFIG_MMC_DW_IDMAC */
}
if (dw_mci_get_cd(mmc))
@@ -2449,44 +2638,80 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
static void dw_mci_init_dma(struct dw_mci *host)
{
int addr_config;
- /* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
- addr_config = (mci_readl(host, HCON) >> 27) & 0x01;
-
- if (addr_config == 1) {
- /* host supports IDMAC in 64-bit address mode */
- host->dma_64bit_address = 1;
- dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
- if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
- dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
- } else {
- /* host supports IDMAC in 32-bit address mode */
- host->dma_64bit_address = 0;
- dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
- }
+ struct device *dev = host->dev;
+ struct device_node *np = dev->of_node;
- /* Alloc memory for sg translation */
- host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
- &host->sg_dma, GFP_KERNEL);
- if (!host->sg_cpu) {
- dev_err(host->dev, "%s: could not alloc DMA memory\n",
- __func__);
+ /*
+ * Check the transfer mode from HCON[17:16], clarifying the
+ * ambiguous description in the dw_mmc databook:
+ * 2b'00: No DMA Interface -> Actually means using Internal DMA block
+ * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
+ * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
+ * 2b'11: Non DW DMA Interface -> pio only
+ * Compared to DesignWare DMA Interface, Generic DMA Interface has a
+ * simpler request/acknowledge handshake mechanism and both of them
+ * are regarded as external dma masters by dw_mmc.
+ */
+ host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
+ if (host->use_dma == DMA_INTERFACE_IDMA) {
+ host->use_dma = TRANS_MODE_IDMAC;
+ } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
+ host->use_dma == DMA_INTERFACE_GDMA) {
+ host->use_dma = TRANS_MODE_EDMAC;
+ } else {
goto no_dma;
}
/* Determine which DMA interface to use */
-#ifdef CONFIG_MMC_DW_IDMAC
- host->dma_ops = &dw_mci_idmac_ops;
- dev_info(host->dev, "Using internal DMA controller.\n");
-#endif
+ if (host->use_dma == TRANS_MODE_IDMAC) {
+ /*
+ * Check ADDR_CONFIG bit in HCON to find
+ * IDMAC address bus width
+ */
+ addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
+
+ if (addr_config == 1) {
+ /* host supports IDMAC in 64-bit address mode */
+ host->dma_64bit_address = 1;
+ dev_info(host->dev,
+ "IDMAC supports 64-bit address mode.\n");
+ if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
+ dma_set_coherent_mask(host->dev,
+ DMA_BIT_MASK(64));
+ } else {
+ /* host supports IDMAC in 32-bit address mode */
+ host->dma_64bit_address = 0;
+ dev_info(host->dev,
+ "IDMAC supports 32-bit address mode.\n");
+ }
- if (!host->dma_ops)
- goto no_dma;
+ /* Alloc memory for sg translation */
+ host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
+ &host->sg_dma, GFP_KERNEL);
+ if (!host->sg_cpu) {
+ dev_err(host->dev,
+ "%s: could not alloc DMA memory\n",
+ __func__);
+ goto no_dma;
+ }
+
+ host->dma_ops = &dw_mci_idmac_ops;
+ dev_info(host->dev, "Using internal DMA controller.\n");
+ } else {
+ /* TRANS_MODE_EDMAC: check dma bindings again */
+ if ((of_property_count_strings(np, "dma-names") < 0) ||
+ (!of_find_property(np, "dmas", NULL))) {
+ goto no_dma;
+ }
+ host->dma_ops = &dw_mci_edmac_ops;
+ dev_info(host->dev, "Using external DMA controller.\n");
+ }
if (host->dma_ops->init && host->dma_ops->start &&
host->dma_ops->stop && host->dma_ops->cleanup) {
if (host->dma_ops->init(host)) {
- dev_err(host->dev, "%s: Unable to initialize "
- "DMA Controller.\n", __func__);
+ dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
+ __func__);
goto no_dma;
}
} else {
@@ -2494,13 +2719,11 @@ static void dw_mci_init_dma(struct dw_mci *host)
goto no_dma;
}
- host->use_dma = 1;
return;
no_dma:
dev_info(host->dev, "Using PIO mode.\n");
- host->use_dma = 0;
- return;
+ host->use_dma = TRANS_MODE_PIO;
}
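/*
 * Editor's sketch (not part of the patch): the HCON[17:16] decoding done by
 * dw_mci_init_dma() above, written as a standalone helper.  The field values
 * follow the comment in that function; the enum and names are illustrative.
 */
enum sketch_trans_mode { SKETCH_PIO, SKETCH_IDMAC, SKETCH_EDMAC };

static enum sketch_trans_mode sketch_decode_hcon_trans_mode(unsigned int hcon)
{
	switch ((hcon >> 16) & 0x3) {
	case 0x0:			/* "No DMA Interface": internal DMAC */
		return SKETCH_IDMAC;
	case 0x1:			/* DesignWare DMA interface */
	case 0x2:			/* generic DMA interface */
		return SKETCH_EDMAC;	/* both act as external DMA masters */
	default:			/* 0x3: no DMA at all */
		return SKETCH_PIO;
	}
}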
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
@@ -2554,6 +2777,7 @@ static bool dw_mci_reset(struct dw_mci *host)
if (host->use_dma) {
unsigned long timeout = jiffies + msecs_to_jiffies(500);
u32 status;
+
do {
status = mci_readl(host, STATUS);
if (!(status & SDMMC_STATUS_DMA_REQ))
@@ -2563,8 +2787,8 @@ static bool dw_mci_reset(struct dw_mci *host)
if (status & SDMMC_STATUS_DMA_REQ) {
dev_err(host->dev,
- "%s: Timeout waiting for dma_req to "
- "clear during reset\n", __func__);
+ "%s: Timeout waiting for dma_req to clear during reset\n",
+ __func__);
goto ciu_out;
}
@@ -2575,17 +2799,16 @@ static bool dw_mci_reset(struct dw_mci *host)
} else {
/* if the controller reset bit did clear, then set clock regs */
if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
- dev_err(host->dev, "%s: fifo/dma reset bits didn't "
- "clear but ciu was reset, doing clock update\n",
+ dev_err(host->dev,
+ "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
__func__);
goto ciu_out;
}
}
-#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
- /* It is also recommended that we reset and reprogram idmac */
- dw_mci_idmac_reset(host);
-#endif
+ if (host->use_dma == TRANS_MODE_IDMAC)
+ /* It is also recommended that we reset and reprogram idmac */
+ dw_mci_idmac_reset(host);
ret = true;
@@ -2610,6 +2833,28 @@ static void dw_mci_cmd11_timer(unsigned long arg)
tasklet_schedule(&host->tasklet);
}
+static void dw_mci_dto_timer(unsigned long arg)
+{
+ struct dw_mci *host = (struct dw_mci *)arg;
+
+ switch (host->state) {
+ case STATE_SENDING_DATA:
+ case STATE_DATA_BUSY:
+ /*
+ * If DTO interrupt does NOT come in sending data state,
+ * we should notify the driver to terminate current transfer
+ * and report a data timeout to the core.
+ */
+ host->data_status = SDMMC_INT_DRTO;
+ set_bit(EVENT_DATA_ERROR, &host->pending_events);
+ set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ break;
+ default:
+ break;
+ }
+}
+
#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
char *quirk;
@@ -2618,9 +2863,6 @@ static struct dw_mci_of_quirks {
{
.quirk = "broken-cd",
.id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
- }, {
- .quirk = "disable-wp",
- .id = DW_MCI_QUIRK_NO_WRITE_PROTECT,
},
};
@@ -2640,8 +2882,8 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
/* find out number of slots supported */
if (of_property_read_u32(dev->of_node, "num-slots",
&pdata->num_slots)) {
- dev_info(dev, "num-slots property not found, "
- "assuming 1 slot is available\n");
+ dev_info(dev,
+ "num-slots property not found, assuming 1 slot is available\n");
pdata->num_slots = 1;
}
@@ -2651,8 +2893,8 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
pdata->quirks |= of_quirks[idx].id;
if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
- dev_info(dev, "fifo-depth property not found, using "
- "value of FIFOTH register as default\n");
+ dev_info(dev,
+ "fifo-depth property not found, using value of FIFOTH register as default\n");
of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
@@ -2665,8 +2907,10 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
return ERR_PTR(ret);
}
- if (of_find_property(np, "supports-highspeed", NULL))
+ if (of_find_property(np, "supports-highspeed", NULL)) {
+ dev_info(dev, "supports-highspeed property is deprecated.\n");
pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
+ }
return pdata;
}
@@ -2721,7 +2965,7 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- if (host->pdata->num_slots > 1) {
+ if (host->pdata->num_slots < 1) {
dev_err(host->dev,
"Platform data must supply num_slots.\n");
return -ENODEV;
@@ -2789,6 +3033,10 @@ int dw_mci_probe(struct dw_mci *host)
host->quirks = host->pdata->quirks;
+ if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
+ setup_timer(&host->dto_timer,
+ dw_mci_dto_timer, (unsigned long)host);
+
spin_lock_init(&host->lock);
spin_lock_init(&host->irq_lock);
INIT_LIST_HEAD(&host->queue);
@@ -2797,7 +3045,7 @@ int dw_mci_probe(struct dw_mci *host)
* Get the host data width - this assumes that HCON has been set with
* the correct values.
*/
- i = (mci_readl(host, HCON) >> 7) & 0x7;
+ i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
if (!i) {
host->push_data = dw_mci_push_data16;
host->pull_data = dw_mci_pull_data16;
@@ -2879,7 +3127,7 @@ int dw_mci_probe(struct dw_mci *host)
if (host->pdata->num_slots)
host->num_slots = host->pdata->num_slots;
else
- host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
+ host->num_slots = SDMMC_GET_SLOT_NUM(mci_readl(host, HCON));
/*
* Enable interrupts for command done, data over, data empty,
@@ -2889,11 +3137,11 @@ int dw_mci_probe(struct dw_mci *host)
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
SDMMC_INT_TXDR | SDMMC_INT_RXDR |
DW_MCI_ERROR_FLAGS);
- mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
+ /* Enable mci interrupt */
+ mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
- dev_info(host->dev, "DW MMC controller at irq %d, "
- "%d bit host data width, "
- "%u deep fifo\n",
+ dev_info(host->dev,
+ "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
host->irq, width, fifo_size);
/* We need at least one slot to succeed */
@@ -2908,8 +3156,9 @@ int dw_mci_probe(struct dw_mci *host)
if (init_slots) {
dev_info(host->dev, "%d slots initialized\n", init_slots);
} else {
- dev_dbg(host->dev, "attempted to initialize %d slots, "
- "but failed on all\n", host->num_slots);
+ dev_dbg(host->dev,
+ "attempted to initialize %d slots, but failed on all\n",
+ host->num_slots);
goto err_dmaunmap;
}
@@ -2941,15 +3190,15 @@ void dw_mci_remove(struct dw_mci *host)
{
int i;
- mci_writel(host, RINTSTS, 0xFFFFFFFF);
- mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
-
for (i = 0; i < host->num_slots; i++) {
dev_dbg(host->dev, "remove slot %d\n", i);
if (host->slot[i])
dw_mci_cleanup_slot(host->slot[i], i);
}
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+ mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+
/* disable clock to CIU */
mci_writel(host, CLKENA, 0);
mci_writel(host, CLKSRC, 0);
@@ -2973,6 +3222,9 @@ EXPORT_SYMBOL(dw_mci_remove);
*/
int dw_mci_suspend(struct dw_mci *host)
{
+ if (host->use_dma && host->dma_ops->exit)
+ host->dma_ops->exit(host);
+
return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);
@@ -3007,6 +3259,7 @@ int dw_mci_resume(struct dw_mci *host)
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
+
if (!slot)
continue;
if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
diff --git a/kernel/drivers/mmc/host/dw_mmc.h b/kernel/drivers/mmc/host/dw_mmc.h
index f45ab91de..f695b58f0 100644
--- a/kernel/drivers/mmc/host/dw_mmc.h
+++ b/kernel/drivers/mmc/host/dw_mmc.h
@@ -148,6 +148,15 @@
#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \
((r) & 0xFFF) << 16 | \
((t) & 0xFFF))
+/* HCON register defines */
+#define DMA_INTERFACE_IDMA (0x0)
+#define DMA_INTERFACE_DWDMA (0x1)
+#define DMA_INTERFACE_GDMA (0x2)
+#define DMA_INTERFACE_NODMA (0x3)
+#define SDMMC_GET_TRANS_MODE(x) (((x)>>16) & 0x3)
+#define SDMMC_GET_SLOT_NUM(x) ((((x)>>1) & 0x1F) + 1)
+#define SDMMC_GET_HDATA_WIDTH(x) (((x)>>7) & 0x7)
+#define SDMMC_GET_ADDR_CONFIG(x) (((x)>>27) & 0x1)
/* Internal DMAC interrupt defines */
#define SDMMC_IDMAC_INT_AI BIT(9)
#define SDMMC_IDMAC_INT_NI BIT(8)
@@ -163,7 +172,7 @@
/* Version ID register define */
#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
/* Card read threshold */
-#define SDMMC_SET_RD_THLD(v, x) (((v) & 0x1FFF) << 16 | (x))
+#define SDMMC_SET_RD_THLD(v, x) (((v) & 0xFFF) << 16 | (x))
#define SDMMC_UHS_18V BIT(0)
/* All ctrl reset bits */
#define SDMMC_CTRL_ALL_RESET_FLAGS \
@@ -227,7 +236,6 @@ extern int dw_mci_resume(struct dw_mci *host);
* struct dw_mci_slot - MMC slot state
* @mmc: The mmc_host representing this slot.
* @host: The MMC controller this slot is using.
- * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX)
* @ctype: Card type for this slot.
* @mrq: mmc_request currently being processed or waiting to be
* processed, or NULL when the slot is idle.
@@ -245,8 +253,6 @@ struct dw_mci_slot {
struct mmc_host *mmc;
struct dw_mci *host;
- int quirks;
-
u32 ctype;
struct mmc_request *mrq;
@@ -284,8 +290,10 @@ struct dw_mci_drv_data {
void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
int (*parse_dt)(struct dw_mci *host);
- int (*execute_tuning)(struct dw_mci_slot *slot);
+ int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode);
int (*prepare_hs400_tuning)(struct dw_mci *host,
struct mmc_ios *ios);
+ int (*switch_voltage)(struct mmc_host *mmc,
+ struct mmc_ios *ios);
};
#endif /* _DW_MMC_H_ */
diff --git a/kernel/drivers/mmc/host/mmc_spi.c b/kernel/drivers/mmc/host/mmc_spi.c
index ae19d83bb..1c1b45ef3 100644
--- a/kernel/drivers/mmc/host/mmc_spi.c
+++ b/kernel/drivers/mmc/host/mmc_spi.c
@@ -1511,11 +1511,11 @@ static const struct of_device_id mmc_spi_of_match_table[] = {
{ .compatible = "mmc-spi-slot", },
{},
};
+MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);
static struct spi_driver mmc_spi_driver = {
.driver = {
.name = "mmc_spi",
- .owner = THIS_MODULE,
.of_match_table = mmc_spi_of_match_table,
},
.probe = mmc_spi_probe,
diff --git a/kernel/drivers/mmc/host/mmci.c b/kernel/drivers/mmc/host/mmci.c
index d42fc084d..58ea04a03 100644
--- a/kernel/drivers/mmc/host/mmci.c
+++ b/kernel/drivers/mmc/host/mmci.c
@@ -1881,7 +1881,7 @@ static struct amba_id mmci_ids[] = {
{
.id = 0x00280180,
.mask = 0x00ffffff,
- .data = &variant_u300,
+ .data = &variant_nomadik,
},
{
.id = 0x00480180,
diff --git a/kernel/drivers/mmc/host/moxart-mmc.c b/kernel/drivers/mmc/host/moxart-mmc.c
index 006f18624..79905ce89 100644
--- a/kernel/drivers/mmc/host/moxart-mmc.c
+++ b/kernel/drivers/mmc/host/moxart-mmc.c
@@ -711,6 +711,7 @@ static const struct of_device_id moxart_mmc_match[] = {
{ .compatible = "faraday,ftsdc010" },
{ }
};
+MODULE_DEVICE_TABLE(of, moxart_mmc_match);
static struct platform_driver moxart_mmc_driver = {
.probe = moxart_probe,
diff --git a/kernel/drivers/mmc/host/mtk-sd.c b/kernel/drivers/mmc/host/mtk-sd.c
new file mode 100644
index 000000000..33dfd7e72
--- /dev/null
+++ b/kernel/drivers/mmc/host/mtk-sd.c
@@ -0,0 +1,1708 @@
+/*
+ * Copyright (c) 2014-2015 MediaTek Inc.
+ * Author: Chaotian.Jing <chaotian.jing@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio.h>
+
+#define MAX_BD_NUM 1024
+
+/*--------------------------------------------------------------------------*/
+/* Common Definition */
+/*--------------------------------------------------------------------------*/
+#define MSDC_BUS_1BITS 0x0
+#define MSDC_BUS_4BITS 0x1
+#define MSDC_BUS_8BITS 0x2
+
+#define MSDC_BURST_64B 0x6
+
+/*--------------------------------------------------------------------------*/
+/* Register Offset */
+/*--------------------------------------------------------------------------*/
+#define MSDC_CFG 0x0
+#define MSDC_IOCON 0x04
+#define MSDC_PS 0x08
+#define MSDC_INT 0x0c
+#define MSDC_INTEN 0x10
+#define MSDC_FIFOCS 0x14
+#define SDC_CFG 0x30
+#define SDC_CMD 0x34
+#define SDC_ARG 0x38
+#define SDC_STS 0x3c
+#define SDC_RESP0 0x40
+#define SDC_RESP1 0x44
+#define SDC_RESP2 0x48
+#define SDC_RESP3 0x4c
+#define SDC_BLK_NUM 0x50
+#define EMMC_IOCON 0x7c
+#define SDC_ACMD_RESP 0x80
+#define MSDC_DMA_SA 0x90
+#define MSDC_DMA_CTRL 0x98
+#define MSDC_DMA_CFG 0x9c
+#define MSDC_PATCH_BIT 0xb0
+#define MSDC_PATCH_BIT1 0xb4
+#define MSDC_PAD_TUNE 0xec
+#define PAD_DS_TUNE 0x188
+#define EMMC50_CFG0 0x208
+
+/*--------------------------------------------------------------------------*/
+/* Register Mask */
+/*--------------------------------------------------------------------------*/
+
+/* MSDC_CFG mask */
+#define MSDC_CFG_MODE (0x1 << 0) /* RW */
+#define MSDC_CFG_CKPDN (0x1 << 1) /* RW */
+#define MSDC_CFG_RST (0x1 << 2) /* RW */
+#define MSDC_CFG_PIO (0x1 << 3) /* RW */
+#define MSDC_CFG_CKDRVEN (0x1 << 4) /* RW */
+#define MSDC_CFG_BV18SDT (0x1 << 5) /* RW */
+#define MSDC_CFG_BV18PSS (0x1 << 6) /* R */
+#define MSDC_CFG_CKSTB (0x1 << 7) /* R */
+#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
+#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
+#define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */
+
+/* MSDC_IOCON mask */
+#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
+#define MSDC_IOCON_RSPL (0x1 << 1) /* RW */
+#define MSDC_IOCON_DSPL (0x1 << 2) /* RW */
+#define MSDC_IOCON_DDLSEL (0x1 << 3) /* RW */
+#define MSDC_IOCON_DDR50CKD (0x1 << 4) /* RW */
+#define MSDC_IOCON_DSPLSEL (0x1 << 5) /* RW */
+#define MSDC_IOCON_W_DSPL (0x1 << 8) /* RW */
+#define MSDC_IOCON_D0SPL (0x1 << 16) /* RW */
+#define MSDC_IOCON_D1SPL (0x1 << 17) /* RW */
+#define MSDC_IOCON_D2SPL (0x1 << 18) /* RW */
+#define MSDC_IOCON_D3SPL (0x1 << 19) /* RW */
+#define MSDC_IOCON_D4SPL (0x1 << 20) /* RW */
+#define MSDC_IOCON_D5SPL (0x1 << 21) /* RW */
+#define MSDC_IOCON_D6SPL (0x1 << 22) /* RW */
+#define MSDC_IOCON_D7SPL (0x1 << 23) /* RW */
+#define MSDC_IOCON_RISCSZ (0x3 << 24) /* RW */
+
+/* MSDC_PS mask */
+#define MSDC_PS_CDEN (0x1 << 0) /* RW */
+#define MSDC_PS_CDSTS (0x1 << 1) /* R */
+#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */
+#define MSDC_PS_DAT (0xff << 16) /* R */
+#define MSDC_PS_CMD (0x1 << 24) /* R */
+#define MSDC_PS_WP (0x1 << 31) /* R */
+
+/* MSDC_INT mask */
+#define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */
+#define MSDC_INT_CDSC (0x1 << 1) /* W1C */
+#define MSDC_INT_ACMDRDY (0x1 << 3) /* W1C */
+#define MSDC_INT_ACMDTMO (0x1 << 4) /* W1C */
+#define MSDC_INT_ACMDCRCERR (0x1 << 5) /* W1C */
+#define MSDC_INT_DMAQ_EMPTY (0x1 << 6) /* W1C */
+#define MSDC_INT_SDIOIRQ (0x1 << 7) /* W1C */
+#define MSDC_INT_CMDRDY (0x1 << 8) /* W1C */
+#define MSDC_INT_CMDTMO (0x1 << 9) /* W1C */
+#define MSDC_INT_RSPCRCERR (0x1 << 10) /* W1C */
+#define MSDC_INT_CSTA (0x1 << 11) /* R */
+#define MSDC_INT_XFER_COMPL (0x1 << 12) /* W1C */
+#define MSDC_INT_DXFER_DONE (0x1 << 13) /* W1C */
+#define MSDC_INT_DATTMO (0x1 << 14) /* W1C */
+#define MSDC_INT_DATCRCERR (0x1 << 15) /* W1C */
+#define MSDC_INT_ACMD19_DONE (0x1 << 16) /* W1C */
+#define MSDC_INT_DMA_BDCSERR (0x1 << 17) /* W1C */
+#define MSDC_INT_DMA_GPDCSERR (0x1 << 18) /* W1C */
+#define MSDC_INT_DMA_PROTECT (0x1 << 19) /* W1C */
+
+/* MSDC_INTEN mask */
+#define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */
+#define MSDC_INTEN_CDSC (0x1 << 1) /* RW */
+#define MSDC_INTEN_ACMDRDY (0x1 << 3) /* RW */
+#define MSDC_INTEN_ACMDTMO (0x1 << 4) /* RW */
+#define MSDC_INTEN_ACMDCRCERR (0x1 << 5) /* RW */
+#define MSDC_INTEN_DMAQ_EMPTY (0x1 << 6) /* RW */
+#define MSDC_INTEN_SDIOIRQ (0x1 << 7) /* RW */
+#define MSDC_INTEN_CMDRDY (0x1 << 8) /* RW */
+#define MSDC_INTEN_CMDTMO (0x1 << 9) /* RW */
+#define MSDC_INTEN_RSPCRCERR (0x1 << 10) /* RW */
+#define MSDC_INTEN_CSTA (0x1 << 11) /* RW */
+#define MSDC_INTEN_XFER_COMPL (0x1 << 12) /* RW */
+#define MSDC_INTEN_DXFER_DONE (0x1 << 13) /* RW */
+#define MSDC_INTEN_DATTMO (0x1 << 14) /* RW */
+#define MSDC_INTEN_DATCRCERR (0x1 << 15) /* RW */
+#define MSDC_INTEN_ACMD19_DONE (0x1 << 16) /* RW */
+#define MSDC_INTEN_DMA_BDCSERR (0x1 << 17) /* RW */
+#define MSDC_INTEN_DMA_GPDCSERR (0x1 << 18) /* RW */
+#define MSDC_INTEN_DMA_PROTECT (0x1 << 19) /* RW */
+
+/* MSDC_FIFOCS mask */
+#define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */
+#define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */
+#define MSDC_FIFOCS_CLR (0x1 << 31) /* RW */
+
+/* SDC_CFG mask */
+#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */
+#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */
+#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */
+#define SDC_CFG_SDIO (0x1 << 19) /* RW */
+#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */
+#define SDC_CFG_INTATGAP (0x1 << 21) /* RW */
+#define SDC_CFG_DTOC (0xff << 24) /* RW */
+
+/* SDC_STS mask */
+#define SDC_STS_SDCBUSY (0x1 << 0) /* RW */
+#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */
+#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */
+
+/* MSDC_DMA_CTRL mask */
+#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */
+#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */
+#define MSDC_DMA_CTRL_RESUME (0x1 << 2) /* W */
+#define MSDC_DMA_CTRL_MODE (0x1 << 8) /* RW */
+#define MSDC_DMA_CTRL_LASTBUF (0x1 << 10) /* RW */
+#define MSDC_DMA_CTRL_BRUSTSZ (0x7 << 12) /* RW */
+
+/* MSDC_DMA_CFG mask */
+#define MSDC_DMA_CFG_STS (0x1 << 0) /* R */
+#define MSDC_DMA_CFG_DECSEN (0x1 << 1) /* RW */
+#define MSDC_DMA_CFG_AHBHPROT2 (0x2 << 8) /* RW */
+#define MSDC_DMA_CFG_ACTIVEEN (0x2 << 12) /* RW */
+#define MSDC_DMA_CFG_CS12B16B (0x1 << 16) /* RW */
+
+/* MSDC_PATCH_BIT mask */
+#define MSDC_PATCH_BIT_ODDSUPP (0x1 << 1) /* RW */
+#define MSDC_INT_DAT_LATCH_CK_SEL (0x7 << 7)
+#define MSDC_CKGEN_MSDC_DLY_SEL (0x1f << 10)
+#define MSDC_PATCH_BIT_IODSSEL (0x1 << 16) /* RW */
+#define MSDC_PATCH_BIT_IOINTSEL (0x1 << 17) /* RW */
+#define MSDC_PATCH_BIT_BUSYDLY (0xf << 18) /* RW */
+#define MSDC_PATCH_BIT_WDOD (0xf << 22) /* RW */
+#define MSDC_PATCH_BIT_IDRTSEL (0x1 << 26) /* RW */
+#define MSDC_PATCH_BIT_CMDFSEL (0x1 << 27) /* RW */
+#define MSDC_PATCH_BIT_INTDLSEL (0x1 << 28) /* RW */
+#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
+#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
+
+#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
+#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
+
+#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
+#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
+#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */
+
+#define EMMC50_CFG_PADCMD_LATCHCK (0x1 << 0) /* RW */
+#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */
+#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */
+
+#define REQ_CMD_EIO (0x1 << 0)
+#define REQ_CMD_TMO (0x1 << 1)
+#define REQ_DAT_ERR (0x1 << 2)
+#define REQ_STOP_EIO (0x1 << 3)
+#define REQ_STOP_TMO (0x1 << 4)
+#define REQ_CMD_BUSY (0x1 << 5)
+
+#define MSDC_PREPARE_FLAG (0x1 << 0)
+#define MSDC_ASYNC_FLAG (0x1 << 1)
+#define MSDC_MMAP_FLAG (0x1 << 2)
+
+#define MTK_MMC_AUTOSUSPEND_DELAY 50
+#define CMD_TIMEOUT (HZ/10 * 5) /* 100ms x5 */
+#define DAT_TIMEOUT (HZ * 5) /* 1000ms x5 */
+
+#define PAD_DELAY_MAX 32 /* PAD delay cells */
+/*--------------------------------------------------------------------------*/
+/* Descriptor Structure */
+/*--------------------------------------------------------------------------*/
+struct mt_gpdma_desc {
+ u32 gpd_info;
+#define GPDMA_DESC_HWO (0x1 << 0)
+#define GPDMA_DESC_BDP (0x1 << 1)
+#define GPDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */
+#define GPDMA_DESC_INT (0x1 << 16)
+ u32 next;
+ u32 ptr;
+ u32 gpd_data_len;
+#define GPDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */
+#define GPDMA_DESC_EXTLEN (0xff << 16) /* bit16 ~ bit23 */
+ u32 arg;
+ u32 blknum;
+ u32 cmd;
+};
+
+struct mt_bdma_desc {
+ u32 bd_info;
+#define BDMA_DESC_EOL (0x1 << 0)
+#define BDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */
+#define BDMA_DESC_BLKPAD (0x1 << 17)
+#define BDMA_DESC_DWPAD (0x1 << 18)
+ u32 next;
+ u32 ptr;
+ u32 bd_data_len;
+#define BDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */
+};
+
+struct msdc_dma {
+ struct scatterlist *sg; /* I/O scatter list */
+ struct mt_gpdma_desc *gpd; /* pointer to gpd array */
+ struct mt_bdma_desc *bd; /* pointer to bd array */
+ dma_addr_t gpd_addr; /* the physical address of gpd array */
+ dma_addr_t bd_addr; /* the physical address of bd array */
+};
+
+struct msdc_save_para {
+ u32 msdc_cfg;
+ u32 iocon;
+ u32 sdc_cfg;
+ u32 pad_tune;
+ u32 patch_bit0;
+ u32 patch_bit1;
+ u32 pad_ds_tune;
+ u32 emmc50_cfg0;
+};
+
+struct msdc_delay_phase {
+ u8 maxlen;
+ u8 start;
+ u8 final_phase;
+};
+
+struct msdc_host {
+ struct device *dev;
+ struct mmc_host *mmc; /* mmc structure */
+ int cmd_rsp;
+
+ spinlock_t lock;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ int error;
+
+ void __iomem *base; /* host base address */
+
+ struct msdc_dma dma; /* dma channel */
+ u64 dma_mask;
+
+ u32 timeout_ns; /* data timeout ns */
+ u32 timeout_clks; /* data timeout clks */
+
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_uhs;
+ struct delayed_work req_timeout;
+ int irq; /* host interrupt */
+
+ struct clk *src_clk; /* msdc source clock */
+ struct clk *h_clk; /* msdc h_clk */
+ u32 mclk; /* mmc subsystem clock frequency */
+ u32 src_clk_freq; /* source clock frequency */
+ u32 sclk; /* SD/MS bus clock frequency */
+ unsigned char timing;
+ bool vqmmc_enabled;
+ u32 hs400_ds_delay;
+ struct msdc_save_para save_para; /* used when gate HCLK */
+};
+
+static void sdr_set_bits(void __iomem *reg, u32 bs)
+{
+ u32 val = readl(reg);
+
+ val |= bs;
+ writel(val, reg);
+}
+
+static void sdr_clr_bits(void __iomem *reg, u32 bs)
+{
+ u32 val = readl(reg);
+
+ val &= ~bs;
+ writel(val, reg);
+}
+
+static void sdr_set_field(void __iomem *reg, u32 field, u32 val)
+{
+ unsigned int tv = readl(reg);
+
+ tv &= ~field;
+ tv |= ((val) << (ffs((unsigned int)field) - 1));
+ writel(tv, reg);
+}
+
+static void sdr_get_field(void __iomem *reg, u32 field, u32 *val)
+{
+ unsigned int tv = readl(reg);
+
+ *val = ((tv & field) >> (ffs((unsigned int)field) - 1));
+}
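/*
 * Editor's note (not part of the patch): what the ffs()-based helpers above do
 * for a multi-bit field.  With MSDC_CFG_CKDIV == (0xff << 8), ffs(field) is 9,
 * so the value is shifted left by 8 before being merged:
 *
 *	sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKDIV, 3);
 *
 * clears bits 15..8 of MSDC_CFG and writes 3 into them, leaving every other
 * bit untouched; sdr_get_field() performs the inverse extraction.
 */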
+
+static void msdc_reset_hw(struct msdc_host *host)
+{
+ u32 val;
+
+ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
+ while (readl(host->base + MSDC_CFG) & MSDC_CFG_RST)
+ cpu_relax();
+
+ sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
+ while (readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_CLR)
+ cpu_relax();
+
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
+}
+
+static void msdc_cmd_next(struct msdc_host *host,
+ struct mmc_request *mrq, struct mmc_command *cmd);
+
+static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR |
+ MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY |
+ MSDC_INTEN_ACMDCRCERR | MSDC_INTEN_ACMDTMO;
+static const u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO |
+ MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR |
+ MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT;
+
+static u8 msdc_dma_calcs(u8 *buf, u32 len)
+{
+ u32 i, sum = 0;
+
+ for (i = 0; i < len; i++)
+ sum += buf[i];
+ return 0xff - (u8) sum;
+}
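/*
 * Editor's note (not part of the patch): the GPD/BD checksum computed by
 * msdc_dma_calcs() above is the byte-wise sum complemented against 0xff.
 * For the bytes {0x01, 0x02, 0x03} the sum is 0x06, so the stored checksum is
 * 0xff - 0x06 = 0xf9 (the sum is truncated to a u8 first if it overflows).
 */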
+
+static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
+ struct mmc_data *data)
+{
+ unsigned int j, dma_len;
+ dma_addr_t dma_address;
+ u32 dma_ctrl;
+ struct scatterlist *sg;
+ struct mt_gpdma_desc *gpd;
+ struct mt_bdma_desc *bd;
+
+ sg = data->sg;
+
+ gpd = dma->gpd;
+ bd = dma->bd;
+
+ /* modify gpd */
+ gpd->gpd_info |= GPDMA_DESC_HWO;
+ gpd->gpd_info |= GPDMA_DESC_BDP;
+ /* need to clear first. use these bits to calc checksum */
+ gpd->gpd_info &= ~GPDMA_DESC_CHECKSUM;
+ gpd->gpd_info |= msdc_dma_calcs((u8 *) gpd, 16) << 8;
+
+ /* modify bd */
+ for_each_sg(data->sg, sg, data->sg_count, j) {
+ dma_address = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+
+ /* init bd */
+ bd[j].bd_info &= ~BDMA_DESC_BLKPAD;
+ bd[j].bd_info &= ~BDMA_DESC_DWPAD;
+ bd[j].ptr = (u32)dma_address;
+ bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN;
+ bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN);
+
+ if (j == data->sg_count - 1) /* the last bd */
+ bd[j].bd_info |= BDMA_DESC_EOL;
+ else
+ bd[j].bd_info &= ~BDMA_DESC_EOL;
+
+ /* checksum needs to be cleared first */
+ bd[j].bd_info &= ~BDMA_DESC_CHECKSUM;
+ bd[j].bd_info |= msdc_dma_calcs((u8 *)(&bd[j]), 16) << 8;
+ }
+
+ sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
+ dma_ctrl = readl_relaxed(host->base + MSDC_DMA_CTRL);
+ dma_ctrl &= ~(MSDC_DMA_CTRL_BRUSTSZ | MSDC_DMA_CTRL_MODE);
+ dma_ctrl |= (MSDC_BURST_64B << 12 | 1 << 8);
+ writel_relaxed(dma_ctrl, host->base + MSDC_DMA_CTRL);
+ writel((u32)dma->gpd_addr, host->base + MSDC_DMA_SA);
+}
+
+static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
+ bool read = (data->flags & MMC_DATA_READ) != 0;
+
+ data->host_cookie |= MSDC_PREPARE_FLAG;
+ data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
+ read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static void msdc_unprepare_data(struct msdc_host *host, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (data->host_cookie & MSDC_ASYNC_FLAG)
+ return;
+
+ if (data->host_cookie & MSDC_PREPARE_FLAG) {
+ bool read = (data->flags & MMC_DATA_READ) != 0;
+
+ dma_unmap_sg(host->dev, data->sg, data->sg_len,
+ read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ data->host_cookie &= ~MSDC_PREPARE_FLAG;
+ }
+}
+
+/* clock control primitives */
+static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
+{
+ u32 timeout, clk_ns;
+ u32 mode = 0;
+
+ host->timeout_ns = ns;
+ host->timeout_clks = clks;
+ if (host->sclk == 0) {
+ timeout = 0;
+ } else {
+ clk_ns = 1000000000UL / host->sclk;
+ timeout = (ns + clk_ns - 1) / clk_ns + clks;
+ /* in 1048576 sclk cycle unit */
+ timeout = (timeout + (0x1 << 20) - 1) >> 20;
+ sdr_get_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD, &mode);
+ /* DDR mode will double the clk cycles for data timeout */
+ timeout = mode >= 2 ? timeout * 2 : timeout;
+ timeout = timeout > 1 ? timeout - 1 : 0;
+ timeout = timeout > 255 ? 255 : timeout;
+ }
+ sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, timeout);
+}
+
+static void msdc_gate_clock(struct msdc_host *host)
+{
+ clk_disable_unprepare(host->src_clk);
+ clk_disable_unprepare(host->h_clk);
+}
+
+static void msdc_ungate_clock(struct msdc_host *host)
+{
+ clk_prepare_enable(host->h_clk);
+ clk_prepare_enable(host->src_clk);
+ while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
+ cpu_relax();
+}
+
+static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
+{
+ u32 mode;
+ u32 flags;
+ u32 div;
+ u32 sclk;
+
+ if (!hz) {
+ dev_dbg(host->dev, "set mclk to 0\n");
+ host->mclk = 0;
+ sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+ return;
+ }
+
+ flags = readl(host->base + MSDC_INTEN);
+ sdr_clr_bits(host->base + MSDC_INTEN, flags);
+ sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
+ if (timing == MMC_TIMING_UHS_DDR50 ||
+ timing == MMC_TIMING_MMC_DDR52 ||
+ timing == MMC_TIMING_MMC_HS400) {
+ if (timing == MMC_TIMING_MMC_HS400)
+ mode = 0x3;
+ else
+ mode = 0x2; /* ddr mode and use divisor */
+
+ if (hz >= (host->src_clk_freq >> 2)) {
+ div = 0; /* means div = 1/4 */
+ sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */
+ } else {
+ div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
+ sclk = (host->src_clk_freq >> 2) / div;
+ div = (div >> 1);
+ }
+
+ if (timing == MMC_TIMING_MMC_HS400 &&
+ hz >= (host->src_clk_freq >> 1)) {
+ sdr_set_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE);
+ sclk = host->src_clk_freq >> 1;
+ div = 0; /* div is ignored when bit 18 is set */
+ }
+ } else if (hz >= host->src_clk_freq) {
+ mode = 0x1; /* no divisor */
+ div = 0;
+ sclk = host->src_clk_freq;
+ } else {
+ mode = 0x0; /* use divisor */
+ if (hz >= (host->src_clk_freq >> 1)) {
+ div = 0; /* means div = 1/2 */
+ sclk = host->src_clk_freq >> 1; /* sclk = clk / 2 */
+ } else {
+ div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
+ sclk = (host->src_clk_freq >> 2) / div;
+ }
+ }
+ sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
+ (mode << 8) | (div & 0xff));
+ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+ while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
+ cpu_relax();
+ host->sclk = sclk;
+ host->mclk = hz;
+ host->timing = timing;
+ /* needed because the clock changed */
+ msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
+ sdr_set_bits(host->base + MSDC_INTEN, flags);
+
+ dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
+}
+
+static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
+ struct mmc_request *mrq, struct mmc_command *cmd)
+{
+ u32 resp;
+
+ switch (mmc_resp_type(cmd)) {
+ /* Actually, R1, R5, R6, R7 are the same */
+ case MMC_RSP_R1:
+ resp = 0x1;
+ break;
+ case MMC_RSP_R1B:
+ resp = 0x7;
+ break;
+ case MMC_RSP_R2:
+ resp = 0x2;
+ break;
+ case MMC_RSP_R3:
+ resp = 0x3;
+ break;
+ case MMC_RSP_NONE:
+ default:
+ resp = 0x0;
+ break;
+ }
+
+ return resp;
+}
+
+static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
+ struct mmc_request *mrq, struct mmc_command *cmd)
+{
+ /* rawcmd :
+ * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
+ * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
+ */
+ u32 opcode = cmd->opcode;
+ u32 resp = msdc_cmd_find_resp(host, mrq, cmd);
+ u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7);
+
+ host->cmd_rsp = resp;
+
+ if ((opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int) -1) ||
+ opcode == MMC_STOP_TRANSMISSION)
+ rawcmd |= (0x1 << 14);
+ else if (opcode == SD_SWITCH_VOLTAGE)
+ rawcmd |= (0x1 << 30);
+ else if (opcode == SD_APP_SEND_SCR ||
+ opcode == SD_APP_SEND_NUM_WR_BLKS ||
+ (opcode == SD_SWITCH && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
+ (opcode == SD_APP_SD_STATUS && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
+ (opcode == MMC_SEND_EXT_CSD && mmc_cmd_type(cmd) == MMC_CMD_ADTC))
+ rawcmd |= (0x1 << 11);
+
+ if (cmd->data) {
+ struct mmc_data *data = cmd->data;
+
+ if (mmc_op_multi(opcode)) {
+ if (mmc_card_mmc(host->mmc->card) && mrq->sbc &&
+ !(mrq->sbc->arg & 0xFFFF0000))
+ rawcmd |= 0x2 << 28; /* AutoCMD23 */
+ }
+
+ rawcmd |= ((data->blksz & 0xFFF) << 16);
+ if (data->flags & MMC_DATA_WRITE)
+ rawcmd |= (0x1 << 13);
+ if (data->blocks > 1)
+ rawcmd |= (0x2 << 11);
+ else
+ rawcmd |= (0x1 << 11);
+ /* Always use dma mode */
+ sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO);
+
+ if (host->timeout_ns != data->timeout_ns ||
+ host->timeout_clks != data->timeout_clks)
+ msdc_set_timeout(host, data->timeout_ns,
+ data->timeout_clks);
+
+ writel(data->blocks, host->base + SDC_BLK_NUM);
+ }
+ return rawcmd;
+}
+
+static void msdc_start_data(struct msdc_host *host, struct mmc_request *mrq,
+ struct mmc_command *cmd, struct mmc_data *data)
+{
+ bool read;
+
+ WARN_ON(host->data);
+ host->data = data;
+ read = data->flags & MMC_DATA_READ;
+
+ mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
+ msdc_dma_setup(host, &host->dma, data);
+ sdr_set_bits(host->base + MSDC_INTEN, data_ints_mask);
+ sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
+ dev_dbg(host->dev, "DMA start\n");
+ dev_dbg(host->dev, "%s: cmd=%d DMA data: %d blocks; read=%d\n",
+ __func__, cmd->opcode, data->blocks, read);
+}
+
+static int msdc_auto_cmd_done(struct msdc_host *host, int events,
+ struct mmc_command *cmd)
+{
+ u32 *rsp = cmd->resp;
+
+ rsp[0] = readl(host->base + SDC_ACMD_RESP);
+
+ if (events & MSDC_INT_ACMDRDY) {
+ cmd->error = 0;
+ } else {
+ msdc_reset_hw(host);
+ if (events & MSDC_INT_ACMDCRCERR) {
+ cmd->error = -EILSEQ;
+ host->error |= REQ_STOP_EIO;
+ } else if (events & MSDC_INT_ACMDTMO) {
+ cmd->error = -ETIMEDOUT;
+ host->error |= REQ_STOP_TMO;
+ }
+ dev_err(host->dev,
+ "%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n",
+ __func__, cmd->opcode, cmd->arg, rsp[0], cmd->error);
+ }
+ return cmd->error;
+}
+
+static void msdc_track_cmd_data(struct msdc_host *host,
+ struct mmc_command *cmd, struct mmc_data *data)
+{
+ if (host->error)
+ dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n",
+ __func__, cmd->opcode, cmd->arg, host->error);
+}
+
+static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
+{
+ unsigned long flags;
+ bool ret;
+
+ ret = cancel_delayed_work(&host->req_timeout);
+ if (!ret) {
+ /* delayed work is already running */
+ return;
+ }
+ spin_lock_irqsave(&host->lock, flags);
+ host->mrq = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ msdc_track_cmd_data(host, mrq->cmd, mrq->data);
+ if (mrq->data)
+ msdc_unprepare_data(host, mrq);
+ mmc_request_done(host->mmc, mrq);
+
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
+}
+
+/* returns true if command is fully handled; returns false otherwise */
+static bool msdc_cmd_done(struct msdc_host *host, int events,
+ struct mmc_request *mrq, struct mmc_command *cmd)
+{
+ bool done = false;
+ bool sbc_error;
+ unsigned long flags;
+ u32 *rsp = cmd->resp;
+
+ if (mrq->sbc && cmd == mrq->cmd &&
+ (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR
+ | MSDC_INT_ACMDTMO)))
+ msdc_auto_cmd_done(host, events, mrq->sbc);
+
+ sbc_error = mrq->sbc && mrq->sbc->error;
+
+ if (!sbc_error && !(events & (MSDC_INT_CMDRDY
+ | MSDC_INT_RSPCRCERR
+ | MSDC_INT_CMDTMO)))
+ return done;
+
+ spin_lock_irqsave(&host->lock, flags);
+ done = !host->cmd;
+ host->cmd = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (done)
+ return true;
+
+ sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ rsp[0] = readl(host->base + SDC_RESP3);
+ rsp[1] = readl(host->base + SDC_RESP2);
+ rsp[2] = readl(host->base + SDC_RESP1);
+ rsp[3] = readl(host->base + SDC_RESP0);
+ } else {
+ rsp[0] = readl(host->base + SDC_RESP0);
+ }
+ }
+
+ if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
+ msdc_reset_hw(host);
+ if (events & MSDC_INT_RSPCRCERR) {
+ cmd->error = -EILSEQ;
+ host->error |= REQ_CMD_EIO;
+ } else if (events & MSDC_INT_CMDTMO) {
+ cmd->error = -ETIMEDOUT;
+ host->error |= REQ_CMD_TMO;
+ }
+ }
+ if (cmd->error)
+ dev_dbg(host->dev,
+ "%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n",
+ __func__, cmd->opcode, cmd->arg, rsp[0],
+ cmd->error);
+
+ msdc_cmd_next(host, mrq, cmd);
+ return true;
+}
+
+/* It is the core layer's responsibility to ensure card status
+ * is correct before issuing a request, but the host design
+ * recommends doing the checks below anyway.
+ */
+static inline bool msdc_cmd_is_ready(struct msdc_host *host,
+ struct mmc_request *mrq, struct mmc_command *cmd)
+{
+ /* The max busy time we can endure is 20ms */
+ unsigned long tmo = jiffies + msecs_to_jiffies(20);
+
+ while ((readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) &&
+ time_before(jiffies, tmo))
+ cpu_relax();
+ if (readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) {
+ dev_err(host->dev, "CMD bus busy detected\n");
+ host->error |= REQ_CMD_BUSY;
+ msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
+ return false;
+ }
+
+ if (mmc_resp_type(cmd) == MMC_RSP_R1B || cmd->data) {
+ tmo = jiffies + msecs_to_jiffies(20);
+ /* R1B or with data, should check SDCBUSY */
+ while ((readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) &&
+ time_before(jiffies, tmo))
+ cpu_relax();
+ if (readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) {
+ dev_err(host->dev, "Controller busy detected\n");
+ host->error |= REQ_CMD_BUSY;
+ msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void msdc_start_command(struct msdc_host *host,
+ struct mmc_request *mrq, struct mmc_command *cmd)
+{
+ u32 rawcmd;
+
+ WARN_ON(host->cmd);
+ host->cmd = cmd;
+
+ if (!msdc_cmd_is_ready(host, mrq, cmd))
+ return;
+
+ if ((readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16 ||
+ readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) {
+ dev_err(host->dev, "TX/RX FIFO non-empty before start of IO. Reset\n");
+ msdc_reset_hw(host);
+ }
+
+ cmd->error = 0;
+ rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
+ mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
+
+ sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
+ writel(cmd->arg, host->base + SDC_ARG);
+ writel(rawcmd, host->base + SDC_CMD);
+}
+
+static void msdc_cmd_next(struct msdc_host *host,
+ struct mmc_request *mrq, struct mmc_command *cmd)
+{
+ if (cmd->error || (mrq->sbc && mrq->sbc->error))
+ msdc_request_done(host, mrq);
+ else if (cmd == mrq->sbc)
+ msdc_start_command(host, mrq, mrq->cmd);
+ else if (!cmd->data)
+ msdc_request_done(host, mrq);
+ else
+ msdc_start_data(host, mrq, cmd, cmd->data);
+}
+
+static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+
+ host->error = 0;
+ WARN_ON(host->mrq);
+ host->mrq = mrq;
+
+ pm_runtime_get_sync(host->dev);
+
+ if (mrq->data)
+ msdc_prepare_data(host, mrq);
+
+ /* If SBC is required, there is a HW option and a SW option.
+ * If the HW option is enabled and SBC does not have "special" flags,
+ * use the HW option; otherwise use the SW option.
+ */
+ if (mrq->sbc && (!mmc_card_mmc(mmc->card) ||
+ (mrq->sbc->arg & 0xFFFF0000)))
+ msdc_start_command(host, mrq, mrq->sbc);
+ else
+ msdc_start_command(host, mrq, mrq->cmd);
+}
+
+static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return;
+
+ msdc_prepare_data(host, mrq);
+ data->host_cookie |= MSDC_ASYNC_FLAG;
+}
+
+static void msdc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ struct mmc_data *data;
+
+ data = mrq->data;
+ if (!data)
+ return;
+ if (data->host_cookie) {
+ data->host_cookie &= ~MSDC_ASYNC_FLAG;
+ msdc_unprepare_data(host, mrq);
+ }
+}
+
+static void msdc_data_xfer_next(struct msdc_host *host,
+ struct mmc_request *mrq, struct mmc_data *data)
+{
+ if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error &&
+ !mrq->sbc)
+ msdc_start_command(host, mrq, mrq->stop);
+ else
+ msdc_request_done(host, mrq);
+}
+
+static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
+ struct mmc_request *mrq, struct mmc_data *data)
+{
+ struct mmc_command *stop = data->stop;
+ unsigned long flags;
+ bool done;
+ unsigned int check_data = events &
+ (MSDC_INT_XFER_COMPL | MSDC_INT_DATCRCERR | MSDC_INT_DATTMO
+ | MSDC_INT_DMA_BDCSERR | MSDC_INT_DMA_GPDCSERR
+ | MSDC_INT_DMA_PROTECT);
+
+ spin_lock_irqsave(&host->lock, flags);
+ done = !host->data;
+ if (check_data)
+ host->data = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (done)
+ return true;
+
+ if (check_data || (stop && stop->error)) {
+ dev_dbg(host->dev, "DMA status: 0x%8X\n",
+ readl(host->base + MSDC_DMA_CFG));
+ sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP,
+ 1);
+ while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS)
+ cpu_relax();
+ sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask);
+ dev_dbg(host->dev, "DMA stop\n");
+
+ if ((events & MSDC_INT_XFER_COMPL) && (!stop || !stop->error)) {
+ data->bytes_xfered = data->blocks * data->blksz;
+ } else {
+ dev_err(host->dev, "interrupt events: %x\n", events);
+ msdc_reset_hw(host);
+ host->error |= REQ_DAT_ERR;
+ data->bytes_xfered = 0;
+
+ if (events & MSDC_INT_DATTMO)
+ data->error = -ETIMEDOUT;
+ else if (events & MSDC_INT_DATCRCERR)
+ data->error = -EILSEQ;
+
+ dev_err(host->dev, "%s: cmd=%d; blocks=%d",
+ __func__, mrq->cmd->opcode, data->blocks);
+ dev_err(host->dev, "data_error=%d xfer_size=%d\n",
+ (int)data->error, data->bytes_xfered);
+ }
+
+ msdc_data_xfer_next(host, mrq, data);
+ done = true;
+ }
+ return done;
+}
+
+static void msdc_set_buswidth(struct msdc_host *host, u32 width)
+{
+ u32 val = readl(host->base + SDC_CFG);
+
+ val &= ~SDC_CFG_BUSWIDTH;
+
+ switch (width) {
+ default:
+ case MMC_BUS_WIDTH_1:
+ val |= (MSDC_BUS_1BITS << 16);
+ break;
+ case MMC_BUS_WIDTH_4:
+ val |= (MSDC_BUS_4BITS << 16);
+ break;
+ case MMC_BUS_WIDTH_8:
+ val |= (MSDC_BUS_8BITS << 16);
+ break;
+ }
+
+ writel(val, host->base + SDC_CFG);
+ dev_dbg(host->dev, "Bus Width = %d", width);
+}
+
+static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ int min_uv, max_uv;
+ int ret = 0;
+
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ min_uv = 3300000;
+ max_uv = 3300000;
+ } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ min_uv = 1800000;
+ max_uv = 1800000;
+ } else {
+ dev_err(host->dev, "Unsupported signal voltage!\n");
+ return -EINVAL;
+ }
+
+ ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
+ if (ret) {
+ dev_err(host->dev,
+ "Regulator set error %d: %d - %d\n",
+ ret, min_uv, max_uv);
+ } else {
+ /* Apply different pinctrl settings for different signal voltage */
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
+ pinctrl_select_state(host->pinctrl, host->pins_uhs);
+ else
+ pinctrl_select_state(host->pinctrl, host->pins_default);
+ }
+ }
+ return ret;
+}
+
+static int msdc_card_busy(struct mmc_host *mmc)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ u32 status = readl(host->base + MSDC_PS);
+
+ /* check if any of the DAT[0:3] pins is low */
+ if (((status >> 16) & 0xf) != 0xf)
+ return 1;
+
+ return 0;
+}
+
+static void msdc_request_timeout(struct work_struct *work)
+{
+ struct msdc_host *host = container_of(work, struct msdc_host,
+ req_timeout.work);
+
+ /* simulate HW timeout status */
+ dev_err(host->dev, "%s: aborting cmd/data/mrq\n", __func__);
+ if (host->mrq) {
+ dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__,
+ host->mrq, host->mrq->cmd->opcode);
+ if (host->cmd) {
+ dev_err(host->dev, "%s: aborting cmd=%d\n",
+ __func__, host->cmd->opcode);
+ msdc_cmd_done(host, MSDC_INT_CMDTMO, host->mrq,
+ host->cmd);
+ } else if (host->data) {
+ dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n",
+ __func__, host->mrq->cmd->opcode,
+ host->data->blocks);
+ msdc_data_xfer_done(host, MSDC_INT_DATTMO, host->mrq,
+ host->data);
+ }
+ }
+}
+
+static irqreturn_t msdc_irq(int irq, void *dev_id)
+{
+ struct msdc_host *host = (struct msdc_host *) dev_id;
+
+ while (true) {
+ unsigned long flags;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ u32 events, event_mask;
+
+ spin_lock_irqsave(&host->lock, flags);
+ events = readl(host->base + MSDC_INT);
+ event_mask = readl(host->base + MSDC_INTEN);
+ /* clear interrupts */
+ writel(events & event_mask, host->base + MSDC_INT);
+
+ mrq = host->mrq;
+ cmd = host->cmd;
+ data = host->data;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (!(events & event_mask))
+ break;
+
+ if (!mrq) {
+ dev_err(host->dev,
+ "%s: MRQ=NULL; events=%08X; event_mask=%08X\n",
+ __func__, events, event_mask);
+ WARN_ON(1);
+ break;
+ }
+
+ dev_dbg(host->dev, "%s: events=%08X\n", __func__, events);
+
+ if (cmd)
+ msdc_cmd_done(host, events, mrq, cmd);
+ else if (data)
+ msdc_data_xfer_done(host, events, mrq, data);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void msdc_init_hw(struct msdc_host *host)
+{
+ u32 val;
+
+ /* Configure to MMC/SD mode, clock free running */
+ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
+
+ /* Reset */
+ msdc_reset_hw(host);
+
+ /* Disable card detection */
+ sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
+
+ /* Disable and clear all interrupts */
+ writel(0, host->base + MSDC_INTEN);
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
+
+ writel(0, host->base + MSDC_PAD_TUNE);
+ writel(0, host->base + MSDC_IOCON);
+ sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
+ writel(0x403c0046, host->base + MSDC_PATCH_BIT);
+ sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
+ writel(0xffff0089, host->base + MSDC_PATCH_BIT1);
+ sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
+
+ /* Configure to enable SDIO mode.
+ * This is required, otherwise SDIO CMD5 fails.
+ */
+ sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
+
+ /* disable the SDIO device interrupt detection function */
+ sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
+
+ /* Configure to default data timeout */
+ sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
+
+ dev_dbg(host->dev, "init hardware done!");
+}
+
+static void msdc_deinit_hw(struct msdc_host *host)
+{
+ u32 val;
+ /* Disable and clear all interrupts */
+ writel(0, host->base + MSDC_INTEN);
+
+ val = readl(host->base + MSDC_INT);
+ writel(val, host->base + MSDC_INT);
+}
+
+/* init gpd and bd list in msdc_drv_probe */
+static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
+{
+ struct mt_gpdma_desc *gpd = dma->gpd;
+ struct mt_bdma_desc *bd = dma->bd;
+ int i;
+
+ memset(gpd, 0, sizeof(struct mt_gpdma_desc) * 2);
+
+ gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */
+ gpd->ptr = (u32)dma->bd_addr; /* physical address */
+ /* gpd->next must be set for descriptor DMA;
+ * that is why two gpd structures are allocated.
+ */
+ gpd->next = (u32)dma->gpd_addr + sizeof(struct mt_gpdma_desc);
+ memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM);
+ for (i = 0; i < (MAX_BD_NUM - 1); i++)
+ bd[i].next = (u32)dma->bd_addr + sizeof(*bd) * (i + 1);
+}
+
+static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ int ret;
+
+ pm_runtime_get_sync(host->dev);
+
+ msdc_set_buswidth(host, ios->bus_width);
+
+ /* Suspend/Resume will do power off/on */
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ msdc_init_hw(host);
+ ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+ ios->vdd);
+ if (ret) {
+ dev_err(host->dev, "Failed to set vmmc power!\n");
+ goto end;
+ }
+ }
+ break;
+ case MMC_POWER_ON:
+ if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (ret)
+ dev_err(host->dev, "Failed to set vqmmc power!\n");
+ else
+ host->vqmmc_enabled = true;
+ }
+ break;
+ case MMC_POWER_OFF:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+ if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
+ regulator_disable(mmc->supply.vqmmc);
+ host->vqmmc_enabled = false;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (host->mclk != ios->clock || host->timing != ios->timing)
+ msdc_set_mclk(host, ios->timing, ios->clock);
+
+end:
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
+}
+
+static u32 test_delay_bit(u32 delay, u32 bit)
+{
+ bit %= PAD_DELAY_MAX;
+ return delay & (1 << bit);
+}
+
+static int get_delay_len(u32 delay, u32 start_bit)
+{
+ int i;
+
+ for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) {
+ if (test_delay_bit(delay, start_bit + i) == 0)
+ return i;
+ }
+ return PAD_DELAY_MAX - start_bit;
+}
+
+static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
+{
+ int start = 0, len = 0;
+ int start_final = 0, len_final = 0;
+ u8 final_phase = 0xff;
+ struct msdc_delay_phase delay_phase = { 0, };
+
+ if (delay == 0) {
+ dev_err(host->dev, "phase error: [map:%x]\n", delay);
+ delay_phase.final_phase = final_phase;
+ return delay_phase;
+ }
+
+ while (start < PAD_DELAY_MAX) {
+ len = get_delay_len(delay, start);
+ if (len_final < len) {
+ start_final = start;
+ len_final = len;
+ }
+ start += len ? len : 1;
+ if (len >= 8 && start_final < 4)
+ break;
+ }
+
+ /* The rule is to find the smallest delay cell */
+ if (start_final == 0)
+ final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX;
+ else
+ final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX;
+ dev_info(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n",
+ delay, len_final, final_phase);
+
+ delay_phase.maxlen = len_final;
+ delay_phase.start = start_final;
+ delay_phase.final_phase = final_phase;
+ return delay_phase;
+}
+
+static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ u32 rise_delay = 0, fall_delay = 0;
+ struct msdc_delay_phase final_rise_delay, final_fall_delay;
+ u8 final_delay, final_maxlen;
+ int cmd_err;
+ int i;
+
+ sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+ for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+ sdr_set_field(host->base + MSDC_PAD_TUNE,
+ MSDC_PAD_TUNE_CMDRDLY, i);
+ mmc_send_tuning(mmc, opcode, &cmd_err);
+ if (!cmd_err)
+ rise_delay |= (1 << i);
+ }
+
+ sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+ for (i = 0; i < PAD_DELAY_MAX; i++) {
+ sdr_set_field(host->base + MSDC_PAD_TUNE,
+ MSDC_PAD_TUNE_CMDRDLY, i);
+ mmc_send_tuning(mmc, opcode, &cmd_err);
+ if (!cmd_err)
+ fall_delay |= (1 << i);
+ }
+
+ final_rise_delay = get_best_delay(host, rise_delay);
+ final_fall_delay = get_best_delay(host, fall_delay);
+
+ final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
+ if (final_maxlen == final_rise_delay.maxlen) {
+ sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+ sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+ final_rise_delay.final_phase);
+ final_delay = final_rise_delay.final_phase;
+ } else {
+ sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+ sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+ final_fall_delay.final_phase);
+ final_delay = final_fall_delay.final_phase;
+ }
+
+ return final_delay == 0xff ? -EIO : 0;
+}
+
+static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ u32 rise_delay = 0, fall_delay = 0;
+ struct msdc_delay_phase final_rise_delay, final_fall_delay;
+ u8 final_delay, final_maxlen;
+ int i, ret;
+
+ sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
+ sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
+ for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+ sdr_set_field(host->base + MSDC_PAD_TUNE,
+ MSDC_PAD_TUNE_DATRRDLY, i);
+ ret = mmc_send_tuning(mmc, opcode, NULL);
+ if (!ret)
+ rise_delay |= (1 << i);
+ }
+
+ sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
+ sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
+ for (i = 0; i < PAD_DELAY_MAX; i++) {
+ sdr_set_field(host->base + MSDC_PAD_TUNE,
+ MSDC_PAD_TUNE_DATRRDLY, i);
+ ret = mmc_send_tuning(mmc, opcode, NULL);
+ if (!ret)
+ fall_delay |= (1 << i);
+ }
+
+ final_rise_delay = get_best_delay(host, rise_delay);
+ final_fall_delay = get_best_delay(host, fall_delay);
+
+ final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
+ /* Rising edge is more stable, prefer to use it */
+ if (final_rise_delay.maxlen >= 10)
+ final_maxlen = final_rise_delay.maxlen;
+ if (final_maxlen == final_rise_delay.maxlen) {
+ sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
+ sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
+ sdr_set_field(host->base + MSDC_PAD_TUNE,
+ MSDC_PAD_TUNE_DATRRDLY,
+ final_rise_delay.final_phase);
+ final_delay = final_rise_delay.final_phase;
+ } else {
+ sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
+ sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
+ sdr_set_field(host->base + MSDC_PAD_TUNE,
+ MSDC_PAD_TUNE_DATRRDLY,
+ final_fall_delay.final_phase);
+ final_delay = final_fall_delay.final_phase;
+ }
+
+ return final_delay == 0xff ? -EIO : 0;
+}
+
+static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ int ret;
+
+ pm_runtime_get_sync(host->dev);
+ ret = msdc_tune_response(mmc, opcode);
+ if (ret == -EIO) {
+ dev_err(host->dev, "Tune response fail!\n");
+ goto out;
+ }
+ ret = msdc_tune_data(mmc, opcode);
+ if (ret == -EIO)
+ dev_err(host->dev, "Tune data fail!\n");
+
+out:
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
+ return ret;
+}
+
+static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+
+ writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ return 0;
+}
+
+static void msdc_hw_reset(struct mmc_host *mmc)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+
+ sdr_set_bits(host->base + EMMC_IOCON, 1);
+ udelay(10); /* 10us is enough */
+ sdr_clr_bits(host->base + EMMC_IOCON, 1);
+}
+
+static struct mmc_host_ops mt_msdc_ops = {
+ .post_req = msdc_post_req,
+ .pre_req = msdc_pre_req,
+ .request = msdc_ops_request,
+ .set_ios = msdc_ops_set_ios,
+ .start_signal_voltage_switch = msdc_ops_switch_volt,
+ .card_busy = msdc_card_busy,
+ .execute_tuning = msdc_execute_tuning,
+ .prepare_hs400_tuning = msdc_prepare_hs400_tuning,
+ .hw_reset = msdc_hw_reset,
+};
+
+static int msdc_drv_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct msdc_host *host;
+ struct resource *res;
+ int ret;
+
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "No DT found\n");
+ return -EINVAL;
+ }
+ /* Allocate MMC host for this device */
+ mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ host = mmc_priv(mmc);
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto host_free;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
+ goto host_free;
+ }
+
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ goto host_free;
+
+ host->src_clk = devm_clk_get(&pdev->dev, "source");
+ if (IS_ERR(host->src_clk)) {
+ ret = PTR_ERR(host->src_clk);
+ goto host_free;
+ }
+
+ host->h_clk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(host->h_clk)) {
+ ret = PTR_ERR(host->h_clk);
+ goto host_free;
+ }
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0) {
+ ret = -EINVAL;
+ goto host_free;
+ }
+
+ host->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(host->pinctrl)) {
+ ret = PTR_ERR(host->pinctrl);
+ dev_err(&pdev->dev, "Cannot find pinctrl!\n");
+ goto host_free;
+ }
+
+ host->pins_default = pinctrl_lookup_state(host->pinctrl, "default");
+ if (IS_ERR(host->pins_default)) {
+ ret = PTR_ERR(host->pins_default);
+ dev_err(&pdev->dev, "Cannot find pinctrl default!\n");
+ goto host_free;
+ }
+
+ host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
+ if (IS_ERR(host->pins_uhs)) {
+ ret = PTR_ERR(host->pins_uhs);
+ dev_err(&pdev->dev, "Cannot find pinctrl uhs!\n");
+ goto host_free;
+ }
+
+ if (!of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
+ &host->hs400_ds_delay))
+ dev_dbg(&pdev->dev, "hs400-ds-delay: %x\n",
+ host->hs400_ds_delay);
+
+ host->dev = &pdev->dev;
+ host->mmc = mmc;
+ host->src_clk_freq = clk_get_rate(host->src_clk);
+ /* Set host parameters to mmc */
+ mmc->ops = &mt_msdc_ops;
+ mmc->f_min = host->src_clk_freq / (4 * 255);
+
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->caps |= MMC_CAP_RUNTIME_RESUME;
+ /* MMC core transfer sizes tunable parameters */
+ mmc->max_segs = MAX_BD_NUM;
+ mmc->max_seg_size = BDMA_DESC_BUFLEN;
+ mmc->max_blk_size = 2048;
+ mmc->max_req_size = 512 * 1024;
+ mmc->max_blk_count = mmc->max_req_size / 512;
+ host->dma_mask = DMA_BIT_MASK(32);
+ mmc_dev(mmc)->dma_mask = &host->dma_mask;
+
+ host->timeout_clks = 3 * 1048576;
+ host->dma.gpd = dma_alloc_coherent(&pdev->dev,
+ 2 * sizeof(struct mt_gpdma_desc),
+ &host->dma.gpd_addr, GFP_KERNEL);
+ host->dma.bd = dma_alloc_coherent(&pdev->dev,
+ MAX_BD_NUM * sizeof(struct mt_bdma_desc),
+ &host->dma.bd_addr, GFP_KERNEL);
+ if (!host->dma.gpd || !host->dma.bd) {
+ ret = -ENOMEM;
+ goto release_mem;
+ }
+ msdc_init_gpd_bd(host, &host->dma);
+ INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
+ spin_lock_init(&host->lock);
+
+ platform_set_drvdata(pdev, mmc);
+ msdc_ungate_clock(host);
+ msdc_init_hw(host);
+
+ ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT, pdev->name, host);
+ if (ret)
+ goto release;
+
+ pm_runtime_set_active(host->dev);
+ pm_runtime_set_autosuspend_delay(host->dev, MTK_MMC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(host->dev);
+ pm_runtime_enable(host->dev);
+ ret = mmc_add_host(mmc);
+
+ if (ret)
+ goto end;
+
+ return 0;
+end:
+ pm_runtime_disable(host->dev);
+release:
+ platform_set_drvdata(pdev, NULL);
+ msdc_deinit_hw(host);
+ msdc_gate_clock(host);
+release_mem:
+ if (host->dma.gpd)
+ dma_free_coherent(&pdev->dev,
+ 2 * sizeof(struct mt_gpdma_desc),
+ host->dma.gpd, host->dma.gpd_addr);
+ if (host->dma.bd)
+ dma_free_coherent(&pdev->dev,
+ MAX_BD_NUM * sizeof(struct mt_bdma_desc),
+ host->dma.bd, host->dma.bd_addr);
+host_free:
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+static int msdc_drv_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct msdc_host *host;
+
+ mmc = platform_get_drvdata(pdev);
+ host = mmc_priv(mmc);
+
+ pm_runtime_get_sync(host->dev);
+
+ platform_set_drvdata(pdev, NULL);
+ mmc_remove_host(host->mmc);
+ msdc_deinit_hw(host);
+ msdc_gate_clock(host);
+
+ pm_runtime_disable(host->dev);
+ pm_runtime_put_noidle(host->dev);
+ dma_free_coherent(&pdev->dev,
+ 2 * sizeof(struct mt_gpdma_desc),
+ host->dma.gpd, host->dma.gpd_addr);
+ dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
+ host->dma.bd, host->dma.bd_addr);
+
+ mmc_free_host(host->mmc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static void msdc_save_reg(struct msdc_host *host)
+{
+ host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
+ host->save_para.iocon = readl(host->base + MSDC_IOCON);
+ host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
+ host->save_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
+ host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
+ host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
+ host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
+}
+
+static void msdc_restore_reg(struct msdc_host *host)
+{
+ writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
+ writel(host->save_para.iocon, host->base + MSDC_IOCON);
+ writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
+ writel(host->save_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
+ writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
+ writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
+ writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
+}
+
+static int msdc_runtime_suspend(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msdc_host *host = mmc_priv(mmc);
+
+ msdc_save_reg(host);
+ msdc_gate_clock(host);
+ return 0;
+}
+
+static int msdc_runtime_resume(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msdc_host *host = mmc_priv(mmc);
+
+ msdc_ungate_clock(host);
+ msdc_restore_reg(host);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops msdc_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
+};
+
+static const struct of_device_id msdc_of_ids[] = {
+ { .compatible = "mediatek,mt8135-mmc", },
+ {}
+};
+
+static struct platform_driver mt_msdc_driver = {
+ .probe = msdc_drv_probe,
+ .remove = msdc_drv_remove,
+ .driver = {
+ .name = "mtk-msdc",
+ .of_match_table = msdc_of_ids,
+ .pm = &msdc_dev_pm_ops,
+ },
+};
+
+module_platform_driver(mt_msdc_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek SD/MMC Card Driver");
diff --git a/kernel/drivers/mmc/host/mxcmmc.c b/kernel/drivers/mmc/host/mxcmmc.c
index 317d709f7..d110f9e98 100644
--- a/kernel/drivers/mmc/host/mxcmmc.c
+++ b/kernel/drivers/mmc/host/mxcmmc.c
@@ -605,11 +605,7 @@ static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS);
}
- stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
- if (stat)
- return stat;
-
- return 0;
+ return mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
}
static int mxcmci_transfer_data(struct mxcmci_host *host)
diff --git a/kernel/drivers/mmc/host/mxs-mmc.c b/kernel/drivers/mmc/host/mxs-mmc.c
index a82411a2c..d839147e5 100644
--- a/kernel/drivers/mmc/host/mxs-mmc.c
+++ b/kernel/drivers/mmc/host/mxs-mmc.c
@@ -549,7 +549,7 @@ static const struct mmc_host_ops mxs_mmc_ops = {
.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
};
-static struct platform_device_id mxs_ssp_ids[] = {
+static const struct platform_device_id mxs_ssp_ids[] = {
{
.name = "imx23-mmc",
.driver_data = IMX23_SSP,
diff --git a/kernel/drivers/mmc/host/omap.c b/kernel/drivers/mmc/host/omap.c
index 68dd6c79c..b9958a123 100644
--- a/kernel/drivers/mmc/host/omap.c
+++ b/kernel/drivers/mmc/host/omap.c
@@ -948,6 +948,7 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
struct mmc_data *data = req->data;
int i, use_dma = 1, block_size;
+ struct scatterlist *sg;
unsigned sg_len;
host->data = data;
@@ -972,8 +973,8 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
sg_len = (data->blocks == 1) ? 1 : data->sg_len;
/* Only do DMA for entire blocks */
- for (i = 0; i < sg_len; i++) {
- if ((data->sg[i].length % block_size) != 0) {
+ for_each_sg(data->sg, sg, sg_len, i) {
+ if ((sg->length % block_size) != 0) {
use_dma = 0;
break;
}
@@ -1419,8 +1420,10 @@ static int mmc_omap_probe(struct platform_device *pdev)
host->reg_shift = (mmc_omap7xx() ? 1 : 2);
host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
- if (!host->mmc_omap_wq)
+ if (!host->mmc_omap_wq) {
+ ret = -ENOMEM;
goto err_plat_cleanup;
+ }
for (i = 0; i < pdata->nr_slots; i++) {
ret = mmc_omap_new_slot(host, i);
@@ -1487,6 +1490,7 @@ static const struct of_device_id mmc_omap_match[] = {
{ .compatible = "ti,omap2420-mmc", },
{ },
};
+MODULE_DEVICE_TABLE(of, mmc_omap_match);
#endif
static struct platform_driver mmc_omap_driver = {
diff --git a/kernel/drivers/mmc/host/omap_hsmmc.c b/kernel/drivers/mmc/host/omap_hsmmc.c
index d0abdffb0..7fb0753ab 100644
--- a/kernel/drivers/mmc/host/omap_hsmmc.c
+++ b/kernel/drivers/mmc/host/omap_hsmmc.c
@@ -43,6 +43,7 @@
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/platform_data/hsmmc-omap.h>
/* OMAP HSMMC Host Controller Registers */
@@ -180,18 +181,10 @@ struct omap_hsmmc_host {
struct mmc_data *data;
struct clk *fclk;
struct clk *dbclk;
- /*
- * vcc == configured supply
- * vcc_aux == optional
- * - MMC1, supply for DAT4..DAT7
- * - MMC2/MMC2, external level shifter voltage supply, for
- * chip (SDIO, eMMC, etc) or transceiver (MMC2 only)
- */
- struct regulator *vcc;
- struct regulator *vcc_aux;
struct regulator *pbias;
bool pbias_enabled;
void __iomem *base;
+ int vqmmc_enabled;
resource_size_t mapbase;
spinlock_t irq_lock; /* Prevent races with irq handler */
unsigned int dma_len;
@@ -212,13 +205,11 @@ struct omap_hsmmc_host {
int context_loss;
int protect_card;
int reqs_blocked;
- int use_reg;
int req_in_progress;
unsigned long clk_rate;
unsigned int flags;
#define AUTO_CMD23 (1 << 0) /* Auto CMD23 support */
#define HSMMC_SDIO_IRQ_ENABLED (1 << 1) /* SDIO irq enabled */
-#define HSMMC_WAKE_IRQ_ENABLED (1 << 2)
struct omap_hsmmc_next next_data;
struct omap_hsmmc_platform_data *pdata;
@@ -254,32 +245,135 @@ static int omap_hsmmc_get_cover_state(struct device *dev)
return mmc_gpio_get_cd(host->mmc);
}
-#ifdef CONFIG_REGULATOR
+static int omap_hsmmc_enable_supply(struct mmc_host *mmc)
+{
+ int ret;
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+ struct mmc_ios *ios = &mmc->ios;
+
+ if (mmc->supply.vmmc) {
+ ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable interface voltage rail, if needed */
+ if (mmc->supply.vqmmc && !host->vqmmc_enabled) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "vmmc_aux reg enable failed\n");
+ goto err_vqmmc;
+ }
+ host->vqmmc_enabled = 1;
+ }
+
+ return 0;
+
+err_vqmmc:
+ if (mmc->supply.vmmc)
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+ return ret;
+}
+
+static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
+{
+ int ret;
+ int status;
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ if (mmc->supply.vqmmc && host->vqmmc_enabled) {
+ ret = regulator_disable(mmc->supply.vqmmc);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "vmmc_aux reg disable failed\n");
+ return ret;
+ }
+ host->vqmmc_enabled = 0;
+ }
+
+ if (mmc->supply.vmmc) {
+ ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ if (ret)
+ goto err_set_ocr;
+ }
+
+ return 0;
+
+err_set_ocr:
+ if (mmc->supply.vqmmc) {
+ status = regulator_enable(mmc->supply.vqmmc);
+ if (status)
+ dev_err(mmc_dev(mmc), "vmmc_aux re-enable failed\n");
+ }
+
+ return ret;
+}
+
+static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
+ int vdd)
+{
+ int ret;
+
+ if (!host->pbias)
+ return 0;
+
+ if (power_on) {
+ if (vdd <= VDD_165_195)
+ ret = regulator_set_voltage(host->pbias, VDD_1V8,
+ VDD_1V8);
+ else
+ ret = regulator_set_voltage(host->pbias, VDD_3V0,
+ VDD_3V0);
+ if (ret < 0) {
+ dev_err(host->dev, "pbias set voltage fail\n");
+ return ret;
+ }
+
+ if (host->pbias_enabled == 0) {
+ ret = regulator_enable(host->pbias);
+ if (ret) {
+ dev_err(host->dev, "pbias reg enable fail\n");
+ return ret;
+ }
+ host->pbias_enabled = 1;
+ }
+ } else {
+ if (host->pbias_enabled == 1) {
+ ret = regulator_disable(host->pbias);
+ if (ret) {
+ dev_err(host->dev, "pbias reg disable fail\n");
+ return ret;
+ }
+ host->pbias_enabled = 0;
+ }
+ }
+
+ return 0;
+}
static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
{
struct omap_hsmmc_host *host =
platform_get_drvdata(to_platform_device(dev));
+ struct mmc_host *mmc = host->mmc;
int ret = 0;
+ if (mmc_pdata(host)->set_power)
+ return mmc_pdata(host)->set_power(dev, power_on, vdd);
+
/*
* If we don't see a Vcc regulator, assume it's a fixed
* voltage always-on regulator.
*/
- if (!host->vcc)
+ if (!mmc->supply.vmmc)
return 0;
if (mmc_pdata(host)->before_set_reg)
mmc_pdata(host)->before_set_reg(dev, power_on, vdd);
- if (host->pbias) {
- if (host->pbias_enabled == 1) {
- ret = regulator_disable(host->pbias);
- if (!ret)
- host->pbias_enabled = 0;
- }
- regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0);
- }
+ ret = omap_hsmmc_set_pbias(host, false, 0);
+ if (ret)
+ return ret;
/*
* Assume Vcc regulator is used only to power the card ... OMAP
@@ -295,129 +389,138 @@ static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
* chips/cards need an interface voltage rail too.
*/
if (power_on) {
- if (host->vcc)
- ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
- /* Enable interface voltage rail, if needed */
- if (ret == 0 && host->vcc_aux) {
- ret = regulator_enable(host->vcc_aux);
- if (ret < 0 && host->vcc)
- ret = mmc_regulator_set_ocr(host->mmc,
- host->vcc, 0);
- }
- } else {
- /* Shut down the rail */
- if (host->vcc_aux)
- ret = regulator_disable(host->vcc_aux);
- if (host->vcc) {
- /* Then proceed to shut down the local regulator */
- ret = mmc_regulator_set_ocr(host->mmc,
- host->vcc, 0);
- }
- }
-
- if (host->pbias) {
- if (vdd <= VDD_165_195)
- ret = regulator_set_voltage(host->pbias, VDD_1V8,
- VDD_1V8);
- else
- ret = regulator_set_voltage(host->pbias, VDD_3V0,
- VDD_3V0);
- if (ret < 0)
- goto error_set_power;
+ ret = omap_hsmmc_enable_supply(mmc);
+ if (ret)
+ return ret;
- if (host->pbias_enabled == 0) {
- ret = regulator_enable(host->pbias);
- if (!ret)
- host->pbias_enabled = 1;
- }
+ ret = omap_hsmmc_set_pbias(host, true, vdd);
+ if (ret)
+ goto err_set_voltage;
+ } else {
+ ret = omap_hsmmc_disable_supply(mmc);
+ if (ret)
+ return ret;
}
if (mmc_pdata(host)->after_set_reg)
mmc_pdata(host)->after_set_reg(dev, power_on, vdd);
-error_set_power:
+ return 0;
+
+err_set_voltage:
+ omap_hsmmc_disable_supply(mmc);
+
return ret;
}
-static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
+static int omap_hsmmc_disable_boot_regulator(struct regulator *reg)
{
- struct regulator *reg;
- int ocr_value = 0;
+ int ret;
- reg = devm_regulator_get(host->dev, "vmmc");
- if (IS_ERR(reg)) {
- dev_err(host->dev, "unable to get vmmc regulator %ld\n",
- PTR_ERR(reg));
- return PTR_ERR(reg);
- } else {
- host->vcc = reg;
- ocr_value = mmc_regulator_get_ocrmask(reg);
- if (!mmc_pdata(host)->ocr_mask) {
- mmc_pdata(host)->ocr_mask = ocr_value;
- } else {
- if (!(mmc_pdata(host)->ocr_mask & ocr_value)) {
- dev_err(host->dev, "ocrmask %x is not supported\n",
- mmc_pdata(host)->ocr_mask);
- mmc_pdata(host)->ocr_mask = 0;
- return -EINVAL;
- }
- }
+ if (!reg)
+ return 0;
+
+ if (regulator_is_enabled(reg)) {
+ ret = regulator_enable(reg);
+ if (ret)
+ return ret;
+
+ ret = regulator_disable(reg);
+ if (ret)
+ return ret;
}
- mmc_pdata(host)->set_power = omap_hsmmc_set_power;
- /* Allow an aux regulator */
- reg = devm_regulator_get_optional(host->dev, "vmmc_aux");
- host->vcc_aux = IS_ERR(reg) ? NULL : reg;
+ return 0;
+}
- reg = devm_regulator_get_optional(host->dev, "pbias");
- host->pbias = IS_ERR(reg) ? NULL : reg;
+static int omap_hsmmc_disable_boot_regulators(struct omap_hsmmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ int ret;
- /* For eMMC do not power off when not in sleep state */
- if (mmc_pdata(host)->no_regulator_off_init)
- return 0;
/*
- * To disable boot_on regulator, enable regulator
- * to increase usecount and then disable it.
+ * disable regulators enabled during boot and get the usecount
+ * right so that regulators can be enabled/disabled by checking
+ * the return value of regulator_is_enabled
*/
- if ((host->vcc && regulator_is_enabled(host->vcc) > 0) ||
- (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) {
- int vdd = ffs(mmc_pdata(host)->ocr_mask) - 1;
+ ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vmmc);
+ if (ret) {
+ dev_err(host->dev, "fail to disable boot enabled vmmc reg\n");
+ return ret;
+ }
+
+ ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vqmmc);
+ if (ret) {
+ dev_err(host->dev,
+ "fail to disable boot enabled vmmc_aux reg\n");
+ return ret;
+ }
- mmc_pdata(host)->set_power(host->dev, 1, vdd);
- mmc_pdata(host)->set_power(host->dev, 0, 0);
+ ret = omap_hsmmc_disable_boot_regulator(host->pbias);
+ if (ret) {
+ dev_err(host->dev,
+ "failed to disable boot enabled pbias reg\n");
+ return ret;
}
return 0;
}
-static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
+static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
- mmc_pdata(host)->set_power = NULL;
-}
+ int ocr_value = 0;
+ int ret;
+ struct mmc_host *mmc = host->mmc;
-static inline int omap_hsmmc_have_reg(void)
-{
- return 1;
-}
+ if (mmc_pdata(host)->set_power)
+ return 0;
-#else
+ mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc");
+ if (IS_ERR(mmc->supply.vmmc)) {
+ ret = PTR_ERR(mmc->supply.vmmc);
+ if ((ret != -ENODEV) && host->dev->of_node)
+ return ret;
+ dev_dbg(host->dev, "unable to get vmmc regulator %ld\n",
+ PTR_ERR(mmc->supply.vmmc));
+ mmc->supply.vmmc = NULL;
+ } else {
+ ocr_value = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
+ if (ocr_value > 0)
+ mmc_pdata(host)->ocr_mask = ocr_value;
+ }
-static inline int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
-{
- return -EINVAL;
-}
+ /* Allow an aux regulator */
+ mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux");
+ if (IS_ERR(mmc->supply.vqmmc)) {
+ ret = PTR_ERR(mmc->supply.vqmmc);
+ if ((ret != -ENODEV) && host->dev->of_node)
+ return ret;
+ dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
+ PTR_ERR(mmc->supply.vqmmc));
+ mmc->supply.vqmmc = NULL;
+ }
-static inline void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
-{
-}
+ host->pbias = devm_regulator_get_optional(host->dev, "pbias");
+ if (IS_ERR(host->pbias)) {
+ ret = PTR_ERR(host->pbias);
+ if ((ret != -ENODEV) && host->dev->of_node)
+ return ret;
+ dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
+ PTR_ERR(host->pbias));
+ host->pbias = NULL;
+ }
+
+ /* For eMMC do not power off when not in sleep state */
+ if (mmc_pdata(host)->no_regulator_off_init)
+ return 0;
+
+ ret = omap_hsmmc_disable_boot_regulators(host);
+ if (ret)
+ return ret;
-static inline int omap_hsmmc_have_reg(void)
-{
return 0;
}
-#endif
-
static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id);
static int omap_hsmmc_gpio_init(struct mmc_host *mmc,
@@ -1068,7 +1171,8 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
}
if (status & (CTO_EN | DTO_EN))
hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
- else if (status & (CCRC_EN | DCRC_EN))
+ else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
+ BADA_EN))
hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
if (status & ACE_EN) {
@@ -1117,22 +1221,6 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static irqreturn_t omap_hsmmc_wake_irq(int irq, void *dev_id)
-{
- struct omap_hsmmc_host *host = dev_id;
-
- /* cirq is level triggered, disable to avoid infinite loop */
- spin_lock(&host->irq_lock);
- if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
- disable_irq_nosync(host->wake_irq);
- host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
- }
- spin_unlock(&host->irq_lock);
- pm_request_resume(host->dev); /* no use counter */
-
- return IRQ_HANDLED;
-}
-
static void set_sd_bus_power(struct omap_hsmmc_host *host)
{
unsigned long i;
@@ -1164,11 +1252,11 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
clk_disable_unprepare(host->dbclk);
/* Turn the power off */
- ret = mmc_pdata(host)->set_power(host->dev, 0, 0);
+ ret = omap_hsmmc_set_power(host->dev, 0, 0);
/* Turn the power ON with given VDD 1.8 or 3.0v */
if (!ret)
- ret = mmc_pdata(host)->set_power(host->dev, 1, vdd);
+ ret = omap_hsmmc_set_power(host->dev, 1, vdd);
pm_runtime_get_sync(host->dev);
if (host->dbclk)
clk_prepare_enable(host->dbclk);
@@ -1567,10 +1655,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->power_mode != host->power_mode) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
- mmc_pdata(host)->set_power(host->dev, 0, 0);
+ omap_hsmmc_set_power(host->dev, 0, 0);
break;
case MMC_POWER_UP:
- mmc_pdata(host)->set_power(host->dev, 1, ios->vdd);
+ omap_hsmmc_set_power(host->dev, 1, ios->vdd);
break;
case MMC_POWER_ON:
do_send_init_stream = 1;
@@ -1665,7 +1753,6 @@ static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
{
- struct mmc_host *mmc = host->mmc;
int ret;
/*
@@ -1677,11 +1764,7 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
if (!host->dev->of_node || !host->wake_irq)
return -ENODEV;
- /* Prevent auto-enabling of IRQ */
- irq_set_status_flags(host->wake_irq, IRQ_NOAUTOEN);
- ret = devm_request_irq(host->dev, host->wake_irq, omap_hsmmc_wake_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- mmc_hostname(mmc), host);
+ ret = dev_pm_set_dedicated_wake_irq(host->dev, host->wake_irq);
if (ret) {
dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
goto err;
@@ -1718,7 +1801,7 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
return 0;
err_free_irq:
- devm_free_irq(host->dev, host->wake_irq, host);
+ dev_pm_clear_wake_irq(host->dev);
err:
dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
host->wake_irq = 0;
@@ -1974,6 +2057,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
host->power_mode = MMC_POWER_OFF;
host->next_data.cookie = 1;
host->pbias_enabled = 0;
+ host->vqmmc_enabled = 0;
ret = omap_hsmmc_gpio_init(mmc, host, pdata);
if (ret)
@@ -2007,6 +2091,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
}
+ device_init_wakeup(&pdev->dev, true);
pm_runtime_enable(host->dev);
pm_runtime_get_sync(host->dev);
pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
@@ -2097,12 +2182,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
goto err_irq;
}
- if (omap_hsmmc_have_reg() && !mmc_pdata(host)->set_power) {
- ret = omap_hsmmc_reg_get(host);
- if (ret)
- goto err_irq;
- host->use_reg = 1;
- }
+ ret = omap_hsmmc_reg_get(host);
+ if (ret)
+ goto err_irq;
mmc->ocr_avail = mmc_pdata(host)->ocr_mask;
@@ -2144,9 +2226,8 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
err_slot_name:
mmc_remove_host(mmc);
- if (host->use_reg)
- omap_hsmmc_reg_put(host);
err_irq:
+ device_init_wakeup(&pdev->dev, false);
if (host->tx_chan)
dma_release_channel(host->tx_chan);
if (host->rx_chan)
@@ -2168,8 +2249,6 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_get_sync(host->dev);
mmc_remove_host(host->mmc);
- if (host->use_reg)
- omap_hsmmc_reg_put(host);
if (host->tx_chan)
dma_release_channel(host->tx_chan);
@@ -2178,6 +2257,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
+ device_init_wakeup(&pdev->dev, false);
if (host->dbclk)
clk_disable_unprepare(host->dbclk);
@@ -2204,11 +2284,6 @@ static int omap_hsmmc_suspend(struct device *dev)
OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
}
- /* do not wake up due to sdio irq */
- if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
- !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
- disable_irq(host->wake_irq);
-
if (host->dbclk)
clk_disable_unprepare(host->dbclk);
@@ -2233,11 +2308,6 @@ static int omap_hsmmc_resume(struct device *dev)
omap_hsmmc_conf_bus_power(host);
omap_hsmmc_protect_card(host);
-
- if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
- !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
- enable_irq(host->wake_irq);
-
pm_runtime_mark_last_busy(host->dev);
pm_runtime_put_autosuspend(host->dev);
return 0;
@@ -2277,10 +2347,6 @@ static int omap_hsmmc_runtime_suspend(struct device *dev)
}
pinctrl_pm_select_idle_state(dev);
-
- WARN_ON(host->flags & HSMMC_WAKE_IRQ_ENABLED);
- enable_irq(host->wake_irq);
- host->flags |= HSMMC_WAKE_IRQ_ENABLED;
} else {
pinctrl_pm_select_idle_state(dev);
}
@@ -2302,11 +2368,6 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
spin_lock_irqsave(&host->irq_lock, flags);
if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
(host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
- /* sdio irq flag can't change while in runtime suspend */
- if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
- disable_irq_nosync(host->wake_irq);
- host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
- }
pinctrl_pm_select_default_state(host->dev);
diff --git a/kernel/drivers/mmc/host/pxamci.c b/kernel/drivers/mmc/host/pxamci.c
index 1b6d0bfe3..28a057fae 100644
--- a/kernel/drivers/mmc/host/pxamci.c
+++ b/kernel/drivers/mmc/host/pxamci.c
@@ -22,10 +22,13 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
@@ -37,7 +40,6 @@
#include <asm/sizes.h>
#include <mach/hardware.h>
-#include <mach/dma.h>
#include <linux/platform_data/mmc-pxamci.h>
#include "pxamci.h"
@@ -58,7 +60,6 @@ struct pxamci_host {
struct clk *clk;
unsigned long clkrate;
int irq;
- int dma;
unsigned int clkrt;
unsigned int cmdat;
unsigned int imask;
@@ -69,8 +70,10 @@ struct pxamci_host {
struct mmc_command *cmd;
struct mmc_data *data;
+ struct dma_chan *dma_chan_rx;
+ struct dma_chan *dma_chan_tx;
+ dma_cookie_t dma_cookie;
dma_addr_t sg_dma;
- struct pxa_dma_desc *sg_cpu;
unsigned int dma_len;
unsigned int dma_dir;
@@ -173,14 +176,18 @@ static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
spin_unlock_irqrestore(&host->lock, flags);
}
+static void pxamci_dma_irq(void *param);
+
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
+ struct dma_async_tx_descriptor *tx;
+ enum dma_data_direction direction;
+ struct dma_slave_config config;
+ struct dma_chan *chan;
unsigned int nob = data->blocks;
unsigned long long clks;
unsigned int timeout;
- bool dalgn = 0;
- u32 dcmd;
- int i;
+ int ret;
host->data = data;
@@ -195,54 +202,48 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
writel((timeout + 255) / 256, host->base + MMC_RDTO);
+ memset(&config, 0, sizeof(config));
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ config.src_addr = host->res->start + MMC_RXFIFO;
+ config.dst_addr = host->res->start + MMC_TXFIFO;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+
if (data->flags & MMC_DATA_READ) {
host->dma_dir = DMA_FROM_DEVICE;
- dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
- DRCMR(host->dma_drcmrtx) = 0;
- DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
+ direction = DMA_DEV_TO_MEM;
+ chan = host->dma_chan_rx;
} else {
host->dma_dir = DMA_TO_DEVICE;
- dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
- DRCMR(host->dma_drcmrrx) = 0;
- DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
+ direction = DMA_MEM_TO_DEV;
+ chan = host->dma_chan_tx;
}
- dcmd |= DCMD_BURST32 | DCMD_WIDTH1;
+ config.direction = direction;
- host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret < 0) {
+ dev_err(mmc_dev(host->mmc), "dma slave config failed\n");
+ return;
+ }
+
+ host->dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
host->dma_dir);
- for (i = 0; i < host->dma_len; i++) {
- unsigned int length = sg_dma_len(&data->sg[i]);
- host->sg_cpu[i].dcmd = dcmd | length;
- if (length & 31 && !(data->flags & MMC_DATA_READ))
- host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
- /* Not aligned to 8-byte boundary? */
- if (sg_dma_address(&data->sg[i]) & 0x7)
- dalgn = 1;
- if (data->flags & MMC_DATA_READ) {
- host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
- host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
- } else {
- host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
- host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
- }
- host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
- sizeof(struct pxa_dma_desc);
+ tx = dmaengine_prep_slave_sg(chan, data->sg, host->dma_len, direction,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+ return;
}
- host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
- wmb();
- /*
- * The PXA27x DMA controller encounters overhead when working with
- * unaligned (to 8-byte boundaries) data, so switch on byte alignment
- * mode only if we have unaligned data.
- */
- if (dalgn)
- DALGN |= (1 << host->dma);
- else
- DALGN &= ~(1 << host->dma);
- DDADR(host->dma) = host->sg_dma;
+ if (!(data->flags & MMC_DATA_READ)) {
+ tx->callback = pxamci_dma_irq;
+ tx->callback_param = host;
+ }
+
+ host->dma_cookie = dmaengine_submit(tx);
/*
* workaround for erratum #91:
@@ -251,7 +252,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
* before starting DMA.
*/
if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
- DCSR(host->dma) = DCSR_RUN;
+ dma_async_issue_pending(chan);
}
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
@@ -343,7 +344,7 @@ static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
* enable DMA late
*/
if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
- DCSR(host->dma) = DCSR_RUN;
+ dma_async_issue_pending(host->dma_chan_tx);
} else {
pxamci_finish_request(host, host->mrq);
}
@@ -354,13 +355,17 @@ static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
struct mmc_data *data = host->data;
+ struct dma_chan *chan;
if (!data)
return 0;
- DCSR(host->dma) = 0;
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- host->dma_dir);
+ if (data->flags & MMC_DATA_READ)
+ chan = host->dma_chan_rx;
+ else
+ chan = host->dma_chan_tx;
+ dma_unmap_sg(chan->device->dev,
+ data->sg, data->sg_len, host->dma_dir);
if (stat & STAT_READ_TIME_OUT)
data->error = -ETIMEDOUT;
@@ -450,12 +455,8 @@ static int pxamci_get_ro(struct mmc_host *mmc)
{
struct pxamci_host *host = mmc_priv(mmc);
- if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
- if (host->pdata->gpio_card_ro_invert)
- return !gpio_get_value(host->pdata->gpio_card_ro);
- else
- return gpio_get_value(host->pdata->gpio_card_ro);
- }
+ if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro))
+ return mmc_gpio_get_ro(mmc);
if (host->pdata && host->pdata->get_ro)
return !!host->pdata->get_ro(mmc_dev(mmc));
/*
@@ -547,25 +548,43 @@ static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
static const struct mmc_host_ops pxamci_ops = {
.request = pxamci_request,
+ .get_cd = mmc_gpio_get_cd,
.get_ro = pxamci_get_ro,
.set_ios = pxamci_set_ios,
.enable_sdio_irq = pxamci_enable_sdio_irq,
};
-static void pxamci_dma_irq(int dma, void *devid)
+static void pxamci_dma_irq(void *param)
{
- struct pxamci_host *host = devid;
- int dcsr = DCSR(dma);
- DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;
+ struct pxamci_host *host = param;
+ struct dma_tx_state state;
+ enum dma_status status;
+ struct dma_chan *chan;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (!host->data)
+ goto out_unlock;
+
+ if (host->data->flags & MMC_DATA_READ)
+ chan = host->dma_chan_rx;
+ else
+ chan = host->dma_chan_tx;
+
+ status = dmaengine_tx_status(chan, host->dma_cookie, &state);
- if (dcsr & DCSR_ENDINTR) {
+ if (likely(status == DMA_COMPLETE)) {
writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
} else {
- pr_err("%s: DMA error on channel %d (DCSR=%#x)\n",
- mmc_hostname(host->mmc), dma, dcsr);
+ pr_err("%s: DMA error on %s channel\n", mmc_hostname(host->mmc),
+ host->data->flags & MMC_DATA_READ ? "rx" : "tx");
host->data->error = -EIO;
pxamci_data_done(host, 0);
}
+
+out_unlock:
+ spin_unlock_irqrestore(&host->lock, flags);
}
static irqreturn_t pxamci_detect_irq(int irq, void *devid)
@@ -625,7 +644,9 @@ static int pxamci_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct pxamci_host *host = NULL;
struct resource *r, *dmarx, *dmatx;
+ struct pxad_param param_rx, param_tx;
int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
+ dma_cap_mask_t mask;
ret = pxamci_of_init(pdev);
if (ret)
@@ -671,7 +692,6 @@ static int pxamci_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->mmc = mmc;
- host->dma = -1;
host->pdata = pdev->dev.platform_data;
host->clkrt = CLKRT_OFF;
@@ -702,12 +722,6 @@ static int pxamci_probe(struct platform_device *pdev)
MMC_CAP_SD_HIGHSPEED;
}
- host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
- if (!host->sg_cpu) {
- ret = -ENOMEM;
- goto out;
- }
-
spin_lock_init(&host->lock);
host->res = r;
host->irq = irq;
@@ -728,32 +742,45 @@ static int pxamci_probe(struct platform_device *pdev)
writel(64, host->base + MMC_RESTO);
writel(host->imask, host->base + MMC_I_MASK);
- host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
- pxamci_dma_irq, host);
- if (host->dma < 0) {
- ret = -EBUSY;
- goto out;
- }
-
ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
if (ret)
goto out;
platform_set_drvdata(pdev, mmc);
- dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!dmarx) {
- ret = -ENXIO;
+ if (!pdev->dev.of_node) {
+ dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!dmarx || !dmatx) {
+ ret = -ENXIO;
+ goto out;
+ }
+ param_rx.prio = PXAD_PRIO_LOWEST;
+ param_rx.drcmr = dmarx->start;
+ param_tx.prio = PXAD_PRIO_LOWEST;
+ param_tx.drcmr = dmatx->start;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->dma_chan_rx =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param_rx, &pdev->dev, "rx");
+ if (host->dma_chan_rx == NULL) {
+ dev_err(&pdev->dev, "unable to request rx dma channel\n");
+ ret = -ENODEV;
goto out;
}
- host->dma_drcmrrx = dmarx->start;
- dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!dmatx) {
- ret = -ENXIO;
+ host->dma_chan_tx =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param_tx, &pdev->dev, "tx");
+ if (host->dma_chan_tx == NULL) {
+ dev_err(&pdev->dev, "unable to request tx dma channel\n");
+ ret = -ENODEV;
goto out;
}
- host->dma_drcmrtx = dmatx->start;
if (host->pdata) {
gpio_cd = host->pdata->gpio_card_detect;
@@ -761,37 +788,31 @@ static int pxamci_probe(struct platform_device *pdev)
gpio_power = host->pdata->gpio_power;
}
if (gpio_is_valid(gpio_power)) {
- ret = gpio_request(gpio_power, "mmc card power");
+ ret = devm_gpio_request(&pdev->dev, gpio_power,
+ "mmc card power");
if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
+ dev_err(&pdev->dev, "Failed requesting gpio_power %d\n",
+ gpio_power);
goto out;
}
gpio_direction_output(gpio_power,
host->pdata->gpio_power_invert);
}
- if (gpio_is_valid(gpio_ro)) {
- ret = gpio_request(gpio_ro, "mmc card read only");
- if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
- goto err_gpio_ro;
- }
- gpio_direction_input(gpio_ro);
+ if (gpio_is_valid(gpio_ro))
+ ret = mmc_gpio_request_ro(mmc, gpio_ro);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
+ goto out;
+ } else {
+ mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+ 0 : MMC_CAP2_RO_ACTIVE_HIGH;
}
- if (gpio_is_valid(gpio_cd)) {
- ret = gpio_request(gpio_cd, "mmc card detect");
- if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
- goto err_gpio_cd;
- }
- gpio_direction_input(gpio_cd);
- ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "mmc card detect", mmc);
- if (ret) {
- dev_err(&pdev->dev, "failed to request card detect IRQ\n");
- goto err_request_irq;
- }
+ if (gpio_is_valid(gpio_cd))
+ ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
+ goto out;
}
if (host->pdata && host->pdata->init)
@@ -806,20 +827,14 @@ static int pxamci_probe(struct platform_device *pdev)
return 0;
-err_request_irq:
- gpio_free(gpio_cd);
-err_gpio_cd:
- gpio_free(gpio_ro);
-err_gpio_ro:
- gpio_free(gpio_power);
- out:
+out:
if (host) {
- if (host->dma >= 0)
- pxa_free_dma(host->dma);
+ if (host->dma_chan_rx)
+ dma_release_channel(host->dma_chan_rx);
+ if (host->dma_chan_tx)
+ dma_release_channel(host->dma_chan_tx);
if (host->base)
iounmap(host->base);
- if (host->sg_cpu)
- dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
if (host->clk)
clk_put(host->clk);
}
@@ -844,14 +859,6 @@ static int pxamci_remove(struct platform_device *pdev)
gpio_ro = host->pdata->gpio_card_ro;
gpio_power = host->pdata->gpio_power;
}
- if (gpio_is_valid(gpio_cd)) {
- free_irq(gpio_to_irq(gpio_cd), mmc);
- gpio_free(gpio_cd);
- }
- if (gpio_is_valid(gpio_ro))
- gpio_free(gpio_ro);
- if (gpio_is_valid(gpio_power))
- gpio_free(gpio_power);
if (host->vcc)
regulator_put(host->vcc);
@@ -863,13 +870,12 @@ static int pxamci_remove(struct platform_device *pdev)
END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
host->base + MMC_I_MASK);
- DRCMR(host->dma_drcmrrx) = 0;
- DRCMR(host->dma_drcmrtx) = 0;
-
free_irq(host->irq, host);
- pxa_free_dma(host->dma);
+ dmaengine_terminate_all(host->dma_chan_rx);
+ dmaengine_terminate_all(host->dma_chan_tx);
+ dma_release_channel(host->dma_chan_rx);
+ dma_release_channel(host->dma_chan_tx);
iounmap(host->base);
- dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
clk_put(host->clk);
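The pxamci hunks above convert the driver from the PXA-private DMA interface (DRCMR/DCSR registers and pxa_dma_desc chains) to the generic dmaengine slave API. As a hedged orientation only — this is not code from the patch — the sketch below shows the generic device-to-memory flow the driver now follows; chan, sgl, sg_len, fifo_addr, done_cb and cb_param are assumed to be supplied by the caller.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Minimal sketch of the dmaengine slave sequence: configure the channel,
 * prepare a slave sg descriptor, attach an optional completion callback,
 * submit it, then tell the engine to start pumping.
 */
static int example_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len, dma_addr_t fifo_addr,
			    dma_async_tx_callback done_cb, void *cb_param)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 32,
	};
	struct dma_async_tx_descriptor *tx;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);	/* program width/burst/FIFO address */
	if (ret)
		return ret;

	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT);
	if (!tx)
		return -EBUSY;

	tx->callback = done_cb;			/* runs when the transfer completes */
	tx->callback_param = cb_param;

	dmaengine_submit(tx);			/* queue the descriptor */
	dma_async_issue_pending(chan);		/* kick the channel */
	return 0;
}

In pxamci_setup_data() above the same steps appear with MMC-specific details: the completion callback is only attached on writes, and for PXA27x writes dma_async_issue_pending() is deferred to pxamci_cmd_done() as part of the erratum #91 workaround.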
diff --git a/kernel/drivers/mmc/host/rtsx_pci_sdmmc.c b/kernel/drivers/mmc/host/rtsx_pci_sdmmc.c
index 1d3d6c4bf..93137483e 100644
--- a/kernel/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/kernel/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -1474,7 +1474,7 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_device_id rtsx_pci_sdmmc_ids[] = {
+static const struct platform_device_id rtsx_pci_sdmmc_ids[] = {
{
.name = DRV_NAME_RTSX_PCI_SDMMC,
}, {
diff --git a/kernel/drivers/mmc/host/rtsx_usb_sdmmc.c b/kernel/drivers/mmc/host/rtsx_usb_sdmmc.c
index 88af827e0..6c71fc9f7 100644
--- a/kernel/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/kernel/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -1439,7 +1439,7 @@ static int rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_device_id rtsx_usb_sdmmc_ids[] = {
+static const struct platform_device_id rtsx_usb_sdmmc_ids[] = {
{
.name = "rtsx_usb_sdmmc",
}, {
diff --git a/kernel/drivers/mmc/host/s3cmci.c b/kernel/drivers/mmc/host/s3cmci.c
index 94cddf381..6291d5042 100644
--- a/kernel/drivers/mmc/host/s3cmci.c
+++ b/kernel/drivers/mmc/host/s3cmci.c
@@ -1856,7 +1856,7 @@ static int s3cmci_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_device_id s3cmci_driver_ids[] = {
+static const struct platform_device_id s3cmci_driver_ids[] = {
{
.name = "s3c2410-sdi",
.driver_data = 0,
diff --git a/kernel/drivers/mmc/host/sdhci-acpi.c b/kernel/drivers/mmc/host/sdhci-acpi.c
index 22d929fa3..a5cda926d 100644
--- a/kernel/drivers/mmc/host/sdhci-acpi.c
+++ b/kernel/drivers/mmc/host/sdhci-acpi.c
@@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
.ops = &sdhci_acpi_ops_int,
};
+static int bxt_get_cd(struct mmc_host *mmc)
+{
+ int gpio_cd = mmc_gpio_get_cd(mmc);
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!gpio_cd)
+ return 0;
+
+ pm_runtime_get_sync(mmc->parent);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ goto out;
+
+ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ pm_runtime_mark_last_busy(mmc->parent);
+ pm_runtime_put_autosuspend(mmc->parent);
+
+ return ret;
+}
+
static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
const char *hid, const char *uid)
{
@@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
/* Platform specific code during sd probe slot goes here */
+ if (hid && !strcmp(hid, "80865ACA"))
+ host->mmc_host_ops.get_cd = bxt_get_cd;
+
return 0;
}
@@ -207,7 +237,9 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.caps2 = MMC_CAP2_HC_ERASE_SZ,
.flags = SDHCI_ACPI_RUNTIME_PM,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_STOP_WITH_TC,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_STOP_WITH_TC |
+ SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
.probe_slot = sdhci_acpi_emmc_probe_slot,
};
@@ -239,6 +271,9 @@ struct sdhci_acpi_uid_slot {
};
static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
+ { "80865ACA", NULL, &sdhci_acpi_slot_int_sd },
+ { "80865ACC", NULL, &sdhci_acpi_slot_int_emmc },
+ { "80865AD0", NULL, &sdhci_acpi_slot_int_sdio },
{ "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
{ "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
{ "80860F16" , NULL, &sdhci_acpi_slot_int_sd },
@@ -247,11 +282,15 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
{ "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
{ "INT344D" , NULL, &sdhci_acpi_slot_int_sdio },
+ { "PNP0FFF" , "3" , &sdhci_acpi_slot_int_sd },
{ "PNP0D40" },
{ },
};
static const struct acpi_device_id sdhci_acpi_ids[] = {
+ { "80865ACA" },
+ { "80865ACC" },
+ { "80865AD0" },
{ "80860F14" },
{ "80860F16" },
{ "INT33BB" },
diff --git a/kernel/drivers/mmc/host/sdhci-bcm-kona.c b/kernel/drivers/mmc/host/sdhci-bcm-kona.c
index 2bd90fb35..00a8a40a3 100644
--- a/kernel/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/kernel/drivers/mmc/host/sdhci-bcm-kona.c
@@ -273,7 +273,7 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev)
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
dev_dbg(dev, "is_8bit=%c\n",
- (host->mmc->caps | MMC_CAP_8_BIT_DATA) ? 'Y' : 'N');
+ (host->mmc->caps & MMC_CAP_8_BIT_DATA) ? 'Y' : 'N');
ret = sdhci_bcm_kona_sd_reset(host);
if (ret)
diff --git a/kernel/drivers/mmc/host/sdhci-bcm2835.c b/kernel/drivers/mmc/host/sdhci-bcm2835.c
index 0ef0343c6..1c65d4690 100644
--- a/kernel/drivers/mmc/host/sdhci-bcm2835.c
+++ b/kernel/drivers/mmc/host/sdhci-bcm2835.c
@@ -172,9 +172,19 @@ static int bcm2835_sdhci_probe(struct platform_device *pdev)
ret = PTR_ERR(pltfm_host->clk);
goto err;
}
+ ret = clk_prepare_enable(pltfm_host->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable host clk\n");
+ goto err;
+ }
- return sdhci_add_host(host);
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_clk;
+ return 0;
+err_clk:
+ clk_disable_unprepare(pltfm_host->clk);
err:
sdhci_pltfm_free(pdev);
return ret;
diff --git a/kernel/drivers/mmc/host/sdhci-esdhc-imx.c b/kernel/drivers/mmc/host/sdhci-esdhc-imx.c
index 82f512d87..1f1582f6c 100644
--- a/kernel/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/kernel/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -4,7 +4,7 @@
* derived from the OF-version.
*
* Copyright (c) 2010 Pengutronix e.K.
- * Author: Wolfram Sang <w.sang@pengutronix.de>
+ * Author: Wolfram Sang <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -32,6 +32,7 @@
#include "sdhci-esdhc.h"
#define ESDHC_CTRL_D3CD 0x08
+#define ESDHC_BURST_LEN_EN_INCR (1 << 27)
/* VENDOR SPEC register */
#define ESDHC_VENDOR_SPEC 0xc0
#define ESDHC_VENDOR_SPEC_SDIO_QUIRK (1 << 1)
@@ -44,6 +45,7 @@
#define ESDHC_MIX_CTRL_EXE_TUNE (1 << 22)
#define ESDHC_MIX_CTRL_SMPCLK_SEL (1 << 23)
#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25)
+#define ESDHC_MIX_CTRL_HS400_EN (1 << 26)
/* Bits 3 and 6 are not SDHCI standard definitions */
#define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7
/* Tuning bits */
@@ -60,10 +62,21 @@
#define ESDHC_TUNE_CTRL_MIN 0
#define ESDHC_TUNE_CTRL_MAX ((1 << 7) - 1)
+/* strobe dll register */
+#define ESDHC_STROBE_DLL_CTRL 0x70
+#define ESDHC_STROBE_DLL_CTRL_ENABLE (1 << 0)
+#define ESDHC_STROBE_DLL_CTRL_RESET (1 << 1)
+#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT 3
+
+#define ESDHC_STROBE_DLL_STATUS 0x74
+#define ESDHC_STROBE_DLL_STS_REF_LOCK (1 << 1)
+#define ESDHC_STROBE_DLL_STS_SLV_LOCK 0x1
+
#define ESDHC_TUNING_CTRL 0xcc
#define ESDHC_STD_TUNING_EN (1 << 24)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
#define ESDHC_TUNING_START_TAP 0x1
+#define ESDHC_TUNING_STEP_SHIFT 16
/* pinctrl state */
#define ESDHC_PINCTRL_STATE_100MHZ "state_100mhz"
@@ -112,6 +125,19 @@
#define ESDHC_FLAG_STD_TUNING BIT(5)
/* The IP has SDHCI_CAPABILITIES_1 register */
#define ESDHC_FLAG_HAVE_CAP1 BIT(6)
+/*
+ * The IP has erratum ERR004536:
+ * uSDHC: ADMA Length Mismatch Error occurs if the AHB read access is slow
+ * when reading data from the card
+ */
+#define ESDHC_FLAG_ERR004536 BIT(7)
+/* The IP supports HS200 mode */
+#define ESDHC_FLAG_HS200 BIT(8)
+/* The IP supports HS400 mode */
+#define ESDHC_FLAG_HS400 BIT(9)
+
+/* A clock frequency higher than this rate requires strobe dll control */
+#define ESDHC_STROBE_DLL_CLK_FREQ 100000000
struct esdhc_soc_data {
u32 flags;
@@ -139,7 +165,19 @@ static struct esdhc_soc_data usdhc_imx6q_data = {
static struct esdhc_soc_data usdhc_imx6sl_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
- | ESDHC_FLAG_HAVE_CAP1,
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536
+ | ESDHC_FLAG_HS200,
+};
+
+static struct esdhc_soc_data usdhc_imx6sx_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200,
+};
+
+static struct esdhc_soc_data usdhc_imx7d_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_HS400,
};
struct pltfm_imx_data {
@@ -161,7 +199,7 @@ struct pltfm_imx_data {
u32 is_ddr;
};
-static struct platform_device_id imx_esdhc_devtype[] = {
+static const struct platform_device_id imx_esdhc_devtype[] = {
{
.name = "sdhci-esdhc-imx25",
.driver_data = (kernel_ulong_t) &esdhc_imx25_data,
@@ -182,8 +220,10 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
{ .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, },
{ .compatible = "fsl,imx51-esdhc", .data = &esdhc_imx51_data, },
{ .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, },
+ { .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, },
{ .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, },
{ .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
+ { .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
@@ -259,6 +299,9 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104
| SDHCI_SUPPORT_SDR50
| SDHCI_USE_SDR50_TUNING;
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
+ val |= SDHCI_SUPPORT_HS400;
}
}
@@ -298,7 +341,7 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
u32 data;
if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
- if (val & SDHCI_INT_CARD_INT) {
+ if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) {
/*
* Clear and then set D3CD bit to avoid missing the
* card interrupt. This is a eSDHC controller problem
@@ -313,6 +356,11 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
data |= ESDHC_CTRL_D3CD;
writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
}
+
+ if (val & SDHCI_INT_ADMA_ERROR) {
+ val &= ~SDHCI_INT_ADMA_ERROR;
+ val |= ESDHC_INT_VENDOR_SPEC_DMA_ERR;
+ }
}
if (unlikely((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
@@ -333,13 +381,6 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
}
}
- if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
- if (val & SDHCI_INT_ADMA_ERROR) {
- val &= ~SDHCI_INT_ADMA_ERROR;
- val |= ESDHC_INT_VENDOR_SPEC_DMA_ERR;
- }
- }
-
writel(val, host->ioaddr + reg);
}
@@ -435,6 +476,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ u32 tuning_ctrl;
if (val & SDHCI_CTRL_TUNED_CLK) {
v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
} else {
@@ -445,6 +487,11 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
if (val & SDHCI_CTRL_EXEC_TUNING) {
v |= ESDHC_MIX_CTRL_EXE_TUNE;
m |= ESDHC_MIX_CTRL_FBCLK_SEL;
+ tuning_ctrl = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ tuning_ctrl |= ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP;
+ if (imx_data->boarddata.tuning_step)
+ tuning_ctrl |= imx_data->boarddata.tuning_step << ESDHC_TUNING_STEP_SHIFT;
+ writel(tuning_ctrl, host->ioaddr + ESDHC_TUNING_CTRL);
} else {
v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
}
@@ -568,13 +615,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
- struct esdhc_platform_data *boarddata = &imx_data->boarddata;
- if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock))
- return boarddata->f_max;
- else
- return pltfm_host->clock;
+ return pltfm_host->clock;
}
static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
@@ -717,7 +759,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
min = ESDHC_TUNE_CTRL_MIN;
while (min < ESDHC_TUNE_CTRL_MAX) {
esdhc_prepare_tuning(host, min);
- if (!mmc_send_tuning(host->mmc))
+ if (!mmc_send_tuning(host->mmc, opcode, NULL))
break;
min += ESDHC_TUNE_CTRL_STEP;
}
@@ -726,7 +768,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
max = min + ESDHC_TUNE_CTRL_STEP;
while (max < ESDHC_TUNE_CTRL_MAX) {
esdhc_prepare_tuning(host, max);
- if (mmc_send_tuning(host->mmc)) {
+ if (mmc_send_tuning(host->mmc, opcode, NULL)) {
max -= ESDHC_TUNE_CTRL_STEP;
break;
}
@@ -736,7 +778,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
/* use average delay to get the best timing */
avg = (min + max) / 2;
esdhc_prepare_tuning(host, avg);
- ret = mmc_send_tuning(host->mmc);
+ ret = mmc_send_tuning(host->mmc, opcode, NULL);
esdhc_post_tuning(host);
dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n",
@@ -766,6 +808,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
break;
case MMC_TIMING_UHS_SDR104:
case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_MMC_HS400:
pinctrl = imx_data->pins_200mhz;
break;
default:
@@ -776,24 +819,68 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
return pinctrl_select_state(imx_data->pinctrl, pinctrl);
}
+/*
+ * For HS400 eMMC there is a data_strobe line; this signal is generated
+ * by the device and used for data output and CRC status response output
+ * in HS400 mode. Its frequency follows the frequency of CLK generated by
+ * the host, and the host receives data aligned to the edge of the
+ * data_strobe line. Because of the delay between the CLK line and the
+ * data_strobe line, if that delay is larger than one clock cycle the two
+ * lines become misaligned and read errors show up. So when CLK is higher
+ * than 100MHz, each clock cycle is short enough that the host should
+ * configure the delay target.
+ */
+static void esdhc_set_strobe_dll(struct sdhci_host *host)
+{
+ u32 v;
+
+ if (host->mmc->actual_clock > ESDHC_STROBE_DLL_CLK_FREQ) {
+ /* force a reset on strobe dll */
+ writel(ESDHC_STROBE_DLL_CTRL_RESET,
+ host->ioaddr + ESDHC_STROBE_DLL_CTRL);
+ /*
+ * enable strobe dll ctrl and adjust the delay target
+ * for the uSDHC loopback read clock
+ */
+ v = ESDHC_STROBE_DLL_CTRL_ENABLE |
+ (7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
+ writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL);
+ /* wait 1us to make sure strobe dll status register stable */
+ udelay(1);
+ v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS);
+ if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK))
+ dev_warn(mmc_dev(host->mmc),
+ "warning! HS400 strobe DLL status REF not lock!\n");
+ if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK))
+ dev_warn(mmc_dev(host->mmc),
+ "warning! HS400 strobe DLL status SLV not lock!\n");
+ }
+}
+
static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
+ u32 m;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+ /* disable ddr mode and disable HS400 mode */
+ m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ m &= ~(ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN);
+ imx_data->is_ddr = 0;
+
switch (timing) {
case MMC_TIMING_UHS_SDR12:
case MMC_TIMING_UHS_SDR25:
case MMC_TIMING_UHS_SDR50:
case MMC_TIMING_UHS_SDR104:
case MMC_TIMING_MMC_HS200:
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
break;
case MMC_TIMING_UHS_DDR50:
case MMC_TIMING_MMC_DDR52:
- writel(readl(host->ioaddr + ESDHC_MIX_CTRL) |
- ESDHC_MIX_CTRL_DDREN,
- host->ioaddr + ESDHC_MIX_CTRL);
+ m |= ESDHC_MIX_CTRL_DDREN;
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
imx_data->is_ddr = 1;
if (boarddata->delay_line) {
u32 v;
@@ -805,6 +892,12 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
writel(v, host->ioaddr + ESDHC_DLL_CTRL);
}
break;
+ case MMC_TIMING_MMC_HS400:
+ m |= ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN;
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+ imx_data->is_ddr = 1;
+ esdhc_set_strobe_dll(host);
+ break;
}
esdhc_change_pinstate(host, timing);
@@ -865,33 +958,20 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
static int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
struct sdhci_host *host,
- struct esdhc_platform_data *boarddata)
+ struct pltfm_imx_data *imx_data)
{
struct device_node *np = pdev->dev.of_node;
-
- if (!np)
- return -ENODEV;
-
- if (of_get_property(np, "non-removable", NULL))
- boarddata->cd_type = ESDHC_CD_PERMANENT;
-
- if (of_get_property(np, "fsl,cd-controller", NULL))
- boarddata->cd_type = ESDHC_CD_CONTROLLER;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+ int ret;
if (of_get_property(np, "fsl,wp-controller", NULL))
boarddata->wp_type = ESDHC_WP_CONTROLLER;
- boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
- if (gpio_is_valid(boarddata->cd_gpio))
- boarddata->cd_type = ESDHC_CD_GPIO;
-
boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
if (gpio_is_valid(boarddata->wp_gpio))
boarddata->wp_type = ESDHC_WP_GPIO;
- of_property_read_u32(np, "bus-width", &boarddata->max_bus_width);
-
- of_property_read_u32(np, "max-frequency", &boarddata->f_max);
+ of_property_read_u32(np, "fsl,tuning-step", &boarddata->tuning_step);
if (of_find_property(np, "no-1-8-v", NULL))
boarddata->support_vsel = false;
@@ -903,25 +983,117 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
mmc_of_parse_voltage(np, &host->ocr_mask);
+	/* sdr50 and sdr104 need to work at 1.8v signal voltage */
+ if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
+ !IS_ERR(imx_data->pins_default)) {
+ imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_100MHZ);
+ imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ ESDHC_PINCTRL_STATE_200MHZ);
+ if (IS_ERR(imx_data->pins_100mhz) ||
+ IS_ERR(imx_data->pins_200mhz)) {
+ dev_warn(mmc_dev(host->mmc),
+ "could not get ultra high speed state, work on normal mode\n");
+ /*
+			 * fall back to not supporting uhs by specifying the no-1.8v quirk
+ */
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+ } else {
+ host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ }
+
+ /* call to generic mmc_of_parse to support additional capabilities */
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ return ret;
+
+ if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
return 0;
}
#else
static inline int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
struct sdhci_host *host,
- struct esdhc_platform_data *boarddata)
+ struct pltfm_imx_data *imx_data)
{
return -ENODEV;
}
#endif
+static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
+ struct sdhci_host *host,
+ struct pltfm_imx_data *imx_data)
+{
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+ int err;
+
+ if (!host->mmc->parent->platform_data) {
+ dev_err(mmc_dev(host->mmc), "no board data!\n");
+ return -EINVAL;
+ }
+
+ imx_data->boarddata = *((struct esdhc_platform_data *)
+ host->mmc->parent->platform_data);
+ /* write_protect */
+ if (boarddata->wp_type == ESDHC_WP_GPIO) {
+ err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
+ if (err) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to request write-protect gpio!\n");
+ return err;
+ }
+ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ }
+
+ /* card_detect */
+ switch (boarddata->cd_type) {
+ case ESDHC_CD_GPIO:
+ err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
+ if (err) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to request card-detect gpio!\n");
+ return err;
+ }
+ /* fall through */
+
+ case ESDHC_CD_CONTROLLER:
+ /* we have a working card_detect back */
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ break;
+
+ case ESDHC_CD_PERMANENT:
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+ break;
+
+ case ESDHC_CD_NONE:
+ break;
+ }
+
+ switch (boarddata->max_bus_width) {
+ case 8:
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
+ break;
+ case 4:
+ host->mmc->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 1:
+ default:
+ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+ break;
+ }
+
+ return 0;
+}
+
static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(imx_esdhc_dt_ids, &pdev->dev);
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
- struct esdhc_platform_data *boarddata;
int err;
struct pltfm_imx_data *imx_data;
@@ -988,9 +1160,35 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
* to something insane. Change it back here.
*/
if (esdhc_is_usdhc(imx_data)) {
- writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL);
+ writel(0x10401040, host->ioaddr + ESDHC_WTMK_LVL);
+
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
host->mmc->caps |= MMC_CAP_1_8V_DDR;
+
+ /*
+		 * ROM code clears the burst_length_enable bit if this usdhc
+		 * is chosen as the boot device. Change it back here,
+		 * otherwise performance suffers badly. The bit enables the
+		 * burst length indicator for the external AHB2AXI bridge;
+		 * it is especially useful for INCR transfers because,
+		 * without it, the bridge does not know the burst length in
+		 * advance and AHB INCR transfers can only be converted to
+		 * singles on the AXI side.
+ */
+ writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
+ | ESDHC_BURST_LEN_EN_INCR,
+ host->ioaddr + SDHCI_HOST_CONTROL);
+
+ if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200))
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
+
+ /*
+ * errata ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
+ * TO1.1, it's harmless for MX6SL
+ */
+ writel(readl(host->ioaddr + 0x6c) | BIT(7),
+ host->ioaddr + 0x6c);
}
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
@@ -1002,54 +1200,16 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP,
host->ioaddr + ESDHC_TUNING_CTRL);
- boarddata = &imx_data->boarddata;
- if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) {
- if (!host->mmc->parent->platform_data) {
- dev_err(mmc_dev(host->mmc), "no board data!\n");
- err = -EINVAL;
- goto disable_clk;
- }
- imx_data->boarddata = *((struct esdhc_platform_data *)
- host->mmc->parent->platform_data);
- }
-
- /* card_detect */
- if (boarddata->cd_type == ESDHC_CD_CONTROLLER)
- host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
-
- switch (boarddata->max_bus_width) {
- case 8:
- host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
- break;
- case 4:
- host->mmc->caps |= MMC_CAP_4_BIT_DATA;
- break;
- case 1:
- default:
- host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
- break;
- }
+ if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
+ host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
- /* sdr50 and sdr104 needs work on 1.8v signal voltage */
- if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
- !IS_ERR(imx_data->pins_default)) {
- imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
- ESDHC_PINCTRL_STATE_100MHZ);
- imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
- ESDHC_PINCTRL_STATE_200MHZ);
- if (IS_ERR(imx_data->pins_100mhz) ||
- IS_ERR(imx_data->pins_200mhz)) {
- dev_warn(mmc_dev(host->mmc),
- "could not get ultra high speed state, work on normal mode\n");
- /* fall back to not support uhs by specify no 1.8v quirk */
- host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
- }
- } else {
- host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
- }
+ if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
+ host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400;
- /* call to generic mmc_of_parse to support additional capabilities */
- err = mmc_of_parse(host->mmc);
+ if (of_id)
+ err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+ else
+ err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
if (err)
goto disable_clk;
@@ -1151,5 +1311,5 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
module_platform_driver(sdhci_esdhc_imx_driver);
MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC");
-MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-esdhc.h b/kernel/drivers/mmc/host/sdhci-esdhc.h
index a870c4273..de132e281 100644
--- a/kernel/drivers/mmc/host/sdhci-esdhc.h
+++ b/kernel/drivers/mmc/host/sdhci-esdhc.h
@@ -21,7 +21,10 @@
#define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
SDHCI_QUIRK_NO_BUSY_IRQ | \
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
- SDHCI_QUIRK_PIO_NEEDS_DELAY)
+ SDHCI_QUIRK_PIO_NEEDS_DELAY | \
+ SDHCI_QUIRK_NO_HISPD_BIT)
+
+#define ESDHC_PROCTL 0x28
#define ESDHC_SYSTEM_CONTROL 0x2c
#define ESDHC_CLOCK_MASK 0x0000fff0
diff --git a/kernel/drivers/mmc/host/sdhci-msm.c b/kernel/drivers/mmc/host/sdhci-msm.c
index 4a09f7608..4695bee20 100644
--- a/kernel/drivers/mmc/host/sdhci-msm.c
+++ b/kernel/drivers/mmc/host/sdhci-msm.c
@@ -373,7 +373,7 @@ retry:
if (rc)
return rc;
- rc = mmc_send_tuning(mmc);
+ rc = mmc_send_tuning(mmc, opcode, NULL);
if (!rc) {
/* Tuning is successful at this tuning point */
tuned_phases[tuned_phase_cnt++] = phase;
@@ -489,6 +489,11 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto pclk_disable;
}
+ /* Vote for maximum clock rate for maximum performance */
+ ret = clk_set_rate(msm_host->clk, INT_MAX);
+ if (ret)
+ dev_warn(&pdev->dev, "core clock boost failed\n");
+
ret = clk_prepare_enable(msm_host->clk);
if (ret)
goto pclk_disable;
diff --git a/kernel/drivers/mmc/host/sdhci-of-arasan.c b/kernel/drivers/mmc/host/sdhci-of-arasan.c
index 6287d426c..75379cb0f 100644
--- a/kernel/drivers/mmc/host/sdhci-of-arasan.c
+++ b/kernel/drivers/mmc/host/sdhci-of-arasan.c
@@ -20,6 +20,7 @@
*/
#include <linux/module.h>
+#include <linux/of_device.h>
#include "sdhci-pltfm.h"
#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
@@ -62,6 +63,9 @@ static struct sdhci_ops sdhci_arasan_ops = {
static struct sdhci_pltfm_data sdhci_arasan_pdata = {
.ops = &sdhci_arasan_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
#ifdef CONFIG_PM_SLEEP
@@ -168,6 +172,11 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto clk_disable_all;
}
+ if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-4.9a")) {
+ host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
+ host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
+ }
+
sdhci_get_of_property(pdev);
pltfm_host = sdhci_priv(host);
pltfm_host->priv = sdhci_arasan;
@@ -208,6 +217,8 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
static const struct of_device_id sdhci_arasan_of_match[] = {
{ .compatible = "arasan,sdhci-8.9a" },
+ { .compatible = "arasan,sdhci-5.1" },
+ { .compatible = "arasan,sdhci-4.9a" },
{ }
};
MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
diff --git a/kernel/drivers/mmc/host/sdhci-of-at91.c b/kernel/drivers/mmc/host/sdhci-of-at91.c
new file mode 100644
index 000000000..06d0b50df
--- /dev/null
+++ b/kernel/drivers/mmc/host/sdhci-of-at91.c
@@ -0,0 +1,191 @@
+/*
+ * Atmel SDMMC controller driver.
+ *
+ * Copyright (C) 2015 Atmel,
+ * 2015 Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "sdhci-pltfm.h"
+
+#define SDMMC_CACR 0x230
+#define SDMMC_CACR_CAPWREN BIT(0)
+#define SDMMC_CACR_KEY (0x46 << 8)
+
+struct sdhci_at91_priv {
+ struct clk *hclock;
+ struct clk *gck;
+ struct clk *mainck;
+};
+
+static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data soc_data_sama5d2 = {
+ .ops = &sdhci_at91_sama5d2_ops,
+ .quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST,
+};
+
+static const struct of_device_id sdhci_at91_dt_match[] = {
+ { .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 },
+ {}
+};
+
+static int sdhci_at91_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct sdhci_pltfm_data *soc_data;
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_at91_priv *priv;
+ unsigned int caps0, caps1;
+ unsigned int clk_base, clk_mul;
+ unsigned int gck_rate, real_gck_rate;
+ int ret;
+
+ match = of_match_device(sdhci_at91_dt_match, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+ soc_data = match->data;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "unable to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
+ if (IS_ERR(priv->mainck)) {
+ dev_err(&pdev->dev, "failed to get baseclk\n");
+ return PTR_ERR(priv->mainck);
+ }
+
+ priv->hclock = devm_clk_get(&pdev->dev, "hclock");
+ if (IS_ERR(priv->hclock)) {
+ dev_err(&pdev->dev, "failed to get hclock\n");
+ return PTR_ERR(priv->hclock);
+ }
+
+ priv->gck = devm_clk_get(&pdev->dev, "multclk");
+ if (IS_ERR(priv->gck)) {
+ dev_err(&pdev->dev, "failed to get multclk\n");
+ return PTR_ERR(priv->gck);
+ }
+
+ host = sdhci_pltfm_init(pdev, soc_data, 0);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ /*
+	 * The mult clock is provided as a generated clock by the PMC
+ * controller. In order to set the rate of gck, we have to get the
+ * base clock rate and the clock mult from capabilities.
+ */
+ clk_prepare_enable(priv->hclock);
+ caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES);
+ caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1);
+ clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
+ clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT;
+ gck_rate = clk_base * 1000000 * (clk_mul + 1);
+ ret = clk_set_rate(priv->gck, gck_rate);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to set gck");
+ goto hclock_disable_unprepare;
+ }
+ /*
+ * We need to check if we have the requested rate for gck because in
+	 * some cases this rate might not be supported. If that happens, the rate
+ * is the closest one gck can provide. We have to update the value
+ * of clk mul.
+ */
+ real_gck_rate = clk_get_rate(priv->gck);
+ if (real_gck_rate != gck_rate) {
+ clk_mul = real_gck_rate / (clk_base * 1000000) - 1;
+ caps1 &= (~SDHCI_CLOCK_MUL_MASK);
+ caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK);
+ /* Set capabilities in r/w mode. */
+ writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR);
+ writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
+ /* Set capabilities in ro mode. */
+ writel(0, host->ioaddr + SDMMC_CACR);
+ dev_info(&pdev->dev, "update clk mul to %u as gck rate is %u Hz\n",
+ clk_mul, real_gck_rate);
+ }
+
+ clk_prepare_enable(priv->mainck);
+ clk_prepare_enable(priv->gck);
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = priv;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto clocks_disable_unprepare;
+
+ sdhci_get_of_property(pdev);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto clocks_disable_unprepare;
+
+ return 0;
+
+clocks_disable_unprepare:
+ clk_disable_unprepare(priv->gck);
+ clk_disable_unprepare(priv->mainck);
+hclock_disable_unprepare:
+ clk_disable_unprepare(priv->hclock);
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_at91_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_at91_priv *priv = pltfm_host->priv;
+
+ sdhci_pltfm_unregister(pdev);
+
+ clk_disable_unprepare(priv->gck);
+ clk_disable_unprepare(priv->hclock);
+ clk_disable_unprepare(priv->mainck);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_at91_driver = {
+ .driver = {
+ .name = "sdhci-at91",
+ .of_match_table = sdhci_at91_dt_match,
+ .pm = SDHCI_PLTFM_PMOPS,
+ },
+ .probe = sdhci_at91_probe,
+ .remove = sdhci_at91_remove,
+};
+
+module_platform_driver(sdhci_at91_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for at91");
+MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
+MODULE_LICENSE("GPL v2");
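One step in the new sdhci_at91_probe() above that benefits from a worked example is the clk_mul fix-up: the requested generated-clock rate is clk_base (in MHz, from the capability registers) * 1000000 * (clk_mul + 1), and if the PMC grants a different rate the multiplier written back into SDHCI_CAPABILITIES_1 is recomputed from the granted rate. A hedged, standalone illustration with made-up numbers (they are not taken from any datasheet):

#include <stdio.h>

int main(void)
{
	unsigned int clk_base = 12;		/* MHz, as read from the capabilities */
	unsigned int clk_mul = 40;		/* as read from SDHCI_CAPABILITIES_1 */
	unsigned int gck_rate = clk_base * 1000000 * (clk_mul + 1);	/* 492000000 */
	unsigned int real_gck_rate = 480000000;	/* pretend the PMC rounded down */

	if (real_gck_rate != gck_rate)
		clk_mul = real_gck_rate / (clk_base * 1000000) - 1;	/* 39 */

	printf("asked for %u Hz, got %u Hz, clk_mul rewritten to %u\n",
	       gck_rate, real_gck_rate, clk_mul);
	return 0;
}

Rewriting the capability keeps the SDHCI core's divider math consistent with the rate the PMC actually delivers.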
diff --git a/kernel/drivers/mmc/host/sdhci-of-esdhc.c b/kernel/drivers/mmc/host/sdhci-of-esdhc.c
index 22e9111b1..90e94a028 100644
--- a/kernel/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/kernel/drivers/mmc/host/sdhci-of-esdhc.c
@@ -24,122 +24,324 @@
#define VENDOR_V_22 0x12
#define VENDOR_V_23 0x13
-static u32 esdhc_readl(struct sdhci_host *host, int reg)
+
+struct sdhci_esdhc {
+ u8 vendor_ver;
+ u8 spec_ver;
+};
+
+/**
+ * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
+ * to make it compatible with SD spec.
+ *
+ * @host: pointer to sdhci_host
+ * @spec_reg: SD spec register address
+ * @value: 32bit eSDHC register value on spec_reg address
+ *
+ * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
+ * registers are 32 bits. There are differences in register size, register
+ * address, register function, bit position and function between eSDHC spec
+ * and SD spec.
+ *
+ * Return a fixed up register value
+ */
+static u32 esdhc_readl_fixup(struct sdhci_host *host,
+ int spec_reg, u32 value)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = pltfm_host->priv;
u32 ret;
- ret = in_be32(host->ioaddr + reg);
/*
* The bit of ADMA flag in eSDHC is not compatible with standard
* SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
* supported by eSDHC.
* And for many FSL eSDHC controller, the reset value of field
- * SDHCI_CAN_DO_ADMA1 is one, but some of them can't support ADMA,
+ * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA,
* only these vendor version is greater than 2.2/0x12 support ADMA.
- * For FSL eSDHC, must aligned 4-byte, so use 0xFC to read the
- * the verdor version number, oxFE is SDHCI_HOST_VERSION.
*/
- if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) {
- u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
- tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
- if (tmp > VENDOR_V_22)
- ret |= SDHCI_CAN_DO_ADMA2;
+ if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
+ if (esdhc->vendor_ver > VENDOR_V_22) {
+ ret = value | SDHCI_CAN_DO_ADMA2;
+ return ret;
+ }
}
-
+ ret = value;
return ret;
}
-static u16 esdhc_readw(struct sdhci_host *host, int reg)
+static u16 esdhc_readw_fixup(struct sdhci_host *host,
+ int spec_reg, u32 value)
{
u16 ret;
- int base = reg & ~0x3;
- int shift = (reg & 0x2) * 8;
+ int shift = (spec_reg & 0x2) * 8;
- if (unlikely(reg == SDHCI_HOST_VERSION))
- ret = in_be32(host->ioaddr + base) & 0xffff;
+ if (spec_reg == SDHCI_HOST_VERSION)
+ ret = value & 0xffff;
else
- ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff;
+ ret = (value >> shift) & 0xffff;
return ret;
}
-static u8 esdhc_readb(struct sdhci_host *host, int reg)
+static u8 esdhc_readb_fixup(struct sdhci_host *host,
+ int spec_reg, u32 value)
{
- int base = reg & ~0x3;
- int shift = (reg & 0x3) * 8;
- u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff;
+ u8 ret;
+ u8 dma_bits;
+ int shift = (spec_reg & 0x3) * 8;
+
+ ret = (value >> shift) & 0xff;
/*
* "DMA select" locates at offset 0x28 in SD specification, but on
* P5020 or P3041, it locates at 0x29.
*/
- if (reg == SDHCI_HOST_CONTROL) {
- u32 dma_bits;
-
- dma_bits = in_be32(host->ioaddr + reg);
+ if (spec_reg == SDHCI_HOST_CONTROL) {
/* DMA select is 22,23 bits in Protocol Control Register */
- dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK;
-
+ dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
/* fixup the result */
ret &= ~SDHCI_CTRL_DMA_MASK;
ret |= dma_bits;
}
-
return ret;
}
-static void esdhc_writel(struct sdhci_host *host, u32 val, int reg)
+/**
+ * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
+ * written into eSDHC register.
+ *
+ * @host: pointer to sdhci_host
+ * @spec_reg: SD spec register address
+ * @value: 8/16/32bit SD spec register value that would be written
+ * @old_value: 32bit eSDHC register value on spec_reg address
+ *
+ * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
+ * registers are 32 bits. There are differences in register size, register
+ * address, register function, bit position and function between eSDHC spec
+ * and SD spec.
+ *
+ * Return a fixed up register value
+ */
+static u32 esdhc_writel_fixup(struct sdhci_host *host,
+ int spec_reg, u32 value, u32 old_value)
{
+ u32 ret;
+
/*
- * Enable IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
- * when SYSCTL[RSTD]) is set for some special operations.
- * No any impact other operation.
+ * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
+ * when SYSCTL[RSTD] is set for some special operations.
+	 * It has no impact on any other operation.
*/
- if (reg == SDHCI_INT_ENABLE)
- val |= SDHCI_INT_BLK_GAP;
- sdhci_be32bs_writel(host, val, reg);
+ if (spec_reg == SDHCI_INT_ENABLE)
+ ret = value | SDHCI_INT_BLK_GAP;
+ else
+ ret = value;
+
+ return ret;
}
-static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
+static u32 esdhc_writew_fixup(struct sdhci_host *host,
+ int spec_reg, u16 value, u32 old_value)
{
- if (reg == SDHCI_BLOCK_SIZE) {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int shift = (spec_reg & 0x2) * 8;
+ u32 ret;
+
+ switch (spec_reg) {
+ case SDHCI_TRANSFER_MODE:
+ /*
+ * Postpone this write, we must do it together with a
+ * command write that is down below. Return old value.
+ */
+ pltfm_host->xfer_mode_shadow = value;
+ return old_value;
+ case SDHCI_COMMAND:
+ ret = (value << 16) | pltfm_host->xfer_mode_shadow;
+ return ret;
+ }
+
+ ret = old_value & (~(0xffff << shift));
+ ret |= (value << shift);
+
+ if (spec_reg == SDHCI_BLOCK_SIZE) {
/*
* Two last DMA bits are reserved, and first one is used for
* non-standard blksz of 4096 bytes that we don't support
* yet. So clear the DMA boundary bits.
*/
- val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
+ ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
}
- sdhci_be32bs_writew(host, val, reg);
+ return ret;
}
-static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
+static u32 esdhc_writeb_fixup(struct sdhci_host *host,
+ int spec_reg, u8 value, u32 old_value)
{
+ u32 ret;
+ u32 dma_bits;
+ u8 tmp;
+ int shift = (spec_reg & 0x3) * 8;
+
+ /*
+ * eSDHC doesn't have a standard power control register, so we do
+ * nothing here to avoid incorrect operation.
+ */
+ if (spec_reg == SDHCI_POWER_CONTROL)
+ return old_value;
/*
* "DMA select" location is offset 0x28 in SD specification, but on
* P5020 or P3041, it's located at 0x29.
*/
- if (reg == SDHCI_HOST_CONTROL) {
- u32 dma_bits;
-
+ if (spec_reg == SDHCI_HOST_CONTROL) {
/*
* If host control register is not standard, exit
* this function
*/
if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
- return;
+ return old_value;
/* DMA select is 22,23 bits in Protocol Control Register */
- dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5;
- clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5,
- dma_bits);
- val &= ~SDHCI_CTRL_DMA_MASK;
- val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK;
+ dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
+ ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
+ tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
+ (old_value & SDHCI_CTRL_DMA_MASK);
+ ret = (ret & (~0xff)) | tmp;
+
+ /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
+ ret &= ~ESDHC_HOST_CONTROL_RES;
+ return ret;
}
- /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
- if (reg == SDHCI_HOST_CONTROL)
- val &= ~ESDHC_HOST_CONTROL_RES;
- sdhci_be32bs_writeb(host, val, reg);
+ ret = (old_value & (~(0xff << shift))) | (value << shift);
+ return ret;
+}
+
+static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
+{
+ u32 ret;
+ u32 value;
+
+ value = ioread32be(host->ioaddr + reg);
+ ret = esdhc_readl_fixup(host, reg, value);
+
+ return ret;
+}
+
+static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
+{
+ u32 ret;
+ u32 value;
+
+ value = ioread32(host->ioaddr + reg);
+ ret = esdhc_readl_fixup(host, reg, value);
+
+ return ret;
+}
+
+static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
+{
+ u16 ret;
+ u32 value;
+ int base = reg & ~0x3;
+
+ value = ioread32be(host->ioaddr + base);
+ ret = esdhc_readw_fixup(host, reg, value);
+ return ret;
+}
+
+static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
+{
+ u16 ret;
+ u32 value;
+ int base = reg & ~0x3;
+
+ value = ioread32(host->ioaddr + base);
+ ret = esdhc_readw_fixup(host, reg, value);
+ return ret;
+}
+
+static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
+{
+ u8 ret;
+ u32 value;
+ int base = reg & ~0x3;
+
+ value = ioread32be(host->ioaddr + base);
+ ret = esdhc_readb_fixup(host, reg, value);
+ return ret;
+}
+
+static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
+{
+ u8 ret;
+ u32 value;
+ int base = reg & ~0x3;
+
+ value = ioread32(host->ioaddr + base);
+ ret = esdhc_readb_fixup(host, reg, value);
+ return ret;
+}
+
+static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ u32 value;
+
+ value = esdhc_writel_fixup(host, reg, val, 0);
+ iowrite32be(value, host->ioaddr + reg);
+}
+
+static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ u32 value;
+
+ value = esdhc_writel_fixup(host, reg, val, 0);
+ iowrite32(value, host->ioaddr + reg);
+}
+
+static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ int base = reg & ~0x3;
+ u32 value;
+ u32 ret;
+
+ value = ioread32be(host->ioaddr + base);
+ ret = esdhc_writew_fixup(host, reg, val, value);
+ if (reg != SDHCI_TRANSFER_MODE)
+ iowrite32be(ret, host->ioaddr + base);
+}
+
+static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ int base = reg & ~0x3;
+ u32 value;
+ u32 ret;
+
+ value = ioread32(host->ioaddr + base);
+ ret = esdhc_writew_fixup(host, reg, val, value);
+ if (reg != SDHCI_TRANSFER_MODE)
+ iowrite32(ret, host->ioaddr + base);
+}
+
+static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ int base = reg & ~0x3;
+ u32 value;
+ u32 ret;
+
+ value = ioread32be(host->ioaddr + base);
+ ret = esdhc_writeb_fixup(host, reg, val, value);
+ iowrite32be(ret, host->ioaddr + base);
+}
+
+static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ int base = reg & ~0x3;
+ u32 value;
+ u32 ret;
+
+ value = ioread32(host->ioaddr + base);
+ ret = esdhc_writeb_fixup(host, reg, val, value);
+ iowrite32(ret, host->ioaddr + base);
}
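All of the read/write fixup helpers above rely on the same offset arithmetic: an 8- or 16-bit SD-spec register at offset reg lives inside the 32-bit eSDHC word at reg & ~0x3, at byte shift (reg & 0x3) * 8 (or (reg & 0x2) * 8 for 16-bit accesses). A hedged, standalone sketch of just that arithmetic, using an invented register value in place of a real MMIO read:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int reg = 0x06;			/* e.g. SDHCI_BLOCK_COUNT, a 16-bit register */
	int base = reg & ~0x3;		/* 0x04: the 32-bit eSDHC word that holds it */
	int shift = (reg & 0x2) * 8;	/* 16: bit position inside that word */
	uint32_t value = 0x00200008;	/* stand-in for ioread32(ioaddr + base) */
	uint16_t block_count = (value >> shift) & 0xffff;

	printf("base=0x%02x shift=%d block_count=%u\n", base, shift, block_count);
	return 0;
}

The _be and _le accessors differ only in whether the surrounding 32-bit word is read and written with ioread32be()/iowrite32be() or ioread32()/iowrite32(); the fixup math is shared.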
/*
@@ -149,19 +351,17 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
* For Continue, apply soft reset for data(SYSCTL[RSTD]);
* and re-issue the entire read transaction from beginning.
*/
-static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask)
+static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
- u32 tmp;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = pltfm_host->priv;
bool applicable;
dma_addr_t dmastart;
dma_addr_t dmanow;
- tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
- tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
-
applicable = (intmask & SDHCI_INT_DATA_END) &&
- (intmask & SDHCI_INT_BLK_GAP) &&
- (tmp == VENDOR_V_23);
+ (intmask & SDHCI_INT_BLK_GAP) &&
+ (esdhc->vendor_ver == VENDOR_V_23);
if (!applicable)
return;
@@ -179,7 +379,11 @@ static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask)
static int esdhc_of_enable_dma(struct sdhci_host *host)
{
- setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
+ u32 value;
+
+ value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
+ value |= ESDHC_DMA_SNOOP;
+ sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
return 0;
}
@@ -199,7 +403,9 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
- int pre_div = 2;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = pltfm_host->priv;
+ int pre_div = 1;
int div = 1;
u32 temp;
@@ -208,6 +414,10 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock == 0)
return;
+ /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
+ if (esdhc->vendor_ver < VENDOR_V_23)
+ pre_div = 2;
+
/* Workaround to reduce the clock frequency for p1010 esdhc */
if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
if (clock > 20000000)
@@ -229,7 +439,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
clock, host->max_clk / pre_div / div);
-
+ host->mmc->actual_clock = host->max_clk / pre_div / div;
pre_div >>= 1;
div--;
@@ -241,39 +451,26 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
mdelay(1);
}
-static void esdhc_of_platform_init(struct sdhci_host *host)
-{
- u32 vvn;
-
- vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
- vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
- if (vvn == VENDOR_V_22)
- host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
-
- if (vvn > VENDOR_V_22)
- host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
-}
-
static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
u32 ctrl;
+ ctrl = sdhci_readl(host, ESDHC_PROCTL);
+ ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
switch (width) {
case MMC_BUS_WIDTH_8:
- ctrl = ESDHC_CTRL_8BITBUS;
+ ctrl |= ESDHC_CTRL_8BITBUS;
break;
case MMC_BUS_WIDTH_4:
- ctrl = ESDHC_CTRL_4BITBUS;
+ ctrl |= ESDHC_CTRL_4BITBUS;
break;
default:
- ctrl = 0;
break;
}
- clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL,
- ESDHC_CTRL_BUSWIDTH_MASK, ctrl);
+ sdhci_writel(host, ctrl, ESDHC_PROCTL);
}
static void esdhc_reset(struct sdhci_host *host, u8 mask)
@@ -284,32 +481,13 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
-static const struct sdhci_ops sdhci_esdhc_ops = {
- .read_l = esdhc_readl,
- .read_w = esdhc_readw,
- .read_b = esdhc_readb,
- .write_l = esdhc_writel,
- .write_w = esdhc_writew,
- .write_b = esdhc_writeb,
- .set_clock = esdhc_of_set_clock,
- .enable_dma = esdhc_of_enable_dma,
- .get_max_clock = esdhc_of_get_max_clock,
- .get_min_clock = esdhc_of_get_min_clock,
- .platform_init = esdhc_of_platform_init,
- .adma_workaround = esdhci_of_adma_workaround,
- .set_bus_width = esdhc_pltfm_set_bus_width,
- .reset = esdhc_reset,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
-};
-
#ifdef CONFIG_PM
-
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
- esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
+ esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
return sdhci_suspend_host(host);
}
@@ -322,9 +500,8 @@ static int esdhc_of_resume(struct device *dev)
if (ret == 0) {
/* Isn't this already done by sdhci_resume_host() ? --rmk */
esdhc_of_enable_dma(host);
- sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
+ sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
}
-
return ret;
}
@@ -337,30 +514,103 @@ static const struct dev_pm_ops esdhc_pmops = {
#define ESDHC_PMOPS NULL
#endif
-static const struct sdhci_pltfm_data sdhci_esdhc_pdata = {
- /*
- * card detection could be handled via GPIO
- * eSDHC cannot support End Attribute in NOP ADMA descriptor
- */
+static const struct sdhci_ops sdhci_esdhc_be_ops = {
+ .read_l = esdhc_be_readl,
+ .read_w = esdhc_be_readw,
+ .read_b = esdhc_be_readb,
+ .write_l = esdhc_be_writel,
+ .write_w = esdhc_be_writew,
+ .write_b = esdhc_be_writeb,
+ .set_clock = esdhc_of_set_clock,
+ .enable_dma = esdhc_of_enable_dma,
+ .get_max_clock = esdhc_of_get_max_clock,
+ .get_min_clock = esdhc_of_get_min_clock,
+ .adma_workaround = esdhc_of_adma_workaround,
+ .set_bus_width = esdhc_pltfm_set_bus_width,
+ .reset = esdhc_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_ops sdhci_esdhc_le_ops = {
+ .read_l = esdhc_le_readl,
+ .read_w = esdhc_le_readw,
+ .read_b = esdhc_le_readb,
+ .write_l = esdhc_le_writel,
+ .write_w = esdhc_le_writew,
+ .write_b = esdhc_le_writeb,
+ .set_clock = esdhc_of_set_clock,
+ .enable_dma = esdhc_of_enable_dma,
+ .get_max_clock = esdhc_of_get_max_clock,
+ .get_min_clock = esdhc_of_get_min_clock,
+ .adma_workaround = esdhc_of_adma_workaround,
+ .set_bus_width = esdhc_pltfm_set_bus_width,
+ .reset = esdhc_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
| SDHCI_QUIRK_NO_CARD_NO_RESET
| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
- .ops = &sdhci_esdhc_ops,
+ .ops = &sdhci_esdhc_be_ops,
};
+static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
+ | SDHCI_QUIRK_NO_CARD_NO_RESET
+ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .ops = &sdhci_esdhc_le_ops,
+};
+
+static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_esdhc *esdhc;
+ u16 host_ver;
+
+ pltfm_host = sdhci_priv(host);
+ esdhc = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_esdhc),
+ GFP_KERNEL);
+
+ host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
+ esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
+ SDHCI_VENDOR_VER_SHIFT;
+ esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
+
+ pltfm_host->priv = esdhc;
+}
+
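For reference, esdhc_init() above caches the vendor and spec versions once at probe time instead of re-deriving them from SDHCI_SLOT_INT_STATUS on every register fix-up. A minimal stand-alone sketch of the unpacking (not driver code; the mask and shift values follow the usual SDHCI HOST_VERSION layout, and the 0x1302 readout is made up):

#include <stdint.h>
#include <stdio.h>

#define SDHCI_VENDOR_VER_MASK  0xFF00
#define SDHCI_VENDOR_VER_SHIFT 8
#define SDHCI_SPEC_VER_MASK    0x00FF

struct esdhc_ver { uint8_t vendor_ver; uint8_t spec_ver; };

/* Unpack a raw HOST_VERSION word into the two cached fields. */
static struct esdhc_ver esdhc_unpack_version(uint16_t host_ver)
{
	struct esdhc_ver v = {
		.vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT,
		.spec_ver   = host_ver & SDHCI_SPEC_VER_MASK,
	};
	return v;
}

int main(void)
{
	struct esdhc_ver v = esdhc_unpack_version(0x1302); /* hypothetical readout */

	printf("vendor version %u, spec version %u\n", v.vendor_ver, v.spec_ver);
	return 0;
}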
static int sdhci_esdhc_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct device_node *np;
int ret;
- host = sdhci_pltfm_init(pdev, &sdhci_esdhc_pdata, 0);
+ np = pdev->dev.of_node;
+
+ if (of_get_property(np, "little-endian", NULL))
+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata, 0);
+ else
+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata, 0);
+
if (IS_ERR(host))
return PTR_ERR(host);
+ esdhc_init(pdev, host);
+
sdhci_get_of_property(pdev);
- np = pdev->dev.of_node;
+ if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
+ of_device_is_compatible(np, "fsl,p5020-esdhc") ||
+ of_device_is_compatible(np, "fsl,p4080-esdhc") ||
+ of_device_is_compatible(np, "fsl,p1020-esdhc") ||
+ of_device_is_compatible(np, "fsl,t1040-esdhc") ||
+ of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
+ if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
/*
* Freescale messed up with P2020 as it has a non-standard
diff --git a/kernel/drivers/mmc/host/sdhci-pci.c b/kernel/drivers/mmc/host/sdhci-pci-core.c
index 53cfc7ced..45ee07d3a 100644
--- a/kernel/drivers/mmc/host/sdhci-pci.c
+++ b/kernel/drivers/mmc/host/sdhci-pci-core.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/gpio.h>
@@ -266,6 +267,96 @@ static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
usleep_range(300, 1000);
}
+static int spt_select_drive_strength(struct sdhci_host *host,
+ struct mmc_card *card,
+ unsigned int max_dtr,
+ int host_drv, int card_drv, int *drv_type)
+{
+ int drive_strength;
+
+ if (sdhci_pci_spt_drive_strength > 0)
+ drive_strength = sdhci_pci_spt_drive_strength & 0xf;
+ else
+ drive_strength = 0; /* Default 50-ohm */
+
+ if ((mmc_driver_type_mask(drive_strength) & card_drv) == 0)
+ drive_strength = 0; /* Default 50-ohm */
+
+ return drive_strength;
+}
+
+/* Try to read the drive strength from the card */
+static void spt_read_drive_strength(struct sdhci_host *host)
+{
+ u32 val, i, t;
+ u16 m;
+
+ if (sdhci_pci_spt_drive_strength)
+ return;
+
+ sdhci_pci_spt_drive_strength = -1;
+
+ m = sdhci_readw(host, SDHCI_HOST_CONTROL2) & 0x7;
+ if (m != 3 && m != 5)
+ return;
+ val = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ if (val & 0x3)
+ return;
+ sdhci_writel(host, 0x007f0023, SDHCI_INT_ENABLE);
+ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ sdhci_writew(host, 0x10, SDHCI_TRANSFER_MODE);
+ sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
+ sdhci_writew(host, 512, SDHCI_BLOCK_SIZE);
+ sdhci_writew(host, 1, SDHCI_BLOCK_COUNT);
+ sdhci_writel(host, 0, SDHCI_ARGUMENT);
+ sdhci_writew(host, 0x83b, SDHCI_COMMAND);
+ for (i = 0; i < 1000; i++) {
+ val = sdhci_readl(host, SDHCI_INT_STATUS);
+ if (val & 0xffff8000)
+ return;
+ if (val & 0x20)
+ break;
+ udelay(1);
+ }
+ val = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ if (!(val & 0x800))
+ return;
+ for (i = 0; i < 47; i++)
+ val = sdhci_readl(host, SDHCI_BUFFER);
+ t = val & 0xf00;
+ if (t != 0x200 && t != 0x300)
+ return;
+
+ sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
+}
+
+static int bxt_get_cd(struct mmc_host *mmc)
+{
+ int gpio_cd = mmc_gpio_get_cd(mmc);
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!gpio_cd)
+ return 0;
+
+ pm_runtime_get_sync(mmc->parent);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ goto out;
+
+ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ pm_runtime_mark_last_busy(mmc->parent);
+ pm_runtime_put_autosuspend(mmc->parent);
+
+ return ret;
+}
+
static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
@@ -276,6 +367,10 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
slot->hw_reset = sdhci_pci_int_hw_reset;
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_SPT_EMMC) {
+ spt_read_drive_strength(slot->host);
+ slot->select_drive_strength = spt_select_drive_strength;
+ }
return 0;
}
@@ -294,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
slot->cd_con_id = NULL;
slot->cd_idx = 0;
slot->cd_override_level = true;
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
+ slot->host->mmc_host_ops.get_cd = bxt_get_cd;
+
return 0;
}
@@ -302,6 +401,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
.probe_slot = byt_emmc_probe_slot,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
SDHCI_QUIRK2_STOP_WITH_TC,
};
@@ -375,11 +475,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
else
scratch &= ~0x47;
- ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
- if (ret)
- return ret;
-
- return 0;
+ return pci_write_config_byte(chip->pdev, 0xAE, scratch);
}
static int jmicron_probe(struct sdhci_pci_chip *chip)
@@ -656,14 +752,37 @@ static const struct sdhci_pci_fixes sdhci_rtsx = {
.probe_slot = rtsx_probe_slot,
};
+/* AMD chipset generation */
+enum amd_chipset_gen {
+ AMD_CHIPSET_BEFORE_ML,
+ AMD_CHIPSET_CZ,
+ AMD_CHIPSET_NL,
+ AMD_CHIPSET_UNKNOWN,
+};
+
static int amd_probe(struct sdhci_pci_chip *chip)
{
struct pci_dev *smbus_dev;
+ enum amd_chipset_gen gen;
smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
+ if (smbus_dev) {
+ gen = AMD_CHIPSET_BEFORE_ML;
+ } else {
+ smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
+ if (smbus_dev) {
+ if (smbus_dev->revision < 0x51)
+ gen = AMD_CHIPSET_CZ;
+ else
+ gen = AMD_CHIPSET_NL;
+ } else {
+ gen = AMD_CHIPSET_UNKNOWN;
+ }
+ }
- if (smbus_dev && (smbus_dev->revision < 0x51)) {
+ if ((gen == AMD_CHIPSET_BEFORE_ML) || (gen == AMD_CHIPSET_CZ)) {
chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
chip->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
}
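The classification above keys the quirks off which AMD SMBus function is present and, for the KERNCZ device, its revision. A condensed user-space sketch of the same decision, with pci_get_device() replaced by a hypothetical probe structure:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum amd_chipset_gen {
	AMD_CHIPSET_BEFORE_ML,
	AMD_CHIPSET_CZ,
	AMD_CHIPSET_NL,
	AMD_CHIPSET_UNKNOWN,
};

/* Stand-in for pci_get_device(): which SMBus function exists, and its revision. */
struct smbus_probe {
	bool has_hudson2;
	bool has_kerncz;
	uint8_t kerncz_revision;
};

static enum amd_chipset_gen amd_classify(const struct smbus_probe *p)
{
	if (p->has_hudson2)
		return AMD_CHIPSET_BEFORE_ML;
	if (p->has_kerncz)
		return p->kerncz_revision < 0x51 ? AMD_CHIPSET_CZ : AMD_CHIPSET_NL;
	return AMD_CHIPSET_UNKNOWN;
}

static bool amd_needs_hs200_quirks(enum amd_chipset_gen gen)
{
	/* Same condition as the hunk: only pre-ML and CZ get the quirks. */
	return gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ;
}

int main(void)
{
	struct smbus_probe p = { .has_hudson2 = false, .has_kerncz = true,
				 .kerncz_revision = 0x40 };
	enum amd_chipset_gen gen = amd_classify(&p);

	printf("gen=%d apply_quirks=%d\n", gen, amd_needs_hs200_quirks(gen));
	return 0;
}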
@@ -1021,6 +1140,62 @@ static const struct pci_device_id pci_ids[] = {
},
{
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_DNV_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXT_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXT_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXT_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_APL_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_APL_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_APL_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
+ {
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8120,
.subvendor = PCI_ANY_ID,
@@ -1204,6 +1379,20 @@ static void sdhci_pci_hw_reset(struct sdhci_host *host)
slot->hw_reset(host);
}
+static int sdhci_pci_select_drive_strength(struct sdhci_host *host,
+ struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+
+ if (!slot->select_drive_strength)
+ return 0;
+
+ return slot->select_drive_strength(host, card, max_dtr, host_drv,
+ card_drv, drv_type);
+}
+
static const struct sdhci_ops sdhci_pci_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
@@ -1211,6 +1400,7 @@ static const struct sdhci_ops sdhci_pci_ops = {
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.hw_reset = sdhci_pci_hw_reset,
+ .select_drive_strength = sdhci_pci_select_drive_strength,
};
/*****************************************************************************\
diff --git a/kernel/drivers/mmc/host/sdhci-pci-data.c b/kernel/drivers/mmc/host/sdhci-pci-data.c
index a61121776..56fddc622 100644
--- a/kernel/drivers/mmc/host/sdhci-pci-data.c
+++ b/kernel/drivers/mmc/host/sdhci-pci-data.c
@@ -3,3 +3,6 @@
struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno);
EXPORT_SYMBOL_GPL(sdhci_pci_get_data);
+
+int sdhci_pci_spt_drive_strength;
+EXPORT_SYMBOL_GPL(sdhci_pci_spt_drive_strength);
diff --git a/kernel/drivers/mmc/host/sdhci-pci-o2micro.c b/kernel/drivers/mmc/host/sdhci-pci-o2micro.c
index e2ec108db..d48f03104 100644
--- a/kernel/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/kernel/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -60,7 +60,7 @@ static void o2_pci_led_enable(struct sdhci_pci_chip *chip)
}
-void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
+static void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
{
u32 scratch_32;
int ret;
@@ -145,7 +145,6 @@ void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
scratch_32 |= 0x00080000;
pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32);
}
-EXPORT_SYMBOL_GPL(sdhci_pci_o2_fujin2_pci_init);
int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
{
@@ -179,7 +178,6 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
-EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe_slot);
int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
{
@@ -385,11 +383,9 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
return 0;
}
-EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe);
int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip)
{
sdhci_pci_o2_probe(chip);
return 0;
}
-EXPORT_SYMBOL_GPL(sdhci_pci_o2_resume);
diff --git a/kernel/drivers/mmc/host/sdhci-pci-o2micro.h b/kernel/drivers/mmc/host/sdhci-pci-o2micro.h
index f7ffc908d..770f53857 100644
--- a/kernel/drivers/mmc/host/sdhci-pci-o2micro.h
+++ b/kernel/drivers/mmc/host/sdhci-pci-o2micro.h
@@ -64,8 +64,6 @@
#define O2_SD_VENDOR_SETTING 0x110
#define O2_SD_VENDOR_SETTING2 0x1C8
-extern void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip);
-
extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
diff --git a/kernel/drivers/mmc/host/sdhci-pci.h b/kernel/drivers/mmc/host/sdhci-pci.h
index 1ec684d06..d1a0b4db6 100644
--- a/kernel/drivers/mmc/host/sdhci-pci.h
+++ b/kernel/drivers/mmc/host/sdhci-pci.h
@@ -24,6 +24,13 @@
#define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b
#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
+#define PCI_DEVICE_ID_INTEL_DNV_EMMC 0x19db
+#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
+#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
+#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
+#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
+#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
+#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
/*
* PCI registers
@@ -72,6 +79,10 @@ struct sdhci_pci_slot {
bool cd_override_level;
void (*hw_reset)(struct sdhci_host *host);
+ int (*select_drive_strength)(struct sdhci_host *host,
+ struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
};
struct sdhci_pci_chip {
diff --git a/kernel/drivers/mmc/host/sdhci-pltfm.c b/kernel/drivers/mmc/host/sdhci-pltfm.c
index a207f5aaf..87fb5ea8e 100644
--- a/kernel/drivers/mmc/host/sdhci-pltfm.c
+++ b/kernel/drivers/mmc/host/sdhci-pltfm.c
@@ -71,9 +71,7 @@ void sdhci_get_of_property(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- const __be32 *clk;
u32 bus_width;
- int size;
if (of_get_property(np, "sdhci,auto-cmd12", NULL))
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
@@ -101,9 +99,7 @@ void sdhci_get_of_property(struct platform_device *pdev)
of_device_is_compatible(np, "fsl,mpc8536-esdhc"))
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
- clk = of_get_property(np, "clock-frequency", &size);
- if (clk && size == sizeof(*clk) && *clk)
- pltfm_host->clock = be32_to_cpup(clk);
+ of_property_read_u32(np, "clock-frequency", &pltfm_host->clock);
if (of_find_property(np, "keep-power-in-suspend", NULL))
host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
diff --git a/kernel/drivers/mmc/host/sdhci-pxav2.c b/kernel/drivers/mmc/host/sdhci-pxav2.c
index f98008b5e..beffd8615 100644
--- a/kernel/drivers/mmc/host/sdhci-pxav2.c
+++ b/kernel/drivers/mmc/host/sdhci-pxav2.c
@@ -252,9 +252,7 @@ static int sdhci_pxav2_remove(struct platform_device *pdev)
static struct platform_driver sdhci_pxav2_driver = {
.driver = {
.name = "sdhci-pxav2",
-#ifdef CONFIG_OF
- .of_match_table = sdhci_pxav2_of_match,
-#endif
+ .of_match_table = of_match_ptr(sdhci_pxav2_of_match),
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav2_probe,
diff --git a/kernel/drivers/mmc/host/sdhci-pxav3.c b/kernel/drivers/mmc/host/sdhci-pxav3.c
index 065dc70ca..f5edf9d3a 100644
--- a/kernel/drivers/mmc/host/sdhci-pxav3.c
+++ b/kernel/drivers/mmc/host/sdhci-pxav3.c
@@ -135,6 +135,7 @@ static int armada_38x_quirks(struct platform_device *pdev,
struct sdhci_pxa *pxa = pltfm_host->priv;
struct resource *res;
+ host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"conf-sdio3");
@@ -290,6 +291,9 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
uhs == MMC_TIMING_UHS_DDR50) {
reg_val &= ~SDIO3_CONF_CLK_INV;
reg_val |= SDIO3_CONF_SD_FB_CLK;
+ } else if (uhs == MMC_TIMING_MMC_HS) {
+ reg_val &= ~SDIO3_CONF_CLK_INV;
+ reg_val &= ~SDIO3_CONF_SD_FB_CLK;
} else {
reg_val |= SDIO3_CONF_CLK_INV;
reg_val &= ~SDIO3_CONF_SD_FB_CLK;
@@ -398,7 +402,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
ret = armada_38x_quirks(pdev, host);
if (ret < 0)
- goto err_clk_get;
+ goto err_mbus_win;
ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
if (ret < 0)
goto err_mbus_win;
@@ -458,12 +462,8 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- if (host->mmc->pm_caps & MMC_PM_KEEP_POWER) {
+ if (host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ)
device_init_wakeup(&pdev->dev, 1);
- host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ;
- } else {
- device_init_wakeup(&pdev->dev, 0);
- }
pm_runtime_put_autosuspend(&pdev->dev);
@@ -579,9 +579,7 @@ static const struct dev_pm_ops sdhci_pxav3_pmops = {
static struct platform_driver sdhci_pxav3_driver = {
.driver = {
.name = "sdhci-pxav3",
-#ifdef CONFIG_OF
- .of_match_table = sdhci_pxav3_of_match,
-#endif
+ .of_match_table = of_match_ptr(sdhci_pxav3_of_match),
.pm = SDHCI_PXAV3_PMOPS,
},
.probe = sdhci_pxav3_probe,
diff --git a/kernel/drivers/mmc/host/sdhci-s3c.c b/kernel/drivers/mmc/host/sdhci-s3c.c
index c6d2dd731..70c724bc6 100644
--- a/kernel/drivers/mmc/host/sdhci-s3c.c
+++ b/kernel/drivers/mmc/host/sdhci-s3c.c
@@ -736,7 +736,7 @@ static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL)
#endif
-static struct platform_device_id sdhci_s3c_driver_ids[] = {
+static const struct platform_device_id sdhci_s3c_driver_ids[] = {
{
.name = "s3c-sdhci",
.driver_data = (kernel_ulong_t)NULL,
diff --git a/kernel/drivers/mmc/host/sdhci-sirf.c b/kernel/drivers/mmc/host/sdhci-sirf.c
index 32848eb7a..34866f668 100644
--- a/kernel/drivers/mmc/host/sdhci-sirf.c
+++ b/kernel/drivers/mmc/host/sdhci-sirf.c
@@ -17,7 +17,7 @@
#define SDHCI_CLK_DELAY_SETTING 0x4C
#define SDHCI_SIRF_8BITBUS BIT(3)
-#define SIRF_TUNING_COUNT 128
+#define SIRF_TUNING_COUNT 16384
struct sdhci_sirf_priv {
int gpio_cd;
@@ -43,10 +43,44 @@ static void sdhci_sirf_set_bus_width(struct sdhci_host *host, int width)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
+static u32 sdhci_sirf_readl_le(struct sdhci_host *host, int reg)
+{
+ u32 val = readl(host->ioaddr + reg);
+
+ if (unlikely((reg == SDHCI_CAPABILITIES_1) &&
+ (host->mmc->caps & MMC_CAP_UHS_SDR50))) {
+ /* fake CAP_1 register */
+ val = SDHCI_SUPPORT_DDR50 |
+ SDHCI_SUPPORT_SDR50 | SDHCI_USE_SDR50_TUNING;
+ }
+
+ if (unlikely(reg == SDHCI_SLOT_INT_STATUS)) {
+ u32 prss = val;
+ /* fake the chip as a V3.0 host controller */
+ prss &= ~(0xFF << 16);
+ val = prss | (SDHCI_SPEC_300 << 16);
+ }
+ return val;
+}
+
+static u16 sdhci_sirf_readw_le(struct sdhci_host *host, int reg)
+{
+ u16 ret = 0;
+
+ ret = readw(host->ioaddr + reg);
+
+ if (unlikely(reg == SDHCI_HOST_VERSION)) {
+ ret = readw(host->ioaddr + SDHCI_HOST_VERSION);
+ ret |= SDHCI_SPEC_300;
+ }
+
+ return ret;
+}
+
static int sdhci_sirf_execute_tuning(struct sdhci_host *host, u32 opcode)
{
int tuning_seq_cnt = 3;
- u8 phase, tuned_phases[SIRF_TUNING_COUNT];
+ int phase;
u8 tuned_phase_cnt = 0;
int rc = 0, longest_range = 0;
int start = -1, end = 0, tuning_value = -1, range = 0;
@@ -58,14 +92,15 @@ static int sdhci_sirf_execute_tuning(struct sdhci_host *host, u32 opcode)
retry:
phase = 0;
+ tuned_phase_cnt = 0;
do {
sdhci_writel(host,
clock_setting | phase,
SDHCI_CLK_DELAY_SETTING);
- if (!mmc_send_tuning(mmc)) {
+ if (!mmc_send_tuning(mmc, opcode, NULL)) {
/* Tuning is successful at this tuning point */
- tuned_phases[tuned_phase_cnt++] = phase;
+ tuned_phase_cnt++;
dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
mmc_hostname(mmc), phase);
if (start == -1)
@@ -85,7 +120,7 @@ retry:
start = -1;
end = range = 0;
}
- } while (++phase < ARRAY_SIZE(tuned_phases));
+ } while (++phase < SIRF_TUNING_COUNT);
if (tuned_phase_cnt && tuning_value > 0) {
/*
@@ -112,6 +147,8 @@ retry:
}
static struct sdhci_ops sdhci_sirf_ops = {
+ .read_l = sdhci_sirf_readl_le,
+ .read_w = sdhci_sirf_readw_le,
.platform_execute_tuning = sdhci_sirf_execute_tuning,
.set_clock = sdhci_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -125,8 +162,8 @@ static struct sdhci_pltfm_data sdhci_sirf_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
- SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_DELAY_AFTER_POWER,
+ SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
static int sdhci_sirf_probe(struct platform_device *pdev)
diff --git a/kernel/drivers/mmc/host/sdhci-spear.c b/kernel/drivers/mmc/host/sdhci-spear.c
index df088343d..255a89676 100644
--- a/kernel/drivers/mmc/host/sdhci-spear.c
+++ b/kernel/drivers/mmc/host/sdhci-spear.c
@@ -4,7 +4,7 @@
* Support of SDHCI platform devices for spear soc family
*
* Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* Inspired by sdhci-pltfm.c
*
@@ -211,5 +211,5 @@ static struct platform_driver sdhci_driver = {
module_platform_driver(sdhci_driver);
MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mmc/host/sdhci-st.c b/kernel/drivers/mmc/host/sdhci-st.c
index 682f2bb0f..969c2b0d5 100644
--- a/kernel/drivers/mmc/host/sdhci-st.c
+++ b/kernel/drivers/mmc/host/sdhci-st.c
@@ -509,4 +509,4 @@ module_platform_driver(sdhci_st_driver);
MODULE_DESCRIPTION("SDHCI driver for STMicroelectronics SoCs");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:st-sdhci");
+MODULE_ALIAS("platform:sdhci-st");
diff --git a/kernel/drivers/mmc/host/sdhci.c b/kernel/drivers/mmc/host/sdhci.c
index fd41b9143..8814eb6b8 100644
--- a/kernel/drivers/mmc/host/sdhci.c
+++ b/kernel/drivers/mmc/host/sdhci.c
@@ -52,11 +52,9 @@ static void sdhci_finish_data(struct sdhci_host *);
static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
-static void sdhci_tuning_timer(unsigned long data);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
- struct mmc_data *data,
- struct sdhci_host_next *next);
+ struct mmc_data *data);
static int sdhci_do_get_cd(struct sdhci_host *host);
#ifdef CONFIG_PM
@@ -208,8 +206,7 @@ EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
- if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
- SDHCI_CARD_PRESENT))
+ if (!sdhci_do_get_cd(host))
return;
}
@@ -254,17 +251,6 @@ static void sdhci_init(struct sdhci_host *host, int soft)
static void sdhci_reinit(struct sdhci_host *host)
{
sdhci_init(host, 0);
- /*
- * Retuning stuffs are affected by different cards inserted and only
- * applicable to UHS-I cards. So reset these fields to their initial
- * value when card is removed.
- */
- if (host->flags & SDHCI_USING_RETUNING_TIMER) {
- host->flags &= ~SDHCI_USING_RETUNING_TIMER;
-
- del_timer_sync(&host->tuning_timer);
- host->flags &= ~SDHCI_NEEDS_RETUNING;
- }
sdhci_enable_card_detection(host);
}
@@ -328,8 +314,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
local_irq_save(flags);
while (blksize) {
- if (!sg_miter_next(&host->sg_miter))
- BUG();
+ BUG_ON(!sg_miter_next(&host->sg_miter));
len = min(host->sg_miter.length, blksize);
@@ -374,8 +359,7 @@ static void sdhci_write_block_pio(struct sdhci_host *host)
local_irq_save(flags);
while (blksize) {
- if (!sg_miter_next(&host->sg_miter))
- BUG();
+ BUG_ON(!sg_miter_next(&host->sg_miter));
len = min(host->sg_miter.length, blksize);
@@ -510,7 +494,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
goto fail;
BUG_ON(host->align_addr & host->align_mask);
- host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
+ host->sg_count = sdhci_pre_dma_transfer(host, data);
if (host->sg_count < 0)
goto unmap_align;
@@ -556,9 +540,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
BUG_ON(len > 65536);
- /* tran, valid */
- sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
- desc += host->desc_sz;
+ if (len) {
+ /* tran, valid */
+ sdhci_adma_write_desc(host, desc, addr, len,
+ ADMA2_TRAN_VALID);
+ desc += host->desc_sz;
+ }
/*
* If this triggers then we have a calculation bug
@@ -649,9 +636,11 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
}
}
- if (!data->host_cookie)
+ if (data->host_cookie == COOKIE_MAPPED) {
dma_unmap_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, direction);
+ data->host_cookie = COOKIE_UNMAPPED;
+ }
}
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
@@ -847,7 +836,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
} else {
int sg_cnt;
- sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
+ sg_cnt = sdhci_pre_dma_transfer(host, data);
if (sg_cnt <= 0) {
/*
* This only happens when someone fed
@@ -963,11 +952,13 @@ static void sdhci_finish_data(struct sdhci_host *host)
if (host->flags & SDHCI_USE_ADMA)
sdhci_adma_table_post(host, data);
else {
- if (!data->host_cookie)
+ if (data->host_cookie == COOKIE_MAPPED) {
dma_unmap_sg(mmc_dev(host->mmc),
data->sg, data->sg_len,
(data->flags & MMC_DATA_READ) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ data->host_cookie = COOKIE_UNMAPPED;
+ }
}
}
@@ -1167,10 +1158,13 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
int real_div = div, clk_mul = 1;
u16 clk = 0;
unsigned long timeout;
+ bool switch_base_clk = false;
host->mmc->actual_clock = 0;
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
+ mdelay(1);
if (clock == 0)
return;
@@ -1204,15 +1198,25 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
<= clock)
break;
}
- /*
- * Set Programmable Clock Mode in the Clock
- * Control register.
- */
- clk = SDHCI_PROG_CLOCK_MODE;
- real_div = div;
- clk_mul = host->clk_mul;
- div--;
- } else {
+ if ((host->max_clk * host->clk_mul / div) <= clock) {
+ /*
+ * Set Programmable Clock Mode in the Clock
+ * Control register.
+ */
+ clk = SDHCI_PROG_CLOCK_MODE;
+ real_div = div;
+ clk_mul = host->clk_mul;
+ div--;
+ } else {
+ /*
+ * Even the largest divisor cannot bring the
+ * multiplied clock down to the requested rate,
+ * so use the base clock instead.
+ */
+ switch_base_clk = true;
+ }
+ }
+
+ if (!host->clk_mul || switch_base_clk) {
/* Version 3.00 divisors must be a multiple of 2. */
if (host->max_clk <= clock)
div = 1;
@@ -1225,6 +1229,9 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
}
real_div = div;
div >>= 1;
+ if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
+ && !div && host->max_clk <= 25000000)
+ div = 1;
}
} else {
/* Version 2.00 divisors must be a power of 2. */
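The reworked branch only commits to Programmable Clock Mode when the multiplied clock can actually be divided down to the request; otherwise it falls back to the even base-clock divisors (and the new SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN quirk keeps slow controllers from ever programming a divisor of zero). A simplified, host-independent sketch of that selection, with register encoding and the div-zero quirk left out:

#include <stdbool.h>
#include <stdio.h>

struct clk_choice {
	bool programmable;   /* true: clk_mul path, false: base clock */
	unsigned int div;    /* divisor as applied to the clock */
	unsigned int actual; /* resulting clock in Hz */
};

static struct clk_choice pick_sdhci3_clock(unsigned int max_clk,
					   unsigned int clk_mul,
					   unsigned int target)
{
	struct clk_choice c = { false, 1, max_clk };
	unsigned int div;

	if (clk_mul) {
		/* Programmable Clock Mode: divide the multiplied clock. */
		for (div = 1; div <= 1024; div++) {
			if (max_clk * clk_mul / div <= target) {
				c.programmable = true;
				c.div = div;
				c.actual = max_clk * clk_mul / div;
				return c;
			}
		}
		/* Even div = 1024 is still too fast: fall back to the base clock. */
	}

	/* Base clock path: SDHCI 3.0 divisors are even numbers up to 2046. */
	if (max_clk <= target) {
		div = 1;
	} else {
		for (div = 2; div < 2046; div += 2)
			if (max_clk / div <= target)
				break;
	}
	c.programmable = false;
	c.div = div;
	c.actual = max_clk / div;
	return c;
}

int main(void)
{
	struct clk_choice c = pick_sdhci3_clock(200000000, 0, 400000);

	printf("%s div=%u actual=%u Hz\n",
	       c.programmable ? "programmable" : "base", c.div, c.actual);
	return 0;
}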
@@ -1354,14 +1361,13 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct sdhci_host *host;
int present;
unsigned long flags;
- u32 tuning_opcode;
host = mmc_priv(mmc);
sdhci_runtime_pm_get(host);
/* Firstly check card presence */
- present = sdhci_do_get_cd(host);
+ present = mmc->ops->get_cd(mmc);
spin_lock_irqsave(&host->lock, flags);
@@ -1388,39 +1394,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq->cmd->error = -ENOMEDIUM;
tasklet_schedule(&host->finish_tasklet);
} else {
- u32 present_state;
-
- present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
- /*
- * Check if the re-tuning timer has already expired and there
- * is no on-going data transfer and DAT0 is not busy. If so,
- * we need to execute tuning procedure before sending command.
- */
- if ((host->flags & SDHCI_NEEDS_RETUNING) &&
- !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) &&
- (present_state & SDHCI_DATA_0_LVL_MASK)) {
- if (mmc->card) {
- /* eMMC uses cmd21 but sd and sdio use cmd19 */
- tuning_opcode =
- mmc->card->type == MMC_TYPE_MMC ?
- MMC_SEND_TUNING_BLOCK_HS200 :
- MMC_SEND_TUNING_BLOCK;
-
- /* Here we need to set the host->mrq to NULL,
- * in case the pending finish_tasklet
- * finishes it incorrectly.
- */
- host->mrq = NULL;
-
- spin_unlock_irqrestore(&host->lock, flags);
- sdhci_execute_tuning(mmc, tuning_opcode);
- spin_lock_irqsave(&host->lock, flags);
-
- /* Restore original mmc_request structure */
- host->mrq = mrq;
- }
- }
-
if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
sdhci_send_command(host, mrq->sbc);
else
@@ -1563,8 +1536,17 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
+ else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
+ ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
+ else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
+ ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
+ else {
+ pr_warn("%s: invalid driver type, default to "
+ "driver type B\n", mmc_hostname(mmc));
+ ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
+ }
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
} else {
@@ -1642,15 +1624,21 @@ static int sdhci_do_get_cd(struct sdhci_host *host)
if (host->flags & SDHCI_DEVICE_DEAD)
return 0;
- /* If polling/nonremovable, assume that the card is always present. */
- if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
- (host->mmc->caps & MMC_CAP_NONREMOVABLE))
+ /* If nonremovable, assume that the card is always present. */
+ if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
return 1;
- /* Try slot gpio detect */
+ /*
+ * Try slot gpio detect; if defined, it takes precedence
+ * over built-in controller functionality
+ */
if (!IS_ERR_VALUE(gpio_cd))
return !!gpio_cd;
+ /* If polling, assume that the card is always present. */
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ return 1;
+
/* Host native card detect */
return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
@@ -1910,9 +1898,9 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
tuning_count = host->tuning_count;
/*
- * The Host Controller needs tuning only in case of SDR104 mode
- * and for SDR50 mode when Use Tuning for SDR50 is set in the
- * Capabilities register.
+ * The Host Controller needs tuning in case of SDR104 and DDR50
+ * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
+ * the Capabilities register.
* If the Host Controller supports the HS200 mode then the
* tuning function has to be executed.
*/
@@ -1932,6 +1920,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
break;
case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_DDR50:
break;
case MMC_TIMING_UHS_SDR50:
@@ -2067,23 +2056,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
}
out:
- host->flags &= ~SDHCI_NEEDS_RETUNING;
-
if (tuning_count) {
- host->flags |= SDHCI_USING_RETUNING_TIMER;
- mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
+ /*
+ * In case tuning fails, host controllers which support
+ * re-tuning can try tuning again at a later time, when the
+ * re-tuning timer expires. So for these controllers, we
+ * return 0. Since there might be other controllers that do not
+ * have this capability, we return an error for them.
+ */
+ err = 0;
}
- /*
- * In case tuning fails, host controllers which support re-tuning can
- * try tuning again at a later time, when the re-tuning timer expires.
- * So for these controllers, we return 0. Since there might be other
- * controllers who do not have this capability, we return error for
- * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
- * a retuning timer to do the retuning for the card.
- */
- if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
- err = 0;
+ host->mmc->retune_period = err ? 0 : tuning_count;
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
@@ -2094,6 +2078,18 @@ out_unlock:
return err;
}
+static int sdhci_select_drive_strength(struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type)
+{
+ struct sdhci_host *host = mmc_priv(card->host);
+
+ if (!host->ops->select_drive_strength)
+ return 0;
+
+ return host->ops->select_drive_strength(host, card, max_dtr, host_drv,
+ card_drv, drv_type);
+}
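Drive-strength selection is now threaded through two optional callbacks: the mmc_host_ops entry calls the sdhci_ops hook, and for PCI hosts that hook forwards to a per-slot handler, with every level returning 0 (the default 50-ohm strength) when nothing is installed. A condensed sketch of that delegation chain, using hypothetical, simplified types:

#include <stdio.h>

struct slot {
	int (*select_drive_strength)(int card_drv);  /* per-slot override */
};

struct host_ops {
	int (*select_drive_strength)(struct slot *slot, int card_drv);
};

static int pci_select_drive_strength(struct slot *slot, int card_drv)
{
	if (!slot->select_drive_strength)
		return 0;                /* default drive strength (50 ohm) */
	return slot->select_drive_strength(card_drv);
}

static int core_select_drive_strength(const struct host_ops *ops,
				      struct slot *slot, int card_drv)
{
	if (!ops->select_drive_strength)
		return 0;
	return ops->select_drive_strength(slot, card_drv);
}

static int spt_pick(int card_drv)
{
	(void)card_drv;
	return 1;                        /* hypothetical non-default strength */
}

int main(void)
{
	struct host_ops ops = { .select_drive_strength = pci_select_drive_strength };
	struct slot slot = { .select_drive_strength = spt_pick };

	printf("selected drive strength: %d\n",
	       core_select_drive_strength(&ops, &slot, 0x0f));
	return 0;
}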
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
@@ -2131,49 +2127,36 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct mmc_data *data = mrq->data;
if (host->flags & SDHCI_REQ_USE_DMA) {
- if (data->host_cookie)
+ if (data->host_cookie == COOKIE_GIVEN ||
+ data->host_cookie == COOKIE_MAPPED)
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
data->flags & MMC_DATA_WRITE ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
- mrq->data->host_cookie = 0;
+ data->host_cookie = COOKIE_UNMAPPED;
}
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
- struct mmc_data *data,
- struct sdhci_host_next *next)
+ struct mmc_data *data)
{
int sg_count;
- if (!next && data->host_cookie &&
- data->host_cookie != host->next_data.cookie) {
- pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n",
- __func__, data->host_cookie, host->next_data.cookie);
- data->host_cookie = 0;
+ if (data->host_cookie == COOKIE_MAPPED) {
+ data->host_cookie = COOKIE_GIVEN;
+ return data->sg_count;
}
- /* Check if next job is already prepared */
- if (next ||
- (!next && data->host_cookie != host->next_data.cookie)) {
- sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len,
- data->flags & MMC_DATA_WRITE ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
- } else {
- sg_count = host->next_data.sg_count;
- host->next_data.sg_count = 0;
- }
+ WARN_ON(data->host_cookie == COOKIE_GIVEN);
+ sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (sg_count == 0)
- return -EINVAL;
+ return -ENOSPC;
- if (next) {
- next->sg_count = sg_count;
- data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
- } else
- host->sg_count = sg_count;
+ data->sg_count = sg_count;
+ data->host_cookie = COOKIE_MAPPED;
return sg_count;
}
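With the old next_data bookkeeping gone, host_cookie is reduced to a three-state marker: pre_req() maps the scatterlist and sets COOKIE_MAPPED, the request path takes ownership and marks COOKIE_GIVEN, and any unmap path returns it to COOKIE_UNMAPPED. A small stand-alone sketch of that life cycle, with dma_map_sg()/dma_unmap_sg() stubbed out:

#include <assert.h>

enum cookie { COOKIE_UNMAPPED, COOKIE_MAPPED, COOKIE_GIVEN };

struct req { enum cookie cookie; int sg_count; };

/* pre_req: map ahead of time so the request path can skip the mapping. */
static void pre_req(struct req *r)
{
	r->cookie = COOKIE_UNMAPPED;
	r->sg_count = 4;                 /* stand-in for dma_map_sg() */
	r->cookie = COOKIE_MAPPED;
}

/* request path: reuse a pre-mapped list if there is one, else map now. */
static int pre_dma_transfer(struct req *r)
{
	if (r->cookie == COOKIE_MAPPED) {
		r->cookie = COOKIE_GIVEN;
		return r->sg_count;
	}
	assert(r->cookie != COOKIE_GIVEN);   /* would be a double hand-off */
	r->sg_count = 4;                 /* stand-in for dma_map_sg() */
	r->cookie = COOKIE_MAPPED;
	return r->sg_count;
}

/* post_req / finish_data: unmap anything that is still mapped or given. */
static void post_req(struct req *r)
{
	if (r->cookie == COOKIE_MAPPED || r->cookie == COOKIE_GIVEN)
		r->cookie = COOKIE_UNMAPPED;   /* stand-in for dma_unmap_sg() */
}

int main(void)
{
	struct req r = { COOKIE_UNMAPPED, 0 };

	pre_req(&r);
	pre_dma_transfer(&r);
	post_req(&r);
	assert(r.cookie == COOKIE_UNMAPPED);
	return 0;
}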
@@ -2183,16 +2166,10 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
{
struct sdhci_host *host = mmc_priv(mmc);
- if (mrq->data->host_cookie) {
- mrq->data->host_cookie = 0;
- return;
- }
+ mrq->data->host_cookie = COOKIE_UNMAPPED;
if (host->flags & SDHCI_REQ_USE_DMA)
- if (sdhci_pre_dma_transfer(host,
- mrq->data,
- &host->next_data) < 0)
- mrq->data->host_cookie = 0;
+ sdhci_pre_dma_transfer(host, mrq->data);
}
static void sdhci_card_event(struct mmc_host *mmc)
@@ -2238,6 +2215,7 @@ static const struct mmc_host_ops sdhci_ops = {
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
.prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
.execute_tuning = sdhci_execute_tuning,
+ .select_drive_strength = sdhci_select_drive_strength,
.card_event = sdhci_card_event,
.card_busy = sdhci_card_busy,
};
@@ -2339,20 +2317,6 @@ static void sdhci_timeout_timer(unsigned long data)
spin_unlock_irqrestore(&host->lock, flags);
}
-static void sdhci_tuning_timer(unsigned long data)
-{
- struct sdhci_host *host;
- unsigned long flags;
-
- host = (struct sdhci_host *)data;
-
- spin_lock_irqsave(&host->lock, flags);
-
- host->flags |= SDHCI_NEEDS_RETUNING;
-
- spin_unlock_irqrestore(&host->lock, flags);
-}
-
/*****************************************************************************\
* *
* Interrupt handling *
@@ -2730,11 +2694,8 @@ int sdhci_suspend_host(struct sdhci_host *host)
{
sdhci_disable_card_detection(host);
- /* Disable tuning since we are suspending */
- if (host->flags & SDHCI_USING_RETUNING_TIMER) {
- del_timer_sync(&host->tuning_timer);
- host->flags &= ~SDHCI_NEEDS_RETUNING;
- }
+ mmc_retune_timer_stop(host->mmc);
+ mmc_retune_needed(host->mmc);
if (!device_may_wakeup(mmc_dev(host->mmc))) {
host->ier = 0;
@@ -2759,17 +2720,6 @@ int sdhci_resume_host(struct sdhci_host *host)
host->ops->enable_dma(host);
}
- if (!device_may_wakeup(mmc_dev(host->mmc))) {
- ret = request_threaded_irq(host->irq, sdhci_irq,
- sdhci_thread_irq, IRQF_SHARED,
- mmc_hostname(host->mmc), host);
- if (ret)
- return ret;
- } else {
- sdhci_disable_irq_wakeups(host);
- disable_irq_wake(host->irq);
- }
-
if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
(host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
/* Card keeps power but host controller does not */
@@ -2782,11 +2732,18 @@ int sdhci_resume_host(struct sdhci_host *host)
mmiowb();
}
- sdhci_enable_card_detection(host);
+ if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ ret = request_threaded_irq(host->irq, sdhci_irq,
+ sdhci_thread_irq, IRQF_SHARED,
+ mmc_hostname(host->mmc), host);
+ if (ret)
+ return ret;
+ } else {
+ sdhci_disable_irq_wakeups(host);
+ disable_irq_wake(host->irq);
+ }
- /* Set the re-tuning expiration flag */
- if (host->flags & SDHCI_USING_RETUNING_TIMER)
- host->flags |= SDHCI_NEEDS_RETUNING;
+ sdhci_enable_card_detection(host);
return ret;
}
@@ -2806,7 +2763,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
- if (host->runtime_suspended || host->bus_on)
+ if (host->bus_on)
return;
host->bus_on = true;
pm_runtime_get_noresume(host->mmc->parent);
@@ -2814,7 +2771,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
- if (host->runtime_suspended || !host->bus_on)
+ if (!host->bus_on)
return;
host->bus_on = false;
pm_runtime_put_noidle(host->mmc->parent);
@@ -2824,11 +2781,8 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
unsigned long flags;
- /* Disable tuning since we are suspending */
- if (host->flags & SDHCI_USING_RETUNING_TIMER) {
- del_timer_sync(&host->tuning_timer);
- host->flags &= ~SDHCI_NEEDS_RETUNING;
- }
+ mmc_retune_timer_stop(host->mmc);
+ mmc_retune_needed(host->mmc);
spin_lock_irqsave(&host->lock, flags);
host->ier &= SDHCI_INT_CARD_INT;
@@ -2871,10 +2825,6 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
spin_unlock_irqrestore(&host->lock, flags);
}
- /* Set the re-tuning expiration flag */
- if (host->flags & SDHCI_USING_RETUNING_TIMER)
- host->flags |= SDHCI_NEEDS_RETUNING;
-
spin_lock_irqsave(&host->lock, flags);
host->runtime_suspended = false;
@@ -2914,6 +2864,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
host = mmc_priv(mmc);
host->mmc = mmc;
+ host->mmc_host_ops = sdhci_ops;
+ mmc->ops = &host->mmc_host_ops;
return host;
}
@@ -2927,6 +2879,7 @@ int sdhci_add_host(struct sdhci_host *host)
u32 max_current_caps;
unsigned int ocr_avail;
unsigned int override_timeout_clk;
+ u32 max_clk;
int ret;
WARN_ON(host == NULL);
@@ -3090,7 +3043,6 @@ int sdhci_add_host(struct sdhci_host *host)
host->max_clk = host->ops->get_max_clock(host);
}
- host->next_data.cookie = 1;
/*
* In case of Host Controller v3.00, find out whether clock
* multiplier is supported.
@@ -3110,19 +3062,22 @@ int sdhci_add_host(struct sdhci_host *host)
/*
* Set host parameters.
*/
- mmc->ops = &sdhci_ops;
- mmc->f_max = host->max_clk;
+ max_clk = host->max_clk;
+
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else if (host->version >= SDHCI_SPEC_300) {
if (host->clk_mul) {
mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
- mmc->f_max = host->max_clk * host->clk_mul;
+ max_clk = host->max_clk * host->clk_mul;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
+ if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
+ mmc->f_max = max_clk;
+
if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
SDHCI_TIMEOUT_CLK_SHIFT;
@@ -3182,7 +3137,8 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
- !(mmc->caps & MMC_CAP_NONREMOVABLE))
+ !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
+ IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* If there are external regulators, get them */
@@ -3414,13 +3370,6 @@ int sdhci_add_host(struct sdhci_host *host)
init_waitqueue_head(&host->buf_ready_int);
- if (host->version >= SDHCI_SPEC_300) {
- /* Initialize re-tuning timer */
- init_timer(&host->tuning_timer);
- host->tuning_timer.data = (unsigned long)host;
- host->tuning_timer.function = sdhci_tuning_timer;
- }
-
sdhci_init(host, 0);
ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
diff --git a/kernel/drivers/mmc/host/sdhci.h b/kernel/drivers/mmc/host/sdhci.h
index e639b7f43..9c331ac5a 100644
--- a/kernel/drivers/mmc/host/sdhci.h
+++ b/kernel/drivers/mmc/host/sdhci.h
@@ -309,9 +309,10 @@ struct sdhci_adma2_64_desc {
*/
#define SDHCI_MAX_SEGS 128
-struct sdhci_host_next {
- unsigned int sg_count;
- s32 cookie;
+enum sdhci_cookie {
+ COOKIE_UNMAPPED,
+ COOKIE_MAPPED,
+ COOKIE_GIVEN,
};
struct sdhci_host {
@@ -409,6 +410,13 @@ struct sdhci_host {
#define SDHCI_QUIRK2_SUPPORT_SINGLE (1<<13)
/* Controller broken with using ACMD23 */
#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14)
+/* A clock divider of zero is broken in the controller */
+#define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15)
+/*
+ * When internal clock is disabled, a delay is needed before modifying the
+ * SD clock frequency or re-enabling the internal clock.
+ */
+#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST (1<<16)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -417,6 +425,7 @@ struct sdhci_host {
/* Internal data */
struct mmc_host *mmc; /* MMC structure */
+ struct mmc_host_ops mmc_host_ops; /* MMC host ops */
u64 dma_mask; /* custom DMA mask */
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
@@ -432,13 +441,11 @@ struct sdhci_host {
#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
#define SDHCI_SDR50_NEEDS_TUNING (1<<4) /* SDR50 needs tuning */
-#define SDHCI_NEEDS_RETUNING (1<<5) /* Host needs retuning */
#define SDHCI_AUTO_CMD12 (1<<6) /* Auto CMD12 support */
#define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */
#define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */
#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
-#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
@@ -504,9 +511,7 @@ struct sdhci_host {
unsigned int tuning_count; /* Timer count for re-tuning */
unsigned int tuning_mode; /* Re-tuning mode supported by host */
#define SDHCI_TUNING_MODE_1 0
- struct timer_list tuning_timer; /* Timer for tuning */
- struct sdhci_host_next next_data;
unsigned long private[0] ____cacheline_aligned;
};
@@ -541,6 +546,10 @@ struct sdhci_ops {
void (*platform_init)(struct sdhci_host *host);
void (*card_event)(struct sdhci_host *host);
void (*voltage_switch)(struct sdhci_host *host);
+ int (*select_drive_strength)(struct sdhci_host *host,
+ struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
diff --git a/kernel/drivers/mmc/host/sdhci_f_sdh30.c b/kernel/drivers/mmc/host/sdhci_f_sdh30.c
index 2fe8b9148..983b8b32e 100644
--- a/kernel/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/kernel/drivers/mmc/host/sdhci_f_sdh30.c
@@ -49,7 +49,7 @@ struct f_sdhost_priv {
struct device *dev;
};
-void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
+static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
{
struct f_sdhost_priv *priv = sdhci_priv(host);
u32 ctrl = 0;
@@ -77,12 +77,12 @@ void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
sdhci_writel(host, ctrl, F_SDH30_TUNING_SETTING);
}
-unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
+static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
{
return F_SDH30_MIN_CLOCK;
}
-void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
+static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
{
if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL);
@@ -114,8 +114,7 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
return irq;
}
- host = sdhci_alloc_host(dev, sizeof(struct sdhci_host) +
- sizeof(struct f_sdhost_priv));
+ host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
if (IS_ERR(host))
return PTR_ERR(host);
diff --git a/kernel/drivers/mmc/host/sh_mmcif.c b/kernel/drivers/mmc/host/sh_mmcif.c
index 7eff087cf..ad9ffea7d 100644
--- a/kernel/drivers/mmc/host/sh_mmcif.c
+++ b/kernel/drivers/mmc/host/sh_mmcif.c
@@ -57,6 +57,7 @@
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
@@ -205,14 +206,14 @@
#define CLKDEV_MMC_DATA 20000000 /* 20MHz */
#define CLKDEV_INIT 400000 /* 400 KHz */
-enum mmcif_state {
+enum sh_mmcif_state {
STATE_IDLE,
STATE_REQUEST,
STATE_IOS,
STATE_TIMEOUT,
};
-enum mmcif_wait_for {
+enum sh_mmcif_wait_for {
MMCIF_WAIT_FOR_REQUEST,
MMCIF_WAIT_FOR_CMD,
MMCIF_WAIT_FOR_MREAD,
@@ -224,12 +225,14 @@ enum mmcif_wait_for {
MMCIF_WAIT_FOR_STOP,
};
+/*
+ * difference for each SoC
+ */
struct sh_mmcif_host {
struct mmc_host *mmc;
struct mmc_request *mrq;
struct platform_device *pd;
- struct clk *hclk;
- unsigned int clk;
+ struct clk *clk;
int bus_width;
unsigned char timing;
bool sd_error;
@@ -238,8 +241,8 @@ struct sh_mmcif_host {
void __iomem *addr;
u32 *pio_ptr;
spinlock_t lock; /* protect sh_mmcif_host::state */
- enum mmcif_state state;
- enum mmcif_wait_for wait_for;
+ enum sh_mmcif_state state;
+ enum sh_mmcif_wait_for wait_for;
struct delayed_work timeout_work;
size_t blocksize;
int sg_idx;
@@ -249,6 +252,7 @@ struct sh_mmcif_host {
bool ccs_enable; /* Command Completion Signal support */
bool clk_ctrl2_enable;
struct mutex thread_lock;
+ u32 clkdiv_map; /* see CE_CLK_CTRL::CLKDIV */
/* DMA support */
struct dma_chan *chan_rx;
@@ -257,6 +261,14 @@ struct sh_mmcif_host {
bool dma_active;
};
+static const struct of_device_id sh_mmcif_of_match[] = {
+ { .compatible = "renesas,sh-mmcif" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sh_mmcif_of_match);
+
+#define sh_mmcif_host_to_dev(host) (&host->pd->dev)
+
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
unsigned int reg, u32 val)
{
@@ -269,15 +281,16 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
writel(~val & readl(host->addr + reg), host->addr + reg);
}
-static void mmcif_dma_complete(void *arg)
+static void sh_mmcif_dma_complete(void *arg)
{
struct sh_mmcif_host *host = arg;
struct mmc_request *mrq = host->mrq;
+ struct device *dev = sh_mmcif_host_to_dev(host);
- dev_dbg(&host->pd->dev, "Command completed\n");
+ dev_dbg(dev, "Command completed\n");
if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
- dev_name(&host->pd->dev)))
+ dev_name(dev)))
return;
complete(&host->dma_complete);
@@ -289,6 +302,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
struct scatterlist *sg = data->sg;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
+ struct device *dev = sh_mmcif_host_to_dev(host);
dma_cookie_t cookie = -EINVAL;
int ret;
@@ -301,13 +315,13 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
}
if (desc) {
- desc->callback = mmcif_dma_complete;
+ desc->callback = sh_mmcif_dma_complete;
desc->callback_param = host;
cookie = dmaengine_submit(desc);
sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
dma_async_issue_pending(chan);
}
- dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
+ dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
__func__, data->sg_len, ret, cookie);
if (!desc) {
@@ -323,12 +337,12 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
host->chan_tx = NULL;
dma_release_channel(chan);
}
- dev_warn(&host->pd->dev,
+ dev_warn(dev,
"DMA failed: %d, falling back to PIO\n", ret);
sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
}
- dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+ dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
desc, cookie, data->sg_len);
}
@@ -338,6 +352,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
struct scatterlist *sg = data->sg;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
+ struct device *dev = sh_mmcif_host_to_dev(host);
dma_cookie_t cookie = -EINVAL;
int ret;
@@ -350,13 +365,13 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
}
if (desc) {
- desc->callback = mmcif_dma_complete;
+ desc->callback = sh_mmcif_dma_complete;
desc->callback_param = host;
cookie = dmaengine_submit(desc);
sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
dma_async_issue_pending(chan);
}
- dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
+ dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
__func__, data->sg_len, ret, cookie);
if (!desc) {
@@ -372,12 +387,12 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
host->chan_rx = NULL;
dma_release_channel(chan);
}
- dev_warn(&host->pd->dev,
+ dev_warn(dev,
"DMA failed: %d, falling back to PIO\n", ret);
sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
}
- dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
+ dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__,
desc, cookie);
}
@@ -390,6 +405,7 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
struct dma_chan *chan;
void *slave_data = NULL;
struct resource *res;
+ struct device *dev = sh_mmcif_host_to_dev(host);
dma_cap_mask_t mask;
int ret;
@@ -402,10 +418,10 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
(void *)pdata->slave_id_rx;
chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
- slave_data, &host->pd->dev,
+ slave_data, dev,
direction == DMA_MEM_TO_DEV ? "tx" : "rx");
- dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
+ dev_dbg(dev, "%s: %s: got channel %p\n", __func__,
direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan);
if (!chan)
@@ -435,12 +451,13 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
struct sh_mmcif_plat_data *pdata)
{
+ struct device *dev = sh_mmcif_host_to_dev(host);
host->dma_active = false;
if (pdata) {
if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
return;
- } else if (!host->pd->dev.of_node) {
+ } else if (!dev->of_node) {
return;
}
@@ -476,21 +493,59 @@ static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
- struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+ struct device *dev = sh_mmcif_host_to_dev(host);
+ struct sh_mmcif_plat_data *p = dev->platform_data;
bool sup_pclk = p ? p->sup_pclk : false;
+ unsigned int current_clk = clk_get_rate(host->clk);
+ unsigned int clkdiv;
sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
if (!clk)
return;
- if (sup_pclk && clk == host->clk)
- sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
- else
- sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
- ((fls(DIV_ROUND_UP(host->clk,
- clk) - 1) - 1) << 16));
+ if (host->clkdiv_map) {
+ unsigned int freq, best_freq, myclk, div, diff_min, diff;
+ int i;
+
+ clkdiv = 0;
+ diff_min = ~0;
+ best_freq = 0;
+ for (i = 31; i >= 0; i--) {
+ if (!((1 << i) & host->clkdiv_map))
+ continue;
+
+ /*
+ * clk = parent_freq / div
+ * -> parent_freq = clk x div
+ */
+
+ div = 1 << (i + 1);
+ freq = clk_round_rate(host->clk, clk * div);
+ myclk = freq / div;
+ diff = (myclk > clk) ? myclk - clk : clk - myclk;
+
+ if (diff <= diff_min) {
+ best_freq = freq;
+ clkdiv = i;
+ diff_min = diff;
+ }
+ }
+
+ dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
+ (best_freq / (1 << (clkdiv + 1))), clk,
+ best_freq, clkdiv);
+
+ clk_set_rate(host->clk, best_freq);
+ clkdiv = clkdiv << 16;
+ } else if (sup_pclk && clk == current_clk) {
+ clkdiv = CLK_SUP_PCLK;
+ } else {
+ clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16;
+ }
+
+ sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv);
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
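When a clkdiv_map is provided, the loop above tries every permitted power-of-two divider, asks the clock framework what parent rate it could deliver for clk x div, and keeps the divider whose resulting MMC clock lands closest to the request. A stand-alone sketch of that search, with clk_round_rate() replaced by a fixed-rate stub (the 104 MHz parent is made up):

#include <stdio.h>

/* Stand-in for clk_round_rate(): pretend the parent clock is fixed. */
static unsigned int round_rate_stub(unsigned int requested)
{
	const unsigned int parent = 104000000;   /* hypothetical parent rate */
	(void)requested;
	return parent;
}

/* Pick the divider (as a bit index) giving the MMC clock closest to target. */
static int pick_clkdiv(unsigned int clkdiv_map, unsigned int target,
		       unsigned int *best_freq_out)
{
	unsigned int diff_min = ~0u, best_freq = 0;
	int clkdiv = 0, i;

	for (i = 31; i >= 0; i--) {
		unsigned int div, freq, myclk, diff;

		if (!((1u << i) & clkdiv_map))
			continue;

		div = 1u << (i + 1);
		freq = round_rate_stub(target * div); /* clk_round_rate() in the driver */
		myclk = freq / div;
		diff = myclk > target ? myclk - target : target - myclk;

		if (diff <= diff_min) {
			best_freq = freq;
			clkdiv = i;
			diff_min = diff;
		}
	}

	*best_freq_out = best_freq;
	return clkdiv;
}

int main(void)
{
	unsigned int best_freq;
	int clkdiv = pick_clkdiv(0x3ff, 400000, &best_freq);

	printf("clkdiv=%d parent=%u Hz mmc=%u Hz\n",
	       clkdiv, best_freq, best_freq / (1u << (clkdiv + 1)));
	return 0;
}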
@@ -514,6 +569,7 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
+ struct device *dev = sh_mmcif_host_to_dev(host);
u32 state1, state2;
int ret, timeout;
@@ -521,8 +577,8 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
- dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
- dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);
+ dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1);
+ dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2);
if (state1 & STS1_CMDSEQ) {
sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
@@ -534,25 +590,25 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
mdelay(1);
}
if (!timeout) {
- dev_err(&host->pd->dev,
+ dev_err(dev,
"Forced end of command sequence timeout err\n");
return -EIO;
}
sh_mmcif_sync_reset(host);
- dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
+ dev_dbg(dev, "Forced end of command sequence\n");
return -EIO;
}
if (state2 & STS2_CRC_ERR) {
- dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
+ dev_err(dev, " CRC error: state %u, wait %u\n",
host->state, host->wait_for);
ret = -EIO;
} else if (state2 & STS2_TIMEOUT_ERR) {
- dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
+ dev_err(dev, " Timeout: state %u, wait %u\n",
host->state, host->wait_for);
ret = -ETIMEDOUT;
} else {
- dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
+ dev_dbg(dev, " End/Index error: state %u, wait %u\n",
host->state, host->wait_for);
ret = -EIO;
}
@@ -593,13 +649,14 @@ static void sh_mmcif_single_read(struct sh_mmcif_host *host,
static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
+ struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
u32 *p = sg_virt(data->sg);
int i;
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
- dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
+ dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
@@ -634,13 +691,14 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
+ struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
u32 *p = host->pio_ptr;
int i;
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
- dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
+ dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
@@ -671,13 +729,14 @@ static void sh_mmcif_single_write(struct sh_mmcif_host *host,
static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
+ struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
u32 *p = sg_virt(data->sg);
int i;
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
- dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
+ dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
@@ -712,13 +771,14 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
+ struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
u32 *p = host->pio_ptr;
int i;
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
- dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
+ dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
@@ -756,6 +816,7 @@ static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = mrq->data;
struct mmc_command *cmd = mrq->cmd;
u32 opc = cmd->opcode;
@@ -775,7 +836,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
tmp |= CMD_SET_RTYP_17B;
break;
default:
- dev_err(&host->pd->dev, "Unsupported response type.\n");
+ dev_err(dev, "Unsupported response type.\n");
break;
}
switch (opc) {
@@ -803,7 +864,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
tmp |= CMD_SET_DATW_8;
break;
default:
- dev_err(&host->pd->dev, "Unsupported bus width.\n");
+ dev_err(dev, "Unsupported bus width.\n");
break;
}
switch (host->timing) {
@@ -846,6 +907,8 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
struct mmc_request *mrq, u32 opc)
{
+ struct device *dev = sh_mmcif_host_to_dev(host);
+
switch (opc) {
case MMC_READ_MULTIPLE_BLOCK:
sh_mmcif_multi_read(host, mrq);
@@ -861,7 +924,7 @@ static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
sh_mmcif_single_read(host, mrq);
return 0;
default:
- dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
+ dev_err(dev, "Unsupported CMD%d\n", opc);
return -EINVAL;
}
}
@@ -918,6 +981,8 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct device *dev = sh_mmcif_host_to_dev(host);
+
switch (mrq->cmd->opcode) {
case MMC_READ_MULTIPLE_BLOCK:
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
@@ -926,7 +991,7 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
break;
default:
- dev_err(&host->pd->dev, "unsupported stop cmd\n");
+ dev_err(dev, "unsupported stop cmd\n");
mrq->stop->error = sh_mmcif_error_manage(host);
return;
}
@@ -937,11 +1002,13 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sh_mmcif_host *host = mmc_priv(mmc);
+ struct device *dev = sh_mmcif_host_to_dev(host);
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
if (host->state != STATE_IDLE) {
- dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
+ dev_dbg(dev, "%s() rejected, state %u\n",
+ __func__, host->state);
spin_unlock_irqrestore(&host->lock, flags);
mrq->cmd->error = -EAGAIN;
mmc_request_done(mmc, mrq);
@@ -972,17 +1039,37 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
sh_mmcif_start_cmd(host, mrq);
}
-static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
+static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
{
- int ret = clk_prepare_enable(host->hclk);
+ struct device *dev = sh_mmcif_host_to_dev(host);
+
+ if (host->mmc->f_max) {
+ unsigned int f_max, f_min = 0, f_min_old;
+
+ f_max = host->mmc->f_max;
+ for (f_min_old = f_max; f_min_old > 2;) {
+ f_min = clk_round_rate(host->clk, f_min_old / 2);
+ if (f_min == f_min_old)
+ break;
+ f_min_old = f_min;
+ }
+
+ /*
+ * This driver assumes this SoC is R-Car Gen2 or later
+ */
+ host->clkdiv_map = 0x3ff;
+
+ host->mmc->f_max = f_max / (1 << ffs(host->clkdiv_map));
+ host->mmc->f_min = f_min / (1 << fls(host->clkdiv_map));
+ } else {
+ unsigned int clk = clk_get_rate(host->clk);
- if (!ret) {
- host->clk = clk_get_rate(host->hclk);
- host->mmc->f_max = host->clk / 2;
- host->mmc->f_min = host->clk / 512;
+ host->mmc->f_max = clk / 2;
+ host->mmc->f_min = clk / 512;
}
- return ret;
+ dev_dbg(dev, "clk max/min = %d/%d\n",
+ host->mmc->f_max, host->mmc->f_min);
}
static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
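/*
 * Quick check of the ffs()/fls() arithmetic in sh_mmcif_clk_setup()
 * above for the assumed Gen2 clkdiv_map of 0x3ff (dividers 2^1..2^10).
 * fls() is open-coded because it is a kernel helper, not libc;
 * everything else is plain user-space C.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static int fls_sketch(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int map = 0x3ff;

	/* smallest divider 2^ffs(map) = 2, largest divider 2^fls(map) = 1024 */
	printf("f_max is divided by %u, f_min by %u\n",
	       1u << ffs(map), 1u << fls_sketch(map));
	return 0;
}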
@@ -998,11 +1085,13 @@ static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sh_mmcif_host *host = mmc_priv(mmc);
+ struct device *dev = sh_mmcif_host_to_dev(host);
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
if (host->state != STATE_IDLE) {
- dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
+ dev_dbg(dev, "%s() rejected, state %u\n",
+ __func__, host->state);
spin_unlock_irqrestore(&host->lock, flags);
return;
}
@@ -1013,7 +1102,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->power_mode == MMC_POWER_UP) {
if (!host->card_present) {
/* See if we also get DMA */
- sh_mmcif_request_dma(host, host->pd->dev.platform_data);
+ sh_mmcif_request_dma(host, dev->platform_data);
host->card_present = true;
}
sh_mmcif_set_power(host, ios);
@@ -1027,8 +1116,8 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
if (host->power) {
- pm_runtime_put_sync(&host->pd->dev);
- clk_disable_unprepare(host->hclk);
+ pm_runtime_put_sync(dev);
+ clk_disable_unprepare(host->clk);
host->power = false;
if (ios->power_mode == MMC_POWER_OFF)
sh_mmcif_set_power(host, ios);
@@ -1039,8 +1128,9 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->clock) {
if (!host->power) {
- sh_mmcif_clk_update(host);
- pm_runtime_get_sync(&host->pd->dev);
+ clk_prepare_enable(host->clk);
+
+ pm_runtime_get_sync(dev);
host->power = true;
sh_mmcif_sync_reset(host);
}
@@ -1055,7 +1145,8 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
struct sh_mmcif_host *host = mmc_priv(mmc);
- struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+ struct device *dev = sh_mmcif_host_to_dev(host);
+ struct sh_mmcif_plat_data *p = dev->platform_data;
int ret = mmc_gpio_get_cd(mmc);
if (ret >= 0)
@@ -1077,6 +1168,7 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
struct mmc_command *cmd = host->mrq->cmd;
struct mmc_data *data = host->mrq->data;
+ struct device *dev = sh_mmcif_host_to_dev(host);
long time;
if (host->sd_error) {
@@ -1090,7 +1182,7 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
cmd->error = sh_mmcif_error_manage(host);
break;
}
- dev_dbg(&host->pd->dev, "CMD%d error %d\n",
+ dev_dbg(dev, "CMD%d error %d\n",
cmd->opcode, cmd->error);
host->sd_error = false;
return false;
@@ -1170,6 +1262,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
struct sh_mmcif_host *host = dev_id;
struct mmc_request *mrq;
+ struct device *dev = sh_mmcif_host_to_dev(host);
bool wait = false;
unsigned long flags;
int wait_work;
@@ -1184,7 +1277,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
mrq = host->mrq;
if (!mrq) {
- dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
+ dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
host->state, host->wait_for);
mutex_unlock(&host->thread_lock);
return IRQ_HANDLED;
@@ -1222,7 +1315,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
case MMCIF_WAIT_FOR_STOP:
if (host->sd_error) {
mrq->stop->error = sh_mmcif_error_manage(host);
- dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
+ dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error);
break;
}
sh_mmcif_get_cmd12response(host, mrq->stop);
@@ -1232,7 +1325,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
case MMCIF_WAIT_FOR_WRITE_END:
if (host->sd_error) {
mrq->data->error = sh_mmcif_error_manage(host);
- dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
+ dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error);
}
break;
default:
@@ -1275,6 +1368,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
struct sh_mmcif_host *host = dev_id;
+ struct device *dev = sh_mmcif_host_to_dev(host);
u32 state, mask;
state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
@@ -1286,32 +1380,33 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
if (state & ~MASK_CLEAN)
- dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
+ dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n",
state);
if (state & INT_ERR_STS || state & ~INT_ALL) {
host->sd_error = true;
- dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
+ dev_dbg(dev, "int err state = 0x%08x\n", state);
}
if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
if (!host->mrq)
- dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
+ dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state);
if (!host->dma_active)
return IRQ_WAKE_THREAD;
else if (host->sd_error)
- mmcif_dma_complete(host);
+ sh_mmcif_dma_complete(host);
} else {
- dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
+ dev_dbg(dev, "Unexpected IRQ 0x%x\n", state);
}
return IRQ_HANDLED;
}
-static void mmcif_timeout_work(struct work_struct *work)
+static void sh_mmcif_timeout_work(struct work_struct *work)
{
struct delayed_work *d = container_of(work, struct delayed_work, work);
struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
struct mmc_request *mrq = host->mrq;
+ struct device *dev = sh_mmcif_host_to_dev(host);
unsigned long flags;
if (host->dying)
@@ -1324,7 +1419,7 @@ static void mmcif_timeout_work(struct work_struct *work)
return;
}
- dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
+ dev_err(dev, "Timeout waiting for %u on CMD%u\n",
host->wait_for, mrq->cmd->opcode);
host->state = STATE_TIMEOUT;
@@ -1361,7 +1456,8 @@ static void mmcif_timeout_work(struct work_struct *work)
static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
- struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
+ struct device *dev = sh_mmcif_host_to_dev(host);
+ struct sh_mmcif_plat_data *pd = dev->platform_data;
struct mmc_host *mmc = host->mmc;
mmc_regulator_get_supply(mmc);
@@ -1380,7 +1476,8 @@ static int sh_mmcif_probe(struct platform_device *pdev)
int ret = 0, irq[2];
struct mmc_host *mmc;
struct sh_mmcif_host *host;
- struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct sh_mmcif_plat_data *pd = dev->platform_data;
struct resource *res;
void __iomem *reg;
const char *name;
@@ -1388,16 +1485,16 @@ static int sh_mmcif_probe(struct platform_device *pdev)
irq[0] = platform_get_irq(pdev, 0);
irq[1] = platform_get_irq(pdev, 1);
if (irq[0] < 0) {
- dev_err(&pdev->dev, "Get irq error\n");
+ dev_err(dev, "Get irq error\n");
return -ENXIO;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_ioremap_resource(dev, res);
if (IS_ERR(reg))
return PTR_ERR(reg);
- mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
+ mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev);
if (!mmc)
return -ENOMEM;
@@ -1430,41 +1527,44 @@ static int sh_mmcif_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(dev);
host->power = false;
- host->hclk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(host->hclk)) {
- ret = PTR_ERR(host->hclk);
- dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
+ host->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ dev_err(dev, "cannot get clock: %d\n", ret);
goto err_pm;
}
- ret = sh_mmcif_clk_update(host);
+
+ ret = clk_prepare_enable(host->clk);
if (ret < 0)
goto err_pm;
- ret = pm_runtime_resume(&pdev->dev);
+ sh_mmcif_clk_setup(host);
+
+ ret = pm_runtime_resume(dev);
if (ret < 0)
goto err_clk;
- INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
+ INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work);
sh_mmcif_sync_reset(host);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
- name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
- ret = devm_request_threaded_irq(&pdev->dev, irq[0], sh_mmcif_intr,
+ name = irq[1] < 0 ? dev_name(dev) : "sh_mmc:error";
+ ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr,
sh_mmcif_irqt, 0, name, host);
if (ret) {
- dev_err(&pdev->dev, "request_irq error (%s)\n", name);
+ dev_err(dev, "request_irq error (%s)\n", name);
goto err_clk;
}
if (irq[1] >= 0) {
- ret = devm_request_threaded_irq(&pdev->dev, irq[1],
+ ret = devm_request_threaded_irq(dev, irq[1],
sh_mmcif_intr, sh_mmcif_irqt,
0, "sh_mmc:int", host);
if (ret) {
- dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
+ dev_err(dev, "request_irq error (sh_mmc:int)\n");
goto err_clk;
}
}
@@ -1481,19 +1581,19 @@ static int sh_mmcif_probe(struct platform_device *pdev)
if (ret < 0)
goto err_clk;
- dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
+ dev_pm_qos_expose_latency_limit(dev, 100);
- dev_info(&pdev->dev, "Chip version 0x%04x, clock rate %luMHz\n",
+ dev_info(dev, "Chip version 0x%04x, clock rate %luMHz\n",
sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
- clk_get_rate(host->hclk) / 1000000UL);
+ clk_get_rate(host->clk) / 1000000UL);
- clk_disable_unprepare(host->hclk);
+ clk_disable_unprepare(host->clk);
return ret;
err_clk:
- clk_disable_unprepare(host->hclk);
+ clk_disable_unprepare(host->clk);
err_pm:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
err_host:
mmc_free_host(mmc);
return ret;
@@ -1504,7 +1604,7 @@ static int sh_mmcif_remove(struct platform_device *pdev)
struct sh_mmcif_host *host = platform_get_drvdata(pdev);
host->dying = true;
- clk_prepare_enable(host->hclk);
+ clk_prepare_enable(host->clk);
pm_runtime_get_sync(&pdev->dev);
dev_pm_qos_hide_latency_limit(&pdev->dev);
@@ -1519,7 +1619,7 @@ static int sh_mmcif_remove(struct platform_device *pdev)
*/
cancel_delayed_work_sync(&host->timeout_work);
- clk_disable_unprepare(host->hclk);
+ clk_disable_unprepare(host->clk);
mmc_free_host(host->mmc);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -1532,7 +1632,9 @@ static int sh_mmcif_suspend(struct device *dev)
{
struct sh_mmcif_host *host = dev_get_drvdata(dev);
+ pm_runtime_get_sync(dev);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
+ pm_runtime_put(dev);
return 0;
}
@@ -1543,12 +1645,6 @@ static int sh_mmcif_resume(struct device *dev)
}
#endif
-static const struct of_device_id mmcif_of_match[] = {
- { .compatible = "renesas,sh-mmcif" },
- { }
-};
-MODULE_DEVICE_TABLE(of, mmcif_of_match);
-
static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
};
@@ -1559,7 +1655,7 @@ static struct platform_driver sh_mmcif_driver = {
.driver = {
.name = DRIVER_NAME,
.pm = &sh_mmcif_dev_pm_ops,
- .of_match_table = mmcif_of_match,
+ .of_match_table = sh_mmcif_of_match,
},
};
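/*
 * Most of the sh_mmcif.c churn above is mechanical: each function now
 * obtains the device once via sh_mmcif_host_to_dev() and logs through a
 * local `dev` instead of open-coding &host->pd->dev at every call site.
 * A toy, user-space version of that refactoring (all names here are
 * invented; only the shape mirrors the patch):
 */
#include <stdio.h>

struct toy_pdev { const char *name; };
struct toy_host { struct toy_pdev *pd; };

/* single accessor replacing repeated host->pd->name dereferences */
static const char *toy_host_to_name(struct toy_host *host)
{
	return host->pd->name;
}

static void toy_log(struct toy_host *host, const char *msg)
{
	const char *name = toy_host_to_name(host);	/* looked up once */

	printf("%s: %s\n", name, msg);
}

int main(void)
{
	struct toy_pdev pdev = { .name = "sh_mmcif.0" };
	struct toy_host host = { .pd = &pdev };

	toy_log(&host, "probe complete");
	return 0;
}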
diff --git a/kernel/drivers/mmc/host/sunxi-mmc.c b/kernel/drivers/mmc/host/sunxi-mmc.c
index 4d3e1ffe5..83de82bce 100644
--- a/kernel/drivers/mmc/host/sunxi-mmc.c
+++ b/kernel/drivers/mmc/host/sunxi-mmc.c
@@ -210,6 +210,16 @@
#define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */
#define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */
+#define SDXC_CLK_400K 0
+#define SDXC_CLK_25M 1
+#define SDXC_CLK_50M 2
+#define SDXC_CLK_50M_DDR 3
+
+struct sunxi_mmc_clk_delay {
+ u32 output;
+ u32 sample;
+};
+
struct sunxi_idma_des {
u32 config;
u32 buf_size;
@@ -229,6 +239,7 @@ struct sunxi_mmc_host {
struct clk *clk_mmc;
struct clk *clk_sample;
struct clk *clk_output;
+ const struct sunxi_mmc_clk_delay *clk_delays;
/* irq */
spinlock_t lock;
@@ -595,7 +606,7 @@ static irqreturn_t sunxi_mmc_handle_manual_stop(int irq, void *dev_id)
static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en)
{
- unsigned long expire = jiffies + msecs_to_jiffies(250);
+ unsigned long expire = jiffies + msecs_to_jiffies(750);
u32 rval;
rval = mmc_readl(host, REG_CLKCR);
@@ -654,25 +665,19 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
/* determine delays */
if (rate <= 400000) {
- oclk_dly = 180;
- sclk_dly = 42;
+ oclk_dly = host->clk_delays[SDXC_CLK_400K].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_400K].sample;
} else if (rate <= 25000000) {
- oclk_dly = 180;
- sclk_dly = 75;
+ oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
} else if (rate <= 50000000) {
if (ios->timing == MMC_TIMING_UHS_DDR50) {
- oclk_dly = 60;
- sclk_dly = 120;
+ oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
} else {
- oclk_dly = 90;
- sclk_dly = 150;
+ oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
}
- } else if (rate <= 100000000) {
- oclk_dly = 6;
- sclk_dly = 24;
- } else if (rate <= 200000000) {
- oclk_dly = 3;
- sclk_dly = 12;
} else {
return -EINVAL;
}
@@ -868,9 +873,17 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, iflags);
}
+static int sunxi_mmc_card_busy(struct mmc_host *mmc)
+{
+ struct sunxi_mmc_host *host = mmc_priv(mmc);
+
+ return !!(mmc_readl(host, REG_STAS) & SDXC_CARD_DATA_BUSY);
+}
+
static const struct of_device_id sunxi_mmc_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-mmc", },
{ .compatible = "allwinner,sun5i-a13-mmc", },
+ { .compatible = "allwinner,sun9i-a80-mmc", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
@@ -882,6 +895,21 @@ static struct mmc_host_ops sunxi_mmc_ops = {
.get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = sunxi_mmc_enable_sdio_irq,
.hw_reset = sunxi_mmc_hw_reset,
+ .card_busy = sunxi_mmc_card_busy,
+};
+
+static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
+ [SDXC_CLK_400K] = { .output = 180, .sample = 180 },
+ [SDXC_CLK_25M] = { .output = 180, .sample = 75 },
+ [SDXC_CLK_50M] = { .output = 90, .sample = 120 },
+ [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 },
+};
+
+static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
+ [SDXC_CLK_400K] = { .output = 180, .sample = 180 },
+ [SDXC_CLK_25M] = { .output = 180, .sample = 75 },
+ [SDXC_CLK_50M] = { .output = 150, .sample = 120 },
+ [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 },
};
static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
@@ -895,6 +923,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
else
host->idma_des_size_bits = 16;
+ if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc"))
+ host->clk_delays = sun9i_mmc_clk_delays;
+ else
+ host->clk_delays = sunxi_mmc_clk_delays;
+
ret = mmc_regulator_get_supply(host->mmc);
if (ret) {
if (ret != -EPROBE_DEFER)
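/*
 * How the sunxi delay-table change above plays out: the per-rate
 * constants become a lookup into a per-SoC table, selected in
 * sunxi_mmc_resource_request() by compatible string.  A user-space
 * sketch using the sun4i/sun5i values from the patch; pick_delay() and
 * the bool-for-DDR flag are inventions of this sketch, not driver API.
 */
#include <stdbool.h>
#include <stdio.h>

enum { CLK_400K, CLK_25M, CLK_50M, CLK_50M_DDR, CLK_NR };

struct clk_delay { unsigned int output, sample; };

static const struct clk_delay sun4i_delays[CLK_NR] = {
	[CLK_400K]    = { .output = 180, .sample = 180 },
	[CLK_25M]     = { .output = 180, .sample =  75 },
	[CLK_50M]     = { .output =  90, .sample = 120 },
	[CLK_50M_DDR] = { .output =  60, .sample = 120 },
};

static const struct clk_delay *pick_delay(unsigned long rate, bool ddr)
{
	if (rate <= 400000)
		return &sun4i_delays[CLK_400K];
	if (rate <= 25000000)
		return &sun4i_delays[CLK_25M];
	if (rate <= 50000000)
		return &sun4i_delays[ddr ? CLK_50M_DDR : CLK_50M];
	return NULL;	/* faster rates are rejected (-EINVAL) after this patch */
}

int main(void)
{
	const struct clk_delay *d = pick_delay(50000000, false);

	if (d)
		printf("oclk_dly=%u sclk_dly=%u\n", d->output, d->sample);
	return 0;
}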
diff --git a/kernel/drivers/mmc/host/tmio_mmc.c b/kernel/drivers/mmc/host/tmio_mmc.c
index f746df493..e897e7fc3 100644
--- a/kernel/drivers/mmc/host/tmio_mmc.c
+++ b/kernel/drivers/mmc/host/tmio_mmc.c
@@ -85,8 +85,10 @@ static int tmio_mmc_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
+ if (!res) {
+ ret = -EINVAL;
+ goto cell_disable;
+ }
pdata->flags |= TMIO_MMC_HAVE_HIGH_REG;
@@ -101,7 +103,8 @@ static int tmio_mmc_probe(struct platform_device *pdev)
if (ret)
goto host_free;
- ret = request_irq(irq, tmio_mmc_irq, IRQF_TRIGGER_FALLING,
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq,
+ IRQF_TRIGGER_FALLING,
dev_name(&pdev->dev), host);
if (ret)
goto host_remove;
@@ -129,7 +132,6 @@ static int tmio_mmc_remove(struct platform_device *pdev)
if (mmc) {
struct tmio_mmc_host *host = mmc_priv(mmc);
- free_irq(platform_get_irq(pdev, 0), host);
tmio_mmc_host_remove(host);
if (cell->disable)
cell->disable(pdev);
diff --git a/kernel/drivers/mmc/host/tmio_mmc_pio.c b/kernel/drivers/mmc/host/tmio_mmc_pio.c
index dba7e1c19..a10fde40b 100644
--- a/kernel/drivers/mmc/host/tmio_mmc_pio.c
+++ b/kernel/drivers/mmc/host/tmio_mmc_pio.c
@@ -83,6 +83,8 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
return --host->sg_len;
}
+#define CMDREQ_TIMEOUT 5000
+
#ifdef CONFIG_MMC_DEBUG
#define STATUS_TO_TEXT(a, status, i) \
@@ -230,7 +232,7 @@ static void tmio_mmc_reset_work(struct work_struct *work)
*/
if (IS_ERR_OR_NULL(mrq)
|| time_is_after_jiffies(host->last_req_ts +
- msecs_to_jiffies(2000))) {
+ msecs_to_jiffies(CMDREQ_TIMEOUT))) {
spin_unlock_irqrestore(&host->lock, flags);
return;
}
@@ -818,7 +820,7 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
ret = tmio_mmc_start_command(host, mrq->cmd);
if (!ret) {
schedule_delayed_work(&host->delayed_reset_work,
- msecs_to_jiffies(2000));
+ msecs_to_jiffies(CMDREQ_TIMEOUT));
return;
}
@@ -1108,7 +1110,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
if (ret < 0)
goto host_free;
- _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
+ _host->ctl = devm_ioremap(&pdev->dev,
+ res_ctl->start, resource_size(res_ctl));
if (!_host->ctl) {
ret = -ENOMEM;
goto host_free;
@@ -1230,8 +1233,6 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- iounmap(host->ctl);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
diff --git a/kernel/drivers/mmc/host/usdhi6rol0.c b/kernel/drivers/mmc/host/usdhi6rol0.c
index 54b082b18..b47122d3e 100644
--- a/kernel/drivers/mmc/host/usdhi6rol0.c
+++ b/kernel/drivers/mmc/host/usdhi6rol0.c
@@ -1611,7 +1611,7 @@ static irqreturn_t usdhi6_cd(int irq, void *dev_id)
return IRQ_NONE;
/* Ack */
- usdhi6_write(host, USDHI6_SD_INFO1, !status);
+ usdhi6_write(host, USDHI6_SD_INFO1, ~status);
if (!work_pending(&mmc->detect.work) &&
(((status & USDHI6_SD_INFO1_CARD_INSERT) &&
@@ -1634,6 +1634,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = mrq ? mrq->data : NULL;
+ struct scatterlist *sg;
dev_warn(mmc_dev(host->mmc),
"%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
@@ -1665,11 +1666,12 @@ static void usdhi6_timeout_work(struct work_struct *work)
case USDHI6_WAIT_FOR_MWRITE:
case USDHI6_WAIT_FOR_READ:
case USDHI6_WAIT_FOR_WRITE:
+ sg = host->sg ?: data->sg;
dev_dbg(mmc_dev(host->mmc),
"%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
host->offset, data->blocks, data->blksz, data->sg_len,
- sg_dma_len(host->sg), host->sg->offset);
+ sg_dma_len(sg), sg->offset);
usdhi6_sg_unmap(host, true);
/*
* If USDHI6_WAIT_FOR_DATA_END times out, we have already unmapped
@@ -1715,12 +1717,14 @@ static int usdhi6_probe(struct platform_device *pdev)
if (!mmc)
return -ENOMEM;
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ goto e_free_mmc;
+
ret = mmc_of_parse(mmc);
if (ret < 0)
goto e_free_mmc;
- mmc_regulator_get_supply(mmc);
-
host = mmc_priv(mmc);
host->mmc = mmc;
host->wait = USDHI6_WAIT_FOR_REQUEST;
@@ -1734,8 +1738,10 @@ static int usdhi6_probe(struct platform_device *pdev)
}
host->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(host->clk))
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
goto e_free_mmc;
+ }
host->imclk = clk_get_rate(host->clk);
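/*
 * The usdhi6rol0.c ack fix above replaces a logical NOT with a bitwise
 * NOT.  Judging from the write, SD_INFO1 is acknowledged by writing the
 * handled bits back as 0 while leaving the rest as 1; `!status` turns
 * any non-zero status into 0 and therefore acks every bit at once.
 * Tiny user-space demonstration (the status value is made up):
 */
#include <stdio.h>

int main(void)
{
	unsigned int status = 0x00000009;	/* pretend two events are pending */

	printf("!status = 0x%08x  (clears all bits)\n", (unsigned int)!status);
	printf("~status = 0x%08x  (clears only the seen bits)\n", ~status);
	return 0;
}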
diff --git a/kernel/drivers/mmc/host/vub300.c b/kernel/drivers/mmc/host/vub300.c
index fbabbb82b..1e819f98b 100644
--- a/kernel/drivers/mmc/host/vub300.c
+++ b/kernel/drivers/mmc/host/vub300.c
@@ -563,7 +563,7 @@ static void add_offloaded_reg(struct vub300_mmc_host *vub300,
i += 1;
continue;
}
- };
+ }
__add_offloaded_reg_to_fifo(vub300, register_access, func);
}
@@ -1372,7 +1372,7 @@ static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
l += snprintf(vub300->vub_name + l,
sizeof(vub300->vub_name) - l, "_%04X%04X",
sf->vendor, sf->device);
- };
+ }
snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin");
dev_info(&vub300->udev->dev, "requesting offload firmware %s\n",
vub300->vub_name);
@@ -1893,7 +1893,7 @@ static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300,
i += 1;
continue;
}
- };
+ }
if (vub300->total_offload_count == 0)
return 0;
else if (vub300->fn[func].offload_count == 0)
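/*
 * The `};` -> `}` edits in vub300.c above (and in wbsd.c below) only
 * drop a stray semicolon: after a closing brace it is parsed as a
 * separate empty statement, so removing it changes nothing at run time.
 * Minimal illustration:
 */
#include <stdio.h>

int main(void)
{
	int i = 0;

	while (i < 3) {
		i++;
	};	/* this ';' is a null statement, not part of the loop */

	printf("i = %d\n", i);
	return 0;
}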
diff --git a/kernel/drivers/mmc/host/wbsd.c b/kernel/drivers/mmc/host/wbsd.c
index ca183ea76..c3fd16d99 100644
--- a/kernel/drivers/mmc/host/wbsd.c
+++ b/kernel/drivers/mmc/host/wbsd.c
@@ -809,7 +809,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
cmd->error = -EINVAL;
goto done;
- };
+ }
}
/*