author		Yunhong Jiang <yunhong.jiang@intel.com>	2015-08-04 12:17:53 -0700
committer	Yunhong Jiang <yunhong.jiang@intel.com>	2015-08-04 15:44:42 -0700
commit		9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree		1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/drivers/mmc/core
parent		98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

        Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way and it's not good. We should apply
another opnfv project repo in future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/drivers/mmc/core')
-rw-r--r--	kernel/drivers/mmc/core/Kconfig	13
-rw-r--r--	kernel/drivers/mmc/core/Makefile	12
-rw-r--r--	kernel/drivers/mmc/core/bus.c	385
-rw-r--r--	kernel/drivers/mmc/core/bus.h	31
-rw-r--r--	kernel/drivers/mmc/core/core.c	2753
-rw-r--r--	kernel/drivers/mmc/core/core.h	93
-rw-r--r--	kernel/drivers/mmc/core/debugfs.c	379
-rw-r--r--	kernel/drivers/mmc/core/host.c	596
-rw-r--r--	kernel/drivers/mmc/core/host.h	19
-rw-r--r--	kernel/drivers/mmc/core/mmc.c	1918
-rw-r--r--	kernel/drivers/mmc/core/mmc_ops.c	785
-rw-r--r--	kernel/drivers/mmc/core/mmc_ops.h	32
-rw-r--r--	kernel/drivers/mmc/core/pwrseq.c	120
-rw-r--r--	kernel/drivers/mmc/core/pwrseq.h	45
-rw-r--r--	kernel/drivers/mmc/core/pwrseq_emmc.c	100
-rw-r--r--	kernel/drivers/mmc/core/pwrseq_simple.c	144
-rw-r--r--	kernel/drivers/mmc/core/quirks.c	99
-rw-r--r--	kernel/drivers/mmc/core/sd.c	1281
-rw-r--r--	kernel/drivers/mmc/core/sd.h	16
-rw-r--r--	kernel/drivers/mmc/core/sd_ops.c	395
-rw-r--r--	kernel/drivers/mmc/core/sd_ops.h	25
-rw-r--r--	kernel/drivers/mmc/core/sdio.c	1190
-rw-r--r--	kernel/drivers/mmc/core/sdio_bus.c	343
-rw-r--r--	kernel/drivers/mmc/core/sdio_bus.h	22
-rw-r--r--	kernel/drivers/mmc/core/sdio_cis.c	412
-rw-r--r--	kernel/drivers/mmc/core/sdio_cis.h	23
-rw-r--r--	kernel/drivers/mmc/core/sdio_io.c	722
-rw-r--r--	kernel/drivers/mmc/core/sdio_irq.c	345
-rw-r--r--	kernel/drivers/mmc/core/sdio_ops.c	223
-rw-r--r--	kernel/drivers/mmc/core/sdio_ops.h	23
-rw-r--r--	kernel/drivers/mmc/core/slot-gpio.c	305
-rw-r--r--	kernel/drivers/mmc/core/slot-gpio.h	13
32 files changed, 12862 insertions, 0 deletions
diff --git a/kernel/drivers/mmc/core/Kconfig b/kernel/drivers/mmc/core/Kconfig
new file mode 100644
index 000000000..9ebee72d9
--- /dev/null
+++ b/kernel/drivers/mmc/core/Kconfig
@@ -0,0 +1,13 @@
+#
+# MMC core configuration
+#
+
+config MMC_CLKGATE
+ bool "MMC host clock gating"
+ help
+ This will attempt to aggressively gate the clock to the MMC card.
+ This is done to save power due to gating off the logic and bus
+ noise when the MMC card is not in use. Your host driver has to
+ support handling this in order for it to be of any use.
+
+ If unsure, say N.
diff --git a/kernel/drivers/mmc/core/Makefile b/kernel/drivers/mmc/core/Makefile
new file mode 100644
index 000000000..2c25138f2
--- /dev/null
+++ b/kernel/drivers/mmc/core/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the kernel mmc core.
+#
+
+obj-$(CONFIG_MMC) += mmc_core.o
+mmc_core-y := core.o bus.o host.o \
+ mmc.o mmc_ops.o sd.o sd_ops.o \
+ sdio.o sdio_ops.o sdio_bus.o \
+ sdio_cis.o sdio_io.o sdio_irq.o \
+ quirks.o slot-gpio.o
+mmc_core-$(CONFIG_OF) += pwrseq.o pwrseq_simple.o pwrseq_emmc.o
+mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/kernel/drivers/mmc/core/bus.c b/kernel/drivers/mmc/core/bus.c
new file mode 100644
index 000000000..972ff844c
--- /dev/null
+++ b/kernel/drivers/mmc/core/bus.c
@@ -0,0 +1,385 @@
+/*
+ * linux/drivers/mmc/core/bus.c
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright (C) 2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * MMC card bus driver model
+ */
+
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include "core.h"
+#include "sdio_cis.h"
+#include "bus.h"
+
+#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ switch (card->type) {
+ case MMC_TYPE_MMC:
+ return sprintf(buf, "MMC\n");
+ case MMC_TYPE_SD:
+ return sprintf(buf, "SD\n");
+ case MMC_TYPE_SDIO:
+ return sprintf(buf, "SDIO\n");
+ case MMC_TYPE_SD_COMBO:
+ return sprintf(buf, "SDcombo\n");
+ default:
+ return -EFAULT;
+ }
+}
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *mmc_dev_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mmc_dev);
+
+/*
+ * This currently matches any MMC driver to any MMC card - drivers
+ * themselves make the decision whether to drive this card in their
+ * probe method.
+ */
+static int mmc_bus_match(struct device *dev, struct device_driver *drv)
+{
+ return 1;
+}
+
+static int
+mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ const char *type;
+ int retval = 0;
+
+ switch (card->type) {
+ case MMC_TYPE_MMC:
+ type = "MMC";
+ break;
+ case MMC_TYPE_SD:
+ type = "SD";
+ break;
+ case MMC_TYPE_SDIO:
+ type = "SDIO";
+ break;
+ case MMC_TYPE_SD_COMBO:
+ type = "SDcombo";
+ break;
+ default:
+ type = NULL;
+ }
+
+ if (type) {
+ retval = add_uevent_var(env, "MMC_TYPE=%s", type);
+ if (retval)
+ return retval;
+ }
+
+ retval = add_uevent_var(env, "MMC_NAME=%s", mmc_card_name(card));
+ if (retval)
+ return retval;
+
+ /*
+ * Request the mmc_block device. Note that this is a direct request
+ * for the module; it carries no information as to what is inserted.
+ */
+ retval = add_uevent_var(env, "MODALIAS=mmc:block");
+
+ return retval;
+}
+
+static int mmc_bus_probe(struct device *dev)
+{
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ return drv->probe(card);
+}
+
+static int mmc_bus_remove(struct device *dev)
+{
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ drv->remove(card);
+
+ return 0;
+}
+
+static void mmc_bus_shutdown(struct device *dev)
+{
+ struct mmc_driver *drv = to_mmc_driver(dev->driver);
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+ int ret;
+
+ if (dev->driver && drv->shutdown)
+ drv->shutdown(card);
+
+ if (host->bus_ops->shutdown) {
+ ret = host->bus_ops->shutdown(host);
+ if (ret)
+ pr_warn("%s: error %d during shutdown\n",
+ mmc_hostname(host), ret);
+ }
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mmc_bus_suspend(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+ int ret;
+
+ ret = pm_generic_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = host->bus_ops->suspend(host);
+ return ret;
+}
+
+static int mmc_bus_resume(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+ int ret;
+
+ ret = host->bus_ops->resume(host);
+ if (ret)
+ pr_warn("%s: error %d during resume (card was removed?)\n",
+ mmc_hostname(host), ret);
+
+ ret = pm_generic_resume(dev);
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int mmc_runtime_suspend(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+
+ return host->bus_ops->runtime_suspend(host);
+}
+
+static int mmc_runtime_resume(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+
+ return host->bus_ops->runtime_resume(host);
+}
+#endif /* !CONFIG_PM */
+
+static const struct dev_pm_ops mmc_bus_pm_ops = {
+ SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume)
+};
+
+static struct bus_type mmc_bus_type = {
+ .name = "mmc",
+ .dev_groups = mmc_dev_groups,
+ .match = mmc_bus_match,
+ .uevent = mmc_bus_uevent,
+ .probe = mmc_bus_probe,
+ .remove = mmc_bus_remove,
+ .shutdown = mmc_bus_shutdown,
+ .pm = &mmc_bus_pm_ops,
+};
+
+int mmc_register_bus(void)
+{
+ return bus_register(&mmc_bus_type);
+}
+
+void mmc_unregister_bus(void)
+{
+ bus_unregister(&mmc_bus_type);
+}
+
+/**
+ * mmc_register_driver - register a media driver
+ * @drv: MMC media driver
+ */
+int mmc_register_driver(struct mmc_driver *drv)
+{
+ drv->drv.bus = &mmc_bus_type;
+ return driver_register(&drv->drv);
+}
+
+EXPORT_SYMBOL(mmc_register_driver);
+
+/**
+ * mmc_unregister_driver - unregister a media driver
+ * @drv: MMC media driver
+ */
+void mmc_unregister_driver(struct mmc_driver *drv)
+{
+ drv->drv.bus = &mmc_bus_type;
+ driver_unregister(&drv->drv);
+}
+
+EXPORT_SYMBOL(mmc_unregister_driver);
+
+static void mmc_release_card(struct device *dev)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ sdio_free_common_cis(card);
+
+ kfree(card->info);
+
+ kfree(card);
+}
+
+/*
+ * Allocate and initialise a new MMC card structure.
+ */
+struct mmc_card *mmc_alloc_card(struct mmc_host *host, struct device_type *type)
+{
+ struct mmc_card *card;
+
+ card = kzalloc(sizeof(struct mmc_card), GFP_KERNEL);
+ if (!card)
+ return ERR_PTR(-ENOMEM);
+
+ card->host = host;
+
+ device_initialize(&card->dev);
+
+ card->dev.parent = mmc_classdev(host);
+ card->dev.bus = &mmc_bus_type;
+ card->dev.release = mmc_release_card;
+ card->dev.type = type;
+
+ return card;
+}
+
+/*
+ * Register a new MMC card with the driver model.
+ */
+int mmc_add_card(struct mmc_card *card)
+{
+ int ret;
+ const char *type;
+ const char *uhs_bus_speed_mode = "";
+ static const char *const uhs_speeds[] = {
+ [UHS_SDR12_BUS_SPEED] = "SDR12 ",
+ [UHS_SDR25_BUS_SPEED] = "SDR25 ",
+ [UHS_SDR50_BUS_SPEED] = "SDR50 ",
+ [UHS_SDR104_BUS_SPEED] = "SDR104 ",
+ [UHS_DDR50_BUS_SPEED] = "DDR50 ",
+ };
+
+
+ dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca);
+
+ switch (card->type) {
+ case MMC_TYPE_MMC:
+ type = "MMC";
+ break;
+ case MMC_TYPE_SD:
+ type = "SD";
+ if (mmc_card_blockaddr(card)) {
+ if (mmc_card_ext_capacity(card))
+ type = "SDXC";
+ else
+ type = "SDHC";
+ }
+ break;
+ case MMC_TYPE_SDIO:
+ type = "SDIO";
+ break;
+ case MMC_TYPE_SD_COMBO:
+ type = "SD-combo";
+ if (mmc_card_blockaddr(card))
+ type = "SDHC-combo";
+ break;
+ default:
+ type = "?";
+ break;
+ }
+
+ if (mmc_card_uhs(card) &&
+ (card->sd_bus_speed < ARRAY_SIZE(uhs_speeds)))
+ uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed];
+
+ if (mmc_host_is_spi(card->host)) {
+ pr_info("%s: new %s%s%s card on SPI\n",
+ mmc_hostname(card->host),
+ mmc_card_hs(card) ? "high speed " : "",
+ mmc_card_ddr52(card) ? "DDR " : "",
+ type);
+ } else {
+ pr_info("%s: new %s%s%s%s%s card at address %04x\n",
+ mmc_hostname(card->host),
+ mmc_card_uhs(card) ? "ultra high speed " :
+ (mmc_card_hs(card) ? "high speed " : ""),
+ mmc_card_hs400(card) ? "HS400 " :
+ (mmc_card_hs200(card) ? "HS200 " : ""),
+ mmc_card_ddr52(card) ? "DDR " : "",
+ uhs_bus_speed_mode, type, card->rca);
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ mmc_add_card_debugfs(card);
+#endif
+ mmc_init_context_info(card->host);
+
+ card->dev.of_node = mmc_of_find_child_device(card->host, 0);
+
+ ret = device_add(&card->dev);
+ if (ret)
+ return ret;
+
+ mmc_card_set_present(card);
+
+ return 0;
+}
+
+/*
+ * Unregister an MMC card from the driver model, and
+ * (eventually) free it.
+ */
+void mmc_remove_card(struct mmc_card *card)
+{
+#ifdef CONFIG_DEBUG_FS
+ mmc_remove_card_debugfs(card);
+#endif
+
+ if (mmc_card_present(card)) {
+ if (mmc_host_is_spi(card->host)) {
+ pr_info("%s: SPI card removed\n",
+ mmc_hostname(card->host));
+ } else {
+ pr_info("%s: card %04x removed\n",
+ mmc_hostname(card->host), card->rca);
+ }
+ device_del(&card->dev);
+ of_node_put(card->dev.of_node);
+ }
+
+ put_device(&card->dev);
+}
+
diff --git a/kernel/drivers/mmc/core/bus.h b/kernel/drivers/mmc/core/bus.h
new file mode 100644
index 000000000..00a19710b
--- /dev/null
+++ b/kernel/drivers/mmc/core/bus.h
@@ -0,0 +1,31 @@
+/*
+ * linux/drivers/mmc/core/bus.h
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright 2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _MMC_CORE_BUS_H
+#define _MMC_CORE_BUS_H
+
+#define MMC_DEV_ATTR(name, fmt, args...) \
+static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct mmc_card *card = mmc_dev_to_card(dev); \
+ return sprintf(buf, fmt, args); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
+
+struct mmc_card *mmc_alloc_card(struct mmc_host *host,
+ struct device_type *type);
+int mmc_add_card(struct mmc_card *card);
+void mmc_remove_card(struct mmc_card *card);
+
+int mmc_register_bus(void);
+void mmc_unregister_bus(void);
+
+#endif
+
diff --git a/kernel/drivers/mmc/core/core.c b/kernel/drivers/mmc/core/core.c
new file mode 100644
index 000000000..92e767142
--- /dev/null
+++ b/kernel/drivers/mmc/core/core.c
@@ -0,0 +1,2753 @@
+/*
+ * linux/drivers/mmc/core/core.c
+ *
+ * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
+ * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
+ * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
+ * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+#include <linux/leds.h>
+#include <linux/scatterlist.h>
+#include <linux/log2.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeup.h>
+#include <linux/suspend.h>
+#include <linux/fault-inject.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/slot-gpio.h>
+
+#include "core.h"
+#include "bus.h"
+#include "host.h"
+#include "sdio_bus.h"
+#include "pwrseq.h"
+
+#include "mmc_ops.h"
+#include "sd_ops.h"
+#include "sdio_ops.h"
+
+/* If the device is not responding */
+#define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+
+/*
+ * Background operations can take a long time, depending on the housekeeping
+ * operations the card has to perform.
+ */
+#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
+
+static struct workqueue_struct *workqueue;
+static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
+
+/*
+ * Enabling software CRCs on the data blocks can be a significant (30%)
+ * performance cost, and for other reasons may not always be desired.
+ * So we allow it to be disabled.
+ */
+bool use_spi_crc = 1;
+module_param(use_spi_crc, bool, 0);
+
+/*
+ * Internal function. Schedule delayed work in the MMC work queue.
+ */
+static int mmc_schedule_delayed_work(struct delayed_work *work,
+ unsigned long delay)
+{
+ return queue_delayed_work(workqueue, work, delay);
+}
+
+/*
+ * Internal function. Flush all scheduled work from the MMC work queue.
+ */
+static void mmc_flush_scheduled_work(void)
+{
+ flush_workqueue(workqueue);
+}
+
+#ifdef CONFIG_FAIL_MMC_REQUEST
+
+/*
+ * Internal function. Inject random data errors.
+ * If mmc_data is NULL no errors are injected.
+ */
+static void mmc_should_fail_request(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ static const int data_errors[] = {
+ -ETIMEDOUT,
+ -EILSEQ,
+ -EIO,
+ };
+
+ if (!data)
+ return;
+
+ if (cmd->error || data->error ||
+ !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
+ return;
+
+ data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
+ data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
+}
+
+#else /* CONFIG_FAIL_MMC_REQUEST */
+
+static inline void mmc_should_fail_request(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+}
+
+#endif /* CONFIG_FAIL_MMC_REQUEST */
+
+/**
+ * mmc_request_done - finish processing an MMC request
+ * @host: MMC host which completed request
+ * @mrq: MMC request which completed
+ *
+ * MMC drivers should call this function when they have completed
+ * their processing of a request.
+ */
+void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ struct mmc_command *cmd = mrq->cmd;
+ int err = cmd->error;
+
+ if (err && cmd->retries && mmc_host_is_spi(host)) {
+ if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
+ cmd->retries = 0;
+ }
+
+ if (err && cmd->retries && !mmc_card_removed(host->card)) {
+ /*
+ * Request starter must handle retries - see
+ * mmc_wait_for_req_done().
+ */
+ if (mrq->done)
+ mrq->done(mrq);
+ } else {
+ mmc_should_fail_request(host, mrq);
+
+ led_trigger_event(host->led, LED_OFF);
+
+ if (mrq->sbc) {
+ pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
+ mmc_hostname(host), mrq->sbc->opcode,
+ mrq->sbc->error,
+ mrq->sbc->resp[0], mrq->sbc->resp[1],
+ mrq->sbc->resp[2], mrq->sbc->resp[3]);
+ }
+
+ pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
+ mmc_hostname(host), cmd->opcode, err,
+ cmd->resp[0], cmd->resp[1],
+ cmd->resp[2], cmd->resp[3]);
+
+ if (mrq->data) {
+ pr_debug("%s: %d bytes transferred: %d\n",
+ mmc_hostname(host),
+ mrq->data->bytes_xfered, mrq->data->error);
+ }
+
+ if (mrq->stop) {
+ pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
+ mmc_hostname(host), mrq->stop->opcode,
+ mrq->stop->error,
+ mrq->stop->resp[0], mrq->stop->resp[1],
+ mrq->stop->resp[2], mrq->stop->resp[3]);
+ }
+
+ if (mrq->done)
+ mrq->done(mrq);
+
+ mmc_host_clk_release(host);
+ }
+}
+
+EXPORT_SYMBOL(mmc_request_done);
+
+static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+{
+#ifdef CONFIG_MMC_DEBUG
+ unsigned int i, sz;
+ struct scatterlist *sg;
+#endif
+ if (mmc_card_removed(host->card))
+ return -ENOMEDIUM;
+
+ if (mrq->sbc) {
+ pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
+ mmc_hostname(host), mrq->sbc->opcode,
+ mrq->sbc->arg, mrq->sbc->flags);
+ }
+
+ pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
+ mmc_hostname(host), mrq->cmd->opcode,
+ mrq->cmd->arg, mrq->cmd->flags);
+
+ if (mrq->data) {
+ pr_debug("%s: blksz %d blocks %d flags %08x "
+ "tsac %d ms nsac %d\n",
+ mmc_hostname(host), mrq->data->blksz,
+ mrq->data->blocks, mrq->data->flags,
+ mrq->data->timeout_ns / 1000000,
+ mrq->data->timeout_clks);
+ }
+
+ if (mrq->stop) {
+ pr_debug("%s: CMD%u arg %08x flags %08x\n",
+ mmc_hostname(host), mrq->stop->opcode,
+ mrq->stop->arg, mrq->stop->flags);
+ }
+
+ WARN_ON(!host->claimed);
+
+ mrq->cmd->error = 0;
+ mrq->cmd->mrq = mrq;
+ if (mrq->sbc) {
+ mrq->sbc->error = 0;
+ mrq->sbc->mrq = mrq;
+ }
+ if (mrq->data) {
+ BUG_ON(mrq->data->blksz > host->max_blk_size);
+ BUG_ON(mrq->data->blocks > host->max_blk_count);
+ BUG_ON(mrq->data->blocks * mrq->data->blksz >
+ host->max_req_size);
+
+#ifdef CONFIG_MMC_DEBUG
+ sz = 0;
+ for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
+ sz += sg->length;
+ BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
+#endif
+
+ mrq->cmd->data = mrq->data;
+ mrq->data->error = 0;
+ mrq->data->mrq = mrq;
+ if (mrq->stop) {
+ mrq->data->stop = mrq->stop;
+ mrq->stop->error = 0;
+ mrq->stop->mrq = mrq;
+ }
+ }
+ mmc_host_clk_hold(host);
+ led_trigger_event(host->led, LED_FULL);
+ host->ops->request(host, mrq);
+
+ return 0;
+}
+
+/**
+ * mmc_start_bkops - start BKOPS for supported cards
+ * @card: MMC card to start BKOPS
+ * @from_exception: A flag to indicate if this function was
+ * called due to an exception raised by the card
+ *
+ * Start background operations whenever requested.
+ * When the urgent BKOPS bit is set in a R1 command response
+ * then background operations should be started immediately.
+*/
+void mmc_start_bkops(struct mmc_card *card, bool from_exception)
+{
+ int err;
+ int timeout;
+ bool use_busy_signal;
+
+ BUG_ON(!card);
+
+ if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
+ return;
+
+ err = mmc_read_bkops_status(card);
+ if (err) {
+ pr_err("%s: Failed to read bkops status: %d\n",
+ mmc_hostname(card->host), err);
+ return;
+ }
+
+ if (!card->ext_csd.raw_bkops_status)
+ return;
+
+ if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
+ from_exception)
+ return;
+
+ mmc_claim_host(card->host);
+ if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
+ timeout = MMC_BKOPS_MAX_TIMEOUT;
+ use_busy_signal = true;
+ } else {
+ timeout = 0;
+ use_busy_signal = false;
+ }
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BKOPS_START, 1, timeout,
+ use_busy_signal, true, false);
+ if (err) {
+ pr_warn("%s: Error %d starting bkops\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ /*
+ * For urgent bkops status (LEVEL_2 and higher),
+ * bkops is executed synchronously; otherwise
+ * the operation is left in progress
+ */
+ if (!use_busy_signal)
+ mmc_card_set_doing_bkops(card);
+out:
+ mmc_release_host(card->host);
+}
+EXPORT_SYMBOL(mmc_start_bkops);
+
+/*
+ * mmc_wait_data_done() - done callback for data request
+ * @mrq: done data request
+ *
+ * Wakes up mmc context, passed as a callback to host controller driver
+ */
+static void mmc_wait_data_done(struct mmc_request *mrq)
+{
+ mrq->host->context_info.is_done_rcv = true;
+ wake_up_interruptible(&mrq->host->context_info.wait);
+}
+
+static void mmc_wait_done(struct mmc_request *mrq)
+{
+ complete(&mrq->completion);
+}
+
+/*
+ *__mmc_start_data_req() - starts data request
+ * @host: MMC host to start the request
+ * @mrq: data request to start
+ *
+ * Sets the done callback to be called when request is completed by the card.
+ * Starts data mmc request execution
+ */
+static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int err;
+
+ mrq->done = mmc_wait_data_done;
+ mrq->host = host;
+
+ err = mmc_start_request(host, mrq);
+ if (err) {
+ mrq->cmd->error = err;
+ mmc_wait_data_done(mrq);
+ }
+
+ return err;
+}
+
+static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int err;
+
+ init_completion(&mrq->completion);
+ mrq->done = mmc_wait_done;
+
+ err = mmc_start_request(host, mrq);
+ if (err) {
+ mrq->cmd->error = err;
+ complete(&mrq->completion);
+ }
+
+ return err;
+}
+
+/*
+ * mmc_wait_for_data_req_done() - wait for the request to complete
+ * @host: MMC host to prepare the command.
+ * @mrq: MMC request to wait for
+ *
+ * Blocks the MMC context until the host controller acks the end of data
+ * request execution, or a new request notification arrives from the block layer.
+ * Handles command retries.
+ *
+ * Returns enum mmc_blk_status after checking errors.
+ */
+static int mmc_wait_for_data_req_done(struct mmc_host *host,
+ struct mmc_request *mrq,
+ struct mmc_async_req *next_req)
+{
+ struct mmc_command *cmd;
+ struct mmc_context_info *context_info = &host->context_info;
+ int err;
+ unsigned long flags;
+
+ while (1) {
+ wait_event_interruptible(context_info->wait,
+ (context_info->is_done_rcv ||
+ context_info->is_new_req));
+ spin_lock_irqsave(&context_info->lock, flags);
+ context_info->is_waiting_last_req = false;
+ spin_unlock_irqrestore(&context_info->lock, flags);
+ if (context_info->is_done_rcv) {
+ context_info->is_done_rcv = false;
+ context_info->is_new_req = false;
+ cmd = mrq->cmd;
+
+ if (!cmd->error || !cmd->retries ||
+ mmc_card_removed(host->card)) {
+ err = host->areq->err_check(host->card,
+ host->areq);
+ break; /* return err */
+ } else {
+ pr_info("%s: req failed (CMD%u): %d, retrying...\n",
+ mmc_hostname(host),
+ cmd->opcode, cmd->error);
+ cmd->retries--;
+ cmd->error = 0;
+ host->ops->request(host, mrq);
+ continue; /* wait for done/new event again */
+ }
+ } else if (context_info->is_new_req) {
+ context_info->is_new_req = false;
+ if (!next_req) {
+ err = MMC_BLK_NEW_REQUEST;
+ break; /* return err */
+ }
+ }
+ }
+ return err;
+}
+
+static void mmc_wait_for_req_done(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_command *cmd;
+
+ while (1) {
+ wait_for_completion(&mrq->completion);
+
+ cmd = mrq->cmd;
+
+ /*
+ * If the host has timed out waiting for the sanitize
+ * to complete, the card might still be in the programming state,
+ * so let's try to bring the card out of the programming
+ * state.
+ */
+ if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
+ if (!mmc_interrupt_hpi(host->card)) {
+ pr_warn("%s: %s: Interrupted sanitize\n",
+ mmc_hostname(host), __func__);
+ cmd->error = 0;
+ break;
+ } else {
+ pr_err("%s: %s: Failed to interrupt sanitize\n",
+ mmc_hostname(host), __func__);
+ }
+ }
+ if (!cmd->error || !cmd->retries ||
+ mmc_card_removed(host->card))
+ break;
+
+ pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
+ mmc_hostname(host), cmd->opcode, cmd->error);
+ cmd->retries--;
+ cmd->error = 0;
+ host->ops->request(host, mrq);
+ }
+}
+
+/**
+ * mmc_pre_req - Prepare for a new request
+ * @host: MMC host to prepare command
+ * @mrq: MMC request to prepare for
+ * @is_first_req: true if there is no previous started request
+ * that may run in parallel to this call, otherwise false
+ *
+ * mmc_pre_req() is called prior to mmc_start_req() to let
+ * host prepare for the new request. Preparation of a request may be
+ * performed while another request is running on the host.
+ */
+static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ if (host->ops->pre_req) {
+ mmc_host_clk_hold(host);
+ host->ops->pre_req(host, mrq, is_first_req);
+ mmc_host_clk_release(host);
+ }
+}
+
+/**
+ * mmc_post_req - Post process a completed request
+ * @host: MMC host to post process command
+ * @mrq: MMC request to post process for
+ * @err: Error, if non zero, clean up any resources made in pre_req
+ *
+ * Let the host post process a completed request. Post processing of
+ * a request may be performed while another request is running.
+ */
+static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+ int err)
+{
+ if (host->ops->post_req) {
+ mmc_host_clk_hold(host);
+ host->ops->post_req(host, mrq, err);
+ mmc_host_clk_release(host);
+ }
+}
+
+/**
+ * mmc_start_req - start a non-blocking request
+ * @host: MMC host to start command
+ * @areq: async request to start
+ * @error: out parameter returns 0 for success, otherwise non zero
+ *
+ * Start a new MMC custom command request for a host.
+ * If there is an ongoing async request, wait for completion
+ * of that request, then start the new one and return.
+ * Does not wait for the new request to complete.
+ *
+ * Returns the completed request, or NULL if none completed.
+ * Waits for an ongoing request (previously started) to complete and
+ * returns the completed request. If there is no ongoing request, NULL
+ * is returned without waiting. NULL is not an error condition.
+ */
+struct mmc_async_req *mmc_start_req(struct mmc_host *host,
+ struct mmc_async_req *areq, int *error)
+{
+ int err = 0;
+ int start_err = 0;
+ struct mmc_async_req *data = host->areq;
+
+ /* Prepare a new request */
+ if (areq)
+ mmc_pre_req(host, areq->mrq, !host->areq);
+
+ if (host->areq) {
+ err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
+ if (err == MMC_BLK_NEW_REQUEST) {
+ if (error)
+ *error = err;
+ /*
+ * The previous request was not completed,
+ * nothing to return
+ */
+ return NULL;
+ }
+ /*
+ * Check BKOPS urgency for each R1 response
+ */
+ if (host->card && mmc_card_mmc(host->card) &&
+ ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
+ (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
+ (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
+
+ /* Cancel the prepared request */
+ if (areq)
+ mmc_post_req(host, areq->mrq, -EINVAL);
+
+ mmc_start_bkops(host->card, true);
+
+ /* prepare the request again */
+ if (areq)
+ mmc_pre_req(host, areq->mrq, !host->areq);
+ }
+ }
+
+ if (!err && areq)
+ start_err = __mmc_start_data_req(host, areq->mrq);
+
+ if (host->areq)
+ mmc_post_req(host, host->areq->mrq, 0);
+
+ /* Cancel a prepared request if it was not started. */
+ if ((err || start_err) && areq)
+ mmc_post_req(host, areq->mrq, -EINVAL);
+
+ if (err)
+ host->areq = NULL;
+ else
+ host->areq = areq;
+
+ if (error)
+ *error = err;
+ return data;
+}
+EXPORT_SYMBOL(mmc_start_req);
+
+/**
+ * mmc_wait_for_req - start a request and wait for completion
+ * @host: MMC host to start command
+ * @mrq: MMC request to start
+ *
+ * Start a new MMC custom command request for a host, and wait
+ * for the command to complete. Does not attempt to parse the
+ * response.
+ */
+void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ __mmc_start_req(host, mrq);
+ mmc_wait_for_req_done(host, mrq);
+}
+EXPORT_SYMBOL(mmc_wait_for_req);
+
+/**
+ * mmc_interrupt_hpi - Issue a High Priority Interrupt
+ * @card: the MMC card associated with the HPI transfer
+ *
+ * Issues a High Priority Interrupt, and checks the card status
+ * until it is out of the programming state.
+ */
+int mmc_interrupt_hpi(struct mmc_card *card)
+{
+ int err;
+ u32 status;
+ unsigned long prg_wait;
+
+ BUG_ON(!card);
+
+ if (!card->ext_csd.hpi_en) {
+ pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
+ return 1;
+ }
+
+ mmc_claim_host(card->host);
+ err = mmc_send_status(card, &status);
+ if (err) {
+ pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
+ goto out;
+ }
+
+ switch (R1_CURRENT_STATE(status)) {
+ case R1_STATE_IDLE:
+ case R1_STATE_READY:
+ case R1_STATE_STBY:
+ case R1_STATE_TRAN:
+ /*
+ * In idle and transfer states, HPI is not needed and the caller
+ * can issue the next intended command immediately
+ */
+ goto out;
+ case R1_STATE_PRG:
+ break;
+ default:
+ /* In all other states, it's illegal to issue HPI */
+ pr_debug("%s: HPI cannot be sent. Card state=%d\n",
+ mmc_hostname(card->host), R1_CURRENT_STATE(status));
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mmc_send_hpi_cmd(card, &status);
+ if (err)
+ goto out;
+
+ prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
+ do {
+ err = mmc_send_status(card, &status);
+
+ if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
+ break;
+ if (time_after(jiffies, prg_wait))
+ err = -ETIMEDOUT;
+ } while (!err);
+
+out:
+ mmc_release_host(card->host);
+ return err;
+}
+EXPORT_SYMBOL(mmc_interrupt_hpi);
+
+/**
+ * mmc_wait_for_cmd - start a command and wait for completion
+ * @host: MMC host to start command
+ * @cmd: MMC command to start
+ * @retries: maximum number of retries
+ *
+ * Start a new MMC command for a host, and wait for the command
+ * to complete. Return any error that occurred while the command
+ * was executing. Do not attempt to parse the response.
+ */
+int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
+{
+ struct mmc_request mrq = {NULL};
+
+ WARN_ON(!host->claimed);
+
+ memset(cmd->resp, 0, sizeof(cmd->resp));
+ cmd->retries = retries;
+
+ mrq.cmd = cmd;
+ cmd->data = NULL;
+
+ mmc_wait_for_req(host, &mrq);
+
+ return cmd->error;
+}
+
+EXPORT_SYMBOL(mmc_wait_for_cmd);
+
+/**
+ * mmc_stop_bkops - stop ongoing BKOPS
+ * @card: MMC card to check BKOPS
+ *
+ * Send HPI command to stop ongoing background operations to
+ * allow rapid servicing of foreground operations, e.g. read/
+ * writes. Wait until the card comes out of the programming state
+ * to avoid errors in servicing read/write requests.
+ */
+int mmc_stop_bkops(struct mmc_card *card)
+{
+ int err = 0;
+
+ BUG_ON(!card);
+ err = mmc_interrupt_hpi(card);
+
+ /*
+ * If err is -EINVAL, we can't issue an HPI;
+ * the card should complete the BKOPS on its own.
+ */
+ if (!err || (err == -EINVAL)) {
+ mmc_card_clr_doing_bkops(card);
+ err = 0;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_stop_bkops);
+
+int mmc_read_bkops_status(struct mmc_card *card)
+{
+ int err;
+ u8 *ext_csd;
+
+ mmc_claim_host(card->host);
+ err = mmc_get_ext_csd(card, &ext_csd);
+ mmc_release_host(card->host);
+ if (err)
+ return err;
+
+ card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
+ card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
+ kfree(ext_csd);
+ return 0;
+}
+EXPORT_SYMBOL(mmc_read_bkops_status);
+
+/**
+ * mmc_set_data_timeout - set the timeout for a data command
+ * @data: data phase for command
+ * @card: the MMC card associated with the data transfer
+ *
+ * Computes the data timeout parameters according to the
+ * correct algorithm given the card type.
+ */
+void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
+{
+ unsigned int mult;
+
+ /*
+ * SDIO cards only define an upper 1 s limit on access.
+ */
+ if (mmc_card_sdio(card)) {
+ data->timeout_ns = 1000000000;
+ data->timeout_clks = 0;
+ return;
+ }
+
+ /*
+ * SD cards use a 100 multiplier rather than 10
+ */
+ mult = mmc_card_sd(card) ? 100 : 10;
+
+ /*
+ * Scale up the multiplier (and therefore the timeout) by
+ * the r2w factor for writes.
+ */
+ if (data->flags & MMC_DATA_WRITE)
+ mult <<= card->csd.r2w_factor;
+
+ data->timeout_ns = card->csd.tacc_ns * mult;
+ data->timeout_clks = card->csd.tacc_clks * mult;
+
+ /*
+ * SD cards also have an upper limit on the timeout.
+ */
+ if (mmc_card_sd(card)) {
+ unsigned int timeout_us, limit_us;
+
+ timeout_us = data->timeout_ns / 1000;
+ if (mmc_host_clk_rate(card->host))
+ timeout_us += data->timeout_clks * 1000 /
+ (mmc_host_clk_rate(card->host) / 1000);
+
+ if (data->flags & MMC_DATA_WRITE)
+ /*
+ * The MMC spec says: "It is strongly recommended
+ * for hosts to implement more than 500ms
+ * timeout value even if the card indicates
+ * the 250ms maximum busy length." Even the
+ * previous value of 300ms is known to be
+ * insufficient for some cards.
+ */
+ limit_us = 3000000;
+ else
+ limit_us = 100000;
+
+ /*
+ * SDHC cards always use these fixed values.
+ */
+ if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
+ data->timeout_ns = limit_us * 1000;
+ data->timeout_clks = 0;
+ }
+
+ /* assign limit value if invalid */
+ if (timeout_us == 0)
+ data->timeout_ns = limit_us * 1000;
+ }
+
+ /*
+ * Some cards require longer data read timeout than indicated in CSD.
+ * Address this by setting the read timeout to a "reasonably high"
+ * value. For the cards tested, 300ms has proven enough. If necessary,
+ * this value can be increased further for other problematic cards.
+ */
+ if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+ data->timeout_ns = 300000000;
+ data->timeout_clks = 0;
+ }
+
+ /*
+ * Some cards need very high timeouts if driven in SPI mode.
+ * The worst observed timeout was 900ms after writing a
+ * continuous stream of data until the internal logic
+ * overflowed.
+ */
+ if (mmc_host_is_spi(card->host)) {
+ if (data->flags & MMC_DATA_WRITE) {
+ if (data->timeout_ns < 1000000000)
+ data->timeout_ns = 1000000000; /* 1s */
+ } else {
+ if (data->timeout_ns < 100000000)
+ data->timeout_ns = 100000000; /* 100ms */
+ }
+ }
+}
+EXPORT_SYMBOL(mmc_set_data_timeout);
+
+/**
+ * mmc_align_data_size - pads a transfer size to a more optimal value
+ * @card: the MMC card associated with the data transfer
+ * @sz: original transfer size
+ *
+ * Pads the original data size with a number of extra bytes in
+ * order to avoid controller bugs and/or performance hits
+ * (e.g. some controllers revert to PIO for certain sizes).
+ *
+ * Returns the improved size, which might be unmodified.
+ *
+ * Note that this function is only relevant when issuing a
+ * single scatter gather entry.
+ */
+unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
+{
+ /*
+ * FIXME: We don't have a system for the controller to tell
+ * the core about its problems yet, so for now we just 32-bit
+ * align the size.
+ */
+ sz = ((sz + 3) / 4) * 4;
+
+ return sz;
+}
+EXPORT_SYMBOL(mmc_align_data_size);
+
+/**
+ * __mmc_claim_host - exclusively claim a host
+ * @host: mmc host to claim
+ * @abort: whether or not the operation should be aborted
+ *
+ * Claim a host for a set of operations. If @abort is non-NULL and
+ * points to a non-zero value, then this will return prematurely with
+ * that non-zero value without acquiring the lock. Returns zero
+ * with the lock held otherwise.
+ */
+int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long flags;
+ int stop;
+ bool pm = false;
+
+ might_sleep();
+
+ add_wait_queue(&host->wq, &wait);
+ spin_lock_irqsave(&host->lock, flags);
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ stop = abort ? atomic_read(abort) : 0;
+ if (stop || !host->claimed || host->claimer == current)
+ break;
+ spin_unlock_irqrestore(&host->lock, flags);
+ schedule();
+ spin_lock_irqsave(&host->lock, flags);
+ }
+ set_current_state(TASK_RUNNING);
+ if (!stop) {
+ host->claimed = 1;
+ host->claimer = current;
+ host->claim_cnt += 1;
+ if (host->claim_cnt == 1)
+ pm = true;
+ } else
+ wake_up(&host->wq);
+ spin_unlock_irqrestore(&host->lock, flags);
+ remove_wait_queue(&host->wq, &wait);
+
+ if (pm)
+ pm_runtime_get_sync(mmc_dev(host));
+
+ return stop;
+}
+EXPORT_SYMBOL(__mmc_claim_host);
+
+/**
+ * mmc_release_host - release a host
+ * @host: mmc host to release
+ *
+ * Release an MMC host, allowing others to claim the host
+ * for their operations.
+ */
+void mmc_release_host(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ WARN_ON(!host->claimed);
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (--host->claim_cnt) {
+ /* Release for nested claim */
+ spin_unlock_irqrestore(&host->lock, flags);
+ } else {
+ host->claimed = 0;
+ host->claimer = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
+ wake_up(&host->wq);
+ pm_runtime_mark_last_busy(mmc_dev(host));
+ pm_runtime_put_autosuspend(mmc_dev(host));
+ }
+}
+EXPORT_SYMBOL(mmc_release_host);
+
+/*
+ * This is a helper function, which fetches a runtime pm reference for the
+ * card device and also claims the host.
+ */
+void mmc_get_card(struct mmc_card *card)
+{
+ pm_runtime_get_sync(&card->dev);
+ mmc_claim_host(card->host);
+}
+EXPORT_SYMBOL(mmc_get_card);
+
+/*
+ * This is a helper function, which releases the host and drops the runtime
+ * pm reference for the card device.
+ */
+void mmc_put_card(struct mmc_card *card)
+{
+ mmc_release_host(card->host);
+ pm_runtime_mark_last_busy(&card->dev);
+ pm_runtime_put_autosuspend(&card->dev);
+}
+EXPORT_SYMBOL(mmc_put_card);
+
+/*
+ * Internal function that does the actual ios call to the host driver,
+ * optionally printing some debug output.
+ */
+static inline void mmc_set_ios(struct mmc_host *host)
+{
+ struct mmc_ios *ios = &host->ios;
+
+ pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
+ "width %u timing %u\n",
+ mmc_hostname(host), ios->clock, ios->bus_mode,
+ ios->power_mode, ios->chip_select, ios->vdd,
+ ios->bus_width, ios->timing);
+
+ if (ios->clock > 0)
+ mmc_set_ungated(host);
+ host->ops->set_ios(host, ios);
+}
+
+/*
+ * Control chip select pin on a host.
+ */
+void mmc_set_chip_select(struct mmc_host *host, int mode)
+{
+ mmc_host_clk_hold(host);
+ host->ios.chip_select = mode;
+ mmc_set_ios(host);
+ mmc_host_clk_release(host);
+}
+
+/*
+ * Sets the host clock to the highest possible frequency that
+ * is below "hz".
+ */
+static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+ WARN_ON(hz && hz < host->f_min);
+
+ if (hz > host->f_max)
+ hz = host->f_max;
+
+ host->ios.clock = hz;
+ mmc_set_ios(host);
+}
+
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+ mmc_host_clk_hold(host);
+ __mmc_set_clock(host, hz);
+ mmc_host_clk_release(host);
+}
+
+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clk_old = host->ios.clock;
+ host->ios.clock = 0;
+ host->clk_gated = true;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+ /*
+ * We should previously have gated the clock, so the clock shall
+ * be 0 here! The clock may however be 0 during initialization,
+ * when some request operations are performed before setting
+ * the frequency. When ungate is requested in that situation
+ * we just ignore the call.
+ */
+ if (host->clk_old) {
+ BUG_ON(host->ios.clock);
+ /* This call will also set host->clk_gated to false */
+ __mmc_set_clock(host, host->clk_old);
+ }
+}
+
+void mmc_set_ungated(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ /*
+ * We've been given a new frequency while the clock is gated,
+ * so make sure we regard this as ungating it.
+ */
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clk_gated = false;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+#else
+void mmc_set_ungated(struct mmc_host *host)
+{
+}
+#endif
+
+int mmc_execute_tuning(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 opcode;
+ int err;
+
+ if (!host->ops->execute_tuning)
+ return 0;
+
+ if (mmc_card_mmc(card))
+ opcode = MMC_SEND_TUNING_BLOCK_HS200;
+ else
+ opcode = MMC_SEND_TUNING_BLOCK;
+
+ mmc_host_clk_hold(host);
+ err = host->ops->execute_tuning(host, opcode);
+ mmc_host_clk_release(host);
+
+ if (err)
+ pr_err("%s: tuning execution failed\n", mmc_hostname(host));
+
+ return err;
+}
+
+/*
+ * Change the bus mode (open drain/push-pull) of a host.
+ */
+void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
+{
+ mmc_host_clk_hold(host);
+ host->ios.bus_mode = mode;
+ mmc_set_ios(host);
+ mmc_host_clk_release(host);
+}
+
+/*
+ * Change data bus width of a host.
+ */
+void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
+{
+ mmc_host_clk_hold(host);
+ host->ios.bus_width = width;
+ mmc_set_ios(host);
+ mmc_host_clk_release(host);
+}
+
+/*
+ * Set initial state after a power cycle or a hw_reset.
+ */
+void mmc_set_initial_state(struct mmc_host *host)
+{
+ if (mmc_host_is_spi(host))
+ host->ios.chip_select = MMC_CS_HIGH;
+ else
+ host->ios.chip_select = MMC_CS_DONTCARE;
+ host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+ host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
+
+ mmc_set_ios(host);
+}
+
+/**
+ * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
+ * @vdd: voltage (mV)
+ * @low_bits: prefer low bits in boundary cases
+ *
+ * This function returns the OCR bit number according to the provided @vdd
+ * value. If conversion is not possible, a negative errno value is returned.
+ *
+ * Depending on the @low_bits flag the function prefers low or high OCR bits
+ * on boundary voltages. For example,
+ * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
+ * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
+ *
+ * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
+ */
+static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
+{
+ const int max_bit = ilog2(MMC_VDD_35_36);
+ int bit;
+
+ if (vdd < 1650 || vdd > 3600)
+ return -EINVAL;
+
+ if (vdd >= 1650 && vdd <= 1950)
+ return ilog2(MMC_VDD_165_195);
+
+ if (low_bits)
+ vdd -= 1;
+
+ /* Base 2000 mV, step 100 mV, bit's base 8. */
+ bit = (vdd - 2000) / 100 + 8;
+ if (bit > max_bit)
+ return max_bit;
+ return bit;
+}
+
+/**
+ * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
+ * @vdd_min: minimum voltage value (mV)
+ * @vdd_max: maximum voltage value (mV)
+ *
+ * This function returns the OCR mask bits according to the provided @vdd_min
+ * and @vdd_max values. If conversion is not possible the function returns 0.
+ *
+ * Notes wrt boundary cases:
+ * This function sets the OCR bits for all boundary voltages, for example
+ * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
+ * MMC_VDD_34_35 mask.
+ */
+u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
+{
+ u32 mask = 0;
+
+ if (vdd_max < vdd_min)
+ return 0;
+
+ /* Prefer high bits for the boundary vdd_max values. */
+ vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
+ if (vdd_max < 0)
+ return 0;
+
+ /* Prefer low bits for the boundary vdd_min values. */
+ vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
+ if (vdd_min < 0)
+ return 0;
+
+ /* Fill the mask, from max bit to min bit. */
+ while (vdd_max >= vdd_min)
+ mask |= 1 << vdd_max--;
+
+ return mask;
+}
+EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
+
+#ifdef CONFIG_OF
+
+/**
+ * mmc_of_parse_voltage - return mask of supported voltages
+ * @np: The device node to be parsed.
+ * @mask: mask of voltages available for MMC/SD/SDIO
+ *
+ * 1. Return zero on success.
+ * 2. Return negative errno: voltage-range is invalid.
+ */
+int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
+{
+ const u32 *voltage_ranges;
+ int num_ranges, i;
+
+ voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
+ num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
+ if (!voltage_ranges || !num_ranges) {
+ pr_info("%s: voltage-ranges unspecified\n", np->full_name);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_ranges; i++) {
+ const int j = i * 2;
+ u32 ocr_mask;
+
+ ocr_mask = mmc_vddrange_to_ocrmask(
+ be32_to_cpu(voltage_ranges[j]),
+ be32_to_cpu(voltage_ranges[j + 1]));
+ if (!ocr_mask) {
+ pr_err("%s: voltage-range #%d is invalid\n",
+ np->full_name, i);
+ return -EINVAL;
+ }
+ *mask |= ocr_mask;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_of_parse_voltage);
+
+#endif /* CONFIG_OF */
+
+static int mmc_of_get_func_num(struct device_node *node)
+{
+ u32 reg;
+ int ret;
+
+ ret = of_property_read_u32(node, "reg", &reg);
+ if (ret < 0)
+ return ret;
+
+ return reg;
+}
+
+struct device_node *mmc_of_find_child_device(struct mmc_host *host,
+ unsigned func_num)
+{
+ struct device_node *node;
+
+ if (!host->parent || !host->parent->of_node)
+ return NULL;
+
+ for_each_child_of_node(host->parent->of_node, node) {
+ if (mmc_of_get_func_num(node) == func_num)
+ return node;
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_REGULATOR
+
+/**
+ * mmc_regulator_get_ocrmask - return mask of supported voltages
+ * @supply: regulator to use
+ *
+ * This returns either a negative errno, or a mask of voltages that
+ * can be provided to MMC/SD/SDIO devices using the specified voltage
+ * regulator. This would normally be called before registering the
+ * MMC host adapter.
+ */
+int mmc_regulator_get_ocrmask(struct regulator *supply)
+{
+ int result = 0;
+ int count;
+ int i;
+ int vdd_uV;
+ int vdd_mV;
+
+ count = regulator_count_voltages(supply);
+ if (count < 0)
+ return count;
+
+ for (i = 0; i < count; i++) {
+ vdd_uV = regulator_list_voltage(supply, i);
+ if (vdd_uV <= 0)
+ continue;
+
+ vdd_mV = vdd_uV / 1000;
+ result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
+ }
+
+ if (!result) {
+ vdd_uV = regulator_get_voltage(supply);
+ if (vdd_uV <= 0)
+ return vdd_uV;
+
+ vdd_mV = vdd_uV / 1000;
+ result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
+ }
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
+
+/**
+ * mmc_regulator_set_ocr - set regulator to match host->ios voltage
+ * @mmc: the host to regulate
+ * @supply: regulator to use
+ * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * MMC host drivers may use this to enable or disable a regulator using
+ * a particular supply voltage. This would normally be called from the
+ * set_ios() method.
+ */
+int mmc_regulator_set_ocr(struct mmc_host *mmc,
+ struct regulator *supply,
+ unsigned short vdd_bit)
+{
+ int result = 0;
+ int min_uV, max_uV;
+
+ if (vdd_bit) {
+ int tmp;
+
+ /*
+ * REVISIT mmc_vddrange_to_ocrmask() may have set some
+ * bits this regulator doesn't quite support ... don't
+ * be too picky, most cards and regulators are OK with
+ * a 0.1V range goof (it's a small error percentage).
+ */
+ tmp = vdd_bit - ilog2(MMC_VDD_165_195);
+ if (tmp == 0) {
+ min_uV = 1650 * 1000;
+ max_uV = 1950 * 1000;
+ } else {
+ min_uV = 1900 * 1000 + tmp * 100 * 1000;
+ max_uV = min_uV + 100 * 1000;
+ }
+
+ result = regulator_set_voltage(supply, min_uV, max_uV);
+ if (result == 0 && !mmc->regulator_enabled) {
+ result = regulator_enable(supply);
+ if (!result)
+ mmc->regulator_enabled = true;
+ }
+ } else if (mmc->regulator_enabled) {
+ result = regulator_disable(supply);
+ if (result == 0)
+ mmc->regulator_enabled = false;
+ }
+
+ if (result)
+ dev_err(mmc_dev(mmc),
+ "could not set regulator OCR (%d)\n", result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
+
+#endif /* CONFIG_REGULATOR */
+
+int mmc_regulator_get_supply(struct mmc_host *mmc)
+{
+ struct device *dev = mmc_dev(mmc);
+ int ret;
+
+ mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
+ mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
+
+ if (IS_ERR(mmc->supply.vmmc)) {
+ if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "No vmmc regulator found\n");
+ } else {
+ ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
+ if (ret > 0)
+ mmc->ocr_avail = ret;
+ else
+ dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
+ }
+
+ if (IS_ERR(mmc->supply.vqmmc)) {
+ if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "No vqmmc regulator found\n");
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
+
+/*
+ * Mask off any voltages we don't support and select
+ * the lowest voltage
+ */
+u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
+{
+ int bit;
+
+ /*
+ * Sanity check the voltages that the card claims to
+ * support.
+ */
+ if (ocr & 0x7F) {
+ dev_warn(mmc_dev(host),
+ "card claims to support voltages below defined range\n");
+ ocr &= ~0x7F;
+ }
+
+ ocr &= host->ocr_avail;
+ if (!ocr) {
+ dev_warn(mmc_dev(host), "no support for card's volts\n");
+ return 0;
+ }
+
+ if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
+ bit = ffs(ocr) - 1;
+ ocr &= 3 << bit;
+ mmc_power_cycle(host, ocr);
+ } else {
+ bit = fls(ocr) - 1;
+ ocr &= 3 << bit;
+ if (bit != host->ios.vdd)
+ dev_warn(mmc_dev(host), "exceeding card's volts\n");
+ }
+
+ return ocr;
+}
+
+int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
+{
+ int err = 0;
+ int old_signal_voltage = host->ios.signal_voltage;
+
+ host->ios.signal_voltage = signal_voltage;
+ if (host->ops->start_signal_voltage_switch) {
+ mmc_host_clk_hold(host);
+ err = host->ops->start_signal_voltage_switch(host, &host->ios);
+ mmc_host_clk_release(host);
+ }
+
+ if (err)
+ host->ios.signal_voltage = old_signal_voltage;
+
+ return err;
+
+}
+
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
+{
+ struct mmc_command cmd = {0};
+ int err = 0;
+ u32 clock;
+
+ BUG_ON(!host);
+
+ /*
+ * Send CMD11 only if the request is to switch the card to
+ * 1.8V signalling.
+ */
+ if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ return __mmc_set_signal_voltage(host, signal_voltage);
+
+ /*
+ * If we cannot switch voltages, return failure so the caller
+ * can continue without UHS mode
+ */
+ if (!host->ops->start_signal_voltage_switch)
+ return -EPERM;
+ if (!host->ops->card_busy)
+ pr_warn("%s: cannot verify signal voltage switch\n",
+ mmc_hostname(host));
+
+ mmc_host_clk_hold(host);
+
+ cmd.opcode = SD_SWITCH_VOLTAGE;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ goto err_command;
+
+ if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
+ err = -EIO;
+ goto err_command;
+ }
+ /*
+ * The card should drive cmd and dat[0:3] low immediately
+ * after the response of cmd11, but wait 1 ms to be sure
+ */
+ mmc_delay(1);
+ if (host->ops->card_busy && !host->ops->card_busy(host)) {
+ err = -EAGAIN;
+ goto power_cycle;
+ }
+ /*
+ * During a signal voltage level switch, the clock must be gated
+ * for 5 ms according to the SD spec
+ */
+ clock = host->ios.clock;
+ host->ios.clock = 0;
+ mmc_set_ios(host);
+
+ if (__mmc_set_signal_voltage(host, signal_voltage)) {
+ /*
+ * Voltages may not have been switched, but we've already
+ * sent CMD11, so a power cycle is required anyway
+ */
+ err = -EAGAIN;
+ goto power_cycle;
+ }
+
+ /* Keep clock gated for at least 5 ms */
+ mmc_delay(5);
+ host->ios.clock = clock;
+ mmc_set_ios(host);
+
+ /* Wait for at least 1 ms according to spec */
+ mmc_delay(1);
+
+ /*
+ * Failure to switch is indicated by the card holding
+ * dat[0:3] low
+ */
+ if (host->ops->card_busy && host->ops->card_busy(host))
+ err = -EAGAIN;
+
+power_cycle:
+ if (err) {
+ pr_debug("%s: Signal voltage switch failed, "
+ "power cycling card\n", mmc_hostname(host));
+ mmc_power_cycle(host, ocr);
+ }
+
+err_command:
+ mmc_host_clk_release(host);
+
+ return err;
+}
+
+/*
+ * Select timing parameters for host.
+ */
+void mmc_set_timing(struct mmc_host *host, unsigned int timing)
+{
+ mmc_host_clk_hold(host);
+ host->ios.timing = timing;
+ mmc_set_ios(host);
+ mmc_host_clk_release(host);
+}
+
+/*
+ * Select appropriate driver type for host.
+ */
+void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
+{
+ mmc_host_clk_hold(host);
+ host->ios.drv_type = drv_type;
+ mmc_set_ios(host);
+ mmc_host_clk_release(host);
+}
+
+/*
+ * Apply power to the MMC stack. This is a two-stage process.
+ * First, we enable power to the card without the clock running.
+ * We then wait a bit for the power to stabilise. Finally,
+ * enable the bus drivers and clock to the card.
+ *
+ * We must _NOT_ enable the clock prior to power stabilising.
+ *
+ * If a host does all the power sequencing itself, ignore the
+ * initial MMC_POWER_UP stage.
+ */
+void mmc_power_up(struct mmc_host *host, u32 ocr)
+{
+ if (host->ios.power_mode == MMC_POWER_ON)
+ return;
+
+ mmc_host_clk_hold(host);
+
+ mmc_pwrseq_pre_power_on(host);
+
+ host->ios.vdd = fls(ocr) - 1;
+ host->ios.power_mode = MMC_POWER_UP;
+ /* Set initial state and call mmc_set_ios */
+ mmc_set_initial_state(host);
+
+ /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
+ if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
+ dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
+ else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
+ dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
+ else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
+ dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
+
+ /*
+ * This delay should be sufficient to allow the power supply
+ * to reach the minimum voltage.
+ */
+ mmc_delay(10);
+
+ mmc_pwrseq_post_power_on(host);
+
+ host->ios.clock = host->f_init;
+
+ host->ios.power_mode = MMC_POWER_ON;
+ mmc_set_ios(host);
+
+ /*
+ * This delay must be at least 74 clock cycles, or 1 ms, or the
+ * time required to reach a stable voltage.
+ */
+ mmc_delay(10);
+
+ mmc_host_clk_release(host);
+}
+
+void mmc_power_off(struct mmc_host *host)
+{
+ if (host->ios.power_mode == MMC_POWER_OFF)
+ return;
+
+ mmc_host_clk_hold(host);
+
+ mmc_pwrseq_power_off(host);
+
+ host->ios.clock = 0;
+ host->ios.vdd = 0;
+
+ host->ios.power_mode = MMC_POWER_OFF;
+ /* Set initial state and call mmc_set_ios */
+ mmc_set_initial_state(host);
+
+ /*
+ * Some configurations, such as the 802.11 SDIO card in the OLPC
+ * XO-1.5, require a short delay after poweroff before the card
+ * can be successfully turned on again.
+ */
+ mmc_delay(1);
+
+ mmc_host_clk_release(host);
+}
+
+void mmc_power_cycle(struct mmc_host *host, u32 ocr)
+{
+ mmc_power_off(host);
+ /* Wait at least 1 ms according to SD spec */
+ mmc_delay(1);
+ mmc_power_up(host, ocr);
+}
+
+/*
+ * Cleanup when the last reference to the bus operator is dropped.
+ */
+static void __mmc_release_bus(struct mmc_host *host)
+{
+ BUG_ON(!host);
+ BUG_ON(host->bus_refs);
+ BUG_ON(!host->bus_dead);
+
+ host->bus_ops = NULL;
+}
+
+/*
+ * Increase reference count of bus operator
+ */
+static inline void mmc_bus_get(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->bus_refs++;
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * Decrease reference count of bus operator and free it if
+ * it is the last reference.
+ */
+static inline void mmc_bus_put(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->bus_refs--;
+ if ((host->bus_refs == 0) && host->bus_ops)
+ __mmc_release_bus(host);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * Assign an mmc bus handler to a host. Only one bus handler may control a
+ * host at any given time.
+ */
+void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
+{
+ unsigned long flags;
+
+ BUG_ON(!host);
+ BUG_ON(!ops);
+
+ WARN_ON(!host->claimed);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ BUG_ON(host->bus_ops);
+ BUG_ON(host->bus_refs);
+
+ host->bus_ops = ops;
+ host->bus_refs = 1;
+ host->bus_dead = 0;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * Remove the current bus handler from a host.
+ */
+void mmc_detach_bus(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ BUG_ON(!host);
+
+ WARN_ON(!host->claimed);
+ WARN_ON(!host->bus_ops);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->bus_dead = 1;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mmc_bus_put(host);
+}
+
+static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
+ bool cd_irq)
+{
+#ifdef CONFIG_MMC_DEBUG
+ unsigned long flags;
+ spin_lock_irqsave(&host->lock, flags);
+ WARN_ON(host->removed);
+ spin_unlock_irqrestore(&host->lock, flags);
+#endif
+
+	/*
+	 * If the device is configured as a wakeup source, block a new sleep
+	 * for 5 s to give user space a chance to consume the event.
+	 */
+ if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
+ device_can_wakeup(mmc_dev(host)))
+ pm_wakeup_event(mmc_dev(host), 5000);
+
+ host->detect_change = 1;
+ mmc_schedule_delayed_work(&host->detect, delay);
+}
+
+/**
+ *	mmc_detect_change - process change of state on an MMC socket
+ * @host: host which changed state.
+ * @delay: optional delay to wait before detection (jiffies)
+ *
+ * MMC drivers should call this when they detect a card has been
+ * inserted or removed. The MMC layer will confirm that any
+ * present card is still functional, and initialize any newly
+ *	inserted card.
+ */
+void mmc_detect_change(struct mmc_host *host, unsigned long delay)
+{
+ _mmc_detect_change(host, delay, true);
+}
+EXPORT_SYMBOL(mmc_detect_change);
+
+void mmc_init_erase(struct mmc_card *card)
+{
+ unsigned int sz;
+
+ if (is_power_of_2(card->erase_size))
+ card->erase_shift = ffs(card->erase_size) - 1;
+ else
+ card->erase_shift = 0;
+
+ /*
+ * It is possible to erase an arbitrarily large area of an SD or MMC
+ * card. That is not desirable because it can take a long time
+ * (minutes) potentially delaying more important I/O, and also the
+	 * timeout calculations become increasingly over-estimated.
+ * Consequently, 'pref_erase' is defined as a guide to limit erases
+ * to that size and alignment.
+ *
+ * For SD cards that define Allocation Unit size, limit erases to one
+ * Allocation Unit at a time. For MMC cards that define High Capacity
+ * Erase Size, whether it is switched on or not, limit to that size.
+ * Otherwise just have a stab at a good value. For modern cards it
+ * will end up being 4MiB. Note that if the value is too small, it
+ * can end up taking longer to erase.
+ */
+ if (mmc_card_sd(card) && card->ssr.au) {
+ card->pref_erase = card->ssr.au;
+ card->erase_shift = ffs(card->ssr.au) - 1;
+ } else if (card->ext_csd.hc_erase_size) {
+ card->pref_erase = card->ext_csd.hc_erase_size;
+ } else if (card->erase_size) {
+ sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
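+		/* sz is now the card size in MiB */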
+ if (sz < 128)
+ card->pref_erase = 512 * 1024 / 512;
+ else if (sz < 512)
+ card->pref_erase = 1024 * 1024 / 512;
+ else if (sz < 1024)
+ card->pref_erase = 2 * 1024 * 1024 / 512;
+ else
+ card->pref_erase = 4 * 1024 * 1024 / 512;
+ if (card->pref_erase < card->erase_size)
+ card->pref_erase = card->erase_size;
+ else {
+ sz = card->pref_erase % card->erase_size;
+ if (sz)
+ card->pref_erase += card->erase_size - sz;
+ }
+ } else
+ card->pref_erase = 0;
+}
+
+static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
+ unsigned int arg, unsigned int qty)
+{
+ unsigned int erase_timeout;
+
+ if (arg == MMC_DISCARD_ARG ||
+ (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
+ erase_timeout = card->ext_csd.trim_timeout;
+ } else if (card->ext_csd.erase_group_def & 1) {
+ /* High Capacity Erase Group Size uses HC timeouts */
+ if (arg == MMC_TRIM_ARG)
+ erase_timeout = card->ext_csd.trim_timeout;
+ else
+ erase_timeout = card->ext_csd.hc_erase_timeout;
+ } else {
+ /* CSD Erase Group Size uses write timeout */
+ unsigned int mult = (10 << card->csd.r2w_factor);
+ unsigned int timeout_clks = card->csd.tacc_clks * mult;
+ unsigned int timeout_us;
+
+ /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
+ if (card->csd.tacc_ns < 1000000)
+ timeout_us = (card->csd.tacc_ns * mult) / 1000;
+ else
+ timeout_us = (card->csd.tacc_ns / 1000) * mult;
+
+ /*
+ * ios.clock is only a target. The real clock rate might be
+ * less but not that much less, so fudge it by multiplying by 2.
+ */
+ timeout_clks <<= 1;
+ timeout_us += (timeout_clks * 1000) /
+ (mmc_host_clk_rate(card->host) / 1000);
+
+ erase_timeout = timeout_us / 1000;
+
+ /*
+ * Theoretically, the calculation could underflow so round up
+ * to 1ms in that case.
+ */
+ if (!erase_timeout)
+ erase_timeout = 1;
+ }
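+
+	/*
+	 * For example, with tacc_ns = 300000, tacc_clks = 1000 and
+	 * r2w_factor = 2 (mult = 40) on a 25 MHz clock, this works out to
+	 * 12000 us + 3200 us, i.e. roughly 15 ms per erase group.
+	 */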
+
+ /* Multiplier for secure operations */
+ if (arg & MMC_SECURE_ARGS) {
+ if (arg == MMC_SECURE_ERASE_ARG)
+ erase_timeout *= card->ext_csd.sec_erase_mult;
+ else
+ erase_timeout *= card->ext_csd.sec_trim_mult;
+ }
+
+ erase_timeout *= qty;
+
+ /*
+ * Ensure at least a 1 second timeout for SPI as per
+ * 'mmc_set_data_timeout()'
+ */
+ if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
+ erase_timeout = 1000;
+
+ return erase_timeout;
+}
+
+static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
+ unsigned int arg,
+ unsigned int qty)
+{
+ unsigned int erase_timeout;
+
+ if (card->ssr.erase_timeout) {
+ /* Erase timeout specified in SD Status Register (SSR) */
+ erase_timeout = card->ssr.erase_timeout * qty +
+ card->ssr.erase_offset;
+ } else {
+ /*
+ * Erase timeout not specified in SD Status Register (SSR) so
+ * use 250ms per write block.
+ */
+ erase_timeout = 250 * qty;
+ }
+
+ /* Must not be less than 1 second */
+ if (erase_timeout < 1000)
+ erase_timeout = 1000;
+
+ return erase_timeout;
+}
+
+static unsigned int mmc_erase_timeout(struct mmc_card *card,
+ unsigned int arg,
+ unsigned int qty)
+{
+ if (mmc_card_sd(card))
+ return mmc_sd_erase_timeout(card, arg, qty);
+ else
+ return mmc_mmc_erase_timeout(card, arg, qty);
+}
+
+static int mmc_do_erase(struct mmc_card *card, unsigned int from,
+ unsigned int to, unsigned int arg)
+{
+ struct mmc_command cmd = {0};
+ unsigned int qty = 0;
+ unsigned long timeout;
+ int err;
+
+ /*
+ * qty is used to calculate the erase timeout which depends on how many
+ * erase groups (or allocation units in SD terminology) are affected.
+ * We count erasing part of an erase group as one erase group.
+ * For SD, the allocation units are always a power of 2. For MMC, the
+	 * erase group size is almost certainly also a power of 2, but the
+	 * JEDEC standard does not seem to insist on that, so we fall back to
+ * division in that case. SD may not specify an allocation unit size,
+ * in which case the timeout is based on the number of write blocks.
+ *
+ * Note that the timeout for secure trim 2 will only be correct if the
+ * number of erase groups specified is the same as the total of all
+ * preceding secure trim 1 commands. Since the power may have been
+ * lost since the secure trim 1 commands occurred, it is generally
+ * impossible to calculate the secure trim 2 timeout correctly.
+ */
+ if (card->erase_shift)
+ qty += ((to >> card->erase_shift) -
+ (from >> card->erase_shift)) + 1;
+ else if (mmc_card_sd(card))
+ qty += to - from + 1;
+ else
+ qty += ((to / card->erase_size) -
+ (from / card->erase_size)) + 1;
+
+ if (!mmc_card_blockaddr(card)) {
+ from <<= 9;
+ to <<= 9;
+ }
+
+ if (mmc_card_sd(card))
+ cmd.opcode = SD_ERASE_WR_BLK_START;
+ else
+ cmd.opcode = MMC_ERASE_GROUP_START;
+ cmd.arg = from;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err) {
+ pr_err("mmc_erase: group start error %d, "
+ "status %#x\n", err, cmd.resp[0]);
+ err = -EIO;
+ goto out;
+ }
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ if (mmc_card_sd(card))
+ cmd.opcode = SD_ERASE_WR_BLK_END;
+ else
+ cmd.opcode = MMC_ERASE_GROUP_END;
+ cmd.arg = to;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err) {
+ pr_err("mmc_erase: group end error %d, status %#x\n",
+ err, cmd.resp[0]);
+ err = -EIO;
+ goto out;
+ }
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ cmd.opcode = MMC_ERASE;
+ cmd.arg = arg;
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err) {
+ pr_err("mmc_erase: erase error %d, status %#x\n",
+ err, cmd.resp[0]);
+ err = -EIO;
+ goto out;
+ }
+
+ if (mmc_host_is_spi(card->host))
+ goto out;
+
+ timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
+ do {
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ cmd.opcode = MMC_SEND_STATUS;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ /* Do not retry else we can't see errors */
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err || (cmd.resp[0] & 0xFDF92000)) {
+ pr_err("error %d requesting status %#x\n",
+ err, cmd.resp[0]);
+ err = -EIO;
+ goto out;
+ }
+
+ /* Timeout if the device never becomes ready for data and
+ * never leaves the program state.
+ */
+ if (time_after(jiffies, timeout)) {
+ pr_err("%s: Card stuck in programming state! %s\n",
+ mmc_hostname(card->host), __func__);
+ err = -EIO;
+ goto out;
+ }
+
+ } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
+out:
+ return err;
+}
+
+/**
+ * mmc_erase - erase sectors.
+ * @card: card to erase
+ * @from: first sector to erase
+ * @nr: number of sectors to erase
+ * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
+ *
+ * Caller must claim host before calling this function.
+ */
+int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
+ unsigned int arg)
+{
+ unsigned int rem, to = from + nr;
+
+ if (!(card->host->caps & MMC_CAP_ERASE) ||
+ !(card->csd.cmdclass & CCC_ERASE))
+ return -EOPNOTSUPP;
+
+ if (!card->erase_size)
+ return -EOPNOTSUPP;
+
+ if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
+ return -EOPNOTSUPP;
+
+ if ((arg & MMC_SECURE_ARGS) &&
+ !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
+ return -EOPNOTSUPP;
+
+ if ((arg & MMC_TRIM_ARGS) &&
+ !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
+ return -EOPNOTSUPP;
+
+ if (arg == MMC_SECURE_ERASE_ARG) {
+ if (from % card->erase_size || nr % card->erase_size)
+ return -EINVAL;
+ }
+
+ if (arg == MMC_ERASE_ARG) {
+ rem = from % card->erase_size;
+ if (rem) {
+ rem = card->erase_size - rem;
+ from += rem;
+ if (nr > rem)
+ nr -= rem;
+ else
+ return 0;
+ }
+ rem = nr % card->erase_size;
+ if (rem)
+ nr -= rem;
+ }
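+
+	/*
+	 * For example, with erase_size = 1024 and MMC_ERASE_ARG, a request
+	 * for from = 1000, nr = 5000 is rounded in to from = 1024, nr = 4096
+	 * so that only whole erase groups are erased.
+	 */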
+
+ if (nr == 0)
+ return 0;
+
+ to = from + nr;
+
+ if (to <= from)
+ return -EINVAL;
+
+ /* 'from' and 'to' are inclusive */
+ to -= 1;
+
+ return mmc_do_erase(card, from, to, arg);
+}
+EXPORT_SYMBOL(mmc_erase);
+
+int mmc_can_erase(struct mmc_card *card)
+{
+ if ((card->host->caps & MMC_CAP_ERASE) &&
+ (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_erase);
+
+int mmc_can_trim(struct mmc_card *card)
+{
+ if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_trim);
+
+int mmc_can_discard(struct mmc_card *card)
+{
+ /*
+	 * As there's no way to detect the discard support bit in eMMC v4.5,
+	 * use the s/w feature support field.
+ */
+ if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_discard);
+
+int mmc_can_sanitize(struct mmc_card *card)
+{
+ if (!mmc_can_trim(card) && !mmc_can_erase(card))
+ return 0;
+ if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_sanitize);
+
+int mmc_can_secure_erase_trim(struct mmc_card *card)
+{
+ if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
+ !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_secure_erase_trim);
+
+int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
+ unsigned int nr)
+{
+ if (!card->erase_size)
+ return 0;
+ if (from % card->erase_size || nr % card->erase_size)
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL(mmc_erase_group_aligned);
+
+static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
+ unsigned int arg)
+{
+ struct mmc_host *host = card->host;
+ unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
+ unsigned int last_timeout = 0;
+
+ if (card->erase_shift)
+ max_qty = UINT_MAX >> card->erase_shift;
+ else if (mmc_card_sd(card))
+ max_qty = UINT_MAX;
+ else
+ max_qty = UINT_MAX / card->erase_size;
+
+ /* Find the largest qty with an OK timeout */
+ do {
+ y = 0;
+ for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
+ timeout = mmc_erase_timeout(card, arg, qty + x);
+ if (timeout > host->max_busy_timeout)
+ break;
+ if (timeout < last_timeout)
+ break;
+ last_timeout = timeout;
+ y = x;
+ }
+ qty += y;
+ } while (y);
+
+ if (!qty)
+ return 0;
+
+ if (qty == 1)
+ return 1;
+
+ /* Convert qty to sectors */
+ if (card->erase_shift)
+ max_discard = --qty << card->erase_shift;
+ else if (mmc_card_sd(card))
+ max_discard = qty;
+ else
+ max_discard = --qty * card->erase_size;
+
+ return max_discard;
+}
+
+unsigned int mmc_calc_max_discard(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ unsigned int max_discard, max_trim;
+
+ if (!host->max_busy_timeout)
+ return UINT_MAX;
+
+ /*
+	 * Without erase_group_def set, MMC erase timeout depends on the clock
+	 * frequency, which can change.  In that case, the best choice is
+ * just the preferred erase size.
+ */
+ if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
+ return card->pref_erase;
+
+ max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
+ if (mmc_can_trim(card)) {
+ max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
+ if (max_trim < max_discard)
+ max_discard = max_trim;
+ } else if (max_discard < card->erase_size) {
+ max_discard = 0;
+ }
+ pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
+ mmc_hostname(host), max_discard, host->max_busy_timeout);
+ return max_discard;
+}
+EXPORT_SYMBOL(mmc_calc_max_discard);
+
+int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
+{
+ struct mmc_command cmd = {0};
+
+ if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
+ return 0;
+
+ cmd.opcode = MMC_SET_BLOCKLEN;
+ cmd.arg = blocklen;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ return mmc_wait_for_cmd(card->host, &cmd, 5);
+}
+EXPORT_SYMBOL(mmc_set_blocklen);
+
+int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
+ bool is_rel_write)
+{
+ struct mmc_command cmd = {0};
+
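+	/* CMD23: block count in bits [15:0]; bit 31 requests a reliable write */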
+ cmd.opcode = MMC_SET_BLOCK_COUNT;
+ cmd.arg = blockcount & 0x0000FFFF;
+ if (is_rel_write)
+ cmd.arg |= 1 << 31;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ return mmc_wait_for_cmd(card->host, &cmd, 5);
+}
+EXPORT_SYMBOL(mmc_set_blockcount);
+
+static void mmc_hw_reset_for_init(struct mmc_host *host)
+{
+ if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
+ return;
+ mmc_host_clk_hold(host);
+ host->ops->hw_reset(host);
+ mmc_host_clk_release(host);
+}
+
+int mmc_hw_reset(struct mmc_host *host)
+{
+ int ret;
+
+ if (!host->card)
+ return -EINVAL;
+
+ mmc_bus_get(host);
+ if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
+ mmc_bus_put(host);
+ return -EOPNOTSUPP;
+ }
+
+ ret = host->bus_ops->reset(host);
+ mmc_bus_put(host);
+
+ pr_warn("%s: tried to reset card\n", mmc_hostname(host));
+
+ return ret;
+}
+EXPORT_SYMBOL(mmc_hw_reset);
+
+static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
+{
+ host->f_init = freq;
+
+#ifdef CONFIG_MMC_DEBUG
+ pr_info("%s: %s: trying to init card at %u Hz\n",
+ mmc_hostname(host), __func__, host->f_init);
+#endif
+ mmc_power_up(host, host->ocr_avail);
+
+ /*
+ * Some eMMCs (with VCCQ always on) may not be reset after power up, so
+ * do a hardware reset if possible.
+ */
+ mmc_hw_reset_for_init(host);
+
+ /*
+ * sdio_reset sends CMD52 to reset card. Since we do not know
+ * if the card is being re-initialized, just send it. CMD52
+ * should be ignored by SD/eMMC cards.
+ */
+ sdio_reset(host);
+ mmc_go_idle(host);
+
+ mmc_send_if_cond(host, host->ocr_avail);
+
+ /* Order's important: probe SDIO, then SD, then MMC */
+ if (!mmc_attach_sdio(host))
+ return 0;
+ if (!mmc_attach_sd(host))
+ return 0;
+ if (!mmc_attach_mmc(host))
+ return 0;
+
+ mmc_power_off(host);
+ return -EIO;
+}
+
+int _mmc_detect_card_removed(struct mmc_host *host)
+{
+ int ret;
+
+ if (host->caps & MMC_CAP_NONREMOVABLE)
+ return 0;
+
+ if (!host->card || mmc_card_removed(host->card))
+ return 1;
+
+ ret = host->bus_ops->alive(host);
+
+ /*
+ * Card detect status and alive check may be out of sync if card is
+ * removed slowly, when card detect switch changes while card/slot
+ * pads are still contacted in hardware (refer to "SD Card Mechanical
+ * Addendum, Appendix C: Card Detection Switch"). So reschedule a
+ * detect work 200ms later for this case.
+ */
+ if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
+ mmc_detect_change(host, msecs_to_jiffies(200));
+ pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
+ }
+
+ if (ret) {
+ mmc_card_set_removed(host->card);
+ pr_debug("%s: card remove detected\n", mmc_hostname(host));
+ }
+
+ return ret;
+}
+
+int mmc_detect_card_removed(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ int ret;
+
+ WARN_ON(!host->claimed);
+
+ if (!card)
+ return 1;
+
+ ret = mmc_card_removed(card);
+ /*
+	 * The card will be considered unchanged unless we have been asked
+	 * to detect a change or the host requires polling to provide card
+	 * detection.
+ */
+ if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
+ return ret;
+
+ host->detect_change = 0;
+ if (!ret) {
+ ret = _mmc_detect_card_removed(host);
+ if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
+ /*
+ * Schedule a detect work as soon as possible to let a
+ * rescan handle the card removal.
+ */
+ cancel_delayed_work(&host->detect);
+ _mmc_detect_change(host, 0, false);
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(mmc_detect_card_removed);
+
+void mmc_rescan(struct work_struct *work)
+{
+ struct mmc_host *host =
+ container_of(work, struct mmc_host, detect.work);
+ int i;
+
+ if (host->trigger_card_event && host->ops->card_event) {
+ host->ops->card_event(host);
+ host->trigger_card_event = false;
+ }
+
+ if (host->rescan_disable)
+ return;
+
+ /* If there is a non-removable card registered, only scan once */
+ if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
+ return;
+ host->rescan_entered = 1;
+
+ mmc_bus_get(host);
+
+ /*
+ * if there is a _removable_ card registered, check whether it is
+ * still present
+ */
+ if (host->bus_ops && !host->bus_dead
+ && !(host->caps & MMC_CAP_NONREMOVABLE))
+ host->bus_ops->detect(host);
+
+ host->detect_change = 0;
+
+ /*
+ * Let mmc_bus_put() free the bus/bus_ops if we've found that
+ * the card is no longer present.
+ */
+ mmc_bus_put(host);
+ mmc_bus_get(host);
+
+ /* if there still is a card present, stop here */
+ if (host->bus_ops != NULL) {
+ mmc_bus_put(host);
+ goto out;
+ }
+
+ /*
+ * Only we can add a new handler, so it's safe to
+ * release the lock here.
+ */
+ mmc_bus_put(host);
+
+ if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
+ host->ops->get_cd(host) == 0) {
+ mmc_claim_host(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ goto out;
+ }
+
+ mmc_claim_host(host);
+ for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+ break;
+ if (freqs[i] <= host->f_min)
+ break;
+ }
+ mmc_release_host(host);
+
+ out:
+ if (host->caps & MMC_CAP_NEEDS_POLL)
+ mmc_schedule_delayed_work(&host->detect, HZ);
+}
+
+void mmc_start_host(struct mmc_host *host)
+{
+ host->f_init = max(freqs[0], host->f_min);
+ host->rescan_disable = 0;
+ host->ios.power_mode = MMC_POWER_UNDEFINED;
+ if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
+ mmc_power_off(host);
+ else
+ mmc_power_up(host, host->ocr_avail);
+ mmc_gpiod_request_cd_irq(host);
+ _mmc_detect_change(host, 0, false);
+}
+
+void mmc_stop_host(struct mmc_host *host)
+{
+#ifdef CONFIG_MMC_DEBUG
+ unsigned long flags;
+ spin_lock_irqsave(&host->lock, flags);
+ host->removed = 1;
+ spin_unlock_irqrestore(&host->lock, flags);
+#endif
+ if (host->slot.cd_irq >= 0)
+ disable_irq(host->slot.cd_irq);
+
+ host->rescan_disable = 1;
+ cancel_delayed_work_sync(&host->detect);
+ mmc_flush_scheduled_work();
+
+ /* clear pm flags now and let card drivers set them as needed */
+ host->pm_flags = 0;
+
+ mmc_bus_get(host);
+ if (host->bus_ops && !host->bus_dead) {
+ /* Calling bus_ops->remove() with a claimed host can deadlock */
+ host->bus_ops->remove(host);
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ mmc_bus_put(host);
+ return;
+ }
+ mmc_bus_put(host);
+
+ BUG_ON(host->card);
+
+ mmc_power_off(host);
+}
+
+int mmc_power_save_host(struct mmc_host *host)
+{
+ int ret = 0;
+
+#ifdef CONFIG_MMC_DEBUG
+ pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
+#endif
+
+ mmc_bus_get(host);
+
+ if (!host->bus_ops || host->bus_dead) {
+ mmc_bus_put(host);
+ return -EINVAL;
+ }
+
+ if (host->bus_ops->power_save)
+ ret = host->bus_ops->power_save(host);
+
+ mmc_bus_put(host);
+
+ mmc_power_off(host);
+
+ return ret;
+}
+EXPORT_SYMBOL(mmc_power_save_host);
+
+int mmc_power_restore_host(struct mmc_host *host)
+{
+ int ret;
+
+#ifdef CONFIG_MMC_DEBUG
+ pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
+#endif
+
+ mmc_bus_get(host);
+
+ if (!host->bus_ops || host->bus_dead) {
+ mmc_bus_put(host);
+ return -EINVAL;
+ }
+
+ mmc_power_up(host, host->card->ocr);
+ ret = host->bus_ops->power_restore(host);
+
+ mmc_bus_put(host);
+
+ return ret;
+}
+EXPORT_SYMBOL(mmc_power_restore_host);
+
+/*
+ * Flush the cache to the non-volatile storage.
+ */
+int mmc_flush_cache(struct mmc_card *card)
+{
+ int err = 0;
+
+ if (mmc_card_mmc(card) &&
+ (card->ext_csd.cache_size > 0) &&
+ (card->ext_csd.cache_ctrl & 1)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 1, 0);
+ if (err)
+ pr_err("%s: cache flush error %d\n",
+ mmc_hostname(card->host), err);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_flush_cache);
+
+#ifdef CONFIG_PM
+
+/*
+ * Do the card removal on suspend if card is assumed removable. Do that in
+ * pm notifier while userspace isn't yet frozen, so we will be able to sync
+ * the card.
+ */
+int mmc_pm_notify(struct notifier_block *notify_block,
+ unsigned long mode, void *unused)
+{
+ struct mmc_host *host = container_of(
+ notify_block, struct mmc_host, pm_notify);
+ unsigned long flags;
+ int err = 0;
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
+ spin_lock_irqsave(&host->lock, flags);
+ host->rescan_disable = 1;
+ spin_unlock_irqrestore(&host->lock, flags);
+ cancel_delayed_work_sync(&host->detect);
+
+ if (!host->bus_ops)
+ break;
+
+ /* Validate prerequisites for suspend */
+ if (host->bus_ops->pre_suspend)
+ err = host->bus_ops->pre_suspend(host);
+ if (!err)
+ break;
+
+ /* Calling bus_ops->remove() with a claimed host can deadlock */
+ host->bus_ops->remove(host);
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ host->pm_flags = 0;
+ break;
+
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->rescan_disable = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
+ _mmc_detect_change(host, 0, false);
+
+ }
+
+ return 0;
+}
+#endif
+
+/**
+ * mmc_init_context_info() - init synchronization context
+ * @host: mmc host
+ *
+ * Init the struct context_info needed to implement the asynchronous
+ * request mechanism used by the mmc core, the host driver and the mmc
+ * request supplier.
+ */
+void mmc_init_context_info(struct mmc_host *host)
+{
+ spin_lock_init(&host->context_info.lock);
+ host->context_info.is_new_req = false;
+ host->context_info.is_done_rcv = false;
+ host->context_info.is_waiting_last_req = false;
+ init_waitqueue_head(&host->context_info.wait);
+}
+
+static int __init mmc_init(void)
+{
+ int ret;
+
+ workqueue = alloc_ordered_workqueue("kmmcd", 0);
+ if (!workqueue)
+ return -ENOMEM;
+
+ ret = mmc_register_bus();
+ if (ret)
+ goto destroy_workqueue;
+
+ ret = mmc_register_host_class();
+ if (ret)
+ goto unregister_bus;
+
+ ret = sdio_register_bus();
+ if (ret)
+ goto unregister_host_class;
+
+ return 0;
+
+unregister_host_class:
+ mmc_unregister_host_class();
+unregister_bus:
+ mmc_unregister_bus();
+destroy_workqueue:
+ destroy_workqueue(workqueue);
+
+ return ret;
+}
+
+static void __exit mmc_exit(void)
+{
+ sdio_unregister_bus();
+ mmc_unregister_host_class();
+ mmc_unregister_bus();
+ destroy_workqueue(workqueue);
+}
+
+subsys_initcall(mmc_init);
+module_exit(mmc_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/mmc/core/core.h b/kernel/drivers/mmc/core/core.h
new file mode 100644
index 000000000..cfba3c05a
--- /dev/null
+++ b/kernel/drivers/mmc/core/core.h
@@ -0,0 +1,93 @@
+/*
+ * linux/drivers/mmc/core/core.h
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright 2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _MMC_CORE_CORE_H
+#define _MMC_CORE_CORE_H
+
+#include <linux/delay.h>
+
+#define MMC_CMD_RETRIES 3
+
+struct mmc_bus_ops {
+ void (*remove)(struct mmc_host *);
+ void (*detect)(struct mmc_host *);
+ int (*pre_suspend)(struct mmc_host *);
+ int (*suspend)(struct mmc_host *);
+ int (*resume)(struct mmc_host *);
+ int (*runtime_suspend)(struct mmc_host *);
+ int (*runtime_resume)(struct mmc_host *);
+ int (*power_save)(struct mmc_host *);
+ int (*power_restore)(struct mmc_host *);
+ int (*alive)(struct mmc_host *);
+ int (*shutdown)(struct mmc_host *);
+ int (*reset)(struct mmc_host *);
+};
+
+void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
+void mmc_detach_bus(struct mmc_host *host);
+
+struct device_node *mmc_of_find_child_device(struct mmc_host *host,
+ unsigned func_num);
+
+void mmc_init_erase(struct mmc_card *card);
+
+void mmc_set_chip_select(struct mmc_host *host, int mode);
+void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
+void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
+void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
+u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr);
+int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
+void mmc_set_timing(struct mmc_host *host, unsigned int timing);
+void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
+void mmc_power_up(struct mmc_host *host, u32 ocr);
+void mmc_power_off(struct mmc_host *host);
+void mmc_power_cycle(struct mmc_host *host, u32 ocr);
+void mmc_set_initial_state(struct mmc_host *host);
+
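+/*
+ * For delays shorter than a jiffy, msleep() could sleep for much longer
+ * than requested, so yield once and busy-wait with mdelay() instead;
+ * longer delays can safely use msleep().
+ */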
+static inline void mmc_delay(unsigned int ms)
+{
+ if (ms < 1000 / HZ) {
+ cond_resched();
+ mdelay(ms);
+ } else {
+ msleep(ms);
+ }
+}
+
+void mmc_rescan(struct work_struct *work);
+void mmc_start_host(struct mmc_host *host);
+void mmc_stop_host(struct mmc_host *host);
+
+int _mmc_detect_card_removed(struct mmc_host *host);
+
+int mmc_attach_mmc(struct mmc_host *host);
+int mmc_attach_sd(struct mmc_host *host);
+int mmc_attach_sdio(struct mmc_host *host);
+
+/* Module parameters */
+extern bool use_spi_crc;
+
+/* Debugfs information for hosts and cards */
+void mmc_add_host_debugfs(struct mmc_host *host);
+void mmc_remove_host_debugfs(struct mmc_host *host);
+
+void mmc_add_card_debugfs(struct mmc_card *card);
+void mmc_remove_card_debugfs(struct mmc_card *card);
+
+void mmc_init_context_info(struct mmc_host *host);
+
+int mmc_execute_tuning(struct mmc_card *card);
+
+#endif
+
diff --git a/kernel/drivers/mmc/core/debugfs.c b/kernel/drivers/mmc/core/debugfs.c
new file mode 100644
index 000000000..e9142108a
--- /dev/null
+++ b/kernel/drivers/mmc/core/debugfs.c
@@ -0,0 +1,379 @@
+/*
+ * Debugfs support for hosts and cards
+ *
+ * Copyright (C) 2008 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/moduleparam.h>
+#include <linux/export.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/fault-inject.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include "core.h"
+#include "mmc_ops.h"
+
+#ifdef CONFIG_FAIL_MMC_REQUEST
+
+static DECLARE_FAULT_ATTR(fail_default_attr);
+static char *fail_request;
+module_param(fail_request, charp, 0);
+
+#endif /* CONFIG_FAIL_MMC_REQUEST */
+
+/* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */
+static int mmc_ios_show(struct seq_file *s, void *data)
+{
+ static const char *vdd_str[] = {
+ [8] = "2.0",
+ [9] = "2.1",
+ [10] = "2.2",
+ [11] = "2.3",
+ [12] = "2.4",
+ [13] = "2.5",
+ [14] = "2.6",
+ [15] = "2.7",
+ [16] = "2.8",
+ [17] = "2.9",
+ [18] = "3.0",
+ [19] = "3.1",
+ [20] = "3.2",
+ [21] = "3.3",
+ [22] = "3.4",
+ [23] = "3.5",
+ [24] = "3.6",
+ };
+ struct mmc_host *host = s->private;
+ struct mmc_ios *ios = &host->ios;
+ const char *str;
+
+ seq_printf(s, "clock:\t\t%u Hz\n", ios->clock);
+ if (host->actual_clock)
+ seq_printf(s, "actual clock:\t%u Hz\n", host->actual_clock);
+ seq_printf(s, "vdd:\t\t%u ", ios->vdd);
+ if ((1 << ios->vdd) & MMC_VDD_165_195)
+ seq_printf(s, "(1.65 - 1.95 V)\n");
+ else if (ios->vdd < (ARRAY_SIZE(vdd_str) - 1)
+ && vdd_str[ios->vdd] && vdd_str[ios->vdd + 1])
+ seq_printf(s, "(%s ~ %s V)\n", vdd_str[ios->vdd],
+ vdd_str[ios->vdd + 1]);
+ else
+ seq_printf(s, "(invalid)\n");
+
+ switch (ios->bus_mode) {
+ case MMC_BUSMODE_OPENDRAIN:
+ str = "open drain";
+ break;
+ case MMC_BUSMODE_PUSHPULL:
+ str = "push-pull";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "bus mode:\t%u (%s)\n", ios->bus_mode, str);
+
+ switch (ios->chip_select) {
+ case MMC_CS_DONTCARE:
+ str = "don't care";
+ break;
+ case MMC_CS_HIGH:
+ str = "active high";
+ break;
+ case MMC_CS_LOW:
+ str = "active low";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "chip select:\t%u (%s)\n", ios->chip_select, str);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ str = "off";
+ break;
+ case MMC_POWER_UP:
+ str = "up";
+ break;
+ case MMC_POWER_ON:
+ str = "on";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "power mode:\t%u (%s)\n", ios->power_mode, str);
+ seq_printf(s, "bus width:\t%u (%u bits)\n",
+ ios->bus_width, 1 << ios->bus_width);
+
+ switch (ios->timing) {
+ case MMC_TIMING_LEGACY:
+ str = "legacy";
+ break;
+ case MMC_TIMING_MMC_HS:
+ str = "mmc high-speed";
+ break;
+ case MMC_TIMING_SD_HS:
+ str = "sd high-speed";
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ str = "sd uhs SDR50";
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ str = "sd uhs SDR104";
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ str = "sd uhs DDR50";
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ str = "mmc DDR52";
+ break;
+ case MMC_TIMING_MMC_HS200:
+ str = "mmc HS200";
+ break;
+ case MMC_TIMING_MMC_HS400:
+ str = "mmc HS400";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+ seq_printf(s, "timing spec:\t%u (%s)\n", ios->timing, str);
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ str = "3.30 V";
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ str = "1.80 V";
+ break;
+ case MMC_SIGNAL_VOLTAGE_120:
+ str = "1.20 V";
+ break;
+ default:
+ str = "invalid";
+ break;
+ }
+	seq_printf(s, "signal voltage:\t%u (%s)\n", ios->signal_voltage, str);
+
+ return 0;
+}
+
+static int mmc_ios_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mmc_ios_show, inode->i_private);
+}
+
+static const struct file_operations mmc_ios_fops = {
+ .open = mmc_ios_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mmc_clock_opt_get(void *data, u64 *val)
+{
+ struct mmc_host *host = data;
+
+ *val = host->ios.clock;
+
+ return 0;
+}
+
+static int mmc_clock_opt_set(void *data, u64 val)
+{
+ struct mmc_host *host = data;
+
+	/* We need this check because the input value is a u64 */
+ if (val > host->f_max)
+ return -EINVAL;
+
+ mmc_claim_host(host);
+ mmc_set_clock(host, (unsigned int) val);
+ mmc_release_host(host);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
+ "%llu\n");
+
+void mmc_add_host_debugfs(struct mmc_host *host)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir(mmc_hostname(host), NULL);
+ if (IS_ERR(root))
+ /* Don't complain -- debugfs just isn't enabled */
+ return;
+ if (!root)
+ /* Complain -- debugfs is enabled, but it failed to
+ * create the directory. */
+ goto err_root;
+
+ host->debugfs_root = root;
+
+ if (!debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops))
+ goto err_node;
+
+ if (!debugfs_create_file("clock", S_IRUSR | S_IWUSR, root, host,
+ &mmc_clock_fops))
+ goto err_node;
+
+#ifdef CONFIG_MMC_CLKGATE
+ if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+ root, &host->clk_delay))
+ goto err_node;
+#endif
+#ifdef CONFIG_FAIL_MMC_REQUEST
+ if (fail_request)
+ setup_fault_attr(&fail_default_attr, fail_request);
+ host->fail_mmc_request = fail_default_attr;
+ if (IS_ERR(fault_create_debugfs_attr("fail_mmc_request",
+ root,
+ &host->fail_mmc_request)))
+ goto err_node;
+#endif
+ return;
+
+err_node:
+ debugfs_remove_recursive(root);
+ host->debugfs_root = NULL;
+err_root:
+ dev_err(&host->class_dev, "failed to initialize debugfs\n");
+}
+
+void mmc_remove_host_debugfs(struct mmc_host *host)
+{
+ debugfs_remove_recursive(host->debugfs_root);
+}
+
+static int mmc_dbg_card_status_get(void *data, u64 *val)
+{
+ struct mmc_card *card = data;
+ u32 status;
+ int ret;
+
+ mmc_get_card(card);
+
+ ret = mmc_send_status(data, &status);
+ if (!ret)
+ *val = status;
+
+ mmc_put_card(card);
+
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
+ NULL, "%08llx\n");
+
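+/* 512 EXT_CSD bytes printed as two hex characters each, plus a newline */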
+#define EXT_CSD_STR_LEN 1025
+
+static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
+{
+ struct mmc_card *card = inode->i_private;
+ char *buf;
+ ssize_t n = 0;
+ u8 *ext_csd;
+ int err, i;
+
+ buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mmc_get_card(card);
+ err = mmc_get_ext_csd(card, &ext_csd);
+ mmc_put_card(card);
+ if (err)
+ goto out_free;
+
+ for (i = 0; i < 512; i++)
+ n += sprintf(buf + n, "%02x", ext_csd[i]);
+ n += sprintf(buf + n, "\n");
+ BUG_ON(n != EXT_CSD_STR_LEN);
+
+ filp->private_data = buf;
+ kfree(ext_csd);
+ return 0;
+
+out_free:
+ kfree(buf);
+ return err;
+}
+
+static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char *buf = filp->private_data;
+
+ return simple_read_from_buffer(ubuf, cnt, ppos,
+ buf, EXT_CSD_STR_LEN);
+}
+
+static int mmc_ext_csd_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations mmc_dbg_ext_csd_fops = {
+ .open = mmc_ext_csd_open,
+ .read = mmc_ext_csd_read,
+ .release = mmc_ext_csd_release,
+ .llseek = default_llseek,
+};
+
+void mmc_add_card_debugfs(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ struct dentry *root;
+
+ if (!host->debugfs_root)
+ return;
+
+ root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root);
+ if (IS_ERR(root))
+ /* Don't complain -- debugfs just isn't enabled */
+ return;
+ if (!root)
+ /* Complain -- debugfs is enabled, but it failed to
+ * create the directory. */
+ goto err;
+
+ card->debugfs_root = root;
+
+ if (!debugfs_create_x32("state", S_IRUSR, root, &card->state))
+ goto err;
+
+ if (mmc_card_mmc(card) || mmc_card_sd(card))
+ if (!debugfs_create_file("status", S_IRUSR, root, card,
+ &mmc_dbg_card_status_fops))
+ goto err;
+
+ if (mmc_card_mmc(card))
+ if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
+ &mmc_dbg_ext_csd_fops))
+ goto err;
+
+ return;
+
+err:
+ debugfs_remove_recursive(root);
+ card->debugfs_root = NULL;
+ dev_err(&card->dev, "failed to initialize debugfs\n");
+}
+
+void mmc_remove_card_debugfs(struct mmc_card *card)
+{
+ debugfs_remove_recursive(card->debugfs_root);
+}
diff --git a/kernel/drivers/mmc/core/host.c b/kernel/drivers/mmc/core/host.c
new file mode 100644
index 000000000..8be0df758
--- /dev/null
+++ b/kernel/drivers/mmc/core/host.c
@@ -0,0 +1,596 @@
+/*
+ * linux/drivers/mmc/core/host.c
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright (C) 2007-2008 Pierre Ossman
+ * Copyright (C) 2010 Linus Walleij
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * MMC host class device management
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/idr.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/pagemap.h>
+#include <linux/export.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/slot-gpio.h>
+
+#include "core.h"
+#include "host.h"
+#include "slot-gpio.h"
+#include "pwrseq.h"
+
+#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
+
+static DEFINE_IDR(mmc_host_idr);
+static DEFINE_SPINLOCK(mmc_host_lock);
+
+static void mmc_host_classdev_release(struct device *dev)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ spin_lock(&mmc_host_lock);
+ idr_remove(&mmc_host_idr, host->index);
+ spin_unlock(&mmc_host_lock);
+ kfree(host);
+}
+
+static struct class mmc_host_class = {
+ .name = "mmc_host",
+ .dev_release = mmc_host_classdev_release,
+};
+
+int mmc_register_host_class(void)
+{
+ return class_register(&mmc_host_class);
+}
+
+void mmc_unregister_host_class(void)
+{
+ class_unregister(&mmc_host_class);
+}
+
+#ifdef CONFIG_MMC_CLKGATE
+static ssize_t clkgate_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
+}
+
+static ssize_t clkgate_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clkgate_delay = value;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return count;
+}
+
+/*
+ * Enabling clock gating will make the core call out to the host once to
+ * ungate and once to gate the clock around each request or card operation,
+ * however these are intermingled. The driver will see this through
+ * set_ios() operations with ios.clock field set to 0 to gate (disable)
+ * the block clock, and to the old frequency to enable it again.
+ */
+static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+{
+ unsigned long tick_ns;
+ unsigned long freq = host->ios.clock;
+ unsigned long flags;
+
+ if (!freq) {
+ pr_debug("%s: frequency set to 0 in disable function, "
+ "this means the clock is already disabled.\n",
+ mmc_hostname(host));
+ return;
+ }
+ /*
+	 * New requests may have appeared while we were scheduling, in
+	 * which case there is no reason to delay the check before
+ * clk_disable().
+ */
+ spin_lock_irqsave(&host->clk_lock, flags);
+
+ /*
+ * Delay n bus cycles (at least 8 from MMC spec) before attempting
+ * to disable the MCI block clock. The reference count may have
+ * gone up again after this delay due to rescheduling!
+ */
+ if (!host->clk_requests) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ tick_ns = DIV_ROUND_UP(1000000000, freq);
+ ndelay(host->clk_delay * tick_ns);
+ } else {
+ /* New users appeared while waiting for this work */
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return;
+ }
+ mutex_lock(&host->clk_gate_mutex);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (!host->clk_requests) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ /* This will set host->ios.clock to 0 */
+ mmc_gate_clock(host);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+ }
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mutex_unlock(&host->clk_gate_mutex);
+}
+
+/*
+ * Internal work. Work to disable the clock at some later point.
+ */
+static void mmc_host_clk_gate_work(struct work_struct *work)
+{
+ struct mmc_host *host = container_of(work, struct mmc_host,
+ clk_gate_work.work);
+
+ mmc_host_clk_gate_delayed(host);
+}
+
+/**
+ * mmc_host_clk_hold - ungate hardware MCI clocks
+ * @host: host to ungate.
+ *
+ * Makes sure the host ios.clock is restored to a non-zero value
+ * past this call. Increase clock reference count and ungate clock
+ * if we're the first user.
+ */
+void mmc_host_clk_hold(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ /* cancel any clock gating work scheduled by mmc_host_clk_release() */
+ cancel_delayed_work_sync(&host->clk_gate_work);
+ mutex_lock(&host->clk_gate_mutex);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (host->clk_gated) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mmc_ungate_clock(host);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+ }
+ host->clk_requests++;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mutex_unlock(&host->clk_gate_mutex);
+}
+
+/**
+ * mmc_host_may_gate_card - check if this card may be gated
+ * @card: card to check.
+ */
+static bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+ /* If there is no card we may gate it */
+ if (!card)
+ return true;
+ /*
+ * Don't gate SDIO cards! These need to be clocked at all times
+ * since they may be independent systems generating interrupts
+ * and other events. The clock requests counter from the core will
+ * go down to zero since the core does not need it, but we will not
+ * gate the clock, because there is somebody out there that may still
+ * be using it.
+ */
+ return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
+}
+
+/**
+ * mmc_host_clk_release - gate off hardware MCI clocks
+ * @host: host to gate.
+ *
+ * Calls the host driver with ios.clock set to zero as often as possible
+ * in order to gate off hardware MCI clocks. Decrease clock reference
+ * count and schedule disabling of clock.
+ */
+void mmc_host_clk_release(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clk_requests--;
+ if (mmc_host_may_gate_card(host->card) &&
+ !host->clk_requests)
+ schedule_delayed_work(&host->clk_gate_work,
+ msecs_to_jiffies(host->clkgate_delay));
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+/**
+ * mmc_host_clk_rate - get current clock frequency setting
+ * @host: host to get the clock frequency for.
+ *
+ * Returns current clock frequency regardless of gating.
+ */
+unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+ unsigned long freq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (host->clk_gated)
+ freq = host->clk_old;
+ else
+ freq = host->ios.clock;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return freq;
+}
+
+/**
+ * mmc_host_clk_init - set up clock gating code
+ * @host: host with potential clock to control
+ */
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+ host->clk_requests = 0;
+ /* Hold MCI clock for 8 cycles by default */
+ host->clk_delay = 8;
+ /*
+ * Default clock gating delay is 0ms to avoid wasting power.
+	 * This value can be tuned by writing to the sysfs entry.
+ */
+ host->clkgate_delay = 0;
+ host->clk_gated = false;
+ INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+ spin_lock_init(&host->clk_lock);
+ mutex_init(&host->clk_gate_mutex);
+}
+
+/**
+ * mmc_host_clk_exit - shut down clock gating code
+ * @host: host with potential clock to control
+ */
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+ /*
+ * Wait for any outstanding gate and then make sure we're
+ * ungated before exiting.
+ */
+ if (cancel_delayed_work_sync(&host->clk_gate_work))
+ mmc_host_clk_gate_delayed(host);
+ if (host->clk_gated)
+ mmc_host_clk_hold(host);
+ /* There should be only one user now */
+ WARN_ON(host->clk_requests > 1);
+}
+
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+ host->clkgate_delay_attr.show = clkgate_delay_show;
+ host->clkgate_delay_attr.store = clkgate_delay_store;
+ sysfs_attr_init(&host->clkgate_delay_attr.attr);
+ host->clkgate_delay_attr.attr.name = "clkgate_delay";
+ host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
+ pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
+ mmc_hostname(host));
+}
+#else
+
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+}
+
+#endif
+
+/**
+ * mmc_of_parse() - parse host's device-tree node
+ * @host: host whose node should be parsed.
+ *
+ * To keep the rest of the MMC subsystem unaware of whether DT has been
+ * used to instantiate and configure this host instance or not, we
+ * parse the properties and set respective generic mmc-host flags and
+ * parameters.
+ */
+int mmc_of_parse(struct mmc_host *host)
+{
+ struct device_node *np;
+ u32 bus_width;
+ int len, ret;
+ bool cd_cap_invert, cd_gpio_invert = false;
+ bool ro_cap_invert, ro_gpio_invert = false;
+
+ if (!host->parent || !host->parent->of_node)
+ return 0;
+
+ np = host->parent->of_node;
+
+ /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
+ if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
+ dev_dbg(host->parent,
+ "\"bus-width\" property is missing, assuming 1 bit.\n");
+ bus_width = 1;
+ }
+
+ switch (bus_width) {
+ case 8:
+ host->caps |= MMC_CAP_8_BIT_DATA;
+ /* Hosts capable of 8-bit transfers can also do 4 bits */
+ case 4:
+ host->caps |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 1:
+ break;
+ default:
+ dev_err(host->parent,
+ "Invalid \"bus-width\" value %u!\n", bus_width);
+ return -EINVAL;
+ }
+
+ /* f_max is obtained from the optional "max-frequency" property */
+ of_property_read_u32(np, "max-frequency", &host->f_max);
+
+ /*
+ * Configure CD and WP pins. They are both by default active low to
+ * match the SDHCI spec. If GPIOs are provided for CD and / or WP, the
+ * mmc-gpio helpers are used to attach, configure and use them. If
+ * polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH
+ * and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the
+ * "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability
+ * is set. If the "non-removable" property is found, the
+ * MMC_CAP_NONREMOVABLE capability is set and no card-detection
+ * configuration is performed.
+ */
+
+ /* Parse Card Detection */
+ if (of_find_property(np, "non-removable", &len)) {
+ host->caps |= MMC_CAP_NONREMOVABLE;
+ } else {
+ cd_cap_invert = of_property_read_bool(np, "cd-inverted");
+
+ if (of_find_property(np, "broken-cd", &len))
+ host->caps |= MMC_CAP_NEEDS_POLL;
+
+ ret = mmc_gpiod_request_cd(host, "cd", 0, true,
+ 0, &cd_gpio_invert);
+ if (!ret)
+ dev_info(host->parent, "Got CD GPIO\n");
+ else if (ret != -ENOENT)
+ return ret;
+
+ /*
+ * There are two ways to flag that the CD line is inverted:
+ * through the cd-inverted flag and by the GPIO line itself
+ * being inverted from the GPIO subsystem. This is a leftover
+ * from the times when the GPIO subsystem did not make it
+ * possible to flag a line as inverted.
+ *
+ * If the capability on the host AND the GPIO line are
+ * both inverted, the end result is that the CD line is
+ * not inverted.
+ */
+ if (cd_cap_invert ^ cd_gpio_invert)
+ host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+ }
+
+ /* Parse Write Protection */
+ ro_cap_invert = of_property_read_bool(np, "wp-inverted");
+
+ ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
+ if (!ret)
+ dev_info(host->parent, "Got WP GPIO\n");
+ else if (ret != -ENOENT)
+ return ret;
+
+ /* See the comment on CD inversion above */
+ if (ro_cap_invert ^ ro_gpio_invert)
+ host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ if (of_find_property(np, "cap-sd-highspeed", &len))
+ host->caps |= MMC_CAP_SD_HIGHSPEED;
+ if (of_find_property(np, "cap-mmc-highspeed", &len))
+ host->caps |= MMC_CAP_MMC_HIGHSPEED;
+ if (of_find_property(np, "sd-uhs-sdr12", &len))
+ host->caps |= MMC_CAP_UHS_SDR12;
+ if (of_find_property(np, "sd-uhs-sdr25", &len))
+ host->caps |= MMC_CAP_UHS_SDR25;
+ if (of_find_property(np, "sd-uhs-sdr50", &len))
+ host->caps |= MMC_CAP_UHS_SDR50;
+ if (of_find_property(np, "sd-uhs-sdr104", &len))
+ host->caps |= MMC_CAP_UHS_SDR104;
+ if (of_find_property(np, "sd-uhs-ddr50", &len))
+ host->caps |= MMC_CAP_UHS_DDR50;
+ if (of_find_property(np, "cap-power-off-card", &len))
+ host->caps |= MMC_CAP_POWER_OFF_CARD;
+ if (of_find_property(np, "cap-sdio-irq", &len))
+ host->caps |= MMC_CAP_SDIO_IRQ;
+ if (of_find_property(np, "full-pwr-cycle", &len))
+ host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
+ if (of_find_property(np, "keep-power-in-suspend", &len))
+ host->pm_caps |= MMC_PM_KEEP_POWER;
+ if (of_find_property(np, "enable-sdio-wakeup", &len))
+ host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+ if (of_find_property(np, "mmc-ddr-1_8v", &len))
+ host->caps |= MMC_CAP_1_8V_DDR;
+ if (of_find_property(np, "mmc-ddr-1_2v", &len))
+ host->caps |= MMC_CAP_1_2V_DDR;
+ if (of_find_property(np, "mmc-hs200-1_8v", &len))
+ host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+ if (of_find_property(np, "mmc-hs200-1_2v", &len))
+ host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+ if (of_find_property(np, "mmc-hs400-1_8v", &len))
+ host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
+ if (of_find_property(np, "mmc-hs400-1_2v", &len))
+ host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
+
+ host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
+ if (host->dsr_req && (host->dsr & ~0xffff)) {
+ dev_err(host->parent,
+ "device tree specified broken value for DSR: 0x%x, ignoring\n",
+ host->dsr);
+ host->dsr_req = 0;
+ }
+
+ return mmc_pwrseq_alloc(host);
+}
+
+EXPORT_SYMBOL(mmc_of_parse);
+
+/**
+ * mmc_alloc_host - initialise the per-host structure.
+ * @extra: sizeof private data structure
+ * @dev: pointer to host device model structure
+ *
+ * Initialise the per-host structure.
+ */
+struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
+{
+ int err;
+ struct mmc_host *host;
+
+ host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ /* scanning will be enabled when we're ready */
+ host->rescan_disable = 1;
+ idr_preload(GFP_KERNEL);
+ spin_lock(&mmc_host_lock);
+ err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT);
+ if (err >= 0)
+ host->index = err;
+ spin_unlock(&mmc_host_lock);
+ idr_preload_end();
+ if (err < 0) {
+ kfree(host);
+ return NULL;
+ }
+
+ dev_set_name(&host->class_dev, "mmc%d", host->index);
+
+ host->parent = dev;
+ host->class_dev.parent = dev;
+ host->class_dev.class = &mmc_host_class;
+ device_initialize(&host->class_dev);
+
+ if (mmc_gpio_alloc(host)) {
+ put_device(&host->class_dev);
+ return NULL;
+ }
+
+ mmc_host_clk_init(host);
+
+ spin_lock_init(&host->lock);
+ init_waitqueue_head(&host->wq);
+ INIT_DELAYED_WORK(&host->detect, mmc_rescan);
+#ifdef CONFIG_PM
+ host->pm_notify.notifier_call = mmc_pm_notify;
+#endif
+
+ /*
+ * By default, hosts do not support SGIO or large requests.
+ * They have to set these according to their abilities.
+ */
+ host->max_segs = 1;
+ host->max_seg_size = PAGE_CACHE_SIZE;
+
+ host->max_req_size = PAGE_CACHE_SIZE;
+ host->max_blk_size = 512;
+ host->max_blk_count = PAGE_CACHE_SIZE / 512;
+
+ return host;
+}
+
+EXPORT_SYMBOL(mmc_alloc_host);
+
+/**
+ * mmc_add_host - initialise host hardware
+ * @host: mmc host
+ *
+ * Register the host with the driver model. The host must be
+ * prepared to start servicing requests before this function
+ * completes.
+ */
+int mmc_add_host(struct mmc_host *host)
+{
+ int err;
+
+ WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
+ !host->ops->enable_sdio_irq);
+
+ err = device_add(&host->class_dev);
+ if (err)
+ return err;
+
+ led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
+
+#ifdef CONFIG_DEBUG_FS
+ mmc_add_host_debugfs(host);
+#endif
+ mmc_host_clk_sysfs_init(host);
+
+ mmc_start_host(host);
+ register_pm_notifier(&host->pm_notify);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(mmc_add_host);
+
+/**
+ * mmc_remove_host - remove host hardware
+ * @host: mmc host
+ *
+ * Unregister and remove all cards associated with this host,
+ * and power down the MMC bus. No new requests will be issued
+ * after this function has returned.
+ */
+void mmc_remove_host(struct mmc_host *host)
+{
+ unregister_pm_notifier(&host->pm_notify);
+ mmc_stop_host(host);
+
+#ifdef CONFIG_DEBUG_FS
+ mmc_remove_host_debugfs(host);
+#endif
+
+ device_del(&host->class_dev);
+
+ led_trigger_unregister_simple(host->led);
+
+ mmc_host_clk_exit(host);
+}
+
+EXPORT_SYMBOL(mmc_remove_host);
+
+/**
+ * mmc_free_host - free the host structure
+ * @host: mmc host
+ *
+ * Free the host once all references to it have been dropped.
+ */
+void mmc_free_host(struct mmc_host *host)
+{
+ mmc_pwrseq_free(host);
+ put_device(&host->class_dev);
+}
+
+EXPORT_SYMBOL(mmc_free_host);
diff --git a/kernel/drivers/mmc/core/host.h b/kernel/drivers/mmc/core/host.h
new file mode 100644
index 000000000..f2ab9e578
--- /dev/null
+++ b/kernel/drivers/mmc/core/host.h
@@ -0,0 +1,19 @@
+/*
+ * linux/drivers/mmc/core/host.h
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright 2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _MMC_CORE_HOST_H
+#define _MMC_CORE_HOST_H
+#include <linux/mmc/host.h>
+
+int mmc_register_host_class(void);
+void mmc_unregister_host_class(void);
+
+#endif
+
diff --git a/kernel/drivers/mmc/core/mmc.c b/kernel/drivers/mmc/core/mmc.c
new file mode 100644
index 000000000..f36c76f8b
--- /dev/null
+++ b/kernel/drivers/mmc/core/mmc.c
@@ -0,0 +1,1918 @@
+/*
+ * linux/drivers/mmc/core/mmc.c
+ *
+ * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
+ * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
+ * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+
+#include "core.h"
+#include "bus.h"
+#include "mmc_ops.h"
+#include "sd_ops.h"
+
+static const unsigned int tran_exp[] = {
+ 10000, 100000, 1000000, 10000000,
+ 0, 0, 0, 0
+};
+
+static const unsigned char tran_mant[] = {
+ 0, 10, 12, 13, 15, 20, 25, 30,
+ 35, 40, 45, 50, 55, 60, 70, 80,
+};
+
+static const unsigned int tacc_exp[] = {
+ 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
+};
+
+static const unsigned int tacc_mant[] = {
+ 0, 10, 12, 13, 15, 20, 25, 30,
+ 35, 40, 45, 50, 55, 60, 70, 80,
+};
+
+#define UNSTUFF_BITS(resp,start,size) \
+ ({ \
+ const int __size = size; \
+ const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
+ const int __off = 3 - ((start) / 32); \
+ const int __shft = (start) & 31; \
+ u32 __res; \
+ \
+ __res = resp[__off] >> __shft; \
+ if (__size + __shft > 32) \
+ __res |= resp[__off-1] << ((32 - __shft) % 32); \
+ __res & __mask; \
+ })
+
+/*
+ * Given the decoded CSD structure, decode the raw CID to our CID structure.
+ */
+static int mmc_decode_cid(struct mmc_card *card)
+{
+ u32 *resp = card->raw_cid;
+
+ /*
+ * The selection of the format here is based upon published
+	 * specs from SanDisk and from what people have reported.
+ */
+ switch (card->csd.mmca_vsn) {
+ case 0: /* MMC v1.0 - v1.2 */
+ case 1: /* MMC v1.4 */
+ card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
+ card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
+ card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
+ card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
+ card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
+ card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
+ card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
+ card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
+ card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
+ card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
+ card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
+ card->cid.month = UNSTUFF_BITS(resp, 12, 4);
+ card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
+ break;
+
+ case 2: /* MMC v2.0 - v2.2 */
+ case 3: /* MMC v3.1 - v3.3 */
+ case 4: /* MMC v4 */
+ card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
+ card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
+ card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
+ card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
+ card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
+ card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
+ card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
+ card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
+ card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
+ card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
+ card->cid.month = UNSTUFF_BITS(resp, 12, 4);
+ card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
+ break;
+
+ default:
+ pr_err("%s: card has unknown MMCA version %d\n",
+ mmc_hostname(card->host), card->csd.mmca_vsn);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mmc_set_erase_size(struct mmc_card *card)
+{
+ if (card->ext_csd.erase_group_def & 1)
+ card->erase_size = card->ext_csd.hc_erase_size;
+ else
+ card->erase_size = card->csd.erase_size;
+
+ mmc_init_erase(card);
+}
+
+/*
+ * Given a 128-bit response, decode to our card CSD structure.
+ */
+static int mmc_decode_csd(struct mmc_card *card)
+{
+ struct mmc_csd *csd = &card->csd;
+ unsigned int e, m, a, b;
+ u32 *resp = card->raw_csd;
+
+ /*
+ * We only understand CSD structure v1.1 and v1.2.
+ * v1.2 has extra information in bits 15, 11 and 10.
+ * We also support eMMC v4.4 & v4.41.
+ */
+ csd->structure = UNSTUFF_BITS(resp, 126, 2);
+ if (csd->structure == 0) {
+ pr_err("%s: unrecognised CSD structure version %d\n",
+ mmc_hostname(card->host), csd->structure);
+ return -EINVAL;
+ }
+
+ csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
+ m = UNSTUFF_BITS(resp, 115, 4);
+ e = UNSTUFF_BITS(resp, 112, 3);
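+	/* tacc_mant[] is the TAAC mantissa scaled by 10; adding 9 rounds up */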
+ csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
+ csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
+
+ m = UNSTUFF_BITS(resp, 99, 4);
+ e = UNSTUFF_BITS(resp, 96, 3);
+ csd->max_dtr = tran_exp[e] * tran_mant[m];
+ csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
+
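+	/* Capacity in read blocks: (C_SIZE + 1) * 2^(C_SIZE_MULT + 2) */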
+ e = UNSTUFF_BITS(resp, 47, 3);
+ m = UNSTUFF_BITS(resp, 62, 12);
+ csd->capacity = (1 + m) << (e + 2);
+
+ csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
+ csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
+ csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
+ csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
+ csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
+ csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
+ csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
+ csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
+
+ if (csd->write_blkbits >= 9) {
+ a = UNSTUFF_BITS(resp, 42, 5);
+ b = UNSTUFF_BITS(resp, 37, 5);
+ csd->erase_size = (a + 1) * (b + 1);
+ csd->erase_size <<= csd->write_blkbits - 9;
+ }
+
+ return 0;
+}
+
+static void mmc_select_card_type(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u8 card_type = card->ext_csd.raw_card_type;
+ u32 caps = host->caps, caps2 = host->caps2;
+ unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
+ unsigned int avail_type = 0;
+
+ if (caps & MMC_CAP_MMC_HIGHSPEED &&
+ card_type & EXT_CSD_CARD_TYPE_HS_26) {
+ hs_max_dtr = MMC_HIGH_26_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS_26;
+ }
+
+ if (caps & MMC_CAP_MMC_HIGHSPEED &&
+ card_type & EXT_CSD_CARD_TYPE_HS_52) {
+ hs_max_dtr = MMC_HIGH_52_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS_52;
+ }
+
+ if (caps & MMC_CAP_1_8V_DDR &&
+ card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
+ hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
+ }
+
+ if (caps & MMC_CAP_1_2V_DDR &&
+ card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
+ hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
+ }
+
+ if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
+ card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
+ hs200_max_dtr = MMC_HS200_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
+ }
+
+ if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
+ card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
+ hs200_max_dtr = MMC_HS200_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
+ }
+
+ if (caps2 & MMC_CAP2_HS400_1_8V &&
+ card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
+ hs200_max_dtr = MMC_HS200_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
+ }
+
+ if (caps2 & MMC_CAP2_HS400_1_2V &&
+ card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
+ hs200_max_dtr = MMC_HS200_MAX_DTR;
+ avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
+ }
+
+ card->ext_csd.hs_max_dtr = hs_max_dtr;
+ card->ext_csd.hs200_max_dtr = hs200_max_dtr;
+ card->mmc_avail_type = avail_type;
+}
+
+static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
+{
+ u8 hc_erase_grp_sz, hc_wp_grp_sz;
+
+ /*
+ * Disable these attributes by default
+ */
+ card->ext_csd.enhanced_area_offset = -EINVAL;
+ card->ext_csd.enhanced_area_size = -EINVAL;
+
+ /*
+ * Enhanced area feature support -- check whether the eMMC
+ * card has the Enhanced area enabled. If so, export enhanced
+	 * area offset and size to user space via the sysfs interface.
+ */
+ if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
+ (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+ if (card->ext_csd.partition_setting_completed) {
+ hc_erase_grp_sz =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+ hc_wp_grp_sz =
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+
+ /*
+ * calculate the enhanced data area offset, in bytes
+ */
+ card->ext_csd.enhanced_area_offset =
+ (ext_csd[139] << 24) + (ext_csd[138] << 16) +
+ (ext_csd[137] << 8) + ext_csd[136];
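+			/* ENH_START_ADDR is in 512-byte sectors on sector-addressed cards */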
+ if (mmc_card_blockaddr(card))
+ card->ext_csd.enhanced_area_offset <<= 9;
+ /*
+ * calculate the enhanced data area size, in kilobytes
+ */
+ card->ext_csd.enhanced_area_size =
+ (ext_csd[142] << 16) + (ext_csd[141] << 8) +
+ ext_csd[140];
+ card->ext_csd.enhanced_area_size *=
+ (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
+ card->ext_csd.enhanced_area_size <<= 9;
+ } else {
+ pr_warn("%s: defines enhanced area without partition setting complete\n",
+ mmc_hostname(card->host));
+ }
+ }
+}
+
+static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
+{
+ int idx;
+ u8 hc_erase_grp_sz, hc_wp_grp_sz;
+ unsigned int part_size;
+
+ /*
+ * General purpose partition feature support --
+ * If ext_csd has the size of general purpose partitions,
+ * set size, part_cfg, partition name in mmc_part.
+ */
+ if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
+ EXT_CSD_PART_SUPPORT_PART_EN) {
+ hc_erase_grp_sz =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+ hc_wp_grp_sz =
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+
+ for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
+ if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
+ !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
+ !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
+ continue;
+ if (card->ext_csd.partition_setting_completed == 0) {
+ pr_warn("%s: has partition size defined without partition complete\n",
+ mmc_hostname(card->host));
+ break;
+ }
+ part_size =
+ (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
+ << 16) +
+ (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
+ << 8) +
+ ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
+ part_size *= (size_t)(hc_erase_grp_sz *
+ hc_wp_grp_sz);
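+			/* part_size is in 512 KiB units; << 19 converts it to bytes */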
+ mmc_part_add(card, part_size << 19,
+ EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
+ "gp%d", idx, false,
+ MMC_BLK_DATA_AREA_GP);
+ }
+ }
+}
+
+/*
+ * Decode extended CSD.
+ */
+static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
+{
+ int err = 0, idx;
+ unsigned int part_size;
+ struct device_node *np;
+ bool broken_hpi = false;
+
+ /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
+ card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
+ if (card->csd.structure == 3) {
+ if (card->ext_csd.raw_ext_csd_structure > 2) {
+ pr_err("%s: unrecognised EXT_CSD structure "
+ "version %d\n", mmc_hostname(card->host),
+ card->ext_csd.raw_ext_csd_structure);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ np = mmc_of_find_child_device(card->host, 0);
+ if (np && of_device_is_compatible(np, "mmc-card"))
+ broken_hpi = of_property_read_bool(np, "broken-hpi");
+ of_node_put(np);
+
+ /*
+ * The EXT_CSD format is meant to be forward compatible. As long
+ * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
+ * are authorized, see JEDEC JESD84-B50 section B.8.
+ */
+ card->ext_csd.rev = ext_csd[EXT_CSD_REV];
+
+ card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
+ card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
+ card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
+ card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
+ if (card->ext_csd.rev >= 2) {
+ card->ext_csd.sectors =
+ ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
+ ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
+ ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
+ ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
+
+ /* Cards with density > 2GiB are sector addressed */
+ if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
+ mmc_card_set_blockaddr(card);
+ }
+
+ card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
+ mmc_select_card_type(card);
+
+ card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
+ card->ext_csd.raw_erase_timeout_mult =
+ ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
+ card->ext_csd.raw_hc_erase_grp_size =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+ if (card->ext_csd.rev >= 3) {
+ u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
+ card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
+
+ /* EXT_CSD value is in units of 10ms, but we store in ms */
+ card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
+
+ /* Sleep / awake timeout in 100ns units */
+ if (sa_shift > 0 && sa_shift <= 0x17)
+ card->ext_csd.sa_timeout =
+ 1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
+ card->ext_csd.erase_group_def =
+ ext_csd[EXT_CSD_ERASE_GROUP_DEF];
+ card->ext_csd.hc_erase_timeout = 300 *
+ ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
+ card->ext_csd.hc_erase_size =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
+
+ card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
+
+ /*
+ * There are two boot regions of equal size, defined in
+ * multiples of 128K.
+ */
+ if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
+ for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
+ part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
+ mmc_part_add(card, part_size,
+ EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
+ "boot%d", idx, true,
+ MMC_BLK_DATA_AREA_BOOT);
+ }
+ }
+ }
+
+ card->ext_csd.raw_hc_erase_gap_size =
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+ card->ext_csd.raw_sec_trim_mult =
+ ext_csd[EXT_CSD_SEC_TRIM_MULT];
+ card->ext_csd.raw_sec_erase_mult =
+ ext_csd[EXT_CSD_SEC_ERASE_MULT];
+ card->ext_csd.raw_sec_feature_support =
+ ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+ card->ext_csd.raw_trim_mult =
+ ext_csd[EXT_CSD_TRIM_MULT];
+ card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
+ if (card->ext_csd.rev >= 4) {
+ if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
+ EXT_CSD_PART_SETTING_COMPLETED)
+ card->ext_csd.partition_setting_completed = 1;
+ else
+ card->ext_csd.partition_setting_completed = 0;
+
+ mmc_manage_enhanced_area(card, ext_csd);
+
+ mmc_manage_gp_partitions(card, ext_csd);
+
+ card->ext_csd.sec_trim_mult =
+ ext_csd[EXT_CSD_SEC_TRIM_MULT];
+ card->ext_csd.sec_erase_mult =
+ ext_csd[EXT_CSD_SEC_ERASE_MULT];
+ card->ext_csd.sec_feature_support =
+ ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+ card->ext_csd.trim_timeout = 300 *
+ ext_csd[EXT_CSD_TRIM_MULT];
+
+ /*
+ * Note that the call to mmc_part_add above defaults to read
+ * only. If this default assumption is changed, the call must
+ * take into account the value of boot_locked below.
+ */
+ card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
+ card->ext_csd.boot_ro_lockable = true;
+
+ /* Save power class values */
+ card->ext_csd.raw_pwr_cl_52_195 =
+ ext_csd[EXT_CSD_PWR_CL_52_195];
+ card->ext_csd.raw_pwr_cl_26_195 =
+ ext_csd[EXT_CSD_PWR_CL_26_195];
+ card->ext_csd.raw_pwr_cl_52_360 =
+ ext_csd[EXT_CSD_PWR_CL_52_360];
+ card->ext_csd.raw_pwr_cl_26_360 =
+ ext_csd[EXT_CSD_PWR_CL_26_360];
+ card->ext_csd.raw_pwr_cl_200_195 =
+ ext_csd[EXT_CSD_PWR_CL_200_195];
+ card->ext_csd.raw_pwr_cl_200_360 =
+ ext_csd[EXT_CSD_PWR_CL_200_360];
+ card->ext_csd.raw_pwr_cl_ddr_52_195 =
+ ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
+ card->ext_csd.raw_pwr_cl_ddr_52_360 =
+ ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
+ card->ext_csd.raw_pwr_cl_ddr_200_360 =
+ ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
+ }
+
+ if (card->ext_csd.rev >= 5) {
+ /* Adjust production date as per JEDEC JESD84-B451 */
+ if (card->cid.year < 2010)
+ card->cid.year += 16;
+
+ /* check whether the eMMC card supports BKOPS */
+ if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+ card->ext_csd.bkops = 1;
+ card->ext_csd.man_bkops_en =
+ (ext_csd[EXT_CSD_BKOPS_EN] &
+ EXT_CSD_MANUAL_BKOPS_MASK);
+ card->ext_csd.raw_bkops_status =
+ ext_csd[EXT_CSD_BKOPS_STATUS];
+ if (!card->ext_csd.man_bkops_en)
+ pr_info("%s: MAN_BKOPS_EN bit is not set\n",
+ mmc_hostname(card->host));
+ }
+
+ /* check whether the eMMC card supports HPI */
+ if (!broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
+ card->ext_csd.hpi = 1;
+ if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
+ card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
+ else
+ card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
+ /*
+ * Indicate the maximum timeout to close
+ * a command interrupted by HPI
+ */
+ card->ext_csd.out_of_int_time =
+ ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
+ }
+
+ card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
+ card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
+
+ /*
+ * RPMB regions are defined in multiples of 128K.
+ */
+ card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
+ if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
+ mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
+ EXT_CSD_PART_CONFIG_ACC_RPMB,
+ "rpmb", 0, false,
+ MMC_BLK_DATA_AREA_RPMB);
+ }
+ }
+
+ card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
+ if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
+ card->erased_byte = 0xFF;
+ else
+ card->erased_byte = 0x0;
+
+ /* eMMC v4.5 or later */
+ if (card->ext_csd.rev >= 6) {
+ card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
+
+ card->ext_csd.generic_cmd6_time = 10 *
+ ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
+ card->ext_csd.power_off_longtime = 10 *
+ ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
+
+ card->ext_csd.cache_size =
+ ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
+ ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
+ ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
+ ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
+
+ if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
+ card->ext_csd.data_sector_size = 4096;
+ else
+ card->ext_csd.data_sector_size = 512;
+
+ if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
+ (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
+ card->ext_csd.data_tag_unit_size =
+ ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
+ (card->ext_csd.data_sector_size);
+ } else {
+ card->ext_csd.data_tag_unit_size = 0;
+ }
+
+ card->ext_csd.max_packed_writes =
+ ext_csd[EXT_CSD_MAX_PACKED_WRITES];
+ card->ext_csd.max_packed_reads =
+ ext_csd[EXT_CSD_MAX_PACKED_READS];
+ } else {
+ card->ext_csd.data_sector_size = 512;
+ }
+
+ /* eMMC v5 or later */
+ if (card->ext_csd.rev >= 7) {
+ memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
+ MMC_FIRMWARE_LEN);
+ card->ext_csd.ffu_capable =
+ (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
+ !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
+ }
+out:
+ return err;
+}
+
+static int mmc_read_ext_csd(struct mmc_card *card)
+{
+ u8 *ext_csd;
+ int err;
+
+ if (!mmc_can_ext_csd(card))
+ return 0;
+
+ err = mmc_get_ext_csd(card, &ext_csd);
+ if (err) {
+ /* If the host or the card can't do the switch,
+ * fail more gracefully. */
+ if ((err != -EINVAL)
+ && (err != -ENOSYS)
+ && (err != -EFAULT))
+ return err;
+
+ /*
+ * High capacity cards should have this "magic" size
+ * stored in their CSD.
+ */
+ if (card->csd.capacity == (4096 * 512)) {
+ pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
+ mmc_hostname(card->host));
+ } else {
+ pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
+ mmc_hostname(card->host));
+ err = 0;
+ }
+
+ return err;
+ }
+
+ err = mmc_decode_ext_csd(card, ext_csd);
+ kfree(ext_csd);
+ return err;
+}
+
+static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
+{
+ u8 *bw_ext_csd;
+ int err;
+
+ if (bus_width == MMC_BUS_WIDTH_1)
+ return 0;
+
+ err = mmc_get_ext_csd(card, &bw_ext_csd);
+ if (err)
+ return err;
+
+ /* only compare read only fields */
+ err = !((card->ext_csd.raw_partition_support ==
+ bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
+ (card->ext_csd.raw_erased_mem_count ==
+ bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
+ (card->ext_csd.rev ==
+ bw_ext_csd[EXT_CSD_REV]) &&
+ (card->ext_csd.raw_ext_csd_structure ==
+ bw_ext_csd[EXT_CSD_STRUCTURE]) &&
+ (card->ext_csd.raw_card_type ==
+ bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
+ (card->ext_csd.raw_s_a_timeout ==
+ bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
+ (card->ext_csd.raw_hc_erase_gap_size ==
+ bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
+ (card->ext_csd.raw_erase_timeout_mult ==
+ bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
+ (card->ext_csd.raw_hc_erase_grp_size ==
+ bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
+ (card->ext_csd.raw_sec_trim_mult ==
+ bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
+ (card->ext_csd.raw_sec_erase_mult ==
+ bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
+ (card->ext_csd.raw_sec_feature_support ==
+ bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
+ (card->ext_csd.raw_trim_mult ==
+ bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
+ (card->ext_csd.raw_sectors[0] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
+ (card->ext_csd.raw_sectors[1] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
+ (card->ext_csd.raw_sectors[2] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
+ (card->ext_csd.raw_sectors[3] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
+ (card->ext_csd.raw_pwr_cl_52_195 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
+ (card->ext_csd.raw_pwr_cl_26_195 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
+ (card->ext_csd.raw_pwr_cl_52_360 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
+ (card->ext_csd.raw_pwr_cl_26_360 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
+ (card->ext_csd.raw_pwr_cl_200_195 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
+ (card->ext_csd.raw_pwr_cl_200_360 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
+ (card->ext_csd.raw_pwr_cl_ddr_52_195 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
+ (card->ext_csd.raw_pwr_cl_ddr_52_360 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
+ (card->ext_csd.raw_pwr_cl_ddr_200_360 ==
+ bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));
+
+ if (err)
+ err = -EINVAL;
+
+ kfree(bw_ext_csd);
+ return err;
+}
+
+MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
+ card->raw_cid[2], card->raw_cid[3]);
+MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
+ card->raw_csd[2], card->raw_csd[3]);
+MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
+MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
+MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
+MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
+MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
+MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
+MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
+MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
+MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
+MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
+MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
+ card->ext_csd.enhanced_area_offset);
+MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
+MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
+MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
+
+static ssize_t mmc_fwrev_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+
+ if (card->ext_csd.rev < 7) {
+ return sprintf(buf, "0x%x\n", card->cid.fwrev);
+ } else {
+ return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
+ card->ext_csd.fwrev);
+ }
+}
+
+static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
+
+static struct attribute *mmc_std_attrs[] = {
+ &dev_attr_cid.attr,
+ &dev_attr_csd.attr,
+ &dev_attr_date.attr,
+ &dev_attr_erase_size.attr,
+ &dev_attr_preferred_erase_size.attr,
+ &dev_attr_fwrev.attr,
+ &dev_attr_ffu_capable.attr,
+ &dev_attr_hwrev.attr,
+ &dev_attr_manfid.attr,
+ &dev_attr_name.attr,
+ &dev_attr_oemid.attr,
+ &dev_attr_prv.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_enhanced_area_offset.attr,
+ &dev_attr_enhanced_area_size.attr,
+ &dev_attr_raw_rpmb_size_mult.attr,
+ &dev_attr_rel_sectors.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mmc_std);
+
+static struct device_type mmc_type = {
+ .groups = mmc_std_groups,
+};
+
+/*
+ * Select the power class for the current bus width.
+ * If a power class is defined for the 4/8 bit bus in the
+ * extended CSD register, select it by executing the
+ * mmc_switch command.
+ */
+static int __mmc_select_powerclass(struct mmc_card *card,
+ unsigned int bus_width)
+{
+ struct mmc_host *host = card->host;
+ struct mmc_ext_csd *ext_csd = &card->ext_csd;
+ unsigned int pwrclass_val = 0;
+ int err = 0;
+
+ switch (1 << host->ios.vdd) {
+ case MMC_VDD_165_195:
+ if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
+ pwrclass_val = ext_csd->raw_pwr_cl_26_195;
+ else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
+ pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
+ ext_csd->raw_pwr_cl_52_195 :
+ ext_csd->raw_pwr_cl_ddr_52_195;
+ else if (host->ios.clock <= MMC_HS200_MAX_DTR)
+ pwrclass_val = ext_csd->raw_pwr_cl_200_195;
+ break;
+ case MMC_VDD_27_28:
+ case MMC_VDD_28_29:
+ case MMC_VDD_29_30:
+ case MMC_VDD_30_31:
+ case MMC_VDD_31_32:
+ case MMC_VDD_32_33:
+ case MMC_VDD_33_34:
+ case MMC_VDD_34_35:
+ case MMC_VDD_35_36:
+ if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
+ pwrclass_val = ext_csd->raw_pwr_cl_26_360;
+ else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
+ pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
+ ext_csd->raw_pwr_cl_52_360 :
+ ext_csd->raw_pwr_cl_ddr_52_360;
+ else if (host->ios.clock <= MMC_HS200_MAX_DTR)
+ pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
+ ext_csd->raw_pwr_cl_ddr_200_360 :
+ ext_csd->raw_pwr_cl_200_360;
+ break;
+ default:
+ pr_warn("%s: Voltage range not supported for power class\n",
+ mmc_hostname(host));
+ return -EINVAL;
+ }
+
+ if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
+ pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
+ EXT_CSD_PWR_CL_8BIT_SHIFT;
+ else
+ pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
+ EXT_CSD_PWR_CL_4BIT_SHIFT;
+
+ /* If the power class is different from the default value */
+ if (pwrclass_val > 0) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_CLASS,
+ pwrclass_val,
+ card->ext_csd.generic_cmd6_time);
+ }
+
+ return err;
+}
+
+static int mmc_select_powerclass(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 bus_width, ext_csd_bits;
+ int err, ddr;
+
+ /* Power class selection is supported for versions >= 4.0 */
+ if (!mmc_can_ext_csd(card))
+ return 0;
+
+ bus_width = host->ios.bus_width;
+ /* Power class values are defined only for 4/8 bit bus */
+ if (bus_width == MMC_BUS_WIDTH_1)
+ return 0;
+
+ ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
+ if (ddr)
+ ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+ EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
+ else
+ ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+ EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
+
+ err = __mmc_select_powerclass(card, ext_csd_bits);
+ if (err)
+ pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
+ mmc_hostname(host), 1 << bus_width, ddr);
+
+ return err;
+}
+
+/*
+ * Set the bus speed for the selected speed mode.
+ */
+static void mmc_set_bus_speed(struct mmc_card *card)
+{
+ unsigned int max_dtr = (unsigned int)-1;
+
+ if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
+ max_dtr > card->ext_csd.hs200_max_dtr)
+ max_dtr = card->ext_csd.hs200_max_dtr;
+ else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
+ max_dtr = card->ext_csd.hs_max_dtr;
+ else if (max_dtr > card->csd.max_dtr)
+ max_dtr = card->csd.max_dtr;
+
+ mmc_set_clock(card->host, max_dtr);
+}
+
+/*
+ * Select the bus width among 4-bit and 8-bit (SDR).
+ * If the bus width is changed successfully, return the selected width value.
+ * Zero is returned instead of an error value if the wide width is not supported.
+ */
+static int mmc_select_bus_width(struct mmc_card *card)
+{
+ static unsigned ext_csd_bits[] = {
+ EXT_CSD_BUS_WIDTH_8,
+ EXT_CSD_BUS_WIDTH_4,
+ };
+ static unsigned bus_widths[] = {
+ MMC_BUS_WIDTH_8,
+ MMC_BUS_WIDTH_4,
+ };
+ struct mmc_host *host = card->host;
+ unsigned idx, bus_width = 0;
+ int err = 0;
+
+ if (!mmc_can_ext_csd(card) ||
+ !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
+ return 0;
+
+ idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;
+
+ /*
+	 * Unlike SD, MMC cards don't have a configuration register to report
+	 * the supported bus width. So either the bus test command should be
+	 * run to identify the supported bus width, or the EXT_CSD read at the
+	 * current bus width should be compared against the EXT_CSD read
+	 * earlier in 1-bit mode.
+ */
+ for (; idx < ARRAY_SIZE(bus_widths); idx++) {
+ /*
+		 * If the host is capable of 8-bit transfer, first try to switch
+		 * the device to 8-bit transfer mode. If the MMC switch command
+		 * returns an error, fall back to 4-bit transfer mode. On
+		 * success, set the corresponding bus width on the host.
+ */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits[idx],
+ card->ext_csd.generic_cmd6_time);
+ if (err)
+ continue;
+
+ bus_width = bus_widths[idx];
+ mmc_set_bus_width(host, bus_width);
+
+ /*
+ * If controller can't handle bus width test,
+ * compare ext_csd previously read in 1 bit mode
+ * against ext_csd at new bus width
+ */
+ if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+ err = mmc_compare_ext_csds(card, bus_width);
+ else
+ err = mmc_bus_test(card, bus_width);
+
+ if (!err) {
+ err = bus_width;
+ break;
+ } else {
+ pr_warn("%s: switch to bus width %d failed\n",
+ mmc_hostname(host), ext_csd_bits[idx]);
+ }
+ }
+
+ return err;
+}
+
+/*
+ * Switch to the high-speed mode
+ */
+static int mmc_select_hs(struct mmc_card *card)
+{
+ int err;
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
+ card->ext_csd.generic_cmd6_time,
+ true, true, true);
+ if (!err)
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+
+ return err;
+}
+
+/*
+ * Activate wide bus and DDR if supported.
+ */
+static int mmc_select_hs_ddr(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 bus_width, ext_csd_bits;
+ int err = 0;
+
+ if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
+ return 0;
+
+ bus_width = host->ios.bus_width;
+ if (bus_width == MMC_BUS_WIDTH_1)
+ return 0;
+
+ ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+ EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ pr_err("%s: switch to bus width %d ddr failed\n",
+ mmc_hostname(host), 1 << bus_width);
+ return err;
+ }
+
+ /*
+ * eMMC cards can support 3.3V to 1.2V i/o (vccq)
+ * signaling.
+ *
+ * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
+ *
+ * 1.8V vccq at 3.3V core voltage (vcc) is not required
+ * in the JEDEC spec for DDR.
+ *
+	 * Even though an (e)MMC card can support 3.3V to 1.2V vccq, not
+	 * all host controllers can, e.g. some SDHCI controllers that
+	 * connect to an eMMC device still need to use 1.8V vccq to
+	 * support DDR mode.
+ *
+ * So the sequence will be:
+ * if (host and device can both support 1.2v IO)
+ * use 1.2v IO;
+ * else if (host and device can both support 1.8v IO)
+ * use 1.8v IO;
+ * so if host and device can only support 3.3v IO, this is the
+ * last choice.
+ *
+ * WARNING: eMMC rules are NOT the same as SD DDR
+ */
+ err = -EINVAL;
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+
+ if (err && (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+
+	/* if both switches failed, make sure vccq is restored to 3.3V */
+ if (err)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
+
+ if (!err)
+ mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+
+ return err;
+}
+
+static int mmc_select_hs400(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err = 0;
+
+ /*
+ * HS400 mode requires 8-bit bus width
+ */
+ if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+ host->ios.bus_width == MMC_BUS_WIDTH_8))
+ return 0;
+
+ /*
+ * Before switching to dual data rate operation for HS400,
+ * it is required to convert from HS200 mode to HS mode.
+ */
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ mmc_set_bus_speed(card);
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
+ card->ext_csd.generic_cmd6_time,
+ true, true, true);
+ if (err) {
+ pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ EXT_CSD_DDR_BUS_WIDTH_8,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400,
+ card->ext_csd.generic_cmd6_time,
+ true, true, true);
+ if (err) {
+ pr_err("%s: switch to hs400 failed, err:%d\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+
+ mmc_set_timing(host, MMC_TIMING_MMC_HS400);
+ mmc_set_bus_speed(card);
+
+ return 0;
+}
+
+/*
+ * For devices supporting HS200 mode, the following sequence
+ * should be done before executing the tuning process:
+ * 1. set the desired bus width (4-bit or 8-bit, 1-bit is not supported)
+ * 2. switch to HS200 mode
+ * 3. set the clock to > 52MHz and <= 200MHz
+ */
+static int mmc_select_hs200(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err = -EINVAL;
+
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+
+ if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+
+	/* If this fails, try again during the next card power cycle */
+ if (err)
+ goto err;
+
+ /*
+	 * Set the bus width (4 or 8) according to the host's capabilities,
+	 * and switch to HS200 mode if the bus width was set successfully.
+ */
+ err = mmc_select_bus_width(card);
+ if (!IS_ERR_VALUE(err)) {
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS200,
+ card->ext_csd.generic_cmd6_time,
+ true, true, true);
+ if (!err)
+ mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+ }
+err:
+ return err;
+}
+
+/*
+ * Activate High Speed or HS200 mode if supported.
+ */
+static int mmc_select_timing(struct mmc_card *card)
+{
+ int err = 0;
+
+ if (!mmc_can_ext_csd(card))
+ goto bus_speed;
+
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
+ err = mmc_select_hs200(card);
+ else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
+ err = mmc_select_hs(card);
+
+ if (err && err != -EBADMSG)
+ return err;
+
+ if (err) {
+		pr_warn("%s: switch to %s failed\n",
+			mmc_hostname(card->host),
+			mmc_card_hs(card) ? "high-speed" :
+			(mmc_card_hs200(card) ? "hs200" : ""));
+ err = 0;
+ }
+
+bus_speed:
+ /*
+ * Set the bus speed to the selected bus timing.
+	 * If no timing was selected, the backward-compatible legacy timing is the default.
+ */
+ mmc_set_bus_speed(card);
+ return err;
+}
+
+/*
+ * Execute tuning sequence to seek the proper bus operating
+ * conditions for HS200 and HS400, which sends CMD21 to the device.
+ */
+static int mmc_hs200_tuning(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+
+ /*
+	 * The timing should be adjusted to the HS400 target
+	 * operation frequency for the tuning process.
+ */
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+ host->ios.bus_width == MMC_BUS_WIDTH_8)
+ if (host->ops->prepare_hs400_tuning)
+ host->ops->prepare_hs400_tuning(host, &host->ios);
+
+ return mmc_execute_tuning(card);
+}
+
+/*
+ * Handle the detection and initialisation of a card.
+ *
+ * In the case of a resume, "oldcard" will contain the card
+ * we're trying to reinitialise.
+ */
+static int mmc_init_card(struct mmc_host *host, u32 ocr,
+ struct mmc_card *oldcard)
+{
+ struct mmc_card *card;
+ int err;
+ u32 cid[4];
+ u32 rocr;
+
+ BUG_ON(!host);
+ WARN_ON(!host->claimed);
+
+ /* Set correct bus mode for MMC before attempting init */
+ if (!mmc_host_is_spi(host))
+ mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
+
+ /*
+ * Since we're changing the OCR value, we seem to
+ * need to tell some cards to go back to the idle
+ * state. We wait 1ms to give cards time to
+ * respond.
+	 * mmc_go_idle is needed for eMMC cards that are asleep.
+ */
+ mmc_go_idle(host);
+
+ /* The extra bit indicates that we support high capacity */
+ err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
+ if (err)
+ goto err;
+
+ /*
+ * For SPI, enable CRC as appropriate.
+ */
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+ goto err;
+ }
+
+ /*
+ * Fetch CID from card.
+ */
+ if (mmc_host_is_spi(host))
+ err = mmc_send_cid(host, cid);
+ else
+ err = mmc_all_send_cid(host, cid);
+ if (err)
+ goto err;
+
+ if (oldcard) {
+ if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
+ err = -ENOENT;
+ goto err;
+ }
+
+ card = oldcard;
+ } else {
+ /*
+ * Allocate card structure.
+ */
+ card = mmc_alloc_card(host, &mmc_type);
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto err;
+ }
+
+ card->ocr = ocr;
+ card->type = MMC_TYPE_MMC;
+ card->rca = 1;
+ memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
+ }
+
+ /*
+ * Call the optional HC's init_card function to handle quirks.
+ */
+ if (host->ops->init_card)
+ host->ops->init_card(host, card);
+
+ /*
+ * For native busses: set card RCA and quit open drain mode.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_set_relative_addr(card);
+ if (err)
+ goto free_card;
+
+ mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
+ }
+
+ if (!oldcard) {
+ /*
+ * Fetch CSD from card.
+ */
+ err = mmc_send_csd(card, card->raw_csd);
+ if (err)
+ goto free_card;
+
+ err = mmc_decode_csd(card);
+ if (err)
+ goto free_card;
+ err = mmc_decode_cid(card);
+ if (err)
+ goto free_card;
+ }
+
+ /*
+ * handling only for cards supporting DSR and hosts requesting
+ * DSR configuration
+ */
+ if (card->csd.dsr_imp && host->dsr_req)
+ mmc_set_dsr(host);
+
+ /*
+ * Select card, as all following commands rely on that.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ goto free_card;
+ }
+
+ if (!oldcard) {
+ /* Read extended CSD. */
+ err = mmc_read_ext_csd(card);
+ if (err)
+ goto free_card;
+
+ /* If doing byte addressing, check if required to do sector
+ * addressing. Handle the case of <2GB cards needing sector
+		 * addressing.  See section 8.1 of JEDEC Standard JESD84-A441;
+		 * the OCR register has bit 30 set for sector addressing.
+ */
+ if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
+ mmc_card_set_blockaddr(card);
+
+ /* Erase size depends on CSD and Extended CSD */
+ mmc_set_erase_size(card);
+ }
+
+ /*
+	 * If the partition attributes have been set up (or the host wants the
+	 * high-capacity erase size), the host needs to enable the ERASE_GRP_DEF
+	 * bit.  This bit is lost after every reset or power off.
+ */
+ if (card->ext_csd.partition_setting_completed ||
+ (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_ERASE_GROUP_DEF, 1,
+ card->ext_csd.generic_cmd6_time);
+
+ if (err && err != -EBADMSG)
+ goto free_card;
+
+ if (err) {
+ err = 0;
+ /*
+			 * Just disable the enhanced area offset & size;
+			 * we will try to enable ERASE_GROUP_DEF again
+			 * at the next reinit.
+ */
+ card->ext_csd.enhanced_area_offset = -EINVAL;
+ card->ext_csd.enhanced_area_size = -EINVAL;
+ } else {
+ card->ext_csd.erase_group_def = 1;
+ /*
+			 * ERASE_GRP_DEF was enabled successfully.
+			 * This affects the erase size, so the erase
+			 * size needs to be recomputed here.
+ */
+ mmc_set_erase_size(card);
+ }
+ }
+
+ /*
+ * Ensure eMMC user default partition is enabled
+ */
+ if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
+ card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
+ card->ext_csd.part_config,
+ card->ext_csd.part_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ }
+
+ /*
+ * Enable power_off_notification byte in the ext_csd register
+ */
+ if (card->ext_csd.rev >= 6) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ EXT_CSD_POWER_ON,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+
+ /*
+ * The err can be -EBADMSG or 0,
+ * so check for success and update the flag
+ */
+ if (!err)
+ card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
+ }
+
+ /*
+ * Select timing interface
+ */
+ err = mmc_select_timing(card);
+ if (err)
+ goto free_card;
+
+ if (mmc_card_hs200(card)) {
+ err = mmc_hs200_tuning(card);
+ if (err)
+ goto free_card;
+
+ err = mmc_select_hs400(card);
+ if (err)
+ goto free_card;
+ } else if (mmc_card_hs(card)) {
+ /* Select the desired bus width optionally */
+ err = mmc_select_bus_width(card);
+ if (!IS_ERR_VALUE(err)) {
+ err = mmc_select_hs_ddr(card);
+ if (err)
+ goto free_card;
+ }
+ }
+
+ /*
+ * Choose the power class with selected bus interface
+ */
+ mmc_select_powerclass(card);
+
+ /*
+ * Enable HPI feature (if supported)
+ */
+ if (card->ext_csd.hpi) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HPI_MGMT, 1,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warn("%s: Enabling HPI failed\n",
+ mmc_hostname(card->host));
+ err = 0;
+ } else
+ card->ext_csd.hpi_en = 1;
+ }
+
+ /*
+	 * A cache size greater than 0 indicates that a cache
+	 * exists and can be turned on.
+ */
+ if (card->ext_csd.cache_size > 0) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CACHE_CTRL, 1,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+
+ /*
+		 * The cache is only turned on successfully if no error occurred.
+ */
+ if (err) {
+ pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
+ mmc_hostname(card->host), err);
+ card->ext_csd.cache_ctrl = 0;
+ err = 0;
+ } else {
+ card->ext_csd.cache_ctrl = 1;
+ }
+ }
+
+ /*
+	 * The mandatory minimum values for packed commands are
+	 * read: 5, write: 3.
+ */
+ if (card->ext_csd.max_packed_writes >= 3 &&
+ card->ext_csd.max_packed_reads >= 5 &&
+ host->caps2 & MMC_CAP2_PACKED_CMD) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_EXP_EVENTS_CTRL,
+ EXT_CSD_PACKED_EVENT_EN,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warn("%s: Enabling packed event failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.packed_event_en = 0;
+ err = 0;
+ } else {
+ card->ext_csd.packed_event_en = 1;
+ }
+ }
+
+ if (!oldcard)
+ host->card = card;
+
+ return 0;
+
+free_card:
+ if (!oldcard)
+ mmc_remove_card(card);
+err:
+ return err;
+}
+
+static int mmc_can_sleep(struct mmc_card *card)
+{
+ return (card && card->ext_csd.rev >= 3);
+}
+
+static int mmc_sleep(struct mmc_host *host)
+{
+ struct mmc_command cmd = {0};
+ struct mmc_card *card = host->card;
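+	/* sa_timeout is in 100ns units; convert to milliseconds, rounding up */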
+ unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
+ int err;
+
+ err = mmc_deselect_cards(host);
+ if (err)
+ return err;
+
+ cmd.opcode = MMC_SLEEP_AWAKE;
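+	/* RCA in bits [31:16]; bit 15 set selects Sleep (cleared means Awake) */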
+ cmd.arg = card->rca << 16;
+ cmd.arg |= 1 << 15;
+
+ /*
+ * If the max_busy_timeout of the host is specified, validate it against
+ * the sleep cmd timeout. A failure means we need to prevent the host
+	 * from doing hw busy detection, which is done by converting to an R1
+	 * response instead of an R1B.
+ */
+ if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.busy_timeout = timeout_ms;
+ }
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ return err;
+
+ /*
+	 * If the host does not wait while the card signals busy, then we
+	 * will have to wait out the sleep/awake timeout. Note, we cannot use the
+ * SEND_STATUS command to poll the status because that command (and most
+ * others) is invalid while the card sleeps.
+ */
+ if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
+ mmc_delay(timeout_ms);
+
+ return err;
+}
+
+static int mmc_can_poweroff_notify(const struct mmc_card *card)
+{
+ return card &&
+ mmc_card_mmc(card) &&
+ (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
+}
+
+static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
+{
+ unsigned int timeout = card->ext_csd.generic_cmd6_time;
+ int err;
+
+ /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
+ if (notify_type == EXT_CSD_POWER_OFF_LONG)
+ timeout = card->ext_csd.power_off_longtime;
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ notify_type, timeout, true, false, false);
+ if (err)
+ pr_err("%s: Power Off Notification timed out, %u\n",
+ mmc_hostname(card->host), timeout);
+
+ /* Disable the power off notification after the switch operation. */
+ card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
+
+ return err;
+}
+
+/*
+ * Host is being removed. Free up the current card.
+ */
+static void mmc_remove(struct mmc_host *host)
+{
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ mmc_remove_card(host->card);
+ host->card = NULL;
+}
+
+/*
+ * Card detection - card is alive.
+ */
+static int mmc_alive(struct mmc_host *host)
+{
+ return mmc_send_status(host->card, NULL);
+}
+
+/*
+ * Card detection callback from host.
+ */
+static void mmc_detect(struct mmc_host *host)
+{
+ int err;
+
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ mmc_get_card(host->card);
+
+ /*
+ * Just check if our card has been removed.
+ */
+ err = _mmc_detect_card_removed(host);
+
+ mmc_put_card(host->card);
+
+ if (err) {
+ mmc_remove(host);
+
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ }
+}
+
+static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
+{
+ int err = 0;
+ unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
+ EXT_CSD_POWER_OFF_LONG;
+
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ mmc_claim_host(host);
+
+ if (mmc_card_suspended(host->card))
+ goto out;
+
+ if (mmc_card_doing_bkops(host->card)) {
+ err = mmc_stop_bkops(host->card);
+ if (err)
+ goto out;
+ }
+
+ err = mmc_flush_cache(host->card);
+ if (err)
+ goto out;
+
+ if (mmc_can_poweroff_notify(host->card) &&
+ ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
+ err = mmc_poweroff_notify(host->card, notify_type);
+ else if (mmc_can_sleep(host->card))
+ err = mmc_sleep(host);
+ else if (!mmc_host_is_spi(host))
+ err = mmc_deselect_cards(host);
+
+ if (!err) {
+ mmc_power_off(host);
+ mmc_card_set_suspended(host->card);
+ }
+out:
+ mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Suspend callback
+ */
+static int mmc_suspend(struct mmc_host *host)
+{
+ int err;
+
+ err = _mmc_suspend(host, true);
+ if (!err) {
+ pm_runtime_disable(&host->card->dev);
+ pm_runtime_set_suspended(&host->card->dev);
+ }
+
+ return err;
+}
+
+/*
+ * This function tries to determine if the same card is still present
+ * and, if so, restore all state to it.
+ */
+static int _mmc_resume(struct mmc_host *host)
+{
+ int err = 0;
+
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ mmc_claim_host(host);
+
+ if (!mmc_card_suspended(host->card))
+ goto out;
+
+ mmc_power_up(host, host->card->ocr);
+ err = mmc_init_card(host, host->card->ocr, host->card);
+ mmc_card_clr_suspended(host->card);
+
+out:
+ mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Shutdown callback
+ */
+static int mmc_shutdown(struct mmc_host *host)
+{
+ int err = 0;
+
+ /*
+ * In a specific case for poweroff notify, we need to resume the card
+	 * before we can shut it down properly.
+ */
+ if (mmc_can_poweroff_notify(host->card) &&
+ !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
+ err = _mmc_resume(host);
+
+ if (!err)
+ err = _mmc_suspend(host, false);
+
+ return err;
+}
+
+/*
+ * Callback for resume.
+ */
+static int mmc_resume(struct mmc_host *host)
+{
+ int err = 0;
+
+ if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
+ err = _mmc_resume(host);
+ pm_runtime_set_active(&host->card->dev);
+ pm_runtime_mark_last_busy(&host->card->dev);
+ }
+ pm_runtime_enable(&host->card->dev);
+
+ return err;
+}
+
+/*
+ * Callback for runtime_suspend.
+ */
+static int mmc_runtime_suspend(struct mmc_host *host)
+{
+ int err;
+
+ if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
+ return 0;
+
+ err = _mmc_suspend(host, true);
+ if (err)
+ pr_err("%s: error %d doing aggressive suspend\n",
+ mmc_hostname(host), err);
+
+ return err;
+}
+
+/*
+ * Callback for runtime_resume.
+ */
+static int mmc_runtime_resume(struct mmc_host *host)
+{
+ int err;
+
+ if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
+ return 0;
+
+ err = _mmc_resume(host);
+ if (err)
+ pr_err("%s: error %d doing aggressive resume\n",
+ mmc_hostname(host), err);
+
+ return 0;
+}
+
+static int mmc_power_restore(struct mmc_host *host)
+{
+ int ret;
+
+ mmc_claim_host(host);
+ ret = mmc_init_card(host, host->card->ocr, host->card);
+ mmc_release_host(host);
+
+ return ret;
+}
+
+int mmc_can_reset(struct mmc_card *card)
+{
+ u8 rst_n_function;
+
+ rst_n_function = card->ext_csd.rst_n_function;
+ if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL(mmc_can_reset);
+
+static int mmc_reset(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ u32 status;
+
+ if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
+ return -EOPNOTSUPP;
+
+ if (!mmc_can_reset(card))
+ return -EOPNOTSUPP;
+
+ mmc_host_clk_hold(host);
+ mmc_set_clock(host, host->f_init);
+
+ host->ops->hw_reset(host);
+
+ /* If the reset has happened, then a status command will fail */
+ if (!mmc_send_status(card, &status)) {
+ mmc_host_clk_release(host);
+ return -ENOSYS;
+ }
+
+ /* Set initial state and call mmc_set_ios */
+ mmc_set_initial_state(host);
+ mmc_host_clk_release(host);
+
+ return mmc_power_restore(host);
+}
+
+static const struct mmc_bus_ops mmc_ops = {
+ .remove = mmc_remove,
+ .detect = mmc_detect,
+ .suspend = mmc_suspend,
+ .resume = mmc_resume,
+ .runtime_suspend = mmc_runtime_suspend,
+ .runtime_resume = mmc_runtime_resume,
+ .power_restore = mmc_power_restore,
+ .alive = mmc_alive,
+ .shutdown = mmc_shutdown,
+ .reset = mmc_reset,
+};
+
+/*
+ * Starting point for MMC card init.
+ */
+int mmc_attach_mmc(struct mmc_host *host)
+{
+ int err;
+ u32 ocr, rocr;
+
+ BUG_ON(!host);
+ WARN_ON(!host->claimed);
+
+ /* Set correct bus mode for MMC before attempting attach */
+ if (!mmc_host_is_spi(host))
+ mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
+
+ err = mmc_send_op_cond(host, 0, &ocr);
+ if (err)
+ return err;
+
+ mmc_attach_bus(host, &mmc_ops);
+ if (host->ocr_avail_mmc)
+ host->ocr_avail = host->ocr_avail_mmc;
+
+ /*
+ * We need to get OCR a different way for SPI.
+ */
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_read_ocr(host, 1, &ocr);
+ if (err)
+ goto err;
+ }
+
+ rocr = mmc_select_voltage(host, ocr);
+
+ /*
+ * Can we support the voltage of the card?
+ */
+ if (!rocr) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Detect and init the card.
+ */
+ err = mmc_init_card(host, rocr, NULL);
+ if (err)
+ goto err;
+
+ mmc_release_host(host);
+ err = mmc_add_card(host->card);
+ mmc_claim_host(host);
+ if (err)
+ goto remove_card;
+
+ return 0;
+
+remove_card:
+ mmc_release_host(host);
+ mmc_remove_card(host->card);
+ mmc_claim_host(host);
+ host->card = NULL;
+err:
+ mmc_detach_bus(host);
+
+ pr_err("%s: error %d whilst initialising MMC card\n",
+ mmc_hostname(host), err);
+
+ return err;
+}
diff --git a/kernel/drivers/mmc/core/mmc_ops.c b/kernel/drivers/mmc/core/mmc_ops.c
new file mode 100644
index 000000000..0ea042dc7
--- /dev/null
+++ b/kernel/drivers/mmc/core/mmc_ops.c
@@ -0,0 +1,785 @@
+/*
+ * linux/drivers/mmc/core/mmc_ops.c
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+
+#include "core.h"
+#include "mmc_ops.h"
+
+#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+
+static const u8 tuning_blk_pattern_4bit[] = {
+ 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
+ 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
+ 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
+ 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
+ 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
+ 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
+ 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
+ 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
+};
+
+static const u8 tuning_blk_pattern_8bit[] = {
+ 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
+ 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+ 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+ 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+ 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+ 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+ 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
+ 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
+ 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+ 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+ 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+ 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+ 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
+};
+
+static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
+ bool ignore_crc)
+{
+ int err;
+ struct mmc_command cmd = {0};
+
+ BUG_ON(!card);
+ BUG_ON(!card->host);
+
+ cmd.opcode = MMC_SEND_STATUS;
+ if (!mmc_host_is_spi(card->host))
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+ if (ignore_crc)
+ cmd.flags &= ~MMC_RSP_CRC;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ /* NOTE: callers are required to understand the difference
+ * between "native" and SPI format status words!
+ */
+ if (status)
+ *status = cmd.resp[0];
+
+ return 0;
+}
+
+int mmc_send_status(struct mmc_card *card, u32 *status)
+{
+ return __mmc_send_status(card, status, false);
+}
+
+static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
+{
+ int err;
+ struct mmc_command cmd = {0};
+
+ BUG_ON(!host);
+
+ cmd.opcode = MMC_SELECT_CARD;
+
+ if (card) {
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
+ }
+
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mmc_select_card(struct mmc_card *card)
+{
+ BUG_ON(!card);
+
+ return _mmc_select_card(card->host, card);
+}
+
+int mmc_deselect_cards(struct mmc_host *host)
+{
+ return _mmc_select_card(host, NULL);
+}
+
+/*
+ * Write the value specified in the device tree or board code into the optional
+ * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
+ * the drive strength of the DAT and CMD outputs. The actual meaning of a given
+ * value is hardware dependent.
+ * The presence of the DSR register can be determined from the CSD register,
+ * bit 76.
+ */
+int mmc_set_dsr(struct mmc_host *host)
+{
+ struct mmc_command cmd = {0};
+
+ cmd.opcode = MMC_SET_DSR;
+
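+	/* DSR value in bits [31:16]; the low 16 bits are stuff bits */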
+ cmd.arg = (host->dsr << 16) | 0xffff;
+ cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+}
+
+int mmc_go_idle(struct mmc_host *host)
+{
+ int err;
+ struct mmc_command cmd = {0};
+
+ /*
+ * Non-SPI hosts need to prevent chipselect going active during
+ * GO_IDLE; that would put chips into SPI mode. Remind them of
+ * that in case of hardware that won't pull up DAT3/nCS otherwise.
+ *
+ * SPI hosts ignore ios.chip_select; it's managed according to
+ * rules that must accommodate non-MMC slaves which this layer
+ * won't even know about.
+ */
+ if (!mmc_host_is_spi(host)) {
+ mmc_set_chip_select(host, MMC_CS_HIGH);
+ mmc_delay(1);
+ }
+
+ cmd.opcode = MMC_GO_IDLE_STATE;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+
+ mmc_delay(1);
+
+ if (!mmc_host_is_spi(host)) {
+ mmc_set_chip_select(host, MMC_CS_DONTCARE);
+ mmc_delay(1);
+ }
+
+ host->use_spi_crc = 0;
+
+ return err;
+}
+
+int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
+{
+ struct mmc_command cmd = {0};
+ int i, err = 0;
+
+ BUG_ON(!host);
+
+ cmd.opcode = MMC_SEND_OP_COND;
+ cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
+
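+	/* Poll up to 100 times, 10ms apart (~1s), until the card leaves busy */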
+ for (i = 100; i; i--) {
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ break;
+
+ /* if we're just probing, do a single pass */
+ if (ocr == 0)
+ break;
+
+ /* otherwise wait until reset completes */
+ if (mmc_host_is_spi(host)) {
+ if (!(cmd.resp[0] & R1_SPI_IDLE))
+ break;
+ } else {
+ if (cmd.resp[0] & MMC_CARD_BUSY)
+ break;
+ }
+
+ err = -ETIMEDOUT;
+
+ mmc_delay(10);
+ }
+
+ if (rocr && !mmc_host_is_spi(host))
+ *rocr = cmd.resp[0];
+
+ return err;
+}
+
+int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
+{
+ int err;
+ struct mmc_command cmd = {0};
+
+ BUG_ON(!host);
+ BUG_ON(!cid);
+
+ cmd.opcode = MMC_ALL_SEND_CID;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
+
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ memcpy(cid, cmd.resp, sizeof(u32) * 4);
+
+ return 0;
+}
+
+int mmc_set_relative_addr(struct mmc_card *card)
+{
+ int err;
+ struct mmc_command cmd = {0};
+
+ BUG_ON(!card);
+ BUG_ON(!card->host);
+
+ cmd.opcode = MMC_SET_RELATIVE_ADDR;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
+{
+ int err;
+ struct mmc_command cmd = {0};
+
+ BUG_ON(!host);
+ BUG_ON(!cxd);
+
+ cmd.opcode = opcode;
+ cmd.arg = arg;
+ cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ memcpy(cxd, cmd.resp, sizeof(u32) * 4);
+
+ return 0;
+}
+
+/*
+ * NOTE: the caller is required to pass a DMA-capable buffer for *buf,
+ * or an on-stack buffer (with some overhead in the callee).
+ */
+static int
+mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
+ u32 opcode, void *buf, unsigned len)
+{
+ struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+
+ /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
+ * rely on callers to never use this with "native" calls for reading
+ * CSD or CID. Native versions of those commands use the R2 type,
+ * not R1 plus a data block.
+ */
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = len;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, buf, len);
+
+ if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
+ /*
+ * The spec states that CSD and CID accesses have a timeout
+ * of 64 clock cycles.
+ */
+ data.timeout_ns = 0;
+ data.timeout_clks = 64;
+ } else
+ mmc_set_data_timeout(&data, card);
+
+ mmc_wait_for_req(host, &mrq);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
+
+int mmc_send_csd(struct mmc_card *card, u32 *csd)
+{
+ int ret, i;
+ u32 *csd_tmp;
+
+ if (!mmc_host_is_spi(card->host))
+ return mmc_send_cxd_native(card->host, card->rca << 16,
+ csd, MMC_SEND_CSD);
+
+ csd_tmp = kzalloc(16, GFP_KERNEL);
+ if (!csd_tmp)
+ return -ENOMEM;
+
+ ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < 4; i++)
+ csd[i] = be32_to_cpu(csd_tmp[i]);
+
+err:
+ kfree(csd_tmp);
+ return ret;
+}
+
+int mmc_send_cid(struct mmc_host *host, u32 *cid)
+{
+ int ret, i;
+ u32 *cid_tmp;
+
+ if (!mmc_host_is_spi(host)) {
+ if (!host->card)
+ return -EINVAL;
+ return mmc_send_cxd_native(host, host->card->rca << 16,
+ cid, MMC_SEND_CID);
+ }
+
+ cid_tmp = kzalloc(16, GFP_KERNEL);
+ if (!cid_tmp)
+ return -ENOMEM;
+
+ ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < 4; i++)
+ cid[i] = be32_to_cpu(cid_tmp[i]);
+
+err:
+ kfree(cid_tmp);
+ return ret;
+}
+
+int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
+{
+ int err;
+ u8 *ext_csd;
+
+ if (!card || !new_ext_csd)
+ return -EINVAL;
+
+ if (!mmc_can_ext_csd(card))
+ return -EOPNOTSUPP;
+
+ /*
+ * As the ext_csd is so large and mostly unused, we don't store the
+ * raw block in mmc_card.
+ */
+ ext_csd = kzalloc(512, GFP_KERNEL);
+ if (!ext_csd)
+ return -ENOMEM;
+
+ err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
+ 512);
+ if (err)
+ kfree(ext_csd);
+ else
+ *new_ext_csd = ext_csd;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
+
+int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
+{
+ struct mmc_command cmd = {0};
+ int err;
+
+ cmd.opcode = MMC_SPI_READ_OCR;
+ cmd.arg = highcap ? (1 << 30) : 0;
+ cmd.flags = MMC_RSP_SPI_R3;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+
+ *ocrp = cmd.resp[1];
+ return err;
+}
+
+int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
+{
+ struct mmc_command cmd = {0};
+ int err;
+
+ cmd.opcode = MMC_SPI_CRC_ON_OFF;
+ cmd.flags = MMC_RSP_SPI_R1;
+ cmd.arg = use_crc;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (!err)
+ host->use_spi_crc = use_crc;
+ return err;
+}
+
+/**
+ * __mmc_switch - modify EXT_CSD register
+ * @card: the MMC card associated with the data transfer
+ * @set: cmd set values
+ * @index: EXT_CSD register index
+ * @value: value to program into EXT_CSD register
+ * @timeout_ms: timeout (ms) for operation performed by register write,
+ * timeout of zero implies maximum possible timeout
+ * @use_busy_signal: use the busy signal as response type
+ * @send_status: send status cmd to poll for busy
+ * @ignore_crc: ignore CRC errors when sending status cmd to poll for busy
+ *
+ * Modifies the EXT_CSD register for selected card.
+ */
+int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+ unsigned int timeout_ms, bool use_busy_signal, bool send_status,
+ bool ignore_crc)
+{
+ struct mmc_host *host = card->host;
+ int err;
+ struct mmc_command cmd = {0};
+ unsigned long timeout;
+ u32 status = 0;
+ bool use_r1b_resp = use_busy_signal;
+
+ /*
+ * If the cmd timeout and the max_busy_timeout of the host are both
+ * specified, let's validate them. A failure means we need to prevent
+ * the host from doing hw busy detection, which is done by converting
+ * to a R1 response instead of a R1B.
+ */
+ if (timeout_ms && host->max_busy_timeout &&
+ (timeout_ms > host->max_busy_timeout))
+ use_r1b_resp = false;
+
+ cmd.opcode = MMC_SWITCH;
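+ /* CMD6 argument: access mode [25:24], EXT_CSD index [23:16], value [15:8], command set [2:0] */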
+ cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (index << 16) |
+ (value << 8) |
+ set;
+ cmd.flags = MMC_CMD_AC;
+ if (use_r1b_resp) {
+ cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
+ /*
+ * A busy_timeout of zero means the host can decide to use
+ * whatever value it finds suitable.
+ */
+ cmd.busy_timeout = timeout_ms;
+ } else {
+ cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
+ }
+
+ if (index == EXT_CSD_SANITIZE_START)
+ cmd.sanitize_busy = true;
+
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ /* No need to check card status in case of unblocking command */
+ if (!use_busy_signal)
+ return 0;
+
+ /*
+ * CRC errors shall only be ignored in cases where CMD13 is used to poll
+ * to detect busy completion.
+ */
+ if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
+ ignore_crc = false;
+
+ /* We have an unspecified cmd timeout, use the fallback value. */
+ if (!timeout_ms)
+ timeout_ms = MMC_OPS_TIMEOUT_MS;
+
+ /* Must check status to be sure of no errors. */
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ if (send_status) {
+ err = __mmc_send_status(card, &status, ignore_crc);
+ if (err)
+ return err;
+ }
+ if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
+ break;
+ if (mmc_host_is_spi(host))
+ break;
+
+ /*
+ * If we are not allowed to issue a status command and the host
+ * doesn't support MMC_CAP_WAIT_WHILE_BUSY, we can only rely on
+ * waiting for the stated timeout to be sufficient.
+ */
+ if (!send_status) {
+ mmc_delay(timeout_ms);
+ return 0;
+ }
+
+ /* Timeout if the device never leaves the program state. */
+ if (time_after(jiffies, timeout)) {
+ pr_err("%s: Card stuck in programming state! %s\n",
+ mmc_hostname(host), __func__);
+ return -ETIMEDOUT;
+ }
+ } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
+
+ if (mmc_host_is_spi(host)) {
+ if (status & R1_SPI_ILLEGAL_COMMAND)
+ return -EBADMSG;
+ } else {
+ if (status & 0xFDFFA000)
+ pr_warn("%s: unexpected status %#x after switch\n",
+ mmc_hostname(host), status);
+ if (status & R1_SWITCH_ERROR)
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__mmc_switch);
+
+int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+ unsigned int timeout_ms)
+{
+ return __mmc_switch(card, set, index, value, timeout_ms, true, true,
+ false);
+}
+EXPORT_SYMBOL_GPL(mmc_switch);
+
+int mmc_send_tuning(struct mmc_host *host)
+{
+ struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg;
+ struct mmc_ios *ios = &host->ios;
+ const u8 *tuning_block_pattern;
+ int size, err = 0;
+ u8 *data_buf;
+ u32 opcode;
+
+ if (ios->bus_width == MMC_BUS_WIDTH_8) {
+ tuning_block_pattern = tuning_blk_pattern_8bit;
+ size = sizeof(tuning_blk_pattern_8bit);
+ opcode = MMC_SEND_TUNING_BLOCK_HS200;
+ } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
+ tuning_block_pattern = tuning_blk_pattern_4bit;
+ size = sizeof(tuning_blk_pattern_4bit);
+ opcode = MMC_SEND_TUNING_BLOCK;
+ } else
+ return -EINVAL;
+
+ data_buf = kzalloc(size, GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ cmd.opcode = opcode;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = size;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+
+ /*
+ * According to the tuning specs, the tuning process normally
+ * takes fewer than 40 executions of CMD19, and the timeout
+ * value should be shorter than 150 ms.
+ */
+ data.timeout_ns = 150 * NSEC_PER_MSEC;
+
+ data.sg = &sg;
+ data.sg_len = 1;
+ sg_init_one(&sg, data_buf, size);
+
+ mmc_wait_for_req(host, &mrq);
+
+ if (cmd.error) {
+ err = cmd.error;
+ goto out;
+ }
+
+ if (data.error) {
+ err = data.error;
+ goto out;
+ }
+
+ if (memcmp(data_buf, tuning_block_pattern, size))
+ err = -EIO;
+
+out:
+ kfree(data_buf);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mmc_send_tuning);
+
+static int
+mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
+ u8 len)
+{
+ struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg;
+ u8 *data_buf;
+ u8 *test_buf;
+ int i, err;
+ static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
+ static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
+
+ /* DMA onto the stack is unsafe/nonportable, but callers of this
+ * routine normally provide temporary on-stack buffers ...
+ */
+ data_buf = kmalloc(len, GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+
+ if (len == 8)
+ test_buf = testdata_8bit;
+ else if (len == 4)
+ test_buf = testdata_4bit;
+ else {
+ pr_err("%s: Invalid bus_width %d\n",
+ mmc_hostname(host), len);
+ kfree(data_buf);
+ return -EINVAL;
+ }
+
+ if (opcode == MMC_BUS_TEST_W)
+ memcpy(data_buf, test_buf, len);
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+
+ /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
+ * rely on callers to never use this with "native" calls for reading
+ * CSD or CID. Native versions of those commands use the R2 type,
+ * not R1 plus a data block.
+ */
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = len;
+ data.blocks = 1;
+ if (opcode == MMC_BUS_TEST_R)
+ data.flags = MMC_DATA_READ;
+ else
+ data.flags = MMC_DATA_WRITE;
+
+ data.sg = &sg;
+ data.sg_len = 1;
+ mmc_set_data_timeout(&data, card);
+ sg_init_one(&sg, data_buf, len);
+ mmc_wait_for_req(host, &mrq);
+ err = 0;
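+ /* BUS_TEST_R returns the bitwise inverse of the BUS_TEST_W pattern, so each XOR must yield 0xff */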
+ if (opcode == MMC_BUS_TEST_R) {
+ for (i = 0; i < len / 4; i++)
+ if ((test_buf[i] ^ data_buf[i]) != 0xff) {
+ err = -EIO;
+ break;
+ }
+ }
+ kfree(data_buf);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return err;
+}
+
+int mmc_bus_test(struct mmc_card *card, u8 bus_width)
+{
+ int err, width;
+
+ if (bus_width == MMC_BUS_WIDTH_8)
+ width = 8;
+ else if (bus_width == MMC_BUS_WIDTH_4)
+ width = 4;
+ else if (bus_width == MMC_BUS_WIDTH_1)
+ return 0; /* no need for test */
+ else
+ return -EINVAL;
+
+ /*
+ * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
+ * is a problem. This improves chances that the test will work.
+ */
+ mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
+ err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
+ return err;
+}
+
+int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
+{
+ struct mmc_command cmd = {0};
+ unsigned int opcode;
+ int err;
+
+ if (!card->ext_csd.hpi) {
+ pr_warn("%s: Card didn't support HPI command\n",
+ mmc_hostname(card->host));
+ return -EINVAL;
+ }
+
+ opcode = card->ext_csd.hpi_cmd;
+ if (opcode == MMC_STOP_TRANSMISSION)
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ else if (opcode == MMC_SEND_STATUS)
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ cmd.opcode = opcode;
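+ /* Argument: RCA in bits [31:16], HPI bit set in bit [0] */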
+ cmd.arg = card->rca << 16 | 1;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err) {
+ pr_warn("%s: error %d interrupting operation. "
+ "HPI command response %#x\n", mmc_hostname(card->host),
+ err, cmd.resp[0]);
+ return err;
+ }
+ if (status)
+ *status = cmd.resp[0];
+
+ return 0;
+}
+
+int mmc_can_ext_csd(struct mmc_card *card)
+{
+ return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
+}
diff --git a/kernel/drivers/mmc/core/mmc_ops.h b/kernel/drivers/mmc/core/mmc_ops.h
new file mode 100644
index 000000000..6f4b00ed9
--- /dev/null
+++ b/kernel/drivers/mmc/core/mmc_ops.h
@@ -0,0 +1,32 @@
+/*
+ * linux/drivers/mmc/core/mmc_ops.h
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef _MMC_MMC_OPS_H
+#define _MMC_MMC_OPS_H
+
+int mmc_select_card(struct mmc_card *card);
+int mmc_deselect_cards(struct mmc_host *host);
+int mmc_set_dsr(struct mmc_host *host);
+int mmc_go_idle(struct mmc_host *host);
+int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
+int mmc_all_send_cid(struct mmc_host *host, u32 *cid);
+int mmc_set_relative_addr(struct mmc_card *card);
+int mmc_send_csd(struct mmc_card *card, u32 *csd);
+int mmc_send_status(struct mmc_card *card, u32 *status);
+int mmc_send_cid(struct mmc_host *host, u32 *cid);
+int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
+int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
+int mmc_bus_test(struct mmc_card *card, u8 bus_width);
+int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
+int mmc_can_ext_csd(struct mmc_card *card);
+
+#endif
+
diff --git a/kernel/drivers/mmc/core/pwrseq.c b/kernel/drivers/mmc/core/pwrseq.c
new file mode 100644
index 000000000..4c1d1757d
--- /dev/null
+++ b/kernel/drivers/mmc/core/pwrseq.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * MMC power sequence management
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include <linux/mmc/host.h>
+
+#include "pwrseq.h"
+
+struct mmc_pwrseq_match {
+ const char *compatible;
+ struct mmc_pwrseq *(*alloc)(struct mmc_host *host, struct device *dev);
+};
+
+static struct mmc_pwrseq_match pwrseq_match[] = {
+ {
+ .compatible = "mmc-pwrseq-simple",
+ .alloc = mmc_pwrseq_simple_alloc,
+ }, {
+ .compatible = "mmc-pwrseq-emmc",
+ .alloc = mmc_pwrseq_emmc_alloc,
+ },
+};
+
+static struct mmc_pwrseq_match *mmc_pwrseq_find(struct device_node *np)
+{
+ struct mmc_pwrseq_match *match = ERR_PTR(-ENODEV);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pwrseq_match); i++) {
+ if (of_device_is_compatible(np, pwrseq_match[i].compatible)) {
+ match = &pwrseq_match[i];
+ break;
+ }
+ }
+
+ return match;
+}
+
+int mmc_pwrseq_alloc(struct mmc_host *host)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct mmc_pwrseq_match *match;
+ struct mmc_pwrseq *pwrseq;
+ int ret = 0;
+
+ np = of_parse_phandle(host->parent->of_node, "mmc-pwrseq", 0);
+ if (!np)
+ return 0;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ match = mmc_pwrseq_find(np);
+ if (IS_ERR(match)) {
+ ret = PTR_ERR(match);
+ goto err;
+ }
+
+ pwrseq = match->alloc(host, &pdev->dev);
+ if (IS_ERR(pwrseq)) {
+ ret = PTR_ERR(pwrseq);
+ goto err;
+ }
+
+ host->pwrseq = pwrseq;
+ dev_info(host->parent, "allocated mmc-pwrseq\n");
+
+err:
+ of_node_put(np);
+ return ret;
+}
+
+void mmc_pwrseq_pre_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq && pwrseq->ops && pwrseq->ops->pre_power_on)
+ pwrseq->ops->pre_power_on(host);
+}
+
+void mmc_pwrseq_post_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq && pwrseq->ops && pwrseq->ops->post_power_on)
+ pwrseq->ops->post_power_on(host);
+}
+
+void mmc_pwrseq_power_off(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq && pwrseq->ops && pwrseq->ops->power_off)
+ pwrseq->ops->power_off(host);
+}
+
+void mmc_pwrseq_free(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq && pwrseq->ops && pwrseq->ops->free)
+ pwrseq->ops->free(host);
+
+ host->pwrseq = NULL;
+}
diff --git a/kernel/drivers/mmc/core/pwrseq.h b/kernel/drivers/mmc/core/pwrseq.h
new file mode 100644
index 000000000..096da48c6
--- /dev/null
+++ b/kernel/drivers/mmc/core/pwrseq.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _MMC_CORE_PWRSEQ_H
+#define _MMC_CORE_PWRSEQ_H
+
+struct mmc_pwrseq_ops {
+ void (*pre_power_on)(struct mmc_host *host);
+ void (*post_power_on)(struct mmc_host *host);
+ void (*power_off)(struct mmc_host *host);
+ void (*free)(struct mmc_host *host);
+};
+
+struct mmc_pwrseq {
+ struct mmc_pwrseq_ops *ops;
+};
+
+#ifdef CONFIG_OF
+
+int mmc_pwrseq_alloc(struct mmc_host *host);
+void mmc_pwrseq_pre_power_on(struct mmc_host *host);
+void mmc_pwrseq_post_power_on(struct mmc_host *host);
+void mmc_pwrseq_power_off(struct mmc_host *host);
+void mmc_pwrseq_free(struct mmc_host *host);
+
+struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
+ struct device *dev);
+struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
+ struct device *dev);
+
+#else
+
+static inline int mmc_pwrseq_alloc(struct mmc_host *host) { return 0; }
+static inline void mmc_pwrseq_pre_power_on(struct mmc_host *host) {}
+static inline void mmc_pwrseq_post_power_on(struct mmc_host *host) {}
+static inline void mmc_pwrseq_power_off(struct mmc_host *host) {}
+static inline void mmc_pwrseq_free(struct mmc_host *host) {}
+
+#endif
+
+#endif
diff --git a/kernel/drivers/mmc/core/pwrseq_emmc.c b/kernel/drivers/mmc/core/pwrseq_emmc.c
new file mode 100644
index 000000000..9d6d2fb21
--- /dev/null
+++ b/kernel/drivers/mmc/core/pwrseq_emmc.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ *
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Simple eMMC hardware reset provider
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/reboot.h>
+
+#include <linux/mmc/host.h>
+
+#include "pwrseq.h"
+
+struct mmc_pwrseq_emmc {
+ struct mmc_pwrseq pwrseq;
+ struct notifier_block reset_nb;
+ struct gpio_desc *reset_gpio;
+};
+
+static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
+{
+ gpiod_set_value(pwrseq->reset_gpio, 1);
+ udelay(1);
+ gpiod_set_value(pwrseq->reset_gpio, 0);
+ udelay(200);
+}
+
+static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
+{
+ struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
+ struct mmc_pwrseq_emmc, pwrseq);
+
+ __mmc_pwrseq_emmc_reset(pwrseq);
+}
+
+static void mmc_pwrseq_emmc_free(struct mmc_host *host)
+{
+ struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
+ struct mmc_pwrseq_emmc, pwrseq);
+
+ unregister_restart_handler(&pwrseq->reset_nb);
+ gpiod_put(pwrseq->reset_gpio);
+ kfree(pwrseq);
+}
+
+static struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
+ .post_power_on = mmc_pwrseq_emmc_reset,
+ .free = mmc_pwrseq_emmc_free,
+};
+
+static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ struct mmc_pwrseq_emmc *pwrseq = container_of(this,
+ struct mmc_pwrseq_emmc, reset_nb);
+
+ __mmc_pwrseq_emmc_reset(pwrseq);
+ return NOTIFY_DONE;
+}
+
+struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
+ struct device *dev)
+{
+ struct mmc_pwrseq_emmc *pwrseq;
+ int ret = 0;
+
+ pwrseq = kzalloc(sizeof(struct mmc_pwrseq_emmc), GFP_KERNEL);
+ if (!pwrseq)
+ return ERR_PTR(-ENOMEM);
+
+ pwrseq->reset_gpio = gpiod_get_index(dev, "reset", 0, GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->reset_gpio)) {
+ ret = PTR_ERR(pwrseq->reset_gpio);
+ goto free;
+ }
+
+ /*
+ * Register a restart handler to ensure the eMMC is also reset from
+ * emergency_reboot(); priority 129 schedules it just before the
+ * system reboot.
+ */
+ pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
+ pwrseq->reset_nb.priority = 129;
+ register_restart_handler(&pwrseq->reset_nb);
+
+ pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
+
+ return &pwrseq->pwrseq;
+free:
+ kfree(pwrseq);
+ return ERR_PTR(ret);
+}
diff --git a/kernel/drivers/mmc/core/pwrseq_simple.c b/kernel/drivers/mmc/core/pwrseq_simple.c
new file mode 100644
index 000000000..0b14b83a5
--- /dev/null
+++ b/kernel/drivers/mmc/core/pwrseq_simple.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Simple MMC power sequence management
+ */
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+
+#include <linux/mmc/host.h>
+
+#include "pwrseq.h"
+
+struct mmc_pwrseq_simple {
+ struct mmc_pwrseq pwrseq;
+ bool clk_enabled;
+ struct clk *ext_clk;
+ int nr_gpios;
+ struct gpio_desc *reset_gpios[0];
+};
+
+static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
+ int value)
+{
+ int i;
+
+ for (i = 0; i < pwrseq->nr_gpios; i++)
+ if (!IS_ERR(pwrseq->reset_gpios[i]))
+ gpiod_set_value_cansleep(pwrseq->reset_gpios[i], value);
+}
+
+static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
+ struct mmc_pwrseq_simple, pwrseq);
+
+ if (!IS_ERR(pwrseq->ext_clk) && !pwrseq->clk_enabled) {
+ clk_prepare_enable(pwrseq->ext_clk);
+ pwrseq->clk_enabled = true;
+ }
+
+ mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
+}
+
+static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
+ struct mmc_pwrseq_simple, pwrseq);
+
+ mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
+}
+
+static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
+{
+ struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
+ struct mmc_pwrseq_simple, pwrseq);
+
+ mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
+
+ if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) {
+ clk_disable_unprepare(pwrseq->ext_clk);
+ pwrseq->clk_enabled = false;
+ }
+}
+
+static void mmc_pwrseq_simple_free(struct mmc_host *host)
+{
+ struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
+ struct mmc_pwrseq_simple, pwrseq);
+ int i;
+
+ for (i = 0; i < pwrseq->nr_gpios; i++)
+ if (!IS_ERR(pwrseq->reset_gpios[i]))
+ gpiod_put(pwrseq->reset_gpios[i]);
+
+ if (!IS_ERR(pwrseq->ext_clk))
+ clk_put(pwrseq->ext_clk);
+
+ kfree(pwrseq);
+}
+
+static struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
+ .pre_power_on = mmc_pwrseq_simple_pre_power_on,
+ .post_power_on = mmc_pwrseq_simple_post_power_on,
+ .power_off = mmc_pwrseq_simple_power_off,
+ .free = mmc_pwrseq_simple_free,
+};
+
+struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
+ struct device *dev)
+{
+ struct mmc_pwrseq_simple *pwrseq;
+ int i, nr_gpios, ret = 0;
+
+ nr_gpios = of_gpio_named_count(dev->of_node, "reset-gpios");
+ if (nr_gpios < 0)
+ nr_gpios = 0;
+
+ pwrseq = kzalloc(sizeof(struct mmc_pwrseq_simple) + nr_gpios *
+ sizeof(struct gpio_desc *), GFP_KERNEL);
+ if (!pwrseq)
+ return ERR_PTR(-ENOMEM);
+
+ pwrseq->ext_clk = clk_get(dev, "ext_clock");
+ if (IS_ERR(pwrseq->ext_clk) &&
+ PTR_ERR(pwrseq->ext_clk) != -ENOENT) {
+ ret = PTR_ERR(pwrseq->ext_clk);
+ goto free;
+ }
+
+ for (i = 0; i < nr_gpios; i++) {
+ pwrseq->reset_gpios[i] = gpiod_get_index(dev, "reset", i,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pwrseq->reset_gpios[i]) &&
+ PTR_ERR(pwrseq->reset_gpios[i]) != -ENOENT &&
+ PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) {
+ ret = PTR_ERR(pwrseq->reset_gpios[i]);
+
+ while (i--)
+ gpiod_put(pwrseq->reset_gpios[i]);
+
+ goto clk_put;
+ }
+ }
+
+ pwrseq->nr_gpios = nr_gpios;
+ pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
+
+ return &pwrseq->pwrseq;
+clk_put:
+ if (!IS_ERR(pwrseq->ext_clk))
+ clk_put(pwrseq->ext_clk);
+free:
+ kfree(pwrseq);
+ return ERR_PTR(ret);
+}
diff --git a/kernel/drivers/mmc/core/quirks.c b/kernel/drivers/mmc/core/quirks.c
new file mode 100644
index 000000000..dd1d1e0fe
--- /dev/null
+++ b/kernel/drivers/mmc/core/quirks.c
@@ -0,0 +1,99 @@
+/*
+ * This file contains work-arounds for many known SD/MMC
+ * and SDIO hardware bugs.
+ *
+ * Copyright (c) 2011 Andrei Warkentin <andreiw@motorola.com>
+ * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
+ * Inspired from pci fixup code:
+ * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_ids.h>
+
+#ifndef SDIO_VENDOR_ID_TI
+#define SDIO_VENDOR_ID_TI 0x0097
+#endif
+
+#ifndef SDIO_DEVICE_ID_TI_WL1271
+#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+#endif
+
+#ifndef SDIO_VENDOR_ID_STE
+#define SDIO_VENDOR_ID_STE 0x0020
+#endif
+
+#ifndef SDIO_DEVICE_ID_STE_CW1200
+#define SDIO_DEVICE_ID_STE_CW1200 0x2280
+#endif
+
+#ifndef SDIO_DEVICE_ID_MARVELL_8797_F0
+#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
+#endif
+
+/*
+ * This hook just adds a quirk for all sdio devices
+ */
+static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
+{
+ if (mmc_card_sdio(card))
+ card->quirks |= data;
+}
+
+static const struct mmc_fixup mmc_fixup_methods[] = {
+ /* by default sdio devices are considered CLK_GATING broken */
+ /* good cards will be whitelisted as they are tested */
+ SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID,
+ add_quirk_for_sdio_devices,
+ MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ add_quirk, MMC_QUIRK_DISABLE_CD),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200,
+ add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
+ add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
+
+ END_FIXUP
+};
+
+void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table)
+{
+ const struct mmc_fixup *f;
+ u64 rev = cid_rev_card(card);
+
+ /* Non-core specific workarounds. */
+ if (!table)
+ table = mmc_fixup_methods;
+
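+ /* Apply every fixup whose manfid/oemid/name, CIS ids and revision range all match this card */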
+ for (f = table; f->vendor_fixup; f++) {
+ if ((f->manfid == CID_MANFID_ANY ||
+ f->manfid == card->cid.manfid) &&
+ (f->oemid == CID_OEMID_ANY ||
+ f->oemid == card->cid.oemid) &&
+ (f->name == CID_NAME_ANY ||
+ !strncmp(f->name, card->cid.prod_name,
+ sizeof(card->cid.prod_name))) &&
+ (f->cis_vendor == card->cis.vendor ||
+ f->cis_vendor == (u16) SDIO_ANY_ID) &&
+ (f->cis_device == card->cis.device ||
+ f->cis_device == (u16) SDIO_ANY_ID) &&
+ rev >= f->rev_start && rev <= f->rev_end) {
+ dev_dbg(&card->dev, "calling %pf\n", f->vendor_fixup);
+ f->vendor_fixup(card, f->data);
+ }
+ }
+}
+EXPORT_SYMBOL(mmc_fixup_device);
diff --git a/kernel/drivers/mmc/core/sd.c b/kernel/drivers/mmc/core/sd.c
new file mode 100644
index 000000000..31a9ef256
--- /dev/null
+++ b/kernel/drivers/mmc/core/sd.c
@@ -0,0 +1,1281 @@
+/*
+ * linux/drivers/mmc/core/sd.c
+ *
+ * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
+ * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
+ * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#include "core.h"
+#include "bus.h"
+#include "mmc_ops.h"
+#include "sd.h"
+#include "sd_ops.h"
+
+static const unsigned int tran_exp[] = {
+ 10000, 100000, 1000000, 10000000,
+ 0, 0, 0, 0
+};
+
+static const unsigned char tran_mant[] = {
+ 0, 10, 12, 13, 15, 20, 25, 30,
+ 35, 40, 45, 50, 55, 60, 70, 80,
+};
+
+static const unsigned int tacc_exp[] = {
+ 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
+};
+
+static const unsigned int tacc_mant[] = {
+ 0, 10, 12, 13, 15, 20, 25, 30,
+ 35, 40, 45, 50, 55, 60, 70, 80,
+};
+
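+/* AU (allocation unit) sizes in 512-byte sectors, indexed by the 4-bit AU_SIZE field of the SD Status register */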
+static const unsigned int sd_au_size[] = {
+ 0, SZ_16K / 512, SZ_32K / 512, SZ_64K / 512,
+ SZ_128K / 512, SZ_256K / 512, SZ_512K / 512, SZ_1M / 512,
+ SZ_2M / 512, SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
+ SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
+};
+
+#define UNSTUFF_BITS(resp,start,size) \
+ ({ \
+ const int __size = size; \
+ const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
+ const int __off = 3 - ((start) / 32); \
+ const int __shft = (start) & 31; \
+ u32 __res; \
+ \
+ __res = resp[__off] >> __shft; \
+ if (__size + __shft > 32) \
+ __res |= resp[__off-1] << ((32 - __shft) % 32); \
+ __res & __mask; \
+ })
+
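+/*
+ * Example: UNSTUFF_BITS(resp, 120, 8) extracts CID bits [127:120] (the
+ * manufacturer ID), which live in the top byte of resp[0].
+ */
+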
+/*
+ * Given the decoded CSD structure, decode the raw CID to our CID structure.
+ */
+void mmc_decode_cid(struct mmc_card *card)
+{
+ u32 *resp = card->raw_cid;
+
+ memset(&card->cid, 0, sizeof(struct mmc_cid));
+
+ /*
+ * SD doesn't currently have a version field so we will
+ * have to assume we can parse this.
+ */
+ card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
+ card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
+ card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
+ card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
+ card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
+ card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
+ card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
+ card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4);
+ card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4);
+ card->cid.serial = UNSTUFF_BITS(resp, 24, 32);
+ card->cid.year = UNSTUFF_BITS(resp, 12, 8);
+ card->cid.month = UNSTUFF_BITS(resp, 8, 4);
+
+ card->cid.year += 2000; /* SD cards year offset */
+}
+
+/*
+ * Given a 128-bit response, decode to our card CSD structure.
+ */
+static int mmc_decode_csd(struct mmc_card *card)
+{
+ struct mmc_csd *csd = &card->csd;
+ unsigned int e, m, csd_struct;
+ u32 *resp = card->raw_csd;
+
+ csd_struct = UNSTUFF_BITS(resp, 126, 2);
+
+ switch (csd_struct) {
+ case 0:
+ m = UNSTUFF_BITS(resp, 115, 4);
+ e = UNSTUFF_BITS(resp, 112, 3);
+ csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
+ csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
+
+ m = UNSTUFF_BITS(resp, 99, 4);
+ e = UNSTUFF_BITS(resp, 96, 3);
+ csd->max_dtr = tran_exp[e] * tran_mant[m];
+ csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
+
+ e = UNSTUFF_BITS(resp, 47, 3);
+ m = UNSTUFF_BITS(resp, 62, 12);
+ csd->capacity = (1 + m) << (e + 2);
+
+ csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
+ csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
+ csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
+ csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
+ csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
+ csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
+ csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
+ csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
+
+ if (UNSTUFF_BITS(resp, 46, 1)) {
+ csd->erase_size = 1;
+ } else if (csd->write_blkbits >= 9) {
+ csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
+ csd->erase_size <<= csd->write_blkbits - 9;
+ }
+ break;
+ case 1:
+ /*
+ * This is a block-addressed SDHC or SDXC card. Most
+ * interesting fields are unused and have fixed
+ * values. To avoid getting tripped by buggy cards,
+ * we assume those fixed values ourselves.
+ */
+ mmc_card_set_blockaddr(card);
+
+ csd->tacc_ns = 0; /* Unused */
+ csd->tacc_clks = 0; /* Unused */
+
+ m = UNSTUFF_BITS(resp, 99, 4);
+ e = UNSTUFF_BITS(resp, 96, 3);
+ csd->max_dtr = tran_exp[e] * tran_mant[m];
+ csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
+ csd->c_size = UNSTUFF_BITS(resp, 48, 22);
+
+ /* SDXC cards have a minimum C_SIZE of 0x00FFFF */
+ if (csd->c_size >= 0xFFFF)
+ mmc_card_set_ext_capacity(card);
+
+ m = UNSTUFF_BITS(resp, 48, 22);
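+ /* Capacity in 512-byte sectors: (C_SIZE + 1) * 1024, i.e. (C_SIZE + 1) * 512 KiB */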
+ csd->capacity = (1 + m) << 10;
+
+ csd->read_blkbits = 9;
+ csd->read_partial = 0;
+ csd->write_misalign = 0;
+ csd->read_misalign = 0;
+ csd->r2w_factor = 4; /* Unused */
+ csd->write_blkbits = 9;
+ csd->write_partial = 0;
+ csd->erase_size = 1;
+ break;
+ default:
+ pr_err("%s: unrecognised CSD structure version %d\n",
+ mmc_hostname(card->host), csd_struct);
+ return -EINVAL;
+ }
+
+ card->erase_size = csd->erase_size;
+
+ return 0;
+}
+
+/*
+ * Given a 64-bit response, decode to our card SCR structure.
+ */
+static int mmc_decode_scr(struct mmc_card *card)
+{
+ struct sd_scr *scr = &card->scr;
+ unsigned int scr_struct;
+ u32 resp[4];
+
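+ /* Place the 64-bit SCR in the low two words so the UNSTUFF_BITS bit positions line up */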
+ resp[3] = card->raw_scr[1];
+ resp[2] = card->raw_scr[0];
+
+ scr_struct = UNSTUFF_BITS(resp, 60, 4);
+ if (scr_struct != 0) {
+ pr_err("%s: unrecognised SCR structure version %d\n",
+ mmc_hostname(card->host), scr_struct);
+ return -EINVAL;
+ }
+
+ scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);
+ scr->bus_widths = UNSTUFF_BITS(resp, 48, 4);
+ if (scr->sda_vsn == SCR_SPEC_VER_2)
+ /* Check if Physical Layer Spec v3.0 is supported */
+ scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1);
+
+ if (UNSTUFF_BITS(resp, 55, 1))
+ card->erased_byte = 0xFF;
+ else
+ card->erased_byte = 0x0;
+
+ if (scr->sda_spec3)
+ scr->cmds = UNSTUFF_BITS(resp, 32, 2);
+ return 0;
+}
+
+/*
+ * Fetch and process SD Status register.
+ */
+static int mmc_read_ssr(struct mmc_card *card)
+{
+ unsigned int au, es, et, eo;
+ int err, i;
+ u32 *ssr;
+
+ if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
+ pr_warn("%s: card lacks mandatory SD Status function\n",
+ mmc_hostname(card->host));
+ return 0;
+ }
+
+ ssr = kmalloc(64, GFP_KERNEL);
+ if (!ssr)
+ return -ENOMEM;
+
+ err = mmc_app_sd_status(card, ssr);
+ if (err) {
+ pr_warn("%s: problem reading SD Status register\n",
+ mmc_hostname(card->host));
+ err = 0;
+ goto out;
+ }
+
+ for (i = 0; i < 16; i++)
+ ssr[i] = be32_to_cpu(ssr[i]);
+
+ /*
+ * UNSTUFF_BITS only works with four u32s so we have to offset the
+ * bitfield positions accordingly.
+ */
+ au = UNSTUFF_BITS(ssr, 428 - 384, 4);
+ if (au) {
+ if (au <= 9 || card->scr.sda_spec3) {
+ card->ssr.au = sd_au_size[au];
+ es = UNSTUFF_BITS(ssr, 408 - 384, 16);
+ et = UNSTUFF_BITS(ssr, 402 - 384, 6);
+ if (es && et) {
+ eo = UNSTUFF_BITS(ssr, 400 - 384, 2);
+ card->ssr.erase_timeout = (et * 1000) / es;
+ card->ssr.erase_offset = eo * 1000;
+ }
+ } else {
+ pr_warn("%s: SD Status: Invalid Allocation Unit size\n",
+ mmc_hostname(card->host));
+ }
+ }
+out:
+ kfree(ssr);
+ return err;
+}
+
+/*
+ * Fetches and decodes switch information
+ */
+static int mmc_read_switch(struct mmc_card *card)
+{
+ int err;
+ u8 *status;
+
+ if (card->scr.sda_vsn < SCR_SPEC_VER_1)
+ return 0;
+
+ if (!(card->csd.cmdclass & CCC_SWITCH)) {
+ pr_warn("%s: card lacks mandatory switch function, performance might suffer\n",
+ mmc_hostname(card->host));
+ return 0;
+ }
+
+ err = -EIO;
+
+ status = kmalloc(64, GFP_KERNEL);
+ if (!status) {
+ pr_err("%s: could not allocate a buffer for "
+ "switch capabilities.\n",
+ mmc_hostname(card->host));
+ return -ENOMEM;
+ }
+
+ /*
+ * Find out the card's support bits with a mode 0 operation.
+ * The argument does not matter, as the support bits do not
+ * change with the arguments.
+ */
+ err = mmc_sd_switch(card, 0, 0, 0, status);
+ if (err) {
+ /*
+ * If the host or the card can't do the switch,
+ * fail more gracefully.
+ */
+ if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
+ goto out;
+
+ pr_warn("%s: problem reading Bus Speed modes\n",
+ mmc_hostname(card->host));
+ err = 0;
+
+ goto out;
+ }
+
+ if (status[13] & SD_MODE_HIGH_SPEED)
+ card->sw_caps.hs_max_dtr = HIGH_SPEED_MAX_DTR;
+
+ if (card->scr.sda_spec3) {
+ card->sw_caps.sd3_bus_mode = status[13];
+ /* Driver Strengths supported by the card */
+ card->sw_caps.sd3_drv_type = status[9];
+ }
+
+out:
+ kfree(status);
+
+ return err;
+}
+
+/*
+ * Test if the card supports high-speed mode and, if so, switch to it.
+ */
+int mmc_sd_switch_hs(struct mmc_card *card)
+{
+ int err;
+ u8 *status;
+
+ if (card->scr.sda_vsn < SCR_SPEC_VER_1)
+ return 0;
+
+ if (!(card->csd.cmdclass & CCC_SWITCH))
+ return 0;
+
+ if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
+ return 0;
+
+ if (card->sw_caps.hs_max_dtr == 0)
+ return 0;
+
+ err = -EIO;
+
+ status = kmalloc(64, GFP_KERNEL);
+ if (!status) {
+ pr_err("%s: could not allocate a buffer for "
+ "switch capabilities.\n", mmc_hostname(card->host));
+ return -ENOMEM;
+ }
+
+ err = mmc_sd_switch(card, 1, 0, 1, status);
+ if (err)
+ goto out;
+
+ if ((status[16] & 0xF) != 1) {
+ pr_warn("%s: Problem switching card into high-speed mode!\n",
+ mmc_hostname(card->host));
+ err = 0;
+ } else {
+ err = 1;
+ }
+
+out:
+ kfree(status);
+
+ return err;
+}
+
+static int sd_select_driver_type(struct mmc_card *card, u8 *status)
+{
+ int host_drv_type = SD_DRIVER_TYPE_B;
+ int card_drv_type = SD_DRIVER_TYPE_B;
+ int drive_strength;
+ int err;
+
+ /*
+ * If the host doesn't support any of the Driver Types A, C or D,
+ * or there is no board-specific handler, then the default Driver
+ * Type B is used.
+ */
+ if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C
+ | MMC_CAP_DRIVER_TYPE_D)))
+ return 0;
+
+ if (!card->host->ops->select_drive_strength)
+ return 0;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
+ host_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
+ host_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
+ host_drv_type |= SD_DRIVER_TYPE_D;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
+ card_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
+ card_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
+ card_drv_type |= SD_DRIVER_TYPE_D;
+
+ /*
+ * The drive strength that the hardware can support
+ * depends on the board design. Pass the appropriate
+ * information and let the hardware-specific code
+ * return what is possible given the options.
+ */
+ mmc_host_clk_hold(card->host);
+ drive_strength = card->host->ops->select_drive_strength(
+ card->sw_caps.uhs_max_dtr,
+ host_drv_type, card_drv_type);
+ mmc_host_clk_release(card->host);
+
+ err = mmc_sd_switch(card, 1, 2, drive_strength, status);
+ if (err)
+ return err;
+
+ if ((status[15] & 0xF) != drive_strength) {
+ pr_warn("%s: Problem setting drive strength!\n",
+ mmc_hostname(card->host));
+ return 0;
+ }
+
+ mmc_set_driver_type(card->host, drive_strength);
+
+ return 0;
+}
+
+static void sd_update_bus_speed_mode(struct mmc_card *card)
+{
+ /*
+ * If the host doesn't support any of the UHS-I modes, fall back to
+ * the default speed.
+ */
+ if (!mmc_host_uhs(card->host)) {
+ card->sd_bus_speed = 0;
+ return;
+ }
+
+ if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
+ card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
+ } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
+ card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR50)) {
+ card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
+ card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR12)) {
+ card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+ }
+}
+
+static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+{
+ int err;
+ unsigned int timing = 0;
+
+ switch (card->sd_bus_speed) {
+ case UHS_SDR104_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR104;
+ card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ break;
+ case UHS_DDR50_BUS_SPEED:
+ timing = MMC_TIMING_UHS_DDR50;
+ card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ break;
+ case UHS_SDR50_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR50;
+ card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ break;
+ case UHS_SDR25_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR25;
+ card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ break;
+ case UHS_SDR12_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR12;
+ card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ break;
+ default:
+ return 0;
+ }
+
+ err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
+ if (err)
+ return err;
+
+ if ((status[16] & 0xF) != card->sd_bus_speed)
+ pr_warn("%s: Problem setting bus speed mode!\n",
+ mmc_hostname(card->host));
+ else {
+ mmc_set_timing(card->host, timing);
+ mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
+ }
+
+ return 0;
+}
+
+/* Get host's max current setting at its current voltage */
+static u32 sd_get_host_max_current(struct mmc_host *host)
+{
+ u32 voltage, max_current;
+
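+ /* ios.vdd is a bit index into the OCR voltage mask, so 1 << vdd gives the OCR bit */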
+ voltage = 1 << host->ios.vdd;
+ switch (voltage) {
+ case MMC_VDD_165_195:
+ max_current = host->max_current_180;
+ break;
+ case MMC_VDD_29_30:
+ case MMC_VDD_30_31:
+ max_current = host->max_current_300;
+ break;
+ case MMC_VDD_32_33:
+ case MMC_VDD_33_34:
+ max_current = host->max_current_330;
+ break;
+ default:
+ max_current = 0;
+ }
+
+ return max_current;
+}
+
+static int sd_set_current_limit(struct mmc_card *card, u8 *status)
+{
+ int current_limit = SD_SET_CURRENT_NO_CHANGE;
+ int err;
+ u32 max_current;
+
+ /*
+ * Current limit switch is only defined for SDR50, SDR104, and DDR50
+ * bus speed modes. For other bus speed modes, we do not change the
+ * current limit.
+ */
+ if ((card->sd_bus_speed != UHS_SDR50_BUS_SPEED) &&
+ (card->sd_bus_speed != UHS_SDR104_BUS_SPEED) &&
+ (card->sd_bus_speed != UHS_DDR50_BUS_SPEED))
+ return 0;
+
+ /*
+ * Host has different current capabilities when operating at
+ * different voltages, so find out its max current first.
+ */
+ max_current = sd_get_host_max_current(card->host);
+
+ /*
+ * We only check the host's capability here. If we set a limit that is
+ * higher than the card's maximum current, the card will simply use its
+ * own maximum: e.g. if the card's maximum current is 300 mA and we set
+ * the current limit to 200 mA, the card will draw 200 mA, but if we set
+ * the current limit to 400/600/800 mA, the card will still only draw
+ * its maximum of 300 mA from the host.
+ */
+ if (max_current >= 800)
+ current_limit = SD_SET_CURRENT_LIMIT_800;
+ else if (max_current >= 600)
+ current_limit = SD_SET_CURRENT_LIMIT_600;
+ else if (max_current >= 400)
+ current_limit = SD_SET_CURRENT_LIMIT_400;
+ else if (max_current >= 200)
+ current_limit = SD_SET_CURRENT_LIMIT_200;
+
+ if (current_limit != SD_SET_CURRENT_NO_CHANGE) {
+ err = mmc_sd_switch(card, 1, 3, current_limit, status);
+ if (err)
+ return err;
+
+ if (((status[15] >> 4) & 0x0F) != current_limit)
+ pr_warn("%s: Problem setting current limit!\n",
+ mmc_hostname(card->host));
+
+ }
+
+ return 0;
+}
+
+/*
+ * UHS-I specific initialization procedure
+ */
+static int mmc_sd_init_uhs_card(struct mmc_card *card)
+{
+ int err;
+ u8 *status;
+
+ if (!card->scr.sda_spec3)
+ return 0;
+
+ if (!(card->csd.cmdclass & CCC_SWITCH))
+ return 0;
+
+ status = kmalloc(64, GFP_KERNEL);
+ if (!status) {
+ pr_err("%s: could not allocate a buffer for "
+ "switch capabilities.\n", mmc_hostname(card->host));
+ return -ENOMEM;
+ }
+
+ /* Set 4-bit bus width */
+ if ((card->host->caps & MMC_CAP_4_BIT_DATA) &&
+ (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
+ err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
+ if (err)
+ goto out;
+
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+ }
+
+ /*
+ * Select the bus speed mode depending on host
+ * and card capability.
+ */
+ sd_update_bus_speed_mode(card);
+
+ /* Set the driver strength for the card */
+ err = sd_select_driver_type(card, status);
+ if (err)
+ goto out;
+
+ /* Set current limit for the card */
+ err = sd_set_current_limit(card, status);
+ if (err)
+ goto out;
+
+ /* Set bus speed mode of the card */
+ err = sd_set_bus_speed_mode(card, status);
+ if (err)
+ goto out;
+
+ /*
+ * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
+ * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ */
+ if (!mmc_host_is_spi(card->host) &&
+ (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
+ card->sd_bus_speed == UHS_SDR104_BUS_SPEED))
+ err = mmc_execute_tuning(card);
+out:
+ kfree(status);
+
+ return err;
+}
+
+MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
+ card->raw_cid[2], card->raw_cid[3]);
+MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
+ card->raw_csd[2], card->raw_csd[3]);
+MMC_DEV_ATTR(scr, "%08x%08x\n", card->raw_scr[0], card->raw_scr[1]);
+MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
+MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
+MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
+MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
+MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
+MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
+MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
+MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
+MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
+
+
+static struct attribute *sd_std_attrs[] = {
+ &dev_attr_cid.attr,
+ &dev_attr_csd.attr,
+ &dev_attr_scr.attr,
+ &dev_attr_date.attr,
+ &dev_attr_erase_size.attr,
+ &dev_attr_preferred_erase_size.attr,
+ &dev_attr_fwrev.attr,
+ &dev_attr_hwrev.attr,
+ &dev_attr_manfid.attr,
+ &dev_attr_name.attr,
+ &dev_attr_oemid.attr,
+ &dev_attr_serial.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(sd_std);
+
+struct device_type sd_type = {
+ .groups = sd_std_groups,
+};
+
+/*
+ * Fetch CID from card.
+ */
+int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
+{
+ int err;
+ u32 max_current;
+ int retries = 10;
+ u32 pocr = ocr;
+
+try_again:
+ if (!retries) {
+ ocr &= ~SD_OCR_S18R;
+ pr_warn("%s: Skipping voltage switch\n", mmc_hostname(host));
+ }
+
+ /*
+ * Since we're changing the OCR value, we seem to
+ * need to tell some cards to go back to the idle
+ * state. We wait 1ms to give cards time to
+ * respond.
+ */
+ mmc_go_idle(host);
+
+ /*
+ * If SD_SEND_IF_COND indicates an SD 2.0 compliant card,
+ * set bit 30 of the OCR to indicate that we can handle
+ * block-addressed SDHC cards.
+ */
+ err = mmc_send_if_cond(host, ocr);
+ if (!err)
+ ocr |= SD_OCR_CCS;
+
+ /*
+ * If the host supports one of UHS-I modes, request the card
+ * to switch to 1.8V signaling level. If the card has failed
+ * repeatedly to switch however, skip this.
+ */
+ if (retries && mmc_host_uhs(host))
+ ocr |= SD_OCR_S18R;
+
+ /*
+ * If the host can supply more than 150mA at current voltage,
+ * XPC should be set to 1.
+ */
+ max_current = sd_get_host_max_current(host);
+ if (max_current > 150)
+ ocr |= SD_OCR_XPC;
+
+ err = mmc_send_app_op_cond(host, ocr, rocr);
+ if (err)
+ return err;
+
+ /*
+ * If CCS (bit 30) and S18A (bit 24) are set in the response, start
+ * the Signal Voltage Switch procedure. SPI mode doesn't support CMD11.
+ */
+ if (!mmc_host_is_spi(host) && rocr &&
+ ((*rocr & 0x41000000) == 0x41000000)) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
+ pocr);
+ if (err == -EAGAIN) {
+ retries--;
+ goto try_again;
+ } else if (err) {
+ retries = 0;
+ goto try_again;
+ }
+ }
+
+ if (mmc_host_is_spi(host))
+ err = mmc_send_cid(host, cid);
+ else
+ err = mmc_all_send_cid(host, cid);
+
+ return err;
+}
+
+int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card)
+{
+ int err;
+
+ /*
+ * Fetch CSD from card.
+ */
+ err = mmc_send_csd(card, card->raw_csd);
+ if (err)
+ return err;
+
+ err = mmc_decode_csd(card);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
+ bool reinit)
+{
+ int err;
+
+ if (!reinit) {
+ /*
+ * Fetch SCR from card.
+ */
+ err = mmc_app_send_scr(card, card->raw_scr);
+ if (err)
+ return err;
+
+ err = mmc_decode_scr(card);
+ if (err)
+ return err;
+
+ /*
+ * Fetch and process SD Status register.
+ */
+ err = mmc_read_ssr(card);
+ if (err)
+ return err;
+
+ /* Erase init depends on CSD and SSR */
+ mmc_init_erase(card);
+
+ /*
+ * Fetch switch information from card.
+ */
+ err = mmc_read_switch(card);
+ if (err)
+ return err;
+ }
+
+ /*
+ * For SPI, enable CRC as appropriate.
+ * This CRC enable is located AFTER the reading of the
+ * card registers because some SDHC cards are not able
+ * to provide valid CRCs for non-512-byte blocks.
+ */
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Check if read-only switch is active.
+ */
+ if (!reinit) {
+ int ro = -1;
+
+ if (host->ops->get_ro) {
+ mmc_host_clk_hold(card->host);
+ ro = host->ops->get_ro(host);
+ mmc_host_clk_release(card->host);
+ }
+
+ if (ro < 0) {
+ pr_warn("%s: host does not support reading read-only switch, assuming write-enable\n",
+ mmc_hostname(host));
+ } else if (ro > 0) {
+ mmc_card_set_readonly(card);
+ }
+ }
+
+ return 0;
+}
+
+unsigned mmc_sd_get_max_clock(struct mmc_card *card)
+{
+ unsigned max_dtr = (unsigned int)-1;
+
+ if (mmc_card_hs(card)) {
+ if (max_dtr > card->sw_caps.hs_max_dtr)
+ max_dtr = card->sw_caps.hs_max_dtr;
+ } else if (max_dtr > card->csd.max_dtr) {
+ max_dtr = card->csd.max_dtr;
+ }
+
+ return max_dtr;
+}
+
+/*
+ * Handle the detection and initialisation of a card.
+ *
+ * In the case of a resume, "oldcard" will contain the card
+ * we're trying to reinitialise.
+ */
+static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
+ struct mmc_card *oldcard)
+{
+ struct mmc_card *card;
+ int err;
+ u32 cid[4];
+ u32 rocr = 0;
+
+ BUG_ON(!host);
+ WARN_ON(!host->claimed);
+
+ err = mmc_sd_get_cid(host, ocr, cid, &rocr);
+ if (err)
+ return err;
+
+ if (oldcard) {
+ if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0)
+ return -ENOENT;
+
+ card = oldcard;
+ } else {
+ /*
+ * Allocate card structure.
+ */
+ card = mmc_alloc_card(host, &sd_type);
+ if (IS_ERR(card))
+ return PTR_ERR(card);
+
+ card->ocr = ocr;
+ card->type = MMC_TYPE_SD;
+ memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
+ }
+
+ /*
+ * Call the optional HC's init_card function to handle quirks.
+ */
+ if (host->ops->init_card)
+ host->ops->init_card(host, card);
+
+ /*
+ * For native busses: get card RCA and quit open drain mode.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_send_relative_addr(host, &card->rca);
+ if (err)
+ goto free_card;
+ }
+
+ if (!oldcard) {
+ err = mmc_sd_get_csd(host, card);
+ if (err)
+ goto free_card;
+
+ mmc_decode_cid(card);
+ }
+
+ /*
+ * Handle DSR configuration only for cards supporting DSR and
+ * hosts requesting it.
+ */
+ if (card->csd.dsr_imp && host->dsr_req)
+ mmc_set_dsr(host);
+
+ /*
+ * Select card, as all following commands rely on that.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ goto free_card;
+ }
+
+ err = mmc_sd_setup_card(host, card, oldcard != NULL);
+ if (err)
+ goto free_card;
+
+ /* Initialization sequence for UHS-I cards */
+ if (rocr & SD_ROCR_S18A) {
+ err = mmc_sd_init_uhs_card(card);
+ if (err)
+ goto free_card;
+ } else {
+ /*
+ * Attempt to change to high-speed (if supported)
+ */
+ err = mmc_sd_switch_hs(card);
+ if (err > 0)
+ mmc_set_timing(card->host, MMC_TIMING_SD_HS);
+ else if (err)
+ goto free_card;
+
+ /*
+ * Set bus speed.
+ */
+ mmc_set_clock(host, mmc_sd_get_max_clock(card));
+
+ /*
+ * Switch to wider bus (if supported).
+ */
+ if ((host->caps & MMC_CAP_4_BIT_DATA) &&
+ (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
+ err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
+ if (err)
+ goto free_card;
+
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+ }
+ }
+
+ host->card = card;
+ return 0;
+
+free_card:
+ if (!oldcard)
+ mmc_remove_card(card);
+
+ return err;
+}
+
+/*
+ * Host is being removed. Free up the current card.
+ */
+static void mmc_sd_remove(struct mmc_host *host)
+{
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ mmc_remove_card(host->card);
+ host->card = NULL;
+}
+
+/*
+ * Card detection - card is alive.
+ */
+static int mmc_sd_alive(struct mmc_host *host)
+{
+ return mmc_send_status(host->card, NULL);
+}
+
+/*
+ * Card detection callback from host.
+ */
+static void mmc_sd_detect(struct mmc_host *host)
+{
+ int err;
+
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ mmc_get_card(host->card);
+
+ /*
+ * Just check if our card has been removed.
+ */
+ err = _mmc_detect_card_removed(host);
+
+ mmc_put_card(host->card);
+
+ if (err) {
+ mmc_sd_remove(host);
+
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ }
+}
+
+static int _mmc_sd_suspend(struct mmc_host *host)
+{
+ int err = 0;
+
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ mmc_claim_host(host);
+
+ if (mmc_card_suspended(host->card))
+ goto out;
+
+ if (!mmc_host_is_spi(host))
+ err = mmc_deselect_cards(host);
+
+ if (!err) {
+ mmc_power_off(host);
+ mmc_card_set_suspended(host->card);
+ }
+
+out:
+ mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Callback for suspend
+ */
+static int mmc_sd_suspend(struct mmc_host *host)
+{
+ int err;
+
+ err = _mmc_sd_suspend(host);
+ if (!err) {
+ pm_runtime_disable(&host->card->dev);
+ pm_runtime_set_suspended(&host->card->dev);
+ }
+
+ return err;
+}
+
+/*
+ * This function tries to determine if the same card is still present
+ * and, if so, restore all state to it.
+ */
+static int _mmc_sd_resume(struct mmc_host *host)
+{
+ int err = 0;
+
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ mmc_claim_host(host);
+
+ if (!mmc_card_suspended(host->card))
+ goto out;
+
+ mmc_power_up(host, host->card->ocr);
+ err = mmc_sd_init_card(host, host->card->ocr, host->card);
+ mmc_card_clr_suspended(host->card);
+
+out:
+ mmc_release_host(host);
+ return err;
+}
+
+/*
+ * Callback for resume
+ */
+static int mmc_sd_resume(struct mmc_host *host)
+{
+ int err = 0;
+
+ if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
+ err = _mmc_sd_resume(host);
+ pm_runtime_set_active(&host->card->dev);
+ pm_runtime_mark_last_busy(&host->card->dev);
+ }
+ pm_runtime_enable(&host->card->dev);
+
+ return err;
+}
+
+/*
+ * Callback for runtime_suspend.
+ */
+static int mmc_sd_runtime_suspend(struct mmc_host *host)
+{
+ int err;
+
+ if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
+ return 0;
+
+ err = _mmc_sd_suspend(host);
+ if (err)
+ pr_err("%s: error %d doing aggressive suspend\n",
+ mmc_hostname(host), err);
+
+ return err;
+}
+
+/*
+ * Callback for runtime_resume.
+ */
+static int mmc_sd_runtime_resume(struct mmc_host *host)
+{
+ int err;
+
+ if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
+ return 0;
+
+ err = _mmc_sd_resume(host);
+ if (err)
+ pr_err("%s: error %d doing aggressive resume\n",
+ mmc_hostname(host), err);
+
+ return 0;
+}
+
+static int mmc_sd_power_restore(struct mmc_host *host)
+{
+ int ret;
+
+ mmc_claim_host(host);
+ ret = mmc_sd_init_card(host, host->card->ocr, host->card);
+ mmc_release_host(host);
+
+ return ret;
+}
+
+static int mmc_sd_reset(struct mmc_host *host)
+{
+ mmc_power_cycle(host, host->card->ocr);
+ return mmc_sd_power_restore(host);
+}
+
+static const struct mmc_bus_ops mmc_sd_ops = {
+ .remove = mmc_sd_remove,
+ .detect = mmc_sd_detect,
+ .runtime_suspend = mmc_sd_runtime_suspend,
+ .runtime_resume = mmc_sd_runtime_resume,
+ .suspend = mmc_sd_suspend,
+ .resume = mmc_sd_resume,
+ .power_restore = mmc_sd_power_restore,
+ .alive = mmc_sd_alive,
+ .shutdown = mmc_sd_suspend,
+ .reset = mmc_sd_reset,
+};
+
+/*
+ * Starting point for SD card init.
+ */
+int mmc_attach_sd(struct mmc_host *host)
+{
+ int err;
+ u32 ocr, rocr;
+
+ BUG_ON(!host);
+ WARN_ON(!host->claimed);
+
+ err = mmc_send_app_op_cond(host, 0, &ocr);
+ if (err)
+ return err;
+
+ mmc_attach_bus(host, &mmc_sd_ops);
+ if (host->ocr_avail_sd)
+ host->ocr_avail = host->ocr_avail_sd;
+
+ /*
+ * We need to get OCR a different way for SPI.
+ */
+ if (mmc_host_is_spi(host)) {
+ mmc_go_idle(host);
+
+ err = mmc_spi_read_ocr(host, 0, &ocr);
+ if (err)
+ goto err;
+ }
+
+ rocr = mmc_select_voltage(host, ocr);
+
+ /*
+ * Can we support the voltage(s) of the card(s)?
+ */
+ if (!rocr) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Detect and init the card.
+ */
+ err = mmc_sd_init_card(host, rocr, NULL);
+ if (err)
+ goto err;
+
+ mmc_release_host(host);
+ err = mmc_add_card(host->card);
+ mmc_claim_host(host);
+ if (err)
+ goto remove_card;
+
+ return 0;
+
+remove_card:
+ mmc_release_host(host);
+ mmc_remove_card(host->card);
+ host->card = NULL;
+ mmc_claim_host(host);
+err:
+ mmc_detach_bus(host);
+
+ pr_err("%s: error %d whilst initialising SD card\n",
+ mmc_hostname(host), err);
+
+ return err;
+}
diff --git a/kernel/drivers/mmc/core/sd.h b/kernel/drivers/mmc/core/sd.h
new file mode 100644
index 000000000..aab824a9a
--- /dev/null
+++ b/kernel/drivers/mmc/core/sd.h
@@ -0,0 +1,16 @@
+#ifndef _MMC_CORE_SD_H
+#define _MMC_CORE_SD_H
+
+#include <linux/mmc/card.h>
+
+extern struct device_type sd_type;
+
+int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr);
+int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card);
+void mmc_decode_cid(struct mmc_card *card);
+int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
+ bool reinit);
+unsigned mmc_sd_get_max_clock(struct mmc_card *card);
+int mmc_sd_switch_hs(struct mmc_card *card);
+
+#endif
diff --git a/kernel/drivers/mmc/core/sd_ops.c b/kernel/drivers/mmc/core/sd_ops.c
new file mode 100644
index 000000000..48d0c93ba
--- /dev/null
+++ b/kernel/drivers/mmc/core/sd_ops.c
@@ -0,0 +1,395 @@
+/*
+ *  linux/drivers/mmc/core/sd_ops.c
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/scatterlist.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#include "core.h"
+#include "sd_ops.h"
+
+int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
+{
+ int err;
+ struct mmc_command cmd = {0};
+
+ BUG_ON(!host);
+ BUG_ON(card && (card->host != host));
+
+ cmd.opcode = MMC_APP_CMD;
+
+ if (card) {
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_BCR;
+ }
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ return err;
+
+	/* Check that the card supports application commands */
+ if (!mmc_host_is_spi(host) && !(cmd.resp[0] & R1_APP_CMD))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_app_cmd);
+
+/**
+ * mmc_wait_for_app_cmd - start an application command and wait for
+ *	completion
+ * @host: MMC host to start command
+ * @card: Card to send MMC_APP_CMD to
+ * @cmd: MMC command to start
+ * @retries: maximum number of retries
+ *
+ *	Sends an MMC_APP_CMD, checks the card response, sends the command
+ * in the parameter and waits for it to complete. Return any error
+ * that occurred while the command was executing. Do not attempt to
+ * parse the response.
+ */
+int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
+ struct mmc_command *cmd, int retries)
+{
+ struct mmc_request mrq = {NULL};
+
+ int i, err;
+
+ BUG_ON(!cmd);
+ BUG_ON(retries < 0);
+
+ err = -EIO;
+
+ /*
+ * We have to resend MMC_APP_CMD for each attempt so
+ * we cannot use the retries field in mmc_command.
+ */
+	for (i = 0; i <= retries; i++) {
+ err = mmc_app_cmd(host, card);
+ if (err) {
+ /* no point in retrying; no APP commands allowed */
+ if (mmc_host_is_spi(host)) {
+ if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
+ break;
+ }
+ continue;
+ }
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+
+ memset(cmd->resp, 0, sizeof(cmd->resp));
+ cmd->retries = 0;
+
+ mrq.cmd = cmd;
+ cmd->data = NULL;
+
+ mmc_wait_for_req(host, &mrq);
+
+ err = cmd->error;
+ if (!cmd->error)
+ break;
+
+ /* no point in retrying illegal APP commands */
+ if (mmc_host_is_spi(host)) {
+ if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
+ break;
+ }
+ }
+
+ return err;
+}
+
+EXPORT_SYMBOL(mmc_wait_for_app_cmd);
+
+int mmc_app_set_bus_width(struct mmc_card *card, int width)
+{
+ int err;
+ struct mmc_command cmd = {0};
+
+ BUG_ON(!card);
+ BUG_ON(!card->host);
+
+ cmd.opcode = SD_APP_SET_BUS_WIDTH;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ switch (width) {
+ case MMC_BUS_WIDTH_1:
+ cmd.arg = SD_BUS_WIDTH_1;
+ break;
+ case MMC_BUS_WIDTH_4:
+ cmd.arg = SD_BUS_WIDTH_4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
+{
+ struct mmc_command cmd = {0};
+ int i, err = 0;
+
+ BUG_ON(!host);
+
+ cmd.opcode = SD_APP_OP_COND;
+ if (mmc_host_is_spi(host))
+ cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */
+ else
+ cmd.arg = ocr;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
+
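+	/*
+	 * Poll up to 100 times at 10 ms intervals (roughly one second)
+	 * for the card to report that power-up has completed.
+	 */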
+ for (i = 100; i; i--) {
+ err = mmc_wait_for_app_cmd(host, NULL, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ break;
+
+ /* if we're just probing, do a single pass */
+ if (ocr == 0)
+ break;
+
+ /* otherwise wait until reset completes */
+ if (mmc_host_is_spi(host)) {
+ if (!(cmd.resp[0] & R1_SPI_IDLE))
+ break;
+ } else {
+ if (cmd.resp[0] & MMC_CARD_BUSY)
+ break;
+ }
+
+ err = -ETIMEDOUT;
+
+ mmc_delay(10);
+ }
+
+ if (!i)
+ pr_err("%s: card never left busy state\n", mmc_hostname(host));
+
+ if (rocr && !mmc_host_is_spi(host))
+ *rocr = cmd.resp[0];
+
+ return err;
+}
+
+int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
+{
+ struct mmc_command cmd = {0};
+ int err;
+ static const u8 test_pattern = 0xAA;
+ u8 result_pattern;
+
+ /*
+ * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND
+ * before SD_APP_OP_COND. This command will harmlessly fail for
+ * SD 1.0 cards.
+ */
+ cmd.opcode = SD_SEND_IF_COND;
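+	/*
+	 * Argument: bit 8 is set when the host supplies 2.7-3.6V, and the
+	 * low byte is a check pattern the card echoes back in its response.
+	 */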
+ cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern;
+ cmd.flags = MMC_RSP_SPI_R7 | MMC_RSP_R7 | MMC_CMD_BCR;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ return err;
+
+ if (mmc_host_is_spi(host))
+ result_pattern = cmd.resp[1] & 0xFF;
+ else
+ result_pattern = cmd.resp[0] & 0xFF;
+
+ if (result_pattern != test_pattern)
+ return -EIO;
+
+ return 0;
+}
+
+int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
+{
+ int err;
+ struct mmc_command cmd = {0};
+
+ BUG_ON(!host);
+ BUG_ON(!rca);
+
+ cmd.opcode = SD_SEND_RELATIVE_ADDR;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR;
+
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ return err;
+
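+	/* The newly published RCA is in the top 16 bits of the R6 response */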
+ *rca = cmd.resp[0] >> 16;
+
+ return 0;
+}
+
+int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
+{
+ int err;
+ struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg;
+ void *data_buf;
+
+ BUG_ON(!card);
+ BUG_ON(!card->host);
+ BUG_ON(!scr);
+
+ /* NOTE: caller guarantees scr is heap-allocated */
+
+ err = mmc_app_cmd(card->host, card);
+ if (err)
+ return err;
+
+	/* DMA onto the stack is unsafe/nonportable, but callers to this
+	 * routine normally provide temporary on-stack buffers, so bounce
+	 * the transfer through a kmalloc'd buffer instead.
+ */
+ data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
+ if (data_buf == NULL)
+ return -ENOMEM;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ cmd.opcode = SD_APP_SEND_SCR;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = 8;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, data_buf, 8);
+
+ mmc_set_data_timeout(&data, card);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ memcpy(scr, data_buf, sizeof(card->raw_scr));
+ kfree(data_buf);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
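+	/* The SCR is transferred big-endian; convert it to host byte order */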
+ scr[0] = be32_to_cpu(scr[0]);
+ scr[1] = be32_to_cpu(scr[1]);
+
+ return 0;
+}
+
+int mmc_sd_switch(struct mmc_card *card, int mode, int group,
+ u8 value, u8 *resp)
+{
+ struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg;
+
+ BUG_ON(!card);
+ BUG_ON(!card->host);
+
+ /* NOTE: caller guarantees resp is heap-allocated */
+
+ mode = !!mode;
+ value &= 0xF;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ cmd.opcode = SD_SWITCH;
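+	/*
+	 * Argument: bit 31 selects check (0) or set (1) mode; each 4-bit
+	 * group selects a function, with 0xF meaning "no change".
+	 */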
+ cmd.arg = mode << 31 | 0x00FFFFFF;
+ cmd.arg &= ~(0xF << (group * 4));
+ cmd.arg |= value << (group * 4);
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = 64;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, resp, 64);
+
+ mmc_set_data_timeout(&data, card);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
+
+int mmc_app_sd_status(struct mmc_card *card, void *ssr)
+{
+ int err;
+ struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg;
+
+ BUG_ON(!card);
+ BUG_ON(!card->host);
+ BUG_ON(!ssr);
+
+ /* NOTE: caller guarantees ssr is heap-allocated */
+
+ err = mmc_app_cmd(card->host, card);
+ if (err)
+ return err;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ cmd.opcode = SD_APP_SD_STATUS;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = 64;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, ssr, 64);
+
+ mmc_set_data_timeout(&data, card);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
diff --git a/kernel/drivers/mmc/core/sd_ops.h b/kernel/drivers/mmc/core/sd_ops.h
new file mode 100644
index 000000000..ffc2305d9
--- /dev/null
+++ b/kernel/drivers/mmc/core/sd_ops.h
@@ -0,0 +1,25 @@
+/*
+ * linux/drivers/mmc/core/sd_ops.h
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef _MMC_SD_OPS_H
+#define _MMC_SD_OPS_H
+
+int mmc_app_set_bus_width(struct mmc_card *card, int width);
+int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
+int mmc_send_if_cond(struct mmc_host *host, u32 ocr);
+int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca);
+int mmc_app_send_scr(struct mmc_card *card, u32 *scr);
+int mmc_sd_switch(struct mmc_card *card, int mode, int group,
+ u8 value, u8 *resp);
+int mmc_app_sd_status(struct mmc_card *card, void *ssr);
+
+#endif
+
diff --git a/kernel/drivers/mmc/core/sdio.c b/kernel/drivers/mmc/core/sdio.c
new file mode 100644
index 000000000..5bc6c7dbb
--- /dev/null
+++ b/kernel/drivers/mmc/core/sdio.c
@@ -0,0 +1,1190 @@
+/*
+ *  linux/drivers/mmc/core/sdio.c
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include "core.h"
+#include "bus.h"
+#include "sd.h"
+#include "sdio_bus.h"
+#include "mmc_ops.h"
+#include "sd_ops.h"
+#include "sdio_ops.h"
+#include "sdio_cis.h"
+
+static int sdio_read_fbr(struct sdio_func *func)
+{
+ int ret;
+ unsigned char data;
+
+ if (mmc_card_nonstd_func_interface(func->card)) {
+ func->class = SDIO_CLASS_NONE;
+ return 0;
+ }
+
+ ret = mmc_io_rw_direct(func->card, 0, 0,
+ SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF, 0, &data);
+ if (ret)
+ goto out;
+
+ data &= 0x0f;
+
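+	/*
+	 * A standard interface code of 0xF means the actual code lives in
+	 * the extended standard-interface register.
+	 */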
+ if (data == 0x0f) {
+ ret = mmc_io_rw_direct(func->card, 0, 0,
+ SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF_EXT, 0, &data);
+ if (ret)
+ goto out;
+ }
+
+ func->class = data;
+
+out:
+ return ret;
+}
+
+static int sdio_init_func(struct mmc_card *card, unsigned int fn)
+{
+ int ret;
+ struct sdio_func *func;
+
+ BUG_ON(fn > SDIO_MAX_FUNCS);
+
+ func = sdio_alloc_func(card);
+ if (IS_ERR(func))
+ return PTR_ERR(func);
+
+ func->num = fn;
+
+ if (!(card->quirks & MMC_QUIRK_NONSTD_SDIO)) {
+ ret = sdio_read_fbr(func);
+ if (ret)
+ goto fail;
+
+ ret = sdio_read_func_cis(func);
+ if (ret)
+ goto fail;
+ } else {
+ func->vendor = func->card->cis.vendor;
+ func->device = func->card->cis.device;
+ func->max_blksize = func->card->cis.blksize;
+ }
+
+ card->sdio_func[fn - 1] = func;
+
+ return 0;
+
+fail:
+ /*
+ * It is okay to remove the function here even though we hold
+ * the host lock as we haven't registered the device yet.
+ */
+ sdio_remove_func(func);
+ return ret;
+}
+
+static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
+{
+ int ret;
+ int cccr_vsn;
+ int uhs = ocr & R4_18V_PRESENT;
+ unsigned char data;
+ unsigned char speed;
+
+ memset(&card->cccr, 0, sizeof(struct sdio_cccr));
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_CCCR, 0, &data);
+ if (ret)
+ goto out;
+
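+	/*
+	 * The low nibble holds the CCCR format version; the SDIO spec
+	 * version is kept in the high nibble and stored below.
+	 */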
+ cccr_vsn = data & 0x0f;
+
+ if (cccr_vsn > SDIO_CCCR_REV_3_00) {
+ pr_err("%s: unrecognised CCCR structure version %d\n",
+ mmc_hostname(card->host), cccr_vsn);
+ return -EINVAL;
+ }
+
+ card->cccr.sdio_vsn = (data & 0xf0) >> 4;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_CAPS, 0, &data);
+ if (ret)
+ goto out;
+
+ if (data & SDIO_CCCR_CAP_SMB)
+ card->cccr.multi_block = 1;
+ if (data & SDIO_CCCR_CAP_LSC)
+ card->cccr.low_speed = 1;
+ if (data & SDIO_CCCR_CAP_4BLS)
+ card->cccr.wide_bus = 1;
+
+ if (cccr_vsn >= SDIO_CCCR_REV_1_10) {
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_POWER, 0, &data);
+ if (ret)
+ goto out;
+
+ if (data & SDIO_POWER_SMPC)
+ card->cccr.high_power = 1;
+ }
+
+ if (cccr_vsn >= SDIO_CCCR_REV_1_20) {
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
+ if (ret)
+ goto out;
+
+ card->scr.sda_spec3 = 0;
+ card->sw_caps.sd3_bus_mode = 0;
+ card->sw_caps.sd3_drv_type = 0;
+ if (cccr_vsn >= SDIO_CCCR_REV_3_00 && uhs) {
+ card->scr.sda_spec3 = 1;
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_CCCR_UHS, 0, &data);
+ if (ret)
+ goto out;
+
+ if (mmc_host_uhs(card->host)) {
+ if (data & SDIO_UHS_DDR50)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_DDR50;
+
+ if (data & SDIO_UHS_SDR50)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_SDR50;
+
+ if (data & SDIO_UHS_SDR104)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_SDR104;
+ }
+
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_CCCR_DRIVE_STRENGTH, 0, &data);
+ if (ret)
+ goto out;
+
+ if (data & SDIO_DRIVE_SDTA)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_A;
+ if (data & SDIO_DRIVE_SDTC)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
+ if (data & SDIO_DRIVE_SDTD)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
+ }
+
+ /* if no uhs mode ensure we check for high speed */
+ if (!card->sw_caps.sd3_bus_mode) {
+ if (speed & SDIO_SPEED_SHS) {
+ card->cccr.high_speed = 1;
+ card->sw_caps.hs_max_dtr = 50000000;
+ } else {
+ card->cccr.high_speed = 0;
+ card->sw_caps.hs_max_dtr = 25000000;
+ }
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int sdio_enable_wide(struct mmc_card *card)
+{
+ int ret;
+ u8 ctrl;
+
+ if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+ return 0;
+
+ if (card->cccr.low_speed && !card->cccr.wide_bus)
+ return 0;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
+ if (ret)
+ return ret;
+
+ if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED)
+ pr_warn("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
+ mmc_hostname(card->host), ctrl);
+
+ /* set as 4-bit bus width */
+ ctrl &= ~SDIO_BUS_WIDTH_MASK;
+ ctrl |= SDIO_BUS_WIDTH_4BIT;
+
+ ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
+ if (ret)
+ return ret;
+
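+	/* Return 1 so callers know the bus width register was actually changed */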
+ return 1;
+}
+
+/*
+ * If desired, disconnect the pull-up resistor on CD/DAT[3] (pin 1)
+ * of the card. This may be required on certain setups of boards,
+ * controllers and embedded SDIO devices which do not need the card's
+ * pull-up. As a result, card detection is disabled and power is saved.
+ */
+static int sdio_disable_cd(struct mmc_card *card)
+{
+ int ret;
+ u8 ctrl;
+
+ if (!mmc_card_disable_cd(card))
+ return 0;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
+ if (ret)
+ return ret;
+
+ ctrl |= SDIO_BUS_CD_DISABLE;
+
+ return mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
+}
+
+/*
+ * Devices that remain active during a system suspend are
+ * put back into 1-bit mode.
+ */
+static int sdio_disable_wide(struct mmc_card *card)
+{
+ int ret;
+ u8 ctrl;
+
+ if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+ return 0;
+
+ if (card->cccr.low_speed && !card->cccr.wide_bus)
+ return 0;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
+ if (ret)
+ return ret;
+
+ if (!(ctrl & SDIO_BUS_WIDTH_4BIT))
+ return 0;
+
+ ctrl &= ~SDIO_BUS_WIDTH_4BIT;
+ ctrl |= SDIO_BUS_ASYNC_INT;
+
+ ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
+ if (ret)
+ return ret;
+
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_1);
+
+ return 0;
+}
+
+
+static int sdio_enable_4bit_bus(struct mmc_card *card)
+{
+ int err;
+
+ if (card->type == MMC_TYPE_SDIO)
+ err = sdio_enable_wide(card);
+ else if ((card->host->caps & MMC_CAP_4_BIT_DATA) &&
+ (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
+ err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
+ if (err)
+ return err;
+ err = sdio_enable_wide(card);
+ if (err <= 0)
+ mmc_app_set_bus_width(card, MMC_BUS_WIDTH_1);
+ } else
+ return 0;
+
+ if (err > 0) {
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+ err = 0;
+ }
+
+ return err;
+}
+
+
+/*
+ * Test if the card supports high-speed mode and, if so, switch to it.
+ */
+static int mmc_sdio_switch_hs(struct mmc_card *card, int enable)
+{
+ int ret;
+ u8 speed;
+
+ if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
+ return 0;
+
+ if (!card->cccr.high_speed)
+ return 0;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
+ if (ret)
+ return ret;
+
+ if (enable)
+ speed |= SDIO_SPEED_EHS;
+ else
+ speed &= ~SDIO_SPEED_EHS;
+
+ ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+/*
+ * Enable SDIO/combo card's high-speed mode. Returns 1 if high speed was
+ * enabled, 0 if it is not supported, or a negative error code.
+ */
+static int sdio_enable_hs(struct mmc_card *card)
+{
+ int ret;
+
+ ret = mmc_sdio_switch_hs(card, true);
+ if (ret <= 0 || card->type == MMC_TYPE_SDIO)
+ return ret;
+
+ ret = mmc_sd_switch_hs(card);
+ if (ret <= 0)
+ mmc_sdio_switch_hs(card, false);
+
+ return ret;
+}
+
+static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
+{
+ unsigned max_dtr;
+
+ if (mmc_card_hs(card)) {
+ /*
+ * The SDIO specification doesn't mention how
+ * the CIS transfer speed register relates to
+ * high-speed, but it seems that 50 MHz is
+ * mandatory.
+ */
+ max_dtr = 50000000;
+ } else {
+ max_dtr = card->cis.max_dtr;
+ }
+
+ if (card->type == MMC_TYPE_SD_COMBO)
+ max_dtr = min(max_dtr, mmc_sd_get_max_clock(card));
+
+ return max_dtr;
+}
+
+static unsigned char host_drive_to_sdio_drive(int host_strength)
+{
+ switch (host_strength) {
+ case MMC_SET_DRIVER_TYPE_A:
+ return SDIO_DTSx_SET_TYPE_A;
+ case MMC_SET_DRIVER_TYPE_B:
+ return SDIO_DTSx_SET_TYPE_B;
+ case MMC_SET_DRIVER_TYPE_C:
+ return SDIO_DTSx_SET_TYPE_C;
+ case MMC_SET_DRIVER_TYPE_D:
+ return SDIO_DTSx_SET_TYPE_D;
+ default:
+ return SDIO_DTSx_SET_TYPE_B;
+ }
+}
+
+static void sdio_select_driver_type(struct mmc_card *card)
+{
+ int host_drv_type = SD_DRIVER_TYPE_B;
+ int card_drv_type = SD_DRIVER_TYPE_B;
+ int drive_strength;
+ unsigned char card_strength;
+ int err;
+
+ /*
+	 * If the host doesn't support any of the Driver Types A, C or D,
+	 * or if there is no board-specific handler, the default Driver
+	 * Type B is used.
+ */
+ if (!(card->host->caps &
+ (MMC_CAP_DRIVER_TYPE_A |
+ MMC_CAP_DRIVER_TYPE_C |
+ MMC_CAP_DRIVER_TYPE_D)))
+ return;
+
+ if (!card->host->ops->select_drive_strength)
+ return;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
+ host_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
+ host_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
+ host_drv_type |= SD_DRIVER_TYPE_D;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
+ card_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
+ card_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
+ card_drv_type |= SD_DRIVER_TYPE_D;
+
+ /*
+ * The drive strength that the hardware can support
+ * depends on the board design. Pass the appropriate
+	 * information and let the hardware-specific code
+	 * return what is possible given the options.
+ */
+ drive_strength = card->host->ops->select_drive_strength(
+ card->sw_caps.uhs_max_dtr,
+ host_drv_type, card_drv_type);
+
+ /* if error just use default for drive strength B */
+ err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0,
+ &card_strength);
+ if (err)
+ return;
+
+ card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT);
+ card_strength |= host_drive_to_sdio_drive(drive_strength);
+
+ err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH,
+ card_strength, NULL);
+
+ /* if error default to drive strength B */
+ if (!err)
+ mmc_set_driver_type(card->host, drive_strength);
+}
+
+
+static int sdio_set_bus_speed_mode(struct mmc_card *card)
+{
+ unsigned int bus_speed, timing;
+ int err;
+ unsigned char speed;
+
+ /*
+	 * If the host doesn't support any of the UHS-I modes, fall back to
+	 * the default speed.
+ */
+ if (!mmc_host_uhs(card->host))
+ return 0;
+
+ bus_speed = SDIO_SPEED_SDR12;
+ timing = MMC_TIMING_UHS_SDR12;
+ if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
+ bus_speed = SDIO_SPEED_SDR104;
+ timing = MMC_TIMING_UHS_SDR104;
+ card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
+ } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
+ bus_speed = SDIO_SPEED_DDR50;
+ timing = MMC_TIMING_UHS_DDR50;
+ card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR50)) {
+ bus_speed = SDIO_SPEED_SDR50;
+ timing = MMC_TIMING_UHS_SDR50;
+ card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
+ bus_speed = SDIO_SPEED_SDR25;
+ timing = MMC_TIMING_UHS_SDR25;
+ card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR12)) {
+ bus_speed = SDIO_SPEED_SDR12;
+ timing = MMC_TIMING_UHS_SDR12;
+ card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+ }
+
+ err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
+ if (err)
+ return err;
+
+ speed &= ~SDIO_SPEED_BSS_MASK;
+ speed |= bus_speed;
+ err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
+ if (err)
+ return err;
+
+ if (bus_speed) {
+ mmc_set_timing(card->host, timing);
+ mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
+ }
+
+ return 0;
+}
+
+/*
+ * UHS-I specific initialization procedure
+ */
+static int mmc_sdio_init_uhs_card(struct mmc_card *card)
+{
+ int err;
+
+ if (!card->scr.sda_spec3)
+ return 0;
+
+ /*
+ * Switch to wider bus (if supported).
+ */
+ if (card->host->caps & MMC_CAP_4_BIT_DATA)
+ err = sdio_enable_4bit_bus(card);
+
+ /* Set the driver strength for the card */
+ sdio_select_driver_type(card);
+
+ /* Set bus speed mode of the card */
+ err = sdio_set_bus_speed_mode(card);
+ if (err)
+ goto out;
+
+ /*
+ * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
+ * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ */
+ if (!mmc_host_is_spi(card->host) &&
+ ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)))
+ err = mmc_execute_tuning(card);
+out:
+ return err;
+}
+
+/*
+ * Handle the detection and initialisation of a card.
+ *
+ * In the case of a resume, "oldcard" will contain the card
+ * we're trying to reinitialise.
+ */
+static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
+ struct mmc_card *oldcard, int powered_resume)
+{
+ struct mmc_card *card;
+ int err;
+ int retries = 10;
+ u32 rocr = 0;
+ u32 ocr_card = ocr;
+
+ BUG_ON(!host);
+ WARN_ON(!host->claimed);
+
+ /* to query card if 1.8V signalling is supported */
+ if (mmc_host_uhs(host))
+ ocr |= R4_18V_PRESENT;
+
+try_again:
+ if (!retries) {
+ pr_warn("%s: Skipping voltage switch\n", mmc_hostname(host));
+ ocr &= ~R4_18V_PRESENT;
+ }
+
+ /*
+ * Inform the card of the voltage
+ */
+ if (!powered_resume) {
+ err = mmc_send_io_op_cond(host, ocr, &rocr);
+ if (err)
+ goto err;
+ }
+
+ /*
+ * For SPI, enable CRC as appropriate.
+ */
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+ goto err;
+ }
+
+ /*
+ * Allocate card structure.
+ */
+ card = mmc_alloc_card(host, NULL);
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto err;
+ }
+
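+	/*
+	 * A set memory-present bit in R4 plus a readable CID means this is
+	 * a combo (SDIO plus SD memory) card; otherwise it is SDIO-only.
+	 */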
+ if ((rocr & R4_MEMORY_PRESENT) &&
+ mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) {
+ card->type = MMC_TYPE_SD_COMBO;
+
+ if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
+ memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
+ mmc_remove_card(card);
+ return -ENOENT;
+ }
+ } else {
+ card->type = MMC_TYPE_SDIO;
+
+ if (oldcard && oldcard->type != MMC_TYPE_SDIO) {
+ mmc_remove_card(card);
+ return -ENOENT;
+ }
+ }
+
+ /*
+ * Call the optional HC's init_card function to handle quirks.
+ */
+ if (host->ops->init_card)
+ host->ops->init_card(host, card);
+
+ /*
+	 * If the host and card support UHS-I mode, request the card
+	 * to switch to the 1.8V signaling level. Skip 1.8V signalling if
+	 * UHS mode is not enabled, to maintain compatibility: some
+	 * systems that claim 1.8V signalling in fact do not support
+	 * it.
+ */
+ if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
+ ocr);
+ if (err == -EAGAIN) {
+ sdio_reset(host);
+ mmc_go_idle(host);
+ mmc_send_if_cond(host, host->ocr_avail);
+ mmc_remove_card(card);
+ retries--;
+ goto try_again;
+ } else if (err) {
+ ocr &= ~R4_18V_PRESENT;
+ }
+ err = 0;
+ } else {
+ ocr &= ~R4_18V_PRESENT;
+ }
+
+ /*
+ * For native busses: set card RCA and quit open drain mode.
+ */
+ if (!powered_resume && !mmc_host_is_spi(host)) {
+ err = mmc_send_relative_addr(host, &card->rca);
+ if (err)
+ goto remove;
+
+ /*
+ * Update oldcard with the new RCA received from the SDIO
+ * device -- we're doing this so that it's updated in the
+ * "card" struct when oldcard overwrites that later.
+ */
+ if (oldcard)
+ oldcard->rca = card->rca;
+ }
+
+ /*
+ * Read CSD, before selecting the card
+ */
+ if (!oldcard && card->type == MMC_TYPE_SD_COMBO) {
+ err = mmc_sd_get_csd(host, card);
+ if (err)
+ return err;
+
+ mmc_decode_cid(card);
+ }
+
+ /*
+ * Select card, as all following commands rely on that.
+ */
+ if (!powered_resume && !mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ goto remove;
+ }
+
+ if (card->quirks & MMC_QUIRK_NONSTD_SDIO) {
+ /*
+		 * This is a non-standard SDIO device, meaning it doesn't
+		 * have any CIA (Common I/O Area) registers present.
+		 * It is the host's responsibility to fill the cccr and cis
+		 * structures in init_card().
+ */
+ mmc_set_clock(host, card->cis.max_dtr);
+
+ if (card->cccr.high_speed) {
+ mmc_set_timing(card->host, MMC_TIMING_SD_HS);
+ }
+
+ goto finish;
+ }
+
+ /*
+ * Read the common registers.
+ */
+ err = sdio_read_cccr(card, ocr);
+ if (err)
+ goto remove;
+
+ /*
+ * Read the common CIS tuples.
+ */
+ err = sdio_read_common_cis(card);
+ if (err)
+ goto remove;
+
+ if (oldcard) {
+ int same = (card->cis.vendor == oldcard->cis.vendor &&
+ card->cis.device == oldcard->cis.device);
+ mmc_remove_card(card);
+ if (!same)
+ return -ENOENT;
+
+ card = oldcard;
+ }
+ card->ocr = ocr_card;
+ mmc_fixup_device(card, NULL);
+
+ if (card->type == MMC_TYPE_SD_COMBO) {
+ err = mmc_sd_setup_card(host, card, oldcard != NULL);
+ /* handle as SDIO-only card if memory init failed */
+ if (err) {
+ mmc_go_idle(host);
+ if (mmc_host_is_spi(host))
+ /* should not fail, as it worked previously */
+ mmc_spi_set_crc(host, use_spi_crc);
+ card->type = MMC_TYPE_SDIO;
+ } else
+ card->dev.type = &sd_type;
+ }
+
+ /*
+ * If needed, disconnect card detection pull-up resistor.
+ */
+ err = sdio_disable_cd(card);
+ if (err)
+ goto remove;
+
+ /* Initialization sequence for UHS-I cards */
+ /* Only if card supports 1.8v and UHS signaling */
+ if ((ocr & R4_18V_PRESENT) && card->sw_caps.sd3_bus_mode) {
+ err = mmc_sdio_init_uhs_card(card);
+ if (err)
+ goto remove;
+ } else {
+ /*
+ * Switch to high-speed (if supported).
+ */
+ err = sdio_enable_hs(card);
+ if (err > 0)
+ mmc_set_timing(card->host, MMC_TIMING_SD_HS);
+ else if (err)
+ goto remove;
+
+ /*
+ * Change to the card's maximum speed.
+ */
+ mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+
+ /*
+ * Switch to wider bus (if supported).
+ */
+ err = sdio_enable_4bit_bus(card);
+ if (err)
+ goto remove;
+ }
+finish:
+ if (!oldcard)
+ host->card = card;
+ return 0;
+
+remove:
+ if (!oldcard)
+ mmc_remove_card(card);
+
+err:
+ return err;
+}
+
+/*
+ * Host is being removed. Free up the current card.
+ */
+static void mmc_sdio_remove(struct mmc_host *host)
+{
+ int i;
+
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+	for (i = 0; i < host->card->sdio_funcs; i++) {
+ if (host->card->sdio_func[i]) {
+ sdio_remove_func(host->card->sdio_func[i]);
+ host->card->sdio_func[i] = NULL;
+ }
+ }
+
+ mmc_remove_card(host->card);
+ host->card = NULL;
+}
+
+/*
+ * Card detection - card is alive.
+ */
+static int mmc_sdio_alive(struct mmc_host *host)
+{
+ return mmc_select_card(host->card);
+}
+
+/*
+ * Card detection callback from host.
+ */
+static void mmc_sdio_detect(struct mmc_host *host)
+{
+ int err;
+
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ /* Make sure card is powered before detecting it */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD) {
+ err = pm_runtime_get_sync(&host->card->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(&host->card->dev);
+ goto out;
+ }
+ }
+
+ mmc_claim_host(host);
+
+ /*
+ * Just check if our card has been removed.
+ */
+ err = _mmc_detect_card_removed(host);
+
+ mmc_release_host(host);
+
+ /*
+ * Tell PM core it's OK to power off the card now.
+ *
+ * The _sync variant is used in order to ensure that the card
+ * is left powered off in case an error occurred, and the card
+ * is going to be removed.
+ *
+ * Since there is no specific reason to believe a new user
+ * is about to show up at this point, the _sync variant is
+ * desirable anyway.
+ */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put_sync(&host->card->dev);
+
+out:
+ if (err) {
+ mmc_sdio_remove(host);
+
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ }
+}
+
+/*
+ * SDIO pre_suspend. We need to suspend all functions separately.
+ * Therefore all registered functions must have drivers with suspend
+ * and resume methods. Failing that we simply remove the whole card.
+ */
+static int mmc_sdio_pre_suspend(struct mmc_host *host)
+{
+ int i, err = 0;
+
+ for (i = 0; i < host->card->sdio_funcs; i++) {
+ struct sdio_func *func = host->card->sdio_func[i];
+ if (func && sdio_func_present(func) && func->dev.driver) {
+ const struct dev_pm_ops *pmops = func->dev.driver->pm;
+ if (!pmops || !pmops->suspend || !pmops->resume) {
+ /* force removal of entire card in that case */
+ err = -ENOSYS;
+ break;
+ }
+ }
+ }
+
+ return err;
+}
+
+/*
+ * SDIO suspend. Suspend all functions separately.
+ */
+static int mmc_sdio_suspend(struct mmc_host *host)
+{
+ if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
+ mmc_claim_host(host);
+ sdio_disable_wide(host->card);
+ mmc_release_host(host);
+ }
+
+ if (!mmc_card_keep_power(host))
+ mmc_power_off(host);
+
+ return 0;
+}
+
+static int mmc_sdio_resume(struct mmc_host *host)
+{
+ int err = 0;
+
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ /* Basic card reinitialization. */
+ mmc_claim_host(host);
+
+ /* Restore power if needed */
+ if (!mmc_card_keep_power(host)) {
+ mmc_power_up(host, host->card->ocr);
+ /*
+ * Tell runtime PM core we just powered up the card,
+ * since it still believes the card is powered off.
+ * Note that currently runtime PM is only enabled
+ * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
+ */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD) {
+ pm_runtime_disable(&host->card->dev);
+ pm_runtime_set_active(&host->card->dev);
+ pm_runtime_enable(&host->card->dev);
+ }
+ }
+
+ /* No need to reinitialize powered-resumed nonremovable cards */
+ if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) {
+ sdio_reset(host);
+ mmc_go_idle(host);
+ mmc_send_if_cond(host, host->card->ocr);
+ err = mmc_send_io_op_cond(host, 0, NULL);
+ if (!err)
+ err = mmc_sdio_init_card(host, host->card->ocr,
+ host->card,
+ mmc_card_keep_power(host));
+ } else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
+ /* We may have switched to 1-bit mode during suspend */
+ err = sdio_enable_4bit_bus(host->card);
+ }
+
+ if (!err && host->sdio_irqs) {
+ if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
+ wake_up_process(host->sdio_irq_thread);
+ } else if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
+ host->ops->enable_sdio_irq(host, 1);
+ mmc_host_clk_release(host);
+ }
+ }
+
+ mmc_release_host(host);
+
+ host->pm_flags &= ~MMC_PM_KEEP_POWER;
+ return err;
+}
+
+static int mmc_sdio_power_restore(struct mmc_host *host)
+{
+ int ret;
+
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ mmc_claim_host(host);
+
+ /*
+ * Reset the card by performing the same steps that are taken by
+ * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
+ *
+ * sdio_reset() is technically not needed. Having just powered up the
+ * hardware, it should already be in reset state. However, some
+ * platforms (such as SD8686 on OLPC) do not instantly cut power,
+ * meaning that a reset is required when restoring power soon after
+ * powering off. It is harmless in other cases.
+ *
+ * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
+ * is not necessary for non-removable cards. However, it is required
+ * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
+ * harmless in other situations.
+ *
+ */
+
+ sdio_reset(host);
+ mmc_go_idle(host);
+ mmc_send_if_cond(host, host->card->ocr);
+
+ ret = mmc_send_io_op_cond(host, 0, NULL);
+ if (ret)
+ goto out;
+
+ ret = mmc_sdio_init_card(host, host->card->ocr, host->card,
+ mmc_card_keep_power(host));
+ if (!ret && host->sdio_irqs)
+ mmc_signal_sdio_irq(host);
+
+out:
+ mmc_release_host(host);
+
+ return ret;
+}
+
+static int mmc_sdio_runtime_suspend(struct mmc_host *host)
+{
+ /* No references to the card, cut the power to it. */
+ mmc_power_off(host);
+ return 0;
+}
+
+static int mmc_sdio_runtime_resume(struct mmc_host *host)
+{
+ /* Restore power and re-initialize. */
+ mmc_power_up(host, host->card->ocr);
+ return mmc_sdio_power_restore(host);
+}
+
+static const struct mmc_bus_ops mmc_sdio_ops = {
+ .remove = mmc_sdio_remove,
+ .detect = mmc_sdio_detect,
+ .pre_suspend = mmc_sdio_pre_suspend,
+ .suspend = mmc_sdio_suspend,
+ .resume = mmc_sdio_resume,
+ .runtime_suspend = mmc_sdio_runtime_suspend,
+ .runtime_resume = mmc_sdio_runtime_resume,
+ .power_restore = mmc_sdio_power_restore,
+ .alive = mmc_sdio_alive,
+};
+
+
+/*
+ * Starting point for SDIO card init.
+ */
+int mmc_attach_sdio(struct mmc_host *host)
+{
+ int err, i, funcs;
+ u32 ocr, rocr;
+ struct mmc_card *card;
+
+ BUG_ON(!host);
+ WARN_ON(!host->claimed);
+
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (err)
+ return err;
+
+ mmc_attach_bus(host, &mmc_sdio_ops);
+ if (host->ocr_avail_sdio)
+ host->ocr_avail = host->ocr_avail_sdio;
+
+
+ rocr = mmc_select_voltage(host, ocr);
+
+ /*
+ * Can we support the voltage(s) of the card(s)?
+ */
+ if (!rocr) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Detect and init the card.
+ */
+ err = mmc_sdio_init_card(host, rocr, NULL, 0);
+ if (err)
+ goto err;
+
+ card = host->card;
+
+ /*
+ * Enable runtime PM only if supported by host+card+board
+ */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD) {
+ /*
+ * Let runtime PM core know our card is active
+ */
+ err = pm_runtime_set_active(&card->dev);
+ if (err)
+ goto remove;
+
+ /*
+ * Enable runtime PM for this card
+ */
+ pm_runtime_enable(&card->dev);
+ }
+
+ /*
+ * The number of functions on the card is encoded inside
+ * the ocr.
+ */
+ funcs = (ocr & 0x70000000) >> 28;
+ card->sdio_funcs = 0;
+
+ /*
+ * Initialize (but don't add) all present functions.
+ */
+ for (i = 0; i < funcs; i++, card->sdio_funcs++) {
+ err = sdio_init_func(host->card, i + 1);
+ if (err)
+ goto remove;
+
+ /*
+ * Enable Runtime PM for this func (if supported)
+ */
+ if (host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_enable(&card->sdio_func[i]->dev);
+ }
+
+ /*
+ * First add the card to the driver model...
+ */
+ mmc_release_host(host);
+ err = mmc_add_card(host->card);
+ if (err)
+ goto remove_added;
+
+ /*
+ * ...then the SDIO functions.
+ */
+	for (i = 0; i < funcs; i++) {
+ err = sdio_add_func(host->card->sdio_func[i]);
+ if (err)
+ goto remove_added;
+ }
+
+ mmc_claim_host(host);
+ return 0;
+
+
+remove_added:
+ /* Remove without lock if the device has been added. */
+ mmc_sdio_remove(host);
+ mmc_claim_host(host);
+remove:
+ /* And with lock if it hasn't been added. */
+ mmc_release_host(host);
+ if (host->card)
+ mmc_sdio_remove(host);
+ mmc_claim_host(host);
+err:
+ mmc_detach_bus(host);
+
+ pr_err("%s: error %d whilst initialising SDIO card\n",
+ mmc_hostname(host), err);
+
+ return err;
+}
+
diff --git a/kernel/drivers/mmc/core/sdio_bus.c b/kernel/drivers/mmc/core/sdio_bus.c
new file mode 100644
index 000000000..bee02e644
--- /dev/null
+++ b/kernel/drivers/mmc/core/sdio_bus.c
@@ -0,0 +1,343 @@
+/*
+ * linux/drivers/mmc/core/sdio_bus.c
+ *
+ * Copyright 2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * SDIO function driver model
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/acpi.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/of.h>
+
+#include "core.h"
+#include "sdio_cis.h"
+#include "sdio_bus.h"
+
+#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
+
+/* show configuration fields */
+#define sdio_config_attr(field, format_string) \
+static ssize_t \
+field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct sdio_func *func; \
+ \
+ func = dev_to_sdio_func (dev); \
+ return sprintf (buf, format_string, func->field); \
+} \
+static DEVICE_ATTR_RO(field)
+
+sdio_config_attr(class, "0x%02x\n");
+sdio_config_attr(vendor, "0x%04x\n");
+sdio_config_attr(device, "0x%04x\n");
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sdio_func *func = dev_to_sdio_func (dev);
+
+ return sprintf(buf, "sdio:c%02Xv%04Xd%04X\n",
+ func->class, func->vendor, func->device);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *sdio_dev_attrs[] = {
+ &dev_attr_class.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(sdio_dev);
+
+static const struct sdio_device_id *sdio_match_one(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ if (id->class != (__u8)SDIO_ANY_ID && id->class != func->class)
+ return NULL;
+ if (id->vendor != (__u16)SDIO_ANY_ID && id->vendor != func->vendor)
+ return NULL;
+ if (id->device != (__u16)SDIO_ANY_ID && id->device != func->device)
+ return NULL;
+ return id;
+}
+
+static const struct sdio_device_id *sdio_match_device(struct sdio_func *func,
+ struct sdio_driver *sdrv)
+{
+ const struct sdio_device_id *ids;
+
+ ids = sdrv->id_table;
+
+ if (ids) {
+ while (ids->class || ids->vendor || ids->device) {
+ if (sdio_match_one(func, ids))
+ return ids;
+ ids++;
+ }
+ }
+
+ return NULL;
+}
+
+static int sdio_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct sdio_driver *sdrv = to_sdio_driver(drv);
+
+ if (sdio_match_device(func, sdrv))
+ return 1;
+
+ return 0;
+}
+
+static int
+sdio_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+
+ if (add_uevent_var(env,
+ "SDIO_CLASS=%02X", func->class))
+ return -ENOMEM;
+
+ if (add_uevent_var(env,
+ "SDIO_ID=%04X:%04X", func->vendor, func->device))
+ return -ENOMEM;
+
+ if (add_uevent_var(env,
+ "MODALIAS=sdio:c%02Xv%04Xd%04X",
+ func->class, func->vendor, func->device))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int sdio_bus_probe(struct device *dev)
+{
+ struct sdio_driver *drv = to_sdio_driver(dev->driver);
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ const struct sdio_device_id *id;
+ int ret;
+
+ id = sdio_match_device(func, drv);
+ if (!id)
+ return -ENODEV;
+
+ /* Unbound SDIO functions are always suspended.
+ * During probe, the function is set active and the usage count
+ * is incremented. If the driver supports runtime PM,
+ * it should call pm_runtime_put_noidle() in its probe routine and
+ * pm_runtime_get_noresume() in its remove routine.
+ */
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) {
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto disable_runtimepm;
+ }
+
+ /* Set the default block size so the driver is sure it's something
+ * sensible. */
+ sdio_claim_host(func);
+ ret = sdio_set_block_size(func, 0);
+ sdio_release_host(func);
+ if (ret)
+ goto disable_runtimepm;
+
+ ret = drv->probe(func, id);
+ if (ret)
+ goto disable_runtimepm;
+
+ return 0;
+
+disable_runtimepm:
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put_noidle(dev);
+ return ret;
+}
+
+static int sdio_bus_remove(struct device *dev)
+{
+ struct sdio_driver *drv = to_sdio_driver(dev->driver);
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ int ret = 0;
+
+ /* Make sure card is powered before invoking ->remove() */
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_get_sync(dev);
+
+ drv->remove(func);
+
+ if (func->irq_handler) {
+ pr_warn("WARNING: driver %s did not remove its interrupt handler!\n",
+ drv->name);
+ sdio_claim_host(func);
+ sdio_release_irq(func);
+ sdio_release_host(func);
+ }
+
+ /* First, undo the increment made directly above */
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put_noidle(dev);
+
+ /* Then undo the runtime PM settings in sdio_bus_probe() */
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put_sync(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sdio_bus_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
+ SET_RUNTIME_PM_OPS(
+ pm_generic_runtime_suspend,
+ pm_generic_runtime_resume,
+ NULL
+ )
+};
+
+static struct bus_type sdio_bus_type = {
+ .name = "sdio",
+ .dev_groups = sdio_dev_groups,
+ .match = sdio_bus_match,
+ .uevent = sdio_bus_uevent,
+ .probe = sdio_bus_probe,
+ .remove = sdio_bus_remove,
+ .pm = &sdio_bus_pm_ops,
+};
+
+int sdio_register_bus(void)
+{
+ return bus_register(&sdio_bus_type);
+}
+
+void sdio_unregister_bus(void)
+{
+ bus_unregister(&sdio_bus_type);
+}
+
+/**
+ * sdio_register_driver - register a function driver
+ * @drv: SDIO function driver
+ */
+int sdio_register_driver(struct sdio_driver *drv)
+{
+ drv->drv.name = drv->name;
+ drv->drv.bus = &sdio_bus_type;
+ return driver_register(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(sdio_register_driver);
+
+/**
+ * sdio_unregister_driver - unregister a function driver
+ * @drv: SDIO function driver
+ */
+void sdio_unregister_driver(struct sdio_driver *drv)
+{
+ drv->drv.bus = &sdio_bus_type;
+ driver_unregister(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(sdio_unregister_driver);
+
+static void sdio_release_func(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+
+ sdio_free_func_cis(func);
+
+ kfree(func->info);
+
+ kfree(func);
+}
+
+/*
+ * Allocate and initialise a new SDIO function structure.
+ */
+struct sdio_func *sdio_alloc_func(struct mmc_card *card)
+{
+ struct sdio_func *func;
+
+ func = kzalloc(sizeof(struct sdio_func), GFP_KERNEL);
+ if (!func)
+ return ERR_PTR(-ENOMEM);
+
+ func->card = card;
+
+ device_initialize(&func->dev);
+
+ func->dev.parent = &card->dev;
+ func->dev.bus = &sdio_bus_type;
+ func->dev.release = sdio_release_func;
+
+ return func;
+}
+
+#ifdef CONFIG_ACPI
+static void sdio_acpi_set_handle(struct sdio_func *func)
+{
+ struct mmc_host *host = func->card->host;
+ u64 addr = ((u64)host->slotno << 16) | func->num;
+
+ acpi_preset_companion(&func->dev, ACPI_COMPANION(host->parent), addr);
+}
+#else
+static inline void sdio_acpi_set_handle(struct sdio_func *func) {}
+#endif
+
+static void sdio_set_of_node(struct sdio_func *func)
+{
+ struct mmc_host *host = func->card->host;
+
+ func->dev.of_node = mmc_of_find_child_device(host, func->num);
+}
+
+/*
+ * Register a new SDIO function with the driver model.
+ */
+int sdio_add_func(struct sdio_func *func)
+{
+ int ret;
+
+ dev_set_name(&func->dev, "%s:%d", mmc_card_id(func->card), func->num);
+
+ sdio_set_of_node(func);
+ sdio_acpi_set_handle(func);
+ ret = device_add(&func->dev);
+ if (ret == 0) {
+ sdio_func_set_present(func);
+ dev_pm_domain_attach(&func->dev, false);
+ }
+
+ return ret;
+}
+
+/*
+ * Unregister an SDIO function with the driver model, and
+ * (eventually) free it.
+ * This function can be called through error paths where sdio_add_func() was
+ * never executed (because a failure occurred at an earlier point).
+ */
+void sdio_remove_func(struct sdio_func *func)
+{
+ if (!sdio_func_present(func))
+ return;
+
+ dev_pm_domain_detach(&func->dev, false);
+ device_del(&func->dev);
+ of_node_put(func->dev.of_node);
+ put_device(&func->dev);
+}
+
diff --git a/kernel/drivers/mmc/core/sdio_bus.h b/kernel/drivers/mmc/core/sdio_bus.h
new file mode 100644
index 000000000..567a76821
--- /dev/null
+++ b/kernel/drivers/mmc/core/sdio_bus.h
@@ -0,0 +1,22 @@
+/*
+ * linux/drivers/mmc/core/sdio_bus.h
+ *
+ * Copyright 2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+#ifndef _MMC_CORE_SDIO_BUS_H
+#define _MMC_CORE_SDIO_BUS_H
+
+struct sdio_func *sdio_alloc_func(struct mmc_card *card);
+int sdio_add_func(struct sdio_func *func);
+void sdio_remove_func(struct sdio_func *func);
+
+int sdio_register_bus(void);
+void sdio_unregister_bus(void);
+
+#endif
+
diff --git a/kernel/drivers/mmc/core/sdio_cis.c b/kernel/drivers/mmc/core/sdio_cis.c
new file mode 100644
index 000000000..8e94e555b
--- /dev/null
+++ b/kernel/drivers/mmc/core/sdio_cis.c
@@ -0,0 +1,412 @@
+/*
+ * linux/drivers/mmc/core/sdio_cis.c
+ *
+ * Author: Nicolas Pitre
+ * Created: June 11, 2007
+ * Copyright: MontaVista Software Inc.
+ *
+ * Copyright 2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+
+#include "sdio_cis.h"
+#include "sdio_ops.h"
+
+static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
+ const unsigned char *buf, unsigned size)
+{
+ unsigned i, nr_strings;
+ char **buffer, *string;
+
+ /* Find all null-terminated (including zero length) strings in
+ the TPLLV1_INFO field. Trailing garbage is ignored. */
+ buf += 2;
+ size -= 2;
+
+ nr_strings = 0;
+ for (i = 0; i < size; i++) {
+ if (buf[i] == 0xff)
+ break;
+ if (buf[i] == 0)
+ nr_strings++;
+ }
+ if (nr_strings == 0)
+ return 0;
+
+ size = i;
+
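+	/*
+	 * A single allocation holds the array of string pointers followed
+	 * by the string data itself.
+	 */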
+ buffer = kzalloc(sizeof(char*) * nr_strings + size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ string = (char*)(buffer + nr_strings);
+
+ for (i = 0; i < nr_strings; i++) {
+ buffer[i] = string;
+ strcpy(string, buf);
+ string += strlen(string) + 1;
+ buf += strlen(buf) + 1;
+ }
+
+ if (func) {
+ func->num_info = nr_strings;
+ func->info = (const char**)buffer;
+ } else {
+ card->num_info = nr_strings;
+ card->info = (const char**)buffer;
+ }
+
+ return 0;
+}
+
+static int cistpl_manfid(struct mmc_card *card, struct sdio_func *func,
+ const unsigned char *buf, unsigned size)
+{
+ unsigned int vendor, device;
+
+ /* TPLMID_MANF */
+ vendor = buf[0] | (buf[1] << 8);
+
+ /* TPLMID_CARD */
+ device = buf[2] | (buf[3] << 8);
+
+ if (func) {
+ func->vendor = vendor;
+ func->device = device;
+ } else {
+ card->cis.vendor = vendor;
+ card->cis.device = device;
+ }
+
+ return 0;
+}
+
+static const unsigned char speed_val[16] =
+ { 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80 };
+static const unsigned int speed_unit[8] =
+ { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
+
+
+typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
+ const unsigned char *, unsigned);
+
+struct cis_tpl {
+ unsigned char code;
+ unsigned char min_size;
+ tpl_parse_t *parse;
+};
+
+static int cis_tpl_parse(struct mmc_card *card, struct sdio_func *func,
+ const char *tpl_descr,
+ const struct cis_tpl *tpl, int tpl_count,
+ unsigned char code,
+ const unsigned char *buf, unsigned size)
+{
+ int i, ret;
+
+ /* look for a matching code in the table */
+ for (i = 0; i < tpl_count; i++, tpl++) {
+ if (tpl->code == code)
+ break;
+ }
+ if (i < tpl_count) {
+ if (size >= tpl->min_size) {
+ if (tpl->parse)
+ ret = tpl->parse(card, func, buf, size);
+ else
+ ret = -EILSEQ; /* known tuple, not parsed */
+ } else {
+ /* invalid tuple */
+ ret = -EINVAL;
+ }
+ if (ret && ret != -EILSEQ && ret != -ENOENT) {
+ pr_err("%s: bad %s tuple 0x%02x (%u bytes)\n",
+ mmc_hostname(card->host), tpl_descr, code, size);
+ }
+ } else {
+ /* unknown tuple */
+ ret = -ENOENT;
+ }
+
+ return ret;
+}
+
+static int cistpl_funce_common(struct mmc_card *card, struct sdio_func *func,
+ const unsigned char *buf, unsigned size)
+{
+ /* Only valid for the common CIS (function 0) */
+ if (func)
+ return -EINVAL;
+
+ /* TPLFE_FN0_BLK_SIZE */
+ card->cis.blksize = buf[1] | (buf[2] << 8);
+
+ /* TPLFE_MAX_TRAN_SPEED */
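+	/* bits 0-2 select the rate unit, bits 3-6 the time value */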
+ card->cis.max_dtr = speed_val[(buf[3] >> 3) & 15] *
+ speed_unit[buf[3] & 7];
+
+ return 0;
+}
+
+static int cistpl_funce_func(struct mmc_card *card, struct sdio_func *func,
+ const unsigned char *buf, unsigned size)
+{
+ unsigned vsn;
+ unsigned min_size;
+
+ /* Only valid for the individual function's CIS (1-7) */
+ if (!func)
+ return -EINVAL;
+
+ /*
+ * This tuple has a different length depending on the SDIO spec
+ * version.
+ */
+ vsn = func->card->cccr.sdio_vsn;
+ min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
+
+ if (size < min_size)
+ return -EINVAL;
+
+ /* TPLFE_MAX_BLK_SIZE */
+ func->max_blksize = buf[12] | (buf[13] << 8);
+
+ /* TPLFE_ENABLE_TIMEOUT_VAL, present in ver 1.1 and above */
+ if (vsn > SDIO_SDIO_REV_1_00)
+ func->enable_timeout = (buf[28] | (buf[29] << 8)) * 10;
+ else
+ func->enable_timeout = jiffies_to_msecs(HZ);
+
+ return 0;
+}
+
+/*
+ * Known TPLFE_TYPEs table for CISTPL_FUNCE tuples.
+ *
+ * Note that, unlike PCMCIA, CISTPL_FUNCE tuples are not parsed depending
+ * on the TPLFID_FUNCTION value of the previous CISTPL_FUNCID, since on SDIO
+ * TPLFID_FUNCTION is always hardcoded to 0x0C.
+ */
+static const struct cis_tpl cis_tpl_funce_list[] = {
+ { 0x00, 4, cistpl_funce_common },
+ { 0x01, 0, cistpl_funce_func },
+ { 0x04, 1+1+6, /* CISTPL_FUNCE_LAN_NODE_ID */ },
+};
+
+static int cistpl_funce(struct mmc_card *card, struct sdio_func *func,
+ const unsigned char *buf, unsigned size)
+{
+ if (size < 1)
+ return -EINVAL;
+
+ return cis_tpl_parse(card, func, "CISTPL_FUNCE",
+ cis_tpl_funce_list,
+ ARRAY_SIZE(cis_tpl_funce_list),
+ buf[0], buf, size);
+}
+
+/* Known TPL_CODEs table for CIS tuples */
+static const struct cis_tpl cis_tpl_list[] = {
+ { 0x15, 3, cistpl_vers_1 },
+ { 0x20, 4, cistpl_manfid },
+ { 0x21, 2, /* cistpl_funcid */ },
+ { 0x22, 0, cistpl_funce },
+};
+
+static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
+{
+ int ret;
+ struct sdio_func_tuple *this, **prev;
+ unsigned i, ptr = 0;
+
+ /*
+ * Note that this works for the common CIS (function number 0) as
+	 * well as a function's CIS, since SDIO_CCCR_CIS and SDIO_FBR_CIS
+ * have the same offset.
+ */
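+	/* The 24-bit CIS pointer is read one byte at a time, little-endian */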
+ for (i = 0; i < 3; i++) {
+ unsigned char x, fn;
+
+ if (func)
+ fn = func->num;
+ else
+ fn = 0;
+
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_FBR_BASE(fn) + SDIO_FBR_CIS + i, 0, &x);
+ if (ret)
+ return ret;
+ ptr |= x << (i * 8);
+ }
+
+ if (func)
+ prev = &func->tuples;
+ else
+ prev = &card->tuples;
+
+ BUG_ON(*prev);
+
+ do {
+ unsigned char tpl_code, tpl_link;
+
+ ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
+ if (ret)
+ break;
+
+ /* 0xff means we're done */
+ if (tpl_code == 0xff)
+ break;
+
+ /* null entries have no link field or data */
+ if (tpl_code == 0x00)
+ continue;
+
+ ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_link);
+ if (ret)
+ break;
+
+ /* a size of 0xff also means we're done */
+ if (tpl_link == 0xff)
+ break;
+
+ this = kmalloc(sizeof(*this) + tpl_link, GFP_KERNEL);
+ if (!this)
+ return -ENOMEM;
+
+ for (i = 0; i < tpl_link; i++) {
+ ret = mmc_io_rw_direct(card, 0, 0,
+ ptr + i, 0, &this->data[i]);
+ if (ret)
+ break;
+ }
+ if (ret) {
+ kfree(this);
+ break;
+ }
+
+ /* Try to parse the CIS tuple */
+ ret = cis_tpl_parse(card, func, "CIS",
+ cis_tpl_list, ARRAY_SIZE(cis_tpl_list),
+ tpl_code, this->data, tpl_link);
+ if (ret == -EILSEQ || ret == -ENOENT) {
+ /*
+ * The tuple is unknown or known but not parsed.
+ * Queue the tuple for the function driver.
+ */
+ this->next = NULL;
+ this->code = tpl_code;
+ this->size = tpl_link;
+ *prev = this;
+ prev = &this->next;
+
+ if (ret == -ENOENT) {
+ /* warn about unknown tuples */
+				pr_warn_ratelimited("%s: queuing unknown CIS tuple 0x%02x (%u bytes)\n",
+						    mmc_hostname(card->host),
+						    tpl_code, tpl_link);
+ }
+
+ /* keep on analyzing tuples */
+ ret = 0;
+ } else {
+ /*
+ * We don't need the tuple anymore if it was
+ * successfully parsed by the SDIO core or if it is
+ * not going to be queued for a driver.
+ */
+ kfree(this);
+ }
+
+ ptr += tpl_link;
+ } while (!ret);
+
+ /*
+ * Link in all unknown tuples found in the common CIS so that
+ * drivers don't have to go digging in two places.
+ */
+ if (func)
+ *prev = card->tuples;
+
+ return ret;
+}
+
+int sdio_read_common_cis(struct mmc_card *card)
+{
+ return sdio_read_cis(card, NULL);
+}
+
+void sdio_free_common_cis(struct mmc_card *card)
+{
+ struct sdio_func_tuple *tuple, *victim;
+
+ tuple = card->tuples;
+
+ while (tuple) {
+ victim = tuple;
+ tuple = tuple->next;
+ kfree(victim);
+ }
+
+ card->tuples = NULL;
+}
+
+int sdio_read_func_cis(struct sdio_func *func)
+{
+ int ret;
+
+ ret = sdio_read_cis(func->card, func);
+ if (ret)
+ return ret;
+
+ /*
+ * Since we've linked to tuples in the card structure,
+ * we must make sure we have a reference to it.
+ */
+ get_device(&func->card->dev);
+
+ /*
+ * Vendor/device id is optional for function CIS, so
+ * copy it from the card structure as needed.
+ */
+ if (func->vendor == 0) {
+ func->vendor = func->card->cis.vendor;
+ func->device = func->card->cis.device;
+ }
+
+ return 0;
+}
+
+void sdio_free_func_cis(struct sdio_func *func)
+{
+ struct sdio_func_tuple *tuple, *victim;
+
+ tuple = func->tuples;
+
+ while (tuple && tuple != func->card->tuples) {
+ victim = tuple;
+ tuple = tuple->next;
+ kfree(victim);
+ }
+
+ func->tuples = NULL;
+
+ /*
+ * We have now removed the link to the tuples in the
+ * card structure, so remove the reference.
+ */
+ put_device(&func->card->dev);
+}
+
diff --git a/kernel/drivers/mmc/core/sdio_cis.h b/kernel/drivers/mmc/core/sdio_cis.h
new file mode 100644
index 000000000..4d903c2e4
--- /dev/null
+++ b/kernel/drivers/mmc/core/sdio_cis.h
@@ -0,0 +1,23 @@
+/*
+ * linux/drivers/mmc/core/sdio_cis.h
+ *
+ * Author: Nicolas Pitre
+ * Created: June 11, 2007
+ * Copyright: MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef _MMC_SDIO_CIS_H
+#define _MMC_SDIO_CIS_H
+
+int sdio_read_common_cis(struct mmc_card *card);
+void sdio_free_common_cis(struct mmc_card *card);
+
+int sdio_read_func_cis(struct sdio_func *func);
+void sdio_free_func_cis(struct sdio_func *func);
+
+#endif
diff --git a/kernel/drivers/mmc/core/sdio_io.c b/kernel/drivers/mmc/core/sdio_io.c
new file mode 100644
index 000000000..78cb4d5d9
--- /dev/null
+++ b/kernel/drivers/mmc/core/sdio_io.c
@@ -0,0 +1,722 @@
+/*
+ * linux/drivers/mmc/core/sdio_io.c
+ *
+ * Copyright 2007-2008 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/export.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+
+#include "sdio_ops.h"
+
+/**
+ * sdio_claim_host - exclusively claim a bus for a certain SDIO function
+ * @func: SDIO function that will be accessed
+ *
+ * Claim a bus for a set of operations. The SDIO function given
+ * is used to figure out which bus is relevant.
+ */
+void sdio_claim_host(struct sdio_func *func)
+{
+ BUG_ON(!func);
+ BUG_ON(!func->card);
+
+ mmc_claim_host(func->card->host);
+}
+EXPORT_SYMBOL_GPL(sdio_claim_host);
+
+/**
+ * sdio_release_host - release a bus for a certain SDIO function
+ * @func: SDIO function that was accessed
+ *
+ * Release a bus, allowing others to claim the bus for their
+ * operations.
+ */
+void sdio_release_host(struct sdio_func *func)
+{
+ BUG_ON(!func);
+ BUG_ON(!func->card);
+
+ mmc_release_host(func->card->host);
+}
+EXPORT_SYMBOL_GPL(sdio_release_host);
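A minimal sketch of how a function driver brackets register access with this pair, assuming a hypothetical my_read_status() helper and register offset:

#include <linux/mmc/sdio_func.h>

static int my_read_status(struct sdio_func *func, u8 *status)
{
	int err;

	sdio_claim_host(func);
	/* 0x10 is a hypothetical function register offset */
	*status = sdio_readb(func, 0x10, &err);
	sdio_release_host(func);

	return err;
}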
+
+/**
+ * sdio_enable_func - enables a SDIO function for usage
+ * @func: SDIO function to enable
+ *
+ * Powers up and activates a SDIO function so that register
+ * access is possible.
+ */
+int sdio_enable_func(struct sdio_func *func)
+{
+ int ret;
+ unsigned char reg;
+ unsigned long timeout;
+
+ BUG_ON(!func);
+ BUG_ON(!func->card);
+
+ pr_debug("SDIO: Enabling device %s...\n", sdio_func_id(func));
+
+ ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IOEx, 0, &reg);
+ if (ret)
+ goto err;
+
+ reg |= 1 << func->num;
+
+ ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IOEx, reg, NULL);
+ if (ret)
+ goto err;
+
+ timeout = jiffies + msecs_to_jiffies(func->enable_timeout);
+
+ while (1) {
+ ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IORx, 0, &reg);
+ if (ret)
+ goto err;
+ if (reg & (1 << func->num))
+ break;
+ ret = -ETIME;
+ if (time_after(jiffies, timeout))
+ goto err;
+ }
+
+ pr_debug("SDIO: Enabled device %s\n", sdio_func_id(func));
+
+ return 0;
+
+err:
+ pr_debug("SDIO: Failed to enable device %s\n", sdio_func_id(func));
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdio_enable_func);
+
+/**
+ * sdio_disable_func - disable a SDIO function
+ * @func: SDIO function to disable
+ *
+ * Powers down and deactivates a SDIO function. Register access
+ * to this function will fail until the function is reenabled.
+ */
+int sdio_disable_func(struct sdio_func *func)
+{
+ int ret;
+ unsigned char reg;
+
+ BUG_ON(!func);
+ BUG_ON(!func->card);
+
+ pr_debug("SDIO: Disabling device %s...\n", sdio_func_id(func));
+
+ ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IOEx, 0, &reg);
+ if (ret)
+ goto err;
+
+ reg &= ~(1 << func->num);
+
+ ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IOEx, reg, NULL);
+ if (ret)
+ goto err;
+
+ pr_debug("SDIO: Disabled device %s\n", sdio_func_id(func));
+
+ return 0;
+
+err:
+ pr_debug("SDIO: Failed to disable device %s\n", sdio_func_id(func));
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(sdio_disable_func);
+
+/**
+ * sdio_set_block_size - set the block size of an SDIO function
+ * @func: SDIO function to change
+ * @blksz: new block size or 0 to use the default.
+ *
+ * The default block size is the largest supported by both the function
+ * and the host, with a maximum of 512 to ensure that arbitrarily sized
+ * data transfers use the optimal (least) number of commands.
+ *
+ * A driver may call this to override the default block size set by the
+ * core. This can be used to set a block size greater than the maximum
+ * reported by the card; it is the driver's responsibility to ensure
+ * it uses a value that the card supports.
+ *
+ * Returns 0 on success, -EINVAL if the host does not support the
+ * requested block size, or -EIO (etc.) if one of the resultant FBR block
+ * size register writes failed.
+ *
+ */
+int sdio_set_block_size(struct sdio_func *func, unsigned blksz)
+{
+ int ret;
+
+ if (blksz > func->card->host->max_blk_size)
+ return -EINVAL;
+
+ if (blksz == 0) {
+ blksz = min(func->max_blksize, func->card->host->max_blk_size);
+ blksz = min(blksz, 512u);
+ }
+
+ ret = mmc_io_rw_direct(func->card, 1, 0,
+ SDIO_FBR_BASE(func->num) + SDIO_FBR_BLKSIZE,
+ blksz & 0xff, NULL);
+ if (ret)
+ return ret;
+ ret = mmc_io_rw_direct(func->card, 1, 0,
+ SDIO_FBR_BASE(func->num) + SDIO_FBR_BLKSIZE + 1,
+ (blksz >> 8) & 0xff, NULL);
+ if (ret)
+ return ret;
+ func->cur_blksize = blksz;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdio_set_block_size);
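A probe-time sketch tying sdio_enable_func() and sdio_set_block_size() together; my_probe() is a hypothetical driver entry point, and passing 0 lets the core pick the default block size described above:

#include <linux/mod_devicetable.h>
#include <linux/mmc/sdio_func.h>

static int my_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
	int err;

	sdio_claim_host(func);

	err = sdio_enable_func(func);
	if (err)
		goto out;

	/* 0 = min(function max, host max, 512), as documented above */
	err = sdio_set_block_size(func, 0);
	if (err)
		sdio_disable_func(func);

out:
	sdio_release_host(func);
	return err;
}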
+
+/*
+ * Calculate the maximum byte mode transfer size
+ */
+static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
+{
+ unsigned mval = func->card->host->max_blk_size;
+
+ if (mmc_blksz_for_byte_mode(func->card))
+ mval = min(mval, func->cur_blksize);
+ else
+ mval = min(mval, func->max_blksize);
+
+ if (mmc_card_broken_byte_mode_512(func->card))
+ return min(mval, 511u);
+
+ return min(mval, 512u); /* maximum size for byte mode */
+}
+
+/**
+ * sdio_align_size - pads a transfer size to a more optimal value
+ * @func: SDIO function
+ * @sz: original transfer size
+ *
+ * Pads the original data size with a number of extra bytes in
+ * order to avoid controller bugs and/or performance hits
+ * (e.g. some controllers revert to PIO for certain sizes).
+ *
+ * If possible, it will also adjust the size so that it can be
+ * handled in just a single request.
+ *
+ * Returns the improved size, which might be unmodified.
+ */
+unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
+{
+ unsigned int orig_sz;
+ unsigned int blk_sz, byte_sz;
+ unsigned chunk_sz;
+
+ orig_sz = sz;
+
+ /*
+ * Do a first check with the controller, in case it
+ * wants to increase the size up to a point where it
+ * might need more than one block.
+ */
+ sz = mmc_align_data_size(func->card, sz);
+
+ /*
+ * If we can still do this with just a byte transfer, then
+ * we're done.
+ */
+ if (sz <= sdio_max_byte_size(func))
+ return sz;
+
+ if (func->card->cccr.multi_block) {
+ /*
+ * Check if the transfer is already block aligned
+ */
+ if ((sz % func->cur_blksize) == 0)
+ return sz;
+
+ /*
+ * Realign it so that it can be done with one request,
+ * and recheck if the controller still likes it.
+ */
+ blk_sz = ((sz + func->cur_blksize - 1) /
+ func->cur_blksize) * func->cur_blksize;
+ blk_sz = mmc_align_data_size(func->card, blk_sz);
+
+ /*
+ * This value is only good if it is still just
+ * one request.
+ */
+ if ((blk_sz % func->cur_blksize) == 0)
+ return blk_sz;
+
+ /*
+ * We failed to do one request, but at least try to
+ * pad the remainder properly.
+ */
+ byte_sz = mmc_align_data_size(func->card,
+ sz % func->cur_blksize);
+ if (byte_sz <= sdio_max_byte_size(func)) {
+ blk_sz = sz / func->cur_blksize;
+ return blk_sz * func->cur_blksize + byte_sz;
+ }
+ } else {
+ /*
+ * We need multiple requests, so first check that the
+ * controller can handle the chunk size.
+ */
+ chunk_sz = mmc_align_data_size(func->card,
+ sdio_max_byte_size(func));
+ if (chunk_sz == sdio_max_byte_size(func)) {
+ /*
+ * Fix up the size of the remainder (if any)
+ */
+ byte_sz = orig_sz % chunk_sz;
+ if (byte_sz) {
+ byte_sz = mmc_align_data_size(func->card,
+ byte_sz);
+ }
+
+ return (orig_sz / chunk_sz) * chunk_sz + byte_sz;
+ }
+ }
+
+ /*
+ * The controller is simply incapable of transferring the size
+ * we want in a decent manner, so just return the original size.
+ */
+ return orig_sz;
+}
+EXPORT_SYMBOL_GPL(sdio_align_size);
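A short sketch of the intended use: pad the wanted payload with sdio_align_size() before allocating, so the later transfer can usually be issued as a single request (my_alloc_rx_buf() is hypothetical):

#include <linux/slab.h>
#include <linux/mmc/sdio_func.h>

static void *my_alloc_rx_buf(struct sdio_func *func, unsigned int payload,
			     unsigned int *padded)
{
	/* May be larger than payload; the caller transfers *padded bytes */
	*padded = sdio_align_size(func, payload);

	return kmalloc(*padded, GFP_KERNEL);
}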
+
+/* Split an arbitrarily sized data transfer into several
+ * IO_RW_EXTENDED commands. */
+static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
+ unsigned addr, int incr_addr, u8 *buf, unsigned size)
+{
+ unsigned remainder = size;
+ unsigned max_blocks;
+ int ret;
+
+ /* Do the bulk of the transfer using block mode (if supported). */
+ if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) {
+ /* Blocks per command is limited by host count, host transfer
+ * size and the maximum for IO_RW_EXTENDED of 511 blocks. */
+ max_blocks = min(func->card->host->max_blk_count, 511u);
+
+ while (remainder >= func->cur_blksize) {
+ unsigned blocks;
+
+ blocks = remainder / func->cur_blksize;
+ if (blocks > max_blocks)
+ blocks = max_blocks;
+ size = blocks * func->cur_blksize;
+
+ ret = mmc_io_rw_extended(func->card, write,
+ func->num, addr, incr_addr, buf,
+ blocks, func->cur_blksize);
+ if (ret)
+ return ret;
+
+ remainder -= size;
+ buf += size;
+ if (incr_addr)
+ addr += size;
+ }
+ }
+
+ /* Write the remainder using byte mode. */
+ while (remainder > 0) {
+ size = min(remainder, sdio_max_byte_size(func));
+
+ /* Indicate byte mode by setting "blocks" = 0 */
+ ret = mmc_io_rw_extended(func->card, write, func->num, addr,
+ incr_addr, buf, 0, size);
+ if (ret)
+ return ret;
+
+ remainder -= size;
+ buf += size;
+ if (incr_addr)
+ addr += size;
+ }
+ return 0;
+}
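To illustrate the split, assume the card supports block mode, cur_blksize is 256, sdio_max_byte_size() evaluates to 512 and the host allows at least 5 blocks per command: a 1500-byte transfer is then issued as one block-mode CMD53 for 5 blocks (1280 bytes) followed by one byte-mode CMD53 for the remaining 220 bytes.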
+
+/**
+ * sdio_readb - read a single byte from a SDIO function
+ * @func: SDIO function to access
+ * @addr: address to read
+ * @err_ret: optional status value from transfer
+ *
+ * Reads a single byte from the address space of a given SDIO
+ * function. If there is a problem reading the address, 0xff
+ * is returned and @err_ret will contain the error code.
+ */
+u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
+{
+ int ret;
+ u8 val;
+
+ BUG_ON(!func);
+
+ if (err_ret)
+ *err_ret = 0;
+
+ ret = mmc_io_rw_direct(func->card, 0, func->num, addr, 0, &val);
+ if (ret) {
+ if (err_ret)
+ *err_ret = ret;
+ return 0xFF;
+ }
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sdio_readb);
+
+/**
+ * sdio_writeb - write a single byte to a SDIO function
+ * @func: SDIO function to access
+ * @b: byte to write
+ * @addr: address to write to
+ * @err_ret: optional status value from transfer
+ *
+ * Writes a single byte to the address space of a given SDIO
+ * function. @err_ret will contain the status of the actual
+ * transfer.
+ */
+void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
+{
+ int ret;
+
+ BUG_ON(!func);
+
+ ret = mmc_io_rw_direct(func->card, 1, func->num, addr, b, NULL);
+ if (err_ret)
+ *err_ret = ret;
+}
+EXPORT_SYMBOL_GPL(sdio_writeb);
+
+/**
+ * sdio_writeb_readb - write and read a byte from SDIO function
+ * @func: SDIO function to access
+ * @write_byte: byte to write
+ * @addr: address to write to
+ * @err_ret: optional status value from transfer
+ *
+ * Performs a RAW (Read after Write) operation as defined by SDIO spec -
+ * single byte is written to address space of a given SDIO function and
+ * response is read back from the same address, both using single request.
+ * If there is a problem with the operation, 0xff is returned and
+ * @err_ret will contain the error code.
+ */
+u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
+ unsigned int addr, int *err_ret)
+{
+ int ret;
+ u8 val;
+
+ ret = mmc_io_rw_direct(func->card, 1, func->num, addr,
+ write_byte, &val);
+ if (err_ret)
+ *err_ret = ret;
+ if (ret)
+ val = 0xff;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sdio_writeb_readb);
+
+/**
+ * sdio_memcpy_fromio - read a chunk of memory from a SDIO function
+ * @func: SDIO function to access
+ * @dst: buffer to store the data
+ * @addr: address to begin reading from
+ * @count: number of bytes to read
+ *
+ * Reads from the address space of a given SDIO function. Return
+ * value indicates if the transfer succeeded or not.
+ */
+int sdio_memcpy_fromio(struct sdio_func *func, void *dst,
+ unsigned int addr, int count)
+{
+ return sdio_io_rw_ext_helper(func, 0, addr, 1, dst, count);
+}
+EXPORT_SYMBOL_GPL(sdio_memcpy_fromio);
+
+/**
+ * sdio_memcpy_toio - write a chunk of memory to a SDIO function
+ * @func: SDIO function to access
+ * @addr: address to start writing to
+ * @src: buffer that contains the data to write
+ * @count: number of bytes to write
+ *
+ * Writes to the address space of a given SDIO function. Return
+ * value indicates if the transfer succeeded or not.
+ */
+int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr,
+ void *src, int count)
+{
+ return sdio_io_rw_ext_helper(func, 1, addr, 1, src, count);
+}
+EXPORT_SYMBOL_GPL(sdio_memcpy_toio);
+
+/**
+ * sdio_readsb - read from a FIFO on a SDIO function
+ * @func: SDIO function to access
+ * @dst: buffer to store the data
+ * @addr: address of (single byte) FIFO
+ * @count: number of bytes to read
+ *
+ * Reads from the specified FIFO of a given SDIO function. Return
+ * value indicates if the transfer succeeded or not.
+ */
+int sdio_readsb(struct sdio_func *func, void *dst, unsigned int addr,
+ int count)
+{
+ return sdio_io_rw_ext_helper(func, 0, addr, 0, dst, count);
+}
+EXPORT_SYMBOL_GPL(sdio_readsb);
+
+/**
+ * sdio_writesb - write to a FIFO of a SDIO function
+ * @func: SDIO function to access
+ * @addr: address of (single byte) FIFO
+ * @src: buffer that contains the data to write
+ * @count: number of bytes to write
+ *
+ * Writes to the specified FIFO of a given SDIO function. Return
+ * value indicates if the transfer succeeded or not.
+ */
+int sdio_writesb(struct sdio_func *func, unsigned int addr, void *src,
+ int count)
+{
+ return sdio_io_rw_ext_helper(func, 1, addr, 0, src, count);
+}
+EXPORT_SYMBOL_GPL(sdio_writesb);
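A hedged sketch contrasting the two addressing modes: a hypothetical chip that exposes a byte-wide RX FIFO at a fixed address uses sdio_readsb(), whereas an ordinary memory window would use sdio_memcpy_fromio():

#include <linux/mmc/sdio_func.h>

/* 0x1000 is a hypothetical fixed FIFO address on the card */
static int my_read_frame(struct sdio_func *func, void *buf, unsigned int len)
{
	return sdio_readsb(func, buf, 0x1000, len);
}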
+
+/**
+ * sdio_readw - read a 16 bit integer from a SDIO function
+ * @func: SDIO function to access
+ * @addr: address to read
+ * @err_ret: optional status value from transfer
+ *
+ * Reads a 16 bit integer from the address space of a given SDIO
+ * function. If there is a problem reading the address, 0xffff
+ * is returned and @err_ret will contain the error code.
+ */
+u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret)
+{
+ int ret;
+
+ if (err_ret)
+ *err_ret = 0;
+
+ ret = sdio_memcpy_fromio(func, func->tmpbuf, addr, 2);
+ if (ret) {
+ if (err_ret)
+ *err_ret = ret;
+ return 0xFFFF;
+ }
+
+ return le16_to_cpup((__le16 *)func->tmpbuf);
+}
+EXPORT_SYMBOL_GPL(sdio_readw);
+
+/**
+ * sdio_writew - write a 16 bit integer to a SDIO function
+ * @func: SDIO function to access
+ * @b: integer to write
+ * @addr: address to write to
+ * @err_ret: optional status value from transfer
+ *
+ * Writes a 16 bit integer to the address space of a given SDIO
+ * function. @err_ret will contain the status of the actual
+ * transfer.
+ */
+void sdio_writew(struct sdio_func *func, u16 b, unsigned int addr, int *err_ret)
+{
+ int ret;
+
+ *(__le16 *)func->tmpbuf = cpu_to_le16(b);
+
+ ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 2);
+ if (err_ret)
+ *err_ret = ret;
+}
+EXPORT_SYMBOL_GPL(sdio_writew);
+
+/**
+ * sdio_readl - read a 32 bit integer from a SDIO function
+ * @func: SDIO function to access
+ * @addr: address to read
+ * @err_ret: optional status value from transfer
+ *
+ * Reads a 32 bit integer from the address space of a given SDIO
+ * function. If there is a problem reading the address,
+ * 0xffffffff is returned and @err_ret will contain the error
+ * code.
+ */
+u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret)
+{
+ int ret;
+
+ if (err_ret)
+ *err_ret = 0;
+
+ ret = sdio_memcpy_fromio(func, func->tmpbuf, addr, 4);
+ if (ret) {
+ if (err_ret)
+ *err_ret = ret;
+ return 0xFFFFFFFF;
+ }
+
+ return le32_to_cpup((__le32 *)func->tmpbuf);
+}
+EXPORT_SYMBOL_GPL(sdio_readl);
+
+/**
+ * sdio_writel - write a 32 bit integer to a SDIO function
+ * @func: SDIO function to access
+ * @b: integer to write
+ * @addr: address to write to
+ * @err_ret: optional status value from transfer
+ *
+ * Writes a 32 bit integer to the address space of a given SDIO
+ * function. @err_ret will contain the status of the actual
+ * transfer.
+ */
+void sdio_writel(struct sdio_func *func, u32 b, unsigned int addr, int *err_ret)
+{
+ int ret;
+
+ *(__le32 *)func->tmpbuf = cpu_to_le32(b);
+
+ ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 4);
+ if (err_ret)
+ *err_ret = ret;
+}
+EXPORT_SYMBOL_GPL(sdio_writel);
+
+/**
+ * sdio_f0_readb - read a single byte from SDIO function 0
+ * @func: an SDIO function of the card
+ * @addr: address to read
+ * @err_ret: optional status value from transfer
+ *
+ * Reads a single byte from the address space of SDIO function 0.
+ * If there is a problem reading the address, 0xff is returned
+ * and @err_ret will contain the error code.
+ */
+unsigned char sdio_f0_readb(struct sdio_func *func, unsigned int addr,
+ int *err_ret)
+{
+ int ret;
+ unsigned char val;
+
+ BUG_ON(!func);
+
+ if (err_ret)
+ *err_ret = 0;
+
+ ret = mmc_io_rw_direct(func->card, 0, 0, addr, 0, &val);
+ if (ret) {
+ if (err_ret)
+ *err_ret = ret;
+ return 0xFF;
+ }
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sdio_f0_readb);
+
+/**
+ * sdio_f0_writeb - write a single byte to SDIO function 0
+ * @func: an SDIO function of the card
+ * @b: byte to write
+ * @addr: address to write to
+ * @err_ret: optional status value from transfer
+ *
+ * Writes a single byte to the address space of SDIO function 0.
+ * @err_ret will contain the status of the actual transfer.
+ *
+ * Only writes to the vendor specific CCCR registers (0xF0 -
+ * 0xFF) are permitted; @err_ret will be set to -EINVAL for
+ * writes outside this range.
+ */
+void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr,
+ int *err_ret)
+{
+ int ret;
+
+ BUG_ON(!func);
+
+ if ((addr < 0xF0 || addr > 0xFF) && (!mmc_card_lenient_fn0(func->card))) {
+ if (err_ret)
+ *err_ret = -EINVAL;
+ return;
+ }
+
+ ret = mmc_io_rw_direct(func->card, 1, 0, addr, b, NULL);
+ if (err_ret)
+ *err_ret = ret;
+}
+EXPORT_SYMBOL_GPL(sdio_f0_writeb);
+
+/**
+ * sdio_get_host_pm_caps - get host power management capabilities
+ * @func: SDIO function attached to host
+ *
+ * Returns a capability bitmask corresponding to power management
+ * features supported by the host controller that the card function
+ * might rely upon during a system suspend. The host doesn't need
+ * to be claimed, nor the function active, for this information to be
+ * obtained.
+ */
+mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func)
+{
+ BUG_ON(!func);
+ BUG_ON(!func->card);
+
+ return func->card->host->pm_caps;
+}
+EXPORT_SYMBOL_GPL(sdio_get_host_pm_caps);
+
+/**
+ * sdio_set_host_pm_flags - set wanted host power management capabilities
+ * @func: SDIO function attached to host
+ *
+ * Set a capability bitmask corresponding to wanted host controller
+ * power management features for the upcoming suspend state.
+ * This must be called, if needed, each time the suspend method of
+ * the function driver is called, and must contain only bits that
+ * were returned by sdio_get_host_pm_caps().
+ * The host doesn't need to be claimed, nor the function active,
+ * for this information to be set.
+ */
+int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags)
+{
+ struct mmc_host *host;
+
+ BUG_ON(!func);
+ BUG_ON(!func->card);
+
+ host = func->card->host;
+
+ if (flags & ~host->pm_caps)
+ return -EINVAL;
+
+ /* function suspend methods are serialized, hence no lock needed */
+ host->pm_flags |= flags;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdio_set_host_pm_flags);
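A sketch of a function driver suspend handler combining the two helpers, assuming the (hypothetical) driver needs the card kept powered across suspend so it can wake the system:

#include <linux/device.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/sdio_func.h>

static int my_suspend(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);
	mmc_pm_flag_t caps = sdio_get_host_pm_caps(func);

	/* Bail out if the host cannot keep the card powered in suspend */
	if (!(caps & MMC_PM_KEEP_POWER))
		return -ENOSYS;

	return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
}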
diff --git a/kernel/drivers/mmc/core/sdio_irq.c b/kernel/drivers/mmc/core/sdio_irq.c
new file mode 100644
index 000000000..09cc67d02
--- /dev/null
+++ b/kernel/drivers/mmc/core/sdio_irq.c
@@ -0,0 +1,345 @@
+/*
+ * linux/drivers/mmc/core/sdio_irq.c
+ *
+ * Author: Nicolas Pitre
+ * Created: June 18, 2007
+ * Copyright: MontaVista Software Inc.
+ *
+ * Copyright 2008 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/export.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+
+#include "sdio_ops.h"
+
+static int process_sdio_pending_irqs(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ int i, ret, count;
+ unsigned char pending;
+ struct sdio_func *func;
+
+ /*
+	 * As an optimization, if only one function interrupt is registered
+	 * and we know an IRQ was signaled, call its handler directly.
+ * Otherwise do the full probe.
+ */
+ func = card->sdio_single_irq;
+ if (func && host->sdio_irq_pending) {
+ func->irq_handler(func);
+ return 1;
+ }
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);
+ if (ret) {
+ pr_debug("%s: error %d reading SDIO_CCCR_INTx\n",
+ mmc_card_id(card), ret);
+ return ret;
+ }
+
+ if (pending && mmc_card_broken_irq_polling(card) &&
+ !(host->caps & MMC_CAP_SDIO_IRQ)) {
+ unsigned char dummy;
+
+ /* A fake interrupt could be created when we poll SDIO_CCCR_INTx
+ * register with a Marvell SD8797 card. A dummy CMD52 read to
+ * function 0 register 0xff can avoid this.
+ */
+ mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy);
+ }
+
+ count = 0;
+ for (i = 1; i <= 7; i++) {
+ if (pending & (1 << i)) {
+ func = card->sdio_func[i - 1];
+ if (!func) {
+ pr_warn("%s: pending IRQ for non-existent function\n",
+ mmc_card_id(card));
+ ret = -EINVAL;
+ } else if (func->irq_handler) {
+ func->irq_handler(func);
+ count++;
+ } else {
+ pr_warn("%s: pending IRQ with no handler\n",
+ sdio_func_id(func));
+ ret = -EINVAL;
+ }
+ }
+ }
+
+ if (count)
+ return count;
+
+ return ret;
+}
+
+void sdio_run_irqs(struct mmc_host *host)
+{
+ mmc_claim_host(host);
+ host->sdio_irq_pending = true;
+ process_sdio_pending_irqs(host);
+ mmc_release_host(host);
+}
+EXPORT_SYMBOL_GPL(sdio_run_irqs);
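A sketch of how a host controller driver advertising MMC_CAP2_SDIO_IRQ_NOTHREAD might hand off to the core from its threaded interrupt handler; sdio_run_irqs() claims the host and can sleep, so it must not be called from hard IRQ context (the handler name is hypothetical):

#include <linux/interrupt.h>
#include <linux/mmc/host.h>

static irqreturn_t my_host_sdio_irq_thread(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;

	sdio_run_irqs(mmc);

	return IRQ_HANDLED;
}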
+
+static int sdio_irq_thread(void *_host)
+{
+ struct mmc_host *host = _host;
+ struct sched_param param = { .sched_priority = 1 };
+ unsigned long period, idle_period;
+ int ret;
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
+
+ /*
+	 * We want to allow for SDIO cards to work even on non-SDIO
+	 * aware hosts. One thing that a non-SDIO host cannot do is
+	 * asynchronous notification of pending SDIO card interrupts,
+	 * hence we poll for them in that case.
+ */
+ idle_period = msecs_to_jiffies(10);
+ period = (host->caps & MMC_CAP_SDIO_IRQ) ?
+ MAX_SCHEDULE_TIMEOUT : idle_period;
+
+ pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
+ mmc_hostname(host), period);
+
+ do {
+ /*
+		 * We claim the host here on the driver's behalf for a couple of
+ * reasons:
+ *
+ * 1) it is already needed to retrieve the CCCR_INTx;
+ * 2) we want the driver(s) to clear the IRQ condition ASAP;
+ * 3) we need to control the abort condition locally.
+ *
+ * Just like traditional hard IRQ handlers, we expect SDIO
+ * IRQ handlers to be quick and to the point, so that the
+ * holding of the host lock does not cover too much work
+ * that doesn't require that lock to be held.
+ */
+ ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
+ if (ret)
+ break;
+ ret = process_sdio_pending_irqs(host);
+ host->sdio_irq_pending = false;
+ mmc_release_host(host);
+
+ /*
+ * Give other threads a chance to run in the presence of
+ * errors.
+ */
+ if (ret < 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!kthread_should_stop())
+ schedule_timeout(HZ);
+ set_current_state(TASK_RUNNING);
+ }
+
+ /*
+ * Adaptive polling frequency based on the assumption
+ * that an interrupt will be closely followed by more.
+ * This has a substantial benefit for network devices.
+ */
+ if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
+ if (ret > 0)
+ period /= 2;
+ else {
+ period++;
+ if (period > idle_period)
+ period = idle_period;
+ }
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
+ host->ops->enable_sdio_irq(host, 1);
+ mmc_host_clk_release(host);
+ }
+ if (!kthread_should_stop())
+ schedule_timeout(period);
+ set_current_state(TASK_RUNNING);
+ } while (!kthread_should_stop());
+
+ if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
+ host->ops->enable_sdio_irq(host, 0);
+ mmc_host_clk_release(host);
+ }
+
+ pr_debug("%s: IRQ thread exiting with code %d\n",
+ mmc_hostname(host), ret);
+
+ return ret;
+}
+
+static int sdio_card_irq_get(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+
+ WARN_ON(!host->claimed);
+
+ if (!host->sdio_irqs++) {
+ if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
+ atomic_set(&host->sdio_irq_thread_abort, 0);
+ host->sdio_irq_thread =
+ kthread_run(sdio_irq_thread, host,
+ "ksdioirqd/%s", mmc_hostname(host));
+ if (IS_ERR(host->sdio_irq_thread)) {
+ int err = PTR_ERR(host->sdio_irq_thread);
+ host->sdio_irqs--;
+ return err;
+ }
+ } else if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
+ host->ops->enable_sdio_irq(host, 1);
+ mmc_host_clk_release(host);
+ }
+ }
+
+ return 0;
+}
+
+static int sdio_card_irq_put(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+
+ WARN_ON(!host->claimed);
+ BUG_ON(host->sdio_irqs < 1);
+
+ if (!--host->sdio_irqs) {
+ if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
+ atomic_set(&host->sdio_irq_thread_abort, 1);
+ kthread_stop(host->sdio_irq_thread);
+ } else if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
+ host->ops->enable_sdio_irq(host, 0);
+ mmc_host_clk_release(host);
+ }
+ }
+
+ return 0;
+}
+
+/* If there is only 1 function registered set sdio_single_irq */
+static void sdio_single_irq_set(struct mmc_card *card)
+{
+ struct sdio_func *func;
+ int i;
+
+ card->sdio_single_irq = NULL;
+ if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
+ card->host->sdio_irqs == 1)
+ for (i = 0; i < card->sdio_funcs; i++) {
+ func = card->sdio_func[i];
+ if (func && func->irq_handler) {
+ card->sdio_single_irq = func;
+ break;
+ }
+ }
+}
+
+/**
+ * sdio_claim_irq - claim the IRQ for a SDIO function
+ * @func: SDIO function
+ * @handler: IRQ handler callback
+ *
+ * Claim and activate the IRQ for the given SDIO function. The provided
+ * handler will be called when that IRQ is asserted. The host is always
+ * claimed already when the handler is called so the handler must not
+ * call sdio_claim_host() nor sdio_release_host().
+ */
+int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
+{
+ int ret;
+ unsigned char reg;
+
+ BUG_ON(!func);
+ BUG_ON(!func->card);
+
+ pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));
+
+ if (func->irq_handler) {
+ pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
+ return -EBUSY;
+ }
+
+ ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
+ if (ret)
+ return ret;
+
+ reg |= 1 << func->num;
+
+ reg |= 1; /* Master interrupt enable */
+
+ ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
+ if (ret)
+ return ret;
+
+ func->irq_handler = handler;
+ ret = sdio_card_irq_get(func->card);
+ if (ret)
+ func->irq_handler = NULL;
+ sdio_single_irq_set(func->card);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdio_claim_irq);
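A minimal sketch of a function interrupt handler and its registration; the status register offset and function names are hypothetical, and the handler runs with the host already claimed:

#include <linux/mmc/sdio_func.h>

static void my_irq_handler(struct sdio_func *func)
{
	int err;
	u8 status;

	/* The host is already claimed: no sdio_claim_host() here */
	status = sdio_readb(func, 0x08, &err);
	if (!err && status)
		sdio_writeb(func, status, 0x08, &err);	/* ack the source */
}

static int my_enable_card_irq(struct sdio_func *func)
{
	int err;

	sdio_claim_host(func);
	err = sdio_claim_irq(func, my_irq_handler);
	sdio_release_host(func);

	return err;
}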
+
+/**
+ * sdio_release_irq - release the IRQ for a SDIO function
+ * @func: SDIO function
+ *
+ * Disable and release the IRQ for the given SDIO function.
+ */
+int sdio_release_irq(struct sdio_func *func)
+{
+ int ret;
+ unsigned char reg;
+
+ BUG_ON(!func);
+ BUG_ON(!func->card);
+
+ pr_debug("SDIO: Disabling IRQ for %s...\n", sdio_func_id(func));
+
+ if (func->irq_handler) {
+ func->irq_handler = NULL;
+ sdio_card_irq_put(func->card);
+ sdio_single_irq_set(func->card);
+ }
+
+ ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
+ if (ret)
+ return ret;
+
+ reg &= ~(1 << func->num);
+
+ /* Disable master interrupt with the last function interrupt */
+ if (!(reg & 0xFE))
+ reg = 0;
+
+ ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdio_release_irq);
+
diff --git a/kernel/drivers/mmc/core/sdio_ops.c b/kernel/drivers/mmc/core/sdio_ops.c
new file mode 100644
index 000000000..62508b457
--- /dev/null
+++ b/kernel/drivers/mmc/core/sdio_ops.c
@@ -0,0 +1,223 @@
+/*
+ * linux/drivers/mmc/sdio_ops.c
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/scatterlist.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+
+#include "core.h"
+#include "sdio_ops.h"
+
+int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
+{
+ struct mmc_command cmd = {0};
+ int i, err = 0;
+
+ BUG_ON(!host);
+
+ cmd.opcode = SD_IO_SEND_OP_COND;
+ cmd.arg = ocr;
+ cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR;
+
+ for (i = 100; i; i--) {
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ break;
+
+ /* if we're just probing, do a single pass */
+ if (ocr == 0)
+ break;
+
+ /* otherwise wait until reset completes */
+ if (mmc_host_is_spi(host)) {
+ /*
+ * Both R1_SPI_IDLE and MMC_CARD_BUSY indicate
+ * an initialized card under SPI, but some cards
+ * (Marvell's) only behave when looking at this
+ * one.
+ */
+ if (cmd.resp[1] & MMC_CARD_BUSY)
+ break;
+ } else {
+ if (cmd.resp[0] & MMC_CARD_BUSY)
+ break;
+ }
+
+ err = -ETIMEDOUT;
+
+ mmc_delay(10);
+ }
+
+ if (rocr)
+ *rocr = cmd.resp[mmc_host_is_spi(host) ? 1 : 0];
+
+ return err;
+}
+
+static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
+ unsigned addr, u8 in, u8 *out)
+{
+ struct mmc_command cmd = {0};
+ int err;
+
+ BUG_ON(!host);
+ BUG_ON(fn > 7);
+
+ /* sanity check */
+ if (addr & ~0x1FFFF)
+ return -EINVAL;
+
+ cmd.opcode = SD_IO_RW_DIRECT;
+ cmd.arg = write ? 0x80000000 : 0x00000000;
+ cmd.arg |= fn << 28;
+ cmd.arg |= (write && out) ? 0x08000000 : 0x00000000;
+ cmd.arg |= addr << 9;
+ cmd.arg |= in;
+ cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ return err;
+
+ if (mmc_host_is_spi(host)) {
+ /* host driver already reported errors */
+ } else {
+ if (cmd.resp[0] & R5_ERROR)
+ return -EIO;
+ if (cmd.resp[0] & R5_FUNCTION_NUMBER)
+ return -EINVAL;
+ if (cmd.resp[0] & R5_OUT_OF_RANGE)
+ return -ERANGE;
+ }
+
+ if (out) {
+ if (mmc_host_is_spi(host))
+ *out = (cmd.resp[0] >> 8) & 0xFF;
+ else
+ *out = cmd.resp[0] & 0xFF;
+ }
+
+ return 0;
+}
+
+int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
+ unsigned addr, u8 in, u8 *out)
+{
+ BUG_ON(!card);
+ return mmc_io_rw_direct_host(card->host, write, fn, addr, in, out);
+}
+
+int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
+ unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)
+{
+ struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg, *sg_ptr;
+ struct sg_table sgtable;
+ unsigned int nents, left_size, i;
+ unsigned int seg_size = card->host->max_seg_size;
+
+ BUG_ON(!card);
+ BUG_ON(fn > 7);
+ WARN_ON(blksz == 0);
+
+ /* sanity check */
+ if (addr & ~0x1FFFF)
+ return -EINVAL;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ cmd.opcode = SD_IO_RW_EXTENDED;
+ cmd.arg = write ? 0x80000000 : 0x00000000;
+ cmd.arg |= fn << 28;
+ cmd.arg |= incr_addr ? 0x04000000 : 0x00000000;
+ cmd.arg |= addr << 9;
+ if (blocks == 0)
+ cmd.arg |= (blksz == 512) ? 0 : blksz; /* byte mode */
+ else
+ cmd.arg |= 0x08000000 | blocks; /* block mode */
+ cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+
+ data.blksz = blksz;
+	/* Code in host drivers/fwk assumes that "blocks" is always >= 1 */
+ data.blocks = blocks ? blocks : 1;
+ data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+
+ left_size = data.blksz * data.blocks;
+ nents = (left_size - 1) / seg_size + 1;
+ if (nents > 1) {
+ if (sg_alloc_table(&sgtable, nents, GFP_KERNEL))
+ return -ENOMEM;
+
+ data.sg = sgtable.sgl;
+ data.sg_len = nents;
+
+ for_each_sg(data.sg, sg_ptr, data.sg_len, i) {
+ sg_set_page(sg_ptr, virt_to_page(buf + (i * seg_size)),
+ min(seg_size, left_size),
+ offset_in_page(buf + (i * seg_size)));
+ left_size = left_size - seg_size;
+ }
+ } else {
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, buf, left_size);
+ }
+
+ mmc_set_data_timeout(&data, card);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (nents > 1)
+ sg_free_table(&sgtable);
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ if (mmc_host_is_spi(card->host)) {
+ /* host driver already reported errors */
+ } else {
+ if (cmd.resp[0] & R5_ERROR)
+ return -EIO;
+ if (cmd.resp[0] & R5_FUNCTION_NUMBER)
+ return -EINVAL;
+ if (cmd.resp[0] & R5_OUT_OF_RANGE)
+ return -ERANGE;
+ }
+
+ return 0;
+}
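As a worked illustration of the argument encoding above: a block-mode write to function 1 at an incrementing address of 0x100 with 8 blocks gives cmd.arg = 0x80000000 | (1 << 28) | 0x04000000 | 0x08000000 | (0x100 << 9) | 8 = 0x9C020008.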
+
+int sdio_reset(struct mmc_host *host)
+{
+ int ret;
+ u8 abort;
+
+ /* SDIO Simplified Specification V2.0, 4.4 Reset for SDIO */
+
+ ret = mmc_io_rw_direct_host(host, 0, 0, SDIO_CCCR_ABORT, 0, &abort);
+ if (ret)
+ abort = 0x08;
+ else
+ abort |= 0x08;
+
+ ret = mmc_io_rw_direct_host(host, 1, 0, SDIO_CCCR_ABORT, abort, NULL);
+ return ret;
+}
+
diff --git a/kernel/drivers/mmc/core/sdio_ops.h b/kernel/drivers/mmc/core/sdio_ops.h
new file mode 100644
index 000000000..12a4d3ab1
--- /dev/null
+++ b/kernel/drivers/mmc/core/sdio_ops.h
@@ -0,0 +1,23 @@
+/*
+ * linux/drivers/mmc/sdio_ops.c
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef _MMC_SDIO_OPS_H
+#define _MMC_SDIO_OPS_H
+
+int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
+int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
+ unsigned addr, u8 in, u8* out);
+int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
+ unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);
+int sdio_reset(struct mmc_host *host);
+
+#endif
+
diff --git a/kernel/drivers/mmc/core/slot-gpio.c b/kernel/drivers/mmc/core/slot-gpio.c
new file mode 100644
index 000000000..27117ba47
--- /dev/null
+++ b/kernel/drivers/mmc/core/slot-gpio.c
@@ -0,0 +1,305 @@
+/*
+ * Generic GPIO card-detect helper
+ *
+ * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "slot-gpio.h"
+
+struct mmc_gpio {
+ struct gpio_desc *ro_gpio;
+ struct gpio_desc *cd_gpio;
+ bool override_ro_active_level;
+ bool override_cd_active_level;
+ irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
+ char *ro_label;
+ char cd_label[0];
+};
+
+static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
+{
+ /* Schedule a card detection after a debounce timeout */
+ struct mmc_host *host = dev_id;
+
+ host->trigger_card_event = true;
+ mmc_detect_change(host, msecs_to_jiffies(200));
+
+ return IRQ_HANDLED;
+}
+
+int mmc_gpio_alloc(struct mmc_host *host)
+{
+ size_t len = strlen(dev_name(host->parent)) + 4;
+ struct mmc_gpio *ctx = devm_kzalloc(host->parent,
+ sizeof(*ctx) + 2 * len, GFP_KERNEL);
+
+ if (ctx) {
+ ctx->ro_label = ctx->cd_label + len;
+ snprintf(ctx->cd_label, len, "%s cd", dev_name(host->parent));
+ snprintf(ctx->ro_label, len, "%s ro", dev_name(host->parent));
+ host->slot.handler_priv = ctx;
+ host->slot.cd_irq = -EINVAL;
+ }
+
+ return ctx ? 0 : -ENOMEM;
+}
+
+int mmc_gpio_get_ro(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ if (!ctx || !ctx->ro_gpio)
+ return -ENOSYS;
+
+ if (ctx->override_ro_active_level)
+ return !gpiod_get_raw_value_cansleep(ctx->ro_gpio) ^
+ !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
+
+ return gpiod_get_value_cansleep(ctx->ro_gpio);
+}
+EXPORT_SYMBOL(mmc_gpio_get_ro);
+
+int mmc_gpio_get_cd(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ if (!ctx || !ctx->cd_gpio)
+ return -ENOSYS;
+
+ if (ctx->override_cd_active_level)
+ return !gpiod_get_raw_value_cansleep(ctx->cd_gpio) ^
+ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
+
+ return gpiod_get_value_cansleep(ctx->cd_gpio);
+}
+EXPORT_SYMBOL(mmc_gpio_get_cd);
+
+/**
+ * mmc_gpio_request_ro - request a gpio for write-protection
+ * @host: mmc host
+ * @gpio: gpio number requested
+ *
+ * As devm_* managed functions are used in mmc_gpio_request_ro(), client
+ * drivers do not need to worry about freeing up memory.
+ *
+ * Returns zero on success, else an error.
+ */
+int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ int ret;
+
+ if (!gpio_is_valid(gpio))
+ return -EINVAL;
+
+ ret = devm_gpio_request_one(host->parent, gpio, GPIOF_DIR_IN,
+ ctx->ro_label);
+ if (ret < 0)
+ return ret;
+
+ ctx->override_ro_active_level = true;
+ ctx->ro_gpio = gpio_to_desc(gpio);
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_gpio_request_ro);
+
+void mmc_gpiod_request_cd_irq(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ int ret, irq;
+
+ if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio)
+ return;
+
+ irq = gpiod_to_irq(ctx->cd_gpio);
+
+ /*
+ * Even if gpiod_to_irq() returns a valid IRQ number, the platform might
+ * still prefer to poll, e.g., because that IRQ number is already used
+ * by another unit and cannot be shared.
+ */
+ if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL)
+ irq = -EINVAL;
+
+ if (irq >= 0) {
+ if (!ctx->cd_gpio_isr)
+ ctx->cd_gpio_isr = mmc_gpio_cd_irqt;
+ ret = devm_request_threaded_irq(host->parent, irq,
+ NULL, ctx->cd_gpio_isr,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ ctx->cd_label, host);
+ if (ret < 0)
+ irq = ret;
+ }
+
+ host->slot.cd_irq = irq;
+
+ if (irq < 0)
+ host->caps |= MMC_CAP_NEEDS_POLL;
+}
+EXPORT_SYMBOL(mmc_gpiod_request_cd_irq);
+
+/* Register an alternate interrupt service routine for
+ * the card-detect GPIO.
+ */
+void mmc_gpio_set_cd_isr(struct mmc_host *host,
+ irqreturn_t (*isr)(int irq, void *dev_id))
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ WARN_ON(ctx->cd_gpio_isr);
+ ctx->cd_gpio_isr = isr;
+}
+EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
+
+/**
+ * mmc_gpio_request_cd - request a gpio for card-detection
+ * @host: mmc host
+ * @gpio: gpio number requested
+ * @debounce: debounce time in microseconds
+ *
+ * As devm_* managed functions are used in mmc_gpio_request_cd(), client
+ * drivers do not need to worry about freeing up memory.
+ *
+ * If GPIO debouncing is desired, set the debounce parameter to a non-zero
+ * value. The caller is responsible for ensuring that the GPIO driver associated
+ * with the GPIO supports debouncing; otherwise an error will be returned.
+ *
+ * Returns zero on success, else an error.
+ */
+int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
+ unsigned int debounce)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ int ret;
+
+ ret = devm_gpio_request_one(host->parent, gpio, GPIOF_DIR_IN,
+ ctx->cd_label);
+ if (ret < 0)
+ /*
+ * don't bother freeing memory. It might still get used by other
+ * slot functions, in any case it will be freed, when the device
+ * is destroyed.
+ */
+ return ret;
+
+ if (debounce) {
+ ret = gpio_set_debounce(gpio, debounce);
+ if (ret < 0)
+ return ret;
+ }
+
+ ctx->override_cd_active_level = true;
+ ctx->cd_gpio = gpio_to_desc(gpio);
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_gpio_request_cd);
+
+/**
+ * mmc_gpiod_request_cd - request a gpio descriptor for card-detection
+ * @host: mmc host
+ * @con_id: function within the GPIO consumer
+ * @idx: index of the GPIO to obtain in the consumer
+ * @override_active_level: ignore %GPIO_ACTIVE_LOW flag
+ * @debounce: debounce time in microseconds
+ * @gpio_invert: will return whether the GPIO line is inverted or not, set
+ * to NULL to ignore
+ *
+ * Use this function in place of mmc_gpio_request_cd() to use the GPIO
+ * descriptor API. Note that it must be called prior to mmc_add_host()
+ * otherwise the caller must also call mmc_gpiod_request_cd_irq().
+ *
+ * Returns zero on success, else an error.
+ */
+int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
+ unsigned int idx, bool override_active_level,
+ unsigned int debounce, bool *gpio_invert)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ struct gpio_desc *desc;
+ int ret;
+
+ if (!con_id)
+ con_id = ctx->cd_label;
+
+ desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ if (debounce) {
+ ret = gpiod_set_debounce(desc, debounce);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (gpio_invert)
+ *gpio_invert = !gpiod_is_active_low(desc);
+
+ ctx->override_cd_active_level = override_active_level;
+ ctx->cd_gpio = desc;
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_gpiod_request_cd);
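A hedged sketch of a host controller probe using the descriptor API, assuming the conventional "cd" consumer name from the firmware description; my_host_probe() and its error handling are hypothetical, and a missing card-detect GPIO (-ENOENT) is simply tolerated:

#include <linux/platform_device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int my_host_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(0, &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	/* Called before mmc_add_host(), so the core requests the CD IRQ */
	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
	if (ret && ret != -ENOENT)
		goto err_free;

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_free;

	return 0;

err_free:
	mmc_free_host(mmc);
	return ret;
}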
+
+/**
+ * mmc_gpiod_request_ro - request a gpio descriptor for write protection
+ * @host: mmc host
+ * @con_id: function within the GPIO consumer
+ * @idx: index of the GPIO to obtain in the consumer
+ * @override_active_level: ignore %GPIO_ACTIVE_LOW flag
+ * @debounce: debounce time in microseconds
+ * @gpio_invert: will return whether the GPIO line is inverted or not,
+ * set to NULL to ignore
+ *
+ * Use this function in place of mmc_gpio_request_ro() to use the GPIO
+ * descriptor API.
+ *
+ * Returns zero on success, else an error.
+ */
+int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
+ unsigned int idx, bool override_active_level,
+ unsigned int debounce, bool *gpio_invert)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ struct gpio_desc *desc;
+ int ret;
+
+ if (!con_id)
+ con_id = ctx->ro_label;
+
+ desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ if (debounce) {
+ ret = gpiod_set_debounce(desc, debounce);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (gpio_invert)
+ *gpio_invert = !gpiod_is_active_low(desc);
+
+ ctx->override_ro_active_level = override_active_level;
+ ctx->ro_gpio = desc;
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_gpiod_request_ro);
diff --git a/kernel/drivers/mmc/core/slot-gpio.h b/kernel/drivers/mmc/core/slot-gpio.h
new file mode 100644
index 000000000..8c1854dc5
--- /dev/null
+++ b/kernel/drivers/mmc/core/slot-gpio.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd
+ *
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _MMC_CORE_SLOTGPIO_H
+#define _MMC_CORE_SLOTGPIO_H
+
+int mmc_gpio_alloc(struct mmc_host *host);
+
+#endif