path: root/kernel/drivers/mtd
Diffstat (limited to 'kernel/drivers/mtd')
-rw-r--r--  kernel/drivers/mtd/chips/Kconfig | 1
-rw-r--r--  kernel/drivers/mtd/chips/cfi_cmdset_0002.c | 2
-rw-r--r--  kernel/drivers/mtd/chips/cfi_util.c | 188
-rw-r--r--  kernel/drivers/mtd/cmdlinepart.c | 28
-rw-r--r--  kernel/drivers/mtd/devices/Kconfig | 8
-rw-r--r--  kernel/drivers/mtd/devices/Makefile | 1
-rw-r--r--  kernel/drivers/mtd/devices/bcm47xxsflash.c | 7
-rw-r--r--  kernel/drivers/mtd/devices/block2mtd.c | 1
-rw-r--r--  kernel/drivers/mtd/devices/docg3.c | 42
-rw-r--r--  kernel/drivers/mtd/devices/m25p80.c | 94
-rw-r--r--  kernel/drivers/mtd/devices/mtd_dataflash.c | 3
-rw-r--r--  kernel/drivers/mtd/devices/mtdram.c | 21
-rw-r--r--  kernel/drivers/mtd/devices/powernv_flash.c | 285
-rw-r--r--  kernel/drivers/mtd/devices/slram.c | 2
-rw-r--r--  kernel/drivers/mtd/devices/spear_smi.c | 5
-rw-r--r--  kernel/drivers/mtd/devices/sst25l.c | 4
-rw-r--r--  kernel/drivers/mtd/lpddr/lpddr2_nvm.c | 1
-rw-r--r--  kernel/drivers/mtd/maps/Kconfig | 2
-rw-r--r--  kernel/drivers/mtd/maps/amd76xrom.c | 2
-rw-r--r--  kernel/drivers/mtd/maps/esb2rom.c | 2
-rw-r--r--  kernel/drivers/mtd/maps/gpio-addr-flash.c | 2
-rw-r--r--  kernel/drivers/mtd/maps/ichxrom.c | 2
-rw-r--r--  kernel/drivers/mtd/maps/intel_vr_nor.c | 2
-rw-r--r--  kernel/drivers/mtd/maps/ixp4xx.c | 2
-rw-r--r--  kernel/drivers/mtd/maps/lantiq-flash.c | 6
-rw-r--r--  kernel/drivers/mtd/maps/latch-addr-flash.c | 2
-rw-r--r--  kernel/drivers/mtd/maps/nettel.c | 13
-rw-r--r--  kernel/drivers/mtd/maps/pcmciamtd.c | 1
-rw-r--r--  kernel/drivers/mtd/maps/physmap.c | 1
-rw-r--r--  kernel/drivers/mtd/maps/physmap_of.c | 11
-rw-r--r--  kernel/drivers/mtd/maps/plat-ram.c | 1
-rw-r--r--  kernel/drivers/mtd/maps/pxa2xx-flash.c | 8
-rw-r--r--  kernel/drivers/mtd/maps/rbtx4939-flash.c | 2
-rw-r--r--  kernel/drivers/mtd/maps/sa1100-flash.c | 2
-rw-r--r--  kernel/drivers/mtd/mtd_blkdevs.c | 24
-rw-r--r--  kernel/drivers/mtd/mtdchar.c | 42
-rw-r--r--  kernel/drivers/mtd/mtdcore.c | 108
-rw-r--r--  kernel/drivers/mtd/mtdpart.c | 23
-rw-r--r--  kernel/drivers/mtd/nand/Kconfig | 34
-rw-r--r--  kernel/drivers/mtd/nand/Makefile | 5
-rw-r--r--  kernel/drivers/mtd/nand/atmel_nand.c | 8
-rw-r--r--  kernel/drivers/mtd/nand/au1550nd.c | 2
-rw-r--r--  kernel/drivers/mtd/nand/bcm47xxnflash/main.c | 2
-rw-r--r--  kernel/drivers/mtd/nand/bf5xx_nand.c | 5
-rw-r--r--  kernel/drivers/mtd/nand/brcmnand/Makefile | 6
-rw-r--r--  kernel/drivers/mtd/nand/brcmnand/bcm63138_nand.c | 109
-rw-r--r--  kernel/drivers/mtd/nand/brcmnand/brcmnand.c | 2281
-rw-r--r--  kernel/drivers/mtd/nand/brcmnand/brcmnand.h | 71
-rw-r--r--  kernel/drivers/mtd/nand/brcmnand/brcmstb_nand.c | 44
-rw-r--r--  kernel/drivers/mtd/nand/brcmnand/iproc_nand.c | 150
-rw-r--r--  kernel/drivers/mtd/nand/cafe_nand.c | 4
-rw-r--r--  kernel/drivers/mtd/nand/cs553x_nand.c | 12
-rw-r--r--  kernel/drivers/mtd/nand/davinci_nand.c | 45
-rw-r--r--  kernel/drivers/mtd/nand/denali.c | 19
-rw-r--r--  kernel/drivers/mtd/nand/denali.h | 2
-rw-r--r--  kernel/drivers/mtd/nand/denali_pci.c | 43
-rw-r--r--  kernel/drivers/mtd/nand/diskonchip.c | 39
-rw-r--r--  kernel/drivers/mtd/nand/docg4.c | 8
-rw-r--r--  kernel/drivers/mtd/nand/fsl_elbc_nand.c | 7
-rw-r--r--  kernel/drivers/mtd/nand/fsl_ifc_nand.c | 263
-rw-r--r--  kernel/drivers/mtd/nand/fsl_upm.c | 2
-rw-r--r--  kernel/drivers/mtd/nand/fsmc_nand.c | 75
-rw-r--r--  kernel/drivers/mtd/nand/gpio.c | 2
-rw-r--r--  kernel/drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 10
-rw-r--r--  kernel/drivers/mtd/nand/hisi504_nand.c | 4
-rw-r--r--  kernel/drivers/mtd/nand/jz4740_nand.c | 3
-rw-r--r--  kernel/drivers/mtd/nand/lpc32xx_mlc.c | 14
-rw-r--r--  kernel/drivers/mtd/nand/lpc32xx_slc.c | 42
-rw-r--r--  kernel/drivers/mtd/nand/mpc5121_nfc.c | 4
-rw-r--r--  kernel/drivers/mtd/nand/mxc_nand.c | 114
-rw-r--r--  kernel/drivers/mtd/nand/nand_base.c | 250
-rw-r--r--  kernel/drivers/mtd/nand/nand_bbt.c | 28
-rw-r--r--  kernel/drivers/mtd/nand/nand_ids.c | 6
-rw-r--r--  kernel/drivers/mtd/nand/nandsim.c | 27
-rw-r--r--  kernel/drivers/mtd/nand/ndfc.c | 4
-rw-r--r--  kernel/drivers/mtd/nand/nuc900_nand.c | 2
-rw-r--r--  kernel/drivers/mtd/nand/omap2.c | 6
-rw-r--r--  kernel/drivers/mtd/nand/omap_elm.c | 2
-rw-r--r--  kernel/drivers/mtd/nand/orion_nand.c | 3
-rw-r--r--  kernel/drivers/mtd/nand/pasemi_nand.c | 2
-rw-r--r--  kernel/drivers/mtd/nand/plat_nand.c | 7
-rw-r--r--  kernel/drivers/mtd/nand/pxa3xx_nand.c | 668
-rw-r--r--  kernel/drivers/mtd/nand/r852.c | 9
-rw-r--r--  kernel/drivers/mtd/nand/s3c2410.c | 4
-rw-r--r--  kernel/drivers/mtd/nand/sh_flctl.c | 4
-rw-r--r--  kernel/drivers/mtd/nand/sharpsl.c | 2
-rw-r--r--  kernel/drivers/mtd/nand/socrates_nand.c | 1
-rw-r--r--  kernel/drivers/mtd/nand/sunxi_nand.c | 639
-rw-r--r--  kernel/drivers/mtd/nand/tmio_nand.c | 1
-rw-r--r--  kernel/drivers/mtd/nand/txx9ndfmc.c | 2
-rw-r--r--  kernel/drivers/mtd/nand/vf610_nfc.c | 878
-rw-r--r--  kernel/drivers/mtd/nand/xway_nand.c | 4
-rw-r--r--  kernel/drivers/mtd/ofpart.c | 71
-rw-r--r--  kernel/drivers/mtd/onenand/generic.c | 5
-rw-r--r--  kernel/drivers/mtd/onenand/omap2.c | 2
-rw-r--r--  kernel/drivers/mtd/onenand/samsung.c | 3
-rw-r--r--  kernel/drivers/mtd/spi-nor/Kconfig | 17
-rw-r--r--  kernel/drivers/mtd/spi-nor/Makefile | 1
-rw-r--r--  kernel/drivers/mtd/spi-nor/fsl-quadspi.c | 309
-rw-r--r--  kernel/drivers/mtd/spi-nor/nxp-spifi.c | 479
-rw-r--r--  kernel/drivers/mtd/spi-nor/spi-nor.c | 412
-rw-r--r--  kernel/drivers/mtd/tests/oobtest.c | 18
-rw-r--r--  kernel/drivers/mtd/tests/speedtest.c | 10
-rw-r--r--  kernel/drivers/mtd/tests/torturetest.c | 10
-rw-r--r--  kernel/drivers/mtd/ubi/attach.c | 4
-rw-r--r--  kernel/drivers/mtd/ubi/block.c | 18
-rw-r--r--  kernel/drivers/mtd/ubi/build.c | 107
-rw-r--r--  kernel/drivers/mtd/ubi/cdev.c | 2
-rw-r--r--  kernel/drivers/mtd/ubi/debug.c | 2
-rw-r--r--  kernel/drivers/mtd/ubi/eba.c | 2
-rw-r--r--  kernel/drivers/mtd/ubi/fastmap-wl.c | 29
-rw-r--r--  kernel/drivers/mtd/ubi/fastmap.c | 87
-rw-r--r--  kernel/drivers/mtd/ubi/gluebi.c | 4
-rw-r--r--  kernel/drivers/mtd/ubi/io.c | 7
-rw-r--r--  kernel/drivers/mtd/ubi/ubi-media.h | 2
-rw-r--r--  kernel/drivers/mtd/ubi/ubi.h | 2
-rw-r--r--  kernel/drivers/mtd/ubi/upd.c | 2
-rw-r--r--  kernel/drivers/mtd/ubi/vmt.c | 98
-rw-r--r--  kernel/drivers/mtd/ubi/vtbl.c | 46
-rw-r--r--  kernel/drivers/mtd/ubi/wl.c | 56
120 files changed, 7084 insertions, 1650 deletions
diff --git a/kernel/drivers/mtd/chips/Kconfig b/kernel/drivers/mtd/chips/Kconfig
index 9f02c28c0..54479c481 100644
--- a/kernel/drivers/mtd/chips/Kconfig
+++ b/kernel/drivers/mtd/chips/Kconfig
@@ -16,6 +16,7 @@ config MTD_CFI
config MTD_JEDECPROBE
tristate "Detect non-CFI AMD/JEDEC-compatible flash chips"
select MTD_GEN_PROBE
+ select MTD_CFI_UTIL
help
This option enables JEDEC-style probing of flash chips which are not
compatible with the Common Flash Interface, but will use the common
diff --git a/kernel/drivers/mtd/chips/cfi_cmdset_0002.c b/kernel/drivers/mtd/chips/cfi_cmdset_0002.c
index c50d8cf0f..c3624eb57 100644
--- a/kernel/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/kernel/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1295,7 +1295,7 @@ static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
int gap = adr - bus_ofs;
int n = min_t(int, len, map_bankwidth(map) - gap);
- map_word datum;
+ map_word datum = map_word_ff(map);
if (n != map_bankwidth(map)) {
/* partial write of a word, load old contents */
diff --git a/kernel/drivers/mtd/chips/cfi_util.c b/kernel/drivers/mtd/chips/cfi_util.c
index 09c79bd0b..6f16552cd 100644
--- a/kernel/drivers/mtd/chips/cfi_util.c
+++ b/kernel/drivers/mtd/chips/cfi_util.c
@@ -23,6 +23,194 @@
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
+void cfi_udelay(int us)
+{
+ if (us >= 1000) {
+ msleep((us+999)/1000);
+ } else {
+ udelay(us);
+ cond_resched();
+ }
+}
+EXPORT_SYMBOL(cfi_udelay);
+
+/*
+ * Returns the command address according to the given geometry.
+ */
+uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
+ struct map_info *map, struct cfi_private *cfi)
+{
+ unsigned bankwidth = map_bankwidth(map);
+ unsigned interleave = cfi_interleave(cfi);
+ unsigned type = cfi->device_type;
+ uint32_t addr;
+
+ addr = (cmd_ofs * type) * interleave;
+
+ /* Modify the unlock address if we are in compatibility mode.
+ * For 16bit devices on 8 bit busses
+ * and 32bit devices on 16 bit busses
+ * set the low bit of the alternating bit sequence of the address.
+ */
+ if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
+ addr |= (type >> 1)*interleave;
+
+ return addr;
+}
+EXPORT_SYMBOL(cfi_build_cmd_addr);
+
+/*
+ * Transforms the CFI command for the given geometry (bus width & interleave).
+ * It looks too long to be inline, but in the common case it should almost all
+ * get optimised away.
+ */
+map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
+{
+ map_word val = { {0} };
+ int wordwidth, words_per_bus, chip_mode, chips_per_word;
+ unsigned long onecmd;
+ int i;
+
+ /* We do it this way to give the compiler a fighting chance
+ of optimising away all the crap for 'bankwidth' larger than
+ an unsigned long, in the common case where that support is
+ disabled */
+ if (map_bankwidth_is_large(map)) {
+ wordwidth = sizeof(unsigned long);
+ words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
+ } else {
+ wordwidth = map_bankwidth(map);
+ words_per_bus = 1;
+ }
+
+ chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
+ chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
+
+ /* First, determine what the bit-pattern should be for a single
+ device, according to chip mode and endianness... */
+ switch (chip_mode) {
+ default: BUG();
+ case 1:
+ onecmd = cmd;
+ break;
+ case 2:
+ onecmd = cpu_to_cfi16(map, cmd);
+ break;
+ case 4:
+ onecmd = cpu_to_cfi32(map, cmd);
+ break;
+ }
+
+ /* Now replicate it across the size of an unsigned long, or
+ just to the bus width as appropriate */
+ switch (chips_per_word) {
+ default: BUG();
+#if BITS_PER_LONG >= 64
+ case 8:
+ onecmd |= (onecmd << (chip_mode * 32));
+#endif
+ case 4:
+ onecmd |= (onecmd << (chip_mode * 16));
+ case 2:
+ onecmd |= (onecmd << (chip_mode * 8));
+ case 1:
+ ;
+ }
+
+ /* And finally, for the multi-word case, replicate it
+ in all words in the structure */
+ for (i=0; i < words_per_bus; i++) {
+ val.x[i] = onecmd;
+ }
+
+ return val;
+}
+EXPORT_SYMBOL(cfi_build_cmd);
+
+unsigned long cfi_merge_status(map_word val, struct map_info *map,
+ struct cfi_private *cfi)
+{
+ int wordwidth, words_per_bus, chip_mode, chips_per_word;
+ unsigned long onestat, res = 0;
+ int i;
+
+ /* We do it this way to give the compiler a fighting chance
+ of optimising away all the crap for 'bankwidth' larger than
+ an unsigned long, in the common case where that support is
+ disabled */
+ if (map_bankwidth_is_large(map)) {
+ wordwidth = sizeof(unsigned long);
+ words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
+ } else {
+ wordwidth = map_bankwidth(map);
+ words_per_bus = 1;
+ }
+
+ chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
+ chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
+
+ onestat = val.x[0];
+ /* Or all status words together */
+ for (i=1; i < words_per_bus; i++) {
+ onestat |= val.x[i];
+ }
+
+ res = onestat;
+ switch(chips_per_word) {
+ default: BUG();
+#if BITS_PER_LONG >= 64
+ case 8:
+ res |= (onestat >> (chip_mode * 32));
+#endif
+ case 4:
+ res |= (onestat >> (chip_mode * 16));
+ case 2:
+ res |= (onestat >> (chip_mode * 8));
+ case 1:
+ ;
+ }
+
+ /* Last, determine what the bit-pattern should be for a single
+ device, according to chip mode and endianness... */
+ switch (chip_mode) {
+ case 1:
+ break;
+ case 2:
+ res = cfi16_to_cpu(map, res);
+ break;
+ case 4:
+ res = cfi32_to_cpu(map, res);
+ break;
+ default: BUG();
+ }
+ return res;
+}
+EXPORT_SYMBOL(cfi_merge_status);
+
+/*
+ * Sends a CFI command to a bank of flash for the given geometry.
+ *
+ * Returns the offset in flash where the command was written.
+ * If prev_val is non-null, it will be set to the value at the command address,
+ * before the command was written.
+ */
+uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
+ struct map_info *map, struct cfi_private *cfi,
+ int type, map_word *prev_val)
+{
+ map_word val;
+ uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
+ val = cfi_build_cmd(cmd, map, cfi);
+
+ if (prev_val)
+ *prev_val = map_read(map, addr);
+
+ map_write(map, val, addr);
+
+ return addr - base;
+}
+EXPORT_SYMBOL(cfi_send_gen_cmd);
+
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
struct cfi_private *cfi)
{
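Note on the helpers moved into cfi_util.c above: cfi_build_cmd_addr() and cfi_build_cmd() translate a single chip command into whatever the bus geometry requires, and cfi_send_gen_cmd() combines the two with a map_write(). A worked sketch, with a geometry assumed purely for illustration (two x8 chips interleaved on a 16-bit bus, i.e. map_bankwidth(map) == 2, cfi_interleave(cfi) == 2, cfi->device_type == CFI_DEVICETYPE_X8), not taken from the patch:

#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>

/*
 * For the assumed geometry:
 *
 *   cfi_build_cmd_addr(0x555, map, cfi) == (0x555 * 1) * 2 == 0xAAA
 *                                          (no compat-mode fixup applies)
 *   cfi_build_cmd(0xAA, map, cfi)       == { 0xAAAA }
 *                                          (one 0xAA per interleaved chip)
 *
 * so a single 16-bit bus write delivers the unlock byte to both chips.
 */
static void send_amd_unlock(struct map_info *map, struct cfi_private *cfi,
			    uint32_t base)
{
	/* Roughly how cfi_cmdset_0002.c issues the two AMD unlock cycles: */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, base, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi,
			 cfi->device_type, NULL);
}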
diff --git a/kernel/drivers/mtd/cmdlinepart.c b/kernel/drivers/mtd/cmdlinepart.c
index c8503006f..08f62987c 100644
--- a/kernel/drivers/mtd/cmdlinepart.c
+++ b/kernel/drivers/mtd/cmdlinepart.c
@@ -48,6 +48,8 @@
* edb7312-nor:256k(ARMboot)ro,-(root);edb7312-nand:-(home)
*/
+#define pr_fmt(fmt) "mtd: " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
@@ -55,9 +57,6 @@
#include <linux/module.h>
#include <linux/err.h>
-/* error message prefix */
-#define ERRP "mtd: "
-
/* debug macro */
#if 0
#define dbg(x) do { printk("DEBUG-CMDLINE-PART: "); printk x; } while(0)
@@ -115,9 +114,8 @@ static struct mtd_partition * newpart(char *s,
s++;
} else {
size = memparse(s, &s);
- if (size < PAGE_SIZE) {
- printk(KERN_ERR ERRP "partition size too small (%llx)\n",
- size);
+ if (!size) {
+ pr_err("partition has size 0\n");
return ERR_PTR(-EINVAL);
}
}
@@ -142,7 +140,7 @@ static struct mtd_partition * newpart(char *s,
name = ++s;
p = strchr(name, delim);
if (!p) {
- printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim);
+ pr_err("no closing %c found in partition name\n", delim);
return ERR_PTR(-EINVAL);
}
name_len = p - name;
@@ -170,7 +168,7 @@ static struct mtd_partition * newpart(char *s,
/* test if more partitions are following */
if (*s == ',') {
if (size == SIZE_REMAINING) {
- printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n");
+ pr_err("no partitions allowed after a fill-up partition\n");
return ERR_PTR(-EINVAL);
}
/* more partitions follow, parse them */
@@ -237,7 +235,7 @@ static int mtdpart_setup_real(char *s)
/* fetch <mtd-id> */
p = strchr(s, ':');
if (!p) {
- printk(KERN_ERR ERRP "no mtd-id\n");
+ pr_err("no mtd-id\n");
return -EINVAL;
}
mtd_id_len = p - mtd_id;
@@ -289,7 +287,7 @@ static int mtdpart_setup_real(char *s)
/* does another spec follow? */
if (*s != ';') {
- printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s);
+ pr_err("bad character after partition (%c)\n", *s);
return -EINVAL;
}
s++;
@@ -343,17 +341,15 @@ static int parse_cmdline_partitions(struct mtd_info *master,
part->parts[i].size = master->size - offset;
if (offset + part->parts[i].size > master->size) {
- printk(KERN_WARNING ERRP
- "%s: partitioning exceeds flash size, truncating\n",
- part->mtd_id);
+ pr_warn("%s: partitioning exceeds flash size, truncating\n",
+ part->mtd_id);
part->parts[i].size = master->size - offset;
}
offset += part->parts[i].size;
if (part->parts[i].size == 0) {
- printk(KERN_WARNING ERRP
- "%s: skipping zero sized partition\n",
- part->mtd_id);
+ pr_warn("%s: skipping zero sized partition\n",
+ part->mtd_id);
part->num_parts--;
memmove(&part->parts[i], &part->parts[i + 1],
sizeof(*part->parts) * (part->num_parts - i));
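The cmdlinepart.c change drops the hand-rolled ERRP prefix in favour of a pr_fmt() definition, so every pr_err()/pr_warn() in the file is prefixed automatically. A minimal sketch of the mechanism (the message is illustrative):

/* pr_fmt() must be defined before the first include that pulls in
 * <linux/printk.h>; the pr_* helpers then expand it into every format. */
#define pr_fmt(fmt) "mtd: " fmt

#include <linux/printk.h>

static void report_missing_id(void)
{
	pr_err("no mtd-id\n");	/* logged as: "mtd: no mtd-id" */
}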
diff --git a/kernel/drivers/mtd/devices/Kconfig b/kernel/drivers/mtd/devices/Kconfig
index c49d0b127..f73c41697 100644
--- a/kernel/drivers/mtd/devices/Kconfig
+++ b/kernel/drivers/mtd/devices/Kconfig
@@ -195,6 +195,14 @@ config MTD_BLOCK2MTD
Testing MTD users (eg JFFS2) on large media and media that might
be removed during a write (using the floppy drive).
+config MTD_POWERNV_FLASH
+ tristate "powernv flash MTD driver"
+ depends on PPC_POWERNV
+ help
+ This provides an MTD device to access flash on powernv OPAL
+ platforms from Linux. This device abstracts away the
+ firmware interface for flash access.
+
comment "Disk-On-Chip Device Drivers"
config MTD_DOCG3
diff --git a/kernel/drivers/mtd/devices/Makefile b/kernel/drivers/mtd/devices/Makefile
index f0b0e611d..7912d3a0e 100644
--- a/kernel/drivers/mtd/devices/Makefile
+++ b/kernel/drivers/mtd/devices/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
obj-$(CONFIG_MTD_ST_SPI_FSM) += st_spi_fsm.o
+obj-$(CONFIG_MTD_POWERNV_FLASH) += powernv_flash.o
CFLAGS_docg3.o += -I$(src)
diff --git a/kernel/drivers/mtd/devices/bcm47xxsflash.c b/kernel/drivers/mtd/devices/bcm47xxsflash.c
index 3d008a941..347bb83db 100644
--- a/kernel/drivers/mtd/devices/bcm47xxsflash.c
+++ b/kernel/drivers/mtd/devices/bcm47xxsflash.c
@@ -237,13 +237,14 @@ static int bcm47xxsflash_write(struct mtd_info *mtd, loff_t to, size_t len,
return 0;
}
-static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s)
+static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s,
+ struct device *dev)
{
struct mtd_info *mtd = &b47s->mtd;
mtd->priv = b47s;
+ mtd->dev.parent = dev;
mtd->name = "bcm47xxsflash";
- mtd->owner = THIS_MODULE;
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_CAP_NORFLASH;
@@ -300,7 +301,7 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
b47s->blocksize = sflash->blocksize;
b47s->numblocks = sflash->numblocks;
b47s->size = sflash->size;
- bcm47xxsflash_fill_mtd(b47s);
+ bcm47xxsflash_fill_mtd(b47s, &pdev->dev);
err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
if (err) {
diff --git a/kernel/drivers/mtd/devices/block2mtd.c b/kernel/drivers/mtd/devices/block2mtd.c
index b16f3cda9..e2c005773 100644
--- a/kernel/drivers/mtd/devices/block2mtd.c
+++ b/kernel/drivers/mtd/devices/block2mtd.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
+#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/list.h>
diff --git a/kernel/drivers/mtd/devices/docg3.c b/kernel/drivers/mtd/devices/docg3.c
index 866d31904..c3a2695a4 100644
--- a/kernel/drivers/mtd/devices/docg3.c
+++ b/kernel/drivers/mtd/devices/docg3.c
@@ -1620,20 +1620,30 @@ static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
static int doc_register_sysfs(struct platform_device *pdev,
struct docg3_cascade *cascade)
{
- int ret = 0, floor, i = 0;
struct device *dev = &pdev->dev;
+ int floor;
+ int ret;
+ int i;
- for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS &&
- cascade->floors[floor]; floor++)
- for (i = 0; !ret && i < 4; i++)
+ for (floor = 0;
+ floor < DOC_MAX_NBFLOORS && cascade->floors[floor];
+ floor++) {
+ for (i = 0; i < 4; i++) {
ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
- if (!ret)
- return 0;
+ if (ret)
+ goto remove_files;
+ }
+ }
+
+ return 0;
+
+remove_files:
do {
while (--i >= 0)
device_remove_file(dev, &doc_sys_attrs[floor][i]);
i = 4;
} while (--floor >= 0);
+
return ret;
}
@@ -1815,7 +1825,7 @@ static void doc_dbg_unregister(struct docg3 *docg3)
* @chip_id: The chip ID of the supported chip
* @mtd: The structure to fill
*/
-static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
+static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
{
struct docg3 *docg3 = mtd->priv;
int cfg;
@@ -1828,6 +1838,8 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
case DOC_CHIPID_G3:
mtd->name = kasprintf(GFP_KERNEL, "docg3.%d",
docg3->device_id);
+ if (!mtd->name)
+ return -ENOMEM;
docg3->max_block = 2047;
break;
}
@@ -1841,7 +1853,6 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
mtd->erasesize /= 2;
mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
- mtd->owner = THIS_MODULE;
mtd->_erase = doc_erase;
mtd->_read = doc_read;
mtd->_write = doc_write;
@@ -1850,6 +1861,8 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
mtd->_block_isbad = doc_block_isbad;
mtd->ecclayout = &docg3_oobinfo;
mtd->ecc_strength = DOC_ECC_BCH_T;
+
+ return 0;
}
/**
@@ -1881,6 +1894,7 @@ doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev)
if (!mtd)
goto nomem2;
mtd->priv = docg3;
+ mtd->dev.parent = dev;
bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
8 * DOC_LAYOUT_PAGE_SIZE);
docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
@@ -1900,7 +1914,7 @@ doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev)
ret = 0;
if (chip_id != (u16)(~chip_id_inv)) {
- goto nomem3;
+ goto nomem4;
}
switch (chip_id) {
@@ -1910,15 +1924,19 @@ doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev)
break;
default:
doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
- goto nomem3;
+ goto nomem4;
}
- doc_set_driver_info(chip_id, mtd);
+ ret = doc_set_driver_info(chip_id, mtd);
+ if (ret)
+ goto nomem4;
doc_hamming_ecc_init(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ);
doc_reload_bbt(docg3);
return mtd;
+nomem4:
+ kfree(docg3->bbt);
nomem3:
kfree(mtd);
nomem2:
@@ -2117,7 +2135,7 @@ static int docg3_release(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static struct of_device_id docg3_dt_ids[] = {
+static const struct of_device_id docg3_dt_ids[] = {
{ .compatible = "m-systems,diskonchip-g3" },
{}
};
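The reworked doc_register_sysfs() above follows the usual create-then-unwind pattern: stop at the first device_create_file() failure and remove only the attributes that were actually created. A generic sketch of that pattern, with illustrative names that are not part of the driver:

#include <linux/device.h>

static int create_attrs(struct device *dev,
			const struct device_attribute *attrs, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = device_create_file(dev, &attrs[i]);
		if (ret)
			goto undo;
	}
	return 0;

undo:
	/* remove only what was successfully created, in reverse order */
	while (--i >= 0)
		device_remove_file(dev, &attrs[i]);
	return ret;
}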
diff --git a/kernel/drivers/mtd/devices/m25p80.c b/kernel/drivers/mtd/devices/m25p80.c
index 3af137f49..fe9ceb7b5 100644
--- a/kernel/drivers/mtd/devices/m25p80.c
+++ b/kernel/drivers/mtd/devices/m25p80.c
@@ -31,7 +31,6 @@
struct m25p {
struct spi_device *spi;
struct spi_nor spi_nor;
- struct mtd_info mtd;
u8 command[MAX_CMD_SIZE];
};
@@ -62,8 +61,7 @@ static int m25p_cmdsz(struct spi_nor *nor)
return 1 + nor->addr_width;
}
-static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
- int wr_en)
+static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
struct m25p *flash = nor->priv;
struct spi_device *spi = flash->spi;
@@ -159,7 +157,7 @@ static int m25p80_erase(struct spi_nor *nor, loff_t offset)
struct m25p *flash = nor->priv;
dev_dbg(nor->dev, "%dKiB at 0x%08x\n",
- flash->mtd.erasesize / 1024, (u32)offset);
+ flash->spi_nor.mtd.erasesize / 1024, (u32)offset);
/* Set up command buffer. */
flash->command[0] = nor->erase_opcode;
@@ -201,11 +199,10 @@ static int m25p_probe(struct spi_device *spi)
nor->read_reg = m25p80_read_reg;
nor->dev = &spi->dev;
- nor->mtd = &flash->mtd;
+ nor->flash_node = spi->dev.of_node;
nor->priv = flash;
spi_set_drvdata(spi, flash);
- flash->mtd.priv = nor;
flash->spi = spi;
if (spi->mode & SPI_RX_QUAD)
@@ -214,7 +211,7 @@ static int m25p_probe(struct spi_device *spi)
mode = SPI_NOR_DUAL;
if (data && data->name)
- flash->mtd.name = data->name;
+ nor->mtd.name = data->name;
/* For some (historical?) reason many platforms provide two different
* names in flash_platform_data: "name" and "type". Quite often name is
@@ -223,8 +220,6 @@ static int m25p_probe(struct spi_device *spi)
*/
if (data && data->type)
flash_name = data->type;
- else if (!strcmp(spi->modalias, "spi-nor"))
- flash_name = NULL; /* auto-detect */
else
flash_name = spi->modalias;
@@ -234,7 +229,7 @@ static int m25p_probe(struct spi_device *spi)
ppdata.of_node = spi->dev.of_node;
- return mtd_device_parse_register(&flash->mtd, NULL, &ppdata,
+ return mtd_device_parse_register(&nor->mtd, NULL, &ppdata,
data ? data->parts : NULL,
data ? data->nr_parts : 0);
}
@@ -245,7 +240,7 @@ static int m25p_remove(struct spi_device *spi)
struct m25p *flash = spi_get_drvdata(spi);
/* Clean up MTD stuff. */
- return mtd_device_unregister(&flash->mtd);
+ return mtd_device_unregister(&flash->spi_nor.mtd);
}
/*
@@ -261,59 +256,52 @@ static int m25p_remove(struct spi_device *spi)
* keep them available as module aliases for existing platforms.
*/
static const struct spi_device_id m25p_ids[] = {
- {"at25fs010"}, {"at25fs040"}, {"at25df041a"}, {"at25df321a"},
- {"at25df641"}, {"at26f004"}, {"at26df081a"}, {"at26df161a"},
- {"at26df321"}, {"at45db081d"},
- {"en25f32"}, {"en25p32"}, {"en25q32b"}, {"en25p64"},
- {"en25q64"}, {"en25qh128"}, {"en25qh256"},
- {"f25l32pa"},
- {"mr25h256"}, {"mr25h10"},
- {"gd25q32"}, {"gd25q64"},
- {"160s33b"}, {"320s33b"}, {"640s33b"},
- {"mx25l2005a"}, {"mx25l4005a"}, {"mx25l8005"}, {"mx25l1606e"},
- {"mx25l3205d"}, {"mx25l3255e"}, {"mx25l6405d"}, {"mx25l12805d"},
- {"mx25l12855e"},{"mx25l25635e"},{"mx25l25655e"},{"mx66l51235l"},
- {"mx66l1g55g"},
- {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q256a"},
- {"n25q512a"}, {"n25q512ax3"}, {"n25q00"},
- {"pm25lv512"}, {"pm25lv010"}, {"pm25lq032"},
- {"s25sl032p"}, {"s25sl064p"}, {"s25fl256s0"}, {"s25fl256s1"},
- {"s25fl512s"}, {"s70fl01gs"}, {"s25sl12800"}, {"s25sl12801"},
- {"s25fl129p0"}, {"s25fl129p1"}, {"s25sl004a"}, {"s25sl008a"},
- {"s25sl016a"}, {"s25sl032a"}, {"s25sl064a"}, {"s25fl008k"},
- {"s25fl016k"}, {"s25fl064k"}, {"s25fl132k"},
- {"sst25vf040b"},{"sst25vf080b"},{"sst25vf016b"},{"sst25vf032b"},
- {"sst25vf064c"},{"sst25wf512"}, {"sst25wf010"}, {"sst25wf020"},
- {"sst25wf040"},
- {"m25p05"}, {"m25p10"}, {"m25p20"}, {"m25p40"},
- {"m25p80"}, {"m25p16"}, {"m25p32"}, {"m25p64"},
- {"m25p128"}, {"n25q032"},
+ /*
+ * Entries not used in DTs that should be safe to drop after replacing
+ * them with "nor-jedec" in platform data.
+ */
+ {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
+
+ /*
+ * Entries that were used in DTs without "nor-jedec" fallback and should
+ * be kept for backward compatibility.
+ */
+ {"at25df321a"}, {"at25df641"}, {"at26df081a"},
+ {"mr25h256"},
+ {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
+ {"mx25l25635e"},{"mx66l51235l"},
+ {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
+ {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"},
+ {"s25fl064k"},
+ {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
+ {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"},
+ {"m25p64"}, {"m25p128"},
+ {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
+ {"w25q80bl"}, {"w25q128"}, {"w25q256"},
+
+ /* Flashes that can't be detected using JEDEC */
{"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
{"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
{"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
- {"m45pe10"}, {"m45pe80"}, {"m45pe16"},
- {"m25pe20"}, {"m25pe80"}, {"m25pe16"},
- {"m25px16"}, {"m25px32"}, {"m25px32-s0"}, {"m25px32-s1"},
- {"m25px64"}, {"m25px80"},
- {"w25x10"}, {"w25x20"}, {"w25x40"}, {"w25x80"},
- {"w25x16"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
- {"w25x64"}, {"w25q64"}, {"w25q80"}, {"w25q80bl"},
- {"w25q128"}, {"w25q256"}, {"cat25c11"},
- {"cat25c03"}, {"cat25c09"}, {"cat25c17"}, {"cat25128"},
- /*
- * Generic support for SPI NOR that can be identified by the JEDEC READ
- * ID opcode (0x9F). Use this, if possible.
- */
- {"spi-nor"},
{ },
};
MODULE_DEVICE_TABLE(spi, m25p_ids);
+static const struct of_device_id m25p_of_table[] = {
+ /*
+ * Generic compatibility for SPI NOR that can be identified by the
+ * JEDEC READ ID opcode (0x9F). Use this, if possible.
+ */
+ { .compatible = "jedec,spi-nor" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, m25p_of_table);
+
static struct spi_driver m25p80_driver = {
.driver = {
.name = "m25p80",
- .owner = THIS_MODULE,
+ .of_match_table = m25p_of_table,
},
.id_table = m25p_ids,
.probe = m25p_probe,
diff --git a/kernel/drivers/mtd/devices/mtd_dataflash.c b/kernel/drivers/mtd/devices/mtd_dataflash.c
index 0099aba72..e4a88715a 100644
--- a/kernel/drivers/mtd/devices/mtd_dataflash.c
+++ b/kernel/drivers/mtd/devices/mtd_dataflash.c
@@ -102,6 +102,7 @@ static const struct of_device_id dataflash_dt_ids[] = {
{ .compatible = "atmel,dataflash", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, dataflash_dt_ids);
#endif
/* ......................................................................... */
@@ -647,7 +648,6 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
device->size = nr_pages * pagesize;
device->erasesize = pagesize;
device->writesize = pagesize;
- device->owner = THIS_MODULE;
device->type = MTD_DATAFLASH;
device->flags = MTD_WRITEABLE;
device->_erase = dataflash_erase;
@@ -910,7 +910,6 @@ static int dataflash_remove(struct spi_device *spi)
static struct spi_driver dataflash_driver = {
.driver = {
.name = "mtd_dataflash",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(dataflash_dt_ids),
},
diff --git a/kernel/drivers/mtd/devices/mtdram.c b/kernel/drivers/mtd/devices/mtdram.c
index 8e2850892..627a9bc37 100644
--- a/kernel/drivers/mtd/devices/mtdram.c
+++ b/kernel/drivers/mtd/devices/mtdram.c
@@ -32,8 +32,29 @@ MODULE_PARM_DESC(erase_size, "Device erase block size in KiB");
// We could store these in the mtd structure, but we only support 1 device..
static struct mtd_info *mtd_info;
+static int check_offs_len(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+
+ /* Start address must align on block boundary */
+ if (mtd_mod_by_eb(ofs, mtd)) {
+ pr_debug("%s: unaligned address\n", __func__);
+ ret = -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (mtd_mod_by_eb(len, mtd)) {
+ pr_debug("%s: length not block aligned\n", __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static int ram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
+ if (check_offs_len(mtd, instr->addr, instr->len))
+ return -EINVAL;
memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
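With check_offs_len() in place, mtdram now rejects erase requests whose start or length is not a multiple of the simulated erase block, as real flash drivers do. A hedged caller-side sketch (the offsets are illustrative and assume a device at least two erase blocks large):

#include <linux/mtd/mtd.h>

static int erase_unaligned(struct mtd_info *mtd)
{
	struct erase_info ei = {
		.mtd  = mtd,
		.addr = mtd->erasesize + 1,	/* not block aligned */
		.len  = mtd->erasesize,
	};

	/* ram_erase() -> check_offs_len() fails this with -EINVAL */
	return mtd_erase(mtd, &ei);
}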
diff --git a/kernel/drivers/mtd/devices/powernv_flash.c b/kernel/drivers/mtd/devices/powernv_flash.c
new file mode 100644
index 000000000..d5b870b3f
--- /dev/null
+++ b/kernel/drivers/mtd/devices/powernv_flash.c
@@ -0,0 +1,285 @@
+/*
+ * OPAL PNOR flash MTD abstraction
+ *
+ * Copyright IBM 2015
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/opal.h>
+
+
+/*
+ * This driver creates the a Linux MTD abstraction for platform PNOR flash
+ * backed by OPAL calls
+ */
+
+struct powernv_flash {
+ struct mtd_info mtd;
+ u32 id;
+};
+
+enum flash_op {
+ FLASH_OP_READ,
+ FLASH_OP_WRITE,
+ FLASH_OP_ERASE,
+};
+
+static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op,
+ loff_t offset, size_t len, size_t *retlen, u_char *buf)
+{
+ struct powernv_flash *info = (struct powernv_flash *)mtd->priv;
+ struct device *dev = &mtd->dev;
+ int token;
+ struct opal_msg msg;
+ int rc;
+
+ dev_dbg(dev, "%s(op=%d, offset=0x%llx, len=%zu)\n",
+ __func__, op, offset, len);
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ if (token != -ERESTARTSYS)
+ dev_err(dev, "Failed to get an async token\n");
+
+ return token;
+ }
+
+ switch (op) {
+ case FLASH_OP_READ:
+ rc = opal_flash_read(info->id, offset, __pa(buf), len, token);
+ break;
+ case FLASH_OP_WRITE:
+ rc = opal_flash_write(info->id, offset, __pa(buf), len, token);
+ break;
+ case FLASH_OP_ERASE:
+ rc = opal_flash_erase(info->id, offset, len, token);
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ if (rc != OPAL_ASYNC_COMPLETION) {
+ dev_err(dev, "opal_flash_async_op(op=%d) failed (rc %d)\n",
+ op, rc);
+ opal_async_release_token(token);
+ return -EIO;
+ }
+
+ rc = opal_async_wait_response(token, &msg);
+ opal_async_release_token(token);
+ if (rc) {
+ dev_err(dev, "opal async wait failed (rc %d)\n", rc);
+ return -EIO;
+ }
+
+ rc = be64_to_cpu(msg.params[1]);
+ if (rc == OPAL_SUCCESS) {
+ rc = 0;
+ if (retlen)
+ *retlen = len;
+ } else {
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+/**
+ * @mtd: the device
+ * @from: the offset to read from
+ * @len: the number of bytes to read
+ * @retlen: the number of bytes actually read
+ * @buf: the filled in buffer
+ *
+ * Returns 0 if read successful, or -ERRNO if an error occurred
+ */
+static int powernv_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ return powernv_flash_async_op(mtd, FLASH_OP_READ, from,
+ len, retlen, buf);
+}
+
+/**
+ * @mtd: the device
+ * @to: the offset to write to
+ * @len: the number of bytes to write
+ * @retlen: the number of bytes actually written
+ * @buf: the buffer to get bytes from
+ *
+ * Returns 0 if write successful, -ERRNO if error occurred
+ */
+static int powernv_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ return powernv_flash_async_op(mtd, FLASH_OP_WRITE, to,
+ len, retlen, (u_char *)buf);
+}
+
+/**
+ * @mtd: the device
+ * @erase: the erase info
+ * Returns 0 if erase successful or -ERRNO if an error occurred
+ */
+static int powernv_flash_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ int rc;
+
+ erase->state = MTD_ERASING;
+
+ /* todo: register our own notifier to do a true async implementation */
+ rc = powernv_flash_async_op(mtd, FLASH_OP_ERASE, erase->addr,
+ erase->len, NULL, NULL);
+
+ if (rc) {
+ erase->fail_addr = erase->addr;
+ erase->state = MTD_ERASE_FAILED;
+ } else {
+ erase->state = MTD_ERASE_DONE;
+ }
+ mtd_erase_callback(erase);
+ return rc;
+}
+
+/**
+ * powernv_flash_set_driver_info - Fill the mtd_info structure and docg3
+ * structure @pdev: The platform device
+ * @mtd: The structure to fill
+ */
+static int powernv_flash_set_driver_info(struct device *dev,
+ struct mtd_info *mtd)
+{
+ u64 size;
+ u32 erase_size;
+ int rc;
+
+ rc = of_property_read_u32(dev->of_node, "ibm,flash-block-size",
+ &erase_size);
+ if (rc) {
+ dev_err(dev, "couldn't get resource block size information\n");
+ return rc;
+ }
+
+ rc = of_property_read_u64(dev->of_node, "reg", &size);
+ if (rc) {
+ dev_err(dev, "couldn't get resource size information\n");
+ return rc;
+ }
+
+ /*
+ * Going to have to check what details I need to set and how to
+ * get them
+ */
+ mtd->name = of_get_property(dev->of_node, "name", NULL);
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_WRITEABLE;
+ mtd->size = size;
+ mtd->erasesize = erase_size;
+ mtd->writebufsize = mtd->writesize = 1;
+ mtd->owner = THIS_MODULE;
+ mtd->_erase = powernv_flash_erase;
+ mtd->_read = powernv_flash_read;
+ mtd->_write = powernv_flash_write;
+ mtd->dev.parent = dev;
+ return 0;
+}
+
+/**
+ * powernv_flash_probe
+ * @pdev: platform device
+ *
+ * Returns 0 on success, -ENOMEM, -ENXIO on error
+ */
+static int powernv_flash_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct powernv_flash *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ data->mtd.priv = data;
+
+ ret = of_property_read_u32(dev->of_node, "ibm,opal-id", &(data->id));
+ if (ret) {
+ dev_err(dev, "no device property 'ibm,opal-id'\n");
+ goto out;
+ }
+
+ ret = powernv_flash_set_driver_info(dev, &data->mtd);
+ if (ret)
+ goto out;
+
+ dev_set_drvdata(dev, data);
+
+ /*
+ * The current flash that skiboot exposes is one contiguous flash chip
+ * with an ffs partition at the start, it should prove easier for users
+ * to deal with partitions or not as they see fit
+ */
+ ret = mtd_device_register(&data->mtd, NULL, 0);
+
+out:
+ return ret;
+}
+
+/**
+ * op_release - Release the driver
+ * @pdev: the platform device
+ *
+ * Returns 0
+ */
+static int powernv_flash_release(struct platform_device *pdev)
+{
+ struct powernv_flash *data = dev_get_drvdata(&(pdev->dev));
+
+ /* All resources should be freed automatically */
+ return mtd_device_unregister(&(data->mtd));
+}
+
+static const struct of_device_id powernv_flash_match[] = {
+ { .compatible = "ibm,opal-flash" },
+ {}
+};
+
+static struct platform_driver powernv_flash_driver = {
+ .driver = {
+ .name = "powernv_flash",
+ .of_match_table = powernv_flash_match,
+ },
+ .remove = powernv_flash_release,
+ .probe = powernv_flash_probe,
+};
+
+module_platform_driver(powernv_flash_driver);
+
+MODULE_DEVICE_TABLE(of, powernv_flash_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cyril Bur <cyril.bur@au1.ibm.com>");
+MODULE_DESCRIPTION("MTD abstraction for OPAL flash");
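Every operation in this new driver is funnelled through the OPAL async-token pattern: grab a token, fire the OPAL call, sleep on the async response, release the token. Once the device is registered, consumers use the ordinary MTD API and land in these handlers; a consumer-side sketch (offset and length are illustrative, not from the driver):

#include <linux/mtd/mtd.h>

/* Read 256 bytes at offset 0x1000 of the PNOR; this ends up in
 * powernv_flash_read() -> opal_flash_read(). */
static int read_pnor_sample(struct mtd_info *mtd, u_char *buf)
{
	size_t retlen = 0;

	return mtd_read(mtd, 0x1000, 256, &retlen, buf);
}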
diff --git a/kernel/drivers/mtd/devices/slram.c b/kernel/drivers/mtd/devices/slram.c
index 2fc4957cb..a70eb83e6 100644
--- a/kernel/drivers/mtd/devices/slram.c
+++ b/kernel/drivers/mtd/devices/slram.c
@@ -41,7 +41,7 @@
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/mtd/mtd.h>
diff --git a/kernel/drivers/mtd/devices/spear_smi.c b/kernel/drivers/mtd/devices/spear_smi.c
index 508bab3bd..64c745834 100644
--- a/kernel/drivers/mtd/devices/spear_smi.c
+++ b/kernel/drivers/mtd/devices/spear_smi.c
@@ -1,8 +1,8 @@
/*
* SMI (Serial Memory Controller) device driver for Serial NOR Flash on
* SPEAr platform
- * The serial nor interface is largely based on drivers/mtd/m25p80.c,
- * however the SPI interface has been replaced by SMI.
+ * The serial nor interface is largely based on m25p80.c, however the SPI
+ * interface has been replaced by SMI.
*
* Copyright © 2010 STMicroelectronics.
* Ashish Priyadarshi
@@ -854,6 +854,7 @@ static int spear_smi_setup_banks(struct platform_device *pdev,
else
flash->mtd.name = flash_devices[flash_index].name;
+ flash->mtd.dev.parent = &pdev->dev;
flash->mtd.type = MTD_NORFLASH;
flash->mtd.writesize = 1;
flash->mtd.flags = MTD_CAP_NORFLASH;
diff --git a/kernel/drivers/mtd/devices/sst25l.c b/kernel/drivers/mtd/devices/sst25l.c
index c63ecbcad..5b84d71ef 100644
--- a/kernel/drivers/mtd/devices/sst25l.c
+++ b/kernel/drivers/mtd/devices/sst25l.c
@@ -374,9 +374,8 @@ static int sst25l_probe(struct spi_device *spi)
data = dev_get_platdata(&spi->dev);
if (data && data->name)
flash->mtd.name = data->name;
- else
- flash->mtd.name = dev_name(&spi->dev);
+ flash->mtd.dev.parent = &spi->dev;
flash->mtd.type = MTD_NORFLASH;
flash->mtd.flags = MTD_CAP_NORFLASH;
flash->mtd.erasesize = flash_info->erase_size;
@@ -417,7 +416,6 @@ static int sst25l_remove(struct spi_device *spi)
static struct spi_driver sst25l_driver = {
.driver = {
.name = "sst25l",
- .owner = THIS_MODULE,
},
.probe = sst25l_probe,
.remove = sst25l_remove,
diff --git a/kernel/drivers/mtd/lpddr/lpddr2_nvm.c b/kernel/drivers/mtd/lpddr/lpddr2_nvm.c
index 063cec40d..2342277c9 100644
--- a/kernel/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/kernel/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -460,6 +460,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
/* Populate mtd_info data structure */
*mtd = (struct mtd_info) {
+ .dev = { .parent = &pdev->dev },
.name = pdev->dev.init_name,
.type = MTD_RAM,
.priv = map,
diff --git a/kernel/drivers/mtd/maps/Kconfig b/kernel/drivers/mtd/maps/Kconfig
index e715ae906..7c95a656f 100644
--- a/kernel/drivers/mtd/maps/Kconfig
+++ b/kernel/drivers/mtd/maps/Kconfig
@@ -326,7 +326,7 @@ config MTD_BFIN_ASYNC
config MTD_GPIO_ADDR
tristate "GPIO-assisted Flash Chip Support"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
depends on MTD_COMPLEX_MAPPINGS
help
Map driver which allows flashes to be partially physically addressed
diff --git a/kernel/drivers/mtd/maps/amd76xrom.c b/kernel/drivers/mtd/maps/amd76xrom.c
index f7207b0a7..f2b68667e 100644
--- a/kernel/drivers/mtd/maps/amd76xrom.c
+++ b/kernel/drivers/mtd/maps/amd76xrom.c
@@ -138,7 +138,7 @@ static int amd76xrom_init_one(struct pci_dev *pdev,
/*
* Try to reserve the window mem region. If this fails then
* it is likely due to a fragment of the window being
- * "reseved" by the BIOS. In the case that the
+ * "reserved" by the BIOS. In the case that the
* request_mem_region() fails then once the rom size is
* discovered we will try to reserve the unreserved fragment.
*/
diff --git a/kernel/drivers/mtd/maps/esb2rom.c b/kernel/drivers/mtd/maps/esb2rom.c
index f784cf0ca..76ed651b5 100644
--- a/kernel/drivers/mtd/maps/esb2rom.c
+++ b/kernel/drivers/mtd/maps/esb2rom.c
@@ -234,7 +234,7 @@ static int esb2rom_init_one(struct pci_dev *pdev,
/*
* Try to reserve the window mem region. If this fails then
- * it is likely due to the window being "reseved" by the BIOS.
+ * it is likely due to the window being "reserved" by the BIOS.
*/
window->rsrc.name = MOD_NAME;
window->rsrc.start = window->phys;
diff --git a/kernel/drivers/mtd/maps/gpio-addr-flash.c b/kernel/drivers/mtd/maps/gpio-addr-flash.c
index 2fb346091..385305e66 100644
--- a/kernel/drivers/mtd/maps/gpio-addr-flash.c
+++ b/kernel/drivers/mtd/maps/gpio-addr-flash.c
@@ -266,7 +266,7 @@ static int gpio_flash_probe(struct platform_device *pdev)
kfree(state);
return -ENXIO;
}
-
+ state->mtd->dev.parent = &pdev->dev;
mtd_device_parse_register(state->mtd, part_probe_types, NULL,
pdata->parts, pdata->nr_parts);
diff --git a/kernel/drivers/mtd/maps/ichxrom.c b/kernel/drivers/mtd/maps/ichxrom.c
index c7478e18f..8636bba42 100644
--- a/kernel/drivers/mtd/maps/ichxrom.c
+++ b/kernel/drivers/mtd/maps/ichxrom.c
@@ -167,7 +167,7 @@ static int ichxrom_init_one(struct pci_dev *pdev,
/*
* Try to reserve the window mem region. If this fails then
- * it is likely due to the window being "reseved" by the BIOS.
+ * it is likely due to the window being "reserved" by the BIOS.
*/
window->rsrc.name = MOD_NAME;
window->rsrc.start = window->phys;
diff --git a/kernel/drivers/mtd/maps/intel_vr_nor.c b/kernel/drivers/mtd/maps/intel_vr_nor.c
index 5ab71f0e1..8bf79775e 100644
--- a/kernel/drivers/mtd/maps/intel_vr_nor.c
+++ b/kernel/drivers/mtd/maps/intel_vr_nor.c
@@ -90,7 +90,7 @@ static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
if (!p->info)
return -ENODEV;
- p->info->owner = THIS_MODULE;
+ p->info->dev.parent = &p->dev->dev;
return 0;
}
diff --git a/kernel/drivers/mtd/maps/ixp4xx.c b/kernel/drivers/mtd/maps/ixp4xx.c
index b44307410..e3180d5aa 100644
--- a/kernel/drivers/mtd/maps/ixp4xx.c
+++ b/kernel/drivers/mtd/maps/ixp4xx.c
@@ -226,7 +226,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
err = -ENXIO;
goto Error;
}
- info->mtd->owner = THIS_MODULE;
+ info->mtd->dev.parent = &dev->dev;
/* Use the fast version */
info->map.write = ixp4xx_write16;
diff --git a/kernel/drivers/mtd/maps/lantiq-flash.c b/kernel/drivers/mtd/maps/lantiq-flash.c
index 33d26f5be..938520549 100644
--- a/kernel/drivers/mtd/maps/lantiq-flash.c
+++ b/kernel/drivers/mtd/maps/lantiq-flash.c
@@ -45,7 +45,6 @@ struct ltq_mtd {
};
static const char ltq_map_name[] = "ltq_nor";
-static const char * const ltq_probe_types[] = { "cmdlinepart", "ofpart", NULL };
static map_word
ltq_read16(struct map_info *map, unsigned long adr)
@@ -161,15 +160,14 @@ ltq_mtd_probe(struct platform_device *pdev)
return -ENXIO;
}
- ltq_mtd->mtd->owner = THIS_MODULE;
+ ltq_mtd->mtd->dev.parent = &pdev->dev;
cfi = ltq_mtd->map->fldrv_priv;
cfi->addr_unlock1 ^= 1;
cfi->addr_unlock2 ^= 1;
ppdata.of_node = pdev->dev.of_node;
- err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types,
- &ppdata, NULL, 0);
+ err = mtd_device_parse_register(ltq_mtd->mtd, NULL, &ppdata, NULL, 0);
if (err) {
dev_err(&pdev->dev, "failed to add partitions\n");
goto err_destroy;
diff --git a/kernel/drivers/mtd/maps/latch-addr-flash.c b/kernel/drivers/mtd/maps/latch-addr-flash.c
index cadfbe051..6dc97aa66 100644
--- a/kernel/drivers/mtd/maps/latch-addr-flash.c
+++ b/kernel/drivers/mtd/maps/latch-addr-flash.c
@@ -195,7 +195,7 @@ static int latch_addr_flash_probe(struct platform_device *dev)
err = -ENODEV;
goto iounmap;
}
- info->mtd->owner = THIS_MODULE;
+ info->mtd->dev.parent = &dev->dev;
mtd_device_parse_register(info->mtd, NULL, NULL,
latch_addr_data->parts,
diff --git a/kernel/drivers/mtd/maps/nettel.c b/kernel/drivers/mtd/maps/nettel.c
index eadcfffc4..a577ef855 100644
--- a/kernel/drivers/mtd/maps/nettel.c
+++ b/kernel/drivers/mtd/maps/nettel.c
@@ -385,20 +385,28 @@ static int __init nettel_init(void)
}
rc = mtd_device_register(intel_mtd, nettel_intel_partitions,
num_intel_partitions);
+ if (rc)
+ goto out_map_destroy;
#endif
if (amd_mtd) {
rc = mtd_device_register(amd_mtd, nettel_amd_partitions,
num_amd_partitions);
+ if (rc)
+ goto out_mtd_unreg;
}
#ifdef CONFIG_MTD_CFI_INTELEXT
register_reboot_notifier(&nettel_notifier_block);
#endif
- return(rc);
+ return rc;
+out_mtd_unreg:
#ifdef CONFIG_MTD_CFI_INTELEXT
+ mtd_device_unregister(intel_mtd);
+out_map_destroy:
+ map_destroy(intel_mtd);
out_unmap1:
iounmap(nettel_intel_map.virt);
#endif
@@ -407,8 +415,7 @@ out_unmap2:
iounmap(nettel_mmcrp);
iounmap(nettel_amd_map.virt);
- return(rc);
-
+ return rc;
}
/****************************************************************************/
diff --git a/kernel/drivers/mtd/maps/pcmciamtd.c b/kernel/drivers/mtd/maps/pcmciamtd.c
index af747af5e..3dad2111b 100644
--- a/kernel/drivers/mtd/maps/pcmciamtd.c
+++ b/kernel/drivers/mtd/maps/pcmciamtd.c
@@ -700,6 +700,7 @@ static const struct pcmcia_device_id pcmciamtd_ids[] = {
PCMCIA_DEVICE_PROD_ID12("Maxtor", "MAXFL MobileMax Flash Memory Card", 0xb68968c8, 0x2dfb47b0),
PCMCIA_DEVICE_PROD_ID123("M-Systems", "M-SYS Flash Memory Card", "(c) M-Systems", 0x7ed2ad87, 0x675dc3fb, 0x7aef3965),
PCMCIA_DEVICE_PROD_ID12("PRETEC", " 2MB SRAM CARD", 0xebf91155, 0x805360ca),
+ PCMCIA_DEVICE_PROD_ID12("PRETEC", " 4MB SRAM CARD", 0xebf91155, 0x20b6bf17),
PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB101EN20", 0xf9876baf, 0xad0b207b),
PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB513EN20", 0xf9876baf, 0xe8d884ad),
PCMCIA_DEVICE_PROD_ID12("SMART Modular Technologies", " 4MB FLASH Card", 0x96fd8277, 0x737a5b05),
diff --git a/kernel/drivers/mtd/maps/physmap.c b/kernel/drivers/mtd/maps/physmap.c
index 4305fd607..cc2adbbcd 100644
--- a/kernel/drivers/mtd/maps/physmap.c
+++ b/kernel/drivers/mtd/maps/physmap.c
@@ -167,7 +167,6 @@ static int physmap_flash_probe(struct platform_device *dev)
} else {
devices_found++;
}
- info->mtd[i]->owner = THIS_MODULE;
info->mtd[i]->dev.parent = &dev->dev;
}
diff --git a/kernel/drivers/mtd/maps/physmap_of.c b/kernel/drivers/mtd/maps/physmap_of.c
index ff26e979b..e46b4e983 100644
--- a/kernel/drivers/mtd/maps/physmap_of.c
+++ b/kernel/drivers/mtd/maps/physmap_of.c
@@ -130,6 +130,8 @@ static const char * const *of_get_probes(struct device_node *dp)
count++;
res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return NULL;
count = 0;
while (cplen > 0) {
res[count] = cp;
@@ -147,7 +149,7 @@ static void of_free_probes(const char * const *probes)
kfree(probes);
}
-static struct of_device_id of_flash_match[];
+static const struct of_device_id of_flash_match[];
static int of_flash_probe(struct platform_device *dev)
{
const char * const *part_probe_types;
@@ -288,7 +290,6 @@ static int of_flash_probe(struct platform_device *dev)
} else {
info->list_size++;
}
- info->list[i].mtd->owner = THIS_MODULE;
info->list[i].mtd->dev.parent = &dev->dev;
}
@@ -311,6 +312,10 @@ static int of_flash_probe(struct platform_device *dev)
ppdata.of_node = dp;
part_probe_types = of_get_probes(dp);
+ if (!part_probe_types) {
+ err = -ENOMEM;
+ goto err_out;
+ }
mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata,
NULL, 0);
of_free_probes(part_probe_types);
@@ -327,7 +332,7 @@ err_flash_remove:
return err;
}
-static struct of_device_id of_flash_match[] = {
+static const struct of_device_id of_flash_match[] = {
{
.compatible = "cfi-flash",
.data = (void *)"cfi_probe",
diff --git a/kernel/drivers/mtd/maps/plat-ram.c b/kernel/drivers/mtd/maps/plat-ram.c
index 4b65c08d1..51572895c 100644
--- a/kernel/drivers/mtd/maps/plat-ram.c
+++ b/kernel/drivers/mtd/maps/plat-ram.c
@@ -210,7 +210,6 @@ static int platram_probe(struct platform_device *pdev)
goto exit_free;
}
- info->mtd->owner = THIS_MODULE;
info->mtd->dev.parent = &pdev->dev;
platram_setrw(info, PLATRAM_RW);
diff --git a/kernel/drivers/mtd/maps/pxa2xx-flash.c b/kernel/drivers/mtd/maps/pxa2xx-flash.c
index 12fa75df5..7497090e9 100644
--- a/kernel/drivers/mtd/maps/pxa2xx-flash.c
+++ b/kernel/drivers/mtd/maps/pxa2xx-flash.c
@@ -71,8 +71,8 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
info->map.name);
return -ENOMEM;
}
- info->map.cached =
- ioremap_cache(info->map.phys, info->map.size);
+ info->map.cached = memremap(info->map.phys, info->map.size,
+ MEMREMAP_WB);
if (!info->map.cached)
printk(KERN_WARNING "Failed to ioremap cached %s\n",
info->map.name);
@@ -93,7 +93,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
iounmap(info->map.cached);
return -EIO;
}
- info->mtd->owner = THIS_MODULE;
+ info->mtd->dev.parent = &pdev->dev;
mtd_device_parse_register(info->mtd, probes, NULL, flash->parts,
flash->nr_parts);
@@ -111,7 +111,7 @@ static int pxa2xx_flash_remove(struct platform_device *dev)
map_destroy(info->mtd);
iounmap(info->map.virt);
if (info->map.cached)
- iounmap(info->map.cached);
+ memunmap(info->map.cached);
kfree(info);
return 0;
}
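The pxa2xx-flash change above obtains the cached window with memremap(..., MEMREMAP_WB) instead of ioremap_cache(), and tears it down with memunmap() instead of iounmap(). A minimal sketch of the pairing (the helper name is illustrative):

#include <linux/io.h>
#include <linux/printk.h>

/* Map a flash window write-back cached; release it later with memunmap(). */
static void *map_cached_window(phys_addr_t phys, size_t size)
{
	void *cached;

	cached = memremap(phys, size, MEMREMAP_WB);
	if (!cached)
		pr_warn("cached mapping of %pa failed\n", &phys);
	return cached;
}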
diff --git a/kernel/drivers/mtd/maps/rbtx4939-flash.c b/kernel/drivers/mtd/maps/rbtx4939-flash.c
index 5a7551aa2..3a06ecfc5 100644
--- a/kernel/drivers/mtd/maps/rbtx4939-flash.c
+++ b/kernel/drivers/mtd/maps/rbtx4939-flash.c
@@ -96,7 +96,7 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
err = -ENXIO;
goto err_out;
}
- info->mtd->owner = THIS_MODULE;
+ info->mtd->dev.parent = &dev->dev;
err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts,
pdata->nr_parts);
diff --git a/kernel/drivers/mtd/maps/sa1100-flash.c b/kernel/drivers/mtd/maps/sa1100-flash.c
index 892ad6ac6..142fc3d79 100644
--- a/kernel/drivers/mtd/maps/sa1100-flash.c
+++ b/kernel/drivers/mtd/maps/sa1100-flash.c
@@ -117,7 +117,6 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r
ret = -ENXIO;
goto err;
}
- subdev->mtd->owner = THIS_MODULE;
printk(KERN_INFO "SA1100 flash: CFI device at 0x%08lx, %uMiB, %d-bit\n",
phys, (unsigned)(subdev->mtd->size >> 20),
@@ -234,6 +233,7 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
if (info->mtd == NULL)
ret = -ENXIO;
}
+ info->mtd->dev.parent = &pdev->dev;
if (ret == 0)
return info;
diff --git a/kernel/drivers/mtd/mtd_blkdevs.c b/kernel/drivers/mtd/mtd_blkdevs.c
index df7c6c707..f4701182b 100644
--- a/kernel/drivers/mtd/mtd_blkdevs.c
+++ b/kernel/drivers/mtd/mtd_blkdevs.c
@@ -97,14 +97,13 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (req->cmd_flags & REQ_DISCARD)
return tr->discard(dev, block, nsect);
- switch(rq_data_dir(req)) {
- case READ:
+ if (rq_data_dir(req) == READ) {
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->readsect(dev, block, buf))
return -EIO;
rq_flush_dcache_pages(req);
return 0;
- case WRITE:
+ } else {
if (!tr->writesect)
return -EIO;
@@ -113,9 +112,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (tr->writesect(dev, block, buf))
return -EIO;
return 0;
- default:
- printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
- return -EIO;
}
}
@@ -196,8 +192,8 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
if (!dev)
return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
- mutex_lock(&dev->lock);
mutex_lock(&mtd_table_mutex);
+ mutex_lock(&dev->lock);
if (dev->open)
goto unlock;
@@ -221,8 +217,8 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
unlock:
dev->open++;
- mutex_unlock(&mtd_table_mutex);
mutex_unlock(&dev->lock);
+ mutex_unlock(&mtd_table_mutex);
blktrans_dev_put(dev);
return ret;
@@ -232,8 +228,8 @@ error_release:
error_put:
module_put(dev->tr->owner);
kref_put(&dev->ref, blktrans_dev_release);
- mutex_unlock(&mtd_table_mutex);
mutex_unlock(&dev->lock);
+ mutex_unlock(&mtd_table_mutex);
blktrans_dev_put(dev);
return ret;
}
@@ -245,8 +241,8 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
if (!dev)
return;
- mutex_lock(&dev->lock);
mutex_lock(&mtd_table_mutex);
+ mutex_lock(&dev->lock);
if (--dev->open)
goto unlock;
@@ -260,8 +256,8 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
__put_mtd_device(dev->mtd);
}
unlock:
- mutex_unlock(&mtd_table_mutex);
mutex_unlock(&dev->lock);
+ mutex_unlock(&mtd_table_mutex);
blktrans_dev_put(dev);
}
@@ -278,7 +274,7 @@ static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
if (!dev->mtd)
goto unlock;
- ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
+ ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
@@ -403,7 +399,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%d", tr->name, new->devnum);
- set_capacity(gd, (new->size * tr->blksize) >> 9);
+ set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);
/* Create the request queue */
spin_lock_init(&new->queue_lock);
@@ -423,7 +419,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (tr->discard) {
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
- new->rq->limits.max_discard_sectors = UINT_MAX;
+ blk_queue_max_discard_sectors(new->rq, UINT_MAX);
}
gd->queue = new->rq;
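The open/release hunks above swap the lock order so that mtd_table_mutex is always taken before dev->lock, matching the paths that already hold the table mutex when they take the per-device lock; inconsistent ordering is what sets up an ABBA deadlock. The rule, sketched in isolation (illustrative, not a quote from the file):

#include <linux/mutex.h>

/* Mirrors the ordering used by blktrans_open()/blktrans_release() above:
 * take the outer (device table) lock first, then the per-device lock,
 * and release in the reverse order. If one path took dev->lock first,
 * two threads could each hold one lock while waiting for the other. */
static void with_both_locks(struct mutex *table_lock, struct mutex *dev_lock)
{
	mutex_lock(table_lock);		/* outer: MTD device table */
	mutex_lock(dev_lock);		/* inner: this blktrans device */

	/* ... work that needs both locks ... */

	mutex_unlock(dev_lock);
	mutex_unlock(table_lock);
}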
diff --git a/kernel/drivers/mtd/mtdchar.c b/kernel/drivers/mtd/mtdchar.c
index 55fa27ecf..6d19835b8 100644
--- a/kernel/drivers/mtd/mtdchar.c
+++ b/kernel/drivers/mtd/mtdchar.c
@@ -498,21 +498,17 @@ static int shrink_ecclayout(const struct nand_ecclayout *from,
}
static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
- struct blkpg_ioctl_arg __user *arg)
+ struct blkpg_ioctl_arg *arg)
{
- struct blkpg_ioctl_arg a;
struct blkpg_partition p;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
+ if (copy_from_user(&p, arg->data, sizeof(p)))
return -EFAULT;
- if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
- return -EFAULT;
-
- switch (a.op) {
+ switch (arg->op) {
case BLKPG_ADD_PARTITION:
/* Only master mtd device must be used to add partitions */
@@ -966,8 +962,13 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
case BLKPG:
{
- ret = mtdchar_blkpg_ioctl(mtd,
- (struct blkpg_ioctl_arg __user *)arg);
+ struct blkpg_ioctl_arg __user *blk_arg = argp;
+ struct blkpg_ioctl_arg a;
+
+ if (copy_from_user(&a, blk_arg, sizeof(a)))
+ ret = -EFAULT;
+ else
+ ret = mtdchar_blkpg_ioctl(mtd, &a);
break;
}
@@ -1046,6 +1047,29 @@ static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
&buf_user->start);
break;
}
+
+ case BLKPG:
+ {
+ /* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
+ struct blkpg_compat_ioctl_arg __user *uarg = argp;
+ struct blkpg_compat_ioctl_arg compat_arg;
+ struct blkpg_ioctl_arg a;
+
+ if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ memset(&a, 0, sizeof(a));
+ a.op = compat_arg.op;
+ a.flags = compat_arg.flags;
+ a.datalen = compat_arg.datalen;
+ a.data = compat_ptr(compat_arg.data);
+
+ ret = mtdchar_blkpg_ioctl(mtd, &a);
+ break;
+ }
+
default:
ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
}
diff --git a/kernel/drivers/mtd/mtdcore.c b/kernel/drivers/mtd/mtdcore.c
index d172195fb..ffa288474 100644
--- a/kernel/drivers/mtd/mtdcore.c
+++ b/kernel/drivers/mtd/mtdcore.c
@@ -48,14 +48,34 @@
static struct backing_dev_info mtd_bdi = {
};
-static int mtd_cls_suspend(struct device *dev, pm_message_t state);
-static int mtd_cls_resume(struct device *dev);
+#ifdef CONFIG_PM_SLEEP
+
+static int mtd_cls_suspend(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return mtd ? mtd_suspend(mtd) : 0;
+}
+
+static int mtd_cls_resume(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ if (mtd)
+ mtd_resume(mtd);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
+#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
+#else
+#define MTD_CLS_PM_OPS NULL
+#endif
static struct class mtd_class = {
.name = "mtd",
.owner = THIS_MODULE,
- .suspend = mtd_cls_suspend,
- .resume = mtd_cls_resume,
+ .pm = MTD_CLS_PM_OPS,
};
static DEFINE_IDR(mtd_idr);
@@ -88,22 +108,6 @@ static void mtd_release(struct device *dev)
device_destroy(&mtd_class, index + 1);
}
-static int mtd_cls_suspend(struct device *dev, pm_message_t state)
-{
- struct mtd_info *mtd = dev_get_drvdata(dev);
-
- return mtd ? mtd_suspend(mtd) : 0;
-}
-
-static int mtd_cls_resume(struct device *dev)
-{
- struct mtd_info *mtd = dev_get_drvdata(dev);
-
- if (mtd)
- mtd_resume(mtd);
- return 0;
-}
-
static ssize_t mtd_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -375,8 +379,7 @@ static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
*
* Add a device to the list of MTD devices present in the system, and
* notify each currently active MTD 'user' of its arrival. Returns
- * zero on success or 1 on failure, which currently will only happen
- * if there is insufficient memory or a sysfs error.
+ * zero on success or non-zero on failure.
*/
int add_mtd_device(struct mtd_info *mtd)
@@ -384,14 +387,24 @@ int add_mtd_device(struct mtd_info *mtd)
struct mtd_notifier *not;
int i, error;
+ /*
+ * May occur, for instance, on buggy drivers which call
+ * mtd_device_parse_register() multiple times on the same master MTD,
+ * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
+ */
+ if (WARN_ONCE(mtd->backing_dev_info, "MTD already registered\n"))
+ return -EEXIST;
+
mtd->backing_dev_info = &mtd_bdi;
BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex);
i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
- if (i < 0)
+ if (i < 0) {
+ error = i;
goto fail_locked;
+ }
mtd->index = i;
mtd->usecount = 0;
@@ -420,17 +433,20 @@ int add_mtd_device(struct mtd_info *mtd)
printk(KERN_WARNING
"%s: unlock failed, writes may not work\n",
mtd->name);
+ /* Ignore unlock failures? */
+ error = 0;
}
/* Caller should have set dev.parent to match the
- * physical device.
+ * physical device, if appropriate.
*/
mtd->dev.type = &mtd_devtype;
mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
dev_set_drvdata(&mtd->dev, mtd);
- if (device_register(&mtd->dev) != 0)
+ error = device_register(&mtd->dev);
+ if (error)
goto fail_added;
device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
@@ -454,7 +470,7 @@ fail_added:
idr_remove(&mtd_idr, i);
fail_locked:
mutex_unlock(&mtd_table_mutex);
- return 1;
+ return error;
}
/**
@@ -510,8 +526,8 @@ static int mtd_add_device_partitions(struct mtd_info *mtd,
if (nbparts == 0 || IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
ret = add_mtd_device(mtd);
- if (ret == 1)
- return -ENODEV;
+ if (ret)
+ return ret;
}
if (nbparts > 0) {
@@ -524,6 +540,21 @@ static int mtd_add_device_partitions(struct mtd_info *mtd,
return 0;
}
+/*
+ * Set a few defaults based on the parent devices, if not provided by the
+ * driver
+ */
+static void mtd_set_dev_defaults(struct mtd_info *mtd)
+{
+ if (mtd->dev.parent) {
+ if (!mtd->owner && mtd->dev.parent->driver)
+ mtd->owner = mtd->dev.parent->driver->owner;
+ if (!mtd->name)
+ mtd->name = dev_name(mtd->dev.parent);
+ } else {
+ pr_debug("mtd device won't show a device symlink in sysfs\n");
+ }
+}
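+
+/*
+ * Illustrative sketch (for clarity only): with the defaults above, a driver
+ * typically just points the MTD at its parent device before registering,
+ * e.g. in probe():
+ *
+ *	mtd->priv = chip;
+ *	mtd->dev.parent = &pdev->dev;
+ *	mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ *
+ * and mtd->owner / mtd->name then fall back to the parent's driver owner and
+ * device name; several driver conversions further down do exactly this.
+ */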
/**
* mtd_device_parse_register - parse partitions and register an MTD device.
@@ -562,6 +593,8 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
int ret;
struct mtd_partition *real_parts = NULL;
+ mtd_set_dev_defaults(mtd);
+
ret = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
if (ret <= 0 && nr_parts && parts) {
real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
@@ -571,9 +604,17 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
else
ret = nr_parts;
}
+ /* Didn't come up with either parsed OR fallback partitions */
+ if (ret < 0) {
+ pr_info("mtd: failed to find partitions; one or more parsers reports errors (%d)\n",
+ ret);
+ /* Don't abort on errors; we can still use unpartitioned MTD */
+ ret = 0;
+ }
- if (ret >= 0)
- ret = mtd_add_device_partitions(mtd, real_parts, ret);
+ ret = mtd_add_device_partitions(mtd, real_parts, ret);
+ if (ret)
+ goto out;
/*
* FIXME: some drivers unfortunately call this function more than once.
@@ -583,11 +624,14 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
* does cause problems with parse_mtd_partitions() above (e.g.,
* cmdlineparts will register partitions more than once).
*/
+ WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
+ "MTD already registered\n");
if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
register_reboot_notifier(&mtd->reboot_notifier);
}
+out:
kfree(real_parts);
return ret;
}
@@ -1180,8 +1224,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
*/
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
- gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
- __GFP_NORETRY | __GFP_NO_KSWAPD;
+ gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
void *kbuf;
@@ -1293,6 +1336,7 @@ static void __exit cleanup_mtd(void)
remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class);
bdi_destroy(&mtd_bdi);
+ idr_destroy(&mtd_idr);
}
module_init(init_mtd);
diff --git a/kernel/drivers/mtd/mtdpart.c b/kernel/drivers/mtd/mtdpart.c
index cafdb8855..f8ba153f6 100644
--- a/kernel/drivers/mtd/mtdpart.c
+++ b/kernel/drivers/mtd/mtdpart.c
@@ -664,8 +664,10 @@ int add_mtd_partitions(struct mtd_info *master,
for (i = 0; i < nbparts; i++) {
slave = allocate_partition(master, parts + i, i, cur_offset);
- if (IS_ERR(slave))
+ if (IS_ERR(slave)) {
+ del_mtd_partitions(master);
return PTR_ERR(slave);
+ }
mutex_lock(&mtd_partitions_mutex);
list_add(&slave->list, &mtd_partitions);
@@ -753,26 +755,37 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
struct mtd_part_parser_data *data)
{
struct mtd_part_parser *parser;
- int ret = 0;
+ int ret, err = 0;
if (!types)
types = default_mtd_part_types;
- for ( ; ret <= 0 && *types; types++) {
+ for ( ; *types; types++) {
+ pr_debug("%s: parsing partitions %s\n", master->name, *types);
parser = get_partition_parser(*types);
if (!parser && !request_module("%s", *types))
parser = get_partition_parser(*types);
+ pr_debug("%s: got parser %s\n", master->name,
+ parser ? parser->name : NULL);
if (!parser)
continue;
ret = (*parser->parse_fn)(master, pparts, data);
+ pr_debug("%s: parser %s: %i\n",
+ master->name, parser->name, ret);
put_partition_parser(parser);
if (ret > 0) {
printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
ret, parser->name, master->name);
- break;
+ return ret;
}
+ /*
+ * Stash the first error we see; only report it if no parser
+ * succeeds
+ */
+ if (ret < 0 && !err)
+ err = ret;
}
- return ret;
+ return err;
}
int mtd_is_partition(const struct mtd_info *mtd)
diff --git a/kernel/drivers/mtd/nand/Kconfig b/kernel/drivers/mtd/nand/Kconfig
index 5897d8d8f..289664089 100644
--- a/kernel/drivers/mtd/nand/Kconfig
+++ b/kernel/drivers/mtd/nand/Kconfig
@@ -42,23 +42,20 @@ config MTD_SM_COMMON
default n
config MTD_NAND_DENALI
- tristate "Support Denali NAND controller"
- depends on HAS_DMA
- help
- Enable support for the Denali NAND controller. This should be
- combined with either the PCI or platform drivers to provide device
- registration.
+ tristate
config MTD_NAND_DENALI_PCI
tristate "Support Denali NAND controller on Intel Moorestown"
- depends on PCI && MTD_NAND_DENALI
+ select MTD_NAND_DENALI
+ depends on HAS_DMA && PCI
help
Enable the driver for NAND flash on Intel Moorestown, using the
Denali NAND controller core.
config MTD_NAND_DENALI_DT
tristate "Support Denali NAND controller as a DT device"
- depends on HAVE_CLK && MTD_NAND_DENALI
+ select MTD_NAND_DENALI
+ depends on HAS_DMA && HAVE_CLK
help
Enable the driver for NAND flash on platforms using a Denali NAND
controller as a DT device.
@@ -76,7 +73,7 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
config MTD_NAND_GPIO
tristate "GPIO assisted NAND Flash driver"
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
This enables a NAND flash driver where control signals are
connected to GPIO pins, and commands and data are communicated
@@ -394,6 +391,14 @@ config MTD_NAND_GPMI_NAND
block, such as SD card. So pay attention to it when you enable
the GPMI.
+config MTD_NAND_BRCMNAND
+ tristate "Broadcom STB NAND controller"
+ depends on ARM || ARM64 || MIPS
+ help
+ Enables the Broadcom NAND controller driver. The controller was
+ originally designed for Set-Top Box but is used on various BCM7xxx,
+ BCM3xxx, BCM63xxx, iProc/Cygnus and more.
+
config MTD_NAND_BCM47XXNFLASH
tristate "Support for NAND flash on BCM4706 BCMA bus"
depends on BCMA_NFLASH
@@ -455,6 +460,17 @@ config MTD_NAND_MPC5121_NFC
This enables the driver for the NAND flash controller on the
MPC5121 SoC.
+config MTD_NAND_VF610_NFC
+ tristate "Support for Freescale NFC for VF610/MPC5125"
+ depends on (SOC_VF610 || COMPILE_TEST)
+ help
+ Enables support for NAND Flash Controller on some Freescale
+ processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
+ The driver supports a maximum 2k page size. With 2k pages and
+ 64 bytes or more of OOB, hardware ECC with up to 32-bit error
+ correction is supported. Hardware ECC is only enabled through
+ device tree.
+
config MTD_NAND_MXC
tristate "MXC NAND support"
depends on ARCH_MXC
diff --git a/kernel/drivers/mtd/nand/Makefile b/kernel/drivers/mtd/nand/Makefile
index 582bbd05a..2c7f014b3 100644
--- a/kernel/drivers/mtd/nand/Makefile
+++ b/kernel/drivers/mtd/nand/Makefile
@@ -26,7 +26,8 @@ obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
-obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
+omap2_nand-objs := omap2.o
+obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
@@ -45,6 +46,7 @@ obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
+obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o
obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
@@ -52,5 +54,6 @@ obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
nand-objs := nand_base.o nand_bbt.o nand_timings.o
diff --git a/kernel/drivers/mtd/nand/atmel_nand.c b/kernel/drivers/mtd/nand/atmel_nand.c
index 46010bd89..583cdd9bb 100644
--- a/kernel/drivers/mtd/nand/atmel_nand.c
+++ b/kernel/drivers/mtd/nand/atmel_nand.c
@@ -954,7 +954,8 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
}
static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf, int oob_required)
+ struct nand_chip *chip, const uint8_t *buf, int oob_required,
+ int page)
{
struct atmel_nand_host *host = chip->priv;
uint32_t *eccpos = chip->ecc.layout->eccpos;
@@ -2005,7 +2006,8 @@ static int nfc_sram_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (likely(!raw))
/* Need to write ecc into oob */
- status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+ status = chip->ecc.write_page(mtd, chip, buf, oob_required,
+ page);
if (status < 0)
return status;
@@ -2126,7 +2128,7 @@ static int atmel_nand_probe(struct platform_device *pdev)
nand_chip->priv = host; /* link the private data structures */
mtd->priv = nand_chip;
- mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
/* Set address of NAND IO lines */
nand_chip->IO_ADDR_R = host->io_base;
diff --git a/kernel/drivers/mtd/nand/au1550nd.c b/kernel/drivers/mtd/nand/au1550nd.c
index c0c3be180..08a130f63 100644
--- a/kernel/drivers/mtd/nand/au1550nd.c
+++ b/kernel/drivers/mtd/nand/au1550nd.c
@@ -439,7 +439,7 @@ static int au1550nd_probe(struct platform_device *pdev)
this = &ctx->chip;
ctx->info.priv = this;
- ctx->info.owner = THIS_MODULE;
+ ctx->info.dev.parent = &pdev->dev;
/* figure out which CS# r->start belongs to */
cs = find_nand_cs(r->start);
diff --git a/kernel/drivers/mtd/nand/bcm47xxnflash/main.c b/kernel/drivers/mtd/nand/bcm47xxnflash/main.c
index 461577cfb..9ba0c0f2c 100644
--- a/kernel/drivers/mtd/nand/bcm47xxnflash/main.c
+++ b/kernel/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -34,7 +34,7 @@ static int bcm47xxnflash_probe(struct platform_device *pdev)
return -ENOMEM;
b47n->nand_chip.priv = b47n;
- b47n->mtd.owner = THIS_MODULE;
+ b47n->mtd.dev.parent = &pdev->dev;
b47n->mtd.priv = &b47n->nand_chip; /* Required */
b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
diff --git a/kernel/drivers/mtd/nand/bf5xx_nand.c b/kernel/drivers/mtd/nand/bf5xx_nand.c
index 4d8d4ba4b..61bd21607 100644
--- a/kernel/drivers/mtd/nand/bf5xx_nand.c
+++ b/kernel/drivers/mtd/nand/bf5xx_nand.c
@@ -566,7 +566,8 @@ static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip
}
static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf, int oob_required)
+ struct nand_chip *chip, const uint8_t *buf, int oob_required,
+ int page)
{
bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -782,7 +783,7 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
/* initialise mtd info data struct */
mtd = &info->mtd;
mtd->priv = chip;
- mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
/* initialise the hardware */
err = bf5xx_nand_hw_init(info);
diff --git a/kernel/drivers/mtd/nand/brcmnand/Makefile b/kernel/drivers/mtd/nand/brcmnand/Makefile
new file mode 100644
index 000000000..3b1fbfd27
--- /dev/null
+++ b/kernel/drivers/mtd/nand/brcmnand/Makefile
@@ -0,0 +1,6 @@
+# link order matters; don't link the more generic brcmstb_nand.o before the
+# more specific iproc_nand.o, for instance
+obj-$(CONFIG_MTD_NAND_BRCMNAND) += iproc_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND) += bcm63138_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmstb_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand.o
diff --git a/kernel/drivers/mtd/nand/brcmnand/bcm63138_nand.c b/kernel/drivers/mtd/nand/brcmnand/bcm63138_nand.c
new file mode 100644
index 000000000..59444b3a6
--- /dev/null
+++ b/kernel/drivers/mtd/nand/brcmnand/bcm63138_nand.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct bcm63138_nand_soc {
+ struct brcmnand_soc soc;
+ void __iomem *base;
+};
+
+#define BCM63138_NAND_INT_STATUS 0x00
+#define BCM63138_NAND_INT_EN 0x04
+
+enum {
+ BCM63138_CTLRDY = BIT(4),
+};
+
+static bool bcm63138_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct bcm63138_nand_soc *priv =
+ container_of(soc, struct bcm63138_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM63138_NAND_INT_STATUS;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & BCM63138_CTLRDY) {
+ brcmnand_writel(val & ~BCM63138_CTLRDY, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void bcm63138_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct bcm63138_nand_soc *priv =
+ container_of(soc, struct bcm63138_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM63138_NAND_INT_EN;
+ u32 val = brcmnand_readl(mmio);
+
+ if (en)
+ val |= BCM63138_CTLRDY;
+ else
+ val &= ~BCM63138_CTLRDY;
+
+ brcmnand_writel(val, mmio);
+}
+
+static int bcm63138_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm63138_nand_soc *priv;
+ struct brcmnand_soc *soc;
+ struct resource *res;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ soc = &priv->soc;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-int-base");
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ soc->ctlrdy_ack = bcm63138_nand_intc_ack;
+ soc->ctlrdy_set_enabled = bcm63138_nand_intc_set;
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id bcm63138_nand_of_match[] = {
+ { .compatible = "brcm,nand-bcm63138" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm63138_nand_of_match);
+
+static struct platform_driver bcm63138_nand_driver = {
+ .probe = bcm63138_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "bcm63138_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = bcm63138_nand_of_match,
+ }
+};
+module_platform_driver(bcm63138_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for BCM63138");
diff --git a/kernel/drivers/mtd/nand/brcmnand/brcmnand.c b/kernel/drivers/mtd/nand/brcmnand/brcmnand.c
new file mode 100644
index 000000000..12c6190c6
--- /dev/null
+++ b/kernel/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -0,0 +1,2281 @@
+/*
+ * Copyright © 2010-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/ioport.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_mtd.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+
+#include "brcmnand.h"
+
+/*
+ * This flag controls if WP stays on between erase/write commands to mitigate
+ * flash corruption due to power glitches. Values:
+ * 0: NAND_WP is not used or not available
+ * 1: NAND_WP is set by default, cleared for erase/write operations
+ * 2: NAND_WP is always cleared
+ */
+static int wp_on = 1;
+module_param(wp_on, int, 0444);
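+/*
+ * Note (for clarity; assumes the driver is built as brcmnand.ko): wp_on is
+ * read-only at runtime (0444), so it is chosen at load time, e.g.
+ * "modprobe brcmnand wp_on=2", or "brcmnand.wp_on=2" on the kernel command
+ * line when built in.
+ */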
+
+/***********************************************************************
+ * Definitions
+ ***********************************************************************/
+
+#define DRV_NAME "brcmnand"
+
+#define CMD_NULL 0x00
+#define CMD_PAGE_READ 0x01
+#define CMD_SPARE_AREA_READ 0x02
+#define CMD_STATUS_READ 0x03
+#define CMD_PROGRAM_PAGE 0x04
+#define CMD_PROGRAM_SPARE_AREA 0x05
+#define CMD_COPY_BACK 0x06
+#define CMD_DEVICE_ID_READ 0x07
+#define CMD_BLOCK_ERASE 0x08
+#define CMD_FLASH_RESET 0x09
+#define CMD_BLOCKS_LOCK 0x0a
+#define CMD_BLOCKS_LOCK_DOWN 0x0b
+#define CMD_BLOCKS_UNLOCK 0x0c
+#define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
+#define CMD_PARAMETER_READ 0x0e
+#define CMD_PARAMETER_CHANGE_COL 0x0f
+#define CMD_LOW_LEVEL_OP 0x10
+
+struct brcm_nand_dma_desc {
+ u32 next_desc;
+ u32 next_desc_ext;
+ u32 cmd_irq;
+ u32 dram_addr;
+ u32 dram_addr_ext;
+ u32 tfr_len;
+ u32 total_len;
+ u32 flash_addr;
+ u32 flash_addr_ext;
+ u32 cs;
+ u32 pad2[5];
+ u32 status_valid;
+} __packed;
+
+/* Bitfields for brcm_nand_dma_desc::status_valid */
+#define FLASH_DMA_ECC_ERROR (1 << 8)
+#define FLASH_DMA_CORR_ERROR (1 << 9)
+
+/* 512B flash cache in the NAND controller HW */
+#define FC_SHIFT 9U
+#define FC_BYTES 512U
+#define FC_WORDS (FC_BYTES >> 2)
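+/*
+ * Note (for clarity): page I/O below moves data through this cache in
+ * FC_BYTES chunks, so e.g. a 2KiB page takes 2048 >> FC_SHIFT = 4 transfers
+ * of FC_WORDS (128) words each.
+ */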
+
+#define BRCMNAND_MIN_PAGESIZE 512
+#define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
+#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
+
+/* Controller feature flags */
+enum {
+ BRCMNAND_HAS_1K_SECTORS = BIT(0),
+ BRCMNAND_HAS_PREFETCH = BIT(1),
+ BRCMNAND_HAS_CACHE_MODE = BIT(2),
+ BRCMNAND_HAS_WP = BIT(3),
+};
+
+struct brcmnand_controller {
+ struct device *dev;
+ struct nand_hw_control controller;
+ void __iomem *nand_base;
+ void __iomem *nand_fc; /* flash cache */
+ void __iomem *flash_dma_base;
+ unsigned int irq;
+ unsigned int dma_irq;
+ int nand_version;
+
+ /* Some SoCs provide custom interrupt status register(s) */
+ struct brcmnand_soc *soc;
+
+ int cmd_pending;
+ bool dma_pending;
+ struct completion done;
+ struct completion dma_done;
+
+ /* List of NAND hosts (one for each chip-select) */
+ struct list_head host_list;
+
+ struct brcm_nand_dma_desc *dma_desc;
+ dma_addr_t dma_pa;
+
+ /* in-memory cache of the FLASH_CACHE, used only for some commands */
+ u32 flash_cache[FC_WORDS];
+
+ /* Controller revision details */
+ const u16 *reg_offsets;
+ unsigned int reg_spacing; /* between CS1, CS2, ... regs */
+ const u8 *cs_offsets; /* within each chip-select */
+ const u8 *cs0_offsets; /* within CS0, if different */
+ unsigned int max_block_size;
+ const unsigned int *block_sizes;
+ unsigned int max_page_size;
+ const unsigned int *page_sizes;
+ unsigned int max_oob;
+ u32 features;
+
+ /* for low-power standby/resume only */
+ u32 nand_cs_nand_select;
+ u32 nand_cs_nand_xor;
+ u32 corr_stat_threshold;
+ u32 flash_dma_mode;
+};
+
+struct brcmnand_cfg {
+ u64 device_size;
+ unsigned int block_size;
+ unsigned int page_size;
+ unsigned int spare_area_size;
+ unsigned int device_width;
+ unsigned int col_adr_bytes;
+ unsigned int blk_adr_bytes;
+ unsigned int ful_adr_bytes;
+ unsigned int sector_size_1k;
+ unsigned int ecc_level;
+ /* use for low-power standby/resume only */
+ u32 acc_control;
+ u32 config;
+ u32 config_ext;
+ u32 timing_1;
+ u32 timing_2;
+};
+
+struct brcmnand_host {
+ struct list_head node;
+ struct device_node *of_node;
+
+ struct nand_chip chip;
+ struct mtd_info mtd;
+ struct platform_device *pdev;
+ int cs;
+
+ unsigned int last_cmd;
+ unsigned int last_byte;
+ u64 last_addr;
+ struct brcmnand_cfg hwcfg;
+ struct brcmnand_controller *ctrl;
+};
+
+enum brcmnand_reg {
+ BRCMNAND_CMD_START = 0,
+ BRCMNAND_CMD_EXT_ADDRESS,
+ BRCMNAND_CMD_ADDRESS,
+ BRCMNAND_INTFC_STATUS,
+ BRCMNAND_CS_SELECT,
+ BRCMNAND_CS_XOR,
+ BRCMNAND_LL_OP,
+ BRCMNAND_CS0_BASE,
+ BRCMNAND_CS1_BASE, /* CS1 regs, if non-contiguous */
+ BRCMNAND_CORR_THRESHOLD,
+ BRCMNAND_CORR_THRESHOLD_EXT,
+ BRCMNAND_UNCORR_COUNT,
+ BRCMNAND_CORR_COUNT,
+ BRCMNAND_CORR_EXT_ADDR,
+ BRCMNAND_CORR_ADDR,
+ BRCMNAND_UNCORR_EXT_ADDR,
+ BRCMNAND_UNCORR_ADDR,
+ BRCMNAND_SEMAPHORE,
+ BRCMNAND_ID,
+ BRCMNAND_ID_EXT,
+ BRCMNAND_LL_RDATA,
+ BRCMNAND_OOB_READ_BASE,
+ BRCMNAND_OOB_READ_10_BASE, /* offset 0x10, if non-contiguous */
+ BRCMNAND_OOB_WRITE_BASE,
+ BRCMNAND_OOB_WRITE_10_BASE, /* offset 0x10, if non-contiguous */
+ BRCMNAND_FC_BASE,
+};
+
+/* BRCMNAND v4.0 */
+static const u16 brcmnand_regs_v40[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x6c,
+ [BRCMNAND_CS_SELECT] = 0x14,
+ [BRCMNAND_CS_XOR] = 0x18,
+ [BRCMNAND_LL_OP] = 0x178,
+ [BRCMNAND_CS0_BASE] = 0x40,
+ [BRCMNAND_CS1_BASE] = 0xd0,
+ [BRCMNAND_CORR_THRESHOLD] = 0x84,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
+ [BRCMNAND_UNCORR_COUNT] = 0,
+ [BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x70,
+ [BRCMNAND_CORR_ADDR] = 0x74,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
+ [BRCMNAND_UNCORR_ADDR] = 0x7c,
+ [BRCMNAND_SEMAPHORE] = 0x58,
+ [BRCMNAND_ID] = 0x60,
+ [BRCMNAND_ID_EXT] = 0x64,
+ [BRCMNAND_LL_RDATA] = 0x17c,
+ [BRCMNAND_OOB_READ_BASE] = 0x20,
+ [BRCMNAND_OOB_READ_10_BASE] = 0x130,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x30,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x200,
+};
+
+/* BRCMNAND v5.0 */
+static const u16 brcmnand_regs_v50[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x6c,
+ [BRCMNAND_CS_SELECT] = 0x14,
+ [BRCMNAND_CS_XOR] = 0x18,
+ [BRCMNAND_LL_OP] = 0x178,
+ [BRCMNAND_CS0_BASE] = 0x40,
+ [BRCMNAND_CS1_BASE] = 0xd0,
+ [BRCMNAND_CORR_THRESHOLD] = 0x84,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
+ [BRCMNAND_UNCORR_COUNT] = 0,
+ [BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x70,
+ [BRCMNAND_CORR_ADDR] = 0x74,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
+ [BRCMNAND_UNCORR_ADDR] = 0x7c,
+ [BRCMNAND_SEMAPHORE] = 0x58,
+ [BRCMNAND_ID] = 0x60,
+ [BRCMNAND_ID_EXT] = 0x64,
+ [BRCMNAND_LL_RDATA] = 0x17c,
+ [BRCMNAND_OOB_READ_BASE] = 0x20,
+ [BRCMNAND_OOB_READ_10_BASE] = 0x130,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x30,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0x140,
+ [BRCMNAND_FC_BASE] = 0x200,
+};
+
+/* BRCMNAND v6.0 - v7.1 */
+static const u16 brcmnand_regs_v60[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xc0,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x400,
+};
+
+enum brcmnand_cs_reg {
+ BRCMNAND_CS_CFG_EXT = 0,
+ BRCMNAND_CS_CFG,
+ BRCMNAND_CS_ACC_CONTROL,
+ BRCMNAND_CS_TIMING1,
+ BRCMNAND_CS_TIMING2,
+};
+
+/* Per chip-select offsets for v7.1 */
+static const u8 brcmnand_cs_offsets_v71[] = {
+ [BRCMNAND_CS_ACC_CONTROL] = 0x00,
+ [BRCMNAND_CS_CFG_EXT] = 0x04,
+ [BRCMNAND_CS_CFG] = 0x08,
+ [BRCMNAND_CS_TIMING1] = 0x0c,
+ [BRCMNAND_CS_TIMING2] = 0x10,
+};
+
+/* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
+static const u8 brcmnand_cs_offsets[] = {
+ [BRCMNAND_CS_ACC_CONTROL] = 0x00,
+ [BRCMNAND_CS_CFG_EXT] = 0x04,
+ [BRCMNAND_CS_CFG] = 0x04,
+ [BRCMNAND_CS_TIMING1] = 0x08,
+ [BRCMNAND_CS_TIMING2] = 0x0c,
+};
+
+/* Per chip-select offset for <= v5.0 on CS0 only */
+static const u8 brcmnand_cs_offsets_cs0[] = {
+ [BRCMNAND_CS_ACC_CONTROL] = 0x00,
+ [BRCMNAND_CS_CFG_EXT] = 0x08,
+ [BRCMNAND_CS_CFG] = 0x08,
+ [BRCMNAND_CS_TIMING1] = 0x10,
+ [BRCMNAND_CS_TIMING2] = 0x14,
+};
+
+/*
+ * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
+ * one config register, but once the bitfields overflowed, newer controllers
+ * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
+ */
+enum {
+ CFG_BLK_ADR_BYTES_SHIFT = 8,
+ CFG_COL_ADR_BYTES_SHIFT = 12,
+ CFG_FUL_ADR_BYTES_SHIFT = 16,
+ CFG_BUS_WIDTH_SHIFT = 23,
+ CFG_BUS_WIDTH = BIT(CFG_BUS_WIDTH_SHIFT),
+ CFG_DEVICE_SIZE_SHIFT = 24,
+
+ /* Only for pre-v7.1 (with no CFG_EXT register) */
+ CFG_PAGE_SIZE_SHIFT = 20,
+ CFG_BLK_SIZE_SHIFT = 28,
+
+ /* Only for v7.1+ (with CFG_EXT register) */
+ CFG_EXT_PAGE_SIZE_SHIFT = 0,
+ CFG_EXT_BLK_SIZE_SHIFT = 4,
+};
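+
+/*
+ * Illustrative sketch (for clarity only; this helper and its parameter names
+ * are hypothetical, and the *_code arguments are the hardware's encoded size
+ * selectors rather than byte counts): packing a pre-v7.1 CONFIG word from the
+ * shifts above.
+ */
+static inline u32 brcmnand_cfg_pack_example(u32 blk_adr_bytes,
+ u32 col_adr_bytes, u32 ful_adr_bytes,
+ bool bus_width_16, u32 device_size_code,
+ u32 page_size_code, u32 block_size_code)
+{
+ return (blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
+ (col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
+ (ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
+ (bus_width_16 ? CFG_BUS_WIDTH : 0) |
+ (device_size_code << CFG_DEVICE_SIZE_SHIFT) |
+ (page_size_code << CFG_PAGE_SIZE_SHIFT) |
+ (block_size_code << CFG_BLK_SIZE_SHIFT);
+}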
+
+/* BRCMNAND_INTFC_STATUS */
+enum {
+ INTFC_FLASH_STATUS = GENMASK(7, 0),
+
+ INTFC_ERASED = BIT(27),
+ INTFC_OOB_VALID = BIT(28),
+ INTFC_CACHE_VALID = BIT(29),
+ INTFC_FLASH_READY = BIT(30),
+ INTFC_CTLR_READY = BIT(31),
+};
+
+static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
+{
+ return brcmnand_readl(ctrl->nand_base + offs);
+}
+
+static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
+ u32 val)
+{
+ brcmnand_writel(val, ctrl->nand_base + offs);
+}
+
+static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
+{
+ static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
+ static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
+ static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 };
+
+ ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
+
+ /* Only support v4.0+? */
+ if (ctrl->nand_version < 0x0400) {
+ dev_err(ctrl->dev, "version %#x not supported\n",
+ ctrl->nand_version);
+ return -ENODEV;
+ }
+
+ /* Register offsets */
+ if (ctrl->nand_version >= 0x0600)
+ ctrl->reg_offsets = brcmnand_regs_v60;
+ else if (ctrl->nand_version >= 0x0500)
+ ctrl->reg_offsets = brcmnand_regs_v50;
+ else if (ctrl->nand_version >= 0x0400)
+ ctrl->reg_offsets = brcmnand_regs_v40;
+
+ /* Chip-select stride */
+ if (ctrl->nand_version >= 0x0701)
+ ctrl->reg_spacing = 0x14;
+ else
+ ctrl->reg_spacing = 0x10;
+
+ /* Per chip-select registers */
+ if (ctrl->nand_version >= 0x0701) {
+ ctrl->cs_offsets = brcmnand_cs_offsets_v71;
+ } else {
+ ctrl->cs_offsets = brcmnand_cs_offsets;
+
+ /* v5.0 and earlier has a different CS0 offset layout */
+ if (ctrl->nand_version <= 0x0500)
+ ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
+ }
+
+ /* Page / block sizes */
+ if (ctrl->nand_version >= 0x0701) {
+ /* >= v7.1 use nice power-of-2 values! */
+ ctrl->max_page_size = 16 * 1024;
+ ctrl->max_block_size = 2 * 1024 * 1024;
+ } else {
+ ctrl->page_sizes = page_sizes;
+ if (ctrl->nand_version >= 0x0600)
+ ctrl->block_sizes = block_sizes_v6;
+ else
+ ctrl->block_sizes = block_sizes_v4;
+
+ if (ctrl->nand_version < 0x0400) {
+ ctrl->max_page_size = 4096;
+ ctrl->max_block_size = 512 * 1024;
+ }
+ }
+
+ /* Maximum spare area sector size (per 512B) */
+ if (ctrl->nand_version >= 0x0600)
+ ctrl->max_oob = 64;
+ else if (ctrl->nand_version >= 0x0500)
+ ctrl->max_oob = 32;
+ else
+ ctrl->max_oob = 16;
+
+ /* v6.0 and newer (except v6.1) have prefetch support */
+ if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
+ ctrl->features |= BRCMNAND_HAS_PREFETCH;
+
+ /*
+ * v6.x has cache mode, but it's implemented differently. Ignore it for
+ * now.
+ */
+ if (ctrl->nand_version >= 0x0700)
+ ctrl->features |= BRCMNAND_HAS_CACHE_MODE;
+
+ if (ctrl->nand_version >= 0x0500)
+ ctrl->features |= BRCMNAND_HAS_1K_SECTORS;
+
+ if (ctrl->nand_version >= 0x0700)
+ ctrl->features |= BRCMNAND_HAS_WP;
+ else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
+ ctrl->features |= BRCMNAND_HAS_WP;
+
+ return 0;
+}
+
+static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
+ enum brcmnand_reg reg)
+{
+ u16 offs = ctrl->reg_offsets[reg];
+
+ if (offs)
+ return nand_readreg(ctrl, offs);
+ else
+ return 0;
+}
+
+static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
+ enum brcmnand_reg reg, u32 val)
+{
+ u16 offs = ctrl->reg_offsets[reg];
+
+ if (offs)
+ nand_writereg(ctrl, offs, val);
+}
+
+static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
+ enum brcmnand_reg reg, u32 mask, unsigned
+ int shift, u32 val)
+{
+ u32 tmp = brcmnand_read_reg(ctrl, reg);
+
+ tmp &= ~mask;
+ tmp |= val << shift;
+ brcmnand_write_reg(ctrl, reg, tmp);
+}
+
+static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
+{
+ return __raw_readl(ctrl->nand_fc + word * 4);
+}
+
+static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
+ int word, u32 val)
+{
+ __raw_writel(val, ctrl->nand_fc + word * 4);
+}
+
+static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
+ enum brcmnand_cs_reg reg)
+{
+ u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
+ u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
+ u8 cs_offs;
+
+ if (cs == 0 && ctrl->cs0_offsets)
+ cs_offs = ctrl->cs0_offsets[reg];
+ else
+ cs_offs = ctrl->cs_offsets[reg];
+
+ if (cs && offs_cs1)
+ return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;
+
+ return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
+}
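+
+/*
+ * Worked example (for clarity): on a v6.0 controller, CS0_BASE is 0x50,
+ * CS1_BASE is 0 and reg_spacing is 0x10, so ACC_CONTROL for CS2 resolves to
+ * 0x50 + 2 * 0x10 + 0x00 = 0x70.
+ */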
+
+static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version < 0x0600)
+ return 1;
+ return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
+}
+
+static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned int shift = 0, bits;
+ enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
+ int cs = host->cs;
+
+ if (ctrl->nand_version >= 0x0600)
+ bits = 6;
+ else if (ctrl->nand_version >= 0x0500)
+ bits = 5;
+ else
+ bits = 4;
+
+ if (ctrl->nand_version >= 0x0600) {
+ if (cs >= 5)
+ reg = BRCMNAND_CORR_THRESHOLD_EXT;
+ shift = (cs % 5) * bits;
+ }
+ brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
+}
+
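+/*
+ * Note (for clarity): controllers before v7.0 take the native command opcode
+ * in the top byte of CMD_START, hence the shift by 24; v7.0+ controllers take
+ * it in the low bits.
+ */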
+static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version < 0x0700)
+ return 24;
+ return 0;
+}
+
+/***********************************************************************
+ * NAND ACC CONTROL bitfield
+ *
+ * Some bits have remained constant throughout hardware revision, while
+ * others have shifted around.
+ ***********************************************************************/
+
+/* Constant for all versions (where supported) */
+enum {
+ /* See BRCMNAND_HAS_CACHE_MODE */
+ ACC_CONTROL_CACHE_MODE = BIT(22),
+
+ /* See BRCMNAND_HAS_PREFETCH */
+ ACC_CONTROL_PREFETCH = BIT(23),
+
+ ACC_CONTROL_PAGE_HIT = BIT(24),
+ ACC_CONTROL_WR_PREEMPT = BIT(25),
+ ACC_CONTROL_PARTIAL_PAGE = BIT(26),
+ ACC_CONTROL_RD_ERASED = BIT(27),
+ ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
+ ACC_CONTROL_WR_ECC = BIT(30),
+ ACC_CONTROL_RD_ECC = BIT(31),
+};
+
+static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version >= 0x0600)
+ return GENMASK(6, 0);
+ else
+ return GENMASK(5, 0);
+}
+
+#define NAND_ACC_CONTROL_ECC_SHIFT 16
+
+static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
+{
+ u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
+
+ return mask << NAND_ACC_CONTROL_ECC_SHIFT;
+}
+
+static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
+ u32 acc_control = nand_readreg(ctrl, offs);
+ u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;
+
+ if (en) {
+ acc_control |= ecc_flags; /* enable RD/WR ECC */
+ acc_control |= host->hwcfg.ecc_level
+ << NAND_ACC_CONTROL_ECC_SHIFT;
+ } else {
+ acc_control &= ~ecc_flags; /* disable RD/WR ECC */
+ acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+ }
+
+ nand_writereg(ctrl, offs, acc_control);
+}
+
+static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version >= 0x0600)
+ return 7;
+ else if (ctrl->nand_version >= 0x0500)
+ return 6;
+ else
+ return -1;
+}
+
+static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int shift = brcmnand_sector_1k_shift(ctrl);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+
+ if (shift < 0)
+ return 0;
+
+ return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
+}
+
+static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int shift = brcmnand_sector_1k_shift(ctrl);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u32 tmp;
+
+ if (shift < 0)
+ return;
+
+ tmp = nand_readreg(ctrl, acc_control_offs);
+ tmp &= ~(1 << shift);
+ tmp |= (!!val) << shift;
+ nand_writereg(ctrl, acc_control_offs, tmp);
+}
+
+/***********************************************************************
+ * CS_NAND_SELECT
+ ***********************************************************************/
+
+enum {
+ CS_SELECT_NAND_WP = BIT(29),
+ CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
+};
+
+static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
+{
+ u32 val = en ? CS_SELECT_NAND_WP : 0;
+
+ brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
+}
+
+/***********************************************************************
+ * Flash DMA
+ ***********************************************************************/
+
+enum flash_dma_reg {
+ FLASH_DMA_REVISION = 0x00,
+ FLASH_DMA_FIRST_DESC = 0x04,
+ FLASH_DMA_FIRST_DESC_EXT = 0x08,
+ FLASH_DMA_CTRL = 0x0c,
+ FLASH_DMA_MODE = 0x10,
+ FLASH_DMA_STATUS = 0x14,
+ FLASH_DMA_INTERRUPT_DESC = 0x18,
+ FLASH_DMA_INTERRUPT_DESC_EXT = 0x1c,
+ FLASH_DMA_ERROR_STATUS = 0x20,
+ FLASH_DMA_CURRENT_DESC = 0x24,
+ FLASH_DMA_CURRENT_DESC_EXT = 0x28,
+};
+
+static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
+{
+ return ctrl->flash_dma_base;
+}
+
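+/*
+ * Note (for clarity): the FLASH_DMA path is only used for buffers that are
+ * non-NULL, not vmalloc()ed (dma_map_single() needs linearly-mapped memory)
+ * and 32-bit aligned; anything else falls back to the PIO path.
+ */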
+static inline bool flash_dma_buf_ok(const void *buf)
+{
+ return buf && !is_vmalloc_addr(buf) &&
+ likely(IS_ALIGNED((uintptr_t)buf, 4));
+}
+
+static inline void flash_dma_writel(struct brcmnand_controller *ctrl, u8 offs,
+ u32 val)
+{
+ brcmnand_writel(val, ctrl->flash_dma_base + offs);
+}
+
+static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, u8 offs)
+{
+ return brcmnand_readl(ctrl->flash_dma_base + offs);
+}
+
+/* Low-level operation types: command, address, write, or read */
+enum brcmnand_llop_type {
+ LL_OP_CMD,
+ LL_OP_ADDR,
+ LL_OP_WR,
+ LL_OP_RD,
+};
+
+/***********************************************************************
+ * Internal support functions
+ ***********************************************************************/
+
+static inline bool is_hamming_ecc(struct brcmnand_cfg *cfg)
+{
+ return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
+ cfg->ecc_level == 15;
+}
+
+/*
+ * Returns a nand_ecclayout structure for the given layout/configuration.
+ * Returns NULL on failure.
+ */
+static struct nand_ecclayout *brcmnand_create_layout(int ecc_level,
+ struct brcmnand_host *host)
+{
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int i, j;
+ struct nand_ecclayout *layout;
+ int req;
+ int sectors;
+ int sas;
+ int idx1, idx2;
+
+ layout = devm_kzalloc(&host->pdev->dev, sizeof(*layout), GFP_KERNEL);
+ if (!layout)
+ return NULL;
+
+ sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+ sas = cfg->spare_area_size << cfg->sector_size_1k;
+
+ /* Hamming */
+ if (is_hamming_ecc(cfg)) {
+ for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
+ /* First sector of each page may have BBI */
+ if (i == 0) {
+ layout->oobfree[idx2].offset = i * sas + 1;
+ /* Small-page NAND use byte 6 for BBI */
+ if (cfg->page_size == 512)
+ layout->oobfree[idx2].offset--;
+ layout->oobfree[idx2].length = 5;
+ } else {
+ layout->oobfree[idx2].offset = i * sas;
+ layout->oobfree[idx2].length = 6;
+ }
+ idx2++;
+ layout->eccpos[idx1++] = i * sas + 6;
+ layout->eccpos[idx1++] = i * sas + 7;
+ layout->eccpos[idx1++] = i * sas + 8;
+ layout->oobfree[idx2].offset = i * sas + 9;
+ layout->oobfree[idx2].length = 7;
+ idx2++;
+ /* Leave zero-terminated entry for OOBFREE */
+ if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
+ idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
+ break;
+ }
+ goto out;
+ }
+
+ /*
+ * CONTROLLER_VERSION:
+ * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
+ * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
+ * But we will just be conservative.
+ */
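+ /* e.g. BCH-8 needs DIV_ROUND_UP(8 * 14, 8) = 14 ECC bytes per sector */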
+ req = DIV_ROUND_UP(ecc_level * 14, 8);
+ if (req >= sas) {
+ dev_err(&host->pdev->dev,
+ "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
+ req, sas);
+ return NULL;
+ }
+
+ layout->eccbytes = req * sectors;
+ for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
+ for (j = sas - req; j < sas && idx1 <
+ MTD_MAX_ECCPOS_ENTRIES_LARGE; j++, idx1++)
+ layout->eccpos[idx1] = i * sas + j;
+
+ /* First sector of each page may have BBI */
+ if (i == 0) {
+ if (cfg->page_size == 512 && (sas - req >= 6)) {
+ /* Small-page NAND use byte 6 for BBI */
+ layout->oobfree[idx2].offset = 0;
+ layout->oobfree[idx2].length = 5;
+ idx2++;
+ if (sas - req > 6) {
+ layout->oobfree[idx2].offset = 6;
+ layout->oobfree[idx2].length =
+ sas - req - 6;
+ idx2++;
+ }
+ } else if (sas > req + 1) {
+ layout->oobfree[idx2].offset = i * sas + 1;
+ layout->oobfree[idx2].length = sas - req - 1;
+ idx2++;
+ }
+ } else if (sas > req) {
+ layout->oobfree[idx2].offset = i * sas;
+ layout->oobfree[idx2].length = sas - req;
+ idx2++;
+ }
+ /* Leave zero-terminated entry for OOBFREE */
+ if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
+ idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
+ break;
+ }
+out:
+ /* Sum available OOB */
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE; i++)
+ layout->oobavail += layout->oobfree[i].length;
+ return layout;
+}
+
+static struct nand_ecclayout *brcmstb_choose_ecc_layout(
+ struct brcmnand_host *host)
+{
+ struct nand_ecclayout *layout;
+ struct brcmnand_cfg *p = &host->hwcfg;
+ unsigned int ecc_level = p->ecc_level;
+
+ if (p->sector_size_1k)
+ ecc_level <<= 1;
+
+ layout = brcmnand_create_layout(ecc_level, host);
+ if (!layout) {
+ dev_err(&host->pdev->dev,
+ "no proper ecc_layout for this NAND cfg\n");
+ return NULL;
+ }
+
+ return layout;
+}
+
+static void brcmnand_wp(struct mtd_info *mtd, int wp)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct brcmnand_host *host = chip->priv;
+ struct brcmnand_controller *ctrl = host->ctrl;
+
+ if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
+ static int old_wp = -1;
+
+ if (old_wp != wp) {
+ dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
+ old_wp = wp;
+ }
+ brcmnand_set_wp(ctrl, wp);
+ }
+}
+
+/* Helper functions for reading and writing OOB registers */
+static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
+{
+ u16 offset0, offset10, reg_offs;
+
+ offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
+ offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];
+
+ if (offs >= ctrl->max_oob)
+ return 0x77;
+
+ if (offs >= 16 && offset10)
+ reg_offs = offset10 + ((offs - 0x10) & ~0x03);
+ else
+ reg_offs = offset0 + (offs & ~0x03);
+
+ return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
+}
+
+static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
+ u32 data)
+{
+ u16 offset0, offset10, reg_offs;
+
+ offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
+ offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];
+
+ if (offs >= ctrl->max_oob)
+ return;
+
+ if (offs >= 16 && offset10)
+ reg_offs = offset10 + ((offs - 0x10) & ~0x03);
+ else
+ reg_offs = offset0 + (offs & ~0x03);
+
+ nand_writereg(ctrl, reg_offs, data);
+}
+
+/*
+ * read_oob_from_regs - read data from OOB registers
+ * @ctrl: NAND controller
+ * @i: sub-page sector index
+ * @oob: buffer to read to
+ * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
+ * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
+ */
+static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
+ int sas, int sector_1k)
+{
+ int tbytes = sas << sector_1k;
+ int j;
+
+ /* Adjust OOB values for 1K sector size */
+ if (sector_1k && (i & 0x01))
+ tbytes = max(0, tbytes - (int)ctrl->max_oob);
+ tbytes = min_t(int, tbytes, ctrl->max_oob);
+
+ for (j = 0; j < tbytes; j++)
+ oob[j] = oob_reg_read(ctrl, j);
+ return tbytes;
+}
+
+/*
+ * write_oob_to_regs - write data to OOB registers
+ * @i: sub-page sector index
+ * @oob: buffer to write from
+ * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
+ * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
+ */
+static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
+ const u8 *oob, int sas, int sector_1k)
+{
+ int tbytes = sas << sector_1k;
+ int j;
+
+ /* Adjust OOB values for 1K sector size */
+ if (sector_1k && (i & 0x01))
+ tbytes = max(0, tbytes - (int)ctrl->max_oob);
+ tbytes = min_t(int, tbytes, ctrl->max_oob);
+
+ for (j = 0; j < tbytes; j += 4)
+ oob_reg_write(ctrl, j,
+ (oob[j + 0] << 24) |
+ (oob[j + 1] << 16) |
+ (oob[j + 2] << 8) |
+ (oob[j + 3] << 0));
+ return tbytes;
+}
+
+static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ /* Discard all NAND_CTLRDY interrupts during DMA */
+ if (ctrl->dma_pending)
+ return IRQ_HANDLED;
+
+ complete(&ctrl->done);
+ return IRQ_HANDLED;
+}
+
+/* Handle SoC-specific interrupt hardware */
+static irqreturn_t brcmnand_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ if (ctrl->soc->ctlrdy_ack(ctrl->soc))
+ return brcmnand_ctlrdy_irq(irq, data);
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t brcmnand_dma_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ complete(&ctrl->dma_done);
+
+ return IRQ_HANDLED;
+}
+
+static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u32 intfc;
+
+ dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
+ brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
+ BUG_ON(ctrl->cmd_pending != 0);
+ ctrl->cmd_pending = cmd;
+
+ intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+ BUG_ON(!(intfc & INTFC_CTLR_READY));
+
+ mb(); /* flush previous writes */
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
+ cmd << brcmnand_cmd_shift(ctrl));
+}
+
+/***********************************************************************
+ * NAND MTD API: read/program/erase
+ ***********************************************************************/
+
+static void brcmnand_cmd_ctrl(struct mtd_info *mtd, int dat,
+ unsigned int ctrl)
+{
+ /* intentionally left blank */
+}
+
+static int brcmnand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct brcmnand_host *host = chip->priv;
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned long timeo = msecs_to_jiffies(100);
+
+ dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
+ if (ctrl->cmd_pending &&
+ wait_for_completion_timeout(&ctrl->done, timeo) <= 0) {
+ u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
+ >> brcmnand_cmd_shift(ctrl);
+
+ dev_err_ratelimited(ctrl->dev,
+ "timeout waiting for command %#02x\n", cmd);
+ dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
+ brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
+ }
+ ctrl->cmd_pending = 0;
+ return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_FLASH_STATUS;
+}
+
+enum {
+ LLOP_RE = BIT(16),
+ LLOP_WE = BIT(17),
+ LLOP_ALE = BIT(18),
+ LLOP_CLE = BIT(19),
+ LLOP_RETURN_IDLE = BIT(31),
+
+ LLOP_DATA_MASK = GENMASK(15, 0),
+};
+
+static int brcmnand_low_level_op(struct brcmnand_host *host,
+ enum brcmnand_llop_type type, u32 data,
+ bool last_op)
+{
+ struct mtd_info *mtd = &host->mtd;
+ struct nand_chip *chip = &host->chip;
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u32 tmp;
+
+ tmp = data & LLOP_DATA_MASK;
+ switch (type) {
+ case LL_OP_CMD:
+ tmp |= LLOP_WE | LLOP_CLE;
+ break;
+ case LL_OP_ADDR:
+ /* WE | ALE */
+ tmp |= LLOP_WE | LLOP_ALE;
+ break;
+ case LL_OP_WR:
+ /* WE */
+ tmp |= LLOP_WE;
+ break;
+ case LL_OP_RD:
+ /* RE */
+ tmp |= LLOP_RE;
+ break;
+ }
+ if (last_op)
+ /* RETURN_IDLE */
+ tmp |= LLOP_RETURN_IDLE;
+
+ dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);
+
+ brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);
+
+ brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
+ return brcmnand_waitfunc(mtd, chip);
+}
+
+static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct brcmnand_host *host = chip->priv;
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u64 addr = (u64)page_addr << chip->page_shift;
+ int native_cmd = 0;
+
+ if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
+ command == NAND_CMD_RNDOUT)
+ addr = (u64)column;
+ /* Avoid propagating a negative, don't-care address */
+ else if (page_addr < 0)
+ addr = 0;
+
+ dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
+ (unsigned long long)addr);
+
+ host->last_cmd = command;
+ host->last_byte = 0;
+ host->last_addr = addr;
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ native_cmd = CMD_FLASH_RESET;
+ break;
+ case NAND_CMD_STATUS:
+ native_cmd = CMD_STATUS_READ;
+ break;
+ case NAND_CMD_READID:
+ native_cmd = CMD_DEVICE_ID_READ;
+ break;
+ case NAND_CMD_READOOB:
+ native_cmd = CMD_SPARE_AREA_READ;
+ break;
+ case NAND_CMD_ERASE1:
+ native_cmd = CMD_BLOCK_ERASE;
+ brcmnand_wp(mtd, 0);
+ break;
+ case NAND_CMD_PARAM:
+ native_cmd = CMD_PARAMETER_READ;
+ break;
+ case NAND_CMD_SET_FEATURES:
+ case NAND_CMD_GET_FEATURES:
+ brcmnand_low_level_op(host, LL_OP_CMD, command, false);
+ brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
+ break;
+ case NAND_CMD_RNDOUT:
+ native_cmd = CMD_PARAMETER_CHANGE_COL;
+ addr &= ~((u64)(FC_BYTES - 1));
+ /*
+ * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
+ * NB: hwcfg.sector_size_1k may not be initialized yet
+ */
+ if (brcmnand_get_sector_size_1k(host)) {
+ host->hwcfg.sector_size_1k =
+ brcmnand_get_sector_size_1k(host);
+ brcmnand_set_sector_size_1k(host, 0);
+ }
+ break;
+ }
+
+ if (!native_cmd)
+ return;
+
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+ (host->cs << 16) | ((addr >> 32) & 0xffff));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+
+ brcmnand_send_cmd(host, native_cmd);
+ brcmnand_waitfunc(mtd, chip);
+
+ if (native_cmd == CMD_PARAMETER_READ ||
+ native_cmd == CMD_PARAMETER_CHANGE_COL) {
+ int i;
+
+ brcmnand_soc_data_bus_prepare(ctrl->soc);
+
+ /*
+ * Must cache the FLASH_CACHE now, since changes in
+ * SECTOR_SIZE_1K may invalidate it
+ */
+ for (i = 0; i < FC_WORDS; i++)
+ ctrl->flash_cache[i] = brcmnand_read_fc(ctrl, i);
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc);
+
+ /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
+ if (host->hwcfg.sector_size_1k)
+ brcmnand_set_sector_size_1k(host,
+ host->hwcfg.sector_size_1k);
+ }
+
+ /* Re-enabling protection is necessary only after erase */
+ if (command == NAND_CMD_ERASE1)
+ brcmnand_wp(mtd, 1);
+}
+
+static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct brcmnand_host *host = chip->priv;
+ struct brcmnand_controller *ctrl = host->ctrl;
+ uint8_t ret = 0;
+ int addr, offs;
+
+ switch (host->last_cmd) {
+ case NAND_CMD_READID:
+ if (host->last_byte < 4)
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
+ (24 - (host->last_byte << 3));
+ else if (host->last_byte < 8)
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
+ (56 - (host->last_byte << 3));
+ break;
+
+ case NAND_CMD_READOOB:
+ ret = oob_reg_read(ctrl, host->last_byte);
+ break;
+
+ case NAND_CMD_STATUS:
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_FLASH_STATUS;
+ if (wp_on) /* hide WP status */
+ ret |= NAND_STATUS_WP;
+ break;
+
+ case NAND_CMD_PARAM:
+ case NAND_CMD_RNDOUT:
+ addr = host->last_addr + host->last_byte;
+ offs = addr & (FC_BYTES - 1);
+
+ /* At FC_BYTES boundary, switch to next column */
+ if (host->last_byte > 0 && offs == 0)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, addr, -1);
+
+ ret = ctrl->flash_cache[offs >> 2] >>
+ (24 - ((offs & 0x03) << 3));
+ break;
+ case NAND_CMD_GET_FEATURES:
+ if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
+ ret = 0;
+ } else {
+ bool last = host->last_byte ==
+ ONFI_SUBFEATURE_PARAM_LEN - 1;
+ brcmnand_low_level_op(host, LL_OP_RD, 0, last);
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
+ }
+ }
+
+ dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
+ host->last_byte++;
+
+ return ret;
+}
+
+static void brcmnand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++, buf++)
+ *buf = brcmnand_read_byte(mtd);
+}
+
+static void brcmnand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ int i;
+ struct nand_chip *chip = mtd->priv;
+ struct brcmnand_host *host = chip->priv;
+
+ switch (host->last_cmd) {
+ case NAND_CMD_SET_FEATURES:
+ for (i = 0; i < len; i++)
+ brcmnand_low_level_op(host, LL_OP_WR, buf[i],
+ (i + 1) == len);
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+/**
+ * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
+ * following ahead of time:
+ * - Is this descriptor the beginning or end of a linked list?
+ * - What is the (DMA) address of the next descriptor in the linked list?
+ */
+static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
+ struct brcm_nand_dma_desc *desc, u64 addr,
+ dma_addr_t buf, u32 len, u8 dma_cmd,
+ bool begin, bool end,
+ dma_addr_t next_desc)
+{
+ memset(desc, 0, sizeof(*desc));
+ /* Descriptors are written in native byte order (wordwise) */
+ desc->next_desc = lower_32_bits(next_desc);
+ desc->next_desc_ext = upper_32_bits(next_desc);
+ desc->cmd_irq = (dma_cmd << 24) |
+ (end ? (0x03 << 8) : 0) | /* IRQ | STOP */
+ (!!begin) | ((!!end) << 1); /* head, tail */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ desc->cmd_irq |= 0x01 << 12;
+#endif
+ desc->dram_addr = lower_32_bits(buf);
+ desc->dram_addr_ext = upper_32_bits(buf);
+ desc->tfr_len = len;
+ desc->total_len = len;
+ desc->flash_addr = lower_32_bits(addr);
+ desc->flash_addr_ext = upper_32_bits(addr);
+ desc->cs = host->cs;
+ desc->status_valid = 0x01;
+ return 0;
+}
+
+/**
+ * Kick the FLASH_DMA engine, with a given DMA descriptor
+ */
+static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned long timeo = msecs_to_jiffies(100);
+
+ flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
+ (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
+ flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
+ (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+
+ /* Start FLASH_DMA engine */
+ ctrl->dma_pending = true;
+ mb(); /* flush previous writes */
+ flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */
+
+ if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
+ dev_err(ctrl->dev,
+ "timeout waiting for DMA; status %#x, error status %#x\n",
+ flash_dma_readl(ctrl, FLASH_DMA_STATUS),
+ flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
+ }
+ ctrl->dma_pending = false;
+ flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
+}
+
+static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u32 len, u8 dma_cmd)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ dma_addr_t buf_pa;
+ int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
+ if (dma_mapping_error(ctrl->dev, buf_pa)) {
+ dev_err(ctrl->dev, "unable to map buffer for DMA\n");
+ return -ENOMEM;
+ }
+
+ brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
+ dma_cmd, true, true, 0);
+
+ brcmnand_dma_run(host, ctrl->dma_pa);
+
+ dma_unmap_single(ctrl->dev, buf_pa, len, dir);
+
+ if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
+ return -EBADMSG;
+ else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
+ return -EUCLEAN;
+
+ return 0;
+}
+
+/*
+ * Assumes proper CS is already set
+ */
+static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
+ u64 addr, unsigned int trans, u32 *buf,
+ u8 *oob, u64 *err_addr)
+{
+ struct brcmnand_host *host = chip->priv;
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int i, j, ret = 0;
+
+ /* Clear error addresses */
+ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
+
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+ (host->cs << 16) | ((addr >> 32) & 0xffff));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+
+ for (i = 0; i < trans; i++, addr += FC_BYTES) {
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+ lower_32_bits(addr));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+ /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
+ brcmnand_send_cmd(host, CMD_PAGE_READ);
+ brcmnand_waitfunc(mtd, chip);
+
+ if (likely(buf)) {
+ brcmnand_soc_data_bus_prepare(ctrl->soc);
+
+ for (j = 0; j < FC_WORDS; j++, buf++)
+ *buf = brcmnand_read_fc(ctrl, j);
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc);
+ }
+
+ if (oob)
+ oob += read_oob_from_regs(ctrl, i, oob,
+ mtd->oobsize / trans,
+ host->hwcfg.sector_size_1k);
+
+ if (!ret) {
+ *err_addr = brcmnand_read_reg(ctrl,
+ BRCMNAND_UNCORR_ADDR) |
+ ((u64)(brcmnand_read_reg(ctrl,
+ BRCMNAND_UNCORR_EXT_ADDR)
+ & 0xffff) << 32);
+ if (*err_addr)
+ ret = -EBADMSG;
+ }
+
+ if (!ret) {
+ *err_addr = brcmnand_read_reg(ctrl,
+ BRCMNAND_CORR_ADDR) |
+ ((u64)(brcmnand_read_reg(ctrl,
+ BRCMNAND_CORR_EXT_ADDR)
+ & 0xffff) << 32);
+ if (*err_addr)
+ ret = -EUCLEAN;
+ }
+ }
+
+ return ret;
+}
+
+static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
+ u64 addr, unsigned int trans, u32 *buf, u8 *oob)
+{
+ struct brcmnand_host *host = chip->priv;
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u64 err_addr = 0;
+ int err;
+
+ dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
+
+ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
+
+ if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
+ err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
+ CMD_PAGE_READ);
+ if (err) {
+ if (mtd_is_bitflip_or_eccerr(err))
+ err_addr = addr;
+ else
+ return -EIO;
+ }
+ } else {
+ if (oob)
+ memset(oob, 0x99, mtd->oobsize);
+
+ err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
+ oob, &err_addr);
+ }
+
+ if (mtd_is_eccerr(err)) {
+ dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
+ (unsigned long long)err_addr);
+ mtd->ecc_stats.failed++;
+ /* NAND layer expects zero on ECC errors */
+ return 0;
+ }
+
+ if (mtd_is_bitflip(err)) {
+ unsigned int corrected = brcmnand_count_corrected(ctrl);
+
+ dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
+ (unsigned long long)err_addr);
+ mtd->ecc_stats.corrected += corrected;
+ /* Always exceed the software-imposed threshold */
+ return max(mtd->bitflip_threshold, corrected);
+ }
+
+ return 0;
+}
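/*
 * A sketch (editor's aside, not from the patch): brcmnand_read() returns the
 * maximum number of corrected bitflips seen in the page; the MTD core
 * compares that value against mtd->bitflip_threshold and turns mtd_read()
 * into -EUCLEAN once the threshold is reached.  Returning
 * max(mtd->bitflip_threshold, corrected) therefore makes sure a
 * hardware-reported correction is never hidden by a larger software
 * threshold.  Minimal plain-C model of that convention; the name is
 * illustrative only.
 */
static inline unsigned int model_read_page_retval(unsigned int corrected,
						  unsigned int threshold)
{
	/* once any correction happened, report at least the threshold */
	return corrected > threshold ? corrected : threshold;
}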
+
+static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct brcmnand_host *host = chip->priv;
+ u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+
+ return brcmnand_read(mtd, chip, host->last_addr,
+ mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+}
+
+static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct brcmnand_host *host = chip->priv;
+ u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+ int ret;
+
+ brcmnand_set_ecc_enabled(host, 0);
+ ret = brcmnand_read(mtd, chip, host->last_addr,
+ mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+ brcmnand_set_ecc_enabled(host, 1);
+ return ret;
+}
+
+static int brcmnand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
+ mtd->writesize >> FC_SHIFT,
+ NULL, (u8 *)chip->oob_poi);
+}
+
+static int brcmnand_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct brcmnand_host *host = chip->priv;
+
+ brcmnand_set_ecc_enabled(host, 0);
+ brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
+ mtd->writesize >> FC_SHIFT,
+ NULL, (u8 *)chip->oob_poi);
+ brcmnand_set_ecc_enabled(host, 1);
+ return 0;
+}
+
+static int brcmnand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t data_offs, uint32_t readlen,
+ uint8_t *bufpoi, int page)
+{
+ struct brcmnand_host *host = chip->priv;
+
+ return brcmnand_read(mtd, chip, host->last_addr + data_offs,
+ readlen >> FC_SHIFT, (u32 *)bufpoi, NULL);
+}
+
+static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
+ u64 addr, const u32 *buf, u8 *oob)
+{
+ struct brcmnand_host *host = chip->priv;
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
+ int status, ret = 0;
+
+ dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
+
+ if (unlikely((unsigned long)buf & 0x03)) {
+ dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
+ buf = (u32 *)((unsigned long)buf & ~0x03);
+ }
+
+ brcmnand_wp(mtd, 0);
+
+ for (i = 0; i < ctrl->max_oob; i += 4)
+ oob_reg_write(ctrl, i, 0xffffffff);
+
+ if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
+ if (brcmnand_dma_trans(host, addr, (u32 *)buf,
+ mtd->writesize, CMD_PROGRAM_PAGE))
+ ret = -EIO;
+ goto out;
+ }
+
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+ (host->cs << 16) | ((addr >> 32) & 0xffff));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+
+ for (i = 0; i < trans; i++, addr += FC_BYTES) {
+ /* full address MUST be set before populating FC */
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+ lower_32_bits(addr));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+
+ if (buf) {
+ brcmnand_soc_data_bus_prepare(ctrl->soc);
+
+ for (j = 0; j < FC_WORDS; j++, buf++)
+ brcmnand_write_fc(ctrl, j, *buf);
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc);
+ } else if (oob) {
+ for (j = 0; j < FC_WORDS; j++)
+ brcmnand_write_fc(ctrl, j, 0xffffffff);
+ }
+
+ if (oob) {
+ oob += write_oob_to_regs(ctrl, i, oob,
+ mtd->oobsize / trans,
+ host->hwcfg.sector_size_1k);
+ }
+
+ /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
+ brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
+ status = brcmnand_waitfunc(mtd, chip);
+
+ if (status & NAND_STATUS_FAIL) {
+ dev_info(ctrl->dev, "program failed at %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ goto out;
+ }
+ }
+out:
+ brcmnand_wp(mtd, 1);
+ return ret;
+}
+
+static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
+{
+ struct brcmnand_host *host = chip->priv;
+ void *oob = oob_required ? chip->oob_poi : NULL;
+
+ brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ return 0;
+}
+
+static int brcmnand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct brcmnand_host *host = chip->priv;
+ void *oob = oob_required ? chip->oob_poi : NULL;
+
+ brcmnand_set_ecc_enabled(host, 0);
+ brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ brcmnand_set_ecc_enabled(host, 1);
+ return 0;
+}
+
+static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return brcmnand_write(mtd, chip, (u64)page << chip->page_shift,
+ NULL, chip->oob_poi);
+}
+
+static int brcmnand_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct brcmnand_host *host = chip->priv;
+ int ret;
+
+ brcmnand_set_ecc_enabled(host, 0);
+ ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
+ (u8 *)chip->oob_poi);
+ brcmnand_set_ecc_enabled(host, 1);
+
+ return ret;
+}
+
+/***********************************************************************
+ * Per-CS setup (1 NAND device)
+ ***********************************************************************/
+
+static int brcmnand_set_cfg(struct brcmnand_host *host,
+ struct brcmnand_cfg *cfg)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct nand_chip *chip = &host->chip;
+ u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+ u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_CFG_EXT);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u8 block_size = 0, page_size = 0, device_size = 0;
+ u32 tmp;
+
+ if (ctrl->block_sizes) {
+ int i, found;
+
+ for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
+ if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
+ block_size = i;
+ found = 1;
+ }
+ if (!found) {
+ dev_warn(ctrl->dev, "invalid block size %u\n",
+ cfg->block_size);
+ return -EINVAL;
+ }
+ } else {
+ block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
+ }
+
+ if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
+ cfg->block_size > ctrl->max_block_size)) {
+ dev_warn(ctrl->dev, "invalid block size %u\n",
+ cfg->block_size);
+ block_size = 0;
+ }
+
+ if (ctrl->page_sizes) {
+ int i, found;
+
+ for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
+ if (ctrl->page_sizes[i] == cfg->page_size) {
+ page_size = i;
+ found = 1;
+ }
+ if (!found) {
+ dev_warn(ctrl->dev, "invalid page size %u\n",
+ cfg->page_size);
+ return -EINVAL;
+ }
+ } else {
+ page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
+ }
+
+ if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
+ cfg->page_size > ctrl->max_page_size)) {
+ dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
+ return -EINVAL;
+ }
+
+ if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
+ dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
+ (unsigned long long)cfg->device_size);
+ return -EINVAL;
+ }
+ device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);
+
+ tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
+ (cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
+ (cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
+ (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
+ (device_size << CFG_DEVICE_SIZE_SHIFT);
+ if (cfg_offs == cfg_ext_offs) {
+ tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) |
+ (block_size << CFG_BLK_SIZE_SHIFT);
+ nand_writereg(ctrl, cfg_offs, tmp);
+ } else {
+ nand_writereg(ctrl, cfg_offs, tmp);
+ tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
+ (block_size << CFG_EXT_BLK_SIZE_SHIFT);
+ nand_writereg(ctrl, cfg_ext_offs, tmp);
+ }
+
+ tmp = nand_readreg(ctrl, acc_control_offs);
+ tmp &= ~brcmnand_ecc_level_mask(ctrl);
+ tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
+ tmp &= ~brcmnand_spare_area_mask(ctrl);
+ tmp |= cfg->spare_area_size;
+ nand_writereg(ctrl, acc_control_offs, tmp);
+
+ brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
+
+ /* threshold = ceil(BCH-level * 0.75) */
+ brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));
+
+ return 0;
+}
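/*
 * A sketch (editor's aside, not from the patch): DIV_ROUND_UP(strength * 3, 4)
 * above is the integer form of ceil(0.75 * strength) used for the
 * correctable-error reporting threshold, e.g. BCH-4 -> 3, BCH-8 -> 6,
 * BCH-12 -> 9.  Plain-C model with an illustrative name:
 */
static inline unsigned int model_corr_threshold(unsigned int ecc_strength)
{
	return (ecc_strength * 3 + 3) / 4;	/* DIV_ROUND_UP(strength * 3, 4) */
}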
+
+static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg)
+{
+ buf += sprintf(buf,
+ "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
+ (unsigned long long)cfg->device_size >> 20,
+ cfg->block_size >> 10,
+ cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
+ cfg->page_size >= 1024 ? "KiB" : "B",
+ cfg->spare_area_size, cfg->device_width);
+
+ /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
+ if (is_hamming_ecc(cfg))
+ sprintf(buf, ", Hamming ECC");
+ else if (cfg->sector_size_1k)
+ sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
+ else
+ sprintf(buf, ", BCH-%u", cfg->ecc_level);
+}
+
+/*
+ * Minimum number of bytes to address a page. Calculated as:
+ * roundup(log2(size / page-size) / 8)
+ *
+ * NB: the following does not "round up" for non-power-of-2 'size'; but this is
+ * OK because many other things will break if 'size' is irregular...
+ */
+static inline int get_blk_adr_bytes(u64 size, u32 writesize)
+{
+ return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
+}
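/*
 * A sketch (editor's aside, not from the patch): the same calculation with
 * the kernel helpers ilog2()/ALIGN() spelled out, so the
 * "roundup(log2(size / page-size) / 8)" comment can be checked by hand.
 * Example: a 1GiB device with 2KiB pages needs ilog2(2^30) - ilog2(2^11) = 19
 * block-address bits, rounded up to 24 bits, i.e. 3 bytes.  Names are
 * illustrative only.
 */
static inline int model_ilog2(unsigned long long v)
{
	int n = -1;

	while (v) {		/* index of the highest set bit */
		v >>= 1;
		n++;
	}
	return n;
}

static inline int model_blk_adr_bytes(unsigned long long size,
				      unsigned int writesize)
{
	int bits = model_ilog2(size) - model_ilog2(writesize);

	return ((bits + 7) & ~7) >> 3;	/* ALIGN(bits, 8) >> 3 */
}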
+
+static int brcmnand_setup_dev(struct brcmnand_host *host)
+{
+ struct mtd_info *mtd = &host->mtd;
+ struct nand_chip *chip = &host->chip;
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ char msg[128];
+ u32 offs, tmp, oob_sector;
+ int ret;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ ret = of_property_read_u32(chip->flash_node,
+ "brcm,nand-oob-sector-size",
+ &oob_sector);
+ if (ret) {
+ /* Use detected size */
+ cfg->spare_area_size = mtd->oobsize /
+ (mtd->writesize >> FC_SHIFT);
+ } else {
+ cfg->spare_area_size = oob_sector;
+ }
+ if (cfg->spare_area_size > ctrl->max_oob)
+ cfg->spare_area_size = ctrl->max_oob;
+ /*
+ * Set oobsize to be consistent with controller's spare_area_size, as
+ * the rest is inaccessible.
+ */
+ mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
+
+ cfg->device_size = mtd->size;
+ cfg->block_size = mtd->erasesize;
+ cfg->page_size = mtd->writesize;
+ cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
+ cfg->col_adr_bytes = 2;
+ cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
+
+ switch (chip->ecc.size) {
+ case 512:
+ if (chip->ecc.strength == 1) /* Hamming */
+ cfg->ecc_level = 15;
+ else
+ cfg->ecc_level = chip->ecc.strength;
+ cfg->sector_size_1k = 0;
+ break;
+ case 1024:
+ if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
+ dev_err(ctrl->dev, "1KB sectors not supported\n");
+ return -EINVAL;
+ }
+ if (chip->ecc.strength & 0x1) {
+ dev_err(ctrl->dev,
+ "odd ECC not supported with 1KB sectors\n");
+ return -EINVAL;
+ }
+
+ cfg->ecc_level = chip->ecc.strength >> 1;
+ cfg->sector_size_1k = 1;
+ break;
+ default:
+ dev_err(ctrl->dev, "unsupported ECC size: %d\n",
+ chip->ecc.size);
+ return -EINVAL;
+ }
+
+ cfg->ful_adr_bytes = cfg->blk_adr_bytes;
+ if (mtd->writesize > 512)
+ cfg->ful_adr_bytes += cfg->col_adr_bytes;
+ else
+ cfg->ful_adr_bytes += 1;
+
+ ret = brcmnand_set_cfg(host, cfg);
+ if (ret)
+ return ret;
+
+ brcmnand_set_ecc_enabled(host, 1);
+
+ brcmnand_print_cfg(msg, cfg);
+ dev_info(ctrl->dev, "detected %s\n", msg);
+
+ /* Configure ACC_CONTROL */
+ offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
+ tmp = nand_readreg(ctrl, offs);
+ tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
+ tmp &= ~ACC_CONTROL_RD_ERASED;
+ tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
+ if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
+ /*
+ * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
+ * errors
+ */
+ if (has_flash_dma(ctrl))
+ tmp &= ~ACC_CONTROL_PREFETCH;
+ else
+ tmp |= ACC_CONTROL_PREFETCH;
+ }
+ nand_writereg(ctrl, offs, tmp);
+
+ return 0;
+}
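/*
 * A sketch (editor's aside, not from the patch): the ECC-size switch above
 * maps the generic (sector size, strength) pair onto the controller's
 * ECC_LEVEL field.  512-byte sectors use the strength directly, with Hamming
 * encoded as the special level 15; 1KiB sectors store half the strength,
 * which brcmnand_print_cfg() above doubles back when printing.  Stand-alone
 * model of that mapping; names are illustrative only.
 */
static int model_ecc_level(unsigned int ecc_size, unsigned int strength,
			   int has_1k_sectors,
			   unsigned int *level, unsigned int *sector_1k)
{
	switch (ecc_size) {
	case 512:
		*level = (strength == 1) ? 15 : strength;	/* 1 == Hamming */
		*sector_1k = 0;
		return 0;
	case 1024:
		if (!has_1k_sectors || (strength & 1))
			return -1;	/* unsupported combination */
		*level = strength >> 1;
		*sector_1k = 1;
		return 0;
	default:
		return -1;
	}
}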
+
+static int brcmnand_init_cs(struct brcmnand_host *host)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct device_node *dn = host->of_node;
+ struct platform_device *pdev = host->pdev;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int ret;
+ u16 cfg_offs;
+ struct mtd_part_parser_data ppdata = { .of_node = dn };
+
+ ret = of_property_read_u32(dn, "reg", &host->cs);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get chip-select\n");
+ return -ENXIO;
+ }
+
+ mtd = &host->mtd;
+ chip = &host->chip;
+
+ chip->flash_node = dn;
+ chip->priv = host;
+ mtd->priv = chip;
+ mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
+ host->cs);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
+
+ chip->IO_ADDR_R = (void __iomem *)0xdeadbeef;
+ chip->IO_ADDR_W = (void __iomem *)0xdeadbeef;
+
+ chip->cmd_ctrl = brcmnand_cmd_ctrl;
+ chip->cmdfunc = brcmnand_cmdfunc;
+ chip->waitfunc = brcmnand_waitfunc;
+ chip->read_byte = brcmnand_read_byte;
+ chip->read_buf = brcmnand_read_buf;
+ chip->write_buf = brcmnand_write_buf;
+
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.read_page = brcmnand_read_page;
+ chip->ecc.read_subpage = brcmnand_read_subpage;
+ chip->ecc.write_page = brcmnand_write_page;
+ chip->ecc.read_page_raw = brcmnand_read_page_raw;
+ chip->ecc.write_page_raw = brcmnand_write_page_raw;
+ chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
+ chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
+ chip->ecc.read_oob = brcmnand_read_oob;
+ chip->ecc.write_oob = brcmnand_write_oob;
+
+ chip->controller = &ctrl->controller;
+
+ /*
+ * The bootloader might have configured 16bit mode but
+ * NAND READID command only works in 8bit mode. We force
+	 * 8bit mode here to ensure that the NAND READID command works.
+ */
+ cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+ nand_writereg(ctrl, cfg_offs,
+ nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
+
+ if (nand_scan_ident(mtd, 1, NULL))
+ return -ENXIO;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ /*
+ * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
+ * to/from, and have nand_base pass us a bounce buffer instead, as
+ * needed.
+ */
+ chip->options |= NAND_USE_BOUNCE_BUFFER;
+
+ if (of_get_nand_on_flash_bbt(dn))
+ chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+
+ if (brcmnand_setup_dev(host))
+ return -ENXIO;
+
+ chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
+ /* only use our internal HW threshold */
+ mtd->bitflip_threshold = 1;
+
+ chip->ecc.layout = brcmstb_choose_ecc_layout(host);
+ if (!chip->ecc.layout)
+ return -ENXIO;
+
+ if (nand_scan_tail(mtd))
+ return -ENXIO;
+
+ return mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
+}
+
+static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
+ int restore)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+ u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_CFG_EXT);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
+ u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
+
+ if (restore) {
+ nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
+ if (cfg_offs != cfg_ext_offs)
+ nand_writereg(ctrl, cfg_ext_offs,
+ host->hwcfg.config_ext);
+ nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
+ nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
+ nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
+ } else {
+ host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
+ if (cfg_offs != cfg_ext_offs)
+ host->hwcfg.config_ext =
+ nand_readreg(ctrl, cfg_ext_offs);
+ host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
+ host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
+ host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
+ }
+}
+
+static int brcmnand_suspend(struct device *dev)
+{
+ struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
+ struct brcmnand_host *host;
+
+ list_for_each_entry(host, &ctrl->host_list, node)
+ brcmnand_save_restore_cs_config(host, 0);
+
+ ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
+ ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
+ ctrl->corr_stat_threshold =
+ brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
+
+ if (has_flash_dma(ctrl))
+ ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
+
+ return 0;
+}
+
+static int brcmnand_resume(struct device *dev)
+{
+ struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
+ struct brcmnand_host *host;
+
+ if (has_flash_dma(ctrl)) {
+ flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
+ flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
+ }
+
+ brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
+ brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
+ ctrl->corr_stat_threshold);
+ if (ctrl->soc) {
+ /* Clear/re-enable interrupt */
+ ctrl->soc->ctlrdy_ack(ctrl->soc);
+ ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
+ }
+
+ list_for_each_entry(host, &ctrl->host_list, node) {
+ struct mtd_info *mtd = &host->mtd;
+ struct nand_chip *chip = mtd->priv;
+
+ brcmnand_save_restore_cs_config(host, 1);
+
+ /* Reset the chip, required by some chips after power-up */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ }
+
+ return 0;
+}
+
+const struct dev_pm_ops brcmnand_pm_ops = {
+ .suspend = brcmnand_suspend,
+ .resume = brcmnand_resume,
+};
+EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
+
+static const struct of_device_id brcmnand_of_match[] = {
+ { .compatible = "brcm,brcmnand-v4.0" },
+ { .compatible = "brcm,brcmnand-v5.0" },
+ { .compatible = "brcm,brcmnand-v6.0" },
+ { .compatible = "brcm,brcmnand-v6.1" },
+ { .compatible = "brcm,brcmnand-v7.0" },
+ { .compatible = "brcm,brcmnand-v7.1" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcmnand_of_match);
+
+/***********************************************************************
+ * Platform driver setup (per controller)
+ ***********************************************************************/
+
+int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node, *child;
+ struct brcmnand_controller *ctrl;
+ struct resource *res;
+ int ret;
+
+ /* We only support device-tree instantiation */
+ if (!dn)
+ return -ENODEV;
+
+ if (!of_match_node(brcmnand_of_match, dn))
+ return -ENODEV;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, ctrl);
+ ctrl->dev = dev;
+
+ init_completion(&ctrl->done);
+ init_completion(&ctrl->dma_done);
+ spin_lock_init(&ctrl->controller.lock);
+ init_waitqueue_head(&ctrl->controller.wq);
+ INIT_LIST_HEAD(&ctrl->host_list);
+
+ /* NAND register range */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctrl->nand_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->nand_base))
+ return PTR_ERR(ctrl->nand_base);
+
+ /* Initialize NAND revision */
+ ret = brcmnand_revision_init(ctrl);
+ if (ret)
+ return ret;
+
+ /*
+	 * Most chips have this cache at a fixed offset within the 'nand' block.
+ * Some must specify this region separately.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
+ if (res) {
+ ctrl->nand_fc = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->nand_fc))
+ return PTR_ERR(ctrl->nand_fc);
+ } else {
+ ctrl->nand_fc = ctrl->nand_base +
+ ctrl->reg_offsets[BRCMNAND_FC_BASE];
+ }
+
+ /* FLASH_DMA */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
+ if (res) {
+ ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->flash_dma_base))
+ return PTR_ERR(ctrl->flash_dma_base);
+
+ flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */
+ flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
+
+ /* Allocate descriptor(s) */
+ ctrl->dma_desc = dmam_alloc_coherent(dev,
+ sizeof(*ctrl->dma_desc),
+ &ctrl->dma_pa, GFP_KERNEL);
+ if (!ctrl->dma_desc)
+ return -ENOMEM;
+
+ ctrl->dma_irq = platform_get_irq(pdev, 1);
+ if ((int)ctrl->dma_irq < 0) {
+ dev_err(dev, "missing FLASH_DMA IRQ\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(dev, ctrl->dma_irq,
+ brcmnand_dma_irq, 0, DRV_NAME,
+ ctrl);
+ if (ret < 0) {
+ dev_err(dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->dma_irq, ret);
+ return ret;
+ }
+
+ dev_info(dev, "enabling FLASH_DMA\n");
+ }
+
+ /* Disable automatic device ID config, direct addressing */
+ brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
+ CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
+ /* Disable XOR addressing */
+ brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
+
+ if (ctrl->features & BRCMNAND_HAS_WP) {
+ /* Permanently disable write protection */
+ if (wp_on == 2)
+ brcmnand_set_wp(ctrl, false);
+ } else {
+ wp_on = 0;
+ }
+
+ /* IRQ */
+ ctrl->irq = platform_get_irq(pdev, 0);
+ if ((int)ctrl->irq < 0) {
+ dev_err(dev, "no IRQ defined\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Some SoCs integrate this controller (e.g., its interrupt bits) in
+ * interesting ways
+ */
+ if (soc) {
+ ctrl->soc = soc;
+
+ ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
+ DRV_NAME, ctrl);
+
+ /* Enable interrupt */
+ ctrl->soc->ctlrdy_ack(ctrl->soc);
+ ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
+ } else {
+ /* Use standard interrupt infrastructure */
+ ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
+ DRV_NAME, ctrl);
+ }
+ if (ret < 0) {
+ dev_err(dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->irq, ret);
+ return ret;
+ }
+
+ for_each_available_child_of_node(dn, child) {
+ if (of_device_is_compatible(child, "brcm,nandcs")) {
+ struct brcmnand_host *host;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+ host->pdev = pdev;
+ host->ctrl = ctrl;
+ host->of_node = child;
+
+ ret = brcmnand_init_cs(host);
+ if (ret)
+ continue; /* Try all chip-selects */
+
+ list_add_tail(&host->node, &ctrl->host_list);
+ }
+ }
+
+ /* No chip-selects could initialize properly */
+ if (list_empty(&ctrl->host_list))
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(brcmnand_probe);
+
+int brcmnand_remove(struct platform_device *pdev)
+{
+ struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
+ struct brcmnand_host *host;
+
+ list_for_each_entry(host, &ctrl->host_list, node)
+ nand_release(&host->mtd);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(brcmnand_remove);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kevin Cernekee");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for Broadcom chips");
+MODULE_ALIAS("platform:brcmnand");
diff --git a/kernel/drivers/mtd/nand/brcmnand/brcmnand.h b/kernel/drivers/mtd/nand/brcmnand/brcmnand.h
new file mode 100644
index 000000000..ef5eabba8
--- /dev/null
+++ b/kernel/drivers/mtd/nand/brcmnand/brcmnand.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BRCMNAND_H__
+#define __BRCMNAND_H__
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+struct platform_device;
+struct dev_pm_ops;
+
+struct brcmnand_soc {
+ bool (*ctlrdy_ack)(struct brcmnand_soc *soc);
+ void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
+ void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare);
+};
+
+static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc)
+{
+ if (soc && soc->prepare_data_bus)
+ soc->prepare_data_bus(soc, true);
+}
+
+static inline void brcmnand_soc_data_bus_unprepare(struct brcmnand_soc *soc)
+{
+ if (soc && soc->prepare_data_bus)
+ soc->prepare_data_bus(soc, false);
+}
+
+static inline u32 brcmnand_readl(void __iomem *addr)
+{
+ /*
+ * MIPS endianness is configured by boot strap, which also reverses all
+ * bus endianness (i.e., big-endian CPU + big endian bus ==> native
+ * endian I/O).
+ *
+ * Other architectures (e.g., ARM) either do not support big endian, or
+ * else leave I/O in little endian mode.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(addr);
+ else
+ return readl_relaxed(addr);
+}
+
+static inline void brcmnand_writel(u32 val, void __iomem *addr)
+{
+ /* See brcmnand_readl() comments */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(val, addr);
+ else
+ writel_relaxed(val, addr);
+}
+
+int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc);
+int brcmnand_remove(struct platform_device *pdev);
+
+extern const struct dev_pm_ops brcmnand_pm_ops;
+
+#endif /* __BRCMNAND_H__ */
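A brief editorial note on brcmnand_readl()/brcmnand_writel() above: on big-endian MIPS the boot straps flip the bus endianness together with the CPU, so a raw (native-endian) load returns the register value directly, while readl_relaxed(), being a little-endian accessor, would byte-swap it.  The userspace sketch below only models that swap; the helper names are hypothetical, not the kernel accessors themselves.

#include <stdint.h>

/* byte-swap a 32-bit value, as a little-endian accessor does on a BE CPU */
static inline uint32_t model_swab32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

/* value a readl-style (little-endian) accessor yields on a given CPU;
 * a __raw (native) accessor would simply return raw_bus_value unchanged */
static inline uint32_t model_le_accessor(uint32_t raw_bus_value,
					 int cpu_is_big_endian)
{
	return cpu_is_big_endian ? model_swab32(raw_bus_value) : raw_bus_value;
}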
diff --git a/kernel/drivers/mtd/nand/brcmnand/brcmstb_nand.c b/kernel/drivers/mtd/nand/brcmnand/brcmstb_nand.c
new file mode 100644
index 000000000..5c271077a
--- /dev/null
+++ b/kernel/drivers/mtd/nand/brcmnand/brcmstb_nand.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "brcmnand.h"
+
+static const struct of_device_id brcmstb_nand_of_match[] = {
+ { .compatible = "brcm,brcmnand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcmstb_nand_of_match);
+
+static int brcmstb_nand_probe(struct platform_device *pdev)
+{
+ return brcmnand_probe(pdev, NULL);
+}
+
+static struct platform_driver brcmstb_nand_driver = {
+ .probe = brcmstb_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "brcmstb_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = brcmstb_nand_of_match,
+ }
+};
+module_platform_driver(brcmstb_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for Broadcom STB chips");
diff --git a/kernel/drivers/mtd/nand/brcmnand/iproc_nand.c b/kernel/drivers/mtd/nand/brcmnand/iproc_nand.c
new file mode 100644
index 000000000..585596c54
--- /dev/null
+++ b/kernel/drivers/mtd/nand/brcmnand/iproc_nand.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct iproc_nand_soc {
+ struct brcmnand_soc soc;
+
+ void __iomem *idm_base;
+ void __iomem *ext_base;
+ spinlock_t idm_lock;
+};
+
+#define IPROC_NAND_CTLR_READY_OFFSET 0x10
+#define IPROC_NAND_CTLR_READY BIT(0)
+
+#define IPROC_NAND_IO_CTRL_OFFSET 0x00
+#define IPROC_NAND_APB_LE_MODE BIT(24)
+#define IPROC_NAND_INT_CTRL_READ_ENABLE BIT(6)
+
+static bool iproc_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct iproc_nand_soc *priv =
+ container_of(soc, struct iproc_nand_soc, soc);
+ void __iomem *mmio = priv->ext_base + IPROC_NAND_CTLR_READY_OFFSET;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & IPROC_NAND_CTLR_READY) {
+ brcmnand_writel(IPROC_NAND_CTLR_READY, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void iproc_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct iproc_nand_soc *priv =
+ container_of(soc, struct iproc_nand_soc, soc);
+ void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->idm_lock, flags);
+
+ val = brcmnand_readl(mmio);
+
+ if (en)
+ val |= IPROC_NAND_INT_CTRL_READ_ENABLE;
+ else
+ val &= ~IPROC_NAND_INT_CTRL_READ_ENABLE;
+
+ brcmnand_writel(val, mmio);
+
+ spin_unlock_irqrestore(&priv->idm_lock, flags);
+}
+
+static void iproc_nand_apb_access(struct brcmnand_soc *soc, bool prepare)
+{
+ struct iproc_nand_soc *priv =
+ container_of(soc, struct iproc_nand_soc, soc);
+ void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->idm_lock, flags);
+
+ val = brcmnand_readl(mmio);
+
+ if (prepare)
+ val |= IPROC_NAND_APB_LE_MODE;
+ else
+ val &= ~IPROC_NAND_APB_LE_MODE;
+
+ brcmnand_writel(val, mmio);
+
+ spin_unlock_irqrestore(&priv->idm_lock, flags);
+}
+
+static int iproc_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iproc_nand_soc *priv;
+ struct brcmnand_soc *soc;
+ struct resource *res;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ soc = &priv->soc;
+
+ spin_lock_init(&priv->idm_lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-idm");
+ priv->idm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->idm_base))
+ return PTR_ERR(priv->idm_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-ext");
+ priv->ext_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ext_base))
+ return PTR_ERR(priv->ext_base);
+
+ soc->ctlrdy_ack = iproc_nand_intc_ack;
+ soc->ctlrdy_set_enabled = iproc_nand_intc_set;
+ soc->prepare_data_bus = iproc_nand_apb_access;
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id iproc_nand_of_match[] = {
+ { .compatible = "brcm,nand-iproc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, iproc_nand_of_match);
+
+static struct platform_driver iproc_nand_driver = {
+ .probe = iproc_nand_probe,
+ .remove = brcmnand_remove,
+ .driver = {
+ .name = "iproc_nand",
+ .pm = &brcmnand_pm_ops,
+ .of_match_table = iproc_nand_of_match,
+ }
+};
+module_platform_driver(iproc_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_AUTHOR("Ray Jui");
+MODULE_DESCRIPTION("NAND driver for Broadcom IPROC-based SoCs");
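One small pattern worth noting in iproc_nand_intc_ack() above: the read-then-write-the-same-bit sequence suggests the controller-ready flag is a write-1-to-clear status bit, so acknowledging it means writing the bit back rather than doing a read-modify-write to clear it.  A minimal stand-alone model under that assumption; names are illustrative only.

#include <stdbool.h>
#include <stdint.h>

static inline bool model_w1c_ack(volatile uint32_t *status, uint32_t ready_bit)
{
	uint32_t val = *status;

	if (val & ready_bit) {
		*status = ready_bit;	/* writing the set bit back clears it */
		return true;
	}
	return false;
}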
diff --git a/kernel/drivers/mtd/nand/cafe_nand.c b/kernel/drivers/mtd/nand/cafe_nand.c
index 9a0f45f1d..9de78d2a2 100644
--- a/kernel/drivers/mtd/nand/cafe_nand.c
+++ b/kernel/drivers/mtd/nand/cafe_nand.c
@@ -516,7 +516,8 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required,
+ int page)
{
struct cafe_priv *cafe = mtd->priv;
@@ -604,7 +605,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
mtd->dev.parent = &pdev->dev;
mtd->priv = cafe;
- mtd->owner = THIS_MODULE;
cafe->pdev = pdev;
cafe->mmio = pci_iomap(pdev, 0, 0);
diff --git a/kernel/drivers/mtd/nand/cs553x_nand.c b/kernel/drivers/mtd/nand/cs553x_nand.c
index 88109d375..aec604505 100644
--- a/kernel/drivers/mtd/nand/cs553x_nand.c
+++ b/kernel/drivers/mtd/nand/cs553x_nand.c
@@ -237,17 +237,23 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
/* Enable the following for a flash based bad block table */
this->bbt_options = NAND_BBT_USE_FLASH;
+ new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
+ if (!new_mtd->name) {
+ err = -ENOMEM;
+ goto out_ior;
+ }
+
/* Scan to find existence of the device */
if (nand_scan(new_mtd, 1)) {
err = -ENXIO;
- goto out_ior;
+ goto out_free;
}
- new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
-
cs553x_mtd[cs] = new_mtd;
goto out;
+out_free:
+ kfree(new_mtd->name);
out_ior:
iounmap(this->IO_ADDR_R);
out_mtd:
diff --git a/kernel/drivers/mtd/nand/davinci_nand.c b/kernel/drivers/mtd/nand/davinci_nand.c
index feb6d18de..c72313d66 100644
--- a/kernel/drivers/mtd/nand/davinci_nand.c
+++ b/kernel/drivers/mtd/nand/davinci_nand.c
@@ -520,6 +520,32 @@ static struct nand_ecclayout hwecc4_2048 = {
},
};
+/*
+ * An ECC layout for using 4-bit ECC with large-page (4096-byte) flash,
+ * storing ten ECC bytes plus the manufacturer's bad block marker byte,
+ * and not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_4096 = {
+ .eccbytes = 80,
+ .eccpos = {
+ /* at the end of spare sector */
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ },
+ .oobfree = {
+ /* 2 bytes at offset 0 hold manufacturer badblock markers */
+ {.offset = 2, .length = 46, },
+ /* 5 bytes at offset 8 hold BBT markers */
+ /* 8 bytes at offset 16 hold JFFS2 clean markers */
+ },
+};
+
#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
{.compatible = "ti,davinci-nand", },
@@ -657,9 +683,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->vaddr = vaddr;
info->mtd.priv = &info->chip;
- info->mtd.name = dev_name(&pdev->dev);
- info->mtd.owner = THIS_MODULE;
-
info->mtd.dev.parent = &pdev->dev;
info->chip.IO_ADDR_R = vaddr;
@@ -796,18 +819,12 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
goto syndrome_done;
}
+ if (chunks == 8) {
+ info->ecclayout = hwecc4_4096;
+ info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
+ goto syndrome_done;
+ }
- /* 4KiB page chips are not yet supported. The eccpos from
- * nand_ecclayout cannot hold 80 bytes and change to eccpos[]
- * breaks userspace ioctl interface with mtd-utils. Once we
- * resolve this issue, NAND_ECC_HW_OOB_FIRST mode can be used
- * for the 4KiB page chips.
- *
- * TODO: Note that nand_ecclayout has now been expanded and can
- * hold plenty of OOB entries.
- */
- dev_warn(&pdev->dev, "no 4-bit ECC support yet "
- "for 4KiB-page NAND\n");
ret = -EIO;
goto err;
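An editorial sketch of the arithmetic behind the hwecc4_4096 layout added above: a 4096-byte page is protected as 4096/512 = 8 chunks of 4-bit ECC, each chunk producing 10 syndrome bytes, and the resulting 80 bytes are packed at the tail of the 128-byte spare area the table implies, i.e. OOB offsets 48 through 127.  The helper name below is illustrative.

/* fill the 80 ECC byte positions at the end of a 128-byte OOB area */
static void model_fill_eccpos_4096(unsigned int eccpos[80])
{
	const unsigned int oobsize = 128, eccbytes = 80;
	unsigned int i;

	for (i = 0; i < eccbytes; i++)
		eccpos[i] = oobsize - eccbytes + i;	/* 48, 49, ..., 127 */
}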
diff --git a/kernel/drivers/mtd/nand/denali.c b/kernel/drivers/mtd/nand/denali.c
index 870c7fc0f..67eb2be0d 100644
--- a/kernel/drivers/mtd/nand/denali.c
+++ b/kernel/drivers/mtd/nand/denali.c
@@ -458,8 +458,17 @@ static void find_valid_banks(struct denali_nand_info *denali)
static void detect_max_banks(struct denali_nand_info *denali)
{
uint32_t features = ioread32(denali->flash_reg + FEATURES);
+ /*
+ * Read the revision register, so we can calculate the max_banks
+ * properly: the encoding changed from rev 5.0 to 5.1
+ */
+ u32 revision = MAKE_COMPARABLE_REVISION(
+ ioread32(denali->flash_reg + REVISION));
- denali->max_banks = 2 << (features & FEATURES__N_BANKS);
+ if (revision < REVISION_5_1)
+ denali->max_banks = 2 << (features & FEATURES__N_BANKS);
+ else
+ denali->max_banks = 1 << (features & FEATURES__N_BANKS);
}
static void detect_partition_feature(struct denali_nand_info *denali)
@@ -1105,7 +1114,7 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
* by write_page above.
*/
static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required, int page)
{
/*
* for regular page writes, we let HW handle all the ECC
@@ -1120,7 +1129,8 @@ static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
* write_page() function above.
*/
static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required,
+ int page)
{
/*
* for raw page writes, we want to disable ECC and simply write
@@ -1304,7 +1314,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
*/
addr = MODE_11 | BANK(denali->flash_bank);
index_addr(denali, addr | 0, 0x90);
- index_addr(denali, addr | 1, 0);
+ index_addr(denali, addr | 1, col);
for (i = 0; i < 8; i++) {
index_addr_read_data(denali, addr | 2, &id);
write_byte_to_buf(denali, id);
@@ -1454,7 +1464,6 @@ int denali_init(struct denali_nand_info *denali)
/* now that our ISR is registered, we can enable interrupts */
denali_set_intr_modes(denali, true);
denali->mtd.name = "denali-nand";
- denali->mtd.owner = THIS_MODULE;
denali->mtd.priv = &denali->nand;
/* register the driver with the NAND core subsystem */
diff --git a/kernel/drivers/mtd/nand/denali.h b/kernel/drivers/mtd/nand/denali.h
index 145bf8893..4b12cd302 100644
--- a/kernel/drivers/mtd/nand/denali.h
+++ b/kernel/drivers/mtd/nand/denali.h
@@ -178,6 +178,8 @@
#define REVISION 0x370
#define REVISION__VALUE 0xffff
+#define MAKE_COMPARABLE_REVISION(x) swab16((x) & REVISION__VALUE)
+#define REVISION_5_1 0x00000501
#define ONFI_DEVICE_FEATURES 0x380
#define ONFI_DEVICE_FEATURES__VALUE 0x003f
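A short editorial model of the revision check introduced above: MAKE_COMPARABLE_REVISION() masks the 16-bit REVISION field and byte-swaps it (the swab16() implies the register holds the two revision bytes swapped), so the result can be compared numerically against REVISION_5_1 (0x0501); detect_max_banks() then uses 2 << N before revision 5.1 and 1 << N from 5.1 onwards.  The names below are illustrative, not the driver's.

#include <stdint.h>

static inline uint16_t model_swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

/* raw_revision is the REVISION field already masked to 16 bits */
static inline unsigned int model_denali_max_banks(uint16_t raw_revision,
						  unsigned int n_banks_field)
{
	if (model_swab16(raw_revision) < 0x0501)	/* pre-5.1 encoding */
		return 2u << n_banks_field;
	return 1u << n_banks_field;
}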
diff --git a/kernel/drivers/mtd/nand/denali_pci.c b/kernel/drivers/mtd/nand/denali_pci.c
index 6e2f387b8..de31514df 100644
--- a/kernel/drivers/mtd/nand/denali_pci.c
+++ b/kernel/drivers/mtd/nand/denali_pci.c
@@ -30,19 +30,19 @@ MODULE_DEVICE_TABLE(pci, denali_pci_ids);
static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- int ret = -ENODEV;
+ int ret;
resource_size_t csr_base, mem_base;
unsigned long csr_len, mem_len;
struct denali_nand_info *denali;
- denali = kzalloc(sizeof(*denali), GFP_KERNEL);
+ denali = devm_kzalloc(&dev->dev, sizeof(*denali), GFP_KERNEL);
if (!denali)
return -ENOMEM;
- ret = pci_enable_device(dev);
+ ret = pcim_enable_device(dev);
if (ret) {
- pr_err("Spectra: pci_enable_device failed.\n");
- goto failed_alloc_memery;
+ dev_err(&dev->dev, "Spectra: pci_enable_device failed.\n");
+ return ret;
}
if (id->driver_data == INTEL_CE4100) {
@@ -69,20 +69,19 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
ret = pci_request_regions(dev, DENALI_NAND_NAME);
if (ret) {
- pr_err("Spectra: Unable to request memory regions\n");
- goto failed_enable_dev;
+ dev_err(&dev->dev, "Spectra: Unable to request memory regions\n");
+ return ret;
}
denali->flash_reg = ioremap_nocache(csr_base, csr_len);
if (!denali->flash_reg) {
- pr_err("Spectra: Unable to remap memory region\n");
- ret = -ENOMEM;
- goto failed_req_regions;
+ dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
+ return -ENOMEM;
}
denali->flash_mem = ioremap_nocache(mem_base, mem_len);
if (!denali->flash_mem) {
- pr_err("Spectra: ioremap_nocache failed!");
+ dev_err(&dev->dev, "Spectra: ioremap_nocache failed!");
ret = -ENOMEM;
goto failed_remap_reg;
}
@@ -99,13 +98,6 @@ failed_remap_mem:
iounmap(denali->flash_mem);
failed_remap_reg:
iounmap(denali->flash_reg);
-failed_req_regions:
- pci_release_regions(dev);
-failed_enable_dev:
- pci_disable_device(dev);
-failed_alloc_memery:
- kfree(denali);
-
return ret;
}
@@ -117,9 +109,6 @@ static void denali_pci_remove(struct pci_dev *dev)
denali_remove(denali);
iounmap(denali->flash_reg);
iounmap(denali->flash_mem);
- pci_release_regions(dev);
- pci_disable_device(dev);
- kfree(denali);
}
static struct pci_driver denali_pci_driver = {
@@ -129,14 +118,4 @@ static struct pci_driver denali_pci_driver = {
.remove = denali_pci_remove,
};
-static int denali_init_pci(void)
-{
- return pci_register_driver(&denali_pci_driver);
-}
-module_init(denali_init_pci);
-
-static void denali_exit_pci(void)
-{
- pci_unregister_driver(&denali_pci_driver);
-}
-module_exit(denali_exit_pci);
+module_pci_driver(denali_pci_driver);
diff --git a/kernel/drivers/mtd/nand/diskonchip.c b/kernel/drivers/mtd/nand/diskonchip.c
index f68a7bcce..0802158a3 100644
--- a/kernel/drivers/mtd/nand/diskonchip.c
+++ b/kernel/drivers/mtd/nand/diskonchip.c
@@ -24,7 +24,7 @@
#include <linux/rslib.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -69,6 +69,9 @@ struct doc_priv {
int mh0_page;
int mh1_page;
struct mtd_info *nextdoc;
+
+ /* Handle the last stage of initialization (BBT scan, partitioning) */
+ int (*late_init)(struct mtd_info *mtd);
};
/* This is the syndrome computed by the HW ecc generator upon reading an empty
@@ -1294,14 +1297,11 @@ static int __init nftl_scan_bbt(struct mtd_info *mtd)
this->bbt_md = NULL;
}
- /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
- At least as nand_bbt.c is currently written. */
- if ((ret = nand_scan_bbt(mtd, NULL)))
+ ret = this->scan_bbt(mtd);
+ if (ret)
return ret;
- mtd_device_register(mtd, NULL, 0);
- if (!no_autopart)
- mtd_device_register(mtd, parts, numparts);
- return 0;
+
+ return mtd_device_register(mtd, parts, no_autopart ? 0 : numparts);
}
static int __init inftl_scan_bbt(struct mtd_info *mtd)
@@ -1344,10 +1344,10 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
this->bbt_md->pattern = "TBB_SYSM";
}
- /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
- At least as nand_bbt.c is currently written. */
- if ((ret = nand_scan_bbt(mtd, NULL)))
+ ret = this->scan_bbt(mtd);
+ if (ret)
return ret;
+
memset((char *)parts, 0, sizeof(parts));
numparts = inftl_partscan(mtd, parts);
/* At least for now, require the INFTL Media Header. We could probably
@@ -1355,10 +1355,7 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
autopartitioning, but I want to give it more thought. */
if (!numparts)
return -EIO;
- mtd_device_register(mtd, NULL, 0);
- if (!no_autopart)
- mtd_device_register(mtd, parts, numparts);
- return 0;
+ return mtd_device_register(mtd, parts, no_autopart ? 0 : numparts);
}
static inline int __init doc2000_init(struct mtd_info *mtd)
@@ -1369,7 +1366,7 @@ static inline int __init doc2000_init(struct mtd_info *mtd)
this->read_byte = doc2000_read_byte;
this->write_buf = doc2000_writebuf;
this->read_buf = doc2000_readbuf;
- this->scan_bbt = nftl_scan_bbt;
+ doc->late_init = nftl_scan_bbt;
doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
doc2000_count_chips(mtd);
@@ -1396,13 +1393,13 @@ static inline int __init doc2001_init(struct mtd_info *mtd)
can have multiple chips. */
doc2000_count_chips(mtd);
mtd->name = "DiskOnChip 2000 (INFTL Model)";
- this->scan_bbt = inftl_scan_bbt;
+ doc->late_init = inftl_scan_bbt;
return (4 * doc->chips_per_floor);
} else {
/* Bog-standard Millennium */
doc->chips_per_floor = 1;
mtd->name = "DiskOnChip Millennium";
- this->scan_bbt = nftl_scan_bbt;
+ doc->late_init = nftl_scan_bbt;
return 1;
}
}
@@ -1415,7 +1412,7 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
this->read_byte = doc2001plus_read_byte;
this->write_buf = doc2001plus_writebuf;
this->read_buf = doc2001plus_readbuf;
- this->scan_bbt = inftl_scan_bbt;
+ doc->late_init = inftl_scan_bbt;
this->cmd_ctrl = NULL;
this->select_chip = doc2001plus_select_chip;
this->cmdfunc = doc2001plus_command;
@@ -1591,6 +1588,8 @@ static int __init doc_probe(unsigned long physadr)
nand->ecc.bytes = 6;
nand->ecc.strength = 2;
nand->bbt_options = NAND_BBT_USE_FLASH;
+ /* Skip the automatic BBT scan so we can run it manually */
+ nand->options |= NAND_SKIP_BBTSCAN;
doc->physadr = physadr;
doc->virtadr = virtadr;
@@ -1608,7 +1607,7 @@ static int __init doc_probe(unsigned long physadr)
else
numchips = doc2001_init(mtd);
- if ((ret = nand_scan(mtd, numchips))) {
+ if ((ret = nand_scan(mtd, numchips)) || (ret = doc->late_init(mtd))) {
/* DBB note: i believe nand_release is necessary here, as
buffers may have been allocated in nand_base. Check with
Thomas. FIX ME! */
diff --git a/kernel/drivers/mtd/nand/docg4.c b/kernel/drivers/mtd/nand/docg4.c
index e5d7bcaaf..408cf69b8 100644
--- a/kernel/drivers/mtd/nand/docg4.c
+++ b/kernel/drivers/mtd/nand/docg4.c
@@ -977,13 +977,13 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
}
static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required, int page)
{
return write_page(mtd, nand, buf, false);
}
static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required, int page)
{
return write_page(mtd, nand, buf, true);
}
@@ -1113,7 +1113,7 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
/* write first page of block */
write_page_prologue(mtd, g4_addr);
- docg4_write_page(mtd, nand, buf, 1);
+ docg4_write_page(mtd, nand, buf, 1, page);
ret = pageprog(mtd);
kfree(buf);
@@ -1316,7 +1316,7 @@ static int __init probe_docg4(struct platform_device *pdev)
doc = (struct docg4_priv *) (nand + 1);
mtd->priv = nand;
nand->priv = doc;
- mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
doc->virtadr = virtadr;
doc->dev = dev;
diff --git a/kernel/drivers/mtd/nand/fsl_elbc_nand.c b/kernel/drivers/mtd/nand/fsl_elbc_nand.c
index 04b22fd37..dcb1f7f48 100644
--- a/kernel/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/kernel/drivers/mtd/nand/fsl_elbc_nand.c
@@ -715,7 +715,7 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
* waitfunc.
*/
static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required, int page)
{
fsl_elbc_write_buf(mtd, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -728,7 +728,7 @@ static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
*/
static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offset, uint32_t data_len,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required, int page)
{
fsl_elbc_write_buf(mtd, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -747,7 +747,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
/* Fill in fsl_elbc_mtd structure */
priv->mtd.priv = chip;
- priv->mtd.owner = THIS_MODULE;
+ priv->mtd.dev.parent = priv->dev;
/* set timeout to maximum */
priv->fmr = 15 << FMR_CWTO_SHIFT;
@@ -946,6 +946,7 @@ static const struct of_device_id fsl_elbc_nand_match[] = {
{ .compatible = "fsl,elbc-fcm-nand", },
{}
};
+MODULE_DEVICE_TABLE(of, fsl_elbc_nand_match);
static struct platform_driver fsl_elbc_nand_driver = {
.driver = {
diff --git a/kernel/drivers/mtd/nand/fsl_ifc_nand.c b/kernel/drivers/mtd/nand/fsl_ifc_nand.c
index 51394e599..7f4ac8c19 100644
--- a/kernel/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/kernel/drivers/mtd/nand/fsl_ifc_nand.c
@@ -238,8 +238,8 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
ifc_nand_ctrl->page = page_addr;
/* Program ROW0/COL0 */
- iowrite32be(page_addr, &ifc->ifc_nand.row0);
- iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
+ ifc_out32(page_addr, &ifc->ifc_nand.row0);
+ ifc_out32((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
buf_num = page_addr & priv->bufnum_mask;
@@ -301,19 +301,19 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
int i;
/* set the chip select for NAND Transaction */
- iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT,
- &ifc->ifc_nand.nand_csel);
+ ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT,
+ &ifc->ifc_nand.nand_csel);
dev_vdbg(priv->dev,
"%s: fir0=%08x fcr0=%08x\n",
__func__,
- ioread32be(&ifc->ifc_nand.nand_fir0),
- ioread32be(&ifc->ifc_nand.nand_fcr0));
+ ifc_in32(&ifc->ifc_nand.nand_fir0),
+ ifc_in32(&ifc->ifc_nand.nand_fcr0));
ctrl->nand_stat = 0;
/* start read/write seq */
- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -336,7 +336,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
int sector_end = sector + chip->ecc.steps - 1;
for (i = sector / 4; i <= sector_end / 4; i++)
- eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]);
+ eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]);
for (i = sector; i <= sector_end; i++) {
errors = check_read_ecc(mtd, ctrl, eccstat, i);
@@ -376,33 +376,33 @@ static void fsl_ifc_do_read(struct nand_chip *chip,
/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
if (mtd->writesize > 512) {
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
-
- iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
- (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
- &ifc->ifc_nand.nand_fcr0);
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
+
+ ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
} else {
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
if (oob)
- iowrite32be(NAND_CMD_READOOB <<
- IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(NAND_CMD_READOOB <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
else
- iowrite32be(NAND_CMD_READ0 <<
- IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(NAND_CMD_READ0 <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
}
}
@@ -422,7 +422,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
switch (command) {
/* READ0 read the entire buffer to use hardware ECC. */
case NAND_CMD_READ0:
- iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, page_addr, 0);
ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -437,7 +437,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* READOOB reads only the OOB because no ECC is performed. */
case NAND_CMD_READOOB:
- iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, column, page_addr, 1);
ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -453,19 +453,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
if (command == NAND_CMD_PARAM)
timing = IFC_FIR_OP_RBCD;
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
- (timing << IFC_NAND_FIR0_OP2_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
- iowrite32be(column, &ifc->ifc_nand.row3);
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (timing << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(column, &ifc->ifc_nand.row3);
/*
* although currently it's 8 bytes for READID, we always read
* the maximum 256 bytes(for PARAM)
*/
- iowrite32be(256, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
ifc_nand_ctrl->read_bytes = 256;
set_addr(mtd, 0, 0, 0);
@@ -480,16 +480,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* ERASE2 uses the block and page address from ERASE1 */
case NAND_CMD_ERASE2:
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
- &ifc->ifc_nand.nand_fir0);
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
- iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
- (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
- &ifc->ifc_nand.nand_fcr0);
+ ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
- iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
ifc_nand_ctrl->read_bytes = 0;
fsl_ifc_run_command(mtd);
return;
@@ -506,19 +506,18 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
- iowrite32be(
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(
- (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
- (IFC_FIR_OP_RDSTAT <<
- IFC_NAND_FIR1_OP6_SHIFT) |
- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
- &ifc->ifc_nand.nand_fir1);
+ ifc_out32(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
} else {
nand_fcr0 = ((NAND_CMD_PAGEPROG <<
IFC_NAND_FCR0_CMD1_SHIFT) |
@@ -527,20 +526,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(NAND_CMD_STATUS <<
IFC_NAND_FCR0_CMD3_SHIFT));
- iowrite32be(
+ ifc_out32(
(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
&ifc->ifc_nand.nand_fir0);
- iowrite32be(
- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
- (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
- (IFC_FIR_OP_RDSTAT <<
- IFC_NAND_FIR1_OP7_SHIFT) |
- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
- &ifc->ifc_nand.nand_fir1);
+ ifc_out32(
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+ &ifc->ifc_nand.nand_fir1);
if (column >= mtd->writesize)
nand_fcr0 |=
@@ -555,7 +553,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
column -= mtd->writesize;
ifc_nand_ctrl->oob = 1;
}
- iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
return;
}
@@ -563,24 +561,26 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
case NAND_CMD_PAGEPROG: {
if (ifc_nand_ctrl->oob) {
- iowrite32be(ifc_nand_ctrl->index -
- ifc_nand_ctrl->column,
- &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(ifc_nand_ctrl->index -
+ ifc_nand_ctrl->column,
+ &ifc->ifc_nand.nand_fbcr);
} else {
- iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
}
fsl_ifc_run_command(mtd);
return;
}
- case NAND_CMD_STATUS:
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
- iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+ case NAND_CMD_STATUS: {
+ void __iomem *addr;
+
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, 0, 0);
ifc_nand_ctrl->read_bytes = 1;
@@ -590,17 +590,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
* The chip always seems to report that it is
* write-protected, even when it is not.
*/
+ addr = ifc_nand_ctrl->addr;
if (chip->options & NAND_BUSWIDTH_16)
- setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+ ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr);
else
- setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+ ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr);
return;
+ }
case NAND_CMD_RESET:
- iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
fsl_ifc_run_command(mtd);
return;
@@ -658,7 +660,7 @@ static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
*/
if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
offset = ifc_nand_ctrl->index++;
- return in_8(ifc_nand_ctrl->addr + offset);
+ return ifc_in8(ifc_nand_ctrl->addr + offset);
}
dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
@@ -680,7 +682,7 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
* next byte.
*/
if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
- data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
+ data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
ifc_nand_ctrl->index += 2;
return (uint8_t) data;
}
@@ -726,18 +728,18 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
u32 nand_fsr;
/* Use READ_STATUS command, but wait for the device to be ready */
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
- iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, 0, 0);
ifc_nand_ctrl->read_bytes = 1;
fsl_ifc_run_command(mtd);
- nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr);
+ nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
/*
* The chip always seems to report that it is
@@ -770,7 +772,7 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
* waitfunc.
*/
static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required, int page)
{
fsl_ifc_write_buf(mtd, buf, mtd->writesize);
fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -829,34 +831,34 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
uint32_t cs = priv->bank;
/* Save CSOR and CSOR_ext */
- csor = ioread32be(&ifc->csor_cs[cs].csor);
- csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext);
+ csor = ifc_in32(&ifc->csor_cs[cs].csor);
+ csor_ext = ifc_in32(&ifc->csor_cs[cs].csor_ext);
/* change PageSize 8K and SpareSize 1K */
csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
- iowrite32be(csor_8k, &ifc->csor_cs[cs].csor);
- iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext);
+ ifc_out32(csor_8k, &ifc->csor_cs[cs].csor);
+ ifc_out32(0x0000400, &ifc->csor_cs[cs].csor_ext);
/* READID */
- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
- &ifc->ifc_nand.nand_fir0);
- iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
- iowrite32be(0x0, &ifc->ifc_nand.row3);
+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ ifc_out32(0x0, &ifc->ifc_nand.row3);
- iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(0x0, &ifc->ifc_nand.nand_fbcr);
/* Program ROW0/COL0 */
- iowrite32be(0x0, &ifc->ifc_nand.row0);
- iowrite32be(0x0, &ifc->ifc_nand.col0);
+ ifc_out32(0x0, &ifc->ifc_nand.row0);
+ ifc_out32(0x0, &ifc->ifc_nand.col0);
/* set the chip select for NAND Transaction */
- iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
+ ifc_out32(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
/* start read seq */
- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -866,8 +868,8 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
/* Restore CSOR and CSOR_ext */
- iowrite32be(csor, &ifc->csor_cs[cs].csor);
- iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext);
+ ifc_out32(csor, &ifc->csor_cs[cs].csor);
+ ifc_out32(csor_ext, &ifc->csor_cs[cs].csor_ext);
}
static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
@@ -880,11 +882,11 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
/* Fill in fsl_ifc_mtd structure */
priv->mtd.priv = chip;
- priv->mtd.owner = THIS_MODULE;
+ priv->mtd.dev.parent = priv->dev;
/* fill in nand_chip structure */
/* set up function call table */
- if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+ if ((ifc_in32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
chip->read_byte = fsl_ifc_read_byte16;
else
chip->read_byte = fsl_ifc_read_byte;
@@ -898,13 +900,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
- iowrite32be(0x0, &ifc->ifc_nand.ncfgr);
+ ifc_out32(0x0, &ifc->ifc_nand.ncfgr);
/* set up nand options */
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->options = NAND_NO_SUBPAGE_WRITE;
- if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+ if (ifc_in32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
chip->read_byte = fsl_ifc_read_byte16;
chip->options |= NAND_BUSWIDTH_16;
} else {
@@ -917,7 +919,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.read_page = fsl_ifc_read_page;
chip->ecc.write_page = fsl_ifc_write_page;
- csor = ioread32be(&ifc->csor_cs[priv->bank].csor);
+ csor = ifc_in32(&ifc->csor_cs[priv->bank].csor);
/* Hardware generates ECC per 512 Bytes */
chip->ecc.size = 512;
@@ -1006,7 +1008,7 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
phys_addr_t addr)
{
- u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr);
+ u32 cspr = ifc_in32(&ifc->cspr_cs[bank].cspr);
if (!(cspr & CSPR_V))
return 0;
@@ -1092,16 +1094,16 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
dev_set_drvdata(priv->dev, priv);
- iowrite32be(IFC_NAND_EVTER_EN_OPC_EN |
- IFC_NAND_EVTER_EN_FTOER_EN |
- IFC_NAND_EVTER_EN_WPER_EN,
- &ifc->ifc_nand.nand_evter_en);
+ ifc_out32(IFC_NAND_EVTER_EN_OPC_EN |
+ IFC_NAND_EVTER_EN_FTOER_EN |
+ IFC_NAND_EVTER_EN_WPER_EN,
+ &ifc->ifc_nand.nand_evter_en);
/* enable NAND Machine Interrupts */
- iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN |
- IFC_NAND_EVTER_INTR_FTOERIR_EN |
- IFC_NAND_EVTER_INTR_WPERIR_EN,
- &ifc->ifc_nand.nand_evter_intr_en);
+ ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN |
+ IFC_NAND_EVTER_INTR_FTOERIR_EN |
+ IFC_NAND_EVTER_INTR_WPERIR_EN,
+ &ifc->ifc_nand.nand_evter_intr_en);
priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
if (!priv->mtd.name) {
ret = -ENOMEM;
@@ -1161,6 +1163,7 @@ static const struct of_device_id fsl_ifc_nand_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, fsl_ifc_nand_match);
static struct platform_driver fsl_ifc_nand_driver = {
.driver = {
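
Throughout the fsl_ifc_nand.c hunks above, the raw big-endian MMIO helpers (iowrite32be/ioread32be, in_8/in_be16, setbits8/16) are replaced by ifc_out32/ifc_in32-style accessors so the same driver can serve both big- and little-endian IFC blocks. The real helpers live in the shared fsl_ifc header; the following is only a minimal sketch of the idea, with a hypothetical ifc_little_endian flag standing in for whatever the controller code records at probe time.

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical endianness flag; the real code keeps this in the IFC ctrl device. */
static bool ifc_little_endian;

static inline u32 ifc_in32(void __iomem *addr)
{
	return ifc_little_endian ? ioread32(addr) : ioread32be(addr);
}

static inline void ifc_out32(u32 val, void __iomem *addr)
{
	if (ifc_little_endian)
		iowrite32(val, addr);
	else
		iowrite32be(val, addr);
}
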
diff --git a/kernel/drivers/mtd/nand/fsl_upm.c b/kernel/drivers/mtd/nand/fsl_upm.c
index 72755d7ec..d32636998 100644
--- a/kernel/drivers/mtd/nand/fsl_upm.c
+++ b/kernel/drivers/mtd/nand/fsl_upm.c
@@ -176,7 +176,7 @@ static int fun_chip_init(struct fsl_upm_nand *fun,
fun->chip.dev_ready = fun_chip_ready;
fun->mtd.priv = &fun->chip;
- fun->mtd.owner = THIS_MODULE;
+ fun->mtd.dev.parent = fun->dev;
flash_np = of_get_next_child(upm_np, NULL);
if (!flash_np)
diff --git a/kernel/drivers/mtd/nand/fsmc_nand.c b/kernel/drivers/mtd/nand/fsmc_nand.c
index e58af4bfa..07af3dc7a 100644
--- a/kernel/drivers/mtd/nand/fsmc_nand.c
+++ b/kernel/drivers/mtd/nand/fsmc_nand.c
@@ -348,7 +348,7 @@ static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
break;
default:
- BUG();
+ dev_err(host->dev, "unsupported chip-select %d\n", chipnr);
}
}
@@ -562,6 +562,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
dma_cookie_t cookie;
unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
int ret;
+ unsigned long time_left;
if (direction == DMA_TO_DEVICE)
chan = host->write_dma_chan;
@@ -601,14 +602,13 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
dma_async_issue_pending(chan);
- ret =
+ time_left =
wait_for_completion_timeout(&host->dma_access_complete,
msecs_to_jiffies(3000));
- if (ret <= 0) {
+ if (time_left == 0) {
dmaengine_terminate_all(chan);
dev_err(host->dev, "wait_for_completion_timeout\n");
- if (!ret)
- ret = -ETIMEDOUT;
+ ret = -ETIMEDOUT;
goto unmap_dma;
}
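
The dma_xfer() hunk above is the usual correction for wait_for_completion_timeout(): it returns an unsigned long count of remaining jiffies (0 on timeout), never a negative errno, so storing it in a signed int named ret invites misuse. A stand-alone sketch of the corrected idiom (function and parameter names are illustrative):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_for_xfer(struct completion *done)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(done, msecs_to_jiffies(3000));
	if (time_left == 0)		/* 0 means the 3 s timeout expired */
		return -ETIMEDOUT;

	return 0;			/* completed, time_left jiffies to spare */
}
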
@@ -960,7 +960,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
host->data_va = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->data_va))
return PTR_ERR(host->data_va);
-
+
host->data_pa = (dma_addr_t)res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
@@ -1017,18 +1017,23 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
mtd->priv = nand;
nand->priv = host;
- host->mtd.owner = THIS_MODULE;
+ host->mtd.dev.parent = &pdev->dev;
nand->IO_ADDR_R = host->data_va;
nand->IO_ADDR_W = host->data_va;
nand->cmd_ctrl = fsmc_cmd_ctrl;
nand->chip_delay = 30;
+ /*
+ * Setup default ECC mode. nand_dt_init() called from nand_scan_ident()
+ * can overwrite this value if the DT provides a different value.
+ */
nand->ecc.mode = NAND_ECC_HW;
nand->ecc.hwctl = fsmc_enable_hwecc;
nand->ecc.size = 512;
nand->options = pdata->options;
nand->select_chip = fsmc_select_chip;
nand->badblockbits = 7;
+ nand->flash_node = np;
if (pdata->width == FSMC_NAND_BW16)
nand->options |= NAND_BUSWIDTH_16;
@@ -1070,11 +1075,6 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
nand->ecc.correct = fsmc_bch8_correct_data;
nand->ecc.bytes = 13;
nand->ecc.strength = 8;
- } else {
- nand->ecc.calculate = fsmc_read_hwecc_ecc1;
- nand->ecc.correct = nand_correct_data;
- nand->ecc.bytes = 3;
- nand->ecc.strength = 1;
}
/*
@@ -1111,23 +1111,50 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
default:
dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
mtd->oobsize);
- BUG();
+ ret = -EINVAL;
+ goto err_probe;
}
} else {
- switch (host->mtd.oobsize) {
- case 16:
- nand->ecc.layout = &fsmc_ecc1_16_layout;
- break;
- case 64:
- nand->ecc.layout = &fsmc_ecc1_64_layout;
+ switch (nand->ecc.mode) {
+ case NAND_ECC_HW:
+ dev_info(&pdev->dev, "Using 1-bit HW ECC scheme\n");
+ nand->ecc.calculate = fsmc_read_hwecc_ecc1;
+ nand->ecc.correct = nand_correct_data;
+ nand->ecc.bytes = 3;
+ nand->ecc.strength = 1;
break;
- case 128:
- nand->ecc.layout = &fsmc_ecc1_128_layout;
+
+ case NAND_ECC_SOFT_BCH:
+ dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n");
break;
+
default:
- dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
- mtd->oobsize);
- BUG();
+ dev_err(&pdev->dev, "Unsupported ECC mode!\n");
+ goto err_probe;
+ }
+
+ /*
+ * Don't set layout for BCH4 SW ECC. This will be
+	 * generated later in nand_bch_init().
+ */
+ if (nand->ecc.mode != NAND_ECC_SOFT_BCH) {
+ switch (host->mtd.oobsize) {
+ case 16:
+ nand->ecc.layout = &fsmc_ecc1_16_layout;
+ break;
+ case 64:
+ nand->ecc.layout = &fsmc_ecc1_64_layout;
+ break;
+ case 128:
+ nand->ecc.layout = &fsmc_ecc1_128_layout;
+ break;
+ default:
+ dev_warn(&pdev->dev,
+ "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ ret = -EINVAL;
+ goto err_probe;
+ }
}
}
diff --git a/kernel/drivers/mtd/nand/gpio.c b/kernel/drivers/mtd/nand/gpio.c
index 73c4048c3..9ab97f934 100644
--- a/kernel/drivers/mtd/nand/gpio.c
+++ b/kernel/drivers/mtd/nand/gpio.c
@@ -275,7 +275,7 @@ static int gpio_nand_probe(struct platform_device *pdev)
chip->cmd_ctrl = gpio_nand_cmd_ctrl;
gpiomtd->mtd_info.priv = chip;
- gpiomtd->mtd_info.owner = THIS_MODULE;
+ gpiomtd->mtd_info.dev.parent = &pdev->dev;
platform_set_drvdata(pdev, gpiomtd);
diff --git a/kernel/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/kernel/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 1b8f3500e..2064adac1 100644
--- a/kernel/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/kernel/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1160,7 +1160,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
}
static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required, int page)
{
struct gpmi_nand_data *this = chip->priv;
struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -1446,7 +1446,7 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
struct nand_chip *chip,
const uint8_t *buf,
- int oob_required)
+ int oob_required, int page)
{
struct gpmi_nand_data *this = chip->priv;
struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -1533,7 +1533,7 @@ static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
{
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
- return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1);
+ return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
}
static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -1717,7 +1717,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Write the first page of the current stride. */
dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- chip->ecc.write_page_raw(mtd, chip, buffer, 0);
+ chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
/* Wait for the write to finish. */
@@ -1897,7 +1897,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
/* init the MTD data structures */
mtd->priv = chip;
mtd->name = "gpmi-nand";
- mtd->owner = THIS_MODULE;
+ mtd->dev.parent = this->dev;
/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
chip->priv = this;
diff --git a/kernel/drivers/mtd/nand/hisi504_nand.c b/kernel/drivers/mtd/nand/hisi504_nand.c
index 8dcc7b8fe..0cb2e8869 100644
--- a/kernel/drivers/mtd/nand/hisi504_nand.c
+++ b/kernel/drivers/mtd/nand/hisi504_nand.c
@@ -590,7 +590,8 @@ static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
}
static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf, int oob_required)
+ struct nand_chip *chip, const uint8_t *buf, int oob_required,
+ int page)
{
chip->write_buf(mtd, buf, mtd->writesize);
if (oob_required)
@@ -737,7 +738,6 @@ static int hisi_nfc_probe(struct platform_device *pdev)
}
mtd->priv = chip;
- mtd->owner = THIS_MODULE;
mtd->name = "hisi_nand";
mtd->dev.parent = &pdev->dev;
diff --git a/kernel/drivers/mtd/nand/jz4740_nand.c b/kernel/drivers/mtd/nand/jz4740_nand.c
index ebf2cce04..5a99a93ed 100644
--- a/kernel/drivers/mtd/nand/jz4740_nand.c
+++ b/kernel/drivers/mtd/nand/jz4740_nand.c
@@ -25,6 +25,7 @@
#include <linux/gpio.h>
+#include <asm/mach-jz4740/gpio.h>
#include <asm/mach-jz4740/jz4740_nand.h>
#define JZ_REG_NAND_CTRL 0x50
@@ -434,7 +435,7 @@ static int jz_nand_probe(struct platform_device *pdev)
mtd = &nand->mtd;
chip = &nand->chip;
mtd->priv = chip;
- mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
mtd->name = "jz4740-nand";
chip->ecc.hwctl = jz_nand_hwctl;
diff --git a/kernel/drivers/mtd/nand/lpc32xx_mlc.c b/kernel/drivers/mtd/nand/lpc32xx_mlc.c
index 79c3b7801..347510978 100644
--- a/kernel/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/kernel/drivers/mtd/nand/lpc32xx_mlc.c
@@ -495,7 +495,8 @@ static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required,
+ int page)
{
struct lpc32xx_nand_host *host = chip->priv;
const uint8_t *oobbuf = chip->oob_poi;
@@ -682,7 +683,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
nand_chip->priv = host; /* link the private data structures */
mtd->priv = nand_chip;
- mtd->owner = THIS_MODULE;
mtd->dev.parent = &pdev->dev;
/* Get NAND clock */
@@ -692,7 +692,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
res = -ENOENT;
goto err_exit1;
}
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
nand_chip->dev_ready = lpc32xx_nand_device_ready;
@@ -800,7 +800,7 @@ err_exit3:
if (use_dma)
dma_release_channel(host->dma_chan);
err_exit2:
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
clk_put(host->clk);
err_exit1:
lpc32xx_wp_enable(host);
@@ -822,7 +822,7 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)
if (use_dma)
dma_release_channel(host->dma_chan);
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
clk_put(host->clk);
lpc32xx_wp_enable(host);
@@ -837,7 +837,7 @@ static int lpc32xx_nand_resume(struct platform_device *pdev)
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
/* Re-enable NAND clock */
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
/* Fresh init of NAND controller */
lpc32xx_nand_setup(host);
@@ -856,7 +856,7 @@ static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
lpc32xx_wp_enable(host);
/* Disable clock */
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
return 0;
}
diff --git a/kernel/drivers/mtd/nand/lpc32xx_slc.c b/kernel/drivers/mtd/nand/lpc32xx_slc.c
index abfec1386..4f3d4eb17 100644
--- a/kernel/drivers/mtd/nand/lpc32xx_slc.c
+++ b/kernel/drivers/mtd/nand/lpc32xx_slc.c
@@ -94,22 +94,25 @@
/**********************************************************************
* slc_tac register definitions
**********************************************************************/
+/* Computation of clock cycles on basis of controller and device clock rates */
+#define SLCTAC_CLOCKS(c, n, s) (min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << s)
+
/* Clock setting for RDY write sample wait time in 2*n clocks */
#define SLCTAC_WDR(n) (((n) & 0xF) << 28)
/* Write pulse width in clock cycles, 1 to 16 clocks */
-#define SLCTAC_WWIDTH(n) (((n) & 0xF) << 24)
+#define SLCTAC_WWIDTH(c, n) (SLCTAC_CLOCKS(c, n, 24))
/* Write hold time of control and data signals, 1 to 16 clocks */
-#define SLCTAC_WHOLD(n) (((n) & 0xF) << 20)
+#define SLCTAC_WHOLD(c, n) (SLCTAC_CLOCKS(c, n, 20))
/* Write setup time of control and data signals, 1 to 16 clocks */
-#define SLCTAC_WSETUP(n) (((n) & 0xF) << 16)
+#define SLCTAC_WSETUP(c, n) (SLCTAC_CLOCKS(c, n, 16))
/* Clock setting for RDY read sample wait time in 2*n clocks */
#define SLCTAC_RDR(n) (((n) & 0xF) << 12)
/* Read pulse width in clock cycles, 1 to 16 clocks */
-#define SLCTAC_RWIDTH(n) (((n) & 0xF) << 8)
+#define SLCTAC_RWIDTH(c, n) (SLCTAC_CLOCKS(c, n, 8))
/* Read hold time of control and data signals, 1 to 16 clocks */
-#define SLCTAC_RHOLD(n) (((n) & 0xF) << 4)
+#define SLCTAC_RHOLD(c, n) (SLCTAC_CLOCKS(c, n, 4))
/* Read setup time of control and data signals, 1 to 16 clocks */
-#define SLCTAC_RSETUP(n) (((n) & 0xF) << 0)
+#define SLCTAC_RSETUP(c, n) (SLCTAC_CLOCKS(c, n, 0))
/**********************************************************************
* slc_ecc register definitions
@@ -240,13 +243,13 @@ static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
/* Compute clock setup values */
tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
- SLCTAC_WWIDTH(1 + (clkrate / host->ncfg->wwidth)) |
- SLCTAC_WHOLD(1 + (clkrate / host->ncfg->whold)) |
- SLCTAC_WSETUP(1 + (clkrate / host->ncfg->wsetup)) |
+ SLCTAC_WWIDTH(clkrate, host->ncfg->wwidth) |
+ SLCTAC_WHOLD(clkrate, host->ncfg->whold) |
+ SLCTAC_WSETUP(clkrate, host->ncfg->wsetup) |
SLCTAC_RDR(host->ncfg->rdr_clks) |
- SLCTAC_RWIDTH(1 + (clkrate / host->ncfg->rwidth)) |
- SLCTAC_RHOLD(1 + (clkrate / host->ncfg->rhold)) |
- SLCTAC_RSETUP(1 + (clkrate / host->ncfg->rsetup));
+ SLCTAC_RWIDTH(clkrate, host->ncfg->rwidth) |
+ SLCTAC_RHOLD(clkrate, host->ncfg->rhold) |
+ SLCTAC_RSETUP(clkrate, host->ncfg->rsetup);
writel(tmp, SLC_TAC(host->io_base));
}
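
To make the new SLCTAC_CLOCKS() macro concrete: it derives a 4-bit cycle count from the controller clock and the requested signal rate, clamps it to 0xF, and shifts it into the proper field. With an illustrative 104 MHz controller clock and a 40 MHz write-pulse rate (both values hypothetical, not from any board file), DIV_ROUND_UP(104000000, 40000000) - 1 = 2 cycles, placed at bit 24 for WWIDTH. A small stand-alone C sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static uint32_t slctac_clocks(uint32_t clk_hz, uint32_t rate_hz, unsigned int shift)
{
	uint32_t cycles = DIV_ROUND_UP(clk_hz, rate_hz) - 1;

	if (cycles > 0xF)	/* each timing field is 4 bits wide */
		cycles = 0xF;
	return cycles << shift;
}

int main(void)
{
	/* 104 MHz clock, 40 MHz write pulse -> 2 cycles in the WWIDTH field */
	printf("WWIDTH bits: 0x%08x\n", slctac_clocks(104000000u, 40000000u, 24));
	return 0;
}
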
@@ -660,7 +663,8 @@ static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
*/
static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf,
+ int oob_required, int page)
{
struct lpc32xx_nand_host *host = chip->priv;
uint8_t *pb = chip->oob_poi + chip->ecc.layout->eccpos[0];
@@ -689,7 +693,7 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
struct nand_chip *chip,
const uint8_t *buf,
- int oob_required)
+ int oob_required, int page)
{
/* Raw writes can just use the FIFO interface */
chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
@@ -810,7 +814,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
res = -ENOENT;
goto err_exit1;
}
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
/* Set NAND IO addresses and command/ready functions */
chip->IO_ADDR_R = SLC_DATA(host->io_base);
@@ -915,7 +919,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
err_exit3:
dma_release_channel(host->dma_chan);
err_exit2:
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
err_exit1:
lpc32xx_wp_enable(host);
@@ -939,7 +943,7 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)
tmp &= ~SLCCFG_CE_LOW;
writel(tmp, SLC_CTRL(host->io_base));
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
lpc32xx_wp_enable(host);
return 0;
@@ -951,7 +955,7 @@ static int lpc32xx_nand_resume(struct platform_device *pdev)
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
/* Re-enable NAND clock */
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
/* Fresh init of NAND controller */
lpc32xx_nand_setup(host);
@@ -976,7 +980,7 @@ static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
lpc32xx_wp_enable(host);
/* Disable clock */
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
return 0;
}
diff --git a/kernel/drivers/mtd/nand/mpc5121_nfc.c b/kernel/drivers/mtd/nand/mpc5121_nfc.c
index 1f12e5bfb..d6bbde4a5 100644
--- a/kernel/drivers/mtd/nand/mpc5121_nfc.c
+++ b/kernel/drivers/mtd/nand/mpc5121_nfc.c
@@ -659,6 +659,7 @@ static int mpc5121_nfc_probe(struct platform_device *op)
chip = &prv->chip;
mtd->priv = chip;
+ mtd->dev.parent = dev;
chip->priv = prv;
prv->dev = dev;
@@ -837,10 +838,11 @@ static int mpc5121_nfc_remove(struct platform_device *op)
return 0;
}
-static struct of_device_id mpc5121_nfc_match[] = {
+static const struct of_device_id mpc5121_nfc_match[] = {
{ .compatible = "fsl,mpc5121-nfc", },
{},
};
+MODULE_DEVICE_TABLE(of, mpc5121_nfc_match);
static struct platform_driver mpc5121_nfc_driver = {
.probe = mpc5121_nfc_probe,
diff --git a/kernel/drivers/mtd/nand/mxc_nand.c b/kernel/drivers/mtd/nand/mxc_nand.c
index 372e0e38f..136e73a3e 100644
--- a/kernel/drivers/mtd/nand/mxc_nand.c
+++ b/kernel/drivers/mtd/nand/mxc_nand.c
@@ -189,6 +189,7 @@ struct mxc_nand_host {
int clk_act;
int irq;
int eccsize;
+ int used_oobsize;
int active_cs;
struct completion op_completion;
@@ -280,12 +281,44 @@ static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
*t++ = __raw_readl(s++);
}
+static void memcpy16_fromio(void *trg, const void __iomem *src, size_t size)
+{
+ int i;
+ u16 *t = trg;
+ const __iomem u16 *s = src;
+
+ /* We assume that src (IO) is always 32bit aligned */
+ if (PTR_ALIGN(trg, 4) == trg && IS_ALIGNED(size, 4)) {
+ memcpy32_fromio(trg, src, size);
+ return;
+ }
+
+ for (i = 0; i < (size >> 1); i++)
+ *t++ = __raw_readw(s++);
+}
+
static inline void memcpy32_toio(void __iomem *trg, const void *src, int size)
{
/* __iowrite32_copy use 32bit size values so divide by 4 */
__iowrite32_copy(trg, src, size / 4);
}
+static void memcpy16_toio(void __iomem *trg, const void *src, int size)
+{
+ int i;
+ __iomem u16 *t = trg;
+ const u16 *s = src;
+
+ /* We assume that trg (IO) is always 32bit aligned */
+ if (PTR_ALIGN(src, 4) == src && IS_ALIGNED(size, 4)) {
+ memcpy32_toio(trg, src, size);
+ return;
+ }
+
+ for (i = 0; i < (size >> 1); i++)
+ __raw_writew(*s++, t++);
+}
+
static int check_int_v3(struct mxc_nand_host *host)
{
uint32_t tmp;
@@ -807,32 +840,48 @@ static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
}
/*
- * Function to transfer data to/from spare area.
+ * The controller splits a page into data chunks of 512 bytes + partial oob.
+ * There are writesize / 512 such chunks; the size of the partial oob parts is
+ * oobsize / #chunks, rounded down to a multiple of 2. The last oob chunk
+ * additionally contains the byte lost to rounding (if any).
+ * This function handles the needed shuffling between host->data_buf (which
+ * holds a page in natural order, i.e. writesize bytes data + oobsize bytes
+ * spare) and the NFC buffer.
*/
static void copy_spare(struct mtd_info *mtd, bool bfrom)
{
struct nand_chip *this = mtd->priv;
struct mxc_nand_host *host = this->priv;
- u16 i, j;
- u16 n = mtd->writesize >> 9;
+ u16 i, oob_chunk_size;
+ u16 num_chunks = mtd->writesize / 512;
+
u8 *d = host->data_buf + mtd->writesize;
u8 __iomem *s = host->spare0;
- u16 t = host->devtype_data->spare_len;
+ u16 sparebuf_size = host->devtype_data->spare_len;
- j = (mtd->oobsize / n >> 1) << 1;
+ /* size of oob chunk for all but possibly the last one */
+ oob_chunk_size = (host->used_oobsize / num_chunks) & ~1;
if (bfrom) {
- for (i = 0; i < n - 1; i++)
- memcpy32_fromio(d + i * j, s + i * t, j);
-
- /* the last section */
- memcpy32_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
+ for (i = 0; i < num_chunks - 1; i++)
+ memcpy16_fromio(d + i * oob_chunk_size,
+ s + i * sparebuf_size,
+ oob_chunk_size);
+
+ /* the last chunk */
+ memcpy16_fromio(d + i * oob_chunk_size,
+ s + i * sparebuf_size,
+ host->used_oobsize - i * oob_chunk_size);
} else {
- for (i = 0; i < n - 1; i++)
- memcpy32_toio(&s[i * t], &d[i * j], j);
-
- /* the last section */
- memcpy32_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
+ for (i = 0; i < num_chunks - 1; i++)
+ memcpy16_toio(&s[i * sparebuf_size],
+ &d[i * oob_chunk_size],
+ oob_chunk_size);
+
+ /* the last chunk */
+ memcpy16_toio(&s[i * sparebuf_size],
+ &d[i * oob_chunk_size],
+ host->used_oobsize - i * oob_chunk_size);
}
}
@@ -911,6 +960,23 @@ static int get_eccsize(struct mtd_info *mtd)
return 8;
}
+static void ecc_8bit_layout_4k(struct nand_ecclayout *layout)
+{
+ int i, j;
+
+ layout->eccbytes = 8*18;
+ for (i = 0; i < 8; i++)
+ for (j = 0; j < 18; j++)
+ layout->eccpos[i*18 + j] = i*26 + j + 7;
+
+ layout->oobfree[0].offset = 2;
+ layout->oobfree[0].length = 4;
+ for (i = 1; i < 8; i++) {
+ layout->oobfree[i].offset = i*26;
+ layout->oobfree[i].length = 7;
+ }
+}
+
static void preset_v1(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd->priv;
@@ -1350,7 +1416,7 @@ static inline int is_imx53_nfc(struct mxc_nand_host *host)
return host->devtype_data == &imx53_nand_devtype_data;
}
-static struct platform_device_id mxcnd_devtype[] = {
+static const struct platform_device_id mxcnd_devtype[] = {
{
.name = "imx21-nand",
.driver_data = (kernel_ulong_t) &imx21_nand_devtype_data,
@@ -1392,6 +1458,7 @@ static const struct of_device_id mxcnd_dt_ids[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mxcnd_dt_ids);
static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
{
@@ -1450,7 +1517,6 @@ static int mxcnd_probe(struct platform_device *pdev)
this = &host->nand;
mtd = &host->mtd;
mtd->priv = this;
- mtd->owner = THIS_MODULE;
mtd->dev.parent = &pdev->dev;
mtd->name = DRIVER_NAME;
@@ -1587,8 +1653,20 @@ static int mxcnd_probe(struct platform_device *pdev)
if (mtd->writesize == 2048)
this->ecc.layout = host->devtype_data->ecclayout_2k;
- else if (mtd->writesize == 4096)
+ else if (mtd->writesize == 4096) {
this->ecc.layout = host->devtype_data->ecclayout_4k;
+ if (get_eccsize(mtd) == 8)
+ ecc_8bit_layout_4k(this->ecc.layout);
+ }
+
+ /*
+ * Experimentation shows that i.MX NFC can only handle up to 218 oob
+ * bytes. Limit used_oobsize to 218 so as to not confuse copy_spare()
+ * into copying invalid data to/from the spare IO buffer, as this
+ * might cause ECC data corruption when doing sub-page write to a
+ * partially written page.
+ */
+ host->used_oobsize = min(mtd->oobsize, 218U);
if (this->ecc.mode == NAND_ECC_HW) {
if (is_imx21_nfc(host) || is_imx27_nfc(host))
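
As a worked example tying the 218-byte cap to copy_spare() and ecc_8bit_layout_4k() above (the page geometry is illustrative): on a 4096-byte page with 224 OOB bytes, used_oobsize becomes 218, num_chunks = 8, and the per-chunk OOB size is (218 / 8) & ~1 = 26, the same 26-byte stride used for the eccpos entries, while the last chunk carries the remaining 218 - 7 * 26 = 36 bytes. A stand-alone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int used_oobsize = 218, num_chunks = 4096 / 512;	/* 8 chunks */
	unsigned int chunk = (used_oobsize / num_chunks) & ~1u;	/* 26 bytes */
	unsigned int last = used_oobsize - (num_chunks - 1) * chunk;	/* 36 bytes */

	printf("per-chunk oob = %u, last chunk = %u\n", chunk, last);
	return 0;
}
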
diff --git a/kernel/drivers/mtd/nand/nand_base.c b/kernel/drivers/mtd/nand/nand_base.c
index c2e1232cd..3ff583f16 100644
--- a/kernel/drivers/mtd/nand/nand_base.c
+++ b/kernel/drivers/mtd/nand/nand_base.c
@@ -1,6 +1,4 @@
/*
- * drivers/mtd/nand.c
- *
* Overview:
* This is the generic MTD driver for NAND flash devices. It should be
* capable of working with almost all NAND chips currently available.
@@ -48,6 +46,7 @@
#include <linux/leds.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_mtd.h>
/* Define default oob placement schemes for large and small page devices */
static struct nand_ecclayout nand_oob_8 = {
@@ -544,23 +543,32 @@ static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
}
}
-/* Wait for the ready pin, after a command. The timeout is caught later. */
+/**
+ * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @mtd: MTD device structure
+ *
+ * Wait for the ready pin after a command, and warn if a timeout occurs.
+ */
void nand_wait_ready(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- unsigned long timeo = jiffies + msecs_to_jiffies(20);
+ unsigned long timeo = 400;
- /* 400ms timeout */
if (in_interrupt() || oops_in_progress)
- return panic_nand_wait_ready(mtd, 400);
+ return panic_nand_wait_ready(mtd, timeo);
led_trigger_event(nand_led_trigger, LED_FULL);
/* Wait until command is processed or timeout occurs */
+ timeo = jiffies + msecs_to_jiffies(timeo);
do {
if (chip->dev_ready(mtd))
- break;
- touch_softlockup_watchdog();
+ goto out;
+ cond_resched();
} while (time_before(jiffies, timeo));
+
+ pr_warn_ratelimited(
+ "timeout while waiting for chip to become ready\n");
+out:
led_trigger_event(nand_led_trigger, LED_OFF);
}
EXPORT_SYMBOL_GPL(nand_wait_ready);
@@ -886,15 +894,13 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: MTD device structure
* @chip: NAND chip structure
*
- * Wait for command done. This applies to erase and program only. Erase can
- * take up to 400ms and program up to 20ms according to general NAND and
- * SmartMedia specs.
+ * Wait for command done. This applies to erase and program only.
*/
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
- int status, state = chip->state;
- unsigned long timeo = (state == FL_ERASING ? 400 : 20);
+ int status;
+ unsigned long timeo = 400;
led_trigger_event(nand_led_trigger, LED_FULL);
@@ -910,7 +916,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
panic_nand_wait(mtd, chip, timeo);
else {
timeo = jiffies + msecs_to_jiffies(timeo);
- while (time_before(jiffies, timeo)) {
+ do {
if (chip->dev_ready) {
if (chip->dev_ready(mtd))
break;
@@ -919,7 +925,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
break;
}
cond_resched();
- }
+ } while (time_before(jiffies, timeo));
}
led_trigger_event(nand_led_trigger, LED_OFF);
@@ -1102,6 +1108,134 @@ out:
EXPORT_SYMBOL(nand_lock);
/**
+ * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
+ * @buf: buffer to test
+ * @len: buffer length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a buffer contains only 0xff, which means the underlying region
+ * has been erased and is ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * considering the region as not erased.
+ * Note: The logic of this function has been extracted from the memweight
+ * implementation, except that nand_check_erased_buf() exits before
+ * testing the whole buffer if the number of bitflips exceeds the
+ * bitflips_threshold value.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold.
+ */
+static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
+{
+ const unsigned char *bitmap = buf;
+ int bitflips = 0;
+ int weight;
+
+ for (; len && ((uintptr_t)bitmap) % sizeof(long);
+ len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ for (; len >= sizeof(long);
+ len -= sizeof(long), bitmap += sizeof(long)) {
+ weight = hweight_long(*((unsigned long *)bitmap));
+ bitflips += BITS_PER_LONG - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ for (; len > 0; len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ return bitflips;
+}
+
+/**
+ * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
+ * 0xff data
+ * @data: data buffer to test
+ * @datalen: data length
+ * @ecc: ECC buffer
+ * @ecclen: ECC length
+ * @extraoob: extra OOB buffer
+ * @extraooblen: extra OOB length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a data buffer and its associated ECC and OOB data contain only
+ * the 0xff pattern, which means the underlying region has been erased and is
+ * ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * considering the region as not erased.
+ *
+ * Note:
+ * 1/ ECC algorithms work on pre-defined block sizes which are usually
+ * different from the NAND page size. When fixing bitflips, ECC engines will
+ * report the number of errors per chunk, and the NAND core infrastructure
+ * expects you to return the maximum number of bitflips for the whole page.
+ * This is why you should always use this function on a single chunk and
+ * not on the whole page. After checking each chunk you should update your
+ * max_bitflips value accordingly.
+ * 2/ When checking for bitflips in erased pages you should not only check
+ * the payload data but also their associated ECC data, because a user might
+ * have programmed almost all bits to 1, leaving only a few at 0. In this
+ * case, we shouldn't consider the chunk as erased, and checking the ECC
+ * bytes prevents this case.
+ * 3/ The extraoob argument is optional, and should be used if some of your OOB
+ * data are protected by the ECC engine.
+ * It could also be used if you support subpages and want to attach some
+ * extra OOB data to an ECC chunk.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold. In case of success, the passed buffers are filled with 0xff.
+ */
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+ void *ecc, int ecclen,
+ void *extraoob, int extraooblen,
+ int bitflips_threshold)
+{
+ int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
+
+ data_bitflips = nand_check_erased_buf(data, datalen,
+ bitflips_threshold);
+ if (data_bitflips < 0)
+ return data_bitflips;
+
+ bitflips_threshold -= data_bitflips;
+
+ ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
+ if (ecc_bitflips < 0)
+ return ecc_bitflips;
+
+ bitflips_threshold -= ecc_bitflips;
+
+ extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
+ bitflips_threshold);
+ if (extraoob_bitflips < 0)
+ return extraoob_bitflips;
+
+ if (data_bitflips)
+ memset(data, 0xff, datalen);
+
+ if (ecc_bitflips)
+ memset(ecc, 0xff, ecclen);
+
+ if (extraoob_bitflips)
+ memset(extraoob, 0xff, extraooblen);
+
+ return data_bitflips + ecc_bitflips + extraoob_bitflips;
+}
+EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
+
+/**
* nand_read_page_raw - [INTERN] read raw page data without ecc
* @mtd: mtd info structure
* @chip: nand chip info structure
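
A hedged sketch of how a controller driver's read path might use nand_check_erased_ecc_chunk() once its ECC engine reports an uncorrectable chunk; the helper function, buffer names and chunk geometry below are illustrative, not taken from any driver in this series.

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static int example_handle_uncorrectable(struct mtd_info *mtd,
					struct nand_chip *chip,
					u8 *data, u8 *ecc,
					unsigned int *max_bitflips)
{
	int ret;

	/* Distinguish a genuinely erased chunk from a real ECC failure. */
	ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
					  ecc, chip->ecc.bytes,
					  NULL, 0, chip->ecc.strength);
	if (ret < 0) {
		mtd->ecc_stats.failed++;	/* truly uncorrectable */
		return ret;
	}

	/* Erased chunk with a few bitflips: buffers are now 0xff, report them. */
	*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
	return ret;
}
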
@@ -2028,11 +2162,12 @@ out:
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
*
* Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required, int page)
{
chip->write_buf(mtd, buf, mtd->writesize);
if (oob_required)
@@ -2047,12 +2182,14 @@ static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
*
* We need a special oob layout and handling even when ECC isn't checked.
*/
static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required,
+ int page)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -2089,9 +2226,11 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
*/
static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required,
+ int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -2107,7 +2246,7 @@ static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < chip->ecc.total; i++)
chip->oob_poi[eccpos[i]] = ecc_calc[i];
- return chip->ecc.write_page_raw(mtd, chip, buf, 1);
+ return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
}
/**
@@ -2116,9 +2255,11 @@ static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
*/
static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required,
+ int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -2150,11 +2291,12 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* @data_len: data length
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
*/
static int nand_write_subpage_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, uint32_t offset,
uint32_t data_len, const uint8_t *buf,
- int oob_required)
+ int oob_required, int page)
{
uint8_t *oob_buf = chip->oob_poi;
uint8_t *ecc_calc = chip->buffers->ecccalc;
@@ -2209,13 +2351,15 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
*
* The hw generator calculates the error syndrome automatically. Therefore we
* need a special oob layout and handling.
*/
static int nand_write_page_syndrome(struct mtd_info *mtd,
struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required,
+ int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -2279,12 +2423,13 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (unlikely(raw))
status = chip->ecc.write_page_raw(mtd, chip, buf,
- oob_required);
+ oob_required, page);
else if (subpage)
status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
- buf, oob_required);
+ buf, oob_required, page);
else
- status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+ status = chip->ecc.write_page(mtd, chip, buf, oob_required,
+ page);
if (status < 0)
return status;
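
All of the recurring "+ int page" hunks in this series implement one interface change: the per-chip ECC write callbacks now receive the target page number directly instead of deducing it later. A reconstructed sketch of the updated callback shapes as dispatched by nand_write_page() above (the struct name is made up for illustration; the real fields live in struct nand_ecc_ctrl):

#include <linux/types.h>

struct mtd_info;
struct nand_chip;

struct nand_ecc_write_ops_sketch {
	int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
			  const uint8_t *buf, int oob_required, int page);
	int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
			      const uint8_t *buf, int oob_required, int page);
	int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
			     uint32_t offset, uint32_t data_len,
			     const uint8_t *buf, int oob_required, int page);
};
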
@@ -2928,9 +3073,6 @@ static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
& ONFI_OPT_CMD_SET_GET_FEATURES))
return -EINVAL;
- /* clear the sub feature parameters */
- memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
-
chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
*subfeature_param++ = chip->read_byte(mtd);
@@ -2968,7 +3110,7 @@ static void nand_resume(struct mtd_info *mtd)
*/
static void nand_shutdown(struct mtd_info *mtd)
{
- nand_get_device(mtd, FL_SHUTDOWN);
+ nand_get_device(mtd, FL_PM_SUSPENDED);
}
/* Set default functions */
@@ -3689,7 +3831,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (find_full_id_nand(mtd, chip, type, id_data, &busw))
goto ident_done;
} else if (*dev_id == type->dev_id) {
- break;
+ break;
}
}
@@ -3712,10 +3854,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->chipsize = (uint64_t)type->chipsize << 20;
- if (!type->pagesize && chip->init_size) {
- /* Set the pagesize, oobsize, erasesize by the driver */
- busw = chip->init_size(mtd, chip, id_data);
- } else if (!type->pagesize) {
+ if (!type->pagesize) {
/* Decode parameters from extended ID */
nand_decode_ext_id(mtd, chip, id_data, &busw);
} else {
@@ -3798,6 +3937,39 @@ ident_done:
return type;
}
+static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip,
+ struct device_node *dn)
+{
+ int ecc_mode, ecc_strength, ecc_step;
+
+ if (of_get_nand_bus_width(dn) == 16)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ if (of_get_nand_on_flash_bbt(dn))
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ ecc_mode = of_get_nand_ecc_mode(dn);
+ ecc_strength = of_get_nand_ecc_strength(dn);
+ ecc_step = of_get_nand_ecc_step_size(dn);
+
+ if ((ecc_step >= 0 && !(ecc_strength >= 0)) ||
+ (!(ecc_step >= 0) && ecc_strength >= 0)) {
+ pr_err("must set both strength and step size in DT\n");
+ return -EINVAL;
+ }
+
+ if (ecc_mode >= 0)
+ chip->ecc.mode = ecc_mode;
+
+ if (ecc_strength >= 0)
+ chip->ecc.strength = ecc_strength;
+
+ if (ecc_step > 0)
+ chip->ecc.size = ecc_step;
+
+ return 0;
+}
+
/**
* nand_scan_ident - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
@@ -3815,6 +3987,16 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
int i, nand_maf_id, nand_dev_id;
struct nand_chip *chip = mtd->priv;
struct nand_flash_dev *type;
+ int ret;
+
+ if (chip->flash_node) {
+ ret = nand_dt_init(mtd, chip, chip->flash_node);
+ if (ret)
+ return ret;
+ }
+
+ if (!mtd->name && mtd->dev.parent)
+ mtd->name = dev_name(mtd->dev.parent);
/* Set the default functions */
nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
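
nand_dt_init() only runs when the driver has set chip->flash_node before calling nand_scan_ident(), which is exactly what the earlier fsmc hunk does with nand->flash_node = np. A minimal, hypothetical probe fragment showing that ordering (names and error handling are illustrative only):

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/of.h>

static int example_probe_ident(struct mtd_info *mtd, struct nand_chip *chip,
			       struct device_node *np)
{
	/* Driver defaults; nand_dt_init() may override mode/strength/step. */
	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = 512;

	/* Lets nand_scan_ident() pick up nand-ecc-* properties from DT. */
	chip->flash_node = np;

	return nand_scan_ident(mtd, 1, NULL);
}
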
diff --git a/kernel/drivers/mtd/nand/nand_bbt.c b/kernel/drivers/mtd/nand/nand_bbt.c
index 9bb8453d2..b1d4f813a 100644
--- a/kernel/drivers/mtd/nand/nand_bbt.c
+++ b/kernel/drivers/mtd/nand/nand_bbt.c
@@ -1,6 +1,4 @@
/*
- * drivers/mtd/nand_bbt.c
- *
* Overview:
* Bad block table support for the NAND driver
*
@@ -64,7 +62,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/bbm.h>
#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
@@ -720,7 +717,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
/* Must we save the block contents? */
if (td->options & NAND_BBT_SAVECONTENT) {
/* Make it block aligned */
- to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
+ to &= ~(((loff_t)1 << this->bbt_erase_shift) - 1);
len = 1 << this->bbt_erase_shift;
res = mtd_read(mtd, to, len, &retlen, buf);
if (res < 0) {
@@ -1075,15 +1072,15 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
* The bad block table memory is allocated here. It must be freed by calling
* the nand_free_bbt function.
*/
-int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct nand_chip *this = mtd->priv;
- int len, res = 0;
+ int len, res;
uint8_t *buf;
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
- len = mtd->size >> (this->bbt_erase_shift + 2);
+ len = (mtd->size >> (this->bbt_erase_shift + 2)) ? : 1;
/*
* Allocate memory (2bit per block) and clear the memory bad block
* table.
@@ -1099,10 +1096,9 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
if (!td) {
if ((res = nand_memory_bbt(mtd, bd))) {
pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
- kfree(this->bbt);
- this->bbt = NULL;
+ goto err;
}
- return res;
+ return 0;
}
verify_bbt_descr(mtd, td);
verify_bbt_descr(mtd, md);
@@ -1112,9 +1108,8 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
len += (len >> this->page_shift) * mtd->oobsize;
buf = vmalloc(len);
if (!buf) {
- kfree(this->bbt);
- this->bbt = NULL;
- return -ENOMEM;
+ res = -ENOMEM;
+ goto err;
}
/* Is the bbt at a given page? */
@@ -1126,6 +1121,8 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
}
res = check_create(mtd, buf, bd);
+ if (res)
+ goto err;
/* Prevent the bbt regions from erasing / writing */
mark_bbt_region(mtd, td);
@@ -1133,6 +1130,11 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
mark_bbt_region(mtd, md);
vfree(buf);
+ return 0;
+
+err:
+ kfree(this->bbt);
+ this->bbt = NULL;
return res;
}
diff --git a/kernel/drivers/mtd/nand/nand_ids.c b/kernel/drivers/mtd/nand/nand_ids.c
index dd620c19c..a8804a3da 100644
--- a/kernel/drivers/mtd/nand/nand_ids.c
+++ b/kernel/drivers/mtd/nand/nand_ids.c
@@ -1,6 +1,4 @@
/*
- * drivers/mtd/nandids.c
- *
* Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
*
* This program is free software; you can redistribute it and/or modify
@@ -31,6 +29,10 @@ struct nand_flash_dev nand_flash_ids[] = {
* listed by full ID. We list them first so that we can easily identify
* the most specific match.
*/
+ {"TC58NVG0S3E 1G 3.3V 8-bit",
+ { .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
+ SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512),
+ 2 },
{"TC58NVG2S0F 4G 3.3V 8-bit",
{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
diff --git a/kernel/drivers/mtd/nand/nandsim.c b/kernel/drivers/mtd/nand/nandsim.c
index f2324271b..b16d70aaf 100644
--- a/kernel/drivers/mtd/nand/nandsim.c
+++ b/kernel/drivers/mtd/nand/nandsim.c
@@ -729,8 +729,7 @@ static int init_nandsim(struct mtd_info *mtd)
/* Fill the partition_info structure */
if (parts_num > ARRAY_SIZE(ns->partitions)) {
NS_ERR("too many partitions.\n");
- ret = -EINVAL;
- goto error;
+ return -EINVAL;
}
remains = ns->geom.totsz;
next_offset = 0;
@@ -739,10 +738,13 @@ static int init_nandsim(struct mtd_info *mtd)
if (!part_sz || part_sz > remains) {
NS_ERR("bad partition size.\n");
- ret = -EINVAL;
- goto error;
+ return -EINVAL;
}
ns->partitions[i].name = get_partition_name(i);
+ if (!ns->partitions[i].name) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
ns->partitions[i].offset = next_offset;
ns->partitions[i].size = part_sz;
next_offset += ns->partitions[i].size;
@@ -752,10 +754,13 @@ static int init_nandsim(struct mtd_info *mtd)
if (remains) {
if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
NS_ERR("too many partitions.\n");
- ret = -EINVAL;
- goto error;
+ return -EINVAL;
}
ns->partitions[i].name = get_partition_name(i);
+ if (!ns->partitions[i].name) {
+ NS_ERR("unable to allocate memory.\n");
+ return -ENOMEM;
+ }
ns->partitions[i].offset = next_offset;
ns->partitions[i].size = remains;
ns->nbparts += 1;
@@ -782,24 +787,18 @@ static int init_nandsim(struct mtd_info *mtd)
printk("options: %#x\n", ns->options);
if ((ret = alloc_device(ns)) != 0)
- goto error;
+ return ret;
/* Allocate / initialize the internal buffer */
ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
if (!ns->buf.byte) {
NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
ns->geom.pgszoob);
- ret = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
return 0;
-
-error:
- free_device(ns);
-
- return ret;
}
/*
diff --git a/kernel/drivers/mtd/nand/ndfc.c b/kernel/drivers/mtd/nand/ndfc.c
index 3187c6b92..4f0d62f9d 100644
--- a/kernel/drivers/mtd/nand/ndfc.c
+++ b/kernel/drivers/mtd/nand/ndfc.c
@@ -1,6 +1,4 @@
/*
- * drivers/mtd/ndfc.c
- *
* Overview:
* Platform independent driver for NDFC (NanD Flash Controller)
* integrated into EP440 cores
@@ -171,7 +169,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->priv = ndfc;
ndfc->mtd.priv = chip;
- ndfc->mtd.owner = THIS_MODULE;
+ ndfc->mtd.dev.parent = &ndfc->ofdev->dev;
flash_np = of_get_next_child(node, NULL);
if (!flash_np)
diff --git a/kernel/drivers/mtd/nand/nuc900_nand.c b/kernel/drivers/mtd/nand/nuc900_nand.c
index e58c644dd..f0687f71f 100644
--- a/kernel/drivers/mtd/nand/nuc900_nand.c
+++ b/kernel/drivers/mtd/nand/nuc900_nand.c
@@ -250,7 +250,7 @@ static int nuc900_nand_probe(struct platform_device *pdev)
chip = &(nuc900_nand->chip);
nuc900_nand->mtd.priv = chip;
- nuc900_nand->mtd.owner = THIS_MODULE;
+ nuc900_nand->mtd.dev.parent = &pdev->dev;
spin_lock_init(&nuc900_nand->lock);
nuc900_nand->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/kernel/drivers/mtd/nand/omap2.c b/kernel/drivers/mtd/nand/omap2.c
index 60fa89939..93f664cd1 100644
--- a/kernel/drivers/mtd/nand/omap2.c
+++ b/kernel/drivers/mtd/nand/omap2.c
@@ -1500,11 +1500,12 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
*
* Custom write page method evolved to support multi sector writing in one shot
*/
static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required, int page)
{
int i;
uint8_t *ecc_calc = chip->buffers->ecccalc;
@@ -1684,8 +1685,7 @@ static int omap_nand_probe(struct platform_device *pdev)
info->ecc_opt = pdata->ecc_opt;
mtd = &info->mtd;
mtd->priv = &info->nand;
- mtd->name = dev_name(&pdev->dev);
- mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
nand_chip = &info->nand;
nand_chip->ecc.priv = NULL;
diff --git a/kernel/drivers/mtd/nand/omap_elm.c b/kernel/drivers/mtd/nand/omap_elm.c
index 376bfe191..235ec7992 100644
--- a/kernel/drivers/mtd/nand/omap_elm.c
+++ b/kernel/drivers/mtd/nand/omap_elm.c
@@ -574,5 +574,5 @@ module_platform_driver(elm_driver);
MODULE_DESCRIPTION("ELM driver for BCH error correction");
MODULE_AUTHOR("Texas Instruments");
-MODULE_ALIAS("platform: elm");
+MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mtd/nand/orion_nand.c b/kernel/drivers/mtd/nand/orion_nand.c
index c3c6d305c..ee83749fb 100644
--- a/kernel/drivers/mtd/nand/orion_nand.c
+++ b/kernel/drivers/mtd/nand/orion_nand.c
@@ -124,7 +124,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
}
mtd->priv = nc;
- mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
nc->priv = board;
nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
@@ -201,6 +201,7 @@ static const struct of_device_id orion_nand_of_match_table[] = {
{ .compatible = "marvell,orion-nand", },
{},
};
+MODULE_DEVICE_TABLE(of, orion_nand_of_match_table);
#endif
static struct platform_driver orion_nand_driver = {
diff --git a/kernel/drivers/mtd/nand/pasemi_nand.c b/kernel/drivers/mtd/nand/pasemi_nand.c
index 66c345b42..83cf021b9 100644
--- a/kernel/drivers/mtd/nand/pasemi_nand.c
+++ b/kernel/drivers/mtd/nand/pasemi_nand.c
@@ -124,7 +124,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
/* Link the private data with the MTD structure */
pasemi_nand_mtd->priv = chip;
- pasemi_nand_mtd->owner = THIS_MODULE;
+ pasemi_nand_mtd->dev.parent = &ofdev->dev;
chip->IO_ADDR_R = of_iomap(np, 0);
chip->IO_ADDR_W = chip->IO_ADDR_R;
diff --git a/kernel/drivers/mtd/nand/plat_nand.c b/kernel/drivers/mtd/nand/plat_nand.c
index 4535c263f..65b9dbbe6 100644
--- a/kernel/drivers/mtd/nand/plat_nand.c
+++ b/kernel/drivers/mtd/nand/plat_nand.c
@@ -24,8 +24,6 @@ struct plat_nand_data {
void __iomem *io_base;
};
-static const char *part_probe_types[] = { "cmdlinepart", NULL };
-
/*
* Probe for the NAND device.
*/
@@ -61,8 +59,7 @@ static int plat_nand_probe(struct platform_device *pdev)
data->chip.priv = &data;
data->mtd.priv = &data->chip;
- data->mtd.owner = THIS_MODULE;
- data->mtd.name = dev_name(&pdev->dev);
+ data->mtd.dev.parent = &pdev->dev;
data->chip.IO_ADDR_R = data->io_base;
data->chip.IO_ADDR_W = data->io_base;
@@ -95,7 +92,7 @@ static int plat_nand_probe(struct platform_device *pdev)
goto out;
}
- part_types = pdata->chip.part_probe_types ? : part_probe_types;
+ part_types = pdata->chip.part_probe_types;
ppdata.of_node = pdev->dev.of_node;
err = mtd_device_parse_register(&data->mtd, part_types, &ppdata,
diff --git a/kernel/drivers/mtd/nand/pxa3xx_nand.c b/kernel/drivers/mtd/nand/pxa3xx_nand.c
index a4615fcc3..e453ae9a1 100644
--- a/kernel/drivers/mtd/nand/pxa3xx_nand.c
+++ b/kernel/drivers/mtd/nand/pxa3xx_nand.c
@@ -15,27 +15,26 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>
-#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
+#if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
#define ARCH_HAS_DMA
#endif
-#ifdef ARCH_HAS_DMA
-#include <mach/dma.h>
-#endif
-
#include <linux/platform_data/mtd-nand-pxa3xx.h>
#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
@@ -44,10 +43,13 @@
/*
* Define a buffer size for the initial command that detects the flash device:
- * STATUS, READID and PARAM. The largest of these is the PARAM command,
- * needing 256 bytes.
+ * STATUS, READID and PARAM.
+ * ONFI param page is 256 bytes, and there are three redundant copies
+ * to be read. JEDEC param page is 512 bytes, and there are also three
+ * redundant copies to be read.
+ * Hence this buffer should be at least 512 x 3. Let's pick 2048.
*/
-#define INIT_BUFFER_SIZE 256
+#define INIT_BUFFER_SIZE 2048
/* registers and bit definitions */
#define NDCR (0x00) /* Control register */
@@ -74,7 +76,8 @@
#define NDCR_ND_MODE (0x3 << 21)
#define NDCR_NAND_MODE (0x0)
#define NDCR_CLR_PG_CNT (0x1 << 20)
-#define NDCR_STOP_ON_UNCOR (0x1 << 19)
+#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
+#define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
@@ -125,6 +128,13 @@
#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
+/*
+ * This should be large enough to read 'ONFI' and 'JEDEC'.
+ * Let's use 7 bytes, which is the maximum ID count supported
+ * by the controller (see NDCR_RD_ID_CNT_MASK).
+ */
+#define READ_ID_BYTES 7
+
/* macros for registers read/write */
#define nand_writel(info, off, val) \
writel_relaxed((val), (info)->mmio_base + (off))
@@ -172,8 +182,6 @@ struct pxa3xx_nand_host {
/* calculated from pxa3xx_nand_flash data */
unsigned int col_addr_cycles;
unsigned int row_addr_cycles;
- size_t read_id_bytes;
-
};
struct pxa3xx_nand_info {
@@ -192,6 +200,10 @@ struct pxa3xx_nand_info {
unsigned int oob_buff_pos;
/* DMA information */
+ struct scatterlist sg;
+ enum dma_data_direction dma_dir;
+ struct dma_chan *dma_chan;
+ dma_cookie_t dma_cookie;
int drcmr_dat;
int drcmr_cmd;
@@ -199,8 +211,6 @@ struct pxa3xx_nand_info {
unsigned char *oob_buff;
dma_addr_t data_buff_phys;
int data_dma_ch;
- struct pxa_dma_desc *data_desc;
- dma_addr_t data_desc_addr;
struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
unsigned int state;
@@ -243,6 +253,25 @@ static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
+struct pxa3xx_nand_timing {
+ unsigned int tCH; /* Enable signal hold time */
+ unsigned int tCS; /* Enable signal setup time */
+ unsigned int tWH; /* ND_nWE high duration */
+ unsigned int tWP; /* ND_nWE pulse time */
+ unsigned int tRH; /* ND_nRE high duration */
+ unsigned int tRP; /* ND_nRE pulse width */
+ unsigned int tR; /* ND_nWE high to ND_nRE low for read */
+ unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
+ unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
+};
+
+struct pxa3xx_nand_flash {
+ uint32_t chip_id;
+ unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
+ unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
+ struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
+};
+
static struct pxa3xx_nand_timing timing[] = {
{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
{ 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
@@ -251,15 +280,14 @@ static struct pxa3xx_nand_timing timing[] = {
};
static struct pxa3xx_nand_flash builtin_flash_types[] = {
-{ "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
-{ "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
-{ "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
-{ "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
-{ "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
-{ "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
-{ "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
-{ "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
-{ "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
+ { 0x46ec, 16, 16, &timing[1] },
+ { 0xdaec, 8, 8, &timing[1] },
+ { 0xd7ec, 8, 8, &timing[1] },
+ { 0xa12c, 8, 8, &timing[2] },
+ { 0xb12c, 16, 16, &timing[2] },
+ { 0xdc2c, 8, 8, &timing[2] },
+ { 0xcc2c, 16, 16, &timing[2] },
+ { 0xba20, 16, 16, &timing[3] },
};
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
@@ -320,9 +348,6 @@ static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
.oobfree = { }
};
-/* Define a default flash type setting serve as flash detecting only */
-#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
-
#define NDTR0_tCH(c) (min((c), 7) << 19)
#define NDTR0_tCS(c) (min((c), 7) << 16)
#define NDTR0_tWH(c) (min((c), 7) << 11)
@@ -384,6 +409,128 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
nand_writel(info, NDTR1CS0, ndtr1);
}
+static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
+ const struct nand_sdr_timings *t)
+{
+ struct pxa3xx_nand_info *info = host->info_data;
+ struct nand_chip *chip = &host->chip;
+ unsigned long nand_clk = clk_get_rate(info->clk);
+ uint32_t ndtr0, ndtr1;
+
+ u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
+ u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
+ u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
+ u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
+ u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
+ u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
+ u32 tR = chip->chip_delay * 1000;
+ u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
+ u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
+
+ /* fallback to a default value if tR = 0 */
+ if (!tR)
+ tR = 20000;
+
+ ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
+ NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
+ NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
+ NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
+ NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
+ NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
+
+ ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
+ NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
+ NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
+
+ info->ndtr0cs0 = ndtr0;
+ info->ndtr1cs0 = ndtr1;
+ nand_writel(info, NDTR0CS0, ndtr0);
+ nand_writel(info, NDTR1CS0, ndtr1);
+}
+
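A small worked example of the unit conversion performed above (illustrative only: the nand_sdr_timings fields are expressed in picoseconds, and ns2cycle() then turns nanoseconds into controller clock cycles; the value below is hypothetical):

/* Sketch: a chip reporting tCH_min = 5200 ps */
u32 tCH_min = DIV_ROUND_UP(5200, 1000);	/* = 6 ns, rounded up */
/* pxa3xx_nand_set_sdr_timing() then programs NDTR0_tCH(ns2cycle(6, nand_clk)) */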
+static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
+ unsigned int *flash_width,
+ unsigned int *dfc_width)
+{
+ struct nand_chip *chip = &host->chip;
+ struct pxa3xx_nand_info *info = host->info_data;
+ const struct pxa3xx_nand_flash *f = NULL;
+ int i, id, ntypes;
+
+ ntypes = ARRAY_SIZE(builtin_flash_types);
+
+ chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);
+
+ id = chip->read_byte(host->mtd);
+ id |= chip->read_byte(host->mtd) << 0x8;
+
+ for (i = 0; i < ntypes; i++) {
+ f = &builtin_flash_types[i];
+
+ if (f->chip_id == id)
+ break;
+ }
+
+ if (i == ntypes) {
+ dev_err(&info->pdev->dev, "Error: timings not found\n");
+ return -EINVAL;
+ }
+
+ pxa3xx_nand_set_timing(host, f->timing);
+
+ *flash_width = f->flash_width;
+ *dfc_width = f->dfc_width;
+
+ return 0;
+}
+
+static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
+ int mode)
+{
+ const struct nand_sdr_timings *timings;
+
+ mode = fls(mode) - 1;
+ if (mode < 0)
+ mode = 0;
+
+ timings = onfi_async_timing_mode_to_sdr_timings(mode);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ pxa3xx_nand_set_sdr_timing(host, timings);
+
+ return 0;
+}
+
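For illustration (not driver code): onfi_get_async_timing_mode() returns a bitmask in which bit n set means async timing mode n is supported, so the fls() arithmetic above simply selects the fastest advertised mode:

/* Sketch: a chip advertising ONFI async timing modes 0..4 */
unsigned int supported = 0x1f;	/* bits 0-4 set */
int mode = fls(supported) - 1;	/* fls(0x1f) == 5, so mode == 4 */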
+static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct pxa3xx_nand_info *info = host->info_data;
+ unsigned int flash_width = 0, dfc_width = 0;
+ int mode, err;
+
+ mode = onfi_get_async_timing_mode(chip);
+ if (mode == ONFI_TIMING_MODE_UNKNOWN) {
+ err = pxa3xx_nand_init_timings_compat(host, &flash_width,
+ &dfc_width);
+ if (err)
+ return err;
+
+ if (flash_width == 16) {
+ info->reg_ndcr |= NDCR_DWIDTH_M;
+ chip->options |= NAND_BUSWIDTH_16;
+ }
+
+ info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
+ } else {
+ err = pxa3xx_nand_init_timings_onfi(host, mode);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/*
* Set the data and OOB size, depending on the selected
* spare and ECC configuration.
@@ -438,8 +585,8 @@ static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
ndcr |= NDCR_ND_RUN;
/* clear status bits and run */
- nand_writel(info, NDCR, 0);
nand_writel(info, NDSR, NDSR_MASK);
+ nand_writel(info, NDCR, 0);
nand_writel(info, NDCR, ndcr);
}
@@ -459,6 +606,9 @@ static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
ndcr &= ~NDCR_ND_RUN;
nand_writel(info, NDCR, ndcr);
}
+ if (info->dma_chan)
+ dmaengine_terminate_all(info->dma_chan);
+
/* clear status bits */
nand_writel(info, NDSR, NDSR_MASK);
}
@@ -483,7 +633,8 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
if (info->ecc_bch) {
- int timeout;
+ u32 val;
+ int ret;
/*
* According to the datasheet, when reading from NDDB
@@ -494,18 +645,14 @@ static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
* the polling on the last read.
*/
while (len > 8) {
- __raw_readsl(info->mmio_base + NDDB, data, 8);
-
- for (timeout = 0;
- !(nand_readl(info, NDSR) & NDSR_RDDREQ);
- timeout++) {
- if (timeout >= 5) {
- dev_err(&info->pdev->dev,
- "Timeout on RDDREQ while draining the FIFO\n");
- return;
- }
-
- mdelay(1);
+ ioread32_rep(info->mmio_base + NDDB, data, 8);
+
+ ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
+ val & NDSR_RDDREQ, 1000, 5000);
+ if (ret) {
+ dev_err(&info->pdev->dev,
+ "Timeout on RDDREQ while draining the FIFO\n");
+ return;
}
data += 32;
@@ -513,7 +660,7 @@ static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
}
}
- __raw_readsl(info->mmio_base + NDDB, data, len);
+ ioread32_rep(info->mmio_base + NDDB, data, len);
}
static void handle_data_pio(struct pxa3xx_nand_info *info)
@@ -522,14 +669,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
switch (info->state) {
case STATE_PIO_WRITING:
- __raw_writesl(info->mmio_base + NDDB,
- info->data_buff + info->data_buff_pos,
- DIV_ROUND_UP(do_bytes, 4));
+ writesl(info->mmio_base + NDDB,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(do_bytes, 4));
if (info->oob_size > 0)
- __raw_writesl(info->mmio_base + NDDB,
- info->oob_buff + info->oob_buff_pos,
- DIV_ROUND_UP(info->oob_size, 4));
+ writesl(info->mmio_base + NDDB,
+ info->oob_buff + info->oob_buff_pos,
+ DIV_ROUND_UP(info->oob_size, 4));
break;
case STATE_PIO_READING:
drain_fifo(info,
@@ -553,57 +700,61 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
info->data_size -= do_bytes;
}
-#ifdef ARCH_HAS_DMA
-static void start_data_dma(struct pxa3xx_nand_info *info)
+static void pxa3xx_nand_data_dma_irq(void *data)
{
- struct pxa_dma_desc *desc = info->data_desc;
- int dma_len = ALIGN(info->data_size + info->oob_size, 32);
+ struct pxa3xx_nand_info *info = data;
+ struct dma_tx_state state;
+ enum dma_status status;
- desc->ddadr = DDADR_STOP;
- desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
+ status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
+ if (likely(status == DMA_COMPLETE)) {
+ info->state = STATE_DMA_DONE;
+ } else {
+ dev_err(&info->pdev->dev, "DMA error on data channel\n");
+ info->retcode = ERR_DMABUSERR;
+ }
+ dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
+
+ nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
+ enable_int(info, NDCR_INT_MASK);
+}
+
+static void start_data_dma(struct pxa3xx_nand_info *info)
+{
+ enum dma_transfer_direction direction;
+ struct dma_async_tx_descriptor *tx;
switch (info->state) {
case STATE_DMA_WRITING:
- desc->dsadr = info->data_buff_phys;
- desc->dtadr = info->mmio_phys + NDDB;
- desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
+ info->dma_dir = DMA_TO_DEVICE;
+ direction = DMA_MEM_TO_DEV;
break;
case STATE_DMA_READING:
- desc->dtadr = info->data_buff_phys;
- desc->dsadr = info->mmio_phys + NDDB;
- desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
+ info->dma_dir = DMA_FROM_DEVICE;
+ direction = DMA_DEV_TO_MEM;
break;
default:
dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
info->state);
BUG();
}
-
- DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
- DDADR(info->data_dma_ch) = info->data_desc_addr;
- DCSR(info->data_dma_ch) |= DCSR_RUN;
-}
-
-static void pxa3xx_nand_data_dma_irq(int channel, void *data)
-{
- struct pxa3xx_nand_info *info = data;
- uint32_t dcsr;
-
- dcsr = DCSR(channel);
- DCSR(channel) = dcsr;
-
- if (dcsr & DCSR_BUSERR) {
- info->retcode = ERR_DMABUSERR;
+ info->sg.length = info->data_size +
+ (info->oob_size ? info->spare_size + info->ecc_size : 0);
+ dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
+
+ tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
+ return;
}
-
- info->state = STATE_DMA_DONE;
- enable_int(info, NDCR_INT_MASK);
- nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
+ tx->callback = pxa3xx_nand_data_dma_irq;
+ tx->callback_param = info;
+ info->dma_cookie = dmaengine_submit(tx);
+ dma_async_issue_pending(info->dma_chan);
+ dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
+ __func__, direction, info->dma_cookie, info->sg.length);
}
-#else
-static void start_data_dma(struct pxa3xx_nand_info *info)
-{}
-#endif
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
@@ -677,8 +828,14 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
is_ready = 1;
}
+ /*
+ * Clear all status bits before issuing the next command, which
+ * can and will alter the status bits and will trigger a new
+ * interrupt of its own. This lets the controller exit the IRQ.
+ */
+ nand_writel(info, NDSR, status);
+
if (status & NDSR_WRCMDREQ) {
- nand_writel(info, NDSR, NDSR_WRCMDREQ);
status &= ~NDSR_WRCMDREQ;
info->state = STATE_CMD_HANDLE;
@@ -699,8 +856,6 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
nand_writel(info, NDCB0, info->ndcb3);
}
- /* clear NDSR to let the controller exit the IRQ */
- nand_writel(info, NDSR, status);
if (is_completed)
complete(&info->cmd_complete);
if (is_ready)
@@ -901,18 +1056,18 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
break;
case NAND_CMD_PARAM:
- info->buf_count = 256;
+ info->buf_count = INIT_BUFFER_SIZE;
info->ndcb0 |= NDCB0_CMD_TYPE(0)
| NDCB0_ADDR_CYC(1)
| NDCB0_LEN_OVRD
| command;
info->ndcb1 = (column & 0xFF);
- info->ndcb3 = 256;
- info->data_size = 256;
+ info->ndcb3 = INIT_BUFFER_SIZE;
+ info->data_size = INIT_BUFFER_SIZE;
break;
case NAND_CMD_READID:
- info->buf_count = host->read_id_bytes;
+ info->buf_count = READ_ID_BYTES;
info->ndcb0 |= NDCB0_CMD_TYPE(3)
| NDCB0_ADDR_CYC(1)
| command;
@@ -1118,7 +1273,8 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
}
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf, int oob_required)
+ struct nand_chip *chip, const uint8_t *buf, int oob_required,
+ int page)
{
chip->write_buf(mtd, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -1231,106 +1387,83 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
return NAND_STATUS_READY;
}
-static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
- const struct pxa3xx_nand_flash *f)
+static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
{
struct platform_device *pdev = info->pdev;
struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct pxa3xx_nand_host *host = info->host[info->cs];
- uint32_t ndcr = 0x0; /* enable all interrupts */
-
- if (f->page_size != 2048 && f->page_size != 512) {
- dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
- return -EINVAL;
- }
-
- if (f->flash_width != 16 && f->flash_width != 8) {
- dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
- return -EINVAL;
- }
-
- /* calculate flash information */
- host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
-
- /* calculate addressing information */
- host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
-
- if (f->num_blocks * f->page_per_block > 65536)
- host->row_addr_cycles = 3;
- else
- host->row_addr_cycles = 2;
-
- ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
- ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
- ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
- ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
- ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
- ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
-
- ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
- ndcr |= NDCR_SPARE_EN; /* enable spare by default */
+ struct mtd_info *mtd = host->mtd;
+ struct nand_chip *chip = mtd->priv;
- info->reg_ndcr = ndcr;
+ /* configure default flash values */
+ info->reg_ndcr = 0x0; /* enable all interrupts */
+ info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
+ info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
+ info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
+ info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
+ info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
+ info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
- pxa3xx_nand_set_timing(host, f->timing);
return 0;
}
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
- /*
- * We set 0 by hard coding here, for we don't support keep_config
- * when there is more than one chip attached to the controller
- */
- struct pxa3xx_nand_host *host = info->host[0];
uint32_t ndcr = nand_readl(info, NDCR);
- if (ndcr & NDCR_PAGE_SZ) {
- /* Controller's FIFO size */
- info->chunk_size = 2048;
- host->read_id_bytes = 4;
- } else {
- info->chunk_size = 512;
- host->read_id_bytes = 2;
- }
-
/* Set an initial chunk size */
- info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
+ info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
+ info->reg_ndcr = ndcr &
+ ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
return 0;
}
-#ifdef ARCH_HAS_DMA
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
struct platform_device *pdev = info->pdev;
- int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
+ struct dma_slave_config config;
+ dma_cap_mask_t mask;
+ struct pxad_param param;
+ int ret;
- if (use_dma == 0) {
- info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
- if (info->data_buff == NULL)
- return -ENOMEM;
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+ if (info->data_buff == NULL)
+ return -ENOMEM;
+ if (use_dma == 0)
return 0;
- }
- info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
- &info->data_buff_phys, GFP_KERNEL);
- if (info->data_buff == NULL) {
- dev_err(&pdev->dev, "failed to allocate dma buffer\n");
- return -ENOMEM;
- }
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
- info->data_desc = (void *)info->data_buff + data_desc_offset;
- info->data_desc_addr = info->data_buff_phys + data_desc_offset;
+ sg_init_one(&info->sg, info->data_buff, info->buf_size);
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ param.prio = PXAD_PRIO_LOWEST;
+ param.drcmr = info->drcmr_dat;
+ info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, &pdev->dev,
+ "data");
+ if (!info->dma_chan) {
+ dev_err(&pdev->dev, "unable to request data dma channel\n");
+ return -ENODEV;
+ }
- info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
- pxa3xx_nand_data_dma_irq, info);
- if (info->data_dma_ch < 0) {
- dev_err(&pdev->dev, "failed to request data dma\n");
- dma_free_coherent(&pdev->dev, info->buf_size,
- info->data_buff, info->data_buff_phys);
- return info->data_dma_ch;
+ memset(&config, 0, sizeof(config));
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.src_addr = info->mmio_phys + NDDB;
+ config.dst_addr = info->mmio_phys + NDDB;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+ ret = dmaengine_slave_config(info->dma_chan, &config);
+ if (ret < 0) {
+ dev_err(&info->pdev->dev,
+ "dma channel configuration failed: %d\n",
+ ret);
+ return ret;
}
/*
@@ -1343,43 +1476,30 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
- struct platform_device *pdev = info->pdev;
if (info->use_dma) {
- pxa_free_dma(info->data_dma_ch);
- dma_free_coherent(&pdev->dev, info->buf_size,
- info->data_buff, info->data_buff_phys);
- } else {
- kfree(info->data_buff);
+ dmaengine_terminate_all(info->dma_chan);
+ dma_release_channel(info->dma_chan);
}
-}
-#else
-static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
-{
- info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
- if (info->data_buff == NULL)
- return -ENOMEM;
- return 0;
-}
-
-static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
-{
kfree(info->data_buff);
}
-#endif
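Not part of the driver, but for context on the channel request in pxa3xx_nand_init_buff() above: dma_request_slave_channel_compat() is, as far as I understand it, a convenience wrapper that tries the firmware-described channel first and only falls back to the legacy filter-function path if that fails. A rough behavioural sketch:

/* Rough sketch only -- see include/linux/dmaengine.h */
struct dma_chan *chan;

chan = dma_request_slave_channel(&pdev->dev, "data");	/* DT "dmas"/"dma-names" path */
if (!chan)
	chan = dma_request_channel(mask, pxad_filter_fn, &param);	/* filter fallback */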
-static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
+static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
+ struct pxa3xx_nand_info *info = host->info_data;
struct mtd_info *mtd;
struct nand_chip *chip;
+ const struct nand_sdr_timings *timings;
int ret;
mtd = info->host[info->cs]->mtd;
chip = mtd->priv;
/* use the common timing to make a try */
- ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
- if (ret)
- return ret;
+ timings = onfi_async_timing_mode_to_sdr_timings(0);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ pxa3xx_nand_set_sdr_timing(host, timings);
chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
ret = chip->waitfunc(mtd, chip);
@@ -1464,73 +1584,30 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
struct pxa3xx_nand_info *info = host->info_data;
struct platform_device *pdev = info->pdev;
struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
- const struct pxa3xx_nand_flash *f = NULL;
struct nand_chip *chip = mtd->priv;
- uint32_t id = -1;
- uint64_t chipsize;
- int i, ret, num;
+ int ret;
uint16_t ecc_strength, ecc_step;
if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
goto KEEP_CONFIG;
- ret = pxa3xx_nand_sensing(info);
- if (ret) {
- dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
- info->cs);
+ /* Set a default chunk size */
+ info->chunk_size = 512;
+ ret = pxa3xx_nand_config_flash(info);
+ if (ret)
return ret;
- }
-
- chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
- id = *((uint16_t *)(info->data_buff));
- if (id != 0)
- dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
- else {
- dev_warn(&info->pdev->dev,
- "Read out ID 0, potential timing set wrong!!\n");
-
- return -EINVAL;
- }
-
- num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
- for (i = 0; i < num; i++) {
- if (i < pdata->num_flash)
- f = pdata->flash + i;
- else
- f = &builtin_flash_types[i - pdata->num_flash + 1];
- /* find the chip in default list */
- if (f->chip_id == id)
- break;
- }
-
- if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
- dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
-
- return -EINVAL;
- }
-
- ret = pxa3xx_nand_config_flash(info, f);
+ ret = pxa3xx_nand_sensing(host);
if (ret) {
- dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
+ dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
+ info->cs);
+
return ret;
}
- memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
-
- pxa3xx_flash_ids[0].name = f->name;
- pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
- pxa3xx_flash_ids[0].pagesize = f->page_size;
- chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
- pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
- pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
- if (f->flash_width == 16)
- pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
- pxa3xx_flash_ids[1].name = NULL;
- def = pxa3xx_flash_ids;
KEEP_CONFIG:
+ info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
if (info->reg_ndcr & NDCR_DWIDTH_M)
chip->options |= NAND_BUSWIDTH_16;
@@ -1538,9 +1615,18 @@ KEEP_CONFIG:
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
nand_writel(info, NDECCCTRL, 0x0);
- if (nand_scan_ident(mtd, 1, def))
+ if (nand_scan_ident(mtd, 1, NULL))
return -ENODEV;
+ if (!pdata->keep_config) {
+ ret = pxa3xx_nand_init(host);
+ if (ret) {
+ dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
if (pdata->flash_bbt) {
/*
* We'll use a bad block table stored in-flash and don't
@@ -1630,8 +1716,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
info->pdev = pdev;
info->variant = pxa3xx_nand_get_variant(pdev);
for (cs = 0; cs < pdata->num_cs; cs++) {
- mtd = (struct mtd_info *)((unsigned int)&info[1] +
- (sizeof(*mtd) + sizeof(*host)) * cs);
+ mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
chip = (struct nand_chip *)(&mtd[1]);
host = (struct pxa3xx_nand_host *)chip;
info->host[cs] = host;
@@ -1639,7 +1724,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
host->cs = cs;
host->info_data = info;
mtd->priv = host;
- mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &pdev->dev;
chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
@@ -1666,34 +1751,23 @@ static int alloc_nand_resource(struct platform_device *pdev)
return ret;
if (use_dma) {
- /*
- * This is a dirty hack to make this driver work from
- * devicetree bindings. It can be removed once we have
- * a prober DMA controller framework for DT.
- */
- if (pdev->dev.of_node &&
- of_machine_is_compatible("marvell,pxa3xx")) {
- info->drcmr_dat = 97;
- info->drcmr_cmd = 99;
- } else {
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (r == NULL) {
- dev_err(&pdev->dev,
- "no resource defined for data DMA\n");
- ret = -ENXIO;
- goto fail_disable_clk;
- }
- info->drcmr_dat = r->start;
-
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (r == NULL) {
- dev_err(&pdev->dev,
- "no resource defined for cmd DMA\n");
- ret = -ENXIO;
- goto fail_disable_clk;
- }
- info->drcmr_cmd = r->start;
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev,
+ "no resource defined for data DMA\n");
+ ret = -ENXIO;
+ goto fail_disable_clk;
+ }
+ info->drcmr_dat = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (r == NULL) {
+ dev_err(&pdev->dev,
+ "no resource defined for cmd DMA\n");
+ ret = -ENXIO;
+ goto fail_disable_clk;
}
+ info->drcmr_cmd = r->start;
}
irq = platform_get_irq(pdev, 0);
@@ -1758,6 +1832,16 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
free_irq(irq, info);
pxa3xx_nand_free_buff(info);
+ /*
+ * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
+ * In order to prevent a lockup of the system bus, the DFI bus
+ * arbitration is granted to SMC upon driver removal. This is done by
+ * setting the x_ARB_CNTL bit, which also prevents the NAND controller
+ * from accessing the bus.
+ */
+ nand_writel(info, NDCR,
+ (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
+ NFCV1_NDCR_ARB_CNTL);
clk_disable_unprepare(info->clk);
for (cs = 0; cs < pdata->num_cs; cs++)
@@ -1804,15 +1888,16 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
struct pxa3xx_nand_platform_data *pdata;
struct mtd_part_parser_data ppdata = {};
struct pxa3xx_nand_info *info;
- int ret, cs, probe_success;
+ int ret, cs, probe_success, dma_available;
-#ifndef ARCH_HAS_DMA
- if (use_dma) {
+ dma_available = IS_ENABLED(CONFIG_ARM) &&
+ (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
+ if (use_dma && !dma_available) {
use_dma = 0;
dev_warn(&pdev->dev,
"This platform can't do DMA on this device\n");
}
-#endif
+
ret = pxa3xx_nand_probe_dt(pdev);
if (ret)
return ret;
@@ -1865,35 +1950,22 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
+static int pxa3xx_nand_suspend(struct device *dev)
{
- struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
- struct pxa3xx_nand_platform_data *pdata;
- struct mtd_info *mtd;
- int cs;
+ struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
- pdata = dev_get_platdata(&pdev->dev);
if (info->state) {
- dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
+ dev_err(dev, "driver busy, state = %d\n", info->state);
return -EAGAIN;
}
- for (cs = 0; cs < pdata->num_cs; cs++) {
- mtd = info->host[cs]->mtd;
- mtd_suspend(mtd);
- }
-
return 0;
}
-static int pxa3xx_nand_resume(struct platform_device *pdev)
+static int pxa3xx_nand_resume(struct device *dev)
{
- struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
- struct pxa3xx_nand_platform_data *pdata;
- struct mtd_info *mtd;
- int cs;
+ struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
- pdata = dev_get_platdata(&pdev->dev);
/* We don't want to handle interrupt without calling mtd routine */
disable_int(info, NDCR_INT_MASK);
@@ -1911,10 +1983,6 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
* all status before resume
*/
nand_writel(info, NDSR, NDSR_MASK);
- for (cs = 0; cs < pdata->num_cs; cs++) {
- mtd = info->host[cs]->mtd;
- mtd_resume(mtd);
- }
return 0;
}
@@ -1923,15 +1991,19 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
#define pxa3xx_nand_resume NULL
#endif
+static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
+ .suspend = pxa3xx_nand_suspend,
+ .resume = pxa3xx_nand_resume,
+};
+
static struct platform_driver pxa3xx_nand_driver = {
.driver = {
.name = "pxa3xx-nand",
.of_match_table = pxa3xx_nand_dt_ids,
+ .pm = &pxa3xx_nand_pm_ops,
},
.probe = pxa3xx_nand_probe,
.remove = pxa3xx_nand_remove,
- .suspend = pxa3xx_nand_suspend,
- .resume = pxa3xx_nand_resume,
};
module_platform_driver(pxa3xx_nand_driver);
diff --git a/kernel/drivers/mtd/nand/r852.c b/kernel/drivers/mtd/nand/r852.c
index baea83f4d..d8bb2be32 100644
--- a/kernel/drivers/mtd/nand/r852.c
+++ b/kernel/drivers/mtd/nand/r852.c
@@ -466,7 +466,7 @@ static int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
static int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
uint8_t *read_ecc, uint8_t *calc_ecc)
{
- uint16_t ecc_reg;
+ uint32_t ecc_reg;
uint8_t ecc_status, err_byte;
int i, error = 0;
@@ -641,7 +641,6 @@ static int r852_register_nand_device(struct r852_device *dev)
WARN_ON(dev->card_registred);
- dev->mtd->owner = THIS_MODULE;
dev->mtd->priv = dev->chip;
dev->mtd->dev.parent = &dev->pci_dev->dev;
@@ -653,11 +652,15 @@ static int r852_register_nand_device(struct r852_device *dev)
if (sm_register_device(dev->mtd, dev->sm))
goto error2;
- if (device_create_file(&dev->mtd->dev, &dev_attr_media_type))
+ if (device_create_file(&dev->mtd->dev, &dev_attr_media_type)) {
message("can't create media type sysfs attribute");
+ goto error3;
+ }
dev->card_registred = 1;
return 0;
+error3:
+ nand_release(dev->mtd);
error2:
kfree(dev->mtd);
error1:
diff --git a/kernel/drivers/mtd/nand/s3c2410.c b/kernel/drivers/mtd/nand/s3c2410.c
index 0e02be47c..05105cadd 100644
--- a/kernel/drivers/mtd/nand/s3c2410.c
+++ b/kernel/drivers/mtd/nand/s3c2410.c
@@ -832,7 +832,6 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
nmtd->info = info;
nmtd->mtd.priv = chip;
- nmtd->mtd.owner = THIS_MODULE;
nmtd->set = set;
#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
@@ -1016,6 +1015,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
pr_debug("initialising set %d (%p, info %p)\n",
setno, nmtd, info);
+ nmtd->mtd.dev.parent = &pdev->dev;
s3c2410_nand_init_chip(info, nmtd, sets);
nmtd->scan_res = nand_scan_ident(&nmtd->mtd,
@@ -1105,7 +1105,7 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
/* driver device registration */
-static struct platform_device_id s3c24xx_driver_ids[] = {
+static const struct platform_device_id s3c24xx_driver_ids[] = {
{
.name = "s3c2410-nand",
.driver_data = TYPE_S3C2410,
diff --git a/kernel/drivers/mtd/nand/sh_flctl.c b/kernel/drivers/mtd/nand/sh_flctl.c
index c3ce81c1a..bcba1a924 100644
--- a/kernel/drivers/mtd/nand/sh_flctl.c
+++ b/kernel/drivers/mtd/nand/sh_flctl.c
@@ -569,7 +569,8 @@ static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
}
static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required,
+ int page)
{
chip->write_buf(mtd, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -1123,6 +1124,7 @@ static int flctl_probe(struct platform_device *pdev)
flctl_mtd = &flctl->mtd;
nand = &flctl->chip;
flctl_mtd->priv = nand;
+ flctl_mtd->dev.parent = &pdev->dev;
flctl->pdev = pdev;
flctl->hwecc = pdata->has_hwecc;
flctl->holden = pdata->use_holden;
diff --git a/kernel/drivers/mtd/nand/sharpsl.c b/kernel/drivers/mtd/nand/sharpsl.c
index 842c47a45..082b60097 100644
--- a/kernel/drivers/mtd/nand/sharpsl.c
+++ b/kernel/drivers/mtd/nand/sharpsl.c
@@ -144,7 +144,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
/* Link the private data with the MTD structure */
sharpsl->mtd.priv = this;
- sharpsl->mtd.owner = THIS_MODULE;
+ sharpsl->mtd.dev.parent = &pdev->dev;
platform_set_drvdata(pdev, sharpsl);
diff --git a/kernel/drivers/mtd/nand/socrates_nand.c b/kernel/drivers/mtd/nand/socrates_nand.c
index d71062273..b94f53427 100644
--- a/kernel/drivers/mtd/nand/socrates_nand.c
+++ b/kernel/drivers/mtd/nand/socrates_nand.c
@@ -167,7 +167,6 @@ static int socrates_nand_probe(struct platform_device *ofdev)
nand_chip->priv = host; /* link the private data structures */
mtd->priv = nand_chip;
mtd->name = "socrates_nand";
- mtd->owner = THIS_MODULE;
mtd->dev.parent = &ofdev->dev;
ppdata.of_node = ofdev->dev.of_node;
diff --git a/kernel/drivers/mtd/nand/sunxi_nand.c b/kernel/drivers/mtd/nand/sunxi_nand.c
index 6f93b2990..824711845 100644
--- a/kernel/drivers/mtd/nand/sunxi_nand.c
+++ b/kernel/drivers/mtd/nand/sunxi_nand.c
@@ -57,11 +57,8 @@
#define NFC_REG_ECC_CTL 0x0034
#define NFC_REG_ECC_ST 0x0038
#define NFC_REG_DEBUG 0x003C
-#define NFC_REG_ECC_CNT0 0x0040
-#define NFC_REG_ECC_CNT1 0x0044
-#define NFC_REG_ECC_CNT2 0x0048
-#define NFC_REG_ECC_CNT3 0x004c
-#define NFC_REG_USER_DATA_BASE 0x0050
+#define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3)
+#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
#define NFC_REG_SPARE_AREA 0x00A0
#define NFC_RAM0_BASE 0x0400
#define NFC_RAM1_BASE 0x0800
@@ -69,12 +66,16 @@
/* define bit use in NFC_CTL */
#define NFC_EN BIT(0)
#define NFC_RESET BIT(1)
-#define NFC_BUS_WIDYH BIT(2)
-#define NFC_RB_SEL BIT(3)
-#define NFC_CE_SEL GENMASK(26, 24)
+#define NFC_BUS_WIDTH_MSK BIT(2)
+#define NFC_BUS_WIDTH_8 (0 << 2)
+#define NFC_BUS_WIDTH_16 (1 << 2)
+#define NFC_RB_SEL_MSK BIT(3)
+#define NFC_RB_SEL(x) ((x) << 3)
+#define NFC_CE_SEL_MSK GENMASK(26, 24)
+#define NFC_CE_SEL(x) ((x) << 24)
#define NFC_CE_CTL BIT(6)
-#define NFC_CE_CTL1 BIT(7)
-#define NFC_PAGE_SIZE GENMASK(11, 8)
+#define NFC_PAGE_SHIFT_MSK GENMASK(11, 8)
+#define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8)
#define NFC_SAM BIT(12)
#define NFC_RAM_METHOD BIT(14)
#define NFC_DEBUG_CTL BIT(31)
@@ -86,10 +87,7 @@
#define NFC_CMD_FIFO_STATUS BIT(3)
#define NFC_STA BIT(4)
#define NFC_NATCH_INT_FLAG BIT(5)
-#define NFC_RB_STATE0 BIT(8)
-#define NFC_RB_STATE1 BIT(9)
-#define NFC_RB_STATE2 BIT(10)
-#define NFC_RB_STATE3 BIT(11)
+#define NFC_RB_STATE(x) BIT(x + 8)
/* define bit use in NFC_INT */
#define NFC_B2R_INT_ENABLE BIT(0)
@@ -99,10 +97,21 @@
NFC_CMD_INT_ENABLE | \
NFC_DMA_INT_ENABLE)
+/* define bit use in NFC_TIMING_CTL */
+#define NFC_TIMING_CTL_EDO BIT(8)
+
+/* define NFC_TIMING_CFG register layout */
+#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD) \
+ (((tWB) & 0x3) | (((tADL) & 0x3) << 2) | \
+ (((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) | \
+ (((tCAD) & 0x7) << 8))
+
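A worked example of the packing performed by NFC_TIMING_CFG() above (derived purely from the macro; the field values are hypothetical):

/* NFC_TIMING_CFG(tWB=1, tADL=2, tWHR=0, tRHW=3, tCAD=7)
 *   = (1 & 0x3) | ((2 & 0x3) << 2) | ((0 & 0x3) << 4)
 *     | ((3 & 0x3) << 6) | ((7 & 0x7) << 8)
 *   = 0x001 | 0x008 | 0x000 | 0x0c0 | 0x700 = 0x7c9
 */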
/* define bit use in NFC_CMD */
-#define NFC_CMD_LOW_BYTE GENMASK(7, 0)
-#define NFC_CMD_HIGH_BYTE GENMASK(15, 8)
-#define NFC_ADR_NUM GENMASK(18, 16)
+#define NFC_CMD_LOW_BYTE_MSK GENMASK(7, 0)
+#define NFC_CMD_HIGH_BYTE_MSK GENMASK(15, 8)
+#define NFC_CMD(x) (x)
+#define NFC_ADR_NUM_MSK GENMASK(18, 16)
+#define NFC_ADR_NUM(x) (((x) - 1) << 16)
#define NFC_SEND_ADR BIT(19)
#define NFC_ACCESS_DIR BIT(20)
#define NFC_DATA_TRANS BIT(21)
@@ -114,29 +123,38 @@
#define NFC_ROW_AUTO_INC BIT(27)
#define NFC_SEND_CMD3 BIT(28)
#define NFC_SEND_CMD4 BIT(29)
-#define NFC_CMD_TYPE GENMASK(31, 30)
+#define NFC_CMD_TYPE_MSK GENMASK(31, 30)
+#define NFC_NORMAL_OP (0 << 30)
+#define NFC_ECC_OP (1 << 30)
+#define NFC_PAGE_OP (2 << 30)
/* define bit use in NFC_RCMD_SET */
-#define NFC_READ_CMD GENMASK(7, 0)
-#define NFC_RANDOM_READ_CMD0 GENMASK(15, 8)
-#define NFC_RANDOM_READ_CMD1 GENMASK(23, 16)
+#define NFC_READ_CMD_MSK GENMASK(7, 0)
+#define NFC_RND_READ_CMD0_MSK GENMASK(15, 8)
+#define NFC_RND_READ_CMD1_MSK GENMASK(23, 16)
/* define bit use in NFC_WCMD_SET */
-#define NFC_PROGRAM_CMD GENMASK(7, 0)
-#define NFC_RANDOM_WRITE_CMD GENMASK(15, 8)
-#define NFC_READ_CMD0 GENMASK(23, 16)
-#define NFC_READ_CMD1 GENMASK(31, 24)
+#define NFC_PROGRAM_CMD_MSK GENMASK(7, 0)
+#define NFC_RND_WRITE_CMD_MSK GENMASK(15, 8)
+#define NFC_READ_CMD0_MSK GENMASK(23, 16)
+#define NFC_READ_CMD1_MSK GENMASK(31, 24)
/* define bit use in NFC_ECC_CTL */
#define NFC_ECC_EN BIT(0)
#define NFC_ECC_PIPELINE BIT(3)
#define NFC_ECC_EXCEPTION BIT(4)
-#define NFC_ECC_BLOCK_SIZE BIT(5)
+#define NFC_ECC_BLOCK_SIZE_MSK BIT(5)
#define NFC_RANDOM_EN BIT(9)
#define NFC_RANDOM_DIRECTION BIT(10)
-#define NFC_ECC_MODE_SHIFT 12
-#define NFC_ECC_MODE GENMASK(15, 12)
-#define NFC_RANDOM_SEED GENMASK(30, 16)
+#define NFC_ECC_MODE_MSK GENMASK(15, 12)
+#define NFC_ECC_MODE(x) ((x) << 12)
+#define NFC_RANDOM_SEED_MSK GENMASK(30, 16)
+#define NFC_RANDOM_SEED(x) ((x) << 16)
+
+/* define bit use in NFC_ECC_ST */
+#define NFC_ECC_ERR(x) BIT(x)
+#define NFC_ECC_PAT_FOUND(x) BIT(x + 16)
+#define NFC_ECC_ERR_CNT(b, x) (((x) >> ((b) * 8)) & 0xff)
#define NFC_DEFAULT_TIMEOUT_MS 1000
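Reading the two ECC-count macros above together (a derivation from the macros themselves, not from the datasheet), the per-chunk error counts appear to be packed four 8-bit values per 32-bit register starting at 0x0040. For example, for chunk 5:

/* (0x0040 + 5) & ~0x3 == 0x0044; byte index 5 % 4 == 1 */
u32 reg = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(5));
u8 cnt  = NFC_ECC_ERR_CNT(1, reg);	/* == (reg >> 8) & 0xff */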
@@ -208,6 +226,7 @@ struct sunxi_nand_hw_ecc {
* @nand: base NAND chip structure
* @mtd: base MTD structure
* @clk_rate: clk_rate required for this NAND chip
+ * @timing_cfg: TIMING_CFG register value for this NAND chip
+ * @timing_ctl: TIMING_CTL register value for this NAND chip
* @selected: current active CS
* @nsels: number of CS lines required by the NAND chip
* @sels: array of CS lines descriptions
@@ -217,6 +236,8 @@ struct sunxi_nand_chip {
struct nand_chip nand;
struct mtd_info mtd;
unsigned long clk_rate;
+ u32 timing_cfg;
+ u32 timing_ctl;
int selected;
int nsels;
struct sunxi_nand_chip_sel sels[0];
@@ -344,13 +365,13 @@ static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
switch (rb->type) {
case RB_NATIVE:
ret = !!(readl(nfc->regs + NFC_REG_ST) &
- (NFC_RB_STATE0 << rb->info.nativeid));
+ NFC_RB_STATE(rb->info.nativeid));
if (ret)
break;
sunxi_nfc_wait_int(nfc, NFC_RB_B2R, timeo);
ret = !!(readl(nfc->regs + NFC_REG_ST) &
- (NFC_RB_STATE0 << rb->info.nativeid));
+ NFC_RB_STATE(rb->info.nativeid));
break;
case RB_GPIO:
ret = gpio_get_value(rb->info.gpio);
@@ -380,19 +401,19 @@ static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
return;
ctl = readl(nfc->regs + NFC_REG_CTL) &
- ~(NFC_CE_SEL | NFC_RB_SEL | NFC_EN);
+ ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);
if (chip >= 0) {
sel = &sunxi_nand->sels[chip];
- ctl |= (sel->cs << 24) | NFC_EN |
- (((nand->page_shift - 10) & 0xf) << 8);
+ ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
+ NFC_PAGE_SHIFT(nand->page_shift - 10);
if (sel->rb.type == RB_NONE) {
nand->dev_ready = NULL;
} else {
nand->dev_ready = sunxi_nfc_dev_ready;
if (sel->rb.type == RB_NATIVE)
- ctl |= (sel->rb.info.nativeid << 3);
+ ctl |= NFC_RB_SEL(sel->rb.info.nativeid);
}
writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
@@ -403,6 +424,8 @@ static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
}
}
+ writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
+ writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
writel(ctl, nfc->regs + NFC_REG_CTL);
sunxi_nand->selected = chip;
@@ -516,161 +539,244 @@ static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
}
-static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf,
- int oob_required, int page)
+static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
{
- struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
- struct nand_ecclayout *layout = ecc->layout;
- struct sunxi_nand_hw_ecc *data = ecc->priv;
- unsigned int max_bitflips = 0;
- int offset;
- int ret;
- u32 tmp;
- int i;
- int cnt;
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct sunxi_nand_hw_ecc *data = nand->ecc.priv;
+ u32 ecc_ctl;
- tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
- tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
- tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
- NFC_ECC_EXCEPTION;
+ ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
+ ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
+ NFC_ECC_BLOCK_SIZE_MSK);
+ ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION;
- writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+ writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
+}
- for (i = 0; i < ecc->steps; i++) {
- if (i)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, i * ecc->size, -1);
+static void sunxi_nfc_hw_ecc_disable(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
+{
+ buf[0] = user_data;
+ buf[1] = user_data >> 8;
+ buf[2] = user_data >> 16;
+ buf[3] = user_data >> 24;
+}
- offset = mtd->writesize + layout->eccpos[i * ecc->bytes] - 4;
+static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
+ u8 *data, int data_off,
+ u8 *oob, int oob_off,
+ int *cur_off,
+ unsigned int *max_bitflips)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ u32 status;
+ int ret;
- chip->read_buf(mtd, NULL, ecc->size);
+ if (*cur_off != data_off)
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ sunxi_nfc_read_buf(mtd, NULL, ecc->size);
- ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
- if (ret)
- return ret;
+ if (data_off + ecc->size != oob_off)
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
- tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | (1 << 30);
- writel(tmp, nfc->regs + NFC_REG_CMD);
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
- ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
- if (ret)
- return ret;
+ writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
+ nfc->regs + NFC_REG_CMD);
- memcpy_fromio(buf + (i * ecc->size),
- nfc->regs + NFC_RAM0_BASE, ecc->size);
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
- if (readl(nfc->regs + NFC_REG_ECC_ST) & 0x1) {
- mtd->ecc_stats.failed++;
- } else {
- tmp = readl(nfc->regs + NFC_REG_ECC_CNT0) & 0xff;
- mtd->ecc_stats.corrected += tmp;
- max_bitflips = max_t(unsigned int, max_bitflips, tmp);
- }
+ status = readl(nfc->regs + NFC_REG_ECC_ST);
+ ret = NFC_ECC_ERR_CNT(0, readl(nfc->regs + NFC_REG_ECC_ERR_CNT(0)));
- if (oob_required) {
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
- ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
- if (ret)
- return ret;
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ sunxi_nfc_read_buf(mtd, oob, ecc->bytes + 4);
- offset -= mtd->writesize;
- chip->read_buf(mtd, chip->oob_poi + offset,
- ecc->bytes + 4);
- }
+ if (status & NFC_ECC_ERR(0)) {
+ ret = nand_check_erased_ecc_chunk(data, ecc->size,
+ oob, ecc->bytes + 4,
+ NULL, 0, ecc->strength);
+ } else {
+ /*
+ * The engine protects 4 bytes of OOB data per chunk.
+ * Retrieve the corrected OOB bytes.
+ */
+ sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(0)),
+ oob);
}
- if (oob_required) {
- cnt = ecc->layout->oobfree[ecc->steps].length;
- if (cnt > 0) {
- offset = mtd->writesize +
- ecc->layout->oobfree[ecc->steps].offset;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
- offset -= mtd->writesize;
- chip->read_buf(mtd, chip->oob_poi + offset, cnt);
- }
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
}
- tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
- tmp &= ~NFC_ECC_EN;
+ *cur_off = oob_off + ecc->bytes + 4;
+
+ return 0;
+}
- writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
+ u8 *oob, int *cur_off)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int offset = ((ecc->bytes + 4) * ecc->steps);
+ int len = mtd->oobsize - offset;
- return max_bitflips;
+ if (len <= 0)
+ return;
+
+ if (*cur_off != offset)
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ offset + mtd->writesize, -1);
+
+ sunxi_nfc_read_buf(mtd, oob + offset, len);
+
+ *cur_off = mtd->oobsize + mtd->writesize;
}
-static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
{
- struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
- struct nand_ecclayout *layout = ecc->layout;
- struct sunxi_nand_hw_ecc *data = ecc->priv;
- int offset;
+ return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
+}
+
+static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
+ const u8 *data, int data_off,
+ const u8 *oob, int oob_off,
+ int *cur_off)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
int ret;
- u32 tmp;
- int i;
- int cnt;
- tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
- tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
- tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
- NFC_ECC_EXCEPTION;
+ if (data_off != *cur_off)
+ nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
- writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+ sunxi_nfc_write_buf(mtd, data, ecc->size);
- for (i = 0; i < ecc->steps; i++) {
- if (i)
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, i * ecc->size, -1);
+ /* Fill OOB data in */
+ writel(sunxi_nfc_buf_to_user_data(oob),
+ nfc->regs + NFC_REG_USER_DATA(0));
- chip->write_buf(mtd, buf + (i * ecc->size), ecc->size);
+ if (data_off + ecc->size != oob_off)
+ nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
- offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
- /* Fill OOB data in */
- if (oob_required) {
- tmp = 0xffffffff;
- memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
- 4);
- } else {
- memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
- chip->oob_poi + offset - mtd->writesize,
- 4);
- }
+ writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
+ NFC_ACCESS_DIR | NFC_ECC_OP,
+ nfc->regs + NFC_REG_CMD);
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ return ret;
- ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
- if (ret)
- return ret;
+ *cur_off = oob_off + ecc->bytes + 4;
- tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
- (1 << 30);
- writel(tmp, nfc->regs + NFC_REG_CMD);
- ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ return 0;
+}
+
+static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
+ u8 *oob, int *cur_off)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int offset = ((ecc->bytes + 4) * ecc->steps);
+ int len = mtd->oobsize - offset;
+
+ if (len <= 0)
+ return;
+
+ if (*cur_off != offset)
+ nand->cmdfunc(mtd, NAND_CMD_RNDIN,
+ offset + mtd->writesize, -1);
+
+ sunxi_nfc_write_buf(mtd, oob + offset, len);
+
+ *cur_off = mtd->oobsize + mtd->writesize;
+}
+
+static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ unsigned int max_bitflips = 0;
+ int ret, i, cur_off = 0;
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = buf + data_off;
+ u8 *oob = chip->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
+ oob_off + mtd->writesize,
+ &cur_off, &max_bitflips);
if (ret)
return ret;
}
- if (oob_required) {
- cnt = ecc->layout->oobfree[i].length;
- if (cnt > 0) {
- offset = mtd->writesize +
- ecc->layout->oobfree[i].offset;
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
- offset -= mtd->writesize;
- chip->write_buf(mtd, chip->oob_poi + offset, cnt);
- }
+ if (oob_required)
+ sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off);
+
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret, i, cur_off = 0;
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ const u8 *data = buf + data_off;
+ const u8 *oob = chip->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
+ oob_off + mtd->writesize,
+ &cur_off);
+ if (ret)
+ return ret;
}
- tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
- tmp &= ~NFC_ECC_EN;
+ if (oob_required)
+ sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, &cur_off);
- writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+ sunxi_nfc_hw_ecc_disable(mtd);
return 0;
}
@@ -680,65 +786,29 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
uint8_t *buf, int oob_required,
int page)
{
- struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- struct sunxi_nand_hw_ecc *data = ecc->priv;
unsigned int max_bitflips = 0;
- uint8_t *oob = chip->oob_poi;
- int offset = 0;
- int ret;
- int cnt;
- u32 tmp;
- int i;
+ int ret, i, cur_off = 0;
- tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
- tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
- tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
- NFC_ECC_EXCEPTION;
-
- writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+ sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
- chip->read_buf(mtd, NULL, ecc->size);
-
- tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | (1 << 30);
- writel(tmp, nfc->regs + NFC_REG_CMD);
-
- ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ int data_off = i * (ecc->size + ecc->bytes + 4);
+ int oob_off = data_off + ecc->size;
+ u8 *data = buf + (i * ecc->size);
+ u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
+
+ ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
+ oob_off, &cur_off,
+ &max_bitflips);
if (ret)
return ret;
-
- memcpy_fromio(buf, nfc->regs + NFC_RAM0_BASE, ecc->size);
- buf += ecc->size;
- offset += ecc->size;
-
- if (readl(nfc->regs + NFC_REG_ECC_ST) & 0x1) {
- mtd->ecc_stats.failed++;
- } else {
- tmp = readl(nfc->regs + NFC_REG_ECC_CNT0) & 0xff;
- mtd->ecc_stats.corrected += tmp;
- max_bitflips = max_t(unsigned int, max_bitflips, tmp);
- }
-
- if (oob_required) {
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
- chip->read_buf(mtd, oob, ecc->bytes + ecc->prepad);
- oob += ecc->bytes + ecc->prepad;
- }
-
- offset += ecc->bytes + ecc->prepad;
}
- if (oob_required) {
- cnt = mtd->oobsize - (oob - chip->oob_poi);
- if (cnt > 0) {
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
- chip->read_buf(mtd, oob, cnt);
- }
- }
+ if (oob_required)
+ sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off);
- writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
- nfc->regs + NFC_REG_ECC_CTL);
+ sunxi_nfc_hw_ecc_disable(mtd);
return max_bitflips;
}
@@ -746,71 +816,60 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
struct nand_chip *chip,
const uint8_t *buf,
- int oob_required)
+ int oob_required, int page)
{
- struct sunxi_nfc *nfc = to_sunxi_nfc(chip->controller);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- struct sunxi_nand_hw_ecc *data = ecc->priv;
- uint8_t *oob = chip->oob_poi;
- int offset = 0;
- int ret;
- int cnt;
- u32 tmp;
- int i;
-
- tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
- tmp &= ~(NFC_ECC_MODE | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE);
- tmp |= NFC_ECC_EN | (data->mode << NFC_ECC_MODE_SHIFT) |
- NFC_ECC_EXCEPTION;
+ int ret, i, cur_off = 0;
- writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+ sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
- chip->write_buf(mtd, buf + (i * ecc->size), ecc->size);
- offset += ecc->size;
-
- /* Fill OOB data in */
- if (oob_required) {
- tmp = 0xffffffff;
- memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
- 4);
- } else {
- memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
- 4);
- }
+ int data_off = i * (ecc->size + ecc->bytes + 4);
+ int oob_off = data_off + ecc->size;
+ const u8 *data = buf + (i * ecc->size);
+ const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
- tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
- (1 << 30);
- writel(tmp, nfc->regs + NFC_REG_CMD);
-
- ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off,
+ oob, oob_off, &cur_off);
if (ret)
return ret;
-
- offset += ecc->bytes + ecc->prepad;
- oob += ecc->bytes + ecc->prepad;
- }
-
- if (oob_required) {
- cnt = mtd->oobsize - (oob - chip->oob_poi);
- if (cnt > 0) {
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
- chip->write_buf(mtd, oob, cnt);
- }
}
- tmp = readl(nfc->regs + NFC_REG_ECC_CTL);
- tmp &= ~NFC_ECC_EN;
+ if (oob_required)
+ sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, &cur_off);
- writel(tmp, nfc->regs + NFC_REG_ECC_CTL);
+ sunxi_nfc_hw_ecc_disable(mtd);
return 0;
}
+static const s32 tWB_lut[] = {6, 12, 16, 20};
+static const s32 tRHW_lut[] = {4, 8, 12, 20};
+
+static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
+ u32 clk_period)
+{
+ u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
+ int i;
+
+ for (i = 0; i < lut_size; i++) {
+ if (clk_cycles <= lut[i])
+ return i;
+ }
+
+ /* Doesn't fit */
+ return -EINVAL;
+}
+
+#define sunxi_nand_lookup_timing(l, p, c) \
+ _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
+
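A worked example of the lookup helper above (hypothetical numbers; in the calls below both durations are still expressed in picoseconds):

/* tWB_max = 100000 ps, clk_period = 10000 ps (100 MHz)
 *   clk_cycles = DIV_ROUND_UP(100000, 10000) = 10
 *   tWB_lut[]  = {6, 12, 16, 20}  ->  first entry >= 10 is 12, at index 1
 *   so the tWB field is programmed with 1
 */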
static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
const struct nand_sdr_timings *timings)
{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
u32 min_clk_period = 0;
+ s32 tWB, tADL, tWHR, tRHW, tCAD;
/* T1 <=> tCLS */
if (timings->tCLS_min > min_clk_period)
@@ -872,6 +931,48 @@ static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
if (timings->tWC_min > (min_clk_period * 2))
min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
+ /* T16 - T19 + tCAD */
+ tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
+ min_clk_period);
+ if (tWB < 0) {
+ dev_err(nfc->dev, "unsupported tWB\n");
+ return tWB;
+ }
+
+ tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
+ if (tADL > 3) {
+ dev_err(nfc->dev, "unsupported tADL\n");
+ return -EINVAL;
+ }
+
+ tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
+ if (tWHR > 3) {
+ dev_err(nfc->dev, "unsupported tWHR\n");
+ return -EINVAL;
+ }
+
+ tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
+ min_clk_period);
+ if (tRHW < 0) {
+ dev_err(nfc->dev, "unsupported tRHW\n");
+ return tRHW;
+ }
+
+ /*
+ * TODO: according to the ONFI spec this value only applies to DDR NAND,
+ * but Allwinner seems to set this to 0x7. Mimic them for now.
+ */
+ tCAD = 0x7;
+
+ /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
+ chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
+
+ /*
+ * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
+ * output cycle timings shall be used if the host drives tRC less than
+ * 30 ns.
+ */
+ chip->timing_ctl = (timings->tRC_min < 30000) ? NFC_TIMING_CTL_EDO : 0;
/* Convert min_clk_period from picoseconds to nanoseconds */
min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
@@ -884,8 +985,6 @@ static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
*/
chip->clk_rate = (2 * NSEC_PER_SEC) / min_clk_period;
- /* TODO: configure T16-T19 */
-
return 0;
}
@@ -901,17 +1000,23 @@ static int sunxi_nand_chip_init_timings(struct sunxi_nand_chip *chip,
mode = chip->nand.onfi_timing_mode_default;
} else {
uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
+ int i;
mode = fls(mode) - 1;
if (mode < 0)
mode = 0;
feature[0] = mode;
- ret = chip->nand.onfi_set_features(&chip->mtd, &chip->nand,
+ for (i = 0; i < chip->nsels; i++) {
+ chip->nand.select_chip(&chip->mtd, i);
+ ret = chip->nand.onfi_set_features(&chip->mtd,
+ &chip->nand,
ONFI_FEATURE_ADDR_TIMING_MODE,
feature);
- if (ret)
- return ret;
+ chip->nand.select_chip(&chip->mtd, -1);
+ if (ret)
+ return ret;
+ }
}
timings = onfi_async_timing_mode_to_sdr_timings(mode);
@@ -1085,16 +1190,9 @@ static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
struct device_node *np)
{
struct nand_chip *nand = mtd->priv;
- int strength;
- int blk_size;
int ret;
- blk_size = of_get_nand_ecc_step_size(np);
- strength = of_get_nand_ecc_strength(np);
- if (blk_size > 0 && strength > 0) {
- ecc->size = blk_size;
- ecc->strength = strength;
- } else {
+ if (!ecc->size) {
ecc->size = nand->ecc_step_ds;
ecc->strength = nand->ecc_strength_ds;
}
@@ -1102,12 +1200,6 @@ static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
if (!ecc->size || !ecc->strength)
return -EINVAL;
- ecc->mode = NAND_ECC_HW;
-
- ret = of_get_nand_ecc_mode(np);
- if (ret >= 0)
- ecc->mode = ret;
-
switch (ecc->mode) {
case NAND_ECC_SOFT_BCH:
break;
@@ -1233,24 +1325,29 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
/* Default tR value specified in the ONFI spec (chapter 4.15.1) */
nand->chip_delay = 200;
nand->controller = &nfc->controller;
+ /*
+ * Set the ECC mode to the default value in case nothing is specified
+ * in the DT.
+ */
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->flash_node = np;
nand->select_chip = sunxi_nfc_select_chip;
nand->cmd_ctrl = sunxi_nfc_cmd_ctrl;
nand->read_buf = sunxi_nfc_read_buf;
nand->write_buf = sunxi_nfc_write_buf;
nand->read_byte = sunxi_nfc_read_byte;
- if (of_get_nand_on_flash_bbt(np))
- nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
-
mtd = &chip->mtd;
mtd->dev.parent = dev;
mtd->priv = nand;
- mtd->owner = THIS_MODULE;
ret = nand_scan_ident(mtd, nsels, NULL);
if (ret)
return ret;
+ if (nand->bbt_options & NAND_BBT_USE_FLASH)
+ nand->bbt_options |= NAND_BBT_NO_OOB;
+
ret = sunxi_nand_chip_init_timings(chip, np);
if (ret) {
dev_err(dev, "could not configure chip timings: %d\n", ret);
@@ -1312,6 +1409,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
node);
nand_release(&chip->mtd);
sunxi_nand_ecc_cleanup(&chip->nand.ecc);
+ list_del(&chip->node);
}
}
@@ -1376,13 +1474,6 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, nfc);
- /*
- * TODO: replace these magic values with proper flags as soon as we
- * know what they are encoding.
- */
- writel(0x100, nfc->regs + NFC_REG_TIMING_CTL);
- writel(0x7ff, nfc->regs + NFC_REG_TIMING_CFG);
-
ret = sunxi_nand_chips_init(dev, nfc);
if (ret) {
dev_err(dev, "failed to init nand chips\n");
diff --git a/kernel/drivers/mtd/nand/tmio_nand.c b/kernel/drivers/mtd/nand/tmio_nand.c
index fb8fd35fa..befddf077 100644
--- a/kernel/drivers/mtd/nand/tmio_nand.c
+++ b/kernel/drivers/mtd/nand/tmio_nand.c
@@ -382,6 +382,7 @@ static int tmio_probe(struct platform_device *dev)
nand_chip = &tmio->chip;
mtd->priv = nand_chip;
mtd->name = "tmio-nand";
+ mtd->dev.parent = &dev->dev;
tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr));
if (!tmio->ccr)
diff --git a/kernel/drivers/mtd/nand/txx9ndfmc.c b/kernel/drivers/mtd/nand/txx9ndfmc.c
index 9c0bc45e2..8572519b8 100644
--- a/kernel/drivers/mtd/nand/txx9ndfmc.c
+++ b/kernel/drivers/mtd/nand/txx9ndfmc.c
@@ -323,7 +323,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
continue;
chip = &txx9_priv->chip;
mtd = &txx9_priv->mtd;
- mtd->owner = THIS_MODULE;
+ mtd->dev.parent = &dev->dev;
mtd->priv = chip;
diff --git a/kernel/drivers/mtd/nand/vf610_nfc.c b/kernel/drivers/mtd/nand/vf610_nfc.c
new file mode 100644
index 000000000..8805d6325
--- /dev/null
+++ b/kernel/drivers/mtd/nand/vf610_nfc.c
@@ -0,0 +1,878 @@
+/*
+ * Copyright 2009-2015 Freescale Semiconductor, Inc. and others
+ *
+ * Description: MPC5125, VF610, MCF54418 and Kinetis K70 Nand driver.
+ * Jason ported to M54418TWR and MVFA5 (VF610).
+ * Authors: Stefan Agner <stefan.agner@toradex.com>
+ * Bill Pringlemeir <bpringlemeir@nbsps.com>
+ * Shaohui Xie <b21989@freescale.com>
+ * Jason Jin <Jason.jin@freescale.com>
+ *
+ * Based on original driver mpc5121_nfc.c.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Limitations:
+ * - Untested on MPC5125 and M54418.
+ * - DMA and pipelining not used.
+ * - 2K pages or less.
+ * - HW ECC: Only 2K page with 64+ OOB.
+ * - HW ECC: Only 24 and 32-bit error correction implemented.
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_mtd.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define DRV_NAME "vf610_nfc"
+
+/* Register Offsets */
+#define NFC_FLASH_CMD1 0x3F00
+#define NFC_FLASH_CMD2 0x3F04
+#define NFC_COL_ADDR 0x3F08
+#define NFC_ROW_ADDR 0x3F0c
+#define NFC_ROW_ADDR_INC 0x3F14
+#define NFC_FLASH_STATUS1 0x3F18
+#define NFC_FLASH_STATUS2 0x3F1c
+#define NFC_CACHE_SWAP 0x3F28
+#define NFC_SECTOR_SIZE 0x3F2c
+#define NFC_FLASH_CONFIG 0x3F30
+#define NFC_IRQ_STATUS 0x3F38
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x1000)
+
+#define PAGE_2K 0x0800
+#define OOB_64 0x0040
+#define OOB_MAX 0x0100
+
+/*
+ * NFC_CMD2[CODE] values. See section:
+ * - 31.4.7 Flash Command Code Description, Vybrid manual
+ * - 23.8.6 Flash Command Sequencer, MPC5125 manual
+ *
+ * Briefly these are bitmasks of controller cycles.
+ */
+#define READ_PAGE_CMD_CODE 0x7EE0
+#define READ_ONFI_PARAM_CMD_CODE 0x4860
+#define PROGRAM_PAGE_CMD_CODE 0x7FC0
+#define ERASE_CMD_CODE 0x4EC0
+#define READ_ID_CMD_CODE 0x4804
+#define RESET_CMD_CODE 0x4040
+#define STATUS_READ_CMD_CODE 0x4068
+
+/* NFC ECC mode define */
+#define ECC_BYPASS 0
+#define ECC_45_BYTE 6
+#define ECC_60_BYTE 7
+
+/*** Register Mask and bit definitions */
+
+/* NFC_FLASH_CMD1 Field */
+#define CMD_BYTE2_MASK 0xFF000000
+#define CMD_BYTE2_SHIFT 24
+
+/* NFC_FLASH_CM2 Field */
+#define CMD_BYTE1_MASK 0xFF000000
+#define CMD_BYTE1_SHIFT 24
+#define CMD_CODE_MASK 0x00FFFF00
+#define CMD_CODE_SHIFT 8
+#define BUFNO_MASK 0x00000006
+#define BUFNO_SHIFT 1
+#define START_BIT BIT(0)
+
+/* NFC_COL_ADDR Field */
+#define COL_ADDR_MASK 0x0000FFFF
+#define COL_ADDR_SHIFT 0
+
+/* NFC_ROW_ADDR Field */
+#define ROW_ADDR_MASK 0x00FFFFFF
+#define ROW_ADDR_SHIFT 0
+#define ROW_ADDR_CHIP_SEL_RB_MASK 0xF0000000
+#define ROW_ADDR_CHIP_SEL_RB_SHIFT 28
+#define ROW_ADDR_CHIP_SEL_MASK 0x0F000000
+#define ROW_ADDR_CHIP_SEL_SHIFT 24
+
+/* NFC_FLASH_STATUS2 Field */
+#define STATUS_BYTE1_MASK 0x000000FF
+
+/* NFC_FLASH_CONFIG Field */
+#define CONFIG_ECC_SRAM_ADDR_MASK 0x7FC00000
+#define CONFIG_ECC_SRAM_ADDR_SHIFT 22
+#define CONFIG_ECC_SRAM_REQ_BIT BIT(21)
+#define CONFIG_DMA_REQ_BIT BIT(20)
+#define CONFIG_ECC_MODE_MASK 0x000E0000
+#define CONFIG_ECC_MODE_SHIFT 17
+#define CONFIG_FAST_FLASH_BIT BIT(16)
+#define CONFIG_16BIT BIT(7)
+#define CONFIG_BOOT_MODE_BIT BIT(6)
+#define CONFIG_ADDR_AUTO_INCR_BIT BIT(5)
+#define CONFIG_BUFNO_AUTO_INCR_BIT BIT(4)
+#define CONFIG_PAGE_CNT_MASK 0xF
+#define CONFIG_PAGE_CNT_SHIFT 0
+
+/* NFC_IRQ_STATUS Field */
+#define IDLE_IRQ_BIT BIT(29)
+#define IDLE_EN_BIT BIT(20)
+#define CMD_DONE_CLEAR_BIT BIT(18)
+#define IDLE_CLEAR_BIT BIT(17)
+
+/*
+ * ECC status - seems to consume 8 bytes (double word). The documented
+ * status byte is located in the lowest byte of the second word (which is
+ * the 4th or 7th byte depending on endianness).
+ * Calculate an offset to store the ECC status at the end of the buffer.
+ */
+#define ECC_SRAM_ADDR (PAGE_2K + OOB_MAX - 8)
+
+#define ECC_STATUS 0x4
+#define ECC_STATUS_MASK 0x80
+#define ECC_STATUS_ERR_COUNT 0x3F
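As a quick sanity check on the offset arithmetic above, here is a minimal standalone sketch (not driver code); the last value mirrors the ECC_SRAM_ADDR >> 3 written into CONFIG_ECC_SRAM_ADDR by vf610_nfc_init_controller() later in this file.

#include <stdio.h>

#define PAGE_2K         0x0800
#define OOB_MAX         0x0100
#define ECC_SRAM_ADDR   (PAGE_2K + OOB_MAX - 8)
#define ECC_STATUS      0x4

int main(void)
{
        /* 0x800 + 0x100 - 8 = 0x8f8; the documented status byte then
         * sits at 0x8f8 + 0x4 = 0x8fc inside NFC_MAIN_AREA(0). */
        printf("ECC_SRAM_ADDR      = 0x%x\n", ECC_SRAM_ADDR);
        printf("status byte offset = 0x%x\n", ECC_SRAM_ADDR + ECC_STATUS);
        /* The CONFIG_ECC_SRAM_ADDR field takes the address divided by 8. */
        printf("register field     = 0x%x\n", ECC_SRAM_ADDR >> 3);
        return 0;
}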
+
+enum vf610_nfc_alt_buf {
+ ALT_BUF_DATA = 0,
+ ALT_BUF_ID = 1,
+ ALT_BUF_STAT = 2,
+ ALT_BUF_ONFI = 3,
+};
+
+enum vf610_nfc_variant {
+ NFC_VFC610 = 1,
+};
+
+struct vf610_nfc {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct device *dev;
+ void __iomem *regs;
+ struct completion cmd_done;
+ uint buf_offset;
+ int write_sz;
+ /* Status and ID are in alternate locations. */
+ enum vf610_nfc_alt_buf alt_buf;
+ enum vf610_nfc_variant variant;
+ struct clk *clk;
+ bool use_hw_ecc;
+ u32 ecc_mode;
+};
+
+#define mtd_to_nfc(_mtd) container_of(_mtd, struct vf610_nfc, mtd)
+
+static struct nand_ecclayout vf610_nfc_ecc45 = {
+ .eccbytes = 45,
+ .eccpos = {19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = {
+ {.offset = 2,
+ .length = 17} }
+};
+
+static struct nand_ecclayout vf610_nfc_ecc60 = {
+ .eccbytes = 60,
+ .eccpos = { 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63 },
+ .oobfree = {
+ {.offset = 2,
+ .length = 2} }
+};
+
+static inline u32 vf610_nfc_read(struct vf610_nfc *nfc, uint reg)
+{
+ return readl(nfc->regs + reg);
+}
+
+static inline void vf610_nfc_write(struct vf610_nfc *nfc, uint reg, u32 val)
+{
+ writel(val, nfc->regs + reg);
+}
+
+static inline void vf610_nfc_set(struct vf610_nfc *nfc, uint reg, u32 bits)
+{
+ vf610_nfc_write(nfc, reg, vf610_nfc_read(nfc, reg) | bits);
+}
+
+static inline void vf610_nfc_clear(struct vf610_nfc *nfc, uint reg, u32 bits)
+{
+ vf610_nfc_write(nfc, reg, vf610_nfc_read(nfc, reg) & ~bits);
+}
+
+static inline void vf610_nfc_set_field(struct vf610_nfc *nfc, u32 reg,
+ u32 mask, u32 shift, u32 val)
+{
+ vf610_nfc_write(nfc, reg,
+ (vf610_nfc_read(nfc, reg) & (~mask)) | val << shift);
+}
+
+static inline void vf610_nfc_memcpy(void *dst, const void __iomem *src,
+ size_t n)
+{
+ /*
+ * Use this accessor for the internal SRAM buffers. On the ARM
+ * Freescale Vybrid SoC it's known that the driver can treat
+ * the SRAM buffer as if it's memory. Other platforms might need
+ * to treat the buffers differently.
+ *
+ * For the time being, use memcpy.
+ */
+ memcpy(dst, src, n);
+}
+
+/* Clear flags for upcoming command */
+static inline void vf610_nfc_clear_status(struct vf610_nfc *nfc)
+{
+ u32 tmp = vf610_nfc_read(nfc, NFC_IRQ_STATUS);
+
+ tmp |= CMD_DONE_CLEAR_BIT | IDLE_CLEAR_BIT;
+ vf610_nfc_write(nfc, NFC_IRQ_STATUS, tmp);
+}
+
+static void vf610_nfc_done(struct vf610_nfc *nfc)
+{
+ unsigned long timeout = msecs_to_jiffies(100);
+
+ /*
+ * A barrier is needed after this write. The write needs to be
+ * done before reading the next register for the first time.
+ * vf610_nfc_set implies such a barrier by using writel to write
+ * to the register.
+ */
+ vf610_nfc_set(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
+ vf610_nfc_set(nfc, NFC_FLASH_CMD2, START_BIT);
+
+ if (!wait_for_completion_timeout(&nfc->cmd_done, timeout))
+ dev_warn(nfc->dev, "Timeout while waiting for BUSY.\n");
+
+ vf610_nfc_clear_status(nfc);
+}
+
+static u8 vf610_nfc_get_id(struct vf610_nfc *nfc, int col)
+{
+ u32 flash_id;
+
+ if (col < 4) {
+ flash_id = vf610_nfc_read(nfc, NFC_FLASH_STATUS1);
+ flash_id >>= (3 - col) * 8;
+ } else {
+ flash_id = vf610_nfc_read(nfc, NFC_FLASH_STATUS2);
+ flash_id >>= 24;
+ }
+
+ return flash_id & 0xff;
+}
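vf610_nfc_get_id() unpacks ID bytes 0-3 from the most- to least-significant byte of FLASH_STATUS1 and byte 4 from the top byte of FLASH_STATUS2. A standalone sketch of the same unpacking, using hypothetical register contents (not a real chip ID):

#include <stdio.h>
#include <stdint.h>

static uint8_t get_id(uint32_t status1, uint32_t status2, int col)
{
        uint32_t flash_id;

        if (col < 4) {
                flash_id = status1;
                flash_id >>= (3 - col) * 8;     /* byte 0 is the MSB */
        } else {
                flash_id = status2;
                flash_id >>= 24;
        }
        return flash_id & 0xff;
}

int main(void)
{
        uint32_t s1 = 0x2cda9095, s2 = 0x06000000;      /* hypothetical */
        int col;

        for (col = 0; col < 5; col++)
                printf("%02x ", get_id(s1, s2, col));
        printf("\n");   /* prints: 2c da 90 95 06 */
        return 0;
}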
+
+static u8 vf610_nfc_get_status(struct vf610_nfc *nfc)
+{
+ return vf610_nfc_read(nfc, NFC_FLASH_STATUS2) & STATUS_BYTE1_MASK;
+}
+
+static void vf610_nfc_send_command(struct vf610_nfc *nfc, u32 cmd_byte1,
+ u32 cmd_code)
+{
+ u32 tmp;
+
+ vf610_nfc_clear_status(nfc);
+
+ tmp = vf610_nfc_read(nfc, NFC_FLASH_CMD2);
+ tmp &= ~(CMD_BYTE1_MASK | CMD_CODE_MASK | BUFNO_MASK);
+ tmp |= cmd_byte1 << CMD_BYTE1_SHIFT;
+ tmp |= cmd_code << CMD_CODE_SHIFT;
+ vf610_nfc_write(nfc, NFC_FLASH_CMD2, tmp);
+}
+
+static void vf610_nfc_send_commands(struct vf610_nfc *nfc, u32 cmd_byte1,
+ u32 cmd_byte2, u32 cmd_code)
+{
+ u32 tmp;
+
+ vf610_nfc_send_command(nfc, cmd_byte1, cmd_code);
+
+ tmp = vf610_nfc_read(nfc, NFC_FLASH_CMD1);
+ tmp &= ~CMD_BYTE2_MASK;
+ tmp |= cmd_byte2 << CMD_BYTE2_SHIFT;
+ vf610_nfc_write(nfc, NFC_FLASH_CMD1, tmp);
+}
+
+static irqreturn_t vf610_nfc_irq(int irq, void *data)
+{
+ struct mtd_info *mtd = data;
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+ vf610_nfc_clear(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
+ complete(&nfc->cmd_done);
+
+ return IRQ_HANDLED;
+}
+
+static void vf610_nfc_addr_cycle(struct vf610_nfc *nfc, int column, int page)
+{
+ if (column != -1) {
+ if (nfc->chip.options & NAND_BUSWIDTH_16)
+ column = column / 2;
+ vf610_nfc_set_field(nfc, NFC_COL_ADDR, COL_ADDR_MASK,
+ COL_ADDR_SHIFT, column);
+ }
+ if (page != -1)
+ vf610_nfc_set_field(nfc, NFC_ROW_ADDR, ROW_ADDR_MASK,
+ ROW_ADDR_SHIFT, page);
+}
+
+static inline void vf610_nfc_ecc_mode(struct vf610_nfc *nfc, int ecc_mode)
+{
+ vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
+ CONFIG_ECC_MODE_MASK,
+ CONFIG_ECC_MODE_SHIFT, ecc_mode);
+}
+
+static inline void vf610_nfc_transfer_size(struct vf610_nfc *nfc, int size)
+{
+ vf610_nfc_write(nfc, NFC_SECTOR_SIZE, size);
+}
+
+static void vf610_nfc_command(struct mtd_info *mtd, unsigned command,
+ int column, int page)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ int trfr_sz = nfc->chip.options & NAND_BUSWIDTH_16 ? 1 : 0;
+
+ nfc->buf_offset = max(column, 0);
+ nfc->alt_buf = ALT_BUF_DATA;
+
+ switch (command) {
+ case NAND_CMD_SEQIN:
+ /* Use valid column/page from preread... */
+ vf610_nfc_addr_cycle(nfc, column, page);
+ nfc->buf_offset = 0;
+
+ /*
+ * SEQIN => data => PAGEPROG sequence is done by the controller
+ * hence we do not need to issue the command here...
+ */
+ return;
+ case NAND_CMD_PAGEPROG:
+ trfr_sz += nfc->write_sz;
+ vf610_nfc_transfer_size(nfc, trfr_sz);
+ vf610_nfc_send_commands(nfc, NAND_CMD_SEQIN,
+ command, PROGRAM_PAGE_CMD_CODE);
+ if (nfc->use_hw_ecc)
+ vf610_nfc_ecc_mode(nfc, nfc->ecc_mode);
+ else
+ vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
+ break;
+
+ case NAND_CMD_RESET:
+ vf610_nfc_transfer_size(nfc, 0);
+ vf610_nfc_send_command(nfc, command, RESET_CMD_CODE);
+ break;
+
+ case NAND_CMD_READOOB:
+ trfr_sz += mtd->oobsize;
+ column = mtd->writesize;
+ vf610_nfc_transfer_size(nfc, trfr_sz);
+ vf610_nfc_send_commands(nfc, NAND_CMD_READ0,
+ NAND_CMD_READSTART, READ_PAGE_CMD_CODE);
+ vf610_nfc_addr_cycle(nfc, column, page);
+ vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
+ break;
+
+ case NAND_CMD_READ0:
+ trfr_sz += mtd->writesize + mtd->oobsize;
+ vf610_nfc_transfer_size(nfc, trfr_sz);
+ vf610_nfc_send_commands(nfc, NAND_CMD_READ0,
+ NAND_CMD_READSTART, READ_PAGE_CMD_CODE);
+ vf610_nfc_addr_cycle(nfc, column, page);
+ vf610_nfc_ecc_mode(nfc, nfc->ecc_mode);
+ break;
+
+ case NAND_CMD_PARAM:
+ nfc->alt_buf = ALT_BUF_ONFI;
+ trfr_sz = 3 * sizeof(struct nand_onfi_params);
+ vf610_nfc_transfer_size(nfc, trfr_sz);
+ vf610_nfc_send_command(nfc, command, READ_ONFI_PARAM_CMD_CODE);
+ vf610_nfc_addr_cycle(nfc, -1, column);
+ vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
+ break;
+
+ case NAND_CMD_ERASE1:
+ vf610_nfc_transfer_size(nfc, 0);
+ vf610_nfc_send_commands(nfc, command,
+ NAND_CMD_ERASE2, ERASE_CMD_CODE);
+ vf610_nfc_addr_cycle(nfc, column, page);
+ break;
+
+ case NAND_CMD_READID:
+ nfc->alt_buf = ALT_BUF_ID;
+ nfc->buf_offset = 0;
+ vf610_nfc_transfer_size(nfc, 0);
+ vf610_nfc_send_command(nfc, command, READ_ID_CMD_CODE);
+ vf610_nfc_addr_cycle(nfc, -1, column);
+ break;
+
+ case NAND_CMD_STATUS:
+ nfc->alt_buf = ALT_BUF_STAT;
+ vf610_nfc_transfer_size(nfc, 0);
+ vf610_nfc_send_command(nfc, command, STATUS_READ_CMD_CODE);
+ break;
+ default:
+ return;
+ }
+
+ vf610_nfc_done(nfc);
+
+ nfc->use_hw_ecc = false;
+ nfc->write_sz = 0;
+}
+
+static void vf610_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ uint c = nfc->buf_offset;
+
+ /* Alternate buffers are only supported through read_byte */
+ WARN_ON(nfc->alt_buf);
+
+ vf610_nfc_memcpy(buf, nfc->regs + NFC_MAIN_AREA(0) + c, len);
+
+ nfc->buf_offset += len;
+}
+
+static void vf610_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ uint c = nfc->buf_offset;
+ uint l;
+
+ l = min_t(uint, len, mtd->writesize + mtd->oobsize - c);
+ vf610_nfc_memcpy(nfc->regs + NFC_MAIN_AREA(0) + c, buf, l);
+
+ nfc->write_sz += l;
+ nfc->buf_offset += l;
+}
+
+static uint8_t vf610_nfc_read_byte(struct mtd_info *mtd)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ u8 tmp;
+ uint c = nfc->buf_offset;
+
+ switch (nfc->alt_buf) {
+ case ALT_BUF_ID:
+ tmp = vf610_nfc_get_id(nfc, c);
+ break;
+ case ALT_BUF_STAT:
+ tmp = vf610_nfc_get_status(nfc);
+ break;
+#ifdef __LITTLE_ENDIAN
+ case ALT_BUF_ONFI:
+ /* Reverse the byte order within each word since the controller uses big endianness */
+ c = nfc->buf_offset ^ 0x3;
+ /* fall-through */
+#endif
+ default:
+ tmp = *((u8 *)(nfc->regs + NFC_MAIN_AREA(0) + c));
+ break;
+ }
+ nfc->buf_offset++;
+ return tmp;
+}
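The ALT_BUF_ONFI path above XORs the byte offset with 0x3, which mirrors the byte order within each 32-bit word. A standalone sketch of that trick (assumes a little-endian host, matching the #ifdef above; not driver code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        /* Pretend the controller SRAM holds "ONFI" stored big-endian per
         * 32-bit word; in little-endian memory the bytes appear reversed
         * within each word. */
        uint32_t word = 0x4F4E4649;             /* 'O' 'N' 'F' 'I' */
        uint8_t sram[4];
        int off;

        memcpy(sram, &word, sizeof(word));      /* LE memory: 49 46 4e 4f */

        for (off = 0; off < 4; off++)
                putchar(sram[off ^ 0x3]);       /* prints "ONFI" */
        putchar('\n');
        return 0;
}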
+
+static u16 vf610_nfc_read_word(struct mtd_info *mtd)
+{
+ u16 tmp;
+
+ vf610_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
+ return tmp;
+}
+
+/* If not provided, upper layers apply a fixed delay. */
+static int vf610_nfc_dev_ready(struct mtd_info *mtd)
+{
+ /* NFC handles R/B internally; always ready. */
+ return 1;
+}
+
+/*
+ * This function supports Vybrid only (MPC5125 would have full RB and four CS)
+ */
+static void vf610_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ u32 tmp = vf610_nfc_read(nfc, NFC_ROW_ADDR);
+
+ /* Vybrid only (MPC5125 would have full RB and four CS) */
+ if (nfc->variant != NFC_VFC610)
+ return;
+
+ tmp &= ~(ROW_ADDR_CHIP_SEL_RB_MASK | ROW_ADDR_CHIP_SEL_MASK);
+
+ if (chip >= 0) {
+ tmp |= 1 << ROW_ADDR_CHIP_SEL_RB_SHIFT;
+ tmp |= BIT(chip) << ROW_ADDR_CHIP_SEL_SHIFT;
+ }
+
+ vf610_nfc_write(nfc, NFC_ROW_ADDR, tmp);
+}
+
+/* Count the number of 0's in buff up to max_bits */
+static inline int count_written_bits(uint8_t *buff, int size, int max_bits)
+{
+ uint32_t *buff32 = (uint32_t *)buff;
+ int k, written_bits = 0;
+
+ for (k = 0; k < (size / 4); k++) {
+ written_bits += hweight32(~buff32[k]);
+ if (unlikely(written_bits > max_bits))
+ break;
+ }
+
+ return written_bits;
+}
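Counting programmed (zero) bits by popcounting the bitwise complement is the whole idea of count_written_bits() above. A standalone equivalent, using the GCC/Clang __builtin_popcount() in place of hweight32() (an illustration, not driver code):

#include <stdio.h>
#include <stdint.h>

static int count_zero_bits(const uint32_t *buf, int words)
{
        int zeros = 0;
        int i;

        for (i = 0; i < words; i++)
                zeros += __builtin_popcount(~buf[i]);
        return zeros;
}

int main(void)
{
        /* An "almost erased" 16-byte chunk with three bitflips. */
        uint32_t chunk[4] = { 0xffffffff, 0xfffffffe, 0xffffffff, 0xfdffffbf };

        printf("%d written bits\n", count_zero_bits(chunk, 4));  /* 3 */
        return 0;
}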
+
+static inline int vf610_nfc_correct_data(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *oob, int page)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ u32 ecc_status_off = NFC_MAIN_AREA(0) + ECC_SRAM_ADDR + ECC_STATUS;
+ u8 ecc_status;
+ u8 ecc_count;
+ int flips_threshold = nfc->chip.ecc.strength / 2;
+
+ ecc_status = vf610_nfc_read(nfc, ecc_status_off) & 0xff;
+ ecc_count = ecc_status & ECC_STATUS_ERR_COUNT;
+
+ if (!(ecc_status & ECC_STATUS_MASK))
+ return ecc_count;
+
+ /* Read OOB without ECC unit enabled */
+ vf610_nfc_command(mtd, NAND_CMD_READOOB, 0, page);
+ vf610_nfc_read_buf(mtd, oob, mtd->oobsize);
+
+ /*
+ * On an erased page, the count of written (zero) bits, including the
+ * OOB, should be zero or at least less than half of the ECC strength.
+ */
+ return nand_check_erased_ecc_chunk(dat, nfc->chip.ecc.size, oob,
+ mtd->oobsize, NULL, 0,
+ flips_threshold);
+}
+
+static int vf610_nfc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int eccsize = chip->ecc.size;
+ int stat;
+
+ vf610_nfc_read_buf(mtd, buf, eccsize);
+ if (oob_required)
+ vf610_nfc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ stat = vf610_nfc_correct_data(mtd, buf, chip->oob_poi, page);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ return 0;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ return stat;
+ }
+}
+
+static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+ vf610_nfc_write_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ vf610_nfc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /* Always write whole page including OOB due to HW ECC */
+ nfc->use_hw_ecc = true;
+ nfc->write_sz = mtd->writesize + mtd->oobsize;
+
+ return 0;
+}
+
+static const struct of_device_id vf610_nfc_dt_ids[] = {
+ { .compatible = "fsl,vf610-nfc", .data = (void *)NFC_VFC610 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vf610_nfc_dt_ids);
+
+static void vf610_nfc_preinit_controller(struct vf610_nfc *nfc)
+{
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_ADDR_AUTO_INCR_BIT);
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_BUFNO_AUTO_INCR_BIT);
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_BOOT_MODE_BIT);
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_DMA_REQ_BIT);
+ vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_FAST_FLASH_BIT);
+
+ /* Disable virtual pages, only one elementary transfer unit */
+ vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG, CONFIG_PAGE_CNT_MASK,
+ CONFIG_PAGE_CNT_SHIFT, 1);
+}
+
+static void vf610_nfc_init_controller(struct vf610_nfc *nfc)
+{
+ if (nfc->chip.options & NAND_BUSWIDTH_16)
+ vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+ else
+ vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+
+ if (nfc->chip.ecc.mode == NAND_ECC_HW) {
+ /* Set ECC status offset in SRAM */
+ vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
+ CONFIG_ECC_SRAM_ADDR_MASK,
+ CONFIG_ECC_SRAM_ADDR_SHIFT,
+ ECC_SRAM_ADDR >> 3);
+
+ /* Enable ECC status in SRAM */
+ vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_ECC_SRAM_REQ_BIT);
+ }
+}
+
+static int vf610_nfc_probe(struct platform_device *pdev)
+{
+ struct vf610_nfc *nfc;
+ struct resource *res;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct device_node *child;
+ const struct of_device_id *of_id;
+ int err;
+ int irq;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = &pdev->dev;
+ mtd = &nfc->mtd;
+ chip = &nfc->chip;
+
+ mtd->priv = chip;
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = nfc->dev;
+ mtd->name = DRV_NAME;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(nfc->dev, res);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ nfc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nfc->clk))
+ return PTR_ERR(nfc->clk);
+
+ err = clk_prepare_enable(nfc->clk);
+ if (err) {
+ dev_err(nfc->dev, "Unable to enable clock!\n");
+ return err;
+ }
+
+ of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
+ nfc->variant = (enum vf610_nfc_variant)of_id->data;
+
+ for_each_available_child_of_node(nfc->dev->of_node, child) {
+ if (of_device_is_compatible(child, "fsl,vf610-nfc-nandcs")) {
+
+ if (chip->flash_node) {
+ dev_err(nfc->dev,
+ "Only one NAND chip supported!\n");
+ err = -EINVAL;
+ goto error;
+ }
+
+ chip->flash_node = child;
+ }
+ }
+
+ if (!chip->flash_node) {
+ dev_err(nfc->dev, "NAND chip sub-node missing!\n");
+ err = -ENODEV;
+ goto err_clk;
+ }
+
+ chip->dev_ready = vf610_nfc_dev_ready;
+ chip->cmdfunc = vf610_nfc_command;
+ chip->read_byte = vf610_nfc_read_byte;
+ chip->read_word = vf610_nfc_read_word;
+ chip->read_buf = vf610_nfc_read_buf;
+ chip->write_buf = vf610_nfc_write_buf;
+ chip->select_chip = vf610_nfc_select_chip;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ init_completion(&nfc->cmd_done);
+
+ err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, mtd);
+ if (err) {
+ dev_err(nfc->dev, "Error requesting IRQ!\n");
+ goto error;
+ }
+
+ vf610_nfc_preinit_controller(nfc);
+
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ err = -ENXIO;
+ goto error;
+ }
+
+ vf610_nfc_init_controller(nfc);
+
+ /* Bad block options. */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ /* Single buffer only, max 256 bytes of OOB minus the ECC status */
+ if (mtd->writesize + mtd->oobsize > PAGE_2K + OOB_MAX - 8) {
+ dev_err(nfc->dev, "Unsupported flash page size\n");
+ err = -ENXIO;
+ goto error;
+ }
+
+ if (chip->ecc.mode == NAND_ECC_HW) {
+ if (mtd->writesize != PAGE_2K && mtd->oobsize < 64) {
+ dev_err(nfc->dev, "Unsupported flash with hwecc\n");
+ err = -ENXIO;
+ goto error;
+ }
+
+ if (chip->ecc.size != mtd->writesize) {
+ dev_err(nfc->dev, "Step size needs to be page size\n");
+ err = -ENXIO;
+ goto error;
+ }
+
+ /* Only 64-byte ECC layouts are known */
+ if (mtd->oobsize > 64)
+ mtd->oobsize = 64;
+
+ if (chip->ecc.strength == 32) {
+ nfc->ecc_mode = ECC_60_BYTE;
+ chip->ecc.bytes = 60;
+ chip->ecc.layout = &vf610_nfc_ecc60;
+ } else if (chip->ecc.strength == 24) {
+ nfc->ecc_mode = ECC_45_BYTE;
+ chip->ecc.bytes = 45;
+ chip->ecc.layout = &vf610_nfc_ecc45;
+ } else {
+ dev_err(nfc->dev, "Unsupported ECC strength\n");
+ err = -ENXIO;
+ goto error;
+ }
+
+ /* propagate ecc.layout to mtd_info */
+ mtd->ecclayout = chip->ecc.layout;
+ chip->ecc.read_page = vf610_nfc_read_page;
+ chip->ecc.write_page = vf610_nfc_write_page;
+
+ chip->ecc.size = PAGE_2K;
+ }
+
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
+ err = -ENXIO;
+ goto error;
+ }
+
+ platform_set_drvdata(pdev, mtd);
+
+ /* Register device in MTD */
+ return mtd_device_parse_register(mtd, NULL,
+ &(struct mtd_part_parser_data){
+ .of_node = chip->flash_node,
+ },
+ NULL, 0);
+
+error:
+ of_node_put(chip->flash_node);
+err_clk:
+ clk_disable_unprepare(nfc->clk);
+ return err;
+}
+
+static int vf610_nfc_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+ nand_release(mtd);
+ clk_disable_unprepare(nfc->clk);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vf610_nfc_suspend(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+ clk_disable_unprepare(nfc->clk);
+ return 0;
+}
+
+static int vf610_nfc_resume(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+ pinctrl_pm_select_default_state(dev);
+
+ clk_prepare_enable(nfc->clk);
+
+ vf610_nfc_preinit_controller(nfc);
+ vf610_nfc_init_controller(nfc);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(vf610_nfc_pm_ops, vf610_nfc_suspend, vf610_nfc_resume);
+
+static struct platform_driver vf610_nfc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = vf610_nfc_dt_ids,
+ .pm = &vf610_nfc_pm_ops,
+ },
+ .probe = vf610_nfc_probe,
+ .remove = vf610_nfc_remove,
+};
+
+module_platform_driver(vf610_nfc_driver);
+
+MODULE_AUTHOR("Stefan Agner <stefan.agner@toradex.com>");
+MODULE_DESCRIPTION("Freescale VF610/MPC5125 NFC MTD NAND driver");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/mtd/nand/xway_nand.c b/kernel/drivers/mtd/nand/xway_nand.c
index 3f81dc8f2..3b28db458 100644
--- a/kernel/drivers/mtd/nand/xway_nand.c
+++ b/kernel/drivers/mtd/nand/xway_nand.c
@@ -160,14 +160,10 @@ static int xway_nand_probe(struct platform_device *pdev)
return 0;
}
-/* allow users to override the partition in DT using the cmdline */
-static const char *part_probes[] = { "cmdlinepart", "ofpart", NULL };
-
static struct platform_nand_data xway_nand_data = {
.chip = {
.nr_chips = 1,
.chip_delay = 30,
- .part_probe_types = part_probes,
},
.ctrl = {
.probe = xway_nand_probe,
diff --git a/kernel/drivers/mtd/ofpart.c b/kernel/drivers/mtd/ofpart.c
index aa26c32e1..9ed6038e4 100644
--- a/kernel/drivers/mtd/ofpart.c
+++ b/kernel/drivers/mtd/ofpart.c
@@ -29,23 +29,41 @@ static int parse_ofpart_partitions(struct mtd_info *master,
struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
- struct device_node *node;
+ struct device_node *mtd_node;
+ struct device_node *ofpart_node;
const char *partname;
struct device_node *pp;
- int nr_parts, i;
+ int nr_parts, i, ret = 0;
+ bool dedicated = true;
if (!data)
return 0;
- node = data->of_node;
- if (!node)
+ mtd_node = data->of_node;
+ if (!mtd_node)
return 0;
+ ofpart_node = of_get_child_by_name(mtd_node, "partitions");
+ if (!ofpart_node) {
+ /*
+ * We might get here even when ofpart isn't used at all (e.g.,
+ * when using another parser), so don't be louder than
+ * KERN_DEBUG
+ */
+ pr_debug("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n",
+ master->name, mtd_node->full_name);
+ ofpart_node = mtd_node;
+ dedicated = false;
+ } else if (!of_device_is_compatible(ofpart_node, "fixed-partitions")) {
+ /* The 'partitions' subnode might be used by another parser */
+ return 0;
+ }
+
/* First count the subnodes */
nr_parts = 0;
- for_each_child_of_node(node, pp) {
- if (node_has_compatible(pp))
+ for_each_child_of_node(ofpart_node, pp) {
+ if (!dedicated && node_has_compatible(pp))
continue;
nr_parts++;
@@ -59,22 +77,36 @@ static int parse_ofpart_partitions(struct mtd_info *master,
return -ENOMEM;
i = 0;
- for_each_child_of_node(node, pp) {
+ for_each_child_of_node(ofpart_node, pp) {
const __be32 *reg;
int len;
int a_cells, s_cells;
- if (node_has_compatible(pp))
+ if (!dedicated && node_has_compatible(pp))
continue;
reg = of_get_property(pp, "reg", &len);
if (!reg) {
- nr_parts--;
- continue;
+ if (dedicated) {
+ pr_debug("%s: ofpart partition %s (%s) missing reg property.\n",
+ master->name, pp->full_name,
+ mtd_node->full_name);
+ goto ofpart_fail;
+ } else {
+ nr_parts--;
+ continue;
+ }
}
a_cells = of_n_addr_cells(pp);
s_cells = of_n_size_cells(pp);
+ if (len / 4 != a_cells + s_cells) {
+ pr_debug("%s: ofpart partition %s (%s) error parsing reg property.\n",
+ master->name, pp->full_name,
+ mtd_node->full_name);
+ goto ofpart_fail;
+ }
+
(*pparts)[i].offset = of_read_number(reg, a_cells);
(*pparts)[i].size = of_read_number(reg + a_cells, s_cells);
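A minimal standalone sketch (not kernel code) of the reg handling above: the length check requires exactly a_cells + s_cells 32-bit cells, and the offset and size are built by concatenating those cells, as of_read_number() does. The values below are shown already converted to CPU byte order; a real FDT stores them big-endian.

#include <stdio.h>
#include <stdint.h>

static uint64_t read_number(const uint32_t *cells, int n)
{
        uint64_t v = 0;

        while (n--)
                v = (v << 32) | *cells++;
        return v;
}

int main(void)
{
        /* partition@200000 { reg = <0x00200000 0x00100000>; };
         * with #address-cells = <1> and #size-cells = <1> */
        uint32_t reg[] = { 0x00200000, 0x00100000 };
        int len = sizeof(reg);          /* of_get_property() length in bytes */
        int a_cells = 1, s_cells = 1;

        if (len / 4 != a_cells + s_cells) {
                printf("malformed reg property\n");
                return 1;
        }
        printf("offset = 0x%llx, size = 0x%llx\n",
               (unsigned long long)read_number(reg, a_cells),
               (unsigned long long)read_number(reg + a_cells, s_cells));
        return 0;
}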
@@ -92,15 +124,20 @@ static int parse_ofpart_partitions(struct mtd_info *master,
i++;
}
- if (!i) {
- of_node_put(pp);
- pr_err("No valid partition found on %s\n", node->full_name);
- kfree(*pparts);
- *pparts = NULL;
- return -EINVAL;
- }
+ if (!nr_parts)
+ goto ofpart_none;
return nr_parts;
+
+ofpart_fail:
+ pr_err("%s: error parsing ofpart partition %s (%s)\n",
+ master->name, pp->full_name, mtd_node->full_name);
+ ret = -EINVAL;
+ofpart_none:
+ of_node_put(pp);
+ kfree(*pparts);
+ *pparts = NULL;
+ return ret;
}
static struct mtd_part_parser ofpart_parser = {
diff --git a/kernel/drivers/mtd/onenand/generic.c b/kernel/drivers/mtd/onenand/generic.c
index 32a216d31..125da34d8 100644
--- a/kernel/drivers/mtd/onenand/generic.c
+++ b/kernel/drivers/mtd/onenand/generic.c
@@ -18,7 +18,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
-#include <asm/io.h>
+#include <linux/io.h>
/*
* Note: Driver name and platform data format have been updated!
@@ -60,9 +60,8 @@ static int generic_onenand_probe(struct platform_device *pdev)
info->onenand.mmcontrol = pdata ? pdata->mmcontrol : NULL;
info->onenand.irq = platform_get_irq(pdev, 0);
- info->mtd.name = dev_name(&pdev->dev);
+ info->mtd.dev.parent = &pdev->dev;
info->mtd.priv = &info->onenand;
- info->mtd.owner = THIS_MODULE;
if (onenand_scan(&info->mtd, 1)) {
err = -ENXIO;
diff --git a/kernel/drivers/mtd/onenand/omap2.c b/kernel/drivers/mtd/onenand/omap2.c
index 646ddd6db..3e0285696 100644
--- a/kernel/drivers/mtd/onenand/omap2.c
+++ b/kernel/drivers/mtd/onenand/omap2.c
@@ -710,9 +710,7 @@ static int omap2_onenand_probe(struct platform_device *pdev)
c->onenand.base, c->freq);
c->pdev = pdev;
- c->mtd.name = dev_name(&pdev->dev);
c->mtd.priv = &c->onenand;
- c->mtd.owner = THIS_MODULE;
c->mtd.dev.parent = &pdev->dev;
diff --git a/kernel/drivers/mtd/onenand/samsung.c b/kernel/drivers/mtd/onenand/samsung.c
index 19cfb97ad..af0ac1a7b 100644
--- a/kernel/drivers/mtd/onenand/samsung.c
+++ b/kernel/drivers/mtd/onenand/samsung.c
@@ -864,7 +864,6 @@ static int s3c_onenand_probe(struct platform_device *pdev)
this = (struct onenand_chip *) &mtd[1];
mtd->priv = this;
mtd->dev.parent = &pdev->dev;
- mtd->owner = THIS_MODULE;
onenand->pdev = pdev;
onenand->type = platform_get_device_id(pdev)->driver_data;
@@ -1083,7 +1082,7 @@ static const struct dev_pm_ops s3c_pm_ops = {
.resume = s3c_pm_ops_resume,
};
-static struct platform_device_id s3c_onenand_driver_ids[] = {
+static const struct platform_device_id s3c_onenand_driver_ids[] = {
{
.name = "s3c6400-onenand",
.driver_data = TYPE_S3C6400,
diff --git a/kernel/drivers/mtd/spi-nor/Kconfig b/kernel/drivers/mtd/spi-nor/Kconfig
index 64a4f0eda..2fe2a7e90 100644
--- a/kernel/drivers/mtd/spi-nor/Kconfig
+++ b/kernel/drivers/mtd/spi-nor/Kconfig
@@ -23,9 +23,22 @@ config MTD_SPI_NOR_USE_4K_SECTORS
config SPI_FSL_QUADSPI
tristate "Freescale Quad SPI controller"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables support for the Quad SPI controller in master mode.
- We only connect the NOR to this controller now.
+ This controller does not support generic SPI. It only supports
+ SPI NOR.
+
+config SPI_NXP_SPIFI
+ tristate "NXP SPI Flash Interface (SPIFI)"
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+ depends on HAS_IOMEM
+ help
+ Enable support for the NXP LPC SPI Flash Interface controller.
+
+ SPIFI is a specialized controller for connecting serial SPI
+ Flash. Enable this option if you have a device with a SPIFI
+ controller and want to access the Flash as an MTD device.
endif # MTD_SPI_NOR
diff --git a/kernel/drivers/mtd/spi-nor/Makefile b/kernel/drivers/mtd/spi-nor/Makefile
index 6a7ce1462..e53333ef8 100644
--- a/kernel/drivers/mtd/spi-nor/Makefile
+++ b/kernel/drivers/mtd/spi-nor/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
+obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
diff --git a/kernel/drivers/mtd/spi-nor/fsl-quadspi.c b/kernel/drivers/mtd/spi-nor/fsl-quadspi.c
index 5d5d36272..7b10ed413 100644
--- a/kernel/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/kernel/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -26,6 +26,21 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
+#include <linux/mutex.h>
+#include <linux/pm_qos.h>
+#include <linux/sizes.h>
+
+/* Controller needs driver to swap endian */
+#define QUADSPI_QUIRK_SWAP_ENDIAN (1 << 0)
+/* Controller needs 4x internal clock */
+#define QUADSPI_QUIRK_4X_INT_CLK (1 << 1)
+/*
+ * TKT253890: the controller needs the driver to fill the TX FIFO up to
+ * 16 bytes to trigger the data transfer, even though the external data
+ * will not be transferred.
+ */
+#define QUADSPI_QUIRK_TKT253890 (1 << 2)
+/* Controller cannot wake up from wait mode, TKT245618 */
+#define QUADSPI_QUIRK_TKT245618 (1 << 3)
/* The registers */
#define QUADSPI_MCR 0x00
@@ -140,15 +155,15 @@
#define LUT_MODE 4
#define LUT_MODE2 5
#define LUT_MODE4 6
-#define LUT_READ 7
-#define LUT_WRITE 8
+#define LUT_FSL_READ 7
+#define LUT_FSL_WRITE 8
#define LUT_JMP_ON_CS 9
#define LUT_ADDR_DDR 10
#define LUT_MODE_DDR 11
#define LUT_MODE2_DDR 12
#define LUT_MODE4_DDR 13
-#define LUT_READ_DDR 14
-#define LUT_WRITE_DDR 15
+#define LUT_FSL_READ_DDR 14
+#define LUT_FSL_WRITE_DDR 15
#define LUT_DATA_LEARN 16
/*
@@ -191,9 +206,13 @@
#define SEQID_EN4B 10
#define SEQID_BRWR 11
+#define QUADSPI_MIN_IOMAP SZ_4M
+
enum fsl_qspi_devtype {
FSL_QUADSPI_VYBRID,
FSL_QUADSPI_IMX6SX,
+ FSL_QUADSPI_IMX7D,
+ FSL_QUADSPI_IMX6UL,
};
struct fsl_qspi_devtype_data {
@@ -201,29 +220,52 @@ struct fsl_qspi_devtype_data {
int rxfifo;
int txfifo;
int ahb_buf_size;
+ int driver_data;
};
static struct fsl_qspi_devtype_data vybrid_data = {
.devtype = FSL_QUADSPI_VYBRID,
.rxfifo = 128,
.txfifo = 64,
- .ahb_buf_size = 1024
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_SWAP_ENDIAN,
};
static struct fsl_qspi_devtype_data imx6sx_data = {
.devtype = FSL_QUADSPI_IMX6SX,
.rxfifo = 128,
.txfifo = 512,
- .ahb_buf_size = 1024
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_4X_INT_CLK
+ | QUADSPI_QUIRK_TKT245618,
+};
+
+static struct fsl_qspi_devtype_data imx7d_data = {
+ .devtype = FSL_QUADSPI_IMX7D,
+ .rxfifo = 512,
+ .txfifo = 512,
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_TKT253890
+ | QUADSPI_QUIRK_4X_INT_CLK,
+};
+
+static struct fsl_qspi_devtype_data imx6ul_data = {
+ .devtype = FSL_QUADSPI_IMX6UL,
+ .rxfifo = 128,
+ .txfifo = 512,
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_TKT253890
+ | QUADSPI_QUIRK_4X_INT_CLK,
};
#define FSL_QSPI_MAX_CHIP 4
struct fsl_qspi {
- struct mtd_info mtd[FSL_QSPI_MAX_CHIP];
struct spi_nor nor[FSL_QSPI_MAX_CHIP];
void __iomem *iobase;
- void __iomem *ahb_base; /* Used when read from AHB bus */
+ void __iomem *ahb_addr;
u32 memmap_phy;
+ u32 memmap_offs;
+ u32 memmap_len;
struct clk *clk, *clk_en;
struct device *dev;
struct completion c;
@@ -233,16 +275,28 @@ struct fsl_qspi {
u32 clk_rate;
unsigned int chip_base_addr; /* We may support two chips. */
bool has_second_chip;
+ struct mutex lock;
+ struct pm_qos_request pm_qos_req;
};
-static inline int is_vybrid_qspi(struct fsl_qspi *q)
+static inline int needs_swap_endian(struct fsl_qspi *q)
+{
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_SWAP_ENDIAN;
+}
+
+static inline int needs_4x_clock(struct fsl_qspi *q)
{
- return q->devtype_data->devtype == FSL_QUADSPI_VYBRID;
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_4X_INT_CLK;
}
-static inline int is_imx6sx_qspi(struct fsl_qspi *q)
+static inline int needs_fill_txfifo(struct fsl_qspi *q)
{
- return q->devtype_data->devtype == FSL_QUADSPI_IMX6SX;
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT253890;
+}
+
+static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
+{
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618;
}
/*
@@ -251,7 +305,7 @@ static inline int is_imx6sx_qspi(struct fsl_qspi *q)
*/
static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
{
- return is_vybrid_qspi(q) ? __swab32(a) : a;
+ return needs_swap_endian(q) ? __swab32(a) : a;
}
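The SWAP_ENDIAN quirk on Vybrid boils down to a 32-bit byte swap applied to every word moved through the FIFOs. A standalone equivalent of __swab32() for reference (illustration only, not driver code):

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
        return ((x & 0x000000ffu) << 24) |
               ((x & 0x0000ff00u) <<  8) |
               ((x & 0x00ff0000u) >>  8) |
               ((x & 0xff000000u) >> 24);
}

int main(void)
{
        printf("0x%08x -> 0x%08x\n", 0x11223344u, swab32(0x11223344u));
        /* prints: 0x11223344 -> 0x44332211 */
        return 0;
}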
static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q)
@@ -312,7 +366,7 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
base + QUADSPI_LUT(lut_base));
- writel(LUT0(DUMMY, PAD1, dummy) | LUT1(READ, PAD4, rxfifo),
+ writel(LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo),
base + QUADSPI_LUT(lut_base + 1));
/* Write enable */
@@ -333,24 +387,18 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
base + QUADSPI_LUT(lut_base));
- writel(LUT0(WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1));
+ writel(LUT0(FSL_WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1));
/* Read Status */
lut_base = SEQID_RDSR * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(READ, PAD1, 0x1),
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(FSL_READ, PAD1, 0x1),
base + QUADSPI_LUT(lut_base));
/* Erase a sector */
lut_base = SEQID_SE * 4;
- if (q->nor_size <= SZ_16M) {
- cmd = SPINOR_OP_SE;
- addrlen = ADDR24BIT;
- } else {
- /* use the 4-byte address */
- cmd = SPINOR_OP_SE;
- addrlen = ADDR32BIT;
- }
+ cmd = q->nor[0].erase_opcode;
+ addrlen = q->nor_size <= SZ_16M ? ADDR24BIT : ADDR32BIT;
writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
base + QUADSPI_LUT(lut_base));
@@ -362,17 +410,17 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
/* READ ID */
lut_base = SEQID_RDID * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(READ, PAD1, 0x8),
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(FSL_READ, PAD1, 0x8),
base + QUADSPI_LUT(lut_base));
/* Write Register */
lut_base = SEQID_WRSR * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(WRITE, PAD1, 0x2),
+ writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(FSL_WRITE, PAD1, 0x2),
base + QUADSPI_LUT(lut_base));
/* Read Configuration Register */
lut_base = SEQID_RDCR * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(READ, PAD1, 0x1),
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(FSL_READ, PAD1, 0x1),
base + QUADSPI_LUT(lut_base));
/* Write disable */
@@ -419,6 +467,8 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
case SPINOR_OP_BRWR:
return SEQID_BRWR;
default:
+ if (cmd == q->nor[0].erase_opcode)
+ return SEQID_SE;
dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd);
break;
}
@@ -537,7 +587,7 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
/* clear the TX FIFO. */
tmp = readl(q->iobase + QUADSPI_MCR);
- writel(tmp | QUADSPI_MCR_CLR_RXF_MASK, q->iobase + QUADSPI_MCR);
+ writel(tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
/* fill the TX data to the FIFO */
for (j = 0, i = ((count + 3) / 4); j < i; j++) {
@@ -546,6 +596,11 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
txbuf++;
}
+ /* fill the TX FIFO up to 16 bytes for i.MX7D */
+ if (needs_fill_txfifo(q))
+ for (; i < 4; i++)
+ writel(tmp, q->iobase + QUADSPI_TBDR);
+
/* Trigger it */
ret = fsl_qspi_runcmd(q, opcode, to, count);
@@ -606,6 +661,38 @@ static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
q->iobase + QUADSPI_BFGENCR);
}
+/* This function is used to prepare and enable the QSPI clock */
+static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
+{
+ int ret;
+
+ ret = clk_prepare_enable(q->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(q->clk);
+ if (ret) {
+ clk_disable_unprepare(q->clk_en);
+ return ret;
+ }
+
+ if (needs_wakeup_wait_mode(q))
+ pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
+
+ return 0;
+}
+
+/* This function is used to disable and unprepare the QSPI clock */
+static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
+{
+ if (needs_wakeup_wait_mode(q))
+ pm_qos_remove_request(&q->pm_qos_req);
+
+ clk_disable_unprepare(q->clk);
+ clk_disable_unprepare(q->clk_en);
+
+}
+
/* We use this function to do some basic init for spi_nor_scan(). */
static int fsl_qspi_nor_setup(struct fsl_qspi *q)
{
@@ -613,11 +700,23 @@ static int fsl_qspi_nor_setup(struct fsl_qspi *q)
u32 reg;
int ret;
- /* the default frequency, we will change it in the future.*/
+ /* disable and unprepare the clock to avoid passing a glitch to the controller */
+ fsl_qspi_clk_disable_unprep(q);
+
+ /* the default frequency, we will change it in the future. */
ret = clk_set_rate(q->clk, 66000000);
if (ret)
return ret;
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
+ /* Reset the module */
+ writel(QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
+ base + QUADSPI_MCR);
+ udelay(1);
+
/* Init the LUT table. */
fsl_qspi_init_lut(q);
@@ -635,6 +734,9 @@ static int fsl_qspi_nor_setup(struct fsl_qspi *q)
writel(QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
base + QUADSPI_MCR);
+ /* clear all interrupt status */
+ writel(0xffffffff, q->iobase + QUADSPI_FR);
+
/* enable the interrupt */
writel(QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
@@ -646,13 +748,20 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
unsigned long rate = q->clk_rate;
int ret;
- if (is_imx6sx_qspi(q))
+ if (needs_4x_clock(q))
rate *= 4;
+ /* disable and unprepare the clock to avoid passing a glitch to the controller */
+ fsl_qspi_clk_disable_unprep(q);
+
ret = clk_set_rate(q->clk, rate);
if (ret)
return ret;
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
/* Init the LUT table again. */
fsl_qspi_init_lut(q);
@@ -662,9 +771,11 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
return 0;
}
-static struct of_device_id fsl_qspi_dt_ids[] = {
+static const struct of_device_id fsl_qspi_dt_ids[] = {
{ .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, },
{ .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
+ { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
@@ -687,8 +798,7 @@ static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return 0;
}
-static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
- int write_enable)
+static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
struct fsl_qspi *q = nor->priv;
int ret;
@@ -730,11 +840,42 @@ static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
struct fsl_qspi *q = nor->priv;
u8 cmd = nor->read_opcode;
- dev_dbg(q->dev, "cmd [%x],read from (0x%p, 0x%.8x, 0x%.8x),len:%d\n",
- cmd, q->ahb_base, q->chip_base_addr, (unsigned int)from, len);
+ /* if necessary, ioremap the buffer before the AHB read */
+ if (!q->ahb_addr) {
+ q->memmap_offs = q->chip_base_addr + from;
+ q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
+
+ q->ahb_addr = ioremap_nocache(
+ q->memmap_phy + q->memmap_offs,
+ q->memmap_len);
+ if (!q->ahb_addr) {
+ dev_err(q->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+ /* ioremap if the data requested is out of range */
+ } else if (q->chip_base_addr + from < q->memmap_offs
+ || q->chip_base_addr + from + len >
+ q->memmap_offs + q->memmap_len) {
+ iounmap(q->ahb_addr);
+
+ q->memmap_offs = q->chip_base_addr + from;
+ q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
+ q->ahb_addr = ioremap_nocache(
+ q->memmap_phy + q->memmap_offs,
+ q->memmap_len);
+ if (!q->ahb_addr) {
+ dev_err(q->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+ }
+
+ dev_dbg(q->dev, "cmd [%x],read from %p, len:%zd\n",
+ cmd, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
+ len);
/* Read out the data directly from the AHB buffer.*/
- memcpy(buf, q->ahb_base + q->chip_base_addr + from, len);
+ memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
+ len);
*retlen += len;
return 0;
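The remap decision above keeps a sliding AHB window of at least QUADSPI_MIN_IOMAP (4 MiB) and only re-ioremaps when the requested range falls outside it. A standalone sketch of that containment test, with illustrative values (not driver code):

#include <stdbool.h>
#include <stdio.h>

#define MIN_IOMAP       (4u << 20)      /* mirrors QUADSPI_MIN_IOMAP */

static bool window_covers(unsigned int win_offs, unsigned int win_len,
                          unsigned int req_offs, unsigned int req_len)
{
        return req_offs >= win_offs &&
               req_offs + req_len <= win_offs + win_len;
}

int main(void)
{
        unsigned int win_offs = 0, win_len = MIN_IOMAP;

        /* A 64 KiB read at 8 MiB lies outside the initial 4 MiB window,
         * so the driver would iounmap() and ioremap() a new one. */
        printf("remap needed: %s\n",
               window_covers(win_offs, win_len, 8u << 20, 64u << 10) ?
               "no" : "yes");
        return 0;
}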
@@ -746,7 +887,7 @@ static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
int ret;
dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
- nor->mtd->erasesize / 1024, q->chip_base_addr, (u32)offs);
+ nor->mtd.erasesize / 1024, q->chip_base_addr, (u32)offs);
ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0);
if (ret)
@@ -761,26 +902,26 @@ static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
struct fsl_qspi *q = nor->priv;
int ret;
- ret = clk_enable(q->clk_en);
- if (ret)
- return ret;
+ mutex_lock(&q->lock);
- ret = clk_enable(q->clk);
- if (ret) {
- clk_disable(q->clk_en);
- return ret;
- }
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ goto err_mutex;
fsl_qspi_set_base_addr(q, nor);
return 0;
+
+err_mutex:
+ mutex_unlock(&q->lock);
+ return ret;
}
static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
struct fsl_qspi *q = nor->priv;
- clk_disable(q->clk);
- clk_disable(q->clk_en);
+ fsl_qspi_clk_disable_unprep(q);
+ mutex_unlock(&q->lock);
}
static int fsl_qspi_probe(struct platform_device *pdev)
@@ -804,6 +945,10 @@ static int fsl_qspi_probe(struct platform_device *pdev)
if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP)
return -ENODEV;
+ q->dev = dev;
+ q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data;
+ platform_set_drvdata(pdev, q);
+
/* find the resources */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
q->iobase = devm_ioremap_resource(dev, res);
@@ -812,9 +957,11 @@ static int fsl_qspi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"QuadSPI-memory");
- q->ahb_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(q->ahb_base))
- return PTR_ERR(q->ahb_base);
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ res->name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return -EBUSY;
+ }
q->memmap_phy = res->start;
@@ -827,15 +974,9 @@ static int fsl_qspi_probe(struct platform_device *pdev)
if (IS_ERR(q->clk))
return PTR_ERR(q->clk);
- ret = clk_prepare_enable(q->clk_en);
- if (ret) {
- dev_err(dev, "cannot enable the qspi_en clock: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(q->clk);
+ ret = fsl_qspi_clk_prep_enable(q);
if (ret) {
- dev_err(dev, "cannot enable the qspi clock: %d\n", ret);
+ dev_err(dev, "can not enable the clock\n");
goto clk_failed;
}
@@ -853,10 +994,6 @@ static int fsl_qspi_probe(struct platform_device *pdev)
goto irq_failed;
}
- q->dev = dev;
- q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data;
- platform_set_drvdata(pdev, q);
-
ret = fsl_qspi_nor_setup(q);
if (ret)
goto irq_failed;
@@ -864,21 +1001,20 @@ static int fsl_qspi_probe(struct platform_device *pdev)
if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
q->has_second_chip = true;
+ mutex_init(&q->lock);
+
/* iterate the subnodes. */
for_each_available_child_of_node(dev->of_node, np) {
- char modalias[40];
-
/* skip the holes */
if (!q->has_second_chip)
i *= 2;
nor = &q->nor[i];
- mtd = &q->mtd[i];
+ mtd = &nor->mtd;
- nor->mtd = mtd;
nor->dev = dev;
+ nor->flash_node = np;
nor->priv = q;
- mtd->priv = nor;
/* fill the hooks */
nor->read_reg = fsl_qspi_read_reg;
@@ -890,26 +1026,22 @@ static int fsl_qspi_probe(struct platform_device *pdev)
nor->prepare = fsl_qspi_prep;
nor->unprepare = fsl_qspi_unprep;
- ret = of_modalias_node(np, modalias, sizeof(modalias));
- if (ret < 0)
- goto irq_failed;
-
ret = of_property_read_u32(np, "spi-max-frequency",
&q->clk_rate);
if (ret < 0)
- goto irq_failed;
+ goto mutex_failed;
/* set the chip address for READID */
fsl_qspi_set_base_addr(q, nor);
- ret = spi_nor_scan(nor, modalias, SPI_NOR_QUAD);
+ ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
if (ret)
- goto irq_failed;
+ goto mutex_failed;
ppdata.of_node = np;
ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
if (ret)
- goto irq_failed;
+ goto mutex_failed;
/* Set the correct NOR size now. */
if (q->nor_size == 0) {
@@ -939,8 +1071,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
if (ret)
goto last_init_failed;
- clk_disable(q->clk);
- clk_disable(q->clk_en);
+ fsl_qspi_clk_disable_unprep(q);
return 0;
last_init_failed:
@@ -948,12 +1079,14 @@ last_init_failed:
/* skip the holes */
if (!q->has_second_chip)
i *= 2;
- mtd_device_unregister(&q->mtd[i]);
+ mtd_device_unregister(&q->nor[i].mtd);
}
+mutex_failed:
+ mutex_destroy(&q->lock);
irq_failed:
- clk_disable_unprepare(q->clk);
+ fsl_qspi_clk_disable_unprep(q);
clk_failed:
- clk_disable_unprepare(q->clk_en);
+ dev_err(dev, "Freescale QuadSPI probe failed\n");
return ret;
}
@@ -966,15 +1099,18 @@ static int fsl_qspi_remove(struct platform_device *pdev)
/* skip the holes */
if (!q->has_second_chip)
i *= 2;
- mtd_device_unregister(&q->mtd[i]);
+ mtd_device_unregister(&q->nor[i].mtd);
}
/* disable the hardware */
writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
writel(0x0, q->iobase + QUADSPI_RSER);
- clk_unprepare(q->clk);
- clk_unprepare(q->clk_en);
+ mutex_destroy(&q->lock);
+
+ if (q->ahb_addr)
+ iounmap(q->ahb_addr);
+
return 0;
}
@@ -985,12 +1121,19 @@ static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state)
static int fsl_qspi_resume(struct platform_device *pdev)
{
+ int ret;
struct fsl_qspi *q = platform_get_drvdata(pdev);
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
fsl_qspi_nor_setup(q);
fsl_qspi_set_map_addr(q);
fsl_qspi_nor_setup_last(q);
+ fsl_qspi_clk_disable_unprep(q);
+
return 0;
}
diff --git a/kernel/drivers/mtd/spi-nor/nxp-spifi.c b/kernel/drivers/mtd/spi-nor/nxp-spifi.c
new file mode 100644
index 000000000..9e82098ae
--- /dev/null
+++ b/kernel/drivers/mtd/spi-nor/nxp-spifi.c
@@ -0,0 +1,479 @@
+/*
+ * SPI-NOR driver for NXP SPI Flash Interface (SPIFI)
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on Freescale QuadSPI driver:
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* NXP SPIFI registers, bits and macros */
+#define SPIFI_CTRL 0x000
+#define SPIFI_CTRL_TIMEOUT(timeout) (timeout)
+#define SPIFI_CTRL_CSHIGH(cshigh) ((cshigh) << 16)
+#define SPIFI_CTRL_MODE3 BIT(23)
+#define SPIFI_CTRL_DUAL BIT(28)
+#define SPIFI_CTRL_FBCLK BIT(30)
+#define SPIFI_CMD 0x004
+#define SPIFI_CMD_DATALEN(dlen) ((dlen) & 0x3fff)
+#define SPIFI_CMD_DOUT BIT(15)
+#define SPIFI_CMD_INTLEN(ilen) ((ilen) << 16)
+#define SPIFI_CMD_FIELDFORM(field) ((field) << 19)
+#define SPIFI_CMD_FIELDFORM_ALL_SERIAL SPIFI_CMD_FIELDFORM(0x0)
+#define SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA SPIFI_CMD_FIELDFORM(0x1)
+#define SPIFI_CMD_FRAMEFORM(frame) ((frame) << 21)
+#define SPIFI_CMD_FRAMEFORM_OPCODE_ONLY SPIFI_CMD_FRAMEFORM(0x1)
+#define SPIFI_CMD_OPCODE(op) ((op) << 24)
+#define SPIFI_ADDR 0x008
+#define SPIFI_IDATA 0x00c
+#define SPIFI_CLIMIT 0x010
+#define SPIFI_DATA 0x014
+#define SPIFI_MCMD 0x018
+#define SPIFI_STAT 0x01c
+#define SPIFI_STAT_MCINIT BIT(0)
+#define SPIFI_STAT_CMD BIT(1)
+#define SPIFI_STAT_RESET BIT(4)
+
+#define SPI_NOR_MAX_ID_LEN 6
+
+struct nxp_spifi {
+ struct device *dev;
+ struct clk *clk_spifi;
+ struct clk *clk_reg;
+ void __iomem *io_base;
+ void __iomem *flash_base;
+ struct spi_nor nor;
+ bool memory_mode;
+ u32 mcmd;
+};
+
+static int nxp_spifi_wait_for_cmd(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ !(stat & SPIFI_STAT_CMD), 10, 30);
+ if (ret)
+ dev_warn(spifi->dev, "command timed out\n");
+
+ return ret;
+}
+
+static int nxp_spifi_reset(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ writel(SPIFI_STAT_RESET, spifi->io_base + SPIFI_STAT);
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ !(stat & SPIFI_STAT_RESET), 10, 30);
+ if (ret)
+ dev_warn(spifi->dev, "state reset timed out\n");
+
+ return ret;
+}
+
+static int nxp_spifi_set_memory_mode_off(struct nxp_spifi *spifi)
+{
+ int ret;
+
+ if (!spifi->memory_mode)
+ return 0;
+
+ ret = nxp_spifi_reset(spifi);
+ if (ret)
+ dev_err(spifi->dev, "unable to enter command mode\n");
+ else
+ spifi->memory_mode = false;
+
+ return ret;
+}
+
+static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ if (spifi->memory_mode)
+ return 0;
+
+ writel(spifi->mcmd, spifi->io_base + SPIFI_MCMD);
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ stat & SPIFI_STAT_MCINIT, 10, 30);
+ if (ret)
+ dev_err(spifi->dev, "unable to enter memory mode\n");
+ else
+ spifi->memory_mode = true;
+
+ return ret;
+}
+
+static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ cmd = SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_OPCODE(opcode) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ while (len--)
+ *buf++ = readb(spifi->io_base + SPIFI_DATA);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
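A standalone sketch of how nxp_spifi_read_reg() assembles the SPIFI_CMD word for an opcode-only register read, reusing the macros defined above with the JEDEC Read Status opcode 0x05 as the example (illustration only):

#include <stdio.h>
#include <stdint.h>

#define SPIFI_CMD_DATALEN(dlen)         ((dlen) & 0x3fff)
#define SPIFI_CMD_FIELDFORM(field)      ((field) << 19)
#define SPIFI_CMD_FIELDFORM_ALL_SERIAL  SPIFI_CMD_FIELDFORM(0x0)
#define SPIFI_CMD_FRAMEFORM(frame)      ((frame) << 21)
#define SPIFI_CMD_FRAMEFORM_OPCODE_ONLY SPIFI_CMD_FRAMEFORM(0x1)
#define SPIFI_CMD_OPCODE(op)            ((op) << 24)

int main(void)
{
        uint32_t cmd = SPIFI_CMD_DATALEN(1) |           /* one status byte */
                       SPIFI_CMD_OPCODE(0x05) |         /* RDSR */
                       SPIFI_CMD_FIELDFORM_ALL_SERIAL |
                       SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;

        printf("SPIFI_CMD = 0x%08x\n", cmd);            /* 0x05200001 */
        return 0;
}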
+
+static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ cmd = SPIFI_CMD_DOUT |
+ SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_OPCODE(opcode) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ while (len--)
+ writeb(*buf++, spifi->io_base + SPIFI_DATA);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_on(spifi);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, spifi->flash_base + from, len);
+ *retlen += len;
+
+ return 0;
+}
+
+static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return;
+
+ writel(to, spifi->io_base + SPIFI_ADDR);
+ *retlen += len;
+
+ cmd = SPIFI_CMD_DOUT |
+ SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_OPCODE(nor->program_opcode) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ while (len--)
+ writeb(*buf++, spifi->io_base + SPIFI_DATA);
+
+ nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ writel(offs, spifi->io_base + SPIFI_ADDR);
+
+ cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_OPCODE(nor->erase_opcode) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
+{
+ switch (spifi->nor.flash_read) {
+ case SPI_NOR_NORMAL:
+ case SPI_NOR_FAST:
+ spifi->mcmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL;
+ break;
+ case SPI_NOR_DUAL:
+ case SPI_NOR_QUAD:
+ spifi->mcmd = SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA;
+ break;
+ default:
+ dev_err(spifi->dev, "unsupported SPI read mode\n");
+ return -EINVAL;
+ }
+
+ /* Memory mode supports address lengths between 1 and 4 bytes */
+ if (spifi->nor.addr_width < 1 || spifi->nor.addr_width > 4)
+ return -EINVAL;
+
+ spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) |
+ SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+
+ return 0;
+}
+
+static void nxp_spifi_dummy_id_read(struct spi_nor *nor)
+{
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+}
+
+static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
+ struct device_node *np)
+{
+ struct mtd_part_parser_data ppdata;
+ enum read_mode flash_read;
+ u32 ctrl, property;
+ u16 mode = 0;
+ int ret;
+
+ if (!of_property_read_u32(np, "spi-rx-bus-width", &property)) {
+ switch (property) {
+ case 1:
+ break;
+ case 2:
+ mode |= SPI_RX_DUAL;
+ break;
+ case 4:
+ mode |= SPI_RX_QUAD;
+ break;
+ default:
+ dev_err(spifi->dev, "unsupported rx-bus-width\n");
+ return -EINVAL;
+ }
+ }
+
+ if (of_find_property(np, "spi-cpha", NULL))
+ mode |= SPI_CPHA;
+
+ if (of_find_property(np, "spi-cpol", NULL))
+ mode |= SPI_CPOL;
+
+ /* Setup control register defaults */
+ ctrl = SPIFI_CTRL_TIMEOUT(1000) |
+ SPIFI_CTRL_CSHIGH(15) |
+ SPIFI_CTRL_FBCLK;
+
+ if (mode & SPI_RX_DUAL) {
+ ctrl |= SPIFI_CTRL_DUAL;
+ flash_read = SPI_NOR_DUAL;
+ } else if (mode & SPI_RX_QUAD) {
+ ctrl &= ~SPIFI_CTRL_DUAL;
+ flash_read = SPI_NOR_QUAD;
+ } else {
+ ctrl |= SPIFI_CTRL_DUAL;
+ flash_read = SPI_NOR_NORMAL;
+ }
+
+ switch (mode & (SPI_CPHA | SPI_CPOL)) {
+ case SPI_MODE_0:
+ ctrl &= ~SPIFI_CTRL_MODE3;
+ break;
+ case SPI_MODE_3:
+ ctrl |= SPIFI_CTRL_MODE3;
+ break;
+ default:
+ dev_err(spifi->dev, "only mode 0 and 3 supported\n");
+ return -EINVAL;
+ }
+
+ writel(ctrl, spifi->io_base + SPIFI_CTRL);
+
+ spifi->nor.dev = spifi->dev;
+ spifi->nor.flash_node = np;
+ spifi->nor.priv = spifi;
+ spifi->nor.read = nxp_spifi_read;
+ spifi->nor.write = nxp_spifi_write;
+ spifi->nor.erase = nxp_spifi_erase;
+ spifi->nor.read_reg = nxp_spifi_read_reg;
+ spifi->nor.write_reg = nxp_spifi_write_reg;
+
+ /*
+ * The first read on a hard reset isn't reliable, so do a
+ * dummy read of the ID before calling spi_nor_scan().
+ * The reason for this problem is unknown.
+ *
+ * The official NXP spifilib uses more or less the same
+ * workaround that is applied here by reading the device
+ * id multiple times.
+ */
+ nxp_spifi_dummy_id_read(&spifi->nor);
+
+ ret = spi_nor_scan(&spifi->nor, NULL, flash_read);
+ if (ret) {
+ dev_err(spifi->dev, "device scan failed\n");
+ return ret;
+ }
+
+ ret = nxp_spifi_setup_memory_cmd(spifi);
+ if (ret) {
+ dev_err(spifi->dev, "memory command setup failed\n");
+ return ret;
+ }
+
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(&spifi->nor.mtd, NULL, &ppdata, NULL, 0);
+ if (ret) {
+ dev_err(spifi->dev, "mtd device parse failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nxp_spifi_probe(struct platform_device *pdev)
+{
+ struct device_node *flash_np;
+ struct nxp_spifi *spifi;
+ struct resource *res;
+ int ret;
+
+ spifi = devm_kzalloc(&pdev->dev, sizeof(*spifi), GFP_KERNEL);
+ if (!spifi)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spifi");
+ spifi->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spifi->io_base))
+ return PTR_ERR(spifi->io_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash");
+ spifi->flash_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spifi->flash_base))
+ return PTR_ERR(spifi->flash_base);
+
+ spifi->clk_spifi = devm_clk_get(&pdev->dev, "spifi");
+ if (IS_ERR(spifi->clk_spifi)) {
+ dev_err(&pdev->dev, "spifi clock not found\n");
+ return PTR_ERR(spifi->clk_spifi);
+ }
+
+ spifi->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ if (IS_ERR(spifi->clk_reg)) {
+ dev_err(&pdev->dev, "reg clock not found\n");
+ return PTR_ERR(spifi->clk_reg);
+ }
+
+ ret = clk_prepare_enable(spifi->clk_reg);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable reg clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(spifi->clk_spifi);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable spifi clock\n");
+ goto dis_clk_reg;
+ }
+
+ spifi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, spifi);
+
+ /* Initialize and reset device */
+ nxp_spifi_reset(spifi);
+ writel(0, spifi->io_base + SPIFI_IDATA);
+ writel(0, spifi->io_base + SPIFI_MCMD);
+ nxp_spifi_reset(spifi);
+
+ flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
+ if (!flash_np) {
+ dev_err(&pdev->dev, "no SPI flash device to configure\n");
+ ret = -ENODEV;
+ goto dis_clks;
+ }
+
+ ret = nxp_spifi_setup_flash(spifi, flash_np);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to setup flash chip\n");
+ goto dis_clks;
+ }
+
+ return 0;
+
+dis_clks:
+ clk_disable_unprepare(spifi->clk_spifi);
+dis_clk_reg:
+ clk_disable_unprepare(spifi->clk_reg);
+ return ret;
+}
+
+static int nxp_spifi_remove(struct platform_device *pdev)
+{
+ struct nxp_spifi *spifi = platform_get_drvdata(pdev);
+
+ mtd_device_unregister(&spifi->nor.mtd);
+ clk_disable_unprepare(spifi->clk_spifi);
+ clk_disable_unprepare(spifi->clk_reg);
+
+ return 0;
+}
+
+static const struct of_device_id nxp_spifi_match[] = {
+ {.compatible = "nxp,lpc1773-spifi"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nxp_spifi_match);
+
+static struct platform_driver nxp_spifi_driver = {
+ .probe = nxp_spifi_probe,
+ .remove = nxp_spifi_remove,
+ .driver = {
+ .name = "nxp-spifi",
+ .of_match_table = nxp_spifi_match,
+ },
+};
+module_platform_driver(nxp_spifi_driver);
+
+MODULE_DESCRIPTION("NXP SPI Flash Interface driver");
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/mtd/spi-nor/spi-nor.c b/kernel/drivers/mtd/spi-nor/spi-nor.c
index 14a5d2325..32477c4eb 100644
--- a/kernel/drivers/mtd/spi-nor/spi-nor.c
+++ b/kernel/drivers/mtd/spi-nor/spi-nor.c
@@ -16,19 +16,32 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/math64.h>
+#include <linux/sizes.h>
-#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/of_platform.h>
#include <linux/spi/flash.h>
#include <linux/mtd/spi-nor.h>
/* Define max times to check status register before we give up. */
-#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
+
+/*
+ * For everything but full-chip erase; probably could be much smaller, but kept
+ * around for safety for now
+ */
+#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
+
+/*
+ * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
+ * for larger flash
+ */
+#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
#define SPI_NOR_MAX_ID_LEN 6
struct flash_info {
+ char *name;
+
/*
* This array stores the ID bytes.
* The first three bytes are the JEDEC ID.
@@ -59,7 +72,7 @@ struct flash_info {
#define JEDEC_MFR(info) ((info)->id[0])
-static const struct spi_device_id *spi_nor_match_id(const char *name);
+static const struct flash_info *spi_nor_match_id(const char *name);
/*
* Read the status register, returning its value in the location
@@ -143,7 +156,7 @@ static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
static inline int write_sr(struct spi_nor *nor, u8 val)
{
nor->cmd_buf[0] = val;
- return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
+ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
}
/*
@@ -152,7 +165,7 @@ static inline int write_sr(struct spi_nor *nor, u8 val)
*/
static inline int write_enable(struct spi_nor *nor)
{
- return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
+ return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
}
/*
@@ -160,7 +173,7 @@ static inline int write_enable(struct spi_nor *nor)
*/
static inline int write_disable(struct spi_nor *nor)
{
- return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0, 0);
+ return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
}
static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
@@ -169,7 +182,7 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
}
/* Enable/disable 4-byte addressing mode. */
-static inline int set_4byte(struct spi_nor *nor, struct flash_info *info,
+static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
int enable)
{
int status;
@@ -177,16 +190,16 @@ static inline int set_4byte(struct spi_nor *nor, struct flash_info *info,
u8 cmd;
switch (JEDEC_MFR(info)) {
- case CFI_MFR_ST: /* Micron, actually */
+ case SNOR_MFR_MICRON:
/* Some Micron need WREN command; all will accept it */
need_wren = true;
- case CFI_MFR_MACRONIX:
- case 0xEF /* winbond */:
+ case SNOR_MFR_MACRONIX:
+ case SNOR_MFR_WINBOND:
if (need_wren)
write_enable(nor);
cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
- status = nor->write_reg(nor, cmd, NULL, 0, 0);
+ status = nor->write_reg(nor, cmd, NULL, 0);
if (need_wren)
write_disable(nor);
@@ -194,7 +207,7 @@ static inline int set_4byte(struct spi_nor *nor, struct flash_info *info,
default:
/* Spansion style */
nor->cmd_buf[0] = enable << 7;
- return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1, 0);
+ return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
}
}
static inline int spi_nor_sr_ready(struct spi_nor *nor)
@@ -231,12 +244,13 @@ static int spi_nor_ready(struct spi_nor *nor)
* Service routine to read status register until ready, or timeout occurs.
* Returns non-zero if error.
*/
-static int spi_nor_wait_till_ready(struct spi_nor *nor)
+static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
+ unsigned long timeout_jiffies)
{
unsigned long deadline;
int timeout = 0, ret;
- deadline = jiffies + MAX_READY_WAIT_JIFFIES;
+ deadline = jiffies + timeout_jiffies;
while (!timeout) {
if (time_after_eq(jiffies, deadline))
@@ -256,6 +270,12 @@ static int spi_nor_wait_till_ready(struct spi_nor *nor)
return -ETIMEDOUT;
}
+static int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+ return spi_nor_wait_till_ready_with_timeout(nor,
+ DEFAULT_READY_WAIT_JIFFIES);
+}
+
/*
* Erase the whole flash memory
*
@@ -263,9 +283,9 @@ static int spi_nor_wait_till_ready(struct spi_nor *nor)
*/
static int erase_chip(struct spi_nor *nor)
{
- dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd->size >> 10));
+ dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
- return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0, 0);
+ return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
}
static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
@@ -319,6 +339,8 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
/* whole-chip erase? */
if (len == mtd->size) {
+ unsigned long timeout;
+
write_enable(nor);
if (erase_chip(nor)) {
@@ -326,7 +348,16 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
goto erase_err;
}
- ret = spi_nor_wait_till_ready(nor);
+ /*
+ * Scale the timeout linearly with the size of the flash, with
+ * a minimum calibrated to an old 2MB flash. We could try to
+ * pull these from CFI/SFDP, but these values should be good
+ * enough for now.
+ */
+ timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
+ CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
+ (unsigned long)(mtd->size / SZ_2M));
+ ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
if (ret)
goto erase_err;
@@ -369,72 +400,171 @@ erase_err:
return ret;
}
+static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
+ uint64_t *len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ int shift = ffs(mask) - 1;
+ int pow;
+
+ if (!(sr & mask)) {
+ /* No protection */
+ *ofs = 0;
+ *len = 0;
+ } else {
+ pow = ((sr & mask) ^ mask) >> shift;
+ *len = mtd->size >> pow;
+ *ofs = mtd->size - *len;
+ }
+}
+
+/*
+ * Return 1 if the entire region is locked, 0 otherwise
+ */
+static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ loff_t lock_offs;
+ uint64_t lock_len;
+
+ stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
+
+ return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
+}
+
+/*
+ * Lock a region of the flash. Compatible with ST Micro and similar flash.
+ * Supports only the block protection bits BP{0,1,2} in the status register
+ * (SR). Does not support these features found in newer SR bitfields:
+ * - TB: top/bottom protect - only handle TB=0 (top protect)
+ * - SEC: sector/block protect - only handle SEC=0 (block protect)
+ * - CMP: complement protect - only support CMP=0 (range is not complemented)
+ *
+ * Sample table portion for 8MB flash (Winbond w25q64fw):
+ *
+ * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
+ * --------------------------------------------------------------------------
+ * X | X | 0 | 0 | 0 | NONE | NONE
+ * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
+ * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
+ * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
+ * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
+ * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
+ * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
+ * X | X | 1 | 1 | 1 | 8 MB | ALL
+ *
+ * Returns negative on errors, 0 on success.
+ */
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
- struct mtd_info *mtd = nor->mtd;
- uint32_t offset = ofs;
- uint8_t status_old, status_new;
- int ret = 0;
+ struct mtd_info *mtd = &nor->mtd;
+ u8 status_old, status_new;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 shift = ffs(mask) - 1, pow, val;
status_old = read_sr(nor);
- if (offset < mtd->size - (mtd->size / 2))
- status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
- else if (offset < mtd->size - (mtd->size / 4))
- status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
- else if (offset < mtd->size - (mtd->size / 8))
- status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
- else if (offset < mtd->size - (mtd->size / 16))
- status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
- else if (offset < mtd->size - (mtd->size / 32))
- status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
- else if (offset < mtd->size - (mtd->size / 64))
- status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
- else
- status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
+ /* SPI NOR always locks to the end */
+ if (ofs + len != mtd->size) {
+ /* Does combined region extend to end? */
+ if (!stm_is_locked_sr(nor, ofs + len, mtd->size - ofs - len,
+ status_old))
+ return -EINVAL;
+ len = mtd->size - ofs;
+ }
+
+ /*
+ * Need smallest pow such that:
+ *
+ * 1 / (2^pow) <= (len / size)
+ *
+ * so (assuming power-of-2 size) we do:
+ *
+ * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
+ */
+ pow = ilog2(mtd->size) - ilog2(len);
+ val = mask - (pow << shift);
+ if (val & ~mask)
+ return -EINVAL;
+ /* Don't "lock" with no region! */
+ if (!(val & mask))
+ return -EINVAL;
+
+ status_new = (status_old & ~mask) | val;
/* Only modify protection if it will not unlock other areas */
- if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) >
- (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
- write_enable(nor);
- ret = write_sr(nor, status_new);
- }
+ if ((status_new & mask) <= (status_old & mask))
+ return -EINVAL;
- return ret;
+ write_enable(nor);
+ return write_sr(nor, status_new);
}
+/*
+ * Unlock a region of the flash. See stm_lock() for more info
+ *
+ * Returns negative on errors, 0 on success.
+ */
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
- struct mtd_info *mtd = nor->mtd;
- uint32_t offset = ofs;
+ struct mtd_info *mtd = &nor->mtd;
uint8_t status_old, status_new;
- int ret = 0;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 shift = ffs(mask) - 1, pow, val;
status_old = read_sr(nor);
- if (offset+len > mtd->size - (mtd->size / 64))
- status_new = status_old & ~(SR_BP2 | SR_BP1 | SR_BP0);
- else if (offset+len > mtd->size - (mtd->size / 32))
- status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
- else if (offset+len > mtd->size - (mtd->size / 16))
- status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
- else if (offset+len > mtd->size - (mtd->size / 8))
- status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
- else if (offset+len > mtd->size - (mtd->size / 4))
- status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
- else if (offset+len > mtd->size - (mtd->size / 2))
- status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
- else
- status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+ /* Cannot unlock; would unlock larger region than requested */
+ if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize,
+ status_old))
+ return -EINVAL;
- /* Only modify protection if it will not lock other areas */
- if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) <
- (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
- write_enable(nor);
- ret = write_sr(nor, status_new);
+ /*
+ * Need largest pow such that:
+ *
+ * 1 / (2^pow) >= (len / size)
+ *
+ * so (assuming power-of-2 size) we do:
+ *
+ * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
+ */
+ pow = ilog2(mtd->size) - order_base_2(mtd->size - (ofs + len));
+ if (ofs + len == mtd->size) {
+ val = 0; /* fully unlocked */
+ } else {
+ val = mask - (pow << shift);
+ /* Some power-of-two sizes are not supported */
+ if (val & ~mask)
+ return -EINVAL;
}
- return ret;
+ status_new = (status_old & ~mask) | val;
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new & mask) >= (status_old & mask))
+ return -EINVAL;
+
+ write_enable(nor);
+ return write_sr(nor, status_new);
+}
+
+/*
+ * Check if a region of the flash is (completely) locked. See stm_lock() for
+ * more info.
+ *
+ * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
+ * negative on errors.
+ */
+static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int status;
+
+ status = read_sr(nor);
+ if (status < 0)
+ return status;
+
+ return stm_is_locked_sr(nor, ofs, len, status);
}
static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -467,9 +597,23 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
+static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+ if (ret)
+ return ret;
+
+ ret = nor->flash_is_locked(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ return ret;
+}
+
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- ((kernel_ulong_t)&(struct flash_info) { \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
@@ -481,11 +625,9 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
- .flags = (_flags), \
- })
+ .flags = (_flags),
#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- ((kernel_ulong_t)&(struct flash_info) { \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
@@ -498,23 +640,27 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
- .flags = (_flags), \
- })
+ .flags = (_flags),
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
- ((kernel_ulong_t)&(struct flash_info) { \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
.addr_width = (_addr_width), \
- .flags = (_flags), \
- })
+ .flags = (_flags),
/* NOTE: double check command sets and memory organization when you add
* more NOR chips. This current list focuses on newer chips, which
* have been converging on command sets that include JEDEC ID.
+ *
+ * All newly added entries should describe *hardware* and should use SECT_4K
+ * (or SECT_4K_PMC) if hardware supports erasing 4 KiB sectors. For usage
+ * scenarios excluding small sectors there is a config option that can be
+ * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
+ * For historical (and compatibility) reasons (before we got the above config)
+ * some old entries may be missing the 4K flag.
*/
-static const struct spi_device_id spi_nor_ids[] = {
+static const struct flash_info spi_nor_ids[] = {
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
{ "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
@@ -538,7 +684,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
- { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, 0) },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
/* ESMT */
{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
@@ -560,7 +706,11 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
+ /* ISSI */
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+
/* Macronix */
+ { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
@@ -578,7 +728,9 @@ static const struct spi_device_id spi_nor_ids[] = {
/* Micron */
{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SPI_NOR_QUAD_READ) },
+ { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
@@ -595,25 +747,28 @@ static const struct spi_device_id spi_nor_ids[] = {
* for the chips listed here (without boot sectors).
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
+ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
- { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
- { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
+ { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
- { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, 0) },
+ { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
+ { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -624,6 +779,8 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
+ { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
+ { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
@@ -672,10 +829,11 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
@@ -690,11 +848,11 @@ static const struct spi_device_id spi_nor_ids[] = {
{ },
};
-static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
+static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
int tmp;
u8 id[SPI_NOR_MAX_ID_LEN];
- struct flash_info *info;
+ const struct flash_info *info;
tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
if (tmp < 0) {
@@ -703,7 +861,7 @@ static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
}
for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
- info = (void *)spi_nor_ids[tmp].driver_data;
+ info = &spi_nor_ids[tmp];
if (info->id_len) {
if (!memcmp(info->id, id, info->id_len))
return &spi_nor_ids[tmp];
@@ -857,8 +1015,7 @@ static int macronix_quad_enable(struct spi_nor *nor)
val = read_sr(nor);
write_enable(nor);
- nor->cmd_buf[0] = val | SR_QUAD_EN_MX;
- nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
+ write_sr(nor, val | SR_QUAD_EN_MX);
if (spi_nor_wait_till_ready(nor))
return 1;
@@ -883,7 +1040,7 @@ static int write_sr_cr(struct spi_nor *nor, u16 val)
nor->cmd_buf[0] = val & 0xff;
nor->cmd_buf[1] = (val >> 8);
- return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2, 0);
+ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2);
}
static int spansion_quad_enable(struct spi_nor *nor)
@@ -925,7 +1082,7 @@ static int micron_quad_enable(struct spi_nor *nor)
/* set EVCR, enable quad I/O */
nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
- ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1, 0);
+ ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
if (ret < 0) {
dev_err(nor->dev, "error while writing EVCR register\n");
return ret;
@@ -949,19 +1106,19 @@ static int micron_quad_enable(struct spi_nor *nor)
return 0;
}
-static int set_quad_mode(struct spi_nor *nor, struct flash_info *info)
+static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
{
int status;
switch (JEDEC_MFR(info)) {
- case CFI_MFR_MACRONIX:
+ case SNOR_MFR_MACRONIX:
status = macronix_quad_enable(nor);
if (status) {
dev_err(nor->dev, "Macronix quad-read not enabled\n");
return -EINVAL;
}
return status;
- case CFI_MFR_ST:
+ case SNOR_MFR_MICRON:
status = micron_quad_enable(nor);
if (status) {
dev_err(nor->dev, "Micron quad-read not enabled\n");
@@ -991,11 +1148,10 @@ static int spi_nor_check(struct spi_nor *nor)
int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
{
- const struct spi_device_id *id = NULL;
- struct flash_info *info;
+ const struct flash_info *info = NULL;
struct device *dev = nor->dev;
- struct mtd_info *mtd = nor->mtd;
- struct device_node *np = dev->of_node;
+ struct mtd_info *mtd = &nor->mtd;
+ struct device_node *np = nor->flash_node;
int ret;
int i;
@@ -1003,27 +1159,25 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
if (ret)
return ret;
- /* Try to auto-detect if chip name wasn't specified */
- if (!name)
- id = spi_nor_read_id(nor);
- else
- id = spi_nor_match_id(name);
- if (IS_ERR_OR_NULL(id))
+ if (name)
+ info = spi_nor_match_id(name);
+ /* Try to auto-detect if chip name wasn't specified or not found */
+ if (!info)
+ info = spi_nor_read_id(nor);
+ if (IS_ERR_OR_NULL(info))
return -ENOENT;
- info = (void *)id->driver_data;
-
/*
* If caller has specified name of flash model that can normally be
* detected using JEDEC, let's verify it.
*/
if (name && info->id_len) {
- const struct spi_device_id *jid;
+ const struct flash_info *jinfo;
- jid = spi_nor_read_id(nor);
- if (IS_ERR(jid)) {
- return PTR_ERR(jid);
- } else if (jid != id) {
+ jinfo = spi_nor_read_id(nor);
+ if (IS_ERR(jinfo)) {
+ return PTR_ERR(jinfo);
+ } else if (jinfo != info) {
/*
* JEDEC knows better, so overwrite platform ID. We
* can't trust partitions any longer, but we'll let
@@ -1032,28 +1186,28 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
* information, even if it's not 100% accurate.
*/
dev_warn(dev, "found %s, expected %s\n",
- jid->name, id->name);
- id = jid;
- info = (void *)jid->driver_data;
+ jinfo->name, info->name);
+ info = jinfo;
}
}
mutex_init(&nor->lock);
/*
- * Atmel, SST and Intel/Numonyx serial nor tend to power
- * up with the software protection bits set
+ * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to power
+ * up with the software protection bits set
*/
- if (JEDEC_MFR(info) == CFI_MFR_ATMEL ||
- JEDEC_MFR(info) == CFI_MFR_INTEL ||
- JEDEC_MFR(info) == CFI_MFR_SST) {
+ if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(info) == SNOR_MFR_INTEL ||
+ JEDEC_MFR(info) == SNOR_MFR_SST) {
write_enable(nor);
write_sr(nor, 0);
}
if (!mtd->name)
mtd->name = dev_name(dev);
+ mtd->priv = nor;
mtd->type = MTD_NORFLASH;
mtd->writesize = 1;
mtd->flags = MTD_CAP_NORFLASH;
@@ -1061,15 +1215,17 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
- /* nor protection support for STmicro chips */
- if (JEDEC_MFR(info) == CFI_MFR_ST) {
+ /* NOR protection support for STmicro/Micron chips and similar */
+ if (JEDEC_MFR(info) == SNOR_MFR_MICRON) {
nor->flash_lock = stm_lock;
nor->flash_unlock = stm_unlock;
+ nor->flash_is_locked = stm_is_locked;
}
- if (nor->flash_lock && nor->flash_unlock) {
+ if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) {
mtd->_lock = spi_nor_lock;
mtd->_unlock = spi_nor_unlock;
+ mtd->_is_locked = spi_nor_is_locked;
}
/* sst nor chips use AAI word program */
@@ -1156,7 +1312,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
else if (mtd->size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
nor->addr_width = 4;
- if (JEDEC_MFR(info) == CFI_MFR_AMD) {
+ if (JEDEC_MFR(info) == SNOR_MFR_SPANSION) {
/* Dedicated 4-byte command set */
switch (nor->flash_read) {
case SPI_NOR_QUAD:
@@ -1184,7 +1340,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
nor->read_dummy = spi_nor_read_dummy_cycles(nor);
- dev_info(dev, "%s (%lld Kbytes)\n", id->name,
+ dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);
dev_dbg(dev,
@@ -1207,11 +1363,11 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
-static const struct spi_device_id *spi_nor_match_id(const char *name)
+static const struct flash_info *spi_nor_match_id(const char *name)
{
- const struct spi_device_id *id = spi_nor_ids;
+ const struct flash_info *id = spi_nor_ids;
- while (id->name[0]) {
+ while (id->name) {
if (!strcmp(name, id->name))
return id;
id++;
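
The lock/unlock rework above replaces the old if/else ladders with the closed-form BP-bit computation documented in the stm_lock() comment, and the chip-erase path now scales its timeout with flash size. A standalone sketch of both calculations, assuming the usual status-register encoding SR_BP0=0x04, SR_BP1=0x08, SR_BP2=0x10, reproduces the 8 MiB protection table above:

#include <stdio.h>
#include <stdint.h>

#define SR_BP0	0x04
#define SR_BP1	0x08
#define SR_BP2	0x10

static unsigned int ilog2_u64(uint64_t v)	/* floor(log2(v)), v > 0 */
{
	return 63 - __builtin_clzll(v);
}

int main(void)
{
	const uint64_t size = 8 * 1024 * 1024;	/* 8 MiB, as in the table above */
	const uint8_t mask = SR_BP2 | SR_BP1 | SR_BP0;
	const unsigned int shift = __builtin_ffs(mask) - 1;
	uint64_t len;

	for (len = size / 64; len <= size; len *= 2) {
		/* stm_lock(): smallest pow such that 1/2^pow <= len/size */
		unsigned int pow = ilog2_u64(size) - ilog2_u64(len);
		uint8_t val = mask - (pow << shift);

		/* stm_get_locked_range(): recover the locked length from the SR bits */
		unsigned int rpow = ((val & mask) ^ mask) >> shift;
		uint64_t rlen = size >> rpow;

		printf("lock upper %7llu KiB -> BP=%d%d%d (recovers %llu KiB)\n",
		       (unsigned long long)(len >> 10),
		       !!(val & SR_BP2), !!(val & SR_BP1), !!(val & SR_BP0),
		       (unsigned long long)(rlen >> 10));
	}

	/* Chip-erase timeout now scales with size: 40 s per 2 MiB, 40 s minimum */
	{
		unsigned long long chunks = size / (2 * 1024 * 1024);
		printf("chip-erase timeout: %llu s\n", 40ULL * (chunks ? chunks : 1));
	}
	return 0;
}
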
diff --git a/kernel/drivers/mtd/tests/oobtest.c b/kernel/drivers/mtd/tests/oobtest.c
index 8e8525f02..31762120e 100644
--- a/kernel/drivers/mtd/tests/oobtest.c
+++ b/kernel/drivers/mtd/tests/oobtest.c
@@ -125,7 +125,8 @@ static int write_whole_device(void)
* Display the address, offset and data bytes at comparison failure.
* Return number of bitflips encountered.
*/
-static size_t memcmpshow(loff_t addr, const void *cs, const void *ct, size_t count)
+static size_t memcmpshowoffset(loff_t addr, loff_t offset, const void *cs,
+ const void *ct, size_t count)
{
const unsigned char *su1, *su2;
int res;
@@ -135,8 +136,9 @@ static size_t memcmpshow(loff_t addr, const void *cs, const void *ct, size_t cou
for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--, i++) {
res = *su1 ^ *su2;
if (res) {
- pr_info("error @addr[0x%lx:0x%zx] 0x%x -> 0x%x diff 0x%x\n",
- (unsigned long)addr, i, *su1, *su2, res);
+ pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0x%x diff 0x%x\n",
+ (unsigned long)addr, (unsigned long)offset + i,
+ *su1, *su2, res);
bitflips += hweight8(res);
}
}
@@ -144,6 +146,9 @@ static size_t memcmpshow(loff_t addr, const void *cs, const void *ct, size_t cou
return bitflips;
}
+#define memcmpshow(addr, cs, ct, count) memcmpshowoffset((addr), 0, (cs), (ct),\
+ (count))
+
/*
* Compare with 0xff and show the address, offset and data bytes at
* comparison failure. Return number of bitflips encountered.
@@ -228,9 +233,10 @@ static int verify_eraseblock(int ebnum)
errcnt += 1;
return err ? err : -1;
}
- bitflips = memcmpshow(addr, readbuf + use_offset,
- writebuf + (use_len_max * i) + use_offset,
- use_len);
+ bitflips = memcmpshowoffset(addr, use_offset,
+ readbuf + use_offset,
+ writebuf + (use_len_max * i) + use_offset,
+ use_len);
/* verify pre-offset area for 0xff */
bitflips += memffshow(addr, 0, readbuf, use_offset);
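
memcmpshowoffset() tallies bitflips by XOR-ing each expected byte with the byte actually read and counting the set bits (hweight8() in the kernel). A minimal userspace equivalent of that accounting, for illustration only:

#include <stdio.h>

static unsigned int count_bitflips(const unsigned char *want,
				   const unsigned char *got, size_t len)
{
	unsigned int flips = 0;
	size_t i;

	for (i = 0; i < len; i++)
		flips += __builtin_popcount(want[i] ^ got[i]); /* hweight8() in-kernel */
	return flips;
}

int main(void)
{
	unsigned char want[] = { 0xff, 0x00, 0xa5 };
	unsigned char got[]  = { 0xfe, 0x00, 0xa4 };	/* two single-bit flips */

	printf("%u bitflips\n", count_bitflips(want, got, sizeof(want)));
	return 0;
}
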
diff --git a/kernel/drivers/mtd/tests/speedtest.c b/kernel/drivers/mtd/tests/speedtest.c
index 5a6f31af0..0b89418a0 100644
--- a/kernel/drivers/mtd/tests/speedtest.c
+++ b/kernel/drivers/mtd/tests/speedtest.c
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
+#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
@@ -49,7 +50,7 @@ static int pgsize;
static int ebcnt;
static int pgcnt;
static int goodebcnt;
-static struct timeval start, finish;
+static ktime_t start, finish;
static int multiblock_erase(int ebnum, int blocks)
{
@@ -168,12 +169,12 @@ static int read_eraseblock_by_2pages(int ebnum)
static inline void start_timing(void)
{
- do_gettimeofday(&start);
+ start = ktime_get();
}
static inline void stop_timing(void)
{
- do_gettimeofday(&finish);
+ finish = ktime_get();
}
static long calc_speed(void)
@@ -181,8 +182,7 @@ static long calc_speed(void)
uint64_t k;
long ms;
- ms = (finish.tv_sec - start.tv_sec) * 1000 +
- (finish.tv_usec - start.tv_usec) / 1000;
+ ms = ktime_ms_delta(finish, start);
if (ms == 0)
return 0;
k = (uint64_t)goodebcnt * (mtd->erasesize / 1024) * 1000;
diff --git a/kernel/drivers/mtd/tests/torturetest.c b/kernel/drivers/mtd/tests/torturetest.c
index e5d6e6d95..93c2729c4 100644
--- a/kernel/drivers/mtd/tests/torturetest.c
+++ b/kernel/drivers/mtd/tests/torturetest.c
@@ -26,6 +26,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
+#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
@@ -79,18 +80,18 @@ static unsigned char *check_buf;
static unsigned int erase_cycles;
static int pgsize;
-static struct timeval start, finish;
+static ktime_t start, finish;
static void report_corrupt(unsigned char *read, unsigned char *written);
static inline void start_timing(void)
{
- do_gettimeofday(&start);
+ start = ktime_get();
}
static inline void stop_timing(void)
{
- do_gettimeofday(&finish);
+ finish = ktime_get();
}
/*
@@ -333,8 +334,7 @@ static int __init tort_init(void)
long ms;
stop_timing();
- ms = (finish.tv_sec - start.tv_sec) * 1000 +
- (finish.tv_usec - start.tv_usec) / 1000;
+ ms = ktime_ms_delta(finish, start);
pr_info("%08u erase cycles done, took %lu "
"milliseconds (%lu seconds)\n",
erase_cycles, ms, ms / 1000);
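
Both speedtest and torturetest move from do_gettimeofday() to the monotonic ktime API. The shared pattern, sketched as kernel-context code (not a standalone program):

#include <linux/ktime.h>

static ktime_t t_start, t_finish;

static inline void sample_start(void)
{
	t_start = ktime_get();		/* monotonic, unaffected by settimeofday() */
}

static inline void sample_stop(void)
{
	t_finish = ktime_get();
}

static inline long sample_elapsed_ms(void)
{
	return ktime_ms_delta(t_finish, t_start);	/* signed difference in ms */
}
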
diff --git a/kernel/drivers/mtd/ubi/attach.c b/kernel/drivers/mtd/ubi/attach.c
index 68eea5bef..c1aaf0336 100644
--- a/kernel/drivers/mtd/ubi/attach.c
+++ b/kernel/drivers/mtd/ubi/attach.c
@@ -1209,9 +1209,7 @@ static void destroy_ai(struct ubi_attach_info *ai)
}
}
- if (ai->aeb_slab_cache)
- kmem_cache_destroy(ai->aeb_slab_cache);
-
+ kmem_cache_destroy(ai->aeb_slab_cache);
kfree(ai);
}
diff --git a/kernel/drivers/mtd/ubi/block.c b/kernel/drivers/mtd/ubi/block.c
index c9eb78f10..ebf46ad2d 100644
--- a/kernel/drivers/mtd/ubi/block.c
+++ b/kernel/drivers/mtd/ubi/block.c
@@ -48,6 +48,7 @@
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
+#include <linux/idr.h>
#include <asm/div64.h>
#include "ubi-media.h"
@@ -161,7 +162,7 @@ static int __init ubiblock_set_param(const char *val,
return 0;
}
-static struct kernel_param_ops ubiblock_param_ops = {
+static const struct kernel_param_ops ubiblock_param_ops = {
.set = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
@@ -353,6 +354,8 @@ static struct blk_mq_ops ubiblock_mq_ops = {
.map_queue = blk_mq_map_queue,
};
+static DEFINE_IDR(ubiblock_minor_idr);
+
int ubiblock_create(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
@@ -390,7 +393,13 @@ int ubiblock_create(struct ubi_volume_info *vi)
gd->fops = &ubiblock_ops;
gd->major = ubiblock_major;
- gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id;
+ gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
+ if (gd->first_minor < 0) {
+ dev_err(disk_to_dev(gd),
+ "block: dynamic minor allocation failed");
+ ret = -ENODEV;
+ goto out_put_disk;
+ }
gd->private_data = dev;
sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
set_capacity(gd, disk_capacity);
@@ -407,7 +416,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
ret = blk_mq_alloc_tag_set(&dev->tag_set);
if (ret) {
dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
- goto out_put_disk;
+ goto out_remove_minor;
}
dev->rq = blk_mq_init_queue(&dev->tag_set);
@@ -445,6 +454,8 @@ out_free_queue:
blk_cleanup_queue(dev->rq);
out_free_tags:
blk_mq_free_tag_set(&dev->tag_set);
+out_remove_minor:
+ idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_put_disk:
put_disk(dev->gd);
out_free_dev:
@@ -463,6 +474,7 @@ static void ubiblock_cleanup(struct ubiblock *dev)
blk_cleanup_queue(dev->rq);
blk_mq_free_tag_set(&dev->tag_set);
dev_info(disk_to_dev(dev->gd), "released");
+ idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
put_disk(dev->gd);
}
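
ubiblock now allocates minor numbers from an IDR instead of deriving them from ubi_num and vol_id, and gives them back in ubiblock_cleanup(). The allocation pattern, sketched for reference (kernel context, hypothetical helper names):

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(minor_idr);

static int acquire_minor(void *owner)
{
	/* end == 0 means "no upper bound"; returns the new ID or a negative errno */
	return idr_alloc(&minor_idr, owner, 0, 0, GFP_KERNEL);
}

static void release_minor(int minor)
{
	idr_remove(&minor_idr, minor);
}
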
diff --git a/kernel/drivers/mtd/ubi/build.c b/kernel/drivers/mtd/ubi/build.c
index b7f824d5e..22fd19c0c 100644
--- a/kernel/drivers/mtd/ubi/build.c
+++ b/kernel/drivers/mtd/ubi/build.c
@@ -83,8 +83,6 @@ static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
static bool fm_autoconvert;
static bool fm_debug;
#endif
-/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
-struct class *ubi_class;
/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;
@@ -113,8 +111,17 @@ static ssize_t ubi_version_show(struct class *class,
}
/* UBI version attribute ('/<sysfs>/class/ubi/version') */
-static struct class_attribute ubi_version =
- __ATTR(version, S_IRUGO, ubi_version_show, NULL);
+static struct class_attribute ubi_class_attrs[] = {
+ __ATTR(version, S_IRUGO, ubi_version_show, NULL),
+ __ATTR_NULL
+};
+
+/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
+struct class ubi_class = {
+ .name = UBI_NAME_STR,
+ .owner = THIS_MODULE,
+ .class_attrs = ubi_class_attrs,
+};
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
@@ -385,6 +392,22 @@ static ssize_t dev_attribute_show(struct device *dev,
return ret;
}
+static struct attribute *ubi_dev_attrs[] = {
+ &dev_eraseblock_size.attr,
+ &dev_avail_eraseblocks.attr,
+ &dev_total_eraseblocks.attr,
+ &dev_volumes_count.attr,
+ &dev_max_ec.attr,
+ &dev_reserved_for_bad.attr,
+ &dev_bad_peb_count.attr,
+ &dev_max_vol_count.attr,
+ &dev_min_io_size.attr,
+ &dev_bgt_enabled.attr,
+ &dev_mtd_num.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(ubi_dev);
+
static void dev_release(struct device *dev)
{
struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
@@ -407,45 +430,15 @@ static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
ubi->dev.release = dev_release;
ubi->dev.devt = ubi->cdev.dev;
- ubi->dev.class = ubi_class;
+ ubi->dev.class = &ubi_class;
+ ubi->dev.groups = ubi_dev_groups;
dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
err = device_register(&ubi->dev);
if (err)
return err;
*ref = 1;
- err = device_create_file(&ubi->dev, &dev_eraseblock_size);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_volumes_count);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_max_ec);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_bad_peb_count);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_max_vol_count);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_min_io_size);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_bgt_enabled);
- if (err)
- return err;
- err = device_create_file(&ubi->dev, &dev_mtd_num);
- return err;
+ return 0;
}
/**
@@ -454,17 +447,6 @@ static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
*/
static void ubi_sysfs_close(struct ubi_device *ubi)
{
- device_remove_file(&ubi->dev, &dev_mtd_num);
- device_remove_file(&ubi->dev, &dev_bgt_enabled);
- device_remove_file(&ubi->dev, &dev_min_io_size);
- device_remove_file(&ubi->dev, &dev_max_vol_count);
- device_remove_file(&ubi->dev, &dev_bad_peb_count);
- device_remove_file(&ubi->dev, &dev_reserved_for_bad);
- device_remove_file(&ubi->dev, &dev_max_ec);
- device_remove_file(&ubi->dev, &dev_volumes_count);
- device_remove_file(&ubi->dev, &dev_total_eraseblocks);
- device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
- device_remove_file(&ubi->dev, &dev_eraseblock_size);
device_unregister(&ubi->dev);
}
@@ -947,8 +929,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
*/
ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
- if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
- ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
+ ubi->fm_pool.max_size = max(ubi->fm_pool.max_size,
+ UBI_FM_MIN_POOL_SIZE);
ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
ubi->fm_disabled = !fm_autoconvert;
@@ -1233,23 +1215,14 @@ static int __init ubi_init(void)
}
/* Create base sysfs directory and sysfs files */
- ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
- if (IS_ERR(ubi_class)) {
- err = PTR_ERR(ubi_class);
- pr_err("UBI error: cannot create UBI class");
- goto out;
- }
-
- err = class_create_file(ubi_class, &ubi_version);
- if (err) {
- pr_err("UBI error: cannot create sysfs file");
- goto out_class;
- }
+ err = class_register(&ubi_class);
+ if (err < 0)
+ return err;
err = misc_register(&ubi_ctrl_cdev);
if (err) {
pr_err("UBI error: cannot register device");
- goto out_version;
+ goto out;
}
ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
@@ -1333,11 +1306,8 @@ out_slab:
kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
misc_deregister(&ubi_ctrl_cdev);
-out_version:
- class_remove_file(ubi_class, &ubi_version);
-out_class:
- class_destroy(ubi_class);
out:
+ class_unregister(&ubi_class);
pr_err("UBI error: cannot initialize UBI, error %d", err);
return err;
}
@@ -1358,8 +1328,7 @@ static void __exit ubi_exit(void)
ubi_debugfs_exit();
kmem_cache_destroy(ubi_wl_entry_slab);
misc_deregister(&ubi_ctrl_cdev);
- class_remove_file(ubi_class, &ubi_version);
- class_destroy(ubi_class);
+ class_unregister(&ubi_class);
}
module_exit(ubi_exit);
diff --git a/kernel/drivers/mtd/ubi/cdev.c b/kernel/drivers/mtd/ubi/cdev.c
index d16fccf79..54e056d3b 100644
--- a/kernel/drivers/mtd/ubi/cdev.c
+++ b/kernel/drivers/mtd/ubi/cdev.c
@@ -949,7 +949,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
if (!req) {
err = -ENOMEM;
break;
- };
+ }
err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
if (err) {
diff --git a/kernel/drivers/mtd/ubi/debug.c b/kernel/drivers/mtd/ubi/debug.c
index b077e43b5..c4cb15a30 100644
--- a/kernel/drivers/mtd/ubi/debug.c
+++ b/kernel/drivers/mtd/ubi/debug.c
@@ -236,7 +236,7 @@ int ubi_debugfs_init(void)
dfs_rootdir = debugfs_create_dir("ubi", NULL);
if (IS_ERR_OR_NULL(dfs_rootdir)) {
- int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
+ int err = dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV;
pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
err);
diff --git a/kernel/drivers/mtd/ubi/eba.c b/kernel/drivers/mtd/ubi/eba.c
index 51bca035c..5b9834cf2 100644
--- a/kernel/drivers/mtd/ubi/eba.c
+++ b/kernel/drivers/mtd/ubi/eba.c
@@ -1358,7 +1358,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
continue;
ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
- vol->vol_id, i, fm_eba[i][j],
+ vol->vol_id, j, fm_eba[i][j],
scan_eba[i][j]);
ubi_assert(0);
}
diff --git a/kernel/drivers/mtd/ubi/fastmap-wl.c b/kernel/drivers/mtd/ubi/fastmap-wl.c
index b2a665398..30d3999dd 100644
--- a/kernel/drivers/mtd/ubi/fastmap-wl.c
+++ b/kernel/drivers/mtd/ubi/fastmap-wl.c
@@ -172,6 +172,30 @@ void ubi_refill_pools(struct ubi_device *ubi)
}
/**
+ * produce_free_peb - produce a free physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function tries to make a free PEB by means of synchronous execution of
+ * pending works. This may be needed if, for example, the background thread is
+ * disabled. Returns zero in case of success and a negative error code in case
+ * of failure.
+ */
+static int produce_free_peb(struct ubi_device *ubi)
+{
+ int err;
+
+ while (!ubi->free.rb_node && ubi->works_count) {
+ dbg_wl("do one work synchronously");
+ err = do_work(ubi);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ubi_wl_get_peb - get a physical eraseblock.
* @ubi: UBI device description object
*
@@ -213,6 +237,11 @@ again:
}
retried = 1;
up_read(&ubi->fm_eba_sem);
+ ret = produce_free_peb(ubi);
+ if (ret < 0) {
+ down_read(&ubi->fm_eba_sem);
+ goto out;
+ }
goto again;
}
diff --git a/kernel/drivers/mtd/ubi/fastmap.c b/kernel/drivers/mtd/ubi/fastmap.c
index 02a6de2f5..263b439e2 100644
--- a/kernel/drivers/mtd/ubi/fastmap.c
+++ b/kernel/drivers/mtd/ubi/fastmap.c
@@ -88,13 +88,13 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
size_t size;
- size = sizeof(struct ubi_fm_sb) + \
- sizeof(struct ubi_fm_hdr) + \
- sizeof(struct ubi_fm_scan_pool) + \
- sizeof(struct ubi_fm_scan_pool) + \
- (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
- (sizeof(struct ubi_fm_eba) + \
- (ubi->peb_count * sizeof(__be32))) + \
+ size = sizeof(struct ubi_fm_sb) +
+ sizeof(struct ubi_fm_hdr) +
+ sizeof(struct ubi_fm_scan_pool) +
+ sizeof(struct ubi_fm_scan_pool) +
+ (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
+ (sizeof(struct ubi_fm_eba) +
+ (ubi->peb_count * sizeof(__be32))) +
sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
return roundup(size, ubi->leb_size);
}
@@ -192,8 +192,10 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
if (vol_id > av->vol_id)
p = &(*p)->rb_left;
- else
+ else if (vol_id < av->vol_id)
p = &(*p)->rb_right;
+ else
+ return ERR_PTR(-EINVAL);
}
av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
@@ -314,7 +316,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
list_add_tail(&victim->u.list, &ai->erase);
if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
- av->last_data_size = \
+ av->last_data_size =
be32_to_cpu(new_vh->data_size);
dbg_bld("vol %i: AEB %i's PEB %i is the newer",
@@ -448,7 +450,7 @@ static void unmap_peb(struct ubi_attach_info *ai, int pnum)
* < 0 indicates an internal error.
*/
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
- int *pebs, int pool_size, unsigned long long *max_sqnum,
+ __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
struct list_head *free)
{
struct ubi_vid_hdr *vh;
@@ -601,7 +603,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
struct ubi_fm_sb *fmsb;
struct ubi_fm_hdr *fmhdr;
- struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
struct ubi_fm_ec *fmec;
struct ubi_fm_volhdr *fmvhdr;
struct ubi_fm_eba *fm_eba;
@@ -631,30 +633,30 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
goto fail_bad;
}
- fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
- fm_pos += sizeof(*fmpl1);
+ fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl);
if (fm_pos >= fm_size)
goto fail_bad;
- if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
+ if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
- be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
+ be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
goto fail_bad;
}
- fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
- fm_pos += sizeof(*fmpl2);
+ fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl_wl);
if (fm_pos >= fm_size)
goto fail_bad;
- if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
- ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
- be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
+ if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
goto fail_bad;
}
- pool_size = be16_to_cpu(fmpl1->size);
- wl_pool_size = be16_to_cpu(fmpl2->size);
- fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
- fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
+ pool_size = be16_to_cpu(fmpl->size);
+ wl_pool_size = be16_to_cpu(fmpl_wl->size);
+ fm->max_pool_size = be16_to_cpu(fmpl->max_size);
+ fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
ubi_err(ubi, "bad pool size: %i", pool_size);
@@ -748,6 +750,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
if (!av)
goto fail_bad;
+ if (PTR_ERR(av) == -EINVAL) {
+ ubi_err(ubi, "volume (ID %i) already exists",
+ fmvhdr->vol_id);
+ goto fail_bad;
+ }
ai->vols_found++;
if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
@@ -768,7 +775,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
int pnum = be32_to_cpu(fm_eba->pnum[j]);
- if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
+ if (pnum < 0)
continue;
aeb = NULL;
@@ -796,11 +803,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
}
}
- ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, &free);
+ ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
if (ret)
goto fail;
- ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, &free);
+ ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
if (ret)
goto fail;
@@ -1083,7 +1090,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
void *fm_raw;
struct ubi_fm_sb *fmsb;
struct ubi_fm_hdr *fmh;
- struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
struct ubi_fm_ec *fec;
struct ubi_fm_volhdr *fvh;
struct ubi_fm_eba *feba;
@@ -1141,25 +1148,25 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
erase_peb_count = 0;
vol_count = 0;
- fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
- fm_pos += sizeof(*fmpl1);
- fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
- fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
- fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
+ fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl);
+ fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl->size = cpu_to_be16(ubi->fm_pool.size);
+ fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
for (i = 0; i < ubi->fm_pool.size; i++) {
- fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+ fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
}
- fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
- fm_pos += sizeof(*fmpl2);
- fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
- fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
- fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
+ fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl_wl);
+ fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
+ fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
for (i = 0; i < ubi->fm_wl_pool.size; i++) {
- fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+ fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
}
diff --git a/kernel/drivers/mtd/ubi/gluebi.c b/kernel/drivers/mtd/ubi/gluebi.c
index b93807b4c..cb7c075f2 100644
--- a/kernel/drivers/mtd/ubi/gluebi.c
+++ b/kernel/drivers/mtd/ubi/gluebi.c
@@ -112,8 +112,8 @@ static int gluebi_get_device(struct mtd_info *mtd)
* The MTD device is already referenced and this is just one
* more reference. MTD allows many users to open the same
* volume simultaneously and do not distinguish between
- * readers/writers/exclusive openers as UBI does. So we do not
- * open the UBI volume again - just increase the reference
+ * readers/writers/exclusive/meta openers as UBI does. So we do
+ * not open the UBI volume again - just increase the reference
* counter and return.
*/
gluebi->refcnt += 1;
diff --git a/kernel/drivers/mtd/ubi/io.c b/kernel/drivers/mtd/ubi/io.c
index 5bbd1f094..10cf3b549 100644
--- a/kernel/drivers/mtd/ubi/io.c
+++ b/kernel/drivers/mtd/ubi/io.c
@@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
goto bad;
}
+ if (data_size > ubi->leb_size) {
+ ubi_err(ubi, "bad data_size");
+ goto bad;
+ }
+
if (vol_type == UBI_VID_STATIC) {
/*
* Although from high-level point of view static volumes may
@@ -1294,7 +1299,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto exit;
- crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
+ crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
diff --git a/kernel/drivers/mtd/ubi/ubi-media.h b/kernel/drivers/mtd/ubi/ubi-media.h
index d0d072e7c..22ed3f627 100644
--- a/kernel/drivers/mtd/ubi/ubi-media.h
+++ b/kernel/drivers/mtd/ubi/ubi-media.h
@@ -500,7 +500,7 @@ struct ubi_fm_volhdr {
/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */
/**
- * struct ubi_fm_eba - denotes an association beween a PEB and LEB
+ * struct ubi_fm_eba - denotes an association between a PEB and LEB
* @magic: EBA table magic number
* @reserved_pebs: number of table entries
* @pnum: PEB number of LEB (LEB is the index)
diff --git a/kernel/drivers/mtd/ubi/ubi.h b/kernel/drivers/mtd/ubi/ubi.h
index c998212fc..2974b67f6 100644
--- a/kernel/drivers/mtd/ubi/ubi.h
+++ b/kernel/drivers/mtd/ubi/ubi.h
@@ -775,7 +775,7 @@ extern struct kmem_cache *ubi_wl_entry_slab;
extern const struct file_operations ubi_ctrl_cdev_operations;
extern const struct file_operations ubi_cdev_operations;
extern const struct file_operations ubi_vol_cdev_operations;
-extern struct class *ubi_class;
+extern struct class ubi_class;
extern struct mutex ubi_devices_mutex;
extern struct blocking_notifier_head ubi_notifiers;
diff --git a/kernel/drivers/mtd/ubi/upd.c b/kernel/drivers/mtd/ubi/upd.c
index 2a1b6e037..0134ba32a 100644
--- a/kernel/drivers/mtd/ubi/upd.c
+++ b/kernel/drivers/mtd/ubi/upd.c
@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
vol->changing_leb = 1;
vol->ch_lnum = req->lnum;
- vol->upd_buf = vmalloc(req->bytes);
+ vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
if (!vol->upd_buf)
return -ENOMEM;
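
The upd.c change rounds the update buffer up to a multiple of the device's minimum I/O unit, presumably because the write path pads the final chunk to min_io_size and would otherwise read past the end of the buffer. ALIGN() here is the usual power-of-two rounding; a quick userspace check:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int bytes = 5000;		/* requested LEB change size (example) */
	int min_io_size = 2048;		/* typical NAND page size (example) */

	printf("vmalloc size: %d\n", ALIGN(bytes, min_io_size));	/* prints 6144 */
	return 0;
}
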
diff --git a/kernel/drivers/mtd/ubi/vmt.c b/kernel/drivers/mtd/ubi/vmt.c
index ff4d97848..1ae17bb9b 100644
--- a/kernel/drivers/mtd/ubi/vmt.c
+++ b/kernel/drivers/mtd/ubi/vmt.c
@@ -120,6 +120,19 @@ static ssize_t vol_attribute_show(struct device *dev,
return ret;
}
+static struct attribute *volume_dev_attrs[] = {
+ &attr_vol_reserved_ebs.attr,
+ &attr_vol_type.attr,
+ &attr_vol_name.attr,
+ &attr_vol_corrupted.attr,
+ &attr_vol_alignment.attr,
+ &attr_vol_usable_eb_size.attr,
+ &attr_vol_data_bytes.attr,
+ &attr_vol_upd_marker.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(volume_dev);
+
/* Release method for volume devices */
static void vol_release(struct device *dev)
{
@@ -130,64 +143,6 @@ static void vol_release(struct device *dev)
}
/**
- * volume_sysfs_init - initialize sysfs for new volume.
- * @ubi: UBI device description object
- * @vol: volume description object
- *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
- *
- * Note, this function does not free allocated resources in case of failure -
- * the caller does it. This is because this would cause release() here and the
- * caller would oops.
- */
-static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
-{
- int err;
-
- err = device_create_file(&vol->dev, &attr_vol_reserved_ebs);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_type);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_name);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_corrupted);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_alignment);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_usable_eb_size);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_data_bytes);
- if (err)
- return err;
- err = device_create_file(&vol->dev, &attr_vol_upd_marker);
- return err;
-}
-
-/**
- * volume_sysfs_close - close sysfs for a volume.
- * @vol: volume description object
- */
-static void volume_sysfs_close(struct ubi_volume *vol)
-{
- device_remove_file(&vol->dev, &attr_vol_upd_marker);
- device_remove_file(&vol->dev, &attr_vol_data_bytes);
- device_remove_file(&vol->dev, &attr_vol_usable_eb_size);
- device_remove_file(&vol->dev, &attr_vol_alignment);
- device_remove_file(&vol->dev, &attr_vol_corrupted);
- device_remove_file(&vol->dev, &attr_vol_name);
- device_remove_file(&vol->dev, &attr_vol_type);
- device_remove_file(&vol->dev, &attr_vol_reserved_ebs);
- device_unregister(&vol->dev);
-}
-
-/**
* ubi_create_volume - create volume.
* @ubi: UBI device description object
* @req: volume creation request
@@ -253,8 +208,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Calculate how many eraseblocks are requested */
vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
- vol->reserved_pebs += div_u64(req->bytes + vol->usable_leb_size - 1,
- vol->usable_leb_size);
+ vol->reserved_pebs = div_u64(req->bytes + vol->usable_leb_size - 1,
+ vol->usable_leb_size);
/* Reserve physical eraseblocks */
if (vol->reserved_pebs > ubi->avail_pebs) {
@@ -323,7 +278,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
vol->dev.devt = dev;
- vol->dev.class = ubi_class;
+ vol->dev.class = &ubi_class;
+ vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
@@ -332,10 +288,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
goto out_cdev;
}
- err = volume_sysfs_init(ubi, vol);
- if (err)
- goto out_sysfs;
-
/* Fill volume table record */
memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs);
@@ -372,7 +324,7 @@ out_sysfs:
*/
do_free = 0;
get_device(&vol->dev);
- volume_sysfs_close(vol);
+ device_unregister(&vol->dev);
out_cdev:
cdev_del(&vol->cdev);
out_mapping:
@@ -440,7 +392,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
}
cdev_del(&vol->cdev);
- volume_sysfs_close(vol);
+ device_unregister(&vol->dev);
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= reserved_pebs;
@@ -653,19 +605,13 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
vol->dev.devt = dev;
- vol->dev.class = ubi_class;
+ vol->dev.class = &ubi_class;
+ vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
if (err)
goto out_cdev;
- err = volume_sysfs_init(ubi, vol);
- if (err) {
- cdev_del(&vol->cdev);
- volume_sysfs_close(vol);
- return err;
- }
-
self_check_volumes(ubi);
return err;
@@ -688,7 +634,7 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
ubi->volumes[vol->vol_id] = NULL;
cdev_del(&vol->cdev);
- volume_sysfs_close(vol);
+ device_unregister(&vol->dev);
}
/**
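The vmt.c changes above replace eight hand-rolled device_create_file()/device_remove_file() pairs with a NULL-terminated attribute table (ATTRIBUTE_GROUPS) that the driver core registers and tears down on its own. The sketch below shows the same table-driven idea in plain C; struct attr and the register/unregister helpers are hypothetical stand-ins for the sysfs machinery, not its real interface.

#include <stdio.h>

struct attr {
	const char *name;
};

static const struct attr reserved_ebs = { "reserved_ebs" };
static const struct attr vol_type     = { "type" };
static const struct attr vol_name     = { "name" };

/* NULL terminator lets generic code find the end without a separate count. */
static const struct attr *volume_attrs[] = {
	&reserved_ebs,
	&vol_type,
	&vol_name,
	NULL
};

static void register_attrs(const struct attr **attrs)
{
	for (; *attrs; attrs++)
		printf("register %s\n", (*attrs)->name);
}

static void unregister_attrs(const struct attr **attrs)
{
	for (; *attrs; attrs++)
		printf("unregister %s\n", (*attrs)->name);
}

int main(void)
{
	register_attrs(volume_attrs);
	unregister_attrs(volume_attrs);
	return 0;
}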
diff --git a/kernel/drivers/mtd/ubi/vtbl.c b/kernel/drivers/mtd/ubi/vtbl.c
index 68c9c5ea6..d85c19762 100644
--- a/kernel/drivers/mtd/ubi/vtbl.c
+++ b/kernel/drivers/mtd/ubi/vtbl.c
@@ -70,6 +70,26 @@ static void self_vtbl_check(const struct ubi_device *ubi);
static struct ubi_vtbl_record empty_vtbl_record;
/**
+ * ubi_update_layout_vol - helper for updating layout volumes on flash
+ * @ubi: UBI device description object
+ */
+static int ubi_update_layout_vol(struct ubi_device *ubi)
+{
+ struct ubi_volume *layout_vol;
+ int i, err;
+
+ layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
+ for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
+ err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
+ ubi->vtbl_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ubi_change_vtbl_record - change volume table record.
* @ubi: UBI device description object
* @idx: table index to change
@@ -83,12 +103,10 @@ static struct ubi_vtbl_record empty_vtbl_record;
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
struct ubi_vtbl_record *vtbl_rec)
{
- int i, err;
+ int err;
uint32_t crc;
- struct ubi_volume *layout_vol;
ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
- layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
if (!vtbl_rec)
vtbl_rec = &empty_vtbl_record;
@@ -98,15 +116,10 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
}
memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
- for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
- err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
- ubi->vtbl_size);
- if (err)
- return err;
- }
+ err = ubi_update_layout_vol(ubi);
self_vtbl_check(ubi);
- return 0;
+ return err ? err : 0;
}
/**
@@ -121,9 +134,7 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
struct list_head *rename_list)
{
- int i, err;
struct ubi_rename_entry *re;
- struct ubi_volume *layout_vol;
list_for_each_entry(re, rename_list, list) {
uint32_t crc;
@@ -145,15 +156,7 @@ int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
vtbl_rec->crc = cpu_to_be32(crc);
}
- layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
- for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
- err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
- ubi->vtbl_size);
- if (err)
- return err;
- }
-
- return 0;
+ return ubi_update_layout_vol(ubi);
}
/**
@@ -646,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi,
if (ubi->corr_peb_count)
ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
+ return -ENOSPC;
}
ubi->rsvd_pebs += reserved_pebs;
ubi->avail_pebs -= reserved_pebs;
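The vtbl.c hunks above funnel both callers through the new ubi_update_layout_vol() helper instead of duplicating the loop that writes the table back to every redundant copy of the layout volume. A stand-alone sketch of that refactoring, assuming illustrative names (write_copy, NUM_COPIES) in place of the UBI internals:

#include <stdio.h>

#define NUM_COPIES 2	/* UBI keeps UBI_LAYOUT_VOLUME_EBS redundant copies */

static int write_copy(int i, const char *table)
{
	printf("writing copy %d: %s\n", i, table);
	return 0;	/* 0 on success, negative error code otherwise */
}

/* Write the in-memory table to every redundant copy, stop on the first error. */
static int update_layout_vol(const char *table)
{
	int i, err;

	for (i = 0; i < NUM_COPIES; i++) {
		err = write_copy(i, table);
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	return update_layout_vol("volume table") ? 1 : 0;
}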
diff --git a/kernel/drivers/mtd/ubi/wl.c b/kernel/drivers/mtd/ubi/wl.c
index 16214d3d5..56065632a 100644
--- a/kernel/drivers/mtd/ubi/wl.c
+++ b/kernel/drivers/mtd/ubi/wl.c
@@ -603,6 +603,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
return 0;
}
+static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
/**
* do_sync_erase - run the erase worker synchronously.
* @ubi: UBI device description object
@@ -615,20 +616,16 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
int vol_id, int lnum, int torture)
{
- struct ubi_work *wl_wrk;
+ struct ubi_work wl_wrk;
dbg_wl("sync erase of PEB %i", e->pnum);
- wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
- if (!wl_wrk)
- return -ENOMEM;
-
- wl_wrk->e = e;
- wl_wrk->vol_id = vol_id;
- wl_wrk->lnum = lnum;
- wl_wrk->torture = torture;
+ wl_wrk.e = e;
+ wl_wrk.vol_id = vol_id;
+ wl_wrk.lnum = lnum;
+ wl_wrk.torture = torture;
- return erase_worker(ubi, wl_wrk, 0);
+ return __erase_worker(ubi, &wl_wrk);
}
/**
@@ -1014,7 +1011,7 @@ out_unlock:
}
/**
- * erase_worker - physical eraseblock erase worker function.
+ * __erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
* @wl_wrk: the work object
* @shutdown: non-zero if the worker has to free memory and exit
@@ -1025,8 +1022,7 @@ out_unlock:
* needed. Returns zero in case of success and a negative error code in case of
* failure.
*/
-static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
- int shutdown)
+static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
{
struct ubi_wl_entry *e = wl_wrk->e;
int pnum = e->pnum;
@@ -1034,21 +1030,11 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int lnum = wl_wrk->lnum;
int err, available_consumed = 0;
- if (shutdown) {
- dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
- kfree(wl_wrk);
- wl_entry_destroy(ubi, e);
- return 0;
- }
-
dbg_wl("erase PEB %d EC %d LEB %d:%d",
pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
- /* Fine, we've erased it successfully */
- kfree(wl_wrk);
-
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->free);
ubi->free_count++;
@@ -1066,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
}
ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
- kfree(wl_wrk);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
@@ -1075,6 +1060,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
/* Re-schedule the LEB for erasure */
err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
if (err1) {
+ wl_entry_destroy(ubi, e);
err = err1;
goto out_ro;
}
@@ -1150,6 +1136,25 @@ out_ro:
return err;
}
+static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ int shutdown)
+{
+ int ret;
+
+ if (shutdown) {
+ struct ubi_wl_entry *e = wl_wrk->e;
+
+ dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
+ kfree(wl_wrk);
+ wl_entry_destroy(ubi, e);
+ return 0;
+ }
+
+ ret = __erase_worker(ubi, wl_wrk);
+ kfree(wl_wrk);
+ return ret;
+}
+
/**
* ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
* @ubi: UBI device description object
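The wl.c hunks above split the erase worker in two: the core logic moves into __erase_worker(), which takes a caller-owned work item, so do_sync_erase() can keep the item on the stack, while the erase_worker() wrapper keeps ownership of the kmalloc'ed item and handles the shutdown case. A minimal sketch of that ownership split, with all names (struct work, __worker, worker, do_sync_erase) hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct work {
	int pnum;	/* which physical eraseblock to erase */
};

/* Core logic: no allocation, no freeing, just the actual job. */
static int __worker(struct work *w)
{
	printf("erasing PEB %d\n", w->pnum);
	return 0;
}

/* Asynchronous path: the wrapper owns the heap-allocated work item. */
static int worker(struct work *w, int shutdown)
{
	int ret;

	if (shutdown) {
		free(w);
		return 0;
	}
	ret = __worker(w);
	free(w);
	return ret;
}

/* Synchronous path: the work item lives on the stack, nothing to free. */
static int do_sync_erase(int pnum)
{
	struct work w = { .pnum = pnum };

	return __worker(&w);
}

int main(void)
{
	struct work *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	w->pnum = 7;
	worker(w, 0);		/* asynchronous-style call */
	return do_sync_erase(42);
}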
@@ -1581,7 +1586,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
dbg_wl("found %i PEBs", found_pebs);
if (ubi->fm) {
- ubi_assert(ubi->good_peb_count == \
+ ubi_assert(ubi->good_peb_count ==
found_pebs + ubi->fm->used_blocks);
for (i = 0; i < ubi->fm->used_blocks; i++) {
@@ -1601,6 +1606,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (ubi->corr_peb_count)
ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
+ err = -ENOSPC;
goto out_free;
}
ubi->avail_pebs -= reserved_pebs;