Diffstat (limited to 'kernel/drivers/mtd/spi-nor')
 -rw-r--r--  kernel/drivers/mtd/spi-nor/Kconfig       |  17
 -rw-r--r--  kernel/drivers/mtd/spi-nor/Makefile      |   1
 -rw-r--r--  kernel/drivers/mtd/spi-nor/fsl-quadspi.c | 309
 -rw-r--r--  kernel/drivers/mtd/spi-nor/nxp-spifi.c   | 479
 -rw-r--r--  kernel/drivers/mtd/spi-nor/spi-nor.c     | 412
 5 files changed, 1005 insertions(+), 213 deletions(-)
diff --git a/kernel/drivers/mtd/spi-nor/Kconfig b/kernel/drivers/mtd/spi-nor/Kconfig
index 64a4f0eda..2fe2a7e90 100644
--- a/kernel/drivers/mtd/spi-nor/Kconfig
+++ b/kernel/drivers/mtd/spi-nor/Kconfig
@@ -23,9 +23,22 @@ config MTD_SPI_NOR_USE_4K_SECTORS
config SPI_FSL_QUADSPI
tristate "Freescale Quad SPI controller"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables support for the Quad SPI controller in master mode.
- We only connect the NOR to this controller now.
+ This controller does not support generic SPI. It only supports
+ SPI NOR.
+
+config SPI_NXP_SPIFI
+ tristate "NXP SPI Flash Interface (SPIFI)"
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+ depends on HAS_IOMEM
+ help
+ Enable support for the NXP LPC SPI Flash Interface controller.
+
+ SPIFI is a specialized controller for connecting serial SPI
+ Flash. Enable this option if you have a device with a SPIFI
+ controller and want to access the Flash as a mtd device.
endif # MTD_SPI_NOR
diff --git a/kernel/drivers/mtd/spi-nor/Makefile b/kernel/drivers/mtd/spi-nor/Makefile
index 6a7ce1462..e53333ef8 100644
--- a/kernel/drivers/mtd/spi-nor/Makefile
+++ b/kernel/drivers/mtd/spi-nor/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
+obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
diff --git a/kernel/drivers/mtd/spi-nor/fsl-quadspi.c b/kernel/drivers/mtd/spi-nor/fsl-quadspi.c
index 5d5d36272..7b10ed413 100644
--- a/kernel/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/kernel/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -26,6 +26,21 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
+#include <linux/mutex.h>
+#include <linux/pm_qos.h>
+#include <linux/sizes.h>
+
+/* Controller needs driver to swap endian */
+#define QUADSPI_QUIRK_SWAP_ENDIAN (1 << 0)
+/* Controller needs 4x internal clock */
+#define QUADSPI_QUIRK_4X_INT_CLK (1 << 1)
+/*
+ * TKT253890, Controller needs driver to fill txfifo till 16 byte to
+ * trigger data transfer even though extern data will not transferred.
+ */
+#define QUADSPI_QUIRK_TKT253890 (1 << 2)
+/* Controller cannot wake up from wait mode, TKT245618 */
+#define QUADSPI_QUIRK_TKT245618 (1 << 3)
/* The registers */
#define QUADSPI_MCR 0x00
@@ -140,15 +155,15 @@
#define LUT_MODE 4
#define LUT_MODE2 5
#define LUT_MODE4 6
-#define LUT_READ 7
-#define LUT_WRITE 8
+#define LUT_FSL_READ 7
+#define LUT_FSL_WRITE 8
#define LUT_JMP_ON_CS 9
#define LUT_ADDR_DDR 10
#define LUT_MODE_DDR 11
#define LUT_MODE2_DDR 12
#define LUT_MODE4_DDR 13
-#define LUT_READ_DDR 14
-#define LUT_WRITE_DDR 15
+#define LUT_FSL_READ_DDR 14
+#define LUT_FSL_WRITE_DDR 15
#define LUT_DATA_LEARN 16
/*
@@ -191,9 +206,13 @@
#define SEQID_EN4B 10
#define SEQID_BRWR 11
+#define QUADSPI_MIN_IOMAP SZ_4M
+
enum fsl_qspi_devtype {
FSL_QUADSPI_VYBRID,
FSL_QUADSPI_IMX6SX,
+ FSL_QUADSPI_IMX7D,
+ FSL_QUADSPI_IMX6UL,
};
struct fsl_qspi_devtype_data {
@@ -201,29 +220,52 @@ struct fsl_qspi_devtype_data {
int rxfifo;
int txfifo;
int ahb_buf_size;
+ int driver_data;
};
static struct fsl_qspi_devtype_data vybrid_data = {
.devtype = FSL_QUADSPI_VYBRID,
.rxfifo = 128,
.txfifo = 64,
- .ahb_buf_size = 1024
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_SWAP_ENDIAN,
};
static struct fsl_qspi_devtype_data imx6sx_data = {
.devtype = FSL_QUADSPI_IMX6SX,
.rxfifo = 128,
.txfifo = 512,
- .ahb_buf_size = 1024
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_4X_INT_CLK
+ | QUADSPI_QUIRK_TKT245618,
+};
+
+static struct fsl_qspi_devtype_data imx7d_data = {
+ .devtype = FSL_QUADSPI_IMX7D,
+ .rxfifo = 512,
+ .txfifo = 512,
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_TKT253890
+ | QUADSPI_QUIRK_4X_INT_CLK,
+};
+
+static struct fsl_qspi_devtype_data imx6ul_data = {
+ .devtype = FSL_QUADSPI_IMX6UL,
+ .rxfifo = 128,
+ .txfifo = 512,
+ .ahb_buf_size = 1024,
+ .driver_data = QUADSPI_QUIRK_TKT253890
+ | QUADSPI_QUIRK_4X_INT_CLK,
};
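The devtype_data/quirk-flag split above keeps per-SoC differences declarative; the driver only tests feature bits at run time through the needs_*() helpers further down. As a purely hypothetical sketch (names invented, not part of this commit), supporting another SoC revision would amount to one more entry like this plus a matching compatible string in fsl_qspi_dt_ids[]:

static struct fsl_qspi_devtype_data my_soc_data = {	/* hypothetical example */
	.devtype      = FSL_QUADSPI_IMX7D,		/* reuse an existing devtype id */
	.rxfifo       = 128,
	.txfifo       = 512,
	.ahb_buf_size = 1024,
	.driver_data  = QUADSPI_QUIRK_4X_INT_CLK,	/* only the quirks this part needs */
};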
#define FSL_QSPI_MAX_CHIP 4
struct fsl_qspi {
- struct mtd_info mtd[FSL_QSPI_MAX_CHIP];
struct spi_nor nor[FSL_QSPI_MAX_CHIP];
void __iomem *iobase;
- void __iomem *ahb_base; /* Used when read from AHB bus */
+ void __iomem *ahb_addr;
u32 memmap_phy;
+ u32 memmap_offs;
+ u32 memmap_len;
struct clk *clk, *clk_en;
struct device *dev;
struct completion c;
@@ -233,16 +275,28 @@ struct fsl_qspi {
u32 clk_rate;
unsigned int chip_base_addr; /* We may support two chips. */
bool has_second_chip;
+ struct mutex lock;
+ struct pm_qos_request pm_qos_req;
};
-static inline int is_vybrid_qspi(struct fsl_qspi *q)
+static inline int needs_swap_endian(struct fsl_qspi *q)
+{
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_SWAP_ENDIAN;
+}
+
+static inline int needs_4x_clock(struct fsl_qspi *q)
{
- return q->devtype_data->devtype == FSL_QUADSPI_VYBRID;
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_4X_INT_CLK;
}
-static inline int is_imx6sx_qspi(struct fsl_qspi *q)
+static inline int needs_fill_txfifo(struct fsl_qspi *q)
{
- return q->devtype_data->devtype == FSL_QUADSPI_IMX6SX;
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT253890;
+}
+
+static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
+{
+ return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618;
}
/*
@@ -251,7 +305,7 @@ static inline int is_imx6sx_qspi(struct fsl_qspi *q)
*/
static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
{
- return is_vybrid_qspi(q) ? __swab32(a) : a;
+ return needs_swap_endian(q) ? __swab32(a) : a;
}
static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q)
@@ -312,7 +366,7 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
base + QUADSPI_LUT(lut_base));
- writel(LUT0(DUMMY, PAD1, dummy) | LUT1(READ, PAD4, rxfifo),
+ writel(LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo),
base + QUADSPI_LUT(lut_base + 1));
/* Write enable */
@@ -333,24 +387,18 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
base + QUADSPI_LUT(lut_base));
- writel(LUT0(WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1));
+ writel(LUT0(FSL_WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1));
/* Read Status */
lut_base = SEQID_RDSR * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(READ, PAD1, 0x1),
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(FSL_READ, PAD1, 0x1),
base + QUADSPI_LUT(lut_base));
/* Erase a sector */
lut_base = SEQID_SE * 4;
- if (q->nor_size <= SZ_16M) {
- cmd = SPINOR_OP_SE;
- addrlen = ADDR24BIT;
- } else {
- /* use the 4-byte address */
- cmd = SPINOR_OP_SE;
- addrlen = ADDR32BIT;
- }
+ cmd = q->nor[0].erase_opcode;
+ addrlen = q->nor_size <= SZ_16M ? ADDR24BIT : ADDR32BIT;
writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
base + QUADSPI_LUT(lut_base));
@@ -362,17 +410,17 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
/* READ ID */
lut_base = SEQID_RDID * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(READ, PAD1, 0x8),
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(FSL_READ, PAD1, 0x8),
base + QUADSPI_LUT(lut_base));
/* Write Register */
lut_base = SEQID_WRSR * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(WRITE, PAD1, 0x2),
+ writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(FSL_WRITE, PAD1, 0x2),
base + QUADSPI_LUT(lut_base));
/* Read Configuration Register */
lut_base = SEQID_RDCR * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(READ, PAD1, 0x1),
+ writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(FSL_READ, PAD1, 0x1),
base + QUADSPI_LUT(lut_base));
/* Write disable */
@@ -419,6 +467,8 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
case SPINOR_OP_BRWR:
return SEQID_BRWR;
default:
+ if (cmd == q->nor[0].erase_opcode)
+ return SEQID_SE;
dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd);
break;
}
@@ -537,7 +587,7 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
/* clear the TX FIFO. */
tmp = readl(q->iobase + QUADSPI_MCR);
- writel(tmp | QUADSPI_MCR_CLR_RXF_MASK, q->iobase + QUADSPI_MCR);
+ writel(tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
/* fill the TX data to the FIFO */
for (j = 0, i = ((count + 3) / 4); j < i; j++) {
@@ -546,6 +596,11 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
txbuf++;
}
+ /* fill the TXFIFO upto 16 bytes for i.MX7d */
+ if (needs_fill_txfifo(q))
+ for (; i < 4; i++)
+ writel(tmp, q->iobase + QUADSPI_TBDR);
+
/* Trigger it */
ret = fsl_qspi_runcmd(q, opcode, to, count);
@@ -606,6 +661,38 @@ static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
q->iobase + QUADSPI_BFGENCR);
}
+/* This function was used to prepare and enable QSPI clock */
+static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
+{
+ int ret;
+
+ ret = clk_prepare_enable(q->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(q->clk);
+ if (ret) {
+ clk_disable_unprepare(q->clk_en);
+ return ret;
+ }
+
+ if (needs_wakeup_wait_mode(q))
+ pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
+
+ return 0;
+}
+
+/* This function was used to disable and unprepare QSPI clock */
+static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
+{
+ if (needs_wakeup_wait_mode(q))
+ pm_qos_remove_request(&q->pm_qos_req);
+
+ clk_disable_unprepare(q->clk);
+ clk_disable_unprepare(q->clk_en);
+
+}
+
/* We use this function to do some basic init for spi_nor_scan(). */
static int fsl_qspi_nor_setup(struct fsl_qspi *q)
{
@@ -613,11 +700,23 @@ static int fsl_qspi_nor_setup(struct fsl_qspi *q)
u32 reg;
int ret;
- /* the default frequency, we will change it in the future.*/
+ /* disable and unprepare clock to avoid glitch pass to controller */
+ fsl_qspi_clk_disable_unprep(q);
+
+ /* the default frequency, we will change it in the future. */
ret = clk_set_rate(q->clk, 66000000);
if (ret)
return ret;
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
+ /* Reset the module */
+ writel(QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
+ base + QUADSPI_MCR);
+ udelay(1);
+
/* Init the LUT table. */
fsl_qspi_init_lut(q);
@@ -635,6 +734,9 @@ static int fsl_qspi_nor_setup(struct fsl_qspi *q)
writel(QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
base + QUADSPI_MCR);
+ /* clear all interrupt status */
+ writel(0xffffffff, q->iobase + QUADSPI_FR);
+
/* enable the interrupt */
writel(QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
@@ -646,13 +748,20 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
unsigned long rate = q->clk_rate;
int ret;
- if (is_imx6sx_qspi(q))
+ if (needs_4x_clock(q))
rate *= 4;
+ /* disable and unprepare clock to avoid glitch pass to controller */
+ fsl_qspi_clk_disable_unprep(q);
+
ret = clk_set_rate(q->clk, rate);
if (ret)
return ret;
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
/* Init the LUT table again. */
fsl_qspi_init_lut(q);
@@ -662,9 +771,11 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
return 0;
}
-static struct of_device_id fsl_qspi_dt_ids[] = {
+static const struct of_device_id fsl_qspi_dt_ids[] = {
{ .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, },
{ .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
+ { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
@@ -687,8 +798,7 @@ static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return 0;
}
-static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
- int write_enable)
+static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
struct fsl_qspi *q = nor->priv;
int ret;
@@ -730,11 +840,42 @@ static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
struct fsl_qspi *q = nor->priv;
u8 cmd = nor->read_opcode;
- dev_dbg(q->dev, "cmd [%x],read from (0x%p, 0x%.8x, 0x%.8x),len:%d\n",
- cmd, q->ahb_base, q->chip_base_addr, (unsigned int)from, len);
+ /* if necessary,ioremap buffer before AHB read, */
+ if (!q->ahb_addr) {
+ q->memmap_offs = q->chip_base_addr + from;
+ q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
+
+ q->ahb_addr = ioremap_nocache(
+ q->memmap_phy + q->memmap_offs,
+ q->memmap_len);
+ if (!q->ahb_addr) {
+ dev_err(q->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+ /* ioremap if the data requested is out of range */
+ } else if (q->chip_base_addr + from < q->memmap_offs
+ || q->chip_base_addr + from + len >
+ q->memmap_offs + q->memmap_len) {
+ iounmap(q->ahb_addr);
+
+ q->memmap_offs = q->chip_base_addr + from;
+ q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
+ q->ahb_addr = ioremap_nocache(
+ q->memmap_phy + q->memmap_offs,
+ q->memmap_len);
+ if (!q->ahb_addr) {
+ dev_err(q->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+ }
+
+ dev_dbg(q->dev, "cmd [%x],read from %p, len:%zd\n",
+ cmd, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
+ len);
/* Read out the data directly from the AHB buffer.*/
- memcpy(buf, q->ahb_base + q->chip_base_addr + from, len);
+ memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
+ len);
*retlen += len;
return 0;
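The rework above stops mapping the whole AHB range up front: each read maps at least QUADSPI_MIN_IOMAP (4 MiB) on demand and only remaps when a request falls outside the current window. A worked example with invented numbers:

/*
 * Illustration only (values invented):
 *
 *   first read:  chip_base_addr = 0, from = 1 MiB, len = 64 KiB
 *                -> memmap_offs = 1 MiB, memmap_len = 4 MiB (the minimum)
 *                -> ioremap'd window covers flash offsets [1 MiB, 5 MiB)
 *   later read:  from = 4.5 MiB, len = 1 MiB -> ends at 5.5 MiB > 5 MiB
 *                -> iounmap() the old window, ioremap() a fresh 4 MiB window
 *                   starting at 4.5 MiB
 */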
@@ -746,7 +887,7 @@ static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
int ret;
dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
- nor->mtd->erasesize / 1024, q->chip_base_addr, (u32)offs);
+ nor->mtd.erasesize / 1024, q->chip_base_addr, (u32)offs);
ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0);
if (ret)
@@ -761,26 +902,26 @@ static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
struct fsl_qspi *q = nor->priv;
int ret;
- ret = clk_enable(q->clk_en);
- if (ret)
- return ret;
+ mutex_lock(&q->lock);
- ret = clk_enable(q->clk);
- if (ret) {
- clk_disable(q->clk_en);
- return ret;
- }
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ goto err_mutex;
fsl_qspi_set_base_addr(q, nor);
return 0;
+
+err_mutex:
+ mutex_unlock(&q->lock);
+ return ret;
}
static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
struct fsl_qspi *q = nor->priv;
- clk_disable(q->clk);
- clk_disable(q->clk_en);
+ fsl_qspi_clk_disable_unprep(q);
+ mutex_unlock(&q->lock);
}
static int fsl_qspi_probe(struct platform_device *pdev)
@@ -804,6 +945,10 @@ static int fsl_qspi_probe(struct platform_device *pdev)
if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP)
return -ENODEV;
+ q->dev = dev;
+ q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data;
+ platform_set_drvdata(pdev, q);
+
/* find the resources */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
q->iobase = devm_ioremap_resource(dev, res);
@@ -812,9 +957,11 @@ static int fsl_qspi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"QuadSPI-memory");
- q->ahb_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(q->ahb_base))
- return PTR_ERR(q->ahb_base);
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ res->name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return -EBUSY;
+ }
q->memmap_phy = res->start;
@@ -827,15 +974,9 @@ static int fsl_qspi_probe(struct platform_device *pdev)
if (IS_ERR(q->clk))
return PTR_ERR(q->clk);
- ret = clk_prepare_enable(q->clk_en);
- if (ret) {
- dev_err(dev, "cannot enable the qspi_en clock: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(q->clk);
+ ret = fsl_qspi_clk_prep_enable(q);
if (ret) {
- dev_err(dev, "cannot enable the qspi clock: %d\n", ret);
+ dev_err(dev, "can not enable the clock\n");
goto clk_failed;
}
@@ -853,10 +994,6 @@ static int fsl_qspi_probe(struct platform_device *pdev)
goto irq_failed;
}
- q->dev = dev;
- q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data;
- platform_set_drvdata(pdev, q);
-
ret = fsl_qspi_nor_setup(q);
if (ret)
goto irq_failed;
@@ -864,21 +1001,20 @@ static int fsl_qspi_probe(struct platform_device *pdev)
if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
q->has_second_chip = true;
+ mutex_init(&q->lock);
+
/* iterate the subnodes. */
for_each_available_child_of_node(dev->of_node, np) {
- char modalias[40];
-
/* skip the holes */
if (!q->has_second_chip)
i *= 2;
nor = &q->nor[i];
- mtd = &q->mtd[i];
+ mtd = &nor->mtd;
- nor->mtd = mtd;
nor->dev = dev;
+ nor->flash_node = np;
nor->priv = q;
- mtd->priv = nor;
/* fill the hooks */
nor->read_reg = fsl_qspi_read_reg;
@@ -890,26 +1026,22 @@ static int fsl_qspi_probe(struct platform_device *pdev)
nor->prepare = fsl_qspi_prep;
nor->unprepare = fsl_qspi_unprep;
- ret = of_modalias_node(np, modalias, sizeof(modalias));
- if (ret < 0)
- goto irq_failed;
-
ret = of_property_read_u32(np, "spi-max-frequency",
&q->clk_rate);
if (ret < 0)
- goto irq_failed;
+ goto mutex_failed;
/* set the chip address for READID */
fsl_qspi_set_base_addr(q, nor);
- ret = spi_nor_scan(nor, modalias, SPI_NOR_QUAD);
+ ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
if (ret)
- goto irq_failed;
+ goto mutex_failed;
ppdata.of_node = np;
ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
if (ret)
- goto irq_failed;
+ goto mutex_failed;
/* Set the correct NOR size now. */
if (q->nor_size == 0) {
@@ -939,8 +1071,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
if (ret)
goto last_init_failed;
- clk_disable(q->clk);
- clk_disable(q->clk_en);
+ fsl_qspi_clk_disable_unprep(q);
return 0;
last_init_failed:
@@ -948,12 +1079,14 @@ last_init_failed:
/* skip the holes */
if (!q->has_second_chip)
i *= 2;
- mtd_device_unregister(&q->mtd[i]);
+ mtd_device_unregister(&q->nor[i].mtd);
}
+mutex_failed:
+ mutex_destroy(&q->lock);
irq_failed:
- clk_disable_unprepare(q->clk);
+ fsl_qspi_clk_disable_unprep(q);
clk_failed:
- clk_disable_unprepare(q->clk_en);
+ dev_err(dev, "Freescale QuadSPI probe failed\n");
return ret;
}
@@ -966,15 +1099,18 @@ static int fsl_qspi_remove(struct platform_device *pdev)
/* skip the holes */
if (!q->has_second_chip)
i *= 2;
- mtd_device_unregister(&q->mtd[i]);
+ mtd_device_unregister(&q->nor[i].mtd);
}
/* disable the hardware */
writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
writel(0x0, q->iobase + QUADSPI_RSER);
- clk_unprepare(q->clk);
- clk_unprepare(q->clk_en);
+ mutex_destroy(&q->lock);
+
+ if (q->ahb_addr)
+ iounmap(q->ahb_addr);
+
return 0;
}
@@ -985,12 +1121,19 @@ static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state)
static int fsl_qspi_resume(struct platform_device *pdev)
{
+ int ret;
struct fsl_qspi *q = platform_get_drvdata(pdev);
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
fsl_qspi_nor_setup(q);
fsl_qspi_set_map_addr(q);
fsl_qspi_nor_setup_last(q);
+ fsl_qspi_clk_disable_unprep(q);
+
return 0;
}
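Taken together, the fsl-quadspi changes above track the reworked spi-nor controller interface: struct mtd_info is now embedded in struct spi_nor, the flash device node is passed via nor->flash_node, write_reg() lost its write_enable argument, and spi_nor_scan() is called with a NULL name so the chip is identified by JEDEC ID alone. A condensed sketch of that pattern (the my_*() hooks and ctlr pointer are placeholders, not from this commit):

	nor->dev        = dev;
	nor->flash_node = np;			/* previously taken from dev->of_node */
	nor->priv       = ctlr;
	nor->read_reg   = my_read_reg;
	nor->write_reg  = my_write_reg;		/* no write_enable argument any more */
	nor->read       = my_read;
	nor->write      = my_write;
	nor->erase      = my_erase;

	ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);	/* NULL: detect by JEDEC ID */
	if (!ret)
		ret = mtd_device_parse_register(&nor->mtd, NULL, &ppdata, NULL, 0);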
diff --git a/kernel/drivers/mtd/spi-nor/nxp-spifi.c b/kernel/drivers/mtd/spi-nor/nxp-spifi.c
new file mode 100644
index 000000000..9e82098ae
--- /dev/null
+++ b/kernel/drivers/mtd/spi-nor/nxp-spifi.c
@@ -0,0 +1,479 @@
+/*
+ * SPI-NOR driver for NXP SPI Flash Interface (SPIFI)
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on Freescale QuadSPI driver:
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* NXP SPIFI registers, bits and macros */
+#define SPIFI_CTRL 0x000
+#define SPIFI_CTRL_TIMEOUT(timeout) (timeout)
+#define SPIFI_CTRL_CSHIGH(cshigh) ((cshigh) << 16)
+#define SPIFI_CTRL_MODE3 BIT(23)
+#define SPIFI_CTRL_DUAL BIT(28)
+#define SPIFI_CTRL_FBCLK BIT(30)
+#define SPIFI_CMD 0x004
+#define SPIFI_CMD_DATALEN(dlen) ((dlen) & 0x3fff)
+#define SPIFI_CMD_DOUT BIT(15)
+#define SPIFI_CMD_INTLEN(ilen) ((ilen) << 16)
+#define SPIFI_CMD_FIELDFORM(field) ((field) << 19)
+#define SPIFI_CMD_FIELDFORM_ALL_SERIAL SPIFI_CMD_FIELDFORM(0x0)
+#define SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA SPIFI_CMD_FIELDFORM(0x1)
+#define SPIFI_CMD_FRAMEFORM(frame) ((frame) << 21)
+#define SPIFI_CMD_FRAMEFORM_OPCODE_ONLY SPIFI_CMD_FRAMEFORM(0x1)
+#define SPIFI_CMD_OPCODE(op) ((op) << 24)
+#define SPIFI_ADDR 0x008
+#define SPIFI_IDATA 0x00c
+#define SPIFI_CLIMIT 0x010
+#define SPIFI_DATA 0x014
+#define SPIFI_MCMD 0x018
+#define SPIFI_STAT 0x01c
+#define SPIFI_STAT_MCINIT BIT(0)
+#define SPIFI_STAT_CMD BIT(1)
+#define SPIFI_STAT_RESET BIT(4)
+
+#define SPI_NOR_MAX_ID_LEN 6
+
+struct nxp_spifi {
+ struct device *dev;
+ struct clk *clk_spifi;
+ struct clk *clk_reg;
+ void __iomem *io_base;
+ void __iomem *flash_base;
+ struct spi_nor nor;
+ bool memory_mode;
+ u32 mcmd;
+};
+
+static int nxp_spifi_wait_for_cmd(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ !(stat & SPIFI_STAT_CMD), 10, 30);
+ if (ret)
+ dev_warn(spifi->dev, "command timed out\n");
+
+ return ret;
+}
+
+static int nxp_spifi_reset(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ writel(SPIFI_STAT_RESET, spifi->io_base + SPIFI_STAT);
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ !(stat & SPIFI_STAT_RESET), 10, 30);
+ if (ret)
+ dev_warn(spifi->dev, "state reset timed out\n");
+
+ return ret;
+}
+
+static int nxp_spifi_set_memory_mode_off(struct nxp_spifi *spifi)
+{
+ int ret;
+
+ if (!spifi->memory_mode)
+ return 0;
+
+ ret = nxp_spifi_reset(spifi);
+ if (ret)
+ dev_err(spifi->dev, "unable to enter command mode\n");
+ else
+ spifi->memory_mode = false;
+
+ return ret;
+}
+
+static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi)
+{
+ u8 stat;
+ int ret;
+
+ if (spifi->memory_mode)
+ return 0;
+
+ writel(spifi->mcmd, spifi->io_base + SPIFI_MCMD);
+ ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+ stat & SPIFI_STAT_MCINIT, 10, 30);
+ if (ret)
+ dev_err(spifi->dev, "unable to enter memory mode\n");
+ else
+ spifi->memory_mode = true;
+
+ return ret;
+}
+
+static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ cmd = SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_OPCODE(opcode) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ while (len--)
+ *buf++ = readb(spifi->io_base + SPIFI_DATA);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ cmd = SPIFI_CMD_DOUT |
+ SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_OPCODE(opcode) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ while (len--)
+ writeb(*buf++, spifi->io_base + SPIFI_DATA);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_on(spifi);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, spifi->flash_base + from, len);
+ *retlen += len;
+
+ return 0;
+}
+
+static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return;
+
+ writel(to, spifi->io_base + SPIFI_ADDR);
+ *retlen += len;
+
+ cmd = SPIFI_CMD_DOUT |
+ SPIFI_CMD_DATALEN(len) |
+ SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_OPCODE(nor->program_opcode) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ while (len--)
+ writeb(*buf++, spifi->io_base + SPIFI_DATA);
+
+ nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
+{
+ struct nxp_spifi *spifi = nor->priv;
+ u32 cmd;
+ int ret;
+
+ ret = nxp_spifi_set_memory_mode_off(spifi);
+ if (ret)
+ return ret;
+
+ writel(offs, spifi->io_base + SPIFI_ADDR);
+
+ cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+ SPIFI_CMD_OPCODE(nor->erase_opcode) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+ writel(cmd, spifi->io_base + SPIFI_CMD);
+
+ return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
+{
+ switch (spifi->nor.flash_read) {
+ case SPI_NOR_NORMAL:
+ case SPI_NOR_FAST:
+ spifi->mcmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL;
+ break;
+ case SPI_NOR_DUAL:
+ case SPI_NOR_QUAD:
+ spifi->mcmd = SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA;
+ break;
+ default:
+ dev_err(spifi->dev, "unsupported SPI read mode\n");
+ return -EINVAL;
+ }
+
+ /* Memory mode supports address length between 1 and 4 */
+ if (spifi->nor.addr_width < 1 || spifi->nor.addr_width > 4)
+ return -EINVAL;
+
+ spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) |
+ SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) |
+ SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+
+ return 0;
+}
+
+static void nxp_spifi_dummy_id_read(struct spi_nor *nor)
+{
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+}
+
+static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
+ struct device_node *np)
+{
+ struct mtd_part_parser_data ppdata;
+ enum read_mode flash_read;
+ u32 ctrl, property;
+ u16 mode = 0;
+ int ret;
+
+ if (!of_property_read_u32(np, "spi-rx-bus-width", &property)) {
+ switch (property) {
+ case 1:
+ break;
+ case 2:
+ mode |= SPI_RX_DUAL;
+ break;
+ case 4:
+ mode |= SPI_RX_QUAD;
+ break;
+ default:
+ dev_err(spifi->dev, "unsupported rx-bus-width\n");
+ return -EINVAL;
+ }
+ }
+
+ if (of_find_property(np, "spi-cpha", NULL))
+ mode |= SPI_CPHA;
+
+ if (of_find_property(np, "spi-cpol", NULL))
+ mode |= SPI_CPOL;
+
+ /* Setup control register defaults */
+ ctrl = SPIFI_CTRL_TIMEOUT(1000) |
+ SPIFI_CTRL_CSHIGH(15) |
+ SPIFI_CTRL_FBCLK;
+
+ if (mode & SPI_RX_DUAL) {
+ ctrl |= SPIFI_CTRL_DUAL;
+ flash_read = SPI_NOR_DUAL;
+ } else if (mode & SPI_RX_QUAD) {
+ ctrl &= ~SPIFI_CTRL_DUAL;
+ flash_read = SPI_NOR_QUAD;
+ } else {
+ ctrl |= SPIFI_CTRL_DUAL;
+ flash_read = SPI_NOR_NORMAL;
+ }
+
+ switch (mode & (SPI_CPHA | SPI_CPOL)) {
+ case SPI_MODE_0:
+ ctrl &= ~SPIFI_CTRL_MODE3;
+ break;
+ case SPI_MODE_3:
+ ctrl |= SPIFI_CTRL_MODE3;
+ break;
+ default:
+ dev_err(spifi->dev, "only mode 0 and 3 supported\n");
+ return -EINVAL;
+ }
+
+ writel(ctrl, spifi->io_base + SPIFI_CTRL);
+
+ spifi->nor.dev = spifi->dev;
+ spifi->nor.flash_node = np;
+ spifi->nor.priv = spifi;
+ spifi->nor.read = nxp_spifi_read;
+ spifi->nor.write = nxp_spifi_write;
+ spifi->nor.erase = nxp_spifi_erase;
+ spifi->nor.read_reg = nxp_spifi_read_reg;
+ spifi->nor.write_reg = nxp_spifi_write_reg;
+
+ /*
+ * The first read on a hard reset isn't reliable so do a
+ * dummy read of the id before calling spi_nor_scan().
+ * The reason for this problem is unknown.
+ *
+ * The official NXP spifilib uses more or less the same
+ * workaround that is applied here by reading the device
+ * id multiple times.
+ */
+ nxp_spifi_dummy_id_read(&spifi->nor);
+
+ ret = spi_nor_scan(&spifi->nor, NULL, flash_read);
+ if (ret) {
+ dev_err(spifi->dev, "device scan failed\n");
+ return ret;
+ }
+
+ ret = nxp_spifi_setup_memory_cmd(spifi);
+ if (ret) {
+ dev_err(spifi->dev, "memory command setup failed\n");
+ return ret;
+ }
+
+ ppdata.of_node = np;
+ ret = mtd_device_parse_register(&spifi->nor.mtd, NULL, &ppdata, NULL, 0);
+ if (ret) {
+ dev_err(spifi->dev, "mtd device parse failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nxp_spifi_probe(struct platform_device *pdev)
+{
+ struct device_node *flash_np;
+ struct nxp_spifi *spifi;
+ struct resource *res;
+ int ret;
+
+ spifi = devm_kzalloc(&pdev->dev, sizeof(*spifi), GFP_KERNEL);
+ if (!spifi)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spifi");
+ spifi->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spifi->io_base))
+ return PTR_ERR(spifi->io_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash");
+ spifi->flash_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spifi->flash_base))
+ return PTR_ERR(spifi->flash_base);
+
+ spifi->clk_spifi = devm_clk_get(&pdev->dev, "spifi");
+ if (IS_ERR(spifi->clk_spifi)) {
+ dev_err(&pdev->dev, "spifi clock not found\n");
+ return PTR_ERR(spifi->clk_spifi);
+ }
+
+ spifi->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ if (IS_ERR(spifi->clk_reg)) {
+ dev_err(&pdev->dev, "reg clock not found\n");
+ return PTR_ERR(spifi->clk_reg);
+ }
+
+ ret = clk_prepare_enable(spifi->clk_reg);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable reg clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(spifi->clk_spifi);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable spifi clock\n");
+ goto dis_clk_reg;
+ }
+
+ spifi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, spifi);
+
+ /* Initialize and reset device */
+ nxp_spifi_reset(spifi);
+ writel(0, spifi->io_base + SPIFI_IDATA);
+ writel(0, spifi->io_base + SPIFI_MCMD);
+ nxp_spifi_reset(spifi);
+
+ flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
+ if (!flash_np) {
+ dev_err(&pdev->dev, "no SPI flash device to configure\n");
+ ret = -ENODEV;
+ goto dis_clks;
+ }
+
+ ret = nxp_spifi_setup_flash(spifi, flash_np);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to setup flash chip\n");
+ goto dis_clks;
+ }
+
+ return 0;
+
+dis_clks:
+ clk_disable_unprepare(spifi->clk_spifi);
+dis_clk_reg:
+ clk_disable_unprepare(spifi->clk_reg);
+ return ret;
+}
+
+static int nxp_spifi_remove(struct platform_device *pdev)
+{
+ struct nxp_spifi *spifi = platform_get_drvdata(pdev);
+
+ mtd_device_unregister(&spifi->nor.mtd);
+ clk_disable_unprepare(spifi->clk_spifi);
+ clk_disable_unprepare(spifi->clk_reg);
+
+ return 0;
+}
+
+static const struct of_device_id nxp_spifi_match[] = {
+ {.compatible = "nxp,lpc1773-spifi"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nxp_spifi_match);
+
+static struct platform_driver nxp_spifi_driver = {
+ .probe = nxp_spifi_probe,
+ .remove = nxp_spifi_remove,
+ .driver = {
+ .name = "nxp-spifi",
+ .of_match_table = nxp_spifi_match,
+ },
+};
+module_platform_driver(nxp_spifi_driver);
+
+MODULE_DESCRIPTION("NXP SPI Flash Interface driver");
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_LICENSE("GPL v2");
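For reference, nxp_spifi_setup_memory_cmd() above packs the read opcode, dummy length and frame format into one memory-mode command word; FRAMEFORM(addr_width + 1) is consistent with SPIFI_CMD_FRAMEFORM_OPCODE_ONLY being 0x1, i.e. the frame value counts the opcode plus the address bytes. A worked example with assumed values (quad-read opcode 0x6b, 3-byte addressing, 8 dummy cycles, roughly what spi_nor_scan() would set up; illustration only):

/*
 * mcmd = SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA
 *      | SPIFI_CMD_OPCODE(0x6b)          - opcode in bits 31:24
 *      | SPIFI_CMD_INTLEN(8 / 8)         - one intermediate (dummy) byte
 *      | SPIFI_CMD_FRAMEFORM(3 + 1)      - opcode plus three address bytes
 */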
diff --git a/kernel/drivers/mtd/spi-nor/spi-nor.c b/kernel/drivers/mtd/spi-nor/spi-nor.c
index 14a5d2325..32477c4eb 100644
--- a/kernel/drivers/mtd/spi-nor/spi-nor.c
+++ b/kernel/drivers/mtd/spi-nor/spi-nor.c
@@ -16,19 +16,32 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/math64.h>
+#include <linux/sizes.h>
-#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/of_platform.h>
#include <linux/spi/flash.h>
#include <linux/mtd/spi-nor.h>
/* Define max times to check status register before we give up. */
-#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
+
+/*
+ * For everything but full-chip erase; probably could be much smaller, but kept
+ * around for safety for now
+ */
+#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
+
+/*
+ * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
+ * for larger flash
+ */
+#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
#define SPI_NOR_MAX_ID_LEN 6
struct flash_info {
+ char *name;
+
/*
* This array stores the ID bytes.
* The first three bytes are the JEDIC ID.
@@ -59,7 +72,7 @@ struct flash_info {
#define JEDEC_MFR(info) ((info)->id[0])
-static const struct spi_device_id *spi_nor_match_id(const char *name);
+static const struct flash_info *spi_nor_match_id(const char *name);
/*
* Read the status register, returning its value in the location
@@ -143,7 +156,7 @@ static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
static inline int write_sr(struct spi_nor *nor, u8 val)
{
nor->cmd_buf[0] = val;
- return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
+ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
}
/*
@@ -152,7 +165,7 @@ static inline int write_sr(struct spi_nor *nor, u8 val)
*/
static inline int write_enable(struct spi_nor *nor)
{
- return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0, 0);
+ return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
}
/*
@@ -160,7 +173,7 @@ static inline int write_enable(struct spi_nor *nor)
*/
static inline int write_disable(struct spi_nor *nor)
{
- return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0, 0);
+ return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
}
static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
@@ -169,7 +182,7 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
}
/* Enable/disable 4-byte addressing mode. */
-static inline int set_4byte(struct spi_nor *nor, struct flash_info *info,
+static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
int enable)
{
int status;
@@ -177,16 +190,16 @@ static inline int set_4byte(struct spi_nor *nor, struct flash_info *info,
u8 cmd;
switch (JEDEC_MFR(info)) {
- case CFI_MFR_ST: /* Micron, actually */
+ case SNOR_MFR_MICRON:
/* Some Micron need WREN command; all will accept it */
need_wren = true;
- case CFI_MFR_MACRONIX:
- case 0xEF /* winbond */:
+ case SNOR_MFR_MACRONIX:
+ case SNOR_MFR_WINBOND:
if (need_wren)
write_enable(nor);
cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
- status = nor->write_reg(nor, cmd, NULL, 0, 0);
+ status = nor->write_reg(nor, cmd, NULL, 0);
if (need_wren)
write_disable(nor);
@@ -194,7 +207,7 @@ static inline int set_4byte(struct spi_nor *nor, struct flash_info *info,
default:
/* Spansion style */
nor->cmd_buf[0] = enable << 7;
- return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1, 0);
+ return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
}
}
static inline int spi_nor_sr_ready(struct spi_nor *nor)
@@ -231,12 +244,13 @@ static int spi_nor_ready(struct spi_nor *nor)
* Service routine to read status register until ready, or timeout occurs.
* Returns non-zero if error.
*/
-static int spi_nor_wait_till_ready(struct spi_nor *nor)
+static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
+ unsigned long timeout_jiffies)
{
unsigned long deadline;
int timeout = 0, ret;
- deadline = jiffies + MAX_READY_WAIT_JIFFIES;
+ deadline = jiffies + timeout_jiffies;
while (!timeout) {
if (time_after_eq(jiffies, deadline))
@@ -256,6 +270,12 @@ static int spi_nor_wait_till_ready(struct spi_nor *nor)
return -ETIMEDOUT;
}
+static int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+ return spi_nor_wait_till_ready_with_timeout(nor,
+ DEFAULT_READY_WAIT_JIFFIES);
+}
+
/*
* Erase the whole flash memory
*
@@ -263,9 +283,9 @@ static int spi_nor_wait_till_ready(struct spi_nor *nor)
*/
static int erase_chip(struct spi_nor *nor)
{
- dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd->size >> 10));
+ dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
- return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0, 0);
+ return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
}
static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
@@ -319,6 +339,8 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
/* whole-chip erase? */
if (len == mtd->size) {
+ unsigned long timeout;
+
write_enable(nor);
if (erase_chip(nor)) {
@@ -326,7 +348,16 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
goto erase_err;
}
- ret = spi_nor_wait_till_ready(nor);
+ /*
+ * Scale the timeout linearly with the size of the flash, with
+ * a minimum calibrated to an old 2MB flash. We could try to
+ * pull these from CFI/SFDP, but these values should be good
+ * enough for now.
+ */
+ timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
+ CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
+ (unsigned long)(mtd->size / SZ_2M));
+ ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
if (ret)
goto erase_err;
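A quick worked example of the timeout scaling above (illustration only):

/*
 *   CHIP_ERASE_2MB_READY_WAIT_JIFFIES = 40 * HZ   (40 seconds)
 *
 *   16 MiB flash: mtd->size / SZ_2M = 8  -> timeout = 8 * 40 s = 320 s
 *    1 MiB flash: mtd->size / SZ_2M = 0  -> max() keeps the 40 s floor
 */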
@@ -369,72 +400,171 @@ erase_err:
return ret;
}
+static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
+ uint64_t *len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ int shift = ffs(mask) - 1;
+ int pow;
+
+ if (!(sr & mask)) {
+ /* No protection */
+ *ofs = 0;
+ *len = 0;
+ } else {
+ pow = ((sr & mask) ^ mask) >> shift;
+ *len = mtd->size >> pow;
+ *ofs = mtd->size - *len;
+ }
+}
+
+/*
+ * Return 1 if the entire region is locked, 0 otherwise
+ */
+static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ loff_t lock_offs;
+ uint64_t lock_len;
+
+ stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
+
+ return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
+}
+
+/*
+ * Lock a region of the flash. Compatible with ST Micro and similar flash.
+ * Supports only the block protection bits BP{0,1,2} in the status register
+ * (SR). Does not support these features found in newer SR bitfields:
+ * - TB: top/bottom protect - only handle TB=0 (top protect)
+ * - SEC: sector/block protect - only handle SEC=0 (block protect)
+ * - CMP: complement protect - only support CMP=0 (range is not complemented)
+ *
+ * Sample table portion for 8MB flash (Winbond w25q64fw):
+ *
+ * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
+ * --------------------------------------------------------------------------
+ * X | X | 0 | 0 | 0 | NONE | NONE
+ * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
+ * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
+ * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
+ * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
+ * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
+ * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
+ * X | X | 1 | 1 | 1 | 8 MB | ALL
+ *
+ * Returns negative on errors, 0 on success.
+ */
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
- struct mtd_info *mtd = nor->mtd;
- uint32_t offset = ofs;
- uint8_t status_old, status_new;
- int ret = 0;
+ struct mtd_info *mtd = &nor->mtd;
+ u8 status_old, status_new;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 shift = ffs(mask) - 1, pow, val;
status_old = read_sr(nor);
- if (offset < mtd->size - (mtd->size / 2))
- status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
- else if (offset < mtd->size - (mtd->size / 4))
- status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
- else if (offset < mtd->size - (mtd->size / 8))
- status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
- else if (offset < mtd->size - (mtd->size / 16))
- status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
- else if (offset < mtd->size - (mtd->size / 32))
- status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
- else if (offset < mtd->size - (mtd->size / 64))
- status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
- else
- status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
+ /* SPI NOR always locks to the end */
+ if (ofs + len != mtd->size) {
+ /* Does combined region extend to end? */
+ if (!stm_is_locked_sr(nor, ofs + len, mtd->size - ofs - len,
+ status_old))
+ return -EINVAL;
+ len = mtd->size - ofs;
+ }
+
+ /*
+ * Need smallest pow such that:
+ *
+ * 1 / (2^pow) <= (len / size)
+ *
+ * so (assuming power-of-2 size) we do:
+ *
+ * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
+ */
+ pow = ilog2(mtd->size) - ilog2(len);
+ val = mask - (pow << shift);
+ if (val & ~mask)
+ return -EINVAL;
+ /* Don't "lock" with no region! */
+ if (!(val & mask))
+ return -EINVAL;
+
+ status_new = (status_old & ~mask) | val;
/* Only modify protection if it will not unlock other areas */
- if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) >
- (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
- write_enable(nor);
- ret = write_sr(nor, status_new);
- }
+ if ((status_new & mask) <= (status_old & mask))
+ return -EINVAL;
- return ret;
+ write_enable(nor);
+ return write_sr(nor, status_new);
}
+/*
+ * Unlock a region of the flash. See stm_lock() for more info
+ *
+ * Returns negative on errors, 0 on success.
+ */
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
- struct mtd_info *mtd = nor->mtd;
- uint32_t offset = ofs;
+ struct mtd_info *mtd = &nor->mtd;
uint8_t status_old, status_new;
- int ret = 0;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 shift = ffs(mask) - 1, pow, val;
status_old = read_sr(nor);
- if (offset+len > mtd->size - (mtd->size / 64))
- status_new = status_old & ~(SR_BP2 | SR_BP1 | SR_BP0);
- else if (offset+len > mtd->size - (mtd->size / 32))
- status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
- else if (offset+len > mtd->size - (mtd->size / 16))
- status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
- else if (offset+len > mtd->size - (mtd->size / 8))
- status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
- else if (offset+len > mtd->size - (mtd->size / 4))
- status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
- else if (offset+len > mtd->size - (mtd->size / 2))
- status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
- else
- status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+ /* Cannot unlock; would unlock larger region than requested */
+ if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize,
+ status_old))
+ return -EINVAL;
- /* Only modify protection if it will not lock other areas */
- if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) <
- (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
- write_enable(nor);
- ret = write_sr(nor, status_new);
+ /*
+ * Need largest pow such that:
+ *
+ * 1 / (2^pow) >= (len / size)
+ *
+ * so (assuming power-of-2 size) we do:
+ *
+ * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
+ */
+ pow = ilog2(mtd->size) - order_base_2(mtd->size - (ofs + len));
+ if (ofs + len == mtd->size) {
+ val = 0; /* fully unlocked */
+ } else {
+ val = mask - (pow << shift);
+ /* Some power-of-two sizes are not supported */
+ if (val & ~mask)
+ return -EINVAL;
}
- return ret;
+ status_new = (status_old & ~mask) | val;
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new & mask) >= (status_old & mask))
+ return -EINVAL;
+
+ write_enable(nor);
+ return write_sr(nor, status_new);
+}
+
+/*
+ * Check if a region of the flash is (completely) locked. See stm_lock() for
+ * more info.
+ *
+ * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
+ * negative on errors.
+ */
+static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int status;
+
+ status = read_sr(nor);
+ if (status < 0)
+ return status;
+
+ return stm_is_locked_sr(nor, ofs, len, status);
}
static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -467,9 +597,23 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
+static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+ if (ret)
+ return ret;
+
+ ret = nor->flash_is_locked(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ return ret;
+}
+
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- ((kernel_ulong_t)&(struct flash_info) { \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
@@ -481,11 +625,9 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
- .flags = (_flags), \
- })
+ .flags = (_flags),
#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
- ((kernel_ulong_t)&(struct flash_info) { \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
@@ -498,23 +640,27 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
- .flags = (_flags), \
- })
+ .flags = (_flags),
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
- ((kernel_ulong_t)&(struct flash_info) { \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
.addr_width = (_addr_width), \
- .flags = (_flags), \
- })
+ .flags = (_flags),
/* NOTE: double check command sets and memory organization when you add
* more nor chips. This current list focusses on newer chips, which
* have been converging on command sets which including JEDEC ID.
+ *
+ * All newly added entries should describe *hardware* and should use SECT_4K
+ * (or SECT_4K_PMC) if hardware supports erasing 4 KiB sectors. For usage
+ * scenarios excluding small sectors there is config option that can be
+ * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
+ * For historical (and compatibility) reasons (before we got above config) some
+ * old entries may be missing 4K flag.
*/
-static const struct spi_device_id spi_nor_ids[] = {
+static const struct flash_info spi_nor_ids[] = {
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
{ "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
@@ -538,7 +684,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
- { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, 0) },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
/* ESMT */
{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
@@ -560,7 +706,11 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
+ /* ISSI */
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+
/* Macronix */
+ { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
@@ -578,7 +728,9 @@ static const struct spi_device_id spi_nor_ids[] = {
/* Micron */
{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SPI_NOR_QUAD_READ) },
+ { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
@@ -595,25 +747,28 @@ static const struct spi_device_id spi_nor_ids[] = {
* for the chips listed here (without boot sectors).
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
+ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
+ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
- { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
- { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
+ { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
- { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, 0) },
+ { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
+ { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -624,6 +779,8 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
+ { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
+ { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
@@ -672,10 +829,11 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
@@ -690,11 +848,11 @@ static const struct spi_device_id spi_nor_ids[] = {
{ },
};
-static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
+static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
int tmp;
u8 id[SPI_NOR_MAX_ID_LEN];
- struct flash_info *info;
+ const struct flash_info *info;
tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
if (tmp < 0) {
@@ -703,7 +861,7 @@ static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
}
for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
- info = (void *)spi_nor_ids[tmp].driver_data;
+ info = &spi_nor_ids[tmp];
if (info->id_len) {
if (!memcmp(info->id, id, info->id_len))
return &spi_nor_ids[tmp];
@@ -857,8 +1015,7 @@ static int macronix_quad_enable(struct spi_nor *nor)
val = read_sr(nor);
write_enable(nor);
- nor->cmd_buf[0] = val | SR_QUAD_EN_MX;
- nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1, 0);
+ write_sr(nor, val | SR_QUAD_EN_MX);
if (spi_nor_wait_till_ready(nor))
return 1;
@@ -883,7 +1040,7 @@ static int write_sr_cr(struct spi_nor *nor, u16 val)
nor->cmd_buf[0] = val & 0xff;
nor->cmd_buf[1] = (val >> 8);
- return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2, 0);
+ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2);
}
static int spansion_quad_enable(struct spi_nor *nor)
@@ -925,7 +1082,7 @@ static int micron_quad_enable(struct spi_nor *nor)
/* set EVCR, enable quad I/O */
nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
- ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1, 0);
+ ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
if (ret < 0) {
dev_err(nor->dev, "error while writing EVCR register\n");
return ret;
@@ -949,19 +1106,19 @@ static int micron_quad_enable(struct spi_nor *nor)
return 0;
}
-static int set_quad_mode(struct spi_nor *nor, struct flash_info *info)
+static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
{
int status;
switch (JEDEC_MFR(info)) {
- case CFI_MFR_MACRONIX:
+ case SNOR_MFR_MACRONIX:
status = macronix_quad_enable(nor);
if (status) {
dev_err(nor->dev, "Macronix quad-read not enabled\n");
return -EINVAL;
}
return status;
- case CFI_MFR_ST:
+ case SNOR_MFR_MICRON:
status = micron_quad_enable(nor);
if (status) {
dev_err(nor->dev, "Micron quad-read not enabled\n");
@@ -991,11 +1148,10 @@ static int spi_nor_check(struct spi_nor *nor)
int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
{
- const struct spi_device_id *id = NULL;
- struct flash_info *info;
+ const struct flash_info *info = NULL;
struct device *dev = nor->dev;
- struct mtd_info *mtd = nor->mtd;
- struct device_node *np = dev->of_node;
+ struct mtd_info *mtd = &nor->mtd;
+ struct device_node *np = nor->flash_node;
int ret;
int i;
@@ -1003,27 +1159,25 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
if (ret)
return ret;
- /* Try to auto-detect if chip name wasn't specified */
- if (!name)
- id = spi_nor_read_id(nor);
- else
- id = spi_nor_match_id(name);
- if (IS_ERR_OR_NULL(id))
+ if (name)
+ info = spi_nor_match_id(name);
+ /* Try to auto-detect if chip name wasn't specified or not found */
+ if (!info)
+ info = spi_nor_read_id(nor);
+ if (IS_ERR_OR_NULL(info))
return -ENOENT;
- info = (void *)id->driver_data;
-
/*
* If caller has specified name of flash model that can normally be
* detected using JEDEC, let's verify it.
*/
if (name && info->id_len) {
- const struct spi_device_id *jid;
+ const struct flash_info *jinfo;
- jid = spi_nor_read_id(nor);
- if (IS_ERR(jid)) {
- return PTR_ERR(jid);
- } else if (jid != id) {
+ jinfo = spi_nor_read_id(nor);
+ if (IS_ERR(jinfo)) {
+ return PTR_ERR(jinfo);
+ } else if (jinfo != info) {
/*
* JEDEC knows better, so overwrite platform ID. We
* can't trust partitions any longer, but we'll let
@@ -1032,28 +1186,28 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
* information, even if it's not 100% accurate.
*/
dev_warn(dev, "found %s, expected %s\n",
- jid->name, id->name);
- id = jid;
- info = (void *)jid->driver_data;
+ jinfo->name, info->name);
+ info = jinfo;
}
}
mutex_init(&nor->lock);
/*
- * Atmel, SST and Intel/Numonyx serial nor tend to power
- * up with the software protection bits set
+ * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
+ * with the software protection bits set
*/
- if (JEDEC_MFR(info) == CFI_MFR_ATMEL ||
- JEDEC_MFR(info) == CFI_MFR_INTEL ||
- JEDEC_MFR(info) == CFI_MFR_SST) {
+ if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(info) == SNOR_MFR_INTEL ||
+ JEDEC_MFR(info) == SNOR_MFR_SST) {
write_enable(nor);
write_sr(nor, 0);
}
if (!mtd->name)
mtd->name = dev_name(dev);
+ mtd->priv = nor;
mtd->type = MTD_NORFLASH;
mtd->writesize = 1;
mtd->flags = MTD_CAP_NORFLASH;
@@ -1061,15 +1215,17 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
- /* nor protection support for STmicro chips */
- if (JEDEC_MFR(info) == CFI_MFR_ST) {
+ /* NOR protection support for STmicro/Micron chips and similar */
+ if (JEDEC_MFR(info) == SNOR_MFR_MICRON) {
nor->flash_lock = stm_lock;
nor->flash_unlock = stm_unlock;
+ nor->flash_is_locked = stm_is_locked;
}
- if (nor->flash_lock && nor->flash_unlock) {
+ if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) {
mtd->_lock = spi_nor_lock;
mtd->_unlock = spi_nor_unlock;
+ mtd->_is_locked = spi_nor_is_locked;
}
/* sst nor chips use AAI word program */
@@ -1156,7 +1312,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
else if (mtd->size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
nor->addr_width = 4;
- if (JEDEC_MFR(info) == CFI_MFR_AMD) {
+ if (JEDEC_MFR(info) == SNOR_MFR_SPANSION) {
/* Dedicated 4-byte command set */
switch (nor->flash_read) {
case SPI_NOR_QUAD:
@@ -1184,7 +1340,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
nor->read_dummy = spi_nor_read_dummy_cycles(nor);
- dev_info(dev, "%s (%lld Kbytes)\n", id->name,
+ dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);
dev_dbg(dev,
@@ -1207,11 +1363,11 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
-static const struct spi_device_id *spi_nor_match_id(const char *name)
+static const struct flash_info *spi_nor_match_id(const char *name)
{
- const struct spi_device_id *id = spi_nor_ids;
+ const struct flash_info *id = spi_nor_ids;
- while (id->name[0]) {
+ while (id->name) {
if (!strcmp(name, id->name))
return id;
id++;