/*
 * drivers/mtd/nand/lpc32xx_nand.c
 *
 * Author: Kevin Wells <kevin.wells@nxp.com>
 *
 * Copyright (C) 2010 NXP Semiconductors
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/mtd/nand_ecc.h>

#include <asm/sizes.h>
#include <mach/hardware.h>
#include <mach/board.h>
#include <mach/slcnand.h>
#include <mach/dmac.h>
/*
 * The LPC3250 generates 3 bytes of ECC data per 256-byte sub-page but, due
 * to the DMA word transfer limitation, we store 4 bytes.
 */
#define NAND_ECC_LEN_PER_SUBPAGE	0x4
#define NAND_ECC_SUBPAGE_LEN		256

#define NAND_LARGE_BLOCK_PAGE_SIZE	2048
#define NAND_SMALL_BLOCK_PAGE_SIZE	512

#define NAND_ERASED_BLOCK_ECC_VALUE	0xFFFFFFFF
static struct nand_ecclayout lpc32xx_nand_oob_16 = {
	.eccpos = {8, 9, 10, 11, 12, 13, 14, 15},

static struct nand_ecclayout lpc32xx_nand_oob_64 = {
	.eccpos = { 8, 9, 10, 11, 12, 13, 14, 15,
		   24, 25, 26, 27, 28, 29, 30, 31,
		   40, 41, 42, 43, 44, 45, 46, 47,
		   56, 57, 58, 59, 60, 61, 62, 63},
struct lpc32xx_nand_host {
	struct nand_chip	nand_chip;
	void __iomem		*io_base;
	struct lpc32XX_nand_cfg	*ncfg;
	struct completion	comp;
	struct dma_config	dmacfg;
	uint32_t		dma_xfer_status;
	/*
	 * Physical addresses of the ECC buffer, DMA data buffer and OOB
	 * data buffer
	 */
	dma_addr_t		oob_buf_phy;
	dma_addr_t		ecc_calc_buf_phy;
	dma_addr_t		dma_buf_phy;
	/*
	 * Virtual addresses of the ECC buffer, DMA data buffer and OOB
	 * data buffer
	 */
	uint8_t			*ecc_calc_buf;
	/* Physical address of the SLC controller registers (for DMA access) */
	dma_addr_t		io_base_phy;
	uint8_t			*erase_buf_data;
#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif
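
/* Count the set bits (population count) of 16-bit and 32-bit values */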
static uint8_t nand_slc_bit_cnt16(uint16_t ch)
{
	ch = (ch & 0x5555) + ((ch & ~0x5555) >> 1);
	ch = (ch & 0x3333) + ((ch & ~0x3333) >> 2);
	ch = (ch & 0x0F0F) + ((ch & ~0x0F0F) >> 4);
	return (ch + (ch >> 8)) & 0xFF;
}

static uint8_t bit_cnt32(uint32_t val)
{
	return nand_slc_bit_cnt16(val & 0xFFFF) +
	       nand_slc_bit_cnt16(val >> 16);
}
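
/* Reset the SLC controller and program the NAND bus timing from the SLC base clock rate */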
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset SLC controller */
	__raw_writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));

	__raw_writel(0, SLC_CFG(host->io_base));
	__raw_writel(0, SLC_IEN(host->io_base));
	__raw_writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
		     SLC_ICR(host->io_base));

	/* Get base clock for SLC block */
	clkrate = clk_get_rate(host->clk);

	/* Compute clock setup values */
	tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
	      SLCTAC_WWIDTH(1 + (clkrate / host->ncfg->wwidth)) |
	      SLCTAC_WHOLD(1 + (clkrate / host->ncfg->whold)) |
	      SLCTAC_WSETUP(1 + (clkrate / host->ncfg->wsetup)) |
	      SLCTAC_RDR(host->ncfg->rdr_clks) |
	      SLCTAC_RWIDTH(1 + (clkrate / host->ncfg->rwidth)) |
	      SLCTAC_RHOLD(1 + (clkrate / host->ncfg->rhold)) |
	      SLCTAC_RSETUP(1 + (clkrate / host->ncfg->rsetup));
	__raw_writel(tmp, SLC_TAC(host->io_base));
}
/*
 * Hardware-specific access to the control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	uint32_t tmp;
	struct nand_chip *nand_chip = mtd->priv;
	struct lpc32xx_nand_host *host = nand_chip->priv;

	/* Does CE state need to be changed? */
	tmp = __raw_readl(SLC_CFG(host->io_base));
	if (ctrl & NAND_NCE)
		tmp |= SLCCFG_CE_LOW;
	else
		tmp &= ~SLCCFG_CE_LOW;
	__raw_writel(tmp, SLC_CFG(host->io_base));

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			__raw_writel(cmd, SLC_CMD(host->io_base));
		else
			__raw_writel(cmd, SLC_ADDR(host->io_base));
	}
}
/* Read the Device Ready pin */
static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *nand_chip = mtd->priv;
	struct lpc32xx_nand_host *host = nand_chip->priv;
	if ((__raw_readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
		return 1;

	return 0;
}
/* Enable NAND write protect */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
	if (host->ncfg->enable_write_prot != NULL)
		/* Enable write protection */
		host->ncfg->enable_write_prot(1);
}
/* Disable NAND write protect */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
	if (host->ncfg->enable_write_prot != NULL)
		/* Disable write protection */
		host->ncfg->enable_write_prot(0);
}
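
/* Read a single byte from the SLC data register */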
static uint8_t lpc32xx_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *nand_chip = mtd->priv;
	struct lpc32xx_nand_host *host = nand_chip->priv;
	return (uint8_t)__raw_readl(SLC_DATA(host->io_base));
}
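
/* Read a buffer of data from the SLC data register, one byte per access */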
static void lpc32xx_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *nand_chip = mtd->priv;
	struct lpc32xx_nand_host *host = nand_chip->priv;
	int i;
	for (i = 0; i < len; i++)
		buf[i] = (uint8_t)__raw_readl(SLC_DATA(host->io_base));
}

static int lpc32xx_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct nand_chip *nand_chip = mtd->priv;
	struct lpc32xx_nand_host *host = nand_chip->priv;
	int i;
	for (i = 0; i < len; i++) {
		if (buf[i] != (uint8_t)__raw_readl(SLC_DATA(host->io_base)))
			return -EFAULT;
	}
	return 0;
}

static void lpc32xx_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct nand_chip *nand_chip = mtd->priv;
	struct lpc32xx_nand_host *host = nand_chip->priv;
	int i;
	for (i = 0; i < len; i++)
		__raw_writel((u32)buf[i], SLC_DATA(host->io_base));
}
/*
 * DMA ISR - called when a DMA transfer completes
 */
static void lpc3xxx_nand_dma_irq(int channel, int cause,
				 struct lpc32xx_nand_host *host)
{
	/* Flush DMA link list */
	lpc32xx_dma_flush_llist(host->dmach);

	host->dma_xfer_status = (cause & DMA_TC_INT) ? 0 : 1;
	complete(&host->comp);
}
/*
 * Get a DMA channel, allocate memory for the DMA descriptors and prepare
 * the DMA descriptor link lists
 */
static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host, int num_entries)
{
	host->dmach = DMA_CH_SLCNAND;
	host->dmacfg.ch = DMA_CH_SLCNAND;

	/*
	 * All the DMA configuration parameters will
	 * be overwritten in lpc32xx_nand_dma_configure().
	 */
	host->dmacfg.tc_inten = 1;
	host->dmacfg.err_inten = 1;
	host->dmacfg.src_size = 4;
	host->dmacfg.src_inc = 1;
	host->dmacfg.src_ahb1 = 1;
	host->dmacfg.src_bsize = DMAC_CHAN_SRC_BURST_4;
	host->dmacfg.src_prph = 0;
	host->dmacfg.dst_size = 4;
	host->dmacfg.dst_inc = 0;
	host->dmacfg.dst_bsize = DMAC_CHAN_DEST_BURST_4;
	host->dmacfg.dst_ahb1 = 0;
	host->dmacfg.dst_prph = DMAC_DEST_PERIP(DMA_PERID_NAND1);
	host->dmacfg.flowctrl = DMAC_CHAN_FLOW_D_M2P;
	if (lpc32xx_dma_ch_get(&host->dmacfg, "dma_slcnand",
			       &lpc3xxx_nand_dma_irq, host) < 0) {
		printk(KERN_ERR "Error setting up SLC NAND DMA channel\n");

	/*
	 * Allocate a linked list of DMA descriptors:
	 * for large-block devices, 17 descriptors = (16 data/ECC reads + 1 spare area);
	 * for small-block devices, 5 descriptors = (4 data/ECC reads + 1 spare area).
	 */
	host->llptr = lpc32xx_dma_alloc_llist(host->dmach, num_entries);
	if (host->llptr == 0) {
		lpc32xx_dma_ch_put(host->dmach);
		printk(KERN_ERR "Error allocating list buffer for SLC NAND\n");

	lpc32xx_dma_ch_put(host->dmach);
/*
 * Configure the DMA descriptors and start the DMA transfer
 */
static void lpc32xx_nand_dma_configure(struct mtd_info *mtd,
				       dma_addr_t buffer, int size, int read)
{
	struct nand_chip *chip = mtd->priv;
	struct lpc32xx_nand_host *host = chip->priv;
	uint32_t page_divider = (size == NAND_LARGE_BLOCK_PAGE_SIZE) ? 8 : 2;
	uint32_t dmasrc, dmadst, ctrl, ecc_ctrl, oob_ctrl;
	uint32_t *eccpos = chip->ecc.layout->eccpos;

	/*
	 * CTRL descriptor entry for reading ECC;
	 * copied multiple times to keep the DMA in sync with the flash controller
	 */
		DMAC_CHAN_SRC_BURST_1 |
		DMAC_CHAN_DEST_BURST_1 |
		DMAC_CHAN_SRC_WIDTH_32 |
		DMAC_CHAN_DEST_WIDTH_32 |
		DMAC_CHAN_DEST_AHB1);

	/* CTRL descriptor entry for reading/writing data */
	ctrl = ((mtd->writesize / page_divider) / 4) |
		DMAC_CHAN_SRC_BURST_4 |
		DMAC_CHAN_DEST_BURST_4 |
		DMAC_CHAN_SRC_WIDTH_32 |
		DMAC_CHAN_DEST_WIDTH_32 |

	/* CTRL descriptor entry for reading/writing the spare area */
	oob_ctrl = ((mtd->oobsize / 4) |
		DMAC_CHAN_SRC_BURST_4 |
		DMAC_CHAN_DEST_BURST_4 |
		DMAC_CHAN_SRC_WIDTH_32 |
		DMAC_CHAN_DEST_WIDTH_32 |
		DMAC_CHAN_DEST_AHB1);

	if (read) {
		dmasrc = (uint32_t)SLC_DMA_DATA(host->io_base_phy);
		dmadst = (uint32_t)buffer;
		ctrl |= DMAC_CHAN_DEST_AUTOINC;
	} else {
		dmadst = (uint32_t)SLC_DMA_DATA(host->io_base_phy);
		dmasrc = (uint32_t)buffer;
		ctrl |= DMAC_CHAN_SRC_AUTOINC;
	}
	/*
	 * Write operation sequence for small-block NAND
	 * ----------------------------------------------------------
	 * 1. Transfer 256 bytes of data from memory to flash.
	 * 2. Copy the generated ECC data from the register to the spare area.
	 * 3. Transfer the next 256 bytes of data from memory to flash.
	 * 4. Copy the generated ECC data from the register to the spare area.
	 * 5. Transfer 16 bytes of spare area from memory to flash.
	 *
	 * Read operation sequence for small-block NAND
	 * ----------------------------------------------------------
	 * 1. Transfer 256 bytes of data from flash to memory.
	 * 2. Copy the generated ECC data from the register to the ECC calc buffer.
	 * 3. Transfer the next 256 bytes of data from flash to memory.
	 * 4. Copy the generated ECC data from the register to the ECC calc buffer.
	 * 5. Transfer 16 bytes of spare area from flash to memory.
	 *
	 * Write operation sequence for large-block NAND
	 * ----------------------------------------------------------
	 * 1. Steps 1-4 of the write sequence are repeated four times,
	 *    generating 16 DMA descriptors to transfer 2048 bytes of data
	 *    and 32 bytes of ECC data.
	 * 2. Transfer 64 bytes of spare area from memory to flash.
	 *
	 * Read operation sequence for large-block NAND
	 * ----------------------------------------------------------
	 * 1. Steps 1-4 of the read sequence are repeated four times,
	 *    generating 16 DMA descriptors to transfer 2048 bytes of data
	 *    and 32 bytes of ECC data.
	 * 2. Transfer 64 bytes of spare area from flash to memory.
	 */
	for (i = 0; i < size / 256; i++) {
		lpc32xx_dma_queue_llist(host->dmach,
			(void *)(read ? dmasrc : (dmasrc + (i * 256))),
			(void *)(read ? (dmadst + (i * 256)) : dmadst),

		lpc32xx_dma_queue_llist(host->dmach,
			(void *)SLC_ECC(host->io_base_phy),
			(void *)(read ?
				((uint32_t)host->ecc_calc_buf_phy + (i * 4)) :
				((uint32_t)host->oob_buf_phy + eccpos[i * 4])),

	if (read) {
		dmasrc = (uint32_t)SLC_DMA_DATA(host->io_base_phy);
		dmadst = (uint32_t)host->oob_buf_phy;
		oob_ctrl |= DMAC_CHAN_DEST_AUTOINC;
	} else {
		dmadst = (uint32_t)SLC_DMA_DATA(host->io_base_phy);
		dmasrc = (uint32_t)host->oob_buf_phy;
		oob_ctrl |= DMAC_CHAN_SRC_AUTOINC;
	}

	/* Read/write the spare area data from/to the flash */
	lpc32xx_dma_queue_llist(host->dmach, (void *)dmasrc, (void *)dmadst, -1,
				oob_ctrl | DMAC_CHAN_INT_TC_EN);
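
/*
 * Transfer a page of data and its OOB/ECC between a memory buffer and the
 * SLC controller using DMA, waiting for both the NAND device and the DMA
 * completion before returning.
 */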
static void lpc32xx_nand_dma_xfer(struct mtd_info *mtd, u_char *buf, int len, int read)
{
	struct nand_chip *this = mtd->priv;
	uint32_t config;
	struct lpc32xx_nand_host *host = this->priv;
	dma_addr_t buf_phy = (dma_addr_t)0;

	/*
	 * Calculate the physical address of the buffer; check that the
	 * memory was not allocated by vmalloc
	 */
	if (likely((void *)buf < high_memory)) {
		buf_phy = dma_map_single(mtd->dev.parent, buf, len,
					 read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(mtd->dev.parent, buf_phy))) {
			dev_err(mtd->dev.parent,
				"Unable to DMA map a buffer of size %d\n", len);

		memcpy(host->dma_buf, buf, len);
		buf_phy = host->dma_buf_phy;

	config = DMAC_CHAN_ITC | DMAC_CHAN_IE |
		 (read ? DMAC_CHAN_FLOW_D_P2M : DMAC_CHAN_FLOW_D_M2P) |
		 (read ? DMAC_DEST_PERIP(0) : DMAC_DEST_PERIP(DMA_PERID_NAND1)) |
		 (read ? DMAC_SRC_PERIP(DMA_PERID_NAND1) : DMAC_SRC_PERIP(0)) |

	/* Prepare the descriptors for the transfer */
	lpc32xx_nand_dma_configure(mtd, buf_phy, len, read);

	/* Start the DMA transfer */
	lpc32xx_dma_start_xfer(host->dmach, config);
	__raw_writel(__raw_readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
		     SLC_CTRL(host->io_base));

	/* Wait for NAND to be ready */
	nand_wait_ready(mtd);

	/* Wait until the DMA transfer is done */
	wait_for_completion(&host->comp);
	if (unlikely(host->dma_xfer_status != 0)) {
		dev_err(mtd->dev.parent, "DMA transfer error!\n");

		dma_unmap_single(mtd->dev.parent, buf_phy, len,
				 read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);

	/* Stop DMA and hardware ECC */
	__raw_writel(__raw_readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
		     SLC_CTRL(host->io_base));
	__raw_writel(__raw_readl(SLC_CFG(host->io_base)) &
		     ~(SLCCFG_DMA_BURST | SLCCFG_ECC_EN |
		       SLCCFG_DMA_ECC | SLCCFG_DMA_DIR),
		     SLC_CFG(host->io_base));
}
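
/*
 * Detect and correct errors using the hardware-generated Hamming ECC.
 * The stored and calculated ECC words are XORed; a syndrome with the
 * expected number of set bits indicates a single-bit error whose byte
 * and bit offsets are decoded from the odd-numbered syndrome bits.
 */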
static int lpc32xx_nand_correct_data(struct mtd_info *mtd, u_char *dat,
				     u_char *read_ecc, u_char *calc_ecc)
{
	uint32_t *ecc_stored = (uint32_t *)read_ecc;
	uint32_t *ecc_gen = (uint32_t *)calc_ecc;

	err = *ecc_stored ^ *ecc_gen;
	/* Only perform ECC processing if an error is detected */
		/* ECC failure in this block */
		tmp = bit_cnt32(err);
			uint32_t byte = err >> 6;

			bit = ((err & _BIT(1)) >> 1) | ((err & _BIT(3)) >> 2) |
			      ((err & _BIT(5)) >> 3);

			/* Calculate the byte offset */
			byte = ((byte & _BIT(1)) >> 1) | ((byte & _BIT(3)) >> 2) |
			       ((byte & _BIT(5)) >> 3) | ((byte & _BIT(7)) >> 4) |
			       ((byte & _BIT(9)) >> 5) | ((byte & _BIT(11)) >> 6) |
			       ((byte & _BIT(13)) >> 7) | ((byte & _BIT(15)) >> 8);

			/* Do the correction */
			dat[byte] ^= _BIT(bit);

		/* Non-correctable */
/* Prepare the SLC for a transfer with hardware ECC enabled */
static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
{
	struct nand_chip *this = mtd->priv;
	struct lpc32xx_nand_host *host = this->priv;

	/* Clear ECC */
	__raw_writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));

	if (mode == NAND_ECC_READ) {
		__raw_writel(__raw_readl(SLC_CFG(host->io_base)) |
			     SLCCFG_DMA_DIR, SLC_CFG(host->io_base));
	} else {	/* NAND_ECC_WRITE */
		__raw_writel(__raw_readl(SLC_CFG(host->io_base)) &
			     ~SLCCFG_DMA_DIR, SLC_CFG(host->io_base));
	}

	__raw_writel(__raw_readl(SLC_CFG(host->io_base)) |
		     SLCCFG_DMA_BURST | SLCCFG_ECC_EN | SLCCFG_DMA_ECC,
		     SLC_CFG(host->io_base));

	/* Set transfer count */
	__raw_writel(this->ecc.size + mtd->oobsize, SLC_TC(host->io_base));
}
/* Calculate the inverted ECC from the ECC value generated by the hardware */
static int lpc32xx_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
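
/* Write a page to the device using DMA with hardware ECC generation */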
static void lpc32xx_nand_write_page_hwecc(struct mtd_info *mtd,
					  struct nand_chip *chip, const uint8_t *buf)
{
	struct nand_chip *this = mtd->priv;
	struct lpc32xx_nand_host *host = this->priv;
	int eccsize = chip->ecc.size;

	/*
	 * Skip writing a page whose data is all 0xFF, as the hardware
	 * would generate an all-zero ECC value for it.
	 */
	if (memcmp(buf, host->erase_buf_data, mtd->writesize) == 0)
		return;

	/* Enable hardware ECC and DMA */
	chip->ecc.hwctl(mtd, NAND_ECC_WRITE);

	/* Copy OOB data from the kernel buffer to DMA memory */
	memcpy(host->oob_buf, chip->oob_poi, mtd->oobsize);

	/* Configure the DMA descriptors and perform the NAND write */
	lpc32xx_nand_dma_xfer(mtd, (uint8_t *)buf, eccsize, 0);
}
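
/*
 * Read a page from the device using DMA with hardware ECC, then correct
 * any single-bit errors reported by the ECC comparison.
 */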
static int lpc32xx_nand_read_page_hwecc(struct mtd_info *mtd,
					struct nand_chip *chip, uint8_t *buf, int page)
{
	struct nand_chip *this = mtd->priv;
	struct lpc32xx_nand_host *host = this->priv;
	int i, eccsize = chip->ecc.size;
	int eccsteps = (mtd->writesize / NAND_ECC_SUBPAGE_LEN);
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint8_t *ecc_code = chip->buffers->ecccode;
	uint32_t *eccpos = chip->ecc.layout->eccpos;

	memset(host->ecc_calc_buf, 0x0, this->ecc.bytes);

	/* Enable hardware ECC and DMA */
	chip->ecc.hwctl(mtd, NAND_ECC_READ);

	/* Configure the DMA descriptors and perform the NAND read */
	lpc32xx_nand_dma_xfer(mtd, buf, eccsize, 1);

	/* Copy OOB data from DMA memory to the kernel buffer */
	memcpy(chip->oob_poi, host->oob_buf, mtd->oobsize);

	/* Copy only the ECC data that is stored in the flash */
	for (i = 0; i < chip->ecc.total; i++) {
		ecc_code[i] = chip->oob_poi[eccpos[i]];
		ecc_calc[i] = host->ecc_calc_buf[i];
	}

	/*
	 * The LPC3250 stores 4 bytes of ECC data per 256-byte block of data;
	 * eccsteps is calculated from the sub-page size.
	 */
	for (i = 0; eccsteps; eccsteps--, i += NAND_ECC_LEN_PER_SUBPAGE,
	     p += NAND_ECC_SUBPAGE_LEN) {

		/*
		 * Once a block is erased, all of its data, including the OOB
		 * area, reads back as 0xFF. The ECC generator always produces
		 * a zero ECC for such a page, while the stored value is
		 * 0xFFFFFFFF.
		 */
		if (*((uint32_t *)&ecc_code[i]) == NAND_ERASED_BLOCK_ECC_VALUE)

		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
			mtd->ecc_stats.failed++;
			mtd->ecc_stats.corrected += stat;
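
/*
 * The board file is expected to pass a struct lpc32XX_nand_cfg through
 * platform_data, supplying the SLC timing values (wdr_clks, wwidth, whold,
 * wsetup, rdr_clks, rwidth, rhold, rsetup) along with the optional
 * enable_write_prot() and partition_info() callbacks used below.
 */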
/*
 * Probe for the NAND controller
 */
static int __init lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
#ifdef CONFIG_MTD_PARTITIONS
	struct mtd_partition *partitions = NULL;
	int num_partitions = 0;
#endif

	/* Allocate memory for the device structure (and zero it) */
	host = kzalloc(sizeof(struct lpc32xx_nand_host), GFP_KERNEL);
		dev_err(&pdev->dev,
			"lpc32xx_nand: failed to allocate device structure\n");

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		dev_err(&pdev->dev, "No memory resource found for device!\n");

	host->io_base = ioremap(rc->start, rc->end - rc->start + 1);
	if (host->io_base == NULL) {
		dev_err(&pdev->dev, "lpc32xx_nand: ioremap failed\n");

	nand_chip = &host->nand_chip;
	host->ncfg = pdev->dev.platform_data;

	nand_chip->priv = host;		/* link the private data structures */
	mtd->priv = nand_chip;
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	host->clk = clk_get(&pdev->dev, "nand_ck");
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "lpc32xx_nand: Clock failure\n");

	clk_enable(host->clk);

	/* Set address of NAND IO lines */
	nand_chip->IO_ADDR_R = SLC_DATA(host->io_base);
	nand_chip->IO_ADDR_W = SLC_DATA(host->io_base);
	nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	nand_chip->dev_ready = lpc32xx_nand_device_ready;
	nand_chip->chip_delay = 20;	/* 20us command delay time */
	nand_chip->read_byte = lpc32xx_read_byte;
	nand_chip->read_buf = lpc32xx_read_buf;
	nand_chip->verify_buf = lpc32xx_verify_buf;
	nand_chip->write_buf = lpc32xx_write_buf;

	/* Init NAND controller */
	lpc32xx_nand_setup(host);
	lpc32xx_wp_disable(host);

	platform_set_drvdata(pdev, host);

	/*
	 * Scan to find the existence of the device and determine whether it
	 * is a small-block or large-block NAND device
	 */
	if (nand_scan_ident(mtd, 1)) {
	nand_chip->ecc.mode = NAND_ECC_HW;
	nand_chip->ecc.size = mtd->writesize;
	nand_chip->ecc.bytes = (mtd->writesize / 256) * 4;
	nand_chip->ecc.read_page_raw = lpc32xx_nand_read_page_hwecc;
	nand_chip->ecc.read_page = lpc32xx_nand_read_page_hwecc;
	nand_chip->ecc.write_page = lpc32xx_nand_write_page_hwecc;

	switch (mtd->oobsize) {
	case 16:
		nand_chip->ecc.layout = &lpc32xx_nand_oob_16;
		break;
	case 64:
		nand_chip->ecc.layout = &lpc32xx_nand_oob_64;
		break;
	default:
		dev_err(&pdev->dev, "No oob scheme defined for oobsize %d\n",
			mtd->oobsize);

	/* Hardware ECC specific functions */
	nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
	nand_chip->ecc.correct = lpc32xx_nand_correct_data;
	nand_chip->ecc.calculate = lpc32xx_ecc_calculate;

	/*
	 * Fill out all the uninitialized function pointers with the defaults
	 * and scan for a bad block table if appropriate.
	 */
	if (nand_scan_tail(mtd)) {

	/* Get a free DMA channel and allocate the DMA descriptor link list */
	res = lpc32xx_nand_dma_setup(host, ((mtd->writesize / 128) + 1));
	/* Allocate the DMA buffer */
	host->dma_buf_len =
		(/* OOB size area for storing OOB data including ECC */
		 mtd->oobsize +
		 /* Page size area for storing raw page data */
		 mtd->writesize +
		 /* ECC bytes area for storing the ECC calculated while reading a page */
		 nand_chip->ecc.bytes);

	host->oob_buf = dmam_alloc_coherent(&pdev->dev, host->dma_buf_len,
					    &host->oob_buf_phy, GFP_KERNEL);
	if (host->oob_buf == NULL) {
		dev_err(&pdev->dev, "Unable to allocate DMA memory!\n");

	host->dma_buf = (uint8_t *)host->oob_buf + mtd->oobsize;
	host->ecc_calc_buf = (uint8_t *)host->dma_buf + mtd->writesize;

	host->dma_buf_phy = host->oob_buf_phy + mtd->oobsize;
	host->ecc_calc_buf_phy = host->dma_buf_phy + mtd->writesize;

	host->io_base_phy = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
	/*
	 * Allocate a page-sized buffer used to check for all-0xFF data
	 * at page write time
	 */
	host->erase_buf_data = kmalloc(mtd->writesize, GFP_KERNEL);
	if (!host->erase_buf_data) {
		dev_err(&pdev->dev,
			"lpc32xx_nand: failed to allocate erase check buffer\n");

	memset(host->erase_buf_data, 0xFF, mtd->writesize);
	init_completion(&host->comp);

#ifdef CONFIG_MTD_PARTITIONS
#ifdef CONFIG_MTD_CMDLINE_PARTS
	mtd->name = "lpc32xx_nand";
	num_partitions = parse_mtd_partitions(mtd, part_probes,

	if ((num_partitions <= 0) && (host->ncfg->partition_info)) {
		partitions = host->ncfg->partition_info(mtd->size,

	if ((!partitions) || (num_partitions == 0)) {
		dev_err(&pdev->dev,
			"lpc32xx_nand: No partitions defined, or unsupported device.\n");

	res = add_mtd_partitions(mtd, partitions, num_partitions);

	res = add_mtd_device(mtd);
	kfree(host->erase_buf_data);

	dma_free_coherent(&pdev->dev, host->dma_buf_len,
			  host->oob_buf, host->oob_buf_phy);

	/* Free the DMA channel used by us */
	lpc32xx_dma_ch_disable(host->dmach);
	lpc32xx_dma_dealloc_llist(host->dmach);
	lpc32xx_dma_ch_put(host->dmach);

	clk_disable(host->clk);

	platform_set_drvdata(pdev, NULL);

	lpc32xx_wp_enable(host);
	iounmap(host->io_base);
/*
 * Remove the NAND device
 */
static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct mtd_info *mtd = &host->mtd;

	/* Free the DMA channel used by us */
	lpc32xx_dma_ch_disable(host->dmach);
	lpc32xx_dma_dealloc_llist(host->dmach);
	lpc32xx_dma_ch_put(host->dmach);

	dma_free_coherent(&pdev->dev, host->dma_buf_len,
			  host->oob_buf, host->oob_buf_phy);
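
	/* Force CE high */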
	tmp = __raw_readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	__raw_writel(tmp, SLC_CTRL(host->io_base));

	lpc32xx_wp_enable(host);
	clk_disable(host->clk);

	iounmap(host->io_base);

	kfree(host->erase_buf_data);
#if defined(CONFIG_PM)
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Re-enable NAND clock */
	clk_enable(host->clk);

	/* Fresh init of the NAND controller */
	lpc32xx_nand_setup(host);

	/* Disable write protect */
	lpc32xx_wp_disable(host);

	return 0;
}
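
/* Suspend: deassert CE, enable write protect and gate the SLC clock */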
static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
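
	/* Force CE high */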
	tmp = __raw_readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	__raw_writel(tmp, SLC_CTRL(host->io_base));

	/* Enable write protect for safety */
	lpc32xx_wp_enable(host);

	clk_disable(host->clk);

	return 0;
}
#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif
static struct platform_driver lpc32xx_nand_driver = {
	.probe		= lpc32xx_nand_probe,
	.remove		= __devexit_p(lpc32xx_nand_remove),
	.resume		= lpc32xx_nand_resume,
	.suspend	= lpc32xx_nand_suspend,
	.driver		= {
		.name	= "lpc32xx-nand",
		.owner	= THIS_MODULE,
	},
};
static int __init lpc32xx_nand_init(void)
{
	return platform_driver_register(&lpc32xx_nand_driver);
}

static void __exit lpc32xx_nand_exit(void)
{
	platform_driver_unregister(&lpc32xx_nand_driver);
}

module_init(lpc32xx_nand_init);
module_exit(lpc32xx_nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");