blob: d529467ebc84599194f3a52ca6b1e46075bd6999 [file] [log] [blame]
/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * SPDX-License-Identifier: GPL-2.0
 */
9
10#include <common.h>
11#include <malloc.h>
12#include <nand.h>
13#include <asm/errno.h>
14#include <asm/io.h>
15#include <asm/arch/cpu.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/nand.h>
18#include <linux/types.h>
19
20#include "pxa3xx_nand.h"
21
#define TIMEOUT_DRAIN_FIFO	5	/* in ms */
#define	CHIP_DELAY_TIMEOUT	200	/* in ms (compared against get_timer()) */
#define NAND_STOP_DELAY		40	/* not referenced in this file chunk */
#define PAGE_CHUNK_SIZE		(2048)	/* controller transfers pages in 2 KiB chunks */

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM. The largest of these is the PARAM command,
 * needing 256 bytes.
 */
#define INIT_BUFFER_SIZE	256
33
/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */
47
/*
 * NDCR (control register) bit definitions.
 * Bits 31..28 use unsigned constants: (0x1 << 31) left-shifts into the
 * sign bit of a signed int, which is undefined behaviour in C.
 */
#define NDCR_SPARE_EN		(0x1u << 31)
#define NDCR_ECC_EN		(0x1u << 30)
#define NDCR_DMA_EN		(0x1u << 29)
#define NDCR_ND_RUN		(0x1u << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)
67
/* NDSR: status register; bits are written back to NDSR to acknowledge them */
#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

/* NDCB0: first word of the (3 or 4 word) command buffer */
#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
/*
 * NOTE(review): (x) << 29 with x >= 4 (e.g. EXT_CMD_TYPE_DISPATCH = 6)
 * shifts into the sign bit of int — consider using 0x7u / (x##u) here.
 */
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

/* Extended command types for chunked (multi-transfer) page operations */
#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */
/* macros for registers read/write: offsets are relative to mmio_base */
#define nand_writel(info, off, val)	\
	writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl((info)->mmio_base + (off))
116
/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* DMA bus error */
	ERR_SENDCMD	= -2,	/* command issue failed */
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error */
	ERR_BBERR	= -4,	/* bad block detected */
	ERR_CORERR	= -5,	/* corrected ECC error(s) */
};
126
/* command state machine, tracked in info->state */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,		/* NDCBx values generated, not yet issued */
	STATE_CMD_HANDLE,	/* command buffer being loaded */
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,	/* draining the data FIFO */
	STATE_PIO_WRITING,	/* filling the data FIFO */
	STATE_CMD_DONE,
	STATE_READY,
};
139
/* Controller generation: NFCv1 (PXA SoC) vs NFCv2 (Armada 370/XP) */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
144
/* Per-chip-select state: one instance per attached NAND chip. */
struct pxa3xx_nand_host {
	struct nand_chip chip;
	struct mtd_info *mtd;
	void *info_data;	/* back-pointer to struct pxa3xx_nand_info */

	/* page size of attached chip */
	int use_ecc;
	int cs;			/* chip-select line this chip sits on */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int col_addr_cycles;
	unsigned int row_addr_cycles;
	size_t read_id_bytes;

};
160
/* Controller-wide state, shared by all chip selects. */
struct pxa3xx_nand_info {
	struct nand_hw_control controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk *clk;
	void __iomem *mmio_base;	/* controller register window */
	unsigned long mmio_phys;
	int cmd_complete, dev_ready;	/* polled completion flags, set by the IRQ handler */

	unsigned int buf_start;		/* read/write cursor into data_buff */
	unsigned int buf_count;		/* valid bytes in data_buff */
	unsigned int buf_size;
	unsigned int data_buff_pos;	/* chunk offset during multi-chunk PIO */
	unsigned int oob_buff_pos;

	unsigned char *data_buff;
	unsigned char *oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int state;		/* STATE_* of the command state machine */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int cs;			/* currently selected chip select */
	int use_ecc;		/* use HW ECC ? */
	int ecc_bch;		/* using BCH ECC? */
	int use_spare;		/* use spare ? */
	int need_wait;

	unsigned int data_size;	/* data to be read from FIFO */
	unsigned int chunk_size;	/* split commands chunk size */
	unsigned int oob_size;
	unsigned int spare_size;
	unsigned int ecc_size;
	unsigned int ecc_err_cnt;
	unsigned int max_bitflips;
	int retcode;		/* ERR_* result of the last command */

	/* cached register value */
	uint32_t reg_ndcr;
	uint32_t ndtr0cs0;
	uint32_t ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t ndcb0;
	uint32_t ndcb1;
	uint32_t ndcb2;
	uint32_t ndcb3;
};
214
/*
 * Legacy per-chip timings, in ns.
 * Values are presumably { tCH, tCS, tWH, tWP, tRH, tRP, tR, tWHR, tAR }
 * matching the fields read by pxa3xx_nand_set_timing() — confirm against
 * struct pxa3xx_nand_timing in pxa3xx_nand.h.
 */
static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};
221
/*
 * Fallback table for non-ONFI chips, looked up by the 16-bit READID value.
 * Fields used here: chip_id, flash_width, dfc_width and a timing[] entry
 * (see pxa3xx_nand_init_timings()); exact field order per pxa3xx_nand.h.
 */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
};
232
/* On-flash bad block table signatures ("MVBbt0" and its mirror) */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* Main BBT: stored in the last blocks of the chip, versioned, 2 bits/block */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

/* Mirror BBT: identical layout, distinct signature */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
255
/* OOB layout for 2 KiB pages with 4-bit BCH: ECC in the upper half of OOB */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

/* OOB layout for 4 KiB pages with 4-bit BCH: two ECC regions, one per chunk */
static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

/* OOB layout for 4 KiB pages with 8-bit BCH: no free OOB bytes remain */
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63},
	.oobfree = { }
};
290
/* NDTR0/NDTR1 field encoders; each value is clamped to its field width */
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
304
/* Report which controller generation is present; this build is NFCv2-only. */
static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
{
	/* We only support the Armada 370/XP/38x for now */
	return PXA3XX_NAND_VARIANT_ARMADA370;
}
310
/*
 * Program the CS0 timing registers from a legacy (ns-based) timing entry,
 * converting each value to controller clock cycles. The results are also
 * cached in info->ndtr0cs0/ndtr1cs0 so they can be replayed when the
 * active chip select changes (see nand_cmdfunc()).
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
334
335static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
336 const struct nand_sdr_timings *t)
337{
338 struct pxa3xx_nand_info *info = host->info_data;
339 struct nand_chip *chip = &host->chip;
340 unsigned long nand_clk = mvebu_get_nand_clock();
341 uint32_t ndtr0, ndtr1;
342
343 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
344 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
345 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
346 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - tWH_min, 1000);
347 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
348 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - tREH_min, 1000);
349 u32 tR = chip->chip_delay * 1000;
350 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
351 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
352
353 /* fallback to a default value if tR = 0 */
354 if (!tR)
355 tR = 20000;
356
357 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
358 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
359 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
360 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
361 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
362 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
363
364 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
365 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
366 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
367
368 info->ndtr0cs0 = ndtr0;
369 info->ndtr1cs0 = ndtr1;
370 nand_writel(info, NDTR0CS0, ndtr0);
371 nand_writel(info, NDTR1CS0, ndtr1);
372}
373
/*
 * Configure NAND timings: prefer ONFI-advertised SDR timings; for
 * non-ONFI chips, fall back to the builtin_flash_types[] table keyed
 * by the 16-bit READID value.
 *
 * Returns 0 on success, -EINVAL if the chip id is unknown, or a
 * PTR_ERR from the ONFI timing-mode lookup.
 *
 * NOTE(review): info->pdev is not declared in the struct visible in this
 * file; dev_err() presumably discards its dev argument in this U-Boot
 * build — confirm.
 */
static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		/* Non-ONFI chip: identify it by its two READID bytes */
		ntypes = ARRAY_SIZE(builtin_flash_types);

		chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);

		id = chip->read_byte(host->mtd);
		id |= chip->read_byte(host->mtd) << 0x8;

		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)
				break;
		}

		if (i == ntypes) {
			dev_err(&info->pdev->dev, "Error: timings not found\n");
			return -EINVAL;
		}

		pxa3xx_nand_set_timing(host, f->timing);

		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		/* Use the highest timing mode the chip advertises */
		mode = fls(mode) - 1;
		if (mode < 0)
			mode = 0;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);
		if (IS_ERR(timings))
			return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
	}

	return 0;
}
425
426/*
427 * Set the data and OOB size, depending on the selected
428 * spare and ECC configuration.
429 * Only applicable to READ0, READOOB and PAGEPROG commands.
430 */
431static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
432 struct mtd_info *mtd)
433{
434 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
435
436 info->data_size = mtd->writesize;
437 if (!oob_enable)
438 return;
439
440 info->oob_size = info->spare_size;
441 if (!info->use_ecc)
442 info->oob_size += info->ecc_size;
443}
444
/**
 * NOTE: ND_RUN must be set first and the command buffer written
 * afterwards; otherwise the controller does not work.
 * All interrupts are enabled at the same time, and
 * pxa3xx_nand_irq() handles all the logic.
 */
451static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
452{
453 uint32_t ndcr;
454
455 ndcr = info->reg_ndcr;
456
457 if (info->use_ecc) {
458 ndcr |= NDCR_ECC_EN;
459 if (info->ecc_bch)
460 nand_writel(info, NDECCCTRL, 0x1);
461 } else {
462 ndcr &= ~NDCR_ECC_EN;
463 if (info->ecc_bch)
464 nand_writel(info, NDECCCTRL, 0x0);
465 }
466
467 ndcr &= ~NDCR_DMA_EN;
468
469 if (info->use_spare)
470 ndcr |= NDCR_SPARE_EN;
471 else
472 ndcr &= ~NDCR_SPARE_EN;
473
474 ndcr |= NDCR_ND_RUN;
475
476 /* clear status bits and run */
477 nand_writel(info, NDCR, 0);
478 nand_writel(info, NDSR, NDSR_MASK);
479 nand_writel(info, NDCR, ndcr);
480}
481
482static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
483{
484 uint32_t ndcr;
485
486 ndcr = nand_readl(info, NDCR);
487 nand_writel(info, NDCR, ndcr | int_mask);
488}
489
/*
 * Read 'len' 32-bit words from the controller data FIFO into 'data'.
 * With BCH enabled the FIFO must be drained in 32-byte bursts, waiting
 * for NDSR.RDDREQ between bursts (see comment below).
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 ts;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ts = get_timer(0);
			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
					dev_err(&info->pdev->dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}
			}

			/* 8 words = 32 bytes consumed */
			data += 32;
			len -= 8;
		}
	}

	/* final (or only) burst, no RDDREQ polling needed */
	readsl(info->mmio_base + NDDB, data, len);
}
522
/*
 * Transfer one chunk (plus its OOB bytes) between the driver buffers and
 * the controller FIFO, in the direction given by info->state. Advances
 * the buffer cursors so multi-chunk pages are handled incrementally.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	/* never transfer more than one chunk at a time */
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
559
/*
 * Bottom half of the (polled) interrupt: do the actual PIO transfer,
 * then acknowledge the data-request bits in NDSR.
 */
static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
567
/*
 * Interrupt handler, called from polling loops whenever NDSR is non-zero.
 * Decodes ECC results, services data requests (via the "thread" half),
 * loads the command buffer on WRCMDREQ, acknowledges the handled status
 * bits, and latches cmd_complete/dev_ready for the pollers.
 */
static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* ready/command-done bits differ per chip select */
	if (info->cs == 0) {
		ready           = NDSR_FLASH_RDY;
		cmd_done        = NDSR_CS0_CMDD;
	} else {
		ready           = NDSR_RDY;
		cmd_done        = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* NFCv2 with BCH reports the corrected-bit count in NDSR */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		/* data bits were acknowledged by the thread half */
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	if (status & NDSR_WRCMDREQ) {
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}
650
/* Return 1 if every byte of buf[0..len) is 0xff (erased), else 0. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}

	return 1;
}
658
659static void set_command_address(struct pxa3xx_nand_info *info,
660 unsigned int page_size, uint16_t column, int page_addr)
661{
662 /* small page addr setting */
663 if (page_size < PAGE_CHUNK_SIZE) {
664 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
665 | (column & 0xFF);
666
667 info->ndcb2 = 0;
668 } else {
669 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
670 | (column & 0xFFFF);
671
672 if (page_addr & 0xFF0000)
673 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
674 else
675 info->ndcb2 = 0;
676 }
677}
678
/*
 * Reset the per-command bookkeeping and apply per-command defaults
 * (ECC on/off, spare on/off, transfer sizes) before NDCBx generation.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->oob_size		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		/* fall through: READOOB shares the datasize setup, sans ECC */
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		/* PARAM data is raw; the spare area must stay disabled */
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}
724
/*
 * Translate a NAND core command into NDCB0..NDCB3 values.
 *
 * Returns 1 when the controller must actually execute the command,
 * 0 when nothing needs to be issued (SEQIN address latch, blank-page
 * program, ERASE2, unsupported commands).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	/* NDCB0_CSEL selects chip-select 1 instead of 0 */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/* SEQIN only stages the address; PAGEPROG issues the program */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
			+ host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB data lives after the page data in data_buff */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			/* NDCB3 carries the overridden transfer length */
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* skip the program entirely for an all-0xff (erased) buffer */
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* small page: single SEQIN+PAGEPROG double-byte cmd */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = 256;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = 256;
		info->data_size = 256;
		break;

	case NAND_CMD_READID:
		info->buf_count = host->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* double-byte command: ERASE1 + ERASE2 issued together */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* already issued as the second byte of ERASE1 above */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}
899
/*
 * cmdfunc hook for chips whose pages fit in a single controller chunk:
 * stage the NDCBx values, start the controller, and poll NDSR (driving
 * pxa3xx_nand_irq()) until the command completes or times out.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device ,then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		/* poll NDSR in place of a real interrupt line */
		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}
958
/*
 * cmdfunc hook for large pages that must be split into several chunked
 * transfers (NFCv2 extended commands). Repeats the prepare/start/poll
 * cycle, advancing the extended command type (monolithic -> naked ->
 * last / dispatch) until the whole page has been transferred.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	do {
		u32 ts;

		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			/* nothing issued: report ready immediately */
			info->need_wait = 0;
			info->dev_ready = 1;
			break;
		}

		info->cmd_complete = 0;
		pxa3xx_nand_start(info);

		/* poll NDSR in place of a real interrupt line */
		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1080
/*
 * ecc.write_page hook: stage page data and OOB into the driver buffer;
 * the actual program (with HW ECC) happens on the NAND_CMD_PAGEPROG
 * that the NAND core issues afterwards.
 */
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
1089
/*
 * ecc.read_page hook: copy the already-transferred page + OOB out of the
 * driver buffer and fold the HW ECC result (set by pxa3xx_nand_irq())
 * into the MTD ECC statistics. Returns the max bitflip count per chunk.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * for blank page (all 0xff), HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * OOB, ignore such uncorrectable errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1117
1118static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1119{
1120 struct pxa3xx_nand_host *host = mtd->priv;
1121 struct pxa3xx_nand_info *info = host->info_data;
1122 char retval = 0xFF;
1123
1124 if (info->buf_start < info->buf_count)
1125 /* Has just send a new command? */
1126 retval = info->data_buff[info->buf_start++];
1127
1128 return retval;
1129}
1130
1131static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1132{
1133 struct pxa3xx_nand_host *host = mtd->priv;
1134 struct pxa3xx_nand_info *info = host->info_data;
1135 u16 retval = 0xFFFF;
1136
1137 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1138 retval = *((u16 *)(info->data_buff+info->buf_start));
1139 info->buf_start += 2;
1140 }
1141 return retval;
1142}
1143
1144static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1145{
1146 struct pxa3xx_nand_host *host = mtd->priv;
1147 struct pxa3xx_nand_info *info = host->info_data;
1148 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1149
1150 memcpy(buf, info->data_buff + info->buf_start, real_len);
1151 info->buf_start += real_len;
1152}
1153
1154static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1155 const uint8_t *buf, int len)
1156{
1157 struct pxa3xx_nand_host *host = mtd->priv;
1158 struct pxa3xx_nand_info *info = host->info_data;
1159 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1160
1161 memcpy(info->data_buff + info->buf_start, buf, real_len);
1162 info->buf_start += real_len;
1163}
1164
/* Chip selection is handled via info->cs at command time; nothing to do. */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1169
/*
 * nand_chip .waitfunc hook: if a previous command flagged need_wait,
 * poll NDSR (feeding events to pxa3xx_nand_irq) until the device-ready
 * event is seen or CHIP_DELAY_TIMEOUT ms elapse.
 *
 * Returns NAND_STATUS_FAIL on timeout or on a failed program/erase,
 * 0 for a successful program/erase, NAND_STATUS_READY otherwise.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		u32 ts;

		/* Consume the flag so the next call doesn't re-poll */
		info->need_wait = 0;

		ts = get_timer(0);
		while (1) {
			u32 status;

			/* Any pending status bits are handled by the IRQ
			 * routine, which also updates info->dev_ready. */
			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->dev_ready)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Ready timeout!!!\n");
				return NAND_STATUS_FAIL;
			}
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
1208
1209static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
1210{
1211 struct pxa3xx_nand_host *host = info->host[info->cs];
1212 struct mtd_info *mtd = host->mtd;
1213 struct nand_chip *chip = mtd->priv;
1214
1215 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1216 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1217 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1218
1219 return 0;
1220}
1221
1222static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1223{
1224 /*
1225 * We set 0 by hard coding here, for we don't support keep_config
1226 * when there is more than one chip attached to the controller
1227 */
1228 struct pxa3xx_nand_host *host = info->host[0];
1229 uint32_t ndcr = nand_readl(info, NDCR);
1230
1231 if (ndcr & NDCR_PAGE_SZ) {
1232 /* Controller's FIFO size */
1233 info->chunk_size = 2048;
1234 host->read_id_bytes = 4;
1235 } else {
1236 info->chunk_size = 512;
1237 host->read_id_bytes = 2;
1238 }
1239
1240 /* Set an initial chunk size */
1241 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1242 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1243 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1244 return 0;
1245}
1246
1247static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1248{
1249 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1250 if (info->data_buff == NULL)
1251 return -ENOMEM;
1252 return 0;
1253}
1254
/*
 * Minimal probe for a chip on the current CS: program default NDCR
 * flags and conservative ONFI mode-0 timings, then issue a RESET and
 * check that the device answers.
 *
 * Returns 0 if a chip responds, -ENODEV if the reset fails, or the
 * error from the timing-mode lookup.
 */
static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;
	int ret;

	mtd = info->host[info->cs]->mtd;
	chip = mtd->priv;

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	/* A RESET that completes without failure means a chip is present */
	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}
1287
1288static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1289 struct nand_ecc_ctrl *ecc,
1290 int strength, int ecc_stepsize, int page_size)
1291{
1292 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1293 info->chunk_size = 2048;
1294 info->spare_size = 40;
1295 info->ecc_size = 24;
1296 ecc->mode = NAND_ECC_HW;
1297 ecc->size = 512;
1298 ecc->strength = 1;
1299
1300 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1301 info->chunk_size = 512;
1302 info->spare_size = 8;
1303 info->ecc_size = 8;
1304 ecc->mode = NAND_ECC_HW;
1305 ecc->size = 512;
1306 ecc->strength = 1;
1307
1308 /*
1309 * Required ECC: 4-bit correction per 512 bytes
1310 * Select: 16-bit correction per 2048 bytes
1311 */
1312 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1313 info->ecc_bch = 1;
1314 info->chunk_size = 2048;
1315 info->spare_size = 32;
1316 info->ecc_size = 32;
1317 ecc->mode = NAND_ECC_HW;
1318 ecc->size = info->chunk_size;
1319 ecc->layout = &ecc_layout_2KB_bch4bit;
1320 ecc->strength = 16;
1321
1322 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1323 info->ecc_bch = 1;
1324 info->chunk_size = 2048;
1325 info->spare_size = 32;
1326 info->ecc_size = 32;
1327 ecc->mode = NAND_ECC_HW;
1328 ecc->size = info->chunk_size;
1329 ecc->layout = &ecc_layout_4KB_bch4bit;
1330 ecc->strength = 16;
1331
1332 /*
1333 * Required ECC: 8-bit correction per 512 bytes
1334 * Select: 16-bit correction per 1024 bytes
1335 */
1336 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1337 info->ecc_bch = 1;
1338 info->chunk_size = 1024;
1339 info->spare_size = 0;
1340 info->ecc_size = 32;
1341 ecc->mode = NAND_ECC_HW;
1342 ecc->size = info->chunk_size;
1343 ecc->layout = &ecc_layout_4KB_bch8bit;
1344 ecc->strength = 16;
1345 } else {
1346 dev_err(&info->pdev->dev,
1347 "ECC strength %d at page size %d is not supported\n",
1348 strength, page_size);
1349 return -ENODEV;
1350 }
1351
1352 return 0;
1353}
1354
/*
 * Probe and configure the chip on the currently selected CS
 * (info->cs): detect/keep the controller configuration, identify the
 * device, program timings and ECC, compute addressing cycles and
 * allocate the page-sized data+OOB buffer.
 *
 * Returns 0 on success or a negative error code.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct nand_chip *chip = mtd->priv;
	int ret;
	uint16_t ecc_strength, ecc_step;

	/* Reuse the configuration left by the bootloader when requested */
	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	/* Set a default chunk size */
	info->chunk_size = 512;

	/* Reset the chip to verify something answers on this CS */
	ret = pxa3xx_nand_sensing(host);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);

		return ret;
	}

KEEP_CONFIG:
	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init_timings(host);
		if (ret) {
			dev_err(&info->pdev->dev,
				"Failed to set timings: %d\n", ret);
			return ret;
		}
	}

	ret = pxa3xx_nand_config_flash(info);
	if (ret)
		return ret;

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * We'll use a bad block table stored in-flash and don't
	 * allow writing the bad block marker to the flash.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;
#endif

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* Platform ECC settings override the chip's advertised ones */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* chips with more than 65536 pages need a third row address cycle */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
	return nand_scan_tail(mtd);
}
1465
1466static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1467{
1468 struct pxa3xx_nand_platform_data *pdata;
1469 struct pxa3xx_nand_host *host;
1470 struct nand_chip *chip = NULL;
1471 struct mtd_info *mtd;
1472 int ret, cs;
1473
1474 pdata = info->pdata;
1475 if (pdata->num_cs <= 0)
1476 return -ENODEV;
1477
1478 info->variant = pxa3xx_nand_get_variant();
1479 for (cs = 0; cs < pdata->num_cs; cs++) {
1480 mtd = &nand_info[cs];
Kevin Smith84caff32016-01-14 16:01:38 +00001481 chip = (struct nand_chip *)
1482 ((u8 *)&info[1] + sizeof(*host) * cs);
Stefan Roese873960c2015-07-23 10:26:16 +02001483 host = (struct pxa3xx_nand_host *)chip;
1484 info->host[cs] = host;
1485 host->mtd = mtd;
1486 host->cs = cs;
1487 host->info_data = info;
1488 host->read_id_bytes = 4;
1489 mtd->priv = host;
1490 mtd->owner = THIS_MODULE;
1491
1492 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1493 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1494 chip->controller = &info->controller;
1495 chip->waitfunc = pxa3xx_nand_waitfunc;
1496 chip->select_chip = pxa3xx_nand_select_chip;
1497 chip->read_word = pxa3xx_nand_read_word;
1498 chip->read_byte = pxa3xx_nand_read_byte;
1499 chip->read_buf = pxa3xx_nand_read_buf;
1500 chip->write_buf = pxa3xx_nand_write_buf;
1501 chip->options |= NAND_NO_SUBPAGE_WRITE;
1502 chip->cmdfunc = nand_cmdfunc;
1503 }
1504
1505 info->mmio_base = (void __iomem *)MVEBU_NAND_BASE;
1506
1507 /* Allocate a buffer to allow flash detection */
1508 info->buf_size = INIT_BUFFER_SIZE;
1509 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1510 if (info->data_buff == NULL) {
1511 ret = -ENOMEM;
1512 goto fail_disable_clk;
1513 }
1514
1515 /* initialize all interrupts to be disabled */
1516 disable_int(info, NDSR_MASK);
1517
1518 return 0;
1519
1520 kfree(info->data_buff);
1521fail_disable_clk:
1522 return ret;
1523}
1524
1525static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1526{
1527 struct pxa3xx_nand_platform_data *pdata;
1528
1529 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1530 if (!pdata)
1531 return -ENOMEM;
1532
1533 pdata->enable_arbiter = 1;
1534 pdata->num_cs = 1;
1535
1536 info->pdata = pdata;
1537
1538 return 0;
1539}
1540
1541static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1542{
1543 struct pxa3xx_nand_platform_data *pdata;
1544 int ret, cs, probe_success;
1545
1546 ret = pxa3xx_nand_probe_dt(info);
1547 if (ret)
1548 return ret;
1549
1550 pdata = info->pdata;
1551
1552 ret = alloc_nand_resource(info);
1553 if (ret) {
1554 dev_err(&pdev->dev, "alloc nand resource failed\n");
1555 return ret;
1556 }
1557
1558 probe_success = 0;
1559 for (cs = 0; cs < pdata->num_cs; cs++) {
1560 struct mtd_info *mtd = info->host[cs]->mtd;
1561
1562 /*
1563 * The mtd name matches the one used in 'mtdparts' kernel
1564 * parameter. This name cannot be changed or otherwise
1565 * user's mtd partitions configuration would get broken.
1566 */
1567 mtd->name = "pxa3xx_nand-0";
1568 info->cs = cs;
1569 ret = pxa3xx_nand_scan(mtd);
1570 if (ret) {
1571 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1572 cs);
1573 continue;
1574 }
1575
1576 if (!ret)
1577 probe_success = 1;
1578 }
1579
1580 if (!probe_success)
1581 return -ENODEV;
1582
1583 return 0;
1584}
1585
1586/*
1587 * Main initialization routine
1588 */
1589void board_nand_init(void)
1590{
1591 struct pxa3xx_nand_info *info;
1592 struct pxa3xx_nand_host *host;
1593 int ret;
1594
Kevin Smith065a3732016-01-14 16:01:39 +00001595 info = kzalloc(sizeof(*info) +
1596 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1597 GFP_KERNEL);
Stefan Roese873960c2015-07-23 10:26:16 +02001598 if (!info)
1599 return;
1600
Stefan Roese873960c2015-07-23 10:26:16 +02001601 ret = pxa3xx_nand_probe(info);
1602 if (ret)
1603 return;
1604
1605 nand_register(0);
1606}