// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 NAND flash driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Freescale GPMI NFC NAND Flash Driver
 *
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 */

#include <common.h>
#include <dm.h>
#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <malloc.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/mach-imx/regs-bch.h>
#include <asm/mach-imx/regs-gpmi.h>
#include <asm/arch/sys_proto.h>
#include "mxs_nand.h"

#define MXS_NAND_DMA_DESCRIPTOR_COUNT		4

#if (defined(CONFIG_MX6) || defined(CONFIG_MX7))
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	2
#else
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	0
#endif
#define MXS_NAND_METADATA_SIZE			10
#define MXS_NAND_BITS_PER_ECC_LEVEL		13

#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
#define MXS_NAND_COMMAND_BUFFER_SIZE		32
#else
#define MXS_NAND_COMMAND_BUFFER_SIZE		CONFIG_SYS_CACHELINE_SIZE
#endif

#define MXS_NAND_BCH_TIMEOUT			10000

struct nand_ecclayout fake_ecc_layout;

/*
 * Cache management functions
 */
#ifndef CONFIG_SYS_DCACHE_OFF
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uint32_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uint32_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uint32_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif

static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
	struct mxs_dma_desc *desc;

	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
		printf("MXS NAND: Too many DMA descriptors requested\n");
		return NULL;
	}

	desc = info->desc[info->desc_index];
	info->desc_index++;

	return desc;
}

static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
{
	int i;
	struct mxs_dma_desc *desc;

	for (i = 0; i < info->desc_index; i++) {
		desc = info->desc[i];
		memset(desc, 0, sizeof(struct mxs_dma_desc));
		desc->address = (dma_addr_t)desc;
	}

	info->desc_index = 0;
}

static uint32_t mxs_nand_aux_status_offset(void)
{
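	/*
	 * The ECC status bytes follow the metadata in the auxiliary buffer;
	 * round the 10-byte metadata area up to the next 4-byte boundary
	 * (10 -> 12), so the status bytes start at offset 12.
	 */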
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}

static inline int mxs_nand_calc_mark_offset(struct bch_geometry *geo,
					    uint32_t page_data_size)
{
	uint32_t chunk_data_size_in_bits = geo->ecc_chunk_size * 8;
	uint32_t chunk_ecc_size_in_bits = geo->ecc_strength * geo->gf_len;
	uint32_t chunk_total_size_in_bits;
	uint32_t block_mark_chunk_number;
	uint32_t block_mark_chunk_bit_offset;
	uint32_t block_mark_bit_offset;

	chunk_total_size_in_bits =
			chunk_data_size_in_bits + chunk_ecc_size_in_bits;

	/* Compute the bit offset of the block mark within the physical page. */
	block_mark_bit_offset = page_data_size * 8;

	/* Subtract the metadata bits. */
	block_mark_bit_offset -= MXS_NAND_METADATA_SIZE * 8;

	/*
	 * Compute the chunk number (starting at zero) in which the block mark
	 * appears.
	 */
	block_mark_chunk_number =
			block_mark_bit_offset / chunk_total_size_in_bits;

	/*
	 * Compute the bit offset of the block mark within its chunk, and
	 * validate it.
	 */
	block_mark_chunk_bit_offset = block_mark_bit_offset -
			(block_mark_chunk_number * chunk_total_size_in_bits);

	if (block_mark_chunk_bit_offset > chunk_data_size_in_bits)
		return -EINVAL;

	/*
	 * Now that we know the chunk number in which the block mark appears,
	 * we can subtract all the ECC bits that appear before it.
	 */
	block_mark_bit_offset -=
			block_mark_chunk_number * chunk_ecc_size_in_bits;

	geo->block_mark_byte_offset = block_mark_bit_offset >> 3;
	geo->block_mark_bit_offset = block_mark_bit_offset & 0x7;

	return 0;
}

static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
						   struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);

	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -ENOTSUPP;

	switch (chip->ecc_step_ds) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		return -EINVAL;
	}

	geo->ecc_chunk_size = chip->ecc_step_ds;
	geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);

	/* Keep the ECC chunk size (C) >= the OOB size (O) */
	if (geo->ecc_chunk_size < mtd->oobsize)
		return -EINVAL;

	if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
		return -EINVAL;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	return 0;
}

static inline int mxs_nand_calc_ecc_layout(struct bch_geometry *geo,
					   struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk_size = 512;

	if (geo->ecc_chunk_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk_size *= 2;
	}

	if (mtd->oobsize > geo->ecc_chunk_size) {
		printf("NAND chips with an OOB size larger than %d bytes are not supported!\n",
		       geo->ecc_chunk_size);
		return -EINVAL;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/*
	 * Determine the ECC layout with the formula:
	 *	ECC bits per chunk = (total page spare data bits) /
	 *		(bits per ECC level) / (chunks per page)
	 * where:
	 *	total page spare data bits =
	 *		(page oob size - meta data size) * (bits per byte)
	 */
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			/ (geo->gf_len * geo->ecc_chunk_count);

	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);
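	/*
	 * Worked example (a hypothetical 2048+64 byte page): ecc_chunk_count
	 * is 2048 / 512 = 4, so ecc_strength = ((64 - 10) * 8) / (13 * 4) = 8,
	 * which is already even and well below the controller maximum.
	 */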

	return 0;
}

/*
 * Wait for BCH complete IRQ and clear the IRQ
 */
static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
{
	int timeout = MXS_NAND_BCH_TIMEOUT;
	int ret;

	ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
				BCH_CTRL_COMPLETE_IRQ, timeout);

	writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);

	return ret;
}

/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these function pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}

/*
 * Test if the NAND flash is ready.
 */
static int mxs_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	uint32_t tmp;

	tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);

	return tmp & 1;
}

/*
 * Select the NAND chip.
 */
static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->cur_chip = chip;
}

/*
 * Handle block mark swapping.
 *
 * Note that, when this function is called, it doesn't know whether it's
 * swapping the block mark, or swapping it *back* -- but it doesn't matter
 * because the operation is the same.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

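	/*
	 * Write the saved OOB byte back into the data area, splitting it
	 * across the same two bytes it was extracted from above.
	 */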
	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}

/*
 * Read data from NAND.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready.
	 *
	 * I think we actually should *not* be waiting for the chip to become
	 * ready because, after all, we don't care. I think the original code
	 * did that and no one has re-thought it yet.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Write data to NAND.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
			       int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Read a single byte from NAND.
 */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t buf;
	mxs_nand_read_buf(mtd, &buf, 1);
	return buf;
}

/*
 * Read a page from NAND.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
				  uint8_t *buf, int oob_required,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t *status;
	int i, ret;

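	/*
	 * The page read is carried out by a chain of four DMA descriptors:
	 * wait for the chip to become ready, read the page through the BCH
	 * engine, disable the BCH block again, and finally release the NAND
	 * lock and raise the completion interrupt.
	 */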
	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Loop over status bytes, accumulating ECC status. */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
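	/*
	 * Each chunk has one status byte: 0x00 means no errors, 0xff means
	 * the chunk was erased, 0xfe means the chunk was uncorrectable, and
	 * any other value is the number of bitflips that were corrected.
	 */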
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff)
			continue;

		if (status[i] == 0xfe) {
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}

/*
 * Write a page to NAND.
 */
static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
				   struct nand_chip *nand, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	memcpy(nand_info->data_buf, buf, mtd->writesize);
	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);

	/* Handle block mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Compile the DMA descriptor - write data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_ENCODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA write error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH write timeout\n");
		goto rtn;
	}

rtn:
	mxs_nand_return_dma_descs(nand_info);
	return 0;
}

/*
 * Read OOB from NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_read_oob(mtd, from, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Write OOB to NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
				   struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_write_oob(mtd, to, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Mark a block bad in NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	nand_info->marking_block_bad = 1;

	ret = nand_info->hooked_block_markbad(mtd, ofs);

	nand_info->marking_block_bad = 0;

	return ret;
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on whether we're doing
 * "raw" read, or an ECC-based read.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 *
 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
 * caller wants an ECC-based or raw view of the page is not propagated down to
 * this driver.
 *
 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
 * ecc.read_oob and ecc.write_oob function pointers in the owning
 * struct mtd_info with our own functions. These hook functions set the
 * raw_oob_mode field so that, when control finally arrives here, we'll know
 * what to do.
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				 int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/*
	 * First, fill in the OOB buffer. If we're doing a raw read, we need to
	 * get the bytes from the physical page. If we're not doing a raw read,
	 * we need to fill the buffer with set bits.
	 */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read. Send the
		 * command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read. Fill
		 * the OOB buffer with set bits and correct the block mark.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;
}

/*
 * Write OOB data to NAND.
 */
static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	uint8_t block_mark = 0;

	/*
	 * There are fundamental incompatibilities between the i.MX GPMI NFC and
	 * the NAND Flash MTD model that make it essentially impossible to write
	 * the out-of-band bytes.
	 *
	 * We permit *ONE* exception. If the *intent* of writing the OOB is to
	 * mark a block bad, we can do that.
	 */

	if (!nand_info->marking_block_bad) {
933 printf("NXS NAND: Writing OOB isn't supported\n");
		return -EIO;
	}

	/* Write the block mark. */
	nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	nand->write_buf(mtd, &block_mark, 1);
	nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	/* Check if it worked. */
	if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/*
 * Claims all blocks are good.
 *
 * In principle, this function is *only* called when the NAND Flash MTD system
 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
 * the driver for bad block information.
 *
 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
 * this function is *only* called when we take it away.
 *
 * Thus, this function is only called when we want *all* blocks to look good,
 * so it *always* returns success.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}

/*
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * board_nand_init(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret = -ENOTSUPP;

	if (nand_info->use_minimum_ecc)
		ret = mxs_nand_calc_ecc_layout_by_info(geo, mtd);

	if (ret == -ENOTSUPP)
		ret = mxs_nand_calc_ecc_layout(geo, mtd);

	if (ret)
		return ret;

	mxs_nand_calc_mark_offset(geo, mtd->writesize);

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
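	/*
	 * Note that the ECC0/ECCN bitfields below are programmed with half
	 * the ECC strength (the hardware encodes the level in steps of two),
	 * and the GF13_0_GF14_1 bit selects between GF(2^13) and GF(2^14).
	 */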
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= geo->ecc_chunk_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= geo->ecc_chunk_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook some operations at the MTD level. */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	return 0;
}

/*
 * Allocate DMA buffers
 */
int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
{
	uint8_t *buf;
	const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;

	nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);

	/* DMA buffers */
	buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
	if (!buf) {
		printf("MXS NAND: Error allocating DMA buffers\n");
		return -ENOMEM;
	}

	memset(buf, 0, nand_info->data_buf_size);

	nand_info->data_buf = buf;
	nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;
	/* Command buffers */
	nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
				      MXS_NAND_COMMAND_BUFFER_SIZE);
	if (!nand_info->cmd_buf) {
		free(buf);
		printf("MXS NAND: Error allocating command buffers\n");
		return -ENOMEM;
	}
	memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
	nand_info->cmd_queue_len = 0;

	return 0;
}

/*
 * Initializes the NFC hardware.
 */
int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
				MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
	     j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}

int mxs_nand_init_spl(struct nand_chip *nand)
{
	struct mxs_nand_info *nand_info;
	int err;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return -ENOMEM;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		return err;

	nand_set_controller_data(nand, nand_info);

	nand->options |= NAND_NO_SUBPAGE_WRITE;

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;
	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;

	nand->read_byte = mxs_nand_read_byte;
	nand->read_buf = mxs_nand_read_buf;

	nand->ecc.read_page = mxs_nand_ecc_read_page;

	nand->ecc.mode = NAND_ECC_HW;
	nand->ecc.bytes = 9;
	nand->ecc.size = 512;
	nand->ecc.strength = 8;

	return 0;
}

int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
{
	struct mtd_info *mtd;
	struct nand_chip *nand;
	int err;

	nand = &nand_info->chip;
	mtd = nand_to_mtd(nand);
	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		goto err_free_buffers;

	memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
#endif

	nand_set_controller_data(nand, nand_info);
	nand->options |= NAND_NO_SUBPAGE_WRITE;

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;

	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;
	nand->block_bad = mxs_nand_block_bad;

	nand->read_byte = mxs_nand_read_byte;

	nand->read_buf = mxs_nand_read_buf;
	nand->write_buf = mxs_nand_write_buf;

	/* first scan to find the device and get the page size */
	if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
		goto err_free_buffers;

	if (mxs_nand_setup_ecc(mtd))
		goto err_free_buffers;

	nand->ecc.read_page = mxs_nand_ecc_read_page;
	nand->ecc.write_page = mxs_nand_ecc_write_page;
	nand->ecc.read_oob = mxs_nand_ecc_read_oob;
	nand->ecc.write_oob = mxs_nand_ecc_write_oob;

	nand->ecc.layout = &fake_ecc_layout;
	nand->ecc.mode = NAND_ECC_HW;
	nand->ecc.size = nand_info->bch_geometry.ecc_chunk_size;
	nand->ecc.strength = nand_info->bch_geometry.ecc_strength;

	/* second phase scan */
	err = nand_scan_tail(mtd);
	if (err)
		goto err_free_buffers;

	err = nand_register(0, mtd);
	if (err)
		goto err_free_buffers;

	return 0;

err_free_buffers:
	free(nand_info->data_buf);
	free(nand_info->cmd_buf);

	return err;
}

void board_nand_init(void)
{
	struct mxs_nand_info *nand_info;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	/* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */
	if (is_mx6sx() || is_mx7())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
	nand_info->use_minimum_ecc = true;
#endif

	if (mxs_nand_init_ctrl(nand_info) < 0)
		goto err;

	return;

err:
	free(nand_info);
}