/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#define __UBOOT__
#ifndef __UBOOT__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>
#include <asm/div64.h>
#else
#include <div64.h>
#include <linux/compat.h>
#endif

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <ubi_uboot.h>

/*
 * Our storage structure:
 * Subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
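
/*
 * Layout note (sizes illustrative): mtd_concat_create() allocates this
 * much in one go and points concat->subdev at the pointer array that
 * sits directly behind the struct, so SIZEOF_STRUCT_MTD_CONCAT(2)
 * covers the mtd_concat itself plus two struct mtd_info pointers.
 */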

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t *retlen, u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

#ifdef __UBOOT__
	*retlen = 0;
#endif

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}
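
/*
 * Worked example of the translation above (sizes are illustrative):
 * with two 8 MiB subdevices, a 4 MiB read at offset 10 MiB skips the
 * first chip ('from' drops to 2 MiB) and is served entirely by the
 * second chip at its local offset 2 MiB. A request that straddles a
 * chip boundary is split, and 'from' is reset to 0 for every chip
 * after the first one touched.
 */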

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t *retlen, const u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

#ifdef __UBOOT__
	*retlen = 0;
#endif

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

#ifndef __UBOOT__
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size; /* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		err = mtd_writev(subdev, &vecs_copy[entry_low],
				 entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}
#endif

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		/* accumulate both data and OOB byte counts */
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	/* Nothing to do here in U-Boot */
#ifndef __UBOOT__
	wake_up((wait_queue_head_t *) instr->priv);
#endif
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd_erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's
		 * erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
			      (erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_lock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_sync(subdev);
	}
}

#ifndef __UBOOT__
static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = mtd_suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_resume(subdev);
	}
}
#endif

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
					      unsigned long len,
					      unsigned long offset,
					      unsigned long flags)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (offset >= subdev->size) {
			offset -= subdev->size;
			continue;
		}

		return mtd_get_unmapped_area(subdev, len, offset, flags);
	}

	return (unsigned long) -ENOSYS;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned upon success. This function does _not_ register any
 * devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices */
#ifndef __UBOOT__
				   const char *name)
#else
				   char *name)
#endif
{				/* name for the new device */
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	debug("Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	debug("into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
#ifndef __UBOOT__
	if (subdev[0]->_writev)
		concat->mtd._writev = concat_writev;
#endif
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

#ifndef __UBOOT__
	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
#endif

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

#ifndef __UBOOT__
		/* only permit direct mapping if the BDIs are all the same
		 * - copy-mapping is still permitted
		 */
		if (concat->mtd.backing_dev_info !=
		    subdev[i]->backing_dev_info)
			concat->mtd.backing_dev_info =
			    &default_backing_dev_info;
#endif

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
		    subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];

	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;
#ifndef __UBOOT__
	concat->mtd._suspend = concat_suspend;
	concat->mtd._resume = concat_resume;
#endif
	concat->mtd._get_unmapped_area = concat_get_unmapped_area;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
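	/*
	 * Worked example (sizes are illustrative, not from this file):
	 * concatenating two chips with a uniform 64 KiB erase size and
	 * then one chip with a uniform 128 KiB erase size gives
	 * num_erase_region == 2 and max_erasesize == 128 KiB, so two
	 * mtd_erase_region_info entries are generated further below.
	 */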
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's
				   erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {

				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}
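
/*
 * Usage sketch (illustrative only): a caller that has already probed
 * two flash chips could merge them and register the result itself.
 * The variable names and the "nor-concat" label are made up for this
 * example; add_mtd_device()/del_mtd_device() are the usual MTD core
 * registration calls.
 *
 *	struct mtd_info *parts[2] = { nor0, nor1 };
 *	struct mtd_info *merged;
 *
 *	merged = mtd_concat_create(parts, 2, "nor-concat");
 *	if (!merged)
 *		return -ENOMEM;
 *	add_mtd_device(merged);
 *	...
 *	del_mtd_device(merged);
 *	mtd_concat_destroy(merged);
 */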

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");