1/*
2 * Copyright (c) 2012 Linutronix GmbH
3 * Author: Richard Weinberger <richard@nod.at>
4 *
5 * SPDX-License-Identifier: GPL-2.0+
6 *
7 */
8
9#ifndef __UBOOT__
10#include <linux/crc32.h>
11#else
12#include <div64.h>
13#include <malloc.h>
14#include <ubi_uboot.h>
15#endif
16
17#include <linux/compat.h>
18#include <linux/math64.h>
19#include "ubi.h"
20
21/**
22 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
23 * @ubi: UBI device description object
24 */
25size_t ubi_calc_fm_size(struct ubi_device *ubi)
26{
27 size_t size;
28
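	/*
	 * Worst-case on-flash layout: the fastmap header, the two scan
	 * pools, one EC entry per PEB, an EBA table covering every PEB and
	 * a volume header for each possible volume, rounded up to full LEBs.
	 */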
29 size = sizeof(struct ubi_fm_hdr) + \
30 sizeof(struct ubi_fm_scan_pool) + \
31 sizeof(struct ubi_fm_scan_pool) + \
32 (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
33 (sizeof(struct ubi_fm_eba) + \
34 (ubi->peb_count * sizeof(__be32))) + \
35 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
36 return roundup(size, ubi->leb_size);
37}
38
39
40/**
41 * new_fm_vhdr - allocate a new volume header for fastmap usage.
42 * @ubi: UBI device description object
43 * @vol_id: the VID of the new header
44 *
45 * Returns a new struct ubi_vid_hdr on success.
46 * NULL indicates out of memory.
47 */
48static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
49{
50 struct ubi_vid_hdr *new;
51
52 new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
53 if (!new)
54 goto out;
55
56 new->vol_type = UBI_VID_DYNAMIC;
57 new->vol_id = cpu_to_be32(vol_id);
58
59 /* UBI implementations without fastmap support have to delete the
60 * fastmap.
61 */
62 new->compat = UBI_COMPAT_DELETE;
63
64out:
65 return new;
66}
67
68/**
69 * add_aeb - create and add an attach erase block to a given list.
70 * @ai: UBI attach info object
71 * @list: the target list
72 * @pnum: PEB number of the new attach erase block
73 * @ec: erase counter of the new LEB
74 * @scrub: scrub this PEB after attaching
75 *
76 * Returns 0 on success, < 0 indicates an internal error.
77 */
78static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
79 int pnum, int ec, int scrub)
80{
81 struct ubi_ainf_peb *aeb;
82
83 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
84 if (!aeb)
85 return -ENOMEM;
86
87 aeb->pnum = pnum;
88 aeb->ec = ec;
89 aeb->lnum = -1;
90 aeb->scrub = scrub;
91 aeb->copy_flag = aeb->sqnum = 0;
92
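	/*
	 * Keep the attach-time erase counter statistics up to date; the
	 * fastmap attach code later derives ai->mean_ec from
	 * ec_sum / ec_count once all PEBs have been added.
	 */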
93 ai->ec_sum += aeb->ec;
94 ai->ec_count++;
95
96 if (ai->max_ec < aeb->ec)
97 ai->max_ec = aeb->ec;
98
99 if (ai->min_ec > aeb->ec)
100 ai->min_ec = aeb->ec;
101
102 list_add_tail(&aeb->u.list, list);
103
104 return 0;
105}
106
107/**
108 * add_vol - create and add a new volume to ubi_attach_info.
109 * @ai: ubi_attach_info object
110 * @vol_id: VID of the new volume
111 * @used_ebs: number of used EBs
112 * @data_pad: data padding value of the new volume
113 * @vol_type: volume type
114 * @last_eb_bytes: number of bytes in the last LEB
115 *
116 * Returns the new struct ubi_ainf_volume on success.
117 * NULL indicates an error.
118 */
119static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
120 int used_ebs, int data_pad, u8 vol_type,
121 int last_eb_bytes)
122{
123 struct ubi_ainf_volume *av;
124 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
125
126 while (*p) {
127 parent = *p;
128 av = rb_entry(parent, struct ubi_ainf_volume, rb);
129
130 if (vol_id > av->vol_id)
131 p = &(*p)->rb_left;
132 else
133 p = &(*p)->rb_right;
134 }
135
136 av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
137 if (!av)
138 goto out;
139
140 av->highest_lnum = av->leb_count = 0;
141 av->vol_id = vol_id;
142 av->used_ebs = used_ebs;
143 av->data_pad = data_pad;
144 av->last_data_size = last_eb_bytes;
145 av->compat = 0;
146 av->vol_type = vol_type;
147 av->root = RB_ROOT;
148
149 dbg_bld("found volume (ID %i)", vol_id);
150
151 rb_link_node(&av->rb, parent, p);
152 rb_insert_color(&av->rb, &ai->volumes);
153
154out:
155 return av;
156}
157
158/**
159 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
160 * from its original list.
161 * @ai: ubi_attach_info object
162 * @aeb: the SEB to be assigned
163 * @av: target scan volume
164 */
165static void assign_aeb_to_av(struct ubi_attach_info *ai,
166 struct ubi_ainf_peb *aeb,
167 struct ubi_ainf_volume *av)
168{
169 struct ubi_ainf_peb *tmp_aeb;
170 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
171
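	/*
	 * Find the slot for this LEB in the volume's rb-tree (keyed by
	 * lnum), then move the PEB off its scan list into that tree.
	 */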
172 p = &av->root.rb_node;
173 while (*p) {
174 parent = *p;
175
176 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
177 if (aeb->lnum != tmp_aeb->lnum) {
178 if (aeb->lnum < tmp_aeb->lnum)
179 p = &(*p)->rb_left;
180 else
181 p = &(*p)->rb_right;
182
183 continue;
184 } else
185 break;
186 }
187
188 list_del(&aeb->u.list);
189 av->leb_count++;
190
191 rb_link_node(&aeb->u.rb, parent, p);
192 rb_insert_color(&aeb->u.rb, &av->root);
193}
194
195/**
196 * update_vol - inserts or updates a LEB which was found in a pool.
197 * @ubi: the UBI device object
198 * @ai: attach info object
199 * @av: the volume this LEB belongs to
200 * @new_vh: the volume header derived from new_aeb
201 * @new_aeb: the AEB to be examined
202 *
203 * Returns 0 on success, < 0 indicates an internal error.
204 */
205static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
206 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
207 struct ubi_ainf_peb *new_aeb)
208{
209 struct rb_node **p = &av->root.rb_node, *parent = NULL;
210 struct ubi_ainf_peb *aeb, *victim;
211 int cmp_res;
212
213 while (*p) {
214 parent = *p;
215 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
216
217 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
218 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
219 p = &(*p)->rb_left;
220 else
221 p = &(*p)->rb_right;
222
223 continue;
224 }
225
226 /* This case can happen if the fastmap gets written
227 * because of a volume change (creation, deletion, ..).
228 * Then a PEB can be within the persistent EBA and the pool.
229 */
230 if (aeb->pnum == new_aeb->pnum) {
231 ubi_assert(aeb->lnum == new_aeb->lnum);
232 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
233
234 return 0;
235 }
236
237 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
238 if (cmp_res < 0)
239 return cmp_res;
240
241 /* new_aeb is newer */
242 if (cmp_res & 1) {
243 victim = kmem_cache_alloc(ai->aeb_slab_cache,
244 GFP_KERNEL);
245 if (!victim)
246 return -ENOMEM;
247
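			/*
			 * The PEB currently in the tree is outdated: remember
			 * it on ai->erase so it gets erased later, and let the
			 * existing tree node take over new_aeb's data.
			 */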
248 victim->ec = aeb->ec;
249 victim->pnum = aeb->pnum;
250 list_add_tail(&victim->u.list, &ai->erase);
251
252 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
253 av->last_data_size = \
254 be32_to_cpu(new_vh->data_size);
255
256 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
257 av->vol_id, aeb->lnum, new_aeb->pnum);
258
259 aeb->ec = new_aeb->ec;
260 aeb->pnum = new_aeb->pnum;
261 aeb->copy_flag = new_vh->copy_flag;
262 aeb->scrub = new_aeb->scrub;
263 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
264
265 /* new_aeb is older */
266 } else {
267 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
268 av->vol_id, aeb->lnum, new_aeb->pnum);
269 list_add_tail(&new_aeb->u.list, &ai->erase);
270 }
271
272 return 0;
273 }
274 /* This LEB is new, let's add it to the volume */
275
276 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
277 av->highest_lnum = be32_to_cpu(new_vh->lnum);
278 av->last_data_size = be32_to_cpu(new_vh->data_size);
279 }
280
281 if (av->vol_type == UBI_STATIC_VOLUME)
282 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
283
284 av->leb_count++;
285
286 rb_link_node(&new_aeb->u.rb, parent, p);
287 rb_insert_color(&new_aeb->u.rb, &av->root);
288
289 return 0;
290}
291
292/**
293 * process_pool_aeb - we found a non-empty PEB in a pool.
294 * @ubi: UBI device object
295 * @ai: attach info object
296 * @new_vh: the volume header derived from new_aeb
297 * @new_aeb: the AEB to be examined
298 *
299 * Returns 0 on success, < 0 indicates an internal error.
300 */
301static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
302 struct ubi_vid_hdr *new_vh,
303 struct ubi_ainf_peb *new_aeb)
304{
305 struct ubi_ainf_volume *av, *tmp_av = NULL;
306 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
307 int found = 0;
308
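	/*
	 * PEBs belonging to the fastmap itself must not show up in any user
	 * volume; drop them here, they are handled by the fastmap code.
	 */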
309 if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
310 be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
311 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
312
313 return 0;
314 }
315
316 /* Find the volume this SEB belongs to */
317 while (*p) {
318 parent = *p;
319 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
320
321 if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
322 p = &(*p)->rb_left;
323 else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
324 p = &(*p)->rb_right;
325 else {
326 found = 1;
327 break;
328 }
329 }
330
331 if (found)
332 av = tmp_av;
333 else {
334 ubi_err("orphaned volume in fastmap pool!");
335 return UBI_BAD_FASTMAP;
336 }
337
338 ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
339
340 return update_vol(ubi, ai, av, new_vh, new_aeb);
341}
342
343/**
344 * unmap_peb - unmap a PEB.
345 * If fastmap detects a free PEB in the pool it has to check whether
346 * this PEB has been unmapped after writing the fastmap.
347 *
348 * @ai: UBI attach info object
349 * @pnum: The PEB to be unmapped
350 */
351static void unmap_peb(struct ubi_attach_info *ai, int pnum)
352{
353 struct ubi_ainf_volume *av;
354 struct rb_node *node, *node2;
355 struct ubi_ainf_peb *aeb;
356
357 for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
358 av = rb_entry(node, struct ubi_ainf_volume, rb);
359
360 for (node2 = rb_first(&av->root); node2;
361 node2 = rb_next(node2)) {
362 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
363 if (aeb->pnum == pnum) {
364 rb_erase(&aeb->u.rb, &av->root);
365 kmem_cache_free(ai->aeb_slab_cache, aeb);
366 return;
367 }
368 }
369 }
370}
371
372/**
373 * scan_pool - scans a pool for changed (no longer empty) PEBs.
374 * @ubi: UBI device object
375 * @ai: attach info object
376 * @pebs: an array of all PEB numbers in the pool to be scanned
377 * @pool_size: size of the pool (number of entries in @pebs)
378 * @max_sqnum: pointer to the maximal sequence number
379 * @eba_orphans: list of PEBs which need to be scanned
380 * @freef: list of PEBs which are most likely free (and go into @ai->free)
381 *
382 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
383 * < 0 indicates an internal error.
384 */
385#ifndef __UBOOT__
386static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
387 int *pebs, int pool_size, unsigned long long *max_sqnum,
388 struct list_head *eba_orphans, struct list_head *freef)
389#else
390static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
391 __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
392 struct list_head *eba_orphans, struct list_head *freef)
393#endif
394{
395 struct ubi_vid_hdr *vh;
396 struct ubi_ec_hdr *ech;
397 struct ubi_ainf_peb *new_aeb, *tmp_aeb;
398 int i, pnum, err, found_orphan, ret = 0;
399
400 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
401 if (!ech)
402 return -ENOMEM;
403
404 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
405 if (!vh) {
406 kfree(ech);
407 return -ENOMEM;
408 }
409
410 dbg_bld("scanning fastmap pool: size = %i", pool_size);
411
412 /*
413 * Now scan all PEBs in the pool to find changes which have been made
414 * after the creation of the fastmap
415 */
416 for (i = 0; i < pool_size; i++) {
417 int scrub = 0;
418 int image_seq;
419
420 pnum = be32_to_cpu(pebs[i]);
421
422 if (ubi_io_is_bad(ubi, pnum)) {
423 ubi_err("bad PEB in fastmap pool!");
424 ret = UBI_BAD_FASTMAP;
425 goto out;
426 }
427
428 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
429 if (err && err != UBI_IO_BITFLIPS) {
430 ubi_err("unable to read EC header! PEB:%i err:%i",
431 pnum, err);
432 ret = err > 0 ? UBI_BAD_FASTMAP : err;
433 goto out;
434 } else if (err == UBI_IO_BITFLIPS)
435 scrub = 1;
436
437 /*
438 * Older UBI implementations have image_seq set to zero, so
439 * we shouldn't fail if image_seq == 0.
440 */
441 image_seq = be32_to_cpu(ech->image_seq);
442
443 if (image_seq && (image_seq != ubi->image_seq)) {
444 ubi_err("bad image seq: 0x%x, expected: 0x%x",
445 be32_to_cpu(ech->image_seq), ubi->image_seq);
446 ret = UBI_BAD_FASTMAP;
447 goto out;
448 }
449
450 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
451 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
452 unsigned long long ec = be64_to_cpu(ech->ec);
453 unmap_peb(ai, pnum);
454 dbg_bld("Adding PEB to free: %i", pnum);
455 if (err == UBI_IO_FF_BITFLIPS)
456 add_aeb(ai, freef, pnum, ec, 1);
457 else
458 add_aeb(ai, freef, pnum, ec, 0);
459 continue;
460 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
461 dbg_bld("Found non empty PEB:%i in pool", pnum);
462
463 if (err == UBI_IO_BITFLIPS)
464 scrub = 1;
465
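			/*
			 * If this PEB was on the EBA orphan list it is now
			 * fully known, so drop the orphan entry before adding
			 * the PEB with its real volume header data.
			 */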
466 found_orphan = 0;
467 list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
468 if (tmp_aeb->pnum == pnum) {
469 found_orphan = 1;
470 break;
471 }
472 }
473 if (found_orphan) {
474 list_del(&tmp_aeb->u.list);
475 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
476 }
477
478 new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
479 GFP_KERNEL);
480 if (!new_aeb) {
481 ret = -ENOMEM;
482 goto out;
483 }
484
485 new_aeb->ec = be64_to_cpu(ech->ec);
486 new_aeb->pnum = pnum;
487 new_aeb->lnum = be32_to_cpu(vh->lnum);
488 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
489 new_aeb->copy_flag = vh->copy_flag;
490 new_aeb->scrub = scrub;
491
492 if (*max_sqnum < new_aeb->sqnum)
493 *max_sqnum = new_aeb->sqnum;
494
495 err = process_pool_aeb(ubi, ai, vh, new_aeb);
496 if (err) {
497 ret = err > 0 ? UBI_BAD_FASTMAP : err;
498 goto out;
499 }
500 } else {
501 /* We are paranoid and fall back to scanning mode */
502 ubi_err("fastmap pool PEBs contains damaged PEBs!");
503 ret = err > 0 ? UBI_BAD_FASTMAP : err;
504 goto out;
505 }
506
507 }
508
509out:
510 ubi_free_vid_hdr(ubi, vh);
511 kfree(ech);
512 return ret;
513}
514
515/**
516 * count_fastmap_pebs - Counts the PEBs found by fastmap.
517 * @ai: The UBI attach info object
518 */
519static int count_fastmap_pebs(struct ubi_attach_info *ai)
520{
521 struct ubi_ainf_peb *aeb;
522 struct ubi_ainf_volume *av;
523 struct rb_node *rb1, *rb2;
524 int n = 0;
525
526 list_for_each_entry(aeb, &ai->erase, u.list)
527 n++;
528
529 list_for_each_entry(aeb, &ai->free, u.list)
530 n++;
531
532 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
533 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
534 n++;
535
536 return n;
537}
538
539/**
540 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
541 * @ubi: UBI device object
542 * @ai: UBI attach info object
543 * @fm: the fastmap to be attached
544 *
545 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
546 * < 0 indicates an internal error.
547 */
548static int ubi_attach_fastmap(struct ubi_device *ubi,
549 struct ubi_attach_info *ai,
550 struct ubi_fastmap_layout *fm)
551{
552 struct list_head used, eba_orphans, freef;
553 struct ubi_ainf_volume *av;
554 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
555 struct ubi_ec_hdr *ech;
556 struct ubi_fm_sb *fmsb;
557 struct ubi_fm_hdr *fmhdr;
558 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
559 struct ubi_fm_ec *fmec;
560 struct ubi_fm_volhdr *fmvhdr;
561 struct ubi_fm_eba *fm_eba;
562 int ret, i, j, pool_size, wl_pool_size;
563 size_t fm_pos = 0, fm_size = ubi->fm_size;
564 unsigned long long max_sqnum = 0;
565 void *fm_raw = ubi->fm_buf;
566
567 INIT_LIST_HEAD(&used);
568 INIT_LIST_HEAD(&freef);
569 INIT_LIST_HEAD(&eba_orphans);
570 INIT_LIST_HEAD(&ai->corr);
571 INIT_LIST_HEAD(&ai->free);
572 INIT_LIST_HEAD(&ai->erase);
573 INIT_LIST_HEAD(&ai->alien);
574 ai->volumes = RB_ROOT;
575 ai->min_ec = UBI_MAX_ERASECOUNTER;
576
577 ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
578 sizeof(struct ubi_ainf_peb),
579 0, 0, NULL);
580 if (!ai->aeb_slab_cache) {
581 ret = -ENOMEM;
582 goto fail;
583 }
584
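	/*
	 * ubi->fm_buf already holds the complete fastmap (read by
	 * ubi_scan_fastmap()); parse it sequentially: super block, fastmap
	 * header, the two pools, the EC lists and the per-volume EBA tables.
	 */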
585 fmsb = (struct ubi_fm_sb *)(fm_raw);
586 ai->max_sqnum = fmsb->sqnum;
587 fm_pos += sizeof(struct ubi_fm_sb);
588 if (fm_pos >= fm_size)
589 goto fail_bad;
590
591 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
592 fm_pos += sizeof(*fmhdr);
593 if (fm_pos >= fm_size)
594 goto fail_bad;
595
596 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
597 ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
598 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
599 goto fail_bad;
600 }
601
602 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
603 fm_pos += sizeof(*fmpl1);
604 if (fm_pos >= fm_size)
605 goto fail_bad;
606 if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
607 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
608 be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
609 goto fail_bad;
610 }
611
612 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
613 fm_pos += sizeof(*fmpl2);
614 if (fm_pos >= fm_size)
615 goto fail_bad;
616 if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
617 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
618 be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
619 goto fail_bad;
620 }
621
622 pool_size = be16_to_cpu(fmpl1->size);
623 wl_pool_size = be16_to_cpu(fmpl2->size);
624 fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
625 fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
626
627 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
628 ubi_err("bad pool size: %i", pool_size);
629 goto fail_bad;
630 }
631
632 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
633 ubi_err("bad WL pool size: %i", wl_pool_size);
634 goto fail_bad;
635 }
636
637
638 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
639 fm->max_pool_size < 0) {
640 ubi_err("bad maximal pool size: %i", fm->max_pool_size);
641 goto fail_bad;
642 }
643
644 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
645 fm->max_wl_pool_size < 0) {
646 ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
647 goto fail_bad;
648 }
649
650 /* read EC values from free list */
651 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
652 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
653 fm_pos += sizeof(*fmec);
654 if (fm_pos >= fm_size)
655 goto fail_bad;
656
657 add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
658 be32_to_cpu(fmec->ec), 0);
659 }
660
661 /* read EC values from used list */
662 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
663 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
664 fm_pos += sizeof(*fmec);
665 if (fm_pos >= fm_size)
666 goto fail_bad;
667
668 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
669 be32_to_cpu(fmec->ec), 0);
670 }
671
672 /* read EC values from scrub list */
673 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
674 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
675 fm_pos += sizeof(*fmec);
676 if (fm_pos >= fm_size)
677 goto fail_bad;
678
679 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
680 be32_to_cpu(fmec->ec), 1);
681 }
682
683 /* read EC values from erase list */
684 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
685 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
686 fm_pos += sizeof(*fmec);
687 if (fm_pos >= fm_size)
688 goto fail_bad;
689
690 add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
691 be32_to_cpu(fmec->ec), 1);
692 }
693
694 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
695 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
696
697 /* Iterate over all volumes and read their EBA table */
698 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
699 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
700 fm_pos += sizeof(*fmvhdr);
701 if (fm_pos >= fm_size)
702 goto fail_bad;
703
704 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
705 ubi_err("bad fastmap vol header magic: 0x%x, " \
706 "expected: 0x%x",
707 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
708 goto fail_bad;
709 }
710
711 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
712 be32_to_cpu(fmvhdr->used_ebs),
713 be32_to_cpu(fmvhdr->data_pad),
714 fmvhdr->vol_type,
715 be32_to_cpu(fmvhdr->last_eb_bytes));
716
717 if (!av)
718 goto fail_bad;
719
720 ai->vols_found++;
721 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
722 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
723
724 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
725 fm_pos += sizeof(*fm_eba);
726 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
727 if (fm_pos >= fm_size)
728 goto fail_bad;
729
730 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
731 ubi_err("bad fastmap EBA header magic: 0x%x, " \
732 "expected: 0x%x",
733 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
734 goto fail_bad;
735 }
736
737 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
738 int pnum = be32_to_cpu(fm_eba->pnum[j]);
739
740 if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
741 continue;
742
743 aeb = NULL;
744 list_for_each_entry(tmp_aeb, &used, u.list) {
745 if (tmp_aeb->pnum == pnum) {
746 aeb = tmp_aeb;
747 break;
748 }
749 }
750
751 /* This can happen if a PEB is already in an EBA known
752 * by this fastmap but the PEB itself is not in the used
753 * list.
754 * In this case the PEB can be within the fastmap pool
755 * or while writing the fastmap it was in the protection
756 * queue.
757 */
758 if (!aeb) {
759 aeb = kmem_cache_alloc(ai->aeb_slab_cache,
760 GFP_KERNEL);
761 if (!aeb) {
762 ret = -ENOMEM;
763
764 goto fail;
765 }
766
767 aeb->lnum = j;
768 aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
769 aeb->ec = -1;
770 aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
771 list_add_tail(&aeb->u.list, &eba_orphans);
772 continue;
773 }
774
775 aeb->lnum = j;
776
777 if (av->highest_lnum <= aeb->lnum)
778 av->highest_lnum = aeb->lnum;
779
780 assign_aeb_to_av(ai, aeb, av);
781
782 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
783 aeb->pnum, aeb->lnum, av->vol_id);
784 }
785
786 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
787 if (!ech) {
788 ret = -ENOMEM;
789 goto fail;
790 }
791
792 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
793 u.list) {
794 int err;
795
796 if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
797 ubi_err("bad PEB in fastmap EBA orphan list");
798 ret = UBI_BAD_FASTMAP;
799 kfree(ech);
800 goto fail;
801 }
802
803 err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
804 if (err && err != UBI_IO_BITFLIPS) {
805 ubi_err("unable to read EC header! PEB:%i " \
806 "err:%i", tmp_aeb->pnum, err);
807 ret = err > 0 ? UBI_BAD_FASTMAP : err;
808 kfree(ech);
809
810 goto fail;
811 } else if (err == UBI_IO_BITFLIPS)
812 tmp_aeb->scrub = 1;
813
814 tmp_aeb->ec = be64_to_cpu(ech->ec);
815 assign_aeb_to_av(ai, tmp_aeb, av);
816 }
817
818 kfree(ech);
819 }
820
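	/*
	 * The pools may contain PEBs which were modified after the fastmap
	 * was written; scan them to pick up those changes.
	 */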
821 ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
822 &eba_orphans, &freef);
823 if (ret)
824 goto fail;
825
826 ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
827 &eba_orphans, &freef);
828 if (ret)
829 goto fail;
830
831 if (max_sqnum > ai->max_sqnum)
832 ai->max_sqnum = max_sqnum;
833
834 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &freef, u.list)
835 list_move_tail(&tmp_aeb->u.list, &ai->free);
836
837 ubi_assert(list_empty(&used));
838 ubi_assert(list_empty(&eba_orphans));
839 ubi_assert(list_empty(&freef));
840
841 /*
842 * If fastmap is leaking PEBs (must not happen), raise a
843 * fat warning and fall back to scanning mode.
844 * We do this here because in ubi_wl_init() it's too late
845 * and we cannot fall back to scanning.
846 */
847#ifndef __UBOOT__
848 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
849 ai->bad_peb_count - fm->used_blocks))
850 goto fail_bad;
851#else
852 if (count_fastmap_pebs(ai) != ubi->peb_count -
853 ai->bad_peb_count - fm->used_blocks) {
854 WARN_ON(1);
855 goto fail_bad;
856 }
857#endif
858
859 return 0;
860
861fail_bad:
862 ret = UBI_BAD_FASTMAP;
863fail:
864 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
865 list_del(&tmp_aeb->u.list);
866 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
867 }
868 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
869 list_del(&tmp_aeb->u.list);
870 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
871 }
872 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &freef, u.list) {
873 list_del(&tmp_aeb->u.list);
874 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
875 }
876
877 return ret;
878}
879
880/**
881 * ubi_scan_fastmap - scan the fastmap.
882 * @ubi: UBI device object
883 * @ai: UBI attach info to be filled
884 * @fm_anchor: The fastmap starts at this PEB
885 *
886 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
887 * UBI_BAD_FASTMAP if one was found but is not usable.
888 * < 0 indicates an internal error.
889 */
890int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
891 int fm_anchor)
892{
893 struct ubi_fm_sb *fmsb, *fmsb2;
894 struct ubi_vid_hdr *vh;
895 struct ubi_ec_hdr *ech;
896 struct ubi_fastmap_layout *fm;
897 int i, used_blocks, pnum, ret = 0;
898 size_t fm_size;
899 __be32 crc, tmp_crc;
900 unsigned long long sqnum = 0;
901
902 mutex_lock(&ubi->fm_mutex);
903 memset(ubi->fm_buf, 0, ubi->fm_size);
904
905 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
906 if (!fmsb) {
907 ret = -ENOMEM;
908 goto out;
909 }
910
911 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
912 if (!fm) {
913 ret = -ENOMEM;
914 kfree(fmsb);
915 goto out;
916 }
917
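	/*
	 * Read the fastmap super block from the anchor PEB; it tells us how
	 * many blocks the fastmap spans and where they are located.
	 */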
918 ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
919 if (ret && ret != UBI_IO_BITFLIPS)
920 goto free_fm_sb;
921 else if (ret == UBI_IO_BITFLIPS)
922 fm->to_be_tortured[0] = 1;
923
924 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
925 ubi_err("bad super block magic: 0x%x, expected: 0x%x",
926 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
927 ret = UBI_BAD_FASTMAP;
928 goto free_fm_sb;
929 }
930
931 if (fmsb->version != UBI_FM_FMT_VERSION) {
932 ubi_err("bad fastmap version: %i, expected: %i",
933 fmsb->version, UBI_FM_FMT_VERSION);
934 ret = UBI_BAD_FASTMAP;
935 goto free_fm_sb;
936 }
937
938 used_blocks = be32_to_cpu(fmsb->used_blocks);
939 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
940 ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
941 ret = UBI_BAD_FASTMAP;
942 goto free_fm_sb;
943 }
944
945 fm_size = ubi->leb_size * used_blocks;
946 if (fm_size != ubi->fm_size) {
947 ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
948 ubi->fm_size);
949 ret = UBI_BAD_FASTMAP;
950 goto free_fm_sb;
951 }
952
953 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
954 if (!ech) {
955 ret = -ENOMEM;
956 goto free_fm_sb;
957 }
958
959 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
960 if (!vh) {
961 ret = -ENOMEM;
962 goto free_hdr;
963 }
964
965 for (i = 0; i < used_blocks; i++) {
966 int image_seq;
967
968 pnum = be32_to_cpu(fmsb->block_loc[i]);
969
970 if (ubi_io_is_bad(ubi, pnum)) {
971 ret = UBI_BAD_FASTMAP;
972 goto free_hdr;
973 }
974
975 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
976 if (ret && ret != UBI_IO_BITFLIPS) {
977 ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
978 i, pnum);
979 if (ret > 0)
980 ret = UBI_BAD_FASTMAP;
981 goto free_hdr;
982 } else if (ret == UBI_IO_BITFLIPS)
983 fm->to_be_tortured[i] = 1;
984
985 image_seq = be32_to_cpu(ech->image_seq);
986 if (!ubi->image_seq)
987 ubi->image_seq = image_seq;
988
989 /*
990 * Older UBI implementations have image_seq set to zero, so
991 * we shouldn't fail if image_seq == 0.
992 */
993 if (image_seq && (image_seq != ubi->image_seq)) {
994 ubi_err("wrong image seq:%d instead of %d",
995 be32_to_cpu(ech->image_seq), ubi->image_seq);
996 ret = UBI_BAD_FASTMAP;
997 goto free_hdr;
998 }
999
1000 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
1001 if (ret && ret != UBI_IO_BITFLIPS) {
1002 ubi_err("unable to read fastmap block# %i (PEB: %i)",
1003 i, pnum);
1004 goto free_hdr;
1005 }
1006
1007 if (i == 0) {
1008 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
1009 ubi_err("bad fastmap anchor vol_id: 0x%x," \
1010 " expected: 0x%x",
1011 be32_to_cpu(vh->vol_id),
1012 UBI_FM_SB_VOLUME_ID);
1013 ret = UBI_BAD_FASTMAP;
1014 goto free_hdr;
1015 }
1016 } else {
1017 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
1018 ubi_err("bad fastmap data vol_id: 0x%x," \
1019 " expected: 0x%x",
1020 be32_to_cpu(vh->vol_id),
1021 UBI_FM_DATA_VOLUME_ID);
1022 ret = UBI_BAD_FASTMAP;
1023 goto free_hdr;
1024 }
1025 }
1026
1027 if (sqnum < be64_to_cpu(vh->sqnum))
1028 sqnum = be64_to_cpu(vh->sqnum);
1029
1030 ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
1031 ubi->leb_start, ubi->leb_size);
1032 if (ret && ret != UBI_IO_BITFLIPS) {
1033 ubi_err("unable to read fastmap block# %i (PEB: %i, " \
1034 "err: %i)", i, pnum, ret);
1035 goto free_hdr;
1036 }
1037 }
1038
1039 kfree(fmsb);
1040 fmsb = NULL;
1041
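	/*
	 * The stored CRC was computed with the data_crc field set to zero,
	 * so clear it before recalculating the checksum over the whole
	 * fastmap.
	 */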
1042 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
1043 tmp_crc = be32_to_cpu(fmsb2->data_crc);
1044 fmsb2->data_crc = 0;
1045 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1046 if (crc != tmp_crc) {
1047 ubi_err("fastmap data CRC is invalid");
1048 ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
1049 ret = UBI_BAD_FASTMAP;
1050 goto free_hdr;
1051 }
1052
1053 fmsb2->sqnum = sqnum;
1054
1055 fm->used_blocks = used_blocks;
1056
1057 ret = ubi_attach_fastmap(ubi, ai, fm);
1058 if (ret) {
1059 if (ret > 0)
1060 ret = UBI_BAD_FASTMAP;
1061 goto free_hdr;
1062 }
1063
1064 for (i = 0; i < used_blocks; i++) {
1065 struct ubi_wl_entry *e;
1066
1067 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1068 if (!e) {
1069 while (i--)
1070 kfree(fm->e[i]);
1071
1072 ret = -ENOMEM;
1073 goto free_hdr;
1074 }
1075
1076 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1077 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1078 fm->e[i] = e;
1079 }
1080
1081 ubi->fm = fm;
1082 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1083 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1084 ubi_msg("attached by fastmap");
1085 ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
1086 ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
1087 ubi->fm_disabled = 0;
1088
1089 ubi_free_vid_hdr(ubi, vh);
1090 kfree(ech);
1091out:
1092 mutex_unlock(&ubi->fm_mutex);
1093 if (ret == UBI_BAD_FASTMAP)
1094 ubi_err("Attach by fastmap failed, doing a full scan!");
1095 return ret;
1096
1097free_hdr:
1098 ubi_free_vid_hdr(ubi, vh);
1099 kfree(ech);
1100free_fm_sb:
1101 kfree(fmsb);
1102 kfree(fm);
1103 goto out;
1104}
1105
1106/**
1107 * ubi_write_fastmap - writes a fastmap.
1108 * @ubi: UBI device object
1109 * @new_fm: the fastmap to be written
1110 *
1111 * Returns 0 on success, < 0 indicates an internal error.
1112 */
1113static int ubi_write_fastmap(struct ubi_device *ubi,
1114 struct ubi_fastmap_layout *new_fm)
1115{
1116 size_t fm_pos = 0;
1117 void *fm_raw;
1118 struct ubi_fm_sb *fmsb;
1119 struct ubi_fm_hdr *fmh;
1120 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
1121 struct ubi_fm_ec *fec;
1122 struct ubi_fm_volhdr *fvh;
1123 struct ubi_fm_eba *feba;
1124 struct rb_node *node;
1125 struct ubi_wl_entry *wl_e;
1126 struct ubi_volume *vol;
1127 struct ubi_vid_hdr *avhdr, *dvhdr;
1128 struct ubi_work *ubi_wrk;
1129 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1130 int scrub_peb_count, erase_peb_count;
1131
1132 fm_raw = ubi->fm_buf;
1133 memset(ubi->fm_buf, 0, ubi->fm_size);
1134
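	/*
	 * Build the fastmap in fm_raw in exactly the order in which
	 * ubi_attach_fastmap() parses it: super block, fastmap header, both
	 * pools, the EC lists and one EBA table per volume.
	 */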
1135 avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1136 if (!avhdr) {
1137 ret = -ENOMEM;
1138 goto out;
1139 }
1140
1141 dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
1142 if (!dvhdr) {
1143 ret = -ENOMEM;
1144 goto out_kfree;
1145 }
1146
1147 spin_lock(&ubi->volumes_lock);
1148 spin_lock(&ubi->wl_lock);
1149
1150 fmsb = (struct ubi_fm_sb *)fm_raw;
1151 fm_pos += sizeof(*fmsb);
1152 ubi_assert(fm_pos <= ubi->fm_size);
1153
1154 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1155 fm_pos += sizeof(*fmh);
1156 ubi_assert(fm_pos <= ubi->fm_size);
1157
1158 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1159 fmsb->version = UBI_FM_FMT_VERSION;
1160 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1161 /* the max sqnum will be filled in while *reading* the fastmap */
1162 fmsb->sqnum = 0;
1163
1164 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1165 free_peb_count = 0;
1166 used_peb_count = 0;
1167 scrub_peb_count = 0;
1168 erase_peb_count = 0;
1169 vol_count = 0;
1170
1171 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1172 fm_pos += sizeof(*fmpl1);
1173 fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1174 fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
1175 fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1176
1177 for (i = 0; i < ubi->fm_pool.size; i++)
1178 fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1179
1180 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1181 fm_pos += sizeof(*fmpl2);
1182 fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1183 fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
1184 fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1185
1186 for (i = 0; i < ubi->fm_wl_pool.size; i++)
1187 fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1188
1189 for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
1190 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1191 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1192
1193 fec->pnum = cpu_to_be32(wl_e->pnum);
1194 fec->ec = cpu_to_be32(wl_e->ec);
1195
1196 free_peb_count++;
1197 fm_pos += sizeof(*fec);
1198 ubi_assert(fm_pos <= ubi->fm_size);
1199 }
1200 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1201
1202 for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
1203 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1204 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1205
1206 fec->pnum = cpu_to_be32(wl_e->pnum);
1207 fec->ec = cpu_to_be32(wl_e->ec);
1208
1209 used_peb_count++;
1210 fm_pos += sizeof(*fec);
1211 ubi_assert(fm_pos <= ubi->fm_size);
1212 }
1213 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1214
1215 for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
1216 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1217 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1218
1219 fec->pnum = cpu_to_be32(wl_e->pnum);
1220 fec->ec = cpu_to_be32(wl_e->ec);
1221
1222 scrub_peb_count++;
1223 fm_pos += sizeof(*fec);
1224 ubi_assert(fm_pos <= ubi->fm_size);
1225 }
1226 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1227
1228
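	/*
	 * PEBs with pending erase work are recorded separately so that an
	 * attach via fastmap still knows they have to be erased.
	 */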
1229 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1230 if (ubi_is_erase_work(ubi_wrk)) {
1231 wl_e = ubi_wrk->e;
1232 ubi_assert(wl_e);
1233
1234 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1235
1236 fec->pnum = cpu_to_be32(wl_e->pnum);
1237 fec->ec = cpu_to_be32(wl_e->ec);
1238
1239 erase_peb_count++;
1240 fm_pos += sizeof(*fec);
1241 ubi_assert(fm_pos <= ubi->fm_size);
1242 }
1243 }
1244 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1245
1246 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1247 vol = ubi->volumes[i];
1248
1249 if (!vol)
1250 continue;
1251
1252 vol_count++;
1253
1254 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1255 fm_pos += sizeof(*fvh);
1256 ubi_assert(fm_pos <= ubi->fm_size);
1257
1258 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1259 fvh->vol_id = cpu_to_be32(vol->vol_id);
1260 fvh->vol_type = vol->vol_type;
1261 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1262 fvh->data_pad = cpu_to_be32(vol->data_pad);
1263 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1264
1265 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1266 vol->vol_type == UBI_STATIC_VOLUME);
1267
1268 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1269 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1270 ubi_assert(fm_pos <= ubi->fm_size);
1271
1272 for (j = 0; j < vol->reserved_pebs; j++)
1273 feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
1274
1275 feba->reserved_pebs = cpu_to_be32(j);
1276 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1277 }
1278 fmh->vol_count = cpu_to_be32(vol_count);
1279 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1280
1281 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1282 avhdr->lnum = 0;
1283
1284 spin_unlock(&ubi->wl_lock);
1285 spin_unlock(&ubi->volumes_lock);
1286
1287 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1288 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
1289 if (ret) {
1290 ubi_err("unable to write vid_hdr to fastmap SB!");
1291 goto out_kfree;
1292 }
1293
1294 for (i = 0; i < new_fm->used_blocks; i++) {
1295 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1296 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1297 }
1298
1299 fmsb->data_crc = 0;
1300 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1301 ubi->fm_size));
1302
1303 for (i = 1; i < new_fm->used_blocks; i++) {
1304 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1305 dvhdr->lnum = cpu_to_be32(i);
1306 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1307 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1308 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
1309 if (ret) {
1310 ubi_err("unable to write vid_hdr to PEB %i!",
1311 new_fm->e[i]->pnum);
1312 goto out_kfree;
1313 }
1314 }
1315
1316 for (i = 0; i < new_fm->used_blocks; i++) {
1317 ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
1318 new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
1319 if (ret) {
1320 ubi_err("unable to write fastmap to PEB %i!",
1321 new_fm->e[i]->pnum);
1322 goto out_kfree;
1323 }
1324 }
1325
1326 ubi_assert(new_fm);
1327 ubi->fm = new_fm;
1328
1329 dbg_bld("fastmap written!");
1330
1331out_kfree:
1332 ubi_free_vid_hdr(ubi, avhdr);
1333 ubi_free_vid_hdr(ubi, dvhdr);
1334out:
1335 return ret;
1336}
1337
1338/**
1339 * erase_block - Manually erase a PEB.
1340 * @ubi: UBI device object
1341 * @pnum: PEB to be erased
1342 *
1343 * Returns the new EC value on success, < 0 indicates an internal error.
1344 */
1345static int erase_block(struct ubi_device *ubi, int pnum)
1346{
1347 int ret;
1348 struct ubi_ec_hdr *ec_hdr;
1349 long long ec;
1350
1351 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1352 if (!ec_hdr)
1353 return -ENOMEM;
1354
1355 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1356 if (ret < 0)
1357 goto out;
1358 else if (ret && ret != UBI_IO_BITFLIPS) {
1359 ret = -EINVAL;
1360 goto out;
1361 }
1362
1363 ret = ubi_io_sync_erase(ubi, pnum, 0);
1364 if (ret < 0)
1365 goto out;
1366
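	/*
	 * ubi_io_sync_erase() returns the number of erase operations
	 * performed; add it to the old erase counter and write the updated
	 * EC header back.
	 */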
1367 ec = be64_to_cpu(ec_hdr->ec);
1368 ec += ret;
1369 if (ec > UBI_MAX_ERASECOUNTER) {
1370 ret = -EINVAL;
1371 goto out;
1372 }
1373
1374 ec_hdr->ec = cpu_to_be64(ec);
1375 ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1376 if (ret < 0)
1377 goto out;
1378
1379 ret = ec;
1380out:
1381 kfree(ec_hdr);
1382 return ret;
1383}
1384
1385/**
1386 * invalidate_fastmap - destroys a fastmap.
1387 * @ubi: UBI device object
1388 * @fm: the fastmap to be destroyed
1389 *
1390 * Returns 0 on success, < 0 indicates an internal error.
1391 */
1392static int invalidate_fastmap(struct ubi_device *ubi,
1393 struct ubi_fastmap_layout *fm)
1394{
1395 int ret;
1396 struct ubi_vid_hdr *vh;
1397
1398 ret = erase_block(ubi, fm->e[0]->pnum);
1399 if (ret < 0)
1400 return ret;
1401
1402 vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1403 if (!vh)
1404 return -ENOMEM;
1405
1406 /* deleting the current fastmap SB is not enough, an old SB may exist,
1407 * so create a (corrupted) SB such that fastmap will find it and fall
1408 * back to scanning mode in any case */
1409 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1410 ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
1411
1412 return ret;
1413}
1414
1415/**
1416 * ubi_update_fastmap - will be called by UBI if a volume changes or
1417 * a fastmap pool becomes full.
1418 * @ubi: UBI device object
1419 *
1420 * Returns 0 on success, < 0 indicates an internal error.
1421 */
1422int ubi_update_fastmap(struct ubi_device *ubi)
1423{
1424 int ret, i;
1425 struct ubi_fastmap_layout *new_fm, *old_fm;
1426 struct ubi_wl_entry *tmp_e;
1427
1428 mutex_lock(&ubi->fm_mutex);
1429
1430 ubi_refill_pools(ubi);
1431
1432 if (ubi->ro_mode || ubi->fm_disabled) {
1433 mutex_unlock(&ubi->fm_mutex);
1434 return 0;
1435 }
1436
1437 ret = ubi_ensure_anchor_pebs(ubi);
1438 if (ret) {
1439 mutex_unlock(&ubi->fm_mutex);
1440 return ret;
1441 }
1442
1443 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1444 if (!new_fm) {
1445 mutex_unlock(&ubi->fm_mutex);
1446 return -ENOMEM;
1447 }
1448
1449 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1450
1451 for (i = 0; i < new_fm->used_blocks; i++) {
1452 new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1453 if (!new_fm->e[i]) {
1454 while (i--)
1455 kfree(new_fm->e[i]);
1456
1457 kfree(new_fm);
1458 mutex_unlock(&ubi->fm_mutex);
1459 return -ENOMEM;
1460 }
1461 }
1462
1463 old_fm = ubi->fm;
1464 ubi->fm = NULL;
1465
1466 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1467 ubi_err("fastmap too large");
1468 ret = -ENOSPC;
1469 goto err;
1470 }
1471
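	/*
	 * Get PEBs for the fastmap data blocks (block 0 is the anchor and is
	 * handled below). If the WL subsystem cannot provide a free PEB,
	 * reuse the corresponding PEB of the old fastmap after erasing it.
	 */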
1472 for (i = 1; i < new_fm->used_blocks; i++) {
1473 spin_lock(&ubi->wl_lock);
1474 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1475 spin_unlock(&ubi->wl_lock);
1476
1477 if (!tmp_e && !old_fm) {
1478 int j;
1479 ubi_err("could not get any free erase block");
1480
1481 for (j = 1; j < i; j++)
1482 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1483
1484 ret = -ENOSPC;
1485 goto err;
1486 } else if (!tmp_e && old_fm) {
1487 ret = erase_block(ubi, old_fm->e[i]->pnum);
1488 if (ret < 0) {
1489 int j;
1490
1491 for (j = 1; j < i; j++)
1492 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1493 j, 0);
1494
1495 ubi_err("could not erase old fastmap PEB");
1496 goto err;
1497 }
1498
1499 new_fm->e[i]->pnum = old_fm->e[i]->pnum;
1500 new_fm->e[i]->ec = old_fm->e[i]->ec;
1501 } else {
1502 new_fm->e[i]->pnum = tmp_e->pnum;
1503 new_fm->e[i]->ec = tmp_e->ec;
1504
1505 if (old_fm)
1506 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1507 old_fm->to_be_tortured[i]);
1508 }
1509 }
1510
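	/* Now get the anchor PEB which will hold the fastmap super block. */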
1511 spin_lock(&ubi->wl_lock);
1512 tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1513 spin_unlock(&ubi->wl_lock);
1514
1515 if (old_fm) {
1516 /* no fresh anchor PEB was found, reuse the old one */
1517 if (!tmp_e) {
1518 ret = erase_block(ubi, old_fm->e[0]->pnum);
1519 if (ret < 0) {
1520 int i;
1521 ubi_err("could not erase old anchor PEB");
1522
1523 for (i = 1; i < new_fm->used_blocks; i++)
1524 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1525 i, 0);
1526 goto err;
1527 }
1528
1529 new_fm->e[0]->pnum = old_fm->e[0]->pnum;
1530 new_fm->e[0]->ec = ret;
1531 } else {
1532 /* we've got a new anchor PEB, return the old one */
1533 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1534 old_fm->to_be_tortured[0]);
1535
1536 new_fm->e[0]->pnum = tmp_e->pnum;
1537 new_fm->e[0]->ec = tmp_e->ec;
1538 }
1539 } else {
1540 if (!tmp_e) {
1541 int i;
1542 ubi_err("could not find any anchor PEB");
1543
1544 for (i = 1; i < new_fm->used_blocks; i++)
1545 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1546
1547 ret = -ENOSPC;
1548 goto err;
1549 }
1550
1551 new_fm->e[0]->pnum = tmp_e->pnum;
1552 new_fm->e[0]->ec = tmp_e->ec;
1553 }
1554
1555 down_write(&ubi->work_sem);
1556 down_write(&ubi->fm_sem);
1557 ret = ubi_write_fastmap(ubi, new_fm);
1558 up_write(&ubi->fm_sem);
1559 up_write(&ubi->work_sem);
1560
1561 if (ret)
1562 goto err;
1563
1564out_unlock:
1565 mutex_unlock(&ubi->fm_mutex);
1566 kfree(old_fm);
1567 return ret;
1568
1569err:
1570 kfree(new_fm);
1571
1572 ubi_warn("Unable to write new fastmap, err=%i", ret);
1573
1574 ret = 0;
1575 if (old_fm) {
1576 ret = invalidate_fastmap(ubi, old_fm);
1577 if (ret < 0)
1578 ubi_err("Unable to invalidiate current fastmap!");
1579 else if (ret)
1580 ret = 0;
1581 }
1582 goto out_unlock;
1583}