blob: 34eed1380dca8aa9640174fd53bd314e8a5ac4b8 [file] [log] [blame]
Tobias Waldekranzc41e2092023-02-16 16:33:49 +01001// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (c) 2023 Addiva Elektronik
4 * Author: Tobias Waldekranz <tobias@waldekranz.com>
5 */
6
Tobias Waldekranzc41e2092023-02-16 16:33:49 +01007#include <blk.h>
8#include <blkmap.h>
9#include <dm.h>
10#include <malloc.h>
11#include <mapmem.h>
12#include <part.h>
13#include <dm/device-internal.h>
14#include <dm/lists.h>
15#include <dm/root.h>
16
struct blkmap;

/**
 * struct blkmap_slice - Region mapped to a blkmap
 *
 * Common data for a region mapped to a blkmap, specialized by each
 * map type.
 *
 * @node: List node used to associate this slice with a blkmap
 * @blknr: Start block number of the mapping
 * @blkcnt: Number of blocks covered by this mapping
 */
struct blkmap_slice {
	struct list_head node;

	lbaint_t blknr;
	lbaint_t blkcnt;

	/**
	 * @read: - Read from slice
	 *
	 * @read.bm: Blkmap to which this slice belongs
	 * @read.bms: This slice
	 * @read.blknr: Start block number to read from, relative to
	 *              the start of this slice
	 * @read.blkcnt: Number of blocks to read
	 * @read.buffer: Buffer to store read data to
	 */
	ulong (*read)(struct blkmap *bm, struct blkmap_slice *bms,
		      lbaint_t blknr, lbaint_t blkcnt, void *buffer);

	/**
	 * @write: - Write to slice
	 *
	 * @write.bm: Blkmap to which this slice belongs
	 * @write.bms: This slice
	 * @write.blknr: Start block number to write to, relative to
	 *               the start of this slice
	 * @write.blkcnt: Number of blocks to write
	 * @write.buffer: Data to be written
	 */
	ulong (*write)(struct blkmap *bm, struct blkmap_slice *bms,
		       lbaint_t blknr, lbaint_t blkcnt, const void *buffer);

	/**
	 * @destroy: - Tear down slice
	 *
	 * Optional; gives the slice a chance to release any resources
	 * it holds before it is freed.
	 *
	 * @destroy.bm: Blkmap to which this slice belongs
	 * @destroy.bms: This slice
	 */
	void (*destroy)(struct blkmap *bm, struct blkmap_slice *bms);
};
67
Tobias Waldekranzc41e2092023-02-16 16:33:49 +010068static bool blkmap_slice_contains(struct blkmap_slice *bms, lbaint_t blknr)
69{
70 return (blknr >= bms->blknr) && (blknr < (bms->blknr + bms->blkcnt));
71}
72
73static bool blkmap_slice_available(struct blkmap *bm, struct blkmap_slice *new)
74{
75 struct blkmap_slice *bms;
76 lbaint_t first, last;
77
78 first = new->blknr;
79 last = new->blknr + new->blkcnt - 1;
80
81 list_for_each_entry(bms, &bm->slices, node) {
82 if (blkmap_slice_contains(bms, first) ||
83 blkmap_slice_contains(bms, last) ||
84 blkmap_slice_contains(new, bms->blknr) ||
85 blkmap_slice_contains(new, bms->blknr + bms->blkcnt - 1))
86 return false;
87 }
88
89 return true;
90}
91
/**
 * blkmap_slice_add() - Insert a new slice into a blkmap
 *
 * Slices are kept sorted by start block number; readers and writers
 * rely on that ordering when walking the list.
 *
 * @bm: The blkmap to add to
 * @new: The slice to insert
 *
 * Return: 0 on success, -EBUSY if @new overlaps an existing slice
 */
static int blkmap_slice_add(struct blkmap *bm, struct blkmap_slice *new)
{
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	struct list_head *insert = &bm->slices;
	struct blkmap_slice *bms;

	if (!blkmap_slice_available(bm, new))
		return -EBUSY;

	/* Find the first slice starting at or after @new; inserting
	 * before it keeps the list sorted. If none exists, @insert
	 * stays at the list head and @new becomes the last entry.
	 */
	list_for_each_entry(bms, &bm->slices, node) {
		if (bms->blknr < new->blknr)
			continue;

		insert = &bms->node;
		break;
	}

	list_add_tail(&new->node, insert);

	/* Disk might have grown, update the size */
	bms = list_last_entry(&bm->slices, struct blkmap_slice, node);
	bd->lba = bms->blknr + bms->blkcnt;
	return 0;
}
116
/**
 * struct blkmap_linear - Linear mapping to other block device
 *
 * Block n of the slice is serviced by block @blknr + n of @blk.
 *
 * @slice: Common map data
 * @blk: Target block device of this mapping
 * @blknr: Start block number of the target device
 */
struct blkmap_linear {
	struct blkmap_slice slice;

	struct udevice *blk;
	lbaint_t blknr;
};
130
131static ulong blkmap_linear_read(struct blkmap *bm, struct blkmap_slice *bms,
132 lbaint_t blknr, lbaint_t blkcnt, void *buffer)
133{
134 struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);
135
136 return blk_read(bml->blk, bml->blknr + blknr, blkcnt, buffer);
137}
138
139static ulong blkmap_linear_write(struct blkmap *bm, struct blkmap_slice *bms,
140 lbaint_t blknr, lbaint_t blkcnt,
141 const void *buffer)
142{
143 struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);
144
145 return blk_write(bml->blk, bml->blknr + blknr, blkcnt, buffer);
146}
147
/**
 * blkmap_map_linear() - Map a region of another block device
 *
 * @dev: Blkmap device to map to
 * @blknr: Start block number of the mapping
 * @blkcnt: Number of blocks to map
 * @lblk: Target block device of the mapping
 * @lblknr: Start block number of the target device
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EBUSY if the
 * region overlaps an existing slice
 */
int blkmap_map_linear(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		      struct udevice *lblk, lbaint_t lblknr)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blkmap_linear *linear;
	struct blk_desc *bd, *lbd;
	int err;

	bd = dev_get_uclass_plat(bm->blk);
	lbd = dev_get_uclass_plat(lblk);
	if (lbd->blksz != bd->blksz) {
		/* update to match the mapped device */
		bd->blksz = lbd->blksz;
		bd->log2blksz = LOG2(bd->blksz);
	}

	linear = malloc(sizeof(*linear));
	if (!linear)
		return -ENOMEM;

	*linear = (struct blkmap_linear) {
		.slice = {
			.blknr = blknr,
			.blkcnt = blkcnt,

			.read = blkmap_linear_read,
			.write = blkmap_linear_write,
		},

		.blk = lblk,
		.blknr = lblknr,
	};

	err = blkmap_slice_add(bm, &linear->slice);
	if (err)
		free(linear);

	return err;
}
187
/**
 * struct blkmap_mem - Memory mapping
 *
 * Reads and writes are serviced by memcpy() straight to/from @addr.
 *
 * @slice: Common map data
 * @addr: Target memory region of this mapping
 * @remapped: True if @addr is backed by a physical to virtual memory
 *            mapping that must be torn down at the end of this mapping's
 *            lifetime.
 */
struct blkmap_mem {
	struct blkmap_slice slice;
	void *addr;
	bool remapped;
};
202
203static ulong blkmap_mem_read(struct blkmap *bm, struct blkmap_slice *bms,
204 lbaint_t blknr, lbaint_t blkcnt, void *buffer)
205{
206 struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
207 struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
208 char *src;
209
210 src = bmm->addr + (blknr << bd->log2blksz);
211 memcpy(buffer, src, blkcnt << bd->log2blksz);
212 return blkcnt;
213}
214
215static ulong blkmap_mem_write(struct blkmap *bm, struct blkmap_slice *bms,
216 lbaint_t blknr, lbaint_t blkcnt,
217 const void *buffer)
218{
219 struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
220 struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
221 char *dst;
222
223 dst = bmm->addr + (blknr << bd->log2blksz);
224 memcpy(dst, buffer, blkcnt << bd->log2blksz);
225 return blkcnt;
226}
227
228static void blkmap_mem_destroy(struct blkmap *bm, struct blkmap_slice *bms)
229{
230 struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
231
232 if (bmm->remapped)
233 unmap_sysmem(bmm->addr);
234}
235
236int __blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
237 void *addr, bool remapped)
238{
239 struct blkmap *bm = dev_get_plat(dev);
240 struct blkmap_mem *bmm;
241 int err;
242
243 bmm = malloc(sizeof(*bmm));
244 if (!bmm)
245 return -ENOMEM;
246
247 *bmm = (struct blkmap_mem) {
248 .slice = {
249 .blknr = blknr,
250 .blkcnt = blkcnt,
251
252 .read = blkmap_mem_read,
253 .write = blkmap_mem_write,
254 .destroy = blkmap_mem_destroy,
255 },
256
257 .addr = addr,
258 .remapped = remapped,
259 };
260
261 err = blkmap_slice_add(bm, &bmm->slice);
262 if (err)
263 free(bmm);
264
265 return err;
266}
267
/**
 * blkmap_map_mem() - Map a region of virtually addressable memory
 *
 * @dev: Blkmap device to map to
 * @blknr: Start block number of the mapping
 * @blkcnt: Number of blocks to map
 * @addr: Address of the start of the mapped region
 *
 * Return: 0 on success, negative error otherwise
 */
int blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		   void *addr)
{
	/* Plain memory: nothing to unmap when the slice is destroyed */
	return __blkmap_map_mem(dev, blknr, blkcnt, addr, false);
}
273
/**
 * blkmap_map_pmem() - Map a region of physically addressed memory
 *
 * The region is remapped via map_sysmem(); the resulting virtual
 * mapping is meant to be torn down by the slice's ->destroy op
 * (blkmap_mem_destroy) when the slice goes away.
 *
 * @dev: Blkmap device to map to
 * @blknr: Start block number of the mapping
 * @blkcnt: Number of blocks to map
 * @paddr: Physical address of the start of the mapped region
 *
 * Return: 0 on success, -ENOMEM on allocation/remap failure, -EBUSY
 * if the region overlaps an existing slice
 */
int blkmap_map_pmem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		    phys_addr_t paddr)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	void *addr;
	int err;

	addr = map_sysmem(paddr, blkcnt << bd->log2blksz);
	if (!addr)
		return -ENOMEM;

	err = __blkmap_map_mem(dev, blknr, blkcnt, addr, true);
	if (err)
		unmap_sysmem(addr);

	return err;
}
292
Tobias Waldekranzc41e2092023-02-16 16:33:49 +0100293static ulong blkmap_blk_read_slice(struct blkmap *bm, struct blkmap_slice *bms,
294 lbaint_t blknr, lbaint_t blkcnt,
295 void *buffer)
296{
297 lbaint_t nr, cnt;
298
299 nr = blknr - bms->blknr;
300 cnt = (blkcnt < bms->blkcnt) ? blkcnt : bms->blkcnt;
301 return bms->read(bm, bms, nr, cnt, buffer);
302}
303
304static ulong blkmap_blk_read(struct udevice *dev, lbaint_t blknr,
305 lbaint_t blkcnt, void *buffer)
306{
307 struct blk_desc *bd = dev_get_uclass_plat(dev);
308 struct blkmap *bm = dev_get_plat(dev->parent);
309 struct blkmap_slice *bms;
310 lbaint_t cnt, total = 0;
311
312 list_for_each_entry(bms, &bm->slices, node) {
313 if (!blkmap_slice_contains(bms, blknr))
314 continue;
315
316 cnt = blkmap_blk_read_slice(bm, bms, blknr, blkcnt, buffer);
317 blknr += cnt;
318 blkcnt -= cnt;
319 buffer += cnt << bd->log2blksz;
320 total += cnt;
321 }
322
323 return total;
324}
325
326static ulong blkmap_blk_write_slice(struct blkmap *bm, struct blkmap_slice *bms,
327 lbaint_t blknr, lbaint_t blkcnt,
328 const void *buffer)
329{
330 lbaint_t nr, cnt;
331
332 nr = blknr - bms->blknr;
333 cnt = (blkcnt < bms->blkcnt) ? blkcnt : bms->blkcnt;
334 return bms->write(bm, bms, nr, cnt, buffer);
335}
336
337static ulong blkmap_blk_write(struct udevice *dev, lbaint_t blknr,
338 lbaint_t blkcnt, const void *buffer)
339{
340 struct blk_desc *bd = dev_get_uclass_plat(dev);
341 struct blkmap *bm = dev_get_plat(dev->parent);
342 struct blkmap_slice *bms;
343 lbaint_t cnt, total = 0;
344
345 list_for_each_entry(bms, &bm->slices, node) {
346 if (!blkmap_slice_contains(bms, blknr))
347 continue;
348
349 cnt = blkmap_blk_write_slice(bm, bms, blknr, blkcnt, buffer);
350 blknr += cnt;
351 blkcnt -= cnt;
352 buffer += cnt << bd->log2blksz;
353 total += cnt;
354 }
355
356 return total;
357}
358
/* Operations of the child UCLASS_BLK device; all I/O is routed through
 * the slice list of the parent blkmap.
 */
static const struct blk_ops blkmap_blk_ops = {
	.read = blkmap_blk_read,
	.write = blkmap_blk_write,
};

/* Driver bound as the child block device of each blkmap */
U_BOOT_DRIVER(blkmap_blk) = {
	.name = "blkmap_blk",
	.id = UCLASS_BLK,
	.ops = &blkmap_blk_ops,
};
369
/**
 * blkmap_dev_bind() - Bind a blkmap device
 *
 * Creates the child UCLASS_BLK device through which the mapped slices
 * are accessed, and initializes the (initially empty) slice list.
 *
 * @dev: The blkmap device being bound
 *
 * Return: 0 on success, negative error if the child block device
 * cannot be created
 */
static int blkmap_dev_bind(struct udevice *dev)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blk_desc *bd;
	int err;

	err = blk_create_devicef(dev, "blkmap_blk", "blk", UCLASS_BLKMAP,
				 dev_seq(dev), DEFAULT_BLKSZ, 0, &bm->blk);
	if (err)
		return log_msg_ret("blk", err);

	INIT_LIST_HEAD(&bm->slices);

	bd = dev_get_uclass_plat(bm->blk);
	snprintf(bd->vendor, BLK_VEN_SIZE, "U-Boot");
	snprintf(bd->product, BLK_PRD_SIZE, "blkmap");
	snprintf(bd->revision, BLK_REV_SIZE, "1.0");

	/*
	 * EFI core isn't keen on zero-sized disks, so we lie. This is
	 * updated with the correct size once the user adds a
	 * mapping.
	 */
	bd->lba = 1;

	return 0;
}
396
Bin Meng6efca7f2023-09-26 16:43:37 +0800397static int blkmap_dev_unbind(struct udevice *dev)
Tobias Waldekranzc41e2092023-02-16 16:33:49 +0100398{
399 struct blkmap *bm = dev_get_plat(dev);
400 struct blkmap_slice *bms, *tmp;
401 int err;
402
403 list_for_each_entry_safe(bms, tmp, &bm->slices, node) {
404 list_del(&bms->node);
405 free(bms);
406 }
407
408 err = device_remove(bm->blk, DM_REMOVE_NORMAL);
409 if (err)
410 return err;
411
412 return device_unbind(bm->blk);
413}
414
/* Root driver bound once per created blkmap; its plat data holds the
 * label and the slice list (struct blkmap).
 */
U_BOOT_DRIVER(blkmap_root) = {
	.name = "blkmap_dev",
	.id = UCLASS_BLKMAP,
	.bind = blkmap_dev_bind,
	.unbind = blkmap_dev_unbind,
	.plat_auto = sizeof(struct blkmap),
};
422
423struct udevice *blkmap_from_label(const char *label)
424{
425 struct udevice *dev;
426 struct uclass *uc;
427 struct blkmap *bm;
428
429 uclass_id_foreach_dev(UCLASS_BLKMAP, dev, uc) {
430 bm = dev_get_plat(dev);
431 if (bm->label && !strcmp(label, bm->label))
432 return dev;
433 }
434
435 return NULL;
436}
437
438int blkmap_create(const char *label, struct udevice **devp)
439{
440 char *hname, *hlabel;
441 struct udevice *dev;
442 struct blkmap *bm;
443 size_t namelen;
444 int err;
445
446 dev = blkmap_from_label(label);
447 if (dev) {
448 err = -EBUSY;
449 goto err;
450 }
451
452 hlabel = strdup(label);
453 if (!hlabel) {
454 err = -ENOMEM;
455 goto err;
456 }
457
458 namelen = strlen("blkmap-") + strlen(label) + 1;
459 hname = malloc(namelen);
460 if (!hname) {
461 err = -ENOMEM;
462 goto err_free_hlabel;
463 }
464
465 strlcpy(hname, "blkmap-", namelen);
466 strlcat(hname, label, namelen);
467
468 err = device_bind_driver(dm_root(), "blkmap_dev", hname, &dev);
469 if (err)
470 goto err_free_hname;
471
472 device_set_name_alloced(dev);
473 bm = dev_get_plat(dev);
474 bm->label = hlabel;
475
476 if (devp)
477 *devp = dev;
478
479 return 0;
480
481err_free_hname:
482 free(hname);
483err_free_hlabel:
484 free(hlabel);
485err:
486 return err;
487}
488
489int blkmap_destroy(struct udevice *dev)
490{
491 int err;
492
493 err = device_remove(dev, DM_REMOVE_NORMAL);
494 if (err)
495 return err;
496
497 return device_unbind(dev);
498}
499
/* Uclass under which all blkmap devices are registered */
UCLASS_DRIVER(blkmap) = {
	.id = UCLASS_BLKMAP,
	.name = "blkmap",
};