// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2023 Addiva Elektronik
 * Author: Tobias Waldekranz <tobias@waldekranz.com>
 */

#include <blk.h>
#include <blkmap.h>
#include <dm.h>
#include <malloc.h>
#include <mapmem.h>
#include <part.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/root.h>

struct blkmap;

/**
 * define BLKMAP_SLICE_LINEAR - Linear mapping to another block device
 *
 * This blkmap slice type is used for mapping to other existing block
 * devices.
 */
#define BLKMAP_SLICE_LINEAR	BIT(0)

/**
 * define BLKMAP_SLICE_MEM - Linear mapping to a memory-based block device
 *
 * This blkmap slice type is used for mapping to memory-based block
 * devices, like ramdisks.
 */
#define BLKMAP_SLICE_MEM	BIT(1)

/**
 * define BLKMAP_SLICE_PRESERVE - Preserved blkmap slice
 *
 * This blkmap slice is intended to be preserved, with its
 * information passed on to a later stage, like the OS.
 */
#define BLKMAP_SLICE_PRESERVE	BIT(2)

/**
 * struct blkmap_slice - Region mapped to a blkmap
 *
 * Common data for a region mapped to a blkmap, specialized by each
 * map type.
 *
 * @node: List node used to associate this slice with a blkmap
 * @blknr: Start block number of the mapping
 * @blkcnt: Number of blocks covered by this mapping
 * @attr: Attributes of blkmap slice
 */
struct blkmap_slice {
	struct list_head node;

	lbaint_t blknr;
	lbaint_t blkcnt;
	uint attr;

	/**
	 * @read: - Read from slice
	 *
	 * @read.bm: Blkmap to which this slice belongs
	 * @read.bms: This slice
	 * @read.blknr: Start block number to read from
	 * @read.blkcnt: Number of blocks to read
	 * @read.buffer: Buffer to store read data to
	 */
	ulong (*read)(struct blkmap *bm, struct blkmap_slice *bms,
		      lbaint_t blknr, lbaint_t blkcnt, void *buffer);

	/**
	 * @write: - Write to slice
	 *
	 * @write.bm: Blkmap to which this slice belongs
	 * @write.bms: This slice
	 * @write.blknr: Start block number to write to
	 * @write.blkcnt: Number of blocks to write
	 * @write.buffer: Data to be written
	 */
	ulong (*write)(struct blkmap *bm, struct blkmap_slice *bms,
		       lbaint_t blknr, lbaint_t blkcnt, const void *buffer);

	/**
	 * @destroy: - Tear down slice
	 *
	 * @destroy.bm: Blkmap to which this slice belongs
	 * @destroy.bms: This slice
	 */
	void (*destroy)(struct blkmap *bm, struct blkmap_slice *bms);
};

static bool blkmap_slice_contains(struct blkmap_slice *bms, lbaint_t blknr)
{
	return (blknr >= bms->blknr) && (blknr < (bms->blknr + bms->blkcnt));
}

/*
 * Check that the region described by @new does not overlap any
 * existing slice
 */
static bool blkmap_slice_available(struct blkmap *bm, struct blkmap_slice *new)
{
	struct blkmap_slice *bms;
	lbaint_t first, last;

	first = new->blknr;
	last = new->blknr + new->blkcnt - 1;

	list_for_each_entry(bms, &bm->slices, node) {
		if (blkmap_slice_contains(bms, first) ||
		    blkmap_slice_contains(bms, last) ||
		    blkmap_slice_contains(new, bms->blknr) ||
		    blkmap_slice_contains(new, bms->blknr + bms->blkcnt - 1))
			return false;
	}

	return true;
}

/*
 * Insert @new into the slice list, which is kept sorted by start
 * block, and update the disk size if the mapping grows it
 */
static int blkmap_slice_add(struct blkmap *bm, struct blkmap_slice *new)
{
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	struct list_head *insert = &bm->slices;
	struct blkmap_slice *bms;

	if (!blkmap_slice_available(bm, new))
		return -EBUSY;

	list_for_each_entry(bms, &bm->slices, node) {
		if (bms->blknr < new->blknr)
			continue;

		insert = &bms->node;
		break;
	}

	list_add_tail(&new->node, insert);

	/* Disk might have grown, update the size */
	bms = list_last_entry(&bm->slices, struct blkmap_slice, node);
	bd->lba = bms->blknr + bms->blkcnt;
	return 0;
}

/**
 * struct blkmap_linear - Linear mapping to another block device
 *
 * @slice: Common map data
 * @blk: Target block device of this mapping
 * @blknr: Start block number of the target device
 */
struct blkmap_linear {
	struct blkmap_slice slice;

	struct udevice *blk;
	lbaint_t blknr;
};

static ulong blkmap_linear_read(struct blkmap *bm, struct blkmap_slice *bms,
				lbaint_t blknr, lbaint_t blkcnt, void *buffer)
{
	struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);

	return blk_read(bml->blk, bml->blknr + blknr, blkcnt, buffer);
}

static ulong blkmap_linear_write(struct blkmap *bm, struct blkmap_slice *bms,
				 lbaint_t blknr, lbaint_t blkcnt,
				 const void *buffer)
{
	struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);

	return blk_write(bml->blk, bml->blknr + blknr, blkcnt, buffer);
}

int blkmap_map_linear(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		      struct udevice *lblk, lbaint_t lblknr)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blkmap_linear *linear;
	struct blk_desc *bd, *lbd;
	int err;

	bd = dev_get_uclass_plat(bm->blk);
	lbd = dev_get_uclass_plat(lblk);
	if (lbd->blksz != bd->blksz) {
		/* update to match the mapped device */
		bd->blksz = lbd->blksz;
		bd->log2blksz = LOG2(bd->blksz);
	}

	linear = malloc(sizeof(*linear));
	if (!linear)
		return -ENOMEM;

	*linear = (struct blkmap_linear) {
		.slice = {
			.blknr = blknr,
			.blkcnt = blkcnt,
			.attr = BLKMAP_SLICE_LINEAR,

			.read = blkmap_linear_read,
			.write = blkmap_linear_write,
		},

		.blk = lblk,
		.blknr = lblknr,
	};

	err = blkmap_slice_add(bm, &linear->slice);
	if (err)
		free(linear);

	return err;
}

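/*
 * Example (a sketch, not part of the driver): build a composite device
 * by mapping the first 0x1000 blocks of an existing block device at
 * the start of a blkmap. @target is assumed to be a UCLASS_BLK device
 * handle obtained elsewhere, e.g. from a prior probe.
 *
 *	struct udevice *bm_dev;
 *	int err;
 *
 *	err = blkmap_create("composite", &bm_dev);
 *	if (!err)
 *		err = blkmap_map_linear(bm_dev, 0, 0x1000, target, 0);
 */
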
/**
 * struct blkmap_mem - Memory mapping
 *
 * @slice: Common map data
 * @addr: Target memory region of this mapping
 * @remapped: True if @addr is backed by a physical to virtual memory
 * mapping that must be torn down at the end of this mapping's
 * lifetime.
 */
struct blkmap_mem {
	struct blkmap_slice slice;
	void *addr;
	bool remapped;
};

static ulong blkmap_mem_read(struct blkmap *bm, struct blkmap_slice *bms,
			     lbaint_t blknr, lbaint_t blkcnt, void *buffer)
{
	struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	char *src;

	src = bmm->addr + (blknr << bd->log2blksz);
	memcpy(buffer, src, blkcnt << bd->log2blksz);
	return blkcnt;
}

static ulong blkmap_mem_write(struct blkmap *bm, struct blkmap_slice *bms,
			      lbaint_t blknr, lbaint_t blkcnt,
			      const void *buffer)
{
	struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	char *dst;

	dst = bmm->addr + (blknr << bd->log2blksz);
	memcpy(dst, buffer, blkcnt << bd->log2blksz);
	return blkcnt;
}

static void blkmap_mem_destroy(struct blkmap *bm, struct blkmap_slice *bms)
{
	struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);

	if (bmm->remapped)
		unmap_sysmem(bmm->addr);
}

int __blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		     void *addr, bool remapped, bool preserve)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blkmap_mem *bmm;
	int err;

	bmm = malloc(sizeof(*bmm));
	if (!bmm)
		return -ENOMEM;

	*bmm = (struct blkmap_mem) {
		.slice = {
			.blknr = blknr,
			.blkcnt = blkcnt,
			.attr = BLKMAP_SLICE_MEM,

			.read = blkmap_mem_read,
			.write = blkmap_mem_write,
			.destroy = blkmap_mem_destroy,
		},

		.addr = addr,
		.remapped = remapped,
	};

	if (preserve)
		bmm->slice.attr |= BLKMAP_SLICE_PRESERVE;

	err = blkmap_slice_add(bm, &bmm->slice);
	if (err)
		free(bmm);

	return err;
}

int blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		   void *addr)
{
	return __blkmap_map_mem(dev, blknr, blkcnt, addr, false, false);
}

int blkmap_map_pmem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		    phys_addr_t paddr, bool preserve)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	void *addr;
	int err;

	addr = map_sysmem(paddr, blkcnt << bd->log2blksz);
	if (!addr)
		return -ENOMEM;

	err = __blkmap_map_mem(dev, blknr, blkcnt, addr, true, preserve);
	if (err)
		unmap_sysmem(addr);

	return err;
}

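/*
 * Example (a sketch, not part of the driver): expose a ramdisk image
 * that was loaded at a physical address as a block device, and ask for
 * the backing memory to be preserved for the OS. The address and block
 * count below are illustrative only.
 *
 *	struct udevice *bm_dev;
 *	int err;
 *
 *	err = blkmap_create("ramdisk", &bm_dev);
 *	if (!err)
 *		err = blkmap_map_pmem(bm_dev, 0, 0x8000, 0x48000000, true);
 */
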
static ulong blkmap_blk_read_slice(struct blkmap *bm, struct blkmap_slice *bms,
				   lbaint_t blknr, lbaint_t blkcnt,
				   void *buffer)
{
	lbaint_t nr, cnt;

	nr = blknr - bms->blknr;
	cnt = (blkcnt < bms->blkcnt) ? blkcnt : bms->blkcnt;
	return bms->read(bm, bms, nr, cnt, buffer);
}

static ulong blkmap_blk_read(struct udevice *dev, lbaint_t blknr,
			     lbaint_t blkcnt, void *buffer)
{
	struct blk_desc *bd = dev_get_uclass_plat(dev);
	struct blkmap *bm = dev_get_plat(dev->parent);
	struct blkmap_slice *bms;
	lbaint_t cnt, total = 0;

	list_for_each_entry(bms, &bm->slices, node) {
		if (!blkmap_slice_contains(bms, blknr))
			continue;

		cnt = blkmap_blk_read_slice(bm, bms, blknr, blkcnt, buffer);
		blknr += cnt;
		blkcnt -= cnt;
		buffer += cnt << bd->log2blksz;
		total += cnt;
	}

	return total;
}

static ulong blkmap_blk_write_slice(struct blkmap *bm, struct blkmap_slice *bms,
				    lbaint_t blknr, lbaint_t blkcnt,
				    const void *buffer)
{
	lbaint_t nr, cnt;

	nr = blknr - bms->blknr;
	cnt = (blkcnt < bms->blkcnt) ? blkcnt : bms->blkcnt;
	return bms->write(bm, bms, nr, cnt, buffer);
}

static ulong blkmap_blk_write(struct udevice *dev, lbaint_t blknr,
			      lbaint_t blkcnt, const void *buffer)
{
	struct blk_desc *bd = dev_get_uclass_plat(dev);
	struct blkmap *bm = dev_get_plat(dev->parent);
	struct blkmap_slice *bms;
	lbaint_t cnt, total = 0;

	list_for_each_entry(bms, &bm->slices, node) {
		if (!blkmap_slice_contains(bms, blknr))
			continue;

		cnt = blkmap_blk_write_slice(bm, bms, blknr, blkcnt, buffer);
		blknr += cnt;
		blkcnt -= cnt;
		buffer += cnt << bd->log2blksz;
		total += cnt;
	}

	return total;
}

static const struct blk_ops blkmap_blk_ops = {
	.read = blkmap_blk_read,
	.write = blkmap_blk_write,
};

U_BOOT_DRIVER(blkmap_blk) = {
	.name = "blkmap_blk",
	.id = UCLASS_BLK,
	.ops = &blkmap_blk_ops,
};

static int blkmap_dev_bind(struct udevice *dev)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blk_desc *bd;
	int err;

	err = blk_create_devicef(dev, "blkmap_blk", "blk", UCLASS_BLKMAP,
				 dev_seq(dev), DEFAULT_BLKSZ, 0, &bm->blk);
	if (err)
		return log_msg_ret("blk", err);

	INIT_LIST_HEAD(&bm->slices);

	bd = dev_get_uclass_plat(bm->blk);
	snprintf(bd->vendor, BLK_VEN_SIZE, "U-Boot");
	snprintf(bd->product, BLK_PRD_SIZE, "blkmap");
	snprintf(bd->revision, BLK_REV_SIZE, "1.0");

	/*
	 * EFI core isn't keen on zero-sized disks, so we lie. This is
	 * updated with the correct size once the user adds a
	 * mapping.
	 */
	bd->lba = 1;

	return 0;
}

static int blkmap_dev_unbind(struct udevice *dev)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blkmap_slice *bms, *tmp;
	int err;

	list_for_each_entry_safe(bms, tmp, &bm->slices, node) {
		list_del(&bms->node);
		free(bms);
	}

	err = device_remove(bm->blk, DM_REMOVE_NORMAL);
	if (err)
		return err;

	return device_unbind(bm->blk);
}

U_BOOT_DRIVER(blkmap_root) = {
	.name = "blkmap_dev",
	.id = UCLASS_BLKMAP,
	.bind = blkmap_dev_bind,
	.unbind = blkmap_dev_unbind,
	.plat_auto = sizeof(struct blkmap),
};

struct udevice *blkmap_from_label(const char *label)
{
	struct udevice *dev;
	struct uclass *uc;
	struct blkmap *bm;

	uclass_id_foreach_dev(UCLASS_BLKMAP, dev, uc) {
		bm = dev_get_plat(dev);
		if (bm->label && !strcmp(label, bm->label))
			return dev;
	}

	return NULL;
}

int blkmap_create(const char *label, struct udevice **devp)
{
	char *hname, *hlabel;
	struct udevice *dev;
	struct blkmap *bm;
	size_t namelen;
	int err;

	dev = blkmap_from_label(label);
	if (dev) {
		err = -EBUSY;
		goto err;
	}

	hlabel = strdup(label);
	if (!hlabel) {
		err = -ENOMEM;
		goto err;
	}

	namelen = strlen("blkmap-") + strlen(label) + 1;
	hname = malloc(namelen);
	if (!hname) {
		err = -ENOMEM;
		goto err_free_hlabel;
	}

	strlcpy(hname, "blkmap-", namelen);
	strlcat(hname, label, namelen);

	err = device_bind_driver(dm_root(), "blkmap_dev", hname, &dev);
	if (err)
		goto err_free_hname;

	device_set_name_alloced(dev);
	bm = dev_get_plat(dev);
	bm->label = hlabel;

	if (devp)
		*devp = dev;

	return 0;

err_free_hname:
	free(hname);
err_free_hlabel:
	free(hlabel);
err:
	return err;
}

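/*
 * Example (a sketch, not part of the driver): create a blkmap, look it
 * up again by its label, and tear it down. Error handling is elided
 * for brevity.
 *
 *	struct udevice *bm_dev;
 *
 *	blkmap_create("demo", &bm_dev);
 *	if (blkmap_from_label("demo") == bm_dev)
 *		blkmap_destroy(bm_dev);
 */
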
static bool blkmap_mem_preserve_slice(struct blkmap_slice *bms)
{
	return (bms->attr & (BLKMAP_SLICE_MEM | BLKMAP_SLICE_PRESERVE)) ==
		(BLKMAP_SLICE_MEM | BLKMAP_SLICE_PRESERVE);
}

int blkmap_get_preserved_pmem_slices(int (*cb)(void *ctx, u64 addr,
					       u64 size), void *ctx)
{
	int ret;
	u64 addr, size;
	struct udevice *dev;
	struct uclass *uc;
	struct blkmap *bm;
	struct blkmap_mem *bmm;
	struct blkmap_slice *bms;
	struct blk_desc *bd;

	if (!cb) {
		log_debug("%s: No callback passed to the function\n", __func__);
		return 0;
	}

	uclass_id_foreach_dev(UCLASS_BLKMAP, dev, uc) {
		bm = dev_get_plat(dev);
		bd = dev_get_uclass_plat(bm->blk);

		list_for_each_entry(bms, &bm->slices, node) {
			if (!blkmap_mem_preserve_slice(bms))
				continue;

			bmm = container_of(bms, struct blkmap_mem, slice);
			addr = (u64)(uintptr_t)bmm->addr;
			size = (u64)bms->blkcnt << bd->log2blksz;
			ret = cb(ctx, addr, size);
			if (ret)
				return ret;
		}
	}

	return 0;
}

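/*
 * Example (a sketch, not part of the driver): a callback that could be
 * handed to blkmap_get_preserved_pmem_slices() to act on each
 * preserved region, e.g. to reserve it before booting the OS.
 * "note_region" is a hypothetical name.
 *
 *	static int note_region(void *ctx, u64 addr, u64 size)
 *	{
 *		printf("preserve: %llx-%llx\n", addr, addr + size - 1);
 *		return 0;
 *	}
 *
 *	...
 *	err = blkmap_get_preserved_pmem_slices(note_region, NULL);
 */
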
int blkmap_destroy(struct udevice *dev)
{
	int err;

	err = device_remove(dev, DM_REMOVE_NORMAL);
	if (err)
		return err;

	return device_unbind(dev);
}

UCLASS_DRIVER(blkmap) = {
	.id = UCLASS_BLKMAP,
	.name = "blkmap",
};