// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2023 Addiva Elektronik
 * Author: Tobias Waldekranz <tobias@waldekranz.com>
 */
6
Tobias Waldekranz4f76dd32023-02-16 16:33:49 +01007#include <blk.h>
8#include <blkmap.h>
9#include <dm.h>
10#include <malloc.h>
11#include <mapmem.h>
12#include <part.h>
13#include <dm/device-internal.h>
14#include <dm/lists.h>
15#include <dm/root.h>
16
struct blkmap;

/**
 * define BLKMAP_SLICE_LINEAR - Linear mapping to another block device
 *
 * This blkmap slice type is used for mapping to other existing block
 * devices.
 */
#define BLKMAP_SLICE_LINEAR	BIT(0)

/**
 * define BLKMAP_SLICE_MEM - Linear mapping to memory based block device
 *
 * This blkmap slice type is used for mapping to memory based block
 * devices, like ramdisks.
 */
#define BLKMAP_SLICE_MEM	BIT(1)
34
/**
 * struct blkmap_slice - Region mapped to a blkmap
 *
 * Common data for a region mapped to a blkmap, specialized by each
 * map type. Each mapping type embeds this struct as its first member
 * and is recovered from it via container_of().
 *
 * @node: List node used to associate this slice with a blkmap
 * @blknr: Start block number of the mapping
 * @blkcnt: Number of blocks covered by this mapping
 * @attr: Attributes of blkmap slice (BLKMAP_SLICE_LINEAR or
 *        BLKMAP_SLICE_MEM)
 */
struct blkmap_slice {
	struct list_head node;

	lbaint_t blknr;
	lbaint_t blkcnt;
	uint attr;

	/**
	 * @read: - Read from slice
	 *
	 * @read.bm: Blkmap to which this slice belongs
	 * @read.bms: This slice
	 * @read.blknr: Start block number to read from (slice-relative)
	 * @read.blkcnt: Number of blocks to read
	 * @read.buffer: Buffer to store read data to
	 */
	ulong (*read)(struct blkmap *bm, struct blkmap_slice *bms,
		      lbaint_t blknr, lbaint_t blkcnt, void *buffer);

	/**
	 * @write: - Write to slice
	 *
	 * @write.bm: Blkmap to which this slice belongs
	 * @write.bms: This slice
	 * @write.blknr: Start block number to write to (slice-relative)
	 * @write.blkcnt: Number of blocks to write
	 * @write.buffer: Data to be written
	 */
	ulong (*write)(struct blkmap *bm, struct blkmap_slice *bms,
		       lbaint_t blknr, lbaint_t blkcnt, const void *buffer);

	/**
	 * @destroy: - Tear down slice (optional, may be NULL)
	 *
	 * @destroy.bm: Blkmap to which this slice belongs
	 * @destroy.bms: This slice
	 */
	void (*destroy)(struct blkmap *bm, struct blkmap_slice *bms);
};
85
Tobias Waldekranz4f76dd32023-02-16 16:33:49 +010086static bool blkmap_slice_contains(struct blkmap_slice *bms, lbaint_t blknr)
87{
88 return (blknr >= bms->blknr) && (blknr < (bms->blknr + bms->blkcnt));
89}
90
91static bool blkmap_slice_available(struct blkmap *bm, struct blkmap_slice *new)
92{
93 struct blkmap_slice *bms;
94 lbaint_t first, last;
95
96 first = new->blknr;
97 last = new->blknr + new->blkcnt - 1;
98
99 list_for_each_entry(bms, &bm->slices, node) {
100 if (blkmap_slice_contains(bms, first) ||
101 blkmap_slice_contains(bms, last) ||
102 blkmap_slice_contains(new, bms->blknr) ||
103 blkmap_slice_contains(new, bms->blknr + bms->blkcnt - 1))
104 return false;
105 }
106
107 return true;
108}
109
/**
 * blkmap_slice_add() - Insert a slice into a blkmap's slice list
 * @bm: Blkmap to add the slice to
 * @new: Slice to insert
 *
 * The slice list is kept sorted by ascending start block number; @new
 * is inserted in front of the first existing slice that starts at or
 * after it. The device's reported capacity is updated to end at the
 * last slice.
 *
 * Return: 0 on success, -EBUSY if @new overlaps an existing slice
 */
static int blkmap_slice_add(struct blkmap *bm, struct blkmap_slice *new)
{
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	struct list_head *insert = &bm->slices;
	struct blkmap_slice *bms;

	if (!blkmap_slice_available(bm, new))
		return -EBUSY;

	/* Find the first slice starting at or after @new; default
	 * (list head) appends @new at the tail.
	 */
	list_for_each_entry(bms, &bm->slices, node) {
		if (bms->blknr < new->blknr)
			continue;

		insert = &bms->node;
		break;
	}

	/* list_add_tail() on a member node inserts *before* it */
	list_add_tail(&new->node, insert);

	/* Disk might have grown, update the size */
	bms = list_last_entry(&bm->slices, struct blkmap_slice, node);
	bd->lba = bms->blknr + bms->blkcnt;
	return 0;
}
134
/**
 * struct blkmap_linear - Linear mapping to other block device
 *
 * @slice: Common map data
 * @blk: Target block device of this mapping
 * @blknr: Start block number of the target device
 */
struct blkmap_linear {
	struct blkmap_slice slice;

	struct udevice *blk;
	lbaint_t blknr;
};
148
149static ulong blkmap_linear_read(struct blkmap *bm, struct blkmap_slice *bms,
150 lbaint_t blknr, lbaint_t blkcnt, void *buffer)
151{
152 struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);
153
154 return blk_read(bml->blk, bml->blknr + blknr, blkcnt, buffer);
155}
156
157static ulong blkmap_linear_write(struct blkmap *bm, struct blkmap_slice *bms,
158 lbaint_t blknr, lbaint_t blkcnt,
159 const void *buffer)
160{
161 struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);
162
163 return blk_write(bml->blk, bml->blknr + blknr, blkcnt, buffer);
164}
165
/**
 * blkmap_map_linear() - Map a region of an existing block device
 * @dev: Blkmap device
 * @blknr: Start block number of the mapping in the blkmap
 * @blkcnt: Number of blocks to map
 * @lblk: Target block device to map to
 * @lblknr: Start block number in the target device
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EBUSY if the
 * range overlaps an existing slice
 */
int blkmap_map_linear(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		      struct udevice *lblk, lbaint_t lblknr)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blkmap_linear *linear;
	struct blk_desc *bd, *lbd;
	int err;

	bd = dev_get_uclass_plat(bm->blk);
	lbd = dev_get_uclass_plat(lblk);
	if (lbd->blksz != bd->blksz) {
		/* update to match the mapped device */
		/* NOTE(review): this overwrites the blkmap's block size
		 * unconditionally; presumably mixing targets with
		 * different block sizes in one blkmap is unsupported —
		 * confirm with callers.
		 */
		bd->blksz = lbd->blksz;
		bd->log2blksz = LOG2(bd->blksz);
	}

	linear = malloc(sizeof(*linear));
	if (!linear)
		return -ENOMEM;

	*linear = (struct blkmap_linear) {
		.slice = {
			.blknr = blknr,
			.blkcnt = blkcnt,
			.attr = BLKMAP_SLICE_LINEAR,

			.read = blkmap_linear_read,
			.write = blkmap_linear_write,
		},

		.blk = lblk,
		.blknr = lblknr,
	};

	err = blkmap_slice_add(bm, &linear->slice);
	if (err)
		free(linear);

	return err;
}
206
/**
 * struct blkmap_mem - Memory mapping
 *
 * @slice: Common map data
 * @addr: Target memory region of this mapping
 * @remapped: True if @addr is backed by a physical to virtual memory
 *            mapping that must be torn down at the end of this mapping's
 *            lifetime.
 */
struct blkmap_mem {
	struct blkmap_slice slice;
	void *addr;
	bool remapped;
};
221
222static ulong blkmap_mem_read(struct blkmap *bm, struct blkmap_slice *bms,
223 lbaint_t blknr, lbaint_t blkcnt, void *buffer)
224{
225 struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
226 struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
227 char *src;
228
229 src = bmm->addr + (blknr << bd->log2blksz);
230 memcpy(buffer, src, blkcnt << bd->log2blksz);
231 return blkcnt;
232}
233
234static ulong blkmap_mem_write(struct blkmap *bm, struct blkmap_slice *bms,
235 lbaint_t blknr, lbaint_t blkcnt,
236 const void *buffer)
237{
238 struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
239 struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
240 char *dst;
241
242 dst = bmm->addr + (blknr << bd->log2blksz);
243 memcpy(dst, buffer, blkcnt << bd->log2blksz);
244 return blkcnt;
245}
246
247static void blkmap_mem_destroy(struct blkmap *bm, struct blkmap_slice *bms)
248{
249 struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
250
251 if (bmm->remapped)
252 unmap_sysmem(bmm->addr);
253}
254
/**
 * __blkmap_map_mem() - Create a memory-backed slice
 * @dev: Blkmap device
 * @blknr: Start block number of the mapping in the blkmap
 * @blkcnt: Number of blocks to map
 * @addr: Virtual address of the backing memory
 * @remapped: True if @addr came from map_sysmem() and must be unmapped
 *            when the slice is destroyed
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EBUSY if the
 * range overlaps an existing slice
 */
int __blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		     void *addr, bool remapped)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blkmap_mem *bmm;
	int err;

	bmm = malloc(sizeof(*bmm));
	if (!bmm)
		return -ENOMEM;

	*bmm = (struct blkmap_mem) {
		.slice = {
			.blknr = blknr,
			.blkcnt = blkcnt,
			.attr = BLKMAP_SLICE_MEM,

			.read = blkmap_mem_read,
			.write = blkmap_mem_write,
			.destroy = blkmap_mem_destroy,
		},

		.addr = addr,
		.remapped = remapped,
	};

	err = blkmap_slice_add(bm, &bmm->slice);
	if (err)
		free(bmm);

	return err;
}
287
/**
 * blkmap_map_mem() - Map a region of virtually addressable memory
 * @dev: Blkmap device
 * @blknr: Start block number of the mapping in the blkmap
 * @blkcnt: Number of blocks to map
 * @addr: Virtual address of the backing memory (not owned by the slice)
 *
 * Return: 0 on success, negative error code otherwise
 */
int blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		   void *addr)
{
	return __blkmap_map_mem(dev, blknr, blkcnt, addr, false);
}
293
/**
 * blkmap_map_pmem() - Map a region of physical memory
 * @dev: Blkmap device
 * @blknr: Start block number of the mapping in the blkmap
 * @blkcnt: Number of blocks to map
 * @paddr: Physical address of the backing memory
 *
 * Remaps @paddr into the virtual address space; the mapping is torn
 * down by blkmap_mem_destroy() when the slice is destroyed.
 *
 * Return: 0 on success, -ENOMEM on remap/allocation failure, -EBUSY if
 * the range overlaps an existing slice
 */
int blkmap_map_pmem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		    phys_addr_t paddr)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	void *addr;
	int err;

	addr = map_sysmem(paddr, blkcnt << bd->log2blksz);
	if (!addr)
		return -ENOMEM;

	err = __blkmap_map_mem(dev, blknr, blkcnt, addr, true);
	if (err)
		unmap_sysmem(addr);

	return err;
}
312
Tobias Waldekranz4f76dd32023-02-16 16:33:49 +0100313static ulong blkmap_blk_read_slice(struct blkmap *bm, struct blkmap_slice *bms,
314 lbaint_t blknr, lbaint_t blkcnt,
315 void *buffer)
316{
317 lbaint_t nr, cnt;
318
319 nr = blknr - bms->blknr;
320 cnt = (blkcnt < bms->blkcnt) ? blkcnt : bms->blkcnt;
321 return bms->read(bm, bms, nr, cnt, buffer);
322}
323
/**
 * blkmap_blk_read() - blk uclass ->read op for blkmap devices
 * @dev: blkmap_blk child block device
 * @blknr: Start block number
 * @blkcnt: Number of blocks to read
 * @buffer: Destination buffer
 *
 * Walks the (sorted) slice list and dispatches each covered sub-range
 * to the owning slice's ->read callback. Blocks not covered by any
 * slice are skipped and do not count towards the returned total.
 *
 * Return: total number of blocks actually read
 */
static ulong blkmap_blk_read(struct udevice *dev, lbaint_t blknr,
			     lbaint_t blkcnt, void *buffer)
{
	struct blk_desc *bd = dev_get_uclass_plat(dev);
	struct blkmap *bm = dev_get_plat(dev->parent);
	struct blkmap_slice *bms;
	lbaint_t cnt, total = 0;

	list_for_each_entry(bms, &bm->slices, node) {
		if (!blkmap_slice_contains(bms, blknr))
			continue;

		cnt = blkmap_blk_read_slice(bm, bms, blknr, blkcnt, buffer);
		/* Advance the request past the part this slice served */
		blknr += cnt;
		blkcnt -= cnt;
		buffer += cnt << bd->log2blksz;
		total += cnt;
	}

	return total;
}
345
346static ulong blkmap_blk_write_slice(struct blkmap *bm, struct blkmap_slice *bms,
347 lbaint_t blknr, lbaint_t blkcnt,
348 const void *buffer)
349{
350 lbaint_t nr, cnt;
351
352 nr = blknr - bms->blknr;
353 cnt = (blkcnt < bms->blkcnt) ? blkcnt : bms->blkcnt;
354 return bms->write(bm, bms, nr, cnt, buffer);
355}
356
/**
 * blkmap_blk_write() - blk uclass ->write op for blkmap devices
 * @dev: blkmap_blk child block device
 * @blknr: Start block number
 * @blkcnt: Number of blocks to write
 * @buffer: Source data
 *
 * Walks the (sorted) slice list and dispatches each covered sub-range
 * to the owning slice's ->write callback. Blocks not covered by any
 * slice are skipped and do not count towards the returned total.
 *
 * Return: total number of blocks actually written
 */
static ulong blkmap_blk_write(struct udevice *dev, lbaint_t blknr,
			      lbaint_t blkcnt, const void *buffer)
{
	struct blk_desc *bd = dev_get_uclass_plat(dev);
	struct blkmap *bm = dev_get_plat(dev->parent);
	struct blkmap_slice *bms;
	lbaint_t cnt, total = 0;

	list_for_each_entry(bms, &bm->slices, node) {
		if (!blkmap_slice_contains(bms, blknr))
			continue;

		cnt = blkmap_blk_write_slice(bm, bms, blknr, blkcnt, buffer);
		/* Advance the request past the part this slice served */
		blknr += cnt;
		blkcnt -= cnt;
		buffer += cnt << bd->log2blksz;
		total += cnt;
	}

	return total;
}
378
/* Block-device operations exposed by a blkmap's child blk device */
static const struct blk_ops blkmap_blk_ops = {
	.read	= blkmap_blk_read,
	.write	= blkmap_blk_write,
};

/* Driver bound as the UCLASS_BLK child of every blkmap root device */
U_BOOT_DRIVER(blkmap_blk) = {
	.name		= "blkmap_blk",
	.id		= UCLASS_BLK,
	.ops		= &blkmap_blk_ops,
};
389
/**
 * blkmap_dev_bind() - Bind callback for a blkmap root device
 * @dev: Blkmap root device being bound
 *
 * Creates the child UCLASS_BLK device, initializes the (empty) slice
 * list and fills in the block descriptor's identity strings.
 *
 * Return: 0 on success, negative error code if the child blk device
 * could not be created
 */
static int blkmap_dev_bind(struct udevice *dev)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blk_desc *bd;
	int err;

	err = blk_create_devicef(dev, "blkmap_blk", "blk", UCLASS_BLKMAP,
				 dev_seq(dev), DEFAULT_BLKSZ, 0, &bm->blk);
	if (err)
		return log_msg_ret("blk", err);

	INIT_LIST_HEAD(&bm->slices);

	bd = dev_get_uclass_plat(bm->blk);
	snprintf(bd->vendor, BLK_VEN_SIZE, "U-Boot");
	snprintf(bd->product, BLK_PRD_SIZE, "blkmap");
	snprintf(bd->revision, BLK_REV_SIZE, "1.0");

	/* EFI core isn't keen on zero-sized disks, so we lie. This is
	 * updated with the correct size once the user adds a
	 * mapping.
	 */
	bd->lba = 1;

	return 0;
}
416
Bin Mengf61202c2023-09-26 16:43:37 +0800417static int blkmap_dev_unbind(struct udevice *dev)
Tobias Waldekranz4f76dd32023-02-16 16:33:49 +0100418{
419 struct blkmap *bm = dev_get_plat(dev);
420 struct blkmap_slice *bms, *tmp;
421 int err;
422
423 list_for_each_entry_safe(bms, tmp, &bm->slices, node) {
424 list_del(&bms->node);
425 free(bms);
426 }
427
428 err = device_remove(bm->blk, DM_REMOVE_NORMAL);
429 if (err)
430 return err;
431
432 return device_unbind(bm->blk);
433}
434
/* Root driver for blkmap devices; one instance per created blkmap */
U_BOOT_DRIVER(blkmap_root) = {
	.name		= "blkmap_dev",
	.id		= UCLASS_BLKMAP,
	.bind		= blkmap_dev_bind,
	.unbind		= blkmap_dev_unbind,
	.plat_auto	= sizeof(struct blkmap),
};
442
443struct udevice *blkmap_from_label(const char *label)
444{
445 struct udevice *dev;
446 struct uclass *uc;
447 struct blkmap *bm;
448
449 uclass_id_foreach_dev(UCLASS_BLKMAP, dev, uc) {
450 bm = dev_get_plat(dev);
451 if (bm->label && !strcmp(label, bm->label))
452 return dev;
453 }
454
455 return NULL;
456}
457
/**
 * blkmap_create() - Create a new blkmap device with the given label
 * @label: Unique label for the new blkmap (copied; caller keeps ownership)
 * @devp: If non-NULL, receives the newly bound device on success
 *
 * The device is bound to the DM root under the name "blkmap-<label>";
 * both the name and the label copy are owned by the device afterwards.
 *
 * Return: 0 on success, -EBUSY if @label is already in use, -ENOMEM on
 * allocation failure, or a device_bind_driver() error
 */
int blkmap_create(const char *label, struct udevice **devp)
{
	char *hname, *hlabel;
	struct udevice *dev;
	struct blkmap *bm;
	size_t namelen;
	int err;

	/* Labels must be unique across all blkmaps */
	dev = blkmap_from_label(label);
	if (dev) {
		err = -EBUSY;
		goto err;
	}

	hlabel = strdup(label);
	if (!hlabel) {
		err = -ENOMEM;
		goto err;
	}

	/* Device name: "blkmap-" prefix + label + NUL */
	namelen = strlen("blkmap-") + strlen(label) + 1;
	hname = malloc(namelen);
	if (!hname) {
		err = -ENOMEM;
		goto err_free_hlabel;
	}

	strlcpy(hname, "blkmap-", namelen);
	strlcat(hname, label, namelen);

	err = device_bind_driver(dm_root(), "blkmap_dev", hname, &dev);
	if (err)
		goto err_free_hname;

	/* Name and label are heap copies; mark the name as owned so the
	 * core frees it on unbind.
	 */
	device_set_name_alloced(dev);
	bm = dev_get_plat(dev);
	bm->label = hlabel;

	if (devp)
		*devp = dev;

	return 0;

err_free_hname:
	free(hname);
err_free_hlabel:
	free(hlabel);
err:
	return err;
}
508
509int blkmap_destroy(struct udevice *dev)
510{
511 int err;
512
513 err = device_remove(dev, DM_REMOVE_NORMAL);
514 if (err)
515 return err;
516
517 return device_unbind(dev);
518}
519
/* Uclass for composable virtual block devices ("blkmap") */
UCLASS_DRIVER(blkmap) = {
	.id		= UCLASS_BLKMAP,
	.name		= "blkmap",
};