blob: 0e47ffb46a82be0584356d686d3703e65638f0f8 [file] [log] [blame]
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +03001// SPDX-License-Identifier: GPL-2.0+
2/*
Anastasiia Lukianenko7ad05de2020-08-06 12:42:57 +03003 * (C) 2007-2008 Samuel Thibault.
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +03004 * (C) Copyright 2020 EPAM Systems Inc.
5 */
Patrick Delaunay81313352021-04-27 11:02:19 +02006
7#define LOG_CATEGORY UCLASS_PVBLOCK
8
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +03009#include <blk.h>
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +030010#include <dm.h>
11#include <dm/device-internal.h>
Anastasiia Lukianenko79d9f2a2020-08-06 12:42:56 +030012#include <malloc.h>
13#include <part.h>
14
Anastasiia Lukianenko7ad05de2020-08-06 12:42:57 +030015#include <asm/armv8/mmu.h>
Simon Glass3ba929a2020-10-30 21:38:53 -060016#include <asm/global_data.h>
Anastasiia Lukianenko7ad05de2020-08-06 12:42:57 +030017#include <asm/io.h>
18#include <asm/xen/system.h>
19
Anastasiia Lukianenkoe855fbf2020-08-06 12:42:58 +030020#include <linux/bug.h>
Anastasiia Lukianenko7ad05de2020-08-06 12:42:57 +030021#include <linux/compat.h>
22
23#include <xen/events.h>
24#include <xen/gnttab.h>
25#include <xen/hvm.h>
Anastasiia Lukianenko79d9f2a2020-08-06 12:42:56 +030026#include <xen/xenbus.h>
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +030027
Anastasiia Lukianenko7ad05de2020-08-06 12:42:57 +030028#include <xen/interface/io/ring.h>
29#include <xen/interface/io/blkif.h>
30#include <xen/interface/io/protocols.h>
31
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +030032#define DRV_NAME "pvblock"
33#define DRV_NAME_BLK "pvblock_blk"
34
Anastasiia Lukianenko7ad05de2020-08-06 12:42:57 +030035#define O_RDONLY 00
36#define O_RDWR 02
Anastasiia Lukianenkoe855fbf2020-08-06 12:42:58 +030037#define WAIT_RING_TO_MS 10
Anastasiia Lukianenko7ad05de2020-08-06 12:42:57 +030038
/**
 * struct blkfront_info - Disk properties read from the backend's XenStore nodes
 * @sectors: Device size in sectors (from "sectors"; read via
 *	     xenbus_read_integer(), so effectively limited to int range)
 * @sector_size: Size of one sector in bytes (from "sector-size")
 * @mode: Access mode: O_RDWR if the backend's "mode" starts with 'w',
 *	  O_RDONLY otherwise
 * @info: Raw value of the backend's "info" node
 * @barrier: 1 if the backend advertises "feature-barrier"
 * @flush: 1 if the backend advertises "feature-flush-cache"
 */
struct blkfront_info {
	u64 sectors;
	unsigned int sector_size;
	int mode;
	int info;
	int barrier;
	int flush;
};
47
/**
 * struct blkfront_dev - Struct representing blkfront device
 * @dom: Domain id
 * @ring: Front_ring structure
 * @ring_ref: The grant reference, allowing us to grant access
 *	      to the ring to the other end/domain
 * @evtchn: Event channel used to signal ring events
 * @handle: Events handle
 * @nodename: Device XenStore path in format "device/vbd/" + @devid
 * @backend: Backend XenStore path
 * @info: Disk properties read from the backend (see struct blkfront_info)
 * @devid: Device id
 * @bounce_buffer: Sector-aligned scratch buffer, one sector long, used by
 *		   pvblock_iop() when the caller's buffer is not aligned
 *		   to @info.sector_size
 */
struct blkfront_dev {
	domid_t dom;

	struct blkif_front_ring ring;
	grant_ref_t ring_ref;
	evtchn_port_t evtchn;
	blkif_vdev_t handle;

	char *nodename;
	char *backend;
	struct blkfront_info info;
	unsigned int devid;
	u8 *bounce_buffer;
};
75
/**
 * struct blkfront_plat - Platform data conveying the device ID at bind time
 * @devid: XenStore virtual block device ID ("device/vbd/<devid>")
 *
 * Allocated in on_new_vbd() and consumed (then freed) by
 * pvblock_blk_probe().
 */
struct blkfront_plat {
	unsigned int devid;
};
79
/**
 * struct blkfront_aiocb - AIO control block
 * @aio_dev: Blockfront device
 * @aio_buf: Memory buffer, which must be sector-aligned for
 *	     @aio_dev sector
 * @aio_nbytes: Size of AIO, which must be less than @aio_dev
 *		sector-sized amounts
 * @aio_offset: Offset, which must not go beyond @aio_dev
 *		sector-aligned location
 * @data: Data used to receive the response from the ring
 * @gref: Array of grant references
 * @n: Number of segments
 * @aio_cb: Represents one I/O request.
 */
struct blkfront_aiocb {
	struct blkfront_dev *aio_dev;
	u8 *aio_buf;
	size_t aio_nbytes;
	off_t aio_offset;
	void *data;

	grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int n;

	void (*aio_cb)(struct blkfront_aiocb *aiocb, int ret);
};
106
107static void blkfront_sync(struct blkfront_dev *dev);
108
/* Release every resource held by @dev, and finally @dev itself. */
static void free_blkfront(struct blkfront_dev *dev)
{
	/* Quiesce event delivery before tearing the connection down */
	mask_evtchn(dev->evtchn);
	free(dev->backend);

	/* Revoke the backend's grant on the shared ring, then free it */
	gnttab_end_access(dev->ring_ref);
	free(dev->ring.sring);

	unbind_evtchn(dev->evtchn);

	free(dev->bounce_buffer);
	free(dev->nodename);
	/*
	 * NOTE(review): @dev is the driver model's priv storage (priv_auto
	 * in U_BOOT_DRIVER(pvblock_blk)); freeing it here assumes this code
	 * owns that memory - verify against the DM core's device lifetime.
	 */
	free(dev);
}
123
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +0300124static int init_blkfront(unsigned int devid, struct blkfront_dev *dev)
125{
Anastasiia Lukianenko7ad05de2020-08-06 12:42:57 +0300126 xenbus_transaction_t xbt;
127 char *err = NULL;
128 char *message = NULL;
129 struct blkif_sring *s;
130 int retry = 0;
131 char *msg = NULL;
132 char *c;
133 char nodename[32];
134 char path[ARRAY_SIZE(nodename) + strlen("/backend-id") + 1];
135
136 sprintf(nodename, "device/vbd/%d", devid);
137
138 memset(dev, 0, sizeof(*dev));
139 dev->nodename = strdup(nodename);
140 dev->devid = devid;
141
142 snprintf(path, sizeof(path), "%s/backend-id", nodename);
143 dev->dom = xenbus_read_integer(path);
Anastasiia Lukianenkoe855fbf2020-08-06 12:42:58 +0300144 evtchn_alloc_unbound(dev->dom, NULL, dev, &dev->evtchn);
Anastasiia Lukianenko7ad05de2020-08-06 12:42:57 +0300145
146 s = (struct blkif_sring *)memalign(PAGE_SIZE, PAGE_SIZE);
147 if (!s) {
148 printf("Failed to allocate shared ring\n");
149 goto error;
150 }
151
152 SHARED_RING_INIT(s);
153 FRONT_RING_INIT(&dev->ring, s, PAGE_SIZE);
154
155 dev->ring_ref = gnttab_grant_access(dev->dom, virt_to_pfn(s), 0);
156
157again:
158 err = xenbus_transaction_start(&xbt);
159 if (err) {
160 printf("starting transaction\n");
161 free(err);
162 }
163
164 err = xenbus_printf(xbt, nodename, "ring-ref", "%u", dev->ring_ref);
165 if (err) {
166 message = "writing ring-ref";
167 goto abort_transaction;
168 }
169 err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn);
170 if (err) {
171 message = "writing event-channel";
172 goto abort_transaction;
173 }
174 err = xenbus_printf(xbt, nodename, "protocol", "%s",
175 XEN_IO_PROTO_ABI_NATIVE);
176 if (err) {
177 message = "writing protocol";
178 goto abort_transaction;
179 }
180
181 snprintf(path, sizeof(path), "%s/state", nodename);
182 err = xenbus_switch_state(xbt, path, XenbusStateConnected);
183 if (err) {
184 message = "switching state";
185 goto abort_transaction;
186 }
187
188 err = xenbus_transaction_end(xbt, 0, &retry);
189 free(err);
190 if (retry) {
191 goto again;
192 printf("completing transaction\n");
193 }
194
195 goto done;
196
197abort_transaction:
198 free(err);
199 err = xenbus_transaction_end(xbt, 1, &retry);
200 printf("Abort transaction %s\n", message);
201 goto error;
202
203done:
204 snprintf(path, sizeof(path), "%s/backend", nodename);
205 msg = xenbus_read(XBT_NIL, path, &dev->backend);
206 if (msg) {
207 printf("Error %s when reading the backend path %s\n",
208 msg, path);
209 goto error;
210 }
211
212 dev->handle = strtoul(strrchr(nodename, '/') + 1, NULL, 0);
213
214 {
215 XenbusState state;
216 char path[strlen(dev->backend) +
217 strlen("/feature-flush-cache") + 1];
218
219 snprintf(path, sizeof(path), "%s/mode", dev->backend);
220 msg = xenbus_read(XBT_NIL, path, &c);
221 if (msg) {
222 printf("Error %s when reading the mode\n", msg);
223 goto error;
224 }
225 if (*c == 'w')
226 dev->info.mode = O_RDWR;
227 else
228 dev->info.mode = O_RDONLY;
229 free(c);
230
231 snprintf(path, sizeof(path), "%s/state", dev->backend);
232
233 msg = NULL;
234 state = xenbus_read_integer(path);
235 while (!msg && state < XenbusStateConnected)
236 msg = xenbus_wait_for_state_change(path, &state);
237 if (msg || state != XenbusStateConnected) {
238 printf("backend not available, state=%d\n", state);
239 goto error;
240 }
241
242 snprintf(path, sizeof(path), "%s/info", dev->backend);
243 dev->info.info = xenbus_read_integer(path);
244
245 snprintf(path, sizeof(path), "%s/sectors", dev->backend);
246 /*
247 * FIXME: read_integer returns an int, so disk size
248 * limited to 1TB for now
249 */
250 dev->info.sectors = xenbus_read_integer(path);
251
252 snprintf(path, sizeof(path), "%s/sector-size", dev->backend);
253 dev->info.sector_size = xenbus_read_integer(path);
254
255 snprintf(path, sizeof(path), "%s/feature-barrier",
256 dev->backend);
257 dev->info.barrier = xenbus_read_integer(path);
258
259 snprintf(path, sizeof(path), "%s/feature-flush-cache",
260 dev->backend);
261 dev->info.flush = xenbus_read_integer(path);
262 }
263 unmask_evtchn(dev->evtchn);
264
Anastasiia Lukianenkoe855fbf2020-08-06 12:42:58 +0300265 dev->bounce_buffer = memalign(dev->info.sector_size,
266 dev->info.sector_size);
267 if (!dev->bounce_buffer) {
268 printf("Failed to allocate bouncing buffer\n");
269 goto error;
270 }
271
272 debug("%llu sectors of %u bytes, bounce buffer at %p\n",
273 dev->info.sectors, dev->info.sector_size,
274 dev->bounce_buffer);
Anastasiia Lukianenko7ad05de2020-08-06 12:42:57 +0300275
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +0300276 return 0;
Anastasiia Lukianenko7ad05de2020-08-06 12:42:57 +0300277
278error:
279 free(msg);
280 free(err);
281 free_blkfront(dev);
282 return -ENODEV;
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +0300283}
284
/**
 * shutdown_blkfront() - Disconnect from the backend and release the device
 * @dev: Blkfront device
 *
 * Flushes outstanding I/O, then walks the XenBus state machine
 * (Closing -> Closed -> Initialising), waiting for the backend to follow
 * at each step. Finally removes our ring-ref/event-channel nodes from
 * XenStore and, on a clean shutdown, frees all resources.
 */
static void shutdown_blkfront(struct blkfront_dev *dev)
{
	char *err = NULL, *err2;
	XenbusState state;

	char path[strlen(dev->backend) + strlen("/state") + 1];
	char nodename[strlen(dev->nodename) + strlen("/event-channel") + 1];

	debug("Close " DRV_NAME ", device ID %d\n", dev->devid);

	/* Drain all in-flight requests before starting the teardown */
	blkfront_sync(dev);

	snprintf(path, sizeof(path), "%s/state", dev->backend);
	snprintf(nodename, sizeof(nodename), "%s/state", dev->nodename);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosing);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosing, err);
		goto close;
	}

	/* Wait until the backend has at least reached Closing */
	state = xenbus_read_integer(path);
	while (!err && state < XenbusStateClosing)
		err = xenbus_wait_for_state_change(path, &state);
	free(err);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosed);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosed, err);
		goto close;
	}

	/* Wait until the backend has reached Closed */
	state = xenbus_read_integer(path);
	while (state < XenbusStateClosed) {
		err = xenbus_wait_for_state_change(path, &state);
		free(err);
	}

	/* Return to Initialising so the device can be reconnected later */
	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateInitialising);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateInitialising, err);
		goto close;
	}

	state = xenbus_read_integer(path);
	while (!err &&
	       (state < XenbusStateInitWait || state >= XenbusStateClosed))
		err = xenbus_wait_for_state_change(path, &state);

close:
	free(err);

	/* Remove our side of the connection details from XenStore */
	snprintf(nodename, sizeof(nodename), "%s/ring-ref", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
	free(err2);
	snprintf(nodename, sizeof(nodename), "%s/event-channel", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
	free(err2);

	/*
	 * Only free on a clean shutdown (err == NULL). NOTE(review): err may
	 * hold an already-freed pointer here; it is only compared against
	 * NULL, never dereferenced.
	 */
	if (!err)
		free_blkfront(dev);
}
350
/**
 * blkfront_aio_poll() - AIO polling function.
 * @dev: Blkfront device
 *
 * Here we receive response from the ring and check its status. This happens
 * until we read all data from the ring. We read the data from consumed pointer
 * to the response pointer. Then increase consumed pointer to make it clear that
 * the data has been read.
 *
 * Return: Number of consumed responses.
 */
static int blkfront_aio_poll(struct blkfront_dev *dev)
{
	RING_IDX rp, cons;
	struct blkif_response *rsp;
	int more;
	int nr_consumed;

moretodo:
	rp = dev->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */
	cons = dev->ring.rsp_cons;

	nr_consumed = 0;
	while ((cons != rp)) {
		struct blkfront_aiocb *aiocbp;
		int status;

		rsp = RING_GET_RESPONSE(&dev->ring, cons);
		nr_consumed++;

		/* The request id carries the aiocb pointer (set in blkfront_aio()) */
		aiocbp = (void *)(uintptr_t)rsp->id;
		status = rsp->status;

		switch (rsp->operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		{
			int j;

			if (status != BLKIF_RSP_OKAY)
				printf("%s error %d on %s at offset %llu, num bytes %llu\n",
				       rsp->operation == BLKIF_OP_READ ?
				       "read" : "write",
				       status, aiocbp->aio_dev->nodename,
				       (unsigned long long)aiocbp->aio_offset,
				       (unsigned long long)aiocbp->aio_nbytes);

			/* Revoke the grants handed out for this request */
			for (j = 0; j < aiocbp->n; j++)
				gnttab_end_access(aiocbp->gref[j]);

			break;
		}

		case BLKIF_OP_WRITE_BARRIER:
			if (status != BLKIF_RSP_OKAY)
				printf("write barrier error %d\n", status);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
			if (status != BLKIF_RSP_OKAY)
				printf("flush error %d\n", status);
			break;

		default:
			printf("unrecognized block operation %d response (status %d)\n",
			       rsp->operation, status);
			break;
		}

		/* Advance the consumer index before running the callback */
		dev->ring.rsp_cons = ++cons;
		/* Nota: callback frees aiocbp itself */
		if (aiocbp && aiocbp->aio_cb)
			aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
		/* The callback may have polled the ring recursively */
		if (dev->ring.rsp_cons != cons)
			/* We reentered, we must not continue here */
			break;
	}

	RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
	if (more)
		goto moretodo;

	return nr_consumed;
}
435
436static void blkfront_wait_slot(struct blkfront_dev *dev)
437{
438 /* Wait for a slot */
439 if (RING_FULL(&dev->ring)) {
440 while (true) {
441 blkfront_aio_poll(dev);
442 if (!RING_FULL(&dev->ring))
443 break;
444 wait_event_timeout(NULL, !RING_FULL(&dev->ring),
445 WAIT_RING_TO_MS);
446 }
447 }
448}
449
/**
 * blkfront_aio() - Issue an aio.
 * @aiocbp: AIO control block structure
 * @write: Describes whether it is a read or a write operation
 *	   0 - read
 *	   1 - write
 *
 * We check whether the AIO parameters meet the requirements of the device.
 * Then receive request from ring and define its arguments. After this we
 * grant access to the grant references. The last step is notifying about AIO
 * via event channel.
 */
static void blkfront_aio(struct blkfront_aiocb *aiocbp, int write)
{
	struct blkfront_dev *dev = aiocbp->aio_dev;
	struct blkif_request *req;
	RING_IDX i;
	int notify;
	int n, j;
	uintptr_t start, end;

	/* Can't io at non-sector-aligned location */
	BUG_ON(aiocbp->aio_offset & (dev->info.sector_size - 1));
	/* Can't io non-sector-sized amounts */
	BUG_ON(aiocbp->aio_nbytes & (dev->info.sector_size - 1));
	/* Can't io non-sector-aligned buffer */
	BUG_ON(((uintptr_t)aiocbp->aio_buf & (dev->info.sector_size - 1)));

	/* Number of whole pages the buffer spans = number of ring segments */
	start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK;
	end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes +
	       PAGE_SIZE - 1) & PAGE_MASK;
	n = (end - start) / PAGE_SIZE;
	aiocbp->n = n;

	BUG_ON(n > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	blkfront_wait_slot(dev);
	i = dev->ring.req_prod_pvt;
	req = RING_GET_REQUEST(&dev->ring, i);

	req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ;
	req->nr_segments = n;
	req->handle = dev->handle;
	/* Echoed back in the response; recovered in blkfront_aio_poll() */
	req->id = (uintptr_t)aiocbp;
	req->sector_number = aiocbp->aio_offset / dev->info.sector_size;

	/* Default each segment to a full page, then trim the two ends */
	for (j = 0; j < n; j++) {
		req->seg[j].first_sect = 0;
		req->seg[j].last_sect = PAGE_SIZE / dev->info.sector_size - 1;
	}
	req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) /
		dev->info.sector_size;
	req->seg[n - 1].last_sect = (((uintptr_t)aiocbp->aio_buf +
		aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / dev->info.sector_size;
	for (j = 0; j < n; j++) {
		uintptr_t data = start + j * PAGE_SIZE;

		if (!write) {
			/* Trigger CoW if needed */
			*(char *)(data + (req->seg[j].first_sect *
					  dev->info.sector_size)) = 0;
			barrier();
		}
		/* Grant the backend access to each page of the buffer */
		req->seg[j].gref = gnttab_grant_access(dev->dom,
						       virt_to_pfn((void *)data),
						       write);
		aiocbp->gref[j] = req->seg[j].gref;
	}

	dev->ring.req_prod_pvt = i + 1;

	/* Make the request visible before updating the producer index */
	wmb();
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);

	if (notify)
		notify_remote_via_evtchn(dev->evtchn);
}
527
528static void blkfront_aio_cb(struct blkfront_aiocb *aiocbp, int ret)
529{
530 aiocbp->data = (void *)1;
531 aiocbp->aio_cb = NULL;
532}
533
534static void blkfront_io(struct blkfront_aiocb *aiocbp, int write)
535{
536 aiocbp->aio_cb = blkfront_aio_cb;
537 blkfront_aio(aiocbp, write);
538 aiocbp->data = NULL;
539
540 while (true) {
541 blkfront_aio_poll(aiocbp->aio_dev);
542 if (aiocbp->data)
543 break;
544 cpu_relax();
545 }
546}
547
548static void blkfront_push_operation(struct blkfront_dev *dev, u8 op,
549 uint64_t id)
550{
551 struct blkif_request *req;
552 int notify, i;
553
554 blkfront_wait_slot(dev);
555 i = dev->ring.req_prod_pvt;
556 req = RING_GET_REQUEST(&dev->ring, i);
557 req->operation = op;
558 req->nr_segments = 0;
559 req->handle = dev->handle;
560 req->id = id;
561 req->sector_number = 0;
562 dev->ring.req_prod_pvt = i + 1;
563 wmb();
564 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);
565 if (notify)
566 notify_remote_via_evtchn(dev->evtchn);
567}
568
569static void blkfront_sync(struct blkfront_dev *dev)
570{
571 if (dev->info.mode == O_RDWR) {
572 if (dev->info.barrier == 1)
573 blkfront_push_operation(dev,
574 BLKIF_OP_WRITE_BARRIER, 0);
575
576 if (dev->info.flush == 1)
577 blkfront_push_operation(dev,
578 BLKIF_OP_FLUSH_DISKCACHE, 0);
579 }
580
581 while (true) {
582 blkfront_aio_poll(dev);
583 if (RING_FREE_REQUESTS(&dev->ring) == RING_SIZE(&dev->ring))
584 break;
585 cpu_relax();
586 }
587}
588
/**
 * pvblock_iop() - Issue an aio.
 * @udev: Pvblock device
 * @blknr: Block number to read from / write to
 * @blkcnt: Amount of blocks to read / write
 * @buffer: Memory buffer with data to be read / write
 * @write: Describes whether it is a read or a write operation
 *	   0 - read
 *	   1 - write
 *
 * Depending on the operation - reading or writing, data is read / written from the
 * specified address (@buffer) to the sector (@blknr).
 *
 * Return: @blkcnt on success, 0 on a range error or when @blkcnt is 0.
 */
static ulong pvblock_iop(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, int write)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	struct blkfront_aiocb aiocb;
	lbaint_t blocks_todo;
	bool unaligned;

	if (blkcnt == 0)
		return 0;

	if ((blknr + blkcnt) > desc->lba) {
		printf(DRV_NAME ": block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       blknr + blkcnt, desc->lba);
		return 0;
	}

	/* Unaligned buffers are routed through the one-sector bounce buffer */
	unaligned = (uintptr_t)buffer & (blk_dev->info.sector_size - 1);

	aiocb.aio_dev = blk_dev;
	aiocb.aio_offset = blknr * desc->blksz;
	aiocb.aio_cb = NULL;
	aiocb.data = NULL;
	blocks_todo = blkcnt;
	do {
		aiocb.aio_buf = unaligned ? blk_dev->bounce_buffer : buffer;

		/* For unaligned writes, stage one block in the bounce buffer */
		if (write && unaligned)
			memcpy(blk_dev->bounce_buffer, buffer, desc->blksz);

		/*
		 * Unaligned I/O goes one block at a time; aligned I/O is
		 * capped by what fits in one ring request's segments.
		 */
		aiocb.aio_nbytes = unaligned ? desc->blksz :
			min((size_t)((BLKIF_MAX_SEGMENTS_PER_REQUEST - 1)
					* PAGE_SIZE),
			    (size_t)(blocks_todo * desc->blksz));

		blkfront_io(&aiocb, write);

		/* For unaligned reads, copy the block back to the caller */
		if (!write && unaligned)
			memcpy(buffer, blk_dev->bounce_buffer, desc->blksz);

		aiocb.aio_offset += aiocb.aio_nbytes;
		buffer += aiocb.aio_nbytes;
		blocks_todo -= aiocb.aio_nbytes / desc->blksz;
	} while (blocks_todo > 0);

	return blkcnt;
}
650
/* Read @blkcnt blocks starting at @blknr into @buffer; returns blocks read. */
ulong pvblock_blk_read(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
		       void *buffer)
{
	return pvblock_iop(udev, blknr, blkcnt, buffer, 0);
}
656
/* Write @blkcnt blocks from @buffer starting at @blknr; returns blocks written. */
ulong pvblock_blk_write(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
			const void *buffer)
{
	/* Cast away const: pvblock_iop() does not modify the buffer on writes */
	return pvblock_iop(udev, blknr, blkcnt, (void *)buffer, 1);
}
662
663static int pvblock_blk_bind(struct udevice *udev)
664{
Simon Glass71fa5b42020-12-03 16:55:18 -0700665 struct blk_desc *desc = dev_get_uclass_plat(udev);
Anastasiia Lukianenko79d9f2a2020-08-06 12:42:56 +0300666 int devnum;
667
Simon Glassfada3f92022-09-17 09:00:09 -0600668 desc->uclass_id = UCLASS_PVBLOCK;
Anastasiia Lukianenko79d9f2a2020-08-06 12:42:56 +0300669 /*
670 * Initialize the devnum to -ENODEV. This is to make sure that
671 * blk_next_free_devnum() works as expected, since the default
672 * value 0 is a valid devnum.
673 */
674 desc->devnum = -ENODEV;
Simon Glassdbfa32c2022-08-11 19:34:59 -0600675 devnum = blk_next_free_devnum(UCLASS_PVBLOCK);
Anastasiia Lukianenko79d9f2a2020-08-06 12:42:56 +0300676 if (devnum < 0)
677 return devnum;
678 desc->devnum = devnum;
679 desc->part_type = PART_TYPE_UNKNOWN;
680 desc->bdev = udev;
681
682 strncpy(desc->vendor, "Xen", sizeof(desc->vendor));
683 strncpy(desc->revision, "1", sizeof(desc->revision));
684 strncpy(desc->product, "Virtual disk", sizeof(desc->product));
685
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +0300686 return 0;
687}
688
/**
 * pvblock_blk_probe() - Connect the device to its backend and set geometry
 * @udev: Block device being probed
 *
 * Return: 0 on success, negative error from init_blkfront() otherwise.
 */
static int pvblock_blk_probe(struct udevice *udev)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blkfront_plat *plat = dev_get_plat(udev);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	int ret, devid;

	/*
	 * plat only carries the device ID from on_new_vbd(); it was
	 * malloc'ed there, so free it once the ID has been extracted.
	 * NOTE(review): the DM core may still hold this pointer after the
	 * free - verify nothing dereferences udev's plat later.
	 */
	devid = plat->devid;
	free(plat);

	ret = init_blkfront(devid, blk_dev);
	if (ret < 0)
		return ret;

	/* Publish the geometry reported by the backend */
	desc->blksz = blk_dev->info.sector_size;
	desc->lba = blk_dev->info.sectors;
	desc->log2blksz = LOG2(blk_dev->info.sector_size);

	return 0;
}
709
/* Driver model remove hook: disconnect the blkfront from its backend. */
static int pvblock_blk_remove(struct udevice *udev)
{
	shutdown_blkfront(dev_get_priv(udev));

	return 0;
}
717
/* Block operations served by the pvblock_blk driver */
static const struct blk_ops pvblock_blk_ops = {
	.read = pvblock_blk_read,
	.write = pvblock_blk_write,
};

/*
 * Para-virtual block device driver; one instance is bound per virtual
 * block device found during XenStore enumeration (see on_new_vbd()).
 */
U_BOOT_DRIVER(pvblock_blk) = {
	.name = DRV_NAME_BLK,
	.id = UCLASS_BLK,
	.ops = &pvblock_blk_ops,
	.bind = pvblock_blk_bind,
	.probe = pvblock_blk_probe,
	.remove = pvblock_blk_remove,
	.priv_auto = sizeof(struct blkfront_dev),
	.flags = DM_FLAG_OS_PREPARE,
};
733
734/*******************************************************************************
735 * Para-virtual block device class
736 *******************************************************************************/
737
Anastasiia Lukianenko79d9f2a2020-08-06 12:42:56 +0300738typedef int (*enum_vbd_callback)(struct udevice *parent, unsigned int devid);
739
740static int on_new_vbd(struct udevice *parent, unsigned int devid)
741{
742 struct driver_info info;
743 struct udevice *udev;
Simon Glassb75b15b2020-12-03 16:55:23 -0700744 struct blkfront_plat *plat;
Anastasiia Lukianenko79d9f2a2020-08-06 12:42:56 +0300745 int ret;
746
747 debug("New " DRV_NAME_BLK ", device ID %d\n", devid);
748
Simon Glassb75b15b2020-12-03 16:55:23 -0700749 plat = malloc(sizeof(struct blkfront_plat));
Simon Glass71fa5b42020-12-03 16:55:18 -0700750 if (!plat) {
Anastasiia Lukianenko79d9f2a2020-08-06 12:42:56 +0300751 printf("Failed to allocate platform data\n");
752 return -ENOMEM;
753 }
754
Simon Glass71fa5b42020-12-03 16:55:18 -0700755 plat->devid = devid;
Anastasiia Lukianenko79d9f2a2020-08-06 12:42:56 +0300756
757 info.name = DRV_NAME_BLK;
Simon Glass71fa5b42020-12-03 16:55:18 -0700758 info.plat = plat;
Anastasiia Lukianenko79d9f2a2020-08-06 12:42:56 +0300759
760 ret = device_bind_by_name(parent, false, &info, &udev);
761 if (ret < 0) {
762 printf("Failed to bind " DRV_NAME_BLK " to device with ID %d, ret: %d\n",
763 devid, ret);
Simon Glass71fa5b42020-12-03 16:55:18 -0700764 free(plat);
Anastasiia Lukianenko79d9f2a2020-08-06 12:42:56 +0300765 }
766 return ret;
767}
768
769static int xenbus_enumerate_vbd(struct udevice *udev, enum_vbd_callback clb)
770{
771 char **dirs, *msg;
772 int i, ret;
773
774 msg = xenbus_ls(XBT_NIL, "device/vbd", &dirs);
775 if (msg) {
776 printf("Failed to read device/vbd directory: %s\n", msg);
777 free(msg);
778 return -ENODEV;
779 }
780
781 for (i = 0; dirs[i]; i++) {
782 int devid;
783
784 sscanf(dirs[i], "%d", &devid);
785 ret = clb(udev, devid);
786 if (ret < 0)
787 goto fail;
788
789 free(dirs[i]);
790 }
791 ret = 0;
792
793fail:
794 for (; dirs[i]; i++)
795 free(dirs[i]);
796 free(dirs);
797 return ret;
798}
799
Anastasiia Lukianenkoddf6e6a2020-08-06 12:42:59 +0300800static void print_pvblock_devices(void)
801{
802 struct udevice *udev;
803 bool first = true;
804 const char *class_name;
805
806 class_name = uclass_get_name(UCLASS_PVBLOCK);
Simon Glassdbfa32c2022-08-11 19:34:59 -0600807 for (blk_first_device(UCLASS_PVBLOCK, &udev); udev;
Anastasiia Lukianenkoddf6e6a2020-08-06 12:42:59 +0300808 blk_next_device(&udev), first = false) {
Simon Glass71fa5b42020-12-03 16:55:18 -0700809 struct blk_desc *desc = dev_get_uclass_plat(udev);
Anastasiia Lukianenkoddf6e6a2020-08-06 12:42:59 +0300810
811 if (!first)
812 puts(", ");
813 printf("%s: %d", class_name, desc->devnum);
814 }
815 printf("\n");
816}
817
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +0300818void pvblock_init(void)
819{
820 struct driver_info info;
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +0300821 int ret;
822
823 /*
824 * At this point Xen drivers have already initialized,
825 * so we can instantiate the class driver and enumerate
826 * virtual block devices.
827 */
828 info.name = DRV_NAME;
Michal Suchanek94142a72022-10-22 16:33:05 +0200829 ret = device_bind_by_name(gd->dm_root, false, &info, NULL);
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +0300830 if (ret < 0)
831 printf("Failed to bind " DRV_NAME ", ret: %d\n", ret);
832
833 /* Bootstrap virtual block devices class driver */
Michal Suchanek94142a72022-10-22 16:33:05 +0200834 uclass_probe_all(UCLASS_PVBLOCK);
Anastasiia Lukianenkoddf6e6a2020-08-06 12:42:59 +0300835
836 print_pvblock_devices();
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +0300837}
838
839static int pvblock_probe(struct udevice *udev)
840{
Anastasiia Lukianenko79d9f2a2020-08-06 12:42:56 +0300841 struct uclass *uc;
842 int ret;
843
844 if (xenbus_enumerate_vbd(udev, on_new_vbd) < 0)
845 return -ENODEV;
846
847 ret = uclass_get(UCLASS_BLK, &uc);
848 if (ret)
849 return ret;
Michal Suchanek53beee92022-10-12 21:58:05 +0200850 uclass_foreach_dev_probe(UCLASS_BLK, udev);
Anastasiia Lukianenko4fec7f82020-08-06 12:42:55 +0300851 return 0;
852}
853
/* Class driver bound by pvblock_init(); enumerates VBDs on probe */
U_BOOT_DRIVER(pvblock_drv) = {
	.name = DRV_NAME,
	.id = UCLASS_PVBLOCK,
	.probe = pvblock_probe,
};

/* Uclass for Xen para-virtual block devices */
UCLASS_DRIVER(pvblock) = {
	.name = DRV_NAME,
	.id = UCLASS_PVBLOCK,
};