Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0+ |
| 2 | /* |
Anastasiia Lukianenko | 7ad05de | 2020-08-06 12:42:57 +0300 | [diff] [blame] | 3 | * (C) 2007-2008 Samuel Thibault. |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 4 | * (C) Copyright 2020 EPAM Systems Inc. |
| 5 | */ |
Patrick Delaunay | 8131335 | 2021-04-27 11:02:19 +0200 | [diff] [blame] | 6 | |
| 7 | #define LOG_CATEGORY UCLASS_PVBLOCK |
| 8 | |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 9 | #include <blk.h> |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 10 | #include <dm.h> |
| 11 | #include <dm/device-internal.h> |
Anastasiia Lukianenko | 79d9f2a | 2020-08-06 12:42:56 +0300 | [diff] [blame] | 12 | #include <malloc.h> |
| 13 | #include <part.h> |
| 14 | |
Anastasiia Lukianenko | 7ad05de | 2020-08-06 12:42:57 +0300 | [diff] [blame] | 15 | #include <asm/armv8/mmu.h> |
Simon Glass | 3ba929a | 2020-10-30 21:38:53 -0600 | [diff] [blame] | 16 | #include <asm/global_data.h> |
Anastasiia Lukianenko | 7ad05de | 2020-08-06 12:42:57 +0300 | [diff] [blame] | 17 | #include <asm/io.h> |
| 18 | #include <asm/xen/system.h> |
| 19 | |
Anastasiia Lukianenko | e855fbf | 2020-08-06 12:42:58 +0300 | [diff] [blame] | 20 | #include <linux/bug.h> |
Anastasiia Lukianenko | 7ad05de | 2020-08-06 12:42:57 +0300 | [diff] [blame] | 21 | #include <linux/compat.h> |
| 22 | |
| 23 | #include <xen/events.h> |
| 24 | #include <xen/gnttab.h> |
| 25 | #include <xen/hvm.h> |
Anastasiia Lukianenko | 79d9f2a | 2020-08-06 12:42:56 +0300 | [diff] [blame] | 26 | #include <xen/xenbus.h> |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 27 | |
Anastasiia Lukianenko | 7ad05de | 2020-08-06 12:42:57 +0300 | [diff] [blame] | 28 | #include <xen/interface/io/ring.h> |
| 29 | #include <xen/interface/io/blkif.h> |
| 30 | #include <xen/interface/io/protocols.h> |
| 31 | |
#define DRV_NAME "pvblock"
#define DRV_NAME_BLK "pvblock_blk"

/* Access modes stored in blkfront_info.mode; values match the POSIX flags */
#define O_RDONLY 00
#define O_RDWR 02
/* Polling interval, in ms, while waiting for a free ring slot */
#define WAIT_RING_TO_MS 10
Anastasiia Lukianenko | 7ad05de | 2020-08-06 12:42:57 +0300 | [diff] [blame] | 38 | |
/**
 * struct blkfront_info - Disk parameters read from the XenStore backend
 * @sectors: Device size in sectors (currently truncated to int range,
 *	     see the FIXME in init_blkfront())
 * @sector_size: Size of one sector in bytes
 * @mode: Access mode, O_RDWR or O_RDONLY, derived from the backend "mode" node
 * @info: Raw integer read from the backend "info" node
 * @barrier: Value of the backend "feature-barrier" node
 * @flush: Value of the backend "feature-flush-cache" node
 */
struct blkfront_info {
	u64 sectors;
	unsigned int sector_size;
	int mode;
	int info;
	int barrier;
	int flush;
};
| 47 | |
/**
 * struct blkfront_dev - Struct representing blkfront device
 * @dom: Domain id
 * @ring: Front_ring structure
 * @ring_ref: The grant reference, allowing us to grant access
 *	      to the ring to the other end/domain
 * @evtchn: Event channel used to signal ring events
 * @handle: Events handle
 * @nodename: Device XenStore path in format "device/vbd/" + @devid
 * @backend: Backend XenStore path
 * @info: Private data
 * @devid: Device id
 * @bounce_buffer: One-sector buffer used by pvblock_iop() to bounce
 *		   requests whose buffer is not sector-aligned
 */
struct blkfront_dev {
	domid_t dom;

	struct blkif_front_ring ring;
	grant_ref_t ring_ref;
	evtchn_port_t evtchn;
	blkif_vdev_t handle;

	char *nodename;
	char *backend;
	struct blkfront_info info;
	unsigned int devid;
	u8 *bounce_buffer;
};
| 75 | |
/**
 * struct blkfront_plat - Platform data used to bind one blkfront device
 * @devid: Device id parsed from the XenStore "device/vbd" directory
 */
struct blkfront_plat {
	unsigned int devid;
};
| 79 | |
/**
 * struct blkfront_aiocb - AIO control block
 * @aio_dev: Blockfront device
 * @aio_buf: Memory buffer, which must be sector-aligned for
 *	     @aio_dev sector
 * @aio_nbytes: Size of AIO, which must be less than @aio_dev
 *		sector-sized amounts
 * @aio_offset: Offset, which must not go beyond @aio_dev
 *		sector-aligned location
 * @data: Opaque completion cookie: blkfront_aio_cb() sets it non-NULL
 *	  when the response for this request has been consumed
 * @gref: Array of grant references
 * @n: Number of segments
 * @aio_cb: Completion callback invoked from blkfront_aio_poll() when
 *	    the response arrives; represents one I/O request.
 */
struct blkfront_aiocb {
	struct blkfront_dev *aio_dev;
	u8 *aio_buf;
	size_t aio_nbytes;
	off_t aio_offset;
	void *data;

	grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int n;

	void (*aio_cb)(struct blkfront_aiocb *aiocb, int ret);
};
| 106 | |
| 107 | static void blkfront_sync(struct blkfront_dev *dev); |
| 108 | |
/**
 * free_blkfront() - Release all resources owned by a blkfront device
 * @dev: Blkfront device to tear down
 *
 * Masks the event channel first so no further events are delivered
 * while the shared ring and its grant are released, then unbinds the
 * channel and frees all memory including @dev itself.
 */
static void free_blkfront(struct blkfront_dev *dev)
{
	/* Stop event delivery before tearing the ring down */
	mask_evtchn(dev->evtchn);
	free(dev->backend);

	/* Revoke the backend's access to the shared ring page */
	gnttab_end_access(dev->ring_ref);
	free(dev->ring.sring);

	unbind_evtchn(dev->evtchn);

	free(dev->bounce_buffer);
	free(dev->nodename);
	free(dev);
}
| 123 | |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 124 | static int init_blkfront(unsigned int devid, struct blkfront_dev *dev) |
| 125 | { |
Anastasiia Lukianenko | 7ad05de | 2020-08-06 12:42:57 +0300 | [diff] [blame] | 126 | xenbus_transaction_t xbt; |
| 127 | char *err = NULL; |
| 128 | char *message = NULL; |
| 129 | struct blkif_sring *s; |
| 130 | int retry = 0; |
| 131 | char *msg = NULL; |
| 132 | char *c; |
| 133 | char nodename[32]; |
| 134 | char path[ARRAY_SIZE(nodename) + strlen("/backend-id") + 1]; |
| 135 | |
| 136 | sprintf(nodename, "device/vbd/%d", devid); |
| 137 | |
| 138 | memset(dev, 0, sizeof(*dev)); |
| 139 | dev->nodename = strdup(nodename); |
| 140 | dev->devid = devid; |
| 141 | |
| 142 | snprintf(path, sizeof(path), "%s/backend-id", nodename); |
| 143 | dev->dom = xenbus_read_integer(path); |
Anastasiia Lukianenko | e855fbf | 2020-08-06 12:42:58 +0300 | [diff] [blame] | 144 | evtchn_alloc_unbound(dev->dom, NULL, dev, &dev->evtchn); |
Anastasiia Lukianenko | 7ad05de | 2020-08-06 12:42:57 +0300 | [diff] [blame] | 145 | |
| 146 | s = (struct blkif_sring *)memalign(PAGE_SIZE, PAGE_SIZE); |
| 147 | if (!s) { |
| 148 | printf("Failed to allocate shared ring\n"); |
| 149 | goto error; |
| 150 | } |
| 151 | |
| 152 | SHARED_RING_INIT(s); |
| 153 | FRONT_RING_INIT(&dev->ring, s, PAGE_SIZE); |
| 154 | |
| 155 | dev->ring_ref = gnttab_grant_access(dev->dom, virt_to_pfn(s), 0); |
| 156 | |
| 157 | again: |
| 158 | err = xenbus_transaction_start(&xbt); |
| 159 | if (err) { |
| 160 | printf("starting transaction\n"); |
| 161 | free(err); |
| 162 | } |
| 163 | |
| 164 | err = xenbus_printf(xbt, nodename, "ring-ref", "%u", dev->ring_ref); |
| 165 | if (err) { |
| 166 | message = "writing ring-ref"; |
| 167 | goto abort_transaction; |
| 168 | } |
| 169 | err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn); |
| 170 | if (err) { |
| 171 | message = "writing event-channel"; |
| 172 | goto abort_transaction; |
| 173 | } |
| 174 | err = xenbus_printf(xbt, nodename, "protocol", "%s", |
| 175 | XEN_IO_PROTO_ABI_NATIVE); |
| 176 | if (err) { |
| 177 | message = "writing protocol"; |
| 178 | goto abort_transaction; |
| 179 | } |
| 180 | |
| 181 | snprintf(path, sizeof(path), "%s/state", nodename); |
| 182 | err = xenbus_switch_state(xbt, path, XenbusStateConnected); |
| 183 | if (err) { |
| 184 | message = "switching state"; |
| 185 | goto abort_transaction; |
| 186 | } |
| 187 | |
| 188 | err = xenbus_transaction_end(xbt, 0, &retry); |
| 189 | free(err); |
| 190 | if (retry) { |
| 191 | goto again; |
| 192 | printf("completing transaction\n"); |
| 193 | } |
| 194 | |
| 195 | goto done; |
| 196 | |
| 197 | abort_transaction: |
| 198 | free(err); |
| 199 | err = xenbus_transaction_end(xbt, 1, &retry); |
| 200 | printf("Abort transaction %s\n", message); |
| 201 | goto error; |
| 202 | |
| 203 | done: |
| 204 | snprintf(path, sizeof(path), "%s/backend", nodename); |
| 205 | msg = xenbus_read(XBT_NIL, path, &dev->backend); |
| 206 | if (msg) { |
| 207 | printf("Error %s when reading the backend path %s\n", |
| 208 | msg, path); |
| 209 | goto error; |
| 210 | } |
| 211 | |
| 212 | dev->handle = strtoul(strrchr(nodename, '/') + 1, NULL, 0); |
| 213 | |
| 214 | { |
| 215 | XenbusState state; |
| 216 | char path[strlen(dev->backend) + |
| 217 | strlen("/feature-flush-cache") + 1]; |
| 218 | |
| 219 | snprintf(path, sizeof(path), "%s/mode", dev->backend); |
| 220 | msg = xenbus_read(XBT_NIL, path, &c); |
| 221 | if (msg) { |
| 222 | printf("Error %s when reading the mode\n", msg); |
| 223 | goto error; |
| 224 | } |
| 225 | if (*c == 'w') |
| 226 | dev->info.mode = O_RDWR; |
| 227 | else |
| 228 | dev->info.mode = O_RDONLY; |
| 229 | free(c); |
| 230 | |
| 231 | snprintf(path, sizeof(path), "%s/state", dev->backend); |
| 232 | |
| 233 | msg = NULL; |
| 234 | state = xenbus_read_integer(path); |
| 235 | while (!msg && state < XenbusStateConnected) |
| 236 | msg = xenbus_wait_for_state_change(path, &state); |
| 237 | if (msg || state != XenbusStateConnected) { |
| 238 | printf("backend not available, state=%d\n", state); |
| 239 | goto error; |
| 240 | } |
| 241 | |
| 242 | snprintf(path, sizeof(path), "%s/info", dev->backend); |
| 243 | dev->info.info = xenbus_read_integer(path); |
| 244 | |
| 245 | snprintf(path, sizeof(path), "%s/sectors", dev->backend); |
| 246 | /* |
| 247 | * FIXME: read_integer returns an int, so disk size |
| 248 | * limited to 1TB for now |
| 249 | */ |
| 250 | dev->info.sectors = xenbus_read_integer(path); |
| 251 | |
| 252 | snprintf(path, sizeof(path), "%s/sector-size", dev->backend); |
| 253 | dev->info.sector_size = xenbus_read_integer(path); |
| 254 | |
| 255 | snprintf(path, sizeof(path), "%s/feature-barrier", |
| 256 | dev->backend); |
| 257 | dev->info.barrier = xenbus_read_integer(path); |
| 258 | |
| 259 | snprintf(path, sizeof(path), "%s/feature-flush-cache", |
| 260 | dev->backend); |
| 261 | dev->info.flush = xenbus_read_integer(path); |
| 262 | } |
| 263 | unmask_evtchn(dev->evtchn); |
| 264 | |
Anastasiia Lukianenko | e855fbf | 2020-08-06 12:42:58 +0300 | [diff] [blame] | 265 | dev->bounce_buffer = memalign(dev->info.sector_size, |
| 266 | dev->info.sector_size); |
| 267 | if (!dev->bounce_buffer) { |
| 268 | printf("Failed to allocate bouncing buffer\n"); |
| 269 | goto error; |
| 270 | } |
| 271 | |
| 272 | debug("%llu sectors of %u bytes, bounce buffer at %p\n", |
| 273 | dev->info.sectors, dev->info.sector_size, |
| 274 | dev->bounce_buffer); |
Anastasiia Lukianenko | 7ad05de | 2020-08-06 12:42:57 +0300 | [diff] [blame] | 275 | |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 276 | return 0; |
Anastasiia Lukianenko | 7ad05de | 2020-08-06 12:42:57 +0300 | [diff] [blame] | 277 | |
| 278 | error: |
| 279 | free(msg); |
| 280 | free(err); |
| 281 | free_blkfront(dev); |
| 282 | return -ENODEV; |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 283 | } |
| 284 | |
/**
 * shutdown_blkfront() - Disconnect from the backend and free the device
 * @dev: Blkfront device
 *
 * Drains all outstanding I/O, then walks the frontend through the
 * XenBus states Closing -> Closed -> Initialising, waiting for the
 * backend to follow each transition, and finally removes the frontend's
 * ring-ref/event-channel nodes and releases the device resources.
 */
static void shutdown_blkfront(struct blkfront_dev *dev)
{
	char *err = NULL, *err2;
	XenbusState state;

	char path[strlen(dev->backend) + strlen("/state") + 1];
	char nodename[strlen(dev->nodename) + strlen("/event-channel") + 1];

	debug("Close " DRV_NAME ", device ID %d\n", dev->devid);

	/* Make sure the ring is fully drained before disconnecting */
	blkfront_sync(dev);

	snprintf(path, sizeof(path), "%s/state", dev->backend);
	snprintf(nodename, sizeof(nodename), "%s/state", dev->nodename);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosing);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosing, err);
		goto close;
	}

	/* Wait for the backend to reach at least Closing */
	state = xenbus_read_integer(path);
	while (!err && state < XenbusStateClosing)
		err = xenbus_wait_for_state_change(path, &state);
	free(err);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosed);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosed, err);
		goto close;
	}

	/* Wait for the backend to reach Closed */
	state = xenbus_read_integer(path);
	while (state < XenbusStateClosed) {
		err = xenbus_wait_for_state_change(path, &state);
		free(err);
	}

	/* Return to Initialising so the device could be reconnected later */
	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateInitialising);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateInitialising, err);
		goto close;
	}

	/* Wait until the backend leaves the Closing/Closed range again */
	state = xenbus_read_integer(path);
	while (!err &&
	       (state < XenbusStateInitWait || state >= XenbusStateClosed))
		err = xenbus_wait_for_state_change(path, &state);

close:
	free(err);

	/* Remove the frontend nodes published by init_blkfront() */
	snprintf(nodename, sizeof(nodename), "%s/ring-ref", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
	free(err2);
	snprintf(nodename, sizeof(nodename), "%s/event-channel", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
	free(err2);

	/* Only free the device if the state walk above succeeded */
	if (!err)
		free_blkfront(dev);
}
| 350 | |
/**
 * blkfront_aio_poll() - AIO polling function.
 * @dev: Blkfront device
 *
 * Here we receive response from the ring and check its status. This happens
 * until we read all data from the ring. We read the data from consumed pointer
 * to the response pointer. Then increase consumed pointer to make it clear that
 * the data has been read.
 *
 * Return: Number of responses consumed from the ring (not bytes).
 */
static int blkfront_aio_poll(struct blkfront_dev *dev)
{
	RING_IDX rp, cons;
	struct blkif_response *rsp;
	int more;
	int nr_consumed;

moretodo:
	rp = dev->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */
	cons = dev->ring.rsp_cons;

	nr_consumed = 0;
	while ((cons != rp)) {
		struct blkfront_aiocb *aiocbp;
		int status;

		rsp = RING_GET_RESPONSE(&dev->ring, cons);
		nr_consumed++;

		/* The request id round-trips the aiocb pointer (see blkfront_aio()) */
		aiocbp = (void *)(uintptr_t)rsp->id;
		status = rsp->status;

		switch (rsp->operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		{
			int j;

			if (status != BLKIF_RSP_OKAY)
				printf("%s error %d on %s at offset %llu, num bytes %llu\n",
				       rsp->operation == BLKIF_OP_READ ?
				       "read" : "write",
				       status, aiocbp->aio_dev->nodename,
				       (unsigned long long)aiocbp->aio_offset,
				       (unsigned long long)aiocbp->aio_nbytes);

			/* Release the grants taken out for this request */
			for (j = 0; j < aiocbp->n; j++)
				gnttab_end_access(aiocbp->gref[j]);

			break;
		}

		case BLKIF_OP_WRITE_BARRIER:
			if (status != BLKIF_RSP_OKAY)
				printf("write barrier error %d\n", status);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
			if (status != BLKIF_RSP_OKAY)
				printf("flush error %d\n", status);
			break;

		default:
			printf("unrecognized block operation %d response (status %d)\n",
			       rsp->operation, status);
			break;
		}

		dev->ring.rsp_cons = ++cons;
		/* Nota: callback frees aiocbp itself */
		if (aiocbp && aiocbp->aio_cb)
			aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
		if (dev->ring.rsp_cons != cons)
			/* We reentered, we must not continue here */
			break;
	}

	RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
	if (more)
		goto moretodo;

	return nr_consumed;
}
| 435 | |
| 436 | static void blkfront_wait_slot(struct blkfront_dev *dev) |
| 437 | { |
| 438 | /* Wait for a slot */ |
| 439 | if (RING_FULL(&dev->ring)) { |
| 440 | while (true) { |
| 441 | blkfront_aio_poll(dev); |
| 442 | if (!RING_FULL(&dev->ring)) |
| 443 | break; |
| 444 | wait_event_timeout(NULL, !RING_FULL(&dev->ring), |
| 445 | WAIT_RING_TO_MS); |
| 446 | } |
| 447 | } |
| 448 | } |
| 449 | |
/**
 * blkfront_aio() - Issue an aio.
 * @aiocbp: AIO control block structure
 * @write: Describes is it read or write operation
 *	   0 - read
 *	   1 - write
 *
 * We check whether the AIO parameters meet the requirements of the device.
 * Then receive request from ring and define its arguments. After this we
 * grant access to the grant references. The last step is notifying about AIO
 * via event channel.
 */
static void blkfront_aio(struct blkfront_aiocb *aiocbp, int write)
{
	struct blkfront_dev *dev = aiocbp->aio_dev;
	struct blkif_request *req;
	RING_IDX i;
	int notify;
	int n, j;
	uintptr_t start, end;

	/* Can't io at non-sector-aligned location */
	BUG_ON(aiocbp->aio_offset & (dev->info.sector_size - 1));
	/* Can't io non-sector-sized amounts */
	BUG_ON(aiocbp->aio_nbytes & (dev->info.sector_size - 1));
	/* Can't io non-sector-aligned buffer */
	BUG_ON(((uintptr_t)aiocbp->aio_buf & (dev->info.sector_size - 1)));

	/* Number of pages spanned by [aio_buf, aio_buf + aio_nbytes) */
	start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK;
	end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes +
	       PAGE_SIZE - 1) & PAGE_MASK;
	n = (end - start) / PAGE_SIZE;
	aiocbp->n = n;

	BUG_ON(n > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	blkfront_wait_slot(dev);
	i = dev->ring.req_prod_pvt;
	req = RING_GET_REQUEST(&dev->ring, i);

	req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ;
	req->nr_segments = n;
	req->handle = dev->handle;
	/* The aiocb pointer rides in the request id and comes back in the response */
	req->id = (uintptr_t)aiocbp;
	req->sector_number = aiocbp->aio_offset / dev->info.sector_size;

	/* Default every segment to a full page; trim the ends below */
	for (j = 0; j < n; j++) {
		req->seg[j].first_sect = 0;
		req->seg[j].last_sect = PAGE_SIZE / dev->info.sector_size - 1;
	}
	req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) /
		dev->info.sector_size;
	req->seg[n - 1].last_sect = (((uintptr_t)aiocbp->aio_buf +
		aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / dev->info.sector_size;
	for (j = 0; j < n; j++) {
		uintptr_t data = start + j * PAGE_SIZE;

		if (!write) {
			/* Trigger CoW if needed */
			*(char *)(data + (req->seg[j].first_sect *
					  dev->info.sector_size)) = 0;
			barrier();
		}
		req->seg[j].gref = gnttab_grant_access(dev->dom,
						       virt_to_pfn((void *)data),
						       write);
		aiocbp->gref[j] = req->seg[j].gref;
	}

	dev->ring.req_prod_pvt = i + 1;

	wmb();
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);

	if (notify)
		notify_remote_via_evtchn(dev->evtchn);
}
| 527 | |
| 528 | static void blkfront_aio_cb(struct blkfront_aiocb *aiocbp, int ret) |
| 529 | { |
| 530 | aiocbp->data = (void *)1; |
| 531 | aiocbp->aio_cb = NULL; |
| 532 | } |
| 533 | |
| 534 | static void blkfront_io(struct blkfront_aiocb *aiocbp, int write) |
| 535 | { |
| 536 | aiocbp->aio_cb = blkfront_aio_cb; |
| 537 | blkfront_aio(aiocbp, write); |
| 538 | aiocbp->data = NULL; |
| 539 | |
| 540 | while (true) { |
| 541 | blkfront_aio_poll(aiocbp->aio_dev); |
| 542 | if (aiocbp->data) |
| 543 | break; |
| 544 | cpu_relax(); |
| 545 | } |
| 546 | } |
| 547 | |
| 548 | static void blkfront_push_operation(struct blkfront_dev *dev, u8 op, |
| 549 | uint64_t id) |
| 550 | { |
| 551 | struct blkif_request *req; |
| 552 | int notify, i; |
| 553 | |
| 554 | blkfront_wait_slot(dev); |
| 555 | i = dev->ring.req_prod_pvt; |
| 556 | req = RING_GET_REQUEST(&dev->ring, i); |
| 557 | req->operation = op; |
| 558 | req->nr_segments = 0; |
| 559 | req->handle = dev->handle; |
| 560 | req->id = id; |
| 561 | req->sector_number = 0; |
| 562 | dev->ring.req_prod_pvt = i + 1; |
| 563 | wmb(); |
| 564 | RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify); |
| 565 | if (notify) |
| 566 | notify_remote_via_evtchn(dev->evtchn); |
| 567 | } |
| 568 | |
| 569 | static void blkfront_sync(struct blkfront_dev *dev) |
| 570 | { |
| 571 | if (dev->info.mode == O_RDWR) { |
| 572 | if (dev->info.barrier == 1) |
| 573 | blkfront_push_operation(dev, |
| 574 | BLKIF_OP_WRITE_BARRIER, 0); |
| 575 | |
| 576 | if (dev->info.flush == 1) |
| 577 | blkfront_push_operation(dev, |
| 578 | BLKIF_OP_FLUSH_DISKCACHE, 0); |
| 579 | } |
| 580 | |
| 581 | while (true) { |
| 582 | blkfront_aio_poll(dev); |
| 583 | if (RING_FREE_REQUESTS(&dev->ring) == RING_SIZE(&dev->ring)) |
| 584 | break; |
| 585 | cpu_relax(); |
| 586 | } |
| 587 | } |
| 588 | |
/**
 * pvblock_iop() - Issue an aio.
 * @udev: Pvblock device
 * @blknr: Block number to read from / write to
 * @blkcnt: Amount of blocks to read / write
 * @buffer: Memory buffer with data to be read / write
 * @write: Describes is it read or write operation
 *	   0 - read
 *	   1 - write
 *
 * Depending on the operation - reading or writing, data is read / written from the
 * specified address (@buffer) to the sector (@blknr). If @buffer is not
 * sector-aligned, the transfer is bounced one block at a time through the
 * device's bounce buffer.
 *
 * Return: Number of blocks transferred, or 0 on error.
 */
static ulong pvblock_iop(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, int write)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	struct blkfront_aiocb aiocb;
	lbaint_t blocks_todo;
	bool unaligned;

	if (blkcnt == 0)
		return 0;

	if ((blknr + blkcnt) > desc->lba) {
		printf(DRV_NAME ": block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       blknr + blkcnt, desc->lba);
		return 0;
	}

	unaligned = (uintptr_t)buffer & (blk_dev->info.sector_size - 1);

	aiocb.aio_dev = blk_dev;
	aiocb.aio_offset = blknr * desc->blksz;
	aiocb.aio_cb = NULL;
	aiocb.data = NULL;
	blocks_todo = blkcnt;
	do {
		aiocb.aio_buf = unaligned ? blk_dev->bounce_buffer : buffer;

		if (write && unaligned)
			memcpy(blk_dev->bounce_buffer, buffer, desc->blksz);

		/*
		 * Unaligned transfers go one block per iteration through
		 * the bounce buffer; aligned ones are capped by how many
		 * segments a single ring request can carry.
		 */
		aiocb.aio_nbytes = unaligned ? desc->blksz :
			min((size_t)((BLKIF_MAX_SEGMENTS_PER_REQUEST - 1)
				     * PAGE_SIZE),
			    (size_t)(blocks_todo * desc->blksz));

		blkfront_io(&aiocb, write);

		if (!write && unaligned)
			memcpy(buffer, blk_dev->bounce_buffer, desc->blksz);

		aiocb.aio_offset += aiocb.aio_nbytes;
		buffer += aiocb.aio_nbytes;
		blocks_todo -= aiocb.aio_nbytes / desc->blksz;
	} while (blocks_todo > 0);

	return blkcnt;
}
| 650 | |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 651 | ulong pvblock_blk_read(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt, |
| 652 | void *buffer) |
| 653 | { |
Anastasiia Lukianenko | e855fbf | 2020-08-06 12:42:58 +0300 | [diff] [blame] | 654 | return pvblock_iop(udev, blknr, blkcnt, buffer, 0); |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 655 | } |
| 656 | |
| 657 | ulong pvblock_blk_write(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt, |
| 658 | const void *buffer) |
| 659 | { |
Anastasiia Lukianenko | e855fbf | 2020-08-06 12:42:58 +0300 | [diff] [blame] | 660 | return pvblock_iop(udev, blknr, blkcnt, (void *)buffer, 1); |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 661 | } |
| 662 | |
| 663 | static int pvblock_blk_bind(struct udevice *udev) |
| 664 | { |
Simon Glass | 71fa5b4 | 2020-12-03 16:55:18 -0700 | [diff] [blame] | 665 | struct blk_desc *desc = dev_get_uclass_plat(udev); |
Anastasiia Lukianenko | 79d9f2a | 2020-08-06 12:42:56 +0300 | [diff] [blame] | 666 | int devnum; |
| 667 | |
Simon Glass | fada3f9 | 2022-09-17 09:00:09 -0600 | [diff] [blame] | 668 | desc->uclass_id = UCLASS_PVBLOCK; |
Anastasiia Lukianenko | 79d9f2a | 2020-08-06 12:42:56 +0300 | [diff] [blame] | 669 | /* |
| 670 | * Initialize the devnum to -ENODEV. This is to make sure that |
| 671 | * blk_next_free_devnum() works as expected, since the default |
| 672 | * value 0 is a valid devnum. |
| 673 | */ |
| 674 | desc->devnum = -ENODEV; |
Simon Glass | dbfa32c | 2022-08-11 19:34:59 -0600 | [diff] [blame] | 675 | devnum = blk_next_free_devnum(UCLASS_PVBLOCK); |
Anastasiia Lukianenko | 79d9f2a | 2020-08-06 12:42:56 +0300 | [diff] [blame] | 676 | if (devnum < 0) |
| 677 | return devnum; |
| 678 | desc->devnum = devnum; |
| 679 | desc->part_type = PART_TYPE_UNKNOWN; |
| 680 | desc->bdev = udev; |
| 681 | |
| 682 | strncpy(desc->vendor, "Xen", sizeof(desc->vendor)); |
| 683 | strncpy(desc->revision, "1", sizeof(desc->revision)); |
| 684 | strncpy(desc->product, "Virtual disk", sizeof(desc->product)); |
| 685 | |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 686 | return 0; |
| 687 | } |
| 688 | |
/**
 * pvblock_blk_probe() - Probe a pvblock block device
 * @udev: Block device
 *
 * Consumes the devid from the platform data allocated in on_new_vbd(),
 * connects to the Xen backend and fills in the block descriptor
 * geometry from the values read from XenStore.
 *
 * Return: 0 on success or a negative error from init_blkfront().
 */
static int pvblock_blk_probe(struct udevice *udev)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blkfront_plat *plat = dev_get_plat(udev);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	int ret, devid;

	/*
	 * The platform data only carries the devid (malloc'ed in
	 * on_new_vbd()), so free it once consumed.
	 * NOTE(review): the device still holds the stale plat pointer
	 * after this free - confirm nothing dereferences it post-probe.
	 */
	devid = plat->devid;
	free(plat);

	ret = init_blkfront(devid, blk_dev);
	if (ret < 0)
		return ret;

	desc->blksz = blk_dev->info.sector_size;
	desc->lba = blk_dev->info.sectors;
	desc->log2blksz = LOG2(blk_dev->info.sector_size);

	return 0;
}
| 709 | |
/**
 * pvblock_blk_remove() - Remove a pvblock block device
 * @udev: Block device
 *
 * Disconnects from the Xen backend and releases the device resources.
 *
 * Return: Always 0.
 */
static int pvblock_blk_remove(struct udevice *udev)
{
	shutdown_blkfront(dev_get_priv(udev));

	return 0;
}
| 717 | |
/* Block operations exposed to the blk uclass */
static const struct blk_ops pvblock_blk_ops = {
	.read = pvblock_blk_read,
	.write = pvblock_blk_write,
};

/*
 * Driver for a single para-virtual block device; one instance is bound
 * per VBD discovered in XenStore (see on_new_vbd()).
 */
U_BOOT_DRIVER(pvblock_blk) = {
	.name = DRV_NAME_BLK,
	.id = UCLASS_BLK,
	.ops = &pvblock_blk_ops,
	.bind = pvblock_blk_bind,
	.probe = pvblock_blk_probe,
	.remove = pvblock_blk_remove,
	.priv_auto = sizeof(struct blkfront_dev),
	.flags = DM_FLAG_OS_PREPARE,
};
| 733 | |
| 734 | /******************************************************************************* |
| 735 | * Para-virtual block device class |
| 736 | *******************************************************************************/ |
| 737 | |
/* Callback invoked by xenbus_enumerate_vbd() for each discovered device id */
typedef int (*enum_vbd_callback)(struct udevice *parent, unsigned int devid);
| 739 | |
| 740 | static int on_new_vbd(struct udevice *parent, unsigned int devid) |
| 741 | { |
| 742 | struct driver_info info; |
| 743 | struct udevice *udev; |
Simon Glass | b75b15b | 2020-12-03 16:55:23 -0700 | [diff] [blame] | 744 | struct blkfront_plat *plat; |
Anastasiia Lukianenko | 79d9f2a | 2020-08-06 12:42:56 +0300 | [diff] [blame] | 745 | int ret; |
| 746 | |
| 747 | debug("New " DRV_NAME_BLK ", device ID %d\n", devid); |
| 748 | |
Simon Glass | b75b15b | 2020-12-03 16:55:23 -0700 | [diff] [blame] | 749 | plat = malloc(sizeof(struct blkfront_plat)); |
Simon Glass | 71fa5b4 | 2020-12-03 16:55:18 -0700 | [diff] [blame] | 750 | if (!plat) { |
Anastasiia Lukianenko | 79d9f2a | 2020-08-06 12:42:56 +0300 | [diff] [blame] | 751 | printf("Failed to allocate platform data\n"); |
| 752 | return -ENOMEM; |
| 753 | } |
| 754 | |
Simon Glass | 71fa5b4 | 2020-12-03 16:55:18 -0700 | [diff] [blame] | 755 | plat->devid = devid; |
Anastasiia Lukianenko | 79d9f2a | 2020-08-06 12:42:56 +0300 | [diff] [blame] | 756 | |
| 757 | info.name = DRV_NAME_BLK; |
Simon Glass | 71fa5b4 | 2020-12-03 16:55:18 -0700 | [diff] [blame] | 758 | info.plat = plat; |
Anastasiia Lukianenko | 79d9f2a | 2020-08-06 12:42:56 +0300 | [diff] [blame] | 759 | |
| 760 | ret = device_bind_by_name(parent, false, &info, &udev); |
| 761 | if (ret < 0) { |
| 762 | printf("Failed to bind " DRV_NAME_BLK " to device with ID %d, ret: %d\n", |
| 763 | devid, ret); |
Simon Glass | 71fa5b4 | 2020-12-03 16:55:18 -0700 | [diff] [blame] | 764 | free(plat); |
Anastasiia Lukianenko | 79d9f2a | 2020-08-06 12:42:56 +0300 | [diff] [blame] | 765 | } |
| 766 | return ret; |
| 767 | } |
| 768 | |
| 769 | static int xenbus_enumerate_vbd(struct udevice *udev, enum_vbd_callback clb) |
| 770 | { |
| 771 | char **dirs, *msg; |
| 772 | int i, ret; |
| 773 | |
| 774 | msg = xenbus_ls(XBT_NIL, "device/vbd", &dirs); |
| 775 | if (msg) { |
| 776 | printf("Failed to read device/vbd directory: %s\n", msg); |
| 777 | free(msg); |
| 778 | return -ENODEV; |
| 779 | } |
| 780 | |
| 781 | for (i = 0; dirs[i]; i++) { |
| 782 | int devid; |
| 783 | |
| 784 | sscanf(dirs[i], "%d", &devid); |
| 785 | ret = clb(udev, devid); |
| 786 | if (ret < 0) |
| 787 | goto fail; |
| 788 | |
| 789 | free(dirs[i]); |
| 790 | } |
| 791 | ret = 0; |
| 792 | |
| 793 | fail: |
| 794 | for (; dirs[i]; i++) |
| 795 | free(dirs[i]); |
| 796 | free(dirs); |
| 797 | return ret; |
| 798 | } |
| 799 | |
Anastasiia Lukianenko | ddf6e6a | 2020-08-06 12:42:59 +0300 | [diff] [blame] | 800 | static void print_pvblock_devices(void) |
| 801 | { |
| 802 | struct udevice *udev; |
| 803 | bool first = true; |
| 804 | const char *class_name; |
| 805 | |
| 806 | class_name = uclass_get_name(UCLASS_PVBLOCK); |
Simon Glass | dbfa32c | 2022-08-11 19:34:59 -0600 | [diff] [blame] | 807 | for (blk_first_device(UCLASS_PVBLOCK, &udev); udev; |
Anastasiia Lukianenko | ddf6e6a | 2020-08-06 12:42:59 +0300 | [diff] [blame] | 808 | blk_next_device(&udev), first = false) { |
Simon Glass | 71fa5b4 | 2020-12-03 16:55:18 -0700 | [diff] [blame] | 809 | struct blk_desc *desc = dev_get_uclass_plat(udev); |
Anastasiia Lukianenko | ddf6e6a | 2020-08-06 12:42:59 +0300 | [diff] [blame] | 810 | |
| 811 | if (!first) |
| 812 | puts(", "); |
| 813 | printf("%s: %d", class_name, desc->devnum); |
| 814 | } |
| 815 | printf("\n"); |
| 816 | } |
| 817 | |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 818 | void pvblock_init(void) |
| 819 | { |
| 820 | struct driver_info info; |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 821 | int ret; |
| 822 | |
| 823 | /* |
| 824 | * At this point Xen drivers have already initialized, |
| 825 | * so we can instantiate the class driver and enumerate |
| 826 | * virtual block devices. |
| 827 | */ |
| 828 | info.name = DRV_NAME; |
Michal Suchanek | 94142a7 | 2022-10-22 16:33:05 +0200 | [diff] [blame] | 829 | ret = device_bind_by_name(gd->dm_root, false, &info, NULL); |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 830 | if (ret < 0) |
| 831 | printf("Failed to bind " DRV_NAME ", ret: %d\n", ret); |
| 832 | |
| 833 | /* Bootstrap virtual block devices class driver */ |
Michal Suchanek | 94142a7 | 2022-10-22 16:33:05 +0200 | [diff] [blame] | 834 | uclass_probe_all(UCLASS_PVBLOCK); |
Anastasiia Lukianenko | ddf6e6a | 2020-08-06 12:42:59 +0300 | [diff] [blame] | 835 | |
| 836 | print_pvblock_devices(); |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 837 | } |
| 838 | |
| 839 | static int pvblock_probe(struct udevice *udev) |
| 840 | { |
Anastasiia Lukianenko | 79d9f2a | 2020-08-06 12:42:56 +0300 | [diff] [blame] | 841 | struct uclass *uc; |
| 842 | int ret; |
| 843 | |
| 844 | if (xenbus_enumerate_vbd(udev, on_new_vbd) < 0) |
| 845 | return -ENODEV; |
| 846 | |
| 847 | ret = uclass_get(UCLASS_BLK, &uc); |
| 848 | if (ret) |
| 849 | return ret; |
Michal Suchanek | 53beee9 | 2022-10-12 21:58:05 +0200 | [diff] [blame] | 850 | uclass_foreach_dev_probe(UCLASS_BLK, udev); |
Anastasiia Lukianenko | 4fec7f8 | 2020-08-06 12:42:55 +0300 | [diff] [blame] | 851 | return 0; |
| 852 | } |
| 853 | |
/* Driver for the single pvblock class device bound in pvblock_init() */
U_BOOT_DRIVER(pvblock_drv) = {
	.name = DRV_NAME,
	.id = UCLASS_PVBLOCK,
	.probe = pvblock_probe,
};
| 859 | |
/* Uclass for Xen para-virtual block devices */
UCLASS_DRIVER(pvblock) = {
	.name = DRV_NAME,
	.id = UCLASS_PVBLOCK,
};