// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
 * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
 *
 * virtio ring implementation
 */

#include <common.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <virtio_types.h>
#include <virtio.h>
#include <virtio_ring.h>
#include <linux/bug.h>
#include <linux/compat.h>

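/*
 * Attach the scatter-gather entry @sg to the descriptor at index @i.
 * The driver-private shadow descriptor is filled in first and then
 * mirrored into the shared ring, so later reads (e.g. when detaching
 * the chain) never depend on device-writable memory. Returns the index
 * of the next descriptor in the free chain.
 */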
static unsigned int virtqueue_attach_desc(struct virtqueue *vq, unsigned int i,
					  struct virtio_sg *sg, u16 flags)
{
	struct vring_desc_shadow *desc_shadow = &vq->vring_desc_shadow[i];
	struct vring_desc *desc = &vq->vring.desc[i];

	/* Update the shadow descriptor. */
	desc_shadow->addr = (u64)(uintptr_t)sg->addr;
	desc_shadow->len = sg->length;
	desc_shadow->flags = flags;

	/* Update the shared descriptor to match the shadow. */
	desc->addr = cpu_to_virtio64(vq->vdev, desc_shadow->addr);
	desc->len = cpu_to_virtio32(vq->vdev, desc_shadow->len);
	desc->flags = cpu_to_virtio16(vq->vdev, desc_shadow->flags);
	desc->next = cpu_to_virtio16(vq->vdev, desc_shadow->next);

	return desc_shadow->next;
}

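/*
 * virtqueue_add() - expose buffers to the other end
 *
 * Builds a descriptor chain from the caller's scatter-gather lists: the
 * first @out_sgs entries are device-readable, the remaining @in_sgs
 * entries are device-writable. The head of the chain is then published
 * in the available ring; the caller must kick the queue afterwards.
 *
 * A minimal sketch of a request/response exchange (the request and
 * response variables are hypothetical, for illustration only):
 *
 *	struct virtio_sg req_sg = { &req, sizeof(req) };
 *	struct virtio_sg resp_sg = { &resp, sizeof(resp) };
 *	struct virtio_sg *sgs[] = { &req_sg, &resp_sg };
 *
 *	if (!virtqueue_add(vq, sgs, 1, 1))
 *		virtqueue_kick(vq);
 *
 * Return: 0 on success, -ENOSPC when fewer than out_sgs + in_sgs
 * descriptors are free.
 */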
int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
		  unsigned int out_sgs, unsigned int in_sgs)
{
	struct vring_desc *desc;
	unsigned int descs_used = out_sgs + in_sgs;
	unsigned int i, n, avail, uninitialized_var(prev);
	int head;

	WARN_ON(descs_used == 0);

	head = vq->free_head;

	desc = vq->vring.desc;
	i = head;

	if (vq->num_free < descs_used) {
		debug("Can't add buf len %u - avail = %u\n",
		      descs_used, vq->num_free);
		/*
		 * FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer. Presumably the
		 * host should service the ring ASAP.
		 */
		if (out_sgs)
			virtio_notify(vq->vdev, vq);
		return -ENOSPC;
	}

	for (n = 0; n < descs_used; n++) {
		u16 flags = VRING_DESC_F_NEXT;

		if (n >= out_sgs)
			flags |= VRING_DESC_F_WRITE;
		prev = i;
		i = virtqueue_attach_desc(vq, i, sgs[n], flags);
	}
	/* Last one doesn't continue */
	vq->vring_desc_shadow[prev].flags &= ~VRING_DESC_F_NEXT;
	desc[prev].flags = cpu_to_virtio16(vq->vdev, vq->vring_desc_shadow[prev].flags);

	/* We're using some buffers from the free list. */
	vq->num_free -= descs_used;

	/* Update free pointer */
	vq->free_head = i;

	/* Mark the descriptor as the head of a chain. */
	vq->vring_desc_shadow[head].chain_head = true;

	/*
	 * Put entry in available array (but don't update avail->idx
	 * until they do sync).
	 */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(vq->vdev, head);

	/*
	 * Descriptors and available array need to be set before we expose the
	 * new available array entries.
	 */
	virtio_wmb();
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	/*
	 * This is very unlikely, but theoretically possible.
	 * Kick just in case.
	 */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(vq);

	return 0;
}

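/*
 * Decide whether the device needs to be notified about newly added
 * buffers: with VIRTIO_RING_F_EVENT_IDX negotiated, compare against the
 * avail event index published by the device; otherwise honour the
 * device's VRING_USED_F_NO_NOTIFY flag.
 */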
static bool virtqueue_kick_prepare(struct virtqueue *vq)
{
	u16 new, old;
	bool needs_kick;

	/*
	 * We need to expose available array entries before checking
	 * avail event.
	 */
	virtio_mb();

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(vq->vdev,
				vring_avail_event(&vq->vring)), new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(vq->vdev,
				VRING_USED_F_NO_NOTIFY));
	}

	return needs_kick;
}

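/* Notify the device if virtqueue_kick_prepare() says a kick is needed. */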
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtio_notify(vq->vdev, vq);
}

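/*
 * Return the descriptor chain starting at @head to the free list. The
 * walk follows the shadow descriptors only, so it does not depend on
 * the (device-visible) shared ring contents.
 */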
static void detach_buf(struct virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Unmark the descriptor as the head of a chain. */
	vq->vring_desc_shadow[head].chain_head = false;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring_desc_shadow[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring_desc_shadow[i].next;
		vq->num_free++;
	}

	vq->vring_desc_shadow[i].next = vq->free_head;
	vq->free_head = head;

	/* Plus final descriptor */
	vq->num_free++;
}

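/* Check whether the device has returned any buffers since we last looked. */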
static inline bool more_used(const struct virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vdev,
						    vq->vring.used->idx);
}

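/*
 * virtqueue_get_buf() - get the next used buffer
 *
 * Returns the buffer address of the next chain the device has marked as
 * used, or NULL if the used ring is empty or the returned id is invalid.
 * If @len is non-NULL, it is set to the number of bytes the device wrote
 * into the buffer. The id is validated against the ring size and the
 * shadow chain_head flag before the chain is recycled.
 */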
void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
{
	unsigned int i;
	u16 last_used;

	if (!more_used(vq)) {
		debug("(%s.%d): No more buffers in queue\n",
		      vq->vdev->name, vq->index);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host */
	virtio_rmb();

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(vq->vdev, vq->vring.used->ring[last_used].id);
	if (len) {
		*len = virtio32_to_cpu(vq->vdev,
				       vq->vring.used->ring[last_used].len);
		debug("(%s.%d): last used idx %u with len %u\n",
		      vq->vdev->name, vq->index, i, *len);
	}

	if (unlikely(i >= vq->vring.num)) {
		printf("(%s.%d): id %u out of range\n",
		       vq->vdev->name, vq->index, i);
		return NULL;
	}

	if (unlikely(!vq->vring_desc_shadow[i].chain_head)) {
		printf("(%s.%d): id %u is not a head\n",
		       vq->vdev->name, vq->index, i);
		return NULL;
	}

	detach_buf(vq, i);
	vq->last_used_idx++;
	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(&vring_used_event(&vq->vring),
				cpu_to_virtio16(vq->vdev, vq->last_used_idx));

	return (void *)(uintptr_t)vq->vring_desc_shadow[i].addr;
}

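/*
 * Wrap an initialized vring in a virtqueue: allocate the queue and its
 * shadow descriptor array, link all descriptors into the free list and
 * register the queue with the virtio device's uclass-private list.
 */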
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring vring,
					       struct udevice *udev)
{
	unsigned int i;
	struct virtqueue *vq;
	struct vring_desc_shadow *vring_desc_shadow;
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
	struct udevice *vdev = uc_priv->vdev;

	vq = malloc(sizeof(*vq));
	if (!vq)
		return NULL;

	vring_desc_shadow = calloc(vring.num, sizeof(struct vring_desc_shadow));
	if (!vring_desc_shadow) {
		free(vq);
		return NULL;
	}

	vq->vdev = vdev;
	vq->index = index;
	vq->num_free = vring.num;
	vq->vring = vring;
	vq->vring_desc_shadow = vring_desc_shadow;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->list, &uc_priv->vqs);

	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* Tell other side not to bother us */
	vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
	if (!vq->event)
		vq->vring.avail->flags = cpu_to_virtio16(vdev,
							 vq->avail_flags_shadow);

	/* Put everything in free lists */
	vq->free_head = 0;
	for (i = 0; i < vring.num - 1; i++)
		vq->vring_desc_shadow[i].next = i + 1;

	return vq;
}

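/*
 * vring_create_virtqueue() - allocate and initialize a virtqueue
 *
 * @num must be a power of 2. When a ring of @num descriptors does not
 * fit into (or cannot be allocated as) a single page, @num is halved
 * until an allocation succeeds or the ring fits in one page, so the
 * resulting ring may be smaller than requested.
 *
 * A minimal usage sketch (the queue index, size and alignment below are
 * illustrative values, not requirements):
 *
 *	struct virtqueue *vq;
 *
 *	vq = vring_create_virtqueue(0, 256, 4096, udev);
 *	if (!vq)
 *		return -ENOMEM;
 *
 * Return: the new virtqueue, or NULL on failure.
 */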
struct virtqueue *vring_create_virtqueue(unsigned int index, unsigned int num,
					 unsigned int vring_align,
					 struct udevice *udev)
{
	struct virtqueue *vq;
	void *queue = NULL;
	struct vring vring;

	/* We assume num is a power of 2 */
	if (num & (num - 1)) {
		printf("Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
	}
	if (!queue)
		return NULL;

	memset(queue, 0, vring_size(num, vring_align));
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, udev);
	if (!vq) {
		free(queue);
		return NULL;
	}
	debug("(%s): created vring @ %p for vq @ %p with num %u\n", udev->name,
	      queue, vq, num);

	return vq;
}

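/* Free the ring memory, the shadow descriptors and the queue itself. */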
void vring_del_virtqueue(struct virtqueue *vq)
{
	free(vq->vring.desc);
	free(vq->vring_desc_shadow);
	list_del(&vq->list);
	free(vq);
}

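/* Return the ring size (number of descriptors) of this virtqueue. */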
unsigned int virtqueue_get_vring_size(struct virtqueue *vq)
{
	return vq->vring.num;
}

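/*
 * The three accessors below return the addresses of the descriptor
 * table, available ring and used ring; a transport driver uses these
 * when programming the queue location into the device.
 */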
ulong virtqueue_get_desc_addr(struct virtqueue *vq)
{
	return (ulong)vq->vring.desc;
}

ulong virtqueue_get_avail_addr(struct virtqueue *vq)
{
	return (ulong)vq->vring.desc +
	       ((char *)vq->vring.avail - (char *)vq->vring.desc);
}

ulong virtqueue_get_used_addr(struct virtqueue *vq)
{
	return (ulong)vq->vring.desc +
	       ((char *)vq->vring.used - (char *)vq->vring.desc);
}

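/*
 * Check whether the used index has advanced past @last_used_idx, i.e.
 * whether a call to virtqueue_get_buf() would find a new buffer.
 */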
bool virtqueue_poll(struct virtqueue *vq, u16 last_used_idx)
{
	virtio_mb();

	return last_used_idx != virtio16_to_cpu(vq->vdev, vq->vring.used->idx);
}

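/* Dump the virtqueue state: shadow descriptors, avail ring and used ring. */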
void virtqueue_dump(struct virtqueue *vq)
{
	unsigned int i;

	printf("virtqueue %p for dev %s:\n", vq, vq->vdev->name);
	printf("\tindex %u, phys addr %p num %u\n",
	       vq->index, vq->vring.desc, vq->vring.num);
	printf("\tfree_head %u, num_added %u, num_free %u\n",
	       vq->free_head, vq->num_added, vq->num_free);
	printf("\tlast_used_idx %u, avail_flags_shadow %u, avail_idx_shadow %u\n",
	       vq->last_used_idx, vq->avail_flags_shadow, vq->avail_idx_shadow);

	printf("Shadow descriptor dump:\n");
	for (i = 0; i < vq->vring.num; i++) {
		struct vring_desc_shadow *desc = &vq->vring_desc_shadow[i];

		printf("\tdesc_shadow[%u] = { 0x%llx, len %u, flags %u, next %u }\n",
		       i, desc->addr, desc->len, desc->flags, desc->next);
	}

	printf("Avail ring dump:\n");
	printf("\tflags %u, idx %u\n",
	       vq->vring.avail->flags, vq->vring.avail->idx);
	for (i = 0; i < vq->vring.num; i++) {
		printf("\tavail[%u] = %u\n",
		       i, vq->vring.avail->ring[i]);
	}

	printf("Used ring dump:\n");
	printf("\tflags %u, idx %u\n",
	       vq->vring.used->flags, vq->vring.used->idx);
	for (i = 0; i < vq->vring.num; i++) {
		printf("\tused[%u] = { %u, %u }\n", i,
		       vq->vring.used->ring[i].id, vq->vring.used->ring[i].len);
	}
}