// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
 * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
 *
 * VirtIO memory-mapped I/O transport driver
 * Ported from Linux drivers/virtio/virtio_mmio.c
 */

#include <dm.h>
#include <log.h>
#include <virtio_types.h>
#include <virtio.h>
#include <virtio_ring.h>
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/err.h>
#include <linux/io.h>
#include "virtio_mmio.h"

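/*
 * Read from the device-specific config space. A legacy (version 1) device
 * exposes its config space as raw bytes, so it is copied out byte by byte.
 * A modern (version 2) device is accessed with width-matched reads and the
 * result is stored into the caller's buffer in little-endian form, with
 * 64-bit fields read as two 32-bit halves.
 */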
static int virtio_mmio_get_config(struct udevice *udev, unsigned int offset,
				  void *buf, unsigned int len)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	void __iomem *base = priv->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (priv->version == 1) {
		u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			ptr[i] = readb(base + offset + i);

		return 0;
	}

	switch (len) {
	case 1:
		b = readb(base + offset);
		memcpy(buf, &b, sizeof(b));
		break;
	case 2:
		w = cpu_to_le16(readw(base + offset));
		memcpy(buf, &w, sizeof(w));
		break;
	case 4:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof(l));
		break;
	case 8:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof(l));
		l = cpu_to_le32(readl(base + offset + sizeof(l)));
		memcpy(buf + sizeof(l), &l, sizeof(l));
		break;
	default:
		WARN_ON(true);
	}

	return 0;
}

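/*
 * Write to the device-specific config space. As in the read path, a legacy
 * (version 1) device is written byte by byte, while a modern (version 2)
 * device takes width-matched writes of the little-endian buffer contents,
 * with 64-bit fields split into two 32-bit writes.
 */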
static int virtio_mmio_set_config(struct udevice *udev, unsigned int offset,
				  const void *buf, unsigned int len)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	void __iomem *base = priv->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (priv->version == 1) {
		const u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			writeb(ptr[i], base + offset + i);

		return 0;
	}

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof(b));
		writeb(b, base + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof(w));
		writew(le16_to_cpu(w), base + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof(l));
		writel(le32_to_cpu(l), base + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof(l));
		writel(le32_to_cpu(l), base + offset);
		memcpy(&l, buf + sizeof(l), sizeof(l));
		writel(le32_to_cpu(l), base + offset + sizeof(l));
		break;
	default:
		WARN_ON(true);
	}

	return 0;
}

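/*
 * Return the config generation counter. Legacy (version 1) devices have no
 * generation register, so 0 is reported for them.
 */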
static int virtio_mmio_generation(struct udevice *udev, u32 *counter)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	if (priv->version == 1)
		*counter = 0;
	else
		*counter = readl(priv->base + VIRTIO_MMIO_CONFIG_GENERATION);

	return 0;
}

static int virtio_mmio_get_status(struct udevice *udev, u8 *status)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	*status = readl(priv->base + VIRTIO_MMIO_STATUS) & 0xff;

	return 0;
}

static int virtio_mmio_set_status(struct udevice *udev, u8 status)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	/* We should never be setting status to 0 */
	WARN_ON(status == 0);

	writel(status, priv->base + VIRTIO_MMIO_STATUS);

	return 0;
}

static int virtio_mmio_reset(struct udevice *udev)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	/* 0 status means a reset */
	writel(0, priv->base + VIRTIO_MMIO_STATUS);

	return 0;
}

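/*
 * The 64-bit device feature word is read as two 32-bit halves selected via
 * DEVICE_FEATURES_SEL: bank 1 provides bits 63..32, bank 0 bits 31..0.
 */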
static int virtio_mmio_get_features(struct udevice *udev, u64 *features)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	writel(1, priv->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	*features = readl(priv->base + VIRTIO_MMIO_DEVICE_FEATURES);
	*features <<= 32;

	writel(0, priv->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	*features |= readl(priv->base + VIRTIO_MMIO_DEVICE_FEATURES);

	return 0;
}

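/*
 * Write the negotiated driver features back to the device, again as two
 * 32-bit halves selected via DRIVER_FEATURES_SEL. A version 2 device paired
 * with a legacy-only driver is rejected, since modern devices require
 * VIRTIO_F_VERSION_1.
 */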
static int virtio_mmio_set_features(struct udevice *udev)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);

	/* Make sure there are no mixed devices */
	if (priv->version == 2 && uc_priv->legacy) {
		debug("New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	writel(1, priv->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(uc_priv->features >> 32),
	       priv->base + VIRTIO_MMIO_DRIVER_FEATURES);

	writel(0, priv->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)uc_priv->features,
	       priv->base + VIRTIO_MMIO_DRIVER_FEATURES);

	return 0;
}

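/*
 * Allocate and activate a single virtqueue. The queue is selected via
 * QUEUE_SEL, its size is programmed into QUEUE_NUM, and the ring addresses
 * are handed to the device: a version 1 device takes a 32-bit page frame
 * number (setup fails if the ring lies above what a 32-bit PFN can address),
 * while a version 2 device takes 64-bit desc/avail/used addresses and is
 * then marked ready via QUEUE_READY.
 */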
static struct virtqueue *virtio_mmio_setup_vq(struct udevice *udev,
					      unsigned int index)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	struct virtqueue *vq;
	unsigned int num;
	int err;

	/* Select the queue we're interested in */
	writel(index, priv->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up */
	if (readl(priv->base + (priv->version == 1 ?
	    VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		goto error_available;
	}

	num = readl(priv->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	/* Create the vring */
	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, udev);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	/* Activate the queue */
	writel(virtqueue_get_vring_size(vq),
	       priv->base + VIRTIO_MMIO_QUEUE_NUM);
	if (priv->version == 1) {
		u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;

		/*
		 * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
		 * that doesn't fit in 32bit, fail the setup rather than
		 * pretending to be successful.
		 */
		if (q_pfn >> 32) {
			debug("platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
			      0x1ULL << (32 + PAGE_SHIFT - 30));
			err = -E2BIG;
			goto error_bad_pfn;
		}

		writel(PAGE_SIZE, priv->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(q_pfn, priv->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		u64 addr;

		addr = virtqueue_get_desc_addr(vq);
		writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
		       priv->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virtqueue_get_avail_addr(vq);
		writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
		       priv->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virtqueue_get_used_addr(vq);
		writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
		       priv->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, priv->base + VIRTIO_MMIO_QUEUE_READY);
	}

	return vq;

error_bad_pfn:
	vring_del_virtqueue(vq);

error_new_virtqueue:
	if (priv->version == 1) {
		writel(0, priv->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, priv->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(priv->base + VIRTIO_MMIO_QUEUE_READY));
	}

error_available:
	return ERR_PTR(err);
}

static void virtio_mmio_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_priv *priv = dev_get_priv(vq->vdev);
	unsigned int index = vq->index;

	/* Select and deactivate the queue */
	writel(index, priv->base + VIRTIO_MMIO_QUEUE_SEL);
	if (priv->version == 1) {
		writel(0, priv->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, priv->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(priv->base + VIRTIO_MMIO_QUEUE_READY));
	}

	vring_del_virtqueue(vq);
}

static int virtio_mmio_del_vqs(struct udevice *udev)
{
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &uc_priv->vqs, list)
		virtio_mmio_del_vq(vq);

	return 0;
}

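/*
 * Set up one virtqueue per requested index. If any queue fails to come up,
 * all queues created so far are torn down before the error is returned.
 */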
static int virtio_mmio_find_vqs(struct udevice *udev, unsigned int nvqs,
				struct virtqueue *vqs[])
{
	int i;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = virtio_mmio_setup_vq(udev, i);
		if (IS_ERR(vqs[i])) {
			virtio_mmio_del_vqs(udev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}

static int virtio_mmio_notify(struct udevice *udev, struct virtqueue *vq)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	/*
	 * We write the queue's selector into the notification register
	 * to signal the other end
	 */
	writel(vq->index, priv->base + VIRTIO_MMIO_QUEUE_NOTIFY);

	return 0;
}

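/* The transport's register base comes from the device tree "reg" property */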
static int virtio_mmio_of_to_plat(struct udevice *udev)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	priv->base = (void __iomem *)(ulong)dev_read_addr(udev);
	if (priv->base == (void __iomem *)FDT_ADDR_T_NONE)
		return -EINVAL;

	return 0;
}

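/*
 * Probe validates the 'virt' magic value and the device version (1 or 2),
 * then reads the device and vendor IDs. Device ID 0 marks a placeholder
 * node with no function behind it, which is skipped without error. For a
 * legacy device the guest page size is programmed so the device can
 * translate the queue PFN written during queue setup.
 */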
static int virtio_mmio_probe(struct udevice *udev)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
	u32 magic;

	/* Check magic value */
	magic = readl(priv->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		debug("(%s): wrong magic value 0x%08x!\n", udev->name, magic);
		return 0;
	}

	/* Check device version */
	priv->version = readl(priv->base + VIRTIO_MMIO_VERSION);
	if (priv->version < 1 || priv->version > 2) {
		debug("(%s): version %d not supported!\n",
		      udev->name, priv->version);
		return 0;
	}

	/* Check device ID */
	uc_priv->device = readl(priv->base + VIRTIO_MMIO_DEVICE_ID);
	if (uc_priv->device == 0) {
		/*
		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */
		return 0;
	}
	uc_priv->vendor = readl(priv->base + VIRTIO_MMIO_VENDOR_ID);

	if (priv->version == 1)
		writel(PAGE_SIZE, priv->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	debug("(%s): device (%d) vendor (%08x) version (%d)\n", udev->name,
	      uc_priv->device, uc_priv->vendor, priv->version);

	return 0;
}

static const struct dm_virtio_ops virtio_mmio_ops = {
	.get_config = virtio_mmio_get_config,
	.set_config = virtio_mmio_set_config,
	.generation = virtio_mmio_generation,
	.get_status = virtio_mmio_get_status,
	.set_status = virtio_mmio_set_status,
	.reset = virtio_mmio_reset,
	.get_features = virtio_mmio_get_features,
	.set_features = virtio_mmio_set_features,
	.find_vqs = virtio_mmio_find_vqs,
	.del_vqs = virtio_mmio_del_vqs,
	.notify = virtio_mmio_notify,
};

static const struct udevice_id virtio_mmio_ids[] = {
	{ .compatible = "virtio,mmio" },
	{ }
};

U_BOOT_DRIVER(virtio_mmio) = {
	.name = "virtio-mmio",
	.id = UCLASS_VIRTIO,
	.of_match = virtio_mmio_ids,
	.ops = &virtio_mmio_ops,
	.probe = virtio_mmio_probe,
	.of_to_plat = virtio_mmio_of_to_plat,
	.priv_auto = sizeof(struct virtio_mmio_priv),
};