// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
 */

#include <common.h>
#include <blk.h>
#include <bootdev.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <time.h>
#include <dm/device-internal.h>
#include <linux/compat.h>
#include "nvme.h"

#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define NVME_CQ_ALLOCATION	ALIGN(NVME_CQ_SIZE(NVME_Q_DEPTH), \
				      ARCH_DMA_MINALIGN)
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512

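/**
 * nvme_wait_csts() - poll the controller status register for a state change
 *
 * Polls CSTS until (CSTS & @mask) == @val, giving up after the timeout
 * advertised in the controller's CAP register.
 *
 * @dev:	NVMe controller
 * @mask:	bits of CSTS to test
 * @val:	expected value of the masked bits
 * Return:	0 on success, -ETIME if the controller did not reach the state
 */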
static int nvme_wait_csts(struct nvme_dev *dev, u32 mask, u32 val)
{
	int timeout;
	ulong start;

	/* Timeout field in the CAP register is in 500 millisecond units */
	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if ((readl(&dev->bar->csts) & mask) == val)
			return 0;
	}

	return -ETIME;
}

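/**
 * nvme_setup_prps() - set up the PRP entries for a data transfer
 *
 * PRP1 always points at the start of the buffer; this computes PRP2, which is
 * either zero (transfer fits in the first page), the address of the second
 * page, or the address of a PRP list built in dev->prp_pool and flushed to
 * memory so the controller can read it.
 *
 * @dev:	NVMe controller
 * @prp2:	returns the value to place in the command's PRP2 field
 * @total_len:	length of the transfer in bytes
 * @dma_addr:	bus address of the data buffer
 * Return:	0 on success, -ENOMEM if the PRP pool cannot be grown
 */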
static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;
	u32 prps_per_page = page_size >> 3;
	u32 num_pages;

	length -= (page_size - offset);

	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	if (length)
		dma_addr += (page_size - offset);

	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	num_pages = DIV_ROUND_UP(nprps - 1, prps_per_page - 1);

	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		/*
		 * Always increase in increments of pages. It doesn't waste
		 * much memory and reduces the number of allocations.
		 */
		dev->prp_pool = memalign(page_size, num_pages * page_size);
		if (!dev->prp_pool) {
			printf("Error: malloc prp_pool fail\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = num_pages * (prps_per_page - 1) + 1;
	}

	prp_pool = dev->prp_pool;
	i = 0;
	while (nprps) {
		if ((i == (prps_per_page - 1)) && nprps > 1) {
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 0;
			prp_pool += page_size;
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
			   num_pages * page_size);

	return 0;
}

static __le16 nvme_get_cmd_id(void)
{
	static unsigned short cmdid;

	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
}

static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	/*
	 * Single CQ entries are always smaller than a cache line, so we
	 * can't invalidate them individually. However CQ entries are
	 * read only by the CPU, so it's safe to always invalidate all of them,
	 * as the cache line should never become dirty.
	 */
	ulong start = (ulong)&nvmeq->cqes[0];
	ulong stop = start + NVME_CQ_ALLOCATION;

	invalidate_dcache_range(start, stop);

	return readw(&(nvmeq->cqes[index].status));
}

/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq: The queue to use
 * @cmd: The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	struct nvme_ops *ops;
	u16 tail = nvmeq->sq_tail;

	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

	ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
	if (ops && ops->submit_cmd) {
		ops->submit_cmd(nvmeq, cmd);
		return;
	}

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

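/**
 * nvme_submit_sync_cmd() - submit a command and poll for its completion
 *
 * Issues the command on the given queue, spins on the completion entry's
 * phase bit until the controller posts a completion (or the timeout expires),
 * then advances the completion queue head doorbell.
 *
 * @nvmeq:	the submission/completion queue pair to use
 * @cmd:	the command to send
 * @result:	if non-NULL, receives the completion's result field
 * @timeout:	how long to poll before giving up (ADMIN_TIMEOUT/IO_TIMEOUT)
 * Return:	0 on success, the NVMe status code on device error,
 *		-ETIMEDOUT if no completion arrived in time
 */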
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
{
	struct nvme_ops *ops;
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;
	ulong start_time;
	ulong timeout_us = timeout * 100000;

	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);

	start_time = timer_get_us();

	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
			break;
		if (timeout_us > 0 && (timer_get_us() - start_time)
		    >= timeout_us)
			return -ETIMEDOUT;
	}

	ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
	if (ops && ops->complete_cmd)
		ops->complete_cmd(nvmeq, cmd);

	status >>= 1;
	if (status) {
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		status = 0;
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;

		return -EIO;
	}

	if (result)
		*result = readl(&(nvmeq->cqes[head].result));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
				 u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
				    result, ADMIN_TIMEOUT);
}

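/**
 * nvme_alloc_queue() - allocate a submission/completion queue pair
 *
 * Allocates the CQ and SQ buffers (page-aligned so they can be handed to the
 * controller directly), initialises the host-side bookkeeping and stores the
 * queue in dev->queues[qid].
 *
 * @dev:	NVMe controller
 * @qid:	queue identifier (0 is the admin queue)
 * @depth:	number of entries in each queue
 * Return:	pointer to the new queue, or NULL on allocation failure
 */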
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_ops *ops;
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_ALLOCATION);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_queue;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;

	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	ops = (struct nvme_ops *)dev->udev->driver->ops;
	if (ops && ops->setup_queue)
		ops->setup_queue(nvmeq);

	return nvmeq;

 free_queue:
	free((void *)nvmeq->cqes);
 free_nvmeq:
	free(nvmeq);

	return NULL;
}

static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_csts(dev, NVME_CSTS_RDY, NVME_CSTS_RDY);
}

static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_csts(dev, NVME_CSTS_RDY, 0);
}

static int nvme_shutdown_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_SHN_NORMAL;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_csts(dev, NVME_CSTS_SHST_MASK, NVME_CSTS_SHST_CMPLT);
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_ALLOCATION);
	dev->online_queues++;
}

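/**
 * nvme_configure_admin_queue() - set up the admin queue and enable the controller
 *
 * Negotiates the memory page size with the controller, disables it, programs
 * the admin queue attributes (AQA/ASQ/ACQ) and re-enables the controller.
 *
 * @dev:	NVMe controller
 * Return:	0 on success, negative error code on failure
 */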
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = dev->cap;
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}

	result = nvme_disable_ctrl(dev);
	if (result < 0)
		return result;

	nvmeq = dev->queues[NVME_ADMIN_Q];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->page_size = 1 << page_shift;

	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

	result = nvme_enable_ctrl(dev);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;

	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);

	return result;

 free_nvmeq:
	nvme_free_queues(dev, 0);

	return result;
}

static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

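/**
 * nvme_identify() - issue an Identify admin command
 *
 * Fills the buffer at @dma_addr with the Identify data structure selected by
 * @cns (controller or namespace data), setting up PRP2 if the buffer crosses
 * a page boundary and invalidating the data cache over the buffer.
 *
 * @dev:	NVMe controller
 * @nsid:	namespace identifier (for namespace-related CNS values)
 * @cns:	Controller or Namespace Structure value to retrieve
 * @dma_addr:	bus address of the destination buffer
 * Return:	0 on success, negative error code on failure
 */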
int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
{
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);
	int ret;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);

	length -= (page_size - offset);
	if (length <= 0) {
		c.identify.prp2 = 0;
	} else {
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	}

	c.identify.cns = cpu_to_le32(cns);

	invalidate_dcache_range(dma_addr,
				dma_addr + sizeof(struct nvme_id_ctrl));

	ret = nvme_submit_admin_cmd(dev, &c, NULL);
	if (!ret)
		invalidate_dcache_range(dma_addr,
					dma_addr + sizeof(struct nvme_id_ctrl));

	return ret;
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = nvme_submit_admin_cmd(dev, &c, result);

	/*
	 * TODO: Add some cache invalidation when a DMA buffer is involved
	 * in the request, here and before the command gets submitted. The
	 * buffer size varies by feature, also some features use a different
	 * field in the command packet to hold the buffer address.
	 * Section 5.21.1 (Set Features command) in the NVMe specification
	 * details the buffer requirements for each feature.
	 *
	 * At the moment there is no user of this function.
	 */

	return ret;
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	/*
	 * TODO: Add a cache clean (aka flush) operation when a DMA buffer is
	 * involved in the request. The buffer size varies by feature, also
	 * some features use a different field in the command packet to hold
	 * the buffer address. Section 5.21.1 (Set Features command) in the
	 * NVMe specification details the buffer requirements for each
	 * feature.
	 * At the moment the only user of this function is not using
	 * any DMA buffer at all.
	 */

	return nvme_submit_admin_cmd(dev, &c, result);
}

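/**
 * nvme_create_queue() - create an I/O queue pair on the controller
 *
 * Sends the Create I/O Completion Queue and Create I/O Submission Queue admin
 * commands for the given queue and initialises the host-side state.
 *
 * @nvmeq:	queue allocated by nvme_alloc_queue()
 * @qid:	queue identifier
 * Return:	0 on success, negative error code on failure
 */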
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);

	return result;

 release_sq:
	nvme_delete_sq(dev, qid);
 release_cq:
	nvme_delete_cq(dev, qid);

	return result;
}

static int nvme_set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
			q_count, 0, &result);

	if (status < 0)
		return status;
	if (status > 1)
		return 0;

	return min(result & 0xffff, result >> 16) + 1;
}

static int nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned int i;
	int ret;

	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			return log_msg_ret("all", -ENOMEM);

	for (i = dev->online_queues; i <= dev->queue_count - 1; i++) {
		ret = nvme_create_queue(dev->queues[i], i);
		if (ret)
			return log_msg_ret("cre", ret);
	}

	return 0;
}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int nr_io_queues;
	int result;

	nr_io_queues = 1;
	result = nvme_set_queue_count(dev, nr_io_queues);
	if (result <= 0) {
		log_debug("Cannot set queue count (err=%dE)\n", result);
		return result;
	}

	dev->max_qid = nr_io_queues;

	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	result = nvme_create_io_queues(dev);
	if (result)
		return result;

	return 0;
}

static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	struct nvme_id_ctrl *ctrl;
	int ret;
	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;

	ctrl = memalign(dev->page_size, sizeof(struct nvme_id_ctrl));
	if (!ctrl)
		return -ENOMEM;

	ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
	if (ret) {
		free(ctrl);
		return -EIO;
	}

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts)
		dev->max_transfer_shift = (ctrl->mdts + shift);
	else {
		/*
		 * Maximum Data Transfer Size (MDTS) field indicates the maximum
		 * data transfer size between the host and the controller. The
		 * host should not submit a command that exceeds this transfer
		 * size. The value is in units of the minimum memory page size
		 * and is reported as a power of two (2^n).
		 *
		 * The spec also says: a value of 0h indicates no restrictions
		 * on transfer size. But in nvme_blk_read/write() below we have
		 * the following algorithm for maximum number of logic blocks
		 * per transfer:
		 *
		 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
		 *
		 * In order for lbas not to overflow, the maximum number is 15
		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
		 * Let's use 20 which provides 1MB size.
		 */
		dev->max_transfer_shift = 20;
	}

	free(ctrl);
	return 0;
}

int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
{
	struct nvme_ns *ns = dev_get_priv(udev);

	if (ns_id)
		*ns_id = ns->ns_id;
	if (eui64)
		memcpy(eui64, ns->eui64, sizeof(ns->eui64));

	return 0;
}

int nvme_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_NVME, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret) {
			log_err("Failed to probe '%s': err=%dE\n", dev->name,
				ret);
			return ret;
		}
	}

	return 0;
}

static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	struct nvme_id_ns *id;

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id)
		return -ENOMEM;

	ns->dev = ndev;
	/* extract the namespace id from the block device name */
	ns->ns_id = trailing_strtol(udev->name);
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id)) {
		free(id);
		return -EIO;
	}

	memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	list_add(&ns->list, &ndev->namespaces);

	desc->lba = le64_to_cpu(id->nsze);
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	desc->bdev = udev;
	memcpy(desc->vendor, ndev->vendor, sizeof(ndev->vendor));
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));

	free(id);
	return 0;
}

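/**
 * nvme_blk_rw() - read or write a range of logical blocks
 *
 * Splits the request into chunks no larger than the controller's maximum
 * transfer size, builds the PRPs for each chunk and issues NVMe read/write
 * commands on the I/O queue, performing the data cache maintenance needed
 * for DMA.
 *
 * @udev:	block device (namespace) to access
 * @blknr:	first logical block
 * @blkcnt:	number of logical blocks
 * @buffer:	data buffer
 * @read:	true to read, false to write
 * Return:	number of blocks actually transferred
 */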
static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, bool read)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;
	uintptr_t temp_buffer = (uintptr_t)buffer;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	flush_dcache_range((unsigned long)buffer,
			   (unsigned long)buffer + total_len);

	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps(dev, &prp2,
				    lbas << ns->lba_shift, temp_buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64(temp_buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
				&c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= (u32)lbas << ns->lba_shift;
		temp_buffer += lbas << ns->lba_shift;
	}

	if (read)
		invalidate_dcache_range((unsigned long)buffer,
					(unsigned long)buffer + total_len);

	return (total_len - temp_len) >> desc->log2blksz;
}

static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
}

static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
}

static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
};

U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto	= sizeof(struct nvme_ns),
};

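/**
 * nvme_init() - initialise an NVMe controller and bind its namespaces
 *
 * Reads the controller capabilities, sets up the admin and I/O queues,
 * identifies the controller, then creates an nvme-blk block device (plus a
 * bootdev) for every active namespace.
 *
 * @udev:	NVMe controller device
 * Return:	0 on success, negative error code on failure
 */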
int nvme_init(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev);
	struct nvme_id_ns *id;
	int ret;

	ndev->udev = udev;
	INIT_LIST_HEAD(&ndev->namespaces);
	if (readl(&ndev->bar->csts) == -1) {
		ret = -EBUSY;
		printf("Error: %s: Controller not ready!\n", udev->name);
		goto free_nvme;
	}

	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
	if (!ndev->queues) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));

	ndev->cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

	ret = nvme_configure_admin_queue(ndev);
	if (ret) {
		log_debug("Unable to configure admin queue (err=%dE)\n", ret);
		goto free_queue;
	}

	/* Allocate after the page size is known */
	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;

	ret = nvme_setup_io_queues(ndev);
	if (ret) {
		log_debug("Unable to setup I/O queues(err=%dE)\n", ret);
		goto free_queue;
	}

	nvme_get_info_from_identify(ndev);

	/* Create a blk device for each namespace */

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id) {
		ret = -ENOMEM;
		goto free_queue;
	}

	for (int i = 1; i <= ndev->nn; i++) {
		struct udevice *ns_udev;
		char name[20];

		memset(id, 0, sizeof(*id));
		if (nvme_identify(ndev, i, 0, (dma_addr_t)(long)id)) {
			ret = -EIO;
			goto free_id;
		}

		/* skip inactive namespace */
		if (!id->nsze)
			continue;

		/*
		 * Encode the namespace id to the device name so that
		 * we can extract it when doing the probe.
		 */
		sprintf(name, "blk#%d", i);

		/* The real blksz and size will be set by nvme_blk_probe() */
		ret = blk_create_devicef(udev, "nvme-blk", name, UCLASS_NVME,
					 -1, DEFAULT_BLKSZ, 0, &ns_udev);
		if (ret)
			goto free_id;

		ret = bootdev_setup_for_sibling_blk(ns_udev, "nvme_bootdev");
		if (ret)
			return log_msg_ret("bootdev", ret);

		ret = blk_probe_or_unbind(ns_udev);
		if (ret)
			goto free_id;
	}

	free(id);
	return 0;

free_id:
	free(id);
free_queue:
	free((void *)ndev->queues);
free_nvme:
	return ret;
}

int nvme_shutdown(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev);
	int ret;

	ret = nvme_shutdown_ctrl(ndev);
	if (ret < 0) {
		printf("Error: %s: Shutdown timed out!\n", udev->name);
		return ret;
	}

	return nvme_disable_ctrl(ndev);
}