// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
 */

#include <blk.h>
#include <bootdev.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <time.h>
#include <dm/device-internal.h>
#include <linux/compat.h>
#include "nvme.h"

#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define NVME_CQ_ALLOCATION	ALIGN(NVME_CQ_SIZE(NVME_Q_DEPTH), \
				      ARCH_DMA_MINALIGN)
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512

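/**
 * nvme_wait_csts() - wait for a controller status (CSTS) condition
 *
 * Polls CSTS until the bits selected by @mask read back as @val, giving up
 * after the timeout advertised in the controller's CAP register.
 *
 * @dev: NVMe controller
 * @mask: CSTS bits to test
 * @val: expected value of the masked bits
 * Return: 0 on success, -ETIME if the condition was not met in time
 */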
static int nvme_wait_csts(struct nvme_dev *dev, u32 mask, u32 val)
{
	int timeout;
	ulong start;

	/* Timeout field in the CAP register is in 500 millisecond units */
	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if ((readl(&dev->bar->csts) & mask) == val)
			return 0;
	}

	return -ETIME;
}

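/**
 * nvme_setup_prps() - build the PRP list for a data transfer
 *
 * The caller puts the start of the buffer in the command's PRP1 field;
 * this helper only works out PRP2. For transfers that span more than two
 * pages it fills the per-device PRP pool with page entries (chaining pool
 * pages when necessary) and returns the pool address in @prp2.
 *
 * @dev: NVMe controller
 * @prp2: returns the value to put in the command's PRP2 field
 * @total_len: length of the transfer in bytes
 * @dma_addr: bus address of the data buffer
 * Return: 0 on success, -ENOMEM if the PRP pool cannot be grown
 */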
static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;
	u32 prps_per_page = page_size >> 3;
	u32 num_pages;

	length -= (page_size - offset);

	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	if (length)
		dma_addr += (page_size - offset);

	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	num_pages = DIV_ROUND_UP(nprps - 1, prps_per_page - 1);

	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		/*
		 * Always increase in increments of pages. It doesn't waste
		 * much memory and reduces the number of allocations.
		 */
		dev->prp_pool = memalign(page_size, num_pages * page_size);
		if (!dev->prp_pool) {
			printf("Error: malloc prp_pool fail\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = num_pages * (prps_per_page - 1) + 1;
	}

	prp_pool = dev->prp_pool;
	i = 0;
	while (nprps) {
		if ((i == (prps_per_page - 1)) && nprps > 1) {
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 0;
			prp_pool += page_size;
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
			   num_pages * page_size);

	return 0;
}

static __le16 nvme_get_cmd_id(void)
{
	static unsigned short cmdid;

	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
}

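/**
 * nvme_read_completion_status() - read the status of a completion entry
 *
 * @nvmeq: queue whose completion ring should be read
 * @index: index of the completion entry to check
 * Return: the status field of the entry, including the phase bit
 */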
static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	/*
	 * Single CQ entries are always smaller than a cache line, so we
	 * can't invalidate them individually. However CQ entries are
	 * read only by the CPU, so it's safe to always invalidate all of them,
	 * as the cache line should never become dirty.
	 */
	ulong start = (ulong)&nvmeq->cqes[0];
	ulong stop = start + NVME_CQ_ALLOCATION;

	invalidate_dcache_range(start, stop);

	return readw(&(nvmeq->cqes[index].status));
}

/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq: The queue to use
 * @cmd: The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	struct nvme_ops *ops;
	u16 tail = nvmeq->sq_tail;

	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

	ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
	if (ops && ops->submit_cmd) {
		ops->submit_cmd(nvmeq, cmd);
		return;
	}

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

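/**
 * nvme_submit_sync_cmd() - submit a command and wait for its completion
 *
 * Rings the doorbell, polls the completion queue until the phase bit
 * matches, then consumes the entry and advances the queue head/phase.
 *
 * @nvmeq: the queue to use
 * @cmd: the command to send
 * @result: if non-NULL, returns the command-specific result (DW0)
 * @timeout: how long to wait, in 100 ms units; 0 means wait forever
 * Return: 0 on success, -EIO on a command error, -ETIMEDOUT if no
 *	   completion arrived in time
 */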
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
{
	struct nvme_ops *ops;
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;
	ulong start_time;
	ulong timeout_us = timeout * 100000;

	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);

	start_time = timer_get_us();

	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
			break;
		if (timeout_us > 0 && (timer_get_us() - start_time)
		    >= timeout_us)
			return -ETIMEDOUT;
	}

	ops = (struct nvme_ops *)nvmeq->dev->udev->driver->ops;
	if (ops && ops->complete_cmd)
		ops->complete_cmd(nvmeq, cmd);

	status >>= 1;
	if (status) {
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		status = 0;
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;

		return -EIO;
	}

	if (result)
		*result = readl(&(nvmeq->cqes[head].result));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
				 u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
				    result, ADMIN_TIMEOUT);
}

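/**
 * nvme_alloc_queue() - allocate a submission/completion queue pair
 *
 * Allocates the CQ and SQ rings, initialises the queue bookkeeping and
 * registers the queue in @dev->queues[@qid].
 *
 * @dev: NVMe controller
 * @qid: queue identifier (0 is the admin queue)
 * @depth: number of entries in each ring
 * Return: pointer to the new queue, or NULL on allocation failure
 */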
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_ops *ops;
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_ALLOCATION);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_queue;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;

	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	ops = (struct nvme_ops *)dev->udev->driver->ops;
	if (ops && ops->setup_queue)
		ops->setup_queue(nvmeq);

	return nvmeq;

 free_queue:
	free((void *)nvmeq->cqes);
 free_nvmeq:
	free(nvmeq);

	return NULL;
}

static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_csts(dev, NVME_CSTS_RDY, NVME_CSTS_RDY);
}

static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_csts(dev, NVME_CSTS_RDY, 0);
}

static int nvme_shutdown_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_SHN_NORMAL;
	writel(dev->ctrl_config, &dev->bar->cc);

	return nvme_wait_csts(dev, NVME_CSTS_SHST_MASK, NVME_CSTS_SHST_CMPLT);
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_ALLOCATION);
	dev->online_queues++;
}

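/**
 * nvme_configure_admin_queue() - set up the admin queue and enable the
 * controller
 *
 * Picks a page size supported by both host and device, allocates the admin
 * queue if it does not exist yet, programs AQA/ASQ/ACQ and the controller
 * configuration register, then re-enables the controller.
 *
 * @dev: NVMe controller
 * Return: 0 on success, -ve on error
 */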
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = dev->cap;
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}

	result = nvme_disable_ctrl(dev);
	if (result < 0)
		return result;

	nvmeq = dev->queues[NVME_ADMIN_Q];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->page_size = 1 << page_shift;

	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

	result = nvme_enable_ctrl(dev);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;

	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);

	return result;

 free_nvmeq:
	nvme_free_queues(dev, 0);

	return result;
}

static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

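/**
 * nvme_identify() - issue an Identify admin command
 *
 * @dev: NVMe controller
 * @nsid: namespace identifier, for CNS values that need one
 * @cns: Controller or Namespace Structure value (0 for namespace data,
 *	 1 for controller data)
 * @dma_addr: bus address of the buffer receiving the identify data
 * Return: 0 on success, -ve on error
 */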
int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
{
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);
	int ret;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);

	length -= (page_size - offset);
	if (length <= 0) {
		c.identify.prp2 = 0;
	} else {
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	}

	c.identify.cns = cpu_to_le32(cns);

	invalidate_dcache_range(dma_addr,
				dma_addr + sizeof(struct nvme_id_ctrl));

	ret = nvme_submit_admin_cmd(dev, &c, NULL);
	if (!ret)
		invalidate_dcache_range(dma_addr,
					dma_addr + sizeof(struct nvme_id_ctrl));

	return ret;
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	ret = nvme_submit_admin_cmd(dev, &c, result);

	/*
	 * TODO: Add some cache invalidation when a DMA buffer is involved
	 * in the request, here and before the command gets submitted. The
	 * buffer size varies by feature, also some features use a different
	 * field in the command packet to hold the buffer address.
	 * Section 5.21.1 (Set Features command) in the NVMe specification
	 * details the buffer requirements for each feature.
	 *
	 * At the moment there is no user of this function.
	 */

	return ret;
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	/*
	 * TODO: Add a cache clean (aka flush) operation when a DMA buffer is
	 * involved in the request. The buffer size varies by feature, also
	 * some features use a different field in the command packet to hold
	 * the buffer address. Section 5.21.1 (Set Features command) in the
	 * NVMe specification details the buffer requirements for each
	 * feature.
	 * At the moment the only user of this function is not using
	 * any DMA buffer at all.
	 */

	return nvme_submit_admin_cmd(dev, &c, result);
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);

	return result;

 release_sq:
	nvme_delete_sq(dev, qid);
 release_cq:
	nvme_delete_cq(dev, qid);

	return result;
}

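/**
 * nvme_set_queue_count() - request a number of I/O queues
 *
 * Uses the Number of Queues feature to ask the controller for @count I/O
 * submission and completion queues.
 *
 * @dev: NVMe controller
 * @count: number of I/O queue pairs requested
 * Return: the number of I/O queues the controller granted, or -ve on error
 */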
static int nvme_set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
				   q_count, 0, &result);

	if (status < 0)
		return status;
	if (status > 1)
		return 0;

	return min(result & 0xffff, result >> 16) + 1;
}

static int nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned int i;
	int ret;

	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			return log_msg_ret("all", -ENOMEM);

	for (i = dev->online_queues; i <= dev->queue_count - 1; i++) {
		ret = nvme_create_queue(dev->queues[i], i);
		if (ret)
			return log_msg_ret("cre", ret);
	}

	return 0;
}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int nr_io_queues;
	int result;

	nr_io_queues = 1;
	result = nvme_set_queue_count(dev, nr_io_queues);
	if (result <= 0) {
		log_debug("Cannot set queue count (err=%dE)\n", result);
		return result;
	}

	dev->max_qid = nr_io_queues;

	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	result = nvme_create_io_queues(dev);
	if (result)
		return result;

	return 0;
}

static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	struct nvme_id_ctrl *ctrl;
	int ret;
	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;

	ctrl = memalign(dev->page_size, sizeof(struct nvme_id_ctrl));
	if (!ctrl)
		return -ENOMEM;

	ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
	if (ret) {
		free(ctrl);
		return -EIO;
	}

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts)
		dev->max_transfer_shift = (ctrl->mdts + shift);
	else {
		/*
		 * Maximum Data Transfer Size (MDTS) field indicates the maximum
		 * data transfer size between the host and the controller. The
		 * host should not submit a command that exceeds this transfer
		 * size. The value is in units of the minimum memory page size
		 * and is reported as a power of two (2^n).
		 *
		 * The spec also says: a value of 0h indicates no restrictions
		 * on transfer size. But in nvme_blk_read/write() below we have
		 * the following algorithm for maximum number of logic blocks
		 * per transfer:
		 *
		 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
		 *
		 * In order for lbas not to overflow, the maximum number is 15
		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
		 * Let's use 20 which provides 1MB size.
		 */
		dev->max_transfer_shift = 20;
	}

	free(ctrl);
	return 0;
}

int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
{
	struct nvme_ns *ns = dev_get_priv(udev);

	if (ns_id)
		*ns_id = ns->ns_id;
	if (eui64)
		memcpy(eui64, ns->eui64, sizeof(ns->eui64));

	return 0;
}

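/**
 * nvme_scan_namespace() - probe all NVMe controllers and their namespaces
 *
 * Probes every device in the NVMe uclass, which in turn creates and probes
 * a block device per active namespace.
 *
 * Return: 0 on success, -ve on error
 */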
int nvme_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_NVME, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret) {
			log_err("Failed to probe '%s': err=%dE\n", dev->name,
				ret);
			/* Bail if we ran out of memory, else keep trying */
			if (ret != -EBUSY)
				return ret;
		}
	}

	return 0;
}

static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	struct nvme_id_ns *id;

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id)
		return -ENOMEM;

	ns->dev = ndev;
	/* extract the namespace id from the block device name */
	ns->ns_id = trailing_strtol(udev->name);
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id)) {
		free(id);
		return -EIO;
	}

	memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	list_add(&ns->list, &ndev->namespaces);

	desc->lba = le64_to_cpu(id->nsze);
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	desc->bdev = udev;
	memcpy(desc->vendor, ndev->vendor, sizeof(ndev->vendor));
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));

	free(id);
	return 0;
}

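/**
 * nvme_blk_rw() - common read/write path for the block driver
 *
 * Splits the request into chunks no larger than the controller's maximum
 * transfer size, sets up PRPs for each chunk and issues NVMe read or write
 * commands on the I/O queue.
 *
 * @udev: block device
 * @blknr: first logical block to transfer
 * @blkcnt: number of logical blocks to transfer
 * @buffer: data buffer
 * @read: true for a read, false for a write
 * Return: number of blocks actually transferred
 */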
static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, bool read)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;
	uintptr_t temp_buffer = (uintptr_t)buffer;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	flush_dcache_range((unsigned long)buffer,
			   (unsigned long)buffer + total_len);

	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps(dev, &prp2,
				    lbas << ns->lba_shift, temp_buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64(temp_buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
				&c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= (u32)lbas << ns->lba_shift;
		temp_buffer += lbas << ns->lba_shift;
	}

	if (read)
		invalidate_dcache_range((unsigned long)buffer,
					(unsigned long)buffer + total_len);

	return (total_len - temp_len) >> desc->log2blksz;
}

static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
}

static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
}

static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
};

U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto	= sizeof(struct nvme_ns),
};

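/**
 * nvme_init() - initialise an NVMe controller
 *
 * Brings up the admin queue and one I/O queue pair, reads the controller
 * identify data, then binds an "nvme-blk" block device (plus a bootdev)
 * for each active namespace.
 *
 * @udev: NVMe controller device
 * Return: 0 on success, -ve on error
 */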
int nvme_init(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev);
	struct nvme_id_ns *id;
	int ret;

	ndev->udev = udev;
	INIT_LIST_HEAD(&ndev->namespaces);
	if (readl(&ndev->bar->csts) == -1) {
		ret = -EBUSY;
		printf("Error: %s: Controller not ready!\n", udev->name);
		goto free_nvme;
	}

	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
	if (!ndev->queues) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));

	ndev->cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

	ret = nvme_configure_admin_queue(ndev);
	if (ret) {
		log_debug("Unable to configure admin queue (err=%dE)\n", ret);
		goto free_queue;
	}

	/* Allocate after the page size is known */
	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;

	ret = nvme_setup_io_queues(ndev);
	if (ret) {
		log_debug("Unable to setup I/O queues(err=%dE)\n", ret);
		goto free_queue;
	}

	nvme_get_info_from_identify(ndev);

	/* Create a blk device for each namespace */

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id) {
		ret = -ENOMEM;
		goto free_queue;
	}

	for (int i = 1; i <= ndev->nn; i++) {
		struct udevice *ns_udev;
		char name[20];

		memset(id, 0, sizeof(*id));
		if (nvme_identify(ndev, i, 0, (dma_addr_t)(long)id)) {
			ret = -EIO;
			goto free_id;
		}

		/* skip inactive namespace */
		if (!id->nsze)
			continue;

		/*
		 * Encode the namespace id to the device name so that
		 * we can extract it when doing the probe.
		 */
		sprintf(name, "blk#%d", i);

		/* The real blksz and size will be set by nvme_blk_probe() */
		ret = blk_create_devicef(udev, "nvme-blk", name, UCLASS_NVME,
					 -1, DEFAULT_BLKSZ, 0, &ns_udev);
		if (ret)
			goto free_id;

		ret = bootdev_setup_for_sibling_blk(ns_udev, "nvme_bootdev");
		if (ret)
			return log_msg_ret("bootdev", ret);

		ret = blk_probe_or_unbind(ns_udev);
		if (ret)
			goto free_id;
	}

	free(id);
	return 0;

free_id:
	free(id);
free_queue:
	free((void *)ndev->queues);
free_nvme:
	return ret;
}

int nvme_shutdown(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev);
	int ret;

	ret = nvme_shutdown_ctrl(ndev);
	if (ret < 0) {
		printf("Error: %s: Shutdown timed out!\n", udev->name);
		return ret;
	}

	return nvme_disable_ctrl(ndev);
}