// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <pci.h>
#include <time.h>
#include <dm/device-internal.h>
#include <linux/compat.h>
#include "nvme.h"

#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	((depth) * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	((depth) * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512

enum nvme_queue_id {
	NVME_ADMIN_Q,
	NVME_IO_Q,
	NVME_Q_NUM,
};

/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	struct nvme_command *sq_cmds;
	struct nvme_completion *cqes;
	wait_queue_head_t sq_full;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	unsigned long cmdid_data[];
};

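/**
 * nvme_wait_ready() - wait for the controller ready status to settle
 *
 * Poll CSTS.RDY until it matches the requested state (set after enabling,
 * clear after disabling), within the timeout advertised in CAP.TO, which
 * is specified in 500 millisecond units.
 *
 * @dev:	NVMe controller
 * @enabled:	expected ready state
 * Return:	0 on success, -ETIME if the controller did not become ready
 */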
static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
{
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	int timeout;
	ulong start;

	/* Timeout field in the CAP register is in 500 millisecond units */
	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
			return 0;
	}

	return -ETIME;
}

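/**
 * nvme_setup_prps() - set up the PRP entries for a data transfer
 *
 * PRP1 always holds the address of the first (possibly unaligned) page of
 * the buffer. If the transfer extends beyond a second page, PRP2 points to
 * a list of physical page addresses built in dev->prp_pool; otherwise PRP2
 * is the address of the second page (or 0 if unused). The pool is grown on
 * demand and flushed from the data cache before the command is issued.
 *
 * @dev:	NVMe controller
 * @prp2:	returns the value to place in the command's PRP2 field
 * @total_len:	transfer length in bytes
 * @dma_addr:	bus address of the data buffer
 * Return:	0 on success, -ENOMEM if the PRP pool cannot be enlarged
 */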
static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;
	u32 prps_per_page = (page_size >> 3) - 1;
	u32 num_pages;

	length -= (page_size - offset);

	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	if (length)
		dma_addr += (page_size - offset);

	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	num_pages = DIV_ROUND_UP(nprps, prps_per_page);

	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		/*
		 * Always increase in increments of pages. It doesn't waste
		 * much memory and reduces the number of allocations.
		 */
		dev->prp_pool = memalign(page_size, num_pages * page_size);
		if (!dev->prp_pool) {
			printf("Error: failed to allocate PRP pool\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = prps_per_page * num_pages;
	}

	prp_pool = dev->prp_pool;
	i = 0;
	while (nprps) {
		if (i == prps_per_page) {
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 0;
			prp_pool += page_size >> 3;
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
			   dev->prp_entry_num * sizeof(u64));

	return 0;
}

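/* Hand out a fresh command identifier for each submitted command */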
static __le16 nvme_get_cmd_id(void)
{
	static unsigned short cmdid;

	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
}

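/*
 * Read the status field of a completion queue entry, invalidating the
 * data cache first so the CPU sees what the controller wrote via DMA.
 */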
static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	u64 start = (ulong)&nvmeq->cqes[index];
	u64 stop = start + sizeof(struct nvme_completion);

	invalidate_dcache_range(start, stop);

	return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
}

/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq:	The queue to use
 * @cmd:	The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

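/**
 * nvme_submit_sync_cmd() - submit a command and poll for its completion
 *
 * The command is queued and the completion queue is polled until an entry
 * with the expected phase bit appears or the timeout expires. The completion
 * queue doorbell is then updated and the NVMe status code is returned.
 *
 * @nvmeq:	queue to submit on
 * @cmd:	command to send
 * @result:	optional pointer to receive the completion's result dword
 * @timeout:	poll timeout for the completion
 * Return:	0 on success, -ETIMEDOUT or -EIO on failure
 */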
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
{
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;
	ulong start_time;
	ulong timeout_us = timeout * 100000;

	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);

	start_time = timer_get_us();

	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
			break;
		if (timeout_us > 0 &&
		    (timer_get_us() - start_time) >= timeout_us)
			return -ETIMEDOUT;
	}

	status >>= 1;
	if (status) {
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		status = 0;
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;

		return -EIO;
	}

	if (result)
		*result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
				 u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
				    result, ADMIN_TIMEOUT);
}

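/*
 * Allocate a queue pair: page-aligned, zeroed submission and completion
 * rings plus the bookkeeping structure, and register it in dev->queues.
 */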
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_queue;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;

	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	return nvmeq;

free_queue:
	free((void *)nvmeq->cqes);
free_nvmeq:
	free(nvmeq);

	return NULL;
}

static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

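/*
 * Enable or disable the controller by toggling CC.EN, then wait for
 * CSTS.RDY to reflect the new state.
 */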
static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, true);
}

static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, false);
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
	dev->online_queues++;
}

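/**
 * nvme_configure_admin_queue() - bring up the admin queue pair
 *
 * Picks a page size compatible with both the host (4 KiB assumed) and the
 * controller's CAP.MPSMIN/MPSMAX limits, disables the controller, programs
 * AQA, ASQ and ACQ with the admin queue parameters, and re-enables it.
 *
 * @dev:	NVMe controller
 * Return:	0 on success, negative error code otherwise
 */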
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = dev->cap;
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}

	result = nvme_disable_ctrl(dev);
	if (result < 0)
		return result;

	nvmeq = dev->queues[NVME_ADMIN_Q];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->page_size = 1 << page_shift;

	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

	result = nvme_enable_ctrl(dev);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;

	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);

	return result;

free_nvmeq:
	nvme_free_queues(dev, 0);

	return result;
}

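/*
 * Issue the admin "create I/O completion queue" command for the given
 * queue ID; nvme_alloc_sq() below does the same for the submission queue.
 */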
static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

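/**
 * nvme_identify() - issue an Identify admin command
 *
 * The caller supplies a DMA buffer large enough for one identify data
 * structure (4 KiB); the buffer is invalidated from the data cache after
 * the command completes so the CPU sees the controller's data.
 *
 * @dev:	NVMe controller
 * @nsid:	namespace ID (0 for controller-scoped identify)
 * @cns:	Controller or Namespace Structure value
 * @dma_addr:	bus address of the identify data buffer
 * Return:	0 on success, negative error code otherwise
 */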
int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
{
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);
	int ret;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);

	length -= (page_size - offset);
	if (length <= 0) {
		c.identify.prp2 = 0;
	} else {
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	}

	c.identify.cns = cpu_to_le32(cns);

	ret = nvme_submit_admin_cmd(dev, &c, NULL);
	if (!ret)
		invalidate_dcache_range(dma_addr,
					dma_addr + sizeof(struct nvme_id_ctrl));

	return ret;
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	/*
	 * TODO: add cache invalidate operation when the size of
	 * the DMA buffer is known
	 */

	return nvme_submit_admin_cmd(dev, &c, result);
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	/*
	 * TODO: add cache flush operation when the size of
	 * the DMA buffer is known
	 */

	return nvme_submit_admin_cmd(dev, &c, result);
}

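/*
 * Create an I/O queue pair on the controller (completion queue first,
 * then the submission queue) and initialise the host-side bookkeeping.
 */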
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);

	return result;

release_sq:
	nvme_delete_sq(dev, qid);
release_cq:
	nvme_delete_cq(dev, qid);

	return result;
}

static int nvme_set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
				   q_count, 0, &result);

	if (status < 0)
		return status;
	if (status > 1)
		return 0;

	return min(result & 0xffff, result >> 16) + 1;
}

static void nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned int i;

	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			break;

	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
		if (nvme_create_queue(dev->queues[i], i))
			break;
}

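/*
 * Negotiate the number of I/O queues with the controller (a single queue
 * is sufficient here), then allocate and create them.
 */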
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int nr_io_queues;
	int result;

	nr_io_queues = 1;
	result = nvme_set_queue_count(dev, nr_io_queues);
	if (result <= 0)
		return result;

	dev->max_qid = nr_io_queues;

	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	nvme_create_io_queues(dev);

	return 0;
}

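/*
 * Read the controller's Identify Controller data and cache the fields the
 * driver needs: namespace count, volatile write cache flag, serial number,
 * model, firmware revision and the maximum transfer size.
 */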
static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	struct nvme_id_ctrl *ctrl;
	int ret;
	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;

	ctrl = memalign(dev->page_size, sizeof(struct nvme_id_ctrl));
	if (!ctrl)
		return -ENOMEM;

	ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
	if (ret) {
		free(ctrl);
		return -EIO;
	}

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts) {
		dev->max_transfer_shift = (ctrl->mdts + shift);
	} else {
		/*
		 * Maximum Data Transfer Size (MDTS) field indicates the maximum
		 * data transfer size between the host and the controller. The
		 * host should not submit a command that exceeds this transfer
		 * size. The value is in units of the minimum memory page size
		 * and is reported as a power of two (2^n).
		 *
		 * The spec also says: a value of 0h indicates no restrictions
		 * on transfer size. But in nvme_blk_read/write() below we have
		 * the following algorithm for maximum number of logic blocks
		 * per transfer:
		 *
		 *   u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
		 *
		 * In order for lbas not to overflow, the maximum number is 15
		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
		 * Let's use 20 which provides 1MB size.
		 */
		dev->max_transfer_shift = 20;
	}

	free(ctrl);
	return 0;
}

int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
{
	struct nvme_ns *ns = dev_get_priv(udev);

	if (ns_id)
		*ns_id = ns->ns_id;
	if (eui64)
		memcpy(eui64, ns->eui64, sizeof(ns->eui64));

	return 0;
}

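/*
 * Probe all controllers in the NVMe uclass so that their namespace block
 * devices become available.
 */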
int nvme_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_NVME, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			return ret;
	}

	return 0;
}

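/*
 * Probe one namespace block device: identify the namespace, record its LBA
 * format and capacity, and fill in the block descriptor (vendor, product
 * and revision strings come from the parent controller).
 */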
static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	struct pci_child_platdata *pplat;
	struct nvme_id_ns *id;

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id)
		return -ENOMEM;

	memset(ns, 0, sizeof(*ns));
	ns->dev = ndev;
	/* extract the namespace id from the block device name */
	ns->ns_id = trailing_strtol(udev->name) + 1;
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id)) {
		free(id);
		return -EIO;
	}

	memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	ns->mode_select_num_blocks = le64_to_cpu(id->nsze);
	ns->mode_select_block_len = 1 << ns->lba_shift;
	list_add(&ns->list, &ndev->namespaces);

	desc->lba = ns->mode_select_num_blocks;
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	desc->bdev = udev;
	pplat = dev_get_parent_platdata(udev->parent);
	sprintf(desc->vendor, "0x%.4x", pplat->vendor);
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));

	free(id);
	return 0;
}

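/**
 * nvme_blk_rw() - common read/write path for the block interface
 *
 * Splits the request into chunks no larger than the controller's maximum
 * transfer size, builds PRP entries for each chunk and issues NVMe
 * read/write commands on the I/O queue. Caches are flushed before the
 * transfer and invalidated afterwards for reads.
 *
 * @udev:	namespace block device
 * @blknr:	first logical block
 * @blkcnt:	number of logical blocks
 * @buffer:	data buffer
 * @read:	true for read, false for write
 * Return:	number of blocks actually transferred
 */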
static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, bool read)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;
	uintptr_t temp_buffer = (uintptr_t)buffer;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	flush_dcache_range((unsigned long)buffer,
			   (unsigned long)buffer + total_len);

	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps(dev, &prp2,
				    lbas << ns->lba_shift, temp_buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64(temp_buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
					      &c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= (u32)lbas << ns->lba_shift;
		temp_buffer += lbas << ns->lba_shift;
	}

	if (read)
		invalidate_dcache_range((unsigned long)buffer,
					(unsigned long)buffer + total_len);

	return (total_len - temp_len) >> desc->log2blksz;
}

static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
}

static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
}

static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
};

U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto_alloc_size = sizeof(struct nvme_ns),
};

static int nvme_bind(struct udevice *udev)
{
	static int ndev_num;
	char name[20];

	sprintf(name, "nvme#%d", ndev_num++);

	return device_set_name(udev, name);
}

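/*
 * Probe the controller: map BAR0, allocate the queue-pointer array, bring
 * up the admin queue, size the PRP pool from the negotiated page size, set
 * up the I/O queues and read the Identify Controller data.
 */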
static int nvme_probe(struct udevice *udev)
{
	int ret;
	struct nvme_dev *ndev = dev_get_priv(udev);

	ndev->instance = trailing_strtol(udev->name);

	INIT_LIST_HEAD(&ndev->namespaces);
	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
				   PCI_REGION_MEM);
	if (readl(&ndev->bar->csts) == -1) {
		ret = -ENODEV;
		printf("Error: %s: Controller registers not accessible!\n",
		       udev->name);
		goto free_nvme;
	}

	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
	if (!ndev->queues) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));

	ndev->cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

	ret = nvme_configure_admin_queue(ndev);
	if (ret)
		goto free_queue;

	/* Allocate after the page size is known */
	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_queue;
	}
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;

	ret = nvme_setup_io_queues(ndev);
	if (ret)
		goto free_queue;

	nvme_get_info_from_identify(ndev);

	return 0;

free_queue:
	free((void *)ndev->queues);
free_nvme:
	return ret;
}

U_BOOT_DRIVER(nvme) = {
	.name	= "nvme",
	.id	= UCLASS_NVME,
	.bind	= nvme_bind,
	.probe	= nvme_probe,
	.priv_auto_alloc_size = sizeof(struct nvme_dev),
};

struct pci_device_id nvme_supported[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
	{}
};

U_BOOT_PCI_DEVICE(nvme, nvme_supported);