/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <pci.h>
#include <dm/device-internal.h>
#include "nvme.h"

struct nvme_info *nvme_info;

#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	((depth) * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	((depth) * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512
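
/*
 * Note: this driver issues one command at a time and polls for its
 * completion, so a queue depth of 2 suffices: one slot in flight plus
 * one spare so the tail never catches up with the head. MAX_PRP_POOL
 * is in bytes; 512 bytes hold 64 8-byte PRP entries.
 */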

/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	struct nvme_command *sq_cmds;
	struct nvme_completion *cqes;
	wait_queue_head_t sq_full;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	unsigned long cmdid_data[];
};

static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
{
	u32 bit = enabled ? NVME_CSTS_RDY : 0;

	while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit)
		udelay(10000);

	return 0;
}
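
/*
 * Build the PRP (Physical Region Page) entries describing a data
 * buffer. PRP1 (set by the caller) covers the first, possibly
 * unaligned, page. If the remainder fits into one more page, *prp2 is
 * simply its address; otherwise *prp2 points at a PRP list built in
 * dev->prp_pool, one 8-byte entry per page, where the last entry of
 * each pool page chains to the next pool page. For example, a 16KiB
 * page-aligned transfer with 4KiB pages needs PRP1 plus a three-entry
 * list in *prp2.
 */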
static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;
	length -= (page_size - offset);

	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	if (length)
		dma_addr += (page_size - offset);

	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	nprps = DIV_ROUND_UP(length, page_size);

	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		dev->prp_pool = malloc(nprps << 3);
		if (!dev->prp_pool) {
			printf("Error: failed to allocate PRP pool\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = nprps;
	}

	prp_pool = dev->prp_pool;
	i = 0;
	while (nprps) {
		if (i == ((page_size >> 3) - 1)) {
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 0;
			prp_pool += page_size;
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	return 0;
}
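
/*
 * Command IDs simply increment and wrap. Since commands are issued
 * strictly one at a time and completions are consumed in order, the ID
 * is never matched against a completion entry; it only has to be
 * unique among outstanding commands.
 */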
static __le16 nvme_get_cmd_id(void)
{
	static unsigned short cmdid;

	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
}

static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	u64 start = (ulong)&nvmeq->cqes[index];
	u64 stop = start + sizeof(struct nvme_completion);

	invalidate_dcache_range(start, stop);

	return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
}

/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq:	The queue to use
 * @cmd:	The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}
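
/*
 * Submit a command and busy-wait for its completion. The controller
 * inverts the phase tag it writes to completion entries on every pass
 * through the queue, so a fresh completion is detected when the
 * entry's phase bit matches the phase we expect; the remaining status
 * bits are zero on success.
 */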
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
{
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;
	ulong start_time;
	ulong timeout_us = timeout * 100000;

	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);

	start_time = timer_get_us();

	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
			break;
		if (timeout_us > 0 && (timer_get_us() - start_time)
		    >= timeout_us)
			return -ETIMEDOUT;
	}

	status >>= 1;
	if (status) {
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		status = 0;
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;

		return -EIO;
	}

	if (result)
		*result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
				 u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}
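
/*
 * Allocate one submission/completion queue pair in host memory. The
 * 4096-byte alignment satisfies the spec requirement that queue base
 * addresses programmed into ASQ/ACQ (or passed in Create I/O Queue
 * commands) be memory-page aligned.
 */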
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_queue;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;

	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	return nvmeq;

free_queue:
	free((void *)nvmeq->cqes);
free_nvmeq:
	free(nvmeq);

	return NULL;
}

static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, true);
}

static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, false);
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
	dev->online_queues++;
}
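
/*
 * Bring up the admin queue: disable the controller, program AQA with
 * the zero-based submission and completion queue sizes, point ASQ/ACQ
 * at the queue memory, then re-enable the controller. CC.MPS encodes
 * the memory page size as a power of two starting at 4KiB, hence
 * "page_shift - 12".
 */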
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = nvme_readq(&dev->bar->cap);
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}

	result = nvme_disable_ctrl(dev);
	if (result < 0)
		return result;

	nvmeq = dev->queues[0];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->page_size = 1 << page_shift;

	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

	result = nvme_enable_ctrl(dev);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;

	nvme_init_queue(dev->queues[0], 0);

	return result;

free_nvmeq:
	nvme_free_queues(dev, 0);

	return result;
}
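
/*
 * Create the controller-side I/O completion queue. The queue memory is
 * physically contiguous, so NVME_QUEUE_PHYS_CONTIG is set and PRP1
 * carries the base address directly.
 */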
static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}
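
/*
 * Issue an Identify command. CNS 0 returns the namespace data
 * structure for @nsid, CNS 1 the controller data structure; both are
 * 4096 bytes, so at most two PRP entries are ever needed.
 */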
int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
{
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);

	length -= (page_size - offset);
	if (length <= 0) {
		c.identify.prp2 = 0;
	} else {
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	}

	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return nvme_submit_admin_cmd(dev, &c, result);
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);

	return result;

release_sq:
	nvme_delete_sq(dev, qid);
release_cq:
	nvme_delete_cq(dev, qid);

	return result;
}
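
/*
 * Ask for @count I/O queue pairs via the Number of Queues feature.
 * Both halves of dword11 hold zero-based counts; the controller
 * returns how many submission and completion queues it actually
 * allocated, of which the smaller half is the usable pair count.
 */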
static int nvme_set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
				   q_count, 0, &result);

	if (status < 0)
		return status;
	if (status > 1)
		return 0;

	return min(result & 0xffff, result >> 16) + 1;
}

static void nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned int i;

	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			break;

	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
		if (nvme_create_queue(dev->queues[i], i))
			break;
}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int nr_io_queues;
	int result;

	nr_io_queues = 1;
	result = nvme_set_queue_count(dev, nr_io_queues);
	if (result <= 0)
		return result;

	if (result < nr_io_queues)
		nr_io_queues = result;

	dev->max_qid = nr_io_queues;

	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	nvme_create_io_queues(dev);

	return 0;
}

static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	u16 vendor, device;
	struct nvme_id_ctrl buf, *ctrl = &buf;
	int ret;
	int shift = NVME_CAP_MPSMIN(nvme_readq(&dev->bar->cap)) + 12;

	ret = nvme_identify(dev, 0, 1, (dma_addr_t)ctrl);
	if (ret)
		return -EIO;

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts)
		dev->max_transfer_shift = (ctrl->mdts + shift);

	/*
	 * Apply quirk: Intel 0x0953 devices report a stripe size in
	 * vendor-specific byte vs[3], which also caps the transfer size.
	 */
	dm_pci_read_config16(dev->pdev, PCI_VENDOR_ID, &vendor);
	dm_pci_read_config16(dev->pdev, PCI_DEVICE_ID, &device);
	if ((vendor == PCI_VENDOR_ID_INTEL) &&
	    (device == 0x0953) && ctrl->vs[3]) {
		unsigned int max_transfer_shift;
		dev->stripe_size = (ctrl->vs[3] + shift);
		max_transfer_shift = (ctrl->vs[3] + 18);
		if (dev->max_transfer_shift) {
			dev->max_transfer_shift = min(max_transfer_shift,
						      dev->max_transfer_shift);
		} else {
			dev->max_transfer_shift = max_transfer_shift;
		}
	}

	return 0;
}

int nvme_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_NVME, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			return ret;
	}

	return 0;
}

static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	u16 vendor;
	struct nvme_id_ns buf, *id = &buf;

	memset(ns, 0, sizeof(*ns));
	ns->dev = ndev;
	ns->ns_id = desc->devnum - ndev->blk_dev_start + 1;
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)id))
		return -EIO;

	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	ns->mode_select_num_blocks = le64_to_cpu(id->nuse);
	ns->mode_select_block_len = 1 << ns->lba_shift;
	list_add(&ns->list, &ndev->namespaces);

	desc->lba = ns->mode_select_num_blocks;
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	desc->bdev = udev;
	dm_pci_read_config16(ndev->pdev, PCI_VENDOR_ID, &vendor);
	sprintf(desc->vendor, "0x%.4x", vendor);
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));
	part_init(desc);

	return 0;
}
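
/*
 * Read blocks from the namespace, splitting the request into chunks of
 * at most 2^max_transfer_shift bytes (the controller's MDTS limit) and
 * building fresh PRP entries for each chunk. The NLB field in the
 * command is zero-based, hence "lbas - 1".
 */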
static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	c.rw.opcode = nvme_cmd_read;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps(dev, &prp2,
				    lbas << ns->lba_shift, (ulong)buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64((ulong)buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[1],
					      &c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= lbas << ns->lba_shift;
		buffer += lbas << ns->lba_shift;
	}

	return (total_len - temp_len) >> desc->log2blksz;
}

static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	c.rw.opcode = nvme_cmd_write;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps(dev, &prp2,
				    lbas << ns->lba_shift, (ulong)buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64((ulong)buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[1],
					      &c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= lbas << ns->lba_shift;
		buffer += lbas << ns->lba_shift;
	}

	return (total_len - temp_len) >> desc->log2blksz;
}

static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
};

U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto_alloc_size = sizeof(struct nvme_ns),
};

static int nvme_bind(struct udevice *udev)
{
	char name[20];
	sprintf(name, "nvme#%d", nvme_info->ndev_num++);

	return device_set_name(udev, name);
}
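
/*
 * Controller probe: map BAR0, set up the admin queue and a single I/O
 * queue pair, then cache the controller's identify data. The doorbell
 * array starts at BAR0 + 4096; db_stride is in 32-bit doorbell units,
 * i.e. 2^CAP.DSTRD dwords between consecutive doorbells.
 */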
static int nvme_probe(struct udevice *udev)
{
	int ret;
	struct nvme_dev *ndev = dev_get_priv(udev);
	u64 cap;

	ndev->pdev = pci_get_controller(udev);
	ndev->instance = trailing_strtol(udev->name);

	INIT_LIST_HEAD(&ndev->namespaces);
	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
				   PCI_REGION_MEM);
	if (readl(&ndev->bar->csts) == -1) {
		ret = -ENODEV;
		printf("Error: %s: Controller not accessible!\n", udev->name);
		goto free_nvme;
	}

	ndev->queues = malloc(2 * sizeof(struct nvme_queue));
	if (!ndev->queues) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	memset(ndev->queues, 0, 2 * sizeof(struct nvme_queue));

	ndev->prp_pool = malloc(MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;

	cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
	ndev->db_stride = 1 << NVME_CAP_STRIDE(cap);
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

	ret = nvme_configure_admin_queue(ndev);
	if (ret)
		goto free_queue;

	ret = nvme_setup_io_queues(ndev);
	if (ret)
		goto free_queue;

	ret = nvme_get_info_from_identify(ndev);
	if (ret)
		goto free_queue;

	ndev->blk_dev_start = nvme_info->ns_num;
	list_add(&ndev->node, &nvme_info->dev_list);

	return 0;

free_queue:
	free((void *)ndev->queues);
free_nvme:
	return ret;
}

U_BOOT_DRIVER(nvme) = {
	.name	= "nvme",
	.id	= UCLASS_NVME,
	.bind	= nvme_bind,
	.probe	= nvme_probe,
	.priv_auto_alloc_size = sizeof(struct nvme_dev),
};

struct pci_device_id nvme_supported[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0953) },
	{}
};

U_BOOT_PCI_DEVICE(nvme, nvme_supported);