Lines Matching +full:dev +full:- +full:ctrl

1 // SPDX-License-Identifier: GPL-2.0
7 * REDS Institute, HEIG-VD, HES-SO, Switzerland
19 #include <linux/pci-epc.h>
20 #include <linux/pci-epf.h>
31 * allow up to 128 page-sized segments. For the maximum allowed,
36 (NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10))
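As a quick worked example (editorial note, assuming 4 KiB pages so PAGE_SHIFT is 12, and the 128 segments mentioned in the comment above), the macro evaluates to the maximum data transfer size in KiB:

	/* Not part of the driver: 128 << (12 - 10) == 128 << 2 == 512,
	 * i.e. a 512 KiB maximum data transfer size with 4 KiB pages.
	 */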
85 struct nvmet_pci_epf_ctrl *ctrl; member
130 struct nvmet_pci_epf_ctrl *ctrl; member
163 struct device *dev; member
210 struct nvmet_pci_epf_ctrl ctrl; member
227 static inline u32 nvmet_pci_epf_bar_read32(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_bar_read32() argument
230 __le32 *bar_reg = ctrl->bar + off; in nvmet_pci_epf_bar_read32()
235 static inline void nvmet_pci_epf_bar_write32(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_bar_write32() argument
238 __le32 *bar_reg = ctrl->bar + off; in nvmet_pci_epf_bar_write32()
243 static inline u64 nvmet_pci_epf_bar_read64(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_bar_read64() argument
246 return (u64)nvmet_pci_epf_bar_read32(ctrl, off) | in nvmet_pci_epf_bar_read64()
247 ((u64)nvmet_pci_epf_bar_read32(ctrl, off + 4) << 32); in nvmet_pci_epf_bar_read64()
250 static inline void nvmet_pci_epf_bar_write64(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_bar_write64() argument
253 nvmet_pci_epf_bar_write32(ctrl, off, val & 0xFFFFFFFF); in nvmet_pci_epf_bar_write64()
254 nvmet_pci_epf_bar_write32(ctrl, off + 4, (val >> 32) & 0xFFFFFFFF); in nvmet_pci_epf_bar_write64()
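For illustration, a minimal sketch (not an excerpt from the file) of how these helpers handle the 64-bit admin queue address registers, which are read back later in this listing as two little-endian 32-bit halves:

	/* Sketch: NVME_REG_ASQ and NVME_REG_ACQ are 64-bit BAR registers,
	 * reassembled as (low word) | (high word << 32) by the read64 helper.
	 */
	u64 asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ);
	u64 acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ);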
260 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_mem_map()
262 return pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_mem_map()
269 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_mem_unmap()
271 pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map); in nvmet_pci_epf_mem_unmap()
275 struct device *dev; member
287 return chan->device->dev == filter->dev && in nvmet_pci_epf_dma_filter()
288 (filter->dma_mask & caps.directions); in nvmet_pci_epf_dma_filter()
293 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_init_dma()
294 struct device *dev = &epf->dev; in nvmet_pci_epf_init_dma() local
299 mutex_init(&nvme_epf->dma_rx_lock); in nvmet_pci_epf_init_dma()
300 mutex_init(&nvme_epf->dma_tx_lock); in nvmet_pci_epf_init_dma()
305 filter.dev = epf->epc->dev.parent; in nvmet_pci_epf_init_dma()
312 nvme_epf->dma_rx_chan = chan; in nvmet_pci_epf_init_dma()
319 nvme_epf->dma_tx_chan = chan; in nvmet_pci_epf_init_dma()
321 nvme_epf->dma_enabled = true; in nvmet_pci_epf_init_dma()
323 dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n", in nvmet_pci_epf_init_dma()
327 dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n", in nvmet_pci_epf_init_dma()
334 dma_release_channel(nvme_epf->dma_rx_chan); in nvmet_pci_epf_init_dma()
335 nvme_epf->dma_rx_chan = NULL; in nvmet_pci_epf_init_dma()
338 mutex_destroy(&nvme_epf->dma_rx_lock); in nvmet_pci_epf_init_dma()
339 mutex_destroy(&nvme_epf->dma_tx_lock); in nvmet_pci_epf_init_dma()
340 nvme_epf->dma_enabled = false; in nvmet_pci_epf_init_dma()
342 dev_info(&epf->dev, "DMA not supported, falling back to MMIO\n"); in nvmet_pci_epf_init_dma()
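The channel setup above follows the usual dmaengine slave-channel pattern; a minimal sketch of how such a filter is typically used (the request calls themselves are not among the lines listed, and the struct name nvmet_pci_epf_dma_filter is inferred from the callback and members shown above):

	/* Sketch, assumptions noted above: request a device-to-memory (RX)
	 * channel owned by the endpoint controller's parent device.
	 */
	struct pci_epf *epf = nvme_epf->epf;
	struct nvmet_pci_epf_dma_filter filter;
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);
	chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
	/* The TX channel is requested the same way with BIT(DMA_MEM_TO_DEV). */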
347 if (!nvme_epf->dma_enabled) in nvmet_pci_epf_deinit_dma()
350 dma_release_channel(nvme_epf->dma_tx_chan); in nvmet_pci_epf_deinit_dma()
351 nvme_epf->dma_tx_chan = NULL; in nvmet_pci_epf_deinit_dma()
352 dma_release_channel(nvme_epf->dma_rx_chan); in nvmet_pci_epf_deinit_dma()
353 nvme_epf->dma_rx_chan = NULL; in nvmet_pci_epf_deinit_dma()
354 mutex_destroy(&nvme_epf->dma_rx_lock); in nvmet_pci_epf_deinit_dma()
355 mutex_destroy(&nvme_epf->dma_tx_lock); in nvmet_pci_epf_deinit_dma()
356 nvme_epf->dma_enabled = false; in nvmet_pci_epf_deinit_dma()
362 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_dma_transfer()
365 struct device *dev = &epf->dev; in nvmet_pci_epf_dma_transfer() local
375 lock = &nvme_epf->dma_rx_lock; in nvmet_pci_epf_dma_transfer()
376 chan = nvme_epf->dma_rx_chan; in nvmet_pci_epf_dma_transfer()
378 sconf.src_addr = seg->pci_addr; in nvmet_pci_epf_dma_transfer()
381 lock = &nvme_epf->dma_tx_lock; in nvmet_pci_epf_dma_transfer()
382 chan = nvme_epf->dma_tx_chan; in nvmet_pci_epf_dma_transfer()
384 sconf.dst_addr = seg->pci_addr; in nvmet_pci_epf_dma_transfer()
387 return -EINVAL; in nvmet_pci_epf_dma_transfer()
393 dma_addr = dma_map_single(dma_dev, seg->buf, seg->length, dir); in nvmet_pci_epf_dma_transfer()
400 dev_err(dev, "Failed to configure DMA channel\n"); in nvmet_pci_epf_dma_transfer()
404 desc = dmaengine_prep_slave_single(chan, dma_addr, seg->length, in nvmet_pci_epf_dma_transfer()
407 dev_err(dev, "Failed to prepare DMA\n"); in nvmet_pci_epf_dma_transfer()
408 ret = -EIO; in nvmet_pci_epf_dma_transfer()
415 dev_err(dev, "Failed to do DMA submit (err=%d)\n", ret); in nvmet_pci_epf_dma_transfer()
420 dev_err(dev, "DMA transfer failed\n"); in nvmet_pci_epf_dma_transfer()
421 ret = -EIO; in nvmet_pci_epf_dma_transfer()
427 dma_unmap_single(dma_dev, dma_addr, seg->length, dir); in nvmet_pci_epf_dma_transfer()
438 u64 pci_addr = seg->pci_addr; in nvmet_pci_epf_mmio_transfer()
439 u32 length = seg->length; in nvmet_pci_epf_mmio_transfer()
440 void *buf = seg->buf; in nvmet_pci_epf_mmio_transfer()
442 int ret = -EINVAL; in nvmet_pci_epf_mmio_transfer()
448 mutex_lock(&nvme_epf->mmio_lock); in nvmet_pci_epf_mmio_transfer()
463 ret = -EINVAL; in nvmet_pci_epf_mmio_transfer()
469 length -= map.pci_size; in nvmet_pci_epf_mmio_transfer()
475 mutex_unlock(&nvme_epf->mmio_lock); in nvmet_pci_epf_mmio_transfer()
483 if (nvme_epf->dma_enabled) in nvmet_pci_epf_transfer_seg()
489 static inline int nvmet_pci_epf_transfer(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_transfer() argument
499 return nvmet_pci_epf_transfer_seg(ctrl->nvme_epf, &seg, dir); in nvmet_pci_epf_transfer()
502 static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_alloc_irq_vectors() argument
504 ctrl->irq_vectors = kcalloc(ctrl->nr_queues, in nvmet_pci_epf_alloc_irq_vectors()
507 if (!ctrl->irq_vectors) in nvmet_pci_epf_alloc_irq_vectors()
508 return -ENOMEM; in nvmet_pci_epf_alloc_irq_vectors()
510 mutex_init(&ctrl->irq_lock); in nvmet_pci_epf_alloc_irq_vectors()
515 static void nvmet_pci_epf_free_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_free_irq_vectors() argument
517 if (ctrl->irq_vectors) { in nvmet_pci_epf_free_irq_vectors()
518 mutex_destroy(&ctrl->irq_lock); in nvmet_pci_epf_free_irq_vectors()
519 kfree(ctrl->irq_vectors); in nvmet_pci_epf_free_irq_vectors()
520 ctrl->irq_vectors = NULL; in nvmet_pci_epf_free_irq_vectors()
525 nvmet_pci_epf_find_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector) in nvmet_pci_epf_find_irq_vector() argument
530 lockdep_assert_held(&ctrl->irq_lock); in nvmet_pci_epf_find_irq_vector()
532 for (i = 0; i < ctrl->nr_queues; i++) { in nvmet_pci_epf_find_irq_vector()
533 iv = &ctrl->irq_vectors[i]; in nvmet_pci_epf_find_irq_vector()
534 if (iv->ref && iv->vector == vector) in nvmet_pci_epf_find_irq_vector()
542 nvmet_pci_epf_add_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector) in nvmet_pci_epf_add_irq_vector() argument
547 mutex_lock(&ctrl->irq_lock); in nvmet_pci_epf_add_irq_vector()
549 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector); in nvmet_pci_epf_add_irq_vector()
551 iv->ref++; in nvmet_pci_epf_add_irq_vector()
555 for (i = 0; i < ctrl->nr_queues; i++) { in nvmet_pci_epf_add_irq_vector()
556 iv = &ctrl->irq_vectors[i]; in nvmet_pci_epf_add_irq_vector()
557 if (!iv->ref) in nvmet_pci_epf_add_irq_vector()
564 iv->ref = 1; in nvmet_pci_epf_add_irq_vector()
565 iv->vector = vector; in nvmet_pci_epf_add_irq_vector()
566 iv->nr_irqs = 0; in nvmet_pci_epf_add_irq_vector()
569 mutex_unlock(&ctrl->irq_lock); in nvmet_pci_epf_add_irq_vector()
574 static void nvmet_pci_epf_remove_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_remove_irq_vector() argument
579 mutex_lock(&ctrl->irq_lock); in nvmet_pci_epf_remove_irq_vector()
581 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector); in nvmet_pci_epf_remove_irq_vector()
583 iv->ref--; in nvmet_pci_epf_remove_irq_vector()
584 if (!iv->ref) { in nvmet_pci_epf_remove_irq_vector()
585 iv->vector = 0; in nvmet_pci_epf_remove_irq_vector()
586 iv->nr_irqs = 0; in nvmet_pci_epf_remove_irq_vector()
590 mutex_unlock(&ctrl->irq_lock); in nvmet_pci_epf_remove_irq_vector()
593 static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_should_raise_irq() argument
596 struct nvmet_pci_epf_irq_vector *iv = cq->iv; in nvmet_pci_epf_should_raise_irq()
599 if (!test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) in nvmet_pci_epf_should_raise_irq()
603 if (!cq->qid) in nvmet_pci_epf_should_raise_irq()
606 if (iv->cd) in nvmet_pci_epf_should_raise_irq()
610 ret = iv->nr_irqs > 0; in nvmet_pci_epf_should_raise_irq()
612 iv->nr_irqs++; in nvmet_pci_epf_should_raise_irq()
613 ret = iv->nr_irqs >= ctrl->irq_vector_threshold; in nvmet_pci_epf_should_raise_irq()
616 iv->nr_irqs = 0; in nvmet_pci_epf_should_raise_irq()
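Reading the fragments above together, the interrupt coalescing decision can be summarized as follows (a paraphrase of the visible checks, not new driver code):

	/* nvmet_pci_epf_should_raise_irq(), as listed above:
	 *  - never raise if the CQ's IRQ is not enabled;
	 *  - always raise for the admin CQ (qid 0);
	 *  - always raise if coalescing is disabled for the vector (iv->cd);
	 *  - on a forced check, raise only if interrupts were skipped
	 *    (iv->nr_irqs > 0);
	 *  - otherwise raise once iv->nr_irqs reaches
	 *    ctrl->irq_vector_threshold, resetting the counter whenever an
	 *    interrupt is raised.
	 */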
621 static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_raise_irq() argument
624 struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf; in nvmet_pci_epf_raise_irq()
625 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_raise_irq()
628 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) in nvmet_pci_epf_raise_irq()
631 mutex_lock(&ctrl->irq_lock); in nvmet_pci_epf_raise_irq()
633 if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force)) in nvmet_pci_epf_raise_irq()
636 switch (nvme_epf->irq_type) { in nvmet_pci_epf_raise_irq()
639 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_raise_irq()
640 nvme_epf->irq_type, cq->vector + 1); in nvmet_pci_epf_raise_irq()
649 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_raise_irq()
654 ret = -EINVAL; in nvmet_pci_epf_raise_irq()
659 dev_err(ctrl->dev, "Failed to raise IRQ (err=%d)\n", ret); in nvmet_pci_epf_raise_irq()
662 mutex_unlock(&ctrl->irq_lock); in nvmet_pci_epf_raise_irq()
667 return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode); in nvmet_pci_epf_iod_name()
675 struct nvmet_pci_epf_ctrl *ctrl = sq->ctrl; in nvmet_pci_epf_alloc_iod() local
678 iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL); in nvmet_pci_epf_alloc_iod()
683 iod->req.cmd = &iod->cmd; in nvmet_pci_epf_alloc_iod()
684 iod->req.cqe = &iod->cqe; in nvmet_pci_epf_alloc_iod()
685 iod->req.port = ctrl->port; in nvmet_pci_epf_alloc_iod()
686 iod->ctrl = ctrl; in nvmet_pci_epf_alloc_iod()
687 iod->sq = sq; in nvmet_pci_epf_alloc_iod()
688 iod->cq = &ctrl->cq[sq->qid]; in nvmet_pci_epf_alloc_iod()
689 INIT_LIST_HEAD(&iod->link); in nvmet_pci_epf_alloc_iod()
690 iod->dma_dir = DMA_NONE; in nvmet_pci_epf_alloc_iod()
691 INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work); in nvmet_pci_epf_alloc_iod()
692 init_completion(&iod->done); in nvmet_pci_epf_alloc_iod()
704 int nr_segs = iod->nr_data_segs + nsegs; in nvmet_pci_epf_alloc_iod_data_segs()
706 segs = krealloc(iod->data_segs, in nvmet_pci_epf_alloc_iod_data_segs()
710 return -ENOMEM; in nvmet_pci_epf_alloc_iod_data_segs()
712 iod->nr_data_segs = nr_segs; in nvmet_pci_epf_alloc_iod_data_segs()
713 iod->data_segs = segs; in nvmet_pci_epf_alloc_iod_data_segs()
722 if (iod->data_segs) { in nvmet_pci_epf_free_iod()
723 for (i = 0; i < iod->nr_data_segs; i++) in nvmet_pci_epf_free_iod()
724 kfree(iod->data_segs[i].buf); in nvmet_pci_epf_free_iod()
725 if (iod->data_segs != &iod->data_seg) in nvmet_pci_epf_free_iod()
726 kfree(iod->data_segs); in nvmet_pci_epf_free_iod()
728 if (iod->data_sgt.nents > 1) in nvmet_pci_epf_free_iod()
729 sg_free_table(&iod->data_sgt); in nvmet_pci_epf_free_iod()
730 mempool_free(iod, &iod->ctrl->iod_pool); in nvmet_pci_epf_free_iod()
735 struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf; in nvmet_pci_epf_transfer_iod_data()
736 struct nvmet_pci_epf_segment *seg = &iod->data_segs[0]; in nvmet_pci_epf_transfer_iod_data()
740 for (i = 0; i < iod->nr_data_segs; i++, seg++) { in nvmet_pci_epf_transfer_iod_data()
741 ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir); in nvmet_pci_epf_transfer_iod_data()
743 iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR; in nvmet_pci_epf_transfer_iod_data()
751 static inline u32 nvmet_pci_epf_prp_ofst(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_prp_ofst() argument
754 return prp & ctrl->mps_mask; in nvmet_pci_epf_prp_ofst()
757 static inline size_t nvmet_pci_epf_prp_size(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_prp_size() argument
760 return ctrl->mps - nvmet_pci_epf_prp_ofst(ctrl, prp); in nvmet_pci_epf_prp_size()
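A worked example with hypothetical values may help: with CC.MPS selecting 4 KiB pages, ctrl->mps is 0x1000 and ctrl->mps_mask is 0xfff, so for a PRP of 0x12340200:

	/* nvmet_pci_epf_prp_ofst(ctrl, 0x12340200) == 0x200
	 * nvmet_pci_epf_prp_size(ctrl, 0x12340200) == 0x1000 - 0x200 == 0xe00,
	 * i.e. 0xe00 bytes remain usable in the memory page the PRP points into.
	 */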
766 static int nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp, in nvmet_pci_epf_get_prp_list() argument
769 size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift; in nvmet_pci_epf_get_prp_list()
780 length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3); in nvmet_pci_epf_get_prp_list()
781 ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE); in nvmet_pci_epf_get_prp_list()
788 static int nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_iod_parse_prp_list() argument
791 struct nvme_command *cmd = &iod->cmd; in nvmet_pci_epf_iod_parse_prp_list()
794 size_t transfer_len = iod->data_len; in nvmet_pci_epf_iod_parse_prp_list()
800 prps = kzalloc(ctrl->mps, GFP_KERNEL); in nvmet_pci_epf_iod_parse_prp_list()
810 prp = le64_to_cpu(cmd->common.dptr.prp1); in nvmet_pci_epf_iod_parse_prp_list()
814 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp); in nvmet_pci_epf_iod_parse_prp_list()
815 nr_segs = (transfer_len + ofst + ctrl->mps - 1) >> ctrl->mps_shift; in nvmet_pci_epf_iod_parse_prp_list()
822 seg = &iod->data_segs[0]; in nvmet_pci_epf_iod_parse_prp_list()
823 seg->pci_addr = prp; in nvmet_pci_epf_iod_parse_prp_list()
824 seg->length = nvmet_pci_epf_prp_size(ctrl, prp); in nvmet_pci_epf_iod_parse_prp_list()
826 size = seg->length; in nvmet_pci_epf_iod_parse_prp_list()
834 prp = le64_to_cpu(cmd->common.dptr.prp2); in nvmet_pci_epf_iod_parse_prp_list()
839 xfer_len = transfer_len - size; in nvmet_pci_epf_iod_parse_prp_list()
842 nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp, in nvmet_pci_epf_iod_parse_prp_list()
857 if (xfer_len > ctrl->mps && i == nr_prps - 1) { in nvmet_pci_epf_iod_parse_prp_list()
864 if (nvmet_pci_epf_prp_ofst(ctrl, prp)) in nvmet_pci_epf_iod_parse_prp_list()
870 if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs)) in nvmet_pci_epf_iod_parse_prp_list()
874 seg->pci_addr = prp; in nvmet_pci_epf_iod_parse_prp_list()
875 seg->length = 0; in nvmet_pci_epf_iod_parse_prp_list()
879 prp_size = min_t(size_t, ctrl->mps, xfer_len); in nvmet_pci_epf_iod_parse_prp_list()
880 seg->length += prp_size; in nvmet_pci_epf_iod_parse_prp_list()
887 iod->nr_data_segs = nr_segs; in nvmet_pci_epf_iod_parse_prp_list()
891 dev_err(ctrl->dev, in nvmet_pci_epf_iod_parse_prp_list()
902 dev_err(ctrl->dev, "PRPs list invalid offset\n"); in nvmet_pci_epf_iod_parse_prp_list()
903 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_prp_list()
907 dev_err(ctrl->dev, "PRPs list invalid field\n"); in nvmet_pci_epf_iod_parse_prp_list()
908 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_prp_list()
912 dev_err(ctrl->dev, "PRPs list internal error\n"); in nvmet_pci_epf_iod_parse_prp_list()
913 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_prp_list()
917 return -EINVAL; in nvmet_pci_epf_iod_parse_prp_list()
920 static int nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_iod_parse_prp_simple() argument
923 struct nvme_command *cmd = &iod->cmd; in nvmet_pci_epf_iod_parse_prp_simple()
924 size_t transfer_len = iod->data_len; in nvmet_pci_epf_iod_parse_prp_simple()
929 prp1 = le64_to_cpu(cmd->common.dptr.prp1); in nvmet_pci_epf_iod_parse_prp_simple()
930 prp1_size = nvmet_pci_epf_prp_size(ctrl, prp1); in nvmet_pci_epf_iod_parse_prp_simple()
934 prp2 = le64_to_cpu(cmd->common.dptr.prp2); in nvmet_pci_epf_iod_parse_prp_simple()
936 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_prp_simple()
937 return -EINVAL; in nvmet_pci_epf_iod_parse_prp_simple()
939 if (nvmet_pci_epf_prp_ofst(ctrl, prp2)) { in nvmet_pci_epf_iod_parse_prp_simple()
940 iod->status = in nvmet_pci_epf_iod_parse_prp_simple()
942 return -EINVAL; in nvmet_pci_epf_iod_parse_prp_simple()
949 iod->nr_data_segs = 1; in nvmet_pci_epf_iod_parse_prp_simple()
950 iod->data_segs = &iod->data_seg; in nvmet_pci_epf_iod_parse_prp_simple()
951 iod->data_segs[0].pci_addr = prp1; in nvmet_pci_epf_iod_parse_prp_simple()
952 iod->data_segs[0].length = transfer_len; in nvmet_pci_epf_iod_parse_prp_simple()
958 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_prp_simple()
962 iod->data_segs[0].pci_addr = prp1; in nvmet_pci_epf_iod_parse_prp_simple()
963 iod->data_segs[0].length = prp1_size; in nvmet_pci_epf_iod_parse_prp_simple()
964 iod->data_segs[1].pci_addr = prp2; in nvmet_pci_epf_iod_parse_prp_simple()
965 iod->data_segs[1].length = transfer_len - prp1_size; in nvmet_pci_epf_iod_parse_prp_simple()
972 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; in nvmet_pci_epf_iod_parse_prps() local
973 u64 prp1 = le64_to_cpu(iod->cmd.common.dptr.prp1); in nvmet_pci_epf_iod_parse_prps()
977 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1); in nvmet_pci_epf_iod_parse_prps()
979 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_prps()
980 return -EINVAL; in nvmet_pci_epf_iod_parse_prps()
983 if (iod->data_len + ofst <= ctrl->mps * 2) in nvmet_pci_epf_iod_parse_prps()
984 return nvmet_pci_epf_iod_parse_prp_simple(ctrl, iod); in nvmet_pci_epf_iod_parse_prps()
986 return nvmet_pci_epf_iod_parse_prp_list(ctrl, iod); in nvmet_pci_epf_iod_parse_prps()
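In other words, the last check above routes small transfers to the simple PRP1/PRP2 parser and everything else to the PRP list parser. A hypothetical example with 4 KiB pages:

	/* With ctrl->mps == 0x1000:
	 *  - an 8 KiB read with PRP1 page-aligned gives data_len + ofst == 0x2000,
	 *    so nvmet_pci_epf_iod_parse_prp_simple() handles it (PRP1 + PRP2);
	 *  - an 8 KiB read with PRP1 at offset 0x200 gives 0x2200 > 0x2000,
	 *    so PRP2 must point to a PRP list and the list parser is used.
	 */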
994 nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_get_sgl_segment() argument
998 u32 length = le32_to_cpu(desc->length); in nvmet_pci_epf_get_sgl_segment()
1006 ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length, in nvmet_pci_epf_get_sgl_segment()
1015 if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) || in nvmet_pci_epf_get_sgl_segment()
1016 sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) { in nvmet_pci_epf_get_sgl_segment()
1022 *desc = sgls[nr_descs - 1]; in nvmet_pci_epf_get_sgl_segment()
1023 nr_descs--; in nvmet_pci_epf_get_sgl_segment()
1026 desc->length = 0; in nvmet_pci_epf_get_sgl_segment()
1034 static int nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_iod_parse_sgl_segments() argument
1037 struct nvme_command *cmd = &iod->cmd; in nvmet_pci_epf_iod_parse_sgl_segments()
1038 struct nvme_sgl_desc seg = cmd->common.dptr.sgl; in nvmet_pci_epf_iod_parse_sgl_segments()
1049 iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_sgl_segments()
1050 return -EIO; in nvmet_pci_epf_iod_parse_sgl_segments()
1054 sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls); in nvmet_pci_epf_iod_parse_sgl_segments()
1056 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_sgl_segments()
1057 return -EIO; in nvmet_pci_epf_iod_parse_sgl_segments()
1063 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; in nvmet_pci_epf_iod_parse_sgl_segments()
1073 iod->status = NVME_SC_SGL_INVALID_TYPE | in nvmet_pci_epf_iod_parse_sgl_segments()
1077 iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr); in nvmet_pci_epf_iod_parse_sgl_segments()
1078 iod->data_segs[n].length = le32_to_cpu(sgls[i].length); in nvmet_pci_epf_iod_parse_sgl_segments()
1086 if (iod->status != NVME_SC_SUCCESS) { in nvmet_pci_epf_iod_parse_sgl_segments()
1088 return -EIO; in nvmet_pci_epf_iod_parse_sgl_segments()
1096 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; in nvmet_pci_epf_iod_parse_sgls() local
1097 struct nvme_sgl_desc *sgl = &iod->cmd.common.dptr.sgl; in nvmet_pci_epf_iod_parse_sgls()
1099 if (sgl->type == (NVME_SGL_FMT_DATA_DESC << 4)) { in nvmet_pci_epf_iod_parse_sgls()
1101 iod->nr_data_segs = 1; in nvmet_pci_epf_iod_parse_sgls()
1102 iod->data_segs = &iod->data_seg; in nvmet_pci_epf_iod_parse_sgls()
1103 iod->data_seg.pci_addr = le64_to_cpu(sgl->addr); in nvmet_pci_epf_iod_parse_sgls()
1104 iod->data_seg.length = le32_to_cpu(sgl->length); in nvmet_pci_epf_iod_parse_sgls()
1108 return nvmet_pci_epf_iod_parse_sgl_segments(ctrl, iod); in nvmet_pci_epf_iod_parse_sgls()
1113 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; in nvmet_pci_epf_alloc_iod_data_buf() local
1114 struct nvmet_req *req = &iod->req; in nvmet_pci_epf_alloc_iod_data_buf()
1119 if (iod->data_len > ctrl->mdts) { in nvmet_pci_epf_alloc_iod_data_buf()
1120 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; in nvmet_pci_epf_alloc_iod_data_buf()
1121 return -EINVAL; in nvmet_pci_epf_alloc_iod_data_buf()
1128 if (iod->cmd.common.flags & NVME_CMD_SGL_ALL) in nvmet_pci_epf_alloc_iod_data_buf()
1136 if (iod->nr_data_segs == 1) { in nvmet_pci_epf_alloc_iod_data_buf()
1137 sg_init_table(&iod->data_sgl, 1); in nvmet_pci_epf_alloc_iod_data_buf()
1138 iod->data_sgt.sgl = &iod->data_sgl; in nvmet_pci_epf_alloc_iod_data_buf()
1139 iod->data_sgt.nents = 1; in nvmet_pci_epf_alloc_iod_data_buf()
1140 iod->data_sgt.orig_nents = 1; in nvmet_pci_epf_alloc_iod_data_buf()
1142 ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs, in nvmet_pci_epf_alloc_iod_data_buf()
1148 for_each_sgtable_sg(&iod->data_sgt, sg, i) { in nvmet_pci_epf_alloc_iod_data_buf()
1149 seg = &iod->data_segs[i]; in nvmet_pci_epf_alloc_iod_data_buf()
1150 seg->buf = kmalloc(seg->length, GFP_KERNEL); in nvmet_pci_epf_alloc_iod_data_buf()
1151 if (!seg->buf) in nvmet_pci_epf_alloc_iod_data_buf()
1153 sg_set_buf(sg, seg->buf, seg->length); in nvmet_pci_epf_alloc_iod_data_buf()
1156 req->transfer_len = iod->data_len; in nvmet_pci_epf_alloc_iod_data_buf()
1157 req->sg = iod->data_sgt.sgl; in nvmet_pci_epf_alloc_iod_data_buf()
1158 req->sg_cnt = iod->data_sgt.nents; in nvmet_pci_epf_alloc_iod_data_buf()
1163 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; in nvmet_pci_epf_alloc_iod_data_buf()
1164 return -ENOMEM; in nvmet_pci_epf_alloc_iod_data_buf()
1169 struct nvmet_pci_epf_queue *cq = iod->cq; in nvmet_pci_epf_complete_iod()
1173 iod->status = le16_to_cpu(iod->cqe.status) >> 1; in nvmet_pci_epf_complete_iod()
1174 if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event) in nvmet_pci_epf_complete_iod()
1175 dev_err(iod->ctrl->dev, in nvmet_pci_epf_complete_iod()
1177 iod->sq->qid, nvmet_pci_epf_iod_name(iod), in nvmet_pci_epf_complete_iod()
1178 iod->cmd.common.opcode, iod->status); in nvmet_pci_epf_complete_iod()
1184 spin_lock_irqsave(&cq->lock, flags); in nvmet_pci_epf_complete_iod()
1185 list_add_tail(&iod->link, &cq->list); in nvmet_pci_epf_complete_iod()
1186 queue_delayed_work(system_highpri_wq, &cq->work, 0); in nvmet_pci_epf_complete_iod()
1187 spin_unlock_irqrestore(&cq->lock, flags); in nvmet_pci_epf_complete_iod()
1195 spin_lock_irqsave(&queue->lock, flags); in nvmet_pci_epf_drain_queue()
1196 while (!list_empty(&queue->list)) { in nvmet_pci_epf_drain_queue()
1197 iod = list_first_entry(&queue->list, struct nvmet_pci_epf_iod, in nvmet_pci_epf_drain_queue()
1199 list_del_init(&iod->link); in nvmet_pci_epf_drain_queue()
1202 spin_unlock_irqrestore(&queue->lock, flags); in nvmet_pci_epf_drain_queue()
1208 list_add_tail(&port->entry, &nvmet_pci_epf_ports); in nvmet_pci_epf_add_port()
1216 list_del_init(&port->entry); in nvmet_pci_epf_remove_port()
1221 nvmet_pci_epf_find_port(struct nvmet_pci_epf_ctrl *ctrl, __le16 portid) in nvmet_pci_epf_find_port() argument
1227 if (p->disc_addr.portid == portid) { in nvmet_pci_epf_find_port()
1242 iod->status = le16_to_cpu(req->cqe->status) >> 1; in nvmet_pci_epf_queue_response()
1245 if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) { in nvmet_pci_epf_queue_response()
1250 complete(&iod->done); in nvmet_pci_epf_queue_response()
1255 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_get_mdts() local
1256 int page_shift = NVME_CAP_MPSMIN(tctrl->cap) + 12; in nvmet_pci_epf_get_mdts()
1258 return ilog2(ctrl->mdts) - page_shift; in nvmet_pci_epf_get_mdts()
1264 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_create_cq() local
1265 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; in nvmet_pci_epf_create_cq()
1268 if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) in nvmet_pci_epf_create_cq()
1274 cq->pci_addr = pci_addr; in nvmet_pci_epf_create_cq()
1275 cq->qid = cqid; in nvmet_pci_epf_create_cq()
1276 cq->depth = qsize + 1; in nvmet_pci_epf_create_cq()
1277 cq->vector = vector; in nvmet_pci_epf_create_cq()
1278 cq->head = 0; in nvmet_pci_epf_create_cq()
1279 cq->tail = 0; in nvmet_pci_epf_create_cq()
1280 cq->phase = 1; in nvmet_pci_epf_create_cq()
1281 cq->db = NVME_REG_DBS + (((cqid * 2) + 1) * sizeof(u32)); in nvmet_pci_epf_create_cq()
1282 nvmet_pci_epf_bar_write32(ctrl, cq->db, 0); in nvmet_pci_epf_create_cq()
1285 cq->qes = sizeof(struct nvme_completion); in nvmet_pci_epf_create_cq()
1287 cq->qes = ctrl->io_cqes; in nvmet_pci_epf_create_cq()
1288 cq->pci_size = cq->qes * cq->depth; in nvmet_pci_epf_create_cq()
1291 cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector); in nvmet_pci_epf_create_cq()
1292 if (!cq->iv) in nvmet_pci_epf_create_cq()
1294 set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags); in nvmet_pci_epf_create_cq()
1297 status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth); in nvmet_pci_epf_create_cq()
1301 set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags); in nvmet_pci_epf_create_cq()
1303 dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n", in nvmet_pci_epf_create_cq()
1304 cqid, qsize, cq->qes, cq->vector); in nvmet_pci_epf_create_cq()
1309 if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) in nvmet_pci_epf_create_cq()
1310 nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); in nvmet_pci_epf_create_cq()
1316 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_delete_cq() local
1317 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; in nvmet_pci_epf_delete_cq()
1319 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) in nvmet_pci_epf_delete_cq()
1322 cancel_delayed_work_sync(&cq->work); in nvmet_pci_epf_delete_cq()
1324 nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); in nvmet_pci_epf_delete_cq()
1332 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_create_sq() local
1333 struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; in nvmet_pci_epf_create_sq()
1336 if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) in nvmet_pci_epf_create_sq()
1342 sq->pci_addr = pci_addr; in nvmet_pci_epf_create_sq()
1343 sq->qid = sqid; in nvmet_pci_epf_create_sq()
1344 sq->depth = qsize + 1; in nvmet_pci_epf_create_sq()
1345 sq->head = 0; in nvmet_pci_epf_create_sq()
1346 sq->tail = 0; in nvmet_pci_epf_create_sq()
1347 sq->phase = 0; in nvmet_pci_epf_create_sq()
1348 sq->db = NVME_REG_DBS + (sqid * 2 * sizeof(u32)); in nvmet_pci_epf_create_sq()
1349 nvmet_pci_epf_bar_write32(ctrl, sq->db, 0); in nvmet_pci_epf_create_sq()
1351 sq->qes = 1UL << NVME_ADM_SQES; in nvmet_pci_epf_create_sq()
1353 sq->qes = ctrl->io_sqes; in nvmet_pci_epf_create_sq()
1354 sq->pci_size = sq->qes * sq->depth; in nvmet_pci_epf_create_sq()
1356 status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth); in nvmet_pci_epf_create_sq()
1360 sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND, in nvmet_pci_epf_create_sq()
1361 min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid); in nvmet_pci_epf_create_sq()
1362 if (!sq->iod_wq) { in nvmet_pci_epf_create_sq()
1363 dev_err(ctrl->dev, "Failed to create SQ %d work queue\n", sqid); in nvmet_pci_epf_create_sq()
1368 set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags); in nvmet_pci_epf_create_sq()
1370 dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n", in nvmet_pci_epf_create_sq()
1371 sqid, qsize, sq->qes); in nvmet_pci_epf_create_sq()
1376 nvmet_sq_destroy(&sq->nvme_sq); in nvmet_pci_epf_create_sq()
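With the 4-byte doorbell stride implied by the computations above and the standard 0x1000 doorbell base (NVME_REG_DBS), the doorbell layout in BAR 0 works out to (editorial note, first few queues only):

	/* 0x1000  SQ 0 tail doorbell (admin)
	 * 0x1004  CQ 0 head doorbell (admin)
	 * 0x1008  SQ 1 tail doorbell
	 * 0x100c  CQ 1 head doorbell
	 */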
1382 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_delete_sq() local
1383 struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; in nvmet_pci_epf_delete_sq()
1385 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) in nvmet_pci_epf_delete_sq()
1388 flush_workqueue(sq->iod_wq); in nvmet_pci_epf_delete_sq()
1389 destroy_workqueue(sq->iod_wq); in nvmet_pci_epf_delete_sq()
1390 sq->iod_wq = NULL; in nvmet_pci_epf_delete_sq()
1394 if (sq->nvme_sq.ctrl) in nvmet_pci_epf_delete_sq()
1395 nvmet_sq_destroy(&sq->nvme_sq); in nvmet_pci_epf_delete_sq()
1403 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_get_feat() local
1413 if (!ctrl->sq_ab) in nvmet_pci_epf_get_feat()
1414 arb->ab = 0x7; in nvmet_pci_epf_get_feat()
1416 arb->ab = ilog2(ctrl->sq_ab); in nvmet_pci_epf_get_feat()
1421 irqc->thr = ctrl->irq_vector_threshold; in nvmet_pci_epf_get_feat()
1422 irqc->time = 0; in nvmet_pci_epf_get_feat()
1427 mutex_lock(&ctrl->irq_lock); in nvmet_pci_epf_get_feat()
1428 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv); in nvmet_pci_epf_get_feat()
1430 irqcfg->cd = iv->cd; in nvmet_pci_epf_get_feat()
1435 mutex_unlock(&ctrl->irq_lock); in nvmet_pci_epf_get_feat()
1446 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; in nvmet_pci_epf_set_feat() local
1456 if (arb->ab == 0x7) in nvmet_pci_epf_set_feat()
1457 ctrl->sq_ab = 0; in nvmet_pci_epf_set_feat()
1459 ctrl->sq_ab = 1 << arb->ab; in nvmet_pci_epf_set_feat()
1468 ctrl->irq_vector_threshold = irqc->thr + 1; in nvmet_pci_epf_set_feat()
1473 mutex_lock(&ctrl->irq_lock); in nvmet_pci_epf_set_feat()
1474 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv); in nvmet_pci_epf_set_feat()
1476 iv->cd = irqcfg->cd; in nvmet_pci_epf_set_feat()
1481 mutex_unlock(&ctrl->irq_lock); in nvmet_pci_epf_set_feat()
1506 static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_init_queue() argument
1512 queue = &ctrl->sq[qid]; in nvmet_pci_epf_init_queue()
1513 set_bit(NVMET_PCI_EPF_Q_IS_SQ, &queue->flags); in nvmet_pci_epf_init_queue()
1515 queue = &ctrl->cq[qid]; in nvmet_pci_epf_init_queue()
1516 INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work); in nvmet_pci_epf_init_queue()
1518 queue->ctrl = ctrl; in nvmet_pci_epf_init_queue()
1519 queue->qid = qid; in nvmet_pci_epf_init_queue()
1520 spin_lock_init(&queue->lock); in nvmet_pci_epf_init_queue()
1521 INIT_LIST_HEAD(&queue->list); in nvmet_pci_epf_init_queue()
1524 static int nvmet_pci_epf_alloc_queues(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_alloc_queues() argument
1528 ctrl->sq = kcalloc(ctrl->nr_queues, in nvmet_pci_epf_alloc_queues()
1530 if (!ctrl->sq) in nvmet_pci_epf_alloc_queues()
1531 return -ENOMEM; in nvmet_pci_epf_alloc_queues()
1533 ctrl->cq = kcalloc(ctrl->nr_queues, in nvmet_pci_epf_alloc_queues()
1535 if (!ctrl->cq) { in nvmet_pci_epf_alloc_queues()
1536 kfree(ctrl->sq); in nvmet_pci_epf_alloc_queues()
1537 ctrl->sq = NULL; in nvmet_pci_epf_alloc_queues()
1538 return -ENOMEM; in nvmet_pci_epf_alloc_queues()
1541 for (qid = 0; qid < ctrl->nr_queues; qid++) { in nvmet_pci_epf_alloc_queues()
1542 nvmet_pci_epf_init_queue(ctrl, qid, true); in nvmet_pci_epf_alloc_queues()
1543 nvmet_pci_epf_init_queue(ctrl, qid, false); in nvmet_pci_epf_alloc_queues()
1549 static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_free_queues() argument
1551 kfree(ctrl->sq); in nvmet_pci_epf_free_queues()
1552 ctrl->sq = NULL; in nvmet_pci_epf_free_queues()
1553 kfree(ctrl->cq); in nvmet_pci_epf_free_queues()
1554 ctrl->cq = NULL; in nvmet_pci_epf_free_queues()
1557 static int nvmet_pci_epf_map_queue(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_map_queue() argument
1560 struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf; in nvmet_pci_epf_map_queue()
1563 ret = nvmet_pci_epf_mem_map(nvme_epf, queue->pci_addr, in nvmet_pci_epf_map_queue()
1564 queue->pci_size, &queue->pci_map); in nvmet_pci_epf_map_queue()
1566 dev_err(ctrl->dev, "Failed to map queue %u (err=%d)\n", in nvmet_pci_epf_map_queue()
1567 queue->qid, ret); in nvmet_pci_epf_map_queue()
1571 if (queue->pci_map.pci_size < queue->pci_size) { in nvmet_pci_epf_map_queue()
1572 dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n", in nvmet_pci_epf_map_queue()
1573 queue->qid); in nvmet_pci_epf_map_queue()
1574 nvmet_pci_epf_mem_unmap(nvme_epf, &queue->pci_map); in nvmet_pci_epf_map_queue()
1575 return -ENOMEM; in nvmet_pci_epf_map_queue()
1581 static inline void nvmet_pci_epf_unmap_queue(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_unmap_queue() argument
1584 nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &queue->pci_map); in nvmet_pci_epf_unmap_queue()
1591 struct nvmet_req *req = &iod->req; in nvmet_pci_epf_exec_iod_work()
1594 if (!iod->ctrl->link_up) { in nvmet_pci_epf_exec_iod_work()
1599 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &iod->sq->flags)) { in nvmet_pci_epf_exec_iod_work()
1600 iod->status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; in nvmet_pci_epf_exec_iod_work()
1604 if (!nvmet_req_init(req, &iod->cq->nvme_cq, &iod->sq->nvme_sq, in nvmet_pci_epf_exec_iod_work()
1608 iod->data_len = nvmet_req_transfer_len(req); in nvmet_pci_epf_exec_iod_work()
1609 if (iod->data_len) { in nvmet_pci_epf_exec_iod_work()
1612 * PCI root-complex host. in nvmet_pci_epf_exec_iod_work()
1614 if (nvme_is_write(&iod->cmd)) in nvmet_pci_epf_exec_iod_work()
1615 iod->dma_dir = DMA_FROM_DEVICE; in nvmet_pci_epf_exec_iod_work()
1617 iod->dma_dir = DMA_TO_DEVICE; in nvmet_pci_epf_exec_iod_work()
1624 if (!ret && iod->dma_dir == DMA_FROM_DEVICE) in nvmet_pci_epf_exec_iod_work()
1632 req->execute(req); in nvmet_pci_epf_exec_iod_work()
1639 if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) in nvmet_pci_epf_exec_iod_work()
1642 wait_for_completion(&iod->done); in nvmet_pci_epf_exec_iod_work()
1644 if (iod->status == NVME_SC_SUCCESS) { in nvmet_pci_epf_exec_iod_work()
1645 WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE); in nvmet_pci_epf_exec_iod_work()
1653 static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_process_sq() argument
1658 u16 head = sq->head; in nvmet_pci_epf_process_sq()
1660 sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db); in nvmet_pci_epf_process_sq()
1661 while (head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) { in nvmet_pci_epf_process_sq()
1667 ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd, in nvmet_pci_epf_process_sq()
1668 sq->pci_addr + head * sq->qes, in nvmet_pci_epf_process_sq()
1669 sq->qes, DMA_FROM_DEVICE); in nvmet_pci_epf_process_sq()
1676 dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n", in nvmet_pci_epf_process_sq()
1677 sq->qid, head, sq->tail, in nvmet_pci_epf_process_sq()
1681 if (head == sq->depth) in nvmet_pci_epf_process_sq()
1683 WRITE_ONCE(sq->head, head); in nvmet_pci_epf_process_sq()
1686 queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work); in nvmet_pci_epf_process_sq()
1688 sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db); in nvmet_pci_epf_process_sq()
1696 struct nvmet_pci_epf_ctrl *ctrl = in nvmet_pci_epf_poll_sqs_work() local
1703 while (ctrl->link_up && ctrl->enabled) { in nvmet_pci_epf_poll_sqs_work()
1705 /* Do round-robin arbitration. */ in nvmet_pci_epf_poll_sqs_work()
1706 for (i = 0; i < ctrl->nr_queues; i++) { in nvmet_pci_epf_poll_sqs_work()
1707 sq = &ctrl->sq[i]; in nvmet_pci_epf_poll_sqs_work()
1708 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) in nvmet_pci_epf_poll_sqs_work()
1710 if (nvmet_pci_epf_process_sq(ctrl, sq)) in nvmet_pci_epf_poll_sqs_work()
1741 schedule_delayed_work(&ctrl->poll_sqs, NVMET_PCI_EPF_SQ_POLL_INTERVAL); in nvmet_pci_epf_poll_sqs_work()
1748 struct nvmet_pci_epf_ctrl *ctrl = cq->ctrl; in nvmet_pci_epf_cq_work() local
1754 ret = nvmet_pci_epf_map_queue(ctrl, cq); in nvmet_pci_epf_cq_work()
1758 while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) { in nvmet_pci_epf_cq_work()
1761 cq->head = nvmet_pci_epf_bar_read32(ctrl, cq->db); in nvmet_pci_epf_cq_work()
1762 if (cq->head == cq->tail + 1) { in nvmet_pci_epf_cq_work()
1763 ret = -EAGAIN; in nvmet_pci_epf_cq_work()
1767 spin_lock_irqsave(&cq->lock, flags); in nvmet_pci_epf_cq_work()
1768 iod = list_first_entry_or_null(&cq->list, in nvmet_pci_epf_cq_work()
1771 list_del_init(&iod->link); in nvmet_pci_epf_cq_work()
1772 spin_unlock_irqrestore(&cq->lock, flags); in nvmet_pci_epf_cq_work()
1779 * executed (req->execute() called), the CQE is already in nvmet_pci_epf_cq_work()
1784 cqe = &iod->cqe; in nvmet_pci_epf_cq_work()
1785 cqe->sq_head = cpu_to_le16(READ_ONCE(iod->sq->head)); in nvmet_pci_epf_cq_work()
1786 cqe->sq_id = cpu_to_le16(iod->sq->qid); in nvmet_pci_epf_cq_work()
1787 cqe->command_id = iod->cmd.common.command_id; in nvmet_pci_epf_cq_work()
1788 cqe->status = cpu_to_le16((iod->status << 1) | cq->phase); in nvmet_pci_epf_cq_work()
1790 dev_dbg(ctrl->dev, in nvmet_pci_epf_cq_work()
1792 cq->qid, nvmet_pci_epf_iod_name(iod), iod->status, in nvmet_pci_epf_cq_work()
1793 le64_to_cpu(cqe->result.u64), cq->head, cq->tail, in nvmet_pci_epf_cq_work()
1794 cq->phase); in nvmet_pci_epf_cq_work()
1796 memcpy_toio(cq->pci_map.virt_addr + cq->tail * cq->qes, in nvmet_pci_epf_cq_work()
1797 cqe, cq->qes); in nvmet_pci_epf_cq_work()
1799 cq->tail++; in nvmet_pci_epf_cq_work()
1800 if (cq->tail >= cq->depth) { in nvmet_pci_epf_cq_work()
1801 cq->tail = 0; in nvmet_pci_epf_cq_work()
1802 cq->phase ^= 1; in nvmet_pci_epf_cq_work()
1808 nvmet_pci_epf_raise_irq(ctrl, cq, false); in nvmet_pci_epf_cq_work()
1812 nvmet_pci_epf_unmap_queue(ctrl, cq); in nvmet_pci_epf_cq_work()
1820 nvmet_pci_epf_raise_irq(ctrl, cq, true); in nvmet_pci_epf_cq_work()
1824 queue_delayed_work(system_highpri_wq, &cq->work, in nvmet_pci_epf_cq_work()
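The phase handling above follows the standard NVMe completion queue convention; a short summary (not driver code) of what the host sees:

	/* The low bit of each CQE status word carries the current phase tag.
	 * The host consumes an entry only when its phase bit matches the
	 * phase it expects; when the device's tail wraps back to entry 0 it
	 * inverts cq->phase, so stale entries from the previous pass are
	 * ignored.
	 */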
1828 static void nvmet_pci_epf_clear_ctrl_config(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_clear_ctrl_config() argument
1830 struct nvmet_ctrl *tctrl = ctrl->tctrl; in nvmet_pci_epf_clear_ctrl_config()
1833 tctrl->csts = 0; in nvmet_pci_epf_clear_ctrl_config()
1834 ctrl->csts = 0; in nvmet_pci_epf_clear_ctrl_config()
1835 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); in nvmet_pci_epf_clear_ctrl_config()
1838 tctrl->cc = 0; in nvmet_pci_epf_clear_ctrl_config()
1839 ctrl->cc = 0; in nvmet_pci_epf_clear_ctrl_config()
1840 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); in nvmet_pci_epf_clear_ctrl_config()
1843 static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_enable_ctrl() argument
1849 if (ctrl->enabled) in nvmet_pci_epf_enable_ctrl()
1852 dev_info(ctrl->dev, "Enabling controller\n"); in nvmet_pci_epf_enable_ctrl()
1854 ctrl->mps_shift = nvmet_cc_mps(ctrl->cc) + 12; in nvmet_pci_epf_enable_ctrl()
1855 ctrl->mps = 1UL << ctrl->mps_shift; in nvmet_pci_epf_enable_ctrl()
1856 ctrl->mps_mask = ctrl->mps - 1; in nvmet_pci_epf_enable_ctrl()
1858 ctrl->io_sqes = 1UL << nvmet_cc_iosqes(ctrl->cc); in nvmet_pci_epf_enable_ctrl()
1859 if (ctrl->io_sqes < sizeof(struct nvme_command)) { in nvmet_pci_epf_enable_ctrl()
1860 dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n", in nvmet_pci_epf_enable_ctrl()
1861 ctrl->io_sqes, sizeof(struct nvme_command)); in nvmet_pci_epf_enable_ctrl()
1865 ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc); in nvmet_pci_epf_enable_ctrl()
1866 if (ctrl->io_cqes < sizeof(struct nvme_completion)) { in nvmet_pci_epf_enable_ctrl()
1867 dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n", in nvmet_pci_epf_enable_ctrl()
1868 ctrl->io_cqes, sizeof(struct nvme_completion)); in nvmet_pci_epf_enable_ctrl()
1873 aqa = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_AQA); in nvmet_pci_epf_enable_ctrl()
1874 asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ); in nvmet_pci_epf_enable_ctrl()
1875 acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ); in nvmet_pci_epf_enable_ctrl()
1879 status = nvmet_pci_epf_create_cq(ctrl->tctrl, 0, in nvmet_pci_epf_enable_ctrl()
1883 dev_err(ctrl->dev, "Failed to create admin completion queue\n"); in nvmet_pci_epf_enable_ctrl()
1889 status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, NVME_QUEUE_PHYS_CONTIG, in nvmet_pci_epf_enable_ctrl()
1892 dev_err(ctrl->dev, "Failed to create admin submission queue\n"); in nvmet_pci_epf_enable_ctrl()
1893 nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); in nvmet_pci_epf_enable_ctrl()
1897 ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB; in nvmet_pci_epf_enable_ctrl()
1898 ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD; in nvmet_pci_epf_enable_ctrl()
1899 ctrl->enabled = true; in nvmet_pci_epf_enable_ctrl()
1900 ctrl->csts = NVME_CSTS_RDY; in nvmet_pci_epf_enable_ctrl()
1903 schedule_delayed_work(&ctrl->poll_sqs, 0); in nvmet_pci_epf_enable_ctrl()
1908 nvmet_pci_epf_clear_ctrl_config(ctrl); in nvmet_pci_epf_enable_ctrl()
1909 return -EINVAL; in nvmet_pci_epf_enable_ctrl()
1912 static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl, in nvmet_pci_epf_disable_ctrl() argument
1917 if (!ctrl->enabled) in nvmet_pci_epf_disable_ctrl()
1920 dev_info(ctrl->dev, "%s controller\n", in nvmet_pci_epf_disable_ctrl()
1923 ctrl->enabled = false; in nvmet_pci_epf_disable_ctrl()
1924 cancel_delayed_work_sync(&ctrl->poll_sqs); in nvmet_pci_epf_disable_ctrl()
1927 for (qid = 1; qid < ctrl->nr_queues; qid++) in nvmet_pci_epf_disable_ctrl()
1928 nvmet_pci_epf_delete_sq(ctrl->tctrl, qid); in nvmet_pci_epf_disable_ctrl()
1930 for (qid = 1; qid < ctrl->nr_queues; qid++) in nvmet_pci_epf_disable_ctrl()
1931 nvmet_pci_epf_delete_cq(ctrl->tctrl, qid); in nvmet_pci_epf_disable_ctrl()
1934 nvmet_pci_epf_delete_sq(ctrl->tctrl, 0); in nvmet_pci_epf_disable_ctrl()
1935 nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); in nvmet_pci_epf_disable_ctrl()
1937 ctrl->csts &= ~NVME_CSTS_RDY; in nvmet_pci_epf_disable_ctrl()
1939 ctrl->csts |= NVME_CSTS_SHST_CMPLT; in nvmet_pci_epf_disable_ctrl()
1940 ctrl->cc &= ~NVME_CC_ENABLE; in nvmet_pci_epf_disable_ctrl()
1941 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); in nvmet_pci_epf_disable_ctrl()
1947 struct nvmet_pci_epf_ctrl *ctrl = in nvmet_pci_epf_poll_cc_work() local
1952 if (!ctrl->tctrl) in nvmet_pci_epf_poll_cc_work()
1955 old_cc = ctrl->cc; in nvmet_pci_epf_poll_cc_work()
1956 new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC); in nvmet_pci_epf_poll_cc_work()
1960 ctrl->cc = new_cc; in nvmet_pci_epf_poll_cc_work()
1963 ret = nvmet_pci_epf_enable_ctrl(ctrl); in nvmet_pci_epf_poll_cc_work()
1969 nvmet_pci_epf_disable_ctrl(ctrl, false); in nvmet_pci_epf_poll_cc_work()
1972 nvmet_pci_epf_disable_ctrl(ctrl, true); in nvmet_pci_epf_poll_cc_work()
1975 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; in nvmet_pci_epf_poll_cc_work()
1977 nvmet_update_cc(ctrl->tctrl, ctrl->cc); in nvmet_pci_epf_poll_cc_work()
1978 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); in nvmet_pci_epf_poll_cc_work()
1981 schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL); in nvmet_pci_epf_poll_cc_work()
1984 static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_init_bar() argument
1986 struct nvmet_ctrl *tctrl = ctrl->tctrl; in nvmet_pci_epf_init_bar()
1988 ctrl->bar = ctrl->nvme_epf->reg_bar; in nvmet_pci_epf_init_bar()
1991 ctrl->cap = tctrl->cap; in nvmet_pci_epf_init_bar()
1994 ctrl->cap |= 0x1ULL << 16; in nvmet_pci_epf_init_bar()
1997 ctrl->cap &= ~GENMASK_ULL(35, 32); in nvmet_pci_epf_init_bar()
2000 ctrl->cap &= ~(0x1ULL << 36); in nvmet_pci_epf_init_bar()
2003 ctrl->cap &= ~(0x1ULL << 45); in nvmet_pci_epf_init_bar()
2006 ctrl->cap &= ~(0x1ULL << 56); in nvmet_pci_epf_init_bar()
2009 ctrl->cap &= ~(0x1ULL << 57); in nvmet_pci_epf_init_bar()
2011 nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap); in nvmet_pci_epf_init_bar()
2012 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver); in nvmet_pci_epf_init_bar()
2014 nvmet_pci_epf_clear_ctrl_config(ctrl); in nvmet_pci_epf_init_bar()
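Assuming the standard NVMe CAP register layout (an editorial decoding, not text from the file), the CAP adjustments above amount to:

	/* bit 16      CQR   set   - contiguous queues required
	 * bits 35:32  DSTRD clear - 4-byte doorbell stride
	 * bit 36      NSSRS clear - no NVM subsystem reset support
	 * bit 45      BPS   clear - no boot partition support
	 * bit 56      PMRS  clear - no persistent memory region
	 * bit 57      CMBS  clear - no controller memory buffer
	 */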
2020 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; in nvmet_pci_epf_create_ctrl() local
2026 memset(ctrl, 0, sizeof(*ctrl)); in nvmet_pci_epf_create_ctrl()
2027 ctrl->dev = &nvme_epf->epf->dev; in nvmet_pci_epf_create_ctrl()
2028 mutex_init(&ctrl->irq_lock); in nvmet_pci_epf_create_ctrl()
2029 ctrl->nvme_epf = nvme_epf; in nvmet_pci_epf_create_ctrl()
2030 ctrl->mdts = nvme_epf->mdts_kb * SZ_1K; in nvmet_pci_epf_create_ctrl()
2031 INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work); in nvmet_pci_epf_create_ctrl()
2032 INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work); in nvmet_pci_epf_create_ctrl()
2034 ret = mempool_init_kmalloc_pool(&ctrl->iod_pool, in nvmet_pci_epf_create_ctrl()
2038 dev_err(ctrl->dev, "Failed to initialize IOD mempool\n"); in nvmet_pci_epf_create_ctrl()
2042 ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid); in nvmet_pci_epf_create_ctrl()
2043 if (!ctrl->port) { in nvmet_pci_epf_create_ctrl()
2044 dev_err(ctrl->dev, "Port not found\n"); in nvmet_pci_epf_create_ctrl()
2045 ret = -EINVAL; in nvmet_pci_epf_create_ctrl()
2052 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id); in nvmet_pci_epf_create_ctrl()
2053 args.port = ctrl->port; in nvmet_pci_epf_create_ctrl()
2054 args.subsysnqn = nvme_epf->subsysnqn; in nvmet_pci_epf_create_ctrl()
2060 ctrl->tctrl = nvmet_alloc_ctrl(&args); in nvmet_pci_epf_create_ctrl()
2061 if (!ctrl->tctrl) { in nvmet_pci_epf_create_ctrl()
2062 dev_err(ctrl->dev, "Failed to create target controller\n"); in nvmet_pci_epf_create_ctrl()
2063 ret = -ENOMEM; in nvmet_pci_epf_create_ctrl()
2066 ctrl->tctrl->drvdata = ctrl; in nvmet_pci_epf_create_ctrl()
2069 if (ctrl->tctrl->pi_support) { in nvmet_pci_epf_create_ctrl()
2070 dev_err(ctrl->dev, in nvmet_pci_epf_create_ctrl()
2072 ret = -ENOTSUPP; in nvmet_pci_epf_create_ctrl()
2077 ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues); in nvmet_pci_epf_create_ctrl()
2078 ret = nvmet_pci_epf_alloc_queues(ctrl); in nvmet_pci_epf_create_ctrl()
2086 ret = nvmet_pci_epf_alloc_irq_vectors(ctrl); in nvmet_pci_epf_create_ctrl()
2090 dev_info(ctrl->dev, in nvmet_pci_epf_create_ctrl()
2091 "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n", in nvmet_pci_epf_create_ctrl()
2092 ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1, in nvmet_pci_epf_create_ctrl()
2093 ctrl->mdts); in nvmet_pci_epf_create_ctrl()
2096 nvmet_pci_epf_init_bar(ctrl); in nvmet_pci_epf_create_ctrl()
2101 nvmet_pci_epf_free_queues(ctrl); in nvmet_pci_epf_create_ctrl()
2103 nvmet_ctrl_put(ctrl->tctrl); in nvmet_pci_epf_create_ctrl()
2104 ctrl->tctrl = NULL; in nvmet_pci_epf_create_ctrl()
2106 mempool_exit(&ctrl->iod_pool); in nvmet_pci_epf_create_ctrl()
2110 static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_start_ctrl() argument
2112 schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL); in nvmet_pci_epf_start_ctrl()
2115 static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_stop_ctrl() argument
2117 cancel_delayed_work_sync(&ctrl->poll_cc); in nvmet_pci_epf_stop_ctrl()
2119 nvmet_pci_epf_disable_ctrl(ctrl, false); in nvmet_pci_epf_stop_ctrl()
2120 nvmet_pci_epf_clear_ctrl_config(ctrl); in nvmet_pci_epf_stop_ctrl()
2123 static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl) in nvmet_pci_epf_destroy_ctrl() argument
2125 if (!ctrl->tctrl) in nvmet_pci_epf_destroy_ctrl()
2128 dev_info(ctrl->dev, "Destroying PCI ctrl \"%s\"\n", in nvmet_pci_epf_destroy_ctrl()
2129 ctrl->tctrl->subsys->subsysnqn); in nvmet_pci_epf_destroy_ctrl()
2131 nvmet_pci_epf_stop_ctrl(ctrl); in nvmet_pci_epf_destroy_ctrl()
2133 nvmet_pci_epf_free_queues(ctrl); in nvmet_pci_epf_destroy_ctrl()
2134 nvmet_pci_epf_free_irq_vectors(ctrl); in nvmet_pci_epf_destroy_ctrl()
2136 nvmet_ctrl_put(ctrl->tctrl); in nvmet_pci_epf_destroy_ctrl()
2137 ctrl->tctrl = NULL; in nvmet_pci_epf_destroy_ctrl()
2139 mempool_exit(&ctrl->iod_pool); in nvmet_pci_epf_destroy_ctrl()
2144 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_configure_bar()
2145 const struct pci_epc_features *epc_features = nvme_epf->epc_features; in nvmet_pci_epf_configure_bar()
2154 dev_err(&epf->dev, "BAR 0 is not free\n"); in nvmet_pci_epf_configure_bar()
2155 return -ENODEV; in nvmet_pci_epf_configure_bar()
2161 * is required to be 64-bit. Thus, for interoperability, always set the in nvmet_pci_epf_configure_bar()
2162 * type to 64-bit. In the rare case that the PCI EPC does not support in nvmet_pci_epf_configure_bar()
2163 * configuring BAR0 as 64-bit, the call to pci_epc_set_bar() will fail, in nvmet_pci_epf_configure_bar()
2166 epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; in nvmet_pci_epf_configure_bar()
2170 * enough space for the doorbells, followed by the MSI-X table in nvmet_pci_epf_configure_bar()
2176 if (epc_features->msix_capable) { in nvmet_pci_epf_configure_bar()
2179 msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts; in nvmet_pci_epf_configure_bar()
2180 nvme_epf->msix_table_offset = reg_size; in nvmet_pci_epf_configure_bar()
2181 pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8); in nvmet_pci_epf_configure_bar()
2186 if (epc_features->bar[BAR_0].type == BAR_FIXED) { in nvmet_pci_epf_configure_bar()
2187 if (reg_size > epc_features->bar[BAR_0].fixed_size) { in nvmet_pci_epf_configure_bar()
2188 dev_err(&epf->dev, in nvmet_pci_epf_configure_bar()
2190 epc_features->bar[BAR_0].fixed_size, in nvmet_pci_epf_configure_bar()
2192 return -ENOMEM; in nvmet_pci_epf_configure_bar()
2194 reg_bar_size = epc_features->bar[BAR_0].fixed_size; in nvmet_pci_epf_configure_bar()
2196 reg_bar_size = ALIGN(reg_size, max(epc_features->align, 4096)); in nvmet_pci_epf_configure_bar()
2199 nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0, in nvmet_pci_epf_configure_bar()
2201 if (!nvme_epf->reg_bar) { in nvmet_pci_epf_configure_bar()
2202 dev_err(&epf->dev, "Failed to allocate BAR 0\n"); in nvmet_pci_epf_configure_bar()
2203 return -ENOMEM; in nvmet_pci_epf_configure_bar()
2205 memset(nvme_epf->reg_bar, 0, reg_bar_size); in nvmet_pci_epf_configure_bar()
2212 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_free_bar()
2214 if (!nvme_epf->reg_bar) in nvmet_pci_epf_free_bar()
2217 pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE); in nvmet_pci_epf_free_bar()
2218 nvme_epf->reg_bar = NULL; in nvmet_pci_epf_free_bar()
2223 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_clear_bar()
2225 pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_clear_bar()
2226 &epf->bar[BAR_0]); in nvmet_pci_epf_clear_bar()
2231 const struct pci_epc_features *epc_features = nvme_epf->epc_features; in nvmet_pci_epf_init_irq()
2232 struct pci_epf *epf = nvme_epf->epf; in nvmet_pci_epf_init_irq()
2235 /* Enable MSI-X if supported, otherwise, use MSI. */ in nvmet_pci_epf_init_irq()
2236 if (epc_features->msix_capable && epf->msix_interrupts) { in nvmet_pci_epf_init_irq()
2237 ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_init_irq()
2238 epf->msix_interrupts, BAR_0, in nvmet_pci_epf_init_irq()
2239 nvme_epf->msix_table_offset); in nvmet_pci_epf_init_irq()
2241 dev_err(&epf->dev, "Failed to configure MSI-X\n"); in nvmet_pci_epf_init_irq()
2245 nvme_epf->nr_vectors = epf->msix_interrupts; in nvmet_pci_epf_init_irq()
2246 nvme_epf->irq_type = PCI_IRQ_MSIX; in nvmet_pci_epf_init_irq()
2251 if (epc_features->msi_capable && epf->msi_interrupts) { in nvmet_pci_epf_init_irq()
2252 ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_init_irq()
2253 epf->msi_interrupts); in nvmet_pci_epf_init_irq()
2255 dev_err(&epf->dev, "Failed to configure MSI\n"); in nvmet_pci_epf_init_irq()
2259 nvme_epf->nr_vectors = epf->msi_interrupts; in nvmet_pci_epf_init_irq()
2260 nvme_epf->irq_type = PCI_IRQ_MSI; in nvmet_pci_epf_init_irq()
2265 /* MSI and MSI-X are not supported: fall back to INTx. */ in nvmet_pci_epf_init_irq()
2266 nvme_epf->nr_vectors = 1; in nvmet_pci_epf_init_irq()
2267 nvme_epf->irq_type = PCI_IRQ_INTX; in nvmet_pci_epf_init_irq()
2275 const struct pci_epc_features *epc_features = nvme_epf->epc_features; in nvmet_pci_epf_epc_init()
2276 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; in nvmet_pci_epf_epc_init() local
2281 if (epf->vfunc_no > 0) { in nvmet_pci_epf_epc_init()
2282 dev_err(&epf->dev, "Virtual functions are not supported\n"); in nvmet_pci_epf_epc_init()
2283 return -EINVAL; in nvmet_pci_epf_epc_init()
2290 if (epc_features->msix_capable && epf->msix_interrupts) { in nvmet_pci_epf_epc_init()
2291 dev_info(&epf->dev, in nvmet_pci_epf_epc_init()
2292 "PCI endpoint controller supports MSI-X, %u vectors\n", in nvmet_pci_epf_epc_init()
2293 epf->msix_interrupts); in nvmet_pci_epf_epc_init()
2294 max_nr_queues = min(max_nr_queues, epf->msix_interrupts); in nvmet_pci_epf_epc_init()
2295 } else if (epc_features->msi_capable && epf->msi_interrupts) { in nvmet_pci_epf_epc_init()
2296 dev_info(&epf->dev, in nvmet_pci_epf_epc_init()
2298 epf->msi_interrupts); in nvmet_pci_epf_epc_init()
2299 max_nr_queues = min(max_nr_queues, epf->msi_interrupts); in nvmet_pci_epf_epc_init()
2303 dev_err(&epf->dev, "Invalid maximum number of queues %u\n", in nvmet_pci_epf_epc_init()
2305 return -EINVAL; in nvmet_pci_epf_epc_init()
2311 dev_err(&epf->dev, in nvmet_pci_epf_epc_init()
2318 epf->header->vendorid = ctrl->tctrl->subsys->vendor_id; in nvmet_pci_epf_epc_init()
2319 epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id; in nvmet_pci_epf_epc_init()
2320 ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_epc_init()
2321 epf->header); in nvmet_pci_epf_epc_init()
2323 dev_err(&epf->dev, in nvmet_pci_epf_epc_init()
2328 ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no, in nvmet_pci_epf_epc_init()
2329 &epf->bar[BAR_0]); in nvmet_pci_epf_epc_init()
2331 dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret); in nvmet_pci_epf_epc_init()
2343 if (!epc_features->linkup_notifier) { in nvmet_pci_epf_epc_init()
2344 ctrl->link_up = true; in nvmet_pci_epf_epc_init()
2345 nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl); in nvmet_pci_epf_epc_init()
2353 nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl); in nvmet_pci_epf_epc_init()
2360 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; in nvmet_pci_epf_epc_deinit() local
2362 ctrl->link_up = false; in nvmet_pci_epf_epc_deinit()
2363 nvmet_pci_epf_destroy_ctrl(ctrl); in nvmet_pci_epf_epc_deinit()
2372 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; in nvmet_pci_epf_link_up() local
2374 ctrl->link_up = true; in nvmet_pci_epf_link_up()
2375 nvmet_pci_epf_start_ctrl(ctrl); in nvmet_pci_epf_link_up()
2383 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; in nvmet_pci_epf_link_down() local
2385 ctrl->link_up = false; in nvmet_pci_epf_link_down()
2386 nvmet_pci_epf_stop_ctrl(ctrl); in nvmet_pci_epf_link_down()
2402 struct pci_epc *epc = epf->epc; in nvmet_pci_epf_bind()
2406 return -EINVAL; in nvmet_pci_epf_bind()
2408 epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); in nvmet_pci_epf_bind()
2410 dev_err(&epf->dev, "epc_features not implemented\n"); in nvmet_pci_epf_bind()
2411 return -EOPNOTSUPP; in nvmet_pci_epf_bind()
2413 nvme_epf->epc_features = epc_features; in nvmet_pci_epf_bind()
2427 struct pci_epc *epc = epf->epc; in nvmet_pci_epf_unbind()
2429 nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl); in nvmet_pci_epf_unbind()
2431 if (epc->init_complete) { in nvmet_pci_epf_unbind()
2444 .subclass_code = 0x08, /* Non-Volatile Memory controller */
2454 nvme_epf = devm_kzalloc(&epf->dev, sizeof(*nvme_epf), GFP_KERNEL); in nvmet_pci_epf_probe()
2456 return -ENOMEM; in nvmet_pci_epf_probe()
2458 ret = devm_mutex_init(&epf->dev, &nvme_epf->mmio_lock); in nvmet_pci_epf_probe()
2462 nvme_epf->epf = epf; in nvmet_pci_epf_probe()
2463 nvme_epf->mdts_kb = NVMET_PCI_EPF_MDTS_KB; in nvmet_pci_epf_probe()
2465 epf->event_ops = &nvmet_pci_epf_event_ops; in nvmet_pci_epf_probe()
2466 epf->header = &nvme_epf_pci_header; in nvmet_pci_epf_probe()
2480 return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid)); in nvmet_pci_epf_portid_show()
2491 if (nvme_epf->ctrl.tctrl) in nvmet_pci_epf_portid_store()
2492 return -EBUSY; in nvmet_pci_epf_portid_store()
2495 return -EINVAL; in nvmet_pci_epf_portid_store()
2498 return -EINVAL; in nvmet_pci_epf_portid_store()
2500 nvme_epf->portid = cpu_to_le16(portid); in nvmet_pci_epf_portid_store()
2513 return sysfs_emit(page, "%s\n", nvme_epf->subsysnqn); in nvmet_pci_epf_subsysnqn_show()
2523 if (nvme_epf->ctrl.tctrl) in nvmet_pci_epf_subsysnqn_store()
2524 return -EBUSY; in nvmet_pci_epf_subsysnqn_store()
2527 return -EINVAL; in nvmet_pci_epf_subsysnqn_store()
2529 strscpy(nvme_epf->subsysnqn, page, len); in nvmet_pci_epf_subsysnqn_store()
2541 return sysfs_emit(page, "%u\n", nvme_epf->mdts_kb); in nvmet_pci_epf_mdts_kb_show()
2552 if (nvme_epf->ctrl.tctrl) in nvmet_pci_epf_mdts_kb_store()
2553 return -EBUSY; in nvmet_pci_epf_mdts_kb_store()
2564 return -EINVAL; in nvmet_pci_epf_mdts_kb_store()
2566 nvme_epf->mdts_kb = mdts_kb; in nvmet_pci_epf_mdts_kb_store()
2590 config_group_init_type_name(&nvme_epf->group, "nvme", in nvmet_pci_epf_add_cfs()
2593 return &nvme_epf->group; in nvmet_pci_epf_add_cfs()