Lines Matching +full:ctrl +full:- +full:a
1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
10 #include <linux/pci-p2pdma.h>
32 * information on a target system that will result in discovery log page
36 * - subsystems list
37 * - per-subsystem allowed hosts list
38 * - allow_any_host subsystem attribute
39 * - nvmet_genctr
40 * - the nvmet_transports array
43 * while when reading (populating discovery log page or checking host-subsystem
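A minimal sketch of the read/write-semaphore pattern this comment describes (illustrative only, not part of the matched lines; the full file declares the semaphore as nvmet_config_sem, and the helper names below are invented):

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(example_config_sem);	/* stands in for nvmet_config_sem */

	/* Writers: changes to the subsystems list, per-subsystem allowed hosts,
	 * allow_any_host, nvmet_genctr or the nvmet_transports array. */
	static void example_config_update(void)
	{
		down_write(&example_config_sem);
		/* ... mutate the protected lists/structures ... */
		up_write(&example_config_sem);
	}

	/* Readers: populating the discovery log page or checking a
	 * host-subsystem link; concurrent readers are allowed. */
	static void example_config_read(void)
	{
		down_read(&example_config_sem);
		/* ... walk the protected lists read-only ... */
		up_read(&example_config_sem);
	}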
57 case -ENOSPC: in errno_to_nvme_status()
58 req->error_loc = offsetof(struct nvme_rw_command, length); in errno_to_nvme_status()
60 case -EREMOTEIO: in errno_to_nvme_status()
61 req->error_loc = offsetof(struct nvme_rw_command, slba); in errno_to_nvme_status()
63 case -EOPNOTSUPP: in errno_to_nvme_status()
64 req->error_loc = offsetof(struct nvme_common_command, opcode); in errno_to_nvme_status()
65 switch (req->cmd->common.opcode) { in errno_to_nvme_status()
73 case -ENODATA: in errno_to_nvme_status()
74 req->error_loc = offsetof(struct nvme_rw_command, nsid); in errno_to_nvme_status()
76 case -EIO: in errno_to_nvme_status()
79 req->error_loc = offsetof(struct nvme_common_command, opcode); in errno_to_nvme_status()
86 pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode, in nvmet_report_invalid_opcode()
87 req->sq->qid); in nvmet_report_invalid_opcode()
89 req->error_loc = offsetof(struct nvme_common_command, opcode); in nvmet_report_invalid_opcode()
99 if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { in nvmet_copy_to_sgl()
100 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_copy_to_sgl()
108 if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { in nvmet_copy_from_sgl()
109 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_copy_from_sgl()
117 if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) { in nvmet_zero_sgl()
118 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_zero_sgl()
130 nvmet_for_each_enabled_ns(&subsys->namespaces, idx, cur) in nvmet_max_nsid()
131 nsid = cur->nsid; in nvmet_max_nsid()
138 return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16); in nvmet_async_event_result()
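In the completion result built here, the asynchronous event type sits in bits 2:0, the event information in bits 15:8 and the log page identifier in bits 23:16. A hedged host-side sketch of unpacking that dword (helper name invented):

	static inline void example_decode_aen_result(u32 result,
						     u8 *type, u8 *info, u8 *log_page)
	{
		*type     = result & 0xff;		/* spec defines bits 2:0 */
		*info     = (result >> 8) & 0xff;	/* bits 15:8 */
		*log_page = (result >> 16) & 0xff;	/* bits 23:16 */
	}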
141 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl) in nvmet_async_events_failall() argument
145 mutex_lock(&ctrl->lock); in nvmet_async_events_failall()
146 while (ctrl->nr_async_event_cmds) { in nvmet_async_events_failall()
147 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; in nvmet_async_events_failall()
148 mutex_unlock(&ctrl->lock); in nvmet_async_events_failall()
150 mutex_lock(&ctrl->lock); in nvmet_async_events_failall()
152 mutex_unlock(&ctrl->lock); in nvmet_async_events_failall()
155 static void nvmet_async_events_process(struct nvmet_ctrl *ctrl) in nvmet_async_events_process() argument
160 mutex_lock(&ctrl->lock); in nvmet_async_events_process()
161 while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) { in nvmet_async_events_process()
162 aen = list_first_entry(&ctrl->async_events, in nvmet_async_events_process()
164 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; in nvmet_async_events_process()
167 list_del(&aen->entry); in nvmet_async_events_process()
170 mutex_unlock(&ctrl->lock); in nvmet_async_events_process()
171 trace_nvmet_async_event(ctrl, req->cqe->result.u32); in nvmet_async_events_process()
173 mutex_lock(&ctrl->lock); in nvmet_async_events_process()
175 mutex_unlock(&ctrl->lock); in nvmet_async_events_process()
178 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl) in nvmet_async_events_free() argument
182 mutex_lock(&ctrl->lock); in nvmet_async_events_free()
183 list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) { in nvmet_async_events_free()
184 list_del(&aen->entry); in nvmet_async_events_free()
187 mutex_unlock(&ctrl->lock); in nvmet_async_events_free()
192 struct nvmet_ctrl *ctrl = in nvmet_async_event_work() local
195 nvmet_async_events_process(ctrl); in nvmet_async_event_work()
198 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, in nvmet_add_async_event() argument
207 aen->event_type = event_type; in nvmet_add_async_event()
208 aen->event_info = event_info; in nvmet_add_async_event()
209 aen->log_page = log_page; in nvmet_add_async_event()
211 mutex_lock(&ctrl->lock); in nvmet_add_async_event()
212 list_add_tail(&aen->entry, &ctrl->async_events); in nvmet_add_async_event()
213 mutex_unlock(&ctrl->lock); in nvmet_add_async_event()
215 queue_work(nvmet_wq, &ctrl->async_event_work); in nvmet_add_async_event()
218 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid) in nvmet_add_to_changed_ns_log() argument
222 mutex_lock(&ctrl->lock); in nvmet_add_to_changed_ns_log()
223 if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES) in nvmet_add_to_changed_ns_log()
226 for (i = 0; i < ctrl->nr_changed_ns; i++) { in nvmet_add_to_changed_ns_log()
227 if (ctrl->changed_ns_list[i] == nsid) in nvmet_add_to_changed_ns_log()
231 if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) { in nvmet_add_to_changed_ns_log()
232 ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff); in nvmet_add_to_changed_ns_log()
233 ctrl->nr_changed_ns = U32_MAX; in nvmet_add_to_changed_ns_log()
237 ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid; in nvmet_add_to_changed_ns_log()
239 mutex_unlock(&ctrl->lock); in nvmet_add_to_changed_ns_log()
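Once more than NVME_MAX_CHANGED_NAMESPACES entries would accumulate, the first slot is overwritten with the 0xffffffff sentinel and the counter is parked at U32_MAX so later changes are no longer recorded individually. A hedged sketch of how a reader of the resulting Changed Namespace List log page would treat that sentinel (helper name invented):

	static void example_scan_changed_ns_log(const __le32 *list, unsigned int nr)
	{
		unsigned int i;

		if (nr && le32_to_cpu(list[0]) == 0xffffffff) {
			/* overflow sentinel: rescan all namespaces instead */
			return;
		}
		for (i = 0; i < nr; i++) {
			u32 nsid = le32_to_cpu(list[i]);

			if (!nsid)	/* unused tail entries are zero-filled */
				break;
			/* ... revalidate namespace 'nsid' ... */
		}
	}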
244 struct nvmet_ctrl *ctrl; in nvmet_ns_changed() local
246 lockdep_assert_held(&subsys->lock); in nvmet_ns_changed()
248 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_ns_changed()
249 nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid)); in nvmet_ns_changed()
250 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR)) in nvmet_ns_changed()
252 nvmet_add_async_event(ctrl, NVME_AER_NOTICE, in nvmet_ns_changed()
261 struct nvmet_ctrl *ctrl; in nvmet_send_ana_event() local
263 mutex_lock(&subsys->lock); in nvmet_send_ana_event()
264 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_send_ana_event()
265 if (port && ctrl->port != port) in nvmet_send_ana_event()
267 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE)) in nvmet_send_ana_event()
269 nvmet_add_async_event(ctrl, NVME_AER_NOTICE, in nvmet_send_ana_event()
272 mutex_unlock(&subsys->lock); in nvmet_send_ana_event()
280 list_for_each_entry(p, &port->subsystems, entry) in nvmet_port_send_ana_event()
281 nvmet_send_ana_event(p->subsys, port); in nvmet_port_send_ana_event()
290 if (nvmet_transports[ops->type]) in nvmet_register_transport()
291 ret = -EINVAL; in nvmet_register_transport()
293 nvmet_transports[ops->type] = ops; in nvmet_register_transport()
303 nvmet_transports[ops->type] = NULL; in nvmet_unregister_transport()
310 struct nvmet_ctrl *ctrl; in nvmet_port_del_ctrls() local
312 mutex_lock(&subsys->lock); in nvmet_port_del_ctrls()
313 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_port_del_ctrls()
314 if (ctrl->port == port) in nvmet_port_del_ctrls()
315 ctrl->ops->delete_ctrl(ctrl); in nvmet_port_del_ctrls()
317 mutex_unlock(&subsys->lock); in nvmet_port_del_ctrls()
327 ops = nvmet_transports[port->disc_addr.trtype]; in nvmet_enable_port()
330 request_module("nvmet-transport-%d", port->disc_addr.trtype); in nvmet_enable_port()
332 ops = nvmet_transports[port->disc_addr.trtype]; in nvmet_enable_port()
335 port->disc_addr.trtype); in nvmet_enable_port()
336 return -EINVAL; in nvmet_enable_port()
340 if (!try_module_get(ops->owner)) in nvmet_enable_port()
341 return -EINVAL; in nvmet_enable_port()
347 if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) { in nvmet_enable_port()
348 pr_err("T10-PI is not supported by transport type %d\n", in nvmet_enable_port()
349 port->disc_addr.trtype); in nvmet_enable_port()
350 ret = -EINVAL; in nvmet_enable_port()
354 ret = ops->add_port(port); in nvmet_enable_port()
359 if (port->inline_data_size < 0) in nvmet_enable_port()
360 port->inline_data_size = 0; in nvmet_enable_port()
367 if (port->max_queue_size < 0) in nvmet_enable_port()
368 port->max_queue_size = NVMET_MAX_QUEUE_SIZE; in nvmet_enable_port()
370 port->max_queue_size = clamp_t(int, port->max_queue_size, in nvmet_enable_port()
374 port->enabled = true; in nvmet_enable_port()
375 port->tr_ops = ops; in nvmet_enable_port()
379 module_put(ops->owner); in nvmet_enable_port()
389 port->enabled = false; in nvmet_disable_port()
390 port->tr_ops = NULL; in nvmet_disable_port()
392 ops = nvmet_transports[port->disc_addr.trtype]; in nvmet_disable_port()
393 ops->remove_port(port); in nvmet_disable_port()
394 module_put(ops->owner); in nvmet_disable_port()
399 struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work), in nvmet_keep_alive_timer() local
401 bool reset_tbkas = ctrl->reset_tbkas; in nvmet_keep_alive_timer()
403 ctrl->reset_tbkas = false; in nvmet_keep_alive_timer()
405 pr_debug("ctrl %d reschedule traffic based keep-alive timer\n", in nvmet_keep_alive_timer()
406 ctrl->cntlid); in nvmet_keep_alive_timer()
407 queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_keep_alive_timer()
411 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n", in nvmet_keep_alive_timer()
412 ctrl->cntlid, ctrl->kato); in nvmet_keep_alive_timer()
414 nvmet_ctrl_fatal_error(ctrl); in nvmet_keep_alive_timer()
417 void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) in nvmet_start_keep_alive_timer() argument
419 if (unlikely(ctrl->kato == 0)) in nvmet_start_keep_alive_timer()
422 pr_debug("ctrl %d start keep-alive timer for %d secs\n", in nvmet_start_keep_alive_timer()
423 ctrl->cntlid, ctrl->kato); in nvmet_start_keep_alive_timer()
425 queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_start_keep_alive_timer()
428 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) in nvmet_stop_keep_alive_timer() argument
430 if (unlikely(ctrl->kato == 0)) in nvmet_stop_keep_alive_timer()
433 pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid); in nvmet_stop_keep_alive_timer()
435 cancel_delayed_work_sync(&ctrl->ka_work); in nvmet_stop_keep_alive_timer()
440 u32 nsid = le32_to_cpu(req->cmd->common.nsid); in nvmet_req_find_ns()
443 req->ns = xa_load(&subsys->namespaces, nsid); in nvmet_req_find_ns()
444 if (unlikely(!req->ns || !req->ns->enabled)) { in nvmet_req_find_ns()
445 req->error_loc = offsetof(struct nvme_common_command, nsid); in nvmet_req_find_ns()
446 if (!req->ns) /* ns doesn't exist! */ in nvmet_req_find_ns()
450 req->ns = NULL; in nvmet_req_find_ns()
454 percpu_ref_get(&req->ns->ref); in nvmet_req_find_ns()
462 complete(&ns->disable_done); in nvmet_destroy_namespace()
467 percpu_ref_put(&ns->ref); in nvmet_put_namespace()
481 if (!ns->use_p2pmem) in nvmet_p2pmem_ns_enable()
484 if (!ns->bdev) { in nvmet_p2pmem_ns_enable()
485 pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n"); in nvmet_p2pmem_ns_enable()
486 return -EINVAL; in nvmet_p2pmem_ns_enable()
489 if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) { in nvmet_p2pmem_ns_enable()
490 pr_err("peer-to-peer DMA is not supported by the driver of %s\n", in nvmet_p2pmem_ns_enable()
491 ns->device_path); in nvmet_p2pmem_ns_enable()
492 return -EINVAL; in nvmet_p2pmem_ns_enable()
495 if (ns->p2p_dev) { in nvmet_p2pmem_ns_enable()
496 ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true); in nvmet_p2pmem_ns_enable()
498 return -EINVAL; in nvmet_p2pmem_ns_enable()
509 pr_err("no peer-to-peer memory is available for %s\n", in nvmet_p2pmem_ns_enable()
510 ns->device_path); in nvmet_p2pmem_ns_enable()
511 return -EINVAL; in nvmet_p2pmem_ns_enable()
521 * Note: ctrl->subsys->lock should be held when calling this function
523 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl, in nvmet_p2pmem_ns_add_p2p() argument
530 if (!ctrl->p2p_client || !ns->use_p2pmem) in nvmet_p2pmem_ns_add_p2p()
533 if (ns->p2p_dev) { in nvmet_p2pmem_ns_add_p2p()
534 ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true); in nvmet_p2pmem_ns_add_p2p()
538 p2p_dev = pci_dev_get(ns->p2p_dev); in nvmet_p2pmem_ns_add_p2p()
540 clients[0] = ctrl->p2p_client; in nvmet_p2pmem_ns_add_p2p()
545 pr_err("no peer-to-peer memory is available that's supported by %s and %s\n", in nvmet_p2pmem_ns_add_p2p()
546 dev_name(ctrl->p2p_client), ns->device_path); in nvmet_p2pmem_ns_add_p2p()
551 ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev); in nvmet_p2pmem_ns_add_p2p()
556 ns->nsid); in nvmet_p2pmem_ns_add_p2p()
561 loff_t oldsize = ns->size; in nvmet_ns_revalidate()
563 if (ns->bdev) in nvmet_ns_revalidate()
568 return oldsize != ns->size; in nvmet_ns_revalidate()
573 struct nvmet_subsys *subsys = ns->subsys; in nvmet_ns_enable()
574 struct nvmet_ctrl *ctrl; in nvmet_ns_enable() local
577 mutex_lock(&subsys->lock); in nvmet_ns_enable()
581 pr_info("cannot enable both passthru and regular namespaces for a single subsystem"); in nvmet_ns_enable()
585 if (ns->enabled) in nvmet_ns_enable()
588 ret = -EMFILE; in nvmet_ns_enable()
591 if (ret == -ENOTBLK) in nvmet_ns_enable()
600 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_enable()
601 nvmet_p2pmem_ns_add_p2p(ctrl, ns); in nvmet_ns_enable()
603 if (ns->pr.enable) { in nvmet_ns_enable()
609 if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL)) in nvmet_ns_enable()
612 nvmet_ns_changed(subsys, ns->nsid); in nvmet_ns_enable()
613 ns->enabled = true; in nvmet_ns_enable()
614 xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED); in nvmet_ns_enable()
617 mutex_unlock(&subsys->lock); in nvmet_ns_enable()
620 if (ns->pr.enable) in nvmet_ns_enable()
623 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_enable()
624 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); in nvmet_ns_enable()
632 struct nvmet_subsys *subsys = ns->subsys; in nvmet_ns_disable()
633 struct nvmet_ctrl *ctrl; in nvmet_ns_disable() local
635 mutex_lock(&subsys->lock); in nvmet_ns_disable()
636 if (!ns->enabled) in nvmet_ns_disable()
639 ns->enabled = false; in nvmet_ns_disable()
640 xa_clear_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED); in nvmet_ns_disable()
642 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_disable()
643 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); in nvmet_ns_disable()
645 mutex_unlock(&subsys->lock); in nvmet_ns_disable()
650 * to be dropped, as well as an RCU grace period for anyone only in nvmet_ns_disable()
655 percpu_ref_kill(&ns->ref); in nvmet_ns_disable()
657 wait_for_completion(&ns->disable_done); in nvmet_ns_disable()
658 percpu_ref_exit(&ns->ref); in nvmet_ns_disable()
660 if (ns->pr.enable) in nvmet_ns_disable()
663 mutex_lock(&subsys->lock); in nvmet_ns_disable()
664 nvmet_ns_changed(subsys, ns->nsid); in nvmet_ns_disable()
667 mutex_unlock(&subsys->lock); in nvmet_ns_disable()
672 struct nvmet_subsys *subsys = ns->subsys; in nvmet_ns_free()
676 mutex_lock(&subsys->lock); in nvmet_ns_free()
678 xa_erase(&subsys->namespaces, ns->nsid); in nvmet_ns_free()
679 if (ns->nsid == subsys->max_nsid) in nvmet_ns_free()
680 subsys->max_nsid = nvmet_max_nsid(subsys); in nvmet_ns_free()
682 subsys->nr_namespaces--; in nvmet_ns_free()
683 mutex_unlock(&subsys->lock); in nvmet_ns_free()
686 nvmet_ana_group_enabled[ns->anagrpid]--; in nvmet_ns_free()
689 kfree(ns->device_path); in nvmet_ns_free()
697 mutex_lock(&subsys->lock); in nvmet_ns_alloc()
699 if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES) in nvmet_ns_alloc()
706 init_completion(&ns->disable_done); in nvmet_ns_alloc()
708 ns->nsid = nsid; in nvmet_ns_alloc()
709 ns->subsys = subsys; in nvmet_ns_alloc()
711 if (ns->nsid > subsys->max_nsid) in nvmet_ns_alloc()
712 subsys->max_nsid = nsid; in nvmet_ns_alloc()
714 if (xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL)) in nvmet_ns_alloc()
717 subsys->nr_namespaces++; in nvmet_ns_alloc()
719 mutex_unlock(&subsys->lock); in nvmet_ns_alloc()
722 ns->anagrpid = NVMET_DEFAULT_ANA_GRPID; in nvmet_ns_alloc()
723 nvmet_ana_group_enabled[ns->anagrpid]++; in nvmet_ns_alloc()
726 uuid_gen(&ns->uuid); in nvmet_ns_alloc()
727 ns->buffered_io = false; in nvmet_ns_alloc()
728 ns->csi = NVME_CSI_NVM; in nvmet_ns_alloc()
732 subsys->max_nsid = nvmet_max_nsid(subsys); in nvmet_ns_alloc()
735 mutex_unlock(&subsys->lock); in nvmet_ns_alloc()
741 if (req->sq->size) { in nvmet_update_sq_head()
744 old_sqhd = READ_ONCE(req->sq->sqhd); in nvmet_update_sq_head()
746 new_sqhd = (old_sqhd + 1) % req->sq->size; in nvmet_update_sq_head()
747 } while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd)); in nvmet_update_sq_head()
749 req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF); in nvmet_update_sq_head()
754 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_set_error() local
758 req->cqe->status = cpu_to_le16(status << 1); in nvmet_set_error()
760 if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC) in nvmet_set_error()
763 spin_lock_irqsave(&ctrl->error_lock, flags); in nvmet_set_error()
764 ctrl->err_counter++; in nvmet_set_error()
766 &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS]; in nvmet_set_error()
768 new_error_slot->error_count = cpu_to_le64(ctrl->err_counter); in nvmet_set_error()
769 new_error_slot->sqid = cpu_to_le16(req->sq->qid); in nvmet_set_error()
770 new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id); in nvmet_set_error()
771 new_error_slot->status_field = cpu_to_le16(status << 1); in nvmet_set_error()
772 new_error_slot->param_error_location = cpu_to_le16(req->error_loc); in nvmet_set_error()
773 new_error_slot->lba = cpu_to_le64(req->error_slba); in nvmet_set_error()
774 new_error_slot->nsid = req->cmd->common.nsid; in nvmet_set_error()
775 spin_unlock_irqrestore(&ctrl->error_lock, flags); in nvmet_set_error()
778 req->cqe->status |= cpu_to_le16(1 << 14); in nvmet_set_error()
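The bit 14 OR-ed into the completion status here is the More (M) bit of the (already left-shifted) status field: it tells the host that extra detail for this command was recorded in the Error Information log slot filled in just above. For reference, a hedged pair of defines (names invented for this sketch):

	/* Bit layout of the 16-bit completion status field as stored in the CQE:
	 * bit 14 is More (M), bit 15 is Do Not Retry (DNR). */
	#define EXAMPLE_CQE_STATUS_MORE	(1U << 14)
	#define EXAMPLE_CQE_STATUS_DNR	(1U << 15)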
783 struct nvmet_ns *ns = req->ns; in __nvmet_req_complete()
784 struct nvmet_pr_per_ctrl_ref *pc_ref = req->pc_ref; in __nvmet_req_complete()
786 if (!req->sq->sqhd_disabled) in __nvmet_req_complete()
788 req->cqe->sq_id = cpu_to_le16(req->sq->qid); in __nvmet_req_complete()
789 req->cqe->command_id = req->cmd->common.command_id; in __nvmet_req_complete()
796 req->ops->queue_response(req); in __nvmet_req_complete()
806 struct nvmet_sq *sq = req->sq; in nvmet_req_complete()
809 percpu_ref_put(&sq->ref); in nvmet_req_complete()
813 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, in nvmet_cq_setup() argument
816 cq->qid = qid; in nvmet_cq_setup()
817 cq->size = size; in nvmet_cq_setup()
820 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, in nvmet_sq_setup() argument
823 sq->sqhd = 0; in nvmet_sq_setup()
824 sq->qid = qid; in nvmet_sq_setup()
825 sq->size = size; in nvmet_sq_setup()
827 ctrl->sqs[qid] = sq; in nvmet_sq_setup()
834 complete(&sq->confirm_done); in nvmet_confirm_sq()
837 u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid) in nvmet_check_cqid() argument
839 if (!ctrl->sqs) in nvmet_check_cqid()
842 if (cqid > ctrl->subsys->max_qid) in nvmet_check_cqid()
847 * SQs to share a single CQ. However, we do not support this yet, so in nvmet_check_cqid()
848 * check that there is no SQ defined for a CQ. If one exists, then the in nvmet_check_cqid()
853 if (ctrl->sqs[cqid]) in nvmet_check_cqid()
859 u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, in nvmet_cq_create() argument
864 status = nvmet_check_cqid(ctrl, qid); in nvmet_cq_create()
868 nvmet_cq_setup(ctrl, cq, qid, size); in nvmet_cq_create()
874 u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, in nvmet_check_sqid() argument
877 if (!ctrl->sqs) in nvmet_check_sqid()
880 if (sqid > ctrl->subsys->max_qid) in nvmet_check_sqid()
883 if ((create && ctrl->sqs[sqid]) || in nvmet_check_sqid()
884 (!create && !ctrl->sqs[sqid])) in nvmet_check_sqid()
890 u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, in nvmet_sq_create() argument
896 if (!kref_get_unless_zero(&ctrl->ref)) in nvmet_sq_create()
899 status = nvmet_check_sqid(ctrl, sqid, true); in nvmet_sq_create()
909 nvmet_sq_setup(ctrl, sq, sqid, size); in nvmet_sq_create()
910 sq->ctrl = ctrl; in nvmet_sq_create()
915 nvmet_ctrl_put(ctrl); in nvmet_sq_create()
922 struct nvmet_ctrl *ctrl = sq->ctrl; in nvmet_sq_destroy() local
928 if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) in nvmet_sq_destroy()
929 nvmet_async_events_failall(ctrl); in nvmet_sq_destroy()
930 percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq); in nvmet_sq_destroy()
931 wait_for_completion(&sq->confirm_done); in nvmet_sq_destroy()
932 wait_for_completion(&sq->free_done); in nvmet_sq_destroy()
933 percpu_ref_exit(&sq->ref); in nvmet_sq_destroy()
937 * we must reference the ctrl again after waiting for inflight IO in nvmet_sq_destroy()
939 * store sq->ctrl locally, but before we killed the percpu_ref. The in nvmet_sq_destroy()
940 * admin connect allocates and assigns sq->ctrl, which now needs a in nvmet_sq_destroy()
941 * final ref put, as this ctrl is going away. in nvmet_sq_destroy()
943 ctrl = sq->ctrl; in nvmet_sq_destroy()
945 if (ctrl) { in nvmet_sq_destroy()
948 * send us keep-alive during this period, hence reset the in nvmet_sq_destroy()
949 * traffic based keep-alive timer so we don't trigger a in nvmet_sq_destroy()
950 * controller teardown as a result of a keep-alive expiration. in nvmet_sq_destroy()
952 ctrl->reset_tbkas = true; in nvmet_sq_destroy()
953 sq->ctrl->sqs[sq->qid] = NULL; in nvmet_sq_destroy()
954 nvmet_ctrl_put(ctrl); in nvmet_sq_destroy()
955 sq->ctrl = NULL; /* allows reusing the queue later */ in nvmet_sq_destroy()
964 complete(&sq->free_done); in nvmet_sq_free()
971 ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL); in nvmet_sq_init()
976 init_completion(&sq->free_done); in nvmet_sq_init()
977 init_completion(&sq->confirm_done); in nvmet_sq_init()
987 enum nvme_ana_state state = port->ana_state[ns->anagrpid]; in nvmet_check_ana_state()
1000 if (unlikely(req->ns->readonly)) { in nvmet_io_cmd_check_access()
1001 switch (req->cmd->common.opcode) { in nvmet_io_cmd_check_access()
1015 struct nvme_command *cmd = req->cmd; in nvmet_io_cmd_transfer_len()
1021 if (!req->ns) in nvmet_io_cmd_transfer_len()
1024 switch (req->cmd->common.opcode) { in nvmet_io_cmd_transfer_len()
1028 if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) in nvmet_io_cmd_transfer_len()
1034 return (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2; in nvmet_io_cmd_transfer_len()
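NUMD in Zone Management Receive is a 0's-based dword count, so the transfer length computed here is (numd + 1) * 4 bytes. A hedged helper spelling that out (name invented):

	static inline u32 example_zmr_transfer_len(u32 numd)
	{
		return (numd + 1) << 2;	/* 0's-based dwords to bytes; numd == 0 -> 4 */
	}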
1042 struct nvme_command *cmd = req->cmd; in nvmet_parse_io_cmd()
1062 ret = nvmet_check_ana_state(req->port, req->ns); in nvmet_parse_io_cmd()
1064 req->error_loc = offsetof(struct nvme_common_command, nsid); in nvmet_parse_io_cmd()
1069 req->error_loc = offsetof(struct nvme_common_command, nsid); in nvmet_parse_io_cmd()
1073 if (req->ns->pr.enable) { in nvmet_parse_io_cmd()
1079 switch (req->ns->csi) { in nvmet_parse_io_cmd()
1081 if (req->ns->file) in nvmet_parse_io_cmd()
1098 if (req->ns->pr.enable) { in nvmet_parse_io_cmd()
1111 u8 flags = req->cmd->common.flags; in nvmet_req_init()
1114 req->cq = cq; in nvmet_req_init()
1115 req->sq = sq; in nvmet_req_init()
1116 req->ops = ops; in nvmet_req_init()
1117 req->sg = NULL; in nvmet_req_init()
1118 req->metadata_sg = NULL; in nvmet_req_init()
1119 req->sg_cnt = 0; in nvmet_req_init()
1120 req->metadata_sg_cnt = 0; in nvmet_req_init()
1121 req->transfer_len = 0; in nvmet_req_init()
1122 req->metadata_len = 0; in nvmet_req_init()
1123 req->cqe->result.u64 = 0; in nvmet_req_init()
1124 req->cqe->status = 0; in nvmet_req_init()
1125 req->cqe->sq_head = 0; in nvmet_req_init()
1126 req->ns = NULL; in nvmet_req_init()
1127 req->error_loc = NVMET_NO_ERROR_LOC; in nvmet_req_init()
1128 req->error_slba = 0; in nvmet_req_init()
1129 req->pc_ref = NULL; in nvmet_req_init()
1133 req->error_loc = offsetof(struct nvme_common_command, flags); in nvmet_req_init()
1140 * contains an address of a single contiguous physical buffer that is in nvmet_req_init()
1144 if (!req->sq->ctrl || !nvmet_is_pci_ctrl(req->sq->ctrl)) { in nvmet_req_init()
1145 req->error_loc = in nvmet_req_init()
1152 if (unlikely(!req->sq->ctrl)) in nvmet_req_init()
1153 /* will return an error for any non-connect command: */ in nvmet_req_init()
1155 else if (likely(req->sq->qid != 0)) in nvmet_req_init()
1163 trace_nvmet_req_init(req, req->cmd); in nvmet_req_init()
1165 if (unlikely(!percpu_ref_tryget_live(&sq->ref))) { in nvmet_req_init()
1170 if (sq->ctrl) in nvmet_req_init()
1171 sq->ctrl->reset_tbkas = true; in nvmet_req_init()
1183 percpu_ref_put(&req->sq->ref); in nvmet_req_uninit()
1184 if (req->pc_ref) in nvmet_req_uninit()
1185 nvmet_pr_put_ns_pc_ref(req->pc_ref); in nvmet_req_uninit()
1186 if (req->ns) in nvmet_req_uninit()
1187 nvmet_put_namespace(req->ns); in nvmet_req_uninit()
1193 if (likely(req->sq->qid != 0)) in nvmet_req_transfer_len()
1195 if (unlikely(!req->sq->ctrl)) in nvmet_req_transfer_len()
1203 if (unlikely(len != req->transfer_len)) { in nvmet_check_transfer_len()
1206 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_check_transfer_len()
1207 if (req->cmd->common.flags & NVME_CMD_SGL_ALL) in nvmet_check_transfer_len()
1221 if (unlikely(data_len > req->transfer_len)) { in nvmet_check_data_len_lte()
1224 req->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_check_data_len_lte()
1225 if (req->cmd->common.flags & NVME_CMD_SGL_ALL) in nvmet_check_data_len_lte()
1238 return req->transfer_len - req->metadata_len; in nvmet_data_transfer_len()
1244 req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt, in nvmet_req_alloc_p2pmem_sgls()
1246 if (!req->sg) in nvmet_req_alloc_p2pmem_sgls()
1249 if (req->metadata_len) { in nvmet_req_alloc_p2pmem_sgls()
1250 req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev, in nvmet_req_alloc_p2pmem_sgls()
1251 &req->metadata_sg_cnt, req->metadata_len); in nvmet_req_alloc_p2pmem_sgls()
1252 if (!req->metadata_sg) in nvmet_req_alloc_p2pmem_sgls()
1256 req->p2p_dev = p2p_dev; in nvmet_req_alloc_p2pmem_sgls()
1260 pci_p2pmem_free_sgl(req->p2p_dev, req->sg); in nvmet_req_alloc_p2pmem_sgls()
1262 return -ENOMEM; in nvmet_req_alloc_p2pmem_sgls()
1268 !req->sq->ctrl || !req->sq->qid || !req->ns) in nvmet_req_find_p2p_dev()
1270 return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid); in nvmet_req_find_p2p_dev()
1280 req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL, in nvmet_req_alloc_sgls()
1281 &req->sg_cnt); in nvmet_req_alloc_sgls()
1282 if (unlikely(!req->sg)) in nvmet_req_alloc_sgls()
1285 if (req->metadata_len) { in nvmet_req_alloc_sgls()
1286 req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL, in nvmet_req_alloc_sgls()
1287 &req->metadata_sg_cnt); in nvmet_req_alloc_sgls()
1288 if (unlikely(!req->metadata_sg)) in nvmet_req_alloc_sgls()
1294 sgl_free(req->sg); in nvmet_req_alloc_sgls()
1296 return -ENOMEM; in nvmet_req_alloc_sgls()
1302 if (req->p2p_dev) { in nvmet_req_free_sgls()
1303 pci_p2pmem_free_sgl(req->p2p_dev, req->sg); in nvmet_req_free_sgls()
1304 if (req->metadata_sg) in nvmet_req_free_sgls()
1305 pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg); in nvmet_req_free_sgls()
1306 req->p2p_dev = NULL; in nvmet_req_free_sgls()
1308 sgl_free(req->sg); in nvmet_req_free_sgls()
1309 if (req->metadata_sg) in nvmet_req_free_sgls()
1310 sgl_free(req->metadata_sg); in nvmet_req_free_sgls()
1313 req->sg = NULL; in nvmet_req_free_sgls()
1314 req->metadata_sg = NULL; in nvmet_req_free_sgls()
1315 req->sg_cnt = 0; in nvmet_req_free_sgls()
1316 req->metadata_sg_cnt = 0; in nvmet_req_free_sgls()
1331 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) in nvmet_start_ctrl() argument
1333 lockdep_assert_held(&ctrl->lock); in nvmet_start_ctrl()
1337 * Strictly speaking, the spec says a discovery controller in nvmet_start_ctrl()
1341 if (!nvmet_is_disc_subsys(ctrl->subsys) && in nvmet_start_ctrl()
1342 (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || in nvmet_start_ctrl()
1343 nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) { in nvmet_start_ctrl()
1344 ctrl->csts = NVME_CSTS_CFS; in nvmet_start_ctrl()
1348 if (nvmet_cc_mps(ctrl->cc) != 0 || in nvmet_start_ctrl()
1349 nvmet_cc_ams(ctrl->cc) != 0 || in nvmet_start_ctrl()
1350 !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) { in nvmet_start_ctrl()
1351 ctrl->csts = NVME_CSTS_CFS; in nvmet_start_ctrl()
1355 ctrl->csts = NVME_CSTS_RDY; in nvmet_start_ctrl()
1359 * keep alive timeout, but we still want to track a timeout and cleanup in nvmet_start_ctrl()
1360 * in case a host died before it enabled the controller. Hence, simply in nvmet_start_ctrl()
1363 if (ctrl->kato) in nvmet_start_ctrl()
1364 mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_start_ctrl()
1367 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) in nvmet_clear_ctrl() argument
1369 lockdep_assert_held(&ctrl->lock); in nvmet_clear_ctrl()
1372 ctrl->csts &= ~NVME_CSTS_RDY; in nvmet_clear_ctrl()
1373 ctrl->cc = 0; in nvmet_clear_ctrl()
1376 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new) in nvmet_update_cc() argument
1380 mutex_lock(&ctrl->lock); in nvmet_update_cc()
1381 old = ctrl->cc; in nvmet_update_cc()
1382 ctrl->cc = new; in nvmet_update_cc()
1385 nvmet_start_ctrl(ctrl); in nvmet_update_cc()
1387 nvmet_clear_ctrl(ctrl); in nvmet_update_cc()
1389 nvmet_clear_ctrl(ctrl); in nvmet_update_cc()
1390 ctrl->csts |= NVME_CSTS_SHST_CMPLT; in nvmet_update_cc()
1393 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; in nvmet_update_cc()
1394 mutex_unlock(&ctrl->lock); in nvmet_update_cc()
1398 static void nvmet_init_cap(struct nvmet_ctrl *ctrl) in nvmet_init_cap() argument
1401 ctrl->cap = (1ULL << 37); in nvmet_init_cap()
1403 ctrl->cap |= (1ULL << 43); in nvmet_init_cap()
1405 ctrl->cap |= (15ULL << 24); in nvmet_init_cap()
1407 if (ctrl->ops->get_max_queue_size) in nvmet_init_cap()
1408 ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl), in nvmet_init_cap()
1409 ctrl->port->max_queue_size) - 1; in nvmet_init_cap()
1411 ctrl->cap |= ctrl->port->max_queue_size - 1; in nvmet_init_cap()
1413 if (nvmet_is_passthru_subsys(ctrl->subsys)) in nvmet_init_cap()
1414 nvmet_passthrough_override_cap(ctrl); in nvmet_init_cap()
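In the CAP value assembled in nvmet_init_cap(), (1ULL << 37) and (1ULL << 43) fall in the Command Sets Supported field (CAP bits 44:37), (15ULL << 24) sets the worst-case ready timeout to 15 x 500 ms, and the low 16 bits carry the 0's-based maximum queue size. A hedged sketch of pulling those fields back out (macro names invented):

	#define EX_CAP_MQES(cap)	((u16)((cap) & 0xffff))	     /* max queue entries, 0's based */
	#define EX_CAP_TO(cap)		((u8)(((cap) >> 24) & 0xff)) /* ready timeout, 500 ms units  */
	#define EX_CAP_CSS(cap)		((u8)(((cap) >> 37) & 0xff)) /* command sets supported       */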
1421 struct nvmet_ctrl *ctrl = NULL; in nvmet_ctrl_find_get() local
1424 subsys = nvmet_find_get_subsys(req->port, subsysnqn); in nvmet_ctrl_find_get()
1428 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn); in nvmet_ctrl_find_get()
1432 mutex_lock(&subsys->lock); in nvmet_ctrl_find_get()
1433 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_ctrl_find_get()
1434 if (ctrl->cntlid == cntlid) { in nvmet_ctrl_find_get()
1435 if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) { in nvmet_ctrl_find_get()
1439 if (!kref_get_unless_zero(&ctrl->ref)) in nvmet_ctrl_find_get()
1442 /* ctrl found */ in nvmet_ctrl_find_get()
1447 ctrl = NULL; /* ctrl not found */ in nvmet_ctrl_find_get()
1450 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); in nvmet_ctrl_find_get()
1453 mutex_unlock(&subsys->lock); in nvmet_ctrl_find_get()
1456 return ctrl; in nvmet_ctrl_find_get()
1461 if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) { in nvmet_check_ctrl_status()
1463 req->cmd->common.opcode, req->sq->qid); in nvmet_check_ctrl_status()
1467 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) { in nvmet_check_ctrl_status()
1469 req->cmd->common.opcode, req->sq->qid); in nvmet_check_ctrl_status()
1474 pr_warn("qid %d not authenticated\n", req->sq->qid); in nvmet_check_ctrl_status()
1486 if (subsys->allow_any_host) in nvmet_host_allowed()
1492 list_for_each_entry(p, &subsys->hosts, entry) { in nvmet_host_allowed()
1493 if (!strcmp(nvmet_host_name(p->host), hostnqn)) in nvmet_host_allowed()
1501 * Note: ctrl->subsys->lock should be held when calling this function
1503 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl, in nvmet_setup_p2p_ns_map() argument
1512 ctrl->p2p_client = get_device(p2p_client); in nvmet_setup_p2p_ns_map()
1514 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) in nvmet_setup_p2p_ns_map()
1515 nvmet_p2pmem_ns_add_p2p(ctrl, ns); in nvmet_setup_p2p_ns_map()
1519 * Note: ctrl->subsys->lock should be held when calling this function
1521 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl) in nvmet_release_p2p_ns_map() argument
1526 radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0) in nvmet_release_p2p_ns_map()
1529 put_device(ctrl->p2p_client); in nvmet_release_p2p_ns_map()
1534 struct nvmet_ctrl *ctrl = in nvmet_fatal_error_handler() local
1537 pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid); in nvmet_fatal_error_handler()
1538 ctrl->ops->delete_ctrl(ctrl); in nvmet_fatal_error_handler()
1544 struct nvmet_ctrl *ctrl; in nvmet_alloc_ctrl() local
1545 u32 kato = args->kato; in nvmet_alloc_ctrl()
1549 args->status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; in nvmet_alloc_ctrl()
1550 subsys = nvmet_find_get_subsys(args->port, args->subsysnqn); in nvmet_alloc_ctrl()
1553 args->subsysnqn); in nvmet_alloc_ctrl()
1554 args->result = IPO_IATTR_CONNECT_DATA(subsysnqn); in nvmet_alloc_ctrl()
1555 args->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_alloc_ctrl()
1560 if (!nvmet_host_allowed(subsys, args->hostnqn)) { in nvmet_alloc_ctrl()
1562 args->hostnqn, args->subsysnqn); in nvmet_alloc_ctrl()
1563 args->result = IPO_IATTR_CONNECT_DATA(hostnqn); in nvmet_alloc_ctrl()
1565 args->status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR; in nvmet_alloc_ctrl()
1566 args->error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_alloc_ctrl()
1571 args->status = NVME_SC_INTERNAL; in nvmet_alloc_ctrl()
1572 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvmet_alloc_ctrl()
1573 if (!ctrl) in nvmet_alloc_ctrl()
1575 mutex_init(&ctrl->lock); in nvmet_alloc_ctrl()
1577 ctrl->port = args->port; in nvmet_alloc_ctrl()
1578 ctrl->ops = args->ops; in nvmet_alloc_ctrl()
1582 if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP) in nvmet_alloc_ctrl()
1583 subsys->clear_ids = 1; in nvmet_alloc_ctrl()
1586 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work); in nvmet_alloc_ctrl()
1587 INIT_LIST_HEAD(&ctrl->async_events); in nvmet_alloc_ctrl()
1588 INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL); in nvmet_alloc_ctrl()
1589 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); in nvmet_alloc_ctrl()
1590 INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); in nvmet_alloc_ctrl()
1592 memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE); in nvmet_alloc_ctrl()
1593 memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE); in nvmet_alloc_ctrl()
1595 kref_init(&ctrl->ref); in nvmet_alloc_ctrl()
1596 ctrl->subsys = subsys; in nvmet_alloc_ctrl()
1597 ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support; in nvmet_alloc_ctrl()
1598 nvmet_init_cap(ctrl); in nvmet_alloc_ctrl()
1599 WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL); in nvmet_alloc_ctrl()
1601 ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES, in nvmet_alloc_ctrl()
1603 if (!ctrl->changed_ns_list) in nvmet_alloc_ctrl()
1606 ctrl->sqs = kcalloc(subsys->max_qid + 1, in nvmet_alloc_ctrl()
1609 if (!ctrl->sqs) in nvmet_alloc_ctrl()
1613 subsys->cntlid_min, subsys->cntlid_max, in nvmet_alloc_ctrl()
1616 args->status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; in nvmet_alloc_ctrl()
1619 ctrl->cntlid = ret; in nvmet_alloc_ctrl()
1621 uuid_copy(&ctrl->hostid, args->hostid); in nvmet_alloc_ctrl()
1627 if (nvmet_is_disc_subsys(ctrl->subsys) && !kato) in nvmet_alloc_ctrl()
1630 /* keep-alive timeout in seconds */ in nvmet_alloc_ctrl()
1631 ctrl->kato = DIV_ROUND_UP(kato, 1000); in nvmet_alloc_ctrl()
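The KATO value carried by the connect command is in milliseconds; the target stores whole seconds, rounded up. A hedged worked example of that conversion (numbers invented):

	u32 kato_ms = 2500;				/* as received from the host */
	u32 kato_secs = DIV_ROUND_UP(kato_ms, 1000);	/* -> 3 seconds */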
1633 ctrl->err_counter = 0; in nvmet_alloc_ctrl()
1634 spin_lock_init(&ctrl->error_lock); in nvmet_alloc_ctrl()
1636 nvmet_start_keep_alive_timer(ctrl); in nvmet_alloc_ctrl()
1638 mutex_lock(&subsys->lock); in nvmet_alloc_ctrl()
1639 ret = nvmet_ctrl_init_pr(ctrl); in nvmet_alloc_ctrl()
1642 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); in nvmet_alloc_ctrl()
1643 nvmet_setup_p2p_ns_map(ctrl, args->p2p_client); in nvmet_alloc_ctrl()
1644 nvmet_debugfs_ctrl_setup(ctrl); in nvmet_alloc_ctrl()
1645 mutex_unlock(&subsys->lock); in nvmet_alloc_ctrl()
1647 if (args->hostid) in nvmet_alloc_ctrl()
1648 uuid_copy(&ctrl->hostid, args->hostid); in nvmet_alloc_ctrl()
1650 dhchap_status = nvmet_setup_auth(ctrl); in nvmet_alloc_ctrl()
1654 nvmet_ctrl_put(ctrl); in nvmet_alloc_ctrl()
1656 args->status = in nvmet_alloc_ctrl()
1659 args->status = NVME_SC_INTERNAL; in nvmet_alloc_ctrl()
1663 args->status = NVME_SC_SUCCESS; in nvmet_alloc_ctrl()
1666 nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm", in nvmet_alloc_ctrl()
1667 ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn, in nvmet_alloc_ctrl()
1668 ctrl->pi_support ? " T10-PI is enabled" : "", in nvmet_alloc_ctrl()
1669 nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : ""); in nvmet_alloc_ctrl()
1671 return ctrl; in nvmet_alloc_ctrl()
1674 mutex_unlock(&subsys->lock); in nvmet_alloc_ctrl()
1675 nvmet_stop_keep_alive_timer(ctrl); in nvmet_alloc_ctrl()
1676 ida_free(&cntlid_ida, ctrl->cntlid); in nvmet_alloc_ctrl()
1678 kfree(ctrl->sqs); in nvmet_alloc_ctrl()
1680 kfree(ctrl->changed_ns_list); in nvmet_alloc_ctrl()
1682 kfree(ctrl); in nvmet_alloc_ctrl()
1691 struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref); in nvmet_ctrl_free() local
1692 struct nvmet_subsys *subsys = ctrl->subsys; in nvmet_ctrl_free()
1694 mutex_lock(&subsys->lock); in nvmet_ctrl_free()
1695 nvmet_ctrl_destroy_pr(ctrl); in nvmet_ctrl_free()
1696 nvmet_release_p2p_ns_map(ctrl); in nvmet_ctrl_free()
1697 list_del(&ctrl->subsys_entry); in nvmet_ctrl_free()
1698 mutex_unlock(&subsys->lock); in nvmet_ctrl_free()
1700 nvmet_stop_keep_alive_timer(ctrl); in nvmet_ctrl_free()
1702 flush_work(&ctrl->async_event_work); in nvmet_ctrl_free()
1703 cancel_work_sync(&ctrl->fatal_err_work); in nvmet_ctrl_free()
1705 nvmet_destroy_auth(ctrl); in nvmet_ctrl_free()
1707 nvmet_debugfs_ctrl_free(ctrl); in nvmet_ctrl_free()
1709 ida_free(&cntlid_ida, ctrl->cntlid); in nvmet_ctrl_free()
1711 nvmet_async_events_free(ctrl); in nvmet_ctrl_free()
1712 kfree(ctrl->sqs); in nvmet_ctrl_free()
1713 kfree(ctrl->changed_ns_list); in nvmet_ctrl_free()
1714 kfree(ctrl); in nvmet_ctrl_free()
1719 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl) in nvmet_ctrl_put() argument
1721 kref_put(&ctrl->ref, nvmet_ctrl_free); in nvmet_ctrl_put()
1725 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) in nvmet_ctrl_fatal_error() argument
1727 mutex_lock(&ctrl->lock); in nvmet_ctrl_fatal_error()
1728 if (!(ctrl->csts & NVME_CSTS_CFS)) { in nvmet_ctrl_fatal_error()
1729 ctrl->csts |= NVME_CSTS_CFS; in nvmet_ctrl_fatal_error()
1730 queue_work(nvmet_wq, &ctrl->fatal_err_work); in nvmet_ctrl_fatal_error()
1732 mutex_unlock(&ctrl->lock); in nvmet_ctrl_fatal_error()
1736 ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl, in nvmet_ctrl_host_traddr() argument
1739 if (!ctrl->ops->host_traddr) in nvmet_ctrl_host_traddr()
1740 return -EOPNOTSUPP; in nvmet_ctrl_host_traddr()
1741 return ctrl->ops->host_traddr(ctrl, traddr, traddr_len); in nvmet_ctrl_host_traddr()
1753 if (!kref_get_unless_zero(&nvmet_disc_subsys->ref)) in nvmet_find_get_subsys()
1759 if (!strncmp(nvmet_disc_subsys->subsysnqn, subsysnqn, in nvmet_find_get_subsys()
1761 if (kref_get_unless_zero(&nvmet_disc_subsys->ref)) { in nvmet_find_get_subsys()
1766 list_for_each_entry(p, &port->subsystems, entry) { in nvmet_find_get_subsys()
1767 if (!strncmp(p->subsys->subsysnqn, subsysnqn, in nvmet_find_get_subsys()
1769 if (!kref_get_unless_zero(&p->subsys->ref)) in nvmet_find_get_subsys()
1772 return p->subsys; in nvmet_find_get_subsys()
1788 return ERR_PTR(-ENOMEM); in nvmet_subsys_alloc()
1790 subsys->ver = NVMET_DEFAULT_VS; in nvmet_subsys_alloc()
1791 /* generate a random serial number as our controllers are ephemeral: */ in nvmet_subsys_alloc()
1793 bin2hex(subsys->serial, &serial, sizeof(serial)); in nvmet_subsys_alloc()
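The serial number is produced by hex-encoding random bytes; the random fill itself falls outside the matched lines. A hedged reconstruction of the pattern (assuming a u64 scratch variable, as in the full function):

	u64 serial;

	get_random_bytes(&serial, sizeof(serial));		/* 8 random bytes     */
	bin2hex(subsys->serial, &serial, sizeof(serial));	/* 16 hex characters  */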
1795 subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL); in nvmet_subsys_alloc()
1796 if (!subsys->model_number) { in nvmet_subsys_alloc()
1797 ret = -ENOMEM; in nvmet_subsys_alloc()
1801 subsys->ieee_oui = 0; in nvmet_subsys_alloc()
1803 subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL); in nvmet_subsys_alloc()
1804 if (!subsys->firmware_rev) { in nvmet_subsys_alloc()
1805 ret = -ENOMEM; in nvmet_subsys_alloc()
1811 subsys->max_qid = NVMET_NR_QUEUES; in nvmet_subsys_alloc()
1815 subsys->max_qid = 0; in nvmet_subsys_alloc()
1818 pr_err("%s: Unknown Subsystem type - %d\n", __func__, type); in nvmet_subsys_alloc()
1819 ret = -EINVAL; in nvmet_subsys_alloc()
1822 subsys->type = type; in nvmet_subsys_alloc()
1823 subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE, in nvmet_subsys_alloc()
1825 if (!subsys->subsysnqn) { in nvmet_subsys_alloc()
1826 ret = -ENOMEM; in nvmet_subsys_alloc()
1829 subsys->cntlid_min = NVME_CNTLID_MIN; in nvmet_subsys_alloc()
1830 subsys->cntlid_max = NVME_CNTLID_MAX; in nvmet_subsys_alloc()
1831 kref_init(&subsys->ref); in nvmet_subsys_alloc()
1833 mutex_init(&subsys->lock); in nvmet_subsys_alloc()
1834 xa_init(&subsys->namespaces); in nvmet_subsys_alloc()
1835 INIT_LIST_HEAD(&subsys->ctrls); in nvmet_subsys_alloc()
1836 INIT_LIST_HEAD(&subsys->hosts); in nvmet_subsys_alloc()
1845 kfree(subsys->subsysnqn); in nvmet_subsys_alloc()
1847 kfree(subsys->firmware_rev); in nvmet_subsys_alloc()
1849 kfree(subsys->model_number); in nvmet_subsys_alloc()
1860 WARN_ON_ONCE(!xa_empty(&subsys->namespaces)); in nvmet_subsys_free()
1864 xa_destroy(&subsys->namespaces); in nvmet_subsys_free()
1867 kfree(subsys->subsysnqn); in nvmet_subsys_free()
1868 kfree(subsys->model_number); in nvmet_subsys_free()
1869 kfree(subsys->firmware_rev); in nvmet_subsys_free()
1875 struct nvmet_ctrl *ctrl; in nvmet_subsys_del_ctrls() local
1877 mutex_lock(&subsys->lock); in nvmet_subsys_del_ctrls()
1878 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_subsys_del_ctrls()
1879 ctrl->ops->delete_ctrl(ctrl); in nvmet_subsys_del_ctrls()
1880 mutex_unlock(&subsys->lock); in nvmet_subsys_del_ctrls()
1885 kref_put(&subsys->ref, nvmet_subsys_free); in nvmet_subsys_put()
1890 int error = -ENOMEM; in nvmet_init()
1894 nvmet_bvec_cache = kmem_cache_create("nvmet-bvec", in nvmet_init()
1898 return -ENOMEM; in nvmet_init()
1900 zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0); in nvmet_init()
1904 buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq", in nvmet_init()
1909 nvmet_wq = alloc_workqueue("nvmet-wq", in nvmet_init()