
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/t10-pi.h>
/* Default to a 4K page size, with the intention to update this ... */
/* Prefers I/O aligned to a stripe size specified in a vendor-specific Identify field. */
/* The controller needs a delay before it starts checking device readiness. */
/* Use non-standard 128-byte SQEs. */
/* MSI (but not MSI-X) interrupts are broken and never fire. */
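
/*
 * The lines above document NVME_QUIRK_* flags that core and transport code
 * test against ctrl->quirks.  For PCIe controllers the flags are usually
 * attached through the driver's PCI ID table.  A minimal sketch with a
 * made-up vendor/device ID (the real table lives in the PCI transport
 * driver and assumes <linux/pci.h>):
 */
static const struct pci_device_id nvme_quirk_table_sketch[] = {
	{ PCI_DEVICE(0xabcd, 0x0001),	/* hypothetical controller */
	  .driver_data = NVME_QUIRK_STRIPE_SIZE |
			 NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ 0, }
};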
/* ... this structure as the first member of their request-private data. */
	struct nvme_ctrl *ctrl;
/* Mark a bio as coming in through the mpath node. */
/* in nvme_req_qid(): qid 0 is the admin queue, I/O queues are 1-based */
	if (!req->q->queuedata)
		return 0;
	return req->mq_hctx->queue_num + 1;
/* @NVME_CTRL_DEAD: Controller is non-present/unresponsive during ... */
/* Lockless read; pairs with the WRITE_ONCE() in nvme_change_ctrl_state(). */
static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
{
	return READ_ONCE(ctrl->state);
}
/* ... a separate refcount. */
/*
 * Anchor structure for namespaces.  There is one for each namespace in a
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
/* in nvme_ns_head_multipath(): */
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
	struct nvme_ctrl *ctrl;
/* in nvme_ns_has_pi(): */
	return head->pi_type && head->ms == head->pi_size;
/* struct nvme_ctrl_ops members: */
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	int (*subsystem_reset)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	void (*print_device_info)(struct nvme_ctrl *ctrl);
	bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
/* in nvme_cid(): */
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
/* in nvme_find_rq(): */
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
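
/*
 * Illustration of the check above: the core hides a small generation
 * counter in the upper bits of the 16-bit command id, so a completion
 * that carries a stale or corrupted id is rejected instead of completing
 * the wrong request.  A standalone userspace sketch, assuming the
 * upstream 12-bit tag / 4-bit genctr split (see the nvme_cid() and
 * nvme_tag_from_cid() helpers in this header for the real encoding):
 */
#include <stdint.h>
#include <stdio.h>

#define GENCTR_MASK(gen)	((gen) & 0xf)		/* 4-bit generation */
#define CID(gen, tag)		((uint16_t)((GENCTR_MASK(gen) << 12) | (tag)))
#define TAG_FROM_CID(cid)	((cid) & 0xfff)		/* 12-bit blk-mq tag */
#define GENCTR_FROM_CID(cid)	(((cid) >> 12) & 0xf)

int main(void)
{
	uint16_t cid = CID(5, 0x2a);	/* 5th reuse of tag 0x2a */

	printf("cid=%#x tag=%#x genctr=%u\n",
	       cid, TAG_FROM_CID(cid), GENCTR_FROM_CID(cid));
	/* a completion carrying genctr 4 for this tag would be rejected */
	return 0;
}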
/* in nvme_strlen(): trims trailing spaces from fixed-width identify strings */
	while (s[len - 1] == ' ')
		len--;
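
/*
 * Context for the loop above: NVMe Identify strings (model, firmware
 * revision) are fixed-width, space-padded ASCII with no NUL terminator,
 * so the printable length has to be computed by walking back over the
 * trailing blanks.  A standalone sketch of the same trimming (the 8-byte
 * field width is chosen purely for illustration):
 */
#include <stdio.h>

static int trimmed_len(const char *s, int len)
{
	while (len > 0 && s[len - 1] == ' ')
		len--;
	return len;
}

int main(void)
{
	const char fw[8] = "1.2     ";	/* space-padded, not NUL-terminated */

	printf("firmware:%.*s|\n", trimmed_len(fw, 8), fw);	/* firmware:1.2| */
	return 0;
}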
static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
{
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (ctrl->ops->print_device_info) {
		ctrl->ops->print_device_info(ctrl);
		return;
	}

	dev_err(ctrl->device,
		"VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
		nvme_strlen(subsys->model, sizeof(subsys->model)),
		subsys->model, nvme_strlen(subsys->firmware_rev,
					   sizeof(subsys->firmware_rev)),
		subsys->firmware_rev);
}
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
/* ctrl->subsystem is set when CAP reports NVM Subsystem Reset support. */
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem || !ctrl->ops->subsystem_reset)
		return -ENOTTY;
	return ctrl->ops->subsystem_reset(ctrl);
}
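
/*
 * A transport implements ->subsystem_reset by writing the magic value
 * "NVMe" to the NSSR register.  A sketch of a PCIe-style implementation;
 * struct my_dev and to_my_dev() are hypothetical stand-ins for the
 * driver's private structure, while NVME_SUBSYS_RESET and NVME_REG_NSSR
 * are the definitions from <linux/nvme.h>:
 */
static int my_subsystem_reset(struct nvme_ctrl *ctrl)
{
	struct my_dev *dev = to_my_dev(ctrl);	/* hypothetical container_of() wrapper */

	writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR);
	return 0;
}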
/* Convert a 512B sector number to a device logical block number. */
static inline u64 nvme_sect_to_lba(struct nvme_ns_head *head, sector_t sector)
{
	return sector >> (head->lba_shift - SECTOR_SHIFT);
}
/* Convert a device logical block number to a 512B sector number. */
static inline sector_t nvme_lba_to_sect(struct nvme_ns_head *head, u64 lba)
{
	return lba << (head->lba_shift - SECTOR_SHIFT);
}
/* Convert a byte length to NVMe's 0-based number of dwords. */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
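
/*
 * Worked example of the three conversions above for a 4K-formatted
 * namespace (lba_shift = 12, SECTOR_SHIFT = 9): one logical block covers
 * eight 512B sectors, and an 8192-byte transfer is 2048 dwords, encoded
 * 0-based as 2047.  Standalone sketch with the shifts written out:
 */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT	9
#define LBA_SHIFT	12	/* 4K logical blocks */

int main(void)
{
	uint64_t sector = 4096;					/* in 512B units */
	uint64_t lba = sector >> (LBA_SHIFT - SECTOR_SHIFT);	/* 512 */
	uint64_t back = lba << (LBA_SHIFT - SECTOR_SHIFT);	/* 4096 again */
	uint32_t numd = (8192 >> 2) - 1;			/* 2047 */

	printf("sector %llu -> lba %llu -> sector %llu, numd=%u\n",
	       (unsigned long long)sector, (unsigned long long)lba,
	       (unsigned long long)back, numd);
	return 0;
}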
/* in nvme_is_path_error(): check for a status code type of 'path related status' */
/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so.  If not let the caller complete the request without an indirect function
 * call.
 */
/* in nvme_try_complete_req(): */
	struct nvme_ctrl *ctrl = rq->ctrl;

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		rq->genctr++;

	/* bit 0 of the CQE status field is the phase tag; shift it away */
	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;

	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
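
/*
 * Transports call nvme_try_complete_req() from their CQE handler; a false
 * return means the request was not handed to blk-mq for remote completion
 * and must be completed directly.  A sketch modeled on the fabrics
 * transports; struct my_queue is a hypothetical per-queue context that
 * carries the blk-mq tag set:
 */
static void my_handle_cqe(struct my_queue *queue, struct nvme_completion *cqe)
{
	struct request *req;

	req = nvme_find_rq(queue->tags, cqe->command_id);	/* genctr-checked lookup */
	if (!req)
		return;		/* bogus id was already logged by nvme_find_rq() */

	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
		nvme_complete_rq(req);	/* complete in the caller's context */
}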
static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}
static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (nvme_ctrl_state(ctrl)) {
	/* ... false for the live/transitional states, true for deleting/dead ... */
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}
/* in nvme_complete_batch(): */
	rq_list_for_each(&iob->req_list, req) {
		/* ... per-request completion work elided ... */
	}
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
int nvme_add_ctrl(struct nvme_ctrl *ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int cmd_size);
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int nr_maps,
		unsigned int cmd_size);
void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);
void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1

blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live, enum nvme_ctrl_state state);
static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);

	if (likely(state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING)
		return queue_live;
	return __nvme_check_ready(ctrl, rq, queue_live, state);
}
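
/*
 * Transports gate submission on nvme_check_ready() at the top of their
 * ->queue_rq() handler and bounce anything not allowed through
 * nvme_fail_nonready_command().  A sketch in the style of the fabrics
 * drivers; queue_live stands in for the transport's own queue liveness
 * flag:
 */
static blk_status_t my_queue_rq_prologue(struct nvme_ctrl *ctrl,
		struct request *rq, bool queue_live)
{
	if (!nvme_check_ready(ctrl, rq, queue_live))
		return nvme_fail_nonready_command(ctrl, rq);
	return BLK_STS_OK;	/* caller proceeds with command setup */
}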
/*
 * Otherwise, private namespaces are not required to report a unique NSID.
 */
static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return head->shared ||
		(ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
		(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
		(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_id_ns **id);
void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
/* in nvme_trace_bio_complete(): */
	struct nvme_ns *ns = req->q->queuedata;

	if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
/* in nvme_disk_is_ns_head(): */
	return disk->fops == &nvme_ns_head_ops;
/* Stubs for CONFIG_NVME_MULTIPATH=n builds: */
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) {}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) {}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl) {}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
			"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl) {}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl) {}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl) {}
/* in nvme_get_ns_from_dev(): */
	return disk->private_data;
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);

/* Stubs for builds without hwmon support: */
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}
static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
/* in nvme_start_request(): */
	if (rq->cmd_flags & REQ_NVME_MPATH)
		nvme_mpath_start_request(rq);
static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	return ctrl->sgls & (NVME_CTRL_SGLS_BYTE_ALIGNED |
			     NVME_CTRL_SGLS_DWORD_ALIGNED);
}

static inline bool nvme_ctrl_meta_sgl_supported(struct nvme_ctrl *ctrl)
{
	if (ctrl->ops->flags & NVME_F_FABRICS)
		return true;
	return ctrl->sgls & NVME_CTRL_SGLS_MSDS;
}
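
/*
 * PCIe drivers consult these helpers when choosing a data descriptor
 * format: PRP lists always work, while SGLs may only be used when the
 * controller advertises them in the Identify SGLS field.  A hypothetical
 * decision helper (the real policy also weighs request geometry such as
 * segment count and alignment):
 */
static bool my_use_sgl(struct nvme_ctrl *ctrl, unsigned int nseg)
{
	if (!nvme_ctrl_sgl_supported(ctrl))
		return false;	/* must fall back to PRPs */
	return nseg > 1;	/* assumption: prefer SGLs for multi-segment I/O */
}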
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
void nvme_auth_free(struct nvme_ctrl *ctrl);

/* Stubs for builds without in-band authentication support: */
static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	return 0;
}
static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}
static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	return -EPROTONOSUPPORT;
}
static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	return -EPROTONOSUPPORT;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		u8 opcode);
u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
		struct nvme_command *cmd, int status);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}