Lines matching full:queue in drivers/nvme/target/tcp.c (Linux NVMe over Fabrics TCP target). Each hit shows the source line number, the matched line, the enclosing function, and, where applicable, the cross-referencer's classification of the matched symbol (argument, local, or member).

66  * queue before determining it to be idle.  This optional module behavior
110 struct nvmet_tcp_queue *queue; member
221 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, in nvmet_tcp_cmd_tag() argument
224 if (unlikely(!queue->nr_cmds)) { in nvmet_tcp_cmd_tag()
229 return cmd - queue->cmds; in nvmet_tcp_cmd_tag()
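
The tag computed above is simply the command's index in the queue's preallocated cmds[] array, recovered by pointer subtraction. A minimal userspace sketch of that idiom; struct fake_queue and cmd_tag() are hypothetical stand-ins, not the kernel types:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cmd { int payload; };

    struct fake_queue {                 /* hypothetical stand-in for nvmet_tcp_queue */
        struct cmd cmds[8];
        unsigned int nr_cmds;
    };

    static uint16_t cmd_tag(const struct fake_queue *q, const struct cmd *c)
    {
        assert(q->nr_cmds);             /* the driver handles nr_cmds == 0 as a special case */
        return (uint16_t)(c - q->cmds); /* element index, not a byte offset */
    }

    int main(void)
    {
        struct fake_queue q = { .nr_cmds = 8 };

        printf("tag = %u\n", cmd_tag(&q, &q.cmds[3]));  /* prints "tag = 3" */
        return 0;
    }
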
257 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue) in nvmet_tcp_get_cmd() argument
261 cmd = list_first_entry_or_null(&queue->free_list, in nvmet_tcp_get_cmd()
277 if (unlikely(cmd == &cmd->queue->connect)) in nvmet_tcp_put_cmd()
280 list_add_tail(&cmd->entry, &cmd->queue->free_list); in nvmet_tcp_put_cmd()
283 static inline int queue_cpu(struct nvmet_tcp_queue *queue) in queue_cpu() argument
285 return queue->sock->sk->sk_incoming_cpu; in queue_cpu()
288 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue) in nvmet_tcp_hdgst_len() argument
290 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvmet_tcp_hdgst_len()
293 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue) in nvmet_tcp_ddgst_len() argument
295 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvmet_tcp_ddgst_len()
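
Both helpers above encode the same rule: an enabled digest contributes a fixed 4-byte CRC32C (NVME_TCP_DIGEST_LENGTH) to the PDU length, otherwise zero. A hedged userspace sketch of that length accounting; fake_queue and the helper names are illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DIGEST_LEN 4    /* NVME_TCP_DIGEST_LENGTH in the kernel headers */

    struct fake_queue { bool hdr_digest, data_digest; };    /* hypothetical */

    static uint8_t hdgst_len(const struct fake_queue *q)
    {
        return q->hdr_digest ? DIGEST_LEN : 0;
    }

    static uint8_t ddgst_len(const struct fake_queue *q)
    {
        return q->data_digest ? DIGEST_LEN : 0;
    }

    int main(void)
    {
        struct fake_queue q = { .hdr_digest = true, .data_digest = false };

        /* e.g. a data PDU's wire length = header + hdgst + payload + ddgst */
        printf("hdgst=%u ddgst=%u\n", hdgst_len(&q), ddgst_len(&q));
        return 0;
    }
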
308 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, in nvmet_tcp_verify_hdgst() argument
316 pr_err("queue %d: header digest enabled but no header digest\n", in nvmet_tcp_verify_hdgst()
317 queue->idx); in nvmet_tcp_verify_hdgst()
322 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len); in nvmet_tcp_verify_hdgst()
325 pr_err("queue %d: header digest error: recv %#x expected %#x\n", in nvmet_tcp_verify_hdgst()
326 queue->idx, le32_to_cpu(recv_digest), in nvmet_tcp_verify_hdgst()
334 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu) in nvmet_tcp_check_ddgst() argument
337 u8 digest_len = nvmet_tcp_hdgst_len(queue); in nvmet_tcp_check_ddgst()
344 pr_err("queue %d: data digest flag is cleared\n", queue->idx); in nvmet_tcp_check_ddgst()
390 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) in nvmet_tcp_fatal_error() argument
392 queue->rcv_state = NVMET_TCP_RECV_ERR; in nvmet_tcp_fatal_error()
393 if (queue->nvme_sq.ctrl) in nvmet_tcp_fatal_error()
394 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); in nvmet_tcp_fatal_error()
396 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_fatal_error()
399 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status) in nvmet_tcp_socket_error() argument
401 queue->rcv_state = NVMET_TCP_RECV_ERR; in nvmet_tcp_socket_error()
403 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_socket_error()
405 nvmet_tcp_fatal_error(queue); in nvmet_tcp_socket_error()
456 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_c2h_data_pdu() local
457 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
458 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
464 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ? in nvmet_setup_c2h_data_pdu()
475 if (queue->data_digest) { in nvmet_setup_c2h_data_pdu()
477 nvmet_tcp_calc_ddgst(queue->snd_hash, cmd); in nvmet_setup_c2h_data_pdu()
480 if (cmd->queue->hdr_digest) { in nvmet_setup_c2h_data_pdu()
482 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvmet_setup_c2h_data_pdu()
489 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_r2t_pdu() local
490 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_r2t_pdu()
502 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); in nvmet_setup_r2t_pdu()
505 if (cmd->queue->hdr_digest) { in nvmet_setup_r2t_pdu()
507 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvmet_setup_r2t_pdu()
514 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_response_pdu() local
515 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_response_pdu()
525 if (cmd->queue->hdr_digest) { in nvmet_setup_response_pdu()
527 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvmet_setup_response_pdu()
531 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue) in nvmet_tcp_process_resp_list() argument
536 for (node = llist_del_all(&queue->resp_list); node; node = node->next) { in nvmet_tcp_process_resp_list()
538 list_add(&cmd->entry, &queue->resp_send_list); in nvmet_tcp_process_resp_list()
539 queue->send_list_len++; in nvmet_tcp_process_resp_list()
543 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue) in nvmet_tcp_fetch_cmd() argument
545 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list, in nvmet_tcp_fetch_cmd()
547 if (!queue->snd_cmd) { in nvmet_tcp_fetch_cmd()
548 nvmet_tcp_process_resp_list(queue); in nvmet_tcp_fetch_cmd()
549 queue->snd_cmd = in nvmet_tcp_fetch_cmd()
550 list_first_entry_or_null(&queue->resp_send_list, in nvmet_tcp_fetch_cmd()
552 if (unlikely(!queue->snd_cmd)) in nvmet_tcp_fetch_cmd()
556 list_del_init(&queue->snd_cmd->entry); in nvmet_tcp_fetch_cmd()
557 queue->send_list_len--; in nvmet_tcp_fetch_cmd()
559 if (nvmet_tcp_need_data_out(queue->snd_cmd)) in nvmet_tcp_fetch_cmd()
560 nvmet_setup_c2h_data_pdu(queue->snd_cmd); in nvmet_tcp_fetch_cmd()
561 else if (nvmet_tcp_need_data_in(queue->snd_cmd)) in nvmet_tcp_fetch_cmd()
562 nvmet_setup_r2t_pdu(queue->snd_cmd); in nvmet_tcp_fetch_cmd()
564 nvmet_setup_response_pdu(queue->snd_cmd); in nvmet_tcp_fetch_cmd()
566 return queue->snd_cmd; in nvmet_tcp_fetch_cmd()
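
nvmet_tcp_fetch_cmd() above shows the two-stage response queue: completions are posted to resp_list (a lock-free llist, so any context can produce, as in nvmet_tcp_queue_response() below) and io_work drains them in one grab into the ordered, consumer-private resp_send_list only when that local list runs empty. A simplified single-threaded sketch of the drain-on-empty logic; the real code uses llist_del_all() for the atomic grab, and the list plumbing here is illustrative:

    #include <stdio.h>

    struct cmd { struct cmd *next; int id; };

    static struct cmd *resp_list;       /* producers push here (llist in the kernel) */
    static struct cmd *resp_send_list;  /* consumer-private, ordered */

    static void process_resp_list(void)
    {
        struct cmd *node = resp_list;   /* models llist_del_all(): take everything */
        resp_list = NULL;

        while (node) {                  /* head insertion re-reverses the LIFO */
            struct cmd *next = node->next;
            node->next = resp_send_list;
            resp_send_list = node;
            node = next;
        }
    }

    static struct cmd *fetch_cmd(void)
    {
        struct cmd *c;

        if (!resp_send_list)
            process_resp_list();        /* drain only when the local list is empty */
        c = resp_send_list;
        if (c)
            resp_send_list = c->next;
        return c;
    }

    int main(void)
    {
        static struct cmd a = { .id = 1 }, b = { .id = 2 };
        struct cmd *c;

        a.next = resp_list; resp_list = &a;     /* producer posts 1... */
        b.next = resp_list; resp_list = &b;     /* ...then 2 (LIFO push) */

        while ((c = fetch_cmd()))
            printf("send cmd %d\n", c->id);     /* 1 then 2: FIFO restored */
        return 0;
    }
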
573 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_queue_response() local
580 queue_state = smp_load_acquire(&queue->rcv_state); in nvmet_tcp_queue_response()
581 queue_cmd = READ_ONCE(queue->cmd); in nvmet_tcp_queue_response()
598 llist_add(&cmd->lentry, &queue->resp_list); in nvmet_tcp_queue_response()
599 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work); in nvmet_tcp_queue_response()
616 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_data_pdu()
622 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_data_pdu()
639 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_data() local
650 if ((!last_in_batch && cmd->queue->send_list_len) || in nvmet_try_send_data()
652 queue->data_digest || !queue->nvme_sq.sqhd_disabled) in nvmet_try_send_data()
657 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_data()
671 if (queue->data_digest) { in nvmet_try_send_data()
675 if (queue->nvme_sq.sqhd_disabled) { in nvmet_try_send_data()
676 cmd->queue->snd_cmd = NULL; in nvmet_try_send_data()
683 if (queue->nvme_sq.sqhd_disabled) in nvmet_try_send_data()
695 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_response()
699 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_response()
706 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_response()
716 cmd->queue->snd_cmd = NULL; in nvmet_try_send_response()
725 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_r2t()
729 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_r2t()
736 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_r2t()
745 cmd->queue->snd_cmd = NULL; in nvmet_try_send_r2t()
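
The repeated `!last_in_batch && cmd->queue->send_list_len` test in the send paths above asks whether another PDU will follow immediately; when one will, the full source sets MSG_MORE on the sendmsg so TCP can coalesce consecutive PDUs into fewer segments. A sketch of just that flag decision; send_flags() is a made-up helper, and the real code folds in additional state:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/socket.h>

    static int send_flags(bool last_in_batch, int send_list_len)
    {
        int flags = MSG_DONTWAIT;       /* the io_work loop must never block */

        if (!last_in_batch && send_list_len)
            flags |= MSG_MORE;          /* more PDUs queued: let TCP batch them */
        return flags;
    }

    int main(void)
    {
        printf("mid-batch flags: %#x\n", send_flags(false, 3));
        printf("batch-end flags: %#x\n", send_flags(true, 0));
        return 0;
    }
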
751 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_ddgst() local
760 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_ddgst()
765 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvmet_try_send_ddgst()
775 if (queue->nvme_sq.sqhd_disabled) { in nvmet_try_send_ddgst()
776 cmd->queue->snd_cmd = NULL; in nvmet_try_send_ddgst()
784 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue, in nvmet_tcp_try_send_one() argument
787 struct nvmet_tcp_cmd *cmd = queue->snd_cmd; in nvmet_tcp_try_send_one()
790 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) { in nvmet_tcp_try_send_one()
791 cmd = nvmet_tcp_fetch_cmd(queue); in nvmet_tcp_try_send_one()
833 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue, in nvmet_tcp_try_send() argument
839 ret = nvmet_tcp_try_send_one(queue, i == budget - 1); in nvmet_tcp_try_send()
841 nvmet_tcp_socket_error(queue, ret); in nvmet_tcp_try_send()
852 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) in nvmet_prepare_receive_pdu() argument
854 queue->offset = 0; in nvmet_prepare_receive_pdu()
855 queue->left = sizeof(struct nvme_tcp_hdr); in nvmet_prepare_receive_pdu()
856 WRITE_ONCE(queue->cmd, NULL); in nvmet_prepare_receive_pdu()
857 /* Ensure rcv_state is visible only after queue->cmd is set */ in nvmet_prepare_receive_pdu()
858 smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU); in nvmet_prepare_receive_pdu()
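
The comment and smp_store_release() above pair with the smp_load_acquire()/READ_ONCE() in nvmet_tcp_queue_response() earlier in this listing: queue->cmd is written before rcv_state is published, so a reader that observes NVMET_TCP_RECV_PDU is guaranteed to also observe the cleared cmd. A userspace sketch of the same publication protocol using C11 atomics in place of the kernel barriers; the names and state values are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    enum rcv_state { RECV_PDU, RECV_DATA };

    static void *cmd;                           /* plain data guarded by the protocol */
    static _Atomic enum rcv_state rcv_state = RECV_DATA;

    static void prepare_receive_pdu(void)       /* writer (io_work) */
    {
        cmd = NULL;                             /* 1: clear cmd first... */
        atomic_store_explicit(&rcv_state, RECV_PDU,
                              memory_order_release);    /* 2: ...then publish state */
    }

    static void queue_response(void)            /* reader (completion path) */
    {
        enum rcv_state st = atomic_load_explicit(&rcv_state,
                                                 memory_order_acquire);

        /* The acquire pairs with the release: seeing RECV_PDU guarantees
         * this thread also sees cmd == NULL. */
        if (st == RECV_PDU)
            printf("cmd is %s\n", cmd ? "stale (bug)" : "NULL, as guaranteed");
    }

    int main(void)
    {
        cmd = &rcv_state;                       /* pretend a command is in flight */
        prepare_receive_pdu();
        queue_response();
        return 0;
    }
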
861 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue) in nvmet_tcp_free_crypto() argument
863 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvmet_tcp_free_crypto()
865 ahash_request_free(queue->rcv_hash); in nvmet_tcp_free_crypto()
866 ahash_request_free(queue->snd_hash); in nvmet_tcp_free_crypto()
870 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue) in nvmet_tcp_alloc_crypto() argument
878 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvmet_tcp_alloc_crypto()
879 if (!queue->snd_hash) in nvmet_tcp_alloc_crypto()
881 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvmet_tcp_alloc_crypto()
883 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvmet_tcp_alloc_crypto()
884 if (!queue->rcv_hash) in nvmet_tcp_alloc_crypto()
886 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvmet_tcp_alloc_crypto()
890 ahash_request_free(queue->snd_hash); in nvmet_tcp_alloc_crypto()
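
nvmet_tcp_alloc_crypto() above is the standard two-step allocation with reverse-order unwind: if the rcv_hash request fails, the already-allocated snd_hash is freed before returning. A generic userspace sketch of the goto-unwind idiom; the malloc'd buffers merely stand in for the two ahash requests:

    #include <stdlib.h>

    struct ctx { void *snd, *rcv; };    /* stand-ins for the two ahash requests */

    static int alloc_pair(struct ctx *c)
    {
        c->snd = malloc(64);
        if (!c->snd)
            return -1;

        c->rcv = malloc(64);
        if (!c->rcv)
            goto free_snd;              /* unwind in reverse allocation order */

        return 0;

    free_snd:
        free(c->snd);
        c->snd = NULL;
        return -1;
    }

    int main(void)
    {
        struct ctx c;

        if (alloc_pair(&c))
            return 1;
        free(c.rcv);
        free(c.snd);
        return 0;
    }
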
897 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) in nvmet_tcp_handle_icreq() argument
899 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq; in nvmet_tcp_handle_icreq()
900 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp; in nvmet_tcp_handle_icreq()
908 nvmet_tcp_fatal_error(queue); in nvmet_tcp_handle_icreq()
913 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv); in nvmet_tcp_handle_icreq()
918 pr_err("queue %d: unsupported hpda %d\n", queue->idx, in nvmet_tcp_handle_icreq()
923 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE); in nvmet_tcp_handle_icreq()
924 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE); in nvmet_tcp_handle_icreq()
925 if (queue->hdr_digest || queue->data_digest) { in nvmet_tcp_handle_icreq()
926 ret = nvmet_tcp_alloc_crypto(queue); in nvmet_tcp_handle_icreq()
939 if (queue->hdr_digest) in nvmet_tcp_handle_icreq()
941 if (queue->data_digest) in nvmet_tcp_handle_icreq()
946 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvmet_tcp_handle_icreq()
948 queue->state = NVMET_TCP_Q_FAILED; in nvmet_tcp_handle_icreq()
949 return ret; /* queue removal will cleanup */ in nvmet_tcp_handle_icreq()
952 queue->state = NVMET_TCP_Q_LIVE; in nvmet_tcp_handle_icreq()
953 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_handle_icreq()
957 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, in nvmet_tcp_handle_req_failure() argument
972 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_handle_req_failure()
978 pr_err("queue %d: failed to map data\n", queue->idx); in nvmet_tcp_handle_req_failure()
979 nvmet_tcp_fatal_error(queue); in nvmet_tcp_handle_req_failure()
983 queue->rcv_state = NVMET_TCP_RECV_DATA; in nvmet_tcp_handle_req_failure()
988 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) in nvmet_tcp_handle_h2c_data_pdu() argument
990 struct nvme_tcp_data_pdu *data = &queue->pdu.data; in nvmet_tcp_handle_h2c_data_pdu()
994 if (likely(queue->nr_cmds)) { in nvmet_tcp_handle_h2c_data_pdu()
995 if (unlikely(data->ttag >= queue->nr_cmds)) { in nvmet_tcp_handle_h2c_data_pdu()
996 pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n", in nvmet_tcp_handle_h2c_data_pdu()
997 queue->idx, data->ttag, queue->nr_cmds); in nvmet_tcp_handle_h2c_data_pdu()
1000 cmd = &queue->cmds[data->ttag]; in nvmet_tcp_handle_h2c_data_pdu()
1002 cmd = &queue->connect; in nvmet_tcp_handle_h2c_data_pdu()
1013 nvmet_tcp_hdgst_len(queue) - in nvmet_tcp_handle_h2c_data_pdu()
1014 nvmet_tcp_ddgst_len(queue) - in nvmet_tcp_handle_h2c_data_pdu()
1026 queue->cmd = cmd; in nvmet_tcp_handle_h2c_data_pdu()
1027 queue->rcv_state = NVMET_TCP_RECV_DATA; in nvmet_tcp_handle_h2c_data_pdu()
1033 nvmet_tcp_fatal_error(queue); in nvmet_tcp_handle_h2c_data_pdu()
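
The ttag in an H2CData PDU is untrusted network input, so it is bounds-checked against nr_cmds before indexing the cmds[] array; a queue whose command array has not been sized yet (nr_cmds == 0, until nvmet_tcp_install_queue() sizes it near the end of this listing) only accepts the dedicated connect command. A sketch of that lookup; fake_queue and lookup_ttag() are hypothetical names:

    #include <stdio.h>

    struct cmd { int id; };

    struct fake_queue {                 /* hypothetical */
        struct cmd cmds[4];
        unsigned int nr_cmds;
        struct cmd connect;
    };

    /* NULL means out of bounds; the driver treats that as a fatal queue error. */
    static struct cmd *lookup_ttag(struct fake_queue *q, unsigned int ttag)
    {
        if (q->nr_cmds) {
            if (ttag >= q->nr_cmds)     /* reject the untrusted index */
                return NULL;
            return &q->cmds[ttag];
        }
        return &q->connect;             /* array not sized yet */
    }

    int main(void)
    {
        struct fake_queue q = { .nr_cmds = 4 };

        printf("ttag 7: %s\n", lookup_ttag(&q, 7) ? "ok" : "rejected");
        printf("ttag 2: %s\n", lookup_ttag(&q, 2) ? "ok" : "rejected");
        return 0;
    }
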
1037 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) in nvmet_tcp_done_recv_pdu() argument
1039 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_done_recv_pdu()
1040 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; in nvmet_tcp_done_recv_pdu()
1044 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { in nvmet_tcp_done_recv_pdu()
1048 nvmet_tcp_fatal_error(queue); in nvmet_tcp_done_recv_pdu()
1051 return nvmet_tcp_handle_icreq(queue); in nvmet_tcp_done_recv_pdu()
1055 pr_err("queue %d: received icreq pdu in state %d\n", in nvmet_tcp_done_recv_pdu()
1056 queue->idx, queue->state); in nvmet_tcp_done_recv_pdu()
1057 nvmet_tcp_fatal_error(queue); in nvmet_tcp_done_recv_pdu()
1062 ret = nvmet_tcp_handle_h2c_data_pdu(queue); in nvmet_tcp_done_recv_pdu()
1068 queue->cmd = nvmet_tcp_get_cmd(queue); in nvmet_tcp_done_recv_pdu()
1069 if (unlikely(!queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1071 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d", in nvmet_tcp_done_recv_pdu()
1072 queue->idx, queue->nr_cmds, queue->send_list_len, in nvmet_tcp_done_recv_pdu()
1074 nvmet_tcp_fatal_error(queue); in nvmet_tcp_done_recv_pdu()
1078 req = &queue->cmd->req; in nvmet_tcp_done_recv_pdu()
1081 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq, in nvmet_tcp_done_recv_pdu()
1082 &queue->nvme_sq, &nvmet_tcp_ops))) { in nvmet_tcp_done_recv_pdu()
1088 nvmet_tcp_handle_req_failure(queue, queue->cmd, req); in nvmet_tcp_done_recv_pdu()
1092 ret = nvmet_tcp_map_data(queue->cmd); in nvmet_tcp_done_recv_pdu()
1094 pr_err("queue %d: failed to map data\n", queue->idx); in nvmet_tcp_done_recv_pdu()
1095 if (nvmet_tcp_has_inline_data(queue->cmd)) in nvmet_tcp_done_recv_pdu()
1096 nvmet_tcp_fatal_error(queue); in nvmet_tcp_done_recv_pdu()
1103 if (nvmet_tcp_need_data_in(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1104 if (nvmet_tcp_has_inline_data(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1105 queue->rcv_state = NVMET_TCP_RECV_DATA; in nvmet_tcp_done_recv_pdu()
1106 nvmet_tcp_build_pdu_iovec(queue->cmd); in nvmet_tcp_done_recv_pdu()
1110 nvmet_tcp_queue_response(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
1114 queue->cmd->req.execute(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
1116 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_done_recv_pdu()
1148 static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue, in nvmet_tcp_tls_record_ok() argument
1155 ctype = tls_get_record_type(queue->sock->sk, cmsg); in nvmet_tcp_tls_record_ok()
1162 tls_alert_recv(queue->sock->sk, msg, &level, &description); in nvmet_tcp_tls_record_ok()
1164 pr_err("queue %d: TLS Alert desc %u\n", in nvmet_tcp_tls_record_ok()
1165 queue->idx, description); in nvmet_tcp_tls_record_ok()
1168 pr_warn("queue %d: TLS Alert desc %u\n", in nvmet_tcp_tls_record_ok()
1169 queue->idx, description); in nvmet_tcp_tls_record_ok()
1175 pr_err("queue %d: TLS record %d unhandled\n", in nvmet_tcp_tls_record_ok()
1176 queue->idx, ctype); in nvmet_tcp_tls_record_ok()
1183 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue) in nvmet_tcp_try_recv_pdu() argument
1185 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_try_recv_pdu()
1192 iov.iov_base = (void *)&queue->pdu + queue->offset; in nvmet_tcp_try_recv_pdu()
1193 iov.iov_len = queue->left; in nvmet_tcp_try_recv_pdu()
1194 if (queue->tls_pskid) { in nvmet_tcp_try_recv_pdu()
1198 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvmet_tcp_try_recv_pdu()
1202 if (queue->tls_pskid) { in nvmet_tcp_try_recv_pdu()
1203 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf); in nvmet_tcp_try_recv_pdu()
1208 queue->offset += len; in nvmet_tcp_try_recv_pdu()
1209 queue->left -= len; in nvmet_tcp_try_recv_pdu()
1210 if (queue->left) in nvmet_tcp_try_recv_pdu()
1213 if (queue->offset == sizeof(struct nvme_tcp_hdr)) { in nvmet_tcp_try_recv_pdu()
1214 u8 hdgst = nvmet_tcp_hdgst_len(queue); in nvmet_tcp_try_recv_pdu()
1218 nvmet_tcp_fatal_error(queue); in nvmet_tcp_try_recv_pdu()
1227 queue->left = hdr->hlen - queue->offset + hdgst; in nvmet_tcp_try_recv_pdu()
1231 if (queue->hdr_digest && in nvmet_tcp_try_recv_pdu()
1232 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) { in nvmet_tcp_try_recv_pdu()
1233 nvmet_tcp_fatal_error(queue); /* fatal */ in nvmet_tcp_try_recv_pdu()
1237 if (queue->data_digest && in nvmet_tcp_try_recv_pdu()
1238 nvmet_tcp_check_ddgst(queue, &queue->pdu)) { in nvmet_tcp_try_recv_pdu()
1239 nvmet_tcp_fatal_error(queue); /* fatal */ in nvmet_tcp_try_recv_pdu()
1243 return nvmet_tcp_done_recv_pdu(queue); in nvmet_tcp_try_recv_pdu()
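
nvmet_tcp_try_recv_pdu() above accumulates a PDU across short reads with the offset/left pair: each read lands at &queue->pdu + offset, and the function returns early until left reaches zero. A userspace sketch of the same resumable-read bookkeeping over a pipe; the struct and helper names are made up:

    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    struct rx {
        char buf[16];
        size_t offset, left;
    };

    /* Returns 1 when complete, 0 to resume later, -1 on error. */
    static int try_recv(struct rx *r, int fd)
    {
        while (r->left) {
            ssize_t n = read(fd, r->buf + r->offset, r->left);

            if (n <= 0)
                return n < 0 ? -1 : 0;  /* would-block or EOF: resume later */
            r->offset += (size_t)n;
            r->left -= (size_t)n;
        }
        return 1;
    }

    int main(void)
    {
        int fds[2];
        struct rx r = { .left = 5 };

        if (pipe(fds))
            return 1;
        write(fds[1], "hel", 3);        /* bytes can arrive in pieces... */
        write(fds[1], "lo", 2);
        if (try_recv(&r, fds[0]) == 1)  /* ...the offset/left pair stitches them */
            printf("got %.5s\n", r.buf);
        return 0;
    }
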
1248 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_prep_recv_ddgst() local
1250 nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd); in nvmet_tcp_prep_recv_ddgst()
1251 queue->offset = 0; in nvmet_tcp_prep_recv_ddgst()
1252 queue->left = NVME_TCP_DIGEST_LENGTH; in nvmet_tcp_prep_recv_ddgst()
1253 queue->rcv_state = NVMET_TCP_RECV_DDGST; in nvmet_tcp_prep_recv_ddgst()
1256 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) in nvmet_tcp_try_recv_data() argument
1258 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_data()
1262 len = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, in nvmet_tcp_try_recv_data()
1266 if (queue->tls_pskid) { in nvmet_tcp_try_recv_data()
1267 ret = nvmet_tcp_tls_record_ok(cmd->queue, in nvmet_tcp_try_recv_data()
1277 if (queue->data_digest) { in nvmet_tcp_try_recv_data()
1285 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_try_recv_data()
1289 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue) in nvmet_tcp_try_recv_ddgst() argument
1291 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_ddgst()
1296 .iov_base = (void *)&cmd->recv_ddgst + queue->offset, in nvmet_tcp_try_recv_ddgst()
1297 .iov_len = queue->left in nvmet_tcp_try_recv_ddgst()
1300 if (queue->tls_pskid) { in nvmet_tcp_try_recv_ddgst()
1304 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvmet_tcp_try_recv_ddgst()
1308 if (queue->tls_pskid) { in nvmet_tcp_try_recv_ddgst()
1309 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf); in nvmet_tcp_try_recv_ddgst()
1314 queue->offset += len; in nvmet_tcp_try_recv_ddgst()
1315 queue->left -= len; in nvmet_tcp_try_recv_ddgst()
1316 if (queue->left) in nvmet_tcp_try_recv_ddgst()
1319 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) { in nvmet_tcp_try_recv_ddgst()
1320 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n", in nvmet_tcp_try_recv_ddgst()
1321 queue->idx, cmd->req.cmd->common.command_id, in nvmet_tcp_try_recv_ddgst()
1322 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), in nvmet_tcp_try_recv_ddgst()
1326 nvmet_tcp_fatal_error(queue); in nvmet_tcp_try_recv_ddgst()
1336 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_try_recv_ddgst()
1340 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) in nvmet_tcp_try_recv_one() argument
1344 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) in nvmet_tcp_try_recv_one()
1347 if (queue->rcv_state == NVMET_TCP_RECV_PDU) { in nvmet_tcp_try_recv_one()
1348 result = nvmet_tcp_try_recv_pdu(queue); in nvmet_tcp_try_recv_one()
1353 if (queue->rcv_state == NVMET_TCP_RECV_DATA) { in nvmet_tcp_try_recv_one()
1354 result = nvmet_tcp_try_recv_data(queue); in nvmet_tcp_try_recv_one()
1359 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) { in nvmet_tcp_try_recv_one()
1360 result = nvmet_tcp_try_recv_ddgst(queue); in nvmet_tcp_try_recv_one()
1374 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue, in nvmet_tcp_try_recv() argument
1380 ret = nvmet_tcp_try_recv_one(queue); in nvmet_tcp_try_recv()
1382 nvmet_tcp_socket_error(queue, ret); in nvmet_tcp_try_recv()
1395 struct nvmet_tcp_queue *queue = in nvmet_tcp_release_queue() local
1398 WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING); in nvmet_tcp_release_queue()
1399 queue_work(nvmet_wq, &queue->release_work); in nvmet_tcp_release_queue()
1402 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue) in nvmet_tcp_schedule_release_queue() argument
1404 spin_lock_bh(&queue->state_lock); in nvmet_tcp_schedule_release_queue()
1405 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { in nvmet_tcp_schedule_release_queue()
1407 tls_handshake_cancel(queue->sock->sk); in nvmet_tcp_schedule_release_queue()
1409 if (queue->state != NVMET_TCP_Q_DISCONNECTING) { in nvmet_tcp_schedule_release_queue()
1410 queue->state = NVMET_TCP_Q_DISCONNECTING; in nvmet_tcp_schedule_release_queue()
1411 kref_put(&queue->kref, nvmet_tcp_release_queue); in nvmet_tcp_schedule_release_queue()
1413 spin_unlock_bh(&queue->state_lock); in nvmet_tcp_schedule_release_queue()
1416 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue) in nvmet_tcp_arm_queue_deadline() argument
1418 queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs); in nvmet_tcp_arm_queue_deadline()
1421 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue, in nvmet_tcp_check_queue_deadline() argument
1428 nvmet_tcp_arm_queue_deadline(queue); in nvmet_tcp_check_queue_deadline()
1430 return !time_after(jiffies, queue->poll_end); in nvmet_tcp_check_queue_deadline()
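
nvmet_tcp_arm_queue_deadline() and the check above implement the idle detection behind the idle_poll_period_usecs module parameter (the comment fragment at the top of this listing): each productive pass re-arms poll_end, and io_work keeps rescheduling itself until the deadline passes with no operations. A rough userspace sketch with CLOCK_MONOTONIC standing in for jiffies; the 5 ms window is illustrative, not the module default:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static struct timespec poll_end;

    static struct timespec now(void)
    {
        struct timespec t;

        clock_gettime(CLOCK_MONOTONIC, &t);
        return t;
    }

    static bool time_after(struct timespec a, struct timespec b)
    {
        return a.tv_sec != b.tv_sec ? a.tv_sec > b.tv_sec
                                    : a.tv_nsec > b.tv_nsec;
    }

    static void arm_queue_deadline(long idle_usecs)
    {
        poll_end = now();
        poll_end.tv_nsec += idle_usecs * 1000L;
        poll_end.tv_sec += poll_end.tv_nsec / 1000000000L;
        poll_end.tv_nsec %= 1000000000L;
    }

    /* Keep polling while work happened recently; ops > 0 pushes the deadline out. */
    static bool check_queue_deadline(int ops)
    {
        if (ops > 0)
            arm_queue_deadline(5000);   /* illustrative idle window */
        return !time_after(now(), poll_end);
    }

    int main(void)
    {
        arm_queue_deadline(5000);
        printf("keep polling: %s\n", check_queue_deadline(0) ? "yes" : "no");
        return 0;
    }
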
1435 struct nvmet_tcp_queue *queue = in nvmet_tcp_io_work() local
1443 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops); in nvmet_tcp_io_work()
1449 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops); in nvmet_tcp_io_work()
1461 if (nvmet_tcp_check_queue_deadline(queue, ops) || pending) in nvmet_tcp_io_work()
1462 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); in nvmet_tcp_io_work()
1465 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue, in nvmet_tcp_alloc_cmd() argument
1468 u8 hdgst = nvmet_tcp_hdgst_len(queue); in nvmet_tcp_alloc_cmd()
1470 c->queue = queue; in nvmet_tcp_alloc_cmd()
1471 c->req.port = queue->port->nport; in nvmet_tcp_alloc_cmd()
1473 c->cmd_pdu = page_frag_alloc(&queue->pf_cache, in nvmet_tcp_alloc_cmd()
1479 c->rsp_pdu = page_frag_alloc(&queue->pf_cache, in nvmet_tcp_alloc_cmd()
1485 c->data_pdu = page_frag_alloc(&queue->pf_cache, in nvmet_tcp_alloc_cmd()
1490 c->r2t_pdu = page_frag_alloc(&queue->pf_cache, in nvmet_tcp_alloc_cmd()
1495 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { in nvmet_tcp_alloc_cmd()
1501 list_add_tail(&c->entry, &queue->free_list); in nvmet_tcp_alloc_cmd()
1521 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue) in nvmet_tcp_alloc_cmds() argument
1524 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds; in nvmet_tcp_alloc_cmds()
1531 ret = nvmet_tcp_alloc_cmd(queue, cmds + i); in nvmet_tcp_alloc_cmds()
1536 queue->cmds = cmds; in nvmet_tcp_alloc_cmds()
1547 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue) in nvmet_tcp_free_cmds() argument
1549 struct nvmet_tcp_cmd *cmds = queue->cmds; in nvmet_tcp_free_cmds()
1552 for (i = 0; i < queue->nr_cmds; i++) in nvmet_tcp_free_cmds()
1555 nvmet_tcp_free_cmd(&queue->connect); in nvmet_tcp_free_cmds()
1559 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) in nvmet_tcp_restore_socket_callbacks() argument
1561 struct socket *sock = queue->sock; in nvmet_tcp_restore_socket_callbacks()
1564 sock->sk->sk_data_ready = queue->data_ready; in nvmet_tcp_restore_socket_callbacks()
1565 sock->sk->sk_state_change = queue->state_change; in nvmet_tcp_restore_socket_callbacks()
1566 sock->sk->sk_write_space = queue->write_space; in nvmet_tcp_restore_socket_callbacks()
1571 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) in nvmet_tcp_uninit_data_in_cmds() argument
1573 struct nvmet_tcp_cmd *cmd = queue->cmds; in nvmet_tcp_uninit_data_in_cmds()
1576 for (i = 0; i < queue->nr_cmds; i++, cmd++) { in nvmet_tcp_uninit_data_in_cmds()
1581 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) { in nvmet_tcp_uninit_data_in_cmds()
1583 nvmet_req_uninit(&queue->connect.req); in nvmet_tcp_uninit_data_in_cmds()
1587 static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue) in nvmet_tcp_free_cmd_data_in_buffers() argument
1589 struct nvmet_tcp_cmd *cmd = queue->cmds; in nvmet_tcp_free_cmd_data_in_buffers()
1592 for (i = 0; i < queue->nr_cmds; i++, cmd++) in nvmet_tcp_free_cmd_data_in_buffers()
1594 nvmet_tcp_free_cmd_buffers(&queue->connect); in nvmet_tcp_free_cmd_data_in_buffers()
1599 struct nvmet_tcp_queue *queue = in nvmet_tcp_release_queue_work() local
1603 list_del_init(&queue->queue_list); in nvmet_tcp_release_queue_work()
1606 nvmet_tcp_restore_socket_callbacks(queue); in nvmet_tcp_release_queue_work()
1607 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work); in nvmet_tcp_release_queue_work()
1608 cancel_work_sync(&queue->io_work); in nvmet_tcp_release_queue_work()
1610 queue->rcv_state = NVMET_TCP_RECV_ERR; in nvmet_tcp_release_queue_work()
1612 nvmet_tcp_uninit_data_in_cmds(queue); in nvmet_tcp_release_queue_work()
1613 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_tcp_release_queue_work()
1614 cancel_work_sync(&queue->io_work); in nvmet_tcp_release_queue_work()
1615 nvmet_tcp_free_cmd_data_in_buffers(queue); in nvmet_tcp_release_queue_work()
1617 fput(queue->sock->file); in nvmet_tcp_release_queue_work()
1618 nvmet_tcp_free_cmds(queue); in nvmet_tcp_release_queue_work()
1619 if (queue->hdr_digest || queue->data_digest) in nvmet_tcp_release_queue_work()
1620 nvmet_tcp_free_crypto(queue); in nvmet_tcp_release_queue_work()
1621 ida_free(&nvmet_tcp_queue_ida, queue->idx); in nvmet_tcp_release_queue_work()
1622 page_frag_cache_drain(&queue->pf_cache); in nvmet_tcp_release_queue_work()
1623 kfree(queue); in nvmet_tcp_release_queue_work()
1628 struct nvmet_tcp_queue *queue; in nvmet_tcp_data_ready() local
1633 queue = sk->sk_user_data; in nvmet_tcp_data_ready()
1634 if (likely(queue)) { in nvmet_tcp_data_ready()
1635 if (queue->data_ready) in nvmet_tcp_data_ready()
1636 queue->data_ready(sk); in nvmet_tcp_data_ready()
1637 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) in nvmet_tcp_data_ready()
1638 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, in nvmet_tcp_data_ready()
1639 &queue->io_work); in nvmet_tcp_data_ready()
1646 struct nvmet_tcp_queue *queue; in nvmet_tcp_write_space() local
1649 queue = sk->sk_user_data; in nvmet_tcp_write_space()
1650 if (unlikely(!queue)) in nvmet_tcp_write_space()
1653 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { in nvmet_tcp_write_space()
1654 queue->write_space(sk); in nvmet_tcp_write_space()
1660 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); in nvmet_tcp_write_space()
1668 struct nvmet_tcp_queue *queue; in nvmet_tcp_state_change() local
1671 queue = sk->sk_user_data; in nvmet_tcp_state_change()
1672 if (!queue) in nvmet_tcp_state_change()
1683 nvmet_tcp_schedule_release_queue(queue); in nvmet_tcp_state_change()
1686 pr_warn("queue %d unhandled state %d\n", in nvmet_tcp_state_change()
1687 queue->idx, sk->sk_state); in nvmet_tcp_state_change()
1693 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) in nvmet_tcp_set_queue_sock() argument
1695 struct socket *sock = queue->sock; in nvmet_tcp_set_queue_sock()
1700 (struct sockaddr *)&queue->sockaddr); in nvmet_tcp_set_queue_sock()
1705 (struct sockaddr *)&queue->sockaddr_peer); in nvmet_tcp_set_queue_sock()
1710 * Cleanup whatever is sitting in the TCP transmit queue on socket in nvmet_tcp_set_queue_sock()
1732 sock->sk->sk_user_data = queue; in nvmet_tcp_set_queue_sock()
1733 queue->data_ready = sock->sk->sk_data_ready; in nvmet_tcp_set_queue_sock()
1735 queue->state_change = sock->sk->sk_state_change; in nvmet_tcp_set_queue_sock()
1737 queue->write_space = sock->sk->sk_write_space; in nvmet_tcp_set_queue_sock()
1740 nvmet_tcp_arm_queue_deadline(queue); in nvmet_tcp_set_queue_sock()
1741 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); in nvmet_tcp_set_queue_sock()
1749 static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue) in nvmet_tcp_try_peek_pdu() argument
1751 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_try_peek_pdu()
1754 .iov_base = (u8 *)&queue->pdu + queue->offset, in nvmet_tcp_try_peek_pdu()
1764 if (nvmet_port_secure_channel_required(queue->port->nport)) in nvmet_tcp_try_peek_pdu()
1767 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvmet_tcp_try_peek_pdu()
1770 pr_debug("queue %d: peek error %d\n", in nvmet_tcp_try_peek_pdu()
1771 queue->idx, len); in nvmet_tcp_try_peek_pdu()
1775 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf); in nvmet_tcp_try_peek_pdu()
1780 pr_debug("queue %d: short read, %d bytes missing\n", in nvmet_tcp_try_peek_pdu()
1781 queue->idx, (int)iov.iov_len - len); in nvmet_tcp_try_peek_pdu()
1784 pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n", in nvmet_tcp_try_peek_pdu()
1785 queue->idx, hdr->type, hdr->hlen, hdr->plen, in nvmet_tcp_try_peek_pdu()
1790 pr_debug("queue %d: icreq detected\n", in nvmet_tcp_try_peek_pdu()
1791 queue->idx); in nvmet_tcp_try_peek_pdu()
1800 struct nvmet_tcp_queue *queue = data; in nvmet_tcp_tls_handshake_done() local
1802 pr_debug("queue %d: TLS handshake done, key %x, status %d\n", in nvmet_tcp_tls_handshake_done()
1803 queue->idx, peerid, status); in nvmet_tcp_tls_handshake_done()
1804 spin_lock_bh(&queue->state_lock); in nvmet_tcp_tls_handshake_done()
1805 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) { in nvmet_tcp_tls_handshake_done()
1806 spin_unlock_bh(&queue->state_lock); in nvmet_tcp_tls_handshake_done()
1810 queue->tls_pskid = peerid; in nvmet_tcp_tls_handshake_done()
1811 queue->state = NVMET_TCP_Q_CONNECTING; in nvmet_tcp_tls_handshake_done()
1813 queue->state = NVMET_TCP_Q_FAILED; in nvmet_tcp_tls_handshake_done()
1814 spin_unlock_bh(&queue->state_lock); in nvmet_tcp_tls_handshake_done()
1816 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work); in nvmet_tcp_tls_handshake_done()
1818 nvmet_tcp_schedule_release_queue(queue); in nvmet_tcp_tls_handshake_done()
1820 nvmet_tcp_set_queue_sock(queue); in nvmet_tcp_tls_handshake_done()
1821 kref_put(&queue->kref, nvmet_tcp_release_queue); in nvmet_tcp_tls_handshake_done()
1826 struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w), in nvmet_tcp_tls_handshake_timeout() local
1829 pr_warn("queue %d: TLS handshake timeout\n", queue->idx); in nvmet_tcp_tls_handshake_timeout()
1833 if (!tls_handshake_cancel(queue->sock->sk)) in nvmet_tcp_tls_handshake_timeout()
1835 spin_lock_bh(&queue->state_lock); in nvmet_tcp_tls_handshake_timeout()
1836 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) { in nvmet_tcp_tls_handshake_timeout()
1837 spin_unlock_bh(&queue->state_lock); in nvmet_tcp_tls_handshake_timeout()
1840 queue->state = NVMET_TCP_Q_FAILED; in nvmet_tcp_tls_handshake_timeout()
1841 spin_unlock_bh(&queue->state_lock); in nvmet_tcp_tls_handshake_timeout()
1842 nvmet_tcp_schedule_release_queue(queue); in nvmet_tcp_tls_handshake_timeout()
1843 kref_put(&queue->kref, nvmet_tcp_release_queue); in nvmet_tcp_tls_handshake_timeout()
1846 static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue) in nvmet_tcp_tls_handshake() argument
1851 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) { in nvmet_tcp_tls_handshake()
1852 pr_warn("cannot start TLS in state %d\n", queue->state); in nvmet_tcp_tls_handshake()
1856 kref_get(&queue->kref); in nvmet_tcp_tls_handshake()
1857 pr_debug("queue %d: TLS ServerHello\n", queue->idx); in nvmet_tcp_tls_handshake()
1859 args.ta_sock = queue->sock; in nvmet_tcp_tls_handshake()
1861 args.ta_data = queue; in nvmet_tcp_tls_handshake()
1862 args.ta_keyring = key_serial(queue->port->nport->keyring); in nvmet_tcp_tls_handshake()
1867 kref_put(&queue->kref, nvmet_tcp_release_queue); in nvmet_tcp_tls_handshake()
1870 queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work, in nvmet_tcp_tls_handshake()
1882 struct nvmet_tcp_queue *queue; in nvmet_tcp_alloc_queue() local
1886 queue = kzalloc(sizeof(*queue), GFP_KERNEL); in nvmet_tcp_alloc_queue()
1887 if (!queue) { in nvmet_tcp_alloc_queue()
1892 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work); in nvmet_tcp_alloc_queue()
1893 INIT_WORK(&queue->io_work, nvmet_tcp_io_work); in nvmet_tcp_alloc_queue()
1894 kref_init(&queue->kref); in nvmet_tcp_alloc_queue()
1895 queue->sock = newsock; in nvmet_tcp_alloc_queue()
1896 queue->port = port; in nvmet_tcp_alloc_queue()
1897 queue->nr_cmds = 0; in nvmet_tcp_alloc_queue()
1898 spin_lock_init(&queue->state_lock); in nvmet_tcp_alloc_queue()
1899 if (queue->port->nport->disc_addr.tsas.tcp.sectype == in nvmet_tcp_alloc_queue()
1901 queue->state = NVMET_TCP_Q_TLS_HANDSHAKE; in nvmet_tcp_alloc_queue()
1903 queue->state = NVMET_TCP_Q_CONNECTING; in nvmet_tcp_alloc_queue()
1904 INIT_LIST_HEAD(&queue->free_list); in nvmet_tcp_alloc_queue()
1905 init_llist_head(&queue->resp_list); in nvmet_tcp_alloc_queue()
1906 INIT_LIST_HEAD(&queue->resp_send_list); in nvmet_tcp_alloc_queue()
1908 sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL); in nvmet_tcp_alloc_queue()
1914 queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL); in nvmet_tcp_alloc_queue()
1915 if (queue->idx < 0) { in nvmet_tcp_alloc_queue()
1916 ret = queue->idx; in nvmet_tcp_alloc_queue()
1920 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect); in nvmet_tcp_alloc_queue()
1924 ret = nvmet_sq_init(&queue->nvme_sq); in nvmet_tcp_alloc_queue()
1928 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_alloc_queue()
1931 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list); in nvmet_tcp_alloc_queue()
1934 INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work, in nvmet_tcp_alloc_queue()
1937 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { in nvmet_tcp_alloc_queue()
1938 struct sock *sk = queue->sock->sk; in nvmet_tcp_alloc_queue()
1945 if (!nvmet_tcp_try_peek_pdu(queue)) { in nvmet_tcp_alloc_queue()
1946 if (!nvmet_tcp_tls_handshake(queue)) in nvmet_tcp_alloc_queue()
1952 queue->state = NVMET_TCP_Q_CONNECTING; in nvmet_tcp_alloc_queue()
1956 ret = nvmet_tcp_set_queue_sock(queue); in nvmet_tcp_alloc_queue()
1963 list_del_init(&queue->queue_list); in nvmet_tcp_alloc_queue()
1965 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_tcp_alloc_queue()
1967 nvmet_tcp_free_cmd(&queue->connect); in nvmet_tcp_alloc_queue()
1969 ida_free(&nvmet_tcp_queue_ida, queue->idx); in nvmet_tcp_alloc_queue()
1971 fput(queue->sock->file); in nvmet_tcp_alloc_queue()
1973 kfree(queue); in nvmet_tcp_alloc_queue()
1975 pr_err("failed to allocate queue, error %d\n", ret); in nvmet_tcp_alloc_queue()
2095 struct nvmet_tcp_queue *queue; in nvmet_tcp_destroy_port_queues() local
2098 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) in nvmet_tcp_destroy_port_queues()
2099 if (queue->port == port) in nvmet_tcp_destroy_port_queues()
2100 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_destroy_port_queues()
2125 struct nvmet_tcp_queue *queue; in nvmet_tcp_delete_ctrl() local
2128 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) in nvmet_tcp_delete_ctrl()
2129 if (queue->nvme_sq.ctrl == ctrl) in nvmet_tcp_delete_ctrl()
2130 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_delete_ctrl()
2136 struct nvmet_tcp_queue *queue = in nvmet_tcp_install_queue() local
2155 queue->nr_cmds = sq->size * 2; in nvmet_tcp_install_queue()
2156 if (nvmet_tcp_alloc_cmds(queue)) { in nvmet_tcp_install_queue()
2157 queue->nr_cmds = 0; in nvmet_tcp_install_queue()
2171 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_disc_port_addr() local
2173 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr); in nvmet_tcp_disc_port_addr()
2183 struct nvmet_tcp_queue *queue = in nvmet_tcp_host_port_addr() local
2186 if (queue->sockaddr_peer.ss_family == AF_UNSPEC) in nvmet_tcp_host_port_addr()
2189 (struct sockaddr *)&queue->sockaddr_peer); in nvmet_tcp_host_port_addr()
2226 struct nvmet_tcp_queue *queue; in nvmet_tcp_exit() local
2232 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) in nvmet_tcp_exit()
2233 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_exit()