Lines matching full:queue — full-text search hits for "queue" in the NVMe/TCP host driver (drivers/nvme/host/tcp.c); each entry shows the source line number, the matching line, and its enclosing function or context.
107 struct nvme_tcp_queue *queue; member
208 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
215 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) in nvme_tcp_queue_id() argument
217 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
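
nvme_tcp_queue_id() recovers a queue's index by pointer arithmetic: the queue structures live in one array hanging off the controller, so subtracting the array base yields the id (0 is the admin queue). A minimal standalone sketch of the same pointer-difference trick; the struct names here are stand-ins, not the kernel's:

    #include <stdio.h>

    struct q { int dummy; };

    struct ctrl {
        struct q queues[8];   /* queues[0] is the admin queue */
    };

    /* index = element pointer minus array base (pointer difference) */
    static int queue_id(const struct ctrl *c, const struct q *queue)
    {
        return (int)(queue - c->queues);
    }

    int main(void)
    {
        struct ctrl c;
        printf("%d\n", queue_id(&c, &c.queues[3])); /* prints 3 */
        return 0;
    }

Storing no explicit index keeps the per-queue struct smaller, at the cost of tying the helper to the array layout.
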
234 * Check if the queue is TLS encrypted
236 static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue) in nvme_tcp_queue_tls() argument
241 return queue->tls_enabled; in nvme_tcp_queue_tls()
255 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) in nvme_tcp_tagset() argument
257 u32 queue_idx = nvme_tcp_queue_id(queue); in nvme_tcp_tagset()
260 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
261 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
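
nvme_tcp_tagset() picks the blk-mq tag set for a queue: index 0 maps to the admin tag set, while IO queue i uses tags[i - 1] because the IO tag array has no slot for the admin queue. A sketch of the same off-by-one mapping, with illustrative types:

    #include <assert.h>

    struct tagset { int unused; };

    struct ctrl {
        struct tagset admin_tags;   /* serves queue index 0 only */
        struct tagset io_tags[7];   /* io_tags[i] serves queue index i + 1 */
    };

    /* hypothetical shape: the IO tag array is shifted down by one */
    static struct tagset *tagset_for(struct ctrl *c, int qidx)
    {
        if (qidx == 0)
            return &c->admin_tags;
        return &c->io_tags[qidx - 1];
    }

    int main(void)
    {
        struct ctrl c;
        assert(tagset_for(&c, 0) == &c.admin_tags);
        assert(tagset_for(&c, 1) == &c.io_tags[0]);
        assert(tagset_for(&c, 7) == &c.io_tags[6]);
        return 0;
    }
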
264 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue) in nvme_tcp_hdgst_len() argument
266 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_hdgst_len()
269 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue) in nvme_tcp_ddgst_len() argument
271 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_ddgst_len()
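
The two digest helpers return NVME_TCP_DIGEST_LENGTH (4 bytes, a CRC32C) when the respective digest was negotiated and 0 otherwise; the send and receive paths then add these lengths when sizing PDUs. A sketch of how such helpers compose into an on-wire length, assuming a 24-byte header and that a data digest only trails a non-empty payload:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define DIGEST_LEN 4   /* NVMe/TCP digests are 4-byte CRC32Cs */

    struct q { bool hdr_digest, data_digest; };

    static size_t hdgst_len(const struct q *q) { return q->hdr_digest  ? DIGEST_LEN : 0; }
    static size_t ddgst_len(const struct q *q) { return q->data_digest ? DIGEST_LEN : 0; }

    /* on-wire PDU size: fixed header, optional header digest,
     * payload, optional trailing data digest */
    static size_t pdu_wire_len(const struct q *q, size_t hdr, size_t payload)
    {
        return hdr + hdgst_len(q) + payload + (payload ? ddgst_len(q) : 0);
    }

    int main(void)
    {
        struct q q = { .hdr_digest = true, .data_digest = true };
        printf("%zu\n", pdu_wire_len(&q, 24, 4096)); /* 24 + 4 + 4096 + 4 */
        return 0;
    }
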
290 return req->queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_tcp_inline_data_size()
295 return req == &req->queue->ctrl->async_req; in nvme_tcp_async_req()
384 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) in nvme_tcp_send_all() argument
388 /* drain the send queue as much as we can... */ in nvme_tcp_send_all()
390 ret = nvme_tcp_try_send(queue); in nvme_tcp_send_all()
394 static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue) in nvme_tcp_queue_has_pending() argument
396 return !list_empty(&queue->send_list) || in nvme_tcp_queue_has_pending()
397 !llist_empty(&queue->req_list); in nvme_tcp_queue_has_pending()
400 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) in nvme_tcp_queue_more() argument
402 return !nvme_tcp_queue_tls(queue) && in nvme_tcp_queue_more()
403 nvme_tcp_queue_has_pending(queue); in nvme_tcp_queue_more()
409 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_queue_request() local
412 empty = llist_add(&req->lentry, &queue->req_list) && in nvme_tcp_queue_request()
413 list_empty(&queue->send_list) && !queue->request; in nvme_tcp_queue_request()
417 * directly, otherwise queue io_work. Also, only do that if we in nvme_tcp_queue_request()
420 if (queue->io_cpu == raw_smp_processor_id() && in nvme_tcp_queue_request()
421 empty && mutex_trylock(&queue->send_mutex)) { in nvme_tcp_queue_request()
422 nvme_tcp_send_all(queue); in nvme_tcp_queue_request()
423 mutex_unlock(&queue->send_mutex); in nvme_tcp_queue_request()
426 if (last && nvme_tcp_queue_has_pending(queue)) in nvme_tcp_queue_request()
427 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_queue_request()
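
nvme_tcp_queue_request() pushes the request onto the lock-free req_list, then sends inline when the queue was otherwise empty, the caller already runs on the queue's io_cpu, and the send mutex is uncontended; anything else is deferred to io_work (and nvme_tcp_queue_more() suppresses the batching path entirely on TLS queues). A userspace sketch of the trylock-or-defer shape, assuming pthreads and sched_getcpu(); it omits the last/pending re-kick the kernel does afterwards, and the double sched_getcpu() read is racy in real life:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t send_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void send_all(void)     { puts("sent inline"); }
    static void kick_io_work(void) { puts("deferred to io worker"); }

    /* hypothetical shape: send inline only if we are on the queue's
     * designated CPU, the queue was empty, and the lock is free */
    static void queue_request(int io_cpu, bool was_empty)
    {
        if (io_cpu == sched_getcpu() && was_empty &&
            pthread_mutex_trylock(&send_mutex) == 0) {
            send_all();
            pthread_mutex_unlock(&send_mutex);
            return;
        }
        kick_io_work();
    }

    int main(void)
    {
        queue_request(sched_getcpu(), true);  /* likely sends inline */
        queue_request(-1, true);              /* wrong CPU: defers */
        return 0;
    }
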
430 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue) in nvme_tcp_process_req_list() argument
435 for (node = llist_del_all(&queue->req_list); node; node = node->next) { in nvme_tcp_process_req_list()
437 list_add(&req->entry, &queue->send_list); in nvme_tcp_process_req_list()
442 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue) in nvme_tcp_fetch_request() argument
446 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
449 nvme_tcp_process_req_list(queue); in nvme_tcp_fetch_request()
450 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
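
nvme_tcp_fetch_request() pops from send_list and, when that runs dry, splices req_list into it via nvme_tcp_process_req_list(). llist_del_all() returns nodes newest-first, and prepending each one with list_add() reverses that back into submission order. A C11-atomics sketch of the same push / drain / reverse pattern (names are illustrative, not the kernel's llist API):

    #include <stdatomic.h>
    #include <stdio.h>

    struct node { int val; struct node *next; };

    static _Atomic(struct node *) req_list; /* lock-free MPSC push side */

    static void push(struct node *n)        /* like llist_add() */
    {
        n->next = atomic_load(&req_list);
        while (!atomic_compare_exchange_weak(&req_list, &n->next, n))
            ;
    }

    static struct node *drain_fifo(void)    /* llist_del_all() + reversal */
    {
        struct node *n = atomic_exchange(&req_list, NULL); /* LIFO chain */
        struct node *fifo = NULL;

        while (n) {                  /* prepend to reverse into FIFO */
            struct node *next = n->next;
            n->next = fifo;
            fifo = n;
            n = next;
        }
        return fifo;
    }

    int main(void)
    {
        struct node a = {1}, b = {2}, c = {3};

        push(&a); push(&b); push(&c);
        for (struct node *n = drain_fifo(); n; n = n->next)
            printf("%d\n", n->val);  /* 1 2 3: submission order */
        return 0;
    }

Keeping the producer side lock-free means submitters never contend with each other; only the drain runs under the send path.
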
495 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue, in nvme_tcp_verify_hdgst() argument
503 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
504 "queue %d: header digest flag is cleared\n", in nvme_tcp_verify_hdgst()
505 nvme_tcp_queue_id(queue)); in nvme_tcp_verify_hdgst()
512 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
521 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu) in nvme_tcp_check_ddgst() argument
524 u8 digest_len = nvme_tcp_hdgst_len(queue); in nvme_tcp_check_ddgst()
531 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_check_ddgst()
532 "queue %d: data digest flag is cleared\n", in nvme_tcp_check_ddgst()
533 nvme_tcp_queue_id(queue)); in nvme_tcp_check_ddgst()
536 queue->rcv_crc = NVME_TCP_CRC_SEED; in nvme_tcp_check_ddgst()
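
nvme_tcp_verify_hdgst() and nvme_tcp_check_ddgst() insist that the PDU's digest flags match what was negotiated, verify the 4-byte CRC32C that follows the header, and reseed rcv_crc for the data pass. A self-contained sketch using a bitwise CRC32C (reflected Castagnoli polynomial 0x82F63B78); the kernel uses its crc32c() helpers and little-endian digests, so treat this as illustrative only:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78 */
    static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
    {
        const uint8_t *p = buf;

        crc = ~crc;
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
        }
        return ~crc;
    }

    /* digest over hdr[0..hlen) must equal the 4 bytes that follow it */
    static int verify_hdgst(const uint8_t *hdr, size_t hlen)
    {
        uint32_t got, exp = crc32c(0, hdr, hlen);

        memcpy(&got, hdr + hlen, 4);   /* digest travels after header */
        return got == exp ? 0 : -1;
    }

    int main(void)
    {
        uint8_t pdu[28] = { 0x05, 24 }; /* fake 24-byte header + digest */
        uint32_t d = crc32c(0, pdu, 24);

        memcpy(pdu + 24, &d, 4);
        printf("%s\n", verify_hdgst(pdu, 24) ? "bad" : "ok");
        return 0;
    }
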
557 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; in nvme_tcp_init_request() local
558 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_init_request()
560 req->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_init_request()
567 req->queue = queue; in nvme_tcp_init_request()
580 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_tcp_init_hctx() local
582 hctx->driver_data = queue; in nvme_tcp_init_hctx()
590 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_init_admin_hctx() local
592 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
597 nvme_tcp_recv_state(struct nvme_tcp_queue *queue) in nvme_tcp_recv_state() argument
599 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : in nvme_tcp_recv_state()
600 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : in nvme_tcp_recv_state()
604 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue) in nvme_tcp_init_recv_ctx() argument
606 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + in nvme_tcp_init_recv_ctx()
607 nvme_tcp_hdgst_len(queue); in nvme_tcp_init_recv_ctx()
608 queue->pdu_offset = 0; in nvme_tcp_init_recv_ctx()
609 queue->data_remaining = -1; in nvme_tcp_init_recv_ctx()
610 queue->ddgst_remaining = 0; in nvme_tcp_init_recv_ctx()
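
The receive path is a small state machine driven entirely by remaining-byte counters: header bytes outstanding means RECV_PDU, a pending trailing digest means RECV_DDGST, otherwise RECV_DATA; nvme_tcp_init_recv_ctx() re-arms pdu_remaining for the next header. A simplified sketch (the kernel parks data_remaining at -1 rather than 0, elided here):

    #include <stdio.h>

    enum recv_state { RECV_PDU, RECV_DATA, RECV_DDGST };

    struct q {
        int pdu_remaining;    /* header bytes still expected */
        int data_remaining;   /* payload bytes still expected */
        int ddgst_remaining;  /* trailing digest bytes still expected */
    };

    /* state is implied by which counter is non-zero, checked in order */
    static enum recv_state recv_state(const struct q *q)
    {
        return q->pdu_remaining   ? RECV_PDU   :
               q->ddgst_remaining ? RECV_DDGST : RECV_DATA;
    }

    static void init_recv_ctx(struct q *q, int hdr_len)
    {
        q->pdu_remaining = hdr_len; /* re-arm for the next PDU header */
        q->data_remaining = 0;
        q->ddgst_remaining = 0;
    }

    int main(void)
    {
        struct q q;

        init_recv_ctx(&q, 24);
        printf("%d\n", recv_state(&q)); /* 0: RECV_PDU */
        q.pdu_remaining = 0;
        q.data_remaining = 4096;
        printf("%d\n", recv_state(&q)); /* 1: RECV_DATA */
        return 0;
    }
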
622 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue, in nvme_tcp_process_nvme_cqe() argument
628 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
630 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_process_nvme_cqe()
631 "got bad cqe.command_id %#x on queue %d\n", in nvme_tcp_process_nvme_cqe()
632 cqe->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_process_nvme_cqe()
633 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_process_nvme_cqe()
643 queue->nr_cqe++; in nvme_tcp_process_nvme_cqe()
648 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue, in nvme_tcp_handle_c2h_data() argument
653 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
655 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
656 "got bad c2hdata.command_id %#x on queue %d\n", in nvme_tcp_handle_c2h_data()
657 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_c2h_data()
662 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
663 "queue %d tag %#x unexpected data\n", in nvme_tcp_handle_c2h_data()
664 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
668 queue->data_remaining = le32_to_cpu(pdu->data_length); in nvme_tcp_handle_c2h_data()
672 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
673 "queue %d tag %#x SUCCESS set but not last PDU\n", in nvme_tcp_handle_c2h_data()
674 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
675 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_handle_c2h_data()
682 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, in nvme_tcp_handle_comp() argument
690 * survive any kind of queue freeze and often don't respond to in nvme_tcp_handle_comp()
694 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue), in nvme_tcp_handle_comp()
696 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_tcp_handle_comp()
699 ret = nvme_tcp_process_nvme_cqe(queue, cqe); in nvme_tcp_handle_comp()
707 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_h2c_data_pdu() local
710 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_setup_h2c_data_pdu()
711 u8 ddgst = nvme_tcp_ddgst_len(queue); in nvme_tcp_setup_h2c_data_pdu()
715 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata); in nvme_tcp_setup_h2c_data_pdu()
724 if (queue->hdr_digest) in nvme_tcp_setup_h2c_data_pdu()
726 if (queue->data_digest) in nvme_tcp_setup_h2c_data_pdu()
738 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, in nvme_tcp_handle_r2t() argument
746 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_r2t()
748 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
749 "got bad r2t.command_id %#x on queue %d\n", in nvme_tcp_handle_r2t()
750 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_r2t()
756 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
763 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
770 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
778 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
791 llist_add(&req->lentry, &queue->req_list); in nvme_tcp_handle_r2t()
792 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_handle_r2t()
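
nvme_tcp_handle_r2t() validates the controller's transfer request and requeues the command; nvme_tcp_setup_h2c_data_pdu() then caps each H2CData PDU at the maxh2cdata value negotiated in ICResp via min(req->h2cdata_left, queue->maxh2cdata). A sketch of that chunking loop, with the printout standing in for actual PDU construction:

    #include <stdio.h>

    /* split an R2T transfer into H2CData chunks of at most maxh2cdata
     * bytes; the final chunk carries the LAST_PDU flag */
    static void send_h2c_chunks(unsigned r2t_len, unsigned maxh2cdata)
    {
        unsigned left = r2t_len, off = 0;

        while (left) {
            unsigned chunk = left < maxh2cdata ? left : maxh2cdata;
            int last = (chunk == left);

            printf("H2CData off=%u len=%u%s\n", off, chunk,
                   last ? " [LAST_PDU]" : "");
            off += chunk;
            left -= chunk;
        }
    }

    int main(void)
    {
        send_h2c_chunks(100000, 65536); /* two PDUs: 65536 + 34464 */
        return 0;
    }
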
797 static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue, in nvme_tcp_handle_c2h_term() argument
815 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_term()
827 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_term()
831 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, in nvme_tcp_recv_pdu() argument
835 char *pdu = queue->pdu; in nvme_tcp_recv_pdu()
836 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); in nvme_tcp_recv_pdu()
840 &pdu[queue->pdu_offset], rcv_len); in nvme_tcp_recv_pdu()
844 queue->pdu_remaining -= rcv_len; in nvme_tcp_recv_pdu()
845 queue->pdu_offset += rcv_len; in nvme_tcp_recv_pdu()
848 if (queue->pdu_remaining) in nvme_tcp_recv_pdu()
851 hdr = queue->pdu; in nvme_tcp_recv_pdu()
856 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
867 nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
871 if (queue->hdr_digest) { in nvme_tcp_recv_pdu()
872 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); in nvme_tcp_recv_pdu()
878 if (queue->data_digest) { in nvme_tcp_recv_pdu()
879 ret = nvme_tcp_check_ddgst(queue, queue->pdu); in nvme_tcp_recv_pdu()
886 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
888 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_pdu()
889 return nvme_tcp_handle_comp(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
891 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_pdu()
892 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
898 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
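
nvme_tcp_recv_pdu() may see a header split across several socket buffers, so it copies min(*len, pdu_remaining) bytes per visit and advances pdu_offset until the header (plus any header digest) is complete, only then dispatching on hdr->type. A sketch of that accumulate-then-dispatch pattern:

    #include <stdio.h>
    #include <string.h>

    struct q {
        char pdu[128];
        size_t pdu_offset;     /* bytes of header gathered so far */
        size_t pdu_remaining;  /* bytes of header still missing */
    };

    /* consume up to *len bytes; returns 1 once the header is complete */
    static int recv_pdu(struct q *q, const char *buf, size_t *len)
    {
        size_t rcv = *len < q->pdu_remaining ? *len : q->pdu_remaining;

        memcpy(&q->pdu[q->pdu_offset], buf, rcv);
        q->pdu_offset += rcv;
        q->pdu_remaining -= rcv;
        *len -= rcv;
        return q->pdu_remaining == 0; /* complete: dispatch on type */
    }

    int main(void)
    {
        struct q q = { .pdu_remaining = 24 };
        char part1[10] = {0}, part2[14] = {0};
        size_t l1 = sizeof(part1), l2 = sizeof(part2);

        printf("%d\n", recv_pdu(&q, part1, &l1)); /* 0: 14 bytes missing */
        printf("%d\n", recv_pdu(&q, part2, &l2)); /* 1: header complete */
        return 0;
    }
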
911 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, in nvme_tcp_recv_data() argument
914 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_data()
916 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_recv_data()
922 recv_len = min_t(size_t, *len, queue->data_remaining); in nvme_tcp_recv_data()
934 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
935 "queue %d no space in request %#x", in nvme_tcp_recv_data()
936 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
937 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_data()
947 if (queue->data_digest) in nvme_tcp_recv_data()
949 &req->iter, recv_len, &queue->rcv_crc); in nvme_tcp_recv_data()
954 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
955 "queue %d failed to copy request %#x data", in nvme_tcp_recv_data()
956 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
962 queue->data_remaining -= recv_len; in nvme_tcp_recv_data()
965 if (!queue->data_remaining) { in nvme_tcp_recv_data()
966 if (queue->data_digest) { in nvme_tcp_recv_data()
967 queue->exp_ddgst = nvme_tcp_ddgst_final(queue->rcv_crc); in nvme_tcp_recv_data()
968 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; in nvme_tcp_recv_data()
973 queue->nr_cqe++; in nvme_tcp_recv_data()
975 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_data()
982 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue, in nvme_tcp_recv_ddgst() argument
985 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_ddgst()
986 char *ddgst = (char *)&queue->recv_ddgst; in nvme_tcp_recv_ddgst()
987 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); in nvme_tcp_recv_ddgst()
988 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; in nvme_tcp_recv_ddgst()
995 queue->ddgst_remaining -= recv_len; in nvme_tcp_recv_ddgst()
998 if (queue->ddgst_remaining) in nvme_tcp_recv_ddgst()
1001 if (queue->recv_ddgst != queue->exp_ddgst) { in nvme_tcp_recv_ddgst()
1002 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), in nvme_tcp_recv_ddgst()
1008 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_ddgst()
1010 le32_to_cpu(queue->recv_ddgst), in nvme_tcp_recv_ddgst()
1011 le32_to_cpu(queue->exp_ddgst)); in nvme_tcp_recv_ddgst()
1015 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), in nvme_tcp_recv_ddgst()
1020 queue->nr_cqe++; in nvme_tcp_recv_ddgst()
1023 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_ddgst()
1030 struct nvme_tcp_queue *queue = desc->arg.data; in nvme_tcp_recv_skb() local
1034 if (unlikely(!queue->rd_enabled)) in nvme_tcp_recv_skb()
1038 switch (nvme_tcp_recv_state(queue)) { in nvme_tcp_recv_skb()
1040 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
1043 result = nvme_tcp_recv_data(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
1046 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
1052 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_skb()
1054 queue->rd_enabled = false; in nvme_tcp_recv_skb()
1055 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_recv_skb()
1065 struct nvme_tcp_queue *queue; in nvme_tcp_data_ready() local
1070 queue = sk->sk_user_data; in nvme_tcp_data_ready()
1071 if (likely(queue && queue->rd_enabled) && in nvme_tcp_data_ready()
1072 !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) in nvme_tcp_data_ready()
1073 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_data_ready()
1079 struct nvme_tcp_queue *queue; in nvme_tcp_write_space() local
1082 queue = sk->sk_user_data; in nvme_tcp_write_space()
1083 if (likely(queue && sk_stream_is_writeable(sk))) { in nvme_tcp_write_space()
1086 if (nvme_tcp_queue_tls(queue)) in nvme_tcp_write_space()
1087 queue->write_space(sk); in nvme_tcp_write_space()
1088 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_write_space()
1095 struct nvme_tcp_queue *queue; in nvme_tcp_state_change() local
1098 queue = sk->sk_user_data; in nvme_tcp_state_change()
1099 if (!queue) in nvme_tcp_state_change()
1108 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_state_change()
1111 dev_info(queue->ctrl->ctrl.device, in nvme_tcp_state_change()
1112 "queue %d socket state %d\n", in nvme_tcp_state_change()
1113 nvme_tcp_queue_id(queue), sk->sk_state); in nvme_tcp_state_change()
1116 queue->state_change(sk); in nvme_tcp_state_change()
1121 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) in nvme_tcp_done_send_req() argument
1123 queue->request = NULL; in nvme_tcp_done_send_req()
1131 nvme_complete_async_event(&req->queue->ctrl->ctrl, in nvme_tcp_fail_request()
1141 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data() local
1157 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_data()
1167 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_data()
1171 if (queue->data_digest) in nvme_tcp_try_send_data()
1172 nvme_tcp_ddgst_update(&queue->snd_crc, page, in nvme_tcp_try_send_data()
1185 if (queue->data_digest) { in nvme_tcp_try_send_data()
1187 nvme_tcp_ddgst_final(queue->snd_crc); in nvme_tcp_try_send_data()
1194 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_data()
1204 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_cmd_pdu() local
1209 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_try_send_cmd_pdu()
1213 if (inline_data || nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_cmd_pdu()
1218 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_cmd_pdu()
1223 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_cmd_pdu()
1231 if (queue->data_digest) in nvme_tcp_try_send_cmd_pdu()
1232 queue->snd_crc = NVME_TCP_CRC_SEED; in nvme_tcp_try_send_cmd_pdu()
1234 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_cmd_pdu()
1245 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data_pdu() local
1249 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_try_send_data_pdu()
1253 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_data_pdu()
1261 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_data_pdu()
1268 if (queue->data_digest) in nvme_tcp_try_send_data_pdu()
1269 queue->snd_crc = NVME_TCP_CRC_SEED; in nvme_tcp_try_send_data_pdu()
1279 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_ddgst() local
1289 if (nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_ddgst()
1294 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_try_send_ddgst()
1302 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_ddgst()
1310 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue) in nvme_tcp_try_send() argument
1316 if (!queue->request) { in nvme_tcp_try_send()
1317 queue->request = nvme_tcp_fetch_request(queue); in nvme_tcp_try_send()
1318 if (!queue->request) in nvme_tcp_try_send()
1321 req = queue->request; in nvme_tcp_try_send()
1350 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_try_send()
1352 nvme_tcp_fail_request(queue->request); in nvme_tcp_try_send()
1353 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send()
1360 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue) in nvme_tcp_try_recv() argument
1362 struct socket *sock = queue->sock; in nvme_tcp_try_recv()
1367 rd_desc.arg.data = queue; in nvme_tcp_try_recv()
1370 queue->nr_cqe = 0; in nvme_tcp_try_recv()
1378 struct nvme_tcp_queue *queue = in nvme_tcp_io_work() local
1386 if (mutex_trylock(&queue->send_mutex)) { in nvme_tcp_io_work()
1387 result = nvme_tcp_try_send(queue); in nvme_tcp_io_work()
1388 mutex_unlock(&queue->send_mutex); in nvme_tcp_io_work()
1395 result = nvme_tcp_try_recv(queue); in nvme_tcp_io_work()
1402 if (nvme_tcp_queue_has_pending(queue) && in nvme_tcp_io_work()
1403 sk_stream_is_writeable(queue->sock->sk)) in nvme_tcp_io_work()
1406 if (!pending || !queue->rd_enabled) in nvme_tcp_io_work()
1411 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_io_work()
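
nvme_tcp_io_work() alternates between trying to send (under the send mutex) and receive, and returns as soon as neither side makes progress; otherwise it loops until a time budget expires and requeues itself so one queue cannot monopolize the worker. A hedged userspace sketch of the bounded-loop shape (the 1 ms budget is a placeholder, and the kernel also rechecks socket writeability and rd_enabled before requeueing):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static long long now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    /* stubs: each returns true if it made forward progress */
    static bool try_send(void) { return false; }
    static bool try_recv(void) { return false; }
    static void requeue_io_work(void) { puts("requeue"); }

    /* run for at most ~1 ms per invocation, then hand the CPU back;
     * if the deadline expires with work still flowing, requeue */
    static void io_work(void)
    {
        long long deadline = now_ns() + 1000000;

        do {
            bool pending = false;

            pending |= try_send();
            pending |= try_recv();
            if (!pending)
                return;  /* idle: the next data_ready wakes us */
        } while (now_ns() < deadline);

        requeue_io_work();
    }

    int main(void) { io_work(); return 0; }
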
1423 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req() local
1425 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_alloc_async_req()
1427 async->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_alloc_async_req()
1433 async->queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1440 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_free_queue() local
1443 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_free_queue()
1446 page_frag_cache_drain(&queue->pf_cache); in nvme_tcp_free_queue()
1450 fput(queue->sock->file); in nvme_tcp_free_queue()
1451 queue->sock = NULL; in nvme_tcp_free_queue()
1454 kfree(queue->pdu); in nvme_tcp_free_queue()
1455 mutex_destroy(&queue->send_mutex); in nvme_tcp_free_queue()
1456 mutex_destroy(&queue->queue_lock); in nvme_tcp_free_queue()
1459 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) in nvme_tcp_init_connection() argument
1488 if (queue->hdr_digest) in nvme_tcp_init_connection()
1490 if (queue->data_digest) in nvme_tcp_init_connection()
1495 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_init_connection()
1497 pr_warn("queue %d: failed to send icreq, error %d\n", in nvme_tcp_init_connection()
1498 nvme_tcp_queue_id(queue), ret); in nvme_tcp_init_connection()
1505 if (nvme_tcp_queue_tls(queue)) { in nvme_tcp_init_connection()
1510 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvme_tcp_init_connection()
1515 pr_warn("queue %d: failed to receive icresp, error %d\n", in nvme_tcp_init_connection()
1516 nvme_tcp_queue_id(queue), ret); in nvme_tcp_init_connection()
1520 if (nvme_tcp_queue_tls(queue)) { in nvme_tcp_init_connection()
1521 ctype = tls_get_record_type(queue->sock->sk, in nvme_tcp_init_connection()
1524 pr_err("queue %d: unhandled TLS record %d\n", in nvme_tcp_init_connection()
1525 nvme_tcp_queue_id(queue), ctype); in nvme_tcp_init_connection()
1531 pr_err("queue %d: bad type returned %d\n", in nvme_tcp_init_connection()
1532 nvme_tcp_queue_id(queue), icresp->hdr.type); in nvme_tcp_init_connection()
1537 pr_err("queue %d: bad pdu length returned %d\n", in nvme_tcp_init_connection()
1538 nvme_tcp_queue_id(queue), icresp->hdr.plen); in nvme_tcp_init_connection()
1543 pr_err("queue %d: bad pfv returned %d\n", in nvme_tcp_init_connection()
1544 nvme_tcp_queue_id(queue), icresp->pfv); in nvme_tcp_init_connection()
1549 if ((queue->data_digest && !ctrl_ddgst) || in nvme_tcp_init_connection()
1550 (!queue->data_digest && ctrl_ddgst)) { in nvme_tcp_init_connection()
1551 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1552 nvme_tcp_queue_id(queue), in nvme_tcp_init_connection()
1553 queue->data_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1559 if ((queue->hdr_digest && !ctrl_hdgst) || in nvme_tcp_init_connection()
1560 (!queue->hdr_digest && ctrl_hdgst)) { in nvme_tcp_init_connection()
1561 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1562 nvme_tcp_queue_id(queue), in nvme_tcp_init_connection()
1563 queue->hdr_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1569 pr_err("queue %d: unsupported cpda returned %d\n", in nvme_tcp_init_connection()
1570 nvme_tcp_queue_id(queue), icresp->cpda); in nvme_tcp_init_connection()
1576 pr_err("queue %d: invalid maxh2cdata returned %u\n", in nvme_tcp_init_connection()
1577 nvme_tcp_queue_id(queue), maxh2cdata); in nvme_tcp_init_connection()
1580 queue->maxh2cdata = maxh2cdata; in nvme_tcp_init_connection()
1590 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue) in nvme_tcp_admin_queue() argument
1592 return nvme_tcp_queue_id(queue) == 0; in nvme_tcp_admin_queue()
1595 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue) in nvme_tcp_default_queue() argument
1597 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_default_queue()
1598 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_default_queue()
1600 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_default_queue()
1604 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue) in nvme_tcp_read_queue() argument
1606 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_read_queue()
1607 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_read_queue()
1609 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_read_queue()
1610 !nvme_tcp_default_queue(queue) && in nvme_tcp_read_queue()
1615 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) in nvme_tcp_poll_queue() argument
1617 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_poll_queue()
1618 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_poll_queue()
1620 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_poll_queue()
1621 !nvme_tcp_default_queue(queue) && in nvme_tcp_poll_queue()
1622 !nvme_tcp_read_queue(queue) && in nvme_tcp_poll_queue()
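
The admin/default/read/poll predicates carve the queue id space into contiguous ranges: id 0 is the admin queue, then the default (write) queues, then read queues, then poll queues, with the boundaries coming from the ctrl->io_queues[] counts. A worked sketch of that partition, assuming 4 default and 2 read queues:

    #include <stdio.h>

    /* IO queue ids sit contiguously after the admin queue (id 0):
     * [1 .. ndef] default, (ndef .. ndef+nread] read, the rest poll */
    enum qtype { ADMIN, DEFAULT, READ_Q, POLL };

    static enum qtype classify(int qid, int ndef, int nread)
    {
        if (qid == 0)
            return ADMIN;
        if (qid <= ndef)
            return DEFAULT;
        if (qid <= ndef + nread)
            return READ_Q;
        return POLL;
    }

    int main(void)
    {
        for (int qid = 0; qid <= 7; qid++)
            printf("qid %d -> %d\n", qid, classify(qid, 4, 2));
        return 0;
    }
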
1637 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) in nvme_tcp_set_queue_io_cpu() argument
1639 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_set_queue_io_cpu()
1641 int qid = nvme_tcp_queue_id(queue) - 1; in nvme_tcp_set_queue_io_cpu()
1648 if (nvme_tcp_default_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1650 else if (nvme_tcp_read_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1652 else if (nvme_tcp_poll_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1671 queue->io_cpu = io_cpu; in nvme_tcp_set_queue_io_cpu()
1673 set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags); in nvme_tcp_set_queue_io_cpu()
1676 dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n", in nvme_tcp_set_queue_io_cpu()
1677 qid, queue->io_cpu); in nvme_tcp_set_queue_io_cpu()
1682 struct nvme_tcp_queue *queue = data; in nvme_tcp_tls_done() local
1683 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_tls_done()
1684 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_tls_done()
1687 dev_dbg(ctrl->ctrl.device, "queue %d: TLS handshake done, key %x, status %d\n", in nvme_tcp_tls_done()
1691 queue->tls_err = -status; in nvme_tcp_tls_done()
1697 dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n", in nvme_tcp_tls_done()
1699 queue->tls_err = -ENOKEY; in nvme_tcp_tls_done()
1701 queue->tls_enabled = true; in nvme_tcp_tls_done()
1705 queue->tls_err = 0; in nvme_tcp_tls_done()
1709 complete(&queue->tls_complete); in nvme_tcp_tls_done()
1713 struct nvme_tcp_queue *queue, in nvme_tcp_start_tls() argument
1716 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_start_tls()
1722 dev_dbg(nctrl->device, "queue %d: start TLS with key %x\n", in nvme_tcp_start_tls()
1725 args.ta_sock = queue->sock; in nvme_tcp_start_tls()
1727 args.ta_data = queue; in nvme_tcp_start_tls()
1734 queue->tls_err = -EOPNOTSUPP; in nvme_tcp_start_tls()
1735 init_completion(&queue->tls_complete); in nvme_tcp_start_tls()
1738 dev_err(nctrl->device, "queue %d: failed to start TLS: %d\n", in nvme_tcp_start_tls()
1742 ret = wait_for_completion_interruptible_timeout(&queue->tls_complete, tmo); in nvme_tcp_start_tls()
1748 "queue %d: TLS handshake failed, error %d\n", in nvme_tcp_start_tls()
1750 tls_handshake_cancel(queue->sock->sk); in nvme_tcp_start_tls()
1752 if (queue->tls_err) { in nvme_tcp_start_tls()
1754 "queue %d: TLS handshake complete, error %d\n", in nvme_tcp_start_tls()
1755 qid, queue->tls_err); in nvme_tcp_start_tls()
1758 "queue %d: TLS handshake complete\n", qid); in nvme_tcp_start_tls()
1760 ret = queue->tls_err; in nvme_tcp_start_tls()
1769 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_alloc_queue() local
1773 mutex_init(&queue->queue_lock); in nvme_tcp_alloc_queue()
1774 queue->ctrl = ctrl; in nvme_tcp_alloc_queue()
1775 init_llist_head(&queue->req_list); in nvme_tcp_alloc_queue()
1776 INIT_LIST_HEAD(&queue->send_list); in nvme_tcp_alloc_queue()
1777 mutex_init(&queue->send_mutex); in nvme_tcp_alloc_queue()
1778 INIT_WORK(&queue->io_work, nvme_tcp_io_work); in nvme_tcp_alloc_queue()
1781 queue->cmnd_capsule_len = nctrl->ioccsz * 16; in nvme_tcp_alloc_queue()
1783 queue->cmnd_capsule_len = sizeof(struct nvme_command) + in nvme_tcp_alloc_queue()
1788 IPPROTO_TCP, &queue->sock); in nvme_tcp_alloc_queue()
1795 sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL); in nvme_tcp_alloc_queue()
1801 sk_net_refcnt_upgrade(queue->sock->sk); in nvme_tcp_alloc_queue()
1802 nvme_tcp_reclassify_socket(queue->sock); in nvme_tcp_alloc_queue()
1805 tcp_sock_set_syncnt(queue->sock->sk, 1); in nvme_tcp_alloc_queue()
1808 tcp_sock_set_nodelay(queue->sock->sk); in nvme_tcp_alloc_queue()
1811 * Cleanup whatever is sitting in the TCP transmit queue on socket in nvme_tcp_alloc_queue()
1815 sock_no_linger(queue->sock->sk); in nvme_tcp_alloc_queue()
1818 sock_set_priority(queue->sock->sk, so_priority); in nvme_tcp_alloc_queue()
1822 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); in nvme_tcp_alloc_queue()
1825 queue->sock->sk->sk_rcvtimeo = 10 * HZ; in nvme_tcp_alloc_queue()
1827 queue->sock->sk->sk_allocation = GFP_ATOMIC; in nvme_tcp_alloc_queue()
1828 queue->sock->sk->sk_use_task_frag = false; in nvme_tcp_alloc_queue()
1829 queue->io_cpu = WORK_CPU_UNBOUND; in nvme_tcp_alloc_queue()
1830 queue->request = NULL; in nvme_tcp_alloc_queue()
1831 queue->data_remaining = 0; in nvme_tcp_alloc_queue()
1832 queue->ddgst_remaining = 0; in nvme_tcp_alloc_queue()
1833 queue->pdu_remaining = 0; in nvme_tcp_alloc_queue()
1834 queue->pdu_offset = 0; in nvme_tcp_alloc_queue()
1835 sk_set_memalloc(queue->sock->sk); in nvme_tcp_alloc_queue()
1838 ret = kernel_bind(queue->sock, (struct sockaddr_unsized *)&ctrl->src_addr, in nvme_tcp_alloc_queue()
1842 "failed to bind queue %d socket %d\n", in nvme_tcp_alloc_queue()
1852 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, in nvme_tcp_alloc_queue()
1856 "failed to bind to interface %s queue %d err %d\n", in nvme_tcp_alloc_queue()
1862 queue->hdr_digest = nctrl->opts->hdr_digest; in nvme_tcp_alloc_queue()
1863 queue->data_digest = nctrl->opts->data_digest; in nvme_tcp_alloc_queue()
1866 nvme_tcp_hdgst_len(queue); in nvme_tcp_alloc_queue()
1867 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); in nvme_tcp_alloc_queue()
1868 if (!queue->pdu) { in nvme_tcp_alloc_queue()
1873 dev_dbg(nctrl->device, "connecting queue %d\n", in nvme_tcp_alloc_queue()
1874 nvme_tcp_queue_id(queue)); in nvme_tcp_alloc_queue()
1876 ret = kernel_connect(queue->sock, (struct sockaddr_unsized *)&ctrl->addr, in nvme_tcp_alloc_queue()
1886 ret = nvme_tcp_start_tls(nctrl, queue, pskid); in nvme_tcp_alloc_queue()
1891 ret = nvme_tcp_init_connection(queue); in nvme_tcp_alloc_queue()
1895 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); in nvme_tcp_alloc_queue()
1900 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvme_tcp_alloc_queue()
1902 kfree(queue->pdu); in nvme_tcp_alloc_queue()
1905 fput(queue->sock->file); in nvme_tcp_alloc_queue()
1906 queue->sock = NULL; in nvme_tcp_alloc_queue()
1908 mutex_destroy(&queue->send_mutex); in nvme_tcp_alloc_queue()
1909 mutex_destroy(&queue->queue_lock); in nvme_tcp_alloc_queue()
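
nvme_tcp_alloc_queue() tunes the socket before connecting: TCP_NODELAY so small PDUs are not coalesced, no lingering on close so the transmit queue is dropped instead of drained, optional priority/TOS, and a 10 s receive timeout. A loose userspace analogue using plain setsockopt(); the kernel-only knobs (sk_allocation, sk_use_task_frag, sk_set_memalloc) have no equivalent here:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/time.h>

    static int tune_socket(int fd)
    {
        int one = 1;
        struct linger lin = { .l_onoff = 1, .l_linger = 0 };
        struct timeval rcvto = { .tv_sec = 10 };

        /* send small PDUs immediately instead of coalescing (Nagle off) */
        if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)))
            return -1;
        /* abortive close: drop whatever sits in the TX queue */
        if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin)))
            return -1;
        /* bound blocking receives, mirroring sk_rcvtimeo = 10 * HZ */
        if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &rcvto, sizeof(rcvto)))
            return -1;
        return 0;
    }

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        printf("%s\n", tune_socket(fd) ? "setsockopt failed" : "tuned");
        return 0;
    }
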
1913 static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue) in nvme_tcp_restore_sock_ops() argument
1915 struct socket *sock = queue->sock; in nvme_tcp_restore_sock_ops()
1919 sock->sk->sk_data_ready = queue->data_ready; in nvme_tcp_restore_sock_ops()
1920 sock->sk->sk_state_change = queue->state_change; in nvme_tcp_restore_sock_ops()
1921 sock->sk->sk_write_space = queue->write_space; in nvme_tcp_restore_sock_ops()
1925 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) in __nvme_tcp_stop_queue() argument
1927 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in __nvme_tcp_stop_queue()
1928 nvme_tcp_restore_sock_ops(queue); in __nvme_tcp_stop_queue()
1929 cancel_work_sync(&queue->io_work); in __nvme_tcp_stop_queue()
1935 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_stop_queue_nowait() local
1937 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_stop_queue_nowait()
1940 if (test_and_clear_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags)) in nvme_tcp_stop_queue_nowait()
1941 atomic_dec(&nvme_tcp_cpu_queues[queue->io_cpu]); in nvme_tcp_stop_queue_nowait()
1943 mutex_lock(&queue->queue_lock); in nvme_tcp_stop_queue_nowait()
1944 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_stop_queue_nowait()
1945 __nvme_tcp_stop_queue(queue); in nvme_tcp_stop_queue_nowait()
1946 /* Stopping the queue will disable TLS */ in nvme_tcp_stop_queue_nowait()
1947 queue->tls_enabled = false; in nvme_tcp_stop_queue_nowait()
1948 mutex_unlock(&queue->queue_lock); in nvme_tcp_stop_queue_nowait()
1954 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_wait_queue() local
1958 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) || in nvme_tcp_wait_queue()
1959 !sk_wmem_alloc_get(queue->sock->sk)) in nvme_tcp_wait_queue()
1976 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue) in nvme_tcp_setup_sock_ops() argument
1978 write_lock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
1979 queue->sock->sk->sk_user_data = queue; in nvme_tcp_setup_sock_ops()
1980 queue->state_change = queue->sock->sk->sk_state_change; in nvme_tcp_setup_sock_ops()
1981 queue->data_ready = queue->sock->sk->sk_data_ready; in nvme_tcp_setup_sock_ops()
1982 queue->write_space = queue->sock->sk->sk_write_space; in nvme_tcp_setup_sock_ops()
1983 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; in nvme_tcp_setup_sock_ops()
1984 queue->sock->sk->sk_state_change = nvme_tcp_state_change; in nvme_tcp_setup_sock_ops()
1985 queue->sock->sk->sk_write_space = nvme_tcp_write_space; in nvme_tcp_setup_sock_ops()
1987 queue->sock->sk->sk_ll_usec = 1; in nvme_tcp_setup_sock_ops()
1989 write_unlock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
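
nvme_tcp_setup_sock_ops() stashes the socket's original data_ready/state_change/write_space callbacks and installs the driver's own while holding sk_callback_lock; nvme_tcp_restore_sock_ops() reverses this on teardown. A minimal function-pointer sketch of the save/override/restore pattern, with a pthread rwlock standing in for sk_callback_lock:

    #include <pthread.h>
    #include <stdio.h>

    struct sk {
        pthread_rwlock_t cb_lock;
        void (*data_ready)(struct sk *);
    };

    struct q {
        struct sk *sk;
        void (*saved_data_ready)(struct sk *); /* original, for restore */
    };

    static void my_data_ready(struct sk *sk) { (void)sk; puts("driver woken"); }
    static void default_ready(struct sk *sk) { (void)sk; puts("default"); }

    static void setup_sock_ops(struct q *q)
    {
        pthread_rwlock_wrlock(&q->sk->cb_lock);
        q->saved_data_ready = q->sk->data_ready; /* stash the original */
        q->sk->data_ready = my_data_ready;       /* install our hook */
        pthread_rwlock_unlock(&q->sk->cb_lock);
    }

    static void restore_sock_ops(struct q *q)
    {
        pthread_rwlock_wrlock(&q->sk->cb_lock);
        q->sk->data_ready = q->saved_data_ready;
        pthread_rwlock_unlock(&q->sk->cb_lock);
    }

    int main(void)
    {
        struct sk sk = { PTHREAD_RWLOCK_INITIALIZER, default_ready };
        struct q q = { &sk };

        setup_sock_ops(&q);
        sk.data_ready(&sk);   /* "driver woken" */
        restore_sock_ops(&q);
        sk.data_ready(&sk);   /* "default" */
        return 0;
    }
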
1995 struct nvme_tcp_queue *queue = &ctrl->queues[idx]; in nvme_tcp_start_queue() local
1998 queue->rd_enabled = true; in nvme_tcp_start_queue()
1999 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_start_queue()
2000 nvme_tcp_setup_sock_ops(queue); in nvme_tcp_start_queue()
2003 nvme_tcp_set_queue_io_cpu(queue); in nvme_tcp_start_queue()
2009 set_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_start_queue()
2011 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_start_queue()
2012 __nvme_tcp_stop_queue(queue); in nvme_tcp_start_queue()
2014 "failed to connect queue: %d ret=%d\n", idx, ret); in nvme_tcp_start_queue()
2187 * queue number might have changed. in nvme_tcp_configure_io_queues()
2346 * completed on the admin queue. We need to revoke the key when:
2376 dev_dbg(ctrl->device, "restart admin queue for secure concatenation\n"); in nvme_tcp_setup_ctrl()
2580 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue, in nvme_tcp_set_sg_inline() argument
2585 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_tcp_set_sg_inline()
2604 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_submit_async_event() local
2607 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_submit_async_event()
2611 if (queue->hdr_digest) in nvme_tcp_submit_async_event()
2634 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_complete_timed_out()
2636 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); in nvme_tcp_complete_timed_out()
2643 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_timeout()
2646 int qid = nvme_tcp_queue_id(req->queue); in nvme_tcp_timeout()
2679 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue, in nvme_tcp_map_data() argument
2692 nvme_tcp_set_sg_inline(queue, c, req->data_len); in nvme_tcp_map_data()
2704 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_cmd_pdu() local
2705 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0; in nvme_tcp_setup_cmd_pdu()
2731 if (queue->hdr_digest) in nvme_tcp_setup_cmd_pdu()
2733 if (queue->data_digest && req->pdu_len) { in nvme_tcp_setup_cmd_pdu()
2735 ddgst = nvme_tcp_ddgst_len(queue); in nvme_tcp_setup_cmd_pdu()
2742 ret = nvme_tcp_map_data(queue, rq); in nvme_tcp_setup_cmd_pdu()
2745 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_cmd_pdu()
2755 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_commit_rqs() local
2757 if (!llist_empty(&queue->req_list)) in nvme_tcp_commit_rqs()
2758 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_commit_rqs()
2764 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_tcp_queue_rq()
2765 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_queue_rq() local
2768 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_queue_rq()
2771 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_tcp_queue_rq()
2772 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_tcp_queue_rq()
2794 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_poll() local
2795 struct sock *sk = queue->sock->sk; in nvme_tcp_poll()
2798 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_poll()
2801 set_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2804 ret = nvme_tcp_try_recv(queue); in nvme_tcp_poll()
2805 clear_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2806 return ret < 0 ? ret : queue->nr_cqe; in nvme_tcp_poll()
2811 struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0]; in nvme_tcp_get_address() local
2817 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_get_address()
2820 mutex_lock(&queue->queue_lock); in nvme_tcp_get_address()
2822 ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr); in nvme_tcp_get_address()
2830 mutex_unlock(&queue->queue_lock); in nvme_tcp_get_address()