Lines Matching full:cmd in drivers/nvme/target/tcp.c (Linux NVMe/TCP target driver, nvmet-tcp)
169 struct nvmet_tcp_cmd *cmd; member
216 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
219 struct nvmet_tcp_cmd *cmd) in nvmet_tcp_cmd_tag() argument
226 return cmd - queue->cmds; in nvmet_tcp_cmd_tag()
229 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_has_data_in() argument
231 return nvme_is_write(cmd->req.cmd) && in nvmet_tcp_has_data_in()
232 cmd->rbytes_done < cmd->req.transfer_len; in nvmet_tcp_has_data_in()
235 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_need_data_in() argument
237 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status; in nvmet_tcp_need_data_in()
240 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_need_data_out() argument
242 return !nvme_is_write(cmd->req.cmd) && in nvmet_tcp_need_data_out()
243 cmd->req.transfer_len > 0 && in nvmet_tcp_need_data_out()
244 !cmd->req.cqe->status; in nvmet_tcp_need_data_out()
247 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_has_inline_data() argument
249 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len && in nvmet_tcp_has_inline_data()
250 !cmd->rbytes_done; in nvmet_tcp_has_inline_data()
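The four predicates above classify a command by direction and progress. A minimal userspace sketch, modeling only the handful of fields the predicates actually read (the real struct nvmet_tcp_cmd is much larger), of how they combine:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the few nvmet_tcp_cmd fields the predicates read. */
struct cmd_view {
	bool     is_write;      /* nvme_is_write(cmd->req.cmd)        */
	uint32_t transfer_len;  /* cmd->req.transfer_len              */
	uint32_t rbytes_done;   /* payload bytes received so far      */
	uint32_t pdu_len;       /* inline data carried in the capsule */
	uint16_t status;        /* cmd->req.cqe->status               */
};

/* A write that still owes payload from the host. */
static bool has_data_in(const struct cmd_view *c)
{
	return c->is_write && c->rbytes_done < c->transfer_len;
}

/* Same, but only worth asking for (R2T) if the command hasn't failed. */
static bool need_data_in(const struct cmd_view *c)
{
	return has_data_in(c) && !c->status;
}

/* A successful read that owes payload to the host (C2HData). */
static bool need_data_out(const struct cmd_view *c)
{
	return !c->is_write && c->transfer_len > 0 && !c->status;
}

/* A write whose first bytes arrived inline, right after the command PDU. */
static bool has_inline_data(const struct cmd_view *c)
{
	return c->is_write && c->pdu_len && !c->rbytes_done;
}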
256 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_get_cmd() local
258 cmd = list_first_entry_or_null(&queue->free_list, in nvmet_tcp_get_cmd()
260 if (!cmd) in nvmet_tcp_get_cmd()
262 list_del_init(&cmd->entry); in nvmet_tcp_get_cmd()
264 cmd->rbytes_done = cmd->wbytes_done = 0; in nvmet_tcp_get_cmd()
265 cmd->pdu_len = 0; in nvmet_tcp_get_cmd()
266 cmd->pdu_recv = 0; in nvmet_tcp_get_cmd()
267 cmd->iov = NULL; in nvmet_tcp_get_cmd()
268 cmd->flags = 0; in nvmet_tcp_get_cmd()
269 return cmd; in nvmet_tcp_get_cmd()
272 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_put_cmd() argument
274 if (unlikely(cmd == &cmd->queue->connect)) in nvmet_tcp_put_cmd()
277 list_add_tail(&cmd->entry, &cmd->queue->free_list); in nvmet_tcp_put_cmd()
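nvmet_tcp_get_cmd() and nvmet_tcp_put_cmd() implement a per-queue command pool. A hedged sketch of the same pattern with a plain singly linked free list (the driver uses list_head and returns commands at the tail; this LIFO version is a simplification), including the pre-allocated connect command that is never recycled:

#include <stddef.h>

struct pool_cmd {
	struct pool_cmd *next;  /* free-list linkage (kernel: list_head) */
	int rbytes_done, wbytes_done, pdu_len, pdu_recv, flags;
};

struct pool {
	struct pool_cmd *free_head;
	struct pool_cmd *connect;  /* reserved command, never pooled */
};

/* Pop a command and reset its per-I/O state, as nvmet_tcp_get_cmd()
 * does (iov reset omitted here for brevity). */
static struct pool_cmd *pool_get(struct pool *p)
{
	struct pool_cmd *c = p->free_head;

	if (!c)
		return NULL;
	p->free_head = c->next;
	c->rbytes_done = c->wbytes_done = 0;
	c->pdu_len = c->pdu_recv = c->flags = 0;
	return c;
}

/* Return a command, except the connect command, which stays reserved. */
static void pool_put(struct pool *p, struct pool_cmd *c)
{
	if (c == p->connect)
		return;
	c->next = p->free_head;
	p->free_head = c;
}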
343 /* If cmd buffers are NULL, no operation is performed */
344 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_free_cmd_buffers() argument
346 kfree(cmd->iov); in nvmet_tcp_free_cmd_buffers()
347 sgl_free(cmd->req.sg); in nvmet_tcp_free_cmd_buffers()
348 cmd->iov = NULL; in nvmet_tcp_free_cmd_buffers()
349 cmd->req.sg = NULL; in nvmet_tcp_free_cmd_buffers()
352 static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_build_pdu_iovec() argument
354 struct bio_vec *iov = cmd->iov; in nvmet_tcp_build_pdu_iovec()
359 length = cmd->pdu_len; in nvmet_tcp_build_pdu_iovec()
361 offset = cmd->rbytes_done; in nvmet_tcp_build_pdu_iovec()
362 cmd->sg_idx = offset / PAGE_SIZE; in nvmet_tcp_build_pdu_iovec()
364 sg = &cmd->req.sg[cmd->sg_idx]; in nvmet_tcp_build_pdu_iovec()
378 iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov, in nvmet_tcp_build_pdu_iovec()
379 nr_pages, cmd->pdu_len); in nvmet_tcp_build_pdu_iovec()
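nvmet_tcp_build_pdu_iovec() points an iov_iter at the slice of the scatterlist that the incoming payload will fill; the `offset / PAGE_SIZE` index works because sgl_alloc() produces page-sized entries. A userspace sketch of the same walk over an array of pages, with struct iovec standing in for bio_vec:

#include <sys/uio.h>

#define PAGE_SZ 4096u  /* assumption: page-sized scatterlist entries */

/* Cover 'length' bytes starting at byte 'offset' of a paged buffer,
 * mirroring how the driver walks cmd->req.sg from cmd->sg_idx. */
static int build_iov(struct iovec *iov, char **pages,
		     unsigned int offset, unsigned int length)
{
	int nr = 0;

	while (length) {
		unsigned int pg  = offset / PAGE_SZ;  /* cmd->sg_idx */
		unsigned int off = offset % PAGE_SZ;  /* intra-page  */
		unsigned int len = PAGE_SZ - off;

		if (len > length)
			len = length;
		iov[nr].iov_base = pages[pg] + off;
		iov[nr].iov_len  = len;
		nr++;
		offset += len;
		length -= len;
	}
	return nr;  /* segment count, like nr_pages in the driver */
}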
400 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_map_data() argument
402 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl; in nvmet_tcp_map_data()
410 if (!nvme_is_write(cmd->req.cmd)) in nvmet_tcp_map_data()
413 if (len > cmd->req.port->inline_data_size) in nvmet_tcp_map_data()
415 cmd->pdu_len = len; in nvmet_tcp_map_data()
417 cmd->req.transfer_len += len; in nvmet_tcp_map_data()
419 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt); in nvmet_tcp_map_data()
420 if (!cmd->req.sg) in nvmet_tcp_map_data()
422 cmd->cur_sg = cmd->req.sg; in nvmet_tcp_map_data()
424 if (nvmet_tcp_has_data_in(cmd)) { in nvmet_tcp_map_data()
425 cmd->iov = kmalloc_array(cmd->req.sg_cnt, in nvmet_tcp_map_data()
426 sizeof(*cmd->iov), GFP_KERNEL); in nvmet_tcp_map_data()
427 if (!cmd->iov) in nvmet_tcp_map_data()
433 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_tcp_map_data()
437 static void nvmet_tcp_calc_ddgst(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_calc_ddgst() argument
439 size_t total_len = cmd->req.transfer_len; in nvmet_tcp_calc_ddgst()
440 struct scatterlist *sg = cmd->req.sg; in nvmet_tcp_calc_ddgst()
454 cmd->exp_ddgst = cpu_to_le32(~crc); in nvmet_tcp_calc_ddgst()
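The data digest is CRC32C over the payload, stored as the one's complement in little-endian form (the `~crc` at line 454). A self-contained sketch of the fold across scatter elements; the driver uses the kernel's crc32c library, and the table-free bitwise loop here is only for illustration:

#include <stddef.h>
#include <stdint.h>

/* Bit-at-a-time CRC32C (reflected polynomial 0x82F63B78). */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
	}
	return crc;
}

struct elem { const void *buf; size_t len; };  /* scatter element stand-in */

/* Fold the CRC across each element, capped at transfer_len, then store
 * the complement -- the value the driver keeps in cmd->exp_ddgst. */
static uint32_t calc_ddgst(const struct elem *sg, int nents, size_t total)
{
	uint32_t crc = ~0u;

	for (int i = 0; i < nents && total; i++) {
		size_t len = sg[i].len < total ? sg[i].len : total;

		crc = crc32c(crc, sg[i].buf, len);
		total -= len;
	}
	return ~crc;
}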
457 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_c2h_data_pdu() argument
459 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu; in nvmet_setup_c2h_data_pdu()
460 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_c2h_data_pdu()
461 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
462 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
464 cmd->offset = 0; in nvmet_setup_c2h_data_pdu()
465 cmd->state = NVMET_TCP_SEND_DATA_PDU; in nvmet_setup_c2h_data_pdu()
474 cmd->req.transfer_len + ddgst); in nvmet_setup_c2h_data_pdu()
475 pdu->command_id = cmd->req.cqe->command_id; in nvmet_setup_c2h_data_pdu()
476 pdu->data_length = cpu_to_le32(cmd->req.transfer_len); in nvmet_setup_c2h_data_pdu()
477 pdu->data_offset = cpu_to_le32(cmd->wbytes_done); in nvmet_setup_c2h_data_pdu()
481 nvmet_tcp_calc_ddgst(cmd); in nvmet_setup_c2h_data_pdu()
484 if (cmd->queue->hdr_digest) { in nvmet_setup_c2h_data_pdu()
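The C2HData header setup is mostly length bookkeeping: the PDU length on the wire is the header plus an optional header digest, then the payload, then an optional data digest. A sketch of that arithmetic, assuming the 4-byte NVME_TCP_DIGEST_LENGTH:

#include <stdint.h>

#define DIGEST_LEN 4u  /* NVME_TCP_DIGEST_LENGTH */

/* plen for a C2HData PDU, matching the transfer_len + ddgst fragment
 * above; pdo (payload data offset) is where the payload begins. */
static uint32_t c2h_plen(uint32_t hlen, int hdgst_on, int ddgst_on,
			 uint32_t transfer_len)
{
	uint32_t hdgst = hdgst_on ? DIGEST_LEN : 0;
	uint32_t ddgst = ddgst_on ? DIGEST_LEN : 0;
	uint32_t pdo   = hlen + hdgst;  /* payload data offset */

	return pdo + transfer_len + ddgst;
}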
490 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_r2t_pdu() argument
492 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; in nvmet_setup_r2t_pdu()
493 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_r2t_pdu()
495 cmd->offset = 0; in nvmet_setup_r2t_pdu()
496 cmd->state = NVMET_TCP_SEND_R2T; in nvmet_setup_r2t_pdu()
504 pdu->command_id = cmd->req.cmd->common.command_id; in nvmet_setup_r2t_pdu()
505 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); in nvmet_setup_r2t_pdu()
506 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done); in nvmet_setup_r2t_pdu()
507 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); in nvmet_setup_r2t_pdu()
508 if (cmd->queue->hdr_digest) { in nvmet_setup_r2t_pdu()
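An R2T asks the host for whatever part of the write payload has not arrived yet, and the transfer tag is simply the command's slot index (nvmet_tcp_cmd_tag() is pointer arithmetic against queue->cmds). A sketch of the field derivation:

#include <stdint.h>

struct r2t_view { uint16_t ttag; uint32_t r2t_offset, r2t_length; };

/* Derive the R2T fields from command progress, as in
 * nvmet_setup_r2t_pdu(): the offset resumes at rbytes_done and the
 * length is the remainder of the transfer. */
static struct r2t_view make_r2t(unsigned int cmd_idx,
				uint32_t transfer_len, uint32_t rbytes_done)
{
	struct r2t_view r = {
		.ttag       = (uint16_t)cmd_idx,  /* cmd - queue->cmds */
		.r2t_offset = rbytes_done,
		.r2t_length = transfer_len - rbytes_done,
	};
	return r;
}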
514 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_response_pdu() argument
516 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; in nvmet_setup_response_pdu()
517 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_response_pdu()
519 cmd->offset = 0; in nvmet_setup_response_pdu()
520 cmd->state = NVMET_TCP_SEND_RESPONSE; in nvmet_setup_response_pdu()
527 if (cmd->queue->hdr_digest) { in nvmet_setup_response_pdu()
536 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_process_resp_list() local
539 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry); in nvmet_tcp_process_resp_list()
540 list_add(&cmd->entry, &queue->resp_send_list); in nvmet_tcp_process_resp_list()
573 struct nvmet_tcp_cmd *cmd = in nvmet_tcp_queue_response() local
575 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_queue_response()
583 queue_cmd = READ_ONCE(queue->cmd); in nvmet_tcp_queue_response()
585 if (unlikely(cmd == queue_cmd)) { in nvmet_tcp_queue_response()
586 sgl = &cmd->req.cmd->common.dptr.sgl; in nvmet_tcp_queue_response()
595 len && len <= cmd->req.port->inline_data_size && in nvmet_tcp_queue_response()
596 nvme_is_write(cmd->req.cmd)) in nvmet_tcp_queue_response()
600 llist_add(&cmd->lentry, &queue->resp_list); in nvmet_tcp_queue_response()
601 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work); in nvmet_tcp_queue_response()
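Completions can arrive from any context, so nvmet_tcp_queue_response() pushes onto a lock-free llist and kicks io_work, which later splices everything into the ordered resp_send_list (nvmet_tcp_process_resp_list() above). A C11-atomics sketch of that producer/consumer hand-off:

#include <stdatomic.h>
#include <stddef.h>

struct node { struct node *next; };

static _Atomic(struct node *) resp_list;

/* Lock-free push from any context, like llist_add(). */
static void push(struct node *n)
{
	struct node *old = atomic_load(&resp_list);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&resp_list, &old, n));
}

/* The single io_work consumer takes the whole chain at once,
 * like llist_del_all(). */
static struct node *take_all(void)
{
	return atomic_exchange(&resp_list, NULL);
}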
604 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_execute_request() argument
606 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED)) in nvmet_tcp_execute_request()
607 nvmet_tcp_queue_response(&cmd->req); in nvmet_tcp_execute_request()
609 cmd->req.execute(&cmd->req); in nvmet_tcp_execute_request()
612 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_try_send_data_pdu() argument
618 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_data_pdu()
619 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst; in nvmet_try_send_data_pdu()
622 bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left); in nvmet_try_send_data_pdu()
624 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_data_pdu()
628 cmd->offset += ret; in nvmet_try_send_data_pdu()
634 cmd->state = NVMET_TCP_SEND_DATA; in nvmet_try_send_data_pdu()
635 cmd->offset = 0; in nvmet_try_send_data_pdu()
639 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_data() argument
641 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_data()
644 while (cmd->cur_sg) { in nvmet_try_send_data()
648 struct page *page = sg_page(cmd->cur_sg); in nvmet_try_send_data()
650 u32 left = cmd->cur_sg->length - cmd->offset; in nvmet_try_send_data()
652 if ((!last_in_batch && cmd->queue->send_list_len) || in nvmet_try_send_data()
653 cmd->wbytes_done + left < cmd->req.transfer_len || in nvmet_try_send_data()
657 bvec_set_page(&bvec, page, left, cmd->offset); in nvmet_try_send_data()
659 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_data()
663 cmd->offset += ret; in nvmet_try_send_data()
664 cmd->wbytes_done += ret; in nvmet_try_send_data()
667 if (cmd->offset == cmd->cur_sg->length) { in nvmet_try_send_data()
668 cmd->cur_sg = sg_next(cmd->cur_sg); in nvmet_try_send_data()
669 cmd->offset = 0; in nvmet_try_send_data()
674 cmd->state = NVMET_TCP_SEND_DDGST; in nvmet_try_send_data()
675 cmd->offset = 0; in nvmet_try_send_data()
678 cmd->queue->snd_cmd = NULL; in nvmet_try_send_data()
679 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_data()
681 nvmet_setup_response_pdu(cmd); in nvmet_try_send_data()
686 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_try_send_data()
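All of the try_send helpers share one pattern: cmd->offset records how far into the current buffer the socket got, so a short write simply parks the command and the next io_work pass resumes where this one stopped. A userspace sketch of that resumable send:

#include <errno.h>
#include <stddef.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Push [*offset, len) of buf; *offset survives across calls, so a
 * would-block return resumes cleanly next time, the way cmd->offset
 * does for nvmet_try_send_data() and friends.
 * Returns 1 when fully sent, 0 to retry later, -1 on error. */
static int send_resumable(int fd, const char *buf, size_t len,
			  size_t *offset)
{
	while (*offset < len) {
		ssize_t ret = send(fd, buf + *offset, len - *offset,
				   MSG_DONTWAIT);
		if (ret < 0)
			return (errno == EAGAIN) ? 0 : -1;
		*offset += (size_t)ret;
	}
	return 1;
}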
692 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd, in nvmet_try_send_response() argument
697 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_response()
698 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst; in nvmet_try_send_response()
701 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_response()
706 bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left); in nvmet_try_send_response()
708 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_response()
711 cmd->offset += ret; in nvmet_try_send_response()
717 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_try_send_response()
718 cmd->queue->snd_cmd = NULL; in nvmet_try_send_response()
719 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_response()
723 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_r2t() argument
727 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_r2t()
728 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst; in nvmet_try_send_r2t()
731 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_r2t()
736 bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left); in nvmet_try_send_r2t()
738 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_r2t()
741 cmd->offset += ret; in nvmet_try_send_r2t()
747 cmd->queue->snd_cmd = NULL; in nvmet_try_send_r2t()
751 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_ddgst() argument
753 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_ddgst()
754 int left = NVME_TCP_DIGEST_LENGTH - cmd->offset; in nvmet_try_send_ddgst()
757 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset, in nvmet_try_send_ddgst()
762 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_ddgst()
771 cmd->offset += ret; in nvmet_try_send_ddgst()
778 cmd->queue->snd_cmd = NULL; in nvmet_try_send_ddgst()
779 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_ddgst()
781 nvmet_setup_response_pdu(cmd); in nvmet_try_send_ddgst()
789 struct nvmet_tcp_cmd *cmd = queue->snd_cmd; in nvmet_tcp_try_send_one() local
792 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) { in nvmet_tcp_try_send_one()
793 cmd = nvmet_tcp_fetch_cmd(queue); in nvmet_tcp_try_send_one()
794 if (unlikely(!cmd)) in nvmet_tcp_try_send_one()
798 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) { in nvmet_tcp_try_send_one()
799 ret = nvmet_try_send_data_pdu(cmd); in nvmet_tcp_try_send_one()
804 if (cmd->state == NVMET_TCP_SEND_DATA) { in nvmet_tcp_try_send_one()
805 ret = nvmet_try_send_data(cmd, last_in_batch); in nvmet_tcp_try_send_one()
810 if (cmd->state == NVMET_TCP_SEND_DDGST) { in nvmet_tcp_try_send_one()
811 ret = nvmet_try_send_ddgst(cmd, last_in_batch); in nvmet_tcp_try_send_one()
816 if (cmd->state == NVMET_TCP_SEND_R2T) { in nvmet_tcp_try_send_one()
817 ret = nvmet_try_send_r2t(cmd, last_in_batch); in nvmet_tcp_try_send_one()
822 if (cmd->state == NVMET_TCP_SEND_RESPONSE) in nvmet_tcp_try_send_one()
823 ret = nvmet_try_send_response(cmd, last_in_batch); in nvmet_tcp_try_send_one()
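The dispatch chain above is a fall-through state machine: each state, once its bytes are on the wire, rewrites cmd->state so the next comparison fires within the same pass. A simplified transition table (assumption: the sqhd_disabled shortcut that skips the response and frees the command directly is omitted):

#include <stdbool.h>

enum send_state { SEND_DATA_PDU, SEND_DATA, SEND_DDGST, SEND_R2T,
		  SEND_RESPONSE, SEND_DONE };

/* Sketch of the state progression in nvmet_tcp_try_send_one(). */
static enum send_state advance(enum send_state s, bool data_digest)
{
	switch (s) {
	case SEND_DATA_PDU: return SEND_DATA;
	case SEND_DATA:     return data_digest ? SEND_DDGST : SEND_RESPONSE;
	case SEND_DDGST:    return SEND_RESPONSE;
	case SEND_R2T:      return SEND_DONE;  /* wait for H2CData next */
	case SEND_RESPONSE: return SEND_DONE;
	default:            return SEND_DONE;
	}
}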
858 WRITE_ONCE(queue->cmd, NULL); in nvmet_prepare_receive_pdu()
859 /* Ensure rcv_state is visible only after queue->cmd is set */ in nvmet_prepare_receive_pdu()
919 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req) in nvmet_tcp_handle_req_failure() argument
921 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); in nvmet_tcp_handle_req_failure()
931 if (!nvme_is_write(cmd->req.cmd) || !data_len || in nvmet_tcp_handle_req_failure()
932 data_len > cmd->req.port->inline_data_size) { in nvmet_tcp_handle_req_failure()
937 ret = nvmet_tcp_map_data(cmd); in nvmet_tcp_handle_req_failure()
945 nvmet_tcp_build_pdu_iovec(cmd); in nvmet_tcp_handle_req_failure()
946 cmd->flags |= NVMET_TCP_F_INIT_FAILED; in nvmet_tcp_handle_req_failure()
952 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_handle_h2c_data_pdu() local
961 cmd = &queue->cmds[data->ttag]; in nvmet_tcp_handle_h2c_data_pdu()
963 cmd = &queue->connect; in nvmet_tcp_handle_h2c_data_pdu()
966 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { in nvmet_tcp_handle_h2c_data_pdu()
969 cmd->rbytes_done); in nvmet_tcp_handle_h2c_data_pdu()
978 cmd->pdu_len = le32_to_cpu(data->data_length); in nvmet_tcp_handle_h2c_data_pdu()
979 if (unlikely(cmd->pdu_len != exp_data_len || in nvmet_tcp_handle_h2c_data_pdu()
980 cmd->pdu_len == 0 || in nvmet_tcp_handle_h2c_data_pdu()
981 cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) { in nvmet_tcp_handle_h2c_data_pdu()
982 pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len); in nvmet_tcp_handle_h2c_data_pdu()
985 cmd->pdu_recv = 0; in nvmet_tcp_handle_h2c_data_pdu()
986 nvmet_tcp_build_pdu_iovec(cmd); in nvmet_tcp_handle_h2c_data_pdu()
987 queue->cmd = cmd; in nvmet_tcp_handle_h2c_data_pdu()
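H2CData handling is defensive: the chunk must resume exactly at rbytes_done, and the declared length must match the PDU framing and stay under the advertised maximum. A sketch of those checks, with MAXH2CDATA as a placeholder for the driver's NVMET_TCP_MAXH2CDATA:

#include <stdbool.h>
#include <stdint.h>

#define MAXH2CDATA (1u << 20)  /* placeholder bound, not the driver's value */

static bool h2c_valid(uint32_t data_offset, uint32_t rbytes_done,
		      uint32_t data_length, uint32_t exp_data_len)
{
	if (data_offset != rbytes_done)
		return false;  /* must resume where the last chunk ended */
	if (data_length == 0 || data_length != exp_data_len)
		return false;  /* declared length vs. PDU framing mismatch */
	if (data_length > MAXH2CDATA)
		return false;  /* larger than the advertised limit */
	return true;
}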
1000 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_done_recv_pdu()
1001 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; in nvmet_tcp_done_recv_pdu()
1029 queue->cmd = nvmet_tcp_get_cmd(queue); in nvmet_tcp_done_recv_pdu()
1030 if (unlikely(!queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1039 req = &queue->cmd->req; in nvmet_tcp_done_recv_pdu()
1040 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd)); in nvmet_tcp_done_recv_pdu()
1043 pr_err("failed cmd %p id %d opcode %d, data_len: %d, status: %04x\n", in nvmet_tcp_done_recv_pdu()
1044 req->cmd, req->cmd->common.command_id, in nvmet_tcp_done_recv_pdu()
1045 req->cmd->common.opcode, in nvmet_tcp_done_recv_pdu()
1046 le32_to_cpu(req->cmd->common.dptr.sgl.length), in nvmet_tcp_done_recv_pdu()
1049 nvmet_tcp_handle_req_failure(queue, queue->cmd, req); in nvmet_tcp_done_recv_pdu()
1053 ret = nvmet_tcp_map_data(queue->cmd); in nvmet_tcp_done_recv_pdu()
1056 if (nvmet_tcp_has_inline_data(queue->cmd)) in nvmet_tcp_done_recv_pdu()
1064 if (nvmet_tcp_need_data_in(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1065 if (nvmet_tcp_has_inline_data(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1067 nvmet_tcp_build_pdu_iovec(queue->cmd); in nvmet_tcp_done_recv_pdu()
1071 nvmet_tcp_queue_response(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
1075 queue->cmd->req.execute(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
1146 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_try_recv_pdu()
1207 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_prep_recv_ddgst() argument
1209 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_prep_recv_ddgst()
1211 nvmet_tcp_calc_ddgst(cmd); in nvmet_tcp_prep_recv_ddgst()
1219 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_data() local
1222 while (msg_data_left(&cmd->recv_msg)) { in nvmet_tcp_try_recv_data()
1223 len = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, in nvmet_tcp_try_recv_data()
1224 cmd->recv_msg.msg_flags); in nvmet_tcp_try_recv_data()
1228 ret = nvmet_tcp_tls_record_ok(cmd->queue, in nvmet_tcp_try_recv_data()
1229 &cmd->recv_msg, cmd->recv_cbuf); in nvmet_tcp_try_recv_data()
1234 cmd->pdu_recv += len; in nvmet_tcp_try_recv_data()
1235 cmd->rbytes_done += len; in nvmet_tcp_try_recv_data()
1239 nvmet_tcp_prep_recv_ddgst(cmd); in nvmet_tcp_try_recv_data()
1243 if (cmd->rbytes_done == cmd->req.transfer_len) in nvmet_tcp_try_recv_data()
1244 nvmet_tcp_execute_request(cmd); in nvmet_tcp_try_recv_data()
1252 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_ddgst() local
1257 .iov_base = (void *)&cmd->recv_ddgst + queue->offset, in nvmet_tcp_try_recv_ddgst()
1280 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) { in nvmet_tcp_try_recv_ddgst()
1281 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n", in nvmet_tcp_try_recv_ddgst()
1282 queue->idx, cmd->req.cmd->common.command_id, in nvmet_tcp_try_recv_ddgst()
1283 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), in nvmet_tcp_try_recv_ddgst()
1284 le32_to_cpu(cmd->exp_ddgst)); in nvmet_tcp_try_recv_ddgst()
1285 nvmet_req_uninit(&cmd->req); in nvmet_tcp_try_recv_ddgst()
1286 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_tcp_try_recv_ddgst()
1292 if (cmd->rbytes_done == cmd->req.transfer_len) in nvmet_tcp_try_recv_ddgst()
1293 nvmet_tcp_execute_request(cmd); in nvmet_tcp_try_recv_ddgst()
1438 c->req.cmd = &c->cmd_pdu->cmd; in nvmet_tcp_alloc_cmd()
1537 struct nvmet_tcp_cmd *cmd = queue->cmds; in nvmet_tcp_uninit_data_in_cmds() local
1540 for (i = 0; i < queue->nr_cmds; i++, cmd++) { in nvmet_tcp_uninit_data_in_cmds()
1541 if (nvmet_tcp_need_data_in(cmd)) in nvmet_tcp_uninit_data_in_cmds()
1542 nvmet_req_uninit(&cmd->req); in nvmet_tcp_uninit_data_in_cmds()
1553 struct nvmet_tcp_cmd *cmd = queue->cmds; in nvmet_tcp_free_cmd_data_in_buffers() local
1556 for (i = 0; i < queue->nr_cmds; i++, cmd++) in nvmet_tcp_free_cmd_data_in_buffers()
1557 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_tcp_free_cmd_data_in_buffers()
1715 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_try_peek_pdu()
2160 struct nvmet_tcp_cmd *cmd = in nvmet_tcp_disc_port_addr() local
2162 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_disc_port_addr()