// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <net/tls_prot.h>
#include <net/handshake.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <trace/events/sock.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)
#define NVMET_TCP_MAXH2CDATA		0x400000 /* 16M arbitrary limit */
#define NVMET_TCP_BACKLOG		128

static int param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int set_params(const char *str, const struct kernel_param *kp)
{
	return param_store_val(str, kp->arg, 0, INT_MAX);
}

static const struct kernel_param_ops set_param_ops = {
	.set = set_params,
	.get = param_get_int,
};

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization.  Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");

/* Define a time period (in usecs) during which io_work() shall keep sampling
 * an activated queue before determining it to be idle.  This optional module
 * behavior can enable NIC solutions that support socket optimized packet
 * processing using advanced interrupt moderation techniques.
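 *
 * Example (a sketch, not taken from the driver itself): assuming the
 * transport is built as the nvmet-tcp module, so the parameter shows up
 * under /sys/module/nvmet_tcp/parameters/, a 100us idle poll period can
 * be requested with either of:
 *
 *   modprobe nvmet-tcp idle_poll_period_usecs=100
 *   echo 100 > /sys/module/nvmet_tcp/parameters/idle_poll_period_usecs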
68 */ 69 static int idle_poll_period_usecs; 70 device_param_cb(idle_poll_period_usecs, &set_param_ops, 71 &idle_poll_period_usecs, 0644); 72 MODULE_PARM_DESC(idle_poll_period_usecs, 73 "nvmet tcp io_work poll till idle time period in usecs: Default 0"); 74 75 #ifdef CONFIG_NVME_TARGET_TCP_TLS 76 /* 77 * TLS handshake timeout 78 */ 79 static int tls_handshake_timeout = 10; 80 module_param(tls_handshake_timeout, int, 0644); 81 MODULE_PARM_DESC(tls_handshake_timeout, 82 "nvme TLS handshake timeout in seconds (default 10)"); 83 #endif 84 85 #define NVMET_TCP_RECV_BUDGET 8 86 #define NVMET_TCP_SEND_BUDGET 8 87 #define NVMET_TCP_IO_WORK_BUDGET 64 88 89 enum nvmet_tcp_send_state { 90 NVMET_TCP_SEND_DATA_PDU, 91 NVMET_TCP_SEND_DATA, 92 NVMET_TCP_SEND_R2T, 93 NVMET_TCP_SEND_DDGST, 94 NVMET_TCP_SEND_RESPONSE 95 }; 96 97 enum nvmet_tcp_recv_state { 98 NVMET_TCP_RECV_PDU, 99 NVMET_TCP_RECV_DATA, 100 NVMET_TCP_RECV_DDGST, 101 NVMET_TCP_RECV_ERR, 102 }; 103 104 enum { 105 NVMET_TCP_F_INIT_FAILED = (1 << 0), 106 }; 107 108 struct nvmet_tcp_cmd { 109 struct nvmet_tcp_queue *queue; 110 struct nvmet_req req; 111 112 struct nvme_tcp_cmd_pdu *cmd_pdu; 113 struct nvme_tcp_rsp_pdu *rsp_pdu; 114 struct nvme_tcp_data_pdu *data_pdu; 115 struct nvme_tcp_r2t_pdu *r2t_pdu; 116 117 u32 rbytes_done; 118 u32 wbytes_done; 119 120 u32 pdu_len; 121 u32 pdu_recv; 122 int sg_idx; 123 char recv_cbuf[CMSG_LEN(sizeof(char))]; 124 struct msghdr recv_msg; 125 struct bio_vec *iov; 126 u32 flags; 127 128 struct list_head entry; 129 struct llist_node lentry; 130 131 /* send state */ 132 u32 offset; 133 struct scatterlist *cur_sg; 134 enum nvmet_tcp_send_state state; 135 136 __le32 exp_ddgst; 137 __le32 recv_ddgst; 138 }; 139 140 enum nvmet_tcp_queue_state { 141 NVMET_TCP_Q_CONNECTING, 142 NVMET_TCP_Q_TLS_HANDSHAKE, 143 NVMET_TCP_Q_LIVE, 144 NVMET_TCP_Q_DISCONNECTING, 145 NVMET_TCP_Q_FAILED, 146 }; 147 148 struct nvmet_tcp_queue { 149 struct socket *sock; 150 struct nvmet_tcp_port *port; 151 struct work_struct io_work; 152 struct nvmet_cq nvme_cq; 153 struct nvmet_sq nvme_sq; 154 struct kref kref; 155 156 /* send state */ 157 struct nvmet_tcp_cmd *cmds; 158 unsigned int nr_cmds; 159 struct list_head free_list; 160 struct llist_head resp_list; 161 struct list_head resp_send_list; 162 int send_list_len; 163 struct nvmet_tcp_cmd *snd_cmd; 164 165 /* recv state */ 166 int offset; 167 int left; 168 enum nvmet_tcp_recv_state rcv_state; 169 struct nvmet_tcp_cmd *cmd; 170 union nvme_tcp_pdu pdu; 171 172 /* digest state */ 173 bool hdr_digest; 174 bool data_digest; 175 176 /* TLS state */ 177 key_serial_t tls_pskid; 178 struct delayed_work tls_handshake_tmo_work; 179 180 unsigned long poll_end; 181 182 spinlock_t state_lock; 183 enum nvmet_tcp_queue_state state; 184 185 struct sockaddr_storage sockaddr; 186 struct sockaddr_storage sockaddr_peer; 187 struct work_struct release_work; 188 189 int idx; 190 struct list_head queue_list; 191 192 struct nvmet_tcp_cmd connect; 193 194 struct page_frag_cache pf_cache; 195 196 void (*data_ready)(struct sock *); 197 void (*state_change)(struct sock *); 198 void (*write_space)(struct sock *); 199 }; 200 201 struct nvmet_tcp_port { 202 struct socket *sock; 203 struct work_struct accept_work; 204 struct nvmet_port *nport; 205 struct sockaddr_storage addr; 206 void (*data_ready)(struct sock *); 207 }; 208 209 static DEFINE_IDA(nvmet_tcp_queue_ida); 210 static LIST_HEAD(nvmet_tcp_queue_list); 211 static DEFINE_MUTEX(nvmet_tcp_queue_mutex); 212 213 static struct workqueue_struct *nvmet_tcp_wq; 214 static const 
struct nvmet_fabrics_ops nvmet_tcp_ops; 215 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c); 216 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd); 217 218 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, 219 struct nvmet_tcp_cmd *cmd) 220 { 221 if (unlikely(!queue->nr_cmds)) { 222 /* We didn't allocate cmds yet, send 0xffff */ 223 return USHRT_MAX; 224 } 225 226 return cmd - queue->cmds; 227 } 228 229 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd) 230 { 231 return nvme_is_write(cmd->req.cmd) && 232 cmd->rbytes_done < cmd->req.transfer_len; 233 } 234 235 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd) 236 { 237 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status; 238 } 239 240 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd) 241 { 242 return !nvme_is_write(cmd->req.cmd) && 243 cmd->req.transfer_len > 0 && 244 !cmd->req.cqe->status; 245 } 246 247 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd) 248 { 249 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len && 250 !cmd->rbytes_done; 251 } 252 253 static inline struct nvmet_tcp_cmd * 254 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue) 255 { 256 struct nvmet_tcp_cmd *cmd; 257 258 cmd = list_first_entry_or_null(&queue->free_list, 259 struct nvmet_tcp_cmd, entry); 260 if (!cmd) 261 return NULL; 262 list_del_init(&cmd->entry); 263 264 cmd->rbytes_done = cmd->wbytes_done = 0; 265 cmd->pdu_len = 0; 266 cmd->pdu_recv = 0; 267 cmd->iov = NULL; 268 cmd->flags = 0; 269 return cmd; 270 } 271 272 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd) 273 { 274 if (unlikely(cmd == &cmd->queue->connect)) 275 return; 276 277 list_add_tail(&cmd->entry, &cmd->queue->free_list); 278 } 279 280 static inline int queue_cpu(struct nvmet_tcp_queue *queue) 281 { 282 return queue->sock->sk->sk_incoming_cpu; 283 } 284 285 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue) 286 { 287 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; 288 } 289 290 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue) 291 { 292 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; 293 } 294 295 static inline void nvmet_tcp_hdgst(void *pdu, size_t len) 296 { 297 put_unaligned_le32(~crc32c(~0, pdu, len), pdu + len); 298 } 299 300 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, 301 void *pdu, size_t len) 302 { 303 struct nvme_tcp_hdr *hdr = pdu; 304 __le32 recv_digest; 305 __le32 exp_digest; 306 307 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { 308 pr_err("queue %d: header digest enabled but no header digest\n", 309 queue->idx); 310 return -EPROTO; 311 } 312 313 recv_digest = *(__le32 *)(pdu + hdr->hlen); 314 nvmet_tcp_hdgst(pdu, len); 315 exp_digest = *(__le32 *)(pdu + hdr->hlen); 316 if (recv_digest != exp_digest) { 317 pr_err("queue %d: header digest error: recv %#x expected %#x\n", 318 queue->idx, le32_to_cpu(recv_digest), 319 le32_to_cpu(exp_digest)); 320 return -EPROTO; 321 } 322 323 return 0; 324 } 325 326 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu) 327 { 328 struct nvme_tcp_hdr *hdr = pdu; 329 u8 digest_len = nvmet_tcp_hdgst_len(queue); 330 u32 len; 331 332 len = le32_to_cpu(hdr->plen) - hdr->hlen - 333 (hdr->flags & NVME_TCP_F_HDGST ? 
digest_len : 0); 334 335 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { 336 pr_err("queue %d: data digest flag is cleared\n", queue->idx); 337 return -EPROTO; 338 } 339 340 return 0; 341 } 342 343 /* If cmd buffers are NULL, no operation is performed */ 344 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd) 345 { 346 kfree(cmd->iov); 347 sgl_free(cmd->req.sg); 348 cmd->iov = NULL; 349 cmd->req.sg = NULL; 350 } 351 352 static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) 353 { 354 struct bio_vec *iov = cmd->iov; 355 struct scatterlist *sg; 356 u32 length, offset, sg_offset; 357 int nr_pages; 358 359 length = cmd->pdu_len; 360 nr_pages = DIV_ROUND_UP(length, PAGE_SIZE); 361 offset = cmd->rbytes_done; 362 cmd->sg_idx = offset / PAGE_SIZE; 363 sg_offset = offset % PAGE_SIZE; 364 sg = &cmd->req.sg[cmd->sg_idx]; 365 366 while (length) { 367 u32 iov_len = min_t(u32, length, sg->length - sg_offset); 368 369 bvec_set_page(iov, sg_page(sg), iov_len, 370 sg->offset + sg_offset); 371 372 length -= iov_len; 373 sg = sg_next(sg); 374 iov++; 375 sg_offset = 0; 376 } 377 378 iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov, 379 nr_pages, cmd->pdu_len); 380 } 381 382 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) 383 { 384 queue->rcv_state = NVMET_TCP_RECV_ERR; 385 if (queue->nvme_sq.ctrl) 386 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); 387 else 388 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 389 } 390 391 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status) 392 { 393 queue->rcv_state = NVMET_TCP_RECV_ERR; 394 if (status == -EPIPE || status == -ECONNRESET) 395 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 396 else 397 nvmet_tcp_fatal_error(queue); 398 } 399 400 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) 401 { 402 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl; 403 u32 len = le32_to_cpu(sgl->length); 404 405 if (!len) 406 return 0; 407 408 if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) | 409 NVME_SGL_FMT_OFFSET)) { 410 if (!nvme_is_write(cmd->req.cmd)) 411 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 412 413 if (len > cmd->req.port->inline_data_size) 414 return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR; 415 cmd->pdu_len = len; 416 } 417 cmd->req.transfer_len += len; 418 419 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt); 420 if (!cmd->req.sg) 421 return NVME_SC_INTERNAL; 422 cmd->cur_sg = cmd->req.sg; 423 424 if (nvmet_tcp_has_data_in(cmd)) { 425 cmd->iov = kmalloc_array(cmd->req.sg_cnt, 426 sizeof(*cmd->iov), GFP_KERNEL); 427 if (!cmd->iov) 428 goto err; 429 } 430 431 return 0; 432 err: 433 nvmet_tcp_free_cmd_buffers(cmd); 434 return NVME_SC_INTERNAL; 435 } 436 437 static void nvmet_tcp_calc_ddgst(struct nvmet_tcp_cmd *cmd) 438 { 439 size_t total_len = cmd->req.transfer_len; 440 struct scatterlist *sg = cmd->req.sg; 441 u32 crc = ~0; 442 443 while (total_len) { 444 size_t len = min_t(size_t, total_len, sg->length); 445 446 /* 447 * Note that the scatterlist does not contain any highmem pages, 448 * as it was allocated by sgl_alloc() with GFP_KERNEL. 
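		 * (If highmem pages could ever end up here, this loop would need
		 * kmap_local_page() instead of relying on sg_virt().)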
449 */ 450 crc = crc32c(crc, sg_virt(sg), len); 451 total_len -= len; 452 sg = sg_next(sg); 453 } 454 cmd->exp_ddgst = cpu_to_le32(~crc); 455 } 456 457 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) 458 { 459 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu; 460 struct nvmet_tcp_queue *queue = cmd->queue; 461 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 462 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue); 463 464 cmd->offset = 0; 465 cmd->state = NVMET_TCP_SEND_DATA_PDU; 466 467 pdu->hdr.type = nvme_tcp_c2h_data; 468 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ? 469 NVME_TCP_F_DATA_SUCCESS : 0); 470 pdu->hdr.hlen = sizeof(*pdu); 471 pdu->hdr.pdo = pdu->hdr.hlen + hdgst; 472 pdu->hdr.plen = 473 cpu_to_le32(pdu->hdr.hlen + hdgst + 474 cmd->req.transfer_len + ddgst); 475 pdu->command_id = cmd->req.cqe->command_id; 476 pdu->data_length = cpu_to_le32(cmd->req.transfer_len); 477 pdu->data_offset = cpu_to_le32(cmd->wbytes_done); 478 479 if (queue->data_digest) { 480 pdu->hdr.flags |= NVME_TCP_F_DDGST; 481 nvmet_tcp_calc_ddgst(cmd); 482 } 483 484 if (cmd->queue->hdr_digest) { 485 pdu->hdr.flags |= NVME_TCP_F_HDGST; 486 nvmet_tcp_hdgst(pdu, sizeof(*pdu)); 487 } 488 } 489 490 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) 491 { 492 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; 493 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 494 495 cmd->offset = 0; 496 cmd->state = NVMET_TCP_SEND_R2T; 497 498 pdu->hdr.type = nvme_tcp_r2t; 499 pdu->hdr.flags = 0; 500 pdu->hdr.hlen = sizeof(*pdu); 501 pdu->hdr.pdo = 0; 502 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); 503 504 pdu->command_id = cmd->req.cmd->common.command_id; 505 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); 506 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done); 507 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); 508 if (cmd->queue->hdr_digest) { 509 pdu->hdr.flags |= NVME_TCP_F_HDGST; 510 nvmet_tcp_hdgst(pdu, sizeof(*pdu)); 511 } 512 } 513 514 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) 515 { 516 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; 517 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 518 519 cmd->offset = 0; 520 cmd->state = NVMET_TCP_SEND_RESPONSE; 521 522 pdu->hdr.type = nvme_tcp_rsp; 523 pdu->hdr.flags = 0; 524 pdu->hdr.hlen = sizeof(*pdu); 525 pdu->hdr.pdo = 0; 526 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); 527 if (cmd->queue->hdr_digest) { 528 pdu->hdr.flags |= NVME_TCP_F_HDGST; 529 nvmet_tcp_hdgst(pdu, sizeof(*pdu)); 530 } 531 } 532 533 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue) 534 { 535 struct llist_node *node; 536 struct nvmet_tcp_cmd *cmd; 537 538 for (node = llist_del_all(&queue->resp_list); node; node = node->next) { 539 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry); 540 list_add(&cmd->entry, &queue->resp_send_list); 541 queue->send_list_len++; 542 } 543 } 544 545 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue) 546 { 547 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list, 548 struct nvmet_tcp_cmd, entry); 549 if (!queue->snd_cmd) { 550 nvmet_tcp_process_resp_list(queue); 551 queue->snd_cmd = 552 list_first_entry_or_null(&queue->resp_send_list, 553 struct nvmet_tcp_cmd, entry); 554 if (unlikely(!queue->snd_cmd)) 555 return NULL; 556 } 557 558 list_del_init(&queue->snd_cmd->entry); 559 queue->send_list_len--; 560 561 if (nvmet_tcp_need_data_out(queue->snd_cmd)) 562 nvmet_setup_c2h_data_pdu(queue->snd_cmd); 563 else if 
(nvmet_tcp_need_data_in(queue->snd_cmd)) 564 nvmet_setup_r2t_pdu(queue->snd_cmd); 565 else 566 nvmet_setup_response_pdu(queue->snd_cmd); 567 568 return queue->snd_cmd; 569 } 570 571 static void nvmet_tcp_queue_response(struct nvmet_req *req) 572 { 573 struct nvmet_tcp_cmd *cmd = 574 container_of(req, struct nvmet_tcp_cmd, req); 575 struct nvmet_tcp_queue *queue = cmd->queue; 576 enum nvmet_tcp_recv_state queue_state; 577 struct nvmet_tcp_cmd *queue_cmd; 578 struct nvme_sgl_desc *sgl; 579 u32 len; 580 581 /* Pairs with store_release in nvmet_prepare_receive_pdu() */ 582 queue_state = smp_load_acquire(&queue->rcv_state); 583 queue_cmd = READ_ONCE(queue->cmd); 584 585 if (unlikely(cmd == queue_cmd)) { 586 sgl = &cmd->req.cmd->common.dptr.sgl; 587 len = le32_to_cpu(sgl->length); 588 589 /* 590 * Wait for inline data before processing the response. 591 * Avoid using helpers, this might happen before 592 * nvmet_req_init is completed. 593 */ 594 if (queue_state == NVMET_TCP_RECV_PDU && 595 len && len <= cmd->req.port->inline_data_size && 596 nvme_is_write(cmd->req.cmd)) 597 return; 598 } 599 600 llist_add(&cmd->lentry, &queue->resp_list); 601 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work); 602 } 603 604 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd) 605 { 606 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED)) 607 nvmet_tcp_queue_response(&cmd->req); 608 else 609 cmd->req.execute(&cmd->req); 610 } 611 612 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd) 613 { 614 struct msghdr msg = { 615 .msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES, 616 }; 617 struct bio_vec bvec; 618 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 619 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst; 620 int ret; 621 622 bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left); 623 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); 624 ret = sock_sendmsg(cmd->queue->sock, &msg); 625 if (ret <= 0) 626 return ret; 627 628 cmd->offset += ret; 629 left -= ret; 630 631 if (left) 632 return -EAGAIN; 633 634 cmd->state = NVMET_TCP_SEND_DATA; 635 cmd->offset = 0; 636 return 1; 637 } 638 639 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch) 640 { 641 struct nvmet_tcp_queue *queue = cmd->queue; 642 int ret; 643 644 while (cmd->cur_sg) { 645 struct msghdr msg = { 646 .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, 647 }; 648 struct page *page = sg_page(cmd->cur_sg); 649 struct bio_vec bvec; 650 u32 left = cmd->cur_sg->length - cmd->offset; 651 652 if ((!last_in_batch && cmd->queue->send_list_len) || 653 cmd->wbytes_done + left < cmd->req.transfer_len || 654 queue->data_digest || !queue->nvme_sq.sqhd_disabled) 655 msg.msg_flags |= MSG_MORE; 656 657 bvec_set_page(&bvec, page, left, cmd->offset); 658 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); 659 ret = sock_sendmsg(cmd->queue->sock, &msg); 660 if (ret <= 0) 661 return ret; 662 663 cmd->offset += ret; 664 cmd->wbytes_done += ret; 665 666 /* Done with sg?*/ 667 if (cmd->offset == cmd->cur_sg->length) { 668 cmd->cur_sg = sg_next(cmd->cur_sg); 669 cmd->offset = 0; 670 } 671 } 672 673 if (queue->data_digest) { 674 cmd->state = NVMET_TCP_SEND_DDGST; 675 cmd->offset = 0; 676 } else { 677 if (queue->nvme_sq.sqhd_disabled) { 678 cmd->queue->snd_cmd = NULL; 679 nvmet_tcp_put_cmd(cmd); 680 } else { 681 nvmet_setup_response_pdu(cmd); 682 } 683 } 684 685 if (queue->nvme_sq.sqhd_disabled) 686 nvmet_tcp_free_cmd_buffers(cmd); 687 688 return 1; 689 690 } 691 692 static int 
nvmet_try_send_response(struct nvmet_tcp_cmd *cmd, 693 bool last_in_batch) 694 { 695 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, }; 696 struct bio_vec bvec; 697 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 698 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst; 699 int ret; 700 701 if (!last_in_batch && cmd->queue->send_list_len) 702 msg.msg_flags |= MSG_MORE; 703 else 704 msg.msg_flags |= MSG_EOR; 705 706 bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left); 707 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); 708 ret = sock_sendmsg(cmd->queue->sock, &msg); 709 if (ret <= 0) 710 return ret; 711 cmd->offset += ret; 712 left -= ret; 713 714 if (left) 715 return -EAGAIN; 716 717 nvmet_tcp_free_cmd_buffers(cmd); 718 cmd->queue->snd_cmd = NULL; 719 nvmet_tcp_put_cmd(cmd); 720 return 1; 721 } 722 723 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) 724 { 725 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, }; 726 struct bio_vec bvec; 727 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 728 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst; 729 int ret; 730 731 if (!last_in_batch && cmd->queue->send_list_len) 732 msg.msg_flags |= MSG_MORE; 733 else 734 msg.msg_flags |= MSG_EOR; 735 736 bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left); 737 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); 738 ret = sock_sendmsg(cmd->queue->sock, &msg); 739 if (ret <= 0) 740 return ret; 741 cmd->offset += ret; 742 left -= ret; 743 744 if (left) 745 return -EAGAIN; 746 747 cmd->queue->snd_cmd = NULL; 748 return 1; 749 } 750 751 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) 752 { 753 struct nvmet_tcp_queue *queue = cmd->queue; 754 int left = NVME_TCP_DIGEST_LENGTH - cmd->offset; 755 struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; 756 struct kvec iov = { 757 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset, 758 .iov_len = left 759 }; 760 int ret; 761 762 if (!last_in_batch && cmd->queue->send_list_len) 763 msg.msg_flags |= MSG_MORE; 764 else 765 msg.msg_flags |= MSG_EOR; 766 767 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); 768 if (unlikely(ret <= 0)) 769 return ret; 770 771 cmd->offset += ret; 772 left -= ret; 773 774 if (left) 775 return -EAGAIN; 776 777 if (queue->nvme_sq.sqhd_disabled) { 778 cmd->queue->snd_cmd = NULL; 779 nvmet_tcp_put_cmd(cmd); 780 } else { 781 nvmet_setup_response_pdu(cmd); 782 } 783 return 1; 784 } 785 786 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue, 787 bool last_in_batch) 788 { 789 struct nvmet_tcp_cmd *cmd = queue->snd_cmd; 790 int ret = 0; 791 792 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) { 793 cmd = nvmet_tcp_fetch_cmd(queue); 794 if (unlikely(!cmd)) 795 return 0; 796 } 797 798 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) { 799 ret = nvmet_try_send_data_pdu(cmd); 800 if (ret <= 0) 801 goto done_send; 802 } 803 804 if (cmd->state == NVMET_TCP_SEND_DATA) { 805 ret = nvmet_try_send_data(cmd, last_in_batch); 806 if (ret <= 0) 807 goto done_send; 808 } 809 810 if (cmd->state == NVMET_TCP_SEND_DDGST) { 811 ret = nvmet_try_send_ddgst(cmd, last_in_batch); 812 if (ret <= 0) 813 goto done_send; 814 } 815 816 if (cmd->state == NVMET_TCP_SEND_R2T) { 817 ret = nvmet_try_send_r2t(cmd, last_in_batch); 818 if (ret <= 0) 819 goto done_send; 820 } 821 822 if (cmd->state == NVMET_TCP_SEND_RESPONSE) 823 ret = nvmet_try_send_response(cmd, last_in_batch); 824 825 done_send: 826 if (ret < 0) { 827 if (ret == 
-EAGAIN) 828 return 0; 829 return ret; 830 } 831 832 return 1; 833 } 834 835 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue, 836 int budget, int *sends) 837 { 838 int i, ret = 0; 839 840 for (i = 0; i < budget; i++) { 841 ret = nvmet_tcp_try_send_one(queue, i == budget - 1); 842 if (unlikely(ret < 0)) { 843 nvmet_tcp_socket_error(queue, ret); 844 goto done; 845 } else if (ret == 0) { 846 break; 847 } 848 (*sends)++; 849 } 850 done: 851 return ret; 852 } 853 854 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) 855 { 856 queue->offset = 0; 857 queue->left = sizeof(struct nvme_tcp_hdr); 858 WRITE_ONCE(queue->cmd, NULL); 859 /* Ensure rcv_state is visible only after queue->cmd is set */ 860 smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU); 861 } 862 863 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) 864 { 865 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq; 866 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp; 867 struct msghdr msg = {}; 868 struct kvec iov; 869 int ret; 870 871 if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) { 872 pr_err("bad nvme-tcp pdu length (%d)\n", 873 le32_to_cpu(icreq->hdr.plen)); 874 nvmet_tcp_fatal_error(queue); 875 return -EPROTO; 876 } 877 878 if (icreq->pfv != NVME_TCP_PFV_1_0) { 879 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv); 880 return -EPROTO; 881 } 882 883 if (icreq->hpda != 0) { 884 pr_err("queue %d: unsupported hpda %d\n", queue->idx, 885 icreq->hpda); 886 return -EPROTO; 887 } 888 889 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE); 890 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE); 891 892 memset(icresp, 0, sizeof(*icresp)); 893 icresp->hdr.type = nvme_tcp_icresp; 894 icresp->hdr.hlen = sizeof(*icresp); 895 icresp->hdr.pdo = 0; 896 icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen); 897 icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); 898 icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA); 899 icresp->cpda = 0; 900 if (queue->hdr_digest) 901 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE; 902 if (queue->data_digest) 903 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE; 904 905 iov.iov_base = icresp; 906 iov.iov_len = sizeof(*icresp); 907 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); 908 if (ret < 0) { 909 queue->state = NVMET_TCP_Q_FAILED; 910 return ret; /* queue removal will cleanup */ 911 } 912 913 queue->state = NVMET_TCP_Q_LIVE; 914 nvmet_prepare_receive_pdu(queue); 915 return 0; 916 } 917 918 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, 919 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req) 920 { 921 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); 922 int ret; 923 924 /* 925 * This command has not been processed yet, hence we are trying to 926 * figure out if there is still pending data left to receive. If 927 * we don't, we can simply prepare for the next pdu and bail out, 928 * otherwise we will need to prepare a buffer and receive the 929 * stale data before continuing forward. 
930 */ 931 if (!nvme_is_write(cmd->req.cmd) || !data_len || 932 data_len > cmd->req.port->inline_data_size) { 933 nvmet_prepare_receive_pdu(queue); 934 return; 935 } 936 937 ret = nvmet_tcp_map_data(cmd); 938 if (unlikely(ret)) { 939 pr_err("queue %d: failed to map data\n", queue->idx); 940 nvmet_tcp_fatal_error(queue); 941 return; 942 } 943 944 queue->rcv_state = NVMET_TCP_RECV_DATA; 945 nvmet_tcp_build_pdu_iovec(cmd); 946 cmd->flags |= NVMET_TCP_F_INIT_FAILED; 947 } 948 949 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) 950 { 951 struct nvme_tcp_data_pdu *data = &queue->pdu.data; 952 struct nvmet_tcp_cmd *cmd; 953 unsigned int exp_data_len; 954 955 if (likely(queue->nr_cmds)) { 956 if (unlikely(data->ttag >= queue->nr_cmds)) { 957 pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n", 958 queue->idx, data->ttag, queue->nr_cmds); 959 goto err_proto; 960 } 961 cmd = &queue->cmds[data->ttag]; 962 } else { 963 cmd = &queue->connect; 964 } 965 966 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { 967 pr_err("ttag %u unexpected data offset %u (expected %u)\n", 968 data->ttag, le32_to_cpu(data->data_offset), 969 cmd->rbytes_done); 970 goto err_proto; 971 } 972 973 exp_data_len = le32_to_cpu(data->hdr.plen) - 974 nvmet_tcp_hdgst_len(queue) - 975 nvmet_tcp_ddgst_len(queue) - 976 sizeof(*data); 977 978 cmd->pdu_len = le32_to_cpu(data->data_length); 979 if (unlikely(cmd->pdu_len != exp_data_len || 980 cmd->pdu_len == 0 || 981 cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) { 982 pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len); 983 goto err_proto; 984 } 985 cmd->pdu_recv = 0; 986 nvmet_tcp_build_pdu_iovec(cmd); 987 queue->cmd = cmd; 988 queue->rcv_state = NVMET_TCP_RECV_DATA; 989 990 return 0; 991 992 err_proto: 993 /* FIXME: use proper transport errors */ 994 nvmet_tcp_fatal_error(queue); 995 return -EPROTO; 996 } 997 998 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) 999 { 1000 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; 1001 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; 1002 struct nvmet_req *req; 1003 int ret; 1004 1005 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { 1006 if (hdr->type != nvme_tcp_icreq) { 1007 pr_err("unexpected pdu type (%d) before icreq\n", 1008 hdr->type); 1009 nvmet_tcp_fatal_error(queue); 1010 return -EPROTO; 1011 } 1012 return nvmet_tcp_handle_icreq(queue); 1013 } 1014 1015 if (unlikely(hdr->type == nvme_tcp_icreq)) { 1016 pr_err("queue %d: received icreq pdu in state %d\n", 1017 queue->idx, queue->state); 1018 nvmet_tcp_fatal_error(queue); 1019 return -EPROTO; 1020 } 1021 1022 if (hdr->type == nvme_tcp_h2c_data) { 1023 ret = nvmet_tcp_handle_h2c_data_pdu(queue); 1024 if (unlikely(ret)) 1025 return ret; 1026 return 0; 1027 } 1028 1029 queue->cmd = nvmet_tcp_get_cmd(queue); 1030 if (unlikely(!queue->cmd)) { 1031 /* This should never happen */ 1032 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d", 1033 queue->idx, queue->nr_cmds, queue->send_list_len, 1034 nvme_cmd->common.opcode); 1035 nvmet_tcp_fatal_error(queue); 1036 return -ENOMEM; 1037 } 1038 1039 req = &queue->cmd->req; 1040 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd)); 1041 1042 if (unlikely(!nvmet_req_init(req, &queue->nvme_sq, &nvmet_tcp_ops))) { 1043 pr_err("failed cmd %p id %d opcode %d, data_len: %d, status: %04x\n", 1044 req->cmd, req->cmd->common.command_id, 1045 req->cmd->common.opcode, 1046 le32_to_cpu(req->cmd->common.dptr.sgl.length), 1047 le16_to_cpu(req->cqe->status)); 1048 1049 
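		/*
		 * Request initialization failed, but the host may still be
		 * sending inline data for this command.  Receive (and discard)
		 * that data first so the byte stream stays in sync; the error
		 * response goes out once the transfer has been drained.
		 */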
nvmet_tcp_handle_req_failure(queue, queue->cmd, req); 1050 return 0; 1051 } 1052 1053 ret = nvmet_tcp_map_data(queue->cmd); 1054 if (unlikely(ret)) { 1055 pr_err("queue %d: failed to map data\n", queue->idx); 1056 if (nvmet_tcp_has_inline_data(queue->cmd)) 1057 nvmet_tcp_fatal_error(queue); 1058 else 1059 nvmet_req_complete(req, ret); 1060 ret = -EAGAIN; 1061 goto out; 1062 } 1063 1064 if (nvmet_tcp_need_data_in(queue->cmd)) { 1065 if (nvmet_tcp_has_inline_data(queue->cmd)) { 1066 queue->rcv_state = NVMET_TCP_RECV_DATA; 1067 nvmet_tcp_build_pdu_iovec(queue->cmd); 1068 return 0; 1069 } 1070 /* send back R2T */ 1071 nvmet_tcp_queue_response(&queue->cmd->req); 1072 goto out; 1073 } 1074 1075 queue->cmd->req.execute(&queue->cmd->req); 1076 out: 1077 nvmet_prepare_receive_pdu(queue); 1078 return ret; 1079 } 1080 1081 static const u8 nvme_tcp_pdu_sizes[] = { 1082 [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu), 1083 [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu), 1084 [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu), 1085 }; 1086 1087 static inline u8 nvmet_tcp_pdu_size(u8 type) 1088 { 1089 size_t idx = type; 1090 1091 return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) && 1092 nvme_tcp_pdu_sizes[idx]) ? 1093 nvme_tcp_pdu_sizes[idx] : 0; 1094 } 1095 1096 static inline bool nvmet_tcp_pdu_valid(u8 type) 1097 { 1098 switch (type) { 1099 case nvme_tcp_icreq: 1100 case nvme_tcp_cmd: 1101 case nvme_tcp_h2c_data: 1102 /* fallthru */ 1103 return true; 1104 } 1105 1106 return false; 1107 } 1108 1109 static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue, 1110 struct msghdr *msg, char *cbuf) 1111 { 1112 struct cmsghdr *cmsg = (struct cmsghdr *)cbuf; 1113 u8 ctype, level, description; 1114 int ret = 0; 1115 1116 ctype = tls_get_record_type(queue->sock->sk, cmsg); 1117 switch (ctype) { 1118 case 0: 1119 break; 1120 case TLS_RECORD_TYPE_DATA: 1121 break; 1122 case TLS_RECORD_TYPE_ALERT: 1123 tls_alert_recv(queue->sock->sk, msg, &level, &description); 1124 if (level == TLS_ALERT_LEVEL_FATAL) { 1125 pr_err("queue %d: TLS Alert desc %u\n", 1126 queue->idx, description); 1127 ret = -ENOTCONN; 1128 } else { 1129 pr_warn("queue %d: TLS Alert desc %u\n", 1130 queue->idx, description); 1131 ret = -EAGAIN; 1132 } 1133 break; 1134 default: 1135 /* discard this record type */ 1136 pr_err("queue %d: TLS record %d unhandled\n", 1137 queue->idx, ctype); 1138 ret = -EAGAIN; 1139 break; 1140 } 1141 return ret; 1142 } 1143 1144 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue) 1145 { 1146 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; 1147 int len, ret; 1148 struct kvec iov; 1149 char cbuf[CMSG_LEN(sizeof(char))] = {}; 1150 struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; 1151 1152 recv: 1153 iov.iov_base = (void *)&queue->pdu + queue->offset; 1154 iov.iov_len = queue->left; 1155 if (queue->tls_pskid) { 1156 msg.msg_control = cbuf; 1157 msg.msg_controllen = sizeof(cbuf); 1158 } 1159 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, 1160 iov.iov_len, msg.msg_flags); 1161 if (unlikely(len < 0)) 1162 return len; 1163 if (queue->tls_pskid) { 1164 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf); 1165 if (ret < 0) 1166 return ret; 1167 } 1168 1169 queue->offset += len; 1170 queue->left -= len; 1171 if (queue->left) 1172 return -EAGAIN; 1173 1174 if (queue->offset == sizeof(struct nvme_tcp_hdr)) { 1175 u8 hdgst = nvmet_tcp_hdgst_len(queue); 1176 1177 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) { 1178 pr_err("unexpected pdu type %d\n", hdr->type); 1179 nvmet_tcp_fatal_error(queue); 1180 return 
-EIO; 1181 } 1182 1183 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) { 1184 pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen); 1185 return -EIO; 1186 } 1187 1188 queue->left = hdr->hlen - queue->offset + hdgst; 1189 goto recv; 1190 } 1191 1192 if (queue->hdr_digest && 1193 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) { 1194 nvmet_tcp_fatal_error(queue); /* fatal */ 1195 return -EPROTO; 1196 } 1197 1198 if (queue->data_digest && 1199 nvmet_tcp_check_ddgst(queue, &queue->pdu)) { 1200 nvmet_tcp_fatal_error(queue); /* fatal */ 1201 return -EPROTO; 1202 } 1203 1204 return nvmet_tcp_done_recv_pdu(queue); 1205 } 1206 1207 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) 1208 { 1209 struct nvmet_tcp_queue *queue = cmd->queue; 1210 1211 nvmet_tcp_calc_ddgst(cmd); 1212 queue->offset = 0; 1213 queue->left = NVME_TCP_DIGEST_LENGTH; 1214 queue->rcv_state = NVMET_TCP_RECV_DDGST; 1215 } 1216 1217 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) 1218 { 1219 struct nvmet_tcp_cmd *cmd = queue->cmd; 1220 int len, ret; 1221 1222 while (msg_data_left(&cmd->recv_msg)) { 1223 len = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, 1224 cmd->recv_msg.msg_flags); 1225 if (len <= 0) 1226 return len; 1227 if (queue->tls_pskid) { 1228 ret = nvmet_tcp_tls_record_ok(cmd->queue, 1229 &cmd->recv_msg, cmd->recv_cbuf); 1230 if (ret < 0) 1231 return ret; 1232 } 1233 1234 cmd->pdu_recv += len; 1235 cmd->rbytes_done += len; 1236 } 1237 1238 if (queue->data_digest) { 1239 nvmet_tcp_prep_recv_ddgst(cmd); 1240 return 0; 1241 } 1242 1243 if (cmd->rbytes_done == cmd->req.transfer_len) 1244 nvmet_tcp_execute_request(cmd); 1245 1246 nvmet_prepare_receive_pdu(queue); 1247 return 0; 1248 } 1249 1250 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue) 1251 { 1252 struct nvmet_tcp_cmd *cmd = queue->cmd; 1253 int ret, len; 1254 char cbuf[CMSG_LEN(sizeof(char))] = {}; 1255 struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; 1256 struct kvec iov = { 1257 .iov_base = (void *)&cmd->recv_ddgst + queue->offset, 1258 .iov_len = queue->left 1259 }; 1260 1261 if (queue->tls_pskid) { 1262 msg.msg_control = cbuf; 1263 msg.msg_controllen = sizeof(cbuf); 1264 } 1265 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, 1266 iov.iov_len, msg.msg_flags); 1267 if (unlikely(len < 0)) 1268 return len; 1269 if (queue->tls_pskid) { 1270 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf); 1271 if (ret < 0) 1272 return ret; 1273 } 1274 1275 queue->offset += len; 1276 queue->left -= len; 1277 if (queue->left) 1278 return -EAGAIN; 1279 1280 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) { 1281 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n", 1282 queue->idx, cmd->req.cmd->common.command_id, 1283 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), 1284 le32_to_cpu(cmd->exp_ddgst)); 1285 nvmet_req_uninit(&cmd->req); 1286 nvmet_tcp_free_cmd_buffers(cmd); 1287 nvmet_tcp_fatal_error(queue); 1288 ret = -EPROTO; 1289 goto out; 1290 } 1291 1292 if (cmd->rbytes_done == cmd->req.transfer_len) 1293 nvmet_tcp_execute_request(cmd); 1294 1295 ret = 0; 1296 out: 1297 nvmet_prepare_receive_pdu(queue); 1298 return ret; 1299 } 1300 1301 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) 1302 { 1303 int result = 0; 1304 1305 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) 1306 return 0; 1307 1308 if (queue->rcv_state == NVMET_TCP_RECV_PDU) { 1309 result = nvmet_tcp_try_recv_pdu(queue); 1310 if (result != 0) 1311 goto done_recv; 1312 } 1313 1314 if 
(queue->rcv_state == NVMET_TCP_RECV_DATA) { 1315 result = nvmet_tcp_try_recv_data(queue); 1316 if (result != 0) 1317 goto done_recv; 1318 } 1319 1320 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) { 1321 result = nvmet_tcp_try_recv_ddgst(queue); 1322 if (result != 0) 1323 goto done_recv; 1324 } 1325 1326 done_recv: 1327 if (result < 0) { 1328 if (result == -EAGAIN) 1329 return 0; 1330 return result; 1331 } 1332 return 1; 1333 } 1334 1335 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue, 1336 int budget, int *recvs) 1337 { 1338 int i, ret = 0; 1339 1340 for (i = 0; i < budget; i++) { 1341 ret = nvmet_tcp_try_recv_one(queue); 1342 if (unlikely(ret < 0)) { 1343 nvmet_tcp_socket_error(queue, ret); 1344 goto done; 1345 } else if (ret == 0) { 1346 break; 1347 } 1348 (*recvs)++; 1349 } 1350 done: 1351 return ret; 1352 } 1353 1354 static void nvmet_tcp_release_queue(struct kref *kref) 1355 { 1356 struct nvmet_tcp_queue *queue = 1357 container_of(kref, struct nvmet_tcp_queue, kref); 1358 1359 WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING); 1360 queue_work(nvmet_wq, &queue->release_work); 1361 } 1362 1363 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue) 1364 { 1365 spin_lock_bh(&queue->state_lock); 1366 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { 1367 /* Socket closed during handshake */ 1368 tls_handshake_cancel(queue->sock->sk); 1369 } 1370 if (queue->state != NVMET_TCP_Q_DISCONNECTING) { 1371 queue->state = NVMET_TCP_Q_DISCONNECTING; 1372 kref_put(&queue->kref, nvmet_tcp_release_queue); 1373 } 1374 spin_unlock_bh(&queue->state_lock); 1375 } 1376 1377 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue) 1378 { 1379 queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs); 1380 } 1381 1382 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue, 1383 int ops) 1384 { 1385 if (!idle_poll_period_usecs) 1386 return false; 1387 1388 if (ops) 1389 nvmet_tcp_arm_queue_deadline(queue); 1390 1391 return !time_after(jiffies, queue->poll_end); 1392 } 1393 1394 static void nvmet_tcp_io_work(struct work_struct *w) 1395 { 1396 struct nvmet_tcp_queue *queue = 1397 container_of(w, struct nvmet_tcp_queue, io_work); 1398 bool pending; 1399 int ret, ops = 0; 1400 1401 do { 1402 pending = false; 1403 1404 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops); 1405 if (ret > 0) 1406 pending = true; 1407 else if (ret < 0) 1408 return; 1409 1410 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops); 1411 if (ret > 0) 1412 pending = true; 1413 else if (ret < 0) 1414 return; 1415 1416 } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET); 1417 1418 /* 1419 * Requeue the worker if idle deadline period is in progress or any 1420 * ops activity was recorded during the do-while loop above. 
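	 * Otherwise the queue goes idle and io_work is rescheduled from the
	 * socket callbacks (data_ready/write_space) on new activity.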
1421 */ 1422 if (nvmet_tcp_check_queue_deadline(queue, ops) || pending) 1423 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); 1424 } 1425 1426 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue, 1427 struct nvmet_tcp_cmd *c) 1428 { 1429 u8 hdgst = nvmet_tcp_hdgst_len(queue); 1430 1431 c->queue = queue; 1432 c->req.port = queue->port->nport; 1433 1434 c->cmd_pdu = page_frag_alloc(&queue->pf_cache, 1435 sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); 1436 if (!c->cmd_pdu) 1437 return -ENOMEM; 1438 c->req.cmd = &c->cmd_pdu->cmd; 1439 1440 c->rsp_pdu = page_frag_alloc(&queue->pf_cache, 1441 sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); 1442 if (!c->rsp_pdu) 1443 goto out_free_cmd; 1444 c->req.cqe = &c->rsp_pdu->cqe; 1445 1446 c->data_pdu = page_frag_alloc(&queue->pf_cache, 1447 sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); 1448 if (!c->data_pdu) 1449 goto out_free_rsp; 1450 1451 c->r2t_pdu = page_frag_alloc(&queue->pf_cache, 1452 sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); 1453 if (!c->r2t_pdu) 1454 goto out_free_data; 1455 1456 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { 1457 c->recv_msg.msg_control = c->recv_cbuf; 1458 c->recv_msg.msg_controllen = sizeof(c->recv_cbuf); 1459 } 1460 c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; 1461 1462 list_add_tail(&c->entry, &queue->free_list); 1463 1464 return 0; 1465 out_free_data: 1466 page_frag_free(c->data_pdu); 1467 out_free_rsp: 1468 page_frag_free(c->rsp_pdu); 1469 out_free_cmd: 1470 page_frag_free(c->cmd_pdu); 1471 return -ENOMEM; 1472 } 1473 1474 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c) 1475 { 1476 page_frag_free(c->r2t_pdu); 1477 page_frag_free(c->data_pdu); 1478 page_frag_free(c->rsp_pdu); 1479 page_frag_free(c->cmd_pdu); 1480 } 1481 1482 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue) 1483 { 1484 struct nvmet_tcp_cmd *cmds; 1485 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds; 1486 1487 cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL); 1488 if (!cmds) 1489 goto out; 1490 1491 for (i = 0; i < nr_cmds; i++) { 1492 ret = nvmet_tcp_alloc_cmd(queue, cmds + i); 1493 if (ret) 1494 goto out_free; 1495 } 1496 1497 queue->cmds = cmds; 1498 1499 return 0; 1500 out_free: 1501 while (--i >= 0) 1502 nvmet_tcp_free_cmd(cmds + i); 1503 kfree(cmds); 1504 out: 1505 return ret; 1506 } 1507 1508 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue) 1509 { 1510 struct nvmet_tcp_cmd *cmds = queue->cmds; 1511 int i; 1512 1513 for (i = 0; i < queue->nr_cmds; i++) 1514 nvmet_tcp_free_cmd(cmds + i); 1515 1516 nvmet_tcp_free_cmd(&queue->connect); 1517 kfree(cmds); 1518 } 1519 1520 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) 1521 { 1522 struct socket *sock = queue->sock; 1523 1524 if (!queue->state_change) 1525 return; 1526 1527 write_lock_bh(&sock->sk->sk_callback_lock); 1528 sock->sk->sk_data_ready = queue->data_ready; 1529 sock->sk->sk_state_change = queue->state_change; 1530 sock->sk->sk_write_space = queue->write_space; 1531 sock->sk->sk_user_data = NULL; 1532 write_unlock_bh(&sock->sk->sk_callback_lock); 1533 } 1534 1535 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) 1536 { 1537 struct nvmet_tcp_cmd *cmd = queue->cmds; 1538 int i; 1539 1540 for (i = 0; i < queue->nr_cmds; i++, cmd++) { 1541 if (nvmet_tcp_need_data_in(cmd)) 1542 nvmet_req_uninit(&cmd->req); 1543 } 1544 1545 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) { 1546 /* failed in connect */ 1547 
nvmet_req_uninit(&queue->connect.req); 1548 } 1549 } 1550 1551 static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue) 1552 { 1553 struct nvmet_tcp_cmd *cmd = queue->cmds; 1554 int i; 1555 1556 for (i = 0; i < queue->nr_cmds; i++, cmd++) 1557 nvmet_tcp_free_cmd_buffers(cmd); 1558 nvmet_tcp_free_cmd_buffers(&queue->connect); 1559 } 1560 1561 static void nvmet_tcp_release_queue_work(struct work_struct *w) 1562 { 1563 struct nvmet_tcp_queue *queue = 1564 container_of(w, struct nvmet_tcp_queue, release_work); 1565 1566 mutex_lock(&nvmet_tcp_queue_mutex); 1567 list_del_init(&queue->queue_list); 1568 mutex_unlock(&nvmet_tcp_queue_mutex); 1569 1570 nvmet_tcp_restore_socket_callbacks(queue); 1571 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work); 1572 cancel_work_sync(&queue->io_work); 1573 /* stop accepting incoming data */ 1574 queue->rcv_state = NVMET_TCP_RECV_ERR; 1575 1576 nvmet_sq_put_tls_key(&queue->nvme_sq); 1577 nvmet_tcp_uninit_data_in_cmds(queue); 1578 nvmet_sq_destroy(&queue->nvme_sq); 1579 nvmet_cq_put(&queue->nvme_cq); 1580 cancel_work_sync(&queue->io_work); 1581 nvmet_tcp_free_cmd_data_in_buffers(queue); 1582 /* ->sock will be released by fput() */ 1583 fput(queue->sock->file); 1584 nvmet_tcp_free_cmds(queue); 1585 ida_free(&nvmet_tcp_queue_ida, queue->idx); 1586 page_frag_cache_drain(&queue->pf_cache); 1587 kfree(queue); 1588 } 1589 1590 static void nvmet_tcp_data_ready(struct sock *sk) 1591 { 1592 struct nvmet_tcp_queue *queue; 1593 1594 trace_sk_data_ready(sk); 1595 1596 read_lock_bh(&sk->sk_callback_lock); 1597 queue = sk->sk_user_data; 1598 if (likely(queue)) { 1599 if (queue->data_ready) 1600 queue->data_ready(sk); 1601 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) 1602 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, 1603 &queue->io_work); 1604 } 1605 read_unlock_bh(&sk->sk_callback_lock); 1606 } 1607 1608 static void nvmet_tcp_write_space(struct sock *sk) 1609 { 1610 struct nvmet_tcp_queue *queue; 1611 1612 read_lock_bh(&sk->sk_callback_lock); 1613 queue = sk->sk_user_data; 1614 if (unlikely(!queue)) 1615 goto out; 1616 1617 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { 1618 queue->write_space(sk); 1619 goto out; 1620 } 1621 1622 if (sk_stream_is_writeable(sk)) { 1623 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1624 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); 1625 } 1626 out: 1627 read_unlock_bh(&sk->sk_callback_lock); 1628 } 1629 1630 static void nvmet_tcp_state_change(struct sock *sk) 1631 { 1632 struct nvmet_tcp_queue *queue; 1633 1634 read_lock_bh(&sk->sk_callback_lock); 1635 queue = sk->sk_user_data; 1636 if (!queue) 1637 goto done; 1638 1639 switch (sk->sk_state) { 1640 case TCP_FIN_WAIT2: 1641 case TCP_LAST_ACK: 1642 break; 1643 case TCP_FIN_WAIT1: 1644 case TCP_CLOSE_WAIT: 1645 case TCP_CLOSE: 1646 /* FALLTHRU */ 1647 nvmet_tcp_schedule_release_queue(queue); 1648 break; 1649 default: 1650 pr_warn("queue %d unhandled state %d\n", 1651 queue->idx, sk->sk_state); 1652 } 1653 done: 1654 read_unlock_bh(&sk->sk_callback_lock); 1655 } 1656 1657 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) 1658 { 1659 struct socket *sock = queue->sock; 1660 struct inet_sock *inet = inet_sk(sock->sk); 1661 int ret; 1662 1663 ret = kernel_getsockname(sock, 1664 (struct sockaddr *)&queue->sockaddr); 1665 if (ret < 0) 1666 return ret; 1667 1668 ret = kernel_getpeername(sock, 1669 (struct sockaddr *)&queue->sockaddr_peer); 1670 if (ret < 0) 1671 return ret; 1672 1673 /* 1674 * Cleanup whatever is sitting in the TCP 
transmit queue on socket 1675 * close. This is done to prevent stale data from being sent should 1676 * the network connection be restored before TCP times out. 1677 */ 1678 sock_no_linger(sock->sk); 1679 1680 if (so_priority > 0) 1681 sock_set_priority(sock->sk, so_priority); 1682 1683 /* Set socket type of service */ 1684 if (inet->rcv_tos > 0) 1685 ip_sock_set_tos(sock->sk, inet->rcv_tos); 1686 1687 ret = 0; 1688 write_lock_bh(&sock->sk->sk_callback_lock); 1689 if (sock->sk->sk_state != TCP_ESTABLISHED) { 1690 /* 1691 * If the socket is already closing, don't even start 1692 * consuming it 1693 */ 1694 ret = -ENOTCONN; 1695 } else { 1696 sock->sk->sk_user_data = queue; 1697 queue->data_ready = sock->sk->sk_data_ready; 1698 sock->sk->sk_data_ready = nvmet_tcp_data_ready; 1699 queue->state_change = sock->sk->sk_state_change; 1700 sock->sk->sk_state_change = nvmet_tcp_state_change; 1701 queue->write_space = sock->sk->sk_write_space; 1702 sock->sk->sk_write_space = nvmet_tcp_write_space; 1703 if (idle_poll_period_usecs) 1704 nvmet_tcp_arm_queue_deadline(queue); 1705 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); 1706 } 1707 write_unlock_bh(&sock->sk->sk_callback_lock); 1708 1709 return ret; 1710 } 1711 1712 #ifdef CONFIG_NVME_TARGET_TCP_TLS 1713 static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue) 1714 { 1715 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; 1716 int len, ret; 1717 struct kvec iov = { 1718 .iov_base = (u8 *)&queue->pdu + queue->offset, 1719 .iov_len = sizeof(struct nvme_tcp_hdr), 1720 }; 1721 char cbuf[CMSG_LEN(sizeof(char))] = {}; 1722 struct msghdr msg = { 1723 .msg_control = cbuf, 1724 .msg_controllen = sizeof(cbuf), 1725 .msg_flags = MSG_PEEK, 1726 }; 1727 1728 if (nvmet_port_secure_channel_required(queue->port->nport)) 1729 return 0; 1730 1731 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, 1732 iov.iov_len, msg.msg_flags); 1733 if (unlikely(len < 0)) { 1734 pr_debug("queue %d: peek error %d\n", 1735 queue->idx, len); 1736 return len; 1737 } 1738 1739 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf); 1740 if (ret < 0) 1741 return ret; 1742 1743 if (len < sizeof(struct nvme_tcp_hdr)) { 1744 pr_debug("queue %d: short read, %d bytes missing\n", 1745 queue->idx, (int)iov.iov_len - len); 1746 return -EAGAIN; 1747 } 1748 pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n", 1749 queue->idx, hdr->type, hdr->hlen, hdr->plen, 1750 (int)sizeof(struct nvme_tcp_icreq_pdu)); 1751 if (hdr->type == nvme_tcp_icreq && 1752 hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) && 1753 hdr->plen == cpu_to_le32(sizeof(struct nvme_tcp_icreq_pdu))) { 1754 pr_debug("queue %d: icreq detected\n", 1755 queue->idx); 1756 return len; 1757 } 1758 return 0; 1759 } 1760 1761 static int nvmet_tcp_tls_key_lookup(struct nvmet_tcp_queue *queue, 1762 key_serial_t peerid) 1763 { 1764 struct key *tls_key = nvme_tls_key_lookup(peerid); 1765 int status = 0; 1766 1767 if (IS_ERR(tls_key)) { 1768 pr_warn("%s: queue %d failed to lookup key %x\n", 1769 __func__, queue->idx, peerid); 1770 spin_lock_bh(&queue->state_lock); 1771 queue->state = NVMET_TCP_Q_FAILED; 1772 spin_unlock_bh(&queue->state_lock); 1773 status = PTR_ERR(tls_key); 1774 } else { 1775 pr_debug("%s: queue %d using TLS PSK %x\n", 1776 __func__, queue->idx, peerid); 1777 queue->nvme_sq.tls_key = tls_key; 1778 } 1779 return status; 1780 } 1781 1782 static void nvmet_tcp_tls_handshake_done(void *data, int status, 1783 key_serial_t peerid) 1784 { 1785 struct nvmet_tcp_queue *queue = data; 1786 1787 pr_debug("queue %d: TLS 
handshake done, key %x, status %d\n", 1788 queue->idx, peerid, status); 1789 spin_lock_bh(&queue->state_lock); 1790 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) { 1791 spin_unlock_bh(&queue->state_lock); 1792 return; 1793 } 1794 if (!status) { 1795 queue->tls_pskid = peerid; 1796 queue->state = NVMET_TCP_Q_CONNECTING; 1797 } else 1798 queue->state = NVMET_TCP_Q_FAILED; 1799 spin_unlock_bh(&queue->state_lock); 1800 1801 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work); 1802 1803 if (!status) 1804 status = nvmet_tcp_tls_key_lookup(queue, peerid); 1805 1806 if (status) 1807 nvmet_tcp_schedule_release_queue(queue); 1808 else 1809 nvmet_tcp_set_queue_sock(queue); 1810 kref_put(&queue->kref, nvmet_tcp_release_queue); 1811 } 1812 1813 static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) 1814 { 1815 struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w), 1816 struct nvmet_tcp_queue, tls_handshake_tmo_work); 1817 1818 pr_warn("queue %d: TLS handshake timeout\n", queue->idx); 1819 /* 1820 * If tls_handshake_cancel() fails we've lost the race with 1821 * nvmet_tcp_tls_handshake_done() */ 1822 if (!tls_handshake_cancel(queue->sock->sk)) 1823 return; 1824 spin_lock_bh(&queue->state_lock); 1825 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) { 1826 spin_unlock_bh(&queue->state_lock); 1827 return; 1828 } 1829 queue->state = NVMET_TCP_Q_FAILED; 1830 spin_unlock_bh(&queue->state_lock); 1831 nvmet_tcp_schedule_release_queue(queue); 1832 kref_put(&queue->kref, nvmet_tcp_release_queue); 1833 } 1834 1835 static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue) 1836 { 1837 int ret = -EOPNOTSUPP; 1838 struct tls_handshake_args args; 1839 1840 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) { 1841 pr_warn("cannot start TLS in state %d\n", queue->state); 1842 return -EINVAL; 1843 } 1844 1845 kref_get(&queue->kref); 1846 pr_debug("queue %d: TLS ServerHello\n", queue->idx); 1847 memset(&args, 0, sizeof(args)); 1848 args.ta_sock = queue->sock; 1849 args.ta_done = nvmet_tcp_tls_handshake_done; 1850 args.ta_data = queue; 1851 args.ta_keyring = key_serial(queue->port->nport->keyring); 1852 args.ta_timeout_ms = tls_handshake_timeout * 1000; 1853 1854 ret = tls_server_hello_psk(&args, GFP_KERNEL); 1855 if (ret) { 1856 kref_put(&queue->kref, nvmet_tcp_release_queue); 1857 pr_err("failed to start TLS, err=%d\n", ret); 1858 } else { 1859 queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work, 1860 tls_handshake_timeout * HZ); 1861 } 1862 return ret; 1863 } 1864 #else 1865 static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {} 1866 #endif 1867 1868 static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, 1869 struct socket *newsock) 1870 { 1871 struct nvmet_tcp_queue *queue; 1872 struct file *sock_file = NULL; 1873 int ret; 1874 1875 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 1876 if (!queue) { 1877 ret = -ENOMEM; 1878 goto out_release; 1879 } 1880 1881 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work); 1882 INIT_WORK(&queue->io_work, nvmet_tcp_io_work); 1883 kref_init(&queue->kref); 1884 queue->sock = newsock; 1885 queue->port = port; 1886 queue->nr_cmds = 0; 1887 spin_lock_init(&queue->state_lock); 1888 if (queue->port->nport->disc_addr.tsas.tcp.sectype == 1889 NVMF_TCP_SECTYPE_TLS13) 1890 queue->state = NVMET_TCP_Q_TLS_HANDSHAKE; 1891 else 1892 queue->state = NVMET_TCP_Q_CONNECTING; 1893 INIT_LIST_HEAD(&queue->free_list); 1894 init_llist_head(&queue->resp_list); 1895 INIT_LIST_HEAD(&queue->resp_send_list); 1896 1897 sock_file 
= sock_alloc_file(queue->sock, O_CLOEXEC, NULL); 1898 if (IS_ERR(sock_file)) { 1899 ret = PTR_ERR(sock_file); 1900 goto out_free_queue; 1901 } 1902 1903 queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL); 1904 if (queue->idx < 0) { 1905 ret = queue->idx; 1906 goto out_sock; 1907 } 1908 1909 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect); 1910 if (ret) 1911 goto out_ida_remove; 1912 1913 nvmet_cq_init(&queue->nvme_cq); 1914 ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); 1915 if (ret) 1916 goto out_free_connect; 1917 1918 nvmet_prepare_receive_pdu(queue); 1919 1920 mutex_lock(&nvmet_tcp_queue_mutex); 1921 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list); 1922 mutex_unlock(&nvmet_tcp_queue_mutex); 1923 1924 INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work, 1925 nvmet_tcp_tls_handshake_timeout); 1926 #ifdef CONFIG_NVME_TARGET_TCP_TLS 1927 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { 1928 struct sock *sk = queue->sock->sk; 1929 1930 /* Restore the default callbacks before starting upcall */ 1931 write_lock_bh(&sk->sk_callback_lock); 1932 sk->sk_user_data = NULL; 1933 sk->sk_data_ready = port->data_ready; 1934 write_unlock_bh(&sk->sk_callback_lock); 1935 if (!nvmet_tcp_try_peek_pdu(queue)) { 1936 if (!nvmet_tcp_tls_handshake(queue)) 1937 return; 1938 /* TLS handshake failed, terminate the connection */ 1939 goto out_destroy_sq; 1940 } 1941 /* Not a TLS connection, continue with normal processing */ 1942 queue->state = NVMET_TCP_Q_CONNECTING; 1943 } 1944 #endif 1945 1946 ret = nvmet_tcp_set_queue_sock(queue); 1947 if (ret) 1948 goto out_destroy_sq; 1949 1950 return; 1951 out_destroy_sq: 1952 mutex_lock(&nvmet_tcp_queue_mutex); 1953 list_del_init(&queue->queue_list); 1954 mutex_unlock(&nvmet_tcp_queue_mutex); 1955 nvmet_sq_destroy(&queue->nvme_sq); 1956 out_free_connect: 1957 nvmet_cq_put(&queue->nvme_cq); 1958 nvmet_tcp_free_cmd(&queue->connect); 1959 out_ida_remove: 1960 ida_free(&nvmet_tcp_queue_ida, queue->idx); 1961 out_sock: 1962 fput(queue->sock->file); 1963 out_free_queue: 1964 kfree(queue); 1965 out_release: 1966 pr_err("failed to allocate queue, error %d\n", ret); 1967 if (!sock_file) 1968 sock_release(newsock); 1969 } 1970 1971 static void nvmet_tcp_accept_work(struct work_struct *w) 1972 { 1973 struct nvmet_tcp_port *port = 1974 container_of(w, struct nvmet_tcp_port, accept_work); 1975 struct socket *newsock; 1976 int ret; 1977 1978 while (true) { 1979 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK); 1980 if (ret < 0) { 1981 if (ret != -EAGAIN) 1982 pr_warn("failed to accept err=%d\n", ret); 1983 return; 1984 } 1985 nvmet_tcp_alloc_queue(port, newsock); 1986 } 1987 } 1988 1989 static void nvmet_tcp_listen_data_ready(struct sock *sk) 1990 { 1991 struct nvmet_tcp_port *port; 1992 1993 trace_sk_data_ready(sk); 1994 1995 read_lock_bh(&sk->sk_callback_lock); 1996 port = sk->sk_user_data; 1997 if (!port) 1998 goto out; 1999 2000 if (sk->sk_state == TCP_LISTEN) 2001 queue_work(nvmet_wq, &port->accept_work); 2002 out: 2003 read_unlock_bh(&sk->sk_callback_lock); 2004 } 2005 2006 static int nvmet_tcp_add_port(struct nvmet_port *nport) 2007 { 2008 struct nvmet_tcp_port *port; 2009 __kernel_sa_family_t af; 2010 int ret; 2011 2012 port = kzalloc(sizeof(*port), GFP_KERNEL); 2013 if (!port) 2014 return -ENOMEM; 2015 2016 switch (nport->disc_addr.adrfam) { 2017 case NVMF_ADDR_FAMILY_IP4: 2018 af = AF_INET; 2019 break; 2020 case NVMF_ADDR_FAMILY_IP6: 2021 af = AF_INET6; 2022 break; 2023 default: 2024 pr_err("address family %d not supported\n", 2025 
			nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto err_port;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto err_port;
	}

	port->nport = nport;
	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
	if (port->nport->inline_data_size < 0)
		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
				IPPROTO_TCP, &port->sock);
	if (ret) {
		pr_err("failed to create a socket\n");
		goto err_port;
	}

	port->sock->sk->sk_user_data = port;
	port->data_ready = port->sock->sk->sk_data_ready;
	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
	sock_set_reuseaddr(port->sock->sk);
	tcp_sock_set_nodelay(port->sock->sk);
	if (so_priority > 0)
		sock_set_priority(port->sock->sk, so_priority);

	ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
			sizeof(port->addr));
	if (ret) {
		pr_err("failed to bind port socket %d\n", ret);
		goto err_sock;
	}

	ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
	if (ret) {
		pr_err("failed to listen %d on port sock\n", ret);
		goto err_sock;
	}

	nport->priv = port;
	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(nport->disc_addr.portid), &port->addr);

	return 0;

err_sock:
	sock_release(port->sock);
err_port:
	kfree(port);
	return ret;
}

static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->port == port)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port = nport->priv;

	write_lock_bh(&port->sock->sk->sk_callback_lock);
	port->sock->sk->sk_data_ready = port->data_ready;
	port->sock->sk->sk_user_data = NULL;
	write_unlock_bh(&port->sock->sk->sk_callback_lock);
	cancel_work_sync(&port->accept_work);
	/*
	 * Destroy the remaining queues, which do not belong to any
	 * controller yet.
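	 * (These are connections that were accepted but have not completed
	 * the NVMe-oF connect sequence, so no controller owns them.)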
2107 */ 2108 nvmet_tcp_destroy_port_queues(port); 2109 2110 sock_release(port->sock); 2111 kfree(port); 2112 } 2113 2114 static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl) 2115 { 2116 struct nvmet_tcp_queue *queue; 2117 2118 mutex_lock(&nvmet_tcp_queue_mutex); 2119 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) 2120 if (queue->nvme_sq.ctrl == ctrl) 2121 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 2122 mutex_unlock(&nvmet_tcp_queue_mutex); 2123 } 2124 2125 static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq) 2126 { 2127 struct nvmet_tcp_queue *queue = 2128 container_of(sq, struct nvmet_tcp_queue, nvme_sq); 2129 2130 if (sq->qid == 0) { 2131 struct nvmet_tcp_queue *q; 2132 int pending = 0; 2133 2134 /* Check for pending controller teardown */ 2135 mutex_lock(&nvmet_tcp_queue_mutex); 2136 list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) { 2137 if (q->nvme_sq.ctrl == sq->ctrl && 2138 q->state == NVMET_TCP_Q_DISCONNECTING) 2139 pending++; 2140 } 2141 mutex_unlock(&nvmet_tcp_queue_mutex); 2142 if (pending > NVMET_TCP_BACKLOG) 2143 return NVME_SC_CONNECT_CTRL_BUSY; 2144 } 2145 2146 queue->nr_cmds = sq->size * 2; 2147 if (nvmet_tcp_alloc_cmds(queue)) { 2148 queue->nr_cmds = 0; 2149 return NVME_SC_INTERNAL; 2150 } 2151 return 0; 2152 } 2153 2154 static void nvmet_tcp_disc_port_addr(struct nvmet_req *req, 2155 struct nvmet_port *nport, char *traddr) 2156 { 2157 struct nvmet_tcp_port *port = nport->priv; 2158 2159 if (inet_addr_is_any(&port->addr)) { 2160 struct nvmet_tcp_cmd *cmd = 2161 container_of(req, struct nvmet_tcp_cmd, req); 2162 struct nvmet_tcp_queue *queue = cmd->queue; 2163 2164 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr); 2165 } else { 2166 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE); 2167 } 2168 } 2169 2170 static ssize_t nvmet_tcp_host_port_addr(struct nvmet_ctrl *ctrl, 2171 char *traddr, size_t traddr_len) 2172 { 2173 struct nvmet_sq *sq = ctrl->sqs[0]; 2174 struct nvmet_tcp_queue *queue = 2175 container_of(sq, struct nvmet_tcp_queue, nvme_sq); 2176 2177 if (queue->sockaddr_peer.ss_family == AF_UNSPEC) 2178 return -EINVAL; 2179 return snprintf(traddr, traddr_len, "%pISc", 2180 (struct sockaddr *)&queue->sockaddr_peer); 2181 } 2182 2183 static const struct nvmet_fabrics_ops nvmet_tcp_ops = { 2184 .owner = THIS_MODULE, 2185 .type = NVMF_TRTYPE_TCP, 2186 .msdbd = 1, 2187 .add_port = nvmet_tcp_add_port, 2188 .remove_port = nvmet_tcp_remove_port, 2189 .queue_response = nvmet_tcp_queue_response, 2190 .delete_ctrl = nvmet_tcp_delete_ctrl, 2191 .install_queue = nvmet_tcp_install_queue, 2192 .disc_traddr = nvmet_tcp_disc_port_addr, 2193 .host_traddr = nvmet_tcp_host_port_addr, 2194 }; 2195 2196 static int __init nvmet_tcp_init(void) 2197 { 2198 int ret; 2199 2200 nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", 2201 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 2202 if (!nvmet_tcp_wq) 2203 return -ENOMEM; 2204 2205 ret = nvmet_register_transport(&nvmet_tcp_ops); 2206 if (ret) 2207 goto err; 2208 2209 return 0; 2210 err: 2211 destroy_workqueue(nvmet_tcp_wq); 2212 return ret; 2213 } 2214 2215 static void __exit nvmet_tcp_exit(void) 2216 { 2217 struct nvmet_tcp_queue *queue; 2218 2219 nvmet_unregister_transport(&nvmet_tcp_ops); 2220 2221 flush_workqueue(nvmet_wq); 2222 mutex_lock(&nvmet_tcp_queue_mutex); 2223 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) 2224 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 2225 mutex_unlock(&nvmet_tcp_queue_mutex); 2226 flush_workqueue(nvmet_wq); 2227 2228 destroy_workqueue(nvmet_tcp_wq); 2229 
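	/* All queues have been released above, so the queue index IDA is empty. */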
ida_destroy(&nvmet_tcp_queue_ida); 2230 } 2231 2232 module_init(nvmet_tcp_init); 2233 module_exit(nvmet_tcp_exit); 2234 2235 MODULE_DESCRIPTION("NVMe target TCP transport driver"); 2236 MODULE_LICENSE("GPL v2"); 2237 MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */ 2238
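
/*
 * Usage note (a sketch, not part of the driver itself): once this transport
 * is registered, a TCP port is normally configured through the generic nvmet
 * configfs interface.  The attribute names below are the standard nvmet port
 * attributes; the addresses and subsystem NQN are placeholders.
 *
 *   cd /sys/kernel/config/nvmet/ports && mkdir 1 && cd 1
 *   echo tcp          > addr_trtype
 *   echo ipv4         > addr_adrfam
 *   echo 192.168.0.10 > addr_traddr
 *   echo 4420         > addr_trsvcid
 *   ln -s /sys/kernel/config/nvmet/subsystems/<nqn> subsystems/<nqn>
 */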