// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <net/tls_prot.h>
#include <net/handshake.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <trace/events/sock.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)
#define NVMET_TCP_MAXH2CDATA		0x400000 /* 16M arbitrary limit */
#define NVMET_TCP_BACKLOG		128

static int param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int set_params(const char *str, const struct kernel_param *kp)
{
	return param_store_val(str, kp->arg, 0, INT_MAX);
}

static const struct kernel_param_ops set_param_ops = {
	.set = set_params,
	.get = param_get_int,
};

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");

/* Define a time period (in usecs) that io_work() shall sample an activated
 * queue before determining it to be idle. This optional module behavior
 * can enable NIC solutions that support socket optimized packet processing
 * using advanced interrupt moderation techniques.
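 * Setting this to zero (the default) leaves idle polling disabled, so
 * io_work() stops requeueing itself as soon as no further send or receive
 * progress is made and is rescheduled from the socket callbacks instead.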
 */
static int idle_poll_period_usecs;
device_param_cb(idle_poll_period_usecs, &set_param_ops,
		&idle_poll_period_usecs, 0644);
MODULE_PARM_DESC(idle_poll_period_usecs,
		"nvmet tcp io_work poll till idle time period in usecs: Default 0");

#ifdef CONFIG_NVME_TARGET_TCP_TLS
/*
 * TLS handshake timeout
 */
static int tls_handshake_timeout = 10;
module_param(tls_handshake_timeout, int, 0644);
MODULE_PARM_DESC(tls_handshake_timeout,
		 "nvme TLS handshake timeout in seconds (default 10)");
#endif

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue *queue;
	struct nvmet_req req;

	struct nvme_tcp_cmd_pdu *cmd_pdu;
	struct nvme_tcp_rsp_pdu *rsp_pdu;
	struct nvme_tcp_data_pdu *data_pdu;
	struct nvme_tcp_r2t_pdu *r2t_pdu;

	u32 rbytes_done;
	u32 wbytes_done;

	u32 pdu_len;
	u32 pdu_recv;
	int sg_idx;
	char recv_cbuf[CMSG_LEN(sizeof(char))];
	struct msghdr recv_msg;
	struct bio_vec *iov;
	u32 flags;

	struct list_head entry;
	struct llist_node lentry;

	/* send state */
	u32 offset;
	struct scatterlist *cur_sg;
	enum nvmet_tcp_send_state state;

	__le32 exp_ddgst;
	__le32 recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_TLS_HANDSHAKE,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
	NVMET_TCP_Q_FAILED,
};

struct nvmet_tcp_queue {
	struct socket *sock;
	struct nvmet_tcp_port *port;
	struct work_struct io_work;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;
	struct kref kref;

	/* send state */
	struct nvmet_tcp_cmd *cmds;
	unsigned int nr_cmds;
	struct list_head free_list;
	struct llist_head resp_list;
	struct list_head resp_send_list;
	int send_list_len;
	struct nvmet_tcp_cmd *snd_cmd;

	/* recv state */
	int offset;
	int left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd *cmd;
	union nvme_tcp_pdu pdu;

	/* digest state */
	bool hdr_digest;
	bool data_digest;

	/* TLS state */
	key_serial_t tls_pskid;
	struct delayed_work tls_handshake_tmo_work;

	unsigned long poll_end;

	spinlock_t state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage sockaddr;
	struct sockaddr_storage sockaddr_peer;
	struct work_struct release_work;

	int idx;
	struct list_head queue_list;

	struct nvmet_tcp_cmd connect;

	struct page_frag_cache pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
	struct socket *sock;
	struct work_struct accept_work;
	struct nvmet_port *nport;
	struct sockaddr_storage addr;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static const
struct nvmet_fabrics_ops nvmet_tcp_ops; 215 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c); 216 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd); 217 218 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, 219 struct nvmet_tcp_cmd *cmd) 220 { 221 if (unlikely(!queue->nr_cmds)) { 222 /* We didn't allocate cmds yet, send 0xffff */ 223 return USHRT_MAX; 224 } 225 226 return cmd - queue->cmds; 227 } 228 229 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd) 230 { 231 return nvme_is_write(cmd->req.cmd) && 232 cmd->rbytes_done < cmd->req.transfer_len; 233 } 234 235 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd) 236 { 237 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status; 238 } 239 240 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd) 241 { 242 return !nvme_is_write(cmd->req.cmd) && 243 cmd->req.transfer_len > 0 && 244 !cmd->req.cqe->status; 245 } 246 247 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd) 248 { 249 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len && 250 !cmd->rbytes_done; 251 } 252 253 static inline struct nvmet_tcp_cmd * 254 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue) 255 { 256 struct nvmet_tcp_cmd *cmd; 257 258 cmd = list_first_entry_or_null(&queue->free_list, 259 struct nvmet_tcp_cmd, entry); 260 if (!cmd) 261 return NULL; 262 list_del_init(&cmd->entry); 263 264 cmd->rbytes_done = cmd->wbytes_done = 0; 265 cmd->pdu_len = 0; 266 cmd->pdu_recv = 0; 267 cmd->iov = NULL; 268 cmd->flags = 0; 269 return cmd; 270 } 271 272 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd) 273 { 274 if (unlikely(cmd == &cmd->queue->connect)) 275 return; 276 277 list_add_tail(&cmd->entry, &cmd->queue->free_list); 278 } 279 280 static inline int queue_cpu(struct nvmet_tcp_queue *queue) 281 { 282 return queue->sock->sk->sk_incoming_cpu; 283 } 284 285 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue) 286 { 287 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; 288 } 289 290 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue) 291 { 292 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; 293 } 294 295 static inline void nvmet_tcp_hdgst(void *pdu, size_t len) 296 { 297 put_unaligned_le32(~crc32c(~0, pdu, len), pdu + len); 298 } 299 300 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, 301 void *pdu, size_t len) 302 { 303 struct nvme_tcp_hdr *hdr = pdu; 304 __le32 recv_digest; 305 __le32 exp_digest; 306 307 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { 308 pr_err("queue %d: header digest enabled but no header digest\n", 309 queue->idx); 310 return -EPROTO; 311 } 312 313 recv_digest = *(__le32 *)(pdu + hdr->hlen); 314 nvmet_tcp_hdgst(pdu, len); 315 exp_digest = *(__le32 *)(pdu + hdr->hlen); 316 if (recv_digest != exp_digest) { 317 pr_err("queue %d: header digest error: recv %#x expected %#x\n", 318 queue->idx, le32_to_cpu(recv_digest), 319 le32_to_cpu(exp_digest)); 320 return -EPROTO; 321 } 322 323 return 0; 324 } 325 326 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu) 327 { 328 struct nvme_tcp_hdr *hdr = pdu; 329 u8 digest_len = nvmet_tcp_hdgst_len(queue); 330 u32 len; 331 332 len = le32_to_cpu(hdr->plen) - hdr->hlen - 333 (hdr->flags & NVME_TCP_F_HDGST ? 
digest_len : 0); 334 335 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { 336 pr_err("queue %d: data digest flag is cleared\n", queue->idx); 337 return -EPROTO; 338 } 339 340 return 0; 341 } 342 343 /* If cmd buffers are NULL, no operation is performed */ 344 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd) 345 { 346 kfree(cmd->iov); 347 sgl_free(cmd->req.sg); 348 cmd->iov = NULL; 349 cmd->req.sg = NULL; 350 } 351 352 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue); 353 354 static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) 355 { 356 struct bio_vec *iov = cmd->iov; 357 struct scatterlist *sg; 358 u32 length, offset, sg_offset; 359 unsigned int sg_remaining; 360 int nr_pages; 361 362 length = cmd->pdu_len; 363 nr_pages = DIV_ROUND_UP(length, PAGE_SIZE); 364 offset = cmd->rbytes_done; 365 cmd->sg_idx = offset / PAGE_SIZE; 366 sg_offset = offset % PAGE_SIZE; 367 if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) { 368 nvmet_tcp_fatal_error(cmd->queue); 369 return; 370 } 371 sg = &cmd->req.sg[cmd->sg_idx]; 372 sg_remaining = cmd->req.sg_cnt - cmd->sg_idx; 373 374 while (length) { 375 if (!sg_remaining) { 376 nvmet_tcp_fatal_error(cmd->queue); 377 return; 378 } 379 if (!sg->length || sg->length <= sg_offset) { 380 nvmet_tcp_fatal_error(cmd->queue); 381 return; 382 } 383 u32 iov_len = min_t(u32, length, sg->length - sg_offset); 384 385 bvec_set_page(iov, sg_page(sg), iov_len, 386 sg->offset + sg_offset); 387 388 length -= iov_len; 389 sg = sg_next(sg); 390 sg_remaining--; 391 iov++; 392 sg_offset = 0; 393 } 394 395 iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov, 396 nr_pages, cmd->pdu_len); 397 } 398 399 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) 400 { 401 queue->rcv_state = NVMET_TCP_RECV_ERR; 402 if (queue->nvme_sq.ctrl) 403 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); 404 else 405 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 406 } 407 408 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status) 409 { 410 queue->rcv_state = NVMET_TCP_RECV_ERR; 411 if (status == -EPIPE || status == -ECONNRESET) 412 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 413 else 414 nvmet_tcp_fatal_error(queue); 415 } 416 417 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) 418 { 419 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl; 420 u32 len = le32_to_cpu(sgl->length); 421 422 if (!len) 423 return 0; 424 425 if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) | 426 NVME_SGL_FMT_OFFSET)) { 427 if (!nvme_is_write(cmd->req.cmd)) 428 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 429 430 if (len > cmd->req.port->inline_data_size) 431 return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR; 432 cmd->pdu_len = len; 433 } 434 cmd->req.transfer_len += len; 435 436 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt); 437 if (!cmd->req.sg) 438 return NVME_SC_INTERNAL; 439 cmd->cur_sg = cmd->req.sg; 440 441 if (nvmet_tcp_has_data_in(cmd)) { 442 cmd->iov = kmalloc_objs(*cmd->iov, cmd->req.sg_cnt); 443 if (!cmd->iov) 444 goto err; 445 } 446 447 return 0; 448 err: 449 nvmet_tcp_free_cmd_buffers(cmd); 450 return NVME_SC_INTERNAL; 451 } 452 453 static void nvmet_tcp_calc_ddgst(struct nvmet_tcp_cmd *cmd) 454 { 455 size_t total_len = cmd->req.transfer_len; 456 struct scatterlist *sg = cmd->req.sg; 457 u32 crc = ~0; 458 459 while (total_len) { 460 size_t len = min_t(size_t, total_len, sg->length); 461 462 /* 463 * Note that the scatterlist does not contain any highmem pages, 464 * as it was allocated by 
sgl_alloc() with GFP_KERNEL. 465 */ 466 crc = crc32c(crc, sg_virt(sg), len); 467 total_len -= len; 468 sg = sg_next(sg); 469 } 470 cmd->exp_ddgst = cpu_to_le32(~crc); 471 } 472 473 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) 474 { 475 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu; 476 struct nvmet_tcp_queue *queue = cmd->queue; 477 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 478 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue); 479 480 cmd->offset = 0; 481 cmd->state = NVMET_TCP_SEND_DATA_PDU; 482 483 pdu->hdr.type = nvme_tcp_c2h_data; 484 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ? 485 NVME_TCP_F_DATA_SUCCESS : 0); 486 pdu->hdr.hlen = sizeof(*pdu); 487 pdu->hdr.pdo = pdu->hdr.hlen + hdgst; 488 pdu->hdr.plen = 489 cpu_to_le32(pdu->hdr.hlen + hdgst + 490 cmd->req.transfer_len + ddgst); 491 pdu->command_id = cmd->req.cqe->command_id; 492 pdu->data_length = cpu_to_le32(cmd->req.transfer_len); 493 pdu->data_offset = cpu_to_le32(cmd->wbytes_done); 494 495 if (queue->data_digest) { 496 pdu->hdr.flags |= NVME_TCP_F_DDGST; 497 nvmet_tcp_calc_ddgst(cmd); 498 } 499 500 if (cmd->queue->hdr_digest) { 501 pdu->hdr.flags |= NVME_TCP_F_HDGST; 502 nvmet_tcp_hdgst(pdu, sizeof(*pdu)); 503 } 504 } 505 506 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) 507 { 508 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; 509 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 510 511 cmd->offset = 0; 512 cmd->state = NVMET_TCP_SEND_R2T; 513 514 pdu->hdr.type = nvme_tcp_r2t; 515 pdu->hdr.flags = 0; 516 pdu->hdr.hlen = sizeof(*pdu); 517 pdu->hdr.pdo = 0; 518 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); 519 520 pdu->command_id = cmd->req.cmd->common.command_id; 521 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); 522 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done); 523 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); 524 if (cmd->queue->hdr_digest) { 525 pdu->hdr.flags |= NVME_TCP_F_HDGST; 526 nvmet_tcp_hdgst(pdu, sizeof(*pdu)); 527 } 528 } 529 530 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) 531 { 532 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; 533 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 534 535 cmd->offset = 0; 536 cmd->state = NVMET_TCP_SEND_RESPONSE; 537 538 pdu->hdr.type = nvme_tcp_rsp; 539 pdu->hdr.flags = 0; 540 pdu->hdr.hlen = sizeof(*pdu); 541 pdu->hdr.pdo = 0; 542 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); 543 if (cmd->queue->hdr_digest) { 544 pdu->hdr.flags |= NVME_TCP_F_HDGST; 545 nvmet_tcp_hdgst(pdu, sizeof(*pdu)); 546 } 547 } 548 549 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue) 550 { 551 struct llist_node *node; 552 struct nvmet_tcp_cmd *cmd; 553 554 for (node = llist_del_all(&queue->resp_list); node; node = node->next) { 555 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry); 556 list_add(&cmd->entry, &queue->resp_send_list); 557 queue->send_list_len++; 558 } 559 } 560 561 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue) 562 { 563 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list, 564 struct nvmet_tcp_cmd, entry); 565 if (!queue->snd_cmd) { 566 nvmet_tcp_process_resp_list(queue); 567 queue->snd_cmd = 568 list_first_entry_or_null(&queue->resp_send_list, 569 struct nvmet_tcp_cmd, entry); 570 if (unlikely(!queue->snd_cmd)) 571 return NULL; 572 } 573 574 list_del_init(&queue->snd_cmd->entry); 575 queue->send_list_len--; 576 577 if (nvmet_tcp_need_data_out(queue->snd_cmd)) 578 nvmet_setup_c2h_data_pdu(queue->snd_cmd); 579 else if 
(nvmet_tcp_need_data_in(queue->snd_cmd)) 580 nvmet_setup_r2t_pdu(queue->snd_cmd); 581 else 582 nvmet_setup_response_pdu(queue->snd_cmd); 583 584 return queue->snd_cmd; 585 } 586 587 static void nvmet_tcp_queue_response(struct nvmet_req *req) 588 { 589 struct nvmet_tcp_cmd *cmd = 590 container_of(req, struct nvmet_tcp_cmd, req); 591 struct nvmet_tcp_queue *queue = cmd->queue; 592 enum nvmet_tcp_recv_state queue_state; 593 struct nvmet_tcp_cmd *queue_cmd; 594 struct nvme_sgl_desc *sgl; 595 u32 len; 596 597 /* Pairs with store_release in nvmet_prepare_receive_pdu() */ 598 queue_state = smp_load_acquire(&queue->rcv_state); 599 queue_cmd = READ_ONCE(queue->cmd); 600 601 if (unlikely(cmd == queue_cmd)) { 602 sgl = &cmd->req.cmd->common.dptr.sgl; 603 len = le32_to_cpu(sgl->length); 604 605 /* 606 * Wait for inline data before processing the response. 607 * Avoid using helpers, this might happen before 608 * nvmet_req_init is completed. 609 */ 610 if (queue_state == NVMET_TCP_RECV_PDU && 611 len && len <= cmd->req.port->inline_data_size && 612 nvme_is_write(cmd->req.cmd)) 613 return; 614 } 615 616 llist_add(&cmd->lentry, &queue->resp_list); 617 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work); 618 } 619 620 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd) 621 { 622 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED)) 623 nvmet_tcp_queue_response(&cmd->req); 624 else 625 cmd->req.execute(&cmd->req); 626 } 627 628 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd) 629 { 630 struct msghdr msg = { 631 .msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES, 632 }; 633 struct bio_vec bvec; 634 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 635 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst; 636 int ret; 637 638 bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left); 639 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); 640 ret = sock_sendmsg(cmd->queue->sock, &msg); 641 if (ret <= 0) 642 return ret; 643 644 cmd->offset += ret; 645 left -= ret; 646 647 if (left) 648 return -EAGAIN; 649 650 cmd->state = NVMET_TCP_SEND_DATA; 651 cmd->offset = 0; 652 return 1; 653 } 654 655 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch) 656 { 657 struct nvmet_tcp_queue *queue = cmd->queue; 658 int ret; 659 660 while (cmd->cur_sg) { 661 struct msghdr msg = { 662 .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, 663 }; 664 struct page *page = sg_page(cmd->cur_sg); 665 struct bio_vec bvec; 666 u32 left = cmd->cur_sg->length - cmd->offset; 667 668 if ((!last_in_batch && cmd->queue->send_list_len) || 669 cmd->wbytes_done + left < cmd->req.transfer_len || 670 queue->data_digest || !queue->nvme_sq.sqhd_disabled) 671 msg.msg_flags |= MSG_MORE; 672 673 bvec_set_page(&bvec, page, left, cmd->offset); 674 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); 675 ret = sock_sendmsg(cmd->queue->sock, &msg); 676 if (ret <= 0) 677 return ret; 678 679 cmd->offset += ret; 680 cmd->wbytes_done += ret; 681 682 /* Done with sg?*/ 683 if (cmd->offset == cmd->cur_sg->length) { 684 cmd->cur_sg = sg_next(cmd->cur_sg); 685 cmd->offset = 0; 686 } 687 } 688 689 if (queue->data_digest) { 690 cmd->state = NVMET_TCP_SEND_DDGST; 691 cmd->offset = 0; 692 } else { 693 if (queue->nvme_sq.sqhd_disabled) { 694 cmd->queue->snd_cmd = NULL; 695 nvmet_tcp_put_cmd(cmd); 696 } else { 697 nvmet_setup_response_pdu(cmd); 698 } 699 } 700 701 if (queue->nvme_sq.sqhd_disabled) 702 nvmet_tcp_free_cmd_buffers(cmd); 703 704 return 1; 705 706 } 707 708 static int 
nvmet_try_send_response(struct nvmet_tcp_cmd *cmd, 709 bool last_in_batch) 710 { 711 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, }; 712 struct bio_vec bvec; 713 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 714 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst; 715 int ret; 716 717 if (!last_in_batch && cmd->queue->send_list_len) 718 msg.msg_flags |= MSG_MORE; 719 else 720 msg.msg_flags |= MSG_EOR; 721 722 bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left); 723 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); 724 ret = sock_sendmsg(cmd->queue->sock, &msg); 725 if (ret <= 0) 726 return ret; 727 cmd->offset += ret; 728 left -= ret; 729 730 if (left) 731 return -EAGAIN; 732 733 nvmet_tcp_free_cmd_buffers(cmd); 734 cmd->queue->snd_cmd = NULL; 735 nvmet_tcp_put_cmd(cmd); 736 return 1; 737 } 738 739 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) 740 { 741 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, }; 742 struct bio_vec bvec; 743 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 744 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst; 745 int ret; 746 747 if (!last_in_batch && cmd->queue->send_list_len) 748 msg.msg_flags |= MSG_MORE; 749 else 750 msg.msg_flags |= MSG_EOR; 751 752 bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left); 753 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); 754 ret = sock_sendmsg(cmd->queue->sock, &msg); 755 if (ret <= 0) 756 return ret; 757 cmd->offset += ret; 758 left -= ret; 759 760 if (left) 761 return -EAGAIN; 762 763 cmd->queue->snd_cmd = NULL; 764 return 1; 765 } 766 767 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) 768 { 769 struct nvmet_tcp_queue *queue = cmd->queue; 770 int left = NVME_TCP_DIGEST_LENGTH - cmd->offset; 771 struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; 772 struct kvec iov = { 773 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset, 774 .iov_len = left 775 }; 776 int ret; 777 778 if (!last_in_batch && cmd->queue->send_list_len) 779 msg.msg_flags |= MSG_MORE; 780 else 781 msg.msg_flags |= MSG_EOR; 782 783 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); 784 if (unlikely(ret <= 0)) 785 return ret; 786 787 cmd->offset += ret; 788 left -= ret; 789 790 if (left) 791 return -EAGAIN; 792 793 if (queue->nvme_sq.sqhd_disabled) { 794 cmd->queue->snd_cmd = NULL; 795 nvmet_tcp_put_cmd(cmd); 796 } else { 797 nvmet_setup_response_pdu(cmd); 798 } 799 return 1; 800 } 801 802 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue, 803 bool last_in_batch) 804 { 805 struct nvmet_tcp_cmd *cmd = queue->snd_cmd; 806 int ret = 0; 807 808 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) { 809 cmd = nvmet_tcp_fetch_cmd(queue); 810 if (unlikely(!cmd)) 811 return 0; 812 } 813 814 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) { 815 ret = nvmet_try_send_data_pdu(cmd); 816 if (ret <= 0) 817 goto done_send; 818 } 819 820 if (cmd->state == NVMET_TCP_SEND_DATA) { 821 ret = nvmet_try_send_data(cmd, last_in_batch); 822 if (ret <= 0) 823 goto done_send; 824 } 825 826 if (cmd->state == NVMET_TCP_SEND_DDGST) { 827 ret = nvmet_try_send_ddgst(cmd, last_in_batch); 828 if (ret <= 0) 829 goto done_send; 830 } 831 832 if (cmd->state == NVMET_TCP_SEND_R2T) { 833 ret = nvmet_try_send_r2t(cmd, last_in_batch); 834 if (ret <= 0) 835 goto done_send; 836 } 837 838 if (cmd->state == NVMET_TCP_SEND_RESPONSE) 839 ret = nvmet_try_send_response(cmd, last_in_batch); 840 841 done_send: 842 if (ret < 0) { 843 if (ret == 
-EAGAIN) 844 return 0; 845 return ret; 846 } 847 848 return 1; 849 } 850 851 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue, 852 int budget, int *sends) 853 { 854 int i, ret = 0; 855 856 for (i = 0; i < budget; i++) { 857 ret = nvmet_tcp_try_send_one(queue, i == budget - 1); 858 if (unlikely(ret < 0)) { 859 nvmet_tcp_socket_error(queue, ret); 860 goto done; 861 } else if (ret == 0) { 862 break; 863 } 864 (*sends)++; 865 } 866 done: 867 return ret; 868 } 869 870 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) 871 { 872 queue->offset = 0; 873 queue->left = sizeof(struct nvme_tcp_hdr); 874 WRITE_ONCE(queue->cmd, NULL); 875 /* Ensure rcv_state is visible only after queue->cmd is set */ 876 smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU); 877 } 878 879 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) 880 { 881 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq; 882 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp; 883 struct msghdr msg = {}; 884 struct kvec iov; 885 int ret; 886 887 if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) { 888 pr_err("bad nvme-tcp pdu length (%d)\n", 889 le32_to_cpu(icreq->hdr.plen)); 890 nvmet_tcp_fatal_error(queue); 891 return -EPROTO; 892 } 893 894 if (icreq->pfv != NVME_TCP_PFV_1_0) { 895 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv); 896 return -EPROTO; 897 } 898 899 if (icreq->hpda != 0) { 900 pr_err("queue %d: unsupported hpda %d\n", queue->idx, 901 icreq->hpda); 902 return -EPROTO; 903 } 904 905 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE); 906 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE); 907 908 memset(icresp, 0, sizeof(*icresp)); 909 icresp->hdr.type = nvme_tcp_icresp; 910 icresp->hdr.hlen = sizeof(*icresp); 911 icresp->hdr.pdo = 0; 912 icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen); 913 icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); 914 icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA); 915 icresp->cpda = 0; 916 if (queue->hdr_digest) 917 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE; 918 if (queue->data_digest) 919 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE; 920 921 iov.iov_base = icresp; 922 iov.iov_len = sizeof(*icresp); 923 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); 924 if (ret < 0) { 925 queue->state = NVMET_TCP_Q_FAILED; 926 return ret; /* queue removal will cleanup */ 927 } 928 929 queue->state = NVMET_TCP_Q_LIVE; 930 nvmet_prepare_receive_pdu(queue); 931 return 0; 932 } 933 934 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, 935 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req) 936 { 937 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); 938 int ret; 939 940 /* 941 * This command has not been processed yet, hence we are trying to 942 * figure out if there is still pending data left to receive. If 943 * we don't, we can simply prepare for the next pdu and bail out, 944 * otherwise we will need to prepare a buffer and receive the 945 * stale data before continuing forward. 
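	 * In the latter case the stale inline data is received into a freshly
	 * mapped buffer and discarded; the NVMET_TCP_F_INIT_FAILED flag makes
	 * sure only the error response is sent once the data has been drained.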
946 */ 947 if (!nvme_is_write(cmd->req.cmd) || !data_len || 948 data_len > cmd->req.port->inline_data_size) { 949 nvmet_prepare_receive_pdu(queue); 950 return; 951 } 952 953 ret = nvmet_tcp_map_data(cmd); 954 if (unlikely(ret)) { 955 pr_err("queue %d: failed to map data\n", queue->idx); 956 nvmet_tcp_fatal_error(queue); 957 return; 958 } 959 960 queue->rcv_state = NVMET_TCP_RECV_DATA; 961 nvmet_tcp_build_pdu_iovec(cmd); 962 cmd->flags |= NVMET_TCP_F_INIT_FAILED; 963 } 964 965 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) 966 { 967 struct nvme_tcp_data_pdu *data = &queue->pdu.data; 968 struct nvmet_tcp_cmd *cmd; 969 unsigned int exp_data_len; 970 971 if (likely(queue->nr_cmds)) { 972 if (unlikely(data->ttag >= queue->nr_cmds)) { 973 pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n", 974 queue->idx, data->ttag, queue->nr_cmds); 975 goto err_proto; 976 } 977 cmd = &queue->cmds[data->ttag]; 978 } else { 979 cmd = &queue->connect; 980 } 981 982 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { 983 pr_err("ttag %u unexpected data offset %u (expected %u)\n", 984 data->ttag, le32_to_cpu(data->data_offset), 985 cmd->rbytes_done); 986 goto err_proto; 987 } 988 989 exp_data_len = le32_to_cpu(data->hdr.plen) - 990 nvmet_tcp_hdgst_len(queue) - 991 nvmet_tcp_ddgst_len(queue) - 992 sizeof(*data); 993 994 cmd->pdu_len = le32_to_cpu(data->data_length); 995 if (unlikely(cmd->pdu_len != exp_data_len || 996 cmd->pdu_len == 0 || 997 cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) { 998 pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len); 999 goto err_proto; 1000 } 1001 /* 1002 * Ensure command data structures are initialized. We must check both 1003 * cmd->req.sg and cmd->iov because they can have different NULL states: 1004 * - Uninitialized commands: both NULL 1005 * - READ commands: cmd->req.sg allocated, cmd->iov NULL 1006 * - WRITE commands: both allocated 1007 */ 1008 if (unlikely(!cmd->req.sg || !cmd->iov)) { 1009 pr_err("queue %d: H2CData PDU received for invalid command state (ttag %u)\n", 1010 queue->idx, data->ttag); 1011 goto err_proto; 1012 } 1013 cmd->pdu_recv = 0; 1014 nvmet_tcp_build_pdu_iovec(cmd); 1015 queue->cmd = cmd; 1016 queue->rcv_state = NVMET_TCP_RECV_DATA; 1017 1018 return 0; 1019 1020 err_proto: 1021 /* FIXME: use proper transport errors */ 1022 nvmet_tcp_fatal_error(queue); 1023 return -EPROTO; 1024 } 1025 1026 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) 1027 { 1028 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; 1029 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; 1030 struct nvmet_req *req; 1031 int ret; 1032 1033 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { 1034 if (hdr->type != nvme_tcp_icreq) { 1035 pr_err("unexpected pdu type (%d) before icreq\n", 1036 hdr->type); 1037 nvmet_tcp_fatal_error(queue); 1038 return -EPROTO; 1039 } 1040 return nvmet_tcp_handle_icreq(queue); 1041 } 1042 1043 if (unlikely(hdr->type == nvme_tcp_icreq)) { 1044 pr_err("queue %d: received icreq pdu in state %d\n", 1045 queue->idx, queue->state); 1046 nvmet_tcp_fatal_error(queue); 1047 return -EPROTO; 1048 } 1049 1050 if (hdr->type == nvme_tcp_h2c_data) { 1051 ret = nvmet_tcp_handle_h2c_data_pdu(queue); 1052 if (unlikely(ret)) 1053 return ret; 1054 return 0; 1055 } 1056 1057 queue->cmd = nvmet_tcp_get_cmd(queue); 1058 if (unlikely(!queue->cmd)) { 1059 /* This should never happen */ 1060 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d", 1061 queue->idx, queue->nr_cmds, queue->send_list_len, 1062 
nvme_cmd->common.opcode); 1063 nvmet_tcp_fatal_error(queue); 1064 return -ENOMEM; 1065 } 1066 1067 req = &queue->cmd->req; 1068 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd)); 1069 1070 if (unlikely(!nvmet_req_init(req, &queue->nvme_sq, &nvmet_tcp_ops))) { 1071 pr_err("failed cmd %p id %d opcode %d, data_len: %d, status: %04x\n", 1072 req->cmd, req->cmd->common.command_id, 1073 req->cmd->common.opcode, 1074 le32_to_cpu(req->cmd->common.dptr.sgl.length), 1075 le16_to_cpu(req->cqe->status)); 1076 1077 nvmet_tcp_handle_req_failure(queue, queue->cmd, req); 1078 return 0; 1079 } 1080 1081 ret = nvmet_tcp_map_data(queue->cmd); 1082 if (unlikely(ret)) { 1083 pr_err("queue %d: failed to map data\n", queue->idx); 1084 if (nvmet_tcp_has_inline_data(queue->cmd)) 1085 nvmet_tcp_fatal_error(queue); 1086 else 1087 nvmet_req_complete(req, ret); 1088 ret = -EAGAIN; 1089 goto out; 1090 } 1091 1092 if (nvmet_tcp_need_data_in(queue->cmd)) { 1093 if (nvmet_tcp_has_inline_data(queue->cmd)) { 1094 queue->rcv_state = NVMET_TCP_RECV_DATA; 1095 nvmet_tcp_build_pdu_iovec(queue->cmd); 1096 return 0; 1097 } 1098 /* send back R2T */ 1099 nvmet_tcp_queue_response(&queue->cmd->req); 1100 goto out; 1101 } 1102 1103 queue->cmd->req.execute(&queue->cmd->req); 1104 out: 1105 nvmet_prepare_receive_pdu(queue); 1106 return ret; 1107 } 1108 1109 static const u8 nvme_tcp_pdu_sizes[] = { 1110 [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu), 1111 [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu), 1112 [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu), 1113 }; 1114 1115 static inline u8 nvmet_tcp_pdu_size(u8 type) 1116 { 1117 size_t idx = type; 1118 1119 return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) && 1120 nvme_tcp_pdu_sizes[idx]) ? 1121 nvme_tcp_pdu_sizes[idx] : 0; 1122 } 1123 1124 static inline bool nvmet_tcp_pdu_valid(u8 type) 1125 { 1126 switch (type) { 1127 case nvme_tcp_icreq: 1128 case nvme_tcp_cmd: 1129 case nvme_tcp_h2c_data: 1130 /* fallthru */ 1131 return true; 1132 } 1133 1134 return false; 1135 } 1136 1137 static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue, 1138 struct msghdr *msg, char *cbuf) 1139 { 1140 struct cmsghdr *cmsg = (struct cmsghdr *)cbuf; 1141 u8 ctype, level, description; 1142 int ret = 0; 1143 1144 ctype = tls_get_record_type(queue->sock->sk, cmsg); 1145 switch (ctype) { 1146 case 0: 1147 break; 1148 case TLS_RECORD_TYPE_DATA: 1149 break; 1150 case TLS_RECORD_TYPE_ALERT: 1151 tls_alert_recv(queue->sock->sk, msg, &level, &description); 1152 if (level == TLS_ALERT_LEVEL_FATAL) { 1153 pr_err("queue %d: TLS Alert desc %u\n", 1154 queue->idx, description); 1155 ret = -ENOTCONN; 1156 } else { 1157 pr_warn("queue %d: TLS Alert desc %u\n", 1158 queue->idx, description); 1159 ret = -EAGAIN; 1160 } 1161 break; 1162 default: 1163 /* discard this record type */ 1164 pr_err("queue %d: TLS record %d unhandled\n", 1165 queue->idx, ctype); 1166 ret = -EAGAIN; 1167 break; 1168 } 1169 return ret; 1170 } 1171 1172 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue) 1173 { 1174 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; 1175 int len, ret; 1176 struct kvec iov; 1177 char cbuf[CMSG_LEN(sizeof(char))] = {}; 1178 struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; 1179 1180 recv: 1181 iov.iov_base = (void *)&queue->pdu + queue->offset; 1182 iov.iov_len = queue->left; 1183 if (queue->tls_pskid) { 1184 msg.msg_control = cbuf; 1185 msg.msg_controllen = sizeof(cbuf); 1186 } 1187 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, 1188 iov.iov_len, msg.msg_flags); 1189 if (unlikely(len < 0)) 1190 
return len; 1191 if (queue->tls_pskid) { 1192 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf); 1193 if (ret < 0) 1194 return ret; 1195 } 1196 1197 queue->offset += len; 1198 queue->left -= len; 1199 if (queue->left) 1200 return -EAGAIN; 1201 1202 if (queue->offset == sizeof(struct nvme_tcp_hdr)) { 1203 u8 hdgst = nvmet_tcp_hdgst_len(queue); 1204 1205 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) { 1206 pr_err("unexpected pdu type %d\n", hdr->type); 1207 nvmet_tcp_fatal_error(queue); 1208 return -EIO; 1209 } 1210 1211 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) { 1212 pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen); 1213 return -EIO; 1214 } 1215 1216 queue->left = hdr->hlen - queue->offset + hdgst; 1217 goto recv; 1218 } 1219 1220 if (queue->hdr_digest && 1221 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) { 1222 nvmet_tcp_fatal_error(queue); /* fatal */ 1223 return -EPROTO; 1224 } 1225 1226 if (queue->data_digest && 1227 nvmet_tcp_check_ddgst(queue, &queue->pdu)) { 1228 nvmet_tcp_fatal_error(queue); /* fatal */ 1229 return -EPROTO; 1230 } 1231 1232 return nvmet_tcp_done_recv_pdu(queue); 1233 } 1234 1235 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) 1236 { 1237 struct nvmet_tcp_queue *queue = cmd->queue; 1238 1239 nvmet_tcp_calc_ddgst(cmd); 1240 queue->offset = 0; 1241 queue->left = NVME_TCP_DIGEST_LENGTH; 1242 queue->rcv_state = NVMET_TCP_RECV_DDGST; 1243 } 1244 1245 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) 1246 { 1247 struct nvmet_tcp_cmd *cmd = queue->cmd; 1248 int len, ret; 1249 1250 while (msg_data_left(&cmd->recv_msg)) { 1251 len = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, 1252 cmd->recv_msg.msg_flags); 1253 if (len <= 0) 1254 return len; 1255 if (queue->tls_pskid) { 1256 ret = nvmet_tcp_tls_record_ok(cmd->queue, 1257 &cmd->recv_msg, cmd->recv_cbuf); 1258 if (ret < 0) 1259 return ret; 1260 } 1261 1262 cmd->pdu_recv += len; 1263 cmd->rbytes_done += len; 1264 } 1265 1266 if (queue->data_digest) { 1267 nvmet_tcp_prep_recv_ddgst(cmd); 1268 return 0; 1269 } 1270 1271 if (cmd->rbytes_done == cmd->req.transfer_len) 1272 nvmet_tcp_execute_request(cmd); 1273 1274 nvmet_prepare_receive_pdu(queue); 1275 return 0; 1276 } 1277 1278 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue) 1279 { 1280 struct nvmet_tcp_cmd *cmd = queue->cmd; 1281 int ret, len; 1282 char cbuf[CMSG_LEN(sizeof(char))] = {}; 1283 struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; 1284 struct kvec iov = { 1285 .iov_base = (void *)&cmd->recv_ddgst + queue->offset, 1286 .iov_len = queue->left 1287 }; 1288 1289 if (queue->tls_pskid) { 1290 msg.msg_control = cbuf; 1291 msg.msg_controllen = sizeof(cbuf); 1292 } 1293 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, 1294 iov.iov_len, msg.msg_flags); 1295 if (unlikely(len < 0)) 1296 return len; 1297 if (queue->tls_pskid) { 1298 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf); 1299 if (ret < 0) 1300 return ret; 1301 } 1302 1303 queue->offset += len; 1304 queue->left -= len; 1305 if (queue->left) 1306 return -EAGAIN; 1307 1308 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) { 1309 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n", 1310 queue->idx, cmd->req.cmd->common.command_id, 1311 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), 1312 le32_to_cpu(cmd->exp_ddgst)); 1313 nvmet_req_uninit(&cmd->req); 1314 nvmet_tcp_free_cmd_buffers(cmd); 1315 nvmet_tcp_fatal_error(queue); 1316 ret = -EPROTO; 1317 goto out; 1318 } 1319 1320 if (cmd->rbytes_done == 
cmd->req.transfer_len) 1321 nvmet_tcp_execute_request(cmd); 1322 1323 ret = 0; 1324 out: 1325 nvmet_prepare_receive_pdu(queue); 1326 return ret; 1327 } 1328 1329 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) 1330 { 1331 int result = 0; 1332 1333 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) 1334 return 0; 1335 1336 if (queue->rcv_state == NVMET_TCP_RECV_PDU) { 1337 result = nvmet_tcp_try_recv_pdu(queue); 1338 if (result != 0) 1339 goto done_recv; 1340 } 1341 1342 if (queue->rcv_state == NVMET_TCP_RECV_DATA) { 1343 result = nvmet_tcp_try_recv_data(queue); 1344 if (result != 0) 1345 goto done_recv; 1346 } 1347 1348 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) { 1349 result = nvmet_tcp_try_recv_ddgst(queue); 1350 if (result != 0) 1351 goto done_recv; 1352 } 1353 1354 done_recv: 1355 if (result < 0) { 1356 if (result == -EAGAIN) 1357 return 0; 1358 return result; 1359 } 1360 return 1; 1361 } 1362 1363 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue, 1364 int budget, int *recvs) 1365 { 1366 int i, ret = 0; 1367 1368 for (i = 0; i < budget; i++) { 1369 ret = nvmet_tcp_try_recv_one(queue); 1370 if (unlikely(ret < 0)) { 1371 nvmet_tcp_socket_error(queue, ret); 1372 goto done; 1373 } else if (ret == 0) { 1374 break; 1375 } 1376 (*recvs)++; 1377 } 1378 done: 1379 return ret; 1380 } 1381 1382 static void nvmet_tcp_release_queue(struct kref *kref) 1383 { 1384 struct nvmet_tcp_queue *queue = 1385 container_of(kref, struct nvmet_tcp_queue, kref); 1386 1387 WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING); 1388 queue_work(nvmet_wq, &queue->release_work); 1389 } 1390 1391 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue) 1392 { 1393 spin_lock_bh(&queue->state_lock); 1394 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { 1395 /* Socket closed during handshake */ 1396 tls_handshake_cancel(queue->sock->sk); 1397 } 1398 if (queue->state != NVMET_TCP_Q_DISCONNECTING) { 1399 queue->state = NVMET_TCP_Q_DISCONNECTING; 1400 kref_put(&queue->kref, nvmet_tcp_release_queue); 1401 } 1402 spin_unlock_bh(&queue->state_lock); 1403 } 1404 1405 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue) 1406 { 1407 queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs); 1408 } 1409 1410 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue, 1411 int ops) 1412 { 1413 if (!idle_poll_period_usecs) 1414 return false; 1415 1416 if (ops) 1417 nvmet_tcp_arm_queue_deadline(queue); 1418 1419 return !time_after(jiffies, queue->poll_end); 1420 } 1421 1422 static void nvmet_tcp_io_work(struct work_struct *w) 1423 { 1424 struct nvmet_tcp_queue *queue = 1425 container_of(w, struct nvmet_tcp_queue, io_work); 1426 bool pending; 1427 int ret, ops = 0; 1428 1429 do { 1430 pending = false; 1431 1432 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops); 1433 if (ret > 0) 1434 pending = true; 1435 else if (ret < 0) 1436 return; 1437 1438 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops); 1439 if (ret > 0) 1440 pending = true; 1441 else if (ret < 0) 1442 return; 1443 1444 } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET); 1445 1446 /* 1447 * Requeue the worker if idle deadline period is in progress or any 1448 * ops activity was recorded during the do-while loop above. 
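	 * Otherwise the queue is left idle and io_work gets scheduled again
	 * from the data_ready/write_space socket callbacks when there is more
	 * work to process.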
1449 */ 1450 if (nvmet_tcp_check_queue_deadline(queue, ops) || pending) 1451 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); 1452 } 1453 1454 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue, 1455 struct nvmet_tcp_cmd *c) 1456 { 1457 u8 hdgst = nvmet_tcp_hdgst_len(queue); 1458 1459 c->queue = queue; 1460 c->req.port = queue->port->nport; 1461 1462 c->cmd_pdu = page_frag_alloc(&queue->pf_cache, 1463 sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); 1464 if (!c->cmd_pdu) 1465 return -ENOMEM; 1466 c->req.cmd = &c->cmd_pdu->cmd; 1467 1468 c->rsp_pdu = page_frag_alloc(&queue->pf_cache, 1469 sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); 1470 if (!c->rsp_pdu) 1471 goto out_free_cmd; 1472 c->req.cqe = &c->rsp_pdu->cqe; 1473 1474 c->data_pdu = page_frag_alloc(&queue->pf_cache, 1475 sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); 1476 if (!c->data_pdu) 1477 goto out_free_rsp; 1478 1479 c->r2t_pdu = page_frag_alloc(&queue->pf_cache, 1480 sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); 1481 if (!c->r2t_pdu) 1482 goto out_free_data; 1483 1484 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { 1485 c->recv_msg.msg_control = c->recv_cbuf; 1486 c->recv_msg.msg_controllen = sizeof(c->recv_cbuf); 1487 } 1488 c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; 1489 1490 list_add_tail(&c->entry, &queue->free_list); 1491 1492 return 0; 1493 out_free_data: 1494 page_frag_free(c->data_pdu); 1495 out_free_rsp: 1496 page_frag_free(c->rsp_pdu); 1497 out_free_cmd: 1498 page_frag_free(c->cmd_pdu); 1499 return -ENOMEM; 1500 } 1501 1502 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c) 1503 { 1504 page_frag_free(c->r2t_pdu); 1505 page_frag_free(c->data_pdu); 1506 page_frag_free(c->rsp_pdu); 1507 page_frag_free(c->cmd_pdu); 1508 } 1509 1510 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue) 1511 { 1512 struct nvmet_tcp_cmd *cmds; 1513 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds; 1514 1515 cmds = kvzalloc_objs(struct nvmet_tcp_cmd, nr_cmds); 1516 if (!cmds) 1517 goto out; 1518 1519 for (i = 0; i < nr_cmds; i++) { 1520 ret = nvmet_tcp_alloc_cmd(queue, cmds + i); 1521 if (ret) 1522 goto out_free; 1523 } 1524 1525 queue->cmds = cmds; 1526 1527 return 0; 1528 out_free: 1529 while (--i >= 0) 1530 nvmet_tcp_free_cmd(cmds + i); 1531 kvfree(cmds); 1532 out: 1533 return ret; 1534 } 1535 1536 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue) 1537 { 1538 struct nvmet_tcp_cmd *cmds = queue->cmds; 1539 int i; 1540 1541 for (i = 0; i < queue->nr_cmds; i++) 1542 nvmet_tcp_free_cmd(cmds + i); 1543 1544 nvmet_tcp_free_cmd(&queue->connect); 1545 kvfree(cmds); 1546 } 1547 1548 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) 1549 { 1550 struct socket *sock = queue->sock; 1551 1552 if (!queue->state_change) 1553 return; 1554 1555 write_lock_bh(&sock->sk->sk_callback_lock); 1556 sock->sk->sk_data_ready = queue->data_ready; 1557 sock->sk->sk_state_change = queue->state_change; 1558 sock->sk->sk_write_space = queue->write_space; 1559 sock->sk->sk_user_data = NULL; 1560 write_unlock_bh(&sock->sk->sk_callback_lock); 1561 } 1562 1563 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) 1564 { 1565 struct nvmet_tcp_cmd *cmd = queue->cmds; 1566 int i; 1567 1568 for (i = 0; i < queue->nr_cmds; i++, cmd++) { 1569 if (nvmet_tcp_need_data_in(cmd)) 1570 nvmet_req_uninit(&cmd->req); 1571 } 1572 1573 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) { 1574 /* failed in connect */ 1575 
nvmet_req_uninit(&queue->connect.req); 1576 } 1577 } 1578 1579 static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue) 1580 { 1581 struct nvmet_tcp_cmd *cmd = queue->cmds; 1582 int i; 1583 1584 for (i = 0; i < queue->nr_cmds; i++, cmd++) 1585 nvmet_tcp_free_cmd_buffers(cmd); 1586 nvmet_tcp_free_cmd_buffers(&queue->connect); 1587 } 1588 1589 static void nvmet_tcp_release_queue_work(struct work_struct *w) 1590 { 1591 struct nvmet_tcp_queue *queue = 1592 container_of(w, struct nvmet_tcp_queue, release_work); 1593 1594 mutex_lock(&nvmet_tcp_queue_mutex); 1595 list_del_init(&queue->queue_list); 1596 mutex_unlock(&nvmet_tcp_queue_mutex); 1597 1598 nvmet_tcp_restore_socket_callbacks(queue); 1599 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work); 1600 cancel_work_sync(&queue->io_work); 1601 /* stop accepting incoming data */ 1602 queue->rcv_state = NVMET_TCP_RECV_ERR; 1603 1604 nvmet_sq_put_tls_key(&queue->nvme_sq); 1605 nvmet_tcp_uninit_data_in_cmds(queue); 1606 nvmet_sq_destroy(&queue->nvme_sq); 1607 nvmet_cq_put(&queue->nvme_cq); 1608 cancel_work_sync(&queue->io_work); 1609 nvmet_tcp_free_cmd_data_in_buffers(queue); 1610 /* ->sock will be released by fput() */ 1611 fput(queue->sock->file); 1612 nvmet_tcp_free_cmds(queue); 1613 ida_free(&nvmet_tcp_queue_ida, queue->idx); 1614 page_frag_cache_drain(&queue->pf_cache); 1615 kfree(queue); 1616 } 1617 1618 static void nvmet_tcp_data_ready(struct sock *sk) 1619 { 1620 struct nvmet_tcp_queue *queue; 1621 1622 trace_sk_data_ready(sk); 1623 1624 read_lock_bh(&sk->sk_callback_lock); 1625 queue = sk->sk_user_data; 1626 if (likely(queue)) { 1627 if (queue->data_ready) 1628 queue->data_ready(sk); 1629 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) 1630 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, 1631 &queue->io_work); 1632 } 1633 read_unlock_bh(&sk->sk_callback_lock); 1634 } 1635 1636 static void nvmet_tcp_write_space(struct sock *sk) 1637 { 1638 struct nvmet_tcp_queue *queue; 1639 1640 read_lock_bh(&sk->sk_callback_lock); 1641 queue = sk->sk_user_data; 1642 if (unlikely(!queue)) 1643 goto out; 1644 1645 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { 1646 queue->write_space(sk); 1647 goto out; 1648 } 1649 1650 if (sk_stream_is_writeable(sk)) { 1651 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1652 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); 1653 } 1654 out: 1655 read_unlock_bh(&sk->sk_callback_lock); 1656 } 1657 1658 static void nvmet_tcp_state_change(struct sock *sk) 1659 { 1660 struct nvmet_tcp_queue *queue; 1661 1662 read_lock_bh(&sk->sk_callback_lock); 1663 queue = sk->sk_user_data; 1664 if (!queue) 1665 goto done; 1666 1667 switch (sk->sk_state) { 1668 case TCP_FIN_WAIT2: 1669 case TCP_LAST_ACK: 1670 break; 1671 case TCP_FIN_WAIT1: 1672 case TCP_CLOSE_WAIT: 1673 case TCP_CLOSE: 1674 /* FALLTHRU */ 1675 nvmet_tcp_schedule_release_queue(queue); 1676 break; 1677 default: 1678 pr_warn("queue %d unhandled state %d\n", 1679 queue->idx, sk->sk_state); 1680 } 1681 done: 1682 read_unlock_bh(&sk->sk_callback_lock); 1683 } 1684 1685 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) 1686 { 1687 struct socket *sock = queue->sock; 1688 struct inet_sock *inet = inet_sk(sock->sk); 1689 int ret; 1690 1691 ret = kernel_getsockname(sock, 1692 (struct sockaddr *)&queue->sockaddr); 1693 if (ret < 0) 1694 return ret; 1695 1696 ret = kernel_getpeername(sock, 1697 (struct sockaddr *)&queue->sockaddr_peer); 1698 if (ret < 0) 1699 return ret; 1700 1701 /* 1702 * Cleanup whatever is sitting in the TCP 
transmit queue on socket 1703 * close. This is done to prevent stale data from being sent should 1704 * the network connection be restored before TCP times out. 1705 */ 1706 sock_no_linger(sock->sk); 1707 1708 if (so_priority > 0) 1709 sock_set_priority(sock->sk, so_priority); 1710 1711 /* Set socket type of service */ 1712 if (inet->rcv_tos > 0) 1713 ip_sock_set_tos(sock->sk, inet->rcv_tos); 1714 1715 ret = 0; 1716 write_lock_bh(&sock->sk->sk_callback_lock); 1717 if (sock->sk->sk_state != TCP_ESTABLISHED) { 1718 /* 1719 * If the socket is already closing, don't even start 1720 * consuming it 1721 */ 1722 ret = -ENOTCONN; 1723 } else { 1724 sock->sk->sk_user_data = queue; 1725 queue->data_ready = sock->sk->sk_data_ready; 1726 sock->sk->sk_data_ready = nvmet_tcp_data_ready; 1727 queue->state_change = sock->sk->sk_state_change; 1728 sock->sk->sk_state_change = nvmet_tcp_state_change; 1729 queue->write_space = sock->sk->sk_write_space; 1730 sock->sk->sk_write_space = nvmet_tcp_write_space; 1731 if (idle_poll_period_usecs) 1732 nvmet_tcp_arm_queue_deadline(queue); 1733 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); 1734 } 1735 write_unlock_bh(&sock->sk->sk_callback_lock); 1736 1737 return ret; 1738 } 1739 1740 #ifdef CONFIG_NVME_TARGET_TCP_TLS 1741 static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue) 1742 { 1743 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; 1744 int len, ret; 1745 struct kvec iov = { 1746 .iov_base = (u8 *)&queue->pdu + queue->offset, 1747 .iov_len = sizeof(struct nvme_tcp_hdr), 1748 }; 1749 char cbuf[CMSG_LEN(sizeof(char))] = {}; 1750 struct msghdr msg = { 1751 .msg_control = cbuf, 1752 .msg_controllen = sizeof(cbuf), 1753 .msg_flags = MSG_PEEK, 1754 }; 1755 1756 if (nvmet_port_secure_channel_required(queue->port->nport)) 1757 return 0; 1758 1759 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, 1760 iov.iov_len, msg.msg_flags); 1761 if (unlikely(len < 0)) { 1762 pr_debug("queue %d: peek error %d\n", 1763 queue->idx, len); 1764 return len; 1765 } 1766 1767 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf); 1768 if (ret < 0) 1769 return ret; 1770 1771 if (len < sizeof(struct nvme_tcp_hdr)) { 1772 pr_debug("queue %d: short read, %d bytes missing\n", 1773 queue->idx, (int)iov.iov_len - len); 1774 return -EAGAIN; 1775 } 1776 pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n", 1777 queue->idx, hdr->type, hdr->hlen, hdr->plen, 1778 (int)sizeof(struct nvme_tcp_icreq_pdu)); 1779 if (hdr->type == nvme_tcp_icreq && 1780 hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) && 1781 hdr->plen == cpu_to_le32(sizeof(struct nvme_tcp_icreq_pdu))) { 1782 pr_debug("queue %d: icreq detected\n", 1783 queue->idx); 1784 return len; 1785 } 1786 return 0; 1787 } 1788 1789 static int nvmet_tcp_tls_key_lookup(struct nvmet_tcp_queue *queue, 1790 key_serial_t peerid) 1791 { 1792 struct key *tls_key = nvme_tls_key_lookup(peerid); 1793 int status = 0; 1794 1795 if (IS_ERR(tls_key)) { 1796 pr_warn("%s: queue %d failed to lookup key %x\n", 1797 __func__, queue->idx, peerid); 1798 spin_lock_bh(&queue->state_lock); 1799 queue->state = NVMET_TCP_Q_FAILED; 1800 spin_unlock_bh(&queue->state_lock); 1801 status = PTR_ERR(tls_key); 1802 } else { 1803 pr_debug("%s: queue %d using TLS PSK %x\n", 1804 __func__, queue->idx, peerid); 1805 queue->nvme_sq.tls_key = tls_key; 1806 } 1807 return status; 1808 } 1809 1810 static void nvmet_tcp_tls_handshake_done(void *data, int status, 1811 key_serial_t peerid) 1812 { 1813 struct nvmet_tcp_queue *queue = data; 1814 1815 pr_debug("queue %d: TLS 
handshake done, key %x, status %d\n", 1816 queue->idx, peerid, status); 1817 spin_lock_bh(&queue->state_lock); 1818 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) { 1819 spin_unlock_bh(&queue->state_lock); 1820 return; 1821 } 1822 if (!status) { 1823 queue->tls_pskid = peerid; 1824 queue->state = NVMET_TCP_Q_CONNECTING; 1825 } else 1826 queue->state = NVMET_TCP_Q_FAILED; 1827 spin_unlock_bh(&queue->state_lock); 1828 1829 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work); 1830 1831 if (!status) 1832 status = nvmet_tcp_tls_key_lookup(queue, peerid); 1833 1834 if (status) 1835 nvmet_tcp_schedule_release_queue(queue); 1836 else 1837 nvmet_tcp_set_queue_sock(queue); 1838 kref_put(&queue->kref, nvmet_tcp_release_queue); 1839 } 1840 1841 static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) 1842 { 1843 struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w), 1844 struct nvmet_tcp_queue, tls_handshake_tmo_work); 1845 1846 pr_warn("queue %d: TLS handshake timeout\n", queue->idx); 1847 /* 1848 * If tls_handshake_cancel() fails we've lost the race with 1849 * nvmet_tcp_tls_handshake_done() */ 1850 if (!tls_handshake_cancel(queue->sock->sk)) 1851 return; 1852 spin_lock_bh(&queue->state_lock); 1853 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) { 1854 spin_unlock_bh(&queue->state_lock); 1855 return; 1856 } 1857 queue->state = NVMET_TCP_Q_FAILED; 1858 spin_unlock_bh(&queue->state_lock); 1859 nvmet_tcp_schedule_release_queue(queue); 1860 kref_put(&queue->kref, nvmet_tcp_release_queue); 1861 } 1862 1863 static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue) 1864 { 1865 int ret = -EOPNOTSUPP; 1866 struct tls_handshake_args args; 1867 1868 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) { 1869 pr_warn("cannot start TLS in state %d\n", queue->state); 1870 return -EINVAL; 1871 } 1872 1873 kref_get(&queue->kref); 1874 pr_debug("queue %d: TLS ServerHello\n", queue->idx); 1875 memset(&args, 0, sizeof(args)); 1876 args.ta_sock = queue->sock; 1877 args.ta_done = nvmet_tcp_tls_handshake_done; 1878 args.ta_data = queue; 1879 args.ta_keyring = key_serial(queue->port->nport->keyring); 1880 args.ta_timeout_ms = tls_handshake_timeout * 1000; 1881 1882 ret = tls_server_hello_psk(&args, GFP_KERNEL); 1883 if (ret) { 1884 kref_put(&queue->kref, nvmet_tcp_release_queue); 1885 pr_err("failed to start TLS, err=%d\n", ret); 1886 } else { 1887 queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work, 1888 tls_handshake_timeout * HZ); 1889 } 1890 return ret; 1891 } 1892 #else 1893 static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {} 1894 #endif 1895 1896 static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, 1897 struct socket *newsock) 1898 { 1899 struct nvmet_tcp_queue *queue; 1900 struct file *sock_file = NULL; 1901 int ret; 1902 1903 queue = kzalloc_obj(*queue); 1904 if (!queue) { 1905 ret = -ENOMEM; 1906 goto out_release; 1907 } 1908 1909 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work); 1910 INIT_WORK(&queue->io_work, nvmet_tcp_io_work); 1911 kref_init(&queue->kref); 1912 queue->sock = newsock; 1913 queue->port = port; 1914 queue->nr_cmds = 0; 1915 spin_lock_init(&queue->state_lock); 1916 if (queue->port->nport->disc_addr.tsas.tcp.sectype == 1917 NVMF_TCP_SECTYPE_TLS13) 1918 queue->state = NVMET_TCP_Q_TLS_HANDSHAKE; 1919 else 1920 queue->state = NVMET_TCP_Q_CONNECTING; 1921 INIT_LIST_HEAD(&queue->free_list); 1922 init_llist_head(&queue->resp_list); 1923 INIT_LIST_HEAD(&queue->resp_send_list); 1924 1925 sock_file = 
sock_alloc_file(queue->sock, O_CLOEXEC, NULL); 1926 if (IS_ERR(sock_file)) { 1927 ret = PTR_ERR(sock_file); 1928 goto out_free_queue; 1929 } 1930 1931 queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL); 1932 if (queue->idx < 0) { 1933 ret = queue->idx; 1934 goto out_sock; 1935 } 1936 1937 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect); 1938 if (ret) 1939 goto out_ida_remove; 1940 1941 nvmet_cq_init(&queue->nvme_cq); 1942 ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); 1943 if (ret) 1944 goto out_free_connect; 1945 1946 nvmet_prepare_receive_pdu(queue); 1947 1948 mutex_lock(&nvmet_tcp_queue_mutex); 1949 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list); 1950 mutex_unlock(&nvmet_tcp_queue_mutex); 1951 1952 INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work, 1953 nvmet_tcp_tls_handshake_timeout); 1954 #ifdef CONFIG_NVME_TARGET_TCP_TLS 1955 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { 1956 struct sock *sk = queue->sock->sk; 1957 1958 /* Restore the default callbacks before starting upcall */ 1959 write_lock_bh(&sk->sk_callback_lock); 1960 sk->sk_user_data = NULL; 1961 sk->sk_data_ready = port->data_ready; 1962 write_unlock_bh(&sk->sk_callback_lock); 1963 if (!nvmet_tcp_try_peek_pdu(queue)) { 1964 if (!nvmet_tcp_tls_handshake(queue)) 1965 return; 1966 /* TLS handshake failed, terminate the connection */ 1967 goto out_destroy_sq; 1968 } 1969 /* Not a TLS connection, continue with normal processing */ 1970 queue->state = NVMET_TCP_Q_CONNECTING; 1971 } 1972 #endif 1973 1974 ret = nvmet_tcp_set_queue_sock(queue); 1975 if (ret) 1976 goto out_destroy_sq; 1977 1978 return; 1979 out_destroy_sq: 1980 mutex_lock(&nvmet_tcp_queue_mutex); 1981 list_del_init(&queue->queue_list); 1982 mutex_unlock(&nvmet_tcp_queue_mutex); 1983 nvmet_sq_destroy(&queue->nvme_sq); 1984 out_free_connect: 1985 nvmet_cq_put(&queue->nvme_cq); 1986 nvmet_tcp_free_cmd(&queue->connect); 1987 out_ida_remove: 1988 ida_free(&nvmet_tcp_queue_ida, queue->idx); 1989 out_sock: 1990 fput(queue->sock->file); 1991 out_free_queue: 1992 kfree(queue); 1993 out_release: 1994 pr_err("failed to allocate queue, error %d\n", ret); 1995 if (!sock_file) 1996 sock_release(newsock); 1997 } 1998 1999 static void nvmet_tcp_accept_work(struct work_struct *w) 2000 { 2001 struct nvmet_tcp_port *port = 2002 container_of(w, struct nvmet_tcp_port, accept_work); 2003 struct socket *newsock; 2004 int ret; 2005 2006 while (true) { 2007 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK); 2008 if (ret < 0) { 2009 if (ret != -EAGAIN) 2010 pr_warn("failed to accept err=%d\n", ret); 2011 return; 2012 } 2013 nvmet_tcp_alloc_queue(port, newsock); 2014 } 2015 } 2016 2017 static void nvmet_tcp_listen_data_ready(struct sock *sk) 2018 { 2019 struct nvmet_tcp_port *port; 2020 2021 trace_sk_data_ready(sk); 2022 2023 if (sk->sk_state != TCP_LISTEN) 2024 return; 2025 2026 read_lock_bh(&sk->sk_callback_lock); 2027 port = sk->sk_user_data; 2028 if (port) 2029 queue_work(nvmet_wq, &port->accept_work); 2030 read_unlock_bh(&sk->sk_callback_lock); 2031 } 2032 2033 static int nvmet_tcp_add_port(struct nvmet_port *nport) 2034 { 2035 struct nvmet_tcp_port *port; 2036 __kernel_sa_family_t af; 2037 int ret; 2038 2039 port = kzalloc_obj(*port); 2040 if (!port) 2041 return -ENOMEM; 2042 2043 switch (nport->disc_addr.adrfam) { 2044 case NVMF_ADDR_FAMILY_IP4: 2045 af = AF_INET; 2046 break; 2047 case NVMF_ADDR_FAMILY_IP6: 2048 af = AF_INET6; 2049 break; 2050 default: 2051 pr_err("address family %d not supported\n", 2052 nport->disc_addr.adrfam); 2053 ret = -EINVAL; 2054 
static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port;
	__kernel_sa_family_t af;
	int ret;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	switch (nport->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
		       nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto err_port;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
				   nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
		       nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto err_port;
	}

	port->nport = nport;
	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
	if (port->nport->inline_data_size < 0)
		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
			  IPPROTO_TCP, &port->sock);
	if (ret) {
		pr_err("failed to create a socket\n");
		goto err_port;
	}

	port->sock->sk->sk_user_data = port;
	port->data_ready = port->sock->sk->sk_data_ready;
	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
	sock_set_reuseaddr(port->sock->sk);
	tcp_sock_set_nodelay(port->sock->sk);
	if (so_priority > 0)
		sock_set_priority(port->sock->sk, so_priority);

	ret = kernel_bind(port->sock, (struct sockaddr_unsized *)&port->addr,
			  sizeof(port->addr));
	if (ret) {
		pr_err("failed to bind port socket %d\n", ret);
		goto err_sock;
	}

	ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
	if (ret) {
		pr_err("failed to listen %d on port sock\n", ret);
		goto err_sock;
	}

	nport->priv = port;
	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(nport->disc_addr.portid), &port->addr);

	return 0;

err_sock:
	sock_release(port->sock);
err_port:
	kfree(port);
	return ret;
}
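
/*
 * Port teardown helpers: nvmet_tcp_destroy_port_queues() shuts down every
 * accepted queue still pointing at this port, and nvmet_tcp_remove_port()
 * restores the listening socket's original callbacks and cancels pending
 * accept work before releasing the socket.
 */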
2134 */ 2135 nvmet_tcp_destroy_port_queues(port); 2136 2137 sock_release(port->sock); 2138 kfree(port); 2139 } 2140 2141 static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl) 2142 { 2143 struct nvmet_tcp_queue *queue; 2144 2145 mutex_lock(&nvmet_tcp_queue_mutex); 2146 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) 2147 if (queue->nvme_sq.ctrl == ctrl) 2148 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 2149 mutex_unlock(&nvmet_tcp_queue_mutex); 2150 } 2151 2152 static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq) 2153 { 2154 struct nvmet_tcp_queue *queue = 2155 container_of(sq, struct nvmet_tcp_queue, nvme_sq); 2156 2157 if (sq->qid == 0) { 2158 struct nvmet_tcp_queue *q; 2159 int pending = 0; 2160 2161 /* Check for pending controller teardown */ 2162 mutex_lock(&nvmet_tcp_queue_mutex); 2163 list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) { 2164 if (q->nvme_sq.ctrl == sq->ctrl && 2165 q->state == NVMET_TCP_Q_DISCONNECTING) 2166 pending++; 2167 } 2168 mutex_unlock(&nvmet_tcp_queue_mutex); 2169 if (pending > NVMET_TCP_BACKLOG) 2170 return NVME_SC_CONNECT_CTRL_BUSY; 2171 } 2172 2173 queue->nr_cmds = sq->size * 2; 2174 if (nvmet_tcp_alloc_cmds(queue)) { 2175 queue->nr_cmds = 0; 2176 return NVME_SC_INTERNAL; 2177 } 2178 return 0; 2179 } 2180 2181 static void nvmet_tcp_disc_port_addr(struct nvmet_req *req, 2182 struct nvmet_port *nport, char *traddr) 2183 { 2184 struct nvmet_tcp_port *port = nport->priv; 2185 2186 if (inet_addr_is_any(&port->addr)) { 2187 struct nvmet_tcp_cmd *cmd = 2188 container_of(req, struct nvmet_tcp_cmd, req); 2189 struct nvmet_tcp_queue *queue = cmd->queue; 2190 2191 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr); 2192 } else { 2193 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE); 2194 } 2195 } 2196 2197 static ssize_t nvmet_tcp_host_port_addr(struct nvmet_ctrl *ctrl, 2198 char *traddr, size_t traddr_len) 2199 { 2200 struct nvmet_sq *sq = ctrl->sqs[0]; 2201 struct nvmet_tcp_queue *queue = 2202 container_of(sq, struct nvmet_tcp_queue, nvme_sq); 2203 2204 if (queue->sockaddr_peer.ss_family == AF_UNSPEC) 2205 return -EINVAL; 2206 return snprintf(traddr, traddr_len, "%pISc", 2207 (struct sockaddr *)&queue->sockaddr_peer); 2208 } 2209 2210 static const struct nvmet_fabrics_ops nvmet_tcp_ops = { 2211 .owner = THIS_MODULE, 2212 .type = NVMF_TRTYPE_TCP, 2213 .msdbd = 1, 2214 .add_port = nvmet_tcp_add_port, 2215 .remove_port = nvmet_tcp_remove_port, 2216 .queue_response = nvmet_tcp_queue_response, 2217 .delete_ctrl = nvmet_tcp_delete_ctrl, 2218 .install_queue = nvmet_tcp_install_queue, 2219 .disc_traddr = nvmet_tcp_disc_port_addr, 2220 .host_traddr = nvmet_tcp_host_port_addr, 2221 }; 2222 2223 static int __init nvmet_tcp_init(void) 2224 { 2225 int ret; 2226 2227 nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", 2228 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 2229 if (!nvmet_tcp_wq) 2230 return -ENOMEM; 2231 2232 ret = nvmet_register_transport(&nvmet_tcp_ops); 2233 if (ret) 2234 goto err; 2235 2236 return 0; 2237 err: 2238 destroy_workqueue(nvmet_tcp_wq); 2239 return ret; 2240 } 2241 2242 static void __exit nvmet_tcp_exit(void) 2243 { 2244 struct nvmet_tcp_queue *queue; 2245 2246 nvmet_unregister_transport(&nvmet_tcp_ops); 2247 2248 flush_workqueue(nvmet_wq); 2249 mutex_lock(&nvmet_tcp_queue_mutex); 2250 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) 2251 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 2252 mutex_unlock(&nvmet_tcp_queue_mutex); 2253 flush_workqueue(nvmet_wq); 2254 2255 destroy_workqueue(nvmet_tcp_wq); 2256 
static int __init nvmet_tcp_init(void)
{
	int ret;

	nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
				WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvmet_tcp_wq)
		return -ENOMEM;

	ret = nvmet_register_transport(&nvmet_tcp_ops);
	if (ret)
		goto err;

	return 0;
err:
	destroy_workqueue(nvmet_tcp_wq);
	return ret;
}

static void __exit nvmet_tcp_exit(void)
{
	struct nvmet_tcp_queue *queue;

	nvmet_unregister_transport(&nvmet_tcp_ops);

	flush_workqueue(nvmet_wq);
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	flush_workqueue(nvmet_wq);

	destroy_workqueue(nvmet_tcp_wq);
	ida_destroy(&nvmet_tcp_queue_ida);
}

module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_DESCRIPTION("NVMe target TCP transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */