// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <net/tls_prot.h>
#include <net/handshake.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <trace/events/sock.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)
#define NVMET_TCP_MAXH2CDATA		0x400000 /* 4M arbitrary limit */
#define NVMET_TCP_BACKLOG		128

static int param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int set_params(const char *str, const struct kernel_param *kp)
{
	return param_store_val(str, kp->arg, 0, INT_MAX);
}

static const struct kernel_param_ops set_param_ops = {
	.set	= set_params,
	.get	= param_get_int,
};

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization.  Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");

/* Define a time period (in usecs) that io_work() shall sample an activated
 * queue before determining it to be idle.  This optional module behavior
 * can enable NIC solutions that support socket optimized packet processing
 * using advanced interrupt moderation techniques.
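 *
 * For example (illustrative value; the sysfs path assumes the module is
 * loaded as nvmet_tcp):
 *
 *	modprobe nvmet-tcp idle_poll_period_usecs=1000
 * or, at runtime:
 *	echo 1000 > /sys/module/nvmet_tcp/parameters/idle_poll_period_usecs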
 */
static int idle_poll_period_usecs;
device_param_cb(idle_poll_period_usecs, &set_param_ops,
		&idle_poll_period_usecs, 0644);
MODULE_PARM_DESC(idle_poll_period_usecs,
		"nvmet tcp io_work poll till idle time period in usecs: Default 0");

#ifdef CONFIG_NVME_TARGET_TCP_TLS
/*
 * TLS handshake timeout
 */
static int tls_handshake_timeout = 10;
module_param(tls_handshake_timeout, int, 0644);
MODULE_PARM_DESC(tls_handshake_timeout,
		 "nvme TLS handshake timeout in seconds (default 10)");
#endif

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue		*queue;
	struct nvmet_req		req;

	struct nvme_tcp_cmd_pdu		*cmd_pdu;
	struct nvme_tcp_rsp_pdu		*rsp_pdu;
	struct nvme_tcp_data_pdu	*data_pdu;
	struct nvme_tcp_r2t_pdu		*r2t_pdu;

	u32				rbytes_done;
	u32				wbytes_done;

	u32				pdu_len;
	u32				pdu_recv;
	int				sg_idx;
	char				recv_cbuf[CMSG_LEN(sizeof(char))];
	struct msghdr			recv_msg;
	struct bio_vec			*iov;
	u32				flags;

	struct list_head		entry;
	struct llist_node		lentry;

	/* send state */
	u32				offset;
	struct scatterlist		*cur_sg;
	enum nvmet_tcp_send_state	state;

	__le32				exp_ddgst;
	__le32				recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_TLS_HANDSHAKE,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
	NVMET_TCP_Q_FAILED,
};

struct nvmet_tcp_queue {
	struct socket		*sock;
	struct nvmet_tcp_port	*port;
	struct work_struct	io_work;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct kref		kref;

	/* send state */
	struct nvmet_tcp_cmd	*cmds;
	unsigned int		nr_cmds;
	struct list_head	free_list;
	struct llist_head	resp_list;
	struct list_head	resp_send_list;
	int			send_list_len;
	struct nvmet_tcp_cmd	*snd_cmd;

	/* recv state */
	int			offset;
	int			left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd	*cmd;
	union nvme_tcp_pdu	pdu;

	/* digest state */
	bool			hdr_digest;
	bool			data_digest;

	/* TLS state */
	key_serial_t		tls_pskid;
	struct delayed_work	tls_handshake_tmo_work;

	unsigned long		poll_end;

	spinlock_t		state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage	sockaddr;
	struct sockaddr_storage	sockaddr_peer;
	struct work_struct	release_work;

	int			idx;
	struct list_head	queue_list;

	struct nvmet_tcp_cmd	connect;

	struct page_frag_cache	pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
	struct socket		*sock;
	struct work_struct	accept_work;
	struct nvmet_port	*nport;
	struct sockaddr_storage addr;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static const
struct nvmet_fabrics_ops nvmet_tcp_ops; 215 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c); 216 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd); 217 218 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, 219 struct nvmet_tcp_cmd *cmd) 220 { 221 if (unlikely(!queue->nr_cmds)) { 222 /* We didn't allocate cmds yet, send 0xffff */ 223 return USHRT_MAX; 224 } 225 226 return cmd - queue->cmds; 227 } 228 229 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd) 230 { 231 return nvme_is_write(cmd->req.cmd) && 232 cmd->rbytes_done < cmd->req.transfer_len; 233 } 234 235 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd) 236 { 237 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status; 238 } 239 240 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd) 241 { 242 return !nvme_is_write(cmd->req.cmd) && 243 cmd->req.transfer_len > 0 && 244 !cmd->req.cqe->status; 245 } 246 247 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd) 248 { 249 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len && 250 !cmd->rbytes_done; 251 } 252 253 static inline struct nvmet_tcp_cmd * 254 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue) 255 { 256 struct nvmet_tcp_cmd *cmd; 257 258 cmd = list_first_entry_or_null(&queue->free_list, 259 struct nvmet_tcp_cmd, entry); 260 if (!cmd) 261 return NULL; 262 list_del_init(&cmd->entry); 263 264 cmd->rbytes_done = cmd->wbytes_done = 0; 265 cmd->pdu_len = 0; 266 cmd->pdu_recv = 0; 267 cmd->iov = NULL; 268 cmd->flags = 0; 269 return cmd; 270 } 271 272 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd) 273 { 274 if (unlikely(cmd == &cmd->queue->connect)) 275 return; 276 277 list_add_tail(&cmd->entry, &cmd->queue->free_list); 278 } 279 280 static inline int queue_cpu(struct nvmet_tcp_queue *queue) 281 { 282 return queue->sock->sk->sk_incoming_cpu; 283 } 284 285 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue) 286 { 287 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; 288 } 289 290 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue) 291 { 292 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; 293 } 294 295 static inline void nvmet_tcp_hdgst(void *pdu, size_t len) 296 { 297 put_unaligned_le32(~crc32c(~0, pdu, len), pdu + len); 298 } 299 300 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, 301 void *pdu, size_t len) 302 { 303 struct nvme_tcp_hdr *hdr = pdu; 304 __le32 recv_digest; 305 __le32 exp_digest; 306 307 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { 308 pr_err("queue %d: header digest enabled but no header digest\n", 309 queue->idx); 310 return -EPROTO; 311 } 312 313 recv_digest = *(__le32 *)(pdu + hdr->hlen); 314 nvmet_tcp_hdgst(pdu, len); 315 exp_digest = *(__le32 *)(pdu + hdr->hlen); 316 if (recv_digest != exp_digest) { 317 pr_err("queue %d: header digest error: recv %#x expected %#x\n", 318 queue->idx, le32_to_cpu(recv_digest), 319 le32_to_cpu(exp_digest)); 320 return -EPROTO; 321 } 322 323 return 0; 324 } 325 326 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu) 327 { 328 struct nvme_tcp_hdr *hdr = pdu; 329 u8 digest_len = nvmet_tcp_hdgst_len(queue); 330 u32 len; 331 332 len = le32_to_cpu(hdr->plen) - hdr->hlen - 333 (hdr->flags & NVME_TCP_F_HDGST ? 
digest_len : 0); 334 335 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { 336 pr_err("queue %d: data digest flag is cleared\n", queue->idx); 337 return -EPROTO; 338 } 339 340 return 0; 341 } 342 343 /* If cmd buffers are NULL, no operation is performed */ 344 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd) 345 { 346 kfree(cmd->iov); 347 sgl_free(cmd->req.sg); 348 cmd->iov = NULL; 349 cmd->req.sg = NULL; 350 } 351 352 static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) 353 { 354 struct bio_vec *iov = cmd->iov; 355 struct scatterlist *sg; 356 u32 length, offset, sg_offset; 357 int nr_pages; 358 359 length = cmd->pdu_len; 360 nr_pages = DIV_ROUND_UP(length, PAGE_SIZE); 361 offset = cmd->rbytes_done; 362 cmd->sg_idx = offset / PAGE_SIZE; 363 sg_offset = offset % PAGE_SIZE; 364 sg = &cmd->req.sg[cmd->sg_idx]; 365 366 while (length) { 367 u32 iov_len = min_t(u32, length, sg->length - sg_offset); 368 369 bvec_set_page(iov, sg_page(sg), iov_len, 370 sg->offset + sg_offset); 371 372 length -= iov_len; 373 sg = sg_next(sg); 374 iov++; 375 sg_offset = 0; 376 } 377 378 iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov, 379 nr_pages, cmd->pdu_len); 380 } 381 382 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) 383 { 384 queue->rcv_state = NVMET_TCP_RECV_ERR; 385 if (queue->nvme_sq.ctrl) 386 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); 387 else 388 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 389 } 390 391 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status) 392 { 393 queue->rcv_state = NVMET_TCP_RECV_ERR; 394 if (status == -EPIPE || status == -ECONNRESET) 395 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 396 else 397 nvmet_tcp_fatal_error(queue); 398 } 399 400 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) 401 { 402 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl; 403 u32 len = le32_to_cpu(sgl->length); 404 405 if (!len) 406 return 0; 407 408 if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) | 409 NVME_SGL_FMT_OFFSET)) { 410 if (!nvme_is_write(cmd->req.cmd)) 411 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; 412 413 if (len > cmd->req.port->inline_data_size) 414 return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR; 415 cmd->pdu_len = len; 416 } 417 cmd->req.transfer_len += len; 418 419 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt); 420 if (!cmd->req.sg) 421 return NVME_SC_INTERNAL; 422 cmd->cur_sg = cmd->req.sg; 423 424 if (nvmet_tcp_has_data_in(cmd)) { 425 cmd->iov = kmalloc_array(cmd->req.sg_cnt, 426 sizeof(*cmd->iov), GFP_KERNEL); 427 if (!cmd->iov) 428 goto err; 429 } 430 431 return 0; 432 err: 433 nvmet_tcp_free_cmd_buffers(cmd); 434 return NVME_SC_INTERNAL; 435 } 436 437 static void nvmet_tcp_calc_ddgst(struct nvmet_tcp_cmd *cmd) 438 { 439 size_t total_len = cmd->req.transfer_len; 440 struct scatterlist *sg = cmd->req.sg; 441 u32 crc = ~0; 442 443 while (total_len) { 444 size_t len = min_t(size_t, total_len, sg->length); 445 446 /* 447 * Note that the scatterlist does not contain any highmem pages, 448 * as it was allocated by sgl_alloc() with GFP_KERNEL. 
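		 *
		 * The resulting exp_ddgst is the CRC32C of the payload as
		 * computed here: seeded with ~0, accumulated over the
		 * scatterlist, then inverted and stored little-endian.  The
		 * same value is appended after outgoing C2HData and compared
		 * against the digest received in nvmet_tcp_try_recv_ddgst().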
449 */ 450 crc = crc32c(crc, sg_virt(sg), len); 451 total_len -= len; 452 sg = sg_next(sg); 453 } 454 cmd->exp_ddgst = cpu_to_le32(~crc); 455 } 456 457 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) 458 { 459 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu; 460 struct nvmet_tcp_queue *queue = cmd->queue; 461 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 462 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue); 463 464 cmd->offset = 0; 465 cmd->state = NVMET_TCP_SEND_DATA_PDU; 466 467 pdu->hdr.type = nvme_tcp_c2h_data; 468 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ? 469 NVME_TCP_F_DATA_SUCCESS : 0); 470 pdu->hdr.hlen = sizeof(*pdu); 471 pdu->hdr.pdo = pdu->hdr.hlen + hdgst; 472 pdu->hdr.plen = 473 cpu_to_le32(pdu->hdr.hlen + hdgst + 474 cmd->req.transfer_len + ddgst); 475 pdu->command_id = cmd->req.cqe->command_id; 476 pdu->data_length = cpu_to_le32(cmd->req.transfer_len); 477 pdu->data_offset = cpu_to_le32(cmd->wbytes_done); 478 479 if (queue->data_digest) { 480 pdu->hdr.flags |= NVME_TCP_F_DDGST; 481 nvmet_tcp_calc_ddgst(cmd); 482 } 483 484 if (cmd->queue->hdr_digest) { 485 pdu->hdr.flags |= NVME_TCP_F_HDGST; 486 nvmet_tcp_hdgst(pdu, sizeof(*pdu)); 487 } 488 } 489 490 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) 491 { 492 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; 493 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 494 495 cmd->offset = 0; 496 cmd->state = NVMET_TCP_SEND_R2T; 497 498 pdu->hdr.type = nvme_tcp_r2t; 499 pdu->hdr.flags = 0; 500 pdu->hdr.hlen = sizeof(*pdu); 501 pdu->hdr.pdo = 0; 502 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); 503 504 pdu->command_id = cmd->req.cmd->common.command_id; 505 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); 506 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done); 507 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); 508 if (cmd->queue->hdr_digest) { 509 pdu->hdr.flags |= NVME_TCP_F_HDGST; 510 nvmet_tcp_hdgst(pdu, sizeof(*pdu)); 511 } 512 } 513 514 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) 515 { 516 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; 517 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 518 519 cmd->offset = 0; 520 cmd->state = NVMET_TCP_SEND_RESPONSE; 521 522 pdu->hdr.type = nvme_tcp_rsp; 523 pdu->hdr.flags = 0; 524 pdu->hdr.hlen = sizeof(*pdu); 525 pdu->hdr.pdo = 0; 526 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); 527 if (cmd->queue->hdr_digest) { 528 pdu->hdr.flags |= NVME_TCP_F_HDGST; 529 nvmet_tcp_hdgst(pdu, sizeof(*pdu)); 530 } 531 } 532 533 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue) 534 { 535 struct llist_node *node; 536 struct nvmet_tcp_cmd *cmd; 537 538 for (node = llist_del_all(&queue->resp_list); node; node = node->next) { 539 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry); 540 list_add(&cmd->entry, &queue->resp_send_list); 541 queue->send_list_len++; 542 } 543 } 544 545 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue) 546 { 547 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list, 548 struct nvmet_tcp_cmd, entry); 549 if (!queue->snd_cmd) { 550 nvmet_tcp_process_resp_list(queue); 551 queue->snd_cmd = 552 list_first_entry_or_null(&queue->resp_send_list, 553 struct nvmet_tcp_cmd, entry); 554 if (unlikely(!queue->snd_cmd)) 555 return NULL; 556 } 557 558 list_del_init(&queue->snd_cmd->entry); 559 queue->send_list_len--; 560 561 if (nvmet_tcp_need_data_out(queue->snd_cmd)) 562 nvmet_setup_c2h_data_pdu(queue->snd_cmd); 563 else if 
(nvmet_tcp_need_data_in(queue->snd_cmd)) 564 nvmet_setup_r2t_pdu(queue->snd_cmd); 565 else 566 nvmet_setup_response_pdu(queue->snd_cmd); 567 568 return queue->snd_cmd; 569 } 570 571 static void nvmet_tcp_queue_response(struct nvmet_req *req) 572 { 573 struct nvmet_tcp_cmd *cmd = 574 container_of(req, struct nvmet_tcp_cmd, req); 575 struct nvmet_tcp_queue *queue = cmd->queue; 576 enum nvmet_tcp_recv_state queue_state; 577 struct nvmet_tcp_cmd *queue_cmd; 578 struct nvme_sgl_desc *sgl; 579 u32 len; 580 581 /* Pairs with store_release in nvmet_prepare_receive_pdu() */ 582 queue_state = smp_load_acquire(&queue->rcv_state); 583 queue_cmd = READ_ONCE(queue->cmd); 584 585 if (unlikely(cmd == queue_cmd)) { 586 sgl = &cmd->req.cmd->common.dptr.sgl; 587 len = le32_to_cpu(sgl->length); 588 589 /* 590 * Wait for inline data before processing the response. 591 * Avoid using helpers, this might happen before 592 * nvmet_req_init is completed. 593 */ 594 if (queue_state == NVMET_TCP_RECV_PDU && 595 len && len <= cmd->req.port->inline_data_size && 596 nvme_is_write(cmd->req.cmd)) 597 return; 598 } 599 600 llist_add(&cmd->lentry, &queue->resp_list); 601 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work); 602 } 603 604 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd) 605 { 606 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED)) 607 nvmet_tcp_queue_response(&cmd->req); 608 else 609 cmd->req.execute(&cmd->req); 610 } 611 612 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd) 613 { 614 struct msghdr msg = { 615 .msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES, 616 }; 617 struct bio_vec bvec; 618 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 619 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst; 620 int ret; 621 622 bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left); 623 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); 624 ret = sock_sendmsg(cmd->queue->sock, &msg); 625 if (ret <= 0) 626 return ret; 627 628 cmd->offset += ret; 629 left -= ret; 630 631 if (left) 632 return -EAGAIN; 633 634 cmd->state = NVMET_TCP_SEND_DATA; 635 cmd->offset = 0; 636 return 1; 637 } 638 639 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch) 640 { 641 struct nvmet_tcp_queue *queue = cmd->queue; 642 int ret; 643 644 while (cmd->cur_sg) { 645 struct msghdr msg = { 646 .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, 647 }; 648 struct page *page = sg_page(cmd->cur_sg); 649 struct bio_vec bvec; 650 u32 left = cmd->cur_sg->length - cmd->offset; 651 652 if ((!last_in_batch && cmd->queue->send_list_len) || 653 cmd->wbytes_done + left < cmd->req.transfer_len || 654 queue->data_digest || !queue->nvme_sq.sqhd_disabled) 655 msg.msg_flags |= MSG_MORE; 656 657 bvec_set_page(&bvec, page, left, cmd->offset); 658 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); 659 ret = sock_sendmsg(cmd->queue->sock, &msg); 660 if (ret <= 0) 661 return ret; 662 663 cmd->offset += ret; 664 cmd->wbytes_done += ret; 665 666 /* Done with sg?*/ 667 if (cmd->offset == cmd->cur_sg->length) { 668 cmd->cur_sg = sg_next(cmd->cur_sg); 669 cmd->offset = 0; 670 } 671 } 672 673 if (queue->data_digest) { 674 cmd->state = NVMET_TCP_SEND_DDGST; 675 cmd->offset = 0; 676 } else { 677 if (queue->nvme_sq.sqhd_disabled) { 678 cmd->queue->snd_cmd = NULL; 679 nvmet_tcp_put_cmd(cmd); 680 } else { 681 nvmet_setup_response_pdu(cmd); 682 } 683 } 684 685 if (queue->nvme_sq.sqhd_disabled) 686 nvmet_tcp_free_cmd_buffers(cmd); 687 688 return 1; 689 690 } 691 692 static int 
nvmet_try_send_response(struct nvmet_tcp_cmd *cmd, 693 bool last_in_batch) 694 { 695 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, }; 696 struct bio_vec bvec; 697 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 698 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst; 699 int ret; 700 701 if (!last_in_batch && cmd->queue->send_list_len) 702 msg.msg_flags |= MSG_MORE; 703 else 704 msg.msg_flags |= MSG_EOR; 705 706 bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left); 707 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); 708 ret = sock_sendmsg(cmd->queue->sock, &msg); 709 if (ret <= 0) 710 return ret; 711 cmd->offset += ret; 712 left -= ret; 713 714 if (left) 715 return -EAGAIN; 716 717 nvmet_tcp_free_cmd_buffers(cmd); 718 cmd->queue->snd_cmd = NULL; 719 nvmet_tcp_put_cmd(cmd); 720 return 1; 721 } 722 723 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) 724 { 725 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, }; 726 struct bio_vec bvec; 727 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); 728 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst; 729 int ret; 730 731 if (!last_in_batch && cmd->queue->send_list_len) 732 msg.msg_flags |= MSG_MORE; 733 else 734 msg.msg_flags |= MSG_EOR; 735 736 bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left); 737 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); 738 ret = sock_sendmsg(cmd->queue->sock, &msg); 739 if (ret <= 0) 740 return ret; 741 cmd->offset += ret; 742 left -= ret; 743 744 if (left) 745 return -EAGAIN; 746 747 cmd->queue->snd_cmd = NULL; 748 return 1; 749 } 750 751 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) 752 { 753 struct nvmet_tcp_queue *queue = cmd->queue; 754 int left = NVME_TCP_DIGEST_LENGTH - cmd->offset; 755 struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; 756 struct kvec iov = { 757 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset, 758 .iov_len = left 759 }; 760 int ret; 761 762 if (!last_in_batch && cmd->queue->send_list_len) 763 msg.msg_flags |= MSG_MORE; 764 else 765 msg.msg_flags |= MSG_EOR; 766 767 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); 768 if (unlikely(ret <= 0)) 769 return ret; 770 771 cmd->offset += ret; 772 left -= ret; 773 774 if (left) 775 return -EAGAIN; 776 777 if (queue->nvme_sq.sqhd_disabled) { 778 cmd->queue->snd_cmd = NULL; 779 nvmet_tcp_put_cmd(cmd); 780 } else { 781 nvmet_setup_response_pdu(cmd); 782 } 783 return 1; 784 } 785 786 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue, 787 bool last_in_batch) 788 { 789 struct nvmet_tcp_cmd *cmd = queue->snd_cmd; 790 int ret = 0; 791 792 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) { 793 cmd = nvmet_tcp_fetch_cmd(queue); 794 if (unlikely(!cmd)) 795 return 0; 796 } 797 798 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) { 799 ret = nvmet_try_send_data_pdu(cmd); 800 if (ret <= 0) 801 goto done_send; 802 } 803 804 if (cmd->state == NVMET_TCP_SEND_DATA) { 805 ret = nvmet_try_send_data(cmd, last_in_batch); 806 if (ret <= 0) 807 goto done_send; 808 } 809 810 if (cmd->state == NVMET_TCP_SEND_DDGST) { 811 ret = nvmet_try_send_ddgst(cmd, last_in_batch); 812 if (ret <= 0) 813 goto done_send; 814 } 815 816 if (cmd->state == NVMET_TCP_SEND_R2T) { 817 ret = nvmet_try_send_r2t(cmd, last_in_batch); 818 if (ret <= 0) 819 goto done_send; 820 } 821 822 if (cmd->state == NVMET_TCP_SEND_RESPONSE) 823 ret = nvmet_try_send_response(cmd, last_in_batch); 824 825 done_send: 826 if (ret < 0) { 827 if (ret == 
-EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}

static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*sends)++;
	}
done:
	return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
	queue->offset = 0;
	queue->left = sizeof(struct nvme_tcp_hdr);
	WRITE_ONCE(queue->cmd, NULL);
	/* Ensure rcv_state is visible only after queue->cmd is set */
	smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU);
}

static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
	struct msghdr msg = {};
	struct kvec iov;
	int ret;

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		pr_err("bad nvme-tcp pdu length (%d)\n",
			le32_to_cpu(icreq->hdr.plen));
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (icreq->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
		return -EPROTO;
	}

	if (icreq->hpda != 0) {
		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
			icreq->hpda);
		return -EPROTO;
	}

	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);

	memset(icresp, 0, sizeof(*icresp));
	icresp->hdr.type = nvme_tcp_icresp;
	icresp->hdr.hlen = sizeof(*icresp);
	icresp->hdr.pdo = 0;
	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
	icresp->cpda = 0;
	if (queue->hdr_digest)
		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0) {
		queue->state = NVMET_TCP_Q_FAILED;
		return ret; /* queue removal will clean up */
	}

	queue->state = NVMET_TCP_Q_LIVE;
	nvmet_prepare_receive_pdu(queue);
	return 0;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
	int ret;

	/*
	 * This command has not been processed yet, hence we are trying to
	 * figure out whether there is still pending data left to receive.
	 * If there is none, we can simply prepare for the next pdu and
	 * bail out; otherwise we will need to prepare a buffer and receive
	 * the stale data before continuing forward.
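	 *
	 * The received bytes are only drained to keep the byte stream in
	 * sync; NVMET_TCP_F_INIT_FAILED makes nvmet_tcp_execute_request()
	 * queue the error response instead of executing the command.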
930 */ 931 if (!nvme_is_write(cmd->req.cmd) || !data_len || 932 data_len > cmd->req.port->inline_data_size) { 933 nvmet_prepare_receive_pdu(queue); 934 return; 935 } 936 937 ret = nvmet_tcp_map_data(cmd); 938 if (unlikely(ret)) { 939 pr_err("queue %d: failed to map data\n", queue->idx); 940 nvmet_tcp_fatal_error(queue); 941 return; 942 } 943 944 queue->rcv_state = NVMET_TCP_RECV_DATA; 945 nvmet_tcp_build_pdu_iovec(cmd); 946 cmd->flags |= NVMET_TCP_F_INIT_FAILED; 947 } 948 949 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) 950 { 951 struct nvme_tcp_data_pdu *data = &queue->pdu.data; 952 struct nvmet_tcp_cmd *cmd; 953 unsigned int exp_data_len; 954 955 if (likely(queue->nr_cmds)) { 956 if (unlikely(data->ttag >= queue->nr_cmds)) { 957 pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n", 958 queue->idx, data->ttag, queue->nr_cmds); 959 goto err_proto; 960 } 961 cmd = &queue->cmds[data->ttag]; 962 } else { 963 cmd = &queue->connect; 964 } 965 966 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { 967 pr_err("ttag %u unexpected data offset %u (expected %u)\n", 968 data->ttag, le32_to_cpu(data->data_offset), 969 cmd->rbytes_done); 970 goto err_proto; 971 } 972 973 exp_data_len = le32_to_cpu(data->hdr.plen) - 974 nvmet_tcp_hdgst_len(queue) - 975 nvmet_tcp_ddgst_len(queue) - 976 sizeof(*data); 977 978 cmd->pdu_len = le32_to_cpu(data->data_length); 979 if (unlikely(cmd->pdu_len != exp_data_len || 980 cmd->pdu_len == 0 || 981 cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) { 982 pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len); 983 goto err_proto; 984 } 985 /* 986 * Ensure command data structures are initialized. We must check both 987 * cmd->req.sg and cmd->iov because they can have different NULL states: 988 * - Uninitialized commands: both NULL 989 * - READ commands: cmd->req.sg allocated, cmd->iov NULL 990 * - WRITE commands: both allocated 991 */ 992 if (unlikely(!cmd->req.sg || !cmd->iov)) { 993 pr_err("queue %d: H2CData PDU received for invalid command state (ttag %u)\n", 994 queue->idx, data->ttag); 995 goto err_proto; 996 } 997 cmd->pdu_recv = 0; 998 nvmet_tcp_build_pdu_iovec(cmd); 999 queue->cmd = cmd; 1000 queue->rcv_state = NVMET_TCP_RECV_DATA; 1001 1002 return 0; 1003 1004 err_proto: 1005 /* FIXME: use proper transport errors */ 1006 nvmet_tcp_fatal_error(queue); 1007 return -EPROTO; 1008 } 1009 1010 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) 1011 { 1012 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; 1013 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; 1014 struct nvmet_req *req; 1015 int ret; 1016 1017 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { 1018 if (hdr->type != nvme_tcp_icreq) { 1019 pr_err("unexpected pdu type (%d) before icreq\n", 1020 hdr->type); 1021 nvmet_tcp_fatal_error(queue); 1022 return -EPROTO; 1023 } 1024 return nvmet_tcp_handle_icreq(queue); 1025 } 1026 1027 if (unlikely(hdr->type == nvme_tcp_icreq)) { 1028 pr_err("queue %d: received icreq pdu in state %d\n", 1029 queue->idx, queue->state); 1030 nvmet_tcp_fatal_error(queue); 1031 return -EPROTO; 1032 } 1033 1034 if (hdr->type == nvme_tcp_h2c_data) { 1035 ret = nvmet_tcp_handle_h2c_data_pdu(queue); 1036 if (unlikely(ret)) 1037 return ret; 1038 return 0; 1039 } 1040 1041 queue->cmd = nvmet_tcp_get_cmd(queue); 1042 if (unlikely(!queue->cmd)) { 1043 /* This should never happen */ 1044 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d", 1045 queue->idx, queue->nr_cmds, queue->send_list_len, 1046 nvme_cmd->common.opcode); 
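		/*
		 * nr_cmds is sized to twice the negotiated queue depth in
		 * nvmet_tcp_install_queue(), so running dry here means the
		 * host has more commands outstanding than the queue allows.
		 */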
1047 nvmet_tcp_fatal_error(queue); 1048 return -ENOMEM; 1049 } 1050 1051 req = &queue->cmd->req; 1052 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd)); 1053 1054 if (unlikely(!nvmet_req_init(req, &queue->nvme_sq, &nvmet_tcp_ops))) { 1055 pr_err("failed cmd %p id %d opcode %d, data_len: %d, status: %04x\n", 1056 req->cmd, req->cmd->common.command_id, 1057 req->cmd->common.opcode, 1058 le32_to_cpu(req->cmd->common.dptr.sgl.length), 1059 le16_to_cpu(req->cqe->status)); 1060 1061 nvmet_tcp_handle_req_failure(queue, queue->cmd, req); 1062 return 0; 1063 } 1064 1065 ret = nvmet_tcp_map_data(queue->cmd); 1066 if (unlikely(ret)) { 1067 pr_err("queue %d: failed to map data\n", queue->idx); 1068 if (nvmet_tcp_has_inline_data(queue->cmd)) 1069 nvmet_tcp_fatal_error(queue); 1070 else 1071 nvmet_req_complete(req, ret); 1072 ret = -EAGAIN; 1073 goto out; 1074 } 1075 1076 if (nvmet_tcp_need_data_in(queue->cmd)) { 1077 if (nvmet_tcp_has_inline_data(queue->cmd)) { 1078 queue->rcv_state = NVMET_TCP_RECV_DATA; 1079 nvmet_tcp_build_pdu_iovec(queue->cmd); 1080 return 0; 1081 } 1082 /* send back R2T */ 1083 nvmet_tcp_queue_response(&queue->cmd->req); 1084 goto out; 1085 } 1086 1087 queue->cmd->req.execute(&queue->cmd->req); 1088 out: 1089 nvmet_prepare_receive_pdu(queue); 1090 return ret; 1091 } 1092 1093 static const u8 nvme_tcp_pdu_sizes[] = { 1094 [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu), 1095 [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu), 1096 [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu), 1097 }; 1098 1099 static inline u8 nvmet_tcp_pdu_size(u8 type) 1100 { 1101 size_t idx = type; 1102 1103 return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) && 1104 nvme_tcp_pdu_sizes[idx]) ? 1105 nvme_tcp_pdu_sizes[idx] : 0; 1106 } 1107 1108 static inline bool nvmet_tcp_pdu_valid(u8 type) 1109 { 1110 switch (type) { 1111 case nvme_tcp_icreq: 1112 case nvme_tcp_cmd: 1113 case nvme_tcp_h2c_data: 1114 /* fallthru */ 1115 return true; 1116 } 1117 1118 return false; 1119 } 1120 1121 static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue, 1122 struct msghdr *msg, char *cbuf) 1123 { 1124 struct cmsghdr *cmsg = (struct cmsghdr *)cbuf; 1125 u8 ctype, level, description; 1126 int ret = 0; 1127 1128 ctype = tls_get_record_type(queue->sock->sk, cmsg); 1129 switch (ctype) { 1130 case 0: 1131 break; 1132 case TLS_RECORD_TYPE_DATA: 1133 break; 1134 case TLS_RECORD_TYPE_ALERT: 1135 tls_alert_recv(queue->sock->sk, msg, &level, &description); 1136 if (level == TLS_ALERT_LEVEL_FATAL) { 1137 pr_err("queue %d: TLS Alert desc %u\n", 1138 queue->idx, description); 1139 ret = -ENOTCONN; 1140 } else { 1141 pr_warn("queue %d: TLS Alert desc %u\n", 1142 queue->idx, description); 1143 ret = -EAGAIN; 1144 } 1145 break; 1146 default: 1147 /* discard this record type */ 1148 pr_err("queue %d: TLS record %d unhandled\n", 1149 queue->idx, ctype); 1150 ret = -EAGAIN; 1151 break; 1152 } 1153 return ret; 1154 } 1155 1156 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue) 1157 { 1158 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; 1159 int len, ret; 1160 struct kvec iov; 1161 char cbuf[CMSG_LEN(sizeof(char))] = {}; 1162 struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; 1163 1164 recv: 1165 iov.iov_base = (void *)&queue->pdu + queue->offset; 1166 iov.iov_len = queue->left; 1167 if (queue->tls_pskid) { 1168 msg.msg_control = cbuf; 1169 msg.msg_controllen = sizeof(cbuf); 1170 } 1171 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, 1172 iov.iov_len, msg.msg_flags); 1173 if (unlikely(len < 0)) 1174 return len; 1175 if 
(queue->tls_pskid) { 1176 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf); 1177 if (ret < 0) 1178 return ret; 1179 } 1180 1181 queue->offset += len; 1182 queue->left -= len; 1183 if (queue->left) 1184 return -EAGAIN; 1185 1186 if (queue->offset == sizeof(struct nvme_tcp_hdr)) { 1187 u8 hdgst = nvmet_tcp_hdgst_len(queue); 1188 1189 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) { 1190 pr_err("unexpected pdu type %d\n", hdr->type); 1191 nvmet_tcp_fatal_error(queue); 1192 return -EIO; 1193 } 1194 1195 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) { 1196 pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen); 1197 return -EIO; 1198 } 1199 1200 queue->left = hdr->hlen - queue->offset + hdgst; 1201 goto recv; 1202 } 1203 1204 if (queue->hdr_digest && 1205 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) { 1206 nvmet_tcp_fatal_error(queue); /* fatal */ 1207 return -EPROTO; 1208 } 1209 1210 if (queue->data_digest && 1211 nvmet_tcp_check_ddgst(queue, &queue->pdu)) { 1212 nvmet_tcp_fatal_error(queue); /* fatal */ 1213 return -EPROTO; 1214 } 1215 1216 return nvmet_tcp_done_recv_pdu(queue); 1217 } 1218 1219 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) 1220 { 1221 struct nvmet_tcp_queue *queue = cmd->queue; 1222 1223 nvmet_tcp_calc_ddgst(cmd); 1224 queue->offset = 0; 1225 queue->left = NVME_TCP_DIGEST_LENGTH; 1226 queue->rcv_state = NVMET_TCP_RECV_DDGST; 1227 } 1228 1229 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) 1230 { 1231 struct nvmet_tcp_cmd *cmd = queue->cmd; 1232 int len, ret; 1233 1234 while (msg_data_left(&cmd->recv_msg)) { 1235 len = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, 1236 cmd->recv_msg.msg_flags); 1237 if (len <= 0) 1238 return len; 1239 if (queue->tls_pskid) { 1240 ret = nvmet_tcp_tls_record_ok(cmd->queue, 1241 &cmd->recv_msg, cmd->recv_cbuf); 1242 if (ret < 0) 1243 return ret; 1244 } 1245 1246 cmd->pdu_recv += len; 1247 cmd->rbytes_done += len; 1248 } 1249 1250 if (queue->data_digest) { 1251 nvmet_tcp_prep_recv_ddgst(cmd); 1252 return 0; 1253 } 1254 1255 if (cmd->rbytes_done == cmd->req.transfer_len) 1256 nvmet_tcp_execute_request(cmd); 1257 1258 nvmet_prepare_receive_pdu(queue); 1259 return 0; 1260 } 1261 1262 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue) 1263 { 1264 struct nvmet_tcp_cmd *cmd = queue->cmd; 1265 int ret, len; 1266 char cbuf[CMSG_LEN(sizeof(char))] = {}; 1267 struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; 1268 struct kvec iov = { 1269 .iov_base = (void *)&cmd->recv_ddgst + queue->offset, 1270 .iov_len = queue->left 1271 }; 1272 1273 if (queue->tls_pskid) { 1274 msg.msg_control = cbuf; 1275 msg.msg_controllen = sizeof(cbuf); 1276 } 1277 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, 1278 iov.iov_len, msg.msg_flags); 1279 if (unlikely(len < 0)) 1280 return len; 1281 if (queue->tls_pskid) { 1282 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf); 1283 if (ret < 0) 1284 return ret; 1285 } 1286 1287 queue->offset += len; 1288 queue->left -= len; 1289 if (queue->left) 1290 return -EAGAIN; 1291 1292 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) { 1293 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n", 1294 queue->idx, cmd->req.cmd->common.command_id, 1295 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), 1296 le32_to_cpu(cmd->exp_ddgst)); 1297 nvmet_req_uninit(&cmd->req); 1298 nvmet_tcp_free_cmd_buffers(cmd); 1299 nvmet_tcp_fatal_error(queue); 1300 ret = -EPROTO; 1301 goto out; 1302 } 1303 1304 if (cmd->rbytes_done == 
cmd->req.transfer_len) 1305 nvmet_tcp_execute_request(cmd); 1306 1307 ret = 0; 1308 out: 1309 nvmet_prepare_receive_pdu(queue); 1310 return ret; 1311 } 1312 1313 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) 1314 { 1315 int result = 0; 1316 1317 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) 1318 return 0; 1319 1320 if (queue->rcv_state == NVMET_TCP_RECV_PDU) { 1321 result = nvmet_tcp_try_recv_pdu(queue); 1322 if (result != 0) 1323 goto done_recv; 1324 } 1325 1326 if (queue->rcv_state == NVMET_TCP_RECV_DATA) { 1327 result = nvmet_tcp_try_recv_data(queue); 1328 if (result != 0) 1329 goto done_recv; 1330 } 1331 1332 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) { 1333 result = nvmet_tcp_try_recv_ddgst(queue); 1334 if (result != 0) 1335 goto done_recv; 1336 } 1337 1338 done_recv: 1339 if (result < 0) { 1340 if (result == -EAGAIN) 1341 return 0; 1342 return result; 1343 } 1344 return 1; 1345 } 1346 1347 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue, 1348 int budget, int *recvs) 1349 { 1350 int i, ret = 0; 1351 1352 for (i = 0; i < budget; i++) { 1353 ret = nvmet_tcp_try_recv_one(queue); 1354 if (unlikely(ret < 0)) { 1355 nvmet_tcp_socket_error(queue, ret); 1356 goto done; 1357 } else if (ret == 0) { 1358 break; 1359 } 1360 (*recvs)++; 1361 } 1362 done: 1363 return ret; 1364 } 1365 1366 static void nvmet_tcp_release_queue(struct kref *kref) 1367 { 1368 struct nvmet_tcp_queue *queue = 1369 container_of(kref, struct nvmet_tcp_queue, kref); 1370 1371 WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING); 1372 queue_work(nvmet_wq, &queue->release_work); 1373 } 1374 1375 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue) 1376 { 1377 spin_lock_bh(&queue->state_lock); 1378 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { 1379 /* Socket closed during handshake */ 1380 tls_handshake_cancel(queue->sock->sk); 1381 } 1382 if (queue->state != NVMET_TCP_Q_DISCONNECTING) { 1383 queue->state = NVMET_TCP_Q_DISCONNECTING; 1384 kref_put(&queue->kref, nvmet_tcp_release_queue); 1385 } 1386 spin_unlock_bh(&queue->state_lock); 1387 } 1388 1389 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue) 1390 { 1391 queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs); 1392 } 1393 1394 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue, 1395 int ops) 1396 { 1397 if (!idle_poll_period_usecs) 1398 return false; 1399 1400 if (ops) 1401 nvmet_tcp_arm_queue_deadline(queue); 1402 1403 return !time_after(jiffies, queue->poll_end); 1404 } 1405 1406 static void nvmet_tcp_io_work(struct work_struct *w) 1407 { 1408 struct nvmet_tcp_queue *queue = 1409 container_of(w, struct nvmet_tcp_queue, io_work); 1410 bool pending; 1411 int ret, ops = 0; 1412 1413 do { 1414 pending = false; 1415 1416 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops); 1417 if (ret > 0) 1418 pending = true; 1419 else if (ret < 0) 1420 return; 1421 1422 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops); 1423 if (ret > 0) 1424 pending = true; 1425 else if (ret < 0) 1426 return; 1427 1428 } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET); 1429 1430 /* 1431 * Requeue the worker if idle deadline period is in progress or any 1432 * ops activity was recorded during the do-while loop above. 
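	 *
	 * Requeueing (rather than looping indefinitely) keeps the work
	 * bound to queue_cpu() while letting other queues sharing
	 * nvmet_tcp_wq make progress; once neither condition holds the
	 * worker goes idle and is rescheduled from the socket callbacks
	 * (data_ready/write_space).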
1433 */ 1434 if (nvmet_tcp_check_queue_deadline(queue, ops) || pending) 1435 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); 1436 } 1437 1438 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue, 1439 struct nvmet_tcp_cmd *c) 1440 { 1441 u8 hdgst = nvmet_tcp_hdgst_len(queue); 1442 1443 c->queue = queue; 1444 c->req.port = queue->port->nport; 1445 1446 c->cmd_pdu = page_frag_alloc(&queue->pf_cache, 1447 sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); 1448 if (!c->cmd_pdu) 1449 return -ENOMEM; 1450 c->req.cmd = &c->cmd_pdu->cmd; 1451 1452 c->rsp_pdu = page_frag_alloc(&queue->pf_cache, 1453 sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); 1454 if (!c->rsp_pdu) 1455 goto out_free_cmd; 1456 c->req.cqe = &c->rsp_pdu->cqe; 1457 1458 c->data_pdu = page_frag_alloc(&queue->pf_cache, 1459 sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); 1460 if (!c->data_pdu) 1461 goto out_free_rsp; 1462 1463 c->r2t_pdu = page_frag_alloc(&queue->pf_cache, 1464 sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); 1465 if (!c->r2t_pdu) 1466 goto out_free_data; 1467 1468 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { 1469 c->recv_msg.msg_control = c->recv_cbuf; 1470 c->recv_msg.msg_controllen = sizeof(c->recv_cbuf); 1471 } 1472 c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; 1473 1474 list_add_tail(&c->entry, &queue->free_list); 1475 1476 return 0; 1477 out_free_data: 1478 page_frag_free(c->data_pdu); 1479 out_free_rsp: 1480 page_frag_free(c->rsp_pdu); 1481 out_free_cmd: 1482 page_frag_free(c->cmd_pdu); 1483 return -ENOMEM; 1484 } 1485 1486 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c) 1487 { 1488 page_frag_free(c->r2t_pdu); 1489 page_frag_free(c->data_pdu); 1490 page_frag_free(c->rsp_pdu); 1491 page_frag_free(c->cmd_pdu); 1492 } 1493 1494 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue) 1495 { 1496 struct nvmet_tcp_cmd *cmds; 1497 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds; 1498 1499 cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL); 1500 if (!cmds) 1501 goto out; 1502 1503 for (i = 0; i < nr_cmds; i++) { 1504 ret = nvmet_tcp_alloc_cmd(queue, cmds + i); 1505 if (ret) 1506 goto out_free; 1507 } 1508 1509 queue->cmds = cmds; 1510 1511 return 0; 1512 out_free: 1513 while (--i >= 0) 1514 nvmet_tcp_free_cmd(cmds + i); 1515 kvfree(cmds); 1516 out: 1517 return ret; 1518 } 1519 1520 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue) 1521 { 1522 struct nvmet_tcp_cmd *cmds = queue->cmds; 1523 int i; 1524 1525 for (i = 0; i < queue->nr_cmds; i++) 1526 nvmet_tcp_free_cmd(cmds + i); 1527 1528 nvmet_tcp_free_cmd(&queue->connect); 1529 kvfree(cmds); 1530 } 1531 1532 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) 1533 { 1534 struct socket *sock = queue->sock; 1535 1536 if (!queue->state_change) 1537 return; 1538 1539 write_lock_bh(&sock->sk->sk_callback_lock); 1540 sock->sk->sk_data_ready = queue->data_ready; 1541 sock->sk->sk_state_change = queue->state_change; 1542 sock->sk->sk_write_space = queue->write_space; 1543 sock->sk->sk_user_data = NULL; 1544 write_unlock_bh(&sock->sk->sk_callback_lock); 1545 } 1546 1547 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) 1548 { 1549 struct nvmet_tcp_cmd *cmd = queue->cmds; 1550 int i; 1551 1552 for (i = 0; i < queue->nr_cmds; i++, cmd++) { 1553 if (nvmet_tcp_need_data_in(cmd)) 1554 nvmet_req_uninit(&cmd->req); 1555 } 1556 1557 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) { 1558 /* failed in connect */ 1559 
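		/*
		 * queue->connect is allocated separately from queue->cmds and
		 * is therefore not covered by the loop above.
		 */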
nvmet_req_uninit(&queue->connect.req); 1560 } 1561 } 1562 1563 static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue) 1564 { 1565 struct nvmet_tcp_cmd *cmd = queue->cmds; 1566 int i; 1567 1568 for (i = 0; i < queue->nr_cmds; i++, cmd++) 1569 nvmet_tcp_free_cmd_buffers(cmd); 1570 nvmet_tcp_free_cmd_buffers(&queue->connect); 1571 } 1572 1573 static void nvmet_tcp_release_queue_work(struct work_struct *w) 1574 { 1575 struct nvmet_tcp_queue *queue = 1576 container_of(w, struct nvmet_tcp_queue, release_work); 1577 1578 mutex_lock(&nvmet_tcp_queue_mutex); 1579 list_del_init(&queue->queue_list); 1580 mutex_unlock(&nvmet_tcp_queue_mutex); 1581 1582 nvmet_tcp_restore_socket_callbacks(queue); 1583 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work); 1584 cancel_work_sync(&queue->io_work); 1585 /* stop accepting incoming data */ 1586 queue->rcv_state = NVMET_TCP_RECV_ERR; 1587 1588 nvmet_sq_put_tls_key(&queue->nvme_sq); 1589 nvmet_tcp_uninit_data_in_cmds(queue); 1590 nvmet_sq_destroy(&queue->nvme_sq); 1591 nvmet_cq_put(&queue->nvme_cq); 1592 cancel_work_sync(&queue->io_work); 1593 nvmet_tcp_free_cmd_data_in_buffers(queue); 1594 /* ->sock will be released by fput() */ 1595 fput(queue->sock->file); 1596 nvmet_tcp_free_cmds(queue); 1597 ida_free(&nvmet_tcp_queue_ida, queue->idx); 1598 page_frag_cache_drain(&queue->pf_cache); 1599 kfree(queue); 1600 } 1601 1602 static void nvmet_tcp_data_ready(struct sock *sk) 1603 { 1604 struct nvmet_tcp_queue *queue; 1605 1606 trace_sk_data_ready(sk); 1607 1608 read_lock_bh(&sk->sk_callback_lock); 1609 queue = sk->sk_user_data; 1610 if (likely(queue)) { 1611 if (queue->data_ready) 1612 queue->data_ready(sk); 1613 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) 1614 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, 1615 &queue->io_work); 1616 } 1617 read_unlock_bh(&sk->sk_callback_lock); 1618 } 1619 1620 static void nvmet_tcp_write_space(struct sock *sk) 1621 { 1622 struct nvmet_tcp_queue *queue; 1623 1624 read_lock_bh(&sk->sk_callback_lock); 1625 queue = sk->sk_user_data; 1626 if (unlikely(!queue)) 1627 goto out; 1628 1629 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { 1630 queue->write_space(sk); 1631 goto out; 1632 } 1633 1634 if (sk_stream_is_writeable(sk)) { 1635 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1636 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); 1637 } 1638 out: 1639 read_unlock_bh(&sk->sk_callback_lock); 1640 } 1641 1642 static void nvmet_tcp_state_change(struct sock *sk) 1643 { 1644 struct nvmet_tcp_queue *queue; 1645 1646 read_lock_bh(&sk->sk_callback_lock); 1647 queue = sk->sk_user_data; 1648 if (!queue) 1649 goto done; 1650 1651 switch (sk->sk_state) { 1652 case TCP_FIN_WAIT2: 1653 case TCP_LAST_ACK: 1654 break; 1655 case TCP_FIN_WAIT1: 1656 case TCP_CLOSE_WAIT: 1657 case TCP_CLOSE: 1658 /* FALLTHRU */ 1659 nvmet_tcp_schedule_release_queue(queue); 1660 break; 1661 default: 1662 pr_warn("queue %d unhandled state %d\n", 1663 queue->idx, sk->sk_state); 1664 } 1665 done: 1666 read_unlock_bh(&sk->sk_callback_lock); 1667 } 1668 1669 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) 1670 { 1671 struct socket *sock = queue->sock; 1672 struct inet_sock *inet = inet_sk(sock->sk); 1673 int ret; 1674 1675 ret = kernel_getsockname(sock, 1676 (struct sockaddr *)&queue->sockaddr); 1677 if (ret < 0) 1678 return ret; 1679 1680 ret = kernel_getpeername(sock, 1681 (struct sockaddr *)&queue->sockaddr_peer); 1682 if (ret < 0) 1683 return ret; 1684 1685 /* 1686 * Cleanup whatever is sitting in the TCP 
transmit queue on socket 1687 * close. This is done to prevent stale data from being sent should 1688 * the network connection be restored before TCP times out. 1689 */ 1690 sock_no_linger(sock->sk); 1691 1692 if (so_priority > 0) 1693 sock_set_priority(sock->sk, so_priority); 1694 1695 /* Set socket type of service */ 1696 if (inet->rcv_tos > 0) 1697 ip_sock_set_tos(sock->sk, inet->rcv_tos); 1698 1699 ret = 0; 1700 write_lock_bh(&sock->sk->sk_callback_lock); 1701 if (sock->sk->sk_state != TCP_ESTABLISHED) { 1702 /* 1703 * If the socket is already closing, don't even start 1704 * consuming it 1705 */ 1706 ret = -ENOTCONN; 1707 } else { 1708 sock->sk->sk_user_data = queue; 1709 queue->data_ready = sock->sk->sk_data_ready; 1710 sock->sk->sk_data_ready = nvmet_tcp_data_ready; 1711 queue->state_change = sock->sk->sk_state_change; 1712 sock->sk->sk_state_change = nvmet_tcp_state_change; 1713 queue->write_space = sock->sk->sk_write_space; 1714 sock->sk->sk_write_space = nvmet_tcp_write_space; 1715 if (idle_poll_period_usecs) 1716 nvmet_tcp_arm_queue_deadline(queue); 1717 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); 1718 } 1719 write_unlock_bh(&sock->sk->sk_callback_lock); 1720 1721 return ret; 1722 } 1723 1724 #ifdef CONFIG_NVME_TARGET_TCP_TLS 1725 static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue) 1726 { 1727 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; 1728 int len, ret; 1729 struct kvec iov = { 1730 .iov_base = (u8 *)&queue->pdu + queue->offset, 1731 .iov_len = sizeof(struct nvme_tcp_hdr), 1732 }; 1733 char cbuf[CMSG_LEN(sizeof(char))] = {}; 1734 struct msghdr msg = { 1735 .msg_control = cbuf, 1736 .msg_controllen = sizeof(cbuf), 1737 .msg_flags = MSG_PEEK, 1738 }; 1739 1740 if (nvmet_port_secure_channel_required(queue->port->nport)) 1741 return 0; 1742 1743 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, 1744 iov.iov_len, msg.msg_flags); 1745 if (unlikely(len < 0)) { 1746 pr_debug("queue %d: peek error %d\n", 1747 queue->idx, len); 1748 return len; 1749 } 1750 1751 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf); 1752 if (ret < 0) 1753 return ret; 1754 1755 if (len < sizeof(struct nvme_tcp_hdr)) { 1756 pr_debug("queue %d: short read, %d bytes missing\n", 1757 queue->idx, (int)iov.iov_len - len); 1758 return -EAGAIN; 1759 } 1760 pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n", 1761 queue->idx, hdr->type, hdr->hlen, hdr->plen, 1762 (int)sizeof(struct nvme_tcp_icreq_pdu)); 1763 if (hdr->type == nvme_tcp_icreq && 1764 hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) && 1765 hdr->plen == cpu_to_le32(sizeof(struct nvme_tcp_icreq_pdu))) { 1766 pr_debug("queue %d: icreq detected\n", 1767 queue->idx); 1768 return len; 1769 } 1770 return 0; 1771 } 1772 1773 static int nvmet_tcp_tls_key_lookup(struct nvmet_tcp_queue *queue, 1774 key_serial_t peerid) 1775 { 1776 struct key *tls_key = nvme_tls_key_lookup(peerid); 1777 int status = 0; 1778 1779 if (IS_ERR(tls_key)) { 1780 pr_warn("%s: queue %d failed to lookup key %x\n", 1781 __func__, queue->idx, peerid); 1782 spin_lock_bh(&queue->state_lock); 1783 queue->state = NVMET_TCP_Q_FAILED; 1784 spin_unlock_bh(&queue->state_lock); 1785 status = PTR_ERR(tls_key); 1786 } else { 1787 pr_debug("%s: queue %d using TLS PSK %x\n", 1788 __func__, queue->idx, peerid); 1789 queue->nvme_sq.tls_key = tls_key; 1790 } 1791 return status; 1792 } 1793 1794 static void nvmet_tcp_tls_handshake_done(void *data, int status, 1795 key_serial_t peerid) 1796 { 1797 struct nvmet_tcp_queue *queue = data; 1798 1799 pr_debug("queue %d: TLS 
handshake done, key %x, status %d\n", 1800 queue->idx, peerid, status); 1801 spin_lock_bh(&queue->state_lock); 1802 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) { 1803 spin_unlock_bh(&queue->state_lock); 1804 return; 1805 } 1806 if (!status) { 1807 queue->tls_pskid = peerid; 1808 queue->state = NVMET_TCP_Q_CONNECTING; 1809 } else 1810 queue->state = NVMET_TCP_Q_FAILED; 1811 spin_unlock_bh(&queue->state_lock); 1812 1813 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work); 1814 1815 if (!status) 1816 status = nvmet_tcp_tls_key_lookup(queue, peerid); 1817 1818 if (status) 1819 nvmet_tcp_schedule_release_queue(queue); 1820 else 1821 nvmet_tcp_set_queue_sock(queue); 1822 kref_put(&queue->kref, nvmet_tcp_release_queue); 1823 } 1824 1825 static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) 1826 { 1827 struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w), 1828 struct nvmet_tcp_queue, tls_handshake_tmo_work); 1829 1830 pr_warn("queue %d: TLS handshake timeout\n", queue->idx); 1831 /* 1832 * If tls_handshake_cancel() fails we've lost the race with 1833 * nvmet_tcp_tls_handshake_done() */ 1834 if (!tls_handshake_cancel(queue->sock->sk)) 1835 return; 1836 spin_lock_bh(&queue->state_lock); 1837 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) { 1838 spin_unlock_bh(&queue->state_lock); 1839 return; 1840 } 1841 queue->state = NVMET_TCP_Q_FAILED; 1842 spin_unlock_bh(&queue->state_lock); 1843 nvmet_tcp_schedule_release_queue(queue); 1844 kref_put(&queue->kref, nvmet_tcp_release_queue); 1845 } 1846 1847 static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue) 1848 { 1849 int ret = -EOPNOTSUPP; 1850 struct tls_handshake_args args; 1851 1852 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) { 1853 pr_warn("cannot start TLS in state %d\n", queue->state); 1854 return -EINVAL; 1855 } 1856 1857 kref_get(&queue->kref); 1858 pr_debug("queue %d: TLS ServerHello\n", queue->idx); 1859 memset(&args, 0, sizeof(args)); 1860 args.ta_sock = queue->sock; 1861 args.ta_done = nvmet_tcp_tls_handshake_done; 1862 args.ta_data = queue; 1863 args.ta_keyring = key_serial(queue->port->nport->keyring); 1864 args.ta_timeout_ms = tls_handshake_timeout * 1000; 1865 1866 ret = tls_server_hello_psk(&args, GFP_KERNEL); 1867 if (ret) { 1868 kref_put(&queue->kref, nvmet_tcp_release_queue); 1869 pr_err("failed to start TLS, err=%d\n", ret); 1870 } else { 1871 queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work, 1872 tls_handshake_timeout * HZ); 1873 } 1874 return ret; 1875 } 1876 #else 1877 static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {} 1878 #endif 1879 1880 static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, 1881 struct socket *newsock) 1882 { 1883 struct nvmet_tcp_queue *queue; 1884 struct file *sock_file = NULL; 1885 int ret; 1886 1887 queue = kzalloc(sizeof(*queue), GFP_KERNEL); 1888 if (!queue) { 1889 ret = -ENOMEM; 1890 goto out_release; 1891 } 1892 1893 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work); 1894 INIT_WORK(&queue->io_work, nvmet_tcp_io_work); 1895 kref_init(&queue->kref); 1896 queue->sock = newsock; 1897 queue->port = port; 1898 queue->nr_cmds = 0; 1899 spin_lock_init(&queue->state_lock); 1900 if (queue->port->nport->disc_addr.tsas.tcp.sectype == 1901 NVMF_TCP_SECTYPE_TLS13) 1902 queue->state = NVMET_TCP_Q_TLS_HANDSHAKE; 1903 else 1904 queue->state = NVMET_TCP_Q_CONNECTING; 1905 INIT_LIST_HEAD(&queue->free_list); 1906 init_llist_head(&queue->resp_list); 1907 INIT_LIST_HEAD(&queue->resp_send_list); 1908 1909 sock_file 
= sock_alloc_file(queue->sock, O_CLOEXEC, NULL); 1910 if (IS_ERR(sock_file)) { 1911 ret = PTR_ERR(sock_file); 1912 goto out_free_queue; 1913 } 1914 1915 queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL); 1916 if (queue->idx < 0) { 1917 ret = queue->idx; 1918 goto out_sock; 1919 } 1920 1921 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect); 1922 if (ret) 1923 goto out_ida_remove; 1924 1925 nvmet_cq_init(&queue->nvme_cq); 1926 ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); 1927 if (ret) 1928 goto out_free_connect; 1929 1930 nvmet_prepare_receive_pdu(queue); 1931 1932 mutex_lock(&nvmet_tcp_queue_mutex); 1933 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list); 1934 mutex_unlock(&nvmet_tcp_queue_mutex); 1935 1936 INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work, 1937 nvmet_tcp_tls_handshake_timeout); 1938 #ifdef CONFIG_NVME_TARGET_TCP_TLS 1939 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) { 1940 struct sock *sk = queue->sock->sk; 1941 1942 /* Restore the default callbacks before starting upcall */ 1943 write_lock_bh(&sk->sk_callback_lock); 1944 sk->sk_user_data = NULL; 1945 sk->sk_data_ready = port->data_ready; 1946 write_unlock_bh(&sk->sk_callback_lock); 1947 if (!nvmet_tcp_try_peek_pdu(queue)) { 1948 if (!nvmet_tcp_tls_handshake(queue)) 1949 return; 1950 /* TLS handshake failed, terminate the connection */ 1951 goto out_destroy_sq; 1952 } 1953 /* Not a TLS connection, continue with normal processing */ 1954 queue->state = NVMET_TCP_Q_CONNECTING; 1955 } 1956 #endif 1957 1958 ret = nvmet_tcp_set_queue_sock(queue); 1959 if (ret) 1960 goto out_destroy_sq; 1961 1962 return; 1963 out_destroy_sq: 1964 mutex_lock(&nvmet_tcp_queue_mutex); 1965 list_del_init(&queue->queue_list); 1966 mutex_unlock(&nvmet_tcp_queue_mutex); 1967 nvmet_sq_destroy(&queue->nvme_sq); 1968 out_free_connect: 1969 nvmet_cq_put(&queue->nvme_cq); 1970 nvmet_tcp_free_cmd(&queue->connect); 1971 out_ida_remove: 1972 ida_free(&nvmet_tcp_queue_ida, queue->idx); 1973 out_sock: 1974 fput(queue->sock->file); 1975 out_free_queue: 1976 kfree(queue); 1977 out_release: 1978 pr_err("failed to allocate queue, error %d\n", ret); 1979 if (!sock_file) 1980 sock_release(newsock); 1981 } 1982 1983 static void nvmet_tcp_accept_work(struct work_struct *w) 1984 { 1985 struct nvmet_tcp_port *port = 1986 container_of(w, struct nvmet_tcp_port, accept_work); 1987 struct socket *newsock; 1988 int ret; 1989 1990 while (true) { 1991 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK); 1992 if (ret < 0) { 1993 if (ret != -EAGAIN) 1994 pr_warn("failed to accept err=%d\n", ret); 1995 return; 1996 } 1997 nvmet_tcp_alloc_queue(port, newsock); 1998 } 1999 } 2000 2001 static void nvmet_tcp_listen_data_ready(struct sock *sk) 2002 { 2003 struct nvmet_tcp_port *port; 2004 2005 trace_sk_data_ready(sk); 2006 2007 if (sk->sk_state != TCP_LISTEN) 2008 return; 2009 2010 read_lock_bh(&sk->sk_callback_lock); 2011 port = sk->sk_user_data; 2012 if (port) 2013 queue_work(nvmet_wq, &port->accept_work); 2014 read_unlock_bh(&sk->sk_callback_lock); 2015 } 2016 2017 static int nvmet_tcp_add_port(struct nvmet_port *nport) 2018 { 2019 struct nvmet_tcp_port *port; 2020 __kernel_sa_family_t af; 2021 int ret; 2022 2023 port = kzalloc(sizeof(*port), GFP_KERNEL); 2024 if (!port) 2025 return -ENOMEM; 2026 2027 switch (nport->disc_addr.adrfam) { 2028 case NVMF_ADDR_FAMILY_IP4: 2029 af = AF_INET; 2030 break; 2031 case NVMF_ADDR_FAMILY_IP6: 2032 af = AF_INET6; 2033 break; 2034 default: 2035 pr_err("address family %d not supported\n", 2036 nport->disc_addr.adrfam); 2037 
ret = -EINVAL;
		goto err_port;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto err_port;
	}

	port->nport = nport;
	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
	if (port->nport->inline_data_size < 0)
		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
				IPPROTO_TCP, &port->sock);
	if (ret) {
		pr_err("failed to create a socket\n");
		goto err_port;
	}

	port->sock->sk->sk_user_data = port;
	port->data_ready = port->sock->sk->sk_data_ready;
	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
	sock_set_reuseaddr(port->sock->sk);
	tcp_sock_set_nodelay(port->sock->sk);
	if (so_priority > 0)
		sock_set_priority(port->sock->sk, so_priority);

	ret = kernel_bind(port->sock, (struct sockaddr_unsized *)&port->addr,
			sizeof(port->addr));
	if (ret) {
		pr_err("failed to bind port socket %d\n", ret);
		goto err_sock;
	}

	ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
	if (ret) {
		pr_err("failed to listen %d on port sock\n", ret);
		goto err_sock;
	}

	nport->priv = port;
	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(nport->disc_addr.portid), &port->addr);

	return 0;

err_sock:
	sock_release(port->sock);
err_port:
	kfree(port);
	return ret;
}

static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->port == port)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port = nport->priv;

	write_lock_bh(&port->sock->sk->sk_callback_lock);
	port->sock->sk->sk_data_ready = port->data_ready;
	port->sock->sk->sk_user_data = NULL;
	write_unlock_bh(&port->sock->sk->sk_callback_lock);
	cancel_work_sync(&port->accept_work);
	/*
	 * Destroy the remaining queues, which do not belong to any
	 * controller yet.
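	 * Queues that already made it to a controller are torn down via
	 * nvmet_tcp_delete_ctrl() when the controller itself goes away.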
2118 */ 2119 nvmet_tcp_destroy_port_queues(port); 2120 2121 sock_release(port->sock); 2122 kfree(port); 2123 } 2124 2125 static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl) 2126 { 2127 struct nvmet_tcp_queue *queue; 2128 2129 mutex_lock(&nvmet_tcp_queue_mutex); 2130 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) 2131 if (queue->nvme_sq.ctrl == ctrl) 2132 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 2133 mutex_unlock(&nvmet_tcp_queue_mutex); 2134 } 2135 2136 static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq) 2137 { 2138 struct nvmet_tcp_queue *queue = 2139 container_of(sq, struct nvmet_tcp_queue, nvme_sq); 2140 2141 if (sq->qid == 0) { 2142 struct nvmet_tcp_queue *q; 2143 int pending = 0; 2144 2145 /* Check for pending controller teardown */ 2146 mutex_lock(&nvmet_tcp_queue_mutex); 2147 list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) { 2148 if (q->nvme_sq.ctrl == sq->ctrl && 2149 q->state == NVMET_TCP_Q_DISCONNECTING) 2150 pending++; 2151 } 2152 mutex_unlock(&nvmet_tcp_queue_mutex); 2153 if (pending > NVMET_TCP_BACKLOG) 2154 return NVME_SC_CONNECT_CTRL_BUSY; 2155 } 2156 2157 queue->nr_cmds = sq->size * 2; 2158 if (nvmet_tcp_alloc_cmds(queue)) { 2159 queue->nr_cmds = 0; 2160 return NVME_SC_INTERNAL; 2161 } 2162 return 0; 2163 } 2164 2165 static void nvmet_tcp_disc_port_addr(struct nvmet_req *req, 2166 struct nvmet_port *nport, char *traddr) 2167 { 2168 struct nvmet_tcp_port *port = nport->priv; 2169 2170 if (inet_addr_is_any(&port->addr)) { 2171 struct nvmet_tcp_cmd *cmd = 2172 container_of(req, struct nvmet_tcp_cmd, req); 2173 struct nvmet_tcp_queue *queue = cmd->queue; 2174 2175 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr); 2176 } else { 2177 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE); 2178 } 2179 } 2180 2181 static ssize_t nvmet_tcp_host_port_addr(struct nvmet_ctrl *ctrl, 2182 char *traddr, size_t traddr_len) 2183 { 2184 struct nvmet_sq *sq = ctrl->sqs[0]; 2185 struct nvmet_tcp_queue *queue = 2186 container_of(sq, struct nvmet_tcp_queue, nvme_sq); 2187 2188 if (queue->sockaddr_peer.ss_family == AF_UNSPEC) 2189 return -EINVAL; 2190 return snprintf(traddr, traddr_len, "%pISc", 2191 (struct sockaddr *)&queue->sockaddr_peer); 2192 } 2193 2194 static const struct nvmet_fabrics_ops nvmet_tcp_ops = { 2195 .owner = THIS_MODULE, 2196 .type = NVMF_TRTYPE_TCP, 2197 .msdbd = 1, 2198 .add_port = nvmet_tcp_add_port, 2199 .remove_port = nvmet_tcp_remove_port, 2200 .queue_response = nvmet_tcp_queue_response, 2201 .delete_ctrl = nvmet_tcp_delete_ctrl, 2202 .install_queue = nvmet_tcp_install_queue, 2203 .disc_traddr = nvmet_tcp_disc_port_addr, 2204 .host_traddr = nvmet_tcp_host_port_addr, 2205 }; 2206 2207 static int __init nvmet_tcp_init(void) 2208 { 2209 int ret; 2210 2211 nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", 2212 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 2213 if (!nvmet_tcp_wq) 2214 return -ENOMEM; 2215 2216 ret = nvmet_register_transport(&nvmet_tcp_ops); 2217 if (ret) 2218 goto err; 2219 2220 return 0; 2221 err: 2222 destroy_workqueue(nvmet_tcp_wq); 2223 return ret; 2224 } 2225 2226 static void __exit nvmet_tcp_exit(void) 2227 { 2228 struct nvmet_tcp_queue *queue; 2229 2230 nvmet_unregister_transport(&nvmet_tcp_ops); 2231 2232 flush_workqueue(nvmet_wq); 2233 mutex_lock(&nvmet_tcp_queue_mutex); 2234 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) 2235 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 2236 mutex_unlock(&nvmet_tcp_queue_mutex); 2237 flush_workqueue(nvmet_wq); 2238 2239 destroy_workqueue(nvmet_tcp_wq); 2240 
ida_destroy(&nvmet_tcp_queue_ida); 2241 } 2242 2243 module_init(nvmet_tcp_init); 2244 module_exit(nvmet_tcp_exit); 2245 2246 MODULE_DESCRIPTION("NVMe target TCP transport driver"); 2247 MODULE_LICENSE("GPL v2"); 2248 MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */ 2249
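
/*
 * Usage sketch: one way to expose a namespace over this transport through
 * the nvmet configfs interface.  The subsystem NQN, namespace id, block
 * device and address below are illustrative placeholders.
 *
 *	modprobe nvmet-tcp
 *	cd /sys/kernel/config/nvmet
 *	mkdir -p subsystems/testnqn/namespaces/1
 *	echo 1 > subsystems/testnqn/attr_allow_any_host
 *	echo /dev/nvme0n1 > subsystems/testnqn/namespaces/1/device_path
 *	echo 1 > subsystems/testnqn/namespaces/1/enable
 *	mkdir ports/1
 *	echo tcp      > ports/1/addr_trtype
 *	echo ipv4     > ports/1/addr_adrfam
 *	echo 10.0.0.1 > ports/1/addr_traddr
 *	echo 4420     > ports/1/addr_trsvcid
 *	ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *		ports/1/subsystems/testnqn
 *
 * Linking the subsystem enables the port, which ends up calling
 * nvmet_tcp_add_port() above and starts listening for host connections.
 */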